Diffstat (limited to 'erts/emulator/beam')
-rw-r--r--  erts/emulator/beam/arith_instrs.tab | 396
-rw-r--r--  erts/emulator/beam/atom.c | 119
-rw-r--r--  erts/emulator/beam/atom.h | 35
-rw-r--r--  erts/emulator/beam/atom.names | 168
-rw-r--r--  erts/emulator/beam/beam_bif_load.c | 1568
-rw-r--r--  erts/emulator/beam/beam_bp.c | 908
-rw-r--r--  erts/emulator/beam/beam_bp.h | 132
-rw-r--r--  erts/emulator/beam/beam_catches.c | 27
-rw-r--r--  erts/emulator/beam/beam_catches.h | 21
-rw-r--r--  erts/emulator/beam/beam_debug.c | 936
-rw-r--r--  erts/emulator/beam/beam_emu.c | 6052
-rw-r--r--  erts/emulator/beam/beam_load.c | 3021
-rw-r--r--  erts/emulator/beam/beam_load.h | 196
-rw-r--r--  erts/emulator/beam/beam_ranges.c | 287
-rw-r--r--  erts/emulator/beam/benchmark.c | 363
-rw-r--r--  erts/emulator/beam/benchmark.h | 329
-rw-r--r--  erts/emulator/beam/bif.c | 2445
-rw-r--r--  erts/emulator/beam/bif.h | 307
-rw-r--r--  erts/emulator/beam/bif.tab | 183
-rw-r--r--  erts/emulator/beam/bif_instrs.tab | 539
-rw-r--r--  erts/emulator/beam/big.c | 615
-rw-r--r--  erts/emulator/beam/big.h | 81
-rw-r--r--  erts/emulator/beam/binary.c | 1270
-rw-r--r--  erts/emulator/beam/break.c | 494
-rw-r--r--  erts/emulator/beam/bs_instrs.tab | 1021
-rw-r--r--  erts/emulator/beam/code_ix.c | 65
-rw-r--r--  erts/emulator/beam/code_ix.h | 113
-rw-r--r--  erts/emulator/beam/copy.c | 1637
-rw-r--r--  erts/emulator/beam/dist.c | 1981
-rw-r--r--  erts/emulator/beam/dist.h | 160
-rw-r--r--  erts/emulator/beam/dtrace-wrapper.h | 23
-rw-r--r--  erts/emulator/beam/elib_memmove.c | 23
-rw-r--r--  erts/emulator/beam/erl_afit_alloc.c | 29
-rw-r--r--  erts/emulator/beam/erl_afit_alloc.h | 23
-rw-r--r--  erts/emulator/beam/erl_alloc.c | 856
-rw-r--r--  erts/emulator/beam/erl_alloc.h | 229
-rw-r--r--  erts/emulator/beam/erl_alloc.types | 184
-rw-r--r--  erts/emulator/beam/erl_alloc_util.c | 1250
-rw-r--r--  erts/emulator/beam/erl_alloc_util.h | 160
-rw-r--r--  erts/emulator/beam/erl_ao_firstfit_alloc.c | 53
-rw-r--r--  erts/emulator/beam/erl_ao_firstfit_alloc.h | 23
-rw-r--r--  erts/emulator/beam/erl_arith.c | 57
-rw-r--r--  erts/emulator/beam/erl_async.c | 165
-rw-r--r--  erts/emulator/beam/erl_async.h | 48
-rw-r--r--  erts/emulator/beam/erl_bestfit_alloc.c | 29
-rw-r--r--  erts/emulator/beam/erl_bestfit_alloc.h | 23
-rw-r--r--  erts/emulator/beam/erl_bif_binary.c | 1449
-rw-r--r--  erts/emulator/beam/erl_bif_chksum.c | 21
-rw-r--r--  erts/emulator/beam/erl_bif_ddll.c | 214
-rw-r--r--  erts/emulator/beam/erl_bif_guard.c | 127
-rw-r--r-- [-rwxr-xr-x]  erts/emulator/beam/erl_bif_info.c | 2073
-rw-r--r--  erts/emulator/beam/erl_bif_lists.c | 124
-rw-r--r--  erts/emulator/beam/erl_bif_op.c | 37
-rw-r--r--  erts/emulator/beam/erl_bif_os.c | 112
-rw-r--r--  erts/emulator/beam/erl_bif_port.c | 368
-rw-r--r--  erts/emulator/beam/erl_bif_re.c | 90
-rw-r--r--  erts/emulator/beam/erl_bif_timer.c | 705
-rw-r--r--  erts/emulator/beam/erl_bif_timer.h | 36
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c | 1172
-rw-r--r--  erts/emulator/beam/erl_bif_unique.c | 847
-rw-r--r--  erts/emulator/beam/erl_bif_unique.h | 440
-rw-r--r--  erts/emulator/beam/erl_binary.h | 355
-rw-r--r--  erts/emulator/beam/erl_bits.c | 149
-rw-r--r--  erts/emulator/beam/erl_bits.h | 82
-rw-r--r--  erts/emulator/beam/erl_cpu_topology.c | 192
-rw-r--r--  erts/emulator/beam/erl_cpu_topology.h | 31
-rw-r--r--  erts/emulator/beam/erl_db.c | 2539
-rw-r--r--  erts/emulator/beam/erl_db.h | 96
-rw-r--r--  erts/emulator/beam/erl_db_hash.c | 3094
-rw-r--r--  erts/emulator/beam/erl_db_hash.h | 63
-rw-r--r--  erts/emulator/beam/erl_db_tree.c | 939
-rw-r--r--  erts/emulator/beam/erl_db_tree.h | 25
-rw-r--r--  erts/emulator/beam/erl_db_util.c | 1793
-rw-r--r--  erts/emulator/beam/erl_db_util.h | 231
-rw-r--r--  erts/emulator/beam/erl_debug.c | 87
-rw-r--r--  erts/emulator/beam/erl_debug.h | 26
-rw-r--r--  erts/emulator/beam/erl_dirty_bif.tab | 87
-rw-r--r--  erts/emulator/beam/erl_driver.h | 213
-rw-r--r--  erts/emulator/beam/erl_drv_nif.h | 143
-rw-r--r--  erts/emulator/beam/erl_drv_thread.c | 236
-rw-r--r--  erts/emulator/beam/erl_fun.c | 127
-rw-r--r--  erts/emulator/beam/erl_fun.h | 39
-rw-r--r--  erts/emulator/beam/erl_gc.c | 2424
-rw-r--r--  erts/emulator/beam/erl_gc.h | 199
-rw-r--r--  erts/emulator/beam/erl_goodfit_alloc.c | 29
-rw-r--r--  erts/emulator/beam/erl_goodfit_alloc.h | 21
-rw-r--r--  erts/emulator/beam/erl_hl_timer.c | 3516
-rw-r--r--  erts/emulator/beam/erl_hl_timer.h | 86
-rw-r--r--  erts/emulator/beam/erl_init.c | 740
-rw-r--r--  erts/emulator/beam/erl_instrument.c | 103
-rw-r--r--  erts/emulator/beam/erl_instrument.h | 27
-rw-r--r--  erts/emulator/beam/erl_io_queue.c | 1231
-rw-r--r--  erts/emulator/beam/erl_io_queue.h | 201
-rw-r--r--  erts/emulator/beam/erl_lock_check.c | 371
-rw-r--r--  erts/emulator/beam/erl_lock_check.h | 79
-rw-r--r--  erts/emulator/beam/erl_lock_count.c | 991
-rw-r--r--  erts/emulator/beam/erl_lock_count.h | 1024
-rw-r--r--  erts/emulator/beam/erl_lock_flags.c | 59
-rw-r--r--  erts/emulator/beam/erl_lock_flags.h | 78
-rw-r--r--  erts/emulator/beam/erl_map.c | 3175
-rw-r--r--  erts/emulator/beam/erl_map.h | 199
-rw-r--r--  erts/emulator/beam/erl_math.c | 54
-rw-r--r--  erts/emulator/beam/erl_message.c | 2154
-rw-r--r--  erts/emulator/beam/erl_message.h | 482
-rw-r--r--  erts/emulator/beam/erl_monitors.c | 133
-rw-r--r--  erts/emulator/beam/erl_monitors.h | 43
-rw-r--r--  erts/emulator/beam/erl_msacc.c | 468
-rw-r--r--  erts/emulator/beam/erl_msacc.h | 439
-rw-r--r--  erts/emulator/beam/erl_mtrace.c | 36
-rw-r--r--  erts/emulator/beam/erl_mtrace.h | 23
-rw-r--r--  erts/emulator/beam/erl_nfunc_sched.c | 180
-rw-r--r--  erts/emulator/beam/erl_nfunc_sched.h | 328
-rw-r--r--  erts/emulator/beam/erl_nif.c | 3008
-rw-r--r--  erts/emulator/beam/erl_nif.h | 245
-rw-r--r--  erts/emulator/beam/erl_nif_api_funcs.h | 134
-rw-r--r--  erts/emulator/beam/erl_node_container_utils.h | 64
-rw-r--r--  erts/emulator/beam/erl_node_tables.c | 978
-rw-r--r--  erts/emulator/beam/erl_node_tables.h | 141
-rw-r--r--  erts/emulator/beam/erl_port.h | 361
-rw-r--r--  erts/emulator/beam/erl_port_task.c | 559
-rw-r--r--  erts/emulator/beam/erl_port_task.h | 100
-rw-r--r--  erts/emulator/beam/erl_printf_term.c | 278
-rw-r--r--  erts/emulator/beam/erl_printf_term.h | 26
-rw-r--r--  erts/emulator/beam/erl_process.c | 9336
-rw-r--r--  erts/emulator/beam/erl_process.h | 1329
-rw-r--r--  erts/emulator/beam/erl_process_dict.c | 351
-rw-r--r--  erts/emulator/beam/erl_process_dict.h | 41
-rw-r--r--  erts/emulator/beam/erl_process_dump.c | 635
-rw-r--r--  erts/emulator/beam/erl_process_lock.c | 565
-rw-r--r--  erts/emulator/beam/erl_process_lock.h | 442
-rw-r--r--  erts/emulator/beam/erl_ptab.c | 283
-rw-r--r--  erts/emulator/beam/erl_ptab.h | 189
-rw-r--r--  erts/emulator/beam/erl_rbtree.h | 1757
-rw-r--r--  erts/emulator/beam/erl_sched_spec_pre_alloc.c | 55
-rw-r--r--  erts/emulator/beam/erl_sched_spec_pre_alloc.h | 36
-rw-r--r--  erts/emulator/beam/erl_smp.h | 1417
-rw-r--r--  erts/emulator/beam/erl_sock.h | 23
-rw-r--r--  erts/emulator/beam/erl_sys_driver.h | 23
-rw-r--r--  erts/emulator/beam/erl_term.c | 170
-rw-r--r--  erts/emulator/beam/erl_term.h | 654
-rw-r--r--  erts/emulator/beam/erl_thr_progress.c | 200
-rw-r--r--  erts/emulator/beam/erl_thr_progress.h | 102
-rw-r--r--  erts/emulator/beam/erl_thr_queue.c | 166
-rw-r--r--  erts/emulator/beam/erl_thr_queue.h | 50
-rw-r--r--  erts/emulator/beam/erl_threads.h | 1737
-rw-r--r--  erts/emulator/beam/erl_time.h | 559
-rw-r--r--  erts/emulator/beam/erl_time_sup.c | 2466
-rw-r--r--  erts/emulator/beam/erl_trace.c | 3959
-rw-r--r--  erts/emulator/beam/erl_trace.h | 169
-rw-r--r--  erts/emulator/beam/erl_unicode.c | 278
-rw-r--r--  erts/emulator/beam/erl_unicode.h | 23
-rw-r--r--  erts/emulator/beam/erl_unicode_normalize.h | 35
-rw-r--r--  erts/emulator/beam/erl_utils.h | 196
-rw-r--r--  erts/emulator/beam/erl_vm.h | 94
-rw-r--r--  erts/emulator/beam/erl_zlib.c | 23
-rw-r--r--  erts/emulator/beam/erl_zlib.h | 23
-rw-r--r--  erts/emulator/beam/erlang_dtrace.d | 50
-rw-r--r--  erts/emulator/beam/erlang_lttng.c | 32
-rw-r--r--  erts/emulator/beam/erlang_lttng.h | 409
-rw-r--r--  erts/emulator/beam/error.h | 118
-rw-r--r--  erts/emulator/beam/export.c | 102
-rw-r--r--  erts/emulator/beam/export.h | 53
-rw-r--r--  erts/emulator/beam/external.c | 1513
-rw-r--r--  erts/emulator/beam/external.h | 52
-rw-r--r--  erts/emulator/beam/float_instrs.tab | 88
-rw-r--r-- [-rwxr-xr-x]  erts/emulator/beam/global.h | 1209
-rw-r--r--  erts/emulator/beam/hash.c | 188
-rw-r--r--  erts/emulator/beam/hash.h | 58
-rw-r--r--  erts/emulator/beam/index.c | 38
-rw-r--r--  erts/emulator/beam/index.h | 42
-rw-r--r--  erts/emulator/beam/instrs.tab | 926
-rw-r--r--  erts/emulator/beam/io.c | 3596
-rw-r--r--  erts/emulator/beam/lttng-wrapper.h | 107
-rw-r--r--  erts/emulator/beam/macros.tab | 174
-rw-r--r--  erts/emulator/beam/map_instrs.tab | 159
-rw-r--r--  erts/emulator/beam/module.c | 110
-rw-r--r--  erts/emulator/beam/module.h | 49
-rw-r--r--  erts/emulator/beam/msg_instrs.tab | 390
-rw-r--r--  erts/emulator/beam/ops.tab | 1421
-rw-r--r--  erts/emulator/beam/packet_parser.c | 28
-rw-r--r--  erts/emulator/beam/packet_parser.h | 26
-rw-r--r--  erts/emulator/beam/register.c | 203
-rw-r--r--  erts/emulator/beam/register.h | 27
-rw-r--r--  erts/emulator/beam/safe_hash.c | 99
-rw-r--r--  erts/emulator/beam/safe_hash.h | 43
-rw-r--r--  erts/emulator/beam/select_instrs.tab | 190
-rw-r--r--  erts/emulator/beam/sys.h | 585
-rw-r--r--  erts/emulator/beam/time.c | 1762
-rw-r--r--  erts/emulator/beam/trace_instrs.tab | 168
-rw-r--r--  erts/emulator/beam/utils.c | 3048
-rw-r--r--  erts/emulator/beam/version.h | 23
191 files changed, 73940 insertions, 42325 deletions
diff --git a/erts/emulator/beam/arith_instrs.tab b/erts/emulator/beam/arith_instrs.tab
new file mode 100644
index 0000000000..b828e86788
--- /dev/null
+++ b/erts/emulator/beam/arith_instrs.tab
@@ -0,0 +1,396 @@
+// -*- c -*-
+//
+// %CopyrightBegin%
+//
+// Copyright Ericsson AB 2017. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// %CopyrightEnd%
+//
+
+OUTLINED_ARITH_2(Fail, Live, Name, BIF, Op1, Op2, Dst) {
+ Eterm result;
+ Uint live = $Live;
+ HEAVY_SWAPOUT;
+ reg[live] = $Op1;
+ reg[live+1] = $Op2;
+ result = erts_gc_$Name (c_p, reg, live);
+ HEAVY_SWAPIN;
+ ERTS_HOLE_CHECK(c_p);
+ if (ERTS_LIKELY(is_value(result))) {
+ $REFRESH_GEN_DEST();
+ $Dst = result;
+ $NEXT0();
+ }
+ $BIF_ERROR_ARITY_2($Fail, $BIF, reg[live], reg[live+1]);
+}
+
+
+i_plus := plus.fetch.execute;
+
+plus.head() {
+ Eterm PlusOp1, PlusOp2;
+}
+
+plus.fetch(Op1, Op2) {
+ PlusOp1 = $Op1;
+ PlusOp2 = $Op2;
+}
+
+plus.execute(Fail, Live, Dst) {
+ if (ERTS_LIKELY(is_both_small(PlusOp1, PlusOp2))) {
+ Sint i = signed_val(PlusOp1) + signed_val(PlusOp2);
+ if (ERTS_LIKELY(IS_SSMALL(i))) {
+ $Dst = make_small(i);
+ $NEXT0();
+ }
+ }
+ $OUTLINED_ARITH_2($Fail, $Live, mixed_plus, BIF_splus_2, PlusOp1, PlusOp2, $Dst);
+}
+
+i_minus := minus.fetch.execute;
+
+minus.head() {
+ Eterm MinusOp1, MinusOp2;
+}
+
+minus.fetch(Op1, Op2) {
+ MinusOp1 = $Op1;
+ MinusOp2 = $Op2;
+}
+
+minus.execute(Fail, Live, Dst) {
+ if (ERTS_LIKELY(is_both_small(MinusOp1, MinusOp2))) {
+ Sint i = signed_val(MinusOp1) - signed_val(MinusOp2);
+ if (ERTS_LIKELY(IS_SSMALL(i))) {
+ $Dst = make_small(i);
+ $NEXT0();
+ }
+ }
+ $OUTLINED_ARITH_2($Fail, $Live, mixed_minus, BIF_sminus_2, MinusOp1, MinusOp2, $Dst);
+}
+
+i_increment := increment.fetch.execute;
+
+increment.head() {
+ Eterm increment_reg_val;
+}
+
+increment.fetch(Src) {
+ increment_reg_val = $Src;
+}
+
+increment.execute(IncrementVal, Live, Dst) {
+ Eterm increment_val = $IncrementVal;
+ Uint live;
+ Eterm result;
+
+ if (ERTS_LIKELY(is_small(increment_reg_val))) {
+ Sint i = signed_val(increment_reg_val) + increment_val;
+ if (ERTS_LIKELY(IS_SSMALL(i))) {
+ $Dst = make_small(i);
+ $NEXT0();
+ }
+ }
+ live = $Live;
+ HEAVY_SWAPOUT;
+ reg[live] = increment_reg_val;
+ reg[live+1] = make_small(increment_val);
+ result = erts_gc_mixed_plus(c_p, reg, live);
+ HEAVY_SWAPIN;
+ ERTS_HOLE_CHECK(c_p);
+ if (ERTS_LIKELY(is_value(result))) {
+ $REFRESH_GEN_DEST();
+ $Dst = result;
+ $NEXT0();
+ }
+ ASSERT(c_p->freason != BADMATCH || is_value(c_p->fvalue));
+ goto find_func_info;
+}
+
+i_times(Fail, Live, Op1, Op2, Dst) {
+ Eterm op1 = $Op1;
+ Eterm op2 = $Op2;
+ $OUTLINED_ARITH_2($Fail, $Live, mixed_times, BIF_stimes_2, op1, op2, $Dst);
+}
+
+i_m_div(Fail, Live, Op1, Op2, Dst) {
+ Eterm op1 = $Op1;
+ Eterm op2 = $Op2;
+ $OUTLINED_ARITH_2($Fail, $Live, mixed_div, BIF_div_2, op1, op2, $Dst);
+}
+
+i_int_div(Fail, Live, Op1, Op2, Dst) {
+ Eterm op1 = $Op1;
+ Eterm op2 = $Op2;
+ if (ERTS_UNLIKELY(op2 == SMALL_ZERO)) {
+ c_p->freason = BADARITH;
+ $BIF_ERROR_ARITY_2($Fail, BIF_intdiv_2, op1, op2);
+ } else if (ERTS_LIKELY(is_both_small(op1, op2))) {
+ Sint ires = signed_val(op1) / signed_val(op2);
+ if (ERTS_LIKELY(IS_SSMALL(ires))) {
+ $Dst = make_small(ires);
+ $NEXT0();
+ }
+ }
+ $OUTLINED_ARITH_2($Fail, $Live, int_div, BIF_intdiv_2, op1, op2, $Dst);
+}
+
+i_rem := rem.fetch.execute;
+
+rem.head() {
+ Eterm RemOp1, RemOp2;
+}
+
+rem.fetch(Src1, Src2) {
+ RemOp1 = $Src1;
+ RemOp2 = $Src2;
+}
+
+rem.execute(Fail, Live, Dst) {
+ if (ERTS_UNLIKELY(RemOp2 == SMALL_ZERO)) {
+ c_p->freason = BADARITH;
+ $BIF_ERROR_ARITY_2($Fail, BIF_rem_2, RemOp1, RemOp2);
+ } else if (ERTS_LIKELY(is_both_small(RemOp1, RemOp2))) {
+ $Dst = make_small(signed_val(RemOp1) % signed_val(RemOp2));
+ $NEXT0();
+ } else {
+ $OUTLINED_ARITH_2($Fail, $Live, int_rem, BIF_rem_2, RemOp1, RemOp2, $Dst);
+ }
+}
+
+i_band := band.fetch.execute;
+
+band.head() {
+ Eterm BandOp1, BandOp2;
+}
+
+band.fetch(Src1, Src2) {
+ BandOp1 = $Src1;
+ BandOp2 = $Src2;
+}
+
+band.execute(Fail, Live, Dst) {
+ if (ERTS_LIKELY(is_both_small(BandOp1, BandOp2))) {
+ /*
+ * No need to untag -- TAG & TAG == TAG.
+ */
+ $Dst = BandOp1 & BandOp2;
+ $NEXT0();
+ }
+ $OUTLINED_ARITH_2($Fail, $Live, band, BIF_band_2, BandOp1, BandOp2, $Dst);
+}
+
+i_bor(Fail, Live, Src1, Src2, Dst) {
+ if (ERTS_LIKELY(is_both_small($Src1, $Src2))) {
+ /*
+ * No need to untag -- TAG | TAG == TAG.
+ */
+ $Dst = $Src1 | $Src2;
+ $NEXT0();
+ }
+ $OUTLINED_ARITH_2($Fail, $Live, bor, BIF_bor_2, $Src1, $Src2, $Dst);
+}
+
+i_bxor(Fail, Live, Src1, Src2, Dst) {
+ if (ERTS_LIKELY(is_both_small($Src1, $Src2))) {
+ /*
+ * TAG ^ TAG == 0.
+ *
+ * Therefore, we perform the XOR operation on the tagged values,
+ * and OR in the tag bits.
+ */
+ $Dst = ($Src1 ^ $Src2) | make_small(0);
+ $NEXT0();
+ }
+ $OUTLINED_ARITH_2($Fail, $Live, bxor, BIF_bxor_2, $Src1, $Src2, $Dst);
+}
+
+i_bsl := shift.setup_bsl.execute;
+i_bsr := shift.setup_bsr.execute;
+
+shift.head() {
+ Eterm Op1, Op2;
+ Sint shift_left_count;
+}
+
+shift.setup_bsr(Src1, Src2) {
+ Op1 = $Src1;
+ Op2 = $Src2;
+ shift_left_count = 0;
+ if (ERTS_LIKELY(is_small(Op2))) {
+ shift_left_count = -signed_val(Op2);
+ } else if (is_big(Op2)) {
+ /*
+ * N bsr NegativeBigNum == N bsl MAX_SMALL
+ * N bsr PositiveBigNum == N bsl MIN_SMALL
+ */
+ shift_left_count = make_small(bignum_header_is_neg(*big_val(Op2)) ?
+ MAX_SMALL : MIN_SMALL);
+ }
+}
+
+shift.setup_bsl(Src1, Src2) {
+ Op1 = $Src1;
+ Op2 = $Src2;
+ shift_left_count = 0;
+ if (ERTS_LIKELY(is_small(Op2))) {
+ shift_left_count = signed_val(Op2);
+ } else if (is_big(Op2)) {
+ if (bignum_header_is_neg(*big_val(Op2))) {
+ /*
+ * N bsl NegativeBigNum is either 0 or -1, depending on
+ * the sign of N. Since we don't believe this case
+ * is common, do the calculation with the minimum
+ * amount of code.
+ */
+ shift_left_count = MIN_SMALL;
+ } else if (is_integer(Op1)) {
+ /*
+ * N bsl PositiveBigNum is too large to represent.
+ */
+ shift_left_count = MAX_SMALL;
+ }
+ }
+}
+
+shift.execute(Fail, Live, Dst) {
+ Uint big_words_needed;
+
+ if (ERTS_LIKELY(is_small(Op1))) {
+ Sint int_res = signed_val(Op1);
+ if (ERTS_UNLIKELY(shift_left_count == 0 || int_res == 0)) {
+ if (ERTS_UNLIKELY(is_not_integer(Op2))) {
+ goto shift_error;
+ }
+ if (int_res == 0) {
+ $Dst = Op1;
+ $NEXT0();
+ }
+ } else if (shift_left_count < 0) { /* Right shift */
+ Eterm bsr_res;
+ shift_left_count = -shift_left_count;
+ if (shift_left_count >= SMALL_BITS-1) {
+ bsr_res = (int_res < 0) ? SMALL_MINUS_ONE : SMALL_ZERO;
+ } else {
+ bsr_res = make_small(int_res >> shift_left_count);
+ }
+ $Dst = bsr_res;
+ $NEXT0();
+ } else if (shift_left_count < SMALL_BITS-1) { /* Left shift */
+ if ((int_res > 0 &&
+ ((~(Uint)0 << ((SMALL_BITS-1)-shift_left_count)) & int_res) == 0) ||
+ ((~(Uint)0 << ((SMALL_BITS-1)-shift_left_count)) & ~int_res) == 0) {
+ $Dst = make_small(int_res << shift_left_count);
+ $NEXT0();
+ }
+ }
+ big_words_needed = 1; /* big_size(small_to_big(Op1)) */
+ goto big_shift;
+ } else if (is_big(Op1)) {
+ if (shift_left_count == 0) {
+ if (is_not_integer(Op2)) {
+ goto shift_error;
+ }
+ $Dst = Op1;
+ $NEXT0();
+ }
+ big_words_needed = big_size(Op1);
+
+ big_shift:
+ if (shift_left_count > 0) { /* Left shift. */
+ big_words_needed += (shift_left_count / D_EXP);
+ } else { /* Right shift. */
+ if (big_words_needed <= (-shift_left_count / D_EXP)) {
+ big_words_needed = 3; /* ??? */
+ } else {
+ big_words_needed -= (-shift_left_count / D_EXP);
+ }
+ }
+ {
+ Eterm tmp_big[2];
+ Sint big_need_size = BIG_NEED_SIZE(big_words_needed+1);
+
+ /*
+ * Slightly conservative check of the size to avoid
+ * allocating huge amounts of memory for bignums that
+ * clearly would overflow the arity in the header
+ * word.
+ */
+ if (big_need_size-8 > BIG_ARITY_MAX) {
+ $SYSTEM_LIMIT($Fail);
+ }
+ $GC_TEST_PRESERVE(big_need_size+1, $Live, Op1);
+ if (is_small(Op1)) {
+ Op1 = small_to_big(signed_val(Op1), tmp_big);
+ }
+ Op1 = big_lshift(Op1, shift_left_count, HTOP);
+ if (is_big(Op1)) {
+ HTOP += bignum_header_arity(*HTOP) + 1;
+ }
+ HEAP_SPACE_VERIFIED(0);
+ if (ERTS_UNLIKELY(is_nil(Op1))) {
+ /*
+ * This result must have been only slightly larger
+ * than allowed since it wasn't caught by the
+ * previous test.
+ */
+ $SYSTEM_LIMIT($Fail);
+ }
+ ERTS_HOLE_CHECK(c_p);
+ $REFRESH_GEN_DEST();
+ $Dst = Op1;
+ $NEXT0();
+ }
+ }
+
+ /*
+ * One or more non-integer arguments.
+ */
+ shift_error:
+ c_p->freason = BADARITH;
+ if ($Fail) {
+ $FAIL($Fail);
+ } else {
+ reg[0] = Op1;
+ reg[1] = Op2;
+ SWAPOUT;
+ if (IsOpCode(I[0], i_bsl_ssjtd)) {
+ I = handle_error(c_p, I, reg, &bif_export[BIF_bsl_2]->info.mfa);
+ } else {
+ ASSERT(IsOpCode(I[0], i_bsr_ssjtd));
+ I = handle_error(c_p, I, reg, &bif_export[BIF_bsr_2]->info.mfa);
+ }
+ goto post_error_handling;
+ }
+}
+
+i_int_bnot(Fail, Src, Live, Dst) {
+ Eterm bnot_val = $Src;
+ if (ERTS_LIKELY(is_small(bnot_val))) {
+ bnot_val = make_small(~signed_val(bnot_val));
+ } else {
+ Uint live = $Live;
+ HEAVY_SWAPOUT;
+ reg[live] = bnot_val;
+ bnot_val = erts_gc_bnot(c_p, reg, live);
+ HEAVY_SWAPIN;
+ ERTS_HOLE_CHECK(c_p);
+ if (ERTS_UNLIKELY(is_nil(bnot_val))) {
+ $BIF_ERROR_ARITY_1($Fail, BIF_bnot_1, reg[live]);
+ }
+ $REFRESH_GEN_DEST();
+ }
+ $Dst = bnot_val;
+}
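
A note on the instruction bodies above. Small integers in ERTS carry their type tag in the low bits of the word, which is what the "No need to untag" comments in band.execute, i_bor, and i_bxor rely on: AND and OR of two identical tags reproduce the tag, while XOR clears it, so only bxor has to OR the tag bits back in. The addition path in plus.execute instead untags, adds, and retags, falling back to the out-of-line GC BIF when the sum no longer fits in a small. The following standalone C sketch illustrates these identities; the 4-bit layout and the make_small/signed_val helpers here are simplified assumptions, not the real ERTS tag scheme.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified tagging model: value shifted left 4 bits, 4-bit tag in the
 * low bits. Illustrative only; ERTS's real layout differs. */
#define TAG_BITS  4
#define SMALL_TAG 0xF /* hypothetical tag value */
typedef intptr_t Sint;
typedef uintptr_t Eterm;

static Eterm make_small(Sint v)  { return ((Eterm)v << TAG_BITS) | SMALL_TAG; }
static Sint  signed_val(Eterm t) { return (Sint)t >> TAG_BITS; }

int main(void)
{
    Eterm a = make_small(12), b = make_small(10);

    /* band/bor: TAG & TAG == TAG and TAG | TAG == TAG, so the tagged
     * words can be combined directly. */
    assert((a & b) == make_small(12 & 10));
    assert((a | b) == make_small(12 | 10));

    /* bxor: TAG ^ TAG == 0 wipes the tag, so OR it back in. */
    assert(((a ^ b) | make_small(0)) == make_small(12 ^ 10));

    /* plus: untag, add, retag -- kept only if the result still fits
     * (the IS_SSMALL test above); otherwise the big-number path runs. */
    printf("12 + 10 = %ld\n", (long)(signed_val(a) + signed_val(b)));
    return 0;
}
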
diff --git a/erts/emulator/beam/atom.c b/erts/emulator/beam/atom.c
index 84d2d5e3ed..bbe1cb3e11 100644
--- a/erts/emulator/beam/atom.c
+++ b/erts/emulator/beam/atom.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -33,20 +34,18 @@
IndexTable erts_atom_table; /* The index table */
-#include "erl_smp.h"
-
-static erts_smp_rwmtx_t atom_table_lock;
+static erts_rwmtx_t atom_table_lock;
-#define atom_read_lock() erts_smp_rwmtx_rlock(&atom_table_lock)
-#define atom_read_unlock() erts_smp_rwmtx_runlock(&atom_table_lock)
-#define atom_write_lock() erts_smp_rwmtx_rwlock(&atom_table_lock)
-#define atom_write_unlock() erts_smp_rwmtx_rwunlock(&atom_table_lock)
+#define atom_read_lock() erts_rwmtx_rlock(&atom_table_lock)
+#define atom_read_unlock() erts_rwmtx_runlock(&atom_table_lock)
+#define atom_write_lock() erts_rwmtx_rwlock(&atom_table_lock)
+#define atom_write_unlock() erts_rwmtx_rwunlock(&atom_table_lock)
#if 0
#define ERTS_ATOM_PUT_OPS_STAT
#endif
#ifdef ERTS_ATOM_PUT_OPS_STAT
-static erts_smp_atomic_t atom_put_ops;
+static erts_atomic_t atom_put_ops;
#endif
/* Functions for allocating space for the ext of atoms. We do not
@@ -67,7 +66,7 @@ static Uint atom_space; /* Amount of atom text space used */
/*
* Print info about atom tables
*/
-void atom_info(int to, void *to_arg)
+void atom_info(fmtfn_t to, void *to_arg)
{
int lock = !ERTS_IS_CRASH_DUMPING;
if (lock)
@@ -75,7 +74,7 @@ void atom_info(int to, void *to_arg)
index_info(to, to_arg, &erts_atom_table);
#ifdef ERTS_ATOM_PUT_OPS_STAT
erts_print(to, to_arg, "atom_put_ops: %ld\n",
- erts_smp_atomic_read_nob(&atom_put_ops));
+ erts_atomic_read_nob(&atom_put_ops));
#endif
if (lock)
@@ -137,7 +136,7 @@ atom_hash(Atom* obj)
while(len--) {
v = *p++;
/* latin1 clutch for r16 */
- if ((v & 0xFE) == 0xC2 && (*p & 0xC0) == 0x80) {
+ if (len && (v & 0xFE) == 0xC2 && (*p & 0xC0) == 0x80) {
v = (v << 6) | (*p & 0x3F);
p++; len--;
}
@@ -175,7 +174,7 @@ atom_alloc(Atom* tmpl)
/*
* Precompute ordinal value of first 3 bytes + 7 bits.
- * This is used by utils.c:cmp_atoms().
+ * This is used by utils.c:erts_cmp_atoms().
* We cannot use the full 32 bits of the first 4 bytes,
* since we use the sign of the difference between two
* ordinal values to represent their relative order.
@@ -198,7 +197,7 @@ atom_alloc(Atom* tmpl)
static void
atom_free(Atom* obj)
{
- erts_free(ERTS_ALC_T_ATOM, (void*) obj);
+ ASSERT(obj->slot.index == atom_val(am_ErtsSecretAtom));
}
static void latin1_to_utf8(byte* conv_buf, const byte** srcp, int* lenp)
@@ -232,10 +231,10 @@ need_convertion:
}
/*
- * erts_atom_put() may fail. If it fails THE_NON_VALUE is returned!
+ * erts_atom_put_index() may fail. Returns negative indexes for errors.
*/
-Eterm
-erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
+int
+erts_atom_put_index(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
{
byte utf8_copy[MAX_ATOM_SZ_FROM_LATIN1];
const byte *text = name;
@@ -245,14 +244,14 @@ erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
int aix;
#ifdef ERTS_ATOM_PUT_OPS_STAT
- erts_smp_atomic_inc_nob(&atom_put_ops);
+ erts_atomic_inc_nob(&atom_put_ops);
#endif
if (tlen < 0) {
if (trunc)
tlen = 0;
else
- return THE_NON_VALUE;
+ return ATOM_MAX_CHARS_ERROR;
}
switch (enc) {
@@ -261,7 +260,7 @@ erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
if (trunc)
tlen = MAX_ATOM_CHARACTERS;
else
- return THE_NON_VALUE;
+ return ATOM_MAX_CHARS_ERROR;
}
#ifdef DEBUG
for (aix = 0; aix < len; aix++) {
@@ -275,7 +274,7 @@ erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
if (trunc)
tlen = MAX_ATOM_CHARACTERS;
else
- return THE_NON_VALUE;
+ return ATOM_MAX_CHARS_ERROR;
}
no_latin1_chars = tlen;
latin1_to_utf8(utf8_copy, &text, &tlen);
@@ -283,7 +282,7 @@ erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
case ERTS_ATOM_ENC_UTF8:
/* First sanity check; need to verify later */
if (tlen > MAX_ATOM_SZ_LIMIT && !trunc)
- return THE_NON_VALUE;
+ return ATOM_MAX_CHARS_ERROR;
break;
}
@@ -294,7 +293,7 @@ erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
atom_read_unlock();
if (aix >= 0) {
/* Already in table no need to verify it */
- return make_atom(aix);
+ return aix;
}
if (enc == ERTS_ATOM_ENC_UTF8) {
@@ -313,13 +312,13 @@ erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
case ERTS_UTF8_OK_MAX_CHARS:
/* Truncated... */
if (!trunc)
- return THE_NON_VALUE;
+ return ATOM_MAX_CHARS_ERROR;
ASSERT(no_chars == MAX_ATOM_CHARACTERS);
tlen = err_pos - text;
break;
default:
/* Bad utf8... */
- return THE_NON_VALUE;
+ return ATOM_BAD_ENCODING_ERROR;
}
}
@@ -332,7 +331,20 @@ erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
atom_write_lock();
aix = index_put(&erts_atom_table, (void*) &a);
atom_write_unlock();
- return make_atom(aix);
+ return aix;
+}
+
+/*
+ * erts_atom_put() may fail. If it fails THE_NON_VALUE is returned!
+ */
+Eterm
+erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
+{
+ int aix = erts_atom_put_index(name, len, enc, trunc);
+ if (aix >= 0)
+ return make_atom(aix);
+ else
+ return THE_NON_VALUE;
}
Eterm
@@ -345,32 +357,24 @@ am_atom_put(const char* name, int len)
int atom_table_size(void)
{
int ret;
-#ifdef ERTS_SMP
int lock = !ERTS_IS_CRASH_DUMPING;
if (lock)
atom_read_lock();
-#endif
ret = erts_atom_table.entries;
-#ifdef ERTS_SMP
if (lock)
atom_read_unlock();
-#endif
return ret;
}
int atom_table_sz(void)
{
int ret;
-#ifdef ERTS_SMP
int lock = !ERTS_IS_CRASH_DUMPING;
if (lock)
atom_read_lock();
-#endif
ret = index_table_sz(&erts_atom_table);
-#ifdef ERTS_SMP
if (lock)
atom_read_unlock();
-#endif
return ret;
}
@@ -398,19 +402,15 @@ erts_atom_get(const char *name, int len, Eterm* ap, ErtsAtomEncoding enc)
void
erts_atom_get_text_space_sizes(Uint *reserved, Uint *used)
{
-#ifdef ERTS_SMP
int lock = !ERTS_IS_CRASH_DUMPING;
if (lock)
atom_read_lock();
-#endif
if (reserved)
*reserved = reserved_atom_space;
if (used)
*used = atom_space;
-#ifdef ERTS_SMP
if (lock)
atom_read_unlock();
-#endif
}
void
@@ -419,21 +419,25 @@ init_atom_table(void)
HashFunctions f;
int i;
Atom a;
- erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
+ erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
- rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
- rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+ rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED;
#ifdef ERTS_ATOM_PUT_OPS_STAT
- erts_smp_atomic_init_nob(&atom_put_ops, 0);
+ erts_atomic_init_nob(&atom_put_ops, 0);
#endif
- erts_smp_rwmtx_init_opt(&atom_table_lock, &rwmtx_opt, "atom_tab");
+ erts_rwmtx_init_opt(&atom_table_lock, &rwmtx_opt, "atom_tab", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
f.hash = (H_FUN) atom_hash;
f.cmp = (HCMP_FUN) atom_cmp;
f.alloc = (HALLOC_FUN) atom_alloc;
f.free = (HFREE_FUN) atom_free;
+ f.meta_alloc = (HMALLOC_FUN) erts_alloc;
+ f.meta_free = (HMFREE_FUN) erts_free;
+ f.meta_print = (HMPRINT_FUN) erts_print;
atom_text_pos = NULL;
atom_text_end = NULL;
@@ -463,10 +467,13 @@ init_atom_table(void)
atom_space -= a.len;
atom_tab(ix)->name = (byte*)erl_atom_names[i];
}
+
+ /* Hide am_ErtsSecretAtom */
+ hash_erase(&erts_atom_table.htable, atom_tab(atom_val(am_ErtsSecretAtom)));
}
void
-dump_atoms(int to, void *to_arg)
+dump_atoms(fmtfn_t to, void *to_arg)
{
int i = erts_atom_table.entries;
@@ -479,3 +486,9 @@ dump_atoms(int to, void *to_arg)
}
}
}
+
+Uint
+erts_get_atom_limit(void)
+{
+ return erts_atom_table.limit;
+}
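
Besides the SMP-prefix cleanup, atom.c picks up a subtle bounds fix in atom_hash(): the "latin1 clutch" that folds a two-byte UTF-8 sequence (lead byte 0xC2/0xC3) into one hash unit now tests len before dereferencing *p, since a name whose last byte happens to be 0xC2 or 0xC3 would otherwise read one byte past the buffer. Below is a minimal sketch of the loop shape, with a placeholder mixing step and helper name rather than the real hash.

#include <stdio.h>

static unsigned hash_name(const unsigned char *p, int len)
{
    unsigned h = 0, v;
    while (len--) {
        v = *p++;
        /* The `len &&` guard mirrors the fix above: only peek at the
         * next byte if one exists. The fold below matches the upstream
         * expression; it is not a full UTF-8 decode. */
        if (len && (v & 0xFE) == 0xC2 && (*p & 0xC0) == 0x80) {
            v = (v << 6) | (*p & 0x3F);
            p++; len--;
        }
        h = h * 31 + v; /* placeholder mixing step */
    }
    return h;
}

int main(void)
{
    const unsigned char name[] = { 'a', 0xC3, 0xA5, 'b' }; /* "a" U+00E5 "b" */
    const unsigned char tail[] = { 'a', 0xC3 };            /* ends in a lead byte */

    printf("%u\n", hash_name(name, 4));
    /* Safe even though the final byte looks like a UTF-8 lead byte: */
    printf("%u\n", hash_name(tail, 2));
    return 0;
}
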
diff --git a/erts/emulator/beam/atom.h b/erts/emulator/beam/atom.h
index 5904ae0f7e..be998a46bd 100644
--- a/erts/emulator/beam/atom.h
+++ b/erts/emulator/beam/atom.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -20,10 +21,7 @@
#ifndef __ATOM_H__
#define __ATOM_H__
-#ifndef __INDEX_H__
#include "index.h"
-#endif
-
#include "erl_atom_table.h"
#define MAX_ATOM_CHARACTERS 255
@@ -31,6 +29,8 @@
#define MAX_ATOM_SZ_LIMIT (4*MAX_ATOM_CHARACTERS) /* theoretical byte limit */
#define ATOM_LIMIT (1024*1024)
#define MIN_ATOM_TABLE_SIZE 8192
+#define ATOM_BAD_ENCODING_ERROR -1
+#define ATOM_MAX_CHARS_ERROR -2
#ifndef ARCH_32
/* Internal atom cache needs MAX_ATOM_TABLE_SIZE to be less than an
@@ -128,17 +128,18 @@ typedef enum {
(erts_is_atom_utf8_bytes((byte *) LSTR, sizeof(LSTR) - 1, (TERM)))
#define ERTS_DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
#define ERTS_INIT_AM(S) AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
+#define ERTS_MAKE_AM(Str) am_atom_put(Str, sizeof(Str) - 1)
int atom_table_size(void); /* number of elements */
int atom_table_sz(void); /* table size in bytes, excluding stored objects */
Eterm am_atom_put(const char*, int); /* ONLY 7-bit ascii! */
Eterm erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc);
-int atom_erase(byte*, int);
-int atom_static_put(byte*, int);
+int erts_atom_put_index(const byte *name, int len, ErtsAtomEncoding enc, int trunc);
void init_atom_table(void);
-void atom_info(int, void *);
-void dump_atoms(int, void *);
+void atom_info(fmtfn_t, void *);
+void dump_atoms(fmtfn_t, void *);
+Uint erts_get_atom_limit(void);
int erts_atom_get(const char* name, int len, Eterm* ap, ErtsAtomEncoding enc);
void erts_atom_get_text_space_sizes(Uint *reserved, Uint *used);
#endif
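
The new ERTS_MAKE_AM(Str) macro in atom.h is a convenience over am_atom_put for string literals: sizeof of a literal includes the terminating NUL, so sizeof(Str) - 1 yields the length as a compile-time constant with no strlen call. A toy illustration, using a stand-in function rather than the real am_atom_put:

#include <stdio.h>

/* Hypothetical stand-in for am_atom_put(): just reports what it got. */
static void am_atom_put_demo(const char *name, int len)
{
    printf("atom \"%.*s\" (len %d)\n", len, name, len);
}

/* Same shape as ERTS_MAKE_AM: length computed at compile time. */
#define MAKE_AM_DEMO(Str) am_atom_put_demo(Str, sizeof(Str) - 1)

int main(void)
{
    MAKE_AM_DEMO("hello");        /* len 5 */
    MAKE_AM_DEMO("trace_status"); /* len 12 */
    return 0;
}
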
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index d28e519ae1..fc55b687d4 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -1,18 +1,19 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1996-2013. All Rights Reserved.
+# Copyright Ericsson AB 1996-2017. All Rights Reserved.
#
-# The contents of this file are subject to the Erlang Public License,
-# Version 1.1, (the "License"); you may not use this file except in
-# compliance with the License. You should have received a copy of the
-# Erlang Public License along with this software. If not, it can be
-# retrieved online at http://www.erlang.org/.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
#
-# Software distributed under the License is distributed on an "AS IS"
-# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-# the License for the specific language governing rights and limitations
-# under the License.
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
#
# %CopyrightEnd%
#
@@ -42,7 +43,7 @@ atom false true
atom Underscore='_'
atom Noname='nonode@nohost'
atom EOT='$end_of_table'
-atom Cookie=''
+atom Empty=''
#
# Used in the Beam emulator loop. (Smaller literals usually means tighter code.)
@@ -58,17 +59,25 @@ atom nocatch
atom undefined_function
atom undefined_lambda
+# Secret internal atom that can never be found by string lookup
+# and should never leak out to be seen by the user.
+atom ErtsSecretAtom='3RT$'
# All other atoms. Try to keep the order alphabetic.
#
atom DOWN='DOWN'
atom UP='UP'
atom EXIT='EXIT'
+atom abort
atom aborted
atom abs_path
atom absoluteURI
atom ac
+atom accessor
atom active
+atom active_tasks
+atom active_tasks_all
+atom alive
atom all
atom all_but_first
atom all_names
@@ -94,31 +103,37 @@ atom args
atom arg0
atom arity
atom asn1
+atom async
atom asynchronous
atom atom
atom atom_used
atom attributes
+atom await_microstate_accounting_modifications
atom await_port_send_result
atom await_proc_exit
+atom await_result
atom await_sched_wall_time_modifications
atom awaiting_load
atom awaiting_unload
atom backtrace backtrace_depth
-atom badarg badarith badarity badfile badmatch badsig badfun
+atom badarg badarith badarity badfile badfun badkey badmap badmatch badsig
atom bag
atom band
atom big
atom bif_return_trap
+atom bif_timer_server
atom binary
atom binary_bin_to_list_trap
atom binary_copy_trap
+atom binary_find_trap
atom binary_longest_prefix_trap
atom binary_longest_suffix_trap
-atom binary_match_trap
-atom binary_matches_trap
+atom binary_to_list_continue
atom binary_to_term_trap
atom block
+atom block_normal
atom blocked
+atom blocked_normal
atom bm
atom bnot
atom bor
@@ -143,18 +158,23 @@ atom catchlevel
atom cd
atom cdr
atom cflags
+atom CHANGE='CHANGE'
atom characters_to_binary_int
atom characters_to_list_int
atom clear
+atom clock_service
atom close
atom closed
atom code
atom command
+atom commandv
atom compact
atom compat_rel
atom compile
+atom complete
atom compressed
atom config_h
+atom convert_time_unit
atom connect
atom connected
atom connection_closed
@@ -163,6 +183,8 @@ atom const
atom context_switches
atom control
atom copy
+atom copy_literals
+atom counters
atom cpu
atom cpu_timestamp
atom cr
@@ -174,17 +196,29 @@ atom current_stacktrace
atom data
atom debug_flags
atom decimals
+atom default
atom delay_trap
atom dexit
atom depth
atom dgroup_leader
atom dictionary
+atom dirty_bif_exception
+atom dirty_bif_result
+atom dirty_bif_trap
+atom dirty_cpu
atom dirty_cpu_schedulers_online
+atom dirty_execution
+atom dirty_io
+atom dirty_nif_exception
+atom dirty_nif_finalizer
atom disable_trace
atom disabled
+atom discard
atom display_items
atom dist
atom dist_cmd
+atom dist_ctrl_put_data
+atom dist_data
atom Div='/'
atom div
atom dlink
@@ -197,9 +231,12 @@ atom dotall
atom driver
atom driver_options
atom dsend
+atom dsend_continue_trap
atom dunlink
atom duplicate_bag
+atom duplicated
atom dupnames
+atom einval
atom elib_malloc
atom emulator
atom enable_trace
@@ -208,26 +245,34 @@ atom endian
atom env
atom eof
atom eol
-atom exception_from
-atom exception_trace
-atom extended
atom Eq='=:='
atom Eqeq='=='
+atom erl_tracer
atom erlang
+atom erl_signal_server
atom ERROR='ERROR'
atom error_handler
atom error_logger
+atom erts_code_purger
+atom erts_debug
atom erts_internal
atom ets
atom ETS_TRANSFER='ETS-TRANSFER'
atom event
atom exact_reductions
+atom exception_from
+atom exception_trace
atom exclusive
atom exit_status
atom existing
+atom existing_processes
+atom existing_ports
atom exiting
atom exports
+atom extended
atom external
+atom extra
atom false
atom fcgi
atom fd
@@ -235,7 +280,7 @@ atom first
atom firstline
atom flags
atom flush
-atom flush_monitor_message
+atom flush_monitor_messages
atom force
atom format_cpu_topology
atom free
@@ -247,7 +292,13 @@ atom functions
atom function_clause
atom garbage_collecting
atom garbage_collection
+atom garbage_collection_info
atom gc_end
+atom gc_major_end
+atom gc_major_start
+atom gc_max_heap_size
+atom gc_minor_end
+atom gc_minor_start
atom gc_start
atom Ge='>='
atom generational
@@ -256,13 +307,17 @@ atom get_seq_token
atom get_tcw
atom getenv
atom gather_gc_info_result
+atom gather_io_bytes
+atom gather_microstate_accounting_result
atom gather_sched_wall_time_result
+atom gather_system_check_result
atom getting_linked
atom getting_unlinked
atom global
atom Gt='>'
atom grun
atom group_leader
+atom handle
atom have_dt_utag
atom heap_block_size
atom heap_size
@@ -278,7 +333,6 @@ atom http httph https http_response http_request http_header http_eoh http_error
atom id
atom if_clause
atom ignore
-atom imports
atom in
atom in_exiting
atom inactive
@@ -288,6 +342,7 @@ atom index
atom infinity
atom info
atom info_msg
+atom init
atom initial_call
atom input
atom internal
@@ -311,10 +366,12 @@ atom ldflags
atom Le='=<'
atom lf
atom line
+atom line_delimiter
atom line_length
atom linked_in_driver
atom links
atom list
+atom list_to_binary_continue
atom little
atom loaded
atom load_cancelled
@@ -325,25 +382,38 @@ atom long_schedule
atom low
atom Lt='<'
atom machine
+atom magic_ref
+atom major
atom match
atom match_limit
atom match_limit_recursion
atom match_spec
+atom match_spec_result
atom max
atom maximum
+atom max_heap_size
atom max_tables max_processes
atom mbuf_size
+atom md5
atom memory
atom memory_internal
atom memory_types
atom message
atom message_binary
+atom message_queue_data
atom message_queue_len
atom messages
+atom merge_trap
atom meta
atom meta_match_spec
+atom micro_seconds
+atom microsecond
+atom microstate_accounting
+atom milli_seconds
+atom millisecond
atom min_heap_size
atom min_bin_vheap_size
+atom minor
atom minor_version
atom Minus='-'
atom module
@@ -352,13 +422,19 @@ atom monitored_by
atom monitor
atom monitor_nodes
atom monitors
+atom monotonic
+atom monotonic_timestamp
atom more
atom multi_scheduling
atom multiline
+atom nano_seconds
+atom nanosecond
atom name
atom named_table
atom namelist
+atom native
atom native_addresses
+atom need_gc
atom Neq='=/='
atom Neqeq='/='
atom net_kernel
@@ -366,6 +442,8 @@ atom net_kernel_terminated
atom never_utf
atom new
atom new_index
+atom new_processes
+atom new_ports
atom new_uniq
atom newline
atom next
@@ -405,10 +483,12 @@ atom notify
atom notsup
atom nouse_stdio
atom objects
+atom off_heap
atom offset
atom ok
atom old_heap_block_size
atom old_heap_size
+atom on_heap
atom on_load
atom open
atom open_error
@@ -419,13 +499,6 @@ atom orelse
atom os_pid
atom os_type
atom os_version
-atom ose_bg_proc
-atom ose_int_proc
-atom ose_phantom
-atom ose_pri_proc
-atom ose_process_prio
-atom ose_process_type
-atom ose_ti_proc
atom out
atom out_exited
atom out_exiting
@@ -440,6 +513,7 @@ atom pause
atom pending
atom pending_driver
atom pending_process
+atom pending_purge_lambda
atom pending_reload
atom permanent
atom pid
@@ -448,6 +522,9 @@ atom ports
atom port_count
atom port_limit
atom port_op
+atom positive
+atom prepare
+atom prepare_on_load
atom print
atom priority
atom private
@@ -492,6 +569,8 @@ atom return_from
atom return_to
atom return_trace
atom run_queue
+atom run_queue_lengths
+atom run_queue_lengths_all
atom runnable
atom runnable_ports
atom runnable_procs
@@ -501,12 +580,18 @@ atom running_procs
atom runtime
atom safe
atom save_calls
-atom scheduler
+atom scheduler
atom scheduler_id
+atom scheduler_wall_time
+atom scheduler_wall_time_all
atom schedulers_online
atom scheme
atom scientific
atom scope
+atom second
+atom seconds
+atom send
+atom send_to_non_existing_process
atom sensitive
atom sequential_tracer
atom sequential_trace_token
@@ -523,11 +608,25 @@ atom set_tcw
atom set_tcw_fake
atom separate
atom shared
+atom sighup
+atom sigterm
+atom sigusr1
+atom sigusr2
+atom sigill
+atom sigchld
+atom sigabrt
+atom sigalrm
+atom sigstop
+atom sigint
+atom sigsegv
+atom sigtstp
+atom sigquit
atom silent
atom size
atom sl_alloc
atom spawn_executable
atom spawn_driver
+atom spawned
atom ssl_tls
atom stack_size
atom start
@@ -536,6 +635,8 @@ atom static
atom stderr_to_stdout
atom stop
atom stream
+atom strict_monotonic
+atom strict_monotonic_timestamp
atom sunrm
atom suspend
atom suspended
@@ -552,17 +653,25 @@ atom term_to_binary_trap
atom this
atom thread_pool_size
atom threads
+atom time_offset
atom timeout
atom timeout_value
atom Times='*'
atom timestamp
atom total
+atom total_active_tasks
+atom total_active_tasks_all
atom total_heap_size
+atom total_run_queue_lengths
+atom total_run_queue_lengths_all
atom tpkt
-atom trace trace_ts traced
+atom trace trace_ts traced
atom trace_control_word
+atom trace_status
atom tracer
atom trap_exit
+atom trim
+atom trim_all
atom try_clause
atom true
atom tuple
@@ -578,6 +687,7 @@ atom use_stdio
atom used
atom utf8
atom unblock
+atom unblock_normal
atom uniq
atom unless_suspending
atom unloaded
@@ -588,11 +698,11 @@ atom value
atom values
atom version
atom visible
+atom wait
atom waiting
atom wall_clock
atom warning
atom warning_msg
-atom scheduler_wall_time
atom wordsize
atom write_concurrency
atom xor
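
Each `atom Name` line in this file becomes a preallocated atom plus an am_Name constant in the generated erl_atom_table.h (seeded by init_atom_table() in atom.c above); the `Name='text'` form, as in `atom Div='/'`, lets the C identifier differ from the atom's text. A rough standalone sketch of that mapping follows; the real generator is part of the OTP build, and its exact output format is not reproduced here.

#include <stdio.h>
#include <string.h>

/* Emit an illustrative am_* definition for one atom.names entry. */
static void emit(const char *entry, int index)
{
    const char *eq = strchr(entry, '=');
    if (eq) {
        /* "Name='text'" form: identifier left of '=', text right of it. */
        const char *text = eq + 1;
        int tlen = (int)strlen(text);
        if (tlen >= 2 && text[0] == '\'') { text++; tlen -= 2; }
        printf("#define am_%.*s make_atom(%d) /* atom text: %.*s */\n",
               (int)(eq - entry), entry, index, tlen, text);
    } else {
        printf("#define am_%s make_atom(%d)\n", entry, index);
    }
}

int main(void)
{
    const char *entries[] = { "abort", "badkey", "Div='/'" };
    for (int i = 0; i < 3; i++)
        emit(entries[i], i);
    return 0;
}
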
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index df1983a83d..87367b44ab 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -33,17 +34,99 @@
#include "beam_catches.h"
#include "erl_binary.h"
#include "erl_nif.h"
+#include "erl_bits.h"
#include "erl_thr_progress.h"
+#include "erl_nfunc_sched.h"
+#ifdef HIPE
+# include "hipe_bif0.h"
+# define IF_HIPE(X) (X)
+#else
+# define IF_HIPE(X) (0)
+#endif
+
+#ifdef HIPE
+# include "hipe_stack.h"
+#endif
+
+static struct {
+ Eterm module;
+ erts_mtx_t mtx;
+ Export *pending_purge_lambda;
+ Eterm *sprocs;
+ Eterm def_sprocs[10];
+ Uint sp_size;
+ Uint sp_ix;
+ ErlFunEntry **funs;
+ ErlFunEntry *def_funs[10];
+ Uint fe_size;
+ Uint fe_ix;
+ struct erl_module_instance saved_old;
+} purge_state;
+
+Process *erts_code_purger = NULL;
+
+Process *erts_dirty_process_code_checker;
+erts_atomic_t erts_copy_literal_area__;
+#define ERTS_SET_COPY_LITERAL_AREA(LA) \
+ erts_atomic_set_nob(&erts_copy_literal_area__, \
+ (erts_aint_t) (LA))
+Process *erts_literal_area_collector = NULL;
+
+typedef struct ErtsLiteralAreaRef_ ErtsLiteralAreaRef;
+struct ErtsLiteralAreaRef_ {
+ ErtsLiteralAreaRef *next;
+ ErtsLiteralArea *literal_area;
+};
+
+struct {
+ erts_mtx_t mtx;
+ ErtsLiteralAreaRef *first;
+ ErtsLiteralAreaRef *last;
+} release_literal_areas;
static void set_default_trace_pattern(Eterm module);
-static Eterm check_process_code(Process* rp, Module* modp, int allow_gc, int *redsp);
+static Eterm check_process_code(Process* rp, Module* modp, int *redsp, int fcalls);
static void delete_code(Module* modp);
-static void decrement_refc(BeamInstr* code);
-static int is_native(BeamInstr* code);
static int any_heap_ref_ptrs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size);
static int any_heap_refs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size);
+static void
+init_purge_state(void)
+{
+ purge_state.module = THE_NON_VALUE;
+
+ erts_mtx_init(&purge_state.mtx, "purge_state", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+ purge_state.pending_purge_lambda =
+ erts_export_put(am_erts_code_purger, am_pending_purge_lambda, 3);
+
+ purge_state.sprocs = &purge_state.def_sprocs[0];
+ purge_state.sp_size = sizeof(purge_state.def_sprocs);
+ purge_state.sp_size /= sizeof(purge_state.def_sprocs[0]);
+ purge_state.sp_ix = 0;
+
+ purge_state.funs = &purge_state.def_funs[0];
+ purge_state.fe_size = sizeof(purge_state.def_funs);
+ purge_state.fe_size /= sizeof(purge_state.def_funs[0]);
+ purge_state.fe_ix = 0;
+
+ purge_state.saved_old.code_hdr = 0;
+}
+
+void
+erts_beam_bif_load_init(void)
+{
+ erts_mtx_init(&release_literal_areas.mtx, "release_literal_areas", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+
+ release_literal_areas.first = NULL;
+ release_literal_areas.last = NULL;
+ erts_atomic_init_nob(&erts_copy_literal_area__,
+ (erts_aint_t) NULL);
+
+ init_purge_state();
+}
BIF_RETTYPE code_is_module_native_1(BIF_ALIST_1)
{
@@ -59,8 +142,8 @@ BIF_RETTYPE code_is_module_native_1(BIF_ALIST_1)
return am_undefined;
}
erts_rlock_old_code(code_ix);
- res = ((modp->curr.code && is_native(modp->curr.code)) ||
- (modp->old.code != 0 && is_native(modp->old.code))) ?
+ res = (erts_is_module_native(modp->curr.code_hdr) ||
+ erts_is_module_native(modp->old.code_hdr)) ?
am_true : am_false;
erts_runlock_old_code(code_ix);
return res;
@@ -68,40 +151,55 @@ BIF_RETTYPE code_is_module_native_1(BIF_ALIST_1)
BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3)
{
+#if !defined(HIPE)
+ BIF_ERROR(BIF_P, EXC_NOTSUP);
+#else
Module* modp;
- Eterm res;
+ Eterm res, mod;
+
+ if (!is_internal_magic_ref(BIF_ARG_1))
+ BIF_ERROR(BIF_P, BADARG);
+
+ mod = erts_module_for_prepared_code(erts_magic_ref2bin(BIF_ARG_1));
+
+ if (is_not_atom(mod))
+ BIF_ERROR(BIF_P, BADARG);
if (!erts_try_seize_code_write_permission(BIF_P)) {
ERTS_BIF_YIELD3(bif_export[BIF_code_make_stub_module_3],
BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
}
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
- modp = erts_get_module(BIF_ARG_1, erts_active_code_ix());
+ modp = erts_get_module(mod, erts_active_code_ix());
if (modp && modp->curr.num_breakpoints > 0) {
- ASSERT(modp->curr.code != NULL);
+ ASSERT(modp->curr.code_hdr != NULL);
erts_clear_module_break(modp);
ASSERT(modp->curr.num_breakpoints == 0);
}
- erts_start_staging_code_ix();
+ erts_start_staging_code_ix(1);
res = erts_make_stub_module(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
- if (res == BIF_ARG_1) {
+ if (res == mod) {
erts_end_staging_code_ix();
erts_commit_staging_code_ix();
+ if (!modp)
+ modp = erts_get_module(mod, erts_active_code_ix());
+ hipe_redirect_to_module(modp);
}
else {
erts_abort_staging_code_ix();
}
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_release_code_write_permission();
return res;
+#endif
}
BIF_RETTYPE
@@ -134,9 +232,26 @@ prepare_loading_2(BIF_ALIST_2)
res = TUPLE2(hp, am_error, reason);
BIF_RET(res);
}
- hp = HAlloc(BIF_P, PROC_BIN_SIZE);
- res = erts_mk_magic_binary_term(&hp, &MSO(BIF_P), magic);
- erts_refc_dec(&magic->refc, 1);
+ hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
+ res = erts_mk_magic_ref(&hp, &MSO(BIF_P), magic);
+ erts_refc_dec(&magic->intern.refc, 1);
+ BIF_RET(res);
+}
+
+BIF_RETTYPE
+has_prepared_code_on_load_1(BIF_ALIST_1)
+{
+ Eterm res;
+
+ if (!is_internal_magic_ref(BIF_ARG_1)) {
+ error:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ res = erts_has_code_on_load(erts_magic_ref2bin(BIF_ARG_1));
+ if (res == NIL) {
+ goto error;
+ }
BIF_RET(res);
}
@@ -144,19 +259,17 @@ struct m {
Binary* code;
Eterm module;
Module* modp;
- Uint exception;
+ Eterm exception;
};
-static Eterm staging_epilogue(Process* c_p, int, Eterm res, int, struct m*, int);
-#ifdef ERTS_SMP
+static Eterm staging_epilogue(Process* c_p, int, Eterm res, int, struct m*, int, int);
static void smp_code_ix_commiter(void*);
static struct /* Protected by code_write_permission */
{
Process* stager;
ErtsThrPrgrLaterOp lop;
-}commiter_state;
-#endif
+} committer_state;
static Eterm
exception_list(Process* p, Eterm tag, struct m* mp, Sint exceptions)
@@ -164,14 +277,13 @@ exception_list(Process* p, Eterm tag, struct m* mp, Sint exceptions)
Eterm* hp = HAlloc(p, 3 + 2*exceptions);
Eterm res = NIL;
- mp += exceptions - 1;
while (exceptions > 0) {
- if (mp->exception) {
+ if (is_value(mp->exception)) {
res = CONS(hp, mp->module, res);
hp += 2;
exceptions--;
}
- mp--;
+ mp++;
}
return TUPLE2(hp, tag, res);
}
@@ -180,8 +292,8 @@ exception_list(Process* p, Eterm tag, struct m* mp, Sint exceptions)
BIF_RETTYPE
finish_loading_1(BIF_ALIST_1)
{
- int i;
- int n;
+ Sint i;
+ Sint n;
struct m* p = NULL;
Uint exceptions;
Eterm res;
@@ -202,9 +314,13 @@ finish_loading_1(BIF_ALIST_1)
*/
n = erts_list_length(BIF_ARG_1);
- if (n == -1) {
- ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
- goto done;
+ if (n < 0) {
+ badarg:
+ if (p) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, p);
+ }
+ erts_release_code_write_permission();
+ BIF_ERROR(BIF_P, BADARG);
}
p = erts_alloc(ERTS_ALC_T_LOADER_TMP, n*sizeof(struct m));
@@ -216,32 +332,33 @@ finish_loading_1(BIF_ALIST_1)
for (i = 0; i < n; i++) {
Eterm* cons = list_val(BIF_ARG_1);
Eterm term = CAR(cons);
- ProcBin* pb;
- if (!ERTS_TERM_IS_MAGIC_BINARY(term)) {
- ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
- goto done;
+ if (!is_internal_magic_ref(term)) {
+ goto badarg;
}
- pb = (ProcBin*) binary_val(term);
- p[i].code = pb->val;
+ p[i].code = erts_magic_ref2bin(term);
p[i].module = erts_module_for_prepared_code(p[i].code);
if (p[i].module == NIL) {
- ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
- goto done;
+ goto badarg;
}
BIF_ARG_1 = CDR(cons);
}
/*
* Since we cannot handle atomic loading of a group of modules
- * if one or more of them uses on_load, we will only allow one
- * element in the list. This limitation is intended to be
- * lifted in the future.
+ * if one or more of them uses on_load, we will only allow
+ * more than one element in the list if none of the modules
+ * have an on_load function.
*/
if (n > 1) {
- ERTS_BIF_PREP_ERROR(res, BIF_P, SYSTEM_LIMIT);
- goto done;
+ for (i = 0; i < n; i++) {
+ if (erts_has_code_on_load(p[i].code) == am_true) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, p);
+ erts_release_code_write_permission();
+ BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+ }
+ }
}
/*
@@ -253,18 +370,35 @@ finish_loading_1(BIF_ALIST_1)
*/
res = am_ok;
- erts_start_staging_code_ix();
+ erts_start_staging_code_ix(n);
for (i = 0; i < n; i++) {
p[i].modp = erts_put_module(p[i].module);
+ p[i].modp->seen = 0;
}
+
+ exceptions = 0;
+ for (i = 0; i < n; i++) {
+ p[i].exception = THE_NON_VALUE;
+ if (p[i].modp->seen) {
+ p[i].exception = am_duplicated;
+ exceptions++;
+ }
+ p[i].modp->seen = 1;
+ }
+ if (exceptions) {
+ res = exception_list(BIF_P, am_duplicated, p, exceptions);
+ goto done;
+ }
+
for (i = 0; i < n; i++) {
if (p[i].modp->curr.num_breakpoints > 0 ||
p[i].modp->curr.num_traced_exports > 0 ||
- erts_is_default_trace_enabled()) {
- /* tracing involved, fallback with thread blocking */
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_is_default_trace_enabled() ||
+ IF_HIPE(hipe_need_blocking(p[i].modp))) {
+ /* tracing or hipe need thread blocking */
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
is_blocking = 1;
break;
}
@@ -281,9 +415,9 @@ finish_loading_1(BIF_ALIST_1)
exceptions = 0;
for (i = 0; i < n; i++) {
- p[i].exception = 0;
- if (p[i].modp->curr.code && p[i].modp->old.code) {
- p[i].exception = 1;
+ p[i].exception = THE_NON_VALUE;
+ if (p[i].modp->curr.code_hdr && p[i].modp->old.code_hdr) {
+ p[i].exception = am_not_purged;
exceptions++;
}
}
@@ -300,11 +434,11 @@ finish_loading_1(BIF_ALIST_1)
Eterm mod;
Eterm retval;
- erts_refc_inc(&p[i].code->refc, 1);
+ erts_refc_inc(&p[i].code->intern.refc, 1);
retval = erts_finish_loading(p[i].code, BIF_P, 0, &mod);
ASSERT(retval == NIL || retval == am_on_load);
if (retval == am_on_load) {
- p[i].exception = 1;
+ p[i].exception = am_on_load;
exceptions++;
}
}
@@ -320,46 +454,48 @@ finish_loading_1(BIF_ALIST_1)
}
done:
- return staging_epilogue(BIF_P, do_commit, res, is_blocking, p, n);
+ return staging_epilogue(BIF_P, do_commit, res, is_blocking, p, n, 1);
}
static Eterm
staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking,
- struct m* loaded, int nloaded)
+ struct m* mods, int nmods, int free_mods)
{
-#ifdef ERTS_SMP
if (is_blocking || !commit)
-#endif
{
if (commit) {
+ int i;
erts_end_staging_code_ix();
erts_commit_staging_code_ix();
- if (loaded) {
- int i;
- for (i=0; i < nloaded; i++) {
- set_default_trace_pattern(loaded[i].module);
+
+ for (i=0; i < nmods; i++) {
+ if (mods[i].modp->curr.code_hdr
+ && mods[i].exception != am_on_load) {
+ set_default_trace_pattern(mods[i].module);
}
+ #ifdef HIPE
+ hipe_redirect_to_module(mods[i].modp);
+ #endif
}
}
else {
erts_abort_staging_code_ix();
}
- if (loaded) {
- erts_free(ERTS_ALC_T_LOADER_TMP, loaded);
+ if (free_mods) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, mods);
}
if (is_blocking) {
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
erts_release_code_write_permission();
return res;
}
-#ifdef ERTS_SMP
else {
ASSERT(is_value(res));
- if (loaded) {
- erts_free(ERTS_ALC_T_LOADER_TMP, loaded);
+ if (free_mods) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, mods);
}
erts_end_staging_code_ix();
/*
@@ -368,10 +504,10 @@ staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking,
* schedulers to read active code_ix in a safe way while executing
* without any memory barriers at all.
*/
- ASSERT(commiter_state.stager == NULL);
- commiter_state.stager = c_p;
- erts_schedule_thr_prgr_later_op(smp_code_ix_commiter, NULL, &commiter_state.lop);
- erts_smp_proc_inc_refc(c_p);
+ ASSERT(committer_state.stager == NULL);
+ committer_state.stager = c_p;
+ erts_schedule_thr_prgr_later_op(smp_code_ix_commiter, NULL, &committer_state.lop);
+ erts_proc_inc_refc(c_p);
erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
/*
* smp_code_ix_commiter() will do the rest "later"
@@ -379,28 +515,25 @@ staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking,
*/
ERTS_BIF_YIELD_RETURN(c_p, res);
}
-#endif
}
-#ifdef ERTS_SMP
static void smp_code_ix_commiter(void* null)
{
- Process* p = commiter_state.stager;
+ Process* p = committer_state.stager;
erts_commit_staging_code_ix();
#ifdef DEBUG
- commiter_state.stager = NULL;
+ committer_state.stager = NULL;
#endif
erts_release_code_write_permission();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
if (!ERTS_PROC_IS_EXITING(p)) {
erts_resume(p, ERTS_PROC_LOCK_STATUS);
}
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
- erts_smp_proc_dec_refc(p);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_dec_refc(p);
}
-#endif /* ERTS_SMP */
@@ -418,7 +551,7 @@ check_old_code_1(BIF_ALIST_1)
modp = erts_get_module(BIF_ARG_1, code_ix);
if (modp != NULL) {
erts_rlock_old_code(code_ix);
- if (modp->old.code != NULL) {
+ if (modp->old.code_hdr) {
res = am_true;
}
erts_runlock_old_code(code_ix);
@@ -427,7 +560,7 @@ check_old_code_1(BIF_ALIST_1)
}
Eterm
-erts_check_process_code(Process *c_p, Eterm module, int allow_gc, int *redsp)
+erts_check_process_code(Process *c_p, Eterm module, int *redsp, int fcalls)
{
Module* modp;
Eterm res;
@@ -442,58 +575,23 @@ erts_check_process_code(Process *c_p, Eterm module, int allow_gc, int *redsp)
if (!modp)
return am_false;
erts_rlock_old_code(code_ix);
- res = modp->old.code ? check_process_code(c_p, modp, allow_gc, redsp) : am_false;
+ res = (!modp->old.code_hdr
+ ? am_false
+ : check_process_code(c_p, modp, redsp, fcalls));
erts_runlock_old_code(code_ix);
return res;
}
-BIF_RETTYPE erts_internal_check_process_code_2(BIF_ALIST_2)
+BIF_RETTYPE erts_internal_check_process_code_1(BIF_ALIST_1)
{
int reds = 0;
Eterm res;
- Eterm olist = BIF_ARG_2;
- int allow_gc = 1;
if (is_not_atom(BIF_ARG_1))
goto badarg;
- while (is_list(olist)) {
- Eterm *lp = list_val(olist);
- Eterm opt = CAR(lp);
- if (is_tuple(opt)) {
- Eterm* tp = tuple_val(opt);
- switch (arityval(tp[0])) {
- case 2:
- switch (tp[1]) {
- case am_allow_gc:
- switch (tp[2]) {
- case am_false:
- allow_gc = 0;
- break;
- case am_true:
- allow_gc = 1;
- break;
- default:
- goto badarg;
- }
- break;
- default:
- goto badarg;
- }
- break;
- default:
- goto badarg;
- }
- }
- else
- goto badarg;
- olist = CDR(lp);
- }
- if (is_not_nil(olist))
- goto badarg;
-
- res = erts_check_process_code(BIF_P, BIF_ARG_1, allow_gc, &reds);
+ res = erts_check_process_code(BIF_P, BIF_ARG_1, &reds, BIF_P->fcalls);
ASSERT(is_value(res));
@@ -503,6 +601,39 @@ badarg:
BIF_ERROR(BIF_P, BADARG);
}
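+
+/*
+ * Check old code for a process that may be executing dirty. Only the
+ * dedicated dirty-process code checker process may call this BIF; the
+ * call yields and retries if the target process cannot be suspended
+ * right away.
+ */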
+BIF_RETTYPE erts_internal_check_dirty_process_code_2(BIF_ALIST_2)
+{
+ Process *rp;
+ int reds = 0;
+ Eterm res;
+
+ if (BIF_P != erts_dirty_process_code_checker)
+ BIF_ERROR(BIF_P, EXC_NOTSUP);
+
+ if (is_not_internal_pid(BIF_ARG_1))
+ BIF_ERROR(BIF_P, BADARG);
+
+ if (is_not_atom(BIF_ARG_2))
+ BIF_ERROR(BIF_P, BADARG);
+
+ rp = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN,
+ BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
+ if (rp == ERTS_PROC_LOCK_BUSY)
+ ERTS_BIF_YIELD2(bif_export[BIF_erts_internal_check_dirty_process_code_2],
+ BIF_P, BIF_ARG_1, BIF_ARG_2);
+ if (!rp)
+ BIF_RET(am_false);
+
+ res = erts_check_process_code(rp, BIF_ARG_2, &reds, BIF_P->fcalls);
+
+ if (BIF_P != rp)
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
+
+ ASSERT(is_value(res));
+
+ BIF_RET2(res, reds);
+}
+
BIF_RETTYPE delete_module_1(BIF_ALIST_1)
{
ErtsCodeIndex code_ix;
@@ -520,25 +651,26 @@ BIF_RETTYPE delete_module_1(BIF_ALIST_1)
}
{
- erts_start_staging_code_ix();
+ erts_start_staging_code_ix(0);
code_ix = erts_staging_code_ix();
modp = erts_get_module(BIF_ARG_1, code_ix);
if (!modp) {
res = am_undefined;
}
- else if (modp->old.code != 0) {
+ else if (modp->old.code_hdr) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp, "Module %T must be purged before loading\n",
+ erts_dsprintf(dsbufp, "Module %T must be purged before deleting\n",
BIF_ARG_1);
erts_send_error_to_logger(BIF_P->group_leader, dsbufp);
ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
}
else {
if (modp->curr.num_breakpoints > 0 ||
- modp->curr.num_traced_exports > 0) {
- /* we have tracing, retry single threaded */
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ modp->curr.num_traced_exports > 0 ||
+ IF_HIPE(hipe_need_blocking(modp))) {
+ /* tracing or hipe needs to go single threaded */
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
is_blocking = 1;
if (modp->curr.num_breakpoints) {
erts_clear_module_break(modp);
@@ -550,7 +682,15 @@ BIF_RETTYPE delete_module_1(BIF_ALIST_1)
success = 1;
}
}
- return staging_epilogue(BIF_P, success, res, is_blocking, NULL, 0);
+ {
+ struct m mod;
+ Eterm retval;
+ mod.module = BIF_ARG_1;
+ mod.modp = modp;
+ mod.exception = THE_NON_VALUE;
+ retval = staging_epilogue(BIF_P, success, res, is_blocking, &mod, 1, 0);
+ return retval;
+ }
}
BIF_RETTYPE module_loaded_1(BIF_ALIST_1)
@@ -564,8 +704,8 @@ BIF_RETTYPE module_loaded_1(BIF_ALIST_1)
}
code_ix = erts_active_code_ix();
if ((modp = erts_get_module(BIF_ARG_1, code_ix)) != NULL) {
- if (modp->curr.code != NULL
- && modp->curr.code[MI_ON_LOAD_FUNCTION_PTR] == 0) {
+ if (modp->curr.code_hdr
+ && modp->curr.code_hdr->on_load_function_ptr == NULL) {
res = am_true;
}
}
@@ -612,10 +752,13 @@ BIF_RETTYPE call_on_load_function_1(BIF_ALIST_1)
{
Module* modp = erts_get_module(BIF_ARG_1, erts_active_code_ix());
- if (modp && modp->curr.code) {
- BIF_TRAP_CODE_PTR_0(BIF_P, modp->curr.code[MI_ON_LOAD_FUNCTION_PTR]);
+ if (!modp || !modp->on_load) {
+ BIF_ERROR(BIF_P, BADARG);
}
- else {
+ if (modp->on_load->code_hdr) {
+ BIF_TRAP_CODE_PTR_0(BIF_P,
+ modp->on_load->code_hdr->on_load_function_ptr);
+ } else {
BIF_ERROR(BIF_P, BADARG);
}
}
@@ -624,75 +767,105 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
{
ErtsCodeIndex code_ix;
Module* modp;
- Eterm on_load;
+
+ if (BIF_ARG_2 != am_false && BIF_ARG_2 != am_true) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
if (!erts_try_seize_code_write_permission(BIF_P)) {
ERTS_BIF_YIELD2(bif_export[BIF_finish_after_on_load_2],
BIF_P, BIF_ARG_1, BIF_ARG_2);
}
- /* ToDo: Use code_ix staging instead of thread blocking */
-
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
-
code_ix = erts_active_code_ix();
modp = erts_get_module(BIF_ARG_1, code_ix);
- if (!modp || modp->curr.code == 0) {
- error:
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ if (!modp || !modp->on_load || !modp->on_load->code_hdr
+ || !modp->on_load->code_hdr->on_load_function_ptr) {
+
erts_release_code_write_permission();
BIF_ERROR(BIF_P, BADARG);
}
- if ((on_load = modp->curr.code[MI_ON_LOAD_FUNCTION_PTR]) == 0) {
- goto error;
- }
- if (BIF_ARG_2 != am_false && BIF_ARG_2 != am_true) {
- goto error;
- }
if (BIF_ARG_2 == am_true) {
- int i;
+ struct m mods[1];
+ int is_blocking = 0;
+ int i, num_exps;
+
+ erts_start_staging_code_ix(0);
+ code_ix = erts_staging_code_ix();
+ modp = erts_get_module(BIF_ARG_1, code_ix);
+
+ ASSERT(modp && modp->on_load && modp->on_load->code_hdr
+ && modp->on_load->code_hdr->on_load_function_ptr);
+
+ if (erts_is_default_trace_enabled()
+ || IF_HIPE(hipe_need_blocking(modp))) {
+
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
+ is_blocking = 1;
+ }
+
+ /*
+ * Make the code with the on_load function current.
+ */
+
+ if (modp->curr.code_hdr) {
+ modp->old = modp->curr;
+ }
+ modp->curr = *modp->on_load;
+ erts_free(ERTS_ALC_T_PREPARED_CODE, modp->on_load);
+ modp->on_load = 0;
/*
 * The on_load function succeeded. Fix up export entries.
*/
- for (i = 0; i < export_list_size(code_ix); i++) {
+ num_exps = export_list_size(code_ix);
+ for (i = 0; i < num_exps; i++) {
Export *ep = export_list(i,code_ix);
- if (ep != NULL &&
- ep->code[0] == BIF_ARG_1 &&
- ep->code[4] != 0) {
- ep->addressv[code_ix] = (void *) ep->code[4];
- ep->code[4] = 0;
+ if (ep == NULL || ep->info.mfa.module != BIF_ARG_1) {
+ continue;
+ }
+ if (ep->beam[1] != 0) {
+ ep->addressv[code_ix] = (void *) ep->beam[1];
+ ep->beam[1] = 0;
+ } else {
+ if (ep->addressv[code_ix] == ep->beam &&
+ BeamIsOpCode(ep->beam[0], op_apply_bif)) {
+ continue;
+ }
+ ep->addressv[code_ix] = ep->beam;
+ ep->beam[0] = BeamOpCodeAddr(op_call_error_handler);
}
}
- modp->curr.code[MI_ON_LOAD_FUNCTION_PTR] = 0;
- set_default_trace_pattern(BIF_ARG_1);
- } else if (BIF_ARG_2 == am_false) {
- BeamInstr* code;
- BeamInstr* end;
+ modp->curr.code_hdr->on_load_function_ptr = NULL;
+
+ mods[0].modp = modp;
+ mods[0].module = BIF_ARG_1;
+ mods[0].exception = THE_NON_VALUE;
+ return staging_epilogue(BIF_P, 1, am_true, is_blocking, mods, 1, 0);
+ }
+ else if (BIF_ARG_2 == am_false) {
+ int i, num_exps;
/*
- * The on_load function failed. Remove the loaded code.
- * This is an combination of delete and purge. We purge
- * the current code; the old code is not touched.
+ * The on_load function failed. Remove, from the export entries,
+ * any references to the code that is about to be purged.
*/
- erts_total_code_size -= modp->curr.code_length;
- code = modp->curr.code;
- end = (BeamInstr *)((char *)code + modp->curr.code_length);
- erts_cleanup_funs_on_purge(code, end);
- beam_catches_delmod(modp->curr.catches, code, modp->curr.code_length,
- erts_active_code_ix());
- erts_free(ERTS_ALC_T_CODE, (void *) code);
- modp->curr.code = NULL;
- modp->curr.code_length = 0;
- modp->curr.catches = BEAM_CATCHES_NIL;
- erts_remove_from_ranges(code);
- }
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+
+ num_exps = export_list_size(code_ix);
+ for (i = 0; i < num_exps; i++) {
+ Export *ep = export_list(i,code_ix);
+ if (ep == NULL || ep->info.mfa.module != BIF_ARG_1) {
+ continue;
+ }
+ if (BeamIsOpCode(ep->beam[0], op_apply_bif)) {
+ continue;
+ }
+ ep->beam[1] = 0;
+ }
+ }
erts_release_code_write_permission();
BIF_RET(am_true);
}
@@ -704,60 +877,267 @@ set_default_trace_pattern(Eterm module)
Binary *match_spec;
Binary *meta_match_spec;
struct trace_pattern_flags trace_pattern_flags;
- Eterm meta_tracer_pid;
+ ErtsTracer meta_tracer;
erts_get_default_trace_pattern(&trace_pattern_is_on,
&match_spec,
&meta_match_spec,
&trace_pattern_flags,
- &meta_tracer_pid);
+ &meta_tracer);
if (trace_pattern_is_on) {
- Eterm mfa[1];
- mfa[0] = module;
- (void) erts_set_trace_pattern(0, mfa, 1,
+ ErtsCodeMFA mfa;
+ mfa.module = module;
+ (void) erts_set_trace_pattern(0, &mfa, 1,
match_spec,
meta_match_spec,
1, trace_pattern_flags,
- meta_tracer_pid, 1);
+ meta_tracer, 1);
}
}
+static Uint hfrag_literal_size(Eterm* start, Eterm* end,
+ char* lit_start, Uint lit_size);
+static void hfrag_literal_copy(Eterm **hpp, ErlOffHeap *ohp,
+ Eterm *start, Eterm *end,
+ char *lit_start, Uint lit_size);
+
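+/*
+ * Copy any literals in the area currently being collected that are
+ * still referenced by the process into heap data owned by the process
+ * itself. Returns am_ok when no references into the area remain,
+ * am_need_gc when references remain but gc_allowed is false, and
+ * THE_NON_VALUE when the operation cannot complete here (GC disabled,
+ * or the collection must continue on a dirty scheduler).
+ */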
+Eterm
+erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed)
+{
+ ErtsLiteralArea *la;
+ ErtsMessage *msgp;
+ struct erl_off_heap_header* oh;
+ char *literals;
+ Uint lit_bsize;
+ ErlHeapFragment *hfrag;
+
+ la = ERTS_COPY_LITERAL_AREA();
+ if (!la)
+ goto return_ok;
+
+ oh = la->off_heap;
+ literals = (char *) &la->start[0];
+ lit_bsize = (char *) la->end - literals;
+
+ /*
+ * If a literal is in the message queue we make an explicit copy of
+ * it and attach it to the heap fragment. Each message needs to be
+ * self-contained; we cannot save the literal in the old_heap or
+ * in any heap other than the message itself.
+ */
+
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
+ ERTS_MSGQ_MV_INQ2PRIVQ(c_p);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
+
+ for (msgp = c_p->msg.first; msgp; msgp = msgp->next) {
+ ErlHeapFragment *hf;
+ Uint lit_sz = 0;
+
+ *redsp += 1;
+
+ if (msgp->data.attached == ERTS_MSG_COMBINED_HFRAG)
+ hfrag = &msgp->hfrag;
+ else if (is_value(ERL_MESSAGE_TERM(msgp)) && msgp->data.heap_frag)
+ hfrag = msgp->data.heap_frag;
+ else
+ continue; /* Content on heap or in external term format... */
+
+ for (hf = hfrag; hf; hf = hf->next) {
+ lit_sz += hfrag_literal_size(&hf->mem[0], &hf->mem[hf->used_size],
+ literals, lit_bsize);
+ *redsp += 1;
+ }
+
+ *redsp += lit_sz / 16; /* Better value needed... */
+ if (lit_sz > 0) {
+ ErlHeapFragment *bp = new_message_buffer(lit_sz);
+ Eterm *hp = bp->mem;
+
+ for (hf = hfrag; hf; hf = hf->next) {
+ hfrag_literal_copy(&hp, &bp->off_heap,
+ &hf->mem[0], &hf->mem[hf->used_size],
+ literals, lit_bsize);
+ hfrag = hf;
+ }
+
+ /* link new hfrag last */
+ ASSERT(hfrag->next == NULL);
+ hfrag->next = bp;
+ bp->next = NULL;
+ }
+ }
+
+ if (gc_allowed) {
+ /*
+ * The current implementation first tests without
+ * allowing GC, and then restarts the operation
+ * allowing GC if it is needed. It is therefore
+ * very likely that we will need the GC (although
+ * this is not completely certain). We go for
+ * the GC directly instead of scanning everything
+ * one more time...
+ *
+ * Also note that calling functions expect a
+ * major GC to be performed if gc_allowed is set
+ * to true. If you change this, you need to fix
+ * callers...
+ */
+ goto literal_gc;
+ }
+
+ *redsp += 2;
+ if (any_heap_ref_ptrs(&c_p->fvalue, &c_p->fvalue+1, literals, lit_bsize)) {
+ c_p->freason = EXC_NULL;
+ c_p->fvalue = NIL;
+ c_p->ftrace = NIL;
+ }
+
+ if (any_heap_ref_ptrs(c_p->stop, c_p->hend, literals, lit_bsize))
+ goto literal_gc;
+ *redsp += 1;
+#ifdef HIPE
+ if (nstack_any_heap_ref_ptrs(c_p, literals, lit_bsize))
+ goto literal_gc;
+ *redsp += 1;
+#endif
+ if (any_heap_refs(c_p->heap, c_p->htop, literals, lit_bsize))
+ goto literal_gc;
+ *redsp += 1;
+ if (c_p->abandoned_heap) {
+ if (any_heap_refs(c_p->abandoned_heap, c_p->abandoned_heap + c_p->heap_sz,
+ literals, lit_bsize))
+ goto literal_gc;
+ *redsp += 1;
+ }
+ if (any_heap_refs(c_p->old_heap, c_p->old_htop, literals, lit_bsize))
+ goto literal_gc;
+
+ /* Check dictionary */
+ *redsp += 1;
+ if (c_p->dictionary) {
+ Eterm* start = ERTS_PD_START(c_p->dictionary);
+ Eterm* end = start + ERTS_PD_SIZE(c_p->dictionary);
+
+ if (any_heap_ref_ptrs(start, end, literals, lit_bsize))
+ goto literal_gc;
+ }
+
+ /* Check heap fragments */
+ for (hfrag = c_p->mbuf; hfrag; hfrag = hfrag->next) {
+ Eterm *hp, *hp_end;
+
+ *redsp += 1;
+
+ hp = &hfrag->mem[0];
+ hp_end = &hfrag->mem[hfrag->used_size];
+ if (any_heap_refs(hp, hp_end, literals, lit_bsize))
+ goto literal_gc;
+ }
+
+ /*
+ * Message buffer fragments (matched messages)
+ *  - off-heap lists should already have been moved into the
+ *    process' off-heap structure.
+ * - Check for literals
+ */
+ for (msgp = c_p->msg_frag; msgp; msgp = msgp->next) {
+ hfrag = erts_message_to_heap_frag(msgp);
+ for (; hfrag; hfrag = hfrag->next) {
+ Eterm *hp, *hp_end;
+
+ *redsp += 1;
+
+ hp = &hfrag->mem[0];
+ hp_end = &hfrag->mem[hfrag->used_size];
+
+ if (any_heap_refs(hp, hp_end, literals, lit_bsize))
+ goto literal_gc;
+ }
+ }
+
+return_ok:
+
+ if (ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p)))
+ c_p->flags &= ~F_DIRTY_CLA;
+
+ return am_ok;
+
+literal_gc:
+
+ if (!gc_allowed)
+ return am_need_gc;
+
+ if (c_p->flags & F_DISABLE_GC)
+ return THE_NON_VALUE;
+
+ *redsp += erts_garbage_collect_literals(c_p, (Eterm *) literals, lit_bsize,
+ oh, fcalls);
+
+ if (c_p->flags & F_DIRTY_CLA)
+ return THE_NON_VALUE;
+
+ return am_ok;
+}
+
static Eterm
-check_process_code(Process* rp, Module* modp, int allow_gc, int *redsp)
+check_process_code(Process* rp, Module* modp, int *redsp, int fcalls)
{
BeamInstr* start;
char* mod_start;
Uint mod_size;
- BeamInstr* end;
Eterm* sp;
- struct erl_off_heap_header* oh;
- int done_gc = 0;
+#ifdef HIPE
+ void *nat_start = NULL;
+ Uint nat_size = 0;
+#endif
-#define INSIDE(a) (start <= (a) && (a) < end)
+ *redsp += 1;
/*
* Pick up limits for the module.
*/
- start = modp->old.code;
- end = (BeamInstr *)((char *)start + modp->old.code_length);
+ start = (BeamInstr*) modp->old.code_hdr;
mod_start = (char *) start;
mod_size = modp->old.code_length;
/*
* Check if current instruction or continuation pointer points into module.
*/
- if (INSIDE(rp->i) || INSIDE(rp->cp)) {
+ if (ErtsInArea(rp->i, mod_start, mod_size)
+ || ErtsInArea(rp->cp, mod_start, mod_size)) {
return am_true;
}
+
+ *redsp += 1;
+
+ if (erts_check_nif_export_in_area(rp, mod_start, mod_size))
+ return am_true;
+
+ *redsp += (STACK_START(rp) - rp->stop) / 32;
/*
* Check all continuation pointers stored on the stack.
*/
for (sp = rp->stop; sp < STACK_START(rp); sp++) {
- if (is_CP(*sp) && INSIDE(cp_val(*sp))) {
+ if (is_CP(*sp) && ErtsInArea(cp_val(*sp), mod_start, mod_size)) {
+ return am_true;
+ }
+ }
+
+#ifdef HIPE
+ /*
+ * Check all continuation pointers stored on the native stack if the module
+ * has native code.
+ */
+ if (modp->old.hipe_code) {
+ nat_start = modp->old.hipe_code->text_segment;
+ nat_size = modp->old.hipe_code->text_segment_size;
+ if (nat_size && nstack_any_cps_in_segment(rp, nat_start, nat_size)) {
return am_true;
}
}
+#endif
/*
* Check all continuation pointers stored in stackdump
@@ -768,15 +1148,23 @@ check_process_code(Process* rp, Module* modp, int allow_gc, int *redsp)
struct StackTrace *s;
ASSERT(is_list(rp->ftrace));
s = (struct StackTrace *) big_val(CDR(list_val(rp->ftrace)));
- if ((s->pc && INSIDE(s->pc)) ||
- (s->current && INSIDE(s->current))) {
+ if ((s->pc && ErtsInArea(s->pc, mod_start, mod_size)) ||
+ (s->current && ErtsInArea(s->current, mod_start, mod_size))) {
rp->freason = EXC_NULL;
rp->fvalue = NIL;
rp->ftrace = NIL;
} else {
int i;
+ char *area_start = mod_start;
+ Uint area_size = mod_size;
+#ifdef HIPE
+ if (rp->freason & EXF_NATIVE) {
+ area_start = nat_start;
+ area_size = nat_size;
+ }
+#endif
for (i = 0; i < s->depth; i++) {
- if (INSIDE(s->trace[i])) {
+ if (ErtsInArea(s->trace[i], area_start, area_size)) {
rp->freason = EXC_NULL;
rp->fvalue = NIL;
rp->ftrace = NIL;
@@ -786,123 +1174,9 @@ check_process_code(Process* rp, Module* modp, int allow_gc, int *redsp)
}
}
- if (rp->flags & F_DISABLE_GC) {
- /*
- * Cannot proceed. Process has disabled gc in order to
- * safely leave inconsistent data on the heap and/or
- * off heap lists. Need to wait for gc to be enabled
- * again.
- */
- return THE_NON_VALUE;
- }
-
- /*
- * See if there are funs that refer to the old version of the module.
- */
-
- rescan:
- for (oh = MSO(rp).first; oh; oh = oh->next) {
- if (thing_subtag(oh->thing_word) == FUN_SUBTAG) {
- ErlFunThing* funp = (ErlFunThing*) oh;
-
- if (INSIDE((BeamInstr *) funp->fe->address)) {
- if (done_gc) {
- return am_true;
- } else {
- if (!allow_gc)
- return am_aborted;
- /*
- * Try to get rid of this fun by garbage collecting.
- * Clear both fvalue and ftrace to make sure they
- * don't hold any funs.
- */
- rp->freason = EXC_NULL;
- rp->fvalue = NIL;
- rp->ftrace = NIL;
- done_gc = 1;
- FLAGS(rp) |= F_NEED_FULLSWEEP;
- *redsp += erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity);
- goto rescan;
- }
- }
- }
- }
-
- /*
- * See if there are constants inside the module referenced by the process.
- */
- done_gc = 0;
- for (;;) {
- ErlMessage* mp;
-
- if (any_heap_ref_ptrs(&rp->fvalue, &rp->fvalue+1, mod_start, mod_size)) {
- rp->freason = EXC_NULL;
- rp->fvalue = NIL;
- rp->ftrace = NIL;
- }
- if (any_heap_ref_ptrs(rp->stop, rp->hend, mod_start, mod_size)) {
- goto need_gc;
- }
- if (any_heap_refs(rp->heap, rp->htop, mod_start, mod_size)) {
- goto need_gc;
- }
-
- if (any_heap_refs(rp->old_heap, rp->old_htop, mod_start, mod_size)) {
- goto need_gc;
- }
-
- if (rp->dictionary != NULL) {
- Eterm* start = rp->dictionary->data;
- Eterm* end = start + rp->dictionary->used;
-
- if (any_heap_ref_ptrs(start, end, mod_start, mod_size)) {
- goto need_gc;
- }
- }
-
- for (mp = rp->msg.first; mp != NULL; mp = mp->next) {
- if (any_heap_ref_ptrs(mp->m, mp->m+2, mod_start, mod_size)) {
- goto need_gc;
- }
- }
- break;
-
- need_gc:
- if (done_gc) {
- return am_true;
- } else {
- Eterm* literals;
- Uint lit_size;
- struct erl_off_heap_header* oh;
-
- if (!allow_gc)
- return am_aborted;
-
- /*
- * Try to get rid of constants by by garbage collecting.
- * Clear both fvalue and ftrace.
- */
- rp->freason = EXC_NULL;
- rp->fvalue = NIL;
- rp->ftrace = NIL;
- done_gc = 1;
- FLAGS(rp) |= F_NEED_FULLSWEEP;
- *redsp += erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity);
- literals = (Eterm *) modp->old.code[MI_LITERALS_START];
- lit_size = (Eterm *) modp->old.code[MI_LITERALS_END] - literals;
- oh = (struct erl_off_heap_header *)
- modp->old.code[MI_LITERALS_OFF_HEAP];
- *redsp += lit_size / 10; /* Need, better value... */
- erts_garbage_collect_literals(rp, literals, lit_size, oh);
- }
- }
return am_false;
-#undef INSIDE
}
-#define in_area(ptr,start,nbytes) \
- ((UWord)((char*)(ptr) - (char*)(start)) < (nbytes))
-
static int
any_heap_ref_ptrs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size)
{
@@ -914,7 +1188,7 @@ any_heap_ref_ptrs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size)
switch (primary_tag(val)) {
case TAG_PRIMARY_BOXED:
case TAG_PRIMARY_LIST:
- if (in_area(EXPAND_POINTER(val), mod_start, mod_size)) {
+ if (ErtsInArea(val, mod_start, mod_size)) {
return 1;
}
break;
@@ -934,13 +1208,21 @@ any_heap_refs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size)
switch (primary_tag(val)) {
case TAG_PRIMARY_BOXED:
case TAG_PRIMARY_LIST:
- if (in_area(EXPAND_POINTER(val), mod_start, mod_size)) {
+ if (ErtsInArea(val, mod_start, mod_size)) {
return 1;
}
break;
case TAG_PRIMARY_HEADER:
if (!header_is_transparent(val)) {
- Eterm* new_p = p + thing_arityval(val);
+ Eterm* new_p;
+ if (header_is_bin_matchstate(val)) {
+ ErlBinMatchState *ms = (ErlBinMatchState*) p;
+ ErlBinMatchBuffer *mb = &(ms->mb);
+ if (ErtsInArea(mb->orig, mod_start, mod_size)) {
+ return 1;
+ }
+ }
+ new_p = p + thing_arityval(val);
ASSERT(start <= new_p && new_p < end);
p = new_p;
}
@@ -949,100 +1231,532 @@ any_heap_refs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size)
return 0;
}
-#undef in_area
+static Uint
+hfrag_literal_size(Eterm* start, Eterm* end, char* lit_start, Uint lit_size)
+{
+ Eterm* p;
+ Eterm val;
+ Uint sz = 0;
-BIF_RETTYPE purge_module_1(BIF_ALIST_1)
+ for (p = start; p < end; p++) {
+ val = *p;
+ switch (primary_tag(val)) {
+ case TAG_PRIMARY_BOXED:
+ case TAG_PRIMARY_LIST:
+ if (ErtsInArea(val, lit_start, lit_size)) {
+ sz += size_object(val);
+ }
+ break;
+ case TAG_PRIMARY_HEADER:
+ if (!header_is_transparent(val)) {
+ Eterm* new_p;
+ if (header_is_bin_matchstate(val)) {
+ ErlBinMatchState *ms = (ErlBinMatchState*) p;
+ ErlBinMatchBuffer *mb = &(ms->mb);
+ if (ErtsInArea(mb->orig, lit_start, lit_size)) {
+ sz += size_object(mb->orig);
+ }
+ }
+ new_p = p + thing_arityval(val);
+ ASSERT(start <= new_p && new_p < end);
+ p = new_p;
+ }
+ }
+ }
+ return sz;
+}
+
+static void
+hfrag_literal_copy(Eterm **hpp, ErlOffHeap *ohp,
+ Eterm *start, Eterm *end,
+ char *lit_start, Uint lit_size) {
+ Eterm* p;
+ Eterm val;
+ Uint sz;
+
+ for (p = start; p < end; p++) {
+ val = *p;
+ switch (primary_tag(val)) {
+ case TAG_PRIMARY_BOXED:
+ case TAG_PRIMARY_LIST:
+ if (ErtsInArea(val, lit_start, lit_size)) {
+ sz = size_object(val);
+ val = copy_struct(val, sz, hpp, ohp);
+ *p = val;
+ }
+ break;
+ case TAG_PRIMARY_HEADER:
+ if (!header_is_transparent(val)) {
+ Eterm* new_p;
+ /* A match state in a message should not be possible; handle it anyway. */
+ if (header_is_bin_matchstate(val)) {
+ ErlBinMatchState *ms = (ErlBinMatchState*) p;
+ ErlBinMatchBuffer *mb = &(ms->mb);
+ if (ErtsInArea(mb->orig, lit_start, lit_size)) {
+ sz = size_object(mb->orig);
+ mb->orig = copy_struct(mb->orig, sz, hpp, ohp);
+ }
+ }
+ new_p = p + thing_arityval(val);
+ ASSERT(start <= new_p && new_p < end);
+ p = new_p;
+ }
+ }
+ }
+}
+
+
+ErtsThrPrgrLaterOp later_literal_area_switch;
+
+typedef struct {
+ ErtsThrPrgrLaterOp lop;
+ ErtsLiteralArea *la;
+} ErtsLaterReleasLiteralArea;
+
+static void
+later_release_literal_area(void *vlrlap)
{
- ErtsCodeIndex code_ix;
- BeamInstr* code;
- BeamInstr* end;
- Module* modp;
- int is_blocking = 0;
- Eterm ret;
+ ErtsLaterReleasLiteralArea *lrlap;
+ lrlap = (ErtsLaterReleasLiteralArea *) vlrlap;
+ erts_release_literal_area(lrlap->la);
+ erts_free(ERTS_ALC_T_RELEASE_LAREA, vlrlap);
+}
- if (is_not_atom(BIF_ARG_1)) {
- BIF_ERROR(BIF_P, BADARG);
+static void
+complete_literal_area_switch(void *literal_area)
+{
+ Process *p = erts_literal_area_collector;
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ erts_resume(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ if (literal_area)
+ erts_release_literal_area((ErtsLiteralArea *) literal_area);
+}
+
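+/*
+ * Called by the literal-area collector process to switch to the next
+ * literal area queued for release. Returns am_false when the queue is
+ * empty (any previously installed area is scheduled for release), and
+ * am_true, after a thread-progress yield, when a new area has been
+ * installed for copying.
+ */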
+BIF_RETTYPE erts_internal_release_literal_area_switch_0(BIF_ALIST_0)
+{
+ ErtsLiteralArea *unused_la;
+ ErtsLiteralAreaRef *la_ref;
+
+ if (BIF_P != erts_literal_area_collector)
+ BIF_ERROR(BIF_P, EXC_NOTSUP);
+
+ erts_mtx_lock(&release_literal_areas.mtx);
+
+ la_ref = release_literal_areas.first;
+ if (la_ref) {
+ release_literal_areas.first = la_ref->next;
+ if (!release_literal_areas.first)
+ release_literal_areas.last = NULL;
}
- if (!erts_try_seize_code_write_permission(BIF_P)) {
- ERTS_BIF_YIELD1(bif_export[BIF_purge_module_1], BIF_P, BIF_ARG_1);
+ erts_mtx_unlock(&release_literal_areas.mtx);
+
+ unused_la = ERTS_COPY_LITERAL_AREA();
+
+ if (!la_ref) {
+ ERTS_SET_COPY_LITERAL_AREA(NULL);
+ if (unused_la) {
+ ErtsLaterReleasLiteralArea *lrlap;
+ lrlap = erts_alloc(ERTS_ALC_T_RELEASE_LAREA,
+ sizeof(ErtsLaterReleasLiteralArea));
+ lrlap->la = unused_la;
+ erts_schedule_thr_prgr_later_cleanup_op(
+ later_release_literal_area,
+ (void *) lrlap,
+ &lrlap->lop,
+ (sizeof(ErtsLaterReleasLiteralArea)
+ + sizeof(ErtsLiteralArea)
+ + ((unused_la->end
+ - &unused_la->start[0])
+ - 1)*(sizeof(Eterm))));
+ }
+ BIF_RET(am_false);
}
- code_ix = erts_active_code_ix();
+ ERTS_SET_COPY_LITERAL_AREA(la_ref->literal_area);
+
+ erts_free(ERTS_ALC_T_LITERAL_REF, la_ref);
+
+ erts_schedule_thr_prgr_later_op(complete_literal_area_switch,
+ unused_la,
+ &later_literal_area_switch);
+ erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
+
+}
+
+void
+erts_purge_state_add_fun(ErlFunEntry *fe)
+{
+ ASSERT(is_value(purge_state.module));
+ if (purge_state.fe_ix >= purge_state.fe_size) {
+ ErlFunEntry **funs;
+ purge_state.fe_size += 100;
+ funs = erts_alloc(ERTS_ALC_T_PURGE_DATA,
+ sizeof(ErlFunEntry *)*purge_state.fe_size);
+ sys_memcpy((void *) funs,
+ (void *) purge_state.funs,
+ purge_state.fe_ix*sizeof(ErlFunEntry *));
+ if (purge_state.funs != &purge_state.def_funs[0])
+ erts_free(ERTS_ALC_T_PURGE_DATA, purge_state.funs);
+ purge_state.funs = funs;
+ }
+ purge_state.funs[purge_state.fe_ix++] = fe;
+}
+
+Export *
+erts_suspend_process_on_pending_purge_lambda(Process *c_p, ErlFunEntry* fe)
+{
+ erts_mtx_lock(&purge_state.mtx);
+ if (purge_state.module == fe->module) {
+ /*
+ * The process c_p is about to call a fun in the code
+ * that we are trying to purge. Suspend it and call
+ * erts_code_purger:pending_purge_lambda/3. The process
+ * will be resumed when the purge completes or aborts,
+ * and will then try to do the call again.
+ */
+ if (purge_state.sp_ix >= purge_state.sp_size) {
+ Eterm *sprocs;
+ purge_state.sp_size += 100;
+ sprocs = erts_alloc(ERTS_ALC_T_PURGE_DATA,
+ (sizeof(Eterm)
+ * purge_state.sp_size));
+ sys_memcpy((void *) sprocs,
+ (void *) purge_state.sprocs,
+ purge_state.sp_ix*sizeof(Eterm));
+ if (purge_state.sprocs != &purge_state.def_sprocs[0])
+ erts_free(ERTS_ALC_T_PURGE_DATA, purge_state.sprocs);
+ purge_state.sprocs = sprocs;
+ }
+ purge_state.sprocs[purge_state.sp_ix++] = c_p->common.id;
+ erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
+ ERTS_VBUMP_ALL_REDS(c_p);
+ }
+ erts_mtx_unlock(&purge_state.mtx);
+ return purge_state.pending_purge_lambda;
+}
+
+static void
+finalize_purge_operation(Process *c_p, int succeded)
+{
+ Uint ix;
+
+ if (c_p)
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ erts_mtx_lock(&purge_state.mtx);
+
+ ASSERT(purge_state.module != THE_NON_VALUE);
+
+ purge_state.module = THE_NON_VALUE;
/*
- * Correct module?
+ * Resume all processes that have tried to call
+ * funs in this code.
*/
+ for (ix = 0; ix < purge_state.sp_ix; ix++) {
+ Process *rp = erts_pid2proc(NULL, 0,
+ purge_state.sprocs[ix],
+ ERTS_PROC_LOCK_STATUS);
+ if (rp) {
+ erts_resume(rp, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+ }
+ }
- if ((modp = erts_get_module(BIF_ARG_1, code_ix)) == NULL) {
- ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
+ erts_mtx_unlock(&purge_state.mtx);
+
+ if (c_p)
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ if (purge_state.sprocs != &purge_state.def_sprocs[0]) {
+ erts_free(ERTS_ALC_T_PURGE_DATA, purge_state.sprocs);
+ purge_state.sprocs = &purge_state.def_sprocs[0];
+ purge_state.sp_size = sizeof(purge_state.def_sprocs);
+ purge_state.sp_size /= sizeof(purge_state.def_sprocs[0]);
}
- else {
- erts_rwlock_old_code(code_ix);
+ purge_state.sp_ix = 0;
+ if (purge_state.funs != &purge_state.def_funs[0]) {
+ erts_free(ERTS_ALC_T_PURGE_DATA, purge_state.funs);
+ purge_state.funs = &purge_state.def_funs[0];
+ purge_state.fe_size = sizeof(purge_state.def_funs);
+ purge_state.fe_size /= sizeof(purge_state.def_funs[0]);
+ }
+ purge_state.fe_ix = 0;
+}
+
+
+static ErtsThrPrgrLaterOp purger_lop_data;
+
+static void
+resume_purger(void *unused)
+{
+ Process *p = erts_code_purger;
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ erts_resume(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+}
+
+static void
+finalize_purge_abort(void *unused)
+{
+ erts_fun_purge_abort_finalize(purge_state.funs, purge_state.fe_ix);
+
+ finalize_purge_operation(NULL, 0);
+
+ resume_purger(NULL);
+}
+
+
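+/*
+ * Purging is performed in stages, driven by the erts_code_purger
+ * process: 'prepare' (or 'prepare_on_load') marks all fun entries
+ * referring to the code with pending-purge markers, processes are then
+ * checked for remaining references, and finally either 'abort' or
+ * 'complete' finishes the operation.
+ */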
+BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
+{
+ if (BIF_P != erts_code_purger)
+ BIF_ERROR(BIF_P, EXC_NOTSUP);
+
+ if (is_not_atom(BIF_ARG_1))
+ BIF_ERROR(BIF_P, BADARG);
+
+ switch (BIF_ARG_2) {
+
+ case am_prepare:
+ case am_prepare_on_load: {
/*
- * Any code to purge?
+ * Prepare for purge by marking all fun
+ * entries referring to the code to purge
+ * with "pending purge" markers.
*/
- if (modp->old.code == 0) {
- ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
- }
+ ErtsCodeIndex code_ix;
+ Module* modp;
+ Eterm res;
+
+ if (is_value(purge_state.module))
+ BIF_ERROR(BIF_P, BADARG);
+
+ code_ix = erts_active_code_ix();
+
+ /*
+ * Correct module?
+ */
+ modp = erts_get_module(BIF_ARG_1, code_ix);
+ if (!modp)
+ res = am_false;
else {
/*
- * Unload any NIF library
+ * Any code to purge?
*/
- if (modp->old.nif != NULL) {
- /* ToDo: Do unload nif without blocking */
- erts_rwunlock_old_code(code_ix);
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
- is_blocking = 1;
- erts_rwlock_old_code(code_ix);
- erts_unload_nif(modp->old.nif);
- modp->old.nif = NULL;
+
+ if (BIF_ARG_2 == am_prepare_on_load) {
+ erts_rwlock_old_code(code_ix);
+ } else {
+ erts_rlock_old_code(code_ix);
}
+ if (BIF_ARG_2 == am_prepare_on_load) {
+ ASSERT(modp->on_load);
+ ASSERT(modp->on_load->code_hdr);
+ purge_state.saved_old = modp->old;
+ modp->old = *modp->on_load;
+ erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) modp->on_load);
+ modp->on_load = 0;
+ }
+
+ if (!modp->old.code_hdr)
+ res = am_false;
+ else {
+ BeamInstr* code;
+ BeamInstr* end;
+ erts_mtx_lock(&purge_state.mtx);
+ purge_state.module = BIF_ARG_1;
+ erts_mtx_unlock(&purge_state.mtx);
+ res = am_true;
+ code = (BeamInstr*) modp->old.code_hdr;
+ end = (BeamInstr *)((char *)code + modp->old.code_length);
+ erts_fun_purge_prepare(code, end);
+ }
+
+ if (BIF_ARG_2 == am_prepare_on_load) {
+ erts_rwunlock_old_code(code_ix);
+ } else {
+ erts_runlock_old_code(code_ix);
+ }
+ }
+
+ if (res != am_true)
+ BIF_RET(res);
+ else {
/*
- * Remove the old code.
+ * We'll be resumed when all schedulers are guaranteed
+ * to see the "pending purge" markers that we've made on
+ * all fun entries of the code that we are about to purge.
+ * Processes trying to call these funs will be suspended
+ * before calling the funs. That is, we are guaranteed not
+ * to get any more direct references into the code while
+ * checking for such references...
*/
- ASSERT(erts_total_code_size >= modp->old.code_length);
- erts_total_code_size -= modp->old.code_length;
- code = modp->old.code;
- end = (BeamInstr *)((char *)code + modp->old.code_length);
- erts_cleanup_funs_on_purge(code, end);
- beam_catches_delmod(modp->old.catches, code, modp->old.code_length,
- code_ix);
- decrement_refc(code);
- erts_free(ERTS_ALC_T_CODE, (void *) code);
- modp->old.code = NULL;
- modp->old.code_length = 0;
- modp->old.catches = BEAM_CATCHES_NIL;
- erts_remove_from_ranges(code);
- ERTS_BIF_PREP_RET(ret, am_true);
+ erts_schedule_thr_prgr_later_op(resume_purger,
+ NULL,
+ &purger_lop_data);
+ erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
}
- erts_rwunlock_old_code(code_ix);
}
- if (is_blocking) {
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+
+ case am_abort: {
+ /*
+ * Soft purge that detected direct references into the code
+ * we set out to purge. Abort the purge.
+ */
+
+ if (purge_state.module != BIF_ARG_1)
+ BIF_ERROR(BIF_P, BADARG);
+
+ erts_fun_purge_abort_prepare(purge_state.funs, purge_state.fe_ix);
+
+ /*
+ * We need to restore the code addresses of the funs in
+ * two stages in order to ensure that we do not get any
+ * stale suspended processes due to the purge abort.
+ * Restore address pointer (erts_fun_purge_abort_prepare);
+ * wait for thread progress; clear pending purge address
+ * pointer (erts_fun_purge_abort_finalize), and then
+ * resume processes that got suspended
+ * (finalize_purge_operation).
+ */
+ erts_schedule_thr_prgr_later_op(finalize_purge_abort,
+ NULL,
+ &purger_lop_data);
+ erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_false);
}
- erts_release_code_write_permission();
- return ret;
-}
-static void
-decrement_refc(BeamInstr* code)
-{
- struct erl_off_heap_header* oh =
- (struct erl_off_heap_header *) code[MI_LITERALS_OFF_HEAP];
-
- while (oh) {
- Binary* bptr;
- ASSERT(thing_subtag(oh->thing_word) == REFC_BINARY_SUBTAG);
- bptr = ((ProcBin*)oh)->val;
- if (erts_refc_dectest(&bptr->refc, 0) == 0) {
- erts_bin_free(bptr);
+ case am_complete: {
+ ErtsCodeIndex code_ix;
+ BeamInstr* code;
+ Module* modp;
+ int is_blocking = 0;
+ Eterm ret;
+ ErtsLiteralArea *literals = NULL;
+
+
+ /*
+ * We have no direct references into the code.
+ * Complete the purge.
+ */
+
+ if (purge_state.module != BIF_ARG_1)
+ BIF_ERROR(BIF_P, BADARG);
+
+ if (!erts_try_seize_code_write_permission(BIF_P)) {
+ ERTS_BIF_YIELD2(bif_export[BIF_erts_internal_purge_module_2],
+ BIF_P, BIF_ARG_1, BIF_ARG_2);
+ }
+
+ code_ix = erts_active_code_ix();
+
+ /*
+ * Correct module?
+ */
+
+ if ((modp = erts_get_module(BIF_ARG_1, code_ix)) == NULL) {
+ ERTS_BIF_PREP_RET(ret, am_false);
+ }
+ else {
+
+ erts_rwlock_old_code(code_ix);
+
+ /*
+ * Any code to purge?
+ */
+ if (!modp->old.code_hdr) {
+ ERTS_BIF_PREP_RET(ret, am_false);
+ }
+ else {
+ /*
+ * Unload any NIF library
+ */
+ if (modp->old.nif != NULL
+ || IF_HIPE(hipe_purge_need_blocking(modp))) {
+ /* ToDo: Do unload nif without blocking */
+ erts_rwunlock_old_code(code_ix);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
+ is_blocking = 1;
+ erts_rwlock_old_code(code_ix);
+ if (modp->old.nif) {
+ erts_unload_nif(modp->old.nif);
+ modp->old.nif = NULL;
+ }
+ }
+
+ /*
+ * Remove the old code.
+ */
+ ASSERT(erts_total_code_size >= modp->old.code_length);
+ erts_total_code_size -= modp->old.code_length;
+ code = (BeamInstr*) modp->old.code_hdr;
+ erts_fun_purge_complete(purge_state.funs, purge_state.fe_ix);
+ beam_catches_delmod(modp->old.catches, code, modp->old.code_length,
+ code_ix);
+ literals = modp->old.code_hdr->literal_area;
+ modp->old.code_hdr->literal_area = NULL;
+ erts_free(ERTS_ALC_T_CODE, (void *) code);
+ modp->old.code_hdr = NULL;
+ modp->old.code_length = 0;
+ modp->old.catches = BEAM_CATCHES_NIL;
+ erts_remove_from_ranges(code);
+#ifdef HIPE
+ hipe_purge_module(modp, is_blocking);
+#endif
+ ERTS_BIF_PREP_RET(ret, am_true);
+ }
+
+ if (purge_state.saved_old.code_hdr) {
+ modp->old = purge_state.saved_old;
+ purge_state.saved_old.code_hdr = 0;
+ }
+ erts_rwunlock_old_code(code_ix);
+ }
+ if (is_blocking) {
+ erts_thr_progress_unblock();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
}
- oh = oh->next;
+
+ erts_release_code_write_permission();
+
+ finalize_purge_operation(BIF_P, ret == am_true);
+
+ if (literals) {
+ ErtsLiteralAreaRef *ref;
+ ref = erts_alloc(ERTS_ALC_T_LITERAL_REF,
+ sizeof(ErtsLiteralAreaRef));
+ ref->literal_area = literals;
+ ref->next = NULL;
+ erts_mtx_lock(&release_literal_areas.mtx);
+ if (release_literal_areas.last) {
+ release_literal_areas.last->next = ref;
+ release_literal_areas.last = ref;
+ }
+ else {
+ release_literal_areas.first = ref;
+ release_literal_areas.last = ref;
+ }
+ erts_mtx_unlock(&release_literal_areas.mtx);
+ erts_queue_message(erts_literal_area_collector,
+ 0,
+ erts_alloc_message(0, NULL),
+ am_copy_literals,
+ BIF_P->common.id);
+ }
+
+ return ret;
+ }
+
+ default:
+ BIF_ERROR(BIF_P, BADARG);
+
}
}
@@ -1055,37 +1769,39 @@ delete_code(Module* modp)
{
ErtsCodeIndex code_ix = erts_staging_code_ix();
Eterm module = make_atom(modp->module);
- int i;
+ int i, num_exps = export_list_size(code_ix);
- for (i = 0; i < export_list_size(code_ix); i++) {
+ for (i = 0; i < num_exps; i++) {
Export *ep = export_list(i, code_ix);
- if (ep != NULL && (ep->code[0] == module)) {
- if (ep->addressv[code_ix] == ep->code+3) {
- if (ep->code[3] == (BeamInstr) em_apply_bif) {
+ if (ep != NULL && (ep->info.mfa.module == module)) {
+ if (ep->addressv[code_ix] == ep->beam) {
+ if (BeamIsOpCode(ep->beam[0], op_apply_bif)) {
continue;
}
- else if (ep->code[3] ==
- (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
+ else if (BeamIsOpCode(ep->beam[0], op_i_generic_breakpoint)) {
+ ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
ASSERT(modp->curr.num_traced_exports > 0);
- erts_clear_export_break(modp, ep->code+3);
+ DBG_TRACE_MFA_P(&ep->info.mfa,
+ "export trace cleared, code_ix=%d", code_ix);
+ erts_clear_export_break(modp, &ep->info);
}
- else ASSERT(ep->code[3] == (BeamInstr) em_call_error_handler
- || !erts_initialized);
- }
- ep->addressv[code_ix] = ep->code+3;
- ep->code[3] = (BeamInstr) em_call_error_handler;
- ep->code[4] = 0;
+ else {
+ ASSERT(BeamIsOpCode(ep->beam[0], op_call_error_handler) ||
+ !erts_initialized);
+ }
+ }
+ ep->addressv[code_ix] = ep->beam;
+ ep->beam[0] = BeamOpCodeAddr(op_call_error_handler);
+ ep->beam[1] = 0;
+ DBG_TRACE_MFA_P(&ep->info.mfa,
+ "export invalidation, code_ix=%d", code_ix);
}
}
ASSERT(modp->curr.num_breakpoints == 0);
ASSERT(modp->curr.num_traced_exports == 0);
modp->old = modp->curr;
- modp->curr.code = NULL;
- modp->curr.code_length = 0;
- modp->curr.catches = BEAM_CATCHES_NIL;
- modp->curr.nif = NULL;
+ erts_module_instance_init(&modp->curr);
}
@@ -1099,32 +1815,12 @@ beam_make_current_old(Process *c_p, ErtsProcLocks c_p_locks, Eterm module)
* if not, delete old code; error if old code already exists.
*/
- if (modp->curr.code != NULL && modp->old.code != NULL) {
- return am_not_purged;
- } else if (modp->old.code == NULL) { /* Make the current version old. */
+ if (modp->curr.code_hdr) {
+ if (modp->old.code_hdr) {
+ return am_not_purged;
+ }
+ /* Make the current version old. */
delete_code(modp);
}
return NIL;
}
-
-static int
-is_native(BeamInstr* code)
-{
- Uint i, num_functions = code[MI_NUM_FUNCTIONS];
-
- /* Check NativeAdress of first real function in module
- */
- for (i=0; i<num_functions; i++) {
- BeamInstr* func_info = (BeamInstr *) code[MI_FUNCTIONS+i];
- Eterm name = (Eterm) func_info[3];
-
- if (is_atom(name)) {
- return func_info[1] != 0;
- }
- else ASSERT(is_nil(name)); /* ignore BIF stubs */
- }
- /* Not a single non-BIF function? */
- return 0;
-}
-
-
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index 4e711c89e0..0832b3f374 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -31,6 +32,7 @@
#include "erl_binary.h"
#include "beam_bp.h"
#include "erl_term.h"
+#include "erl_nfunc_sched.h"
/* *************************************************************************
** Macros
@@ -44,15 +46,15 @@
#define ReAlloc(P, SIZ) erts_realloc(ERTS_ALC_T_BPD, (P), (SIZ))
#define Free(P) erts_free(ERTS_ALC_T_BPD, (P))
-#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
-# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
+#if defined(ERTS_ENABLE_LOCK_CHECK)
+# define ERTS_REQ_PROC_MAIN_LOCK(P) \
if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN,\
__FILE__, __LINE__)
-# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
+# define ERTS_UNREQ_PROC_MAIN_LOCK(P) \
if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
#else
-# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
-# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
+# define ERTS_REQ_PROC_MAIN_LOCK(P)
+# define ERTS_UNREQ_PROC_MAIN_LOCK(P)
#endif
#define ERTS_BPF_LOCAL_TRACE 0x01
@@ -71,8 +73,40 @@ extern BeamInstr beam_return_trace[1]; /* OpCode(i_return_trace) */
extern BeamInstr beam_exception_trace[1]; /* OpCode(i_exception_trace) */
extern BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
-erts_smp_atomic32_t erts_active_bp_index;
-erts_smp_atomic32_t erts_staging_bp_index;
+erts_atomic32_t erts_active_bp_index;
+erts_atomic32_t erts_staging_bp_index;
+erts_mtx_t erts_dirty_bp_ix_mtx;
+
+/*
+ * Inlined helpers
+ */
+
+static ERTS_INLINE ErtsMonotonicTime
+get_mtime(Process *c_p)
+{
+ return erts_get_monotonic_time(erts_proc_sched_data(c_p));
+}
+
+static ERTS_INLINE Uint32
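+/*
+ * Each normal scheduler has its own breakpoint-data slot (esdp->no - 1);
+ * all dirty schedulers share one extra slot at index erts_no_schedulers,
+ * serialized by erts_dirty_bp_ix_mtx.
+ */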
+acquire_bp_sched_ix(Process *c_p)
+{
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
+ ASSERT(esdp);
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ erts_mtx_lock(&erts_dirty_bp_ix_mtx);
+ return (Uint32) erts_no_schedulers;
+ }
+ return (Uint32) esdp->no - 1;
+}
+
+static ERTS_INLINE void
+release_bp_sched_ix(Uint32 ix)
+{
+ if (ix == (Uint32) erts_no_schedulers)
+ erts_mtx_unlock(&erts_dirty_bp_ix_mtx);
+}
+
+
/* *************************************************************************
** Local prototypes
@@ -81,41 +115,33 @@ erts_smp_atomic32_t erts_staging_bp_index;
/*
** Helpers
*/
-static Eterm do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
- int local, Binary* ms, Eterm tracer_pid);
+static ErtsTracer do_call_trace(Process* c_p, ErtsCodeInfo *info, Eterm* reg,
+ int local, Binary* ms, ErtsTracer tracer);
static void set_break(BpFunctions* f, Binary *match_spec, Uint break_flags,
- enum erts_break_op count_op, Eterm tracer_pid);
-static void set_function_break(BeamInstr *pc,
+ enum erts_break_op count_op, ErtsTracer tracer);
+static void set_function_break(ErtsCodeInfo *ci,
Binary *match_spec,
Uint break_flags,
enum erts_break_op count_op,
- Eterm tracer_pid);
+ ErtsTracer tracer);
static void clear_break(BpFunctions* f, Uint break_flags);
-static int clear_function_break(BeamInstr *pc, Uint break_flags);
+static int clear_function_break(ErtsCodeInfo *ci, Uint break_flags);
-static BpDataTime* get_time_break(BeamInstr *pc);
-static GenericBpData* check_break(BeamInstr *pc, Uint break_flags);
-static void bp_time_diff(bp_data_time_item_t *item,
- process_breakpoint_time_t *pbt,
- Uint ms, Uint s, Uint us);
+static BpDataTime* get_time_break(ErtsCodeInfo *ci);
+static GenericBpData* check_break(ErtsCodeInfo *ci, Uint break_flags);
-static void bp_meta_unref(BpMetaPid* bmp);
-static void bp_count_unref(BpCount* bcp);
-static void bp_time_unref(BpDataTime* bdt);
-static void consolidate_bp_data(Module* modp, BeamInstr* pc, int local);
-static void uninstall_breakpoint(BeamInstr* pc);
+static void bp_meta_unref(BpMetaTracer *bmt);
+static void bp_count_unref(BpCount *bcp);
+static void bp_time_unref(BpDataTime *bdt);
+static void consolidate_bp_data(Module *modp, ErtsCodeInfo *ci, int local);
+static void uninstall_breakpoint(ErtsCodeInfo *ci);
/* bp_hash */
#define BP_TIME_ADD(pi0, pi1) \
do { \
- Uint r; \
(pi0)->count += (pi1)->count; \
- (pi0)->s_time += (pi1)->s_time; \
- (pi0)->us_time += (pi1)->us_time; \
- r = (pi0)->us_time / 1000000; \
- (pi0)->s_time += r; \
- (pi0)->us_time = (pi0)->us_time % 1000000; \
+ (pi0)->time += (pi1)->time; \
} while(0)
static void bp_hash_init(bp_time_hash_t *hash, Uint n);
@@ -130,13 +156,15 @@ static void bp_hash_delete(bp_time_hash_t *hash);
void
erts_bp_init(void) {
- erts_smp_atomic32_init_nob(&erts_active_bp_index, 0);
- erts_smp_atomic32_init_nob(&erts_staging_bp_index, 1);
+ erts_atomic32_init_nob(&erts_active_bp_index, 0);
+ erts_atomic32_init_nob(&erts_staging_bp_index, 1);
+ erts_mtx_init(&erts_dirty_bp_ix_mtx, "dirty_break_point_index", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
}
void
-erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified)
+erts_bp_match_functions(BpFunctions* f, ErtsCodeMFA *mfa, int specified)
{
ErtsCodeIndex code_ix = erts_active_code_ix();
Uint max_funcs = 0;
@@ -151,8 +179,8 @@ erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified)
num_modules = 0;
for (current = 0; current < max_modules; current++) {
modp = module_code(current, code_ix);
- if (modp->curr.code) {
- max_funcs += modp->curr.code[MI_NUM_FUNCTIONS];
+ if (modp->curr.code_hdr) {
+ max_funcs += modp->curr.code_hdr->num_functions;
module[num_modules++] = modp;
}
}
@@ -160,42 +188,45 @@ erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified)
f->matching = (BpFunction *) Alloc(max_funcs*sizeof(BpFunction));
i = 0;
for (current = 0; current < num_modules; current++) {
- BeamInstr** code_base = (BeamInstr **) module[current]->curr.code;
- BeamInstr* code;
- Uint num_functions = (Uint)(UWord) code_base[MI_NUM_FUNCTIONS];
+ BeamCodeHeader* code_hdr = module[current]->curr.code_hdr;
+ ErtsCodeInfo* ci;
+ Uint num_functions = (Uint)(UWord) code_hdr->num_functions;
Uint fi;
if (specified > 0) {
- if (mfa[0] != make_atom(module[current]->module)) {
+ if (mfa->module != make_atom(module[current]->module)) {
/* Wrong module name */
continue;
}
}
for (fi = 0; fi < num_functions; fi++) {
- BeamInstr* pc;
- int wi;
- code = code_base[MI_FUNCTIONS+fi];
- ASSERT(code[0] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- pc = code+5;
- if (erts_is_native_break(pc)) {
+ ci = code_hdr->functions[fi];
+ ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI));
+ if (erts_is_function_native(ci)) {
continue;
}
- if (is_nil(code[3])) { /* Ignore BIF stub */
+ if (is_nil(ci->mfa.module)) { /* Ignore BIF stub */
continue;
}
- for (wi = 0;
- wi < specified && (Eterm) code[2+wi] == mfa[wi];
- wi++) {
- /* Empty loop body */
- }
- if (wi == specified) {
- /* Store match */
- f->matching[i].pc = pc;
- f->matching[i].mod = module[current];
- i++;
- }
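+ /* Intentional fall-through: each case adds one more MFA field to compare. */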
+ switch (specified) {
+ case 3:
+ if (ci->mfa.arity != mfa->arity)
+ continue;
+ case 2:
+ if (ci->mfa.function != mfa->function)
+ continue;
+ case 1:
+ if (ci->mfa.module != mfa->module)
+ continue;
+ case 0:
+ break;
+ }
+ /* Store match */
+ f->matching[i].ci = ci;
+ f->matching[i].mod = module[current];
+ i++;
}
}
f->matched = i;
@@ -203,7 +234,7 @@ erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified)
}
void
-erts_bp_match_export(BpFunctions* f, Eterm mfa[3], int specified)
+erts_bp_match_export(BpFunctions* f, ErtsCodeMFA *mfa, int specified)
{
ErtsCodeIndex code_ix = erts_active_code_ix();
int i;
@@ -215,27 +246,36 @@ erts_bp_match_export(BpFunctions* f, Eterm mfa[3], int specified)
for (i = 0; i < num_exps; i++) {
Export* ep = export_list(i, code_ix);
BeamInstr* pc;
- int j;
- for (j = 0; j < specified && mfa[j] == ep->code[j]; j++) {
- /* Empty loop body */
- }
- if (j < specified) {
- continue;
- }
- pc = ep->code+3;
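+ /* Intentional fall-through: each case adds one more MFA field to compare. */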
+ switch (specified) {
+ case 3:
+ if (mfa->arity != ep->info.mfa.arity)
+ continue;
+ case 2:
+ if (mfa->function != ep->info.mfa.function)
+ continue;
+ case 1:
+ if (mfa->module != ep->info.mfa.module)
+ continue;
+ case 0:
+ break;
+ default:
+ ASSERT(0);
+ }
+
+ pc = ep->beam;
if (ep->addressv[code_ix] == pc) {
- if ((*pc == (BeamInstr) em_apply_bif ||
- *pc == (BeamInstr) em_call_error_handler)) {
- continue;
+ if (BeamIsOpCode(*pc, op_apply_bif) ||
+ BeamIsOpCode(*pc, op_call_error_handler)) {
+ continue;
}
- ASSERT(*pc == (BeamInstr) BeamOp(op_i_generic_breakpoint));
- } else if (erts_is_native_break(ep->addressv[code_ix])) {
+ ASSERT(BeamIsOpCode(*pc, op_i_generic_breakpoint));
+ } else if (erts_is_function_native(erts_code_to_codeinfo(ep->addressv[code_ix]))) {
continue;
}
- f->matching[ne].pc = pc;
- f->matching[ne].mod = erts_get_module(ep->code[0], code_ix);
+ f->matching[ne].ci = &ep->info;
+ f->matching[ne].mod = erts_get_module(ep->info.mfa.module, code_ix);
ne++;
}
@@ -245,7 +285,10 @@ erts_bp_match_export(BpFunctions* f, Eterm mfa[3], int specified)
void
erts_bp_free_matched_functions(BpFunctions* f)
{
- Free(f->matching);
+ if (f->matching) {
+ Free(f->matching);
+ }
+ else ASSERT(f->matched == 0);
}
void
@@ -255,10 +298,10 @@ erts_consolidate_bp_data(BpFunctions* f, int local)
Uint i;
Uint n = f->matched;
- ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
+ ERTS_LC_ASSERT(erts_has_code_write_permission());
for (i = 0; i < n; i++) {
- consolidate_bp_data(fs[i].mod, fs[i].pc, local);
+ consolidate_bp_data(fs[i].mod, fs[i].ci, local);
}
}
@@ -267,17 +310,17 @@ erts_consolidate_bif_bp_data(void)
{
int i;
- ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
+ ERTS_LC_ASSERT(erts_has_code_write_permission());
for (i = 0; i < BIF_SIZE; i++) {
Export *ep = bif_export[i];
- consolidate_bp_data(0, ep->code+3, 0);
+ consolidate_bp_data(0, &ep->info, 0);
}
}
static void
-consolidate_bp_data(Module* modp, BeamInstr* pc, int local)
+consolidate_bp_data(Module* modp, ErtsCodeInfo *ci, int local)
{
- GenericBp* g = (GenericBp *) pc[-4];
+ GenericBp* g = ci->u.gen_bp;
GenericBpData* src;
GenericBpData* dst;
Uint flags;
@@ -299,7 +342,7 @@ consolidate_bp_data(Module* modp, BeamInstr* pc, int local)
MatchSetUnref(dst->local_ms);
}
if (flags & ERTS_BPF_META_TRACE) {
- bp_meta_unref(dst->meta_pid);
+ bp_meta_unref(dst->meta_tracer);
MatchSetUnref(dst->meta_ms);
}
if (flags & ERTS_BPF_COUNT) {
@@ -323,9 +366,10 @@ consolidate_bp_data(Module* modp, BeamInstr* pc, int local)
}
ASSERT(modp->curr.num_breakpoints >= 0);
ASSERT(modp->curr.num_traced_exports >= 0);
- ASSERT(*pc != (BeamInstr) BeamOp(op_i_generic_breakpoint));
+ ASSERT(! BeamIsOpCode(*erts_codeinfo_to_code(ci),
+ op_i_generic_breakpoint));
}
- pc[-4] = 0;
+ ci->u.gen_bp = NULL;
Free(g);
return;
}
@@ -340,8 +384,8 @@ consolidate_bp_data(Module* modp, BeamInstr* pc, int local)
MatchSetRef(dst->local_ms);
}
if (flags & ERTS_BPF_META_TRACE) {
- dst->meta_pid = src->meta_pid;
- erts_refc_inc(&dst->meta_pid->refc, 1);
+ dst->meta_tracer = src->meta_tracer;
+ erts_refc_inc(&dst->meta_tracer->refc, 1);
dst->meta_ms = src->meta_ms;
MatchSetRef(dst->meta_ms);
}
@@ -362,8 +406,8 @@ erts_commit_staged_bp(void)
ErtsBpIndex staging = erts_staging_bp_ix();
ErtsBpIndex active = erts_active_bp_ix();
- erts_smp_atomic32_set_nob(&erts_active_bp_index, staging);
- erts_smp_atomic32_set_nob(&erts_staging_bp_index, active);
+ erts_atomic32_set_nob(&erts_active_bp_index, staging);
+ erts_atomic32_set_nob(&erts_staging_bp_index, active);
}
void
@@ -371,12 +415,15 @@ erts_install_breakpoints(BpFunctions* f)
{
Uint i;
Uint n = f->matched;
- BeamInstr br = (BeamInstr) BeamOp(op_i_generic_breakpoint);
+ BeamInstr br = BeamOpCodeAddr(op_i_generic_breakpoint);
for (i = 0; i < n; i++) {
- BeamInstr* pc = f->matching[i].pc;
- GenericBp* g = (GenericBp *) pc[-4];
- if (*pc != br && g) {
+ ErtsCodeInfo* ci = f->matching[i].ci;
+ GenericBp* g = ci->u.gen_bp;
+ BeamInstr volatile *pc = erts_codeinfo_to_code(ci);
+ BeamInstr instr = *pc;
+
+ if (!BeamIsOpCode(instr, op_i_generic_breakpoint) && g) {
Module* modp = f->matching[i].mod;
/*
@@ -390,11 +437,16 @@ erts_install_breakpoints(BpFunctions* f)
/*
* The following write is not protected by any lock. We
* assume that the hardware guarantees that a write of an
- * aligned word-size (or half-word) writes is atomic
- * (i.e. that other processes executing this code will not
- * see a half pointer).
+ * aligned word-size write is atomic (i.e. that other
+ * processes executing this code will not see a half
+ * pointer).
+ *
+ * The contents of *pc are marked 'volatile' to ensure that
+ * the compiler will do a single full-word write, and not
+ * try any fancy optimizations to write a half word.
*/
- *pc = br;
+ instr = BeamSetCodeAddr(instr, br);
+ *pc = instr;
modp->curr.num_breakpoints++;
}
}
@@ -407,16 +459,16 @@ erts_uninstall_breakpoints(BpFunctions* f)
Uint n = f->matched;
for (i = 0; i < n; i++) {
- BeamInstr* pc = f->matching[i].pc;
- uninstall_breakpoint(pc);
+ uninstall_breakpoint(f->matching[i].ci);
}
}
static void
-uninstall_breakpoint(BeamInstr* pc)
+uninstall_breakpoint(ErtsCodeInfo *ci)
{
- if (*pc == (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
- GenericBp* g = (GenericBp *) pc[-4];
+ BeamInstr *pc = erts_codeinfo_to_code(ci);
+ if (BeamIsOpCode(*pc, op_i_generic_breakpoint)) {
+ GenericBp* g = ci->u.gen_bp;
if (g->data[erts_active_bp_ix()].flags == 0) {
/*
* The following write is not protected by any lock. We
@@ -433,59 +485,59 @@ uninstall_breakpoint(BeamInstr* pc)
void
erts_set_trace_break(BpFunctions* f, Binary *match_spec)
{
- set_break(f, match_spec, ERTS_BPF_LOCAL_TRACE, 0, am_true);
+ set_break(f, match_spec, ERTS_BPF_LOCAL_TRACE, 0, erts_tracer_true);
}
void
-erts_set_mtrace_break(BpFunctions* f, Binary *match_spec, Eterm tracer_pid)
+erts_set_mtrace_break(BpFunctions* f, Binary *match_spec, ErtsTracer tracer)
{
- set_break(f, match_spec, ERTS_BPF_META_TRACE, 0, tracer_pid);
+ set_break(f, match_spec, ERTS_BPF_META_TRACE, 0, tracer);
}
void
-erts_set_call_trace_bif(BeamInstr *pc, Binary *match_spec, int local)
+erts_set_call_trace_bif(ErtsCodeInfo *ci, Binary *match_spec, int local)
{
Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
- set_function_break(pc, match_spec, flags, 0, NIL);
+ set_function_break(ci, match_spec, flags, 0, erts_tracer_nil);
}
void
-erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec, Eterm tracer_pid)
+erts_set_mtrace_bif(ErtsCodeInfo *ci, Binary *match_spec, ErtsTracer tracer)
{
- set_function_break(pc, match_spec, ERTS_BPF_META_TRACE, 0, tracer_pid);
+ set_function_break(ci, match_spec, ERTS_BPF_META_TRACE, 0, tracer);
}
void
-erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op count_op)
+erts_set_time_trace_bif(ErtsCodeInfo *ci, enum erts_break_op count_op)
{
- set_function_break(pc, NULL,
+ set_function_break(ci, NULL,
ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE,
- count_op, NIL);
+ count_op, erts_tracer_nil);
}
void
-erts_clear_time_trace_bif(BeamInstr *pc) {
- clear_function_break(pc, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE);
+erts_clear_time_trace_bif(ErtsCodeInfo *ci) {
+ clear_function_break(ci, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE);
}
void
erts_set_debug_break(BpFunctions* f) {
- set_break(f, NULL, ERTS_BPF_DEBUG, 0, NIL);
+ set_break(f, NULL, ERTS_BPF_DEBUG, 0, erts_tracer_nil);
}
void
erts_set_count_break(BpFunctions* f, enum erts_break_op count_op)
{
set_break(f, 0, ERTS_BPF_COUNT|ERTS_BPF_COUNT_ACTIVE,
- count_op, NIL);
+ count_op, erts_tracer_nil);
}
void
erts_set_time_break(BpFunctions* f, enum erts_break_op count_op)
{
set_break(f, 0, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE,
- count_op, NIL);
+ count_op, erts_tracer_nil);
}
void
@@ -495,14 +547,14 @@ erts_clear_trace_break(BpFunctions* f)
}
void
-erts_clear_call_trace_bif(BeamInstr *pc, int local)
+erts_clear_call_trace_bif(ErtsCodeInfo *ci, int local)
{
- GenericBp* g = (GenericBp *) pc[-4];
+ GenericBp* g = ci->u.gen_bp;
if (g) {
Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
if (g->data[erts_staging_bp_ix()].flags & flags) {
- clear_function_break(pc, flags);
+ clear_function_break(ci, flags);
}
}
}
@@ -514,15 +566,15 @@ erts_clear_mtrace_break(BpFunctions* f)
}
void
-erts_clear_mtrace_bif(BeamInstr *pc)
+erts_clear_mtrace_bif(ErtsCodeInfo *ci)
{
- clear_function_break(pc, ERTS_BPF_META_TRACE);
+ clear_function_break(ci, ERTS_BPF_META_TRACE);
}
void
erts_clear_debug_break(BpFunctions* f)
{
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
clear_break(f, ERTS_BPF_DEBUG);
}
@@ -546,64 +598,103 @@ erts_clear_all_breaks(BpFunctions* f)
int
erts_clear_module_break(Module *modp) {
- BeamInstr** code_base;
+ BeamCodeHeader* code_hdr;
Uint n;
Uint i;
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
ASSERT(modp);
- code_base = (BeamInstr **) modp->curr.code;
- if (code_base == NULL) {
+ code_hdr = modp->curr.code_hdr;
+ if (!code_hdr) {
return 0;
}
- n = (Uint)(UWord) code_base[MI_NUM_FUNCTIONS];
+ n = (Uint)(UWord) code_hdr->num_functions;
for (i = 0; i < n; ++i) {
- BeamInstr* pc;
-
- pc = code_base[MI_FUNCTIONS+i] + 5;
- if (erts_is_native_break(pc)) {
+ ErtsCodeInfo *ci = code_hdr->functions[i];
+ if (erts_is_function_native(ci))
continue;
- }
- clear_function_break(pc, ERTS_BPF_ALL);
+ clear_function_break(ci, ERTS_BPF_ALL);
}
erts_commit_staged_bp();
for (i = 0; i < n; ++i) {
- BeamInstr* pc;
-
- pc = code_base[MI_FUNCTIONS+i] + 5;
- if (erts_is_native_break(pc)) {
+ ErtsCodeInfo *ci = code_hdr->functions[i];
+ if (erts_is_function_native(ci))
continue;
- }
- uninstall_breakpoint(pc);
- consolidate_bp_data(modp, pc, 1);
- ASSERT(pc[-4] == 0);
+ uninstall_breakpoint(ci);
+ consolidate_bp_data(modp, ci, 1);
+ ASSERT(ci->u.gen_bp == NULL);
}
return n;
}
void
-erts_clear_export_break(Module* modp, BeamInstr* pc)
+erts_clear_export_break(Module* modp, ErtsCodeInfo *ci)
{
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
- clear_function_break(pc, ERTS_BPF_ALL);
+ clear_function_break(ci, ERTS_BPF_ALL);
erts_commit_staged_bp();
- *pc = (BeamInstr) 0;
- consolidate_bp_data(modp, pc, 0);
- ASSERT(pc[-4] == 0);
+ *erts_codeinfo_to_code(ci) = (BeamInstr) 0;
+ consolidate_bp_data(modp, ci, 0);
+ ASSERT(ci->u.gen_bp == NULL);
+}
+
+/*
+ * If c_p->cp is a trace return instruction, we set cp
+ * to be the place where we again start to execute code.
+ *
+ * cp is used by match spec {caller} to get the calling
+ * function, and if we don't do this fixup it will be
+ * 'undefined'. This has the odd side effect that {caller}
+ * is not really the function that made the call, but
+ * rather the function we are about to return to.
+ */
+static void fixup_cp_before_trace(Process *c_p, int *return_to_trace)
+{
+ Eterm *cpp, *E = c_p->stop;
+ BeamInstr w = *c_p->cp;
+ if (BeamIsOpCode(w, op_return_trace)) {
+ cpp = &E[2];
+ } else if (BeamIsOpCode(w, op_i_return_to_trace)) {
+ *return_to_trace = 1;
+ cpp = &E[0];
+ } else if (BeamIsOpCode(w, op_i_return_time_trace)) {
+ cpp = &E[0];
+ } else {
+ cpp = NULL;
+ }
+ if (cpp) {
+ for (;;) {
+ BeamInstr w = *cp_val(*cpp);
+ if (BeamIsOpCode(w, op_return_trace)) {
+ cpp += 3;
+ } else if (BeamIsOpCode(w, op_i_return_to_trace)) {
+ *return_to_trace = 1;
+ cpp += 1;
+ } else if (BeamIsOpCode(w, op_i_return_time_trace)) {
+ cpp += 2;
+ } else {
+ break;
+ }
+ }
+ c_p->cp = (BeamInstr *) cp_val(*cpp);
+ ASSERT(is_CP(*cpp));
+ }
}
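/*
 * Illustrative sketch (not part of this patch): the frame sizes
 * skipped above follow from what each trace instruction pushes on
 * the stack:
 *
 *   return_trace:        [ CP to func_info | tracer | saved CP ]  3 words
 *   i_return_to_trace:   [ saved CP ]                             1 word
 *   i_return_time_trace: [ CP into code | saved CP ]              2 words
 *
 * The loop walks past any run of such frames until *cpp is an
 * ordinary continuation pointer, which then becomes c_p->cp while
 * the trace callback runs.
 */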
BeamInstr
-erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
+erts_generic_breakpoint(Process* c_p, ErtsCodeInfo *info, Eterm* reg)
{
GenericBp* g;
GenericBpData* bp;
Uint bp_flags;
ErtsBpIndex ix = erts_active_bp_ix();
- g = (GenericBp *) I[-4];
+ ASSERT(BeamIsOpCode(info->op, op_i_func_info_IaaI));
+
+ g = info->u.gen_bp;
bp = &g->data[ix];
bp_flags = bp->flags;
ASSERT((bp_flags & ~ERTS_BPF_ALL) == 0);
@@ -622,37 +713,45 @@ erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
if (bp_flags & ERTS_BPF_LOCAL_TRACE) {
ASSERT((bp_flags & ERTS_BPF_GLOBAL_TRACE) == 0);
- (void) do_call_trace(c_p, I, reg, 1, bp->local_ms, am_true);
+ (void) do_call_trace(c_p, info, reg, 1, bp->local_ms, erts_tracer_true);
} else if (bp_flags & ERTS_BPF_GLOBAL_TRACE) {
- (void) do_call_trace(c_p, I, reg, 0, bp->local_ms, am_true);
+ (void) do_call_trace(c_p, info, reg, 0, bp->local_ms, erts_tracer_true);
}
if (bp_flags & ERTS_BPF_META_TRACE) {
- Eterm old_pid;
- Eterm new_pid;
+ ErtsTracer old_tracer, new_tracer;
- old_pid = (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid);
- new_pid = do_call_trace(c_p, I, reg, 1, bp->meta_ms, old_pid);
- if (new_pid != old_pid) {
- erts_smp_atomic_set_nob(&bp->meta_pid->pid, new_pid);
+ old_tracer = erts_atomic_read_nob(&bp->meta_tracer->tracer);
+
+ new_tracer = do_call_trace(c_p, info, reg, 1, bp->meta_ms, old_tracer);
+
+ if (!ERTS_TRACER_COMPARE(new_tracer, old_tracer)) {
+ if (old_tracer == erts_atomic_cmpxchg_acqb(
+ &bp->meta_tracer->tracer,
+ (erts_aint_t)new_tracer,
+ (erts_aint_t)old_tracer)) {
+ ERTS_TRACER_CLEAR(&old_tracer);
+ } else {
+ ERTS_TRACER_CLEAR(&new_tracer);
+ }
}
}
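/*
 * Aside on the cmpxchg above (an inference from the code, not stated
 * in the patch): a concurrent scheduler may install a different meta
 * tracer between the read and the update. Exactly one tracer
 * reference is released either way: if our update wins, the old
 * tracer's reference is dropped; if it loses, we drop the reference
 * we were about to install and keep whatever won the race.
 */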
if (bp_flags & ERTS_BPF_COUNT_ACTIVE) {
- erts_smp_atomic_inc_nob(&bp->count->acount);
+ erts_atomic_inc_nob(&bp->count->acount);
}
- if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE && erts_is_tracer_proc_valid(c_p)) {
+ if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE) {
Eterm w;
- erts_trace_time_call(c_p, I, bp->time);
+ erts_trace_time_call(c_p, info, bp->time);
w = (BeamInstr) *c_p->cp;
- if (! (w == (BeamInstr) BeamOp(op_i_return_time_trace) ||
- w == (BeamInstr) BeamOp(op_return_trace) ||
- w == (BeamInstr) BeamOp(op_i_return_to_trace)) ) {
+ if (! (BeamIsOpCode(w, op_i_return_time_trace) ||
+ BeamIsOpCode(w, op_return_trace) ||
+ BeamIsOpCode(w, op_i_return_to_trace)) ) {
Eterm* E = c_p->stop;
ASSERT(c_p->htop <= E && E <= c_p->hend);
if (E - 2 < c_p->htop) {
- (void) erts_garbage_collect(c_p, 2, reg, I[-1]);
+ (void) erts_garbage_collect(c_p, 2, reg, info->mfa.arity);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
}
E = c_p->stop;
@@ -660,7 +759,7 @@ erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
ASSERT(c_p->htop <= E && E <= c_p->hend);
E -= 2;
- E[0] = make_cp(I);
+ E[0] = make_cp(erts_codeinfo_to_code(info));
E[1] = make_cp(c_p->cp); /* original return address */
c_p->cp = beam_return_time_trace;
c_p->stop = E;
@@ -668,7 +767,7 @@ erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
}
if (bp_flags & ERTS_BPF_DEBUG) {
- return (BeamInstr) BeamOp(op_i_debug_breakpoint);
+ return BeamOpCodeAddr(op_i_debug_breakpoint);
} else {
return g->orig_instr;
}
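/*
 * Note (illustrative, not part of this patch): the value returned
 * here is what the emulator executes in place of the breakpoint,
 * i.e. either the i_debug_breakpoint opcode or the original
 * instruction word saved in g->orig_instr when the breakpoint was
 * installed (see set_function_break() below).
 */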
@@ -687,18 +786,19 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
Eterm (*func)(Process*, Eterm*, BeamInstr*);
Export* ep = bif_export[bif_index];
Uint32 flags = 0, flags_meta = 0;
- Eterm meta_tracer_pid = NIL;
- int applying = (I == &(ep->code[3])); /* Yup, the apply code for a bif
- * is actually in the
- * export entry */
+ ErtsTracer meta_tracer = erts_tracer_nil;
+ int applying = (I == ep->beam); /* Yup, the apply code for a bif
+ * is actually in the
+ * export entry */
BeamInstr *cp = p->cp;
GenericBp* g;
GenericBpData* bp = NULL;
Uint bp_flags = 0;
+ int return_to_trace = 0;
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+ ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
- g = (GenericBp *) ep->fake_op_func_info_for_hipe[1];
+ g = ep->info.u.gen_bp;
if (g) {
bp = &g->data[erts_active_bp_ix()];
bp_flags = bp->flags;
@@ -710,30 +810,39 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
*/
if (!applying) {
p->cp = I;
+ } else {
+ fixup_cp_before_trace(p, &return_to_trace);
}
if (bp_flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE) &&
IS_TRACED_FL(p, F_TRACE_CALLS)) {
int local = !!(bp_flags & ERTS_BPF_LOCAL_TRACE);
- flags = erts_call_trace(p, ep->code, bp->local_ms, args,
- local, &ERTS_TRACER_PROC(p));
+ flags = erts_call_trace(p, &ep->info, bp->local_ms, args,
+ local, &ERTS_TRACER(p));
}
if (bp_flags & ERTS_BPF_META_TRACE) {
- Eterm tpid1, tpid2;
-
- tpid1 = tpid2 =
- (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid);
- flags_meta = erts_call_trace(p, ep->code, bp->meta_ms, args,
- 0, &tpid2);
- meta_tracer_pid = tpid2;
- if (tpid1 != tpid2) {
- erts_smp_atomic_set_nob(&bp->meta_pid->pid, tpid2);
+ ErtsTracer old_tracer;
+
+ meta_tracer = erts_atomic_read_nob(&bp->meta_tracer->tracer);
+ old_tracer = meta_tracer;
+ flags_meta = erts_call_trace(p, &ep->info, bp->meta_ms, args,
+ 0, &meta_tracer);
+
+ if (!ERTS_TRACER_COMPARE(old_tracer, meta_tracer)) {
+ ErtsTracer new_tracer = erts_tracer_nil;
+ erts_tracer_update(&new_tracer, meta_tracer);
+ if (old_tracer == erts_atomic_cmpxchg_acqb(
+ &bp->meta_tracer->tracer,
+ (erts_aint_t)new_tracer,
+ (erts_aint_t)old_tracer)) {
+ ERTS_TRACER_CLEAR(&old_tracer);
+ } else {
+ ERTS_TRACER_CLEAR(&new_tracer);
+ }
}
}
if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE &&
- IS_TRACED_FL(p, F_TRACE_CALLS) &&
- erts_is_tracer_proc_valid(p)) {
- BeamInstr *pc = (BeamInstr *)ep->code+3;
- erts_trace_time_call(p, pc, bp->time);
+ IS_TRACED_FL(p, F_TRACE_CALLS)) {
+ erts_trace_time_call(p, &ep->info, bp->time);
}
/* Restore original continuation pointer (if changed). */
@@ -743,6 +852,30 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
result = func(p, args, I);
+ if (erts_nif_export_check_save_trace(p, result,
+ applying, ep,
+ cp, flags,
+ flags_meta, I,
+ meta_tracer)) {
+ /*
+ * erts_bif_trace_epilogue() will be called
+ * later when appropriate via the NIF export
+ * scheduling functionality...
+ */
+ return result;
+ }
+
+ return erts_bif_trace_epilogue(p, result, applying, ep, cp,
+ flags, flags_meta, I,
+ meta_tracer);
+}
+
+Eterm
+erts_bif_trace_epilogue(Process *p, Eterm result, int applying,
+ Export* ep, BeamInstr *cp, Uint32 flags,
+ Uint32 flags_meta, BeamInstr* I,
+ ErtsTracer meta_tracer)
+{
if (applying && (flags & MATCH_SET_RETURN_TO_TRACE)) {
BeamInstr i_return_trace = beam_return_trace[0];
BeamInstr i_return_to_trace = beam_return_to_trace[0];
@@ -775,8 +908,6 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
if (reason != TRAP) {
Eterm class;
Eterm value = p->fvalue;
- DeclareTmpHeapNoproc(nocatch,3);
- UseTmpHeapNoproc(3);
/* Expand error value like in handle_error() */
if (reason & EXF_ARGLIST) {
Eterm *tp;
@@ -785,7 +916,8 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
value = tp[1];
}
if ((reason & EXF_THROWN) && (p->catches <= 0)) {
- value = TUPLE2(nocatch, am_nocatch, value);
+ Eterm *hp = HAlloc(p, 3);
+ value = TUPLE2(hp, am_nocatch, value);
reason = EXC_ERROR;
}
/* Note: expand_error_value() could theoretically
@@ -797,12 +929,12 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
class = exception_tag[GET_EXC_CLASS(reason)];
if (flags_meta & MATCH_SET_EXCEPTION_TRACE) {
- erts_trace_exception(p, ep->code, class, value,
- &meta_tracer_pid);
+ erts_trace_exception(p, &ep->info.mfa, class, value,
+ &meta_tracer);
}
if (flags & MATCH_SET_EXCEPTION_TRACE) {
- erts_trace_exception(p, ep->code, class, value,
- &ERTS_TRACER_PROC(p));
+ erts_trace_exception(p, &ep->info.mfa, class, value,
+ &ERTS_TRACER(p));
}
if ((flags & MATCH_SET_RETURN_TO_TRACE) && p->catches > 0) {
/* can only happen if(local)*/
@@ -824,22 +956,22 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
}
}
}
- UnUseTmpHeapNoproc(3);
if ((flags_meta|flags) & MATCH_SET_EXCEPTION_TRACE) {
- erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
ERTS_TRACE_FLAGS(p) |= F_EXCEPTION_TRACE;
- erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
}
}
} else {
if (flags_meta & MATCH_SET_RX_TRACE) {
- erts_trace_return(p, ep->code, result, &meta_tracer_pid);
+ erts_trace_return(p, &ep->info.mfa, result, &meta_tracer);
}
/* MATCH_SET_RETURN_TO_TRACE cannot occur if(meta) */
if (flags & MATCH_SET_RX_TRACE) {
- erts_trace_return(p, ep->code, result, &ERTS_TRACER_PROC(p));
+ erts_trace_return(p, &ep->info.mfa, result, &ERTS_TRACER(p));
}
- if (flags & MATCH_SET_RETURN_TO_TRACE) {
+ if (flags & MATCH_SET_RETURN_TO_TRACE &&
+ IS_TRACED_FL(p, F_TRACE_RETURN_TO)) {
/* can only happen if(local)*/
if (applying) {
/* Apply of BIF, cp is in calling function */
@@ -850,69 +982,40 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
}
}
}
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+ ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
return result;
}
-static Eterm
-do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
- int local, Binary* ms, Eterm tracer_pid)
+static ErtsTracer
+do_call_trace(Process* c_p, ErtsCodeInfo* info, Eterm* reg,
+ int local, Binary* ms, ErtsTracer tracer)
{
- Eterm* cpp;
int return_to_trace = 0;
- BeamInstr w;
- BeamInstr *cp_save;
+ BeamInstr *cp_save = c_p->cp;
Uint32 flags;
Uint need = 0;
Eterm* E = c_p->stop;
- w = *c_p->cp;
- if (w == (BeamInstr) BeamOp(op_return_trace)) {
- cpp = &E[2];
- } else if (w == (BeamInstr) BeamOp(op_i_return_to_trace)) {
- return_to_trace = 1;
- cpp = &E[0];
- } else if (w == (BeamInstr) BeamOp(op_i_return_time_trace)) {
- cpp = &E[0];
- } else {
- cpp = NULL;
- }
- if (cpp) {
- for (;;) {
- BeamInstr w = *cp_val(*cpp);
- if (w == (BeamInstr) BeamOp(op_return_trace)) {
- cpp += 3;
- } else if (w == (BeamInstr) BeamOp(op_i_return_to_trace)) {
- return_to_trace = 1;
- cpp += 1;
- } else if (w == (BeamInstr) BeamOp(op_i_return_time_trace)) {
- cpp += 2;
- } else {
- break;
- }
- }
- cp_save = c_p->cp;
- c_p->cp = (BeamInstr *) cp_val(*cpp);
- ASSERT(is_CP(*cpp));
- }
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- flags = erts_call_trace(c_p, I-3, ms, reg, local, &tracer_pid);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- if (cpp) {
- c_p->cp = cp_save;
- }
+ fixup_cp_before_trace(c_p, &return_to_trace);
+
+ ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
+ flags = erts_call_trace(c_p, info, ms, reg, local, &tracer);
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
+
+ /* restore cp after potential fixup */
+ c_p->cp = cp_save;
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
if ((flags & MATCH_SET_RETURN_TO_TRACE) && !return_to_trace) {
need += 1;
}
if (flags & MATCH_SET_RX_TRACE) {
- need += 3;
+ need += 3 + size_object(tracer);
}
if (need) {
ASSERT(c_p->htop <= E && E <= c_p->hend);
if (E - need < c_p->htop) {
- (void) erts_garbage_collect(c_p, need, reg, I[-1]);
+ (void) erts_garbage_collect(c_p, need, reg, info->mfa.arity);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
E = c_p->stop;
}
@@ -923,44 +1026,47 @@ do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
E[0] = make_cp(c_p->cp);
c_p->cp = beam_return_to_trace;
}
- if (flags & MATCH_SET_RX_TRACE) {
+ if (flags & MATCH_SET_RX_TRACE)
+ {
E -= 3;
+ c_p->stop = E;
ASSERT(c_p->htop <= E && E <= c_p->hend);
- ASSERT(is_CP((Eterm) (UWord) (I - 3)));
- ASSERT(am_true == tracer_pid ||
- is_internal_pid(tracer_pid) || is_internal_port(tracer_pid));
+ ASSERT(is_CP((Eterm) (UWord) (&info->mfa.module)));
+ ASSERT(IS_TRACER_VALID(tracer));
E[2] = make_cp(c_p->cp);
- E[1] = tracer_pid;
- E[0] = make_cp(I - 3); /* We ARE at the beginning of an
- instruction,
+ E[1] = copy_object(tracer, c_p);
+ E[0] = make_cp(&info->mfa.module);
+ /* We ARE at the beginning of an instruction,
the funcinfo is above I. */
c_p->cp = (flags & MATCH_SET_EXCEPTION_TRACE) ?
beam_exception_trace : beam_return_trace;
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
ERTS_TRACE_FLAGS(c_p) |= F_EXCEPTION_TRACE;
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- }
- c_p->stop = E;
- return tracer_pid;
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ } else
+ c_p->stop = E;
+ return tracer;
}
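/*
 * Sketch of the heap need computed above (illustrative, not part of
 * this patch): a return-to-trace frame costs 1 word, while an
 * rx-trace frame costs 3 words plus size_object(tracer), because the
 * tracer term is copied onto the stack with copy_object() before the
 * frame is pushed.
 */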
void
-erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
+erts_trace_time_call(Process* c_p, ErtsCodeInfo *info, BpDataTime* bdt)
{
- Uint ms,s,us;
+ ErtsMonotonicTime time;
process_breakpoint_time_t *pbt = NULL;
bp_data_time_item_t sitem, *item = NULL;
bp_time_hash_t *h = NULL;
BpDataTime *pbdt = NULL;
+ Uint32 six = acquire_bp_sched_ix(c_p);
ASSERT(c_p);
- ASSERT(erts_smp_atomic32_read_acqb(&c_p->state) & ERTS_PSFLG_RUNNING);
+ ASSERT(erts_atomic32_read_acqb(&c_p->state) & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING));
/* get previous timestamp and breakpoint
* from the process psd */
-
+
pbt = ERTS_PROC_GET_CALL_TIME(c_p);
- get_sys_now(&ms, &s, &us);
+ time = get_mtime(c_p);
/* get pbt
* timestamp = t0
@@ -971,20 +1077,20 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
if (pbt == 0) {
/* First call of process to instrumented function */
pbt = Alloc(sizeof(process_breakpoint_time_t));
- (void) ERTS_PROC_SET_CALL_TIME(c_p, ERTS_PROC_LOCK_MAIN, pbt);
+ (void) ERTS_PROC_SET_CALL_TIME(c_p, pbt);
} else {
- ASSERT(pbt->pc);
+ ASSERT(pbt->ci);
/* add time to previous code */
- bp_time_diff(&sitem, pbt, ms, s, us);
+ sitem.time = time - pbt->time;
sitem.pid = c_p->common.id;
sitem.count = 0;
/* previous breakpoint */
- pbdt = get_time_break(pbt->pc);
+ pbdt = get_time_break(pbt->ci);
/* if null then the breakpoint was removed */
if (pbdt) {
- h = &(pbdt->hash[bp_sched2ix_proc(c_p)]);
+ h = &(pbdt->hash[six]);
ASSERT(h);
ASSERT(h->item);
@@ -1001,12 +1107,11 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
/* Add count to this code */
sitem.pid = c_p->common.id;
sitem.count = 1;
- sitem.s_time = 0;
- sitem.us_time = 0;
+ sitem.time = 0;
/* this breakpoint */
ASSERT(bdt);
- h = &(bdt->hash[bp_sched2ix_proc(c_p)]);
+ h = &(bdt->hash[six]);
ASSERT(h);
ASSERT(h->item);
@@ -1018,29 +1123,31 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
BP_TIME_ADD(item, &sitem);
}
- pbt->pc = I;
- pbt->ms = ms;
- pbt->s = s;
- pbt->us = us;
+ pbt->ci = info;
+ pbt->time = time;
+
+ release_bp_sched_ix(six);
}
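/*
 * Summary (illustrative, not part of this patch): on entry to a
 * traced function, the elapsed monotonic time since the previous
 * breakpoint is credited to the *previous* function's per-scheduler
 * bucket, the current call is counted in this breakpoint's bucket,
 * and the psd record is re-stamped with the current code location
 * and time.
 */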
void
-erts_trace_time_return(Process *p, BeamInstr *pc)
+erts_trace_time_return(Process *p, ErtsCodeInfo *ci)
{
- Uint ms,s,us;
+ ErtsMonotonicTime time;
process_breakpoint_time_t *pbt = NULL;
bp_data_time_item_t sitem, *item = NULL;
bp_time_hash_t *h = NULL;
BpDataTime *pbdt = NULL;
+ Uint32 six = acquire_bp_sched_ix(p);
ASSERT(p);
- ASSERT(erts_smp_atomic32_read_acqb(&p->state) & ERTS_PSFLG_RUNNING);
+ ASSERT(erts_atomic32_read_acqb(&p->state) & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING));
/* get previous timestamp and breakpoint
* from the process psd */
pbt = ERTS_PROC_GET_CALL_TIME(p);
- get_sys_now(&ms,&s,&us);
+ time = get_mtime(p);
/* get pbt
* lookup bdt from code
@@ -1051,21 +1158,23 @@ erts_trace_time_return(Process *p, BeamInstr *pc)
*/
if (pbt) {
+
/* might have been removed due to
* trace_pattern(false)
*/
- ASSERT(pbt->pc);
+ ASSERT(pbt->ci);
- bp_time_diff(&sitem, pbt, ms, s, us);
+ sitem.time = time - pbt->time;
sitem.pid = p->common.id;
sitem.count = 0;
/* previous breakpoint */
- pbdt = get_time_break(pbt->pc);
+ pbdt = get_time_break(pbt->ci);
/* beware, the trace_pattern might have been removed */
if (pbdt) {
- h = &(pbdt->hash[bp_sched2ix_proc(p)]);
+
+ h = &(pbdt->hash[six]);
ASSERT(h);
ASSERT(h->item);
@@ -1076,20 +1185,22 @@ erts_trace_time_return(Process *p, BeamInstr *pc)
} else {
BP_TIME_ADD(item, &sitem);
}
+
}
- pbt->pc = pc;
- pbt->ms = ms;
- pbt->s = s;
- pbt->us = us;
+ pbt->ci = ci;
+ pbt->time = time;
+
}
+
+ release_bp_sched_ix(six);
}
int
-erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, int local)
+erts_is_trace_break(ErtsCodeInfo *ci, Binary **match_spec_ret, int local)
{
Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
- GenericBpData* bp = check_break(pc, flags);
+ GenericBpData* bp = check_break(ci, flags);
if (bp) {
if (match_spec_ret) {
@@ -1100,57 +1211,45 @@ erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, int local)
return 0;
}
-int
-erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret,
- Eterm *tracer_pid_ret)
+int
+erts_is_mtrace_break(ErtsCodeInfo *ci, Binary **match_spec_ret,
+ ErtsTracer *tracer_ret)
{
- GenericBpData* bp = check_break(pc, ERTS_BPF_META_TRACE);
+ GenericBpData* bp = check_break(ci, ERTS_BPF_META_TRACE);
if (bp) {
if (match_spec_ret) {
*match_spec_ret = bp->meta_ms;
}
- if (tracer_pid_ret) {
- *tracer_pid_ret =
- (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid);
+ if (tracer_ret) {
+ *tracer_ret = erts_atomic_read_nob(&bp->meta_tracer->tracer);
}
return 1;
}
return 0;
}
-int
-erts_is_native_break(BeamInstr *pc) {
-#ifdef HIPE
- ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- return pc[0] == (BeamInstr) BeamOp(op_hipe_trap_call)
- || pc[0] == (BeamInstr) BeamOp(op_hipe_trap_call_closure);
-#else
- return 0;
-#endif
-}
-
int
-erts_is_count_break(BeamInstr *pc, Uint *count_ret)
+erts_is_count_break(ErtsCodeInfo *ci, Uint *count_ret)
{
- GenericBpData* bp = check_break(pc, ERTS_BPF_COUNT);
+ GenericBpData* bp = check_break(ci, ERTS_BPF_COUNT);
if (bp) {
if (count_ret) {
- *count_ret = (Uint) erts_smp_atomic_read_nob(&bp->count->acount);
+ *count_ret = (Uint) erts_atomic_read_nob(&bp->count->acount);
}
return 1;
}
return 0;
}
-int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *retval) {
+int erts_is_time_break(Process *p, ErtsCodeInfo *ci, Eterm *retval) {
Uint i, ix;
bp_time_hash_t hash;
Uint size;
Eterm *hp, t;
bp_data_time_item_t *item = NULL;
- BpDataTime *bdt = get_time_break(pc);
+ BpDataTime *bdt = get_time_break(ci);
if (bdt) {
if (retval) {
@@ -1182,10 +1281,14 @@ int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *retval) {
for(ix = 0; ix < hash.n; ix++) {
item = &(hash.item[ix]);
if (item->pid != NIL) {
+ ErtsMonotonicTime sec, usec;
+ usec = ERTS_MONOTONIC_TO_USEC(item->time);
+ sec = usec / 1000000;
+ usec = usec - sec*1000000;
t = TUPLE4(hp, item->pid,
make_small(item->count),
- make_small(item->s_time),
- make_small(item->us_time));
+ make_small((Uint) sec),
+ make_small((Uint) usec));
hp += 5;
*retval = CONS(hp, t, *retval); hp += 2;
}
@@ -1200,26 +1303,25 @@ int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *retval) {
}
-BeamInstr *
-erts_find_local_func(Eterm mfa[3]) {
+ErtsCodeInfo *
+erts_find_local_func(ErtsCodeMFA *mfa) {
Module *modp;
- BeamInstr** code_base;
- BeamInstr* code_ptr;
+ BeamCodeHeader* code_hdr;
+ ErtsCodeInfo* ci;
Uint i,n;
- if ((modp = erts_get_module(mfa[0], erts_active_code_ix())) == NULL)
+ if ((modp = erts_get_module(mfa->module, erts_active_code_ix())) == NULL)
return NULL;
- if ((code_base = (BeamInstr **) modp->curr.code) == NULL)
+ if ((code_hdr = modp->curr.code_hdr) == NULL)
return NULL;
- n = (BeamInstr) code_base[MI_NUM_FUNCTIONS];
+ n = (BeamInstr) code_hdr->num_functions;
for (i = 0; i < n; ++i) {
- code_ptr = code_base[MI_FUNCTIONS+i];
- ASSERT(((BeamInstr) BeamOp(op_i_func_info_IaaI)) == code_ptr[0]);
- ASSERT(mfa[0] == ((Eterm) code_ptr[2]) ||
- is_nil((Eterm) code_ptr[2]));
- if (mfa[1] == ((Eterm) code_ptr[3]) &&
- ((BeamInstr) mfa[2]) == code_ptr[4]) {
- return code_ptr + 5;
+ ci = code_hdr->functions[i];
+ ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI));
+ ASSERT(mfa->module == ci->mfa.module || is_nil(ci->mfa.module));
+ if (mfa->function == ci->mfa.function &&
+ mfa->arity == ci->mfa.arity) {
+ return ci;
}
}
return NULL;
@@ -1265,8 +1367,7 @@ static void bp_hash_rehash(bp_time_hash_t *hash, Uint n) {
}
item[hval].pid = hash->item[ix].pid;
item[hval].count = hash->item[ix].count;
- item[hval].s_time = hash->item[ix].s_time;
- item[hval].us_time = hash->item[ix].us_time;
+ item[hval].time = hash->item[ix].time;
}
}
@@ -1314,8 +1415,7 @@ static ERTS_INLINE bp_data_time_item_t * bp_hash_put(bp_time_hash_t *hash, bp_da
item = &(hash->item[hval]);
item->pid = sitem->pid;
- item->s_time = sitem->s_time;
- item->us_time = sitem->us_time;
+ item->time = sitem->time;
item->count = sitem->count;
hash->used++;
@@ -1329,45 +1429,12 @@ static void bp_hash_delete(bp_time_hash_t *hash) {
hash->item = NULL;
}
-static void bp_time_diff(bp_data_time_item_t *item, /* out */
- process_breakpoint_time_t *pbt, /* in */
- Uint ms, Uint s, Uint us) {
- int ds,dus;
-#ifdef DEBUG
- int dms;
-
-
- dms = ms - pbt->ms;
-#endif
- ds = s - pbt->s;
- dus = us - pbt->us;
-
- /* get_sys_now may return zero difftime,
- * this is ok.
- */
-
-#ifdef DEBUG
- ASSERT(dms >= 0 || ds >= 0 || dus >= 0);
-#endif
-
- if (dus < 0) {
- dus += 1000000;
- ds -= 1;
- }
- if (ds < 0) {
- ds += 1000000;
- }
-
- item->s_time = ds;
- item->us_time = dus;
-}
-
void erts_schedule_time_break(Process *p, Uint schedule) {
- Uint ms, s, us;
process_breakpoint_time_t *pbt = NULL;
bp_data_time_item_t sitem, *item = NULL;
bp_time_hash_t *h = NULL;
BpDataTime *pbdt = NULL;
+ Uint32 six = acquire_bp_sched_ix(p);
ASSERT(p);
@@ -1384,14 +1451,13 @@ void erts_schedule_time_break(Process *p, Uint schedule) {
* the previous breakpoint.
*/
- pbdt = get_time_break(pbt->pc);
+ pbdt = get_time_break(pbt->ci);
if (pbdt) {
- get_sys_now(&ms,&s,&us);
- bp_time_diff(&sitem, pbt, ms, s, us);
+ sitem.time = get_mtime(p) - pbt->time;
sitem.pid = p->common.id;
sitem.count = 0;
- h = &(pbdt->hash[bp_sched2ix_proc(p)]);
+ h = &(pbdt->hash[six]);
ASSERT(h);
ASSERT(h->item);
@@ -1409,10 +1475,7 @@ void erts_schedule_time_break(Process *p, Uint schedule) {
* timestamp it and remove the previous
* timestamp in the psd.
*/
- get_sys_now(&ms,&s,&us);
- pbt->ms = ms;
- pbt->s = s;
- pbt->us = us;
+ pbt->time = get_mtime(p);
break;
default :
ASSERT(0);
@@ -1420,6 +1483,8 @@ void erts_schedule_time_break(Process *p, Uint schedule) {
break;
}
} /* pbt */
+
+ release_bp_sched_ix(six);
}
/* *************************************************************************
@@ -1429,42 +1494,42 @@ void erts_schedule_time_break(Process *p, Uint schedule) {
static void
set_break(BpFunctions* f, Binary *match_spec, Uint break_flags,
- enum erts_break_op count_op, Eterm tracer_pid)
+ enum erts_break_op count_op, ErtsTracer tracer)
{
Uint i;
Uint n;
n = f->matched;
for (i = 0; i < n; i++) {
- BeamInstr* pc = f->matching[i].pc;
- set_function_break(pc, match_spec, break_flags,
- count_op, tracer_pid);
+ set_function_break(f->matching[i].ci,
+ match_spec, break_flags,
+ count_op, tracer);
}
}
static void
-set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
- enum erts_break_op count_op, Eterm tracer_pid)
+set_function_break(ErtsCodeInfo *ci, Binary *match_spec, Uint break_flags,
+ enum erts_break_op count_op, ErtsTracer tracer)
{
GenericBp* g;
GenericBpData* bp;
Uint common;
ErtsBpIndex ix = erts_staging_bp_ix();
- ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
- g = (GenericBp *) pc[-4];
+ ERTS_LC_ASSERT(erts_has_code_write_permission());
+ g = ci->u.gen_bp;
if (g == 0) {
int i;
- if (count_op == erts_break_reset || count_op == erts_break_stop) {
+ if (count_op == ERTS_BREAK_RESTART || count_op == ERTS_BREAK_PAUSE) {
/* Do not insert a new breakpoint */
return;
}
g = Alloc(sizeof(GenericBp));
- g->orig_instr = *pc;
+ g->orig_instr = *erts_codeinfo_to_code(ci);
for (i = 0; i < ERTS_NUM_BP_IX; i++) {
g->data[i].flags = 0;
}
- pc[-4] = (BeamInstr) g;
+ ci->u.gen_bp = g;
}
bp = &g->data[ix];
@@ -1477,13 +1542,13 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
MatchSetUnref(bp->local_ms);
} else if (common & ERTS_BPF_META_TRACE) {
MatchSetUnref(bp->meta_ms);
- bp_meta_unref(bp->meta_pid);
+ bp_meta_unref(bp->meta_tracer);
} else if (common & ERTS_BPF_COUNT) {
- if (count_op == erts_break_stop) {
+ if (count_op == ERTS_BREAK_PAUSE) {
bp->flags &= ~ERTS_BPF_COUNT_ACTIVE;
} else {
bp->flags |= ERTS_BPF_COUNT_ACTIVE;
- erts_smp_atomic_set_nob(&bp->count->acount, 0);
+ erts_atomic_set_nob(&bp->count->acount, 0);
}
ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
return;
@@ -1491,7 +1556,7 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
BpDataTime* bdt = bp->time;
Uint i = 0;
- if (count_op == erts_break_stop) {
+ if (count_op == ERTS_BREAK_PAUSE) {
bp->flags &= ~ERTS_BPF_TIME_TRACE_ACTIVE;
} else {
bp->flags |= ERTS_BPF_TIME_TRACE_ACTIVE;
@@ -1512,20 +1577,22 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
MatchSetRef(match_spec);
bp->local_ms = match_spec;
} else if (break_flags & ERTS_BPF_META_TRACE) {
- BpMetaPid* bmp;
+ BpMetaTracer* bmt;
+ ErtsTracer meta_tracer = erts_tracer_nil;
MatchSetRef(match_spec);
bp->meta_ms = match_spec;
- bmp = Alloc(sizeof(BpMetaPid));
- erts_refc_init(&bmp->refc, 1);
- erts_smp_atomic_init_nob(&bmp->pid, tracer_pid);
- bp->meta_pid = bmp;
+ bmt = Alloc(sizeof(BpMetaTracer));
+ erts_refc_init(&bmt->refc, 1);
+ erts_tracer_update(&meta_tracer, tracer); /* copy tracer */
+ erts_atomic_init_nob(&bmt->tracer, (erts_aint_t)meta_tracer);
+ bp->meta_tracer = bmt;
} else if (break_flags & ERTS_BPF_COUNT) {
BpCount* bcp;
ASSERT((bp->flags & ERTS_BPF_COUNT) == 0);
bcp = Alloc(sizeof(BpCount));
erts_refc_init(&bcp->refc, 1);
- erts_smp_atomic_init_nob(&bcp->acount, 0);
+ erts_atomic_init_nob(&bcp->acount, 0);
bp->count = bcp;
} else if (break_flags & ERTS_BPF_TIME_TRACE) {
BpDataTime* bdt;
@@ -1534,7 +1601,7 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
ASSERT((bp->flags & ERTS_BPF_TIME_TRACE) == 0);
bdt = Alloc(sizeof(BpDataTime));
erts_refc_init(&bdt->refc, 1);
- bdt->n = erts_no_schedulers;
+ bdt->n = erts_no_schedulers + 1;
bdt->hash = Alloc(sizeof(bp_time_hash_t)*(bdt->n));
for (i = 0; i < bdt->n; i++) {
bp_hash_init(&(bdt->hash[i]), 32);
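/*
 * Note (an inference, not stated in the patch): the extra slot in
 * erts_no_schedulers + 1 appears to serve dirty schedulers, whose
 * access is serialized through erts_dirty_bp_ix_mtx declared in
 * beam_bp.h below, since dirty schedulers have no stable scheduler
 * index of their own.
 */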
@@ -1554,22 +1621,21 @@ clear_break(BpFunctions* f, Uint break_flags)
n = f->matched;
for (i = 0; i < n; i++) {
- BeamInstr* pc = f->matching[i].pc;
- clear_function_break(pc, break_flags);
+ clear_function_break(f->matching[i].ci, break_flags);
}
}
static int
-clear_function_break(BeamInstr *pc, Uint break_flags)
+clear_function_break(ErtsCodeInfo *ci, Uint break_flags)
{
GenericBp* g;
GenericBpData* bp;
Uint common;
ErtsBpIndex ix = erts_staging_bp_ix();
- ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
+ ERTS_LC_ASSERT(erts_has_code_write_permission());
- if ((g = (GenericBp *) pc[-4]) == 0) {
+ if ((g = ci->u.gen_bp) == NULL) {
return 1;
}
@@ -1582,7 +1648,7 @@ clear_function_break(BeamInstr *pc, Uint break_flags)
}
if (common & ERTS_BPF_META_TRACE) {
MatchSetUnref(bp->meta_ms);
- bp_meta_unref(bp->meta_pid);
+ bp_meta_unref(bp->meta_tracer);
}
if (common & ERTS_BPF_COUNT) {
ASSERT((bp->flags & ERTS_BPF_COUNT_ACTIVE) == 0);
@@ -1598,10 +1664,12 @@ clear_function_break(BeamInstr *pc, Uint break_flags)
}
static void
-bp_meta_unref(BpMetaPid* bmp)
+bp_meta_unref(BpMetaTracer* bmt)
{
- if (erts_refc_dectest(&bmp->refc, 0) <= 0) {
- Free(bmp);
+ if (erts_refc_dectest(&bmt->refc, 0) <= 0) {
+ ErtsTracer trc = erts_atomic_read_nob(&bmt->tracer);
+ ERTS_TRACER_CLEAR(&trc);
+ Free(bmt);
}
}
@@ -1636,13 +1704,11 @@ bp_time_unref(BpDataTime* bdt)
h_p = erts_pid2proc(NULL, 0, item->pid,
ERTS_PROC_LOCK_MAIN);
if (h_p) {
- pbt = ERTS_PROC_SET_CALL_TIME(h_p,
- ERTS_PROC_LOCK_MAIN,
- NULL);
+ pbt = ERTS_PROC_SET_CALL_TIME(h_p, NULL);
if (pbt) {
Free(pbt);
}
- erts_smp_proc_unlock(h_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(h_p, ERTS_PROC_LOCK_MAIN);
}
}
}
@@ -1655,19 +1721,19 @@ bp_time_unref(BpDataTime* bdt)
}
static BpDataTime*
-get_time_break(BeamInstr *pc)
+get_time_break(ErtsCodeInfo *ci)
{
- GenericBpData* bp = check_break(pc, ERTS_BPF_TIME_TRACE);
+ GenericBpData* bp = check_break(ci, ERTS_BPF_TIME_TRACE);
return bp ? bp->time : 0;
}
static GenericBpData*
-check_break(BeamInstr *pc, Uint break_flags)
+check_break(ErtsCodeInfo *ci, Uint break_flags)
{
- GenericBp* g = (GenericBp *) pc[-4];
+ GenericBp* g = ci->u.gen_bp;
- ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- if (erts_is_native_break(pc)) {
+ ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI));
+ if (erts_is_function_native(ci)) {
return 0;
}
if (g) {
diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h
index b061401863..a64765822b 100644
--- a/erts/emulator/beam/beam_bp.h
+++ b/erts/emulator/beam/beam_bp.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -28,8 +29,7 @@
typedef struct {
Eterm pid;
Sint count;
- Uint s_time;
- Uint us_time;
+ ErtsMonotonicTime time;
} bp_data_time_item_t;
typedef struct {
@@ -45,27 +45,25 @@ typedef struct bp_data_time { /* Call time */
} BpDataTime;
typedef struct {
- Uint ms;
- Uint s;
- Uint us;
- BeamInstr *pc;
+ ErtsMonotonicTime time;
+ ErtsCodeInfo *ci;
} process_breakpoint_time_t; /* used within psd */
typedef struct {
- erts_smp_atomic_t acount;
+ erts_atomic_t acount;
erts_refc_t refc;
} BpCount;
typedef struct {
- erts_smp_atomic_t pid;
+ erts_atomic_t tracer;
erts_refc_t refc;
-} BpMetaPid;
+} BpMetaTracer;
typedef struct generic_bp_data {
Uint flags;
Binary* local_ms; /* Match spec for local call trace */
Binary* meta_ms; /* Match spec for meta trace */
- BpMetaPid* meta_pid; /* Meta trace pid */
+ BpMetaTracer* meta_tracer; /* Meta tracer */
BpCount* count; /* For call count */
BpDataTime* time; /* For time trace */
} GenericBpData;
@@ -81,23 +79,19 @@ typedef struct generic_bp {
#define ERTS_BP_CALL_TIME_SCHEDULE_OUT (1)
#define ERTS_BP_CALL_TIME_SCHEDULE_EXITING (2)
-#ifdef ERTS_SMP
-#define bp_sched2ix_proc(p) ((p)->scheduler_data->no - 1)
-#else
-#define bp_sched2ix_proc(p) (0)
-#endif
+extern erts_mtx_t erts_dirty_bp_ix_mtx;
enum erts_break_op{
- erts_break_nop = 0, /* Must be false */
- erts_break_set = !0, /* Must be true */
- erts_break_reset,
- erts_break_stop
+ ERTS_BREAK_NOP = 0, /* Must be false */
+ ERTS_BREAK_SET = !0, /* Must be true */
+ ERTS_BREAK_RESTART,
+ ERTS_BREAK_PAUSE
};
typedef Uint32 ErtsBpIndex;
typedef struct {
- BeamInstr* pc;
+ ErtsCodeInfo *ci;
Module* mod;
} BpFunction;
@@ -118,8 +112,8 @@ void erts_commit_staged_bp(void);
ERTS_GLB_INLINE ErtsBpIndex erts_active_bp_ix(void);
ERTS_GLB_INLINE ErtsBpIndex erts_staging_bp_ix(void);
-void erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified);
-void erts_bp_match_export(BpFunctions* f, Eterm mfa[3], int specified);
+void erts_bp_match_functions(BpFunctions* f, ErtsCodeMFA *mfa, int specified);
+void erts_bp_match_export(BpFunctions* f, ErtsCodeMFA *mfa, int specified);
void erts_bp_free_matched_functions(BpFunctions* f);
void erts_install_breakpoints(BpFunctions* f);
@@ -130,15 +124,15 @@ void erts_consolidate_bif_bp_data(void);
void erts_set_trace_break(BpFunctions *f, Binary *match_spec);
void erts_clear_trace_break(BpFunctions *f);
-void erts_set_call_trace_bif(BeamInstr *pc, Binary *match_spec, int local);
-void erts_clear_call_trace_bif(BeamInstr *pc, int local);
+void erts_set_call_trace_bif(ErtsCodeInfo *ci, Binary *match_spec, int local);
+void erts_clear_call_trace_bif(ErtsCodeInfo *ci, int local);
void erts_set_mtrace_break(BpFunctions *f, Binary *match_spec,
- Eterm tracer_pid);
+ ErtsTracer tracer);
void erts_clear_mtrace_break(BpFunctions *f);
-void erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec,
- Eterm tracer_pid);
-void erts_clear_mtrace_bif(BeamInstr *pc);
+void erts_set_mtrace_bif(ErtsCodeInfo *ci, Binary *match_spec,
+ ErtsTracer tracer);
+void erts_clear_mtrace_bif(ErtsCodeInfo *ci);
void erts_set_debug_break(BpFunctions *f);
void erts_clear_debug_break(BpFunctions *f);
@@ -148,58 +142,46 @@ void erts_clear_count_break(BpFunctions *f);
void erts_clear_all_breaks(BpFunctions* f);
int erts_clear_module_break(Module *modp);
-void erts_clear_export_break(Module *modp, BeamInstr* pc);
-
-BeamInstr erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg);
-BeamInstr erts_trace_break(Process *p, BeamInstr *pc, Eterm *args,
- Uint32 *ret_flags, Eterm *tracer_pid);
-
-int erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, int local);
-int erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret,
- Eterm *tracer_pid_rte);
-int erts_is_mtrace_bif(BeamInstr *pc, Binary **match_spec_ret,
- Eterm *tracer_pid_ret);
-int erts_is_native_break(BeamInstr *pc);
-int erts_is_count_break(BeamInstr *pc, Uint *count_ret);
-int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *call_time);
-
-void erts_trace_time_call(Process* c_p, BeamInstr* pc, BpDataTime* bdt);
-void erts_trace_time_return(Process* c_p, BeamInstr* pc);
+void erts_clear_export_break(Module *modp, ErtsCodeInfo* ci);
+
+BeamInstr erts_generic_breakpoint(Process* c_p, ErtsCodeInfo *ci, Eterm* reg);
+BeamInstr erts_trace_break(Process *p, ErtsCodeInfo *ci, Eterm *args,
+ Uint32 *ret_flags, ErtsTracer *tracer);
+
+int erts_is_trace_break(ErtsCodeInfo *ci, Binary **match_spec_ret, int local);
+int erts_is_mtrace_break(ErtsCodeInfo *ci, Binary **match_spec_ret,
+ ErtsTracer *tracer_ret);
+int erts_is_mtrace_bif(ErtsCodeInfo *ci, Binary **match_spec_ret,
+ ErtsTracer *tracer_ret);
+int erts_is_native_break(ErtsCodeInfo *ci);
+int erts_is_count_break(ErtsCodeInfo *ci, Uint *count_ret);
+int erts_is_time_break(Process *p, ErtsCodeInfo *ci, Eterm *call_time);
+
+void erts_trace_time_call(Process* c_p, ErtsCodeInfo *ci, BpDataTime* bdt);
+void erts_trace_time_return(Process* c_p, ErtsCodeInfo *ci);
void erts_schedule_time_break(Process *p, Uint out);
void erts_set_time_break(BpFunctions *f, enum erts_break_op);
void erts_clear_time_break(BpFunctions *f);
-int erts_is_time_trace_bif(Process *p, BeamInstr *pc, Eterm *call_time);
-void erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op);
-void erts_clear_time_trace_bif(BeamInstr *pc);
-
-BeamInstr *erts_find_local_func(Eterm mfa[3]);
+int erts_is_time_trace_bif(Process *p, ErtsCodeInfo *ci, Eterm *call_time);
+void erts_set_time_trace_bif(ErtsCodeInfo *ci, enum erts_break_op);
+void erts_clear_time_trace_bif(ErtsCodeInfo *ci);
-ERTS_GLB_INLINE Uint erts_bp_sched2ix(void);
+ErtsCodeInfo *erts_find_local_func(ErtsCodeMFA *mfa);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-ERTS_GLB_INLINE Uint erts_bp_sched2ix(void)
-{
-#ifdef ERTS_SMP
- ErtsSchedulerData *esdp;
- esdp = erts_get_scheduler_data();
- return esdp->no - 1;
-#else
- return 0;
-#endif
-}
-extern erts_smp_atomic32_t erts_active_bp_index;
-extern erts_smp_atomic32_t erts_staging_bp_index;
+extern erts_atomic32_t erts_active_bp_index;
+extern erts_atomic32_t erts_staging_bp_index;
ERTS_GLB_INLINE ErtsBpIndex erts_active_bp_ix(void)
{
- return erts_smp_atomic32_read_nob(&erts_active_bp_index);
+ return erts_atomic32_read_nob(&erts_active_bp_index);
}
ERTS_GLB_INLINE ErtsBpIndex erts_staging_bp_ix(void)
{
- return erts_smp_atomic32_read_nob(&erts_staging_bp_index);
+ return erts_atomic32_read_nob(&erts_staging_bp_index);
}
#endif
diff --git a/erts/emulator/beam/beam_catches.c b/erts/emulator/beam/beam_catches.c
index d374d0469e..cd592c7e5e 100644
--- a/erts/emulator/beam/beam_catches.c
+++ b/erts/emulator/beam/beam_catches.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -142,7 +143,7 @@ BeamInstr *beam_catches_car(unsigned i)
struct bc_pool* p = &bccix[erts_active_code_ix()];
if (i >= p->tabsize ) {
- erl_exit(1, "beam_catches_delmod: index %#x is out of range\r\n", i);
+ erts_exit(ERTS_ERROR_EXIT, "beam_catches_delmod: index %#x is out of range\r\n", i);
}
return p->beam_catches[i].cp;
}
@@ -156,10 +157,10 @@ void beam_catches_delmod(unsigned head, BeamInstr *code, unsigned code_bytes,
ASSERT((code_ix == erts_active_code_ix()) != bccix[erts_staging_code_ix()].is_staging);
for(i = head; i != (unsigned)-1;) {
if (i >= p->tabsize) {
- erl_exit(1, "beam_catches_delmod: index %#x is out of range\r\n", i);
+ erts_exit(ERTS_ERROR_EXIT, "beam_catches_delmod: index %#x is out of range\r\n", i);
}
if( (char*)p->beam_catches[i].cp - (char*)code >= code_bytes ) {
- erl_exit(1,
+ erts_exit(ERTS_ERROR_EXIT,
"beam_catches_delmod: item %#x has cp %p which is not "
"in module's range [%p,%p[\r\n",
i, p->beam_catches[i].cp, code, ((char*)code + code_bytes));
diff --git a/erts/emulator/beam/beam_catches.h b/erts/emulator/beam/beam_catches.h
index 51ef463b2f..8eb2165ac9 100644
--- a/erts/emulator/beam/beam_catches.h
+++ b/erts/emulator/beam/beam_catches.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c
index a3cd08834f..509aa2a84f 100644
--- a/erts/emulator/beam/beam_debug.c
+++ b/erts/emulator/beam/beam_debug.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -38,6 +39,7 @@
#include "beam_bp.h"
#include "erl_binary.h"
#include "erl_thr_progress.h"
+#include "erl_nfunc_sched.h"
#ifdef ARCH_64
# define HEXF "%016bpX"
@@ -49,7 +51,10 @@
void dbg_bt(Process* p, Eterm* sp);
void dbg_where(BeamInstr* addr, Eterm x0, Eterm* reg);
-static int print_op(int to, void *to_arg, int op, int size, BeamInstr* addr);
+static int print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr);
+static void print_bif_name(fmtfn_t to, void* to_arg, BifFunction bif);
+static BeamInstr* f_to_addr(BeamInstr* base, int op, BeamInstr* ap);
+static BeamInstr* f_to_addr_packed(BeamInstr* base, int op, Sint32* ap);
BIF_RETTYPE
erts_debug_same_2(BIF_ALIST_2)
@@ -72,21 +77,55 @@ erts_debug_flat_size_1(BIF_ALIST_1)
}
}
+BIF_RETTYPE
+erts_debug_size_shared_1(BIF_ALIST_1)
+{
+ Process* p = BIF_P;
+ Eterm term = BIF_ARG_1;
+ Uint size = size_shared(term);
+
+ if (IS_USMALL(0, size)) {
+ BIF_RET(make_small(size));
+ } else {
+ Eterm* hp = HAlloc(p, BIG_UINT_HEAP_SIZE);
+ BIF_RET(uint_to_big(size, hp));
+ }
+}
+
+BIF_RETTYPE
+erts_debug_copy_shared_1(BIF_ALIST_1)
+{
+ Process* p = BIF_P;
+ Eterm term = BIF_ARG_1;
+ Uint size;
+ Eterm* hp;
+ Eterm copy;
+ erts_shcopy_t info;
+ INITIALIZE_SHCOPY(info);
+
+ size = copy_shared_calculate(term, &info);
+ if (size > 0) {
+ hp = HAlloc(p, size);
+ }
+ copy = copy_shared_perform(term, size, &info, &hp, &p->off_heap);
+ DESTROY_SHCOPY(info);
+ BIF_RET(copy);
+}
BIF_RETTYPE
erts_debug_breakpoint_2(BIF_ALIST_2)
{
Process* p = BIF_P;
Eterm MFA = BIF_ARG_1;
- Eterm bool = BIF_ARG_2;
+ Eterm boolean = BIF_ARG_2;
Eterm* tp;
- Eterm mfa[3];
+ ErtsCodeMFA mfa;
int i;
int specified = 0;
Eterm res;
BpFunctions f;
- if (bool != am_true && bool != am_false)
+ if (boolean != am_true && boolean != am_false)
goto error;
if (is_not_tuple(MFA)) {
@@ -96,34 +135,35 @@ erts_debug_breakpoint_2(BIF_ALIST_2)
if (*tp != make_arityval(3)) {
goto error;
}
- mfa[0] = tp[1];
- mfa[1] = tp[2];
- mfa[2] = tp[3];
- if (!is_atom(mfa[0]) || !is_atom(mfa[1]) ||
- (!is_small(mfa[2]) && mfa[2] != am_Underscore)) {
+ if (!is_atom(tp[1]) || !is_atom(tp[2]) ||
+ (!is_small(tp[3]) && tp[3] != am_Underscore)) {
goto error;
}
- for (i = 0; i < 3 && mfa[i] != am_Underscore; i++, specified++) {
+ for (i = 0; i < 3 && tp[i+1] != am_Underscore; i++, specified++) {
/* Empty loop body */
}
for (i = specified; i < 3; i++) {
- if (mfa[i] != am_Underscore) {
+ if (tp[i+1] != am_Underscore) {
goto error;
}
}
- if (is_small(mfa[2])) {
- mfa[2] = signed_val(mfa[2]);
+
+ mfa.module = tp[1];
+ mfa.function = tp[2];
+
+ if (is_small(tp[3])) {
+ mfa.arity = signed_val(tp[3]);
}
if (!erts_try_seize_code_write_permission(BIF_P)) {
ERTS_BIF_YIELD2(bif_export[BIF_erts_debug_breakpoint_2],
BIF_P, BIF_ARG_1, BIF_ARG_2);
}
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
- erts_bp_match_functions(&f, mfa, specified);
- if (bool == am_true) {
+ erts_bp_match_functions(&f, &mfa, specified);
+ if (boolean == am_true) {
erts_set_debug_break(&f);
erts_install_breakpoints(&f);
erts_commit_staged_bp();
@@ -136,8 +176,8 @@ erts_debug_breakpoint_2(BIF_ALIST_2)
res = make_small(f.matched);
erts_bp_free_matched_functions(&f);
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
erts_release_code_write_permission();
return res;
@@ -159,9 +199,9 @@ void debug_dump_code(BeamInstr *I, int num)
erts_print(ERTS_PRINT_DSBUF, (void *) dsbufp, HEXF ": ", code_ptr);
instr = (BeamInstr) code_ptr[0];
for (i = 0; i < NUM_SPECIFIC_OPS; i++) {
- if (instr == (BeamInstr) BeamOp(i) && opc[i].name[0] != '\0') {
+ if (BeamIsOpCode(instr, i) && opc[i].name[0] != '\0') {
code_ptr += print_op(ERTS_PRINT_DSBUF, (void *) dsbufp,
- i, opc[i].sz-1, code_ptr+1) + 1;
+ i, opc[i].sz-1, code_ptr) + 1;
break;
}
}
@@ -206,9 +246,9 @@ erts_debug_disassemble_1(BIF_ALIST_1)
Eterm* tp;
Eterm bin;
Eterm mfa;
- BeamInstr* funcinfo = NULL; /* Initialized to eliminate warning. */
- BeamInstr* code_base;
- BeamInstr* code_ptr = NULL; /* Initialized to eliminate warning. */
+ ErtsCodeMFA *cmfa = NULL;
+ BeamCodeHeader* code_hdr;
+ BeamInstr *code_ptr;
BeamInstr instr;
BeamInstr uaddr;
Uint hsz;
@@ -216,7 +256,7 @@ erts_debug_disassemble_1(BIF_ALIST_1)
if (term_to_UWord(addr, &uaddr)) {
code_ptr = (BeamInstr *) uaddr;
- if ((funcinfo = find_function_from_pc(code_ptr)) == NULL) {
+ if ((cmfa = find_function_from_pc(code_ptr)) == NULL) {
BIF_RET(am_false);
}
} else if (is_tuple(addr)) {
@@ -247,24 +287,22 @@ erts_debug_disassemble_1(BIF_ALIST_1)
* such as erts_debug:apply/4. Then search for it in the module.
*/
if ((ep = erts_find_function(mod, name, arity, code_ix)) != NULL) {
- /* XXX: add "&& ep->address != ep->code+3" condition?
+ /* XXX: add "&& ep->address != ep->code" condition?
* Consider a traced function.
- * Its ep will have ep->address == ep->code+3.
+ * Its ep will have ep->address == ep->code.
* erts_find_function() will return the non-NULL ep.
* Below we'll try to derive a code_ptr from ep->address.
* But this code_ptr will point to the start of the Export,
* not the function's func_info instruction. BOOM !?
*/
- code_ptr = ((BeamInstr *) ep->addressv[code_ix]) - 5;
- funcinfo = code_ptr+2;
- } else if (modp == NULL || (code_base = modp->curr.code) == NULL) {
+ cmfa = erts_code_to_codemfa(ep->addressv[code_ix]);
+ } else if (modp == NULL || (code_hdr = modp->curr.code_hdr) == NULL) {
BIF_RET(am_undef);
} else {
- n = code_base[MI_NUM_FUNCTIONS];
+ n = code_hdr->num_functions;
for (i = 0; i < n; i++) {
- code_ptr = (BeamInstr *) code_base[MI_FUNCTIONS+i];
- if (code_ptr[3] == name && code_ptr[4] == arity) {
- funcinfo = code_ptr+2;
+ cmfa = &code_hdr->functions[i]->mfa;
+ if (cmfa->function == name && cmfa->arity == arity) {
break;
}
}
@@ -272,6 +310,7 @@ erts_debug_disassemble_1(BIF_ALIST_1)
BIF_RET(am_undef);
}
}
+ code_ptr = (BeamInstr*)erts_code_to_codeinfo(erts_codemfa_to_code(cmfa));
} else {
goto error;
}
@@ -280,9 +319,9 @@ erts_debug_disassemble_1(BIF_ALIST_1)
erts_print(ERTS_PRINT_DSBUF, (void *) dsbufp, HEXF ": ", code_ptr);
instr = (BeamInstr) code_ptr[0];
for (i = 0; i < NUM_SPECIFIC_OPS; i++) {
- if (instr == (BeamInstr) BeamOp(i) && opc[i].name[0] != '\0') {
+ if (BeamIsOpCode(instr, i) && opc[i].name[0] != '\0') {
code_ptr += print_op(ERTS_PRINT_DSBUF, (void *) dsbufp,
- i, opc[i].sz-1, code_ptr+1) + 1;
+ i, opc[i].sz-1, code_ptr) + 1;
break;
}
}
@@ -297,9 +336,10 @@ erts_debug_disassemble_1(BIF_ALIST_1)
(void) erts_bld_uword(NULL, &hsz, (BeamInstr) code_ptr);
hp = HAlloc(p, hsz);
addr = erts_bld_uword(&hp, NULL, (BeamInstr) code_ptr);
- ASSERT(is_atom(funcinfo[0]));
- ASSERT(is_atom(funcinfo[1]));
- mfa = TUPLE3(hp, (Eterm) funcinfo[0], (Eterm) funcinfo[1], make_small((Eterm) funcinfo[2]));
+ ASSERT(is_atom(cmfa->module) || is_nil(cmfa->module));
+ ASSERT(is_atom(cmfa->function) || is_nil(cmfa->function));
+ mfa = TUPLE3(hp, cmfa->module, cmfa->function,
+ make_small(cmfa->arity));
hp += 4;
return TUPLE3(hp, addr, bin, mfa);
}
@@ -311,11 +351,12 @@ dbg_bt(Process* p, Eterm* sp)
while (sp < stack) {
if (is_CP(*sp)) {
- BeamInstr* addr = find_function_from_pc(cp_val(*sp));
- if (addr)
+ ErtsCodeMFA* cmfa = find_function_from_pc(cp_val(*sp));
+ if (cmfa)
erts_fprintf(stderr,
HEXF ": %T:%T/%bpu\n",
- addr, (Eterm) addr[0], (Eterm) addr[1], addr[2]);
+ &cmfa->module, cmfa->module,
+ cmfa->function, cmfa->arity);
}
sp++;
}
@@ -324,17 +365,17 @@ dbg_bt(Process* p, Eterm* sp)
void
dbg_where(BeamInstr* addr, Eterm x0, Eterm* reg)
{
- BeamInstr* f = find_function_from_pc(addr);
+ ErtsCodeMFA* cmfa = find_function_from_pc(addr);
- if (f == NULL) {
+ if (cmfa == NULL) {
erts_fprintf(stderr, "???\n");
} else {
int arity;
int i;
- addr = f;
- arity = addr[2];
- erts_fprintf(stderr, HEXF ": %T:%T(", addr, (Eterm) addr[0], (Eterm) addr[1]);
+ arity = cmfa->arity;
+ erts_fprintf(stderr, HEXF ": %T:%T(", addr,
+ cmfa->module, cmfa->function);
for (i = 0; i < arity; i++)
erts_fprintf(stderr, i ? ", %T" : "%T", i ? reg[i] : x0);
erts_fprintf(stderr, ")\n");
@@ -342,7 +383,7 @@ dbg_where(BeamInstr* addr, Eterm x0, Eterm* reg)
}
static int
-print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
+print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr)
{
int i;
BeamInstr tag;
@@ -364,8 +405,14 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
* Avoid copying because instructions containing bignum operands
* are bigger than actually declared.
*/
- ap = (BeamInstr *) addr;
+ addr++;
+ ap = addr;
} else {
+#if defined(ARCH_64) && defined(CODE_MODEL_SMALL)
+ BeamInstr instr_word = addr[0];
+#endif
+ addr++;
+
/*
* Copy all arguments to a local buffer for the unpacking.
*/
@@ -385,26 +432,27 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
while (start_prog < prog) {
prog--;
switch (*prog) {
+ case 'f':
case 'g':
+ case 'q':
*ap++ = *--sp;
break;
- case 'i': /* Initialize packing accumulator. */
- *ap++ = packed;
- break;
- case 's':
- *ap++ = packed & 0x3ff;
- packed >>= 10;
+#ifdef ARCH_64
+ case '1': /* Tightest shift */
+ *ap++ = (packed & BEAM_TIGHTEST_MASK) << 3;
+ packed >>= BEAM_TIGHTEST_SHIFT;
break;
- case '0': /* Tight shift */
- *ap++ = packed & (BEAM_TIGHT_MASK / sizeof(Eterm));
+#endif
+ case '2': /* Tight shift */
+ *ap++ = packed & BEAM_TIGHT_MASK;
packed >>= BEAM_TIGHT_SHIFT;
break;
- case '6': /* Shift 16 steps */
+ case '3': /* Loose shift */
*ap++ = packed & BEAM_LOOSE_MASK;
packed >>= BEAM_LOOSE_SHIFT;
break;
#ifdef ARCH_64
- case 'w': /* Shift 32 steps */
+ case '4': /* Shift 32 steps */
*ap++ = packed & BEAM_WIDE_MASK;
packed >>= BEAM_WIDE_SHIFT;
break;
@@ -415,8 +463,18 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
case 'P':
packed = *--sp;
break;
+#if defined(ARCH_64) && defined(CODE_MODEL_SMALL)
+ case '#': /* -1 */
+ case '$': /* -2 */
+ case '%': /* -3 */
+ case '&': /* -4 */
+ case '\'': /* -5 */
+ case '(': /* -6 */
+ packed = (packed << BEAM_WIDE_SHIFT) | BeamExtraData(instr_word);
+ break;
+#endif
default:
- ASSERT(0);
+ erts_exit(ERTS_ERROR_EXIT, "beam_debug: invalid packing op: %c\n", *prog);
}
}
ap = args;
@@ -431,39 +489,41 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
while (*sign) {
switch (*sign) {
case 'r': /* x(0) */
- erts_print(to, to_arg, "x(0)");
+ erts_print(to, to_arg, "r(0)");
break;
case 'x': /* x(N) */
- if (reg_index(ap[0]) == 0) {
- erts_print(to, to_arg, "x[0]");
- } else {
- erts_print(to, to_arg, "x(%d)", reg_index(ap[0]));
+ {
+ Uint n = ap[0] / sizeof(Eterm);
+ erts_print(to, to_arg, "x(%d)", n);
+ ap++;
}
- ap++;
break;
case 'y': /* y(N) */
- erts_print(to, to_arg, "y(%d)", reg_index(ap[0]) - CP_SIZE);
- ap++;
+ {
+ Uint n = ap[0] / sizeof(Eterm) - CP_SIZE;
+ erts_print(to, to_arg, "y(%d)", n);
+ ap++;
+ }
break;
case 'n': /* Nil */
erts_print(to, to_arg, "[]");
break;
- case 's': /* Any source (tagged constant or register) */
- tag = beam_reg_tag(*ap);
- if (tag == X_REG_DEF) {
- if (reg_index(*ap) == 0) {
- erts_print(to, to_arg, "x[0]");
- } else {
- erts_print(to, to_arg, "x(%d)", reg_index(*ap));
- }
+ case 'S': /* Register */
+ {
+ Uint reg_type = (*ap & 1) ? 'y' : 'x';
+ Uint n = ap[0] / sizeof(Eterm);
+ erts_print(to, to_arg, "%c(%d)", reg_type, n);
ap++;
- break;
- } else if (tag == Y_REG_DEF) {
- erts_print(to, to_arg, "y(%d)", reg_index(*ap) - CP_SIZE);
+ break;
+ }
+ case 's': /* Any source (tagged constant or register) */
+ tag = loader_tag(*ap);
+ if (tag == LOADER_X_REG) {
+ erts_print(to, to_arg, "x(%d)", loader_x_reg_index(*ap));
ap++;
break;
- } else if (tag == R_REG_DEF) {
- erts_print(to, to_arg, "x(0)");
+ } else if (tag == LOADER_Y_REG) {
+ erts_print(to, to_arg, "y(%d)", loader_y_reg_index(*ap) - CP_SIZE);
ap++;
break;
}
@@ -480,78 +540,84 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
ap++;
break;
case 'd': /* Destination (x(0), x(N), y(N)) */
- switch (beam_reg_tag(*ap)) {
- case X_REG_DEF:
- if (reg_index(*ap) == 0) {
- erts_print(to, to_arg, "x[0]");
- } else {
- erts_print(to, to_arg, "x(%d)", reg_index(*ap));
- }
- break;
- case Y_REG_DEF:
- erts_print(to, to_arg, "y(%d)", reg_index(*ap) - CP_SIZE);
- break;
- case R_REG_DEF:
- erts_print(to, to_arg, "x(0)");
- break;
+ if (*ap & 1) {
+ erts_print(to, to_arg, "y(%d)",
+ *ap / sizeof(Eterm) - CP_SIZE);
+ } else {
+ erts_print(to, to_arg, "x(%d)",
+ *ap / sizeof(Eterm));
}
ap++;
break;
- case 'I': /* Untagged integer. */
- case 't':
- erts_print(to, to_arg, "%d", *ap);
+ case 't': /* Untagged integers */
+ case 'I':
+ case 'W':
+ switch (op) {
+ case op_i_gc_bif1_jWstd:
+ case op_i_gc_bif2_jWtssd:
+ case op_i_gc_bif3_jWtssd:
+ {
+ const ErtsGcBif* p;
+ BifFunction gcf = (BifFunction) *ap;
+ for (p = erts_gc_bifs; p->bif != 0; p++) {
+ if (p->gc_bif == gcf) {
+ print_bif_name(to, to_arg, p->bif);
+ break;
+ }
+ }
+ if (p->bif == 0) {
+ erts_print(to, to_arg, "%d", (Uint)gcf);
+ }
+ break;
+ }
+ default:
+ erts_print(to, to_arg, "%d", *ap);
+ }
ap++;
break;
case 'f': /* Destination label */
{
- BeamInstr* f = find_function_from_pc((BeamInstr *)*ap);
- if (f+3 != (BeamInstr *) *ap) {
- erts_print(to, to_arg, "f(" HEXF ")", *ap);
+ BeamInstr* target = f_to_addr(addr, op, ap);
+ ErtsCodeMFA* cmfa = find_function_from_pc(target);
+ if (!cmfa || erts_codemfa_to_code(cmfa) != target) {
+ erts_print(to, to_arg, "f(" HEXF ")", target);
} else {
- erts_print(to, to_arg, "%T:%T/%bpu", (Eterm) f[0], (Eterm) f[1], f[2]);
+ erts_print(to, to_arg, "%T:%T/%bpu", cmfa->module,
+ cmfa->function, cmfa->arity);
}
ap++;
}
break;
case 'p': /* Pointer (to label) */
{
- BeamInstr* f = find_function_from_pc((BeamInstr *)*ap);
- if (f+3 != (BeamInstr *) *ap) {
- erts_print(to, to_arg, "p(" HEXF ")", *ap);
- } else {
- erts_print(to, to_arg, "%T:%T/%bpu", (Eterm) f[0], (Eterm) f[1], f[2]);
- }
+ BeamInstr* target = f_to_addr(addr, op, ap);
+ erts_print(to, to_arg, "p(" HEXF ")", target);
ap++;
}
break;
case 'j': /* Pointer (to label) */
- erts_print(to, to_arg, "j(" HEXF ")", *ap);
+ if (*ap == 0) {
+ erts_print(to, to_arg, "j(0)");
+ } else {
+ BeamInstr* target = f_to_addr(addr, op, ap);
+ erts_print(to, to_arg, "j(" HEXF ")", target);
+ }
ap++;
break;
case 'e': /* Export entry */
{
Export* ex = (Export *) *ap;
erts_print(to, to_arg,
- "%T:%T/%bpu", (Eterm) ex->code[0], (Eterm) ex->code[1], ex->code[2]);
+ "%T:%T/%bpu", (Eterm) ex->info.mfa.module,
+ (Eterm) ex->info.mfa.function,
+ ex->info.mfa.arity);
ap++;
}
break;
case 'F': /* Function definition */
break;
case 'b':
- for (i = 0; i < BIF_SIZE; i++) {
- BifFunction bif = (BifFunction) *ap;
- if (bif == bif_table[i].f) {
- break;
- }
- }
- if (i == BIF_SIZE) {
- erts_print(to, to_arg, "b(%d)", (Uint) *ap);
- } else {
- Eterm name = bif_table[i].name;
- unsigned arity = bif_table[i].arity;
- erts_print(to, to_arg, "%T/%u", name, arity);
- }
+ print_bif_name(to, to_arg, (BifFunction) *ap);
ap++;
break;
case 'P': /* Byte offset into tuple (see beam_load.c) */
@@ -560,7 +626,7 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
ap++;
break;
case 'l': /* fr(N) */
- erts_print(to, to_arg, "fr(%d)", reg_index(ap[0]));
+ erts_print(to, to_arg, "fr(%d)", loader_reg_index(ap[0]));
ap++;
break;
default:
@@ -578,84 +644,169 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
unpacked = ap;
ap = addr + size;
+
+ /*
+ * In the code below, never use ap[-1], ap[-2], ...
+ * (will not work if the arguments have been packed).
+ *
+ * Instead use unpacked[-1], unpacked[-2], ...
+ */
switch (op) {
- case op_i_select_val_rfI:
- case op_i_select_val_xfI:
- case op_i_select_val_yfI:
+ case op_i_select_val_lins_xfI:
+ case op_i_select_val_lins_yfI:
+ case op_i_select_val_bins_xfI:
+ case op_i_select_val_bins_yfI:
{
- int n = ap[-1];
+ int n = unpacked[-1];
+ int ix = n;
+ Sint32* jump_tab = (Sint32 *)(ap + n);
- while (n > 0) {
- erts_print(to, to_arg, "%T f(" HEXF ") ", (Eterm) ap[0], ap[1]);
- ap += 2;
- size += 2;
- n--;
+ while (ix--) {
+ erts_print(to, to_arg, "%T ", (Eterm) ap[0]);
+ ap++;
+ size++;
}
+ ix = n;
+ while (ix--) {
+ BeamInstr* target = f_to_addr_packed(addr, op, jump_tab);
+ erts_print(to, to_arg, "f(" HEXF ") ", target);
+ jump_tab++;
+ }
+ size += (n+1) / 2;
}
break;
- case op_i_select_tuple_arity_rfI:
case op_i_select_tuple_arity_xfI:
case op_i_select_tuple_arity_yfI:
- {
- int n = ap[-1];
+ {
+ int n = unpacked[-1];
+ int ix = n - 1; /* without sentinel */
+ Sint32* jump_tab = (Sint32 *)(ap + n);
- while (n > 0) {
- Uint arity = arityval(ap[0]);
- erts_print(to, to_arg, " {%d} f(" HEXF ")", arity, ap[1]);
- ap += 2;
- size += 2;
- n--;
- }
- }
- break;
- case op_i_jump_on_val_rfII:
- case op_i_jump_on_val_xfII:
- case op_i_jump_on_val_yfII:
+ while (ix--) {
+ Uint arity = arityval(ap[0]);
+ erts_print(to, to_arg, "{%d} ", arity, ap[1]);
+ ap++;
+ size++;
+ }
+ /* print sentinel */
+ erts_print(to, to_arg, "{%T} ", ap[0], ap[1]);
+ ap++;
+ size++;
+ ix = n;
+ while (ix--) {
+ BeamInstr* target = f_to_addr_packed(addr, op, jump_tab);
+ erts_print(to, to_arg, "f(" HEXF ") ", target);
+ jump_tab++;
+ }
+ size += (n+1) / 2;
+ }
+ break;
+ case op_i_select_val2_xfcc:
+ case op_i_select_val2_yfcc:
+ case op_i_select_tuple_arity2_xfAA:
+ case op_i_select_tuple_arity2_yfAA:
+ {
+ Sint32* jump_tab = (Sint32 *) ap;
+ BeamInstr* target;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ target = f_to_addr_packed(addr, op, jump_tab++);
+ erts_print(to, to_arg, "f(" HEXF ") ", target);
+ }
+ size += 1;
+ }
+ break;
+ case op_i_jump_on_val_xfIW:
+ case op_i_jump_on_val_yfIW:
{
- int n;
- for (n = ap[-2]; n > 0; n--) {
- erts_print(to, to_arg, "f(" HEXF ") ", ap[0]);
- ap++;
- size++;
+ int n = unpacked[-2];
+ Sint32* jump_tab = (Sint32 *) ap;
+
+ size += (n+1) / 2;
+ while (n-- > 0) {
+ BeamInstr* target = f_to_addr_packed(addr, op, jump_tab);
+ erts_print(to, to_arg, "f(" HEXF ") ", target);
+ jump_tab++;
}
}
break;
- case op_i_jump_on_val_zero_rfI:
case op_i_jump_on_val_zero_xfI:
case op_i_jump_on_val_zero_yfI:
{
- int n;
- for (n = ap[-1]; n > 0; n--) {
- erts_print(to, to_arg, "f(" HEXF ") ", ap[0]);
- ap++;
- size++;
+ int n = unpacked[-1];
+ Sint32* jump_tab = (Sint32 *) ap;
+
+ size += (n+1) / 2;
+ while (n-- > 0) {
+ BeamInstr* target = f_to_addr_packed(addr, op, jump_tab);
+ erts_print(to, to_arg, "f(" HEXF ") ", target);
+ jump_tab++;
}
}
break;
- case op_i_put_tuple_rI:
case op_i_put_tuple_xI:
case op_i_put_tuple_yI:
- case op_new_map_jdII:
- case op_update_map_assoc_jsdII:
- case op_update_map_exact_jsdII:
- case op_i_has_map_fields_fsI:
- case op_i_get_map_elements_fsI:
+ case op_new_map_dtI:
+ case op_update_map_assoc_sdtI:
+ case op_update_map_exact_jsdtI:
{
int n = unpacked[-1];
while (n > 0) {
- if (!is_header(ap[0])) {
+ switch (loader_tag(ap[0])) {
+ case LOADER_X_REG:
+ erts_print(to, to_arg, " x(%d)", loader_x_reg_index(ap[0]));
+ break;
+ case LOADER_Y_REG:
+ erts_print(to, to_arg, " y(%d)", loader_y_reg_index(ap[0]) - CP_SIZE);
+ break;
+ default:
erts_print(to, to_arg, " %T", (Eterm) ap[0]);
+ break;
+ }
+ ap++, size++, n--;
+ }
+ }
+ break;
+ case op_i_new_small_map_lit_dtq:
+ {
+ Eterm *tp = tuple_val(unpacked[-1]);
+ int n = arityval(*tp);
+
+ while (n > 0) {
+ switch (loader_tag(ap[0])) {
+ case LOADER_X_REG:
+ erts_print(to, to_arg, " x(%d)", loader_x_reg_index(ap[0]));
+ break;
+ case LOADER_Y_REG:
+ erts_print(to, to_arg, " y(%d)", loader_y_reg_index(ap[0]) - CP_SIZE);
+ break;
+ default:
+ erts_print(to, to_arg, " %T", (Eterm) ap[0]);
+ break;
+ }
+ ap++, size++, n--;
+ }
+ }
+ break;
+ case op_i_get_map_elements_fsI:
+ {
+ int n = unpacked[-1];
+
+ while (n > 0) {
+ if (n % 3 == 1) {
+ erts_print(to, to_arg, " %X", ap[0]);
} else {
- switch ((ap[0] >> 2) & 0x03) {
- case R_REG_DEF:
- erts_print(to, to_arg, " x(0)");
+ switch (loader_tag(ap[0])) {
+ case LOADER_X_REG:
+ erts_print(to, to_arg, " x(%d)", loader_x_reg_index(ap[0]));
break;
- case X_REG_DEF:
- erts_print(to, to_arg, " x(%d)", ap[0] >> 4);
+ case LOADER_Y_REG:
+ erts_print(to, to_arg, " y(%d)", loader_y_reg_index(ap[0]) - CP_SIZE);
break;
- case Y_REG_DEF:
- erts_print(to, to_arg, " y(%d)", ap[0] >> 4);
+ default:
+ erts_print(to, to_arg, " %T", (Eterm) ap[0]);
break;
}
}
@@ -668,3 +819,422 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
return size;
}
+
+static void print_bif_name(fmtfn_t to, void* to_arg, BifFunction bif)
+{
+ int i;
+
+ for (i = 0; i < BIF_SIZE; i++) {
+ if (bif == bif_table[i].f) {
+ break;
+ }
+ }
+ if (i == BIF_SIZE) {
+ erts_print(to, to_arg, "b(%d)", (Uint) bif);
+ } else {
+ Eterm name = bif_table[i].name;
+ unsigned arity = bif_table[i].arity;
+ erts_print(to, to_arg, "%T/%u", name, arity);
+ }
+}
+
+static BeamInstr* f_to_addr(BeamInstr* base, int op, BeamInstr* ap)
+{
+ return base - 1 + opc[op].adjust + (Sint32) *ap;
+}
+
+static BeamInstr* f_to_addr_packed(BeamInstr* base, int op, Sint32* ap)
+{
+ return base - 1 + opc[op].adjust + *ap;
+}
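
A sketch of the encoding these helpers decode (editorial, not part of the
patch): branch targets are now stored as signed 32-bit offsets relative to
the start of the instruction instead of as absolute words, with opc[op].adjust
(assumed here to compensate for the instruction's argument layout) folded in.
A minimal stand-alone model, with hypothetical names and an assumed adjust
value of 1:

#include <assert.h>

typedef unsigned long BeamInstrModel;          /* stand-in for BeamInstr */

/* Model of f_to_addr(): resolve a relative label to an absolute address. */
static BeamInstrModel *
resolve_label(BeamInstrModel *instr, int adjust, int offset)
{
    return instr - 1 + adjust + offset;
}

static void resolve_label_demo(void)
{
    BeamInstrModel code[16];
    /* With adjust == 1, an offset of +4 stored in an instruction at
     * code[2] resolves to code[6]. */
    assert(resolve_label(&code[2], 1, 4) == &code[6]);
}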
+
+
+/*
+ * Dirty BIF testing.
+ *
+ * The erts_debug:dirty_cpu/2, erts_debug:dirty_io/2, and
+ * erts_debug:dirty/3 BIFs are used by the dirty_bif_SUITE
+ * test suite.
+ */
+
+static int ms_wait(Process *c_p, Eterm etimeout, int busy);
+static int dirty_send_message(Process *c_p, Eterm to, Eterm tag);
+static BIF_RETTYPE dirty_test(Process *c_p, Eterm type, Eterm arg1, Eterm arg2, UWord *I);
+
+/*
+ * erts_debug:dirty_cpu/2 is statically determined to execute on
+ * a dirty CPU scheduler (see erts_dirty_bif.tab).
+ */
+BIF_RETTYPE
+erts_debug_dirty_cpu_2(BIF_ALIST_2)
+{
+ return dirty_test(BIF_P, am_dirty_cpu, BIF_ARG_1, BIF_ARG_2, BIF_I);
+}
+
+/*
+ * erts_debug:dirty_io/2 is statically determined to execute on
+ * a dirty I/O scheduler (see erts_dirty_bif.tab).
+ */
+BIF_RETTYPE
+erts_debug_dirty_io_2(BIF_ALIST_2)
+{
+ return dirty_test(BIF_P, am_dirty_io, BIF_ARG_1, BIF_ARG_2, BIF_I);
+}
+
+/*
+ * erts_debug:dirty/3 executes on a normal scheduler.
+ */
+BIF_RETTYPE
+erts_debug_dirty_3(BIF_ALIST_3)
+{
+ Eterm argv[2];
+ switch (BIF_ARG_1) {
+ case am_normal:
+ return dirty_test(BIF_P, am_normal, BIF_ARG_2, BIF_ARG_3, BIF_I);
+ case am_dirty_cpu:
+ argv[0] = BIF_ARG_2;
+ argv[1] = BIF_ARG_3;
+ return erts_schedule_bif(BIF_P,
+ argv,
+ BIF_I,
+ erts_debug_dirty_cpu_2,
+ ERTS_SCHED_DIRTY_CPU,
+ am_erts_debug,
+ am_dirty_cpu,
+ 2);
+ case am_dirty_io:
+ argv[0] = BIF_ARG_2;
+ argv[1] = BIF_ARG_3;
+ return erts_schedule_bif(BIF_P,
+ argv,
+ BIF_I,
+ erts_debug_dirty_io_2,
+ ERTS_SCHED_DIRTY_IO,
+ am_erts_debug,
+ am_dirty_io,
+ 2);
+ default:
+ BIF_ERROR(BIF_P, EXC_BADARG);
+ }
+}
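
As a usage sketch (editorial; behaviour inferred from dirty_test() below,
these BIFs are test helpers for dirty_bif_SUITE rather than a documented API):

/*
 *   erts_debug:dirty_cpu(scheduler, type)      -> dirty_cpu
 *   erts_debug:dirty_io(scheduler, type)       -> dirty_io
 *   erts_debug:dirty(normal, scheduler, type)  -> normal
 *   erts_debug:dirty(dirty_cpu, error, badarg) -> badarg exception,
 *       raised after rescheduling onto a dirty CPU scheduler.
 */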
+
+
+static BIF_RETTYPE
+dirty_test(Process *c_p, Eterm type, Eterm arg1, Eterm arg2, UWord *I)
+{
+ BIF_RETTYPE ret;
+ if (am_scheduler == arg1) {
+ ErtsSchedulerData *esdp;
+ if (arg2 != am_type)
+ goto badarg;
+ esdp = erts_proc_sched_data(c_p);
+ if (!esdp)
+ goto scheduler_type_error;
+
+ switch (esdp->type) {
+ case ERTS_SCHED_NORMAL:
+ ERTS_BIF_PREP_RET(ret, am_normal);
+ break;
+ case ERTS_SCHED_DIRTY_CPU:
+ ERTS_BIF_PREP_RET(ret, am_dirty_cpu);
+ break;
+ case ERTS_SCHED_DIRTY_IO:
+ ERTS_BIF_PREP_RET(ret, am_dirty_io);
+ break;
+ default:
+ scheduler_type_error:
+ ERTS_BIF_PREP_RET(ret, am_error);
+ break;
+ }
+ }
+ else if (am_error == arg1) {
+ switch (arg2) {
+ case am_notsup:
+ ERTS_BIF_PREP_ERROR(ret, c_p, EXC_NOTSUP);
+ break;
+ case am_undef:
+ ERTS_BIF_PREP_ERROR(ret, c_p, EXC_UNDEF);
+ break;
+ case am_badarith:
+ ERTS_BIF_PREP_ERROR(ret, c_p, EXC_BADARITH);
+ break;
+ case am_noproc:
+ ERTS_BIF_PREP_ERROR(ret, c_p, EXC_NOPROC);
+ break;
+ case am_system_limit:
+ ERTS_BIF_PREP_ERROR(ret, c_p, SYSTEM_LIMIT);
+ break;
+ case am_badarg:
+ default:
+ goto badarg;
+ }
+ }
+ else if (am_copy == arg1) {
+ int i;
+ Eterm res;
+
+ for (res = NIL, i = 0; i < 1000; i++) {
+ Eterm *hp, sz;
+ Eterm cpy;
+ /* We do not want this to be optimized,
+	       but rather the opposite... */
+ sz = size_object(arg2);
+ hp = HAlloc(c_p, sz);
+ cpy = copy_struct(arg2, sz, &hp, &c_p->off_heap);
+ hp = HAlloc(c_p, 2);
+ res = CONS(hp, cpy, res);
+ }
+
+ ERTS_BIF_PREP_RET(ret, res);
+ }
+ else if (am_send == arg1) {
+ dirty_send_message(c_p, arg2, am_ok);
+ ERTS_BIF_PREP_RET(ret, am_ok);
+ }
+ else if (ERTS_IS_ATOM_STR("wait", arg1)) {
+ if (!ms_wait(c_p, arg2, type == am_dirty_cpu))
+ goto badarg;
+ ERTS_BIF_PREP_RET(ret, am_ok);
+ }
+ else if (ERTS_IS_ATOM_STR("reschedule", arg1)) {
+ /*
+	 * Reschedule the operation, decrementing the counter by two,
+	 * until it reaches zero. Switch between scheduler types when
+	 * 'n' is evenly divisible by 4. If the initial value wasn't
+	 * evenly divisible by 2, throw a badarg exception.
+ */
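	/*
	 * Editorial walk-through: erts_debug:dirty_cpu(reschedule, 8)
	 * hops 8 (dirty_cpu; 8 rem 4 == 0, so the next hop switches)
	 * -> 6 (dirty_io) -> 4 (dirty_io; switches again) -> 2 (normal)
	 * -> 0, which returns 'ok'.
	 */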
+ Eterm next_type;
+ Sint n;
+ if (!term_to_Sint(arg2, &n) || n < 0)
+ goto badarg;
+ if (n == 0)
+ ERTS_BIF_PREP_RET(ret, am_ok);
+ else {
+ Eterm argv[3];
+ Eterm eint = erts_make_integer((Uint) (n - 2), c_p);
+ if (n % 4 != 0)
+ next_type = type;
+ else {
+ switch (type) {
+ case am_dirty_cpu: next_type = am_dirty_io; break;
+ case am_dirty_io: next_type = am_normal; break;
+ case am_normal: next_type = am_dirty_cpu; break;
+ default: goto badarg;
+ }
+ }
+ switch (next_type) {
+ case am_dirty_io:
+ argv[0] = arg1;
+ argv[1] = eint;
+ ret = erts_schedule_bif(c_p,
+ argv,
+ I,
+ erts_debug_dirty_io_2,
+ ERTS_SCHED_DIRTY_IO,
+ am_erts_debug,
+ am_dirty_io,
+ 2);
+ break;
+ case am_dirty_cpu:
+ argv[0] = arg1;
+ argv[1] = eint;
+ ret = erts_schedule_bif(c_p,
+ argv,
+ I,
+ erts_debug_dirty_cpu_2,
+ ERTS_SCHED_DIRTY_CPU,
+ am_erts_debug,
+ am_dirty_cpu,
+ 2);
+ break;
+ case am_normal:
+ argv[0] = am_normal;
+ argv[1] = arg1;
+ argv[2] = eint;
+ ret = erts_schedule_bif(c_p,
+ argv,
+ I,
+ erts_debug_dirty_3,
+ ERTS_SCHED_NORMAL,
+ am_erts_debug,
+ am_dirty,
+ 3);
+ break;
+ default:
+ goto badarg;
+ }
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("ready_wait6_done", arg1)) {
+ ERTS_DECL_AM(ready);
+ ERTS_DECL_AM(done);
+ dirty_send_message(c_p, arg2, AM_ready);
+ ms_wait(c_p, make_small(6000), 0);
+ dirty_send_message(c_p, arg2, AM_done);
+ ERTS_BIF_PREP_RET(ret, am_ok);
+ }
+ else if (ERTS_IS_ATOM_STR("alive_waitexiting", arg1)) {
+ Process *real_c_p = erts_proc_shadow2real(c_p);
+ Eterm *hp, *hp2;
+ Uint sz;
+ int i;
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
+ int dirty_io = esdp->type == ERTS_SCHED_DIRTY_IO;
+
+ if (ERTS_PROC_IS_EXITING(real_c_p))
+ goto badarg;
+ dirty_send_message(c_p, arg2, am_alive);
+
+ /* Wait until dead */
+ while (!ERTS_PROC_IS_EXITING(real_c_p)) {
+ if (dirty_io)
+ ms_wait(c_p, make_small(100), 0);
+ else
+ erts_thr_yield();
+ }
+
+ ms_wait(c_p, make_small(1000), 0);
+
+ /* Should still be able to allocate memory */
+ hp = HAlloc(c_p, 3); /* Likely on heap */
+ sz = 10000;
+ hp2 = HAlloc(c_p, sz); /* Likely in heap fragment */
+ *hp2 = make_pos_bignum_header(sz);
+ for (i = 1; i < sz; i++)
+ hp2[i] = (Eterm) 4711;
+ ERTS_BIF_PREP_RET(ret, TUPLE2(hp, am_ok, make_big(hp2)));
+ }
+ else {
+ badarg:
+ ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
+ }
+ return ret;
+}
+
+
+static int
+dirty_send_message(Process *c_p, Eterm to, Eterm tag)
+{
+ ErtsProcLocks c_p_locks, rp_locks;
+ Process *rp, *real_c_p;
+ Eterm msg, *hp;
+ ErlOffHeap *ohp;
+ ErtsMessage *mp;
+
+ ASSERT(is_immed(tag));
+
+ real_c_p = erts_proc_shadow2real(c_p);
+ if (real_c_p != c_p)
+ c_p_locks = 0;
+ else
+ c_p_locks = ERTS_PROC_LOCK_MAIN;
+
+ ASSERT(real_c_p->common.id == c_p->common.id);
+
+ rp = erts_pid2proc_opt(real_c_p, c_p_locks,
+ to, 0,
+ ERTS_P2P_FLG_INC_REFC);
+
+ if (!rp)
+ return 0;
+
+ rp_locks = 0;
+ mp = erts_alloc_message_heap(rp, &rp_locks, 3, &hp, &ohp);
+
+ msg = TUPLE2(hp, tag, c_p->common.id);
+ erts_queue_message(rp, rp_locks, mp, msg, c_p->common.id);
+
+ if (rp == real_c_p)
+ rp_locks &= ~c_p_locks;
+ if (rp_locks)
+ erts_proc_unlock(rp, rp_locks);
+
+ erts_proc_dec_refc(rp);
+
+ return 1;
+}
+
+static int
+ms_wait(Process *c_p, Eterm etimeout, int busy)
+{
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
+ ErtsMonotonicTime time, timeout_time;
+ Sint64 ms;
+
+ if (!term_to_Sint64(etimeout, &ms))
+ return 0;
+
+ time = erts_get_monotonic_time(esdp);
+
+ if (ms < 0)
+ timeout_time = time;
+ else
+ timeout_time = time + ERTS_MSEC_TO_MONOTONIC(ms);
+
+ while (time < timeout_time) {
+ if (busy)
+ erts_thr_yield();
+ else {
+ ErtsMonotonicTime timeout = timeout_time - time;
+
+#ifdef __WIN32__
+ Sleep((DWORD) ERTS_MONOTONIC_TO_MSEC(timeout));
+#else
+ {
+ ErtsMonotonicTime to = ERTS_MONOTONIC_TO_USEC(timeout);
+ struct timeval tv;
+
+ tv.tv_sec = (long) to / (1000*1000);
+ tv.tv_usec = (long) to % (1000*1000);
+
+ select(0, NULL, NULL, NULL, &tv);
+ }
+#endif
+ }
+
+ time = erts_get_monotonic_time(esdp);
+ }
+ return 1;
+}
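
The POSIX branch above uses select() with no file descriptors as a portable
sub-second sleep. A self-contained sketch of the same trick (editorial;
assumes the microsecond count fits in a long):

#include <sys/select.h>

/* Sleep for usec microseconds without signals or extra threads. */
static void sleep_usec(long usec)
{
    struct timeval tv;
    tv.tv_sec  = usec / (1000 * 1000);
    tv.tv_usec = usec % (1000 * 1000);
    select(0, NULL, NULL, NULL, &tv);   /* no fds: just wait for the timeout */
}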
+
+
+# define ERTS_STACK_LIMIT ((char *) ethr_get_stacklimit())
+
+/*
+ * The functions below are for testing the stack
+ * limit functionality. They are intentionally
+ * written body-recursively in order to prevent
+ * last-call optimization...
+ */
+
+UWord
+erts_check_stack_recursion_downwards(char *start_c)
+{
+ char *limit = ERTS_STACK_LIMIT;
+ char c;
+ UWord res;
+ if (erts_check_below_limit(&c, limit + 1024))
+ return (char *) erts_ptr_id(start_c) - (char *) erts_ptr_id(&c);
+ res = erts_check_stack_recursion_downwards(start_c);
+ erts_ptr_id(&c);
+ return res;
+}
+
+UWord
+erts_check_stack_recursion_upwards(char *start_c)
+{
+ char *limit = ERTS_STACK_LIMIT;
+ char c;
+ UWord res;
+ if (erts_check_above_limit(&c, limit - 1024))
+ return (char *) erts_ptr_id(&c) - (char *) erts_ptr_id(start_c);
+ res = erts_check_stack_recursion_upwards(start_c);
+ erts_ptr_id(&c);
+ return res;
+}
+
+int
+erts_is_above_stack_limit(char *ptr)
+{
+ return (char *) ptr > ERTS_STACK_LIMIT;
+}
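
A condensed model of the probing technique above (editorial; assumes a
downward-growing stack, and opaque_id() is a hypothetical stand-in for
erts_ptr_id()). Routing the local's address through an opaque call keeps
the compiler from dropping the frame or turning the body recursion into a
tail call:

#include <stddef.h>

extern void *opaque_id(void *p);        /* stand-in for erts_ptr_id() */

static size_t probe_depth(char *start, char *limit)
{
    char c;                             /* marks the current stack top */
    size_t res;
    if ((char *) opaque_id(&c) < limit + 1024)  /* stop before the limit */
        return (size_t) (start - &c);
    res = probe_depth(start, limit);
    opaque_id(&c);                      /* keep the frame live after the call */
    return res;
}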
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 1026e5f649..60d0008d8f 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2014. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -37,6 +38,7 @@
#include "beam_bp.h"
#include "beam_catches.h"
#include "erl_thr_progress.h"
+#include "erl_nfunc_sched.h"
#ifdef HIPE
#include "hipe_mode_switch.h"
#include "hipe_bif1.h"
@@ -48,42 +50,40 @@
#if defined(NO_JUMP_TABLE)
# define OpCase(OpCode) case op_##OpCode
# define CountCase(OpCode) case op_count_##OpCode
-# define OpCode(OpCode) ((Uint*)op_##OpCode)
-# define Goto(Rel) {Go = (int)(UWord)(Rel); goto emulator_loop;}
-# define LabelAddr(Addr) &&##Addr
+# define IsOpCode(InstrWord, OpCode) (BeamCodeAddr(InstrWord) == (BeamInstr)op_##OpCode)
+# define Goto(Rel) {Go = BeamCodeAddr(Rel); goto emulator_loop;}
+# define GotoPF(Rel) Goto(Rel)
#else
# define OpCase(OpCode) lb_##OpCode
# define CountCase(OpCode) lb_count_##OpCode
-# define Goto(Rel) goto *((void *)Rel)
-# define LabelAddr(Label) &&Label
-# define OpCode(OpCode) (&&lb_##OpCode)
+# define IsOpCode(InstrWord, OpCode) (BeamCodeAddr(InstrWord) == (BeamInstr)&&lb_##OpCode)
+# define Goto(Rel) goto *((void *)BeamCodeAddr(Rel))
+# define GotoPF(Rel) goto *((void *)Rel)
+# define LabelAddr(Label) &&Label
#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
-# ifdef ERTS_SMP
-# define PROCESS_MAIN_CHK_LOCKS(P) \
+# define PROCESS_MAIN_CHK_LOCKS(P) \
+do { \
+ if ((P)) \
+ erts_proc_lc_chk_only_proc_main((P)); \
+ ERTS_LC_ASSERT(!erts_thr_progress_is_blocking()); \
+} while (0)
+# define ERTS_REQ_PROC_MAIN_LOCK(P) \
+do { \
+ if ((P)) \
+ erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN, \
+ __FILE__, __LINE__); \
+} while (0)
+# define ERTS_UNREQ_PROC_MAIN_LOCK(P) \
do { \
- if ((P)) { \
- erts_proc_lc_chk_only_proc_main((P)); \
- } \
- else \
- erts_lc_check_exact(NULL, 0); \
- ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); \
+ if ((P)) \
+ erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN); \
} while (0)
-# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
- if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN,\
- __FILE__, __LINE__)
-# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
- if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
-# else
-# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
-# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
-# define PROCESS_MAIN_CHK_LOCKS(P) erts_lc_check_exact(NULL, 0)
-# endif
#else
# define PROCESS_MAIN_CHK_LOCKS(P)
-# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
-# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
+# define ERTS_REQ_PROC_MAIN_LOCK(P)
+# define ERTS_UNREQ_PROC_MAIN_LOCK(P)
#endif
/*
@@ -98,10 +98,7 @@ do { \
do { \
int i_; \
int Arity_ = PC[-1]; \
- if (Arity_ > 0) { \
- CHECK_TERM(r(0)); \
- } \
- for (i_ = 1; i_ < Arity_; i_++) { \
+ for (i_ = 0; i_ < Arity_; i_++) { \
CHECK_TERM(x(i_)); \
} \
} while (0)
@@ -111,17 +108,18 @@ do { \
# define CHECK_ARGS(T)
#endif
-#ifndef MAX
-#define MAX(x, y) (((x) > (y)) ? (x) : (y))
-#endif
+#define CHECK_ALIGNED(Dst) ASSERT((((Uint)&Dst) & (sizeof(Uint)-1)) == 0)
-#define GET_BIF_ADDRESS(p) ((BifFunction) (((Export *) p)->code[4]))
+#define GET_BIF_MODULE(p) (p->info.mfa.module)
+#define GET_BIF_FUNCTION(p) (p->info.mfa.function)
+#define GET_BIF_ARITY(p) (p->info.mfa.arity)
+#define GET_BIF_ADDRESS(p) ((BifFunction) (p->beam[1]))
#define TermWords(t) (((t) / (sizeof(BeamInstr)/sizeof(Eterm))) + !!((t) % (sizeof(BeamInstr)/sizeof(Eterm))))
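
(Editorial note: TermWords() is ceiling division; it converts a count of
Eterms into the number of BeamInstr words needed to hold them. When the two
types are the same size it reduces to the identity; on a hypothetical build
with two Eterms per BeamInstr, TermWords(5) == 5/2 + !!(5%2) == 3.)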
/*
 * We reuse some of the fields in the save area in the process structure.
- * This is safe to do, since this space is only activly used when
+ * This is safe to do, since this space is only actively used when
* the process is switched out.
*/
#define REDS_IN(p) ((p)->def_arg_reg[5])
@@ -135,11 +133,11 @@ do { \
/* We don't check the range if an ordinary switch is used */
#ifdef NO_JUMP_TABLE
-#define VALID_INSTR(IP) ((UWord)(IP) < (NUMBER_OF_OPCODES*2+10))
+# define VALID_INSTR(IP) (BeamCodeAddr(IP) < (NUMBER_OF_OPCODES*2+10))
#else
-#define VALID_INSTR(IP) \
- ((SWord)LabelAddr(emulator_loop) <= (SWord)(IP) && \
- (SWord)(IP) < (SWord)LabelAddr(end_emulator_loop))
+# define VALID_INSTR(IP) \
+ ((BeamInstr)LabelAddr(emulator_loop) <= BeamCodeAddr(IP) && \
+ BeamCodeAddr(IP) < (BeamInstr)LabelAddr(end_emulator_loop))
#endif /* NO_JUMP_TABLE */
#define SET_CP(p, ip) \
@@ -150,64 +148,11 @@ do { \
ASSERT(VALID_INSTR(* (Eterm *)(ip))); \
I = (ip)
-#define FetchArgs(S1, S2) tmp_arg1 = (S1); tmp_arg2 = (S2)
-
/*
- * Store a result into a register given a destination descriptor.
+ * Register target (X or Y register).
*/
-#define StoreResult(Result, DestDesc) \
- do { \
- Eterm stb_reg; \
- stb_reg = (DestDesc); \
- CHECK_TERM(Result); \
- switch (beam_reg_tag(stb_reg)) { \
- case R_REG_DEF: \
- r(0) = (Result); break; \
- case X_REG_DEF: \
- xb(x_reg_offset(stb_reg)) = (Result); break; \
- default: \
- yb(y_reg_offset(stb_reg)) = (Result); break; \
- } \
- } while (0)
-
-#define StoreSimpleDest(Src, Dest) Dest = (Src)
-
-/*
- * Store a result into a register and execute the next instruction.
- * Dst points to the word with a destination descriptor, which MUST
- * be just before the next instruction.
- */
-
-#define StoreBifResult(Dst, Result) \
- do { \
- BeamInstr* stb_next; \
- Eterm stb_reg; \
- stb_reg = Arg(Dst); \
- I += (Dst) + 2; \
- stb_next = (BeamInstr *) *I; \
- CHECK_TERM(Result); \
- switch (beam_reg_tag(stb_reg)) { \
- case R_REG_DEF: \
- r(0) = (Result); Goto(stb_next); \
- case X_REG_DEF: \
- xb(x_reg_offset(stb_reg)) = (Result); Goto(stb_next); \
- default: \
- yb(y_reg_offset(stb_reg)) = (Result); Goto(stb_next); \
- } \
- } while (0)
-
-#define ClauseFail() goto jump_f
-
-#define SAVE_CP(X) \
- do { \
- *(X) = make_cp(c_p->cp); \
- c_p->cp = 0; \
- } while(0)
-
-#define RESTORE_CP(X) SET_CP(c_p, (BeamInstr *) cp_val(*(X)))
-
-#define ISCATCHEND(instr) ((Eterm *) *(instr) == OpCode(catch_end_y))
+#define REG_TARGET_PTR(Target) (((Target) & 1) ? &yb((Target)-1) : &xb(Target))
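
A self-contained illustration of the target encoding REG_TARGET_PTR assumes
(editorial; bit 0 selects the Y bank, the remaining bits are a byte offset
into the register array; names are stand-ins):

#include <assert.h>
#include <stdint.h>

typedef uintptr_t TermModel;            /* stand-in for Eterm */

static TermModel *
reg_target(TermModel *xregs, TermModel *yregs, uintptr_t target)
{
    return (target & 1) ? (TermModel *) ((char *) yregs + (target - 1))
                        : (TermModel *) ((char *) xregs + target);
}

static void reg_target_demo(void)
{
    TermModel x[8], y[8];
    assert(reg_target(x, y, 2 * sizeof(TermModel))     == &x[2]); /* x(2) */
    assert(reg_target(x, y, 2 * sizeof(TermModel) + 1) == &y[2]); /* y(2) */
}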
/*
* Special Beam instructions.
@@ -217,14 +162,10 @@ BeamInstr beam_apply[2];
BeamInstr beam_exit[1];
BeamInstr beam_continue_exit[1];
-BeamInstr* em_call_error_handler;
-BeamInstr* em_apply_bif;
-BeamInstr* em_call_nif;
-
/* NOTE These should be the only variables containing trace instructions.
** Sometimes tests are for the instruction value, and sometimes
-** for the refering variable (one of these), and rouge references
+** for the referring variable (one of these), and rogue references
** will most likely cause chaos.
*/
BeamInstr beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
@@ -241,10 +182,6 @@ BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
void** beam_ops;
#endif
-#ifndef ERTS_SMP /* Not supported with smp emulator */
-extern int count_instructions;
-#endif
-
#define SWAPIN \
HTOP = HEAP_TOP(c_p); \
E = c_p->stop
@@ -253,6 +190,14 @@ extern int count_instructions;
HEAP_TOP(c_p) = HTOP; \
c_p->stop = E
+#define HEAVY_SWAPIN \
+ SWAPIN; \
+ FCALLS = c_p->fcalls
+
+#define HEAVY_SWAPOUT \
+ SWAPOUT; \
+ c_p->fcalls = FCALLS
+
/*
* Use LIGHT_SWAPOUT when the called function
* will call HeapOnlyAlloc() (and never HAlloc()).
@@ -285,173 +230,24 @@ extern int count_instructions;
HEAP_TOP((P)) = HTOP; \
(P)->stop = E; \
PROCESS_MAIN_CHK_LOCKS((P)); \
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK((P))
+ ERTS_UNREQ_PROC_MAIN_LOCK((P))
#define db(N) (N)
+#define fb(N) ((Sint)(Sint32)(N))
+#define jb(N) ((Sint)(Sint32)(N))
#define tb(N) (N)
-#define xb(N) (*(Eterm *) (((unsigned char *)reg) + (N)))
-#define yb(N) (*(Eterm *) (((unsigned char *)E) + (N)))
-#define fb(N) (*(double *) (((unsigned char *)&(freg[0].fd)) + (N)))
+#define xb(N) (*ADD_BYTE_OFFSET(reg, N))
+#define yb(N) (*ADD_BYTE_OFFSET(E, N))
+#define Sb(N) (*REG_TARGET_PTR(N))
+#define lb(N) (*(double *) (((unsigned char *)&(freg[0].fd)) + (N)))
#define Qb(N) (N)
#define Ib(N) (N)
+
#define x(N) reg[N]
#define y(N) E[N]
-#define r(N) x##N
-
-/*
- * Makes sure that there are StackNeed + HeapNeed + 1 words available
- * on the combined heap/stack segment, then allocates StackNeed + 1
- * words on the stack and saves CP.
- *
- * M is number of live registers to preserve during garbage collection
- */
-
-#define AH(StackNeed, HeapNeed, M) \
- do { \
- int needed; \
- needed = (StackNeed) + 1; \
- if (E - HTOP < (needed + (HeapNeed))) { \
- SWAPOUT; \
- reg[0] = r(0); \
- PROCESS_MAIN_CHK_LOCKS(c_p); \
- FCALLS -= erts_garbage_collect(c_p, needed + (HeapNeed), reg, (M)); \
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \
- PROCESS_MAIN_CHK_LOCKS(c_p); \
- r(0) = reg[0]; \
- SWAPIN; \
- } \
- E -= needed; \
- SAVE_CP(E); \
- } while (0)
-
-#define Allocate(Ns, Live) AH(Ns, 0, Live)
-
-#define AllocateZero(Ns, Live) \
- do { Eterm* ptr; \
- int i = (Ns); \
- AH(i, 0, Live); \
- for (ptr = E + i; ptr > E; ptr--) { \
- make_blank(*ptr); \
- } \
- } while (0)
-
-#define AllocateHeap(Ns, Nh, Live) AH(Ns, Nh, Live)
-
-#define AllocateHeapZero(Ns, Nh, Live) \
- do { Eterm* ptr; \
- int i = (Ns); \
- AH(i, Nh, Live); \
- for (ptr = E + i; ptr > E; ptr--) { \
- make_blank(*ptr); \
- } \
- } while (0)
-
-#define AllocateInit(Ns, Live, Y) \
- do { AH(Ns, 0, Live); make_blank(Y); } while (0)
-
-/*
- * Like the AH macro, but allocates no additional heap space.
- */
-
-#define A(StackNeed, M) AH(StackNeed, 0, M)
-
-#define D(N) \
- RESTORE_CP(E); \
- E += (N) + 1;
-
-
-
-#define TestBinVHeap(VNh, Nh, Live) \
- do { \
- unsigned need = (Nh); \
- if ((E - HTOP < need) || (MSO(c_p).overhead + (VNh) >= BIN_VHEAP_SZ(c_p))) {\
- SWAPOUT; \
- reg[0] = r(0); \
- PROCESS_MAIN_CHK_LOCKS(c_p); \
- FCALLS -= erts_garbage_collect(c_p, need, reg, (Live)); \
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \
- PROCESS_MAIN_CHK_LOCKS(c_p); \
- r(0) = reg[0]; \
- SWAPIN; \
- } \
- HEAP_SPACE_VERIFIED(need); \
- } while (0)
-
-
-
-/*
- * Check if Nh words of heap are available; if not, do a garbage collection.
- * Live is number of active argument registers to be preserved.
- */
-
-#define TestHeap(Nh, Live) \
- do { \
- unsigned need = (Nh); \
- if (E - HTOP < need) { \
- SWAPOUT; \
- reg[0] = r(0); \
- PROCESS_MAIN_CHK_LOCKS(c_p); \
- FCALLS -= erts_garbage_collect(c_p, need, reg, (Live)); \
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \
- PROCESS_MAIN_CHK_LOCKS(c_p); \
- r(0) = reg[0]; \
- SWAPIN; \
- } \
- HEAP_SPACE_VERIFIED(need); \
- } while (0)
-
-/*
- * Check if Nh words of heap are available; if not, do a garbage collection.
- * Live is number of active argument registers to be preserved.
- * Takes special care to preserve Extra if a garbage collection occurs.
- */
-
-#define TestHeapPreserve(Nh, Live, Extra) \
- do { \
- unsigned need = (Nh); \
- if (E - HTOP < need) { \
- SWAPOUT; \
- reg[0] = r(0); \
- reg[Live] = Extra; \
- PROCESS_MAIN_CHK_LOCKS(c_p); \
- FCALLS -= erts_garbage_collect(c_p, need, reg, (Live)+1); \
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \
- PROCESS_MAIN_CHK_LOCKS(c_p); \
- if (Live > 0) { \
- r(0) = reg[0]; \
- } \
- Extra = reg[Live]; \
- SWAPIN; \
- } \
- HEAP_SPACE_VERIFIED(need); \
- } while (0)
-
-#define TestHeapPutList(Need, Reg) \
- do { \
- TestHeap((Need), 1); \
- PutList(Reg, r(0), r(0), StoreSimpleDest); \
- CHECK_TERM(r(0)); \
- } while (0)
-
-#define Init(N) make_blank(yb(N))
-
-#define Init2(Y1, Y2) do { make_blank(Y1); make_blank(Y2); } while (0)
-#define Init3(Y1, Y2, Y3) \
- do { make_blank(Y1); make_blank(Y2); make_blank(Y3); } while (0)
-
-#define MakeFun(FunP, NumFree) \
- do { \
- SWAPOUT; \
- reg[0] = r(0); \
- r(0) = new_fun(c_p, reg, (ErlFunEntry *) FunP, NumFree); \
- SWAPIN; \
- } while (0)
-
-#define PutTuple(Dst, Arity) \
- do { \
- Dst = make_tuple(HTOP); \
- pt_arity = (Arity); \
- } while (0)
+#define r(N) x(N)
+#define Q(N) (N*sizeof(Eterm *))
+#define l(N) (freg[N].fd)
/*
* Check that we haven't used the reductions and jump to function pointed to by
@@ -460,8 +256,8 @@ extern int count_instructions;
#define DispatchMacro() \
do { \
- BeamInstr* dis_next; \
- dis_next = (BeamInstr *) *I; \
+ BeamInstr dis_next; \
+ dis_next = *I; \
CHECK_ARGS(I); \
if (FCALLS > 0 || FCALLS > neg_o_reds) { \
FCALLS--; \
@@ -469,12 +265,12 @@ extern int count_instructions;
} else { \
goto context_switch; \
} \
 } while (0)
#define DispatchMacroFun() \
do { \
- BeamInstr* dis_next; \
- dis_next = (BeamInstr *) *I; \
+ BeamInstr dis_next; \
+ dis_next = *I; \
CHECK_ARGS(I); \
if (FCALLS > 0 || FCALLS > neg_o_reds) { \
FCALLS--; \
@@ -484,23 +280,23 @@ extern int count_instructions;
} \
} while (0)
-#define DispatchMacrox() \
- do { \
- if (FCALLS > 0) { \
- Eterm* dis_next; \
- SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]); \
- dis_next = (Eterm *) *I; \
- FCALLS--; \
- CHECK_ARGS(I); \
- Goto(dis_next); \
- } else if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p) \
- && FCALLS > neg_o_reds) { \
- goto save_calls1; \
- } else { \
- SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]); \
- CHECK_ARGS(I); \
- goto context_switch; \
- } \
+#define DispatchMacrox() \
+ do { \
+ if (FCALLS > 0) { \
+ BeamInstr dis_next; \
+ SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]); \
+ dis_next = *I; \
+ FCALLS--; \
+ CHECK_ARGS(I); \
+ Goto(dis_next); \
+ } else if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p) \
+ && FCALLS > neg_o_reds) { \
+ goto save_calls1; \
+ } else { \
+ SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]); \
+ CHECK_ARGS(I); \
+ goto context_switch; \
+ } \
} while (0)
#ifdef DEBUG
@@ -519,417 +315,59 @@ extern int count_instructions;
# define Dispatchfun() DispatchMacroFun()
#endif
-#define Self(R) R = c_p->common.id
-#define Node(R) R = erts_this_node->sysname
-
#define Arg(N) I[(N)+1]
-#define Next(N) \
- I += (N) + 1; \
- ASSERT(VALID_INSTR(*I)); \
- Goto(*I)
-
-#define PreFetch(N, Dst) do { Dst = (BeamInstr *) *(I + N + 1); } while (0)
-#define NextPF(N, Dst) \
- I += N + 1; \
- ASSERT(VALID_INSTR(Dst)); \
- Goto(Dst)
-
-#define GetR(pos, tr) \
- do { \
- tr = Arg(pos); \
- switch (beam_reg_tag(tr)) { \
- case R_REG_DEF: tr = r(0); break; \
- case X_REG_DEF: tr = xb(x_reg_offset(tr)); break; \
- case Y_REG_DEF: ASSERT(y_reg_offset(tr) >= 1); tr = yb(y_reg_offset(tr)); break; \
- } \
- CHECK_TERM(tr); \
- } while (0)
-
-#define GetArg1(N, Dst) GetR((N), Dst)
-
-#define GetArg2(N, Dst1, Dst2) \
- do { \
- GetR(N, Dst1); \
- GetR((N)+1, Dst2); \
- } while (0)
-#define PutList(H, T, Dst, Store) \
- do { \
- HTOP[0] = (H); HTOP[1] = (T); \
- Store(make_list(HTOP), Dst); \
- HTOP += 2; \
- } while (0)
-
-#define Move(Src, Dst, Store) \
- do { \
- Eterm term = (Src); \
- Store(term, Dst); \
+#define GetR(pos, tr) \
+ do { \
+ tr = Arg(pos); \
+ switch (loader_tag(tr)) { \
+ case LOADER_X_REG: \
+ tr = x(loader_x_reg_index(tr)); \
+ break; \
+ case LOADER_Y_REG: \
+ ASSERT(loader_y_reg_index(tr) >= 1); \
+ tr = y(loader_y_reg_index(tr)); \
+ break; \
+ } \
+ CHECK_TERM(tr); \
} while (0)
-#define Move2(src1, dst1, src2, dst2) dst1 = (src1); dst2 = (src2)
-
-#define MoveGenDest(src, dstp) \
- if ((dstp) == NULL) { r(0) = (src); } else { *(dstp) = src; }
-
-#define MoveReturn(Src, Dest) \
- (Dest) = (Src); \
- I = c_p->cp; \
- ASSERT(VALID_INSTR(*c_p->cp)); \
- c_p->cp = 0; \
- CHECK_TERM(r(0)); \
- Goto(*I)
-
-#define DeallocateReturn(Deallocate) \
- do { \
- int words_to_pop = (Deallocate); \
- SET_I((BeamInstr *) cp_val(*E)); \
- E = ADD_BYTE_OFFSET(E, words_to_pop); \
- CHECK_TERM(r(0)); \
- Goto(*I); \
- } while (0)
-
-#define MoveDeallocateReturn(Src, Dest, Deallocate) \
- (Dest) = (Src); \
- DeallocateReturn(Deallocate)
-
-#define MoveCall(Src, Dest, CallDest, Size) \
- (Dest) = (Src); \
- SET_CP(c_p, I+Size+1); \
- SET_I((BeamInstr *) CallDest); \
- Dispatch();
-
-#define MoveCallLast(Src, Dest, CallDest, Deallocate) \
- (Dest) = (Src); \
- RESTORE_CP(E); \
- E = ADD_BYTE_OFFSET(E, (Deallocate)); \
- SET_I((BeamInstr *) CallDest); \
- Dispatch();
-
-#define MoveCallOnly(Src, Dest, CallDest) \
- (Dest) = (Src); \
- SET_I((BeamInstr *) CallDest); \
- Dispatch();
-
-#define MoveJump(Src) \
- r(0) = (Src); \
- SET_I((BeamInstr *) Arg(0)); \
- Goto(*I);
-
-#define GetList(Src, H, T) do { \
- Eterm* tmp_ptr = list_val(Src); \
- H = CAR(tmp_ptr); \
- T = CDR(tmp_ptr); } while (0)
-
-#define GetTupleElement(Src, Element, Dest) \
- do { \
- tmp_arg1 = (Eterm) COMPRESS_POINTER(((unsigned char *) tuple_val(Src)) + \
- (Element)); \
- (Dest) = (*(Eterm *) EXPAND_POINTER(tmp_arg1)); \
- } while (0)
-
-#define ExtractNextElement(Dest) \
- tmp_arg1 += sizeof(Eterm); \
- (Dest) = (* (Eterm *) (((unsigned char *) EXPAND_POINTER(tmp_arg1))))
-
-#define ExtractNextElement2(Dest) \
- do { \
- Eterm* ene_dstp = &(Dest); \
- ene_dstp[0] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[1]; \
- ene_dstp[1] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[2]; \
- tmp_arg1 += sizeof(Eterm) + sizeof(Eterm); \
- } while (0)
-
-#define ExtractNextElement3(Dest) \
- do { \
- Eterm* ene_dstp = &(Dest); \
- ene_dstp[0] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[1]; \
- ene_dstp[1] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[2]; \
- ene_dstp[2] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[3]; \
- tmp_arg1 += 3*sizeof(Eterm); \
- } while (0)
-
-#define ExtractNextElement4(Dest) \
- do { \
- Eterm* ene_dstp = &(Dest); \
- ene_dstp[0] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[1]; \
- ene_dstp[1] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[2]; \
- ene_dstp[2] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[3]; \
- ene_dstp[3] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[4]; \
- tmp_arg1 += 4*sizeof(Eterm); \
- } while (0)
-
-#define ExtractElement(Element, Dest) \
- do { \
- tmp_arg1 += (Element); \
- (Dest) = (* (Eterm *) EXPAND_POINTER(tmp_arg1)); \
- } while (0)
-
-#define EqualImmed(X, Y, Action) if (X != Y) { Action; }
-#define NotEqualImmed(X, Y, Action) if (X == Y) { Action; }
-
-#define IsFloat(Src, Fail) if (is_not_float(Src)) { Fail; }
-
-#define IsInteger(Src, Fail) if (is_not_integer(Src)) { Fail; }
-
-#define IsNumber(X, Fail) if (is_not_integer(X) && is_not_float(X)) { Fail; }
-
-#define IsAtom(Src, Fail) if (is_not_atom(Src)) { Fail; }
-
-#define IsIntegerAllocate(Src, Need, Alive, Fail) \
- if (is_not_integer(Src)) { Fail; } \
- A(Need, Alive)
-
-#define IsNil(Src, Fail) if (is_not_nil(Src)) { Fail; }
-
-#define IsList(Src, Fail) if (is_not_list(Src) && is_not_nil(Src)) { Fail; }
-
-#define IsNonemptyList(Src, Fail) if (is_not_list(Src)) { Fail; }
-
-#define IsNonemptyListAllocate(Src, Need, Alive, Fail) \
- if (is_not_list(Src)) { Fail; } \
- A(Need, Alive)
-
-#define IsNonemptyListTestHeap(Src, Need, Alive, Fail) \
- if (is_not_list(Src)) { Fail; } \
- TestHeap(Need, Alive)
-
-#define IsTuple(X, Action) if (is_not_tuple(X)) Action
-
-#define IsArity(Pointer, Arity, Fail) \
- if (*(Eterm *) \
- EXPAND_POINTER(tmp_arg1 = (Eterm) \
- COMPRESS_POINTER(tuple_val(Pointer))) != (Arity)) \
- { \
- Fail; \
- }
-
-#define IsMap(Src, Fail) if (is_not_map(Src)) { Fail; }
-
-#define HasMapField(Src, Key, Fail) if (has_not_map_field(Src, Key)) { Fail; }
+#define PUT_TERM_REG(term, desc) \
+do { \
+ switch (loader_tag(desc)) { \
+ case LOADER_X_REG: \
+ x(loader_x_reg_index(desc)) = (term); \
+ break; \
+ case LOADER_Y_REG: \
+ y(loader_y_reg_index(desc)) = (term); \
+ break; \
+ default: \
+ ASSERT(0); \
+ break; \
+ } \
+} while(0)
-#define GetMapElement(Src, Key, Dst, Fail) \
- do { \
- Eterm _res = get_map_element(Src, Key); \
- if (is_non_value(_res)) { \
- Fail; \
- } \
- Dst = _res; \
- } while (0)
+#define DispatchReturn \
+do { \
+ if (FCALLS > 0 || FCALLS > neg_o_reds) { \
+ FCALLS--; \
+ Goto(*I); \
+ } \
+ else { \
+ c_p->current = NULL; \
+ c_p->arity = 1; \
+ goto context_switch3; \
+ } \
+} while (0)
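
(Editorial note on the double test above, assuming CONTEXT_REDS is 4000:
without call saving, neg_o_reds is 0 and FCALLS counts down from the
reductions left, yielding at 0; with call saving active, FCALLS starts at
reds - CONTEXT_REDS, so "FCALLS > neg_o_reds" is the live test and the
process yields when FCALLS sinks to -CONTEXT_REDS.)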
-#define IsFunction(X, Action) \
- do { \
- if ( !(is_any_fun(X)) ) { \
- Action; \
- } \
- } while (0)
-
-#define IsFunction2(F, A, Action) \
- do { \
- if (erl_is_function(c_p, F, A) != am_true ) { \
- Action; \
- } \
- } while (0)
-
-#define IsTupleOfArity(Src, Arity, Fail) \
- do { \
- if (is_not_tuple(Src) || \
- *(Eterm *) \
- EXPAND_POINTER(tmp_arg1 = \
- (Eterm) COMPRESS_POINTER(tuple_val(Src))) != Arity) { \
- Fail; \
- } \
- } while (0)
-
-#define IsBoolean(X, Fail) if ((X) != am_true && (X) != am_false) { Fail; }
-
-#define IsBinary(Src, Fail) \
- if (is_not_binary(Src) || binary_bitsize(Src) != 0) { Fail; }
-
-#define IsBitstring(Src, Fail) \
- if (is_not_binary(Src)) { Fail; }
-
-#if defined(ARCH_64) && !HALFWORD_HEAP
-#define BsSafeMul(A, B, Fail, Target) \
- do { Uint64 _res = (A) * (B); \
- if (_res / B != A) { Fail; } \
- Target = _res; \
- } while (0)
+#ifdef DEBUG
+/* Better static type testing by the C compiler */
+# define BEAM_IS_TUPLE(Src) is_tuple(Src)
#else
-#define BsSafeMul(A, B, Fail, Target) \
- do { Uint64 _res = (Uint64)(A) * (Uint64)(B); \
- if ((_res >> (8*sizeof(Uint))) != 0) { Fail; } \
- Target = _res; \
- } while (0)
+/* Better performance */
+# define BEAM_IS_TUPLE(Src) is_boxed(Src)
#endif
-#define BsGetFieldSize(Bits, Unit, Fail, Target) \
- do { \
- Sint _signed_size; Uint _uint_size; \
- if (is_small(Bits)) { \
- _signed_size = signed_val(Bits); \
- if (_signed_size < 0) { Fail; } \
- _uint_size = (Uint) _signed_size; \
- } else { \
- if (!term_to_Uint(Bits, &temp_bits)) { Fail; } \
- _uint_size = temp_bits; \
- } \
- BsSafeMul(_uint_size, Unit, Fail, Target); \
- } while (0)
-
-#define BsGetUncheckedFieldSize(Bits, Unit, Fail, Target) \
- do { \
- Sint _signed_size; Uint _uint_size; \
- if (is_small(Bits)) { \
- _signed_size = signed_val(Bits); \
- if (_signed_size < 0) { Fail; } \
- _uint_size = (Uint) _signed_size; \
- } else { \
- if (!term_to_Uint(Bits, &temp_bits)) { Fail; } \
- _uint_size = (Uint) temp_bits; \
- } \
- Target = _uint_size * Unit; \
- } while (0)
-
-#define BsGetFloat2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
- do { \
- ErlBinMatchBuffer *_mb; \
- Eterm _result; Sint _size; \
- if (!is_small(Sz) || (_size = unsigned_val(Sz)) > 64) { Fail; } \
- _size *= ((Flags) >> 3); \
- TestHeap(FLOAT_SIZE_OBJECT, Live); \
- _mb = ms_matchbuffer(Ms); \
- LIGHT_SWAPOUT; \
- _result = erts_bs_get_float_2(c_p, _size, (Flags), _mb); \
- LIGHT_SWAPIN; \
- HEAP_SPACE_VERIFIED(0); \
- if (is_non_value(_result)) { Fail; } \
- else { Store(_result, Dst); } \
- } while (0)
-
-#define BsGetBinaryImm_2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
- do { \
- ErlBinMatchBuffer *_mb; \
- Eterm _result; \
- TestHeap(heap_bin_size(ERL_ONHEAP_BIN_LIMIT), Live); \
- _mb = ms_matchbuffer(Ms); \
- LIGHT_SWAPOUT; \
- _result = erts_bs_get_binary_2(c_p, (Sz), (Flags), _mb); \
- LIGHT_SWAPIN; \
- HEAP_SPACE_VERIFIED(0); \
- if (is_non_value(_result)) { Fail; } \
- else { Store(_result, Dst); } \
- } while (0)
-
-#define BsGetBinary_2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
- do { \
- ErlBinMatchBuffer *_mb; \
- Eterm _result; Uint _size; \
- BsGetFieldSize(Sz, ((Flags) >> 3), Fail, _size); \
- TestHeap(ERL_SUB_BIN_SIZE, Live); \
- _mb = ms_matchbuffer(Ms); \
- LIGHT_SWAPOUT; \
- _result = erts_bs_get_binary_2(c_p, _size, (Flags), _mb); \
- LIGHT_SWAPIN; \
- HEAP_SPACE_VERIFIED(0); \
- if (is_non_value(_result)) { Fail; } \
- else { Store(_result, Dst); } \
- } while (0)
-
-#define BsGetBinaryAll_2(Ms, Live, Unit, Dst, Store, Fail) \
- do { \
- ErlBinMatchBuffer *_mb; \
- Eterm _result; \
- TestHeap(ERL_SUB_BIN_SIZE, Live); \
- _mb = ms_matchbuffer(Ms); \
- if (((_mb->size - _mb->offset) % Unit) == 0) { \
- LIGHT_SWAPOUT; \
- _result = erts_bs_get_binary_all_2(c_p, _mb); \
- LIGHT_SWAPIN; \
- HEAP_SPACE_VERIFIED(0); \
- ASSERT(is_value(_result)); \
- Store(_result, Dst); \
- } else { \
- HEAP_SPACE_VERIFIED(0); \
- Fail; } \
- } while (0)
-
-#define BsSkipBits2(Ms, Bits, Unit, Fail) \
- do { \
- ErlBinMatchBuffer *_mb; \
- size_t new_offset; \
- Uint _size; \
- _mb = ms_matchbuffer(Ms); \
- BsGetFieldSize(Bits, Unit, Fail, _size); \
- new_offset = _mb->offset + _size; \
- if (new_offset <= _mb->size) { _mb->offset = new_offset; } \
- else { Fail; } \
- } while (0)
-
-#define BsSkipBitsAll2(Ms, Unit, Fail) \
- do { \
- ErlBinMatchBuffer *_mb; \
- _mb = ms_matchbuffer(Ms); \
- if (((_mb->size - _mb->offset) % Unit) == 0) {_mb->offset = _mb->size; } \
- else { Fail; } \
- } while (0)
-
-#define BsSkipBitsImm2(Ms, Bits, Fail) \
- do { \
- ErlBinMatchBuffer *_mb; \
- size_t new_offset; \
- _mb = ms_matchbuffer(Ms); \
- new_offset = _mb->offset + (Bits); \
- if (new_offset <= _mb->size) { _mb->offset = new_offset; } \
- else { Fail; } \
- } while (0)
-
-#define NewBsPutIntegerImm(Sz, Flags, Src) \
- do { \
- if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3((Src), (Sz), (Flags)))) { goto badarg; } \
- } while (0)
-
-#define NewBsPutInteger(Sz, Flags, Src) \
- do { \
- Sint _size; \
- BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \
- if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3((Src), _size, (Flags)))) \
- { goto badarg; } \
- } while (0)
-
-#define NewBsPutFloatImm(Sz, Flags, Src) \
- do { \
- if (!erts_new_bs_put_float(c_p, (Src), (Sz), (Flags))) { goto badarg; } \
- } while (0)
-
-#define NewBsPutFloat(Sz, Flags, Src) \
- do { \
- Sint _size; \
- BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \
- if (!erts_new_bs_put_float(c_p, (Src), _size, (Flags))) { goto badarg; } \
- } while (0)
-
-#define NewBsPutBinary(Sz, Flags, Src) \
- do { \
- Sint _size; \
- BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \
- if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2((Src), _size))) { goto badarg; } \
- } while (0)
-
-#define NewBsPutBinaryImm(Sz, Src) \
- do { \
- if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2((Src), (Sz)))) { goto badarg; } \
- } while (0)
-
-#define NewBsPutBinaryAll(Src, Unit) \
- do { \
- if (!erts_new_bs_put_binary_all(ERL_BITS_ARGS_2((Src), (Unit)))) { goto badarg; } \
- } while (0)
-
-
-#define IsPort(Src, Fail) if (is_not_port(Src)) { Fail; }
-#define IsPid(Src, Fail) if (is_not_pid(Src)) { Fail; }
-#define IsRef(Src, Fail) if (is_not_ref(Src)) { Fail; }
-
/*
* process_main() is already huge, so we want to avoid inlining
 * into it, especially functions that are seldom used.
@@ -945,27 +383,32 @@ extern int count_instructions;
* The following functions are called directly by process_main().
* Don't inline them.
*/
-static BifFunction translate_gc_bif(void* gcf) NOINLINE;
+static void init_emulator_finish(void) NOINLINE;
+static ErtsCodeMFA *ubif2mfa(void* uf) NOINLINE;
+static ErtsCodeMFA *gcbif2mfa(void* gcf) NOINLINE;
static BeamInstr* handle_error(Process* c_p, BeamInstr* pc,
- Eterm* reg, BifFunction bf) NOINLINE;
-static BeamInstr* call_error_handler(Process* p, BeamInstr* ip,
+ Eterm* reg, ErtsCodeMFA* bif_mfa) NOINLINE;
+static BeamInstr* call_error_handler(Process* p, ErtsCodeMFA* mfa,
Eterm* reg, Eterm func) NOINLINE;
-static BeamInstr* fixed_apply(Process* p, Eterm* reg, Uint arity) NOINLINE;
-static BeamInstr* apply(Process* p, Eterm module, Eterm function,
- Eterm args, Eterm* reg) NOINLINE;
+static BeamInstr* fixed_apply(Process* p, Eterm* reg, Uint arity,
+ BeamInstr *I, Uint offs) NOINLINE;
+static BeamInstr* apply(Process* p, Eterm* reg,
+ BeamInstr *I, Uint offs) NOINLINE;
static BeamInstr* call_fun(Process* p, int arity,
Eterm* reg, Eterm args) NOINLINE;
static BeamInstr* apply_fun(Process* p, Eterm fun,
Eterm args, Eterm* reg) NOINLINE;
static Eterm new_fun(Process* p, Eterm* reg,
ErlFunEntry* fe, int num_free) NOINLINE;
-static Eterm new_map(Process* p, Eterm* reg, BeamInstr* I) NOINLINE;
-static Eterm update_map_assoc(Process* p, Eterm* reg,
- Eterm map, BeamInstr* I) NOINLINE;
-static Eterm update_map_exact(Process* p, Eterm* reg,
- Eterm map, BeamInstr* I) NOINLINE;
-static int has_not_map_field(Eterm map, Eterm key);
+static Eterm new_map(Process* p, Eterm* reg, Uint live, Uint n, BeamInstr* ptr) NOINLINE;
+static Eterm new_small_map_lit(Process* p, Eterm* reg, Eterm keys_literal,
+ Uint live, BeamInstr* ptr) NOINLINE;
+static Eterm update_map_assoc(Process* p, Eterm* reg, Uint live,
+ Uint n, BeamInstr* new_p) NOINLINE;
+static Eterm update_map_exact(Process* p, Eterm* reg, Uint live,
+ Uint n, Eterm* new_p) NOINLINE;
static Eterm get_map_element(Eterm map, Eterm key);
+static Eterm get_map_element_hash(Eterm map, Eterm key, Uint32 hx);
/*
* Functions not directly called by process_main(). OK to inline.
@@ -974,14 +417,14 @@ static BeamInstr* next_catch(Process* c_p, Eterm *reg);
static void terminate_proc(Process* c_p, Eterm Value);
static Eterm add_stacktrace(Process* c_p, Eterm Value, Eterm exc);
static void save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg,
- BifFunction bf, Eterm args);
+ ErtsCodeMFA *bif_mfa, Eterm args);
static struct StackTrace * get_trace_from_exc(Eterm exc);
static Eterm make_arglist(Process* c_p, Eterm* reg, int a);
void
init_emulator(void)
{
- process_main();
+ process_main(0, 0);
}
/*
@@ -990,23 +433,23 @@ init_emulator(void)
*/
#if defined(__GNUC__) && defined(sparc) && !defined(DEBUG)
-# define REG_x0 asm("%l0")
# define REG_xregs asm("%l1")
# define REG_htop asm("%l2")
# define REG_stop asm("%l3")
# define REG_I asm("%l4")
# define REG_fcalls asm("%l5")
-# define REG_tmp_arg1 asm("%l6")
-# define REG_tmp_arg2 asm("%l7")
+#elif defined(__GNUC__) && defined(__amd64__) && !defined(DEBUG)
+# define REG_xregs asm("%r12")
+# define REG_htop
+# define REG_stop asm("%r13")
+# define REG_I asm("%rbx")
+# define REG_fcalls asm("%r14")
#else
-# define REG_x0
# define REG_xregs
# define REG_htop
# define REG_stop
# define REG_I
# define REG_fcalls
-# define REG_tmp_arg1
-# define REG_tmp_arg2
#endif
#ifdef USE_VM_PROBES
@@ -1015,83 +458,118 @@ init_emulator(void)
#ifdef USE_VM_CALL_PROBES
-#define DTRACE_LOCAL_CALL(p, m, f, a) \
+#define DTRACE_LOCAL_CALL(p, mfa) \
if (DTRACE_ENABLED(local_function_entry)) { \
DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
int depth = STACK_START(p) - STACK_TOP(p); \
- dtrace_fun_decode(p, m, f, a, \
- process_name, mfa); \
- DTRACE3(local_function_entry, process_name, mfa, depth); \
+ dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
+ DTRACE3(local_function_entry, process_name, mfa_buf, depth); \
}
-#define DTRACE_GLOBAL_CALL(p, m, f, a) \
+#define DTRACE_GLOBAL_CALL(p, mfa) \
if (DTRACE_ENABLED(global_function_entry)) { \
DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
int depth = STACK_START(p) - STACK_TOP(p); \
- dtrace_fun_decode(p, m, f, a, \
- process_name, mfa); \
- DTRACE3(global_function_entry, process_name, mfa, depth); \
+ dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
+ DTRACE3(global_function_entry, process_name, mfa_buf, depth); \
}
-#define DTRACE_RETURN(p, m, f, a) \
+#define DTRACE_RETURN(p, mfa) \
if (DTRACE_ENABLED(function_return)) { \
DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
int depth = STACK_START(p) - STACK_TOP(p); \
- dtrace_fun_decode(p, m, f, a, \
- process_name, mfa); \
- DTRACE3(function_return, process_name, mfa, depth); \
- }
-
-#define DTRACE_BIF_ENTRY(p, m, f, a) \
- if (DTRACE_ENABLED(bif_entry)) { \
- DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
- dtrace_fun_decode(p, m, f, a, \
- process_name, mfa); \
- DTRACE2(bif_entry, process_name, mfa); \
- }
-
-#define DTRACE_BIF_RETURN(p, m, f, a) \
- if (DTRACE_ENABLED(bif_return)) { \
- DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
- dtrace_fun_decode(p, m, f, a, \
- process_name, mfa); \
- DTRACE2(bif_return, process_name, mfa); \
- }
-
-#define DTRACE_NIF_ENTRY(p, m, f, a) \
- if (DTRACE_ENABLED(nif_entry)) { \
- DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
- dtrace_fun_decode(p, m, f, a, \
- process_name, mfa); \
- DTRACE2(nif_entry, process_name, mfa); \
- }
-
-#define DTRACE_NIF_RETURN(p, m, f, a) \
- if (DTRACE_ENABLED(nif_return)) { \
- DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
- dtrace_fun_decode(p, m, f, a, \
- process_name, mfa); \
- DTRACE2(nif_return, process_name, mfa); \
- }
+ dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
+ DTRACE3(function_return, process_name, mfa_buf, depth); \
+ }
+
+#define DTRACE_BIF_ENTRY(p, mfa) \
+ if (DTRACE_ENABLED(bif_entry)) { \
+ DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
+ dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
+ DTRACE2(bif_entry, process_name, mfa_buf); \
+ }
+
+#define DTRACE_BIF_RETURN(p, mfa) \
+ if (DTRACE_ENABLED(bif_return)) { \
+ DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
+ dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
+ DTRACE2(bif_return, process_name, mfa_buf); \
+ }
+
+#define DTRACE_NIF_ENTRY(p, mfa) \
+ if (DTRACE_ENABLED(nif_entry)) { \
+ DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
+ dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
+ DTRACE2(nif_entry, process_name, mfa_buf); \
+ }
+
+#define DTRACE_NIF_RETURN(p, mfa) \
+ if (DTRACE_ENABLED(nif_return)) { \
+ DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
+ dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
+ DTRACE2(nif_return, process_name, mfa_buf); \
+ }
+
+#define DTRACE_GLOBAL_CALL_FROM_EXPORT(p,e) \
+ do { \
+ if (DTRACE_ENABLED(global_function_entry)) { \
+ BeamInstr* fp = (BeamInstr *) (((Export *) (e))->addressv[erts_active_code_ix()]); \
+ DTRACE_GLOBAL_CALL((p), erts_code_to_codemfa(fp)); \
+ } \
+ } while(0)
+
+#define DTRACE_RETURN_FROM_PC(p) \
+ do { \
+ ErtsCodeMFA* cmfa; \
+ if (DTRACE_ENABLED(function_return) && (cmfa = find_function_from_pc((p)->cp))) { \
+ DTRACE_RETURN((p), cmfa); \
+ } \
+ } while(0)
#else /* USE_VM_PROBES */
+#define DTRACE_LOCAL_CALL(p, mfa) do {} while (0)
+#define DTRACE_GLOBAL_CALL(p, mfa) do {} while (0)
+#define DTRACE_GLOBAL_CALL_FROM_EXPORT(p, e) do {} while (0)
+#define DTRACE_RETURN(p, mfa) do {} while (0)
+#define DTRACE_RETURN_FROM_PC(p) do {} while (0)
+#define DTRACE_BIF_ENTRY(p, mfa) do {} while (0)
+#define DTRACE_BIF_RETURN(p, mfa) do {} while (0)
+#define DTRACE_NIF_ENTRY(p, mfa) do {} while (0)
+#define DTRACE_NIF_RETURN(p, mfa) do {} while (0)
+#endif /* USE_VM_PROBES */
-#define DTRACE_LOCAL_CALL(p, m, f, a) do {} while (0)
-#define DTRACE_GLOBAL_CALL(p, m, f, a) do {} while (0)
-#define DTRACE_RETURN(p, m, f, a) do {} while (0)
-#define DTRACE_BIF_ENTRY(p, m, f, a) do {} while (0)
-#define DTRACE_BIF_RETURN(p, m, f, a) do {} while (0)
-#define DTRACE_NIF_ENTRY(p, m, f, a) do {} while (0)
-#define DTRACE_NIF_RETURN(p, m, f, a) do {} while (0)
+#ifdef DEBUG
+#define ERTS_DBG_CHK_REDS(P, FC) \
+ do { \
+ if (ERTS_PROC_GET_SAVED_CALLS_BUF((P))) { \
+ ASSERT(FC <= 0); \
+	    ASSERT(erts_proc_sched_data((P))->virtual_reds		\
+ <= 0 - (FC)); \
+ } \
+ else { \
+ ASSERT(FC <= CONTEXT_REDS); \
+	    ASSERT(erts_proc_sched_data((P))->virtual_reds		\
+ <= CONTEXT_REDS - (FC)); \
+ } \
+} while (0)
+#else
+#define ERTS_DBG_CHK_REDS(P, FC)
+#endif
-#endif /* USE_VM_PROBES */
+#ifdef NO_FPE_SIGNALS
+# define ERTS_NO_FPE_CHECK_INIT ERTS_FP_CHECK_INIT
+# define ERTS_NO_FPE_ERROR ERTS_FP_ERROR
+#else
+# define ERTS_NO_FPE_CHECK_INIT(p)
+# define ERTS_NO_FPE_ERROR(p, a, b)
+#endif
/*
* process_main() is called twice:
@@ -1099,7 +577,7 @@ init_emulator(void)
* the instructions' C labels to the loader.
* The second call starts execution of BEAM code. This call never returns.
*/
-void process_main(void)
+void process_main(Eterm * x_reg_array, FloatDef* f_reg_array)
{
static int init_done = 0;
Process* c_p = NULL;
@@ -1108,15 +586,10 @@ void process_main(void)
ERTS_DECLARE_DUMMY(Eterm pid);
#endif
- /*
- * X register zero; also called r(0)
- */
- register Eterm x0 REG_x0 = NIL;
-
    /* Pointer to X registers: x(0)..x(N). The dedicated x0 register
     * variable is gone, so x(0) now lives in reg[0] like the others.
     */
- register Eterm* reg REG_xregs = NULL;
+ register Eterm* reg REG_xregs = x_reg_array;
/*
* Top of heap (next free location); grows upwards.
@@ -1140,45 +613,31 @@ void process_main(void)
register Sint FCALLS REG_fcalls = 0;
/*
- * Temporaries used for picking up arguments for instructions.
- */
- register Eterm tmp_arg1 REG_tmp_arg1 = NIL;
- register Eterm tmp_arg2 REG_tmp_arg2 = NIL;
-#if HEAP_ON_C_STACK
- Eterm tmp_big[2]; /* Temporary buffer for small bignums if HEAP_ON_C_STACK. */
-#else
- Eterm *tmp_big; /* Temporary buffer for small bignums if !HEAP_ON_C_STACK. */
-#endif
-
- /*
* X registers and floating point registers are located in
* scheduler specific data.
*/
- register FloatDef *freg;
+ register FloatDef *freg = f_reg_array;
/*
* For keeping the negative old value of 'reds' when call saving is active.
*/
int neg_o_reds = 0;
- Eterm (*arith_func)(Process* p, Eterm* reg, Uint live);
-
-#ifndef NO_JUMP_TABLE
- static void* opcodes[] = { DEFINE_OPCODES };
#ifdef ERTS_OPCODE_COUNTER_SUPPORT
static void* counting_opcodes[] = { DEFINE_COUNTING_OPCODES };
-#endif
#else
- int Go;
+#ifndef NO_JUMP_TABLE
+ static void* opcodes[] = { DEFINE_OPCODES };
+#else
+ register BeamInstr Go;
+#endif
#endif
-
- Uint temp_bits; /* Temporary used by BsSkipBits2 & BsGetInteger2 */
-
- Eterm pt_arity; /* Used by do_put_tuple */
Uint64 start_time = 0; /* Monitor long schedule */
BeamInstr* start_time_i = NULL;
+ ERTS_MSACC_DECLARE_CACHE_X() /* a cached value of the tsd pointer for msacc */
+
ERL_BITS_DECLARE_STATEP; /* Has to be last declaration */
@@ -1190,7 +649,7 @@ void process_main(void)
* Note: c_p->arity must be set to reflect the number of useful terms in
* c_p->arg_reg before calling the scheduler.
*/
- if (!init_done) {
+ if (ERTS_UNLIKELY(!init_done)) {
/* This should only be reached during the init phase when only the main
* process is running. I.e. there is no race for init_done.
*/
@@ -1204,56 +663,53 @@ void process_main(void)
goto do_schedule1;
do_schedule:
- reds_used = REDS_IN(c_p) - FCALLS;
+ ASSERT(c_p->arity < 6);
+ ASSERT(c_p->debug_reds_in == REDS_IN(c_p));
+ if (!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
+ reds_used = REDS_IN(c_p) - FCALLS;
+ else
+ reds_used = REDS_IN(c_p) - (CONTEXT_REDS + FCALLS);
+ ASSERT(reds_used >= 0);
do_schedule1:
if (start_time != 0) {
Sint64 diff = erts_timestamp_millis() - start_time;
- if (diff > 0 && (Uint) diff > erts_system_monitor_long_schedule
-#ifdef ERTS_DIRTY_SCHEDULERS
- && !ERTS_SCHEDULER_IS_DIRTY(c_p->scheduler_data)
-#endif
- ) {
- BeamInstr *inptr = find_function_from_pc(start_time_i);
- BeamInstr *outptr = find_function_from_pc(c_p->i);
+ if (diff > 0 && (Uint) diff > erts_system_monitor_long_schedule) {
+ ErtsCodeMFA *inptr = find_function_from_pc(start_time_i);
+ ErtsCodeMFA *outptr = find_function_from_pc(c_p->i);
monitor_long_schedule_proc(c_p,inptr,outptr,(Uint) diff);
}
}
PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
-#if HALFWORD_HEAP
- ASSERT(erts_get_scheduler_data()->num_tmp_heap_used == 0);
-#endif
+ ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- c_p = schedule(c_p, reds_used);
+ c_p = erts_schedule(NULL, c_p, reds_used);
+ ASSERT(!(c_p->flags & F_HIPE_MODE));
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
start_time = 0;
#ifdef DEBUG
- pid = c_p->common.id; /* Save for debugging purpouses */
+ pid = c_p->common.id; /* Save for debugging purposes */
#endif
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_MSACC_UPDATE_CACHE_X();
+
if (erts_system_monitor_long_schedule != 0) {
start_time = erts_timestamp_millis();
start_time_i = c_p->i;
}
- reg = ERTS_PROC_GET_SCHDATA(c_p)->x_reg_array;
- freg = ERTS_PROC_GET_SCHDATA(c_p)->f_reg_array;
-#if !HEAP_ON_C_STACK
- tmp_big = ERTS_PROC_GET_SCHDATA(c_p)->beam_emu_tmp_heap;
-#endif
ERL_BITS_RELOAD_STATEP(c_p);
{
int reds;
Eterm* argp;
- BeamInstr *next;
+ BeamInstr next;
int i;
argp = c_p->arg_reg;
- for (i = c_p->arity - 1; i > 0; i--) {
+ for (i = c_p->arity - 1; i >= 0; i--) {
reg[i] = argp[i];
CHECK_TERM(reg[i]);
}
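    /*
     * The loop now includes index 0 (an inference from the removed
     * "r(0) = c_p->arg_reg[0]" special case below): x(0) no longer
     * lives in a dedicated machine register, so it is loaded and
     * saved together with the other argument registers.
     */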
@@ -1266,23 +722,22 @@ void process_main(void)
SET_I(c_p->i);
- reds = c_p->fcalls;
- if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)
- && (ERTS_TRACE_FLAGS(c_p) & F_SENSITIVE) == 0) {
- neg_o_reds = -reds;
- FCALLS = REDS_IN(c_p) = 0;
+ REDS_IN(c_p) = reds = c_p->fcalls;
+#ifdef DEBUG
+ c_p->debug_reds_in = reds;
+#endif
+
+ if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) {
+ neg_o_reds = -CONTEXT_REDS;
+ FCALLS = neg_o_reds + reds;
} else {
neg_o_reds = 0;
- FCALLS = REDS_IN(c_p) = reds;
+ FCALLS = reds;
}
- next = (BeamInstr *) *I;
- r(0) = c_p->arg_reg[0];
-#ifdef HARDDEBUG
- if (c_p->arity > 0) {
- CHECK_TERM(r(0));
- }
-#endif
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
+
+ next = *I;
SWAPIN;
ASSERT(VALID_INSTR(next));
@@ -1295,10 +750,9 @@ void process_main(void)
if (ERTS_PROC_IS_EXITING(c_p)) {
strcpy(fun_buf, "<exiting>");
} else {
- BeamInstr *fptr = find_function_from_pc(c_p->i);
- if (fptr) {
- dtrace_fun_decode(c_p, (Eterm)fptr[0],
- (Eterm)fptr[1], (Uint)fptr[2],
+ ErtsCodeMFA *cmfa = find_function_from_pc(c_p->i);
+ if (cmfa) {
+ dtrace_fun_decode(c_p, cmfa,
NULL, fun_buf);
} else {
erts_snprintf(fun_buf, sizeof(DTRACE_CHARBUF_NAME(fun_buf)),
@@ -1319,1939 +773,8 @@ void process_main(void)
#ifdef NO_JUMP_TABLE
switch (Go) {
#endif
-#include "beam_hot.h"
-
-#define STORE_ARITH_RESULT(res) StoreBifResult(2, (res));
-#define ARITH_FUNC(name) erts_gc_##name
-
- {
- Eterm increment_reg_val;
- Eterm increment_val;
- Uint live;
- Eterm result;
-
- OpCase(i_increment_yIId):
- increment_reg_val = yb(Arg(0));
- goto do_increment;
-
- OpCase(i_increment_xIId):
- increment_reg_val = xb(Arg(0));
- goto do_increment;
-
- OpCase(i_increment_rIId):
- increment_reg_val = r(0);
- I--;
-
- do_increment:
- increment_val = Arg(1);
- if (is_small(increment_reg_val)) {
- Sint i = signed_val(increment_reg_val) + increment_val;
- ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i));
- if (MY_IS_SSMALL(i)) {
- result = make_small(i);
- store_result:
- StoreBifResult(3, result);
- }
- }
-
- live = Arg(2);
- SWAPOUT;
- reg[0] = r(0);
- reg[live] = increment_reg_val;
- reg[live+1] = make_small(increment_val);
- result = erts_gc_mixed_plus(c_p, reg, live);
- r(0) = reg[0];
- SWAPIN;
- ERTS_HOLE_CHECK(c_p);
- if (is_value(result)) {
- goto store_result;
- }
- ASSERT(c_p->freason != BADMATCH || is_value(c_p->fvalue));
- goto find_func_info;
- }
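    /*
     * Note on the slow path above (an inference from the GC-BIF calling
     * convention used elsewhere in this file, not stated here): the
     * operands are parked in reg[live] and reg[live+1] so that a
     * garbage collection triggered inside erts_gc_mixed_plus() treats
     * them as live roots and can update them if they are moved.
     */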
-
- OpCase(i_plus_jId):
- {
- Eterm result;
-
- if (is_both_small(tmp_arg1, tmp_arg2)) {
- Sint i = signed_val(tmp_arg1) + signed_val(tmp_arg2);
- ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i));
- if (MY_IS_SSMALL(i)) {
- result = make_small(i);
- STORE_ARITH_RESULT(result);
- }
-
- }
- arith_func = ARITH_FUNC(mixed_plus);
- goto do_big_arith2;
- }
-
- OpCase(i_minus_jId):
- {
- Eterm result;
-
- if (is_both_small(tmp_arg1, tmp_arg2)) {
- Sint i = signed_val(tmp_arg1) - signed_val(tmp_arg2);
- ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i));
- if (MY_IS_SSMALL(i)) {
- result = make_small(i);
- STORE_ARITH_RESULT(result);
- }
- }
- arith_func = ARITH_FUNC(mixed_minus);
- goto do_big_arith2;
- }
-
- OpCase(i_is_lt_f):
- if (CMP_GE(tmp_arg1, tmp_arg2)) {
- ClauseFail();
- }
- Next(1);
-
- OpCase(i_is_ge_f):
- if (CMP_LT(tmp_arg1, tmp_arg2)) {
- ClauseFail();
- }
- Next(1);
-
- OpCase(i_is_eq_f):
- if (CMP_NE(tmp_arg1, tmp_arg2)) {
- ClauseFail();
- }
- Next(1);
-
- OpCase(i_is_ne_f):
- if (CMP_EQ(tmp_arg1, tmp_arg2)) {
- ClauseFail();
- }
- Next(1);
-
- OpCase(i_is_eq_exact_f):
- if (!EQ(tmp_arg1, tmp_arg2)) {
- ClauseFail();
- }
- Next(1);
-
- {
- Eterm is_eq_exact_lit_val;
-
- OpCase(i_is_eq_exact_literal_xfc):
- is_eq_exact_lit_val = xb(Arg(0));
- I++;
- goto do_is_eq_exact_literal;
-
- OpCase(i_is_eq_exact_literal_yfc):
- is_eq_exact_lit_val = yb(Arg(0));
- I++;
- goto do_is_eq_exact_literal;
-
- OpCase(i_is_eq_exact_literal_rfc):
- is_eq_exact_lit_val = r(0);
-
- do_is_eq_exact_literal:
- if (!eq(Arg(1), is_eq_exact_lit_val)) {
- ClauseFail();
- }
- Next(2);
- }
-
- {
- Eterm is_ne_exact_lit_val;
-
- OpCase(i_is_ne_exact_literal_xfc):
- is_ne_exact_lit_val = xb(Arg(0));
- I++;
- goto do_is_ne_exact_literal;
-
- OpCase(i_is_ne_exact_literal_yfc):
- is_ne_exact_lit_val = yb(Arg(0));
- I++;
- goto do_is_ne_exact_literal;
-
- OpCase(i_is_ne_exact_literal_rfc):
- is_ne_exact_lit_val = r(0);
-
- do_is_ne_exact_literal:
- if (eq(Arg(1), is_ne_exact_lit_val)) {
- ClauseFail();
- }
- Next(2);
- }
-
- OpCase(i_move_call_only_fcr): {
- r(0) = Arg(1);
- }
- /* FALL THROUGH */
- OpCase(i_call_only_f): {
- SET_I((BeamInstr *) Arg(0));
- DTRACE_LOCAL_CALL(c_p, (Eterm)I[-3], (Eterm)I[-2], I[-1]);
- Dispatch();
- }
-
- OpCase(i_move_call_last_fPcr): {
- r(0) = Arg(2);
- }
- /* FALL THROUGH */
- OpCase(i_call_last_fP): {
- RESTORE_CP(E);
- E = ADD_BYTE_OFFSET(E, Arg(1));
- SET_I((BeamInstr *) Arg(0));
- DTRACE_LOCAL_CALL(c_p, (Eterm)I[-3], (Eterm)I[-2], I[-1]);
- Dispatch();
- }
-
- OpCase(i_move_call_crf): {
- r(0) = Arg(0);
- I++;
- }
- /* FALL THROUGH */
- OpCase(i_call_f): {
- SET_CP(c_p, I+2);
- SET_I((BeamInstr *) Arg(0));
- DTRACE_LOCAL_CALL(c_p, (Eterm)I[-3], (Eterm)I[-2], I[-1]);
- Dispatch();
- }
-
- OpCase(i_move_call_ext_last_ePcr): {
- r(0) = Arg(2);
- }
- /* FALL THROUGH */
- OpCase(i_call_ext_last_eP):
- RESTORE_CP(E);
- E = ADD_BYTE_OFFSET(E, Arg(1));
-
- /*
- * Note: The pointer to the export entry is never NULL; if the module
- * is not loaded, it points to code which will invoke the error handler
- * (see lb_call_error_handler below).
- */
-#ifdef USE_VM_CALL_PROBES
- if (DTRACE_ENABLED(global_function_entry)) {
- BeamInstr* fp = (BeamInstr *) (((Export *) Arg(0))->addressv[erts_active_code_ix()]);
- DTRACE_GLOBAL_CALL(c_p, (Eterm)fp[-3], (Eterm)fp[-2], fp[-1]);
- }
-#endif
- Dispatchx();
-
- OpCase(i_move_call_ext_cre): {
- r(0) = Arg(0);
- I++;
- }
- /* FALL THROUGH */
- OpCase(i_call_ext_e):
- SET_CP(c_p, I+2);
-#ifdef USE_VM_CALL_PROBES
- if (DTRACE_ENABLED(global_function_entry)) {
- BeamInstr* fp = (BeamInstr *) (((Export *) Arg(0))->addressv[erts_active_code_ix()]);
- DTRACE_GLOBAL_CALL(c_p, (Eterm)fp[-3], (Eterm)fp[-2], fp[-1]);
- }
-#endif
- Dispatchx();
-
- OpCase(i_move_call_ext_only_ecr): {
- r(0) = Arg(1);
- }
- /* FALL THROUGH */
- OpCase(i_call_ext_only_e):
-#ifdef USE_VM_CALL_PROBES
- if (DTRACE_ENABLED(global_function_entry)) {
- BeamInstr* fp = (BeamInstr *) (((Export *) Arg(0))->addressv[erts_active_code_ix()]);
- DTRACE_GLOBAL_CALL(c_p, (Eterm)fp[-3], (Eterm)fp[-2], fp[-1]);
- }
-#endif
- Dispatchx();
- OpCase(init_y): {
- BeamInstr *next;
-
- PreFetch(1, next);
- make_blank(yb(Arg(0)));
- NextPF(1, next);
- }
-
- OpCase(i_trim_I): {
- BeamInstr *next;
- Uint words;
- Uint cp;
-
- words = Arg(0);
- cp = E[0];
- PreFetch(1, next);
- E += words;
- E[0] = cp;
- NextPF(1, next);
- }
-
- OpCase(move_x1_c): {
- x(1) = Arg(0);
- Next(1);
- }
-
- OpCase(move_x2_c): {
- x(2) = Arg(0);
- Next(1);
- }
-
-
- OpCase(return): {
-#ifdef USE_VM_CALL_PROBES
- BeamInstr* fptr;
-#endif
- SET_I(c_p->cp);
-
-#ifdef USE_VM_CALL_PROBES
- if (DTRACE_ENABLED(function_return) && (fptr = find_function_from_pc(c_p->cp))) {
- DTRACE_RETURN(c_p, (Eterm)fptr[0], (Eterm)fptr[1], (Uint)fptr[2]);
- }
-#endif
- /*
-	 * We must clear the CP to make sure that a stale value does not
-	 * create a false module dependency preventing code upgrading.
- * It also means that we can use the CP in stack backtraces.
- */
- c_p->cp = 0;
- CHECK_TERM(r(0));
- HEAP_SPACE_VERIFIED(0);
- Goto(*I);
- }
-
- /*
- * Send is almost a standard call-BIF with two arguments, except for:
- * 1) It cannot be traced.
- * 2) There is no pointer to the send_2 function stored in
- * the instruction.
- */
-
- OpCase(send): {
- BeamInstr *next;
- Eterm result;
-
- PRE_BIF_SWAPOUT(c_p);
- c_p->fcalls = FCALLS - 1;
- reg[0] = r(0);
- result = erl_send(c_p, r(0), x(1));
- PreFetch(0, next);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- if (c_p->mbuf || MSO(c_p).overhead >= BIN_VHEAP_SZ(c_p)) {
- result = erts_gc_after_bif_call(c_p, result, reg, 2);
- r(0) = reg[0];
- E = c_p->stop;
- }
- HTOP = HEAP_TOP(c_p);
- FCALLS = c_p->fcalls;
- if (is_value(result)) {
- r(0) = result;
- CHECK_TERM(r(0));
- NextPF(0, next);
- } else if (c_p->freason == TRAP) {
- SET_CP(c_p, I+1);
- SET_I(c_p->i);
- SWAPIN;
- r(0) = reg[0];
- Dispatch();
- }
- goto find_func_info;
- }
-
- {
- Eterm element_index;
- Eterm element_tuple;
-
- OpCase(i_element_xjsd):
- element_tuple = xb(Arg(0));
- I++;
- goto do_element;
-
- OpCase(i_element_yjsd):
- element_tuple = yb(Arg(0));
- I++;
- goto do_element;
-
- OpCase(i_element_rjsd):
- element_tuple = r(0);
- /* Fall through */
-
- do_element:
- GetArg1(1, element_index);
- if (is_small(element_index) && is_tuple(element_tuple)) {
- Eterm* tp = tuple_val(element_tuple);
-
- if ((signed_val(element_index) >= 1) &&
- (signed_val(element_index) <= arityval(*tp))) {
- Eterm result = tp[signed_val(element_index)];
- StoreBifResult(2, result);
- }
- }
- }
- /* Fall through */
-
- OpCase(badarg_j):
- badarg:
- c_p->freason = BADARG;
- goto lb_Cl_error;
-
- {
- Eterm fast_element_tuple;
-
- OpCase(i_fast_element_rjId):
- fast_element_tuple = r(0);
-
- do_fast_element:
- if (is_tuple(fast_element_tuple)) {
- Eterm* tp = tuple_val(fast_element_tuple);
- Eterm pos = Arg(1); /* Untagged integer >= 1 */
- if (pos <= arityval(*tp)) {
- Eterm result = tp[pos];
- StoreBifResult(2, result);
- }
- }
- goto badarg;
-
- OpCase(i_fast_element_xjId):
- fast_element_tuple = xb(Arg(0));
- I++;
- goto do_fast_element;
-
- OpCase(i_fast_element_yjId):
- fast_element_tuple = yb(Arg(0));
- I++;
- goto do_fast_element;
- }
-
- OpCase(catch_yf):
- c_p->catches++;
- yb(Arg(0)) = Arg(1);
- Next(2);
-
- OpCase(catch_end_y): {
- c_p->catches--;
- make_blank(yb(Arg(0)));
- if (is_non_value(r(0))) {
- if (x(1) == am_throw) {
- r(0) = x(2);
- } else {
- if (x(1) == am_error) {
- SWAPOUT;
- x(2) = add_stacktrace(c_p, x(2), x(3));
- SWAPIN;
- }
- /* only x(2) is included in the rootset here */
- if (E - HTOP < 3 || c_p->mbuf) { /* Force GC in case add_stacktrace()
- * created heap fragments */
- SWAPOUT;
- PROCESS_MAIN_CHK_LOCKS(c_p);
- FCALLS -= erts_garbage_collect(c_p, 3, reg+2, 1);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- SWAPIN;
- }
- r(0) = TUPLE2(HTOP, am_EXIT, x(2));
- HTOP += 3;
- }
- }
- CHECK_TERM(r(0));
- Next(1);
- }
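    /*
     * For orientation (standard catch semantics, restated from the
     * code above): after catch_end, "catch throw(T)" yields T itself,
     * while "catch exit(R)" yields {'EXIT',R} and "catch error(R)"
     * yields {'EXIT',{R,Stacktrace}}, the stacktrace having been
     * attached by add_stacktrace().
     */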
-
- OpCase(try_end_y): {
- c_p->catches--;
- make_blank(yb(Arg(0)));
- if (is_non_value(r(0))) {
- r(0) = x(1);
- x(1) = x(2);
- x(2) = x(3);
- }
- Next(1);
- }
-
- /*
- * Skeleton for receive statement:
- *
- * recv_mark L1 Optional
- * call make_ref/monitor Optional
- * ...
- * recv_set L1 Optional
- * L1: <-------------------+
- * <-----------+ |
- * | |
- * loop_rec L2 ------+---+ |
- * ... | | |
- * remove_message | | |
- * jump L3 | | |
- * ... | | |
- * loop_rec_end L1 --+ | |
- * L2: <---------------+ |
- * wait L1 -----------------+ or wait_timeout
- * timeout
- *
- * L3: Code after receive...
- *
- *
- */
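    /*
     * For orientation only (an illustrative example, not taken from
     * the source): an Erlang receive such as
     *
     *     receive
     *         {ok, V} -> V
     *     after 1000 -> timeout
     *     end
     *
     * compiles to an instance of this skeleton: the match on {ok, V}
     * sits between loop_rec and remove_message, and the 'after'
     * clause is reached through wait_timeout followed by timeout.
     */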
-
- OpCase(recv_mark_f): {
- /*
-	 * Save the current position in the message buffer and
-	 * the label for the loop_rec/2 instruction for the
-	 * receive statement.
- */
- c_p->msg.mark = (BeamInstr *) Arg(0);
- c_p->msg.saved_last = c_p->msg.last;
- Next(1);
- }
-
- OpCase(i_recv_set): {
- /*
- * If the mark is valid (points to the loop_rec/2
- * instruction that follows), we know that the saved
- * position points to the first message that could
- * possibly be matched out.
- *
- * If the mark is invalid, we do nothing, meaning that
- * we will look through all messages in the message queue.
- */
- if (c_p->msg.mark == (BeamInstr *) (I+1)) {
- c_p->msg.save = c_p->msg.saved_last;
- }
- I++;
- /* Fall through to the loop_rec/2 instruction */
- }
-
- /*
- * Pick up the next message and place it in x(0).
- * If no message, jump to a wait or wait_timeout instruction.
- */
- OpCase(i_loop_rec_fr):
- {
- BeamInstr *next;
- ErlMessage* msgp;
-
- loop_rec__:
-
- PROCESS_MAIN_CHK_LOCKS(c_p);
-
- msgp = PEEK_MESSAGE(c_p);
-
- if (!msgp) {
-#ifdef ERTS_SMP
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
-	    /* Make sure messages won't pass exit signals... */
- if (ERTS_PROC_PENDING_EXIT(c_p)) {
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
- SWAPOUT;
- goto do_schedule; /* Will be rescheduled for exit */
- }
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
- msgp = PEEK_MESSAGE(c_p);
- if (msgp)
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
- else
-#endif
- {
- SET_I((BeamInstr *) Arg(0));
- Goto(*I); /* Jump to a wait or wait_timeout instruction */
- }
- }
- ErtsMoveMsgAttachmentIntoProc(msgp, c_p, E, HTOP, FCALLS,
- {
- SWAPOUT;
- reg[0] = r(0);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- },
- {
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- r(0) = reg[0];
- SWAPIN;
- });
- if (is_non_value(ERL_MESSAGE_TERM(msgp))) {
- /*
- * A corrupt distribution message that we weren't able to decode;
- * remove it...
- */
- ASSERT(!msgp->data.attached);
- /* TODO: Add DTrace probe for this bad message situation? */
- UNLINK_MESSAGE(c_p, msgp);
- free_message(msgp);
- goto loop_rec__;
- }
- PreFetch(1, next);
- r(0) = ERL_MESSAGE_TERM(msgp);
- NextPF(1, next);
- }
-
- /*
- * Remove a (matched) message from the message queue.
- */
- OpCase(remove_message): {
- BeamInstr *next;
- ErlMessage* msgp;
-
- PROCESS_MAIN_CHK_LOCKS(c_p);
-
- PreFetch(0, next);
- msgp = PEEK_MESSAGE(c_p);
-
- if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) {
- save_calls(c_p, &exp_receive);
- }
- if (ERL_MESSAGE_TOKEN(msgp) == NIL) {
-#ifdef USE_VM_PROBES
- if (DT_UTAG(c_p) != NIL) {
- if (DT_UTAG_FLAGS(c_p) & DT_UTAG_PERMANENT) {
- SEQ_TRACE_TOKEN(c_p) = am_have_dt_utag;
-#ifdef DTRACE_TAG_HARDDEBUG
- if (DT_UTAG_FLAGS(c_p) & DT_UTAG_SPREADING)
- erts_fprintf(stderr,
- "Dtrace -> (%T) stop spreading "
- "tag %T with message %T\r\n",
- c_p->common.id,DT_UTAG(c_p),ERL_MESSAGE_TERM(msgp));
-#endif
- } else {
-#ifdef DTRACE_TAG_HARDDEBUG
- erts_fprintf(stderr,
- "Dtrace -> (%T) kill tag %T with "
- "message %T\r\n",
- c_p->common.id,DT_UTAG(c_p),ERL_MESSAGE_TERM(msgp));
-#endif
- DT_UTAG(c_p) = NIL;
- SEQ_TRACE_TOKEN(c_p) = NIL;
- }
- } else {
-#endif
- SEQ_TRACE_TOKEN(c_p) = NIL;
-#ifdef USE_VM_PROBES
- }
- DT_UTAG_FLAGS(c_p) &= ~DT_UTAG_SPREADING;
-#endif
- } else if (ERL_MESSAGE_TOKEN(msgp) != am_undefined) {
- Eterm msg;
- SEQ_TRACE_TOKEN(c_p) = ERL_MESSAGE_TOKEN(msgp);
-#ifdef USE_VM_PROBES
- if (ERL_MESSAGE_TOKEN(msgp) == am_have_dt_utag) {
- if (DT_UTAG(c_p) == NIL) {
- DT_UTAG(c_p) = ERL_MESSAGE_DT_UTAG(msgp);
- }
- DT_UTAG_FLAGS(c_p) |= DT_UTAG_SPREADING;
-#ifdef DTRACE_TAG_HARDDEBUG
- erts_fprintf(stderr,
- "Dtrace -> (%T) receive tag (%T) "
- "with message %T\r\n",
- c_p->common.id, DT_UTAG(c_p), ERL_MESSAGE_TERM(msgp));
-#endif
- } else {
-#endif
- ASSERT(is_tuple(SEQ_TRACE_TOKEN(c_p)));
- ASSERT(SEQ_TRACE_TOKEN_ARITY(c_p) == 5);
- ASSERT(is_small(SEQ_TRACE_TOKEN_SERIAL(c_p)));
- ASSERT(is_small(SEQ_TRACE_TOKEN_LASTCNT(c_p)));
- ASSERT(is_small(SEQ_TRACE_TOKEN_FLAGS(c_p)));
- ASSERT(is_pid(SEQ_TRACE_TOKEN_SENDER(c_p)));
- c_p->seq_trace_lastcnt = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p));
- if (c_p->seq_trace_clock < unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p))) {
- c_p->seq_trace_clock = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p));
- }
- msg = ERL_MESSAGE_TERM(msgp);
- seq_trace_output(SEQ_TRACE_TOKEN(c_p), msg, SEQ_TRACE_RECEIVE,
- c_p->common.id, c_p);
-#ifdef USE_VM_PROBES
- }
-#endif
- }
-#ifdef USE_VM_PROBES
- if (DTRACE_ENABLED(message_receive)) {
- Eterm token2 = NIL;
- DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
- Sint tok_label = 0;
- Sint tok_lastcnt = 0;
- Sint tok_serial = 0;
-
- dtrace_proc_str(c_p, receiver_name);
- token2 = SEQ_TRACE_TOKEN(c_p);
- if (token2 != NIL && token2 != am_have_dt_utag) {
- tok_label = signed_val(SEQ_TRACE_T_LABEL(token2));
- tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token2));
- tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token2));
- }
- DTRACE6(message_receive,
- receiver_name, size_object(ERL_MESSAGE_TERM(msgp)),
- c_p->msg.len - 1, tok_label, tok_lastcnt, tok_serial);
- }
-#endif
- UNLINK_MESSAGE(c_p, msgp);
- JOIN_MESSAGE(c_p);
- CANCEL_TIMER(c_p);
- free_message(msgp);
-
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
-
- NextPF(0, next);
- }
-
- /*
- * Advance the save pointer to the next message (the current
- * message didn't match), then jump to the loop_rec instruction.
- */
- OpCase(loop_rec_end_f): {
- SET_I((BeamInstr *) Arg(0));
- SAVE_MESSAGE(c_p);
- goto loop_rec__;
- }
- /*
- * Prepare to wait for a message or a timeout, whichever occurs first.
- *
-     * Note: In order to keep compatibility between 32- and 64-bit
-     * emulators, only timeout values that can be represented in
-     * 32 bits (unsigned) are allowed.
- */
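    /*
     * In concrete terms (simple arithmetic on the limit above): the
     * largest finite timeout is 2^32 - 1 = 4294967295 ms, roughly
     * 49.7 days; larger finite values fail with a timeout_value
     * exception (see i_wait_error below).
     */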
-
-
- OpCase(i_wait_timeout_fs): {
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
-
- /* Fall through */
- }
- OpCase(i_wait_timeout_locked_fs): {
- Eterm timeout_value;
-
- /*
- * If we have already set the timer, we must NOT set it again. Therefore,
- * we must test the F_INSLPQUEUE flag as well as the F_TIMO flag.
- */
- if (c_p->flags & (F_INSLPQUEUE | F_TIMO)) {
- goto wait2;
- }
- GetArg1(1, timeout_value);
- if (timeout_value != make_small(0)) {
-#if !defined(ARCH_64) || HALFWORD_HEAP
- Uint time_val;
-#endif
-
- if (is_small(timeout_value) && signed_val(timeout_value) > 0 &&
-#if defined(ARCH_64) && !HALFWORD_HEAP
- ((unsigned_val(timeout_value) >> 32) == 0)
-#else
- 1
-#endif
- ) {
- /*
-	     * The timer routine will set c_p->i to the value in
-	     * c_p->def_arg_reg[0].  Note that it is safe to use this
-	     * location because there are no living x registers in
-	     * a receive statement.
-	     * Note that for the halfword emulator, the first two elements
-	     * of the array are used.
- */
- BeamInstr** pi = (BeamInstr**) c_p->def_arg_reg;
- *pi = I+3;
- set_timer(c_p, unsigned_val(timeout_value));
- } else if (timeout_value == am_infinity) {
- c_p->flags |= F_TIMO;
-#if !defined(ARCH_64) || HALFWORD_HEAP
- } else if (term_to_Uint(timeout_value, &time_val)) {
- BeamInstr** pi = (BeamInstr**) c_p->def_arg_reg;
- *pi = I+3;
- set_timer(c_p, time_val);
-#endif
- } else { /* Wrong time */
- OpCase(i_wait_error_locked): {
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
- /* Fall through */
- }
- OpCase(i_wait_error): {
- c_p->freason = EXC_TIMEOUT_VALUE;
- goto find_func_info;
- }
- }
-
- /*
- * Prepare to wait indefinitely for a new message to arrive
-	 * (or until the timeout set above expires, if we fell
-	 * through from above).
-	 *
-	 * When a new message arrives, control will be transferred to
-	 * the loop_rec instruction (at label L1). In case of timeout,
-	 * control will be transferred to the timeout instruction
-	 * following the wait_timeout instruction.
- */
-
- OpCase(wait_locked_f):
- OpCase(wait_f):
-
- wait2: {
- c_p->i = (BeamInstr *) Arg(0); /* L1 */
- SWAPOUT;
- c_p->arity = 0;
- erts_smp_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_ACTIVE);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
- c_p->current = NULL;
- goto do_schedule;
- }
- OpCase(wait_unlocked_f): {
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
- goto wait2;
- }
- }
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
- Next(2);
- }
-
- OpCase(i_wait_timeout_fI): {
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
- }
-
- OpCase(i_wait_timeout_locked_fI):
- {
- /*
- * If we have already set the timer, we must NOT set it again. Therefore,
- * we must test the F_INSLPQUEUE flag as well as the F_TIMO flag.
- */
- if ((c_p->flags & (F_INSLPQUEUE | F_TIMO)) == 0) {
- BeamInstr** p = (BeamInstr **) c_p->def_arg_reg;
- *p = I+3;
- set_timer(c_p, Arg(1));
- }
- goto wait2;
- }
-
- /*
- * A timeout has occurred. Reset the save pointer so that the next
- * receive statement will examine the first message first.
- */
- OpCase(timeout_locked): {
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
- }
-
- OpCase(timeout): {
- BeamInstr *next;
-
- PreFetch(0, next);
- if (IS_TRACED_FL(c_p, F_TRACE_RECEIVE)) {
- trace_receive(c_p, am_timeout);
- }
- if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) {
- save_calls(c_p, &exp_timeout);
- }
- c_p->flags &= ~F_TIMO;
- JOIN_MESSAGE(c_p);
- NextPF(0, next);
- }
-
-
- {
- Eterm select_val2;
-
- OpCase(i_select_tuple_arity2_yfAfAf):
- select_val2 = yb(Arg(0));
- goto do_select_tuple_arity2;
-
- OpCase(i_select_tuple_arity2_xfAfAf):
- select_val2 = xb(Arg(0));
- goto do_select_tuple_arity2;
-
- OpCase(i_select_tuple_arity2_rfAfAf):
- select_val2 = r(0);
- I--;
-
- do_select_tuple_arity2:
- if (is_not_tuple(select_val2)) {
- goto select_val2_fail;
- }
- select_val2 = *tuple_val(select_val2);
- goto do_select_val2;
-
- OpCase(i_select_val2_yfcfcf):
- select_val2 = yb(Arg(0));
- goto do_select_val2;
-
- OpCase(i_select_val2_xfcfcf):
- select_val2 = xb(Arg(0));
- goto do_select_val2;
-
- OpCase(i_select_val2_rfcfcf):
- select_val2 = r(0);
- I--;
-
- do_select_val2:
- if (select_val2 == Arg(2)) {
- I += 2;
- } else if (select_val2 == Arg(4)) {
- I += 4;
- }
-
- select_val2_fail:
- SET_I((BeamInstr *) Arg(1));
- Goto(*I);
- }
-
- {
- Eterm select_val;
-
- OpCase(i_select_tuple_arity_xfI):
- select_val = xb(Arg(0));
- goto do_select_tuple_arity;
-
- OpCase(i_select_tuple_arity_yfI):
- select_val = yb(Arg(0));
- goto do_select_tuple_arity;
-
- OpCase(i_select_tuple_arity_rfI):
- select_val = r(0);
- I--;
-
- do_select_tuple_arity:
- if (is_tuple(select_val)) {
- select_val = *tuple_val(select_val);
- goto do_binary_search;
- }
- SET_I((BeamInstr *) Arg(1));
- Goto(*I);
-
- OpCase(i_select_val_xfI):
- select_val = xb(Arg(0));
- goto do_binary_search;
-
- OpCase(i_select_val_yfI):
- select_val = yb(Arg(0));
- goto do_binary_search;
-
- OpCase(i_select_val_rfI):
- select_val = r(0);
- I--;
-
- do_binary_search:
- {
- struct Pairs {
- BeamInstr val;
- BeamInstr* addr;
- };
- struct Pairs* low;
- struct Pairs* high;
- struct Pairs* mid;
- int bdiff; /* int not long because the arrays aren't that large */
-
- low = (struct Pairs *) &Arg(3);
- high = low + Arg(2);
-
- /* The pointer subtraction (high-low) below must produce
- * a signed result, because high could be < low. That
- * requires the compiler to insert quite a bit of code.
- *
- * However, high will be > low so the result will be
- * positive. We can use that knowledge to optimise the
- * entire sequence, from the initial comparison to the
- * computation of mid.
- *
- * -- Mikael Pettersson, Acumem AB
- *
- * Original loop control code:
- *
- * while (low < high) {
- * mid = low + (high-low) / 2;
- *
- */
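	/* Worked example (illustrative numbers, assuming a 16-byte
	 * struct Pairs): with 5 entries left, bdiff = 80, so
	 * boffset = (80 >> 1) & ~15 = 32 bytes = 2 entries, i.e.
	 * mid = low + 2, exactly what low + (high-low)/2 would give,
	 * but computed with an unsigned shift and a mask (which relies
	 * on sizeof(struct Pairs) being a power of two) instead of a
	 * signed division. */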
- while ((bdiff = (int)((char*)high - (char*)low)) > 0) {
- unsigned int boffset = ((unsigned int)bdiff >> 1) & ~(sizeof(struct Pairs)-1);
-
- mid = (struct Pairs*)((char*)low + boffset);
- if (select_val < mid->val) {
- high = mid;
- } else if (select_val > mid->val) {
- low = mid + 1;
- } else {
- SET_I(mid->addr);
- Goto(*I);
- }
- }
- SET_I((BeamInstr *) Arg(1));
- Goto(*I);
- }
- }
-
- {
- Eterm jump_on_val_zero_index;
-
- OpCase(i_jump_on_val_zero_yfI):
- jump_on_val_zero_index = yb(Arg(0));
- goto do_jump_on_val_zero_index;
-
- OpCase(i_jump_on_val_zero_xfI):
- jump_on_val_zero_index = xb(Arg(0));
- goto do_jump_on_val_zero_index;
-
- OpCase(i_jump_on_val_zero_rfI):
- jump_on_val_zero_index = r(0);
- I--;
-
- do_jump_on_val_zero_index:
- if (is_small(jump_on_val_zero_index)) {
- jump_on_val_zero_index = signed_val(jump_on_val_zero_index);
- if (jump_on_val_zero_index < Arg(2)) {
- SET_I((BeamInstr *) (&Arg(3))[jump_on_val_zero_index]);
- Goto(*I);
- }
- }
- SET_I((BeamInstr *) Arg(1));
- Goto(*I);
- }
-
- {
- Eterm jump_on_val_index;
-
-
- OpCase(i_jump_on_val_yfII):
- jump_on_val_index = yb(Arg(0));
- goto do_jump_on_val_index;
-
- OpCase(i_jump_on_val_xfII):
- jump_on_val_index = xb(Arg(0));
- goto do_jump_on_val_index;
-
- OpCase(i_jump_on_val_rfII):
- jump_on_val_index = r(0);
- I--;
-
- do_jump_on_val_index:
- if (is_small(jump_on_val_index)) {
- jump_on_val_index = (Uint) (signed_val(jump_on_val_index) - Arg(3));
- if (jump_on_val_index < Arg(2)) {
- SET_I((BeamInstr *) (&Arg(4))[jump_on_val_index]);
- Goto(*I);
- }
- }
- SET_I((BeamInstr *) Arg(1));
- Goto(*I);
- }
-
- do_put_tuple: {
- Eterm* hp = HTOP;
-
- *hp++ = make_arityval(pt_arity);
-
- do {
- Eterm term = *I++;
- switch (term & _TAG_IMMED1_MASK) {
- case (R_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER:
- *hp++ = r(0);
- break;
- case (X_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER:
- *hp++ = x(term >> _TAG_IMMED1_SIZE);
- break;
- case (Y_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER:
- *hp++ = y(term >> _TAG_IMMED1_SIZE);
- break;
- default:
- *hp++ = term;
- break;
- }
- } while (--pt_arity != 0);
- HTOP = hp;
- Goto(*I);
- }
-
- OpCase(new_map_jdII): {
- Eterm res;
-
- x(0) = r(0);
- SWAPOUT;
- res = new_map(c_p, reg, I);
- SWAPIN;
- r(0) = x(0);
- StoreResult(res, Arg(1));
- Next(4+Arg(3));
- }
-
- OpCase(i_has_map_fields_fsI): {
- map_t* mp;
- Eterm map;
- Eterm field;
- Eterm *ks;
- BeamInstr* fs;
- Uint sz,n;
-
- GetArg1(1, map);
-
-	/* This instruction assumes Arg1 is a map,
-	 * i.e. that it is preceded by an is_map test when needed.
- */
-
- mp = (map_t *)map_val(map);
- sz = map_get_size(mp);
-
- if (sz == 0) {
- SET_I((BeamInstr *) Arg(0));
- goto has_map_fields_fail;
- }
-
- ks = map_get_keys(mp);
- n = (Uint)Arg(2);
- fs = &Arg(3); /* pattern fields */
-
- ASSERT(n>0);
-
- while(sz) {
- field = (Eterm)*fs;
- if (EQ(field,*ks)) {
- n--;
- fs++;
- if (n == 0) break;
- }
- ks++; sz--;
- }
-
- if (n) {
- SET_I((BeamInstr *) Arg(0));
- goto has_map_fields_fail;
- }
-
- I += 4 + Arg(2);
-has_map_fields_fail:
- ASSERT(VALID_INSTR(*I));
- Goto(*I);
- }
-
-#define PUT_TERM_REG(term, desc) \
-do { \
- switch ((desc) & _TAG_IMMED1_MASK) { \
- case (R_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER: \
- r(0) = (term); \
- break; \
- case (X_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER: \
- x((desc) >> _TAG_IMMED1_SIZE) = (term); \
- break; \
- case (Y_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER: \
- y((desc) >> _TAG_IMMED1_SIZE) = (term); \
- break; \
- default: \
- ASSERT(0); \
- break; \
- } \
-} while(0)
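    /*
     * Usage sketch (hypothetical descriptor value): if desc encodes
     * x(2) -- its low _TAG_IMMED1_MASK bits selecting the X register
     * file and the bits above _TAG_IMMED1_SIZE holding the index 2 --
     * then PUT_TERM_REG(term, desc) expands to x(2) = term. The
     * default branch should be unreachable for descriptors emitted by
     * the loader.
     */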
-
- OpCase(i_get_map_elements_fsI): {
- Eterm map;
- map_t *mp;
- Eterm field;
- Eterm *ks;
- Eterm *vs;
- BeamInstr *fs;
- Uint sz,n;
-
- GetArg1(1, map);
-
-	/* This instruction assumes Arg1 is a map,
-	 * i.e. that it is preceded by an is_map test when needed.
- */
-
- mp = (map_t *)map_val(map);
- sz = map_get_size(mp);
-
- if (sz == 0) {
- SET_I((BeamInstr *) Arg(0));
- goto get_map_elements_fail;
- }
-
- n = (Uint)Arg(2) / 2;
- fs = &Arg(3); /* pattern fields and target registers */
- ks = map_get_keys(mp);
- vs = map_get_values(mp);
-
- while(sz) {
- field = (Eterm)*fs;
- if (EQ(field,*ks)) {
- PUT_TERM_REG(*vs, fs[1]);
- n--;
- fs += 2;
- /* no more values to fetch, we are done */
- if (n == 0) break;
- }
- ks++; sz--;
- vs++;
- }
-
- if (n) {
- SET_I((BeamInstr *) Arg(0));
- goto get_map_elements_fail;
- }
-
- I += 4 + Arg(2);
-get_map_elements_fail:
- ASSERT(VALID_INSTR(*I));
- Goto(*I);
- }
-#undef PUT_TERM_REG
-
- OpCase(update_map_assoc_jsdII): {
- Eterm res;
- Eterm map;
-
- GetArg1(1, map);
- x(0) = r(0);
- SWAPOUT;
- res = update_map_assoc(c_p, reg, map, I);
- SWAPIN;
- if (is_value(res)) {
- r(0) = x(0);
- StoreResult(res, Arg(2));
- Next(5+Arg(4));
- } else {
- goto badarg;
- }
- }
-
- OpCase(update_map_exact_jsdII): {
- Eterm res;
- Eterm map;
-
- GetArg1(1, map);
- x(0) = r(0);
- SWAPOUT;
- res = update_map_exact(c_p, reg, map, I);
- SWAPIN;
- if (is_value(res)) {
- r(0) = x(0);
- StoreResult(res, Arg(2));
- Next(5+Arg(4));
- } else {
- goto badarg;
- }
- }
-
-
- /*
- * All guards with zero arguments have special instructions:
- * self/0
- * node/0
- *
- * All other guard BIFs take one or two arguments.
- */
-
- /*
- * Guard BIF in head. On failure, ignore the error and jump
- * to the code for the next clause. We don't support tracing
- * of guard BIFs.
- */
-
- OpCase(bif1_fbsd):
- {
- Eterm (*bf)(Process*, Eterm*);
- Eterm tmp_reg[1];
- Eterm result;
-
- GetArg1(2, tmp_reg[0]);
- bf = (BifFunction) Arg(1);
- c_p->fcalls = FCALLS;
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- result = (*bf)(c_p, tmp_reg);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_HOLE_CHECK(c_p);
- FCALLS = c_p->fcalls;
- if (is_value(result)) {
- StoreBifResult(3, result);
- }
- SET_I((BeamInstr *) Arg(0));
- Goto(*I);
- }
-
- /*
- * Guard BIF in body. It can fail like any BIF. No trace support.
- */
-
- OpCase(bif1_body_bsd):
- {
- Eterm (*bf)(Process*, Eterm*);
-
- Eterm tmp_reg[1];
- Eterm result;
-
- GetArg1(1, tmp_reg[0]);
- bf = (BifFunction) Arg(0);
- c_p->fcalls = FCALLS;
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- result = (*bf)(c_p, tmp_reg);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_HOLE_CHECK(c_p);
- FCALLS = c_p->fcalls;
- if (is_value(result)) {
- StoreBifResult(2, result);
- }
- reg[0] = tmp_reg[0];
- SWAPOUT;
- I = handle_error(c_p, I, reg, bf);
- goto post_error_handling;
- }
-
- OpCase(i_gc_bif1_jIsId):
- {
- typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint);
- GcBifFunction bf;
- Eterm arg;
- Eterm result;
- Uint live = (Uint) Arg(3);
-
- GetArg1(2, arg);
- reg[0] = r(0);
- reg[live] = arg;
- bf = (GcBifFunction) Arg(1);
- c_p->fcalls = FCALLS;
- SWAPOUT;
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- result = (*bf)(c_p, reg, live);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- SWAPIN;
- r(0) = reg[0];
- ERTS_HOLE_CHECK(c_p);
- FCALLS = c_p->fcalls;
- if (is_value(result)) {
- StoreBifResult(4, result);
- }
- if (Arg(0) != 0) {
- SET_I((BeamInstr *) Arg(0));
- Goto(*I);
- }
- reg[0] = arg;
- I = handle_error(c_p, I, reg, translate_gc_bif((void *) bf));
- goto post_error_handling;
- }
-
- OpCase(i_gc_bif2_jIId): /* Note, one less parameter than the i_gc_bif1
- and i_gc_bif3 */
- {
- typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint);
- GcBifFunction bf;
- Eterm result;
- Uint live = (Uint) Arg(2);
-
- reg[0] = r(0);
- reg[live++] = tmp_arg1;
- reg[live] = tmp_arg2;
- bf = (GcBifFunction) Arg(1);
- c_p->fcalls = FCALLS;
- SWAPOUT;
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- result = (*bf)(c_p, reg, live);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- SWAPIN;
- r(0) = reg[0];
- ERTS_HOLE_CHECK(c_p);
- FCALLS = c_p->fcalls;
- if (is_value(result)) {
- StoreBifResult(3, result);
- }
- if (Arg(0) != 0) {
- SET_I((BeamInstr *) Arg(0));
- Goto(*I);
- }
- reg[0] = tmp_arg1;
- reg[1] = tmp_arg2;
- I = handle_error(c_p, I, reg, translate_gc_bif((void *) bf));
- goto post_error_handling;
- }
-
- OpCase(i_gc_bif3_jIsId):
- {
- typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint);
- GcBifFunction bf;
- Eterm arg;
- Eterm result;
- Uint live = (Uint) Arg(3);
-
- GetArg1(2, arg);
- reg[0] = r(0);
- reg[live++] = arg;
- reg[live++] = tmp_arg1;
- reg[live] = tmp_arg2;
- bf = (GcBifFunction) Arg(1);
- c_p->fcalls = FCALLS;
- SWAPOUT;
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- result = (*bf)(c_p, reg, live);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- SWAPIN;
- r(0) = reg[0];
- ERTS_HOLE_CHECK(c_p);
- FCALLS = c_p->fcalls;
- if (is_value(result)) {
- StoreBifResult(4, result);
- }
- if (Arg(0) != 0) {
- SET_I((BeamInstr *) Arg(0));
- Goto(*I);
- }
- reg[0] = arg;
- reg[1] = tmp_arg1;
- reg[2] = tmp_arg2;
- I = handle_error(c_p, I, reg, translate_gc_bif((void *) bf));
- goto post_error_handling;
- }
-
- /*
-     * Guard BIFs and, or, xor in guards.
- */
- OpCase(i_bif2_fbd):
- {
- Eterm tmp_reg[2] = {tmp_arg1, tmp_arg2};
- Eterm (*bf)(Process*, Eterm*);
- Eterm result;
-
- bf = (BifFunction) Arg(1);
- c_p->fcalls = FCALLS;
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- result = (*bf)(c_p, tmp_reg);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_HOLE_CHECK(c_p);
- FCALLS = c_p->fcalls;
- if (is_value(result)) {
- StoreBifResult(2, result);
- }
- SET_I((BeamInstr *) Arg(0));
- Goto(*I);
- }
-
- /*
-     * Guard BIFs and, or, xor, and relational operators in body.
- */
- OpCase(i_bif2_body_bd):
- {
- Eterm tmp_reg[2] = {tmp_arg1, tmp_arg2};
- Eterm (*bf)(Process*, Eterm*);
- Eterm result;
-
- bf = (BifFunction) Arg(0);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- result = (*bf)(c_p, tmp_reg);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_HOLE_CHECK(c_p);
- if (is_value(result)) {
- ASSERT(!is_CP(result));
- StoreBifResult(1, result);
- }
- reg[0] = tmp_arg1;
- reg[1] = tmp_arg2;
- SWAPOUT;
- I = handle_error(c_p, I, reg, bf);
- goto post_error_handling;
- }
-
- /*
- * The most general BIF call. The BIF may build any amount of data
- * on the heap. The result is always returned in r(0).
- */
- OpCase(call_bif_e):
- {
- Eterm (*bf)(Process*, Eterm*, BeamInstr*) = GET_BIF_ADDRESS(Arg(0));
- Eterm result;
- BeamInstr *next;
-
- PRE_BIF_SWAPOUT(c_p);
- c_p->fcalls = FCALLS - 1;
- if (FCALLS <= 0) {
- save_calls(c_p, (Export *) Arg(0));
- }
- PreFetch(1, next);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- reg[0] = r(0);
- result = (*bf)(c_p, reg, I);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- ERTS_HOLE_CHECK(c_p);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- if (c_p->mbuf || MSO(c_p).overhead >= BIN_VHEAP_SZ(c_p)) {
- Uint arity = ((Export *)Arg(0))->code[2];
- result = erts_gc_after_bif_call(c_p, result, reg, arity);
- E = c_p->stop;
- }
- HTOP = HEAP_TOP(c_p);
- FCALLS = c_p->fcalls;
- if (is_value(result)) {
- r(0) = result;
- CHECK_TERM(r(0));
- NextPF(1, next);
- } else if (c_p->freason == TRAP) {
- SET_CP(c_p, I+2);
- SET_I(c_p->i);
- SWAPIN;
- r(0) = reg[0];
- Dispatch();
- }
-
- /*
- * Error handling. SWAPOUT is not needed because it was done above.
- */
- ASSERT(c_p->stop == E);
- I = handle_error(c_p, I, reg, bf);
- goto post_error_handling;
- }
-
- /*
- * Arithmetic operations.
- */
-
- OpCase(i_times_jId):
- {
- arith_func = ARITH_FUNC(mixed_times);
- goto do_big_arith2;
- }
-
- OpCase(i_m_div_jId):
- {
- arith_func = ARITH_FUNC(mixed_div);
- goto do_big_arith2;
- }
-
- OpCase(i_int_div_jId):
- {
- Eterm result;
-
- if (tmp_arg2 == SMALL_ZERO) {
- goto badarith;
- } else if (is_both_small(tmp_arg1, tmp_arg2)) {
- Sint ires = signed_val(tmp_arg1) / signed_val(tmp_arg2);
- if (MY_IS_SSMALL(ires)) {
- result = make_small(ires);
- STORE_ARITH_RESULT(result);
- }
- }
- arith_func = ARITH_FUNC(int_div);
- goto do_big_arith2;
- }
-
- OpCase(i_rem_jId):
- {
- Eterm result;
-
- if (tmp_arg2 == SMALL_ZERO) {
- goto badarith;
- } else if (is_both_small(tmp_arg1, tmp_arg2)) {
- result = make_small(signed_val(tmp_arg1) % signed_val(tmp_arg2));
- STORE_ARITH_RESULT(result);
- } else {
- arith_func = ARITH_FUNC(int_rem);
- goto do_big_arith2;
- }
- }
-
- OpCase(i_band_jId):
- {
- Eterm result;
-
- if (is_both_small(tmp_arg1, tmp_arg2)) {
- /*
- * No need to untag -- TAG & TAG == TAG.
- */
- result = tmp_arg1 & tmp_arg2;
- STORE_ARITH_RESULT(result);
- }
- arith_func = ARITH_FUNC(band);
- goto do_big_arith2;
- }
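    /*
     * Why no untagging is needed (a sketch, assuming the usual
     * immediate tagging scheme where a small is (v << S) | TAG):
     *
     *    ((a << S) | TAG) & ((b << S) | TAG) == ((a & b) << S) | TAG
     *
     * so the AND of two tagged smalls is already the tagged AND of
     * their values. The same identity holds for OR (see i_bor below),
     * but not for XOR, where the tag bits would cancel; i_bxor
     * therefore untags and retags.
     */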
-
- do_big_arith2:
- {
- Eterm result;
- Uint live = Arg(1);
-
- SWAPOUT;
- reg[0] = r(0);
- reg[live] = tmp_arg1;
- reg[live+1] = tmp_arg2;
- result = arith_func(c_p, reg, live);
- r(0) = reg[0];
- SWAPIN;
- ERTS_HOLE_CHECK(c_p);
- if (is_value(result)) {
- STORE_ARITH_RESULT(result);
- }
- goto lb_Cl_error;
- }
-
- /*
-     * An error occurred in an arithmetic operation or test that could
- * appear either in a head or in a body.
- * In a head, execution should continue at failure address in Arg(0).
- * In a body, Arg(0) == 0 and an exception should be raised.
- */
- lb_Cl_error: {
- if (Arg(0) != 0) {
- OpCase(jump_f): {
- jump_f:
- SET_I((BeamInstr *) Arg(0));
- Goto(*I);
- }
- }
- ASSERT(c_p->freason != BADMATCH || is_value(c_p->fvalue));
- goto find_func_info;
- }
-
- OpCase(i_bor_jId):
- {
- Eterm result;
-
- if (is_both_small(tmp_arg1, tmp_arg2)) {
- /*
- * No need to untag -- TAG | TAG == TAG.
- */
- result = tmp_arg1 | tmp_arg2;
- STORE_ARITH_RESULT(result);
- }
- arith_func = ARITH_FUNC(bor);
- goto do_big_arith2;
- }
-
- OpCase(i_bxor_jId):
- {
- Eterm result;
-
- if (is_both_small(tmp_arg1, tmp_arg2)) {
- /*
- * We could extract the tag from one argument, but a tag extraction
- * could mean a shift. Therefore, play it safe here.
- */
- result = make_small(signed_val(tmp_arg1) ^ signed_val(tmp_arg2));
- STORE_ARITH_RESULT(result);
- }
- arith_func = ARITH_FUNC(bxor);
- goto do_big_arith2;
- }
-
- {
- Sint i;
- Sint ires;
- Eterm* bigp;
-
- OpCase(i_bsr_jId):
- if (is_small(tmp_arg2)) {
- i = -signed_val(tmp_arg2);
- if (is_small(tmp_arg1)) {
- goto small_shift;
- } else if (is_big(tmp_arg1)) {
- if (i == 0) {
- StoreBifResult(2, tmp_arg1);
- }
- goto big_shift;
- }
- } else if (is_big(tmp_arg2)) {
- /*
- * N bsr NegativeBigNum == N bsl MAX_SMALL
- * N bsr PositiveBigNum == N bsl MIN_SMALL
- */
- tmp_arg2 = make_small(bignum_header_is_neg(*big_val(tmp_arg2)) ?
- MAX_SMALL : MIN_SMALL);
- goto do_bsl;
- }
- goto badarith;
-
- OpCase(i_bsl_jId):
- do_bsl:
- if (is_small(tmp_arg2)) {
- i = signed_val(tmp_arg2);
-
- if (is_small(tmp_arg1)) {
- small_shift:
- ires = signed_val(tmp_arg1);
-
- if (i == 0 || ires == 0) {
- StoreBifResult(2, tmp_arg1);
- } else if (i < 0) { /* Right shift */
- i = -i;
- if (i >= SMALL_BITS-1) {
- tmp_arg1 = (ires < 0) ? SMALL_MINUS_ONE : SMALL_ZERO;
- } else {
- tmp_arg1 = make_small(ires >> i);
- }
- StoreBifResult(2, tmp_arg1);
- } else if (i < SMALL_BITS-1) { /* Left shift */
- if ((ires > 0 && ((~(Uint)0 << ((SMALL_BITS-1)-i)) & ires) == 0) ||
- ((~(Uint)0 << ((SMALL_BITS-1)-i)) & ~ires) == 0) {
- tmp_arg1 = make_small(ires << i);
- StoreBifResult(2, tmp_arg1);
- }
- }
- tmp_arg1 = small_to_big(ires, tmp_big);
-
- big_shift:
- if (i > 0) { /* Left shift. */
- ires = big_size(tmp_arg1) + (i / D_EXP);
- } else { /* Right shift. */
- ires = big_size(tmp_arg1);
- if (ires <= (-i / D_EXP))
- ires = 3; /* ??? */
- else
- ires -= (-i / D_EXP);
- }
- {
- ires = BIG_NEED_SIZE(ires+1);
- /*
-		 * A slightly conservative check of the size to avoid
- * allocating huge amounts of memory for bignums that
- * clearly would overflow the arity in the header
- * word.
- */
- if (ires-8 > BIG_ARITY_MAX) {
- c_p->freason = SYSTEM_LIMIT;
- goto lb_Cl_error;
- }
- TestHeapPreserve(ires+1, Arg(1), tmp_arg1);
- bigp = HTOP;
- tmp_arg1 = big_lshift(tmp_arg1, i, bigp);
- if (is_big(tmp_arg1)) {
- HTOP += bignum_header_arity(*HTOP) + 1;
- }
- HEAP_SPACE_VERIFIED(0);
- if (is_nil(tmp_arg1)) {
- /*
-		    * This result must have been only slightly larger
- * than allowed since it wasn't caught by the
- * previous test.
- */
- c_p->freason = SYSTEM_LIMIT;
- goto lb_Cl_error;
- }
- ERTS_HOLE_CHECK(c_p);
- StoreBifResult(2, tmp_arg1);
- }
- } else if (is_big(tmp_arg1)) {
- if (i == 0) {
- StoreBifResult(2, tmp_arg1);
- }
- goto big_shift;
- }
- } else if (is_big(tmp_arg2)) {
- if (bignum_header_is_neg(*big_val(tmp_arg2))) {
- /*
- * N bsl NegativeBigNum is either 0 or -1, depending on
- * the sign of N. Since we don't believe this case
- * is common, do the calculation with the minimum
- * amount of code.
- */
- tmp_arg2 = make_small(MIN_SMALL);
- goto do_bsl;
- } else if (is_small(tmp_arg1) || is_big(tmp_arg1)) {
- /*
- * N bsl PositiveBigNum is too large to represent.
- */
- c_p->freason = SYSTEM_LIMIT;
- goto lb_Cl_error;
- }
- /* Fall through if the left argument is not an integer. */
- }
- /*
- * One or more non-integer arguments.
- */
- goto badarith;
- }
-
- OpCase(i_int_bnot_jsId):
- {
- Eterm bnot_val;
-
- GetArg1(1, bnot_val);
- if (is_small(bnot_val)) {
- bnot_val = make_small(~signed_val(bnot_val));
- } else {
- Uint live = Arg(2);
- SWAPOUT;
- reg[0] = r(0);
- reg[live] = bnot_val;
- bnot_val = erts_gc_bnot(c_p, reg, live);
- r(0) = reg[0];
- SWAPIN;
- ERTS_HOLE_CHECK(c_p);
- if (is_nil(bnot_val)) {
- goto lb_Cl_error;
- }
- }
- StoreBifResult(3, bnot_val);
- }
-
- badarith:
- c_p->freason = BADARITH;
- goto lb_Cl_error;
-
- OpCase(i_apply): {
- BeamInstr *next;
- SWAPOUT;
- next = apply(c_p, r(0), x(1), x(2), reg);
- SWAPIN;
- if (next != NULL) {
- r(0) = reg[0];
- SET_CP(c_p, I+1);
- SET_I(next);
- Dispatch();
- }
- I = handle_error(c_p, I, reg, apply_3);
- goto post_error_handling;
- }
-
- OpCase(i_apply_last_P): {
- BeamInstr *next;
- SWAPOUT;
- next = apply(c_p, r(0), x(1), x(2), reg);
- SWAPIN;
- if (next != NULL) {
- r(0) = reg[0];
- SET_CP(c_p, (BeamInstr *) EXPAND_POINTER(E[0]));
- E = ADD_BYTE_OFFSET(E, Arg(0));
- SET_I(next);
- Dispatch();
- }
- I = handle_error(c_p, I, reg, apply_3);
- goto post_error_handling;
- }
-
- OpCase(i_apply_only): {
- BeamInstr *next;
- SWAPOUT;
- next = apply(c_p, r(0), x(1), x(2), reg);
- SWAPIN;
- if (next != NULL) {
- r(0) = reg[0];
- SET_I(next);
- Dispatch();
- }
- I = handle_error(c_p, I, reg, apply_3);
- goto post_error_handling;
- }
-
- OpCase(apply_I): {
- BeamInstr *next;
-
- reg[0] = r(0);
- SWAPOUT;
- next = fixed_apply(c_p, reg, Arg(0));
- SWAPIN;
- if (next != NULL) {
- r(0) = reg[0];
- SET_CP(c_p, I+2);
- SET_I(next);
- Dispatch();
- }
- I = handle_error(c_p, I, reg, apply_3);
- goto post_error_handling;
- }
-
- OpCase(apply_last_IP): {
- BeamInstr *next;
-
- reg[0] = r(0);
- SWAPOUT;
- next = fixed_apply(c_p, reg, Arg(0));
- SWAPIN;
- if (next != NULL) {
- r(0) = reg[0];
- SET_CP(c_p, (BeamInstr *) EXPAND_POINTER(E[0]));
- E = ADD_BYTE_OFFSET(E, Arg(1));
- SET_I(next);
- Dispatch();
- }
- I = handle_error(c_p, I, reg, apply_3);
- goto post_error_handling;
- }
-
- OpCase(i_apply_fun): {
- BeamInstr *next;
-
- SWAPOUT;
- next = apply_fun(c_p, r(0), x(1), reg);
- SWAPIN;
- if (next != NULL) {
- r(0) = reg[0];
- SET_CP(c_p, I+1);
- SET_I(next);
- Dispatchfun();
- }
- goto find_func_info;
- }
-
- OpCase(i_apply_fun_last_P): {
- BeamInstr *next;
-
- SWAPOUT;
- next = apply_fun(c_p, r(0), x(1), reg);
- SWAPIN;
- if (next != NULL) {
- r(0) = reg[0];
- SET_CP(c_p, (BeamInstr *) EXPAND_POINTER(E[0]));
- E = ADD_BYTE_OFFSET(E, Arg(0));
- SET_I(next);
- Dispatchfun();
- }
- goto find_func_info;
- }
-
- OpCase(i_apply_fun_only): {
- BeamInstr *next;
-
- SWAPOUT;
- next = apply_fun(c_p, r(0), x(1), reg);
- SWAPIN;
- if (next != NULL) {
- r(0) = reg[0];
- SET_I(next);
- Dispatchfun();
- }
- goto find_func_info;
- }
-
- OpCase(i_call_fun_I): {
- BeamInstr *next;
-
- SWAPOUT;
- reg[0] = r(0);
-
- next = call_fun(c_p, Arg(0), reg, THE_NON_VALUE);
- SWAPIN;
- if (next != NULL) {
- r(0) = reg[0];
- SET_CP(c_p, I+2);
- SET_I(next);
- Dispatchfun();
- }
- goto find_func_info;
- }
-
- OpCase(i_call_fun_last_IP): {
- BeamInstr *next;
-
- SWAPOUT;
- reg[0] = r(0);
- next = call_fun(c_p, Arg(0), reg, THE_NON_VALUE);
- SWAPIN;
- if (next != NULL) {
- r(0) = reg[0];
- SET_CP(c_p, (BeamInstr *) EXPAND_POINTER(E[0]));
- E = ADD_BYTE_OFFSET(E, Arg(1));
- SET_I(next);
- Dispatchfun();
- }
- goto find_func_info;
- }
+#include "beam_hot.h"
#ifdef DEBUG
/*
@@ -3279,19 +802,29 @@ get_map_elements_fail:
* called from I[-3], I[-2], and I[-1] respectively.
*/
context_switch_fun:
- c_p->arity = I[-1] + 1;
+ /* Add one for the environment of the fun */
+ c_p->arity = erts_code_to_codemfa(I)->arity + 1;
goto context_switch2;
context_switch:
- c_p->arity = I[-1];
+ c_p->arity = erts_code_to_codemfa(I)->arity;
+
+ context_switch2: /* Entry for fun calls. */
+ c_p->current = erts_code_to_codemfa(I);
- context_switch2: /* Entry for fun calls. */
- c_p->current = I-3; /* Pointer to Mod, Func, Arity */
+ context_switch3:
{
Eterm* argp;
int i;
+ if (erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_EXITING) {
+ c_p->i = beam_exit;
+ c_p->arity = 0;
+ c_p->current = NULL;
+ goto do_schedule;
+ }
+
/*
* Make sure that there is enough room for the argument registers to be saved.
*/
@@ -3322,127 +855,46 @@ get_map_elements_fail:
     * (because the code for the Dispatch() macro becomes shorter that way).
*/
- reds_used = REDS_IN(c_p) - FCALLS + 1;
-
+ ASSERT(c_p->debug_reds_in == REDS_IN(c_p));
+ if (!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
+ reds_used = REDS_IN(c_p) - FCALLS;
+ else
+ reds_used = REDS_IN(c_p) - (CONTEXT_REDS + FCALLS);
+ ASSERT(reds_used >= 0);
+
/*
* Save the argument registers and everything else.
*/
argp = c_p->arg_reg;
- for (i = c_p->arity - 1; i > 0; i--) {
+ for (i = c_p->arity - 1; i >= 0; i--) {
argp[i] = reg[i];
}
- c_p->arg_reg[0] = r(0);
SWAPOUT;
c_p->i = I;
goto do_schedule1;
}
- OpCase(set_tuple_element_sdP): {
- Eterm element;
- Eterm tuple;
- BeamInstr *next;
- Eterm* p;
-
- PreFetch(3, next);
- GetArg2(0, element, tuple);
- ASSERT(is_tuple(tuple));
- p = (Eterm *) ((unsigned char *) tuple_val(tuple) + Arg(2));
- *p = element;
- NextPF(3, next);
- }
-
- OpCase(i_is_ne_exact_f):
- if (EQ(tmp_arg1, tmp_arg2)) {
- ClauseFail();
- }
- Next(1);
+#include "beam_warm.h"
OpCase(normal_exit): {
SWAPOUT;
c_p->freason = EXC_NORMAL;
- c_p->arity = 0; /* In case this process will ever be garbed again. */
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ c_p->arity = 0; /* In case this process will ever be garbed again. */
+ ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
erts_do_exit_process(c_p, am_normal);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
goto do_schedule;
}
OpCase(continue_exit): {
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
erts_continue_exit_process(c_p);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
goto do_schedule;
}
- OpCase(raise_ss): {
- /* This was not done very well in R10-0; then, we passed the tag in
- the first argument and hoped that the existing c_p->ftrace was
- still correct. But the ftrace-object already includes the tag
- (or rather, the freason). Now, we pass the original ftrace in
- the first argument. We also handle atom tags in the first
- argument for backwards compatibility.
- */
- Eterm raise_val1;
- Eterm raise_val2;
- GetArg2(0, raise_val1, raise_val2);
- c_p->fvalue = raise_val2;
- if (c_p->freason == EXC_NULL) {
- /* a safety check for the R10-0 case; should not happen */
- c_p->ftrace = NIL;
- c_p->freason = EXC_ERROR;
- }
- /* for R10-0 code, keep existing c_p->ftrace and hope it's correct */
- switch (raise_val1) {
- case am_throw:
- c_p->freason = EXC_THROWN & ~EXF_SAVETRACE;
- break;
- case am_error:
- c_p->freason = EXC_ERROR & ~EXF_SAVETRACE;
- break;
- case am_exit:
- c_p->freason = EXC_EXIT & ~EXF_SAVETRACE;
- break;
- default:
- {/* R10-1 and later
-	       XXX note: should do a sanity check on the given trace if it
-	       can be passed from a user! Currently we only expect generated calls.
- */
- struct StackTrace *s;
- c_p->ftrace = raise_val1;
- s = get_trace_from_exc(raise_val1);
- if (s == NULL) {
- c_p->freason = EXC_ERROR;
- } else {
- c_p->freason = PRIMARY_EXCEPTION(s->freason);
- }
- }
- }
- goto find_func_info;
- }
-
- {
- Eterm badmatch_val;
-
- OpCase(badmatch_y):
- badmatch_val = yb(Arg(0));
- goto do_badmatch;
-
- OpCase(badmatch_x):
- badmatch_val = xb(Arg(0));
- goto do_badmatch;
-
- OpCase(badmatch_r):
- badmatch_val = r(0);
-
- do_badmatch:
- c_p->fvalue = badmatch_val;
- c_p->freason = BADMATCH;
- }
- /* Fall through here */
-
find_func_info: {
- reg[0] = r(0);
SWAPOUT;
I = handle_error(c_p, I, reg, NULL);
goto post_error_handling;
@@ -3459,11 +911,10 @@ get_map_elements_fail:
* code[3]: &&call_error_handler
* code[4]: Not used
*/
- SWAPOUT;
- reg[0] = r(0);
- I = call_error_handler(c_p, I-3, reg, am_undefined_function);
- r(0) = reg[0];
- SWAPIN;
+ HEAVY_SWAPOUT;
+ I = call_error_handler(c_p, erts_code_to_codemfa(I),
+ reg, am_undefined_function);
+ HEAVY_SWAPIN;
if (I) {
Goto(*I);
}
@@ -3471,1782 +922,400 @@ get_map_elements_fail:
/* Fall through */
OpCase(error_action_code): {
handle_error:
- reg[0] = r(0);
SWAPOUT;
I = handle_error(c_p, NULL, reg, NULL);
post_error_handling:
if (I == 0) {
goto do_schedule;
} else {
- r(0) = reg[0];
ASSERT(!is_value(r(0)));
- if (c_p->mbuf) {
- erts_garbage_collect(c_p, 0, reg+1, 3);
- }
SWAPIN;
Goto(*I);
}
}
- {
- Eterm nif_bif_result;
- Eterm bif_nif_arity;
-
- OpCase(call_nif):
- {
- /*
- * call_nif is always first instruction in function:
- *
- * I[-3]: Module
- * I[-2]: Function
- * I[-1]: Arity
- * I[0]: &&call_nif
- * I[1]: Function pointer to NIF function
- * I[2]: Pointer to erl_module_nif
- */
- BifFunction vbf;
-
- DTRACE_NIF_ENTRY(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
- c_p->current = I-3; /* current and vbf set to please handle_error */
- SWAPOUT;
- c_p->fcalls = FCALLS - 1;
- PROCESS_MAIN_CHK_LOCKS(c_p);
- bif_nif_arity = I[-1];
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
-
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- {
- typedef Eterm NifF(struct enif_environment_t*, int argc, Eterm argv[]);
- NifF* fp = vbf = (NifF*) I[1];
- struct enif_environment_t env;
- erts_pre_nif(&env, c_p, (struct erl_module_nif*)I[2]);
- reg[0] = r(0);
- nif_bif_result = (*fp)(&env, bif_nif_arity, reg);
- erts_post_nif(&env);
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (is_non_value(nif_bif_result) && c_p->freason == TRAP) {
- Export* ep = ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(c_p);
- ep->code[0] = I[-3];
- ep->code[1] = I[-2];
- }
-#endif
- }
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(nif_bif_result));
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
-
- DTRACE_NIF_RETURN(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
- goto apply_bif_or_nif_epilogue;
-
- OpCase(apply_bif):
- /*
- * At this point, I points to the code[3] in the export entry for
- * the BIF:
- *
- * code[0]: Module
- * code[1]: Function
- * code[2]: Arity
- * code[3]: &&apply_bif
- * code[4]: Function pointer to BIF function
- */
-
- c_p->current = I-3; /* In case we apply process_info/1,2 or load_nif/1 */
- c_p->i = I; /* In case we apply check_process_code/2. */
- c_p->arity = 0; /* To allow garbage collection on ourselves
- * (check_process_code/2).
- */
- DTRACE_BIF_ENTRY(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
-
- SWAPOUT;
- c_p->fcalls = FCALLS - 1;
- vbf = (BifFunction) Arg(0);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- bif_nif_arity = I[-1];
- ASSERT(bif_nif_arity <= 3);
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- reg[0] = r(0);
- {
- Eterm (*bf)(Process*, Eterm*, BeamInstr*) = vbf;
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- nif_bif_result = (*bf)(c_p, reg, I);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) ||
- is_non_value(nif_bif_result));
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- }
-
- DTRACE_BIF_RETURN(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
-
- apply_bif_or_nif_epilogue:
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- ERTS_HOLE_CHECK(c_p);
- if (c_p->mbuf) {
- nif_bif_result = erts_gc_after_bif_call(c_p, nif_bif_result,
- reg, bif_nif_arity);
- }
- SWAPIN; /* There might have been a garbage collection. */
- FCALLS = c_p->fcalls;
- if (is_value(nif_bif_result)) {
- r(0) = nif_bif_result;
- CHECK_TERM(r(0));
- SET_I(c_p->cp);
- c_p->cp = 0;
- Goto(*I);
- } else if (c_p->freason == TRAP) {
- SET_I(c_p->i);
- r(0) = reg[0];
- if (c_p->flags & F_HIBERNATE_SCHED) {
- c_p->flags &= ~F_HIBERNATE_SCHED;
- goto do_schedule;
- }
- Dispatch();
- }
- I = handle_error(c_p, c_p->cp, reg, vbf);
- goto post_error_handling;
- }
- }
-
- OpCase(i_get_sd):
- {
- Eterm arg;
- Eterm result;
-
- GetArg1(0, arg);
- result = erts_pd_hash_get(c_p, arg);
- StoreBifResult(1, result);
- }
-
- {
- Eterm case_end_val;
-
- OpCase(case_end_x):
- case_end_val = xb(Arg(0));
- goto do_case_end;
-
- OpCase(case_end_y):
- case_end_val = yb(Arg(0));
- goto do_case_end;
-
- OpCase(case_end_r):
- case_end_val = r(0);
-
- do_case_end:
- c_p->fvalue = case_end_val;
- c_p->freason = EXC_CASE_CLAUSE;
- goto find_func_info;
- }
-
- OpCase(if_end):
- c_p->freason = EXC_IF_CLAUSE;
- goto find_func_info;
-
OpCase(i_func_info_IaaI): {
+ ErtsCodeInfo *ci = (ErtsCodeInfo*)I;
c_p->freason = EXC_FUNCTION_CLAUSE;
- c_p->current = I + 2;
+ c_p->current = &ci->mfa;
goto handle_error;
}
- OpCase(try_case_end_s):
- {
- Eterm try_case_end_val;
- GetArg1(0, try_case_end_val);
- c_p->fvalue = try_case_end_val;
- c_p->freason = EXC_TRY_CLAUSE;
- goto find_func_info;
- }
-
- /*
- * Construction of binaries using new instructions.
- */
- {
- Eterm new_binary;
- Eterm num_bits_term;
- Uint num_bits;
- Uint alloc;
- Uint num_bytes;
-
- OpCase(i_bs_init_bits_heap_IIId): {
- num_bits = Arg(0);
- alloc = Arg(1);
- I++;
- goto do_bs_init_bits_known;
- }
-
- OpCase(i_bs_init_bits_IId): {
- num_bits = Arg(0);
- alloc = 0;
- goto do_bs_init_bits_known;
- }
-
- OpCase(i_bs_init_bits_fail_heap_IjId): {
- /* tmp_arg1 was fetched by an i_fetch instruction */
- num_bits_term = tmp_arg1;
- alloc = Arg(0);
- I++;
- goto do_bs_init_bits;
- }
-
- OpCase(i_bs_init_bits_fail_rjId): {
- num_bits_term = r(0);
- alloc = 0;
- goto do_bs_init_bits;
- }
- OpCase(i_bs_init_bits_fail_yjId): {
- num_bits_term = yb(Arg(0));
- I++;
- alloc = 0;
- goto do_bs_init_bits;
- }
- OpCase(i_bs_init_bits_fail_xjId): {
- num_bits_term = xb(Arg(0));
- I++;
- alloc = 0;
- /* FALL THROUGH */
- }
-
- /* num_bits_term = Term for number of bits to build (small/big)
- * alloc = Number of words to allocate on heap
- * Operands: Fail Live Dst
- */
-
- do_bs_init_bits:
- if (is_small(num_bits_term)) {
- Sint size = signed_val(num_bits_term);
- if (size < 0) {
- goto badarg;
- }
- num_bits = (Uint) size;
- } else {
- Uint bits;
-
- if (!term_to_Uint(num_bits_term, &bits)) {
- c_p->freason = bits;
- goto lb_Cl_error;
-
- }
- num_bits = (Eterm) bits;
- }
-
- /* num_bits = Number of bits to build
- * alloc = Number of extra words to allocate on heap
- * Operands: NotUsed Live Dst
- */
- do_bs_init_bits_known:
- num_bytes = ((Uint64)num_bits+(Uint64)7) >> 3;
- if (num_bits & 7) {
- alloc += ERL_SUB_BIN_SIZE;
- }
- if (num_bytes <= ERL_ONHEAP_BIN_LIMIT) {
- alloc += heap_bin_size(num_bytes);
- } else {
- alloc += PROC_BIN_SIZE;
- }
- TestHeap(alloc, Arg(1));
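	/* Worked example (illustrative numbers): num_bits = 17 gives
	 * num_bytes = (17 + 7) >> 3 = 3, and since 17 & 7 != 0 an
	 * ERL_SUB_BIN_SIZE allowance was added above so that a sub
	 * binary can carry the trailing bit count. */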
-
- /* num_bits = Number of bits to build
- * num_bytes = Number of bytes to allocate in the binary
- * alloc = Total number of words to allocate on heap
- * Operands: NotUsed NotUsed Dst
- */
- if (num_bytes <= ERL_ONHEAP_BIN_LIMIT) {
- ErlHeapBin* hb;
-
- erts_bin_offset = 0;
- erts_writable_bin = 0;
- hb = (ErlHeapBin *) HTOP;
- HTOP += heap_bin_size(num_bytes);
- hb->thing_word = header_heap_bin(num_bytes);
- hb->size = num_bytes;
- erts_current_bin = (byte *) hb->data;
- new_binary = make_binary(hb);
-
- do_bits_sub_bin:
- if (num_bits & 7) {
- ErlSubBin* sb;
-
- sb = (ErlSubBin *) HTOP;
- HTOP += ERL_SUB_BIN_SIZE;
- sb->thing_word = HEADER_SUB_BIN;
- sb->size = num_bytes - 1;
- sb->bitsize = num_bits & 7;
- sb->offs = 0;
- sb->bitoffs = 0;
- sb->is_writable = 0;
- sb->orig = new_binary;
- new_binary = make_binary(sb);
- }
- HEAP_SPACE_VERIFIED(0);
- StoreBifResult(2, new_binary);
- } else {
- Binary* bptr;
- ProcBin* pb;
-
- erts_bin_offset = 0;
- erts_writable_bin = 0;
-
- /*
- * Allocate the binary struct itself.
- */
- bptr = erts_bin_nrml_alloc(num_bytes);
- bptr->flags = 0;
- bptr->orig_size = num_bytes;
- erts_refc_init(&bptr->refc, 1);
- erts_current_bin = (byte *) bptr->orig_bytes;
-
- /*
- * Now allocate the ProcBin on the heap.
- */
- pb = (ProcBin *) HTOP;
- HTOP += PROC_BIN_SIZE;
- pb->thing_word = HEADER_PROC_BIN;
- pb->size = num_bytes;
- pb->next = MSO(c_p).first;
- MSO(c_p).first = (struct erl_off_heap_header*) pb;
- pb->val = bptr;
- pb->bytes = (byte*) bptr->orig_bytes;
- pb->flags = 0;
- OH_OVERHEAD(&(MSO(c_p)), pb->size / sizeof(Eterm));
- new_binary = make_binary(pb);
- goto do_bits_sub_bin;
- }
- }
-
- {
- OpCase(i_bs_init_fail_heap_IjId): {
- /* tmp_arg1 was fetched by an i_fetch instruction */
- tmp_arg2 = Arg(0);
- I++;
- goto do_bs_init;
- }
-
- OpCase(i_bs_init_fail_rjId): {
- tmp_arg1 = r(0);
- tmp_arg2 = 0;
- goto do_bs_init;
- }
-
- OpCase(i_bs_init_fail_yjId): {
- tmp_arg1 = yb(Arg(0));
- tmp_arg2 = 0;
- I++;
- goto do_bs_init;
- }
-
- OpCase(i_bs_init_fail_xjId): {
- tmp_arg1 = xb(Arg(0));
- tmp_arg2 = 0;
- I++;
- }
- /* FALL THROUGH */
- do_bs_init:
- if (is_small(tmp_arg1)) {
- Sint size = signed_val(tmp_arg1);
- if (size < 0) {
- goto badarg;
- }
- tmp_arg1 = (Eterm) size;
- } else {
- Uint bytes;
-
- if (!term_to_Uint(tmp_arg1, &bytes)) {
- c_p->freason = bytes;
- goto lb_Cl_error;
- }
- if ((bytes >> (8*sizeof(Uint)-3)) != 0) {
- goto system_limit;
- }
- tmp_arg1 = (Eterm) bytes;
- }
- if (tmp_arg1 <= ERL_ONHEAP_BIN_LIMIT) {
- goto do_heap_bin_alloc;
- } else {
- goto do_proc_bin_alloc;
- }
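/*
 * A sketch of the system_limit guard above, in isolation. The byte count
 * must be small enough that the corresponding bit count (bytes * 8) still
 * fits in an unsigned word; shifting right by (wordbits - 3) isolates the
 * top three bits, and if any of them is set, multiplying by 8 overflows.
 */
#include <limits.h>

static int byte_count_within_limit(unsigned long bytes)
{
    return (bytes >> (sizeof(unsigned long) * CHAR_BIT - 3)) == 0;
}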
-
-
- OpCase(i_bs_init_heap_IIId): {
- tmp_arg1 = Arg(0);
- tmp_arg2 = Arg(1);
- I++;
- goto do_proc_bin_alloc;
- }
-
- OpCase(i_bs_init_IId): {
- tmp_arg1 = Arg(0);
- tmp_arg2 = 0;
- }
- /* FALL THROUGH */
- do_proc_bin_alloc: {
- Binary* bptr;
- ProcBin* pb;
-
- erts_bin_offset = 0;
- erts_writable_bin = 0;
- TestBinVHeap(tmp_arg1 / sizeof(Eterm),
- tmp_arg2 + PROC_BIN_SIZE + ERL_SUB_BIN_SIZE, Arg(1));
-
- /*
- * Allocate the binary struct itself.
- */
- bptr = erts_bin_nrml_alloc(tmp_arg1);
- bptr->flags = 0;
- bptr->orig_size = tmp_arg1;
- erts_refc_init(&bptr->refc, 1);
- erts_current_bin = (byte *) bptr->orig_bytes;
-
- /*
- * Now allocate the ProcBin on the heap.
- */
- pb = (ProcBin *) HTOP;
- HTOP += PROC_BIN_SIZE;
- pb->thing_word = HEADER_PROC_BIN;
- pb->size = tmp_arg1;
- pb->next = MSO(c_p).first;
- MSO(c_p).first = (struct erl_off_heap_header*) pb;
- pb->val = bptr;
- pb->bytes = (byte*) bptr->orig_bytes;
- pb->flags = 0;
-
- OH_OVERHEAD(&(MSO(c_p)), tmp_arg1 / sizeof(Eterm));
-
- StoreBifResult(2, make_binary(pb));
- }
-
- OpCase(i_bs_init_heap_bin_heap_IIId): {
- tmp_arg1 = Arg(0);
- tmp_arg2 = Arg(1);
- I++;
- goto do_heap_bin_alloc;
- }
-
- OpCase(i_bs_init_heap_bin_IId): {
- tmp_arg1 = Arg(0);
- tmp_arg2 = 0;
- }
- /* Fall through */
- do_heap_bin_alloc:
- {
- ErlHeapBin* hb;
- Uint bin_need;
-
- bin_need = heap_bin_size(tmp_arg1);
- erts_bin_offset = 0;
- erts_writable_bin = 0;
- TestHeap(bin_need+tmp_arg2+ERL_SUB_BIN_SIZE, Arg(1));
- hb = (ErlHeapBin *) HTOP;
- HTOP += bin_need;
- hb->thing_word = header_heap_bin(tmp_arg1);
- hb->size = tmp_arg1;
- erts_current_bin = (byte *) hb->data;
- tmp_arg1 = make_binary(hb);
- StoreBifResult(2, tmp_arg1);
- }
- }
-
- OpCase(i_bs_add_jId): {
- Uint Unit = Arg(1);
- if (is_both_small(tmp_arg1, tmp_arg2)) {
- Sint Arg1 = signed_val(tmp_arg1);
- Sint Arg2 = signed_val(tmp_arg2);
-
- if (Arg1 >= 0 && Arg2 >= 0) {
- BsSafeMul(Arg2, Unit, goto system_limit, tmp_arg1);
- tmp_arg1 += Arg1;
-
- store_bs_add_result:
- if (MY_IS_SSMALL((Sint) tmp_arg1)) {
- tmp_arg1 = make_small(tmp_arg1);
- } else {
- /*
- * May generate a heap fragment, but in this
- * particular case it is OK, since the value will be
- * stored into an x register (the GC will scan x
- * registers for references to heap fragments) and
- * there is no risk that value can be stored into a
- * location that is not scanned for heap-fragment
- * references (such as the heap).
- */
- SWAPOUT;
- tmp_arg1 = erts_make_integer(tmp_arg1, c_p);
- HTOP = HEAP_TOP(c_p);
- }
- StoreBifResult(2, tmp_arg1);
- }
- goto badarg;
- } else {
- Uint a;
- Uint b;
- Uint c;
-
- /*
- * Now we know that one of the arguments is
- * not a small. We must convert both arguments
- * to Uints and check for errors at the same time.
- *
- * Error checking is tricky.
- *
- * If one of the arguments is not numeric or
- * not positive, the error reason is BADARG.
- *
- * Otherwise if both arguments are numeric,
- * but at least one argument does not fit in
- * an Uint, the reason is SYSTEM_LIMIT.
- */
-
- if (!term_to_Uint(tmp_arg1, &a)) {
- if (a == BADARG) {
- goto badarg;
- }
- if (!term_to_Uint(tmp_arg2, &b)) {
- c_p->freason = b;
- goto lb_Cl_error;
- }
- goto system_limit;
- } else if (!term_to_Uint(tmp_arg2, &b)) {
- c_p->freason = b;
- goto lb_Cl_error;
- }
-
- /*
- * The arguments are now correct and stored in a and b.
- */
-
- BsSafeMul(b, Unit, goto system_limit, c);
- tmp_arg1 = a + c;
- if (tmp_arg1 < a) {
- /*
- * If the result is less than one of the
- * arguments, there must have been an overflow.
- */
- goto system_limit;
- }
- goto store_bs_add_result;
- }
- /* No fallthrough */
- ASSERT(0);
- }
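/*
 * The essence of i_bs_add as a standalone sketch: compute a + b*unit with
 * explicit overflow detection, mirroring BsSafeMul (a divide-back check)
 * and the "result smaller than an operand" addition check above. Returns
 * 0 where the VM would raise system_limit.
 */
#include <stdint.h>

static int bs_add_checked(uint64_t a, uint64_t b, uint64_t unit,
                          uint64_t *result)
{
    uint64_t c = b * unit;
    if (unit != 0 && c / unit != b)
        return 0;                 /* multiplication overflowed */
    if (a + c < a)
        return 0;                 /* addition overflowed */
    *result = a + c;
    return 1;
}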
-
- OpCase(bs_put_string_II):
- {
- BeamInstr *next;
- PreFetch(2, next);
- erts_new_bs_put_string(ERL_BITS_ARGS_2((byte *) Arg(1), Arg(0)));
- NextPF(2, next);
- }
-
- /*
- * tmp_arg1 = Number of bytes to build
- * tmp_arg2 = Source binary
- * Operands: Fail ExtraHeap Live Unit Dst
- */
-
- OpCase(i_bs_append_jIIId): {
- Uint live = Arg(2);
- Uint res;
-
- SWAPOUT;
- reg[0] = r(0);
- reg[live] = tmp_arg2;
- res = erts_bs_append(c_p, reg, live, tmp_arg1, Arg(1), Arg(3));
- r(0) = reg[0];
- SWAPIN;
- if (is_non_value(res)) {
- /* c_p->freason is already set (may be either BADARG or SYSTEM_LIMIT). */
- goto lb_Cl_error;
- }
- StoreBifResult(4, res);
- }
-
- /*
- * tmp_arg1 = Number of bytes to build
- * tmp_arg2 = Source binary
- * Operands: Fail Unit Dst
- */
- OpCase(i_bs_private_append_jId): {
- Eterm res;
-
- res = erts_bs_private_append(c_p, tmp_arg2, tmp_arg1, Arg(1));
- if (is_non_value(res)) {
- /* c_p->freason is already set (may be either BADARG or SYSTEM_LIMIT). */
- goto lb_Cl_error;
- }
- StoreBifResult(2, res);
- }
-
- /*
- * tmp_arg1 = Initial size of writable binary
- * Operands: Live Dst
- */
- OpCase(bs_init_writable): {
- SWAPOUT;
- r(0) = erts_bs_init_writable(c_p, r(0));
- SWAPIN;
- Next(0);
- }
-
- /*
- * Calculate the number of bytes needed to encode the source
-	 * operand to UTF-8. If the source operand is invalid (e.g. wrong
-	 * type or range) we return a nonsense integer result (1 to 4). We
- * can get away with that because we KNOW that bs_put_utf8 will do
- * full error checking.
- */
- OpCase(i_bs_utf8_size_sd): {
- Eterm arg;
- Eterm result;
-
- GetArg1(0, arg);
- if (arg < make_small(0x80UL)) {
- result = make_small(1);
- } else if (arg < make_small(0x800UL)) {
- result = make_small(2);
- } else if (arg < make_small(0x10000UL)) {
- result = make_small(3);
- } else {
- result = make_small(4);
- }
- StoreBifResult(1, result);
- }
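/*
 * The same UTF-8 length classification as i_bs_utf8_size_sd, restated on
 * an untagged code point: one byte below 0x80, two below 0x800, three
 * below 0x10000, otherwise four.
 */
static int utf8_size(unsigned long code_point)
{
    if (code_point < 0x80UL)    return 1;
    if (code_point < 0x800UL)   return 2;
    if (code_point < 0x10000UL) return 3;
    return 4;
}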
-
- OpCase(i_bs_put_utf8_js): {
- Eterm arg;
-
- GetArg1(1, arg);
- if (!erts_bs_put_utf8(ERL_BITS_ARGS_1(arg))) {
- goto badarg;
- }
- Next(2);
- }
-
- /*
- * Calculate the number of bytes needed to encode the source
-	 * operand to UTF-16. If the source operand is invalid (e.g. wrong
- * type or range) we return a nonsense integer result (2 or 4). We
- * can get away with that because we KNOW that bs_put_utf16 will do
- * full error checking.
- */
-
- OpCase(i_bs_utf16_size_sd): {
- Eterm arg;
- Eterm result = make_small(2);
-
- GetArg1(0, arg);
- if (arg >= make_small(0x10000UL)) {
- result = make_small(4);
- }
- StoreBifResult(1, result);
- }
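/*
 * The UTF-16 counterpart as a sketch: code points below 0x10000 occupy a
 * single 16-bit unit (2 bytes), everything above needs a surrogate pair
 * (4 bytes) — the only distinction i_bs_utf16_size_sd has to make.
 */
static int utf16_size(unsigned long code_point)
{
    return code_point < 0x10000UL ? 2 : 4;
}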
-
- OpCase(i_bs_put_utf16_jIs): {
- Eterm arg;
-
- GetArg1(2, arg);
- if (!erts_bs_put_utf16(ERL_BITS_ARGS_2(arg, Arg(1)))) {
- goto badarg;
- }
- Next(3);
- }
-
- /*
- * Only used for validating a value about to be stored in a binary.
- */
- OpCase(i_bs_validate_unicode_js): {
- Eterm val;
-
- GetArg1(1, val);
-
- /*
- * There is no need to untag the integer, but it IS necessary
- * to make sure it is small (if the term is a bignum, it could
- * slip through the test, and there is no further test that
- * would catch it, since bit syntax construction silently masks
- * too big numbers).
- */
- if (is_not_small(val) || val > make_small(0x10FFFFUL) ||
- (make_small(0xD800UL) <= val && val <= make_small(0xDFFFUL))) {
- goto badarg;
- }
- Next(2);
- }
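/*
 * The validation above, restated on an untagged value: a Unicode scalar
 * value is valid when it is at most 0x10FFFF and not in the surrogate
 * range 0xD800..0xDFFF, which is reserved for UTF-16 pairs.
 */
static int valid_unicode_scalar(unsigned long cp)
{
    return cp <= 0x10FFFFUL && !(cp >= 0xD800UL && cp <= 0xDFFFUL);
}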
-
- /*
- * Only used for validating a value matched out.
- *
- * tmp_arg1 = Integer to validate
- * tmp_arg2 = Match context
- */
- OpCase(i_bs_validate_unicode_retract_j): {
- /*
- * There is no need to untag the integer, but it IS necessary
- * to make sure it is small (a bignum pointer could fall in
- * the valid range).
- */
- if (is_not_small(tmp_arg1) || tmp_arg1 > make_small(0x10FFFFUL) ||
- (make_small(0xD800UL) <= tmp_arg1 &&
- tmp_arg1 <= make_small(0xDFFFUL))) {
- ErlBinMatchBuffer *mb = ms_matchbuffer(tmp_arg2);
-
- mb->offset -= 32;
- goto badarg;
- }
- Next(1);
- }
+#include "beam_cold.h"
- /*
- * Matching of binaries.
- */
+#ifdef ERTS_OPCODE_COUNTER_SUPPORT
+ DEFINE_COUNTING_LABELS;
+#endif
- {
- Eterm header;
- BeamInstr *next;
- Uint slots;
- Eterm context;
-
- OpCase(i_bs_start_match2_rfIId): {
- context = r(0);
-
- do_start_match:
- slots = Arg(2);
- if (!is_boxed(context)) {
- ClauseFail();
- }
- PreFetch(4, next);
- header = *boxed_val(context);
- if (header_is_bin_matchstate(header)) {
- ErlBinMatchState* ms = (ErlBinMatchState *) boxed_val(context);
- Uint actual_slots = HEADER_NUM_SLOTS(header);
- ms->save_offset[0] = ms->mb.offset;
- if (actual_slots < slots) {
- ErlBinMatchState* dst;
- Uint live = Arg(1);
- Uint wordsneeded = ERL_BIN_MATCHSTATE_SIZE(slots);
-
- TestHeapPreserve(wordsneeded, live, context);
- ms = (ErlBinMatchState *) boxed_val(context);
- dst = (ErlBinMatchState *) HTOP;
- *dst = *ms;
- *HTOP = HEADER_BIN_MATCHSTATE(slots);
- HTOP += wordsneeded;
- HEAP_SPACE_VERIFIED(0);
- StoreResult(make_matchstate(dst), Arg(3));
- }
- } else if (is_binary_header(header)) {
- Eterm result;
- Uint live = Arg(1);
- Uint wordsneeded = ERL_BIN_MATCHSTATE_SIZE(slots);
- TestHeapPreserve(wordsneeded, live, context);
- HEAP_TOP(c_p) = HTOP;
+#ifndef NO_JUMP_TABLE
#ifdef DEBUG
- c_p->stop = E; /* Needed for checking in HeapOnlyAlloc(). */
+ end_emulator_loop:
+#endif
#endif
- result = erts_bs_start_match_2(c_p, context, slots);
- HTOP = HEAP_TOP(c_p);
- HEAP_SPACE_VERIFIED(0);
- if (is_non_value(result)) {
- ClauseFail();
- } else {
- StoreResult(result, Arg(3));
- }
- } else {
- ClauseFail();
- }
- NextPF(4, next);
- }
- OpCase(i_bs_start_match2_xfIId): {
- context = xb(Arg(0));
- I++;
- goto do_start_match;
- }
- OpCase(i_bs_start_match2_yfIId): {
- context = yb(Arg(0));
- I++;
- goto do_start_match;
- }
- }
-
- OpCase(bs_test_zero_tail2_fr): {
- BeamInstr *next;
- ErlBinMatchBuffer *_mb;
-
- PreFetch(1, next);
- _mb = (ErlBinMatchBuffer*) ms_matchbuffer(r(0));
- if (_mb->size != _mb->offset) {
- ClauseFail();
- }
- NextPF(1, next);
- }
-
- OpCase(bs_test_zero_tail2_fx): {
- BeamInstr *next;
- ErlBinMatchBuffer *_mb;
-
- PreFetch(2, next);
- _mb = (ErlBinMatchBuffer*) ms_matchbuffer(xb(Arg(1)));
- if (_mb->size != _mb->offset) {
- ClauseFail();
- }
- NextPF(2, next);
- }
-
- OpCase(bs_test_tail_imm2_frI): {
- BeamInstr *next;
- ErlBinMatchBuffer *_mb;
- PreFetch(2, next);
- _mb = ms_matchbuffer(r(0));
- if (_mb->size - _mb->offset != Arg(1)) {
- ClauseFail();
- }
- NextPF(2, next);
- }
- OpCase(bs_test_tail_imm2_fxI): {
- BeamInstr *next;
- ErlBinMatchBuffer *_mb;
- PreFetch(3, next);
- _mb = ms_matchbuffer(xb(Arg(1)));
- if (_mb->size - _mb->offset != Arg(2)) {
- ClauseFail();
- }
- NextPF(3, next);
- }
-
- OpCase(bs_test_unit_frI): {
- BeamInstr *next;
- ErlBinMatchBuffer *_mb;
- PreFetch(2, next);
- _mb = ms_matchbuffer(r(0));
- if ((_mb->size - _mb->offset) % Arg(1)) {
- ClauseFail();
- }
- NextPF(2, next);
- }
- OpCase(bs_test_unit_fxI): {
- BeamInstr *next;
- ErlBinMatchBuffer *_mb;
- PreFetch(3, next);
- _mb = ms_matchbuffer(xb(Arg(1)));
- if ((_mb->size - _mb->offset) % Arg(2)) {
- ClauseFail();
- }
- NextPF(3, next);
- }
-
- OpCase(bs_test_unit8_fr): {
- BeamInstr *next;
- ErlBinMatchBuffer *_mb;
- PreFetch(1, next);
- _mb = ms_matchbuffer(r(0));
- if ((_mb->size - _mb->offset) & 7) {
- ClauseFail();
- }
- NextPF(1, next);
- }
- OpCase(bs_test_unit8_fx): {
- BeamInstr *next;
- ErlBinMatchBuffer *_mb;
- PreFetch(2, next);
- _mb = ms_matchbuffer(xb(Arg(1)));
- if ((_mb->size - _mb->offset) & 7) {
- ClauseFail();
- }
- NextPF(2, next);
- }
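/*
 * A sketch of the unit tests above: the unmatched tail must be a whole
 * number of units. The unit-8 specialization replaces the modulo with a
 * mask, which works for any power-of-two unit.
 */
static int tail_is_whole_units(unsigned long size_bits,
                               unsigned long offset_bits,
                               unsigned long unit)
{
    unsigned long tail = size_bits - offset_bits;
    if ((unit & (unit - 1)) == 0)        /* power of two, e.g. 8 */
        return (tail & (unit - 1)) == 0; /* mask, as in bs_test_unit8 */
    return tail % unit == 0;
}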
-
- {
- Eterm bs_get_integer8_context;
-
- OpCase(i_bs_get_integer_8_rfd): {
- bs_get_integer8_context = r(0);
- goto do_bs_get_integer_8;
- }
-
- OpCase(i_bs_get_integer_8_xfd): {
- bs_get_integer8_context = xb(Arg(0));
- I++;
- }
- do_bs_get_integer_8: {
- ErlBinMatchBuffer *_mb;
- Eterm _result;
- _mb = ms_matchbuffer(bs_get_integer8_context);
- if (_mb->size - _mb->offset < 8) {
- ClauseFail();
- }
- if (BIT_OFFSET(_mb->offset) != 0) {
- _result = erts_bs_get_integer_2(c_p, 8, 0, _mb);
- } else {
- _result = make_small(_mb->base[BYTE_OFFSET(_mb->offset)]);
- _mb->offset += 8;
- }
- StoreBifResult(1, _result);
- }
- }
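/*
 * A sketch of the fast path above: an 8-bit fetch can read one byte
 * directly only when the match offset is byte-aligned; otherwise the
 * general bit-extraction routine has to run.
 */
static int get_aligned_byte(const unsigned char *base,
                            unsigned long offset_bits,
                            unsigned int *out)
{
    if (offset_bits & 7)
        return 0;                     /* unaligned: take the slow path */
    *out = base[offset_bits >> 3];
    return 1;
}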
+ OpCase(int_code_end):
+ OpCase(label_L):
+ OpCase(on_load):
+ OpCase(line_I):
+ erts_exit(ERTS_ERROR_EXIT, "meta op\n");
- {
- Eterm bs_get_integer_16_context;
-
- OpCase(i_bs_get_integer_16_rfd):
- bs_get_integer_16_context = r(0);
- goto do_bs_get_integer_16;
-
- OpCase(i_bs_get_integer_16_xfd):
- bs_get_integer_16_context = xb(Arg(0));
- I++;
-
- do_bs_get_integer_16:
- {
- ErlBinMatchBuffer *_mb;
- Eterm _result;
- _mb = ms_matchbuffer(bs_get_integer_16_context);
- if (_mb->size - _mb->offset < 16) {
- ClauseFail();
- }
- if (BIT_OFFSET(_mb->offset) != 0) {
- _result = erts_bs_get_integer_2(c_p, 16, 0, _mb);
- } else {
- _result = make_small(get_int16(_mb->base+BYTE_OFFSET(_mb->offset)));
- _mb->offset += 16;
- }
- StoreBifResult(1, _result);
- }
- }
+ /*
+     * One-time initialization of the BEAM emulator.
+ */
+ init_emulator:
{
- Eterm bs_get_integer_32_context;
-
- OpCase(i_bs_get_integer_32_rfId):
- bs_get_integer_32_context = r(0);
- goto do_bs_get_integer_32;
-
-
- OpCase(i_bs_get_integer_32_xfId):
- bs_get_integer_32_context = xb(Arg(0));
- I++;
-
-
- do_bs_get_integer_32:
- {
- ErlBinMatchBuffer *_mb;
- Uint32 _integer;
- Eterm _result;
- _mb = ms_matchbuffer(bs_get_integer_32_context);
- if (_mb->size - _mb->offset < 32) { ClauseFail(); }
- if (BIT_OFFSET(_mb->offset) != 0) {
- _integer = erts_bs_get_unaligned_uint32(_mb);
- } else {
- _integer = get_int32(_mb->base + _mb->offset/8);
- }
- _mb->offset += 32;
-#if !defined(ARCH_64) || HALFWORD_HEAP
- if (IS_USMALL(0, _integer)) {
-#endif
- _result = make_small(_integer);
-#if !defined(ARCH_64) || HALFWORD_HEAP
- } else {
- TestHeap(BIG_UINT_HEAP_SIZE, Arg(1));
- _result = uint_to_big((Uint) _integer, HTOP);
- HTOP += BIG_UINT_HEAP_SIZE;
- HEAP_SPACE_VERIFIED(0);
- }
+#ifndef NO_JUMP_TABLE
+#ifdef ERTS_OPCODE_COUNTER_SUPPORT
+#ifdef DEBUG
+ counting_opcodes[op_catch_end_y] = LabelAddr(lb_catch_end_y);
#endif
- StoreBifResult(2, _result);
- }
- }
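/*
 * A sketch of the tagging decision above. An immediate "small" integer
 * loses some bits of the machine word to its tag, so on a 32-bit (or
 * halfword) build a fetched 32-bit value may need a heap-allocated
 * bignum. The 4-bit tag width here is an assumption for illustration.
 */
#include <stdint.h>

#define SMALL_TAG_BITS 4   /* assumed tag width, for illustration */

static int fits_in_small(uint32_t value, int word_bits)
{
    int value_bits = word_bits - SMALL_TAG_BITS - 1; /* unsigned range */
    if (value_bits >= 32)
        return 1;                   /* 64-bit word: every uint32 fits */
    return value < ((uint32_t)1 << value_bits);
}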
-
- /* Operands: Size Live Fail Flags Dst */
- OpCase(i_bs_get_integer_imm_rIIfId): {
- tmp_arg1 = r(0);
- /* Operands: Size Live Fail Flags Dst */
- goto do_bs_get_integer_imm_test_heap;
- }
-
- /* Operands: x(Reg) Size Live Fail Flags Dst */
- OpCase(i_bs_get_integer_imm_xIIfId): {
- tmp_arg1 = xb(Arg(0));
- I++;
- /* Operands: Size Live Fail Flags Dst */
- goto do_bs_get_integer_imm_test_heap;
- }
-
- /*
- * tmp_arg1 = match context
- * Operands: Size Live Fail Flags Dst
- */
- do_bs_get_integer_imm_test_heap: {
- Uint wordsneeded;
- tmp_arg2 = Arg(0);
- wordsneeded = 1+WSIZE(NBYTES(tmp_arg2));
- TestHeapPreserve(wordsneeded, Arg(1), tmp_arg1);
- I += 2;
- /* Operands: Fail Flags Dst */
- goto do_bs_get_integer_imm;
- }
-
- /* Operands: Size Fail Flags Dst */
- OpCase(i_bs_get_integer_small_imm_rIfId): {
- tmp_arg1 = r(0);
- tmp_arg2 = Arg(0);
- I++;
- /* Operands: Fail Flags Dst */
- goto do_bs_get_integer_imm;
- }
-
- /* Operands: x(Reg) Size Fail Flags Dst */
- OpCase(i_bs_get_integer_small_imm_xIfId): {
- tmp_arg1 = xb(Arg(0));
- tmp_arg2 = Arg(1);
- I += 2;
- /* Operands: Fail Flags Dst */
- goto do_bs_get_integer_imm;
- }
-
- /*
- * tmp_arg1 = match context
- * tmp_arg2 = size of field
- * Operands: Fail Flags Dst
- */
- do_bs_get_integer_imm: {
- ErlBinMatchBuffer* mb;
- Eterm result;
-
- mb = ms_matchbuffer(tmp_arg1);
- LIGHT_SWAPOUT;
- result = erts_bs_get_integer_2(c_p, tmp_arg2, Arg(1), mb);
- LIGHT_SWAPIN;
- HEAP_SPACE_VERIFIED(0);
- if (is_non_value(result)) {
- ClauseFail();
- }
- StoreBifResult(2, result);
- }
-
- /*
- * tmp_arg1 = Match context
- * tmp_arg2 = Size field
- * Operands: Fail Live FlagsAndUnit Dst
- */
- OpCase(i_bs_get_integer_fIId): {
- Uint flags;
- Uint size;
- ErlBinMatchBuffer* mb;
- Eterm result;
-
- flags = Arg(2);
- BsGetFieldSize(tmp_arg2, (flags >> 3), ClauseFail(), size);
- if (size >= SMALL_BITS) {
- Uint wordsneeded;
-	    /* Check the bit size before a potential GC.
-	     * We do not want to GC and then realize that we do not need
-	     * the allocated space (i.e. if the op fails).
-	     *
-	     * Remember to reacquire the match buffer after GC.
- */
+ counting_opcodes[op_i_func_info_IaaI] = LabelAddr(lb_i_func_info_IaaI);
+ beam_ops = counting_opcodes;
+#else /* #ifndef ERTS_OPCODE_COUNTER_SUPPORT */
+ beam_ops = opcodes;
+#endif /* ERTS_OPCODE_COUNTER_SUPPORT */
+#endif /* NO_JUMP_TABLE */
- mb = ms_matchbuffer(tmp_arg1);
- if (mb->size - mb->offset < size) {
- ClauseFail();
- }
- wordsneeded = 1+WSIZE(NBYTES((Uint) size));
- TestHeapPreserve(wordsneeded, Arg(1), tmp_arg1);
- }
- mb = ms_matchbuffer(tmp_arg1);
- LIGHT_SWAPOUT;
- result = erts_bs_get_integer_2(c_p, size, flags, mb);
- LIGHT_SWAPIN;
- HEAP_SPACE_VERIFIED(0);
- if (is_non_value(result)) {
- ClauseFail();
- }
- StoreBifResult(3, result);
+ init_emulator_finish();
+ return;
}
+#ifdef NO_JUMP_TABLE
+ default:
+ erts_exit(ERTS_ERROR_EXIT, "unexpected op code %d\n",Go);
+ }
+#endif
+ return; /* Never executed */
- {
- Eterm get_utf8_context;
-
- /* Operands: MatchContext Fail Dst */
- OpCase(i_bs_get_utf8_rfd): {
- get_utf8_context = r(0);
- goto do_bs_get_utf8;
- }
+ save_calls1:
+ {
+ BeamInstr dis_next;
- OpCase(i_bs_get_utf8_xfd): {
- get_utf8_context = xb(Arg(0));
- I++;
- }
+ save_calls(c_p, (Export *) Arg(0));
- /*
- * get_utf8_context = match_context
- * Operands: Fail Dst
- */
+ SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]);
- do_bs_get_utf8: {
- Eterm result = erts_bs_get_utf8(ms_matchbuffer(get_utf8_context));
- if (is_non_value(result)) {
- ClauseFail();
- }
- StoreBifResult(1, result);
- }
- }
+ dis_next = *I;
+ FCALLS--;
+ Goto(dis_next);
+ }
+}
- {
- Eterm get_utf16_context;
+/*
+ * One-time initialization of the emulator. Does not need to be
+ * in process_main().
+ */
+static void
+init_emulator_finish(void)
+{
+ int i;
+ Export* ep;
- /* Operands: MatchContext Fail Flags Dst */
- OpCase(i_bs_get_utf16_rfId): {
- get_utf16_context = r(0);
- goto do_bs_get_utf16;
+#if defined(ARCH_64) && defined(CODE_MODEL_SMALL)
+ for (i = 0; i < NUMBER_OF_OPCODES; i++) {
+ BeamInstr instr = BeamOpCodeAddr(i);
+ if (instr >= (1ull << 32)) {
+ erts_exit(ERTS_ERROR_EXIT,
+		      "This run-time was supposed to be compiled with all code below 2Gb,\n"
+ "but the instruction '%s' is located at %016lx.\n",
+ opc[i].name, instr);
+ }
}
+#endif
- OpCase(i_bs_get_utf16_xfId): {
- get_utf16_context = xb(Arg(0));
- I++;
- }
+ beam_apply[0] = BeamOpCodeAddr(op_i_apply);
+ beam_apply[1] = BeamOpCodeAddr(op_normal_exit);
+ beam_exit[0] = BeamOpCodeAddr(op_error_action_code);
+ beam_continue_exit[0] = BeamOpCodeAddr(op_continue_exit);
+ beam_return_to_trace[0] = BeamOpCodeAddr(op_i_return_to_trace);
+ beam_return_trace[0] = BeamOpCodeAddr(op_return_trace);
+ beam_exception_trace[0] = BeamOpCodeAddr(op_return_trace); /* UGLY */
+ beam_return_time_trace[0] = BeamOpCodeAddr(op_i_return_time_trace);
/*
- * get_utf16_context = match_context
- * Operands: Fail Flags Dst
+ * Enter all BIFs into the export table.
*/
- do_bs_get_utf16: {
- Eterm result = erts_bs_get_utf16(ms_matchbuffer(get_utf16_context),
- Arg(1));
- if (is_non_value(result)) {
- ClauseFail();
- }
- StoreBifResult(2, result);
- }
- }
-
- {
- Eterm context_to_binary_context;
- ErlBinMatchBuffer* mb;
- ErlSubBin* sb;
- Uint size;
- Uint offs;
- Uint orig;
- Uint hole_size;
-
- OpCase(bs_context_to_binary_r): {
- context_to_binary_context = x0;
- I -= 2;
- goto do_context_to_binary;
- }
-
- /* Unfortunately, inlining can generate this instruction. */
- OpCase(bs_context_to_binary_y): {
- context_to_binary_context = yb(Arg(0));
- goto do_context_to_binary0;
- }
-
- OpCase(bs_context_to_binary_x): {
- context_to_binary_context = xb(Arg(0));
-
- do_context_to_binary0:
- I--;
- }
-
- do_context_to_binary:
- if (is_boxed(context_to_binary_context) &&
- header_is_bin_matchstate(*boxed_val(context_to_binary_context))) {
- ErlBinMatchState* ms;
- ms = (ErlBinMatchState *) boxed_val(context_to_binary_context);
- mb = &ms->mb;
- offs = ms->save_offset[0];
- size = mb->size - offs;
- goto do_bs_get_binary_all_reuse_common;
- }
- Next(2);
-
- OpCase(i_bs_get_binary_all_reuse_rfI): {
- context_to_binary_context = x0;
- goto do_bs_get_binary_all_reuse;
- }
-
- OpCase(i_bs_get_binary_all_reuse_xfI): {
- context_to_binary_context = xb(Arg(0));
- I++;
- }
-
- do_bs_get_binary_all_reuse:
- mb = ms_matchbuffer(context_to_binary_context);
- size = mb->size - mb->offset;
- if (size % Arg(1) != 0) {
- ClauseFail();
- }
- offs = mb->offset;
-
- do_bs_get_binary_all_reuse_common:
- orig = mb->orig;
- sb = (ErlSubBin *) boxed_val(context_to_binary_context);
- hole_size = 1 + header_arity(sb->thing_word) - ERL_SUB_BIN_SIZE;
- sb->thing_word = HEADER_SUB_BIN;
- sb->size = BYTE_OFFSET(size);
- sb->bitsize = BIT_OFFSET(size);
- sb->offs = BYTE_OFFSET(offs);
- sb->bitoffs = BIT_OFFSET(offs);
- sb->is_writable = 0;
- sb->orig = orig;
- if (hole_size) {
- sb[1].thing_word = make_pos_bignum_header(hole_size-1);
- }
- Next(2);
- }
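/*
 * The in-place shrink above relies on a heap-hole trick worth spelling
 * out: when a larger boxed object (the match state) is overwritten by a
 * smaller one (the sub-binary), the leftover words are given a header
 * that makes them look like a harmless heap object, so heap traversal
 * stays consistent. A sketch, with an assumed filler-header constructor:
 */
typedef unsigned long term_word;

static void fill_heap_hole(term_word *hole, unsigned long hole_words,
                           term_word (*make_filler_header)(unsigned long))
{
    if (hole_words > 0)
        hole[0] = make_filler_header(hole_words - 1); /* header + payload */
}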
-
- {
- Eterm match_string_context;
-
- OpCase(i_bs_match_string_rfII): {
- match_string_context = r(0);
- goto do_bs_match_string;
- }
- OpCase(i_bs_match_string_xfII): {
- match_string_context = xb(Arg(0));
- I++;
- }
-
- do_bs_match_string:
- {
- BeamInstr *next;
- byte* bytes;
- Uint bits;
- ErlBinMatchBuffer* mb;
- Uint offs;
-
- PreFetch(3, next);
- bits = Arg(1);
- bytes = (byte *) Arg(2);
- mb = ms_matchbuffer(match_string_context);
- if (mb->size - mb->offset < bits) {
- ClauseFail();
- }
- offs = mb->offset & 7;
- if (offs == 0 && (bits & 7) == 0) {
- if (sys_memcmp(bytes, mb->base+(mb->offset>>3), bits>>3)) {
- ClauseFail();
- }
- } else if (erts_cmp_bits(bytes, 0, mb->base+(mb->offset>>3), mb->offset & 7, bits)) {
- ClauseFail();
- }
- mb->offset += bits;
- NextPF(3, next);
+ for (i = 0; i < BIF_SIZE; i++) {
+ ep = erts_export_put(bif_table[i].module,
+ bif_table[i].name,
+ bif_table[i].arity);
+ bif_export[i] = ep;
+ ep->beam[0] = BeamOpCodeAddr(op_apply_bif);
+ ep->beam[1] = (BeamInstr) bif_table[i].f;
+ /* XXX: set func info for bifs */
+ ep->info.op = BeamOpCodeAddr(op_i_func_info_IaaI);
}
- }
-
- OpCase(i_bs_save2_rI): {
- BeamInstr *next;
- ErlBinMatchState *_ms;
- PreFetch(1, next);
- _ms = (ErlBinMatchState*) boxed_val((Eterm) r(0));
- _ms->save_offset[Arg(0)] = _ms->mb.offset;
- NextPF(1, next);
- }
- OpCase(i_bs_save2_xI): {
- BeamInstr *next;
- ErlBinMatchState *_ms;
- PreFetch(2, next);
- _ms = (ErlBinMatchState*) boxed_val((Eterm) xb(Arg(0)));
- _ms->save_offset[Arg(1)] = _ms->mb.offset;
- NextPF(2, next);
- }
-
- OpCase(i_bs_restore2_rI): {
- BeamInstr *next;
- ErlBinMatchState *_ms;
- PreFetch(1, next);
- _ms = (ErlBinMatchState*) boxed_val((Eterm) r(0));
- _ms->mb.offset = _ms->save_offset[Arg(0)];
- NextPF(1, next);
- }
- OpCase(i_bs_restore2_xI): {
- BeamInstr *next;
- ErlBinMatchState *_ms;
- PreFetch(2, next);
- _ms = (ErlBinMatchState*) boxed_val((Eterm) xb(Arg(0)));
- _ms->mb.offset = _ms->save_offset[Arg(1)];
- NextPF(2, next);
- }
-
-#include "beam_cold.h"
-
+}
- /*
-     * This instruction is probably never used (because it is combined with
-     * a return). However, a future compiler might for some reason emit a
- * deallocate not followed by a return, and that should work.
- */
- OpCase(deallocate_I): {
- BeamInstr *next;
+/*
+ * erts_dirty_process_main() is what dirty schedulers execute. Since they
+ * handle only NIF calls, they do not need to be able to execute all BEAM
+ * instructions.
+ */
+void erts_dirty_process_main(ErtsSchedulerData *esdp)
+{
+ Process* c_p = NULL;
+ ErtsMonotonicTime start_time;
+#ifdef DEBUG
+ ERTS_DECLARE_DUMMY(Eterm pid);
+#endif
- PreFetch(1, next);
- D(Arg(0));
- NextPF(1, next);
- }
+ /* Pointer to X registers: x(1)..x(N); reg[0] is used when doing GC,
+ * in all other cases x0 is used.
+ */
+ register Eterm* reg REG_xregs = NULL;
/*
- * Trace and debugging support.
+ * Top of heap (next free location); grows upwards.
*/
+ register Eterm* HTOP REG_htop = NULL;
- OpCase(return_trace): {
- BeamInstr* code = (BeamInstr *) (UWord) E[0];
-
- SWAPOUT; /* Needed for shared heap */
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- erts_trace_return(c_p, code, r(0), E+1/*Process tracer*/);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- SWAPIN;
- c_p->cp = NULL;
- SET_I((BeamInstr *) cp_val(E[2]));
- E += 3;
- Goto(*I);
- }
-
- OpCase(i_generic_breakpoint): {
- BeamInstr real_I;
- ASSERT(I[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- SWAPOUT;
- reg[0] = r(0);
- real_I = erts_generic_breakpoint(c_p, I, reg);
- r(0) = reg[0];
- SWAPIN;
- ASSERT(VALID_INSTR(real_I));
- Goto(real_I);
- }
+ /* Stack pointer. Grows downwards; points
+ * to last item pushed (normally a saved
+ * continuation pointer).
+ */
+ register Eterm* E REG_stop = NULL;
- OpCase(i_return_time_trace): {
- BeamInstr *pc = (BeamInstr *) (UWord) E[0];
- SWAPOUT;
- erts_trace_time_return(c_p, pc);
- SWAPIN;
- c_p->cp = NULL;
- SET_I((BeamInstr *) cp_val(E[1]));
- E += 2;
- Goto(*I);
- }
+ /*
+ * Pointer to next threaded instruction.
+ */
+ register BeamInstr *I REG_I = NULL;
- OpCase(i_return_to_trace): {
- if (IS_TRACED_FL(c_p, F_TRACE_RETURN_TO)) {
- Uint *cpp = (Uint*) E;
- for(;;) {
- ASSERT(is_CP(*cpp));
- if (*cp_val(*cpp) == (BeamInstr) OpCode(return_trace)) {
- do ++cpp; while(is_not_CP(*cpp));
- cpp += 2;
- } else if (*cp_val(*cpp) == (BeamInstr) OpCode(i_return_to_trace)) {
- do ++cpp; while(is_not_CP(*cpp));
- } else break;
- }
- SWAPOUT; /* Needed for shared heap */
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- erts_trace_return_to(c_p, cp_val(*cpp));
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- SWAPIN;
- }
- c_p->cp = NULL;
- SET_I((BeamInstr *) cp_val(E[0]));
- E += 1;
- Goto(*I);
- }
+ ERTS_MSACC_DECLARE_CACHE_X() /* a cached value of the tsd pointer for msacc */
- /*
- * New floating point instructions.
- */
+ /*
+     * start_time is always positive for dirty CPU schedulers
+     * and negative for dirty I/O schedulers.
+ */
- OpCase(fmove_ql): {
- Eterm fr = Arg(1);
- BeamInstr *next;
+ if (ERTS_SCHEDULER_IS_DIRTY_CPU(esdp)) {
+ start_time = erts_get_monotonic_time(NULL);
+ ASSERT(start_time >= 0);
+ }
+ else {
+ start_time = ERTS_SINT64_MIN;
+ ASSERT(start_time < 0);
+ }
- PreFetch(2, next);
- GET_DOUBLE(Arg(0), *(FloatDef*)ADD_BYTE_OFFSET(freg, fr));
- NextPF(2, next);
- }
+ goto do_dirty_schedule;
- OpCase(fmove_dl): {
- Eterm targ1;
- Eterm fr = Arg(1);
- BeamInstr *next;
+ context_switch:
+ c_p->current = erts_code_to_codemfa(I); /* Pointer to Mod, Func, Arity */
+ c_p->arity = c_p->current->arity;
- PreFetch(2, next);
- GetR(0, targ1);
- /* Arg(0) == HEADER_FLONUM */
- GET_DOUBLE(targ1, *(FloatDef*)ADD_BYTE_OFFSET(freg, fr));
- NextPF(2, next);
- }
+ {
+ int reds_used;
+ Eterm* argp;
+ int i;
- OpCase(fmove_ld): {
- Eterm fr = Arg(0);
- Eterm dest = make_float(HTOP);
+ /*
+ * Make sure that there is enough room for the argument registers to be saved.
+ */
+ if (c_p->arity > c_p->max_arg_reg) {
+ /*
+	     * Yes, this is an expensive operation, but you only pay it the first
+	     * time a function with more than 6 arguments is scheduled out. This
+	     * is better than paying for 26 words of wasted space for the majority
+	     * of processes, which never call functions with more than 6
+	     * arguments.
+ */
+ Uint size = c_p->arity * sizeof(c_p->arg_reg[0]);
+ if (c_p->arg_reg != c_p->def_arg_reg) {
+ c_p->arg_reg = (Eterm *) erts_realloc(ERTS_ALC_T_ARG_REG,
+ (void *) c_p->arg_reg,
+ size);
+ } else {
+ c_p->arg_reg = (Eterm *) erts_alloc(ERTS_ALC_T_ARG_REG, size);
+ }
+ c_p->max_arg_reg = c_p->arity;
+ }
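/*
 * A standalone sketch of the grow-on-demand pattern above: keep a small
 * inline register array for the common case and switch to (or grow) a
 * separate allocation only when the arity first exceeds it. malloc and
 * realloc stand in for the ERTS allocator; no copy is needed because the
 * caller writes all argument registers immediately afterwards.
 */
#include <stdlib.h>

#define DEF_ARG_REG 6

typedef struct {
    unsigned long def_arg_reg[DEF_ARG_REG]; /* inline storage */
    unsigned long *arg_reg;                 /* inline or heap */
    unsigned int max_arg_reg;
} proc_args;

static int ensure_arg_regs(proc_args *p, unsigned int arity)
{
    if (arity > p->max_arg_reg) {
        size_t size = arity * sizeof(p->arg_reg[0]);
        unsigned long *mem = (p->arg_reg == p->def_arg_reg)
            ? malloc(size)             /* first growth: leave inline regs */
            : realloc(p->arg_reg, size);
        if (mem == NULL)
            return 0;
        p->arg_reg = mem;
        p->max_arg_reg = arity;
    }
    return 1;
}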
- PUT_DOUBLE(*(FloatDef*)ADD_BYTE_OFFSET(freg, fr), HTOP);
- HTOP += FLOAT_SIZE_OBJECT;
- StoreBifResult(1, dest);
- }
+ /*
+ * Save the argument registers and everything else.
+ */
- OpCase(fconv_dl): {
- Eterm targ1;
- Eterm fr = Arg(1);
- BeamInstr *next;
-
- GetR(0, targ1);
- PreFetch(2, next);
- if (is_small(targ1)) {
- fb(fr) = (double) signed_val(targ1);
- } else if (is_big(targ1)) {
- if (big_to_double(targ1, &fb(fr)) < 0) {
- goto fbadarith;
- }
- } else if (is_float(targ1)) {
- GET_DOUBLE(targ1, *(FloatDef*)ADD_BYTE_OFFSET(freg, fr));
- } else {
- goto fbadarith;
- }
- NextPF(2, next);
- }
+ argp = c_p->arg_reg;
+ for (i = c_p->arity - 1; i >= 0; i--) {
+ argp[i] = reg[i];
+ }
+ SWAPOUT;
+ c_p->i = I;
-#ifdef NO_FPE_SIGNALS
- OpCase(fclearerror):
- OpCase(i_fcheckerror):
- erl_exit(1, "fclearerror/i_fcheckerror without fpe signals (beam_emu)");
-# define ERTS_NO_FPE_CHECK_INIT ERTS_FP_CHECK_INIT
-# define ERTS_NO_FPE_ERROR ERTS_FP_ERROR
-#else
-# define ERTS_NO_FPE_CHECK_INIT(p)
-# define ERTS_NO_FPE_ERROR(p, a, b)
+ do_dirty_schedule:
- OpCase(fclearerror): {
- BeamInstr *next;
+ if (start_time < 0) {
+ /*
+ * Dirty I/O scheduler:
+ * One reduction consumed regardless of
+ * time spent in the dirty NIF.
+ */
+ reds_used = esdp->virtual_reds + 1;
+ }
+ else {
+ /*
+ * Dirty CPU scheduler:
+ * Reductions based on time consumed by
+ * the dirty NIF.
+ */
+ Sint64 treds;
+ treds = erts_time2reds(start_time,
+ erts_get_monotonic_time(esdp));
+ treds += esdp->virtual_reds;
+ reds_used = treds > INT_MAX ? INT_MAX : (int) treds;
+ }
- PreFetch(0, next);
- ERTS_FP_CHECK_INIT(c_p);
- NextPF(0, next);
- }
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ c_p = erts_schedule(esdp, c_p, reds_used);
- OpCase(i_fcheckerror): {
- BeamInstr *next;
+ if (start_time >= 0) {
+ start_time = erts_get_monotonic_time(esdp);
+ ASSERT(start_time >= 0);
+ }
+ }
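/*
 * The reduction accounting above as a sketch: dirty I/O work is charged a
 * flat single reduction, while dirty CPU work is converted from elapsed
 * monotonic time and clamped to INT_MAX. time2reds() is a stand-in for
 * erts_time2reds.
 */
#include <limits.h>
#include <stdint.h>

static int dirty_reds_used(int is_io, int64_t start, int64_t now,
                           int virtual_reds,
                           int64_t (*time2reds)(int64_t, int64_t))
{
    int64_t treds;
    if (is_io)
        return virtual_reds + 1;    /* one reduction per dirty I/O job */
    treds = time2reds(start, now) + virtual_reds;
    return treds > INT_MAX ? INT_MAX : (int) treds;
}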
- PreFetch(0, next);
- ERTS_FP_ERROR(c_p, freg[0].fd, goto fbadarith);
- NextPF(0, next);
- }
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+#ifdef DEBUG
+ pid = c_p->common.id; /* Save for debugging purposes */
#endif
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ASSERT(!(c_p->flags & F_HIPE_MODE));
+ ERTS_MSACC_UPDATE_CACHE_X();
- OpCase(i_fadd_lll): {
- BeamInstr *next;
-
- PreFetch(3, next);
- ERTS_NO_FPE_CHECK_INIT(c_p);
- fb(Arg(2)) = fb(Arg(0)) + fb(Arg(1));
- ERTS_NO_FPE_ERROR(c_p, fb(Arg(2)), goto fbadarith);
- NextPF(3, next);
- }
- OpCase(i_fsub_lll): {
- BeamInstr *next;
-
- PreFetch(3, next);
- ERTS_NO_FPE_CHECK_INIT(c_p);
- fb(Arg(2)) = fb(Arg(0)) - fb(Arg(1));
- ERTS_NO_FPE_ERROR(c_p, fb(Arg(2)), goto fbadarith);
- NextPF(3, next);
- }
- OpCase(i_fmul_lll): {
- BeamInstr *next;
-
- PreFetch(3, next);
- ERTS_NO_FPE_CHECK_INIT(c_p);
- fb(Arg(2)) = fb(Arg(0)) * fb(Arg(1));
- ERTS_NO_FPE_ERROR(c_p, fb(Arg(2)), goto fbadarith);
- NextPF(3, next);
- }
- OpCase(i_fdiv_lll): {
- BeamInstr *next;
-
- PreFetch(3, next);
- ERTS_NO_FPE_CHECK_INIT(c_p);
- fb(Arg(2)) = fb(Arg(0)) / fb(Arg(1));
- ERTS_NO_FPE_ERROR(c_p, fb(Arg(2)), goto fbadarith);
- NextPF(3, next);
- }
- OpCase(i_fnegate_ll): {
- BeamInstr *next;
-
- PreFetch(2, next);
- ERTS_NO_FPE_CHECK_INIT(c_p);
- fb(Arg(1)) = -fb(Arg(0));
- ERTS_NO_FPE_ERROR(c_p, fb(Arg(1)), goto fbadarith);
- NextPF(2, next);
-
- fbadarith:
- c_p->freason = BADARITH;
- goto find_func_info;
- }
+ /*
+ * Set fcalls even though we ignore it, so we don't
+ * confuse code accessing it...
+ */
+ if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
+ c_p->fcalls = 0;
+ else
+ c_p->fcalls = CONTEXT_REDS;
-#ifdef HIPE
- {
- unsigned cmd;
+ if (erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_DIRTY_RUNNING_SYS) {
+ erts_execute_dirty_system_task(c_p);
+ goto do_dirty_schedule;
+ }
+ else {
+ ErtsCodeMFA *codemfa;
+ Eterm* argp;
+ int i, exiting;
- OpCase(hipe_trap_call): {
- /*
- * I[-5]: &&lb_i_func_info_IaaI
- * I[-4]: Native code callee (inserted by HiPE)
- * I[-3]: Module (tagged atom)
- * I[-2]: Function (tagged atom)
- * I[-1]: Arity (untagged integer)
- * I[ 0]: &&lb_hipe_trap_call
- * ... remainder of original BEAM code
- */
- ASSERT(I[-5] == (Uint) OpCode(i_func_info_IaaI));
- c_p->hipe.ncallee = (void(*)(void)) I[-4];
- cmd = HIPE_MODE_SWITCH_CMD_CALL | (I[-1] << 8);
- ++hipe_trap_count;
- goto L_hipe_mode_switch;
- }
- OpCase(hipe_trap_call_closure): {
- ASSERT(I[-5] == (Uint) OpCode(i_func_info_IaaI));
- c_p->hipe.ncallee = (void(*)(void)) I[-4];
- cmd = HIPE_MODE_SWITCH_CMD_CALL_CLOSURE | (I[-1] << 8);
- ++hipe_trap_count;
- goto L_hipe_mode_switch;
- }
- OpCase(hipe_trap_return): {
- cmd = HIPE_MODE_SWITCH_CMD_RETURN;
- goto L_hipe_mode_switch;
- }
- OpCase(hipe_trap_throw): {
- cmd = HIPE_MODE_SWITCH_CMD_THROW;
- goto L_hipe_mode_switch;
- }
- OpCase(hipe_trap_resume): {
- cmd = HIPE_MODE_SWITCH_CMD_RESUME;
- goto L_hipe_mode_switch;
- }
- L_hipe_mode_switch:
- /* XXX: this abuse of def_arg_reg[] is horrid! */
- SWAPOUT;
- c_p->fcalls = FCALLS;
- c_p->def_arg_reg[4] = -neg_o_reds;
- reg[0] = r(0);
- c_p = hipe_mode_switch(c_p, cmd, reg);
- reg = ERTS_PROC_GET_SCHDATA(c_p)->x_reg_array;
- freg = ERTS_PROC_GET_SCHDATA(c_p)->f_reg_array;
- ERL_BITS_RELOAD_STATEP(c_p);
- neg_o_reds = -c_p->def_arg_reg[4];
- FCALLS = c_p->fcalls;
- SWAPIN;
-	    switch( c_p->def_arg_reg[3] ) { /* Halfword won't work with HiPE yet! */
- case HIPE_MODE_SWITCH_RES_RETURN:
- ASSERT(is_value(reg[0]));
- MoveReturn(reg[0], r(0));
- case HIPE_MODE_SWITCH_RES_CALL:
- SET_I(c_p->i);
- r(0) = reg[0];
- Dispatch();
- case HIPE_MODE_SWITCH_RES_CALL_CLOSURE:
- /* This can be used to call any function value, but currently it's
- only used to call closures referring to unloaded modules. */
- {
- BeamInstr *next;
-
- next = call_fun(c_p, c_p->arity - 1, reg, THE_NON_VALUE);
- SWAPIN;
- if (next != NULL) {
- r(0) = reg[0];
- SET_I(next);
- Dispatchfun();
- }
- goto find_func_info;
- }
- case HIPE_MODE_SWITCH_RES_THROW:
- c_p->cp = NULL;
- I = handle_error(c_p, I, reg, NULL);
- goto post_error_handling;
- default:
- erl_exit(1, "hipe_mode_switch: result %u\n", c_p->def_arg_reg[3]);
- }
- }
- OpCase(hipe_call_count): {
- /*
- * I[-5]: &&lb_i_func_info_IaaI
- * I[-4]: pointer to struct hipe_call_count (inserted by HiPE)
- * I[-3]: Module (tagged atom)
- * I[-2]: Function (tagged atom)
- * I[-1]: Arity (untagged integer)
- * I[ 0]: &&lb_hipe_call_count
- * ... remainder of original BEAM code
- */
- struct hipe_call_count *hcc = (struct hipe_call_count*)I[-4];
- ASSERT(I[-5] == (Uint) OpCode(i_func_info_IaaI));
- ASSERT(hcc != NULL);
- ASSERT(VALID_INSTR(hcc->opcode));
- ++(hcc->count);
- Goto(hcc->opcode);
- }
-#endif /* HIPE */
+ reg = esdp->x_reg_array;
- OpCase(i_yield):
- {
- /* This is safe as long as REDS_IN(c_p) is never stored
- * in c_p->arg_reg[0]. It is currently stored in c_p->def_arg_reg[5],
- * which may be c_p->arg_reg[5], which is close, but no banana.
- */
- c_p->arg_reg[0] = am_true;
- c_p->arity = 1; /* One living register (the 'true' return value) */
- SWAPOUT;
- c_p->i = I + 1; /* Next instruction */
- c_p->current = NULL;
- goto do_schedule;
- }
-
- OpCase(i_hibernate): {
- SWAPOUT;
- if (erts_hibernate(c_p, r(0), x(1), x(2), reg)) {
- c_p->flags &= ~F_HIBERNATE_SCHED;
- goto do_schedule;
- } else {
- I = handle_error(c_p, I, reg, hibernate_3);
- goto post_error_handling;
- }
- }
+ argp = c_p->arg_reg;
+ for (i = c_p->arity - 1; i >= 0; i--) {
+ reg[i] = argp[i];
+ CHECK_TERM(reg[i]);
+ }
- OpCase(i_debug_breakpoint): {
- SWAPOUT;
- reg[0] = r(0);
- I = call_error_handler(c_p, I-3, reg, am_breakpoint);
- r(0) = reg[0];
- SWAPIN;
- if (I) {
- Goto(*I);
- }
- goto handle_error;
- }
+ /*
+ * We put the original reduction count in the process structure, to reduce
+ * the code size (referencing a field in a struct through a pointer stored
+ * in a register gives smaller code than referencing a global variable).
+ */
+ I = c_p->i;
- OpCase(system_limit_j):
- system_limit:
- c_p->freason = SYSTEM_LIMIT;
- goto lb_Cl_error;
+ SWAPIN;
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(process_scheduled)) {
+ DTRACE_CHARBUF(process_buf, DTRACE_TERM_BUF_SIZE);
+ DTRACE_CHARBUF(fun_buf, DTRACE_TERM_BUF_SIZE);
+ dtrace_proc_str(c_p, process_buf);
-#ifdef ERTS_OPCODE_COUNTER_SUPPORT
- DEFINE_COUNTING_LABELS;
-#endif
+ if (ERTS_PROC_IS_EXITING(c_p)) {
+ strcpy(fun_buf, "<exiting>");
+ } else {
+ ErtsCodeMFA *cmfa = find_function_from_pc(c_p->i);
+ if (cmfa) {
+ dtrace_fun_decode(c_p, cmfa, NULL, fun_buf);
+ } else {
+ erts_snprintf(fun_buf, sizeof(DTRACE_CHARBUF_NAME(fun_buf)),
+ "<unknown/%p>", *I);
+ }
+ }
-#ifndef NO_JUMP_TABLE
-#ifdef DEBUG
- end_emulator_loop:
-#endif
+ DTRACE2(process_scheduled, process_buf, fun_buf);
+ }
#endif
- OpCase(int_code_end):
- OpCase(label_L):
- OpCase(on_load):
- OpCase(line_I):
- erl_exit(1, "meta op\n");
-
- /*
- * One-time initialization of Beam emulator.
- */
-
- init_emulator:
- {
- int i;
- Export* ep;
+ /*
+ * call_nif is always first instruction in function:
+ *
+ * I[-3]: Module
+ * I[-2]: Function
+ * I[-1]: Arity
+ * I[0]: &&call_nif
+ * I[1]: Function pointer to NIF function
+ * I[2]: Pointer to erl_module_nif
+ * I[3]: Function pointer to dirty NIF
+ *
+ * This layout is determined by the NifExport struct
+ */
-#ifndef NO_JUMP_TABLE
-#ifdef ERTS_OPCODE_COUNTER_SUPPORT
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_NIF);
- /* Are tables correctly generated by beam_makeops? */
- ASSERT(sizeof(counting_opcodes) == sizeof(opcodes));
+ codemfa = erts_code_to_codemfa(I);
- if (count_instructions) {
-#ifdef DEBUG
- counting_opcodes[op_catch_end_y] = LabelAddr(lb_catch_end_y);
-#endif
- counting_opcodes[op_i_func_info_IaaI] = LabelAddr(lb_i_func_info_IaaI);
- beam_ops = counting_opcodes;
- }
- else
-#endif /* #ifndef ERTS_OPCODE_COUNTER_SUPPORT */
- {
- beam_ops = opcodes;
- }
-#endif /* NO_JUMP_TABLE */
-
- em_call_error_handler = OpCode(call_error_handler);
- em_apply_bif = OpCode(apply_bif);
- em_call_nif = OpCode(call_nif);
-
- beam_apply[0] = (BeamInstr) OpCode(i_apply);
- beam_apply[1] = (BeamInstr) OpCode(normal_exit);
- beam_exit[0] = (BeamInstr) OpCode(error_action_code);
- beam_continue_exit[0] = (BeamInstr) OpCode(continue_exit);
- beam_return_to_trace[0] = (BeamInstr) OpCode(i_return_to_trace);
- beam_return_trace[0] = (BeamInstr) OpCode(return_trace);
- beam_exception_trace[0] = (BeamInstr) OpCode(return_trace); /* UGLY */
- beam_return_time_trace[0] = (BeamInstr) OpCode(i_return_time_trace);
+ DTRACE_NIF_ENTRY(c_p, codemfa);
+ c_p->current = codemfa;
+ SWAPOUT;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
- /*
- * Enter all BIFs into the export table.
- */
- for (i = 0; i < BIF_SIZE; i++) {
- ep = erts_export_put(bif_table[i].module,
- bif_table[i].name,
- bif_table[i].arity);
- bif_export[i] = ep;
- ep->code[3] = (BeamInstr) OpCode(apply_bif);
- ep->code[4] = (BeamInstr) bif_table[i].f;
- /* XXX: set func info for bifs */
- ep->fake_op_func_info_for_hipe[0] = (BeamInstr) BeamOp(op_i_func_info_IaaI);
- }
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ if (BeamIsOpCode(*I, op_apply_bif)) {
+ exiting = erts_call_dirty_bif(esdp, c_p, I, reg);
+ }
+ else {
+ ASSERT(BeamIsOpCode(*I, op_call_nif));
+ exiting = erts_call_dirty_nif(esdp, c_p, I, reg);
+ }
- return;
- }
-#ifdef NO_JUMP_TABLE
- default:
- erl_exit(1, "unexpected op code %d\n",Go);
- }
-#endif
- return; /* Never executed */
+ ASSERT(!(c_p->flags & F_HIBERNATE_SCHED));
- save_calls1:
- {
- Eterm* dis_next;
-
- save_calls(c_p, (Export *) Arg(0));
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
+ if (exiting)
+ goto do_dirty_schedule;
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]);
+ DTRACE_NIF_RETURN(c_p, codemfa);
+ ERTS_HOLE_CHECK(c_p);
+ SWAPIN;
+ I = c_p->i;
+ goto context_switch;
+ }
+}
- dis_next = (Eterm *) *I;
- FCALLS--;
- Goto(dis_next);
+static ErtsCodeMFA *
+gcbif2mfa(void* gcf)
+{
+ int i;
+ for (i = 0; erts_gc_bifs[i].bif; i++) {
+ if (erts_gc_bifs[i].gc_bif == gcf)
+ return &bif_export[erts_gc_bifs[i].exp_ix]->info.mfa;
}
+ erts_exit(ERTS_ERROR_EXIT, "bad gc bif");
+ return NULL;
}
-static BifFunction
-translate_gc_bif(void* gcf)
+static ErtsCodeMFA *
+ubif2mfa(void* uf)
{
- if (gcf == erts_gc_length_1) {
- return length_1;
- } else if (gcf == erts_gc_size_1) {
- return size_1;
- } else if (gcf == erts_gc_bit_size_1) {
- return bit_size_1;
- } else if (gcf == erts_gc_byte_size_1) {
- return byte_size_1;
- } else if (gcf == erts_gc_map_size_1) {
- return map_size_1;
- } else if (gcf == erts_gc_abs_1) {
- return abs_1;
- } else if (gcf == erts_gc_float_1) {
- return float_1;
- } else if (gcf == erts_gc_round_1) {
- return round_1;
- } else if (gcf == erts_gc_trunc_1) {
- return round_1;
- } else if (gcf == erts_gc_binary_part_2) {
- return binary_part_2;
- } else if (gcf == erts_gc_binary_part_3) {
- return binary_part_3;
- } else {
- erl_exit(1, "bad gc bif");
+ int i;
+ for (i = 0; erts_u_bifs[i].bif; i++) {
+ if (erts_u_bifs[i].bif == uf)
+ return &bif_export[erts_u_bifs[i].exp_ix]->info.mfa;
}
+ erts_exit(ERTS_ERROR_EXIT, "bad u bif");
+ return NULL;
}
/*
@@ -5279,7 +1348,9 @@ Eterm error_atom[NUMBER_EXIT_CODES] = {
am_notalive, /* 14 */
am_system_limit, /* 15 */
am_try_clause, /* 16 */
- am_notsup /* 17 */
+ am_notsup, /* 17 */
+ am_badmap, /* 18 */
+ am_badkey, /* 19 */
};
/*
@@ -5303,15 +1374,27 @@ Eterm error_atom[NUMBER_EXIT_CODES] = {
*/
static BeamInstr*
-handle_error(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf)
+handle_error(Process* c_p, BeamInstr* pc, Eterm* reg, ErtsCodeMFA *bif_mfa)
{
Eterm* hp;
Eterm Value = c_p->fvalue;
Eterm Args = am_true;
- c_p->i = pc; /* In case we call erl_exit(). */
ASSERT(c_p->freason != TRAP); /* Should have been handled earlier. */
+ if (c_p->freason & EXF_RESTORE_NIF)
+ erts_nif_export_restore_error(c_p, &pc, reg, &bif_mfa);
+
+#ifdef DEBUG
+ if (bif_mfa) {
+ /* Verify that bif_mfa does not point into our nif export */
+ NifExport *nep = ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p);
+ ASSERT(!nep || !ErtsInArea(bif_mfa, (char *)nep, sizeof(NifExport)));
+ }
+#endif
+
+ c_p->i = pc; /* In case we call erts_exit(). */
+
/*
* Check if we have an arglist for the top level call. If so, this
* is encoded in Value, so we have to dig out the real Value as well
@@ -5334,7 +1417,7 @@ handle_error(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf)
* more modular.
*/
if (c_p->freason & EXF_SAVETRACE) {
- save_stacktrace(c_p, pc, reg, bf, Args);
+ save_stacktrace(c_p, pc, reg, bif_mfa, Args);
}
/*
@@ -5372,11 +1455,11 @@ handle_error(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf)
c_p->cp = 0; /* To avoid keeping stale references. */
return new_pc;
}
- if (c_p->catches > 0) erl_exit(1, "Catch not found");
+ if (c_p->catches > 0) erts_exit(ERTS_ERROR_EXIT, "Catch not found");
}
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
terminate_proc(c_p, Value);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
return NULL;
}
@@ -5404,8 +1487,10 @@ next_catch(Process* c_p, Eterm *reg) {
/* Can not follow cp here - code may be unloaded */
BeamInstr *cpp = c_p->cp;
if (cpp == beam_exception_trace) {
- erts_trace_exception(c_p, cp_val(ptr[0]),
- reg[1], reg[2], ptr+1);
+ ErtsCodeMFA *mfa = (ErtsCodeMFA*)cp_val(ptr[0]);
+ erts_trace_exception(c_p, mfa,
+ reg[1], reg[2],
+ ERTS_TRACER_FROM_ETERM(ptr+1));
/* Skip return_trace parameters */
ptr += 2;
} else if (cpp == beam_return_trace) {
@@ -5431,8 +1516,10 @@ next_catch(Process* c_p, Eterm *reg) {
if (is_catch(*ptr) && active_catches) goto found_catch;
}
if (cp_val(*prev) == beam_exception_trace) {
- erts_trace_exception(c_p, cp_val(ptr[0]),
- reg[1], reg[2], ptr+1);
+ ErtsCodeMFA *mfa = (ErtsCodeMFA*)cp_val(ptr[0]);
+ erts_trace_exception(c_p, mfa,
+ reg[1], reg[2],
+ ERTS_TRACER_FROM_ETERM(ptr+1));
}
/* Skip return_trace parameters */
ptr += 2;
@@ -5482,18 +1569,35 @@ next_catch(Process* c_p, Eterm *reg) {
static void
terminate_proc(Process* c_p, Eterm Value)
{
+ Eterm *hp;
+ Eterm Args = NIL;
+
/* Add a stacktrace if this is an error. */
if (GET_EXC_CLASS(c_p->freason) == EXTAG_ERROR) {
Value = add_stacktrace(c_p, Value, c_p->ftrace);
}
/* EXF_LOG is a primary exception flag */
if (c_p->freason & EXF_LOG) {
+ int alive = erts_is_alive;
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp, "Error in process %T ", c_p->common.id);
- if (erts_is_alive)
- erts_dsprintf(dsbufp, "on node %T ", erts_this_node->sysname);
- erts_dsprintf(dsbufp,"with exit value: %0.*T\n", display_items, Value);
- erts_send_error_to_logger(c_p->group_leader, dsbufp);
+
+ /* Build the format message */
+ erts_dsprintf(dsbufp, "Error in process ~p ");
+ if (alive)
+ erts_dsprintf(dsbufp, "on node ~p ");
+ erts_dsprintf(dsbufp, "with exit value:~n~p~n");
+
+ /* Build the args in reverse order */
+ hp = HAlloc(c_p, 2);
+ Args = CONS(hp, Value, Args);
+ if (alive) {
+ hp = HAlloc(c_p, 2);
+ Args = CONS(hp, erts_this_node->sysname, Args);
+ }
+ hp = HAlloc(c_p, 2);
+ Args = CONS(hp, c_p->common.id, Args);
+
+ erts_send_error_term_to_logger(c_p->group_leader, dsbufp, Args);
}
/*
* If we use a shared heap, the process will be garbage-collected.
@@ -5535,6 +1639,8 @@ expand_error_value(Process* c_p, Uint freason, Eterm Value) {
case (GET_EXC_INDEX(EXC_TRY_CLAUSE)):
case (GET_EXC_INDEX(EXC_BADFUN)):
case (GET_EXC_INDEX(EXC_BADARITY)):
+ case (GET_EXC_INDEX(EXC_BADMAP)):
+ case (GET_EXC_INDEX(EXC_BADKEY)):
/* Some common exceptions: value -> {atom, value} */
ASSERT(is_value(Value));
hp = HAlloc(c_p, 3);
@@ -5586,11 +1692,12 @@ expand_error_value(Process* c_p, Uint freason, Eterm Value) {
*/
static void
-save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
- Eterm args) {
+save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg,
+ ErtsCodeMFA *bif_mfa, Eterm args) {
struct StackTrace* s;
int sz;
int depth = erts_backtrace_depth; /* max depth (never negative) */
+
if (depth > 0) {
/* There will always be a current function */
depth --;
@@ -5606,33 +1713,30 @@ save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
s->depth = 0;
/*
- * If the failure was in a BIF other than 'error', 'exit' or
- * 'throw', find the bif-table index and save the argument
+ * If the failure was in a BIF other than 'error/1', 'error/2',
+     * 'exit/1' or 'throw/1', save the BIF's MFA and save the argument
* registers by consing up an arglist.
*/
- if (bf != NULL && bf != error_1 && bf != error_2 &&
- bf != exit_1 && bf != throw_1) {
- int i;
- int a = 0;
- for (i = 0; i < BIF_SIZE; i++) {
- if (bf == bif_table[i].f || bf == bif_table[i].traced) {
- Export *ep = bif_export[i];
- s->current = ep->code;
- a = bif_table[i].arity;
+ if (bif_mfa) {
+ if (bif_mfa->module == am_erlang) {
+ switch (bif_mfa->function) {
+ case am_error:
+ if (bif_mfa->arity == 1 || bif_mfa->arity == 2)
+ goto non_bif_stacktrace;
+ break;
+ case am_exit:
+ if (bif_mfa->arity == 1)
+ goto non_bif_stacktrace;
+ break;
+ case am_throw:
+ if (bif_mfa->arity == 1)
+ goto non_bif_stacktrace;
+ break;
+ default:
break;
}
}
- if (i >= BIF_SIZE) {
- /*
- * The Bif does not really exist (no BIF entry). It is a
- * TRAP and traps are called through apply_bif, which also
- * sets c_p->current (luckily).
- * OR it is a NIF called by call_nif where current is also set.
- */
- ASSERT(c_p->current);
- s->current = c_p->current;
- a = s->current[2];
- }
+ s->current = bif_mfa;
/* Save first stack entry */
ASSERT(pc);
if (depth > 0) {
@@ -5645,8 +1749,11 @@ save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
depth--;
}
s->pc = NULL;
- args = make_arglist(c_p, reg, a); /* Overwrite CAR(c_p->ftrace) */
+ args = make_arglist(c_p, reg, bif_mfa->arity); /* Overwrite CAR(c_p->ftrace) */
} else {
+
+ non_bif_stacktrace:
+
s->current = c_p->current;
/*
* For a function_clause error, the arguments are in the beam
@@ -5656,7 +1763,7 @@ save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
(GET_EXC_INDEX(EXC_FUNCTION_CLAUSE)) ) {
int a;
ASSERT(s->current);
- a = s->current[2];
+ a = s->current->arity;
args = make_arglist(c_p, reg, a); /* Overwrite CAR(c_p->ftrace) */
/* Save first stack entry */
ASSERT(c_p->cp);
@@ -5707,12 +1814,30 @@ erts_save_stacktrace(Process* p, struct StackTrace* s, int depth)
p->cp) {
/* Cannot follow cp here - code may be unloaded */
BeamInstr *cpp = p->cp;
+ int trace_cp;
if (cpp == beam_exception_trace || cpp == beam_return_trace) {
/* Skip return_trace parameters */
ptr += 2;
+ trace_cp = 1;
} else if (cpp == beam_return_to_trace) {
/* Skip return_to_trace parameters */
ptr += 1;
+ trace_cp = 1;
+ }
+ else {
+ trace_cp = 0;
+ }
+ if (trace_cp && s->pc == cpp) {
+ /*
+ * If process 'cp' points to a return/exception trace
+ * instruction and 'cp' has been saved as 'pc' in
+ * stacktrace, we need to update 'pc' in stacktrace
+ * with the actual 'cp' located on the top of the
+ * stack; otherwise, we will lose the top stackframe
+ * when building the stack trace.
+ */
+ ASSERT(is_CP(p->stop[0]));
+ s->pc = cp_val(p->stop[0]);
}
}
while (ptr < STACK_START(p) && depth > 0) {
@@ -5827,17 +1952,20 @@ build_stacktrace(Process* c_p, Eterm exc) {
erts_lookup_function_info(&fi, s->pc, 1);
} else if (GET_EXC_INDEX(s->freason) ==
GET_EXC_INDEX(EXC_FUNCTION_CLAUSE)) {
- erts_lookup_function_info(&fi, s->current, 1);
+ erts_lookup_function_info(&fi, erts_codemfa_to_code(s->current), 1);
} else {
erts_set_current_function(&fi, s->current);
}
+ depth = s->depth;
/*
- * If fi.current is still NULL, default to the initial function
+ * If fi.current is still NULL, and we have no
+ * stack at all, default to the initial function
* (e.g. spawn_link(erlang, abs, [1])).
*/
- if (fi.current == NULL) {
- erts_set_current_function(&fi, c_p->initial);
+ if (fi.mfa == NULL) {
+ if (depth <= 0)
+ erts_set_current_function(&fi, &c_p->u.initial);
args = am_true; /* Just in case */
} else {
args = get_args_from_exc(exc);
@@ -5847,13 +1975,12 @@ build_stacktrace(Process* c_p, Eterm exc) {
* Look up all saved continuation pointers and calculate
* needed heap space.
*/
- depth = s->depth;
stk = stkp = (FunctionInfo *) erts_alloc(ERTS_ALC_T_TMP,
depth*sizeof(FunctionInfo));
- heap_size = fi.needed + 2;
+ heap_size = fi.mfa ? fi.needed + 2 : 0;
for (i = 0; i < depth; i++) {
erts_lookup_function_info(stkp, s->trace[i], 1);
- if (stkp->current) {
+ if (stkp->mfa) {
heap_size += stkp->needed + 2;
stkp++;
}
@@ -5869,15 +1996,17 @@ build_stacktrace(Process* c_p, Eterm exc) {
res = CONS(hp, mfa, res);
hp += 2;
}
- hp = erts_build_mfa_item(&fi, hp, args, &mfa);
- res = CONS(hp, mfa, res);
+ if (fi.mfa) {
+ hp = erts_build_mfa_item(&fi, hp, args, &mfa);
+ res = CONS(hp, mfa, res);
+ }
erts_free(ERTS_ALC_T_TMP, (void *) stk);
return res;
}
static BeamInstr*
-call_error_handler(Process* p, BeamInstr* fi, Eterm* reg, Eterm func)
+call_error_handler(Process* p, ErtsCodeMFA* mfa, Eterm* reg, Eterm func)
{
Eterm* hp;
Export* ep;
@@ -5886,13 +2015,14 @@ call_error_handler(Process* p, BeamInstr* fi, Eterm* reg, Eterm func)
Uint sz;
int i;
+ DBG_TRACE_MFA_P(mfa, "call_error_handler");
/*
* Search for the error_handler module.
*/
ep = erts_find_function(erts_proc_get_error_handler(p), func, 3,
erts_active_code_ix());
if (ep == NULL) { /* No error handler */
- p->current = fi;
+ p->current = mfa;
p->freason = EXC_UNDEF;
return 0;
}
@@ -5901,7 +2031,7 @@ call_error_handler(Process* p, BeamInstr* fi, Eterm* reg, Eterm func)
* Create a list with all arguments in the x registers.
*/
- arity = fi[2];
+ arity = mfa->arity;
sz = 2 * arity;
if (HeapWordsLeft(p) < sz) {
erts_garbage_collect(p, sz, reg, arity);
@@ -5917,8 +2047,8 @@ call_error_handler(Process* p, BeamInstr* fi, Eterm* reg, Eterm func)
/*
* Set up registers for call to error_handler:<func>/3.
*/
- reg[0] = fi[0];
- reg[1] = fi[1];
+ reg[0] = mfa->module;
+ reg[1] = mfa->function;
reg[2] = args;
return ep->addressv[erts_active_code_ix()];
}
@@ -5967,12 +2097,112 @@ apply_setup_error_handler(Process* p, Eterm module, Eterm function, Uint arity,
return ep;
}
+static ERTS_INLINE void
+apply_bif_error_adjustment(Process *p, Export *ep,
+ Eterm *reg, Uint arity,
+ BeamInstr *I, Uint stack_offset)
+{
+ /*
+ * I is only set when the apply is a tail call, i.e.,
+ * from the instructions i_apply_only, i_apply_last_P,
+ * and apply_last_IP.
+ */
+ if (I
+ && BeamIsOpCode(ep->beam[0], op_apply_bif)
+ && (ep == bif_export[BIF_error_1]
+ || ep == bif_export[BIF_error_2]
+ || ep == bif_export[BIF_exit_1]
+ || ep == bif_export[BIF_throw_1])) {
+ /*
+ * We are about to tail apply one of the BIFs
+ * erlang:error/1, erlang:error/2, erlang:exit/1,
+ * or erlang:throw/1. Error handling of these BIFs is
+ * special!
+ *
+ * We need 'p->cp' to point into the calling
+ * function when handling the error after the BIF has
+ * been applied. This in order to get the topmost
+ * stackframe correct. Without the following adjustment,
+ * 'p->cp' will point into the function that called
+ * current function when handling the error. We add a
+     * been applied. This is in order to get the topmost
+ *
+ * Note that these BIFs unconditionally will cause
+ * an exception to be raised. That is, our modifications
+ * of 'p->cp' as well as the stack will be corrected by
+ * the error handling code.
+ *
+ * If we find an exception/return-to trace continuation
+ * pointer as the topmost continuation pointer, we do not
+ * need to do anything since the information already will
+ * be available for generation of the stacktrace.
+ */
+ int apply_only = stack_offset == 0;
+ BeamInstr *cpp;
+
+ if (apply_only) {
+ ASSERT(p->cp != NULL);
+ cpp = p->cp;
+ }
+ else {
+ ASSERT(is_CP(p->stop[0]));
+ cpp = cp_val(p->stop[0]);
+ }
+
+ if (cpp != beam_exception_trace
+ && cpp != beam_return_trace
+ && cpp != beam_return_to_trace) {
+ Uint need = stack_offset /* bytes */ / sizeof(Eterm);
+ if (need == 0)
+ need = 1; /* i_apply_only */
+ if (p->stop - p->htop < need)
+ erts_garbage_collect(p, (int) need, reg, arity+1);
+ p->stop -= need;
+
+ if (apply_only) {
+ /*
+ * Called from the i_apply_only instruction.
+ *
+ * 'p->cp' contains continuation pointer pointing
+ * into the function that called current function.
+ * We push that continuation pointer onto the stack,
+ * and set 'p->cp' to point into current function.
+ */
+
+ p->stop[0] = make_cp(p->cp);
+ p->cp = I;
+ }
+ else {
+ /*
+            * Called from an i_apply_last_P or apply_last_IP
+            * instruction.
+ *
+ * Calling instruction will after we return read
+ * a continuation pointer from the stack and write
+ * it to 'p->cp', and then remove the topmost
+ * stackframe of size 'stack_offset'.
+ *
+ * We have sized the dummy-stackframe so that it
+ * will be removed by the instruction we currently
+ * are executing, and leave the stackframe that
+ * normally would have been removed intact.
+ *
+ */
+ p->stop[0] = make_cp(I);
+ }
+ }
+ }
+}
+
static BeamInstr*
-apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
+apply(Process* p, Eterm* reg, BeamInstr *I, Uint stack_offset)
{
int arity;
Export* ep;
- Eterm tmp, this;
+ Eterm tmp;
+ Eterm module = reg[0];
+ Eterm function = reg[1];
+ Eterm args = reg[2];
/*
* Check the arguments which should be of the form apply(Module,
@@ -5993,26 +2223,45 @@ apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
return 0;
}
- /* The module argument may be either an atom or an abstract module
- * (currently implemented using tuples, but this might change).
- */
- this = THE_NON_VALUE;
- if (is_not_atom(module)) {
- Eterm* tp;
-
- if (is_not_tuple(module)) goto error;
- tp = tuple_val(module);
- if (arityval(tp[0]) < 1) goto error;
- this = module;
- module = tp[1];
- if (is_not_atom(module)) goto error;
+ while (1) {
+ Eterm m, f, a;
+
+ if (is_not_atom(module)) goto error;
+
+ if (module != am_erlang || function != am_apply)
+ break;
+
+ /* Adjust for multiple apply of apply/3... */
+
+ a = args;
+ if (is_list(a)) {
+ Eterm *consp = list_val(a);
+ m = CAR(consp);
+ a = CDR(consp);
+ if (is_list(a)) {
+ consp = list_val(a);
+ f = CAR(consp);
+ a = CDR(consp);
+ if (is_list(a)) {
+ consp = list_val(a);
+ a = CAR(consp);
+ if (is_nil(CDR(consp))) {
+ /* erlang:apply/3 */
+ module = m;
+ function = f;
+ args = a;
+ if (is_not_atom(f))
+ goto error;
+ continue;
+ }
+ }
+ }
+ }
+ break; /* != erlang:apply/3 */
}
-
/*
* Walk down the 3rd parameter of apply (the argument list) and copy
- * the parameters to the x registers (reg[]). If the module argument
- * was an abstract module, add 1 to the function arity and put the
- * module argument in the n+1st x register as a THIS reference.
+ * the parameters to the x registers (reg[]).
*/
tmp = args;
@@ -6029,9 +2278,6 @@ apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
if (is_not_nil(tmp)) { /* Must be well-formed list */
goto error;
}
- if (this != THE_NON_VALUE) {
- reg[arity++] = this;
- }
/*
* Get the index into the export table, or failing that the export
@@ -6045,18 +2291,14 @@ apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
} else if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) {
save_calls(p, ep);
}
-
-#ifdef USE_VM_CALL_PROBES
- if (DTRACE_ENABLED(global_function_entry)) {
- BeamInstr *fptr = (BeamInstr *) ep->addressv[erts_active_code_ix()];
- DTRACE_GLOBAL_CALL(p, (Eterm)fptr[-3], (Eterm)fptr[-2], (Uint)fptr[-1]);
- }
-#endif
+ apply_bif_error_adjustment(p, ep, reg, arity, I, stack_offset);
+ DTRACE_GLOBAL_CALL_FROM_EXPORT(p, ep);
return ep->addressv[erts_active_code_ix()];
}
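The while loop above flattens nested erlang:apply(erlang, apply, [M, F, A]) calls iteratively, so arbitrarily deep nesting costs no C-stack recursion. A standalone sketch of the same idea, using a hypothetical struct in place of real tagged list terms:

    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical stand-in for an MFA whose argument list may itself
     * describe another erlang:apply/3 call. */
    struct call {
        const char *module;
        const char *function;
        struct call *apply_args;  /* non-NULL: args are another apply/3 */
    };

    static const struct call *resolve(const struct call *c)
    {
        /* Equivalent of the loop condition: keep peeling while the
         * target is erlang:apply/3 wrapping another [M, F, A] list. */
        while (c->apply_args != NULL)
            c = c->apply_args;
        return c;
    }

    int main(void)
    {
        struct call target = { "lists",  "reverse", NULL };
        struct call inner  = { "erlang", "apply",   &target };
        struct call outer  = { "erlang", "apply",   &inner };

        const struct call *final = resolve(&outer);
        printf("%s:%s\n", final->module, final->function); /* lists:reverse */
        return 0;
    }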
static BeamInstr*
-fixed_apply(Process* p, Eterm* reg, Uint arity)
+fixed_apply(Process* p, Eterm* reg, Uint arity,
+ BeamInstr *I, Uint stack_offset)
{
Export* ep;
Eterm module;
@@ -6074,17 +2316,11 @@ fixed_apply(Process* p, Eterm* reg, Uint arity)
return 0;
}
- /* The module argument may be either an atom or an abstract module
- * (currently implemented using tuples, but this might change).
- */
- if (is_not_atom(module)) {
- Eterm* tp;
- if (is_not_tuple(module)) goto error;
- tp = tuple_val(module);
- if (arityval(tp[0]) < 1) goto error;
- module = tp[1];
- if (is_not_atom(module)) goto error;
- ++arity;
+ if (is_not_atom(module)) goto error;
+
+ /* Handle apply of apply/3... */
+ if (module == am_erlang && function == am_apply && arity == 3) {
+ return apply(p, reg, I, stack_offset);
}
/*
@@ -6100,21 +2336,19 @@ fixed_apply(Process* p, Eterm* reg, Uint arity)
} else if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) {
save_calls(p, ep);
}
-
-#ifdef USE_VM_CALL_PROBES
- if (DTRACE_ENABLED(global_function_entry)) {
- BeamInstr *fptr = (BeamInstr *) ep->addressv[erts_active_code_ix()];
- DTRACE_GLOBAL_CALL(p, (Eterm)fptr[-3], (Eterm)fptr[-2], (Uint)fptr[-1]);
- }
-#endif
+ apply_bif_error_adjustment(p, ep, reg, arity, I, stack_offset);
+ DTRACE_GLOBAL_CALL_FROM_EXPORT(p, ep);
return ep->addressv[erts_active_code_ix()];
}
int
-erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* reg)
+erts_hibernate(Process* c_p, Eterm* reg)
{
int arity;
Eterm tmp;
+ Eterm module = reg[0];
+ Eterm function = reg[1];
+ Eterm args = reg[2];
if (is_not_atom(module) || is_not_atom(function)) {
/*
@@ -6158,11 +2392,11 @@ erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* re
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(process_hibernate)) {
+ ErtsCodeMFA cmfa = { module, function, arity };
DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE);
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE);
- dtrace_fun_decode(c_p, module, function, arity,
- process_name, mfa);
- DTRACE2(process_hibernate, process_name, mfa);
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE);
+ dtrace_fun_decode(c_p, &cmfa, process_name, mfa_buf);
+ DTRACE2(process_hibernate, process_name, mfa_buf);
}
#endif
/*
@@ -6182,25 +2416,23 @@ erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* re
* If there are no waiting messages, garbage collect and
* shrink the heap.
*/
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
+ ERTS_MSGQ_MV_INQ2PRIVQ(c_p);
if (!c_p->msg.len) {
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
c_p->fvalue = NIL;
PROCESS_MAIN_CHK_LOCKS(c_p);
erts_garbage_collect_hibernate(c_p);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
-#ifdef ERTS_SMP
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
+ ERTS_MSGQ_MV_INQ2PRIVQ(c_p);
if (!c_p->msg.len)
-#endif
- erts_smp_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_ACTIVE);
+ erts_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_ACTIVE);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
}
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
- c_p->current = bif_export[BIF_hibernate_3]->code;
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
+ c_p->current = &bif_export[BIF_hibernate_3]->info.mfa;
c_p->flags |= F_HIBERNATE_SCHED; /* Needed also when woken! */
return 1;
}
@@ -6223,21 +2455,15 @@ call_fun(Process* p, /* Current process. */
if (is_fun_header(hdr)) {
ErlFunThing* funp = (ErlFunThing *) fun_val(fun);
- ErlFunEntry* fe;
- BeamInstr* code_ptr;
+ ErlFunEntry* fe = funp->fe;
+ BeamInstr* code_ptr = fe->address;
Eterm* var_ptr;
- int actual_arity;
- unsigned num_free;
-
- fe = funp->fe;
- num_free = funp->num_free;
- code_ptr = fe->address;
- actual_arity = (int) code_ptr[-1];
+ unsigned num_free = funp->num_free;
+ ErtsCodeMFA *mfa = erts_code_to_codemfa(code_ptr);
+ int actual_arity = mfa->arity;
if (actual_arity == arity+num_free) {
- DTRACE_LOCAL_CALL(p, (Eterm)code_ptr[-3],
- (Eterm)code_ptr[-2],
- code_ptr[-1]);
+ DTRACE_LOCAL_CALL(p, mfa);
if (num_free == 0) {
return code_ptr;
} else {
@@ -6292,34 +2518,49 @@ call_fun(Process* p, /* Current process. */
* representation (the module has never been loaded),
* or the module defining the fun has been unloaded.
*/
+
module = fe->module;
- if ((modp = erts_get_module(module, code_ix)) != NULL
- && modp->curr.code != NULL) {
+
+ ERTS_THR_READ_MEMORY_BARRIER;
+ if (fe->pend_purge_address) {
/*
- * There is a module loaded, but obviously the fun is not
- * defined in it. We must not call the error_handler
- * (or we will get into an infinite loop).
+ * The system is currently trying to purge the
+ * module containing this fun. Suspend the process
+ * and let it try again when the purge operation is
+ * done (may succeed or not).
*/
- goto badfun;
+ ep = erts_suspend_process_on_pending_purge_lambda(p, fe);
+ ASSERT(ep);
}
+ else {
+ if ((modp = erts_get_module(module, code_ix)) != NULL
+ && modp->curr.code_hdr != NULL) {
+ /*
+ * There is a module loaded, but obviously the fun is not
+ * defined in it. We must not call the error_handler
+ * (or we will get into an infinite loop).
+ */
+ goto badfun;
+ }
- /*
- * No current code for this module. Call the error_handler module
- * to attempt loading the module.
- */
+ /*
+ * No current code for this module. Call the error_handler module
+ * to attempt loading the module.
+ */
- ep = erts_find_function(erts_proc_get_error_handler(p),
- am_undefined_lambda, 3, code_ix);
- if (ep == NULL) { /* No error handler */
- p->current = NULL;
- p->freason = EXC_UNDEF;
- return NULL;
+ ep = erts_find_function(erts_proc_get_error_handler(p),
+ am_undefined_lambda, 3, code_ix);
+ if (ep == NULL) { /* No error handler */
+ p->current = NULL;
+ p->freason = EXC_UNDEF;
+ return NULL;
+ }
}
reg[0] = module;
reg[1] = fun;
reg[2] = args;
reg[3] = NIL;
- return ep->addressv[erts_active_code_ix()];
+ return ep->addressv[code_ix];
}
}
} else if (is_export_header(hdr)) {
@@ -6327,10 +2568,10 @@ call_fun(Process* p, /* Current process. */
int actual_arity;
ep = *((Export **) (export_val(fun) + 1));
- actual_arity = (int) ep->code[2];
+ actual_arity = ep->info.mfa.arity;
if (arity == actual_arity) {
- DTRACE_GLOBAL_CALL(p, ep->code[0], ep->code[1], (Uint)ep->code[2]);
+ DTRACE_GLOBAL_CALL(p, &ep->info.mfa);
return ep->addressv[erts_active_code_ix()];
} else {
/*
@@ -6384,7 +2625,7 @@ apply_fun(Process* p, Eterm fun, Eterm args, Eterm* reg)
}
if (is_not_nil(tmp)) { /* Must be well-formed list */
- p->freason = EXC_UNDEF;
+ p->freason = EXC_BADARG;
return NULL;
}
reg[arity] = fun;
@@ -6418,9 +2659,6 @@ new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free)
funp->fe = fe;
funp->num_free = num_free;
funp->creator = p->common.id;
-#ifdef HIPE
- funp->native_address = fe->native_address;
-#endif
funp->arity = (int)fe->address[-1] - num_free;
for (i = 0; i < num_free; i++) {
*hp++ = reg[i];
@@ -6428,104 +2666,140 @@ new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free)
return make_fun(funp);
}
-static int has_not_map_field(Eterm map, Eterm key)
+static Eterm get_map_element(Eterm map, Eterm key)
{
- map_t* mp;
- Eterm* keys;
- Uint i;
- Uint n;
-
- mp = (map_t *)map_val(map);
- keys = map_get_keys(mp);
- n = map_get_size(mp);
- if (is_immed(key)) {
- for (i = 0; i < n; i++) {
- if (keys[i] == key) {
- return 0;
+ Uint32 hx;
+ const Eterm *vs;
+ if (is_flatmap(map)) {
+ flatmap_t *mp;
+ Eterm *ks;
+ Uint i;
+ Uint n;
+
+ mp = (flatmap_t *)flatmap_val(map);
+ ks = flatmap_get_keys(mp);
+ vs = flatmap_get_values(mp);
+ n = flatmap_get_size(mp);
+ if (is_immed(key)) {
+ for (i = 0; i < n; i++) {
+ if (ks[i] == key) {
+ return vs[i];
+ }
}
- }
- } else {
- for (i = 0; i < n; i++) {
- if (EQ(keys[i], key)) {
- return 0;
+ } else {
+ for (i = 0; i < n; i++) {
+ if (EQ(ks[i], key)) {
+ return vs[i];
+ }
}
}
+ return THE_NON_VALUE;
}
- return 1;
+ ASSERT(is_hashmap(map));
+ hx = hashmap_make_hash(key);
+ vs = erts_hashmap_get(hx,key,map);
+ return vs ? *vs : THE_NON_VALUE;
}
-static Eterm get_map_element(Eterm map, Eterm key)
+static Eterm get_map_element_hash(Eterm map, Eterm key, Uint32 hx)
{
- map_t *mp;
- Eterm* ks, *vs;
- Uint i;
- Uint n;
-
- mp = (map_t *)map_val(map);
- ks = map_get_keys(mp);
- vs = map_get_values(mp);
- n = map_get_size(mp);
- if (is_immed(key)) {
- for (i = 0; i < n; i++) {
- if (ks[i] == key) {
- return vs[i];
+ const Eterm *vs;
+
+ if (is_flatmap(map)) {
+ flatmap_t *mp;
+ Eterm *ks;
+ Uint i;
+ Uint n;
+
+ mp = (flatmap_t *)flatmap_val(map);
+ ks = flatmap_get_keys(mp);
+ vs = flatmap_get_values(mp);
+ n = flatmap_get_size(mp);
+ if (is_immed(key)) {
+ for (i = 0; i < n; i++) {
+ if (ks[i] == key) {
+ return vs[i];
+ }
}
- }
- } else {
- for (i = 0; i < n; i++) {
- if (EQ(ks[i], key)) {
- return vs[i];
+ } else {
+ for (i = 0; i < n; i++) {
+ if (EQ(ks[i], key)) {
+ return vs[i];
+ }
}
}
+ return THE_NON_VALUE;
}
- return THE_NON_VALUE;
+
+ ASSERT(is_hashmap(map));
+ ASSERT(hx == hashmap_make_hash(key));
+ vs = erts_hashmap_get(hx, key, map);
+ return vs ? *vs : THE_NON_VALUE;
}
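Both lookup helpers above share the flatmap path: a linear scan over parallel key/value arrays, with a cheap word-equality fast path for immediates before the structural EQ comparison. A toy sketch of that scan, with string identity standing in for immediate equality and strcmp for EQ:

    #include <stdio.h>
    #include <string.h>

    #define NOT_FOUND ((const char *) 0)   /* THE_NON_VALUE in the VM */

    static const char *flat_get(const char **ks, const char **vs, size_t n,
                                const char *key)
    {
        size_t i;
        for (i = 0; i < n; i++) {
            /* cheap identity check first, deep comparison second */
            if (ks[i] == key || strcmp(ks[i], key) == 0)
                return vs[i];
        }
        return NOT_FOUND;
    }

    int main(void)
    {
        const char *ks[] = { "a", "b", "c" };
        const char *vs[] = { "1", "2", "3" };
        printf("%s\n", flat_get(ks, vs, 3, "b")); /* 2 */
        return 0;
    }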
-#define GET_TERM(term, dest) \
-do { \
- Eterm src = (Eterm)(term); \
- switch (src & _TAG_IMMED1_MASK) { \
- case (R_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER: \
- dest = x(0); \
- break; \
- case (X_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER: \
- dest = x(src >> _TAG_IMMED1_SIZE); \
- break; \
- case (Y_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER: \
- dest = y(src >> _TAG_IMMED1_SIZE); \
- break; \
- default: \
- dest = src; \
- break; \
- } \
+#define GET_TERM(term, dest) \
+do { \
+ Eterm src = (Eterm)(term); \
+ switch (loader_tag(src)) { \
+ case LOADER_X_REG: \
+ dest = x(loader_x_reg_index(src)); \
+ break; \
+ case LOADER_Y_REG: \
+ dest = y(loader_y_reg_index(src)); \
+ break; \
+ default: \
+ dest = src; \
+ break; \
+ } \
} while(0)
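GET_TERM resolves a loader-tagged operand to a live value: x and y register operands are dereferenced, and anything else is already a tagged term stored directly in the code. A rough standalone sketch with a made-up two-bit tag encoding (the real loader_tag()/loader_x_reg_index() encodings differ):

    #include <stdio.h>
    #include <stdint.h>

    typedef uintptr_t Eterm;

    enum { TAG_LITERAL = 0, TAG_XREG = 1, TAG_YREG = 2 };
    #define TAG_OF(w)   ((w) & 3u)
    #define INDEX_OF(w) ((w) >> 2)

    /* Mirror of GET_TERM's dispatch shape. */
    static Eterm get_term(Eterm src, const Eterm *x, const Eterm *y)
    {
        switch (TAG_OF(src)) {
        case TAG_XREG: return x[INDEX_OF(src)];
        case TAG_YREG: return y[INDEX_OF(src)];
        default:       return src; /* plain term embedded in the code */
        }
    }

    int main(void)
    {
        Eterm x[4] = { 100, 101, 102, 103 };
        Eterm y[4] = { 200, 201, 202, 203 };
        printf("%lu\n", (unsigned long) get_term((2 << 2) | TAG_XREG, x, y)); /* 102 */
        printf("%lu\n", (unsigned long) get_term((1 << 2) | TAG_YREG, x, y)); /* 201 */
        return 0;
    }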
static Eterm
-new_map(Process* p, Eterm* reg, BeamInstr* I)
+new_map(Process* p, Eterm* reg, Uint live, Uint n, BeamInstr* ptr)
{
- Uint n = Arg(3);
Uint i;
Uint need = n + 1 /* hdr */ + 1 /*size*/ + 1 /* ptr */ + 1 /* arity */;
Eterm keys;
Eterm *mhp,*thp;
Eterm *E;
- BeamInstr *ptr;
- map_t *mp;
+ flatmap_t *mp;
+ ErtsHeapFactory factory;
+
+ if (n > 2*MAP_SMALL_MAP_LIMIT) {
+ Eterm res;
+ if (HeapWordsLeft(p) < n) {
+ erts_garbage_collect(p, n, reg, live);
+ }
+
+ mhp = p->htop;
+ thp = p->htop;
+ E = p->stop;
+
+ for (i = 0; i < n/2; i++) {
+ GET_TERM(*ptr++, *mhp++);
+ GET_TERM(*ptr++, *mhp++);
+ }
+
+ p->htop = mhp;
+
+ erts_factory_proc_init(&factory, p);
+ res = erts_hashmap_from_array(&factory, thp, n/2, 0);
+ erts_factory_close(&factory);
+ return res;
+ }
if (HeapWordsLeft(p) < need) {
- erts_garbage_collect(p, need, reg, Arg(2));
+ erts_garbage_collect(p, need, reg, live);
}
thp = p->htop;
mhp = thp + 1 + n/2;
E = p->stop;
- ptr = &Arg(4);
keys = make_tuple(thp);
*thp++ = make_arityval(n/2);
- mp = (map_t *)mhp; mhp += MAP_HEADER_SIZE;
- mp->thing_word = MAP_HEADER;
+ mp = (flatmap_t *)mhp; mhp += MAP_HEADER_FLATMAP_SZ;
+ mp->thing_word = MAP_HEADER_FLATMAP;
mp->size = n/2;
mp->keys = keys;
@@ -6534,39 +2808,91 @@ new_map(Process* p, Eterm* reg, BeamInstr* I)
GET_TERM(*ptr++, *mhp++);
}
p->htop = mhp;
- return make_map(mp);
+ return make_flatmap(mp);
}
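The threshold check at the top of new_map is what selects the map representation: up to MAP_SMALL_MAP_LIMIT pairs (32 at the time of writing) stay a flatmap, and anything larger is built as a HAMT-based hashmap. A trivial sketch of that decision, remembering that n counts key/value operands, i.e. twice the number of pairs:

    #include <stdio.h>

    #define MAP_SMALL_MAP_LIMIT 32   /* assumed value from erl_map.h */

    static const char *representation(unsigned n_operands)
    {
        return (n_operands > 2 * MAP_SMALL_MAP_LIMIT) ? "hashmap (HAMT)"
                                                      : "flatmap";
    }

    int main(void)
    {
        printf("32 pairs -> %s\n", representation(64)); /* flatmap */
        printf("33 pairs -> %s\n", representation(66)); /* hashmap */
        return 0;
    }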
static Eterm
-update_map_assoc(Process* p, Eterm* reg, Eterm map, BeamInstr* I)
+new_small_map_lit(Process* p, Eterm* reg, Eterm keys_literal, Uint live, BeamInstr* ptr)
+{
+ Eterm* keys = tuple_val(keys_literal);
+ Uint n = arityval(*keys);
+ Uint need = n + 1 /* hdr */ + 1 /*size*/ + 1 /* ptr */ + 1 /* arity */;
+ Uint i;
+ flatmap_t *mp;
+ Eterm *mhp;
+ Eterm *E;
+
+ ASSERT(n <= MAP_SMALL_MAP_LIMIT);
+
+ if (HeapWordsLeft(p) < need) {
+ erts_garbage_collect(p, need, reg, live);
+ }
+
+ mhp = p->htop;
+ E = p->stop;
+
+ mp = (flatmap_t *)mhp; mhp += MAP_HEADER_FLATMAP_SZ;
+ mp->thing_word = MAP_HEADER_FLATMAP;
+ mp->size = n;
+ mp->keys = keys_literal;
+
+ for (i = 0; i < n; i++) {
+ GET_TERM(*ptr++, *mhp++);
+ }
+
+ p->htop = mhp;
+
+ return make_flatmap(mp);
+}
+
+static Eterm
+update_map_assoc(Process* p, Eterm* reg, Uint live, Uint n, BeamInstr* new_p)
{
- Uint n;
Uint num_old;
Uint num_updates;
Uint need;
- map_t *old_mp, *mp;
+ flatmap_t *old_mp, *mp;
Eterm res;
Eterm* hp;
Eterm* E;
Eterm* old_keys;
Eterm* old_vals;
- BeamInstr* new_p;
Eterm new_key;
Eterm* kp;
+ Eterm map;
- if (is_not_map(map)) {
- return THE_NON_VALUE;
+ num_updates = n / 2;
+ map = reg[live];
+
+ if (is_not_flatmap(map)) {
+ Uint32 hx;
+ Eterm val;
+
+ ASSERT(is_hashmap(map));
+ res = map;
+ E = p->stop;
+ while(num_updates--) {
+ /* assoc can't fail */
+ GET_TERM(new_p[0], new_key);
+ GET_TERM(new_p[1], val);
+ hx = hashmap_make_hash(new_key);
+
+ res = erts_hashmap_insert(p, hx, new_key, val, res, 0);
+
+ new_p += 2;
+ }
+ return res;
}
- old_mp = (map_t *) map_val(map);
- num_old = map_get_size(old_mp);
+ old_mp = (flatmap_t *) flatmap_val(map);
+ num_old = flatmap_get_size(old_mp);
/*
* If the old map is empty, create a new map.
*/
if (num_old == 0) {
- return new_map(p, reg, I+1);
+ return new_map(p, reg, live, n, new_p);
}
/*
@@ -6574,14 +2900,11 @@ update_map_assoc(Process* p, Eterm* reg, Eterm map, BeamInstr* I)
* update list are new).
*/
- num_updates = Arg(4) / 2;
- need = 2*(num_old+num_updates) + 1 + MAP_HEADER_SIZE;
+ need = 2*(num_old+num_updates) + 1 + MAP_HEADER_FLATMAP_SZ;
if (HeapWordsLeft(p) < need) {
- Uint live = Arg(3);
- reg[live] = map;
erts_garbage_collect(p, need, reg, live+1);
map = reg[live];
- old_mp = (map_t *)map_val(map);
+ old_mp = (flatmap_t *)flatmap_val(map);
}
/*
@@ -6612,16 +2935,15 @@ update_map_assoc(Process* p, Eterm* reg, Eterm map, BeamInstr* I)
kp = p->htop + 1; /* Point to first key */
hp = kp + num_old + num_updates;
- res = make_map(hp);
- mp = (map_t *)hp;
- hp += MAP_HEADER_SIZE;
- mp->thing_word = MAP_HEADER;
+ res = make_flatmap(hp);
+ mp = (flatmap_t *)hp;
+ hp += MAP_HEADER_FLATMAP_SZ;
+ mp->thing_word = MAP_HEADER_FLATMAP;
mp->keys = make_tuple(kp-1);
- old_vals = map_get_values(old_mp);
- old_keys = map_get_keys(old_mp);
+ old_vals = flatmap_get_values(old_mp);
+ old_keys = flatmap_get_keys(old_mp);
- new_p = &Arg(5);
GET_TERM(*new_p, new_key);
n = num_updates;
@@ -6707,8 +3029,17 @@ update_map_assoc(Process* p, Eterm* reg, Eterm map, BeamInstr* I)
n = kp - p->htop - 1; /* Actual number of keys/values */
*p->htop = make_arityval(n);
+ p->htop = hp;
mp->size = n;
- p->htop = hp;
+
+ /* The expensive case, need to build a hashmap */
+ if (n > MAP_SMALL_MAP_LIMIT) {
+ ErtsHeapFactory factory;
+ erts_factory_proc_init(&factory, p);
+ res = erts_hashmap_from_ks_and_vs(&factory,flatmap_get_keys(mp),
+ flatmap_get_values(mp),n);
+ erts_factory_close(&factory);
+ }
return res;
}
@@ -6717,33 +3048,67 @@ update_map_assoc(Process* p, Eterm* reg, Eterm map, BeamInstr* I)
*/
static Eterm
-update_map_exact(Process* p, Eterm* reg, Eterm map, BeamInstr* I)
+update_map_exact(Process* p, Eterm* reg, Uint live, Uint n, Eterm* new_p)
{
- Uint n;
Uint i;
Uint num_old;
Uint need;
- map_t *old_mp, *mp;
+ flatmap_t *old_mp, *mp;
Eterm res;
Eterm* hp;
Eterm* E;
Eterm* old_keys;
Eterm* old_vals;
- BeamInstr* new_p;
Eterm new_key;
+ Eterm map;
- if (is_not_map(map)) {
- return THE_NON_VALUE;
+ n /= 2; /* Number of values to be updated */
+ ASSERT(n > 0);
+ map = reg[live];
+
+ if (is_not_flatmap(map)) {
+ Uint32 hx;
+ Eterm val;
+
+ /* The compiler does not emit an is_map instruction before
+ * this one, so the argument may not be a map at all. */
+
+ if (is_not_hashmap(map)) {
+ p->freason = BADMAP;
+ p->fvalue = map;
+ return THE_NON_VALUE;
+ }
+
+ res = map;
+ E = p->stop;
+ while(n--) {
+ GET_TERM(new_p[0], new_key);
+ GET_TERM(new_p[1], val);
+ hx = hashmap_make_hash(new_key);
+
+ res = erts_hashmap_insert(p, hx, new_key, val, res, 1);
+ if (is_non_value(res)) {
+ p->fvalue = new_key;
+ p->freason = BADKEY;
+ return res;
+ }
+
+ new_p += 2;
+ }
+ return res;
}
- old_mp = (map_t *) map_val(map);
- num_old = map_get_size(old_mp);
+ old_mp = (flatmap_t *) flatmap_val(map);
+ num_old = flatmap_get_size(old_mp);
/*
- * If the old map is empty, create a new map.
+ * If the old map is empty, fail.
*/
if (num_old == 0) {
+ E = p->stop;
+ p->freason = BADKEY;
+ GET_TERM(new_p[0], p->fvalue);
return THE_NON_VALUE;
}
@@ -6751,13 +3116,11 @@ update_map_exact(Process* p, Eterm* reg, Eterm map, BeamInstr* I)
* Allocate the exact heap space needed.
*/
- need = num_old + MAP_HEADER_SIZE;
+ need = num_old + MAP_HEADER_FLATMAP_SZ;
if (HeapWordsLeft(p) < need) {
- Uint live = Arg(3);
- reg[live] = map;
erts_garbage_collect(p, need, reg, live+1);
map = reg[live];
- old_mp = (map_t *)map_val(map);
+ old_mp = (flatmap_t *)flatmap_val(map);
}
/*
@@ -6767,23 +3130,20 @@ update_map_exact(Process* p, Eterm* reg, Eterm map, BeamInstr* I)
hp = p->htop;
E = p->stop;
- old_vals = map_get_values(old_mp);
- old_keys = map_get_keys(old_mp);
+ old_vals = flatmap_get_values(old_mp);
+ old_keys = flatmap_get_keys(old_mp);
- res = make_map(hp);
- mp = (map_t *)hp;
- hp += MAP_HEADER_SIZE;
- mp->thing_word = MAP_HEADER;
+ res = make_flatmap(hp);
+ mp = (flatmap_t *)hp;
+ hp += MAP_HEADER_FLATMAP_SZ;
+ mp->thing_word = MAP_HEADER_FLATMAP;
mp->size = num_old;
mp->keys = old_mp->keys;
/* Get array of key/value pairs to be updated */
- new_p = &Arg(5);
GET_TERM(*new_p, new_key);
/* Update all values */
- n = Arg(4) / 2; /* Number of values to be updated */
- ASSERT(n > 0);
for (i = 0; i < num_old; i++) {
if (!EQ(*old_keys, new_key)) {
/* Not same keys */
@@ -6816,6 +3176,8 @@ update_map_exact(Process* p, Eterm* reg, Eterm map, BeamInstr* I)
* update list did not previously exist.
*/
ASSERT(hp == p->htop + need);
+ p->freason = BADKEY;
+ p->fvalue = new_key;
return THE_NON_VALUE;
}
#undef GET_TERM
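The two update paths differ exactly as Erlang's map syntax does: update_map_assoc implements => and may insert new keys, while update_map_exact implements := and fails with BADKEY (recording the offending key in p->fvalue) when the key is absent. A toy model of the distinction:

    #include <stdio.h>
    #include <string.h>

    /* Fixed-capacity stand-in for a small map. */
    struct toymap { const char *k[8]; int v[8]; size_t n; };

    static int update(struct toymap *m, const char *key, int val, int exact)
    {
        size_t i;
        for (i = 0; i < m->n; i++) {
            if (strcmp(m->k[i], key) == 0) { m->v[i] = val; return 1; }
        }
        if (exact)
            return 0;          /* := on a missing key: BADKEY in the VM */
        m->k[m->n] = key;      /* =>: append the new pair */
        m->v[m->n] = val;
        m->n++;
        return 1;
    }

    int main(void)
    {
        struct toymap m = { { "a" }, { 1 }, 1 };
        printf("assoc b: %d\n", update(&m, "b", 2, 0)); /* 1: inserted */
        printf("exact c: %d\n", update(&m, "c", 3, 1)); /* 0: bad key */
        return 0;
    }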
@@ -6837,15 +3199,27 @@ erts_is_builtin(Eterm Mod, Eterm Name, int arity)
Export e;
Export* ep;
- e.code[0] = Mod;
- e.code[1] = Name;
- e.code[2] = arity;
+ if (Mod == am_erlang) {
+ /*
+ * Special case for built-in functions that are implemented
+ * as instructions as opposed to SNIFs.
+ */
+ if (Name == am_apply && (arity == 2 || arity == 3)) {
+ return 1;
+ } else if (Name == am_yield && arity == 0) {
+ return 1;
+ }
+ }
+
+ e.info.mfa.module = Mod;
+ e.info.mfa.function = Name;
+ e.info.mfa.arity = arity;
if ((ep = export_get(&e)) == NULL) {
return 0;
}
- return ep->addressv[erts_active_code_ix()] == ep->code+3
- && (ep->code[3] == (BeamInstr) em_apply_bif);
+ return ep->addressv[erts_active_code_ix()] == ep->beam &&
+ BeamIsOpCode(ep->beam[0], op_apply_bif);
}
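The special cases added above exist because erlang:apply/2,3 and erlang:yield/0 are compiled to dedicated emulator instructions rather than apply_bif stubs, so the export-table test alone would wrongly report them as non-builtins. A standalone sketch of that screening, with strings standing in for atoms:

    #include <stdio.h>
    #include <string.h>

    static int is_instruction_builtin(const char *mod, const char *name,
                                      int arity)
    {
        if (strcmp(mod, "erlang") != 0)
            return 0;
        if (strcmp(name, "apply") == 0 && (arity == 2 || arity == 3))
            return 1;                    /* implemented as instructions */
        if (strcmp(name, "yield") == 0 && arity == 0)
            return 1;
        return 0;
    }

    int main(void)
    {
        printf("%d\n", is_instruction_builtin("erlang", "apply", 3)); /* 1 */
        printf("%d\n", is_instruction_builtin("lists", "map", 2));    /* 0 */
        return 0;
    }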
@@ -6860,7 +3234,7 @@ erts_current_reductions(Process *current, Process *p)
if (current != p) {
return 0;
} else if (current->fcalls < 0 && ERTS_PROC_GET_SAVED_CALLS_BUF(current)) {
- return -current->fcalls;
+ return current->fcalls + CONTEXT_REDS;
} else {
return REDS_IN(current) - current->fcalls;
}
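The changed branch reflects that fcalls lives on two scales: normally it counts down from the reductions granted at schedule-in, but with a save_calls buffer active it counts down from 0 towards -CONTEXT_REDS. Adding CONTEXT_REDS converts the latter scale back to the former, as this sketch illustrates (CONTEXT_REDS assumed to be 4000, its usual ERTS value):

    #include <stdio.h>

    #define CONTEXT_REDS 4000   /* assumed reductions per time slice */

    static long normalized_fcalls(long fcalls, int save_calls_active)
    {
        if (save_calls_active && fcalls < 0)
            return fcalls + CONTEXT_REDS;   /* shift back to normal scale */
        return fcalls;
    }

    int main(void)
    {
        /* 150 reductions consumed, expressed on either scale: */
        printf("%ld\n", normalized_fcalls(4000 - 150, 0)); /* 3850 */
        printf("%ld\n", normalized_fcalls(0 - 150, 1));    /* 3850 */
        return 0;
    }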
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index e96177cfd9..7331c331a6 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2014. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -31,16 +32,20 @@
#include "bif.h"
#include "external.h"
#include "beam_load.h"
+#include "beam_bp.h"
#include "big.h"
#include "erl_bits.h"
#include "beam_catches.h"
#include "erl_binary.h"
#include "erl_zlib.h"
+#include "erl_map.h"
+#include "erl_process_dict.h"
#ifdef HIPE
#include "hipe_bif0.h"
#include "hipe_mode_switch.h"
#include "hipe_arch.h"
+#include "hipe_load.h"
#endif
ErlDrvBinary* erts_gzinflate_buffer(char*, int);
@@ -50,12 +55,6 @@ ErlDrvBinary* erts_gzinflate_buffer(char*, int);
#define DEFINED 1
#define EXPORTED 2
-#ifdef NO_JUMP_TABLE
-# define BeamOpCode(Op) ((BeamInstr)(Op))
-#else
-# define BeamOpCode(Op) ((BeamInstr)beam_ops[Op])
-#endif
-
#if defined(WORDS_BIGENDIAN)
# define NATIVE_ENDIAN(F) \
if ((F).val & BSF_NATIVE) { \
@@ -76,16 +75,28 @@ ErlDrvBinary* erts_gzinflate_buffer(char*, int);
#define TE_FAIL (-1)
#define TE_SHORT_WINDOW (-2)
+/*
+ * Type for a reference to a label that must be patched.
+ */
+
typedef struct {
- Uint value; /* Value of label (NULL if not known yet). */
- Uint patches; /* Index (into code buffer) to first location
- * which must be patched with the value of this label.
- */
-#ifdef ERTS_SMP
+ Uint pos; /* Position of label reference to patch. */
+ Uint offset; /* Offset from patch location. */
+ int packed; /* 0 (not packed), 1 (lsw), 2 (msw) */
+} LabelPatch;
+
+/*
+ * Type for a label.
+ */
+
+typedef struct {
+ Uint value; /* Value of label (0 if not known yet). */
Uint looprec_targeted; /* Non-zero if this label is the target of a loop_rec
* instruction.
*/
-#endif
+ LabelPatch* patches; /* Array of label patches. */
+ Uint num_patches; /* Number of patches in array. */
+ Uint num_allocated; /* Number of allocated patches. */
} Label;
/*
@@ -102,7 +113,7 @@ typedef struct {
*/
typedef struct genop {
- int op; /* Opcode. */
+ unsigned int op; /* Opcode. */
int arity; /* Number of arguments. */
GenOpArg def_args[MAX_OPARGS]; /* Default buffer for arguments. */
GenOpArg* a; /* The arguments. */
@@ -152,13 +163,15 @@ typedef struct {
#define STR_CHUNK 2
#define IMP_CHUNK 3
#define EXP_CHUNK 4
-#define NUM_MANDATORY 5
+#define MIN_MANDATORY 1
+#define MAX_MANDATORY 5
#define LAMBDA_CHUNK 5
#define LITERAL_CHUNK 6
#define ATTR_CHUNK 7
#define COMPILE_CHUNK 8
#define LINE_CHUNK 9
+#define UTF8_ATOM_CHUNK 10
#define NUM_CHUNK_TYPES (sizeof(chunk_types)/sizeof(chunk_types[0]))
@@ -168,9 +181,13 @@ typedef struct {
static Uint chunk_types[] = {
/*
- * Mandatory chunk types -- these MUST be present.
+ * Atom chunk types -- Atom or AtU8 MUST be present.
*/
MakeIffId('A', 't', 'o', 'm'), /* 0 */
+
+ /*
+ * Mandatory chunk types -- these MUST be present.
+ */
MakeIffId('C', 'o', 'd', 'e'), /* 1 */
MakeIffId('S', 't', 'r', 'T'), /* 2 */
MakeIffId('I', 'm', 'p', 'T'), /* 3 */
@@ -184,6 +201,7 @@ static Uint chunk_types[] = {
MakeIffId('A', 't', 't', 'r'), /* 7 */
MakeIffId('C', 'I', 'n', 'f'), /* 8 */
MakeIffId('L', 'i', 'n', 'e'), /* 9 */
+ MakeIffId('A', 't', 'U', '8'), /* 10 */
};
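Each entry in chunk_types packs a four-character chunk name big-endian into one word, following the EA IFF container convention that BEAM files use. A small sketch of the packing (MAKE_IFF_ID is a stand-in for the emulator's MakeIffId):

    #include <stdio.h>

    typedef unsigned int Uint32;

    #define MAKE_IFF_ID(a, b, c, d) \
        (((Uint32)(a) << 24) | ((Uint32)(b) << 16) | \
         ((Uint32)(c) << 8)  |  (Uint32)(d))

    int main(void)
    {
        printf("Atom = 0x%08x\n", MAKE_IFF_ID('A', 't', 'o', 'm')); /* 0x41746f6d */
        printf("AtU8 = 0x%08x\n", MAKE_IFF_ID('A', 't', 'U', '8')); /* 0x41745538 */
        return 0;
    }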
/*
@@ -204,10 +222,7 @@ typedef struct {
typedef struct {
Eterm term; /* The tagged term (in the heap). */
- Uint heap_size; /* (Exact) size on the heap. */
- SWord offset; /* Offset from temporary location to final. */
- ErlOffHeap off_heap; /* Start of linked list of ProcBins. */
- Eterm* heap; /* Heap for term. */
+ ErlHeapFragment* heap_frags;
} Literal;
/*
@@ -218,7 +233,7 @@ typedef struct {
typedef struct literal_patch LiteralPatch;
struct literal_patch {
- int pos; /* Position in code */
+ Uint pos; /* Position in code */
LiteralPatch* next;
};
@@ -245,7 +260,7 @@ typedef struct {
/*
* This structure contains all information about the module being loaded.
*/
-
+#define MD5_SIZE 16
typedef struct LoaderState {
/*
* The current logical file within the binary.
@@ -283,32 +298,34 @@ typedef struct LoaderState {
byte* code_start; /* Start of code file. */
unsigned code_size; /* Size of code file. */
int specific_op; /* Specific opcode (-1 if not found). */
- int num_functions; /* Number of functions in module. */
- int num_labels; /* Number of labels. */
- int code_buffer_size; /* Size of code buffer in words. */
- BeamInstr* code; /* Loaded code. */
- int ci; /* Current index into loaded code. */
+ unsigned int num_functions; /* Number of functions in module. */
+ unsigned int num_labels; /* Number of labels. */
+ BeamCodeHeader* hdr; /* Loaded code header */
+ BeamInstr* codev; /* Loaded code buffer */
+ int codev_size; /* Size of code buffer in words. */
+ int ci; /* Current index into loaded code buffer. */
Label* labels;
StringPatch* string_patches; /* Linked list of position into string table to patch. */
BeamInstr catches; /* Linked list of catch_yf instructions. */
unsigned loaded_size; /* Final size of code when loaded. */
- byte mod_md5[16]; /* MD5 for module code. */
+ byte mod_md5[MD5_SIZE]; /* MD5 for module code. */
int may_load_nif; /* true if NIFs may later be loaded for this module */
int on_load; /* Index in the code for the on_load function
* (or 0 if there is no on_load function)
*/
+ int otp_20_or_higher; /* Compiled with OTP 20 or higher */
/*
* Atom table.
*/
- int num_atoms; /* Number of atoms in atom table. */
+ unsigned int num_atoms; /* Number of atoms in atom table. */
Eterm* atom; /* Atom table. */
- int num_exps; /* Number of exports. */
+ unsigned int num_exps; /* Number of exports. */
ExportEntry* export; /* Pointer to export table. */
- int num_imports; /* Number of imports. */
+ unsigned int num_imports; /* Number of imports. */
ImportEntry* import; /* Import entry (translated information). */
/*
@@ -322,8 +339,8 @@ typedef struct LoaderState {
* Lambda table.
*/
- int num_lambdas; /* Number of lambdas in table. */
- int lambdas_allocated; /* Size of allocated lambda table. */
+ unsigned int num_lambdas; /* Number of lambdas in table. */
+ unsigned int lambdas_allocated; /* Size of allocated lambda table. */
Lambda* lambdas; /* Pointer to lambdas. */
Lambda def_lambdas[16]; /* Default storage for lambda table. */
char* lambda_error; /* Delayed missing 'FunT' error. */
@@ -332,8 +349,8 @@ typedef struct LoaderState {
* Literals (constant pool).
*/
- int num_literals; /* Number of literals in table. */
- int allocated_literals; /* Number of literal entries allocated. */
+ unsigned int num_literals; /* Number of literals in table. */
+ unsigned int allocated_literals; /* Number of literal entries allocated. */
Literal* literals; /* Array of literals. */
LiteralPatch* literal_patches; /* Operands that need to be patched. */
Uint total_literal_size; /* Total heap size for all literals. */
@@ -342,13 +359,13 @@ typedef struct LoaderState {
* Line table.
*/
BeamInstr* line_item; /* Line items from the BEAM file. */
- int num_line_items; /* Number of line items. */
+ unsigned int num_line_items;/* Number of line items. */
LineInstr* line_instr; /* Line instructions */
- int num_line_instrs; /* Maximum number of line instructions */
- int current_li; /* Current line instruction */
- int* func_line; /* Mapping from function to first line instr */
+ unsigned int num_line_instrs; /* Maximum number of line instructions */
+ unsigned int current_li; /* Current line instruction */
+ unsigned int* func_line; /* Mapping from function to first line instr */
Eterm* fname; /* List of file names */
- int num_fnames; /* Number of filenames in fname table */
+ unsigned int num_fnames; /* Number of filenames in fname table */
int loc_size; /* Size of location info in bytes (2/4) */
} LoaderState;
@@ -476,15 +493,20 @@ typedef struct LoaderState {
static void free_loader_state(Binary* magic);
-static void loader_state_dtor(Binary* magic);
-static Eterm insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm group_leader, Eterm module,
- BeamInstr* code, Uint size);
+static ErlHeapFragment* new_literal_fragment(Uint size);
+static void free_literal_fragment(ErlHeapFragment*);
+static int loader_state_dtor(Binary* magic);
+#ifdef HIPE
+static Eterm stub_insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
+ Eterm group_leader, Eterm module,
+ BeamCodeHeader* code_hdr, Uint size,
+ HipeModule *hipe_code);
+#endif
static int init_iff_file(LoaderState* stp, byte* code, Uint size);
static int scan_iff_file(LoaderState* stp, Uint* chunk_types,
- Uint num_types, Uint num_mandatory);
+ Uint num_types);
static int verify_chunks(LoaderState* stp);
-static int load_atom_table(LoaderState* stp);
+static int load_atom_table(LoaderState* stp, ErtsAtomEncoding enc);
static int load_import_table(LoaderState* stp);
static int read_export_table(LoaderState* stp);
static int is_bif(Eterm mod, Eterm func, unsigned arity);
@@ -492,6 +514,7 @@ static int read_lambda_table(LoaderState* stp);
static int read_literal_table(LoaderState* stp);
static int read_line_table(LoaderState* stp);
static int read_code_header(LoaderState* stp);
+static void init_label(Label* lp);
static int load_code(LoaderState* stp);
static GenOp* gen_element(LoaderState* stp, GenOpArg Fail, GenOpArg Index,
GenOpArg Tuple, GenOpArg Dst);
@@ -511,7 +534,7 @@ static GenOp* gen_get_map_element(LoaderState* stp, GenOpArg Fail, GenOpArg Src,
static int freeze_code(LoaderState* stp);
-static void final_touch(LoaderState* stp);
+static void final_touch(LoaderState* stp, struct erl_module_instance* inst_p);
static void short_file(int line, LoaderState* stp, unsigned needed);
static void load_printf(int line, LoaderState* context, char *fmt, ...);
static int transform_engine(LoaderState* st);
@@ -522,15 +545,19 @@ static int get_tag_and_value(LoaderState* stp, Uint len_code,
static int new_label(LoaderState* stp);
static void new_literal_patch(LoaderState* stp, int pos);
static void new_string_patch(LoaderState* stp, int pos);
+static int find_literal(LoaderState* stp, Eterm needle, Uint *idx);
static Uint new_literal(LoaderState* stp, Eterm** hpp, Uint heap_size);
static int genopargcompare(GenOpArg* a, GenOpArg* b);
-static Eterm exported_from_module(Process* p, Eterm mod);
-static Eterm functions_in_module(Process* p, Eterm mod);
-static Eterm attributes_for_module(Process* p, Eterm mod);
-static Eterm compilation_info_for_module(Process* p, Eterm mod);
-static Eterm native_addresses(Process* p, Eterm mod);
-int patch_funentries(Eterm Patchlist);
-int patch(Eterm Addresses, Uint fe);
+static Eterm get_module_info(Process* p, ErtsCodeIndex code_ix,
+ BeamCodeHeader*, Eterm module, Eterm what);
+static Eterm exported_from_module(Process* p, ErtsCodeIndex code_ix,
+ Eterm mod);
+static Eterm functions_in_module(Process* p, BeamCodeHeader*);
+static Eterm attributes_for_module(Process* p, BeamCodeHeader*);
+static Eterm compilation_info_for_module(Process* p, BeamCodeHeader*);
+static Eterm md5_of_module(Process* p, BeamCodeHeader*);
+static Eterm has_native(BeamCodeHeader*);
+static Eterm native_addresses(Process* p, BeamCodeHeader*);
static int safe_mul(UWord a, UWord b, UWord* resp);
static int must_swap_floats;
@@ -595,6 +622,7 @@ extern void check_allocated_block(Uint type, void *blk);
#define CHKBLK(TYPE,BLK) /* nothing */
#endif
+
Eterm
erts_prepare_loading(Binary* magic, Process *c_p, Eterm group_leader,
Eterm* modp, byte* code, Uint unloaded_size)
@@ -617,7 +645,7 @@ erts_prepare_loading(Binary* magic, Process *c_p, Eterm group_leader,
CHKALLOC();
CHKBLK(ERTS_ALC_T_CODE,stp->code);
if (!init_iff_file(stp, code, unloaded_size) ||
- !scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY) ||
+ !scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES) ||
!verify_chunks(stp)) {
goto load_error;
}
@@ -635,28 +663,43 @@ erts_prepare_loading(Binary* magic, Process *c_p, Eterm group_leader,
/*
* Initialize code area.
*/
- stp->code_buffer_size = 2048 + stp->num_functions;
- stp->code = (BeamInstr *) erts_alloc(ERTS_ALC_T_CODE,
- sizeof(BeamInstr) * stp->code_buffer_size);
+ stp->codev_size = 2048 + stp->num_functions;
+ stp->hdr = (BeamCodeHeader*) erts_alloc(ERTS_ALC_T_CODE,
+ (offsetof(BeamCodeHeader,functions)
+ + sizeof(BeamInstr) * stp->codev_size));
- stp->code[MI_NUM_FUNCTIONS] = stp->num_functions;
- stp->ci = MI_FUNCTIONS + stp->num_functions + 1;
+ stp->hdr->num_functions = stp->num_functions;
- stp->code[MI_ATTR_PTR] = 0;
- stp->code[MI_ATTR_SIZE] = 0;
- stp->code[MI_ATTR_SIZE_ON_HEAP] = 0;
- stp->code[MI_COMPILE_PTR] = 0;
- stp->code[MI_COMPILE_SIZE] = 0;
- stp->code[MI_COMPILE_SIZE_ON_HEAP] = 0;
+ /* Let the codev array start at functions[0] in order to index
+ * both function pointers and the loaded code itself that follows.
+ */
+ stp->codev = (BeamInstr*) &stp->hdr->functions;
+ stp->ci = stp->num_functions + 1;
+
+ stp->hdr->attr_ptr = NULL;
+ stp->hdr->attr_size = 0;
+ stp->hdr->attr_size_on_heap = 0;
+ stp->hdr->compile_ptr = NULL;
+ stp->hdr->compile_size = 0;
+ stp->hdr->compile_size_on_heap = 0;
+ stp->hdr->literal_area = NULL;
+ stp->hdr->md5_ptr = NULL;
/*
* Read the atom table.
*/
CHKBLK(ERTS_ALC_T_CODE,stp->code);
- define_file(stp, "atom table", ATOM_CHUNK);
- if (!load_atom_table(stp)) {
- goto load_error;
+ if (stp->chunks[UTF8_ATOM_CHUNK].size > 0) {
+ define_file(stp, "utf8 atom table", UTF8_ATOM_CHUNK);
+ if (!load_atom_table(stp, ERTS_ATOM_ENC_UTF8)) {
+ goto load_error;
+ }
+ } else {
+ define_file(stp, "atom table", ATOM_CHUNK);
+ if (!load_atom_table(stp, ERTS_ATOM_ENC_LATIN1)) {
+ goto load_error;
+ }
}
/*
@@ -706,6 +749,13 @@ erts_prepare_loading(Binary* magic, Process *c_p, Eterm group_leader,
}
/*
+ * Find out whether the code was compiled with OTP 20
+ * or higher.
+ */
+
+ stp->otp_20_or_higher = stp->chunks[UTF8_ATOM_CHUNK].size > 0;
+
+ /*
* Load the code chunk.
*/
@@ -750,37 +800,94 @@ Eterm
erts_finish_loading(Binary* magic, Process* c_p,
ErtsProcLocks c_p_locks, Eterm* modp)
{
- Eterm retval;
+ Eterm retval = NIL;
LoaderState* stp = ERTS_MAGIC_BIN_DATA(magic);
+ Module* mod_tab_p;
+ struct erl_module_instance* inst_p;
+ Uint size;
- /*
- * No other process may run since we will update the export
- * table which is not protected by any locks.
- */
-
- ERTS_SMP_LC_ASSERT(erts_initialized == 0 || erts_has_code_write_permission() ||
- erts_smp_thr_progress_is_blocking());
-
+ ERTS_LC_ASSERT(erts_initialized == 0 || erts_has_code_write_permission() ||
+ erts_thr_progress_is_blocking());
/*
* Make current code for the module old and insert the new code
* as current. This will fail if there already exists old code
* for the module.
*/
+ mod_tab_p = erts_put_module(stp->module);
CHKBLK(ERTS_ALC_T_CODE,stp->code);
- retval = insert_new_code(c_p, c_p_locks, stp->group_leader, stp->module,
- stp->code, stp->loaded_size);
- if (retval != NIL) {
- goto load_error;
+ if (!stp->on_load) {
+ /*
+ * Normal case -- no -on_load() function.
+ */
+ retval = beam_make_current_old(c_p, c_p_locks, stp->module);
+ ASSERT(retval == NIL);
+ } else {
+ ErtsCodeIndex code_ix = erts_staging_code_ix();
+ Eterm module = stp->module;
+ int i, num_exps;
+
+ /*
+ * There is an -on_load() function. We will keep the current
+ * code, but we must turn off any tracing.
+ */
+ num_exps = export_list_size(code_ix);
+ for (i = 0; i < num_exps; i++) {
+ Export *ep = export_list(i, code_ix);
+ if (ep == NULL || ep->info.mfa.module != module) {
+ continue;
+ }
+ if (ep->addressv[code_ix] == ep->beam) {
+ if (BeamIsOpCode(ep->beam[0], op_apply_bif)) {
+ continue;
+ } else if (BeamIsOpCode(ep->beam[0], op_i_generic_breakpoint)) {
+ ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
+ ASSERT(mod_tab_p->curr.num_traced_exports > 0);
+ erts_clear_export_break(mod_tab_p, &ep->info);
+ ep->addressv[code_ix] = (BeamInstr *) ep->beam[1];
+ ep->beam[1] = 0;
+ }
+ ASSERT(ep->beam[1] == 0);
+ }
+ }
+ ASSERT(mod_tab_p->curr.num_breakpoints == 0);
+ ASSERT(mod_tab_p->curr.num_traced_exports == 0);
}
/*
+ * Update module table.
+ */
+
+ size = stp->loaded_size;
+ erts_total_code_size += size;
+
+ if (!stp->on_load) {
+ inst_p = &mod_tab_p->curr;
+ } else {
+ mod_tab_p->on_load =
+ (struct erl_module_instance *)
+ erts_alloc(ERTS_ALC_T_PREPARED_CODE,
+ sizeof(struct erl_module_instance));
+ inst_p = mod_tab_p->on_load;
+ erts_module_instance_init(inst_p);
+ }
+
+ inst_p->code_hdr = stp->hdr;
+ inst_p->code_length = size;
+
+ /*
+ * Update ranges (used for finding a function from a PC value).
+ */
+
+ erts_update_ranges((BeamInstr*)inst_p->code_hdr, size);
+
+ /*
* Ready for the final touch: fixing the export table entries for
* exported and imported functions. This can't fail.
*/
CHKBLK(ERTS_ALC_T_CODE,stp->code);
- final_touch(stp);
+ final_touch(stp, inst_p);
/*
* Loading succeeded.
@@ -792,7 +899,8 @@ erts_finish_loading(Binary* magic, Process* c_p,
debug_dump_code(stp->code,stp->ci);
#endif
#endif
- stp->code = NULL; /* Prevent code from being freed. */
+ stp->hdr = NULL; /* Prevent code from being freed. */
+ stp->codev = NULL;
*modp = stp->module;
/*
@@ -803,7 +911,6 @@ erts_finish_loading(Binary* magic, Process* c_p,
retval = am_on_load;
}
- load_error:
free_loader_state(magic);
return retval;
}
@@ -816,7 +923,7 @@ erts_alloc_loader_state(void)
magic = erts_create_magic_binary(sizeof(LoaderState),
loader_state_dtor);
- erts_refc_inc(&magic->refc, 1);
+ erts_refc_inc(&magic->intern.refc, 1);
stp = ERTS_MAGIC_BIN_DATA(magic);
stp->bin = NULL;
stp->function = THE_NON_VALUE; /* Function not known yet */
@@ -824,7 +931,8 @@ erts_alloc_loader_state(void)
stp->specific_op = -1;
stp->genop = NULL;
stp->atom = NULL;
- stp->code = NULL;
+ stp->hdr = NULL;
+ stp->codev = NULL;
stp->labels = NULL;
stp->import = NULL;
stp->export = NULL;
@@ -860,29 +968,73 @@ erts_module_for_prepared_code(Binary* magic)
LoaderState* stp;
if (ERTS_MAGIC_BIN_DESTRUCTOR(magic) != loader_state_dtor) {
+#ifdef HIPE
+ HipeLoaderState *hipe_stp;
+ if ((hipe_stp = hipe_get_loader_state(magic))
+ && hipe_stp->text_segment != 0) {
+ return hipe_stp->module;
+ }
+#endif
return NIL;
}
stp = ERTS_MAGIC_BIN_DATA(magic);
- if (stp->code != 0) {
+ if (stp->hdr != 0) {
return stp->module;
} else {
return NIL;
}
}
+/*
+ * Return am_true if the module has an on_load function,
+ * am_false if it does not.
+ */
+
+Eterm
+erts_has_code_on_load(Binary* magic)
+{
+ LoaderState* stp;
+
+ if (ERTS_MAGIC_BIN_DESTRUCTOR(magic) != loader_state_dtor) {
+ return NIL;
+ }
+ stp = ERTS_MAGIC_BIN_DATA(magic);
+ return stp->on_load ? am_true : am_false;
+}
+
static void
free_loader_state(Binary* magic)
{
loader_state_dtor(magic);
- if (erts_refc_dectest(&magic->refc, 0) == 0) {
- erts_bin_free(magic);
- }
+ erts_bin_release(magic);
+}
+
+static ErlHeapFragment* new_literal_fragment(Uint size)
+{
+ ErlHeapFragment* bp;
+ bp = (ErlHeapFragment*) ERTS_HEAP_ALLOC(ERTS_ALC_T_PREPARED_CODE,
+ ERTS_HEAP_FRAG_SIZE(size));
+ ERTS_INIT_HEAP_FRAG(bp, size, size);
+ return bp;
+}
+
+static void free_literal_fragment(ErlHeapFragment* bp)
+{
+ ASSERT(bp != NULL);
+ do {
+ ErlHeapFragment* next_bp = bp->next;
+
+ erts_cleanup_offheap(&bp->off_heap);
+ ERTS_HEAP_FREE(ERTS_ALC_T_PREPARED_CODE, (void *) bp,
+ ERTS_HEAP_FRAG_SIZE(bp->size));
+ bp = next_bp;
+ } while (bp != NULL);
}
/*
* This destructor function can safely be called multiple times.
*/
-static void
+static int
loader_state_dtor(Binary* magic)
{
LoaderState* stp = ERTS_MAGIC_BIN_DATA(magic);
@@ -891,11 +1043,20 @@ loader_state_dtor(Binary* magic)
driver_free_binary(stp->bin);
stp->bin = 0;
}
- if (stp->code != 0) {
- erts_free(ERTS_ALC_T_CODE, stp->code);
- stp->code = 0;
+ if (stp->hdr != 0) {
+ if (stp->hdr->literal_area) {
+ erts_release_literal_area(stp->hdr->literal_area);
+ stp->hdr->literal_area = NULL;
+ }
+ erts_free(ERTS_ALC_T_CODE, stp->hdr);
+ stp->hdr = 0;
+ stp->codev = 0;
}
if (stp->labels != 0) {
+ Uint num;
+ for (num = 0; num < stp->num_labels; num++) {
+ erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->labels[num].patches);
+ }
erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->labels);
stp->labels = 0;
}
@@ -918,10 +1079,9 @@ loader_state_dtor(Binary* magic)
if (stp->literals != 0) {
int i;
for (i = 0; i < stp->num_literals; i++) {
- if (stp->literals[i].heap != 0) {
- erts_free(ERTS_ALC_T_PREPARED_CODE,
- (void *) stp->literals[i].heap);
- stp->literals[i].heap = 0;
+ if (stp->literals[i].heap_frags != 0) {
+ free_literal_fragment(stp->literals[i].heap_frags);
+ stp->literals[i].heap_frags = 0;
}
}
erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->literals);
@@ -963,12 +1123,15 @@ loader_state_dtor(Binary* magic)
*/
ASSERT(stp->genop_blocks == 0);
+ return 1;
}
+#ifdef HIPE
static Eterm
-insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm group_leader, Eterm module, BeamInstr* code,
- Uint size)
+stub_insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
+ Eterm group_leader, Eterm module,
+ BeamCodeHeader* code_hdr, Uint size,
+ HipeModule *hipe_code)
{
Module* modp;
Eterm retval;
@@ -988,17 +1151,21 @@ insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
erts_total_code_size += size;
modp = erts_put_module(module);
- modp->curr.code = code;
+ modp->curr.code_hdr = code_hdr;
modp->curr.code_length = size;
modp->curr.catches = BEAM_CATCHES_NIL; /* Will be filled in later. */
+ DBG_TRACE_MFA(make_atom(modp->module), 0, 0, "insert_new_code "
+ "first_hipe_ref = %p", hipe_code->first_hipe_ref);
+ modp->curr.hipe_code = hipe_code;
/*
* Update ranges (used for finding a function from a PC value).
*/
- erts_update_ranges(code, size);
+ erts_update_ranges((BeamInstr*)modp->curr.code_hdr, size);
return NIL;
}
+#endif
static int
init_iff_file(LoaderState* stp, byte* code, Uint size)
@@ -1072,7 +1239,7 @@ init_iff_file(LoaderState* stp, byte* code, Uint size)
* Scan the IFF file. The header should have been verified by init_iff_file().
*/
static int
-scan_iff_file(LoaderState* stp, Uint* chunk_types, Uint num_types, Uint num_mandatory)
+scan_iff_file(LoaderState* stp, Uint* chunk_types, Uint num_types)
{
Uint count;
Uint id;
@@ -1151,7 +1318,16 @@ verify_chunks(LoaderState* stp)
MD5_CTX context;
MD5Init(&context);
- for (i = 0; i < NUM_MANDATORY; i++) {
+
+ if (stp->chunks[UTF8_ATOM_CHUNK].start != NULL) {
+ MD5Update(&context, stp->chunks[UTF8_ATOM_CHUNK].start, stp->chunks[UTF8_ATOM_CHUNK].size);
+ } else if (stp->chunks[ATOM_CHUNK].start != NULL) {
+ MD5Update(&context, stp->chunks[ATOM_CHUNK].start, stp->chunks[ATOM_CHUNK].size);
+ } else {
+ LoadError0(stp, "mandatory chunk of type 'Atom' or 'AtU8' not found\n");
+ }
+
+ for (i = MIN_MANDATORY; i < MAX_MANDATORY; i++) {
if (stp->chunks[i].start != NULL) {
MD5Update(&context, stp->chunks[i].start, stp->chunks[i].size);
} else {
@@ -1212,9 +1388,9 @@ verify_chunks(LoaderState* stp)
}
static int
-load_atom_table(LoaderState* stp)
+load_atom_table(LoaderState* stp, ErtsAtomEncoding enc)
{
- int i;
+ unsigned int i;
GetInt(stp, 4, stp->num_atoms);
stp->num_atoms++;
@@ -1231,7 +1407,7 @@ load_atom_table(LoaderState* stp)
GetByte(stp, n);
GetString(stp, atom, n);
- stp->atom[i] = erts_atom_put(atom, n, ERTS_ATOM_ENC_LATIN1, 1);
+ stp->atom[i] = erts_atom_put(atom, n, enc, 1);
}
/*
@@ -1259,13 +1435,13 @@ load_atom_table(LoaderState* stp)
static int
load_import_table(LoaderState* stp)
{
- int i;
+ unsigned int i;
GetInt(stp, 4, stp->num_imports);
stp->import = erts_alloc(ERTS_ALC_T_PREPARED_CODE,
stp->num_imports * sizeof(ImportEntry));
for (i = 0; i < stp->num_imports; i++) {
- int n;
+ unsigned int n;
Eterm mod;
Eterm func;
Uint arity;
@@ -1273,17 +1449,17 @@ load_import_table(LoaderState* stp)
GetInt(stp, 4, n);
if (n >= stp->num_atoms) {
- LoadError2(stp, "import entry %d: invalid atom number %d", i, n);
+ LoadError2(stp, "import entry %u: invalid atom number %u", i, n);
}
mod = stp->import[i].module = stp->atom[n];
GetInt(stp, 4, n);
if (n >= stp->num_atoms) {
- LoadError2(stp, "import entry %d: invalid atom number %d", i, n);
+ LoadError2(stp, "import entry %u: invalid atom number %u", i, n);
}
func = stp->import[i].function = stp->atom[n];
GetInt(stp, 4, arity);
if (arity > MAX_REG) {
- LoadError2(stp, "import entry %d: invalid arity %d", i, arity);
+ LoadError2(stp, "import entry %u: invalid arity %d", i, arity);
}
stp->import[i].arity = arity;
stp->import[i].patches = 0;
@@ -1294,8 +1470,8 @@ load_import_table(LoaderState* stp)
* the BIF function.
*/
if ((e = erts_active_export_entry(mod, func, arity)) != NULL) {
- if (e->code[3] == (BeamInstr) em_apply_bif) {
- stp->import[i].bf = (BifFunction) e->code[4];
+ if (BeamIsOpCode(e->beam[0], op_apply_bif)) {
+ stp->import[i].bf = (BifFunction) e->beam[1];
if (func == am_load_nif && mod == am_erlang && arity == 2) {
stp->may_load_nif = 1;
}
@@ -1311,12 +1487,12 @@ load_import_table(LoaderState* stp)
static int
read_export_table(LoaderState* stp)
{
- int i;
+ unsigned int i;
BeamInstr* address;
GetInt(stp, 4, stp->num_exps);
if (stp->num_exps > stp->num_functions) {
- LoadError2(stp, "%d functions exported; only %d functions defined",
+ LoadError2(stp, "%u functions exported; only %u functions defined",
stp->num_exps, stp->num_functions);
}
stp->export
@@ -1334,18 +1510,18 @@ read_export_table(LoaderState* stp)
stp->export[i].function = func;
GetInt(stp, 4, arity);
if (arity > MAX_REG) {
- LoadError2(stp, "export table entry %d: absurdly high arity %d", i, arity);
+ LoadError2(stp, "export table entry %u: absurdly high arity %u", i, arity);
}
stp->export[i].arity = arity;
GetInt(stp, 4, n);
if (n >= stp->num_labels) {
- LoadError3(stp, "export table entry %d: invalid label %d (highest defined label is %d)", i, n, stp->num_labels);
+ LoadError3(stp, "export table entry %u: invalid label %u (highest defined label is %u)", i, n, stp->num_labels);
}
value = stp->labels[n].value;
if (value == 0) {
- LoadError2(stp, "export table entry %d: label %d not resolved", i, n);
+ LoadError2(stp, "export table entry %u: label %u not resolved", i, n);
}
- stp->export[i].address = address = stp->code + value;
+ stp->export[i].address = address = stp->codev + value;
/*
* Find out if there is a BIF with the same name.
@@ -1364,7 +1540,7 @@ read_export_table(LoaderState* stp)
* any other functions that walk through all local functions.
*/
- if (stp->labels[n].patches) {
+ if (stp->labels[n].num_patches > 0) {
LoadError3(stp, "there are local calls to the stub for "
"the BIF %T:%T/%d",
stp->module, func, arity);
@@ -1388,7 +1564,7 @@ is_bif(Eterm mod, Eterm func, unsigned arity)
if (e == NULL) {
return 0;
}
- if (e->code[3] != (BeamInstr) em_apply_bif) {
+ if (! BeamIsOpCode(e->beam[0], op_apply_bif)) {
return 0;
}
if (mod == am_erlang && func == am_apply && arity == 3) {
@@ -1404,7 +1580,7 @@ is_bif(Eterm mod, Eterm func, unsigned arity)
static int
read_lambda_table(LoaderState* stp)
{
- int i;
+ unsigned int i;
GetInt(stp, 4, stp->num_lambdas);
if (stp->num_lambdas > stp->lambdas_allocated) {
@@ -1424,12 +1600,12 @@ read_lambda_table(LoaderState* stp)
GetAtom(stp, n, stp->lambdas[i].function);
GetInt(stp, 4, arity);
if (arity > MAX_REG) {
- LoadError2(stp, "lambda entry %d: absurdly high arity %d", i, arity);
+ LoadError2(stp, "lambda entry %u: absurdly high arity %u", i, arity);
}
stp->lambdas[i].arity = arity;
GetInt(stp, 4, n);
if (n >= stp->num_labels) {
- LoadError3(stp, "lambda entry %d: invalid label %d (highest defined label is %d)",
+ LoadError3(stp, "lambda entry %u: invalid label %u (highest defined label is %u)",
i, n, stp->num_labels);
}
stp->lambdas[i].label = n;
@@ -1446,10 +1622,11 @@ read_lambda_table(LoaderState* stp)
return 0;
}
+
static int
read_literal_table(LoaderState* stp)
{
- int i;
+ unsigned int i;
uLongf uncompressed_sz;
byte* uncompressed = 0;
@@ -1467,36 +1644,46 @@ read_literal_table(LoaderState* stp)
stp->allocated_literals = stp->num_literals;
for (i = 0; i < stp->num_literals; i++) {
- stp->literals[i].heap = 0;
+ stp->literals[i].heap_frags = 0;
}
for (i = 0; i < stp->num_literals; i++) {
- int sz;
+ Uint sz;
Sint heap_size;
byte* p;
Eterm val;
- Eterm* hp;
+ ErtsHeapFactory factory;
GetInt(stp, 4, sz); /* Size of external term format. */
GetString(stp, p, sz);
if ((heap_size = erts_decode_ext_size(p, sz)) < 0) {
- LoadError1(stp, "literal %d: bad external format", i);
+ LoadError1(stp, "literal %u: bad external format", i);
}
- hp = stp->literals[i].heap = erts_alloc(ERTS_ALC_T_PREPARED_CODE,
- heap_size*sizeof(Eterm));
- stp->literals[i].off_heap.first = 0;
- stp->literals[i].off_heap.overhead = 0;
- val = erts_decode_ext(&hp, &stp->literals[i].off_heap, &p);
- stp->literals[i].heap_size = hp - stp->literals[i].heap;
- if (stp->literals[i].heap_size > heap_size) {
- erl_exit(1, "overrun by %d word(s) for literal heap, term %d",
- stp->literals[i].heap_size - heap_size, i);
- }
- if (is_non_value(val)) {
- LoadError1(stp, "literal %d: bad external format", i);
- }
- stp->literals[i].term = val;
- stp->total_literal_size += stp->literals[i].heap_size;
+
+ if (heap_size > 0) {
+ erts_factory_heap_frag_init(&factory,
+ new_literal_fragment(heap_size));
+ factory.alloc_type = ERTS_ALC_T_PREPARED_CODE;
+ val = erts_decode_ext(&factory, &p, 0);
+
+ if (is_non_value(val)) {
+ LoadError1(stp, "literal %u: bad external format", i);
+ }
+ erts_factory_close(&factory);
+ stp->literals[i].heap_frags = factory.heap_frags;
+ stp->total_literal_size += erts_used_frag_sz(factory.heap_frags);
+ }
+ else {
+ erts_factory_dummy_init(&factory);
+ val = erts_decode_ext(&factory, &p, 0);
+ if (is_non_value(val)) {
+ LoadError1(stp, "literal %u: bad external format", i);
+ }
+ ASSERT(is_immed(val));
+ stp->literals[i].heap_frags = NULL;
+ }
+ stp->literals[i].term = val;
+
}
erts_free(ERTS_ALC_T_TMP, uncompressed);
return 1;
@@ -1513,9 +1700,9 @@ read_line_table(LoaderState* stp)
{
unsigned version;
ERTS_DECLARE_DUMMY(unsigned flags);
- int num_line_items;
+ unsigned int num_line_items;
BeamInstr* lp;
- int i;
+ unsigned int i;
BeamInstr fname_index;
BeamInstr tag;
@@ -1594,7 +1781,7 @@ read_line_table(LoaderState* stp)
}
} else if (tag == TAG_a) {
if (val > stp->num_fnames) {
- LoadError2(stp, "file index overflow (%d/%d)",
+ LoadError2(stp, "file index overflow (%u/%u)",
val, stp->num_fnames);
}
fname_index = val;
@@ -1630,9 +1817,9 @@ read_line_table(LoaderState* stp)
stp->num_line_instrs *
sizeof(LineInstr));
stp->current_li = 0;
- stp->func_line = (int *) erts_alloc(ERTS_ALC_T_PREPARED_CODE,
- stp->num_functions *
- sizeof(int));
+ stp->func_line = (unsigned int *) erts_alloc(ERTS_ALC_T_PREPARED_CODE,
+ stp->num_functions *
+ sizeof(unsigned int));
return 1;
@@ -1656,6 +1843,10 @@ read_code_header(LoaderState* stp)
*/
GetInt(stp, 4, head_size);
+ if (head_size > stp->file_left) {
+ LoadError2(stp, "invalid code header size %u; bytes left %u",
+ head_size, stp->file_left);
+ }
stp->code_start = stp->file_p + head_size;
stp->code_size = stp->file_left - head_size;
stp->file_left = head_size;
@@ -1695,11 +1886,7 @@ read_code_header(LoaderState* stp)
stp->labels = (Label *) erts_alloc(ERTS_ALC_T_PREPARED_CODE,
stp->num_labels * sizeof(Label));
for (i = 0; i < stp->num_labels; i++) {
- stp->labels[i].value = 0;
- stp->labels[i].patches = 0;
-#ifdef ERTS_SMP
- stp->labels[i].looprec_targeted = 0;
-#endif
+ init_label(&stp->labels[i]);
}
stp->catches = 0;
@@ -1715,29 +1902,61 @@ read_code_header(LoaderState* stp)
} else {}
#define CodeNeed(w) do { \
- ASSERT(ci <= code_buffer_size); \
- if (code_buffer_size < ci+(w)) { \
- code_buffer_size = 2*ci+(w); \
- stp->code = code = \
- (BeamInstr *) erts_realloc(ERTS_ALC_T_CODE, \
- (void *) code, \
- code_buffer_size * sizeof(BeamInstr)); \
+ ASSERT(ci <= codev_size); \
+ if (codev_size < ci+(w)) { \
+ codev_size = 2*ci+(w); \
+ stp->hdr = (BeamCodeHeader*) erts_realloc(ERTS_ALC_T_CODE, \
+ (void *) stp->hdr, \
+ (offsetof(BeamCodeHeader,functions) \
+ + codev_size * sizeof(BeamInstr))); \
+ code = stp->codev = (BeamInstr*) &stp->hdr->functions; \
} \
} while (0)
#define TermWords(t) (((t) / (sizeof(BeamInstr)/sizeof(Eterm))) + !!((t) % (sizeof(BeamInstr)/sizeof(Eterm))))
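TermWords is a round-up division: it converts a count of Eterms into the number of BeamInstr-sized slots needed to hold them. On builds where the two types are the same size the ratio is 1 and it is the identity; the shape of the computation:

    #include <stdio.h>

    /* Round-up division: slots of 'ratio' Eterms needed for 't' Eterms. */
    static unsigned long term_words(unsigned long t, unsigned long ratio)
    {
        return t / ratio + !!(t % ratio);
    }

    int main(void)
    {
        printf("%lu\n", term_words(5, 1)); /* 5 */
        printf("%lu\n", term_words(5, 2)); /* 3: rounds up */
        return 0;
    }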
+static void init_label(Label* lp)
+{
+ lp->value = 0;
+ lp->looprec_targeted = 0;
+ lp->num_patches = 0;
+ lp->num_allocated = 4;
+ lp->patches = erts_alloc(ERTS_ALC_T_PREPARED_CODE,
+ lp->num_allocated * sizeof(LabelPatch));
+}
+
+static void
+register_label_patch(LoaderState* stp, Uint label, Uint ci, Uint offset)
+{
+ Label* lp;
+
+ ASSERT(label < stp->num_labels);
+ lp = &stp->labels[label];
+ if (lp->num_allocated <= lp->num_patches) {
+ lp->num_allocated *= 2;
+ lp->patches = erts_realloc(ERTS_ALC_T_PREPARED_CODE,
+ (void *) lp->patches,
+ lp->num_allocated * sizeof(LabelPatch));
+ }
+ lp->patches[lp->num_patches].pos = ci;
+ lp->patches[lp->num_patches].offset = offset;
+ lp->patches[lp->num_patches].packed = 0;
+ lp->num_patches++;
+ stp->codev[ci] = label;
+}
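register_label_patch is one half of a two-phase scheme: while loading, every forward reference records a (position, offset) pair and leaves the label number in the code; once the label's value is known, the loader walks the patch array and rewrites each recorded position. A simplified sketch of that resolution step (offset handling and operand packing omitted):

    #include <stdio.h>
    #include <stddef.h>

    struct patch { size_t pos; };

    /* Rewrite every code position that still holds the label number
     * with the label's resolved value. */
    static void resolve_label(unsigned long *codev, unsigned long value,
                              const struct patch *patches, size_t n)
    {
        size_t i;
        for (i = 0; i < n; i++)
            codev[patches[i].pos] = value;
    }

    int main(void)
    {
        unsigned long codev[4] = { 0, 7, 0, 7 }; /* label 7 referenced twice */
        struct patch ps[2] = { { 1 }, { 3 } };
        resolve_label(codev, 1234, ps, 2);
        printf("%lu %lu\n", codev[1], codev[3]); /* 1234 1234 */
        return 0;
    }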
+
static int
load_code(LoaderState* stp)
{
int i;
- int ci;
- int last_func_start = 0; /* Needed by nif loading and line instructions */
+ Uint ci;
+ Uint last_instr_start; /* Needed for relative jumps */
+ Uint last_func_start = 0; /* Needed by nif loading and line instructions */
char* sign;
int arg; /* Number of current argument. */
int num_specific; /* Number of specific ops for current. */
BeamInstr* code;
- int code_buffer_size;
+ int codev_size;
int specific;
Uint last_label = 0; /* Number of last label. */
Uint function_number = 0;
@@ -1745,32 +1964,35 @@ load_code(LoaderState* stp)
GenOp** last_op_next = NULL;
int arity;
int retval = 1;
+#if defined(BEAM_WIDE_SHIFT)
+ int num_trailing_f; /* Number of extra 'f' arguments in a list */
+#endif
/*
* The size of the loaded func_info instruction is needed
* by both the nif functionality and line instructions.
*/
enum {
- FUNC_INFO_SZ = 5
+ FUNC_INFO_SZ = sizeof(ErtsCodeInfo) / sizeof(Eterm)
};
- code = stp->code;
- code_buffer_size = stp->code_buffer_size;
+ code = stp->codev;
+ codev_size = stp->codev_size;
ci = stp->ci;
for (;;) {
- int new_op;
+ unsigned int new_op;
GenOp* tmp_op;
- ASSERT(ci <= code_buffer_size);
+ ASSERT(ci <= codev_size);
get_next_instr:
GetByte(stp, new_op);
if (new_op >= NUM_GENERIC_OPS) {
- LoadError1(stp, "invalid opcode %d", new_op);
+ LoadError1(stp, "invalid opcode %u", new_op);
}
if (gen_opc[new_op].name[0] == '\0') {
- LoadError1(stp, "invalid opcode %d", new_op);
+ LoadError1(stp, "invalid opcode %u", new_op);
}
@@ -1808,9 +2030,7 @@ load_code(LoaderState* stp)
case TAG_o:
break;
case TAG_x:
- if (last_op->a[arg].val == 0) {
- last_op->a[arg].type = TAG_r;
- } else if (last_op->a[arg].val >= MAX_REG) {
+ if (last_op->a[arg].val >= MAX_REG) {
LoadError1(stp, "invalid x register number: %u",
last_op->a[arg].val);
}
@@ -1852,31 +2072,10 @@ load_code(LoaderState* stp)
case 0:
/* Floating point number.
* Not generated by the compiler in R16B and later.
+ * (The literal pool is used instead.)
*/
- {
- Eterm* hp;
-/* XXX:PaN - Halfword should use ARCH_64 variant instead */
-#if !defined(ARCH_64) || HALFWORD_HEAP
- Uint high, low;
-# endif
- last_op->a[arg].val = new_literal(stp, &hp,
- FLOAT_SIZE_OBJECT);
- hp[0] = HEADER_FLONUM;
- last_op->a[arg].type = TAG_q;
-#if defined(ARCH_64) && !HALFWORD_HEAP
- GetInt(stp, 8, hp[1]);
-# else
- GetInt(stp, 4, high);
- GetInt(stp, 4, low);
- if (must_swap_floats) {
- Uint t = high;
- high = low;
- low = t;
- }
- hp[1] = high;
- hp[2] = low;
-# endif
- }
+ LoadError0(stp, "please re-compile this module with an "
+ ERLANG_OTP_RELEASE " compiler");
break;
case 1: /* List. */
if (arg+1 != arity) {
@@ -1959,42 +2158,47 @@ load_code(LoaderState* stp)
ASSERT(arity == last_op->arity);
do_transform:
- if (stp->genop == NULL) {
- last_op_next = NULL;
- goto get_next_instr;
- }
-
+ ASSERT(stp->genop != NULL);
if (gen_opc[stp->genop->op].transform != -1) {
- int need;
- tmp_op = stp->genop;
-
- for (need = gen_opc[stp->genop->op].min_window-1; need > 0; need--) {
- if (tmp_op == NULL) {
- goto get_next_instr;
- }
- tmp_op = tmp_op->next;
+ if (stp->genop->next == NULL) {
+ /*
+ * Simple heuristic: Most transformations require
+ * at least two instructions, so make sure that
+ * there are that many. That will reduce the number
+ * of TE_SHORT_WINDOWs.
+ */
+ goto get_next_instr;
}
switch (transform_engine(stp)) {
case TE_FAIL:
- last_op_next = NULL;
- last_op = NULL;
+ /*
+ * No transformation found. stp->genop != NULL and
+ * last_op_next is still valid. Go ahead and load
+ * the instruction.
+ */
break;
case TE_OK:
+ /*
+ * Some transformation was applied. last_op_next is
+ * no longer valid and stp->genop may be NULL.
+ * Try to transform again.
+ */
+ if (stp->genop == NULL) {
+ last_op_next = &stp->genop;
+ goto get_next_instr;
+ }
last_op_next = NULL;
- last_op = NULL;
goto do_transform;
case TE_SHORT_WINDOW:
- last_op_next = NULL;
- last_op = NULL;
+ /*
+ * No transformation applied. stp->genop != NULL and
+ * last_op_next is still valid. Fetch a new instruction
+ * before trying the transformation again.
+ */
goto get_next_instr;
}
}
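
The three return codes above form a small protocol between load_code() and transform_engine(). A self-contained sketch of what each code asks the caller to do (the enum values come from the code; the helper function and its suffixed names are illustrative only):

    enum te_result_sk { TE_OK_SK, TE_FAIL_SK, TE_SHORT_WINDOW_SK };

    static const char *te_action(enum te_result_sk r)
    {
        switch (r) {
        case TE_FAIL_SK:          /* no rule matched */
            return "load the current instruction unchanged";
        case TE_OK_SK:            /* the window was rewritten */
            return "run the transformations again on the new window";
        case TE_SHORT_WINDOW_SK:  /* a rule needs a longer window */
            return "fetch one more instruction, then retry";
        }
        return "unreachable";
    }
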
- if (stp->genop == NULL) {
- last_op_next = NULL;
- goto get_next_instr;
- }
-
/*
* From the collected generic instruction, find the specific
* instruction.
@@ -2017,7 +2221,42 @@ load_code(LoaderState* stp)
if (((opc[specific].mask[0] & mask[0]) == mask[0]) &&
((opc[specific].mask[1] & mask[1]) == mask[1]) &&
((opc[specific].mask[2] & mask[2]) == mask[2])) {
- break;
+
+ if (!opc[specific].involves_r) {
+ break; /* No complications - match */
+ }
+
+ /*
+ * The specific operation uses the 'r' operand,
+ * which is shorthand for x(0). Now things
+ * get complicated. First we must check whether
+ * all operands that should be of type 'r' use
+ * x(0) (as opposed to some other X register).
+ */
+ for (arg = 0; arg < arity; arg++) {
+ if (opc[specific].involves_r & (1 << arg) &&
+ tmp_op->a[arg].type == TAG_x) {
+ if (tmp_op->a[arg].val != 0) {
+ break; /* Other X register than 0 */
+ }
+ }
+ }
+
+ if (arg == arity) {
+ /*
+ * All 'r' operands use x(0) in the generic
+ * operation. That means a match. Now we
+ * will need to rewrite the generic instruction
+ * to actually use 'r' instead of 'x(0)'.
+ */
+ for (arg = 0; arg < arity; arg++) {
+ if (opc[specific].involves_r & (1 << arg) &&
+ tmp_op->a[arg].type == TAG_x) {
+ tmp_op->a[arg].type = TAG_r;
+ }
+ }
+ break; /* Match */
+ }
}
specific++;
}
@@ -2071,7 +2310,8 @@ load_code(LoaderState* stp)
stp->specific_op = specific;
CodeNeed(opc[stp->specific_op].sz+16); /* Extra margin for packing */
- code[ci++] = BeamOpCode(stp->specific_op);
+ last_instr_start = ci + opc[stp->specific_op].adjust;
+ code[ci++] = BeamOpCodeAddr(stp->specific_op);
}
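
The combination of opc[].adjust and last_instr_start supports the new relative addressing of 'f' operands: register_label_patch() is called further down with -last_instr_start, so after label resolution the code word holds (label position - instruction start) rather than an absolute address. A minimal sketch of both halves of that contract, with unsigned long standing in for BeamInstr and positions counted in code words:

    /* Loader side: what ends up stored for an 'f' operand. */
    static long stored_offset(unsigned long label_pos, unsigned long instr_start)
    {
        return (long) label_pos - (long) instr_start;
    }

    /* Emulator side: the jump becomes pointer-plus-offset instead of
     * loading an absolute address. */
    static const unsigned long *jump_target(const unsigned long *instr_start,
                                            long rel)
    {
        return instr_start + rel;
    }
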
/*
@@ -2128,14 +2368,11 @@ load_code(LoaderState* stp)
break;
case 's': /* Any source (tagged constant or register) */
switch (tag) {
- case TAG_r:
- code[ci++] = make_rreg();
- break;
case TAG_x:
- code[ci++] = make_xreg(tmp_op->a[arg].val);
+ code[ci++] = make_loader_x_reg(tmp_op->a[arg].val);
break;
case TAG_y:
- code[ci++] = make_yreg(tmp_op->a[arg].val);
+ code[ci++] = make_loader_y_reg(tmp_op->a[arg].val);
break;
case TAG_i:
code[ci++] = (BeamInstr) make_small((Uint)tmp_op->a[arg].val);
@@ -2146,22 +2383,24 @@ load_code(LoaderState* stp)
case TAG_n:
code[ci++] = NIL;
break;
+ case TAG_q:
+ new_literal_patch(stp, ci);
+ code[ci++] = tmp_op->a[arg].val;
+ break;
default:
LoadError1(stp, "bad tag %d for general source",
tmp_op->a[arg].type);
break;
}
break;
- case 'd': /* Destination (x(0), x(N), y(N) */
+        case 'd':       /* Destination (x(N), y(N)) */
+ case 'S': /* Source (x(N), y(N)) */
switch (tag) {
- case TAG_r:
- code[ci++] = make_rreg();
- break;
case TAG_x:
- code[ci++] = make_xreg(tmp_op->a[arg].val);
+ code[ci++] = tmp_op->a[arg].val * sizeof(Eterm);
break;
case TAG_y:
- code[ci++] = make_yreg(tmp_op->a[arg].val);
+ code[ci++] = tmp_op->a[arg].val * sizeof(Eterm) + 1;
break;
default:
LoadError1(stp, "bad tag %d for destination",
@@ -2169,11 +2408,29 @@ load_code(LoaderState* stp)
break;
}
break;
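
The 'd'/'S' encoding above replaces tagged register words with plain byte offsets: x(N) is stored as N*sizeof(Eterm) and y(N) as the same plus one, so the low bit selects the register file and the remaining bits are a ready-made offset. A minimal decoding sketch (not the emulator's actual macros; uintptr_t stands in for BeamInstr/Eterm, and the 'y' file is modeled as a flat array although in the real emulator it is the stack frame):

    #include <stdint.h>

    static uintptr_t *decode_reg(uintptr_t packed,
                                 uintptr_t *x_regs, uintptr_t *y_regs)
    {
        char *base = (char *) ((packed & 1) ? y_regs : x_regs);
        /* Clearing the tag bit leaves N * sizeof(Eterm), a byte offset. */
        return (uintptr_t *) (base + (packed & ~(uintptr_t) 1));
    }
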
- case 'I': /* Untagged integer (or pointer). */
- VerifyTag(stp, tag, TAG_u);
- code[ci++] = tmp_op->a[arg].val;
- break;
- case 't': /* Small untagged integer -- can be packed. */
+ case 't': /* Small untagged integer (16 bits) -- can be packed. */
+ case 'I': /* Untagged integer (32 bits) -- can be packed. */
+ case 'W': /* Untagged integer or pointer (machine word). */
+#ifdef DEBUG
+ switch (*sign) {
+ case 't':
+ if (tmp_op->a[arg].val >> 16 != 0) {
+ load_printf(__LINE__, stp, "value %lu of type 't' does not fit in 16 bits",
+ tmp_op->a[arg].val);
+ ASSERT(0);
+ }
+ break;
+#ifdef ARCH_64
+ case 'I':
+ if (tmp_op->a[arg].val >> 32 != 0) {
+ load_printf(__LINE__, stp, "value %lu of type 'I' does not fit in 32 bits",
+ tmp_op->a[arg].val);
+ ASSERT(0);
+ }
+ break;
+#endif
+ }
+#endif
VerifyTag(stp, tag, TAG_u);
code[ci++] = tmp_op->a[arg].val;
break;
@@ -2183,16 +2440,14 @@ load_code(LoaderState* stp)
break;
case 'f': /* Destination label */
VerifyTag(stp, tag_to_letter[tag], *sign);
- code[ci] = stp->labels[tmp_op->a[arg].val].patches;
- stp->labels[tmp_op->a[arg].val].patches = ci;
+ register_label_patch(stp, tmp_op->a[arg].val, ci, -last_instr_start);
ci++;
break;
case 'j': /* 'f' or 'p' */
if (tag == TAG_p) {
code[ci] = 0;
} else if (tag == TAG_f) {
- code[ci] = stp->labels[tmp_op->a[arg].val].patches;
- stp->labels[tmp_op->a[arg].val].patches = ci;
+ register_label_patch(stp, tmp_op->a[arg].val, ci, -last_instr_start);
} else {
LoadError3(stp, "bad tag %d; expected %d or %d",
tag, TAG_f, TAG_p);
@@ -2205,14 +2460,13 @@ load_code(LoaderState* stp)
VerifyTag(stp, tag, TAG_u);
last_label = tmp_op->a[arg].val;
if (!(0 < last_label && last_label < stp->num_labels)) {
- LoadError2(stp, "invalid label num %d (0 < label < %d)",
+ LoadError2(stp, "invalid label num %u (0 < label < %u)",
tmp_op->a[arg].val, stp->num_labels);
}
if (stp->labels[last_label].value != 0) {
LoadError1(stp, "label %d defined more than once", last_label);
}
stp->labels[last_label].value = ci;
- ASSERT(stp->labels[last_label].patches < ci);
break;
case 'e': /* Export entry */
VerifyTag(stp, tag, TAG_u);
@@ -2258,39 +2512,168 @@ load_code(LoaderState* stp)
* The packing engine.
*/
if (opc[stp->specific_op].pack[0]) {
- char* prog; /* Program for packing engine. */
- BeamInstr stack[8]; /* Stack. */
- BeamInstr* sp = stack; /* Points to next free position. */
- BeamInstr packed = 0; /* Accumulator for packed operations. */
+ char* prog; /* Program for packing engine. */
+ struct pack_stack {
+ BeamInstr instr;
+ Uint* patch_pos;
+ } stack[8]; /* Stack. */
+ struct pack_stack* sp = stack; /* Points to next free position. */
+ BeamInstr packed = 0; /* Accumulator for packed operations. */
+ LabelPatch* packed_label = 0;
for (prog = opc[stp->specific_op].pack; *prog; prog++) {
switch (*prog) {
- case 'g': /* Get instruction; push on stack. */
- *sp++ = code[--ci];
- break;
- case 'i': /* Initialize packing accumulator. */
- packed = code[--ci];
+ case 'g': /* Get operand and push on stack. */
+ ci--;
+ sp->instr = code[ci];
+ sp->patch_pos = 0;
+ sp++;
+ break;
+ case 'f': /* Get possible 'f' operand and push on stack. */
+ {
+ Uint w = code[--ci];
+ sp->instr = w;
+ sp->patch_pos = 0;
+
+ if (w != 0) {
+ LabelPatch* lbl_p;
+ int num_patches;
+ int patch;
+
+ ASSERT(w < stp->num_labels);
+ lbl_p = stp->labels[w].patches;
+ num_patches = stp->labels[w].num_patches;
+ for (patch = num_patches - 1; patch >= 0; patch--) {
+ if (lbl_p[patch].pos == ci) {
+ sp->patch_pos = &lbl_p[patch].pos;
+ break;
+ }
+ }
+ ASSERT(sp->patch_pos);
+ }
+ sp++;
+ }
+ break;
+ case 'q': /* Get possible 'q' operand and push on stack. */
+ {
+ LiteralPatch* lp;
+
+ ci--;
+ sp->instr = code[ci];
+ sp->patch_pos = 0;
+
+ for (lp = stp->literal_patches;
+ lp && lp->pos > ci-MAX_OPARGS;
+ lp = lp->next) {
+ if (lp->pos == ci) {
+ sp->patch_pos = &lp->pos;
+ break;
+ }
+ }
+ sp++;
+ }
+ break;
+#ifdef ARCH_64
+ case '1': /* Tightest shift (always 10 bits) */
+ ci--;
+ ASSERT((code[ci] & ~0x1FF8ull) == 0); /* Fits in 10 bits */
+ packed = (packed << BEAM_TIGHTEST_SHIFT);
+ packed |= code[ci] >> 3;
+ if (packed_label) {
+ packed_label->packed++;
+ }
break;
- case '0': /* Tight shift */
+#endif
+ case '2': /* Tight shift (10 or 16 bits) */
packed = (packed << BEAM_TIGHT_SHIFT) | code[--ci];
+ if (packed_label) {
+ packed_label->packed++;
+ }
break;
- case '6': /* Shift 16 steps */
+ case '3': /* Loose shift (16 bits) */
packed = (packed << BEAM_LOOSE_SHIFT) | code[--ci];
+ if (packed_label) {
+ packed_label->packed++;
+ }
break;
#ifdef ARCH_64
- case 'w': /* Shift 32 steps */
- packed = (packed << BEAM_WIDE_SHIFT) | code[--ci];
- break;
+ case '4': /* Wide shift (32 bits) */
+ {
+ Uint w = code[--ci];
+
+ if (packed_label) {
+ packed_label->packed++;
+ }
+
+ /*
+                     * 'w' can handle labels ('f' and 'j') as well as 'I'.
+                     * Test whether this is a label.
+ */
+
+ if (w < stp->num_labels) {
+ /*
+ * Probably a label. Look for patch pointing to this
+ * position.
+ */
+ LabelPatch* lp = stp->labels[w].patches;
+ int num_patches = stp->labels[w].num_patches;
+ int patch;
+ for (patch = num_patches - 1; patch >= 0; patch--) {
+ if (lp[patch].pos == ci) {
+ lp[patch].packed = 1;
+ packed_label = &lp[patch];
+ break;
+ }
+ }
+ }
+ packed = (packed << BEAM_WIDE_SHIFT) |
+ (code[ci] & BEAM_WIDE_MASK);
+ }
+ break;
#endif
case 'p': /* Put instruction (from stack). */
- code[ci++] = *--sp;
+ --sp;
+ code[ci] = sp->instr;
+ if (sp->patch_pos) {
+ *sp->patch_pos = ci;
+ }
+ ci++;
break;
- case 'P': /* Put packed operands. */
- *sp++ = packed;
+ case 'P': /* Put packed operands (on the stack). */
+ sp->instr = packed;
+ sp->patch_pos = 0;
+ if (packed_label) {
+ sp->patch_pos = &packed_label->pos;
+ packed_label = 0;
+ }
+ sp++;
packed = 0;
break;
+#if defined(ARCH_64) && defined(CODE_MODEL_SMALL)
+ case '#': /* -1 */
+ case '$': /* -2 */
+ case '%': /* -3 */
+ case '&': /* -4 */
+ case '\'': /* -5 */
+ case '(': /* -6 */
+ /* Pack accumulator contents into instruction word. */
+ {
+ Sint pos = ci - (*prog - '#' + 1);
+ /* Are the high 32 bits of the instruction word zero? */
+ ASSERT((code[pos] & ~((1ull << BEAM_WIDE_SHIFT)-1)) == 0);
+ code[pos] |= packed << BEAM_WIDE_SHIFT;
+ if (packed_label) {
+ ASSERT(packed_label->packed == 1);
+ packed_label->pos = pos;
+ packed_label->packed = 2;
+ packed_label = 0;
+ }
+ packed >>= BEAM_WIDE_SHIFT;
+ }
+ break;
+#endif
default:
- ASSERT(0);
+ erts_exit(ERTS_ERROR_EXIT, "beam_load: invalid packing op: %c\n", *prog);
}
}
ASSERT(sp == stack); /* Incorrect program? */
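
A standalone illustration of the core of a pack program, reduced to a hypothetical two-operand fragment: operands are popped off the end of the emitted code, shifted into one accumulator word, and the packed word is written back, saving a code word. (The real engine additionally drags label and literal patches along, as the pack_stack bookkeeping above shows.)

    #include <stdint.h>

    /* Model of "33...Pp" on two 16-bit operands ('3' = loose shift). */
    static unsigned pack_pair(uint64_t *code, unsigned ci)
    {
        uint64_t packed = 0;
        packed = (packed << 16) | code[--ci];   /* step '3' */
        packed = (packed << 16) | code[--ci];   /* step '3' */
        code[ci++] = packed;                    /* steps 'P' + 'p' */
        return ci;   /* one word now holds both operands */
    }
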
@@ -2300,7 +2683,17 @@ load_code(LoaderState* stp)
* Load any list arguments using the primitive tags.
*/
+#if defined(BEAM_WIDE_SHIFT)
+ num_trailing_f = 0;
+#endif
for ( ; arg < tmp_op->arity; arg++) {
+#if defined(BEAM_WIDE_SHIFT)
+ if (tmp_op->a[arg].type == TAG_f) {
+ num_trailing_f++;
+ } else {
+ num_trailing_f = 0;
+ }
+#endif
switch (tmp_op->a[arg].type) {
case TAG_i:
CodeNeed(1);
@@ -2314,24 +2707,16 @@ load_code(LoaderState* stp)
break;
case TAG_f:
CodeNeed(1);
- code[ci] = stp->labels[tmp_op->a[arg].val].patches;
- stp->labels[tmp_op->a[arg].val].patches = ci;
+ register_label_patch(stp, tmp_op->a[arg].val, ci, -last_instr_start);
ci++;
break;
- case TAG_r:
- CodeNeed(1);
- code[ci++] = (R_REG_DEF << _TAG_PRIMARY_SIZE) |
- TAG_PRIMARY_HEADER;
- break;
case TAG_x:
CodeNeed(1);
- code[ci++] = (tmp_op->a[arg].val << _TAG_IMMED1_SIZE) |
- (X_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER;
+ code[ci++] = make_loader_x_reg(tmp_op->a[arg].val);
break;
case TAG_y:
CodeNeed(1);
- code[ci++] = (tmp_op->a[arg].val << _TAG_IMMED1_SIZE) |
- (Y_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER;
+ code[ci++] = make_loader_y_reg(tmp_op->a[arg].val);
break;
case TAG_n:
CodeNeed(1);
@@ -2348,26 +2733,79 @@ load_code(LoaderState* stp)
}
}
+ /*
+ * If all the extra arguments were 'f' operands,
+     * and the word size is 64 bits, pack two 'f' operands
+ * into each word.
+ */
+
+#if defined(BEAM_WIDE_SHIFT)
+ if (num_trailing_f >= 1) {
+ Uint src_index = ci - num_trailing_f;
+ Uint src_limit = ci;
+ Uint dst_limit = src_index + (num_trailing_f+1)/2;
+
+ ci = src_index;
+ while (ci < dst_limit) {
+ Uint w[2];
+ BeamInstr packed = 0;
+ int wi;
+
+ w[0] = code[src_index];
+ if (src_index+1 < src_limit) {
+ w[1] = code[src_index+1];
+ } else {
+ w[1] = 0;
+ }
+ for (wi = 0; wi < 2; wi++) {
+ Uint lbl = w[wi];
+ LabelPatch* lp = stp->labels[lbl].patches;
+ int num_patches = stp->labels[lbl].num_patches;
+
+#if defined(WORDS_BIGENDIAN)
+ packed <<= BEAM_WIDE_SHIFT;
+ packed |= lbl & BEAM_WIDE_MASK;
+#else
+ packed >>= BEAM_WIDE_SHIFT;
+ packed |= lbl << BEAM_WIDE_SHIFT;
+#endif
+ while (num_patches-- > 0) {
+ if (lp->pos == src_index + wi) {
+ lp->pos = ci;
+#if defined(WORDS_BIGENDIAN)
+ lp->packed = 2 - wi;
+#else
+ lp->packed = wi + 1;
+#endif
+ break;
+ }
+ lp++;
+ }
+ }
+ code[ci++] = packed;
+ src_index += 2;
+ }
+ }
+#endif
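
On 64-bit words the loop above halves the size of trailing jump tables by storing two 32-bit label offsets per word; the endianness test decides which half counts as the first entry. A little-endian sketch of the combining step, assuming BEAM_WIDE_SHIFT is 32:

    #include <stdint.h>

    static uint64_t pack_two_labels_le(uint32_t first, uint32_t second)
    {
        /* Little-endian case above: each new label is shifted into the
         * high half, pushing the previous one down into the low half. */
        return (uint64_t) first | ((uint64_t) second << 32);
    }
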
+
/*
* Handle a few special cases.
*/
switch (stp->specific_op) {
case op_i_func_info_IaaI:
{
- Uint offset;
-
+ Sint offset;
if (function_number >= stp->num_functions) {
- LoadError1(stp, "too many functions in module (header said %d)",
+ LoadError1(stp, "too many functions in module (header said %u)",
stp->num_functions);
}
if (stp->may_load_nif) {
const int finfo_ix = ci - FUNC_INFO_SZ;
- enum { MIN_FUNC_SZ = 3 };
- if (finfo_ix - last_func_start < MIN_FUNC_SZ && last_func_start) {
+ if (finfo_ix - last_func_start < BEAM_NIF_MIN_FUNC_SZ && last_func_start) {
/* Must make room for call_nif op */
- int pad = MIN_FUNC_SZ - (finfo_ix - last_func_start);
- ASSERT(pad > 0 && pad < MIN_FUNC_SZ);
+ int pad = BEAM_NIF_MIN_FUNC_SZ - (finfo_ix - last_func_start);
+ ASSERT(pad > 0 && pad < BEAM_NIF_MIN_FUNC_SZ);
CodeNeed(pad);
sys_memmove(&code[finfo_ix+pad], &code[finfo_ix],
FUNC_INFO_SZ*sizeof(BeamInstr));
@@ -2392,18 +2830,20 @@ load_code(LoaderState* stp)
stp->function = code[ci-2];
stp->arity = code[ci-1];
+    /* When this assert is triggered, it is normally a sign that
+       the size of the i_func_info instruction in ops.tab does not
+       match FUNC_INFO_SZ. */
ASSERT(stp->labels[last_label].value == ci - FUNC_INFO_SZ);
- offset = MI_FUNCTIONS + function_number;
- code[offset] = stp->labels[last_label].patches;
- stp->labels[last_label].patches = offset;
+ offset = function_number;
+ register_label_patch(stp, last_label, offset, 0);
function_number++;
if (stp->arity > MAX_ARG) {
LoadError1(stp, "too many arguments: %d", stp->arity);
}
#ifdef DEBUG
- ASSERT(stp->labels[0].patches == 0); /* Should not be referenced. */
+ ASSERT(stp->labels[0].num_patches == 0); /* Should not be referenced. */
for (i = 1; i < stp->num_labels; i++) {
- ASSERT(stp->labels[i].patches < ci);
+ ASSERT(stp->labels[i].num_patches <= stp->labels[i].num_allocated);
}
#endif
}
@@ -2414,9 +2854,8 @@ load_code(LoaderState* stp)
/* Remember offset for the on_load function. */
stp->on_load = ci;
break;
- case op_bs_put_string_II:
- case op_i_bs_match_string_rfII:
- case op_i_bs_match_string_xfII:
+ case op_bs_put_string_WW:
+ case op_i_bs_match_string_xfWW:
new_string_patch(stp, ci-1);
break;
@@ -2433,14 +2872,14 @@ load_code(LoaderState* stp)
if (stp->line_item) {
BeamInstr item = code[ci-1];
BeamInstr loc;
- int li;
+ unsigned int li;
if (item >= stp->num_line_items) {
- LoadError2(stp, "line instruction index overflow (%d/%d)",
+ LoadError2(stp, "line instruction index overflow (%u/%u)",
item, stp->num_line_items);
}
li = stp->current_li;
if (li >= stp->num_line_instrs) {
- LoadError2(stp, "line instruction table overflow (%d/%d)",
+ LoadError2(stp, "line instruction table overflow (%u/%u)",
li, stp->num_line_instrs);
}
loc = stp->line_item[item];
@@ -2472,7 +2911,11 @@ load_code(LoaderState* stp)
* End of code found.
*/
case op_int_code_end:
- stp->code_buffer_size = code_buffer_size;
+ if (function_number != stp->num_functions) {
+ LoadError2(stp, "too few functions (%u) in module (header said %u)",
+ function_number, stp->num_functions);
+ }
+ stp->codev_size = codev_size;
stp->ci = ci;
stp->function = THE_NON_VALUE;
stp->genop = NULL;
@@ -2487,7 +2930,10 @@ load_code(LoaderState* stp)
{
GenOp* next = stp->genop->next;
FREE_GENOP(stp, stp->genop);
- stp->genop = next;
+ if ((stp->genop = next) == NULL) {
+ last_op_next = &stp->genop;
+ goto get_next_instr;
+ }
goto do_transform;
}
}
@@ -2520,6 +2966,12 @@ load_code(LoaderState* stp)
#define never(St) 0
+static int
+compiled_with_otp_20_or_higher(LoaderState* stp)
+{
+ return stp->otp_20_or_higher;
+}
+
/*
* Predicate that tests whether a jump table can be used.
*/
@@ -2631,12 +3083,18 @@ mixed_types(LoaderState* stp, GenOpArg Size, GenOpArg* Rest)
}
static int
-same_label(LoaderState* stp, GenOpArg Target, GenOpArg Label)
+is_killed_apply(LoaderState* stp, GenOpArg Reg, GenOpArg Live)
{
- return Target.type = TAG_f && Label.type == TAG_u &&
- Target.val == Label.val;
+ return Reg.type == TAG_x && Live.type == TAG_u &&
+ Live.val+2 <= Reg.val;
}
+static int
+is_killed(LoaderState* stp, GenOpArg Reg, GenOpArg Live)
+{
+ return Reg.type == TAG_x && Live.type == TAG_u &&
+ Live.val <= Reg.val;
+}
/*
* Generate an instruction for element/2.
@@ -2653,7 +3111,8 @@ gen_element(LoaderState* stp, GenOpArg Fail, GenOpArg Index,
op->next = NULL;
if (Index.type == TAG_i && Index.val > 0 &&
- (Tuple.type == TAG_r || Tuple.type == TAG_x || Tuple.type == TAG_y)) {
+ Index.val <= ERTS_MAX_TUPLE_SIZE &&
+ (Tuple.type == TAG_x || Tuple.type == TAG_y)) {
op->op = genop_i_fast_element_4;
op->a[0] = Tuple;
op->a[1] = Fail;
@@ -2743,13 +3202,14 @@ gen_get_integer2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms, GenOpArg Live,
op->a[0] = Ms;
op->a[1] = Fail;
op->a[2] = Dst;
+#ifdef ARCH_64
} else if (bits == 32 && (Flags.val & BSF_LITTLE) == 0) {
- op->op = genop_i_bs_get_integer_32_4;
- op->arity = 4;
+ op->op = genop_i_bs_get_integer_32_3;
+ op->arity = 3;
op->a[0] = Ms;
op->a[1] = Fail;
- op->a[2] = Live;
- op->a[3] = Dst;
+ op->a[2] = Dst;
+#endif
} else {
generic:
if (bits < SMALL_BITS) {
@@ -2789,23 +3249,16 @@ gen_get_integer2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms, GenOpArg Live,
goto generic;
}
} else {
- GenOp* op2;
- NEW_GENOP(stp, op2);
-
- op->op = genop_i_fetch_2;
- op->arity = 2;
- op->a[0] = Ms;
- op->a[1] = Size;
- op->next = op2;
-
- op2->op = genop_i_bs_get_integer_4;
- op2->arity = 4;
- op2->a[0] = Fail;
- op2->a[1] = Live;
- op2->a[2].type = TAG_u;
- op2->a[2].val = (Unit.val << 3) | Flags.val;
- op2->a[3] = Dst;
- op2->next = NULL;
+ op->op = genop_i_bs_get_integer_6;
+ op->arity = 6;
+ op->a[0] = Fail;
+ op->a[1] = Live;
+ op->a[2].type = TAG_u;
+ op->a[2].val = (Unit.val << 3) | Flags.val;
+ op->a[3] = Ms;
+ op->a[4] = Size;
+ op->a[5] = Dst;
+ op->next = NULL;
return op;
}
op->next = NULL;
@@ -2891,16 +3344,6 @@ gen_get_binary2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms, GenOpArg Live,
}
/*
- * Predicate to test whether a heap binary should be generated.
- */
-
-static int
-should_gen_heap_bin(LoaderState* stp, GenOpArg Src)
-{
- return Src.val <= ERL_ONHEAP_BIN_LIMIT;
-}
-
-/*
* Predicate to test whether a binary construction is too big.
*/
@@ -3165,18 +3608,11 @@ gen_increment_from_minus(LoaderState* stp, GenOpArg Reg, GenOpArg Integer,
static int
negation_is_small(LoaderState* stp, GenOpArg Int)
{
- return Int.type == TAG_i && IS_SSMALL(-Int.val);
-}
-
-
-static int
-smp(LoaderState* stp)
-{
-#ifdef ERTS_SMP
- return 1;
-#else
- return 0;
-#endif
+    /* Check for the rare case of overflow in BeamInstr (UWord) -> Sint.
+     * Cast to the correct type before using IS_SSMALL (Sint). */
+ return Int.type == TAG_i &&
+ !(Int.val & ~((((BeamInstr)1) << ((sizeof(Sint)*8)-1))-1)) &&
+ IS_SSMALL(-((Sint)Int.val));
}
/*
@@ -3185,10 +3621,8 @@ smp(LoaderState* stp)
static int
smp_mark_target_label(LoaderState* stp, GenOpArg L)
{
-#ifdef ERTS_SMP
ASSERT(L.type == TAG_f);
stp->labels[L.val].looprec_targeted = 1;
-#endif
return 1;
}
@@ -3199,12 +3633,8 @@ smp_mark_target_label(LoaderState* stp, GenOpArg L)
static int
smp_already_locked(LoaderState* stp, GenOpArg L)
{
-#ifdef ERTS_SMP
ASSERT(L.type == TAG_u);
return stp->labels[L.val].looprec_targeted;
-#else
- return 0;
-#endif
}
/*
@@ -3218,21 +3648,21 @@ gen_literal_timeout(LoaderState* stp, GenOpArg Fail, GenOpArg Time)
Sint timeout;
NEW_GENOP(stp, op);
- op->op = genop_i_wait_timeout_2;
+ op->op = genop_wait_timeout_unlocked_int_2;
op->next = NULL;
op->arity = 2;
- op->a[0] = Fail;
- op->a[1].type = TAG_u;
+ op->a[0].type = TAG_u;
+ op->a[1] = Fail;
if (Time.type == TAG_i && (timeout = Time.val) >= 0 &&
-#if defined(ARCH_64) && !HALFWORD_HEAP
+#if defined(ARCH_64)
(timeout >> 32) == 0
#else
1
#endif
) {
- op->a[1].val = timeout;
-#if !defined(ARCH_64) || HALFWORD_HEAP
+ op->a[0].val = timeout;
+#if !defined(ARCH_64)
} else if (Time.type == TAG_q) {
Eterm big;
@@ -3245,11 +3675,11 @@ gen_literal_timeout(LoaderState* stp, GenOpArg Fail, GenOpArg Time)
} else {
Uint u;
(void) term_to_Uint(big, &u);
- op->a[1].val = (BeamInstr) u;
+ op->a[0].val = (BeamInstr) u;
}
#endif
} else {
-#if !defined(ARCH_64) || HALFWORD_HEAP
+#if !defined(ARCH_64)
error:
#endif
op->op = genop_i_wait_error_0;
@@ -3265,21 +3695,21 @@ gen_literal_timeout_locked(LoaderState* stp, GenOpArg Fail, GenOpArg Time)
Sint timeout;
NEW_GENOP(stp, op);
- op->op = genop_i_wait_timeout_locked_2;
+ op->op = genop_wait_timeout_locked_int_2;
op->next = NULL;
op->arity = 2;
- op->a[0] = Fail;
- op->a[1].type = TAG_u;
-
+ op->a[0].type = TAG_u;
+ op->a[1] = Fail;
+
if (Time.type == TAG_i && (timeout = Time.val) >= 0 &&
-#if defined(ARCH_64) && !HALFWORD_HEAP
+#if defined(ARCH_64)
(timeout >> 32) == 0
#else
1
#endif
) {
- op->a[1].val = timeout;
-#if !defined(ARCH_64) || HALFWORD_HEAP
+ op->a[0].val = timeout;
+#if !defined(ARCH_64)
} else if (Time.type == TAG_q) {
Eterm big;
@@ -3292,11 +3722,11 @@ gen_literal_timeout_locked(LoaderState* stp, GenOpArg Fail, GenOpArg Time)
} else {
Uint u;
(void) term_to_Uint(big, &u);
- op->a[1].val = (BeamInstr) u;
+ op->a[0].val = (BeamInstr) u;
}
#endif
} else {
-#if !defined(ARCH_64) || HALFWORD_HEAP
+#if !defined(ARCH_64)
error:
#endif
op->op = genop_i_wait_error_locked_0;
@@ -3315,9 +3745,10 @@ gen_select_tuple_arity(LoaderState* stp, GenOpArg S, GenOpArg Fail,
{
GenOp* op;
+ GenOpArg *tmp;
int arity = Size.val + 3;
int size = Size.val / 2;
- int i;
+ int i, j, align = 0;
/*
* Verify the validity of the list.
@@ -3332,9 +3763,37 @@ gen_select_tuple_arity(LoaderState* stp, GenOpArg S, GenOpArg Fail,
}
/*
+ * Use a special-cased instruction if there are only two values.
+ */
+ if (size == 2) {
+ NEW_GENOP(stp, op);
+ op->next = NULL;
+ op->op = genop_i_select_tuple_arity2_4;
+ GENOP_ARITY(op, arity - 1);
+ op->a[0] = S;
+ op->a[1] = Fail;
+ op->a[2].type = TAG_u;
+ op->a[2].val = Rest[0].val;
+ op->a[3].type = TAG_u;
+ op->a[3].val = Rest[2].val;
+ op->a[4] = Rest[1];
+ op->a[5] = Rest[3];
+
+ return op;
+ }
+
+ /*
* Generate the generic instruction.
+     * Assumption: there are few different tuple arities to select
+     * on (fewer than 20), so a linear scan is used.
*/
+ align = 1;
+
+ arity += 2*align;
+ size += align;
+
NEW_GENOP(stp, op);
op->next = NULL;
op->op = genop_i_select_tuple_arity_3;
@@ -3342,39 +3801,36 @@ gen_select_tuple_arity(LoaderState* stp, GenOpArg S, GenOpArg Fail,
op->a[0] = S;
op->a[1] = Fail;
op->a[2].type = TAG_u;
- op->a[2].val = Size.val / 2;
- for (i = 0; i < Size.val; i += 2) {
- op->a[i+3].type = TAG_v;
- op->a[i+3].val = make_arityval(Rest[i].val);
- op->a[i+4] = Rest[i+1];
- }
+ op->a[2].val = size;
- /*
- * Sort the values to make them useful for a binary search.
- */
+ tmp = (GenOpArg *) erts_alloc(ERTS_ALC_T_LOADER_TMP, sizeof(GenOpArg)*(arity-2*align));
- qsort(op->a+3, size, 2*sizeof(GenOpArg),
- (int (*)(const void *, const void *)) genopargcompare);
-#ifdef DEBUG
- for (i = 3; i < arity-2; i += 2) {
- ASSERT(op->a[i].val < op->a[i+2].val);
+ for (i = 3; i < arity - 2*align; i+=2) {
+ tmp[i-3].type = TAG_v;
+ tmp[i-3].val = make_arityval(Rest[i-3].val);
+ tmp[i-2] = Rest[i-2];
}
-#endif
/*
- * Use a special-cased instruction if there are only two values.
+     * Sort the values to make them useful for a sentinel search.
*/
- if (size == 2) {
- op->op = genop_i_select_tuple_arity2_6;
- op->arity--;
- op->a[2].type = TAG_u;
- op->a[2].val = arityval(op->a[3].val);
- op->a[3] = op->a[4];
- op->a[4].type = TAG_u;
- op->a[4].val = arityval(op->a[5].val);
- op->a[5] = op->a[6];
+
+ qsort(tmp, size - align, 2*sizeof(GenOpArg),
+ (int (*)(const void *, const void *)) genopargcompare);
+
+ j = 3;
+ for (i = 3; i < arity - 2*align; i += 2) {
+ op->a[j] = tmp[i-3];
+ op->a[j + size] = tmp[i-2];
+ j++;
}
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) tmp);
+
+ op->a[j].type = TAG_u;
+ op->a[j].val = ~((BeamInstr)0);
+ op->a[j+size] = Fail;
+
return op;
}
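
Note the layout change the loop above produces: key i sits at op->a[3+i] and its branch target at op->a[3+i+size] (with the sentinel counted in size), instead of alternating key/label pairs. That keeps the keys contiguous for the scan. A sketch of accessors under that layout, with args standing for op->a and OpWord for BeamInstr:

    typedef unsigned long OpWord;   /* stand-in for BeamInstr */

    static OpWord key_at(const OpWord *args, int i)
    {
        return args[3 + i];                 /* contiguous key run */
    }

    static OpWord target_at(const OpWord *args, int i, int size)
    {
        return args[3 + size + i];          /* target 'size' slots later */
    }
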
@@ -3596,46 +4052,82 @@ gen_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail,
GenOpArg Size, GenOpArg* Rest)
{
GenOp* op;
+ GenOpArg *tmp;
int arity = Size.val + 3;
int size = Size.val / 2;
- int i;
+ int i, j, align = 0;
+
+ if (size == 2) {
+ /*
+ * Use a special-cased instruction if there are only two values.
+ */
+
+ NEW_GENOP(stp, op);
+ op->next = NULL;
+ op->op = genop_i_select_val2_4;
+ GENOP_ARITY(op, arity - 1);
+ op->a[0] = S;
+ op->a[1] = Fail;
+ op->a[2] = Rest[0];
+ op->a[3] = Rest[2];
+ op->a[4] = Rest[1];
+ op->a[5] = Rest[3];
+
+ return op;
+ }
+
+ if (size <= 10) {
+ /* Use linear search. Reserve place for a sentinel. */
+ align = 1;
+ }
+
+ arity += 2*align;
+ size += align;
NEW_GENOP(stp, op);
op->next = NULL;
- op->op = genop_i_select_val_3;
+ op->op = (align == 0) ? genop_i_select_val_bins_3 : genop_i_select_val_lins_3;
GENOP_ARITY(op, arity);
op->a[0] = S;
op->a[1] = Fail;
op->a[2].type = TAG_u;
op->a[2].val = size;
- for (i = 3; i < arity; i++) {
- op->a[i] = Rest[i-3];
+
+ tmp = (GenOpArg *) erts_alloc(ERTS_ALC_T_LOADER_TMP, sizeof(GenOpArg)*(arity-2*align));
+
+ for (i = 3; i < arity - 2*align; i++) {
+ tmp[i-3] = Rest[i-3];
}
/*
- * Sort the values to make them useful for a binary search.
+ * Sort the values to make them useful for a binary or sentinel search.
*/
- qsort(op->a+3, size, 2*sizeof(GenOpArg),
- (int (*)(const void *, const void *)) genopargcompare);
-#ifdef DEBUG
- for (i = 3; i < arity-2; i += 2) {
- ASSERT(op->a[i].val < op->a[i+2].val);
+ qsort(tmp, size - align, 2*sizeof(GenOpArg),
+ (int (*)(const void *, const void *)) genopargcompare);
+
+ j = 3;
+ for (i = 3; i < arity - 2*align; i += 2) {
+ op->a[j] = tmp[i-3];
+ op->a[j+size] = tmp[i-2];
+ j++;
}
-#endif
- /*
- * Use a special-cased instruction if there are only two values.
- */
- if (size == 2) {
- op->op = genop_i_select_val2_6;
- op->arity--;
- op->a[2] = op->a[3];
- op->a[3] = op->a[4];
- op->a[4] = op->a[5];
- op->a[5] = op->a[6];
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) tmp);
+
+ if (align) {
+ /* Add sentinel for linear search. */
+ op->a[j].type = TAG_u;
+ op->a[j].val = ~((BeamInstr)0);
+ op->a[j+size] = Fail;
}
+#ifdef DEBUG
+ for (i = 0; i < size - 1; i++) {
+ ASSERT(op->a[i+3].val <= op->a[i+4].val);
+ }
+#endif
+
return op;
}
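
The ~((BeamInstr)0) sentinel is what lets the linear variant scan without a bounds check: the keys are sorted ascending and the sentinel is the maximum possible value, so the loop below always stops. A runtime-side sketch (not the actual i_select_val_lins code):

    #include <stdint.h>

    /* vals[] is sorted ascending and ends with UINT64_MAX (the
     * sentinel), whose paired target is the fail label. */
    static int select_val_linear(const uint64_t *vals, uint64_t key)
    {
        int ix = 0;
        while (vals[ix] < key)
            ix++;
        return ix;   /* vals[ix] == key: match; sentinel slot: fail */
    }
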
@@ -3759,150 +4251,97 @@ gen_make_fun2(LoaderState* stp, GenOpArg idx)
op->next = NULL;
return op;
}
+
+static GenOp*
+translate_gc_bif(LoaderState* stp, GenOp* op, GenOpArg Bif)
+{
+ const ErtsGcBif* p;
+ BifFunction bf;
+
+ bf = stp->import[Bif.val].bf;
+ for (p = erts_gc_bifs; p->bif != 0; p++) {
+ if (p->bif == bf) {
+ op->a[1].type = TAG_u;
+ op->a[1].val = (BeamInstr) p->gc_bif;
+ return op;
+ }
+ }
+
+ op->op = genop_unsupported_guard_bif_3;
+ op->arity = 3;
+ op->a[0].type = TAG_a;
+ op->a[0].val = stp->import[Bif.val].module;
+ op->a[1].type = TAG_a;
+ op->a[1].val = stp->import[Bif.val].function;
+ op->a[2].type = TAG_u;
+ op->a[2].val = stp->import[Bif.val].arity;
+ return op;
+}
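
translate_gc_bif() replaces three near-identical if-chains (see the removed branches below) with one scan over erts_gc_bifs, a zero-terminated table pairing each guard BIF with its GC-safe version. A self-contained sketch of that lookup shape (types simplified; the real ErtsGcBif definition lives elsewhere in ERTS):

    #include <stddef.h>

    typedef void (*bif_fn)(void);
    typedef struct { bif_fn bif; bif_fn gc_bif; } gc_bif_pair;

    static bif_fn lookup_gc_bif(const gc_bif_pair *tab, bif_fn bf)
    {
        const gc_bif_pair *p;
        for (p = tab; p->bif != 0; p++) {   /* zero entry ends the scan */
            if (p->bif == bf)
                return p->gc_bif;           /* the GC-capable twin */
        }
        return NULL;   /* caller falls back to unsupported_guard_bif */
    }
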
+
/*
- * Rewrite gc_bifs with one parameter (the common case). Utilized
- * in ops.tab to rewrite instructions calling bif's in guards
- * to use a garbage collecting implementation. The instructions
- * are sometimes once again rewritten to handle literals (putting the
- * parameter in the mostly unused r[0] before the instruction is executed).
+ * Rewrite gc_bifs with one parameter (the common case).
*/
static GenOp*
gen_guard_bif1(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
GenOpArg Src, GenOpArg Dst)
{
GenOp* op;
- BifFunction bf;
NEW_GENOP(stp, op);
op->next = NULL;
- bf = stp->import[Bif.val].bf;
- /* The translations here need to have a reverse counterpart in
- beam_emu.c:translate_gc_bif for error handling to work properly. */
- if (bf == length_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_length_1;
- } else if (bf == size_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_size_1;
- } else if (bf == bit_size_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_bit_size_1;
- } else if (bf == byte_size_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_byte_size_1;
- } else if (bf == map_size_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_map_size_1;
- } else if (bf == abs_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_abs_1;
- } else if (bf == float_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_float_1;
- } else if (bf == round_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_round_1;
- } else if (bf == trunc_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_trunc_1;
- } else {
- op->op = genop_unsupported_guard_bif_3;
- op->arity = 3;
- op->a[0].type = TAG_a;
- op->a[0].val = stp->import[Bif.val].module;
- op->a[1].type = TAG_a;
- op->a[1].val = stp->import[Bif.val].function;
- op->a[2].type = TAG_u;
- op->a[2].val = stp->import[Bif.val].arity;
- return op;
- }
op->op = genop_i_gc_bif1_5;
op->arity = 5;
op->a[0] = Fail;
- op->a[1].type = TAG_u;
+ /* op->a[1] is set by translate_gc_bif() */
op->a[2] = Src;
op->a[3] = Live;
op->a[4] = Dst;
- return op;
+ return translate_gc_bif(stp, op, Bif);
}
/*
* This is used by the ops.tab rule that rewrites gc_bifs with two parameters.
- * The instruction returned is then again rewritten to an i_load instruction
- * followed by i_gc_bif2_jIId, to handle literals properly.
- * As opposed to the i_gc_bif1_jIsId, the instruction i_gc_bif2_jIId is
- * always rewritten, regardless of if there actually are any literals.
*/
static GenOp*
gen_guard_bif2(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
GenOpArg S1, GenOpArg S2, GenOpArg Dst)
{
GenOp* op;
- BifFunction bf;
NEW_GENOP(stp, op);
op->next = NULL;
- bf = stp->import[Bif.val].bf;
- /* The translations here need to have a reverse counterpart in
- beam_emu.c:translate_gc_bif for error handling to work properly. */
- if (bf == binary_part_2) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_binary_part_2;
- } else {
- op->op = genop_unsupported_guard_bif_3;
- op->arity = 3;
- op->a[0].type = TAG_a;
- op->a[0].val = stp->import[Bif.val].module;
- op->a[1].type = TAG_a;
- op->a[1].val = stp->import[Bif.val].function;
- op->a[2].type = TAG_u;
- op->a[2].val = stp->import[Bif.val].arity;
- return op;
- }
- op->op = genop_ii_gc_bif2_6;
+ op->op = genop_i_gc_bif2_6;
op->arity = 6;
op->a[0] = Fail;
- op->a[1].type = TAG_u;
- op->a[2] = S1;
- op->a[3] = S2;
- op->a[4] = Live;
+ /* op->a[1] is set by translate_gc_bif() */
+ op->a[2] = Live;
+ op->a[3] = S1;
+ op->a[4] = S2;
op->a[5] = Dst;
- return op;
+ return translate_gc_bif(stp, op, Bif);
}
/*
* This is used by the ops.tab rule that rewrites gc_bifs with three parameters.
- * The instruction returned is then again rewritten to a move instruction that
- * uses r[0] for temp storage, followed by an i_load instruction,
- * followed by i_gc_bif3_jIsId, to handle literals properly. Rewriting
- * always occur, as with the gc_bif2 counterpart.
*/
static GenOp*
gen_guard_bif3(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
GenOpArg S1, GenOpArg S2, GenOpArg S3, GenOpArg Dst)
{
GenOp* op;
- BifFunction bf;
NEW_GENOP(stp, op);
op->next = NULL;
- bf = stp->import[Bif.val].bf;
- /* The translations here need to have a reverse counterpart in
- beam_emu.c:translate_gc_bif for error handling to work properly. */
- if (bf == binary_part_3) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_binary_part_3;
- } else {
- op->op = genop_unsupported_guard_bif_3;
- op->arity = 3;
- op->a[0].type = TAG_a;
- op->a[0].val = stp->import[Bif.val].module;
- op->a[1].type = TAG_a;
- op->a[1].val = stp->import[Bif.val].function;
- op->a[2].type = TAG_u;
- op->a[2].val = stp->import[Bif.val].arity;
- return op;
- }
op->op = genop_ii_gc_bif3_7;
op->arity = 7;
op->a[0] = Fail;
- op->a[1].type = TAG_u;
- op->a[2] = S1;
- op->a[3] = S2;
- op->a[4] = S3;
- op->a[5] = Live;
+ /* op->a[1] is set by translate_gc_bif() */
+ op->a[2] = Live;
+ op->a[3] = S1;
+ op->a[4] = S2;
+ op->a[5] = S3;
op->a[6] = Dst;
- op->next = NULL;
- return op;
+ return translate_gc_bif(stp, op, Bif);
}
static GenOp*
@@ -3955,8 +4394,225 @@ tuple_append_put(LoaderState* stp, GenOpArg Arity, GenOpArg Dst,
}
/*
+ * Predicate to test whether the given literal is a map.
+ */
+
+static int
+literal_is_map(LoaderState* stp, GenOpArg Lit)
+{
+ Eterm term;
+
+ ASSERT(Lit.type == TAG_q);
+ term = stp->literals[Lit.val].term;
+ return is_map(term);
+}
+
+/*
+ * Predicate to test whether all of the given new small map keys are literals
+ */
+static int
+is_small_map_literal_keys(LoaderState* stp, GenOpArg Size, GenOpArg* Rest)
+{
+ if (Size.val > MAP_SMALL_MAP_LIMIT) {
+ return 0;
+ }
+
+ /*
+     * Operations with a non-literal key always have exactly one key.
+ */
+ if (Size.val != 2) {
+ return 1;
+ }
+
+ switch (Rest[0].type) {
+ case TAG_a:
+ case TAG_i:
+ case TAG_n:
+ case TAG_q:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static GenOp*
+gen_new_small_map_lit(LoaderState* stp, GenOpArg Dst, GenOpArg Live,
+ GenOpArg Size, GenOpArg* Rest)
+{
+ unsigned size = Size.val;
+ Uint lit;
+ unsigned i;
+ GenOp* op;
+ GenOpArg* dst;
+ Eterm* hp;
+ Eterm* tmp;
+ Eterm* thp;
+ Eterm keys;
+
+ NEW_GENOP(stp, op);
+ GENOP_ARITY(op, 3 + size/2);
+ op->next = NULL;
+ op->op = genop_i_new_small_map_lit_3;
+
+ tmp = thp = erts_alloc(ERTS_ALC_T_LOADER_TMP, (1 + size/2) * sizeof(*tmp));
+ keys = make_tuple(thp);
+ *thp++ = make_arityval(size/2);
+
+ dst = op->a+3;
+
+ for (i = 0; i < size; i += 2) {
+ switch (Rest[i].type) {
+ case TAG_a:
+ *thp++ = Rest[i].val;
+ ASSERT(is_atom(Rest[i].val));
+ break;
+ case TAG_i:
+ *thp++ = make_small(Rest[i].val);
+ break;
+ case TAG_n:
+ *thp++ = NIL;
+ break;
+ case TAG_q:
+ *thp++ = stp->literals[Rest[i].val].term;
+ break;
+ }
+ *dst++ = Rest[i + 1];
+ }
+
+ if (!find_literal(stp, keys, &lit)) {
+ lit = new_literal(stp, &hp, 1 + size/2);
+ sys_memcpy(hp, tmp, (1 + size/2) * sizeof(*tmp));
+ }
+ erts_free(ERTS_ALC_T_LOADER_TMP, tmp);
+
+ op->a[0] = Dst;
+ op->a[1] = Live;
+ op->a[2].type = TAG_q;
+ op->a[2].val = lit;
+
+ return op;
+}
+
+/*
+ * Predicate to test whether the given literal is an empty map.
+ */
+
+static int
+is_empty_map(LoaderState* stp, GenOpArg Lit)
+{
+ Eterm term;
+
+ if (Lit.type != TAG_q) {
+ return 0;
+ }
+ term = stp->literals[Lit.val].term;
+ return is_flatmap(term) && flatmap_get_size(flatmap_val(term)) == 0;
+}
+
+/*
+ * Pseudo predicate map_key_sort that will sort the Rest operand for
+ * map instructions as a side effect.
+ */
+
+typedef struct SortGenOpArg {
+ Eterm term; /* Term to use for comparing */
+ GenOpArg arg; /* Original data */
+} SortGenOpArg;
+
+static int
+genopargtermcompare(SortGenOpArg* a, SortGenOpArg* b)
+{
+ return CMP_TERM(a->term, b->term);
+}
+
+static int
+map_key_sort(LoaderState* stp, GenOpArg Size, GenOpArg* Rest)
+{
+ SortGenOpArg* t;
+ unsigned size = Size.val;
+ unsigned i;
+
+ if (size == 2) {
+ return 1; /* Already sorted. */
+ }
+
+ t = (SortGenOpArg *) erts_alloc(ERTS_ALC_T_TMP, size*sizeof(SortGenOpArg));
+
+ /*
+ * Copy original data and sort keys to a temporary array.
+ */
+ for (i = 0; i < size; i += 2) {
+ t[i].arg = Rest[i];
+ switch (Rest[i].type) {
+ case TAG_a:
+ t[i].term = Rest[i].val;
+ ASSERT(is_atom(t[i].term));
+ break;
+ case TAG_i:
+ t[i].term = make_small(Rest[i].val);
+ break;
+ case TAG_n:
+ t[i].term = NIL;
+ break;
+ case TAG_q:
+ t[i].term = stp->literals[Rest[i].val].term;
+ break;
+ default:
+ /*
+ * Not a literal key. Not allowed. Only a single
+ * variable key is allowed in each map instruction.
+ */
+ erts_free(ERTS_ALC_T_TMP, (void *) t);
+ return 0;
+ }
+#ifdef DEBUG
+ t[i+1].term = THE_NON_VALUE;
+#endif
+ t[i+1].arg = Rest[i+1];
+ }
+
+ /*
+ * Sort the temporary array.
+ */
+ qsort((void *) t, size / 2, 2 * sizeof(SortGenOpArg),
+ (int (*)(const void *, const void *)) genopargtermcompare);
+
+ /*
+ * Copy back the sorted, original data.
+ */
+ for (i = 0; i < size; i++) {
+ Rest[i] = t[i].arg;
+ }
+
+ erts_free(ERTS_ALC_T_TMP, (void *) t);
+ return 1;
+}
+
+static int
+hash_genop_arg(LoaderState* stp, GenOpArg Key, Uint32* hx)
+{
+ switch (Key.type) {
+ case TAG_a:
+ *hx = hashmap_make_hash(Key.val);
+ return 1;
+ case TAG_i:
+ *hx = hashmap_make_hash(make_small(Key.val));
+ return 1;
+ case TAG_n:
+ *hx = hashmap_make_hash(NIL);
+ return 1;
+ case TAG_q:
+ *hx = hashmap_make_hash(stp->literals[Key.val].term);
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/*
* Replace a get_map_elements with one key to an instruction with one
- * element
+ * element.
*/
static GenOp*
@@ -3964,37 +4620,150 @@ gen_get_map_element(LoaderState* stp, GenOpArg Fail, GenOpArg Src,
GenOpArg Size, GenOpArg* Rest)
{
GenOp* op;
+ GenOpArg Key;
+ Uint32 hx = 0;
ASSERT(Size.type == TAG_u);
NEW_GENOP(stp, op);
op->next = NULL;
- op->op = genop_get_map_element_4;
- op->arity = 4;
-
op->a[0] = Fail;
op->a[1] = Src;
op->a[2] = Rest[0];
- op->a[3] = Rest[1];
+
+ Key = Rest[0];
+ if (hash_genop_arg(stp, Key, &hx)) {
+ op->arity = 5;
+ op->op = genop_i_get_map_element_hash_5;
+ op->a[3].type = TAG_u;
+ op->a[3].val = (BeamInstr) hx;
+ op->a[4] = Rest[1];
+ } else {
+ op->arity = 4;
+ op->op = genop_i_get_map_element_4;
+ op->a[3] = Rest[1];
+ }
+ return op;
+}
+
+static int
+hash_internal_genop_arg(LoaderState* stp, GenOpArg Key, Uint32* hx)
+{
+ Eterm key_term;
+ switch (Key.type) {
+ case TAG_a:
+ key_term = Key.val;
+ break;
+ case TAG_i:
+ key_term = make_small(Key.val);
+ break;
+ case TAG_n:
+ key_term = NIL;
+ break;
+ case TAG_q:
+ key_term = stp->literals[Key.val].term;
+ break;
+ default:
+ return 0;
+ }
+ *hx = erts_pd_make_hx(key_term);
+ return 1;
+}
+
+static GenOp*
+gen_get(LoaderState* stp, GenOpArg Src, GenOpArg Dst)
+{
+ GenOp* op;
+ Uint32 hx = 0;
+
+ NEW_GENOP(stp, op);
+ op->next = NULL;
+ if (hash_internal_genop_arg(stp, Src, &hx)) {
+ op->arity = 3;
+ op->op = genop_i_get_hash_3;
+ op->a[0] = Src;
+ op->a[1].type = TAG_u;
+ op->a[1].val = (BeamInstr) hx;
+ op->a[2] = Dst;
+ } else {
+ op->arity = 2;
+ op->op = genop_i_get_2;
+ op->a[0] = Src;
+ op->a[1] = Dst;
+ }
return op;
}
+
static GenOp*
-gen_has_map_field(LoaderState* stp, GenOpArg Fail, GenOpArg Src,
- GenOpArg Size, GenOpArg* Rest)
+gen_get_map_elements(LoaderState* stp, GenOpArg Fail, GenOpArg Src,
+ GenOpArg Size, GenOpArg* Rest)
{
GenOp* op;
+ Uint32 hx;
+ Uint i;
+ GenOpArg* dst;
+#ifdef DEBUG
+ int good_hash;
+#endif
+ ERTS_UNDEF(hx, 0);
ASSERT(Size.type == TAG_u);
NEW_GENOP(stp, op);
+ op->op = genop_i_get_map_elements_3;
+ GENOP_ARITY(op, 3 + 3*(Size.val/2));
op->next = NULL;
- op->op = genop_has_map_field_3;
- op->arity = 4;
+ op->a[0] = Fail;
+ op->a[1] = Src;
+ op->a[2].type = TAG_u;
+ op->a[2].val = 3*(Size.val/2);
+
+ dst = op->a+3;
+ for (i = 0; i < Size.val / 2; i++) {
+ dst[0] = Rest[2*i];
+ dst[1] = Rest[2*i+1];
+#ifdef DEBUG
+ good_hash =
+#endif
+ hash_genop_arg(stp, dst[0], &hx);
+#ifdef DEBUG
+ ASSERT(good_hash);
+#endif
+ dst[2].type = TAG_u;
+ dst[2].val = (BeamInstr) hx;
+ dst += 3;
+ }
+ return op;
+}
+
+static GenOp*
+gen_has_map_fields(LoaderState* stp, GenOpArg Fail, GenOpArg Src,
+ GenOpArg Size, GenOpArg* Rest)
+{
+ GenOp* op;
+ Uint i;
+ Uint n;
+
+ ASSERT(Size.type == TAG_u);
+ n = Size.val;
+
+ NEW_GENOP(stp, op);
+ GENOP_ARITY(op, 3 + 2*n);
+ op->next = NULL;
+ op->op = genop_get_map_elements_3;
op->a[0] = Fail;
op->a[1] = Src;
- op->a[2] = Rest[0];
+ op->a[2].type = TAG_u;
+ op->a[2].val = 2*n;
+
+ for (i = 0; i < n; i++) {
+ op->a[3+2*i] = Rest[i];
+ op->a[3+2*i+1].type = TAG_x;
+ op->a[3+2*i+1].val = SCRATCH_X_REG; /* Ignore result */
+ }
return op;
}
@@ -4006,8 +4775,8 @@ gen_has_map_field(LoaderState* stp, GenOpArg Fail, GenOpArg Src,
static int
freeze_code(LoaderState* stp)
{
- BeamInstr* code = stp->code;
- Uint *literal_end = NULL;
+ BeamCodeHeader* code_hdr = stp->hdr;
+ BeamInstr* codev = (BeamInstr*) &stp->hdr->functions;
int i;
byte* str_table;
unsigned strtab_size = stp->chunks[STR_CHUNK].size;
@@ -4032,128 +4801,80 @@ freeze_code(LoaderState* stp)
if (stp->line_instr == 0) {
line_size = 0;
} else {
- line_size = (MI_LINE_FUNC_TAB + (stp->num_functions + 1) +
- (stp->current_li+1) + stp->num_fnames) *
- sizeof(Eterm) + (stp->current_li+1) * stp->loc_size;
+ line_size = (offsetof(BeamCodeLineTab,func_tab)
+ + (stp->num_functions + 1) * sizeof(BeamInstr**) /* func_tab */
+ + (stp->current_li + 1) * sizeof(BeamInstr*) /* line items */
+ + stp->num_fnames * sizeof(Eterm) /* fname table */
+ + (stp->current_li + 1) * stp->loc_size); /* loc_tab */
}
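
A self-contained restatement of that size computation, to make the implied BeamCodeLineTab layout explicit (pointer-sized entries are assumed for func_tab, the line item pointers, and the Eterm fname table, matching the sizeof terms above; loc_size is 2 or 4):

    #include <stddef.h>

    static size_t line_table_bytes(size_t hdr,      /* offsetof(BeamCodeLineTab, func_tab) */
                                   size_t nfuncs,   /* stp->num_functions */
                                   size_t nitems,   /* stp->current_li */
                                   size_t nfnames,  /* stp->num_fnames */
                                   size_t loc_size) /* 2 or 4 */
    {
        return hdr
             + (nfuncs + 1) * sizeof(void *)   /* func_tab */
             + (nitems + 1) * sizeof(void *)   /* line item pointers */
             + nfnames * sizeof(void *)        /* file name (Eterm) table */
             + (nitems + 1) * loc_size;        /* packed location table */
    }
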
- size = (stp->ci * sizeof(BeamInstr)) +
- (stp->total_literal_size * sizeof(Eterm)) +
- strtab_size + attr_size + compile_size + line_size;
+ size = offsetof(BeamCodeHeader,functions) + (stp->ci * sizeof(BeamInstr)) +
+ strtab_size + attr_size + compile_size + MD5_SIZE + line_size;
/*
* Move the code to its final location.
*/
- code = (BeamInstr *) erts_realloc(ERTS_ALC_T_CODE, (void *) code, size);
- CHKBLK(ERTS_ALC_T_CODE,code);
+ code_hdr = (BeamCodeHeader*) erts_realloc(ERTS_ALC_T_CODE, (void *) code_hdr, size);
+ codev = (BeamInstr*) &code_hdr->functions;
+ CHKBLK(ERTS_ALC_T_CODE,code_hdr);
/*
* Place a pointer to the op_int_code_end instruction in the
* function table in the beginning of the file.
*/
- code[MI_FUNCTIONS+stp->num_functions] = (BeamInstr) (code + stp->ci - 1);
- CHKBLK(ERTS_ALC_T_CODE,code);
+ code_hdr->functions[stp->num_functions] = (ErtsCodeInfo*)(codev + stp->ci - 1);
+ CHKBLK(ERTS_ALC_T_CODE,code_hdr);
/*
* Store the pointer to the on_load function.
*/
if (stp->on_load) {
- code[MI_ON_LOAD_FUNCTION_PTR] = (BeamInstr) (code + stp->on_load);
+ code_hdr->on_load_function_ptr = codev + stp->on_load;
} else {
- code[MI_ON_LOAD_FUNCTION_PTR] = 0;
+ code_hdr->on_load_function_ptr = NULL;
}
- CHKBLK(ERTS_ALC_T_CODE,code);
+ CHKBLK(ERTS_ALC_T_CODE,code_hdr);
- literal_end = (Uint *) (code+stp->ci);
/*
- * Place the literal heap directly after the code and fix up all
- * instructions that refer to it.
+ * Place the literals in their own allocated heap (for fast range check)
+ * and fix up all instructions that refer to it.
*/
{
- Uint* ptr;
- Uint* low;
- Uint* high;
+ Eterm* ptr;
LiteralPatch* lp;
- struct erl_off_heap_header* off_heap = 0;
- struct erl_off_heap_header** off_heap_last = &off_heap;
-
- low = (Uint *) (code+stp->ci);
- high = low + stp->total_literal_size;
- code[MI_LITERALS_START] = (BeamInstr) low;
- code[MI_LITERALS_END] = (BeamInstr) high;
- ptr = low;
- for (i = 0; i < stp->num_literals; i++) {
- SWord offset;
- struct erl_off_heap_header* t_off_heap;
-
- sys_memcpy(ptr, stp->literals[i].heap,
- stp->literals[i].heap_size*sizeof(Eterm));
- offset = ptr - stp->literals[i].heap;
- stp->literals[i].offset = offset;
- high = ptr + stp->literals[i].heap_size;
- while (ptr < high) {
- Eterm val = *ptr;
- switch (primary_tag(val)) {
- case TAG_PRIMARY_LIST:
- case TAG_PRIMARY_BOXED:
- *ptr++ = offset_ptr(val, offset);
- break;
- case TAG_PRIMARY_HEADER:
- if (header_is_transparent(val)) {
- ptr++;
- } else {
- if (thing_subtag(val) == REFC_BINARY_SUBTAG) {
- struct erl_off_heap_header* oh;
-
- oh = (struct erl_off_heap_header*) ptr;
- if (oh->next) {
- Eterm** uptr = (Eterm **) (void *) &oh->next;
- *uptr += offset;
- }
- }
- ptr += 1 + thing_arityval(val);
- }
- break;
- default:
- ptr++;
- break;
- }
- }
- ASSERT(ptr == high);
+ ErlOffHeap code_off_heap;
+ ErtsLiteralArea *literal_area;
+ Uint lit_asize;
- /*
- * Re-link the off_heap list for this term onto the
- * off_heap list for the entire module.
- */
- t_off_heap = stp->literals[i].off_heap.first;
- if (t_off_heap) {
- t_off_heap = (struct erl_off_heap_header *)
- offset_ptr((UWord) t_off_heap, offset);
- while (t_off_heap) {
- *off_heap_last = t_off_heap;
- off_heap_last = &t_off_heap->next;
- t_off_heap = t_off_heap->next;
- }
- }
+ ERTS_INIT_OFF_HEAP(&code_off_heap);
+
+ lit_asize = ERTS_LITERAL_AREA_ALLOC_SIZE(stp->total_literal_size);
+ literal_area = erts_alloc(ERTS_ALC_T_LITERAL, lit_asize);
+ ptr = &literal_area->start[0];
+ literal_area->end = ptr + stp->total_literal_size;
+
+ for (i = 0; i < stp->num_literals; i++) {
+ if (is_not_immed(stp->literals[i].term)) {
+ erts_move_multi_frags(&ptr, &code_off_heap,
+ stp->literals[i].heap_frags,
+ &stp->literals[i].term, 1, 1);
+ ASSERT(erts_is_literal(stp->literals[i].term,
+ ptr_val(stp->literals[i].term)));
+ }
}
- code[MI_LITERALS_OFF_HEAP] = (BeamInstr) off_heap;
+ literal_area->off_heap = code_off_heap.first;
lp = stp->literal_patches;
while (lp != 0) {
BeamInstr* op_ptr;
- Uint literal;
Literal* lit;
- op_ptr = code + lp->pos;
+ op_ptr = codev + lp->pos;
lit = &stp->literals[op_ptr[0]];
- literal = lit->term;
- if (is_boxed(literal) || is_list(literal)) {
- literal = offset_ptr(literal, lit->offset);
- }
- op_ptr[0] = literal;
+ op_ptr[0] = lit->term;
lp = lp->next;
}
- literal_end += stp->total_literal_size;
+ code_hdr->literal_area = literal_area;
}
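
The "fast range check" mentioned above is the payoff: with all of a module's literals in one ErtsLiteralArea, testing whether a term pointer is a literal reduces to two comparisons against the area bounds. A minimal sketch, with uintptr_t* standing in for Eterm*:

    #include <stdint.h>

    static int in_literal_area(const uintptr_t *p,
                               const uintptr_t *start, const uintptr_t *end)
    {
        return start <= p && p < end;   /* area->start[0] .. area->end */
    }
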
CHKBLK(ERTS_ALC_T_CODE,code);
@@ -4161,52 +4882,49 @@ freeze_code(LoaderState* stp)
* If there is line information, place it here.
*/
if (stp->line_instr == 0) {
- code[MI_LINE_TABLE] = (BeamInstr) 0;
- str_table = (byte *) literal_end;
+ code_hdr->line_table = NULL;
+ str_table = (byte *) (codev + stp->ci);
} else {
- Eterm* line_tab = (Eterm *) literal_end;
- Eterm* p;
- int ftab_size = stp->num_functions;
- int num_instrs = stp->current_li;
- Eterm* first_line_item;
+ BeamCodeLineTab* const line_tab = (BeamCodeLineTab *) (codev+stp->ci);
+ const unsigned int ftab_size = stp->num_functions;
+ const unsigned int num_instrs = stp->current_li;
+ const BeamInstr** const line_items =
+ (const BeamInstr**) &line_tab->func_tab[ftab_size + 1];
- code[MI_LINE_TABLE] = (BeamInstr) line_tab;
- p = line_tab + MI_LINE_FUNC_TAB;
+ code_hdr->line_table = line_tab;
- first_line_item = (p + ftab_size + 1);
for (i = 0; i < ftab_size; i++) {
- *p++ = (Eterm) (BeamInstr) (first_line_item + stp->func_line[i]);
+ line_tab->func_tab[i] = line_items + stp->func_line[i];
}
- *p++ = (Eterm) (BeamInstr) (first_line_item + num_instrs);
- ASSERT(p == first_line_item);
+ line_tab->func_tab[i] = line_items + num_instrs;
+
for (i = 0; i < num_instrs; i++) {
- *p++ = (Eterm) (BeamInstr) (code + stp->line_instr[i].pos);
+ line_items[i] = codev + stp->line_instr[i].pos;
}
- *p++ = (Eterm) (BeamInstr) (code + stp->ci - 1);
+ line_items[i] = codev + stp->ci - 1;
- line_tab[MI_LINE_FNAME_PTR] = (Eterm) (BeamInstr) p;
- memcpy(p, stp->fname, stp->num_fnames*sizeof(Eterm));
- p += stp->num_fnames;
+ line_tab->fname_ptr = (Eterm*) &line_items[i + 1];
+ memcpy(line_tab->fname_ptr, stp->fname, stp->num_fnames*sizeof(Eterm));
- line_tab[MI_LINE_LOC_TAB] = (Eterm) (BeamInstr) p;
- line_tab[MI_LINE_LOC_SIZE] = stp->loc_size;
+ line_tab->loc_size = stp->loc_size;
if (stp->loc_size == 2) {
- Uint16* locp = (Uint16 *) p;
- for (i = 0; i < num_instrs; i++) {
+ Uint16* locp = (Uint16 *) &line_tab->fname_ptr[stp->num_fnames];
+ line_tab->loc_tab.p2 = locp;
+ for (i = 0; i < num_instrs; i++) {
*locp++ = (Uint16) stp->line_instr[i].loc;
- }
- *locp++ = LINE_INVALID_LOCATION;
- str_table = (byte *) locp;
+ }
+ *locp++ = LINE_INVALID_LOCATION;
+ str_table = (byte *) locp;
} else {
- Uint32* locp = (Uint32 *) p;
- ASSERT(stp->loc_size == 4);
+ Uint32* locp = (Uint32 *) &line_tab->fname_ptr[stp->num_fnames];
+ ASSERT(stp->loc_size == 4);
+ line_tab->loc_tab.p4 = locp;
for (i = 0; i < num_instrs; i++) {
*locp++ = stp->line_instr[i].loc;
}
*locp++ = LINE_INVALID_LOCATION;
- str_table = (byte *) locp;
+ str_table = (byte *) locp;
}
-
CHKBLK(ERTS_ALC_T_CODE,code);
}
@@ -4218,13 +4936,13 @@ freeze_code(LoaderState* stp)
if (attr_size) {
byte* attr = str_table + strtab_size;
sys_memcpy(attr, stp->chunks[ATTR_CHUNK].start, stp->chunks[ATTR_CHUNK].size);
- code[MI_ATTR_PTR] = (BeamInstr) attr;
- code[MI_ATTR_SIZE] = (BeamInstr) stp->chunks[ATTR_CHUNK].size;
+ code_hdr->attr_ptr = attr;
+ code_hdr->attr_size = (BeamInstr) stp->chunks[ATTR_CHUNK].size;
decoded_size = erts_decode_ext_size(attr, attr_size);
if (decoded_size < 0) {
LoadError0(stp, "bad external term representation of module attributes");
}
- code[MI_ATTR_SIZE_ON_HEAP] = decoded_size;
+ code_hdr->attr_size_on_heap = decoded_size;
}
CHKBLK(ERTS_ALC_T_CODE,code);
if (compile_size) {
@@ -4234,9 +4952,9 @@ freeze_code(LoaderState* stp)
stp->chunks[COMPILE_CHUNK].size);
CHKBLK(ERTS_ALC_T_CODE,code);
- code[MI_COMPILE_PTR] = (BeamInstr) compile_info;
+ code_hdr->compile_ptr = compile_info;
CHKBLK(ERTS_ALC_T_CODE,code);
- code[MI_COMPILE_SIZE] = (BeamInstr) stp->chunks[COMPILE_CHUNK].size;
+ code_hdr->compile_size = (BeamInstr) stp->chunks[COMPILE_CHUNK].size;
CHKBLK(ERTS_ALC_T_CODE,code);
decoded_size = erts_decode_ext_size(compile_info, compile_size);
CHKBLK(ERTS_ALC_T_CODE,code);
@@ -4244,15 +4962,24 @@ freeze_code(LoaderState* stp)
LoadError0(stp, "bad external term representation of compilation information");
}
CHKBLK(ERTS_ALC_T_CODE,code);
- code[MI_COMPILE_SIZE_ON_HEAP] = decoded_size;
+ code_hdr->compile_size_on_heap = decoded_size;
+ }
+ CHKBLK(ERTS_ALC_T_CODE,code);
+ {
+ byte* md5_sum = str_table + strtab_size + attr_size + compile_size;
+ CHKBLK(ERTS_ALC_T_CODE,code);
+ sys_memcpy(md5_sum, stp->mod_md5, MD5_SIZE);
+ CHKBLK(ERTS_ALC_T_CODE,code);
+ code_hdr->md5_ptr = md5_sum;
+ CHKBLK(ERTS_ALC_T_CODE,code);
}
CHKBLK(ERTS_ALC_T_CODE,code);
/*
* Make sure that we have not overflowed the allocated code space.
*/
- ASSERT(str_table + strtab_size + attr_size + compile_size ==
- ((byte *) code) + size);
+ ASSERT(str_table + strtab_size + attr_size + compile_size + MD5_SIZE ==
+ ((byte *) code_hdr) + size);
/*
* Patch all instructions that refer to the string table.
@@ -4264,46 +4991,83 @@ freeze_code(LoaderState* stp)
BeamInstr* op_ptr;
byte* strp;
- op_ptr = code + sp->pos;
+ op_ptr = codev + sp->pos;
strp = str_table + op_ptr[0];
op_ptr[0] = (BeamInstr) strp;
sp = sp->next;
}
}
- CHKBLK(ERTS_ALC_T_CODE,code);
+ CHKBLK(ERTS_ALC_T_CODE,code_hdr);
/*
* Resolve all labels.
*/
for (i = 0; i < stp->num_labels; i++) {
- Uint this_patch;
- Uint next_patch;
+ Uint patch;
Uint value = stp->labels[i].value;
-
- if (value == 0 && stp->labels[i].patches != 0) {
+
+ if (value == 0 && stp->labels[i].num_patches != 0) {
LoadError1(stp, "label %d not resolved", i);
}
ASSERT(value < stp->ci);
- this_patch = stp->labels[i].patches;
- while (this_patch != 0) {
- ASSERT(this_patch < stp->ci);
- next_patch = code[this_patch];
- ASSERT(next_patch < stp->ci);
- code[this_patch] = (BeamInstr) (code + value);
- this_patch = next_patch;
+ for (patch = 0; patch < stp->labels[i].num_patches; patch++) {
+ LabelPatch* lp = &stp->labels[i].patches[patch];
+ Uint pos = lp->pos;
+ ASSERT(pos < stp->ci);
+ if (pos < stp->num_functions) {
+ /*
+ * This is the array of pointers to the beginning of
+ * each function. The pointers must remain absolute.
+ */
+ codev[pos] = (BeamInstr) (codev + value);
+ } else {
+#if defined(DEBUG) && defined(BEAM_WIDE_MASK)
+ Uint w;
+#endif
+ Sint32 rel = lp->offset + value;
+ switch (lp->packed) {
+ case 0: /* Not packed */
+ ASSERT(codev[pos] == i);
+ codev[pos] = rel;
+ break;
+#ifdef BEAM_WIDE_MASK
+ case 1: /* Least significant word. */
+#ifdef DEBUG
+ w = codev[pos] & BEAM_WIDE_MASK;
+ /* Correct label in least significant word? */
+ ASSERT(w == i);
+#endif
+ codev[pos] = (codev[pos] & ~BEAM_WIDE_MASK) |
+ (rel & BEAM_WIDE_MASK);
+ break;
+ case 2: /* Most significant word */
+#ifdef DEBUG
+ w = (codev[pos] >> BEAM_WIDE_SHIFT) & BEAM_WIDE_MASK;
+ /* Correct label in most significant word? */
+ ASSERT(w == i);
+#endif
+ codev[pos] = ((Uint)rel << BEAM_WIDE_SHIFT) |
+ (codev[pos] & BEAM_WIDE_MASK);
+ break;
+#endif
+ default:
+ ASSERT(0);
+ }
+ }
}
}
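
For the packed cases above, two label offsets share one word, so the runtime must slice the word back apart. A sketch of that extraction under the usual 64-bit assumption (BEAM_WIDE_SHIFT is 32 and BEAM_WIDE_MASK is the low-half mask):

    #include <stdint.h>

    static void unpack_rels(uint64_t word, int32_t *least, int32_t *most)
    {
        *least = (int32_t) (word & 0xFFFFFFFFu);  /* case 1: low half  */
        *most  = (int32_t) (word >> 32);          /* case 2: high half */
    }
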
- CHKBLK(ERTS_ALC_T_CODE,code);
+ CHKBLK(ERTS_ALC_T_CODE,code_hdr);
/*
* Save the updated code pointer and code size.
*/
- stp->code = code;
+ stp->hdr = code_hdr;
+ stp->codev = codev;
stp->loaded_size = size;
- CHKBLK(ERTS_ALC_T_CODE,code);
+ CHKBLK(ERTS_ALC_T_CODE,code_hdr);
return 1;
load_error:
@@ -4311,19 +5075,19 @@ freeze_code(LoaderState* stp)
* Make sure that the caller frees the newly reallocated block, and
* not the old one (in case it has moved).
*/
- stp->code = code;
+ stp->hdr = code_hdr;
+ stp->codev = codev;
return 0;
}
static void
-final_touch(LoaderState* stp)
+final_touch(LoaderState* stp, struct erl_module_instance* inst_p)
{
- int i;
+ unsigned int i;
int on_load = stp->on_load;
unsigned catches;
Uint index;
- BeamInstr* code = stp->code;
- Module* modp;
+ BeamInstr* codev = stp->codev;
/*
* Allocate catch indices and fix up all catch_yf instructions.
@@ -4332,14 +5096,16 @@ final_touch(LoaderState* stp)
index = stp->catches;
catches = BEAM_CATCHES_NIL;
while (index != 0) {
- BeamInstr next = code[index];
- code[index] = BeamOpCode(op_catch_yf);
- catches = beam_catches_cons((BeamInstr *)code[index+2], catches);
- code[index+2] = make_catch(catches);
+ BeamInstr next = codev[index];
+ BeamInstr* abs_addr;
+ codev[index] = BeamOpCodeAddr(op_catch_yf);
+ /* We must make the address of the label absolute again. */
+ abs_addr = (BeamInstr *)codev + index + codev[index+2];
+ catches = beam_catches_cons(abs_addr, catches);
+ codev[index+2] = make_catch(catches);
index = next;
}
- modp = erts_put_module(stp->module);
- modp->curr.catches = catches;
+ inst_p->catches = catches;
/*
* Export functions.
@@ -4355,16 +5121,16 @@ final_touch(LoaderState* stp)
}
ep = erts_export_put(stp->module, stp->export[i].function,
stp->export[i].arity);
- if (!on_load) {
- ep->addressv[erts_staging_code_ix()] = address;
- } else {
+ if (on_load) {
/*
- * Don't make any of the exported functions
- * callable yet.
+ * on_load: Don't make any of the exported functions
+ * callable yet. Keep any function in the current
+ * code callable.
*/
- ep->addressv[erts_staging_code_ix()] = ep->code+3;
- ep->code[4] = (BeamInstr) address;
+ ep->beam[1] = (BeamInstr) address;
}
+ else
+ ep->addressv[erts_staging_code_ix()] = address;
}
/*
@@ -4386,8 +5152,8 @@ final_touch(LoaderState* stp)
current = stp->import[i].patches;
while (current != 0) {
ASSERT(current < stp->ci);
- next = stp->code[current];
- stp->code[current] = import;
+ next = stp->codev[current];
+ stp->codev[current] = import;
current = next;
}
}
@@ -4400,7 +5166,7 @@ final_touch(LoaderState* stp)
for (i = 0; i < stp->num_lambdas; i++) {
unsigned entry_label = stp->lambdas[i].label;
ErlFunEntry* fe = stp->lambdas[i].fe;
- BeamInstr* code_ptr = (BeamInstr *) (stp->code + stp->labels[entry_label].value);
+ BeamInstr* code_ptr = stp->codev + stp->labels[entry_label].value;
if (fe->address[0] != 0) {
/*
@@ -4410,7 +5176,7 @@ final_touch(LoaderState* stp)
}
fe->address = code_ptr;
#ifdef HIPE
- hipe_set_closure_stub(fe, stp->lambdas[i].num_free);
+ hipe_set_closure_stub(fe);
#endif
}
}
@@ -4421,32 +5187,26 @@ transform_engine(LoaderState* st)
{
Uint op;
int ap; /* Current argument. */
- Uint* restart; /* Where to restart if current match fails. */
- GenOpArg def_vars[TE_MAX_VARS]; /* Default buffer for variables. */
- GenOpArg* var = def_vars;
- int num_vars = 0;
+ const Uint* restart; /* Where to restart if current match fails. */
+ GenOpArg var[TE_MAX_VARS]; /* Buffer for variables. */
+ GenOpArg* rest_args = NULL;
+ int num_rest_args = 0;
int i; /* General index. */
Uint mask;
GenOp* instr;
- Uint* pc;
- int rval;
+ GenOp* first = st->genop;
+ GenOp* keep = NULL;
+ const Uint* pc;
static Uint restart_fail[1] = {TOP_fail};
- ASSERT(gen_opc[st->genop->op].transform != -1);
- pc = op_transform + gen_opc[st->genop->op].transform;
- restart = pc;
+ ASSERT(gen_opc[first->op].transform != -1);
+ restart = op_transform + gen_opc[first->op].transform;
restart:
- if (var != def_vars) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) var);
- var = def_vars;
- }
ASSERT(restart != NULL);
pc = restart;
ASSERT(*pc < NUM_TOPS); /* Valid instruction? */
- instr = st->genop;
-
-#define RETURN(r) rval = (r); goto do_return;
+ instr = first;
#ifdef DEBUG
restart = NULL;
@@ -4464,7 +5224,7 @@ transform_engine(LoaderState* st)
* We'll need at least one more instruction to decide whether
* this combination matches or not.
*/
- RETURN(TE_SHORT_WINDOW);
+ return TE_SHORT_WINDOW;
}
if (*pc++ != instr->op)
goto restart;
@@ -4513,7 +5273,8 @@ transform_engine(LoaderState* st)
if (var[i].type != instr->a[ap].type)
goto restart;
switch (var[i].type) {
- case TAG_r: case TAG_n: break;
+ case TAG_n:
+ break;
default:
if (var[i].val != instr->a[ap].val)
goto restart;
@@ -4546,7 +5307,7 @@ transform_engine(LoaderState* st)
if (i >= st->num_imports || st->import[i].bf == NULL)
goto restart;
if (bif_number != -1 &&
- bif_export[bif_number]->code[4] != (BeamInstr) st->import[i].bf) {
+ bif_export[bif_number]->beam[1] != (BeamInstr) st->import[i].bf) {
goto restart;
}
}
@@ -4625,19 +5386,9 @@ transform_engine(LoaderState* st)
#if defined(TOP_rest_args)
case TOP_rest_args:
{
- int n = *pc++;
int formal_arity = gen_opc[instr->op].arity;
- int j = formal_arity;
-
- num_vars = n + (instr->arity - formal_arity);
- var = erts_alloc(ERTS_ALC_T_LOADER_TMP,
- num_vars * sizeof(GenOpArg));
- for (i = 0; i < n; i++) {
- var[i] = def_vars[i];
- }
- while (i < num_vars) {
- var[i++] = instr->a[j++];
- }
+ num_rest_args = instr->arity - formal_arity;
+ rest_args = instr->a + formal_arity;
}
break;
#endif
@@ -4646,21 +5397,22 @@ transform_engine(LoaderState* st)
break;
case TOP_commit:
instr = instr->next; /* The next_instr was optimized away. */
-
- /*
- * The left-hand side of this transformation matched.
- * Delete all matched instructions.
- */
- while (st->genop != instr) {
- GenOp* next = st->genop->next;
- FREE_GENOP(st, st->genop);
- st->genop = next;
- }
+ keep = instr;
+ st->genop = instr;
#ifdef DEBUG
instr = 0;
#endif
break;
-
+#if defined(TOP_keep)
+ case TOP_keep:
+ /* Keep the current instruction unchanged. */
+ keep = instr;
+ st->genop = instr;
+#ifdef DEBUG
+ instr = 0;
+#endif
+ break;
+#endif
#if defined(TOP_call_end)
case TOP_call_end:
{
@@ -4685,22 +5437,19 @@ transform_engine(LoaderState* st)
lastp = &((*lastp)->next);
}
- instr = instr->next; /* The next_instr was optimized away. */
-
- /*
- * The left-hand side of this transformation matched.
- * Delete all matched instructions.
- */
- while (st->genop != instr) {
- GenOp* next = st->genop->next;
- FREE_GENOP(st, st->genop);
- st->genop = next;
- }
- *lastp = st->genop;
+ keep = instr->next; /* The next_instr was optimized away. */
+ *lastp = keep;
st->genop = new_instr;
}
- RETURN(TE_OK);
+ /* FALLTHROUGH */
#endif
+ case TOP_end:
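+	/*
+	 * The match succeeded: free all matched instructions up to
+	 * (but not including) the first instruction to keep.
+	 */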
+ while (first != keep) {
+ GenOp* next = first->next;
+ FREE_GENOP(st, first);
+ first = next;
+ }
+ return TE_OK;
case TOP_new_instr:
/*
* Note that the instructions are generated in reverse order.
@@ -4712,6 +5461,12 @@ transform_engine(LoaderState* st)
instr->arity = gen_opc[op].arity;
ap = 0;
break;
+#ifdef TOP_rename
+ case TOP_rename:
+ instr->op = op = *pc++;
+ instr->arity = gen_opc[op].arity;
+ return TE_OK;
+#endif
case TOP_store_type:
i = *pc++;
instr->a[ap].type = i;
@@ -4731,14 +5486,10 @@ transform_engine(LoaderState* st)
#if defined(TOP_store_rest_args)
case TOP_store_rest_args:
{
- int n = *pc++;
- int num_extra = num_vars - n;
-
- ASSERT(n <= num_vars);
- GENOP_ARITY(instr, instr->arity+num_extra);
+ GENOP_ARITY(instr, instr->arity+num_rest_args);
memcpy(instr->a, instr->def_args, ap*sizeof(GenOpArg));
- memcpy(instr->a+ap, var+n, num_extra*sizeof(GenOpArg));
- ap += num_extra;
+ memcpy(instr->a+ap, rest_args, num_rest_args*sizeof(GenOpArg));
+ ap += num_rest_args;
}
break;
#endif
@@ -4750,21 +5501,12 @@ transform_engine(LoaderState* st)
case TOP_try_me_else_fail:
restart = restart_fail;
break;
- case TOP_end:
- RETURN(TE_OK);
case TOP_fail:
- RETURN(TE_FAIL);
+ return TE_FAIL;
default:
ASSERT(0);
}
}
-#undef RETURN
-
- do_return:
- if (var != def_vars) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) var);
- }
- return rval;
}
static void
@@ -4826,12 +5568,15 @@ get_tag_and_value(LoaderState* stp, Uint len_code,
{
Uint count;
Sint val;
- byte default_buf[128];
- byte* bigbuf = default_buf;
+ byte default_byte_buf[128];
+ byte* byte_buf = default_byte_buf;
+ Eterm default_big_buf[128/sizeof(Eterm)];
+ Eterm* big_buf = default_big_buf;
+ Eterm tmp_big;
byte* s;
int i;
int neg = 0;
- Uint arity;
+ Uint words_needed;
Eterm* hp;
/*
@@ -4908,8 +5653,11 @@ get_tag_and_value(LoaderState* stp, Uint len_code,
*result = val;
return TAG_i;
} else {
- *result = new_literal(stp, &hp, BIG_UINT_HEAP_SIZE);
- (void) small_to_big(val, hp);
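+		/*
+		 * Build the bignum in a temporary buffer so that an
+		 * identical existing literal can be found and shared.
+		 */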
+ tmp_big = small_to_big(val, big_buf);
+ if (!find_literal(stp, tmp_big, result)) {
+ *result = new_literal(stp, &hp, BIG_UINT_HEAP_SIZE);
+ sys_memcpy(hp, big_buf, BIG_UINT_HEAP_SIZE*sizeof(Eterm));
+ }
return TAG_q;
}
}
@@ -4919,8 +5667,8 @@ get_tag_and_value(LoaderState* stp, Uint len_code,
* (including margin).
*/
- if (count+8 > sizeof(default_buf)) {
- bigbuf = erts_alloc(ERTS_ALC_T_LOADER_TMP, count+8);
+ if (count+8 > sizeof(default_byte_buf)) {
+ byte_buf = erts_alloc(ERTS_ALC_T_LOADER_TMP, count+8);
}
/*
@@ -4929,20 +5677,20 @@ get_tag_and_value(LoaderState* stp, Uint len_code,
GetString(stp, s, count);
for (i = 0; i < count; i++) {
- bigbuf[count-i-1] = *s++;
+ byte_buf[count-i-1] = *s++;
}
/*
* Check if the number is negative, and negate it if so.
*/
- if ((bigbuf[count-1] & 0x80) != 0) {
+ if ((byte_buf[count-1] & 0x80) != 0) {
unsigned carry = 1;
neg = 1;
for (i = 0; i < count; i++) {
- bigbuf[i] = ~bigbuf[i] + carry;
- carry = (bigbuf[i] == 0 && carry == 1);
+ byte_buf[i] = ~byte_buf[i] + carry;
+ carry = (byte_buf[i] == 0 && carry == 1);
}
ASSERT(carry == 0);
}
@@ -4951,32 +5699,52 @@ get_tag_and_value(LoaderState* stp, Uint len_code,
* Align to word boundary.
*/
- if (bigbuf[count-1] == 0) {
+ if (byte_buf[count-1] == 0) {
count--;
}
- if (bigbuf[count-1] == 0) {
+ if (byte_buf[count-1] == 0) {
LoadError0(stp, "bignum not normalized");
}
while (count % sizeof(Eterm) != 0) {
- bigbuf[count++] = 0;
+ byte_buf[count++] = 0;
}
/*
- * Allocate heap space for the bignum and copy it.
+ * Convert to a bignum.
*/
- arity = count/sizeof(Eterm);
- *result = new_literal(stp, &hp, arity+1);
- (void) bytes_to_big(bigbuf, count, neg, hp);
+ words_needed = count/sizeof(Eterm) + 1;
+ if (words_needed*sizeof(Eterm) > sizeof(default_big_buf)) {
+ big_buf = erts_alloc(ERTS_ALC_T_LOADER_TMP, words_needed*sizeof(Eterm));
+ }
+ tmp_big = bytes_to_big(byte_buf, count, neg, big_buf);
+ if (is_nil(tmp_big)) {
+ goto load_error;
+ }
- if (bigbuf != default_buf) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) bigbuf);
+ /*
+ * Create a literal if there is no previous literal with the same value.
+ */
+
+ if (!find_literal(stp, tmp_big, result)) {
+ *result = new_literal(stp, &hp, words_needed);
+ sys_memcpy(hp, big_buf, words_needed*sizeof(Eterm));
+ }
+
+ if (byte_buf != default_byte_buf) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) byte_buf);
+ }
+ if (big_buf != default_big_buf) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) big_buf);
}
return TAG_q;
load_error:
- if (bigbuf != default_buf) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) bigbuf);
+ if (byte_buf != default_byte_buf) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) byte_buf);
+ }
+ if (big_buf != default_big_buf) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) big_buf);
}
return -1;
}
@@ -5015,14 +5783,13 @@ new_genop(LoaderState* stp)
static int
new_label(LoaderState* stp)
{
- int num = stp->num_labels;
+ unsigned int num = stp->num_labels;
stp->num_labels++;
stp->labels = (Label *) erts_realloc(ERTS_ALC_T_PREPARED_CODE,
(void *) stp->labels,
stp->num_labels * sizeof(Label));
- stp->labels[num].value = 0;
- stp->labels[num].patches = 0;
+ init_label(&stp->labels[num]);
return num;
}
@@ -5071,19 +5838,36 @@ new_literal(LoaderState* stp, Eterm** hpp, Uint heap_size)
stp->total_literal_size += heap_size;
lit = stp->literals + stp->num_literals;
- lit->offset = 0;
- lit->heap_size = heap_size;
- lit->heap = erts_alloc(ERTS_ALC_T_PREPARED_CODE, heap_size*sizeof(Eterm));
- lit->term = make_boxed(lit->heap);
- lit->off_heap.first = 0;
- lit->off_heap.overhead = 0;
- *hpp = lit->heap;
+ lit->heap_frags = new_literal_fragment(heap_size);
+ lit->term = make_boxed(lit->heap_frags->mem);
+ *hpp = lit->heap_frags->mem;
return stp->num_literals++;
}
+static int
+find_literal(LoaderState* stp, Eterm needle, Uint *idx)
+{
+ int i;
+
+ /*
+ * The search is done backwards since the most recent literals
+     * allocated by the loader itself will be placed at the end.
+ */
+ for (i = stp->num_literals - 1; i >= 0; i--) {
+ if (EQ(needle, stp->literals[i].term)) {
+ *idx = (Uint) i;
+ return 1;
+ }
+ }
+ return 0;
+}
+
Eterm
erts_module_info_0(Process* p, Eterm module)
{
+ Module* modp;
+ ErtsCodeIndex code_ix = erts_active_code_ix();
+ BeamCodeHeader* code_hdr;
Eterm *hp;
Eterm list = NIL;
Eterm tup;
@@ -5092,21 +5876,31 @@ erts_module_info_0(Process* p, Eterm module)
return THE_NON_VALUE;
}
- if (erts_get_module(module, erts_active_code_ix()) == NULL) {
+ modp = erts_get_module(module, code_ix);
+ if (modp == NULL) {
return THE_NON_VALUE;
}
+ code_hdr = modp->curr.code_hdr;
+ if (code_hdr == NULL) {
+ return THE_NON_VALUE;
+ }
+
#define BUILD_INFO(What) \
- tup = erts_module_info_1(p, module, What); \
+ tup = get_module_info(p, code_ix, code_hdr, module, What); \
hp = HAlloc(p, 5); \
tup = TUPLE2(hp, What, tup); \
hp += 3; \
list = CONS(hp, tup, list)
+ BUILD_INFO(am_md5);
+#ifdef HIPE
+ BUILD_INFO(am_native);
+#endif
BUILD_INFO(am_compile);
BUILD_INFO(am_attributes);
- BUILD_INFO(am_imports);
BUILD_INFO(am_exports);
+ BUILD_INFO(am_module);
#undef BUILD_INFO
return list;
}
@@ -5114,20 +5908,47 @@ erts_module_info_0(Process* p, Eterm module)
Eterm
erts_module_info_1(Process* p, Eterm module, Eterm what)
{
+ Module* modp;
+ ErtsCodeIndex code_ix = erts_active_code_ix();
+ BeamCodeHeader* code_hdr;
+
+ if (is_not_atom(module)) {
+ return THE_NON_VALUE;
+ }
+
+ modp = erts_get_module(module, code_ix);
+ if (modp == NULL) {
+ return THE_NON_VALUE;
+ }
+
+ code_hdr = modp->curr.code_hdr;
+ if (code_hdr == NULL) {
+ return THE_NON_VALUE;
+ }
+
+ return get_module_info(p, code_ix, code_hdr, module, what);
+}
+
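+/*
+ * Common worker for module_info/0 and module_info/1: return the
+ * requested kind of information for a loaded module, or THE_NON_VALUE
+ * if 'what' is not recognized.
+ */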
+static Eterm
+get_module_info(Process* p, ErtsCodeIndex code_ix, BeamCodeHeader* code_hdr,
+ Eterm module, Eterm what)
+{
if (what == am_module) {
return module;
- } else if (what == am_imports) {
- return NIL;
+ } else if (what == am_md5) {
+ return md5_of_module(p, code_hdr);
} else if (what == am_exports) {
- return exported_from_module(p, module);
+ return exported_from_module(p, code_ix, module);
} else if (what == am_functions) {
- return functions_in_module(p, module);
+ return functions_in_module(p, code_hdr);
} else if (what == am_attributes) {
- return attributes_for_module(p, module);
+ return attributes_for_module(p, code_hdr);
} else if (what == am_compile) {
- return compilation_info_for_module(p, module);
+ return compilation_info_for_module(p, code_hdr);
} else if (what == am_native_addresses) {
- return native_addresses(p, module);
+ return native_addresses(p, code_hdr);
+ } else if (what == am_native) {
+ return has_native(code_hdr);
}
return THE_NON_VALUE;
}
@@ -5135,16 +5956,12 @@ erts_module_info_1(Process* p, Eterm module, Eterm what)
/*
* Builds a list of all functions in the given module:
* [{Name, Arity},...]
- *
- * Returns a tagged term, or 0 on error.
*/
Eterm
functions_in_module(Process* p, /* Process whose heap to use. */
- Eterm mod) /* Tagged atom for module. */
+ BeamCodeHeader* code_hdr)
{
- Module* modp;
- BeamInstr* code;
int i;
Uint num_functions;
Uint need;
@@ -5152,32 +5969,21 @@ functions_in_module(Process* p, /* Process whose heap to use. */
Eterm* hp_end;
Eterm result = NIL;
- if (is_not_atom(mod)) {
- return THE_NON_VALUE;
- }
-
- modp = erts_get_module(mod, erts_active_code_ix());
- if (modp == NULL) {
- return THE_NON_VALUE;
- }
- code = modp->curr.code;
- num_functions = code[MI_NUM_FUNCTIONS];
+ num_functions = code_hdr->num_functions;
need = 5*num_functions;
hp = HAlloc(p, need);
hp_end = hp + need;
for (i = num_functions-1; i >= 0 ; i--) {
- BeamInstr* func_info = (BeamInstr *) code[MI_FUNCTIONS+i];
- Eterm name = (Eterm) func_info[3];
- int arity = (int) func_info[4];
+ ErtsCodeInfo* ci = code_hdr->functions[i];
Eterm tuple;
/*
* If the function name is [], this entry is a stub for
* a BIF that should be ignored.
*/
- ASSERT(is_atom(name) || is_nil(name));
- if (is_atom(name)) {
- tuple = TUPLE2(hp, name, make_small(arity));
+ ASSERT(is_atom(ci->mfa.function) || is_nil(ci->mfa.function));
+ if (is_atom(ci->mfa.function)) {
+ tuple = TUPLE2(hp, ci->mfa.function, make_small(ci->mfa.arity));
hp += 3;
result = CONS(hp, tuple, result);
hp += 2;
@@ -5188,87 +5994,136 @@ functions_in_module(Process* p, /* Process whose heap to use. */
}
/*
+ * Returns 'true' if the module has any natively compiled functions, otherwise 'false'.
+ */
+
+static Eterm
+has_native(BeamCodeHeader *code_hdr)
+{
+ Eterm result = am_false;
+#ifdef HIPE
+ if (erts_is_module_native(code_hdr)) {
+ result = am_true;
+ }
+#endif
+ return result;
+}
+
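+/*
+ * Release a literal area: dereference every off-heap binary that it
+ * references (only refc binaries can occur here) and free the area.
+ */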
+void
+erts_release_literal_area(ErtsLiteralArea* literal_area)
+{
+ struct erl_off_heap_header* oh;
+
+ if (!literal_area)
+ return;
+
+ oh = literal_area->off_heap;
+
+ while (oh) {
+ Binary* bptr;
+ ASSERT(thing_subtag(oh->thing_word) == REFC_BINARY_SUBTAG);
+ bptr = ((ProcBin*)oh)->val;
+ erts_bin_release(bptr);
+ oh = oh->next;
+ }
+ erts_free(ERTS_ALC_T_LITERAL, literal_area);
+}
+
+int
+erts_is_module_native(BeamCodeHeader* code_hdr)
+{
+ Uint i, num_functions;
+
+    /* Check the NativeAddress of the first real function in the module */
+ if (code_hdr != NULL) {
+ num_functions = code_hdr->num_functions;
+ for (i=0; i<num_functions; i++) {
+ ErtsCodeInfo* ci = code_hdr->functions[i];
+ if (is_atom(ci->mfa.function)) {
+ return erts_is_function_native(ci);
+ }
+ else ASSERT(is_nil(ci->mfa.function)); /* ignore BIF stubs */
+ }
+ }
+ return 0;
+}
+
+int
+erts_is_function_native(ErtsCodeInfo *ci)
+{
+#ifdef HIPE
+ ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI));
+ return BeamIsOpCode(erts_codeinfo_to_code(ci)[0], op_hipe_trap_call) ||
+ BeamIsOpCode(erts_codeinfo_to_code(ci)[0], op_hipe_trap_call_closure);
+#else
+ return 0;
+#endif
+}
+
+/*
* Builds a list of all functions including native addresses.
* [{Name,Arity,NativeAddress},...]
- *
- * Returns a tagged term, or 0 on error.
*/
static Eterm
-native_addresses(Process* p, Eterm mod)
+native_addresses(Process* p, BeamCodeHeader* code_hdr)
{
- Module* modp;
- BeamInstr* code;
+ Eterm result = NIL;
+#ifdef HIPE
int i;
Eterm* hp;
Uint num_functions;
Uint need;
Eterm* hp_end;
- Eterm result = NIL;
- if (is_not_atom(mod)) {
- return THE_NON_VALUE;
- }
-
- modp = erts_get_module(mod, erts_active_code_ix());
- if (modp == NULL) {
- return THE_NON_VALUE;
- }
-
- code = modp->curr.code;
- num_functions = code[MI_NUM_FUNCTIONS];
+ num_functions = code_hdr->num_functions;
need = (6+BIG_UINT_HEAP_SIZE)*num_functions;
hp = HAlloc(p, need);
hp_end = hp + need;
for (i = num_functions-1; i >= 0 ; i--) {
- BeamInstr* func_info = (BeamInstr *) code[MI_FUNCTIONS+i];
- Eterm name = (Eterm) func_info[3];
- int arity = (int) func_info[4];
+ ErtsCodeInfo *ci = code_hdr->functions[i];
Eterm tuple;
- ASSERT(is_atom(name) || is_nil(name)); /* [] if BIF stub */
- if (func_info[1] != 0) {
- Eterm addr;
- ASSERT(is_atom(name));
- addr = erts_bld_uint(&hp, NULL, func_info[1]);
- tuple = erts_bld_tuple(&hp, NULL, 3, name, make_small(arity), addr);
+ ASSERT(is_atom(ci->mfa.function)
+ || is_nil(ci->mfa.function)); /* [] if BIF stub */
+ if (ci->u.ncallee != NULL) {
+ Eterm addr;
+ ASSERT(is_atom(ci->mfa.function));
+ addr = erts_bld_uint(&hp, NULL, (Uint)ci->u.ncallee);
+ tuple = erts_bld_tuple(&hp, NULL, 3, ci->mfa.function,
+ make_small(ci->mfa.arity), addr);
result = erts_bld_cons(&hp, NULL, tuple, result);
}
}
HRelease(p, hp_end, hp);
+#endif
return result;
}
/*
* Builds a list of all exported functions in the given module:
* [{Name, Arity},...]
- *
- * Returns a tagged term, or 0 on error.
*/
Eterm
exported_from_module(Process* p, /* Process whose heap to use. */
+ ErtsCodeIndex code_ix,
Eterm mod) /* Tagged atom for module. */
{
- int i;
+ int i, num_exps;
Eterm* hp = NULL;
Eterm* hend = NULL;
Eterm result = NIL;
- ErtsCodeIndex code_ix;
-
- if (is_not_atom(mod)) {
- return THE_NON_VALUE;
- }
- code_ix = erts_active_code_ix();
- for (i = 0; i < export_list_size(code_ix); i++) {
+ num_exps = export_list_size(code_ix);
+ for (i = 0; i < num_exps; i++) {
Export* ep = export_list(i,code_ix);
- if (ep->code[0] == mod) {
+ if (ep->info.mfa.module == mod) {
Eterm tuple;
- if (ep->addressv[code_ix] == ep->code+3 &&
- ep->code[3] == (BeamInstr) em_call_error_handler) {
+ if (ep->addressv[code_ix] == ep->beam &&
+ BeamIsOpCode(ep->beam[0], op_call_error_handler)) {
/* There is a call to the function, but it does not exist. */
continue;
}
@@ -5278,7 +6133,8 @@ exported_from_module(Process* p, /* Process whose heap to use. */
hp = HAlloc(p, need);
hend = hp + need;
}
- tuple = TUPLE2(hp, ep->code[1], make_small(ep->code[2]));
+ tuple = TUPLE2(hp, ep->info.mfa.function,
+ make_small(ep->info.mfa.arity));
hp += 3;
result = CONS(hp, tuple, result);
hp += 2;
@@ -5290,91 +6146,68 @@ exported_from_module(Process* p, /* Process whose heap to use. */
/*
* Returns a list of all attributes for the module.
- *
- * Returns a tagged term, or 0 on error.
*/
Eterm
attributes_for_module(Process* p, /* Process whose heap to use. */
- Eterm mod) /* Tagged atom for module. */
-
+ BeamCodeHeader* code_hdr)
{
- Module* modp;
- BeamInstr* code;
- Eterm* hp;
byte* ext;
Eterm result = NIL;
- Eterm* end;
- if (is_not_atom(mod) || (is_not_list(result) && is_not_nil(result))) {
- return THE_NON_VALUE;
- }
-
- modp = erts_get_module(mod, erts_active_code_ix());
- if (modp == NULL) {
- return THE_NON_VALUE;
- }
- code = modp->curr.code;
- ext = (byte *) code[MI_ATTR_PTR];
+ ext = code_hdr->attr_ptr;
if (ext != NULL) {
- hp = HAlloc(p, code[MI_ATTR_SIZE_ON_HEAP]);
- end = hp + code[MI_ATTR_SIZE_ON_HEAP];
- result = erts_decode_ext(&hp, &MSO(p), &ext);
+ ErtsHeapFactory factory;
+ erts_factory_proc_prealloc_init(&factory, p, code_hdr->attr_size_on_heap);
+ result = erts_decode_ext(&factory, &ext, 0);
if (is_value(result)) {
- ASSERT(hp <= end);
+ erts_factory_close(&factory);
}
- HRelease(p,end,hp);
}
return result;
}
/*
* Returns a list containing compilation information.
- *
- * Returns a tagged term, or 0 on error.
*/
Eterm
compilation_info_for_module(Process* p, /* Process whose heap to use. */
- Eterm mod) /* Tagged atom for module. */
+ BeamCodeHeader* code_hdr)
{
- Module* modp;
- BeamInstr* code;
- Eterm* hp;
byte* ext;
Eterm result = NIL;
- Eterm* end;
-
- if (is_not_atom(mod) || (is_not_list(result) && is_not_nil(result))) {
- return THE_NON_VALUE;
- }
- modp = erts_get_module(mod, erts_active_code_ix());
- if (modp == NULL) {
- return THE_NON_VALUE;
- }
- code = modp->curr.code;
- ext = (byte *) code[MI_COMPILE_PTR];
+ ext = code_hdr->compile_ptr;
if (ext != NULL) {
- hp = HAlloc(p, code[MI_COMPILE_SIZE_ON_HEAP]);
- end = hp + code[MI_COMPILE_SIZE_ON_HEAP];
- result = erts_decode_ext(&hp, &MSO(p), &ext);
+ ErtsHeapFactory factory;
+ erts_factory_proc_prealloc_init(&factory, p, code_hdr->compile_size_on_heap);
+ result = erts_decode_ext(&factory, &ext, 0);
if (is_value(result)) {
- ASSERT(hp <= end);
+ erts_factory_close(&factory);
}
- HRelease(p,end,hp);
}
return result;
}
/*
+ * Returns the MD5 checksum for a module
+ */
+
+Eterm
+md5_of_module(Process* p, /* Process whose heap to use. */
+ BeamCodeHeader* code_hdr)
+{
+ return new_binary(p, code_hdr->md5_ptr, MD5_SIZE);
+}
+
+/*
 * Build a single {M,F,A,Location} item to be part of
* a stack trace.
*/
Eterm*
erts_build_mfa_item(FunctionInfo* fi, Eterm* hp, Eterm args, Eterm* mfa_p)
{
- BeamInstr* current = fi->current;
Eterm loc = NIL;
if (fi->loc != LINE_INVALID_LOCATION) {
@@ -5384,7 +6217,7 @@ erts_build_mfa_item(FunctionInfo* fi, Eterm* hp, Eterm args, Eterm* mfa_p)
Eterm file_term = NIL;
if (file == 0) {
- Atom* ap = atom_tab(atom_val(fi->current[0]));
+ Atom* ap = atom_tab(atom_val(fi->mfa->module));
file_term = buf_to_intlist(&hp, ".erl", 4, NIL);
file_term = buf_to_intlist(&hp, (char*)ap->name, ap->len, file_term);
} else {
@@ -5403,10 +6236,12 @@ erts_build_mfa_item(FunctionInfo* fi, Eterm* hp, Eterm args, Eterm* mfa_p)
}
if (is_list(args) || is_nil(args)) {
- *mfa_p = TUPLE4(hp, current[0], current[1], args, loc);
+ *mfa_p = TUPLE4(hp, fi->mfa->module, fi->mfa->function,
+ args, loc);
} else {
- Eterm arity = make_small(current[2]);
- *mfa_p = TUPLE4(hp, current[0], current[1], arity, loc);
+ Eterm arity = make_small(fi->mfa->arity);
+ *mfa_p = TUPLE4(hp, fi->mfa->module, fi->mfa->function,
+ arity, loc);
}
return hp + 5;
}
@@ -5417,9 +6252,9 @@ erts_build_mfa_item(FunctionInfo* fi, Eterm* hp, Eterm args, Eterm* mfa_p)
* the function.
*/
void
-erts_set_current_function(FunctionInfo* fi, BeamInstr* current)
+erts_set_current_function(FunctionInfo* fi, ErtsCodeMFA* mfa)
{
- fi->current = current;
+ fi->mfa = mfa;
fi->needed = 5;
fi->loc = LINE_INVALID_LOCATION;
}
@@ -5428,13 +6263,13 @@ erts_set_current_function(FunctionInfo* fi, BeamInstr* current)
/*
* Returns a pointer to {module, function, arity}, or NULL if not found.
*/
-BeamInstr*
+ErtsCodeMFA*
find_function_from_pc(BeamInstr* pc)
{
FunctionInfo fi;
erts_lookup_function_info(&fi, pc, 0);
- return fi.current;
+ return fi.mfa;
}
/*
@@ -5489,7 +6324,7 @@ code_get_chunk_2(BIF_ALIST_2)
goto error;
}
if (!init_iff_file(stp, start, binary_size(Bin)) ||
- !scan_iff_file(stp, &chunk, 1, 1) ||
+ !scan_iff_file(stp, &chunk, 1) ||
stp->chunks[0].start == NULL) {
res = am_undefined;
goto done;
@@ -5538,12 +6373,12 @@ code_module_md5_1(BIF_ALIST_1)
}
stp->module = THE_NON_VALUE; /* Suppress diagnostics */
if (!init_iff_file(stp, bytes, binary_size(Bin)) ||
- !scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY) ||
+ !scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES) ||
!verify_chunks(stp)) {
res = am_undefined;
goto done;
}
- res = new_binary(p, stp->mod_md5, sizeof(stp->mod_md5));
+ res = new_binary(p, stp->mod_md5, MD5_SIZE);
done:
erts_free_aligned_binary_bytes(temp_alloc);
@@ -5551,31 +6386,28 @@ code_module_md5_1(BIF_ALIST_1)
return res;
}
-#define WORDS_PER_FUNCTION 6
+#ifdef HIPE
+#define WORDS_PER_FUNCTION (sizeof(ErtsCodeInfo) / sizeof(UWord) + 1)
static BeamInstr*
-make_stub(BeamInstr* fp, Eterm mod, Eterm func, Uint arity, Uint native, BeamInstr OpCode)
+make_stub(ErtsCodeInfo* info, Eterm mod, Eterm func, Uint arity, Uint native, BeamInstr OpCode)
{
- fp[0] = (BeamInstr) BeamOp(op_i_func_info_IaaI);
- fp[1] = native;
- fp[2] = mod;
- fp[3] = func;
- fp[4] = arity;
-#ifdef HIPE
- if (native) {
- fp[5] = BeamOpCode(op_move_return_nr);
- hipe_mfa_save_orig_beam_op(mod, func, arity, fp+5);
- }
-#endif
- fp[5] = OpCode;
- return fp + WORDS_PER_FUNCTION;
+ DBG_TRACE_MFA(mod,func,arity,"make beam stub at %p", erts_codeinfo_to_code(info));
+ ASSERT(WORDS_PER_FUNCTION == 6);
+ info->op = BeamOpCodeAddr(op_i_func_info_IaaI);
+ info->u.ncallee = (void (*)(void)) native;
+ info->mfa.module = mod;
+ info->mfa.function = func;
+ info->mfa.arity = arity;
+ erts_codeinfo_to_code(info)[0] = OpCode;
+ return erts_codeinfo_to_code(info)+1;
}
static byte*
stub_copy_info(LoaderState* stp,
int chunk, /* Chunk: ATTR_CHUNK or COMPILE_CHUNK */
byte* info, /* Where to store info. */
- BeamInstr* ptr_word, /* Where to store pointer into info. */
+ byte** ptr_word, /* Where to store pointer into info. */
BeamInstr* size_word, /* Where to store size into info. */
BeamInstr* size_on_heap_word) /* Where to store size on heap. */
{
@@ -5583,7 +6415,7 @@ stub_copy_info(LoaderState* stp,
Uint size = stp->chunks[chunk].size;
if (size != 0) {
memcpy(info, stp->chunks[chunk].start, size);
- *ptr_word = (BeamInstr) info;
+ *ptr_word = info;
decoded_size = erts_decode_ext_size(info, size);
if (decoded_size < 0) {
return 0;
@@ -5597,11 +6429,11 @@ stub_copy_info(LoaderState* stp,
static int
stub_read_export_table(LoaderState* stp)
{
- int i;
+ unsigned int i;
GetInt(stp, 4, stp->num_exps);
if (stp->num_exps > stp->num_functions) {
- LoadError2(stp, "%d functions exported; only %d functions defined",
+ LoadError2(stp, "%u functions exported; only %u functions defined",
stp->num_exps, stp->num_functions);
}
stp->export
@@ -5615,7 +6447,7 @@ stub_read_export_table(LoaderState* stp)
GetAtom(stp, n, stp->export[i].function);
GetInt(stp, 4, n);
if (n > MAX_REG) {
- LoadError2(stp, "export table entry %d: absurdly high arity %d", i, n);
+ LoadError2(stp, "export table entry %u: absurdly high arity %u", i, n);
}
stp->export[i].arity = n;
GetInt(stp, 4, n); /* Ignore label */
@@ -5627,22 +6459,17 @@ stub_read_export_table(LoaderState* stp)
}
static void
-stub_final_touch(LoaderState* stp, BeamInstr* fp)
+stub_final_touch(LoaderState* stp, ErtsCodeInfo* ci)
{
- int i;
- int n = stp->num_exps;
- Eterm mod = fp[2];
- Eterm function = fp[3];
- int arity = fp[4];
-#ifdef HIPE
+ unsigned int i;
+ unsigned int n = stp->num_exps;
Lambda* lp;
-#endif
- if (is_bif(mod, function, arity)) {
- fp[1] = 0;
- fp[2] = 0;
- fp[3] = 0;
- fp[4] = 0;
+ if (is_bif(ci->mfa.module, ci->mfa.function, ci->mfa.arity)) {
+ ci->u.ncallee = NULL;
+ ci->mfa.module = 0;
+ ci->mfa.function = 0;
+ ci->mfa.arity = 0;
return;
}
@@ -5651,9 +6478,14 @@ stub_final_touch(LoaderState* stp, BeamInstr* fp)
*/
for (i = 0; i < n; i++) {
- if (stp->export[i].function == function && stp->export[i].arity == arity) {
- Export* ep = erts_export_put(mod, function, arity);
- ep->addressv[erts_staging_code_ix()] = fp+5;
+ if (stp->export[i].function == ci->mfa.function &&
+ stp->export[i].arity == ci->mfa.arity) {
+ Export* ep = erts_export_put(ci->mfa.module,
+ ci->mfa.function,
+ ci->mfa.arity);
+ ep->addressv[erts_staging_code_ix()] = erts_codeinfo_to_code(ci);
+ DBG_TRACE_MFA_P(&ci->mfa,"set beam stub at %p in export at %p (code_ix=%d)",
+ erts_codeinfo_to_code(ci), ep, erts_staging_code_ix());
return;
}
}
@@ -5663,16 +6495,14 @@ stub_final_touch(LoaderState* stp, BeamInstr* fp)
* Search the lambda table to find out which.
*/
-#ifdef HIPE
n = stp->num_lambdas;
for (i = 0, lp = stp->lambdas; i < n; i++, lp++) {
ErlFunEntry* fe = stp->lambdas[i].fe;
- if (lp->function == function && lp->arity == arity) {
- fp[5] = (Eterm) BeamOpCode(op_hipe_trap_call_closure);
- fe->address = &(fp[5]);
+ if (lp->function == ci->mfa.function && lp->arity == ci->mfa.arity) {
+ *erts_codeinfo_to_code(ci) = BeamOpCodeAddr(op_hipe_trap_call_closure);
+ fe->address = erts_codeinfo_to_code(ci);
}
}
-#endif
return;
}
@@ -5681,10 +6511,9 @@ stub_final_touch(LoaderState* stp, BeamInstr* fp)
   [{Addr, Patchtype} | Addresses]
and the address of a fun_entry.
*/
-int
+static int
patch(Eterm Addresses, Uint fe)
{
-#ifdef HIPE
Eterm* listp;
Eterm tuple;
Eterm* tp;
@@ -5720,15 +6549,13 @@ patch(Eterm Addresses, Uint fe)
}
-#endif
return 1;
}
-int
+static int
patch_funentries(Eterm Patchlist)
{
-#ifdef HIPE
while (!is_nil(Patchlist)) {
Eterm Info;
Eterm MFA;
@@ -5802,50 +6629,41 @@ patch_funentries(Eterm Patchlist)
fe = erts_get_fun_entry(Mod, uniq, index);
fe->native_address = (Uint *)native_address;
- /* Deliberate MEMORY LEAK of native fun entries!!!
- *
- * Uncomment line below when hipe code upgrade and purging works correctly.
- * Today we may get cases when old (leaked) native code of a purged module
- * gets called and tries to create instances of a deleted fun entry.
- *
- * Reproduced on a debug emulator with stdlib_test/qlc_SUITE:join_merge
- *
- * erts_refc_dec(&fe->refc, 1);
- */
+ erts_refc_dec(&fe->refc, 1);
if (!patch(Addresses, (Uint) fe))
return 0;
}
-#endif
return 1; /* Signal that all went well */
}
-
/*
* Do a dummy load of a module. No threaded code will be loaded.
* Used for loading native code.
* Will also patch all references to fun_entries to point to
* the new fun_entries created.
*/
-
Eterm
-erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
+erts_make_stub_module(Process* p, Eterm hipe_magic_bin, Eterm Beam, Eterm Info)
{
Binary* magic;
+ Binary* hipe_magic;
LoaderState* stp;
+ HipeLoaderState* hipe_stp;
+ HipeModule *hipe_code;
BeamInstr Funcs;
BeamInstr Patchlist;
+ Eterm MD5Bin;
Eterm* tp;
- BeamInstr* code = NULL;
- BeamInstr* ptrs;
+ BeamCodeHeader* code_hdr;
+ BeamInstr* code_base;
BeamInstr* fp;
byte* info;
- Uint ci;
- int n;
+ Sint n;
int code_size;
int rval;
- int i;
+ Sint i;
byte* temp_alloc = NULL;
byte* bytes;
Uint size;
@@ -5856,20 +6674,27 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
*/
magic = erts_alloc_loader_state();
stp = ERTS_MAGIC_BIN_DATA(magic);
+ hipe_code = erts_alloc(ERTS_ALC_T_HIPE_LL, sizeof(*hipe_code));
- if (is_not_atom(Mod)) {
+ if (!is_internal_magic_ref(hipe_magic_bin) ||
+ !(hipe_magic = erts_magic_ref2bin(hipe_magic_bin),
+ hipe_stp = hipe_get_loader_state(hipe_magic)) ||
+ hipe_stp->module == NIL || hipe_stp->text_segment == 0) {
goto error;
}
if (is_not_tuple(Info)) {
goto error;
}
tp = tuple_val(Info);
- if (tp[0] != make_arityval(2)) {
+ if (tp[0] != make_arityval(3)) {
goto error;
}
Funcs = tp[1];
- Patchlist = tp[2];
-
+ Patchlist = tp[2];
+ MD5Bin = tp[3];
+ if (is_not_binary(MD5Bin) || (binary_size(MD5Bin) != MD5_SIZE)) {
+ goto error;
+ }
if ((n = erts_list_length(Funcs)) < 0) {
goto error;
}
@@ -5882,13 +6707,13 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
* Scan the Beam binary and read the interesting sections.
*/
- stp->module = Mod;
+ stp->module = hipe_stp->module;
stp->group_leader = p->group_leader;
stp->num_functions = n;
if (!init_iff_file(stp, bytes, size)) {
goto error;
}
- if (!scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY) ||
+ if (!scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES) ||
!verify_chunks(stp)) {
goto error;
}
@@ -5896,9 +6721,16 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
if (!read_code_header(stp)) {
goto error;
}
- define_file(stp, "atom table", ATOM_CHUNK);
- if (!load_atom_table(stp)) {
- goto error;
+ if (stp->chunks[UTF8_ATOM_CHUNK].size > 0) {
+ define_file(stp, "utf8 atom table", UTF8_ATOM_CHUNK);
+ if (!load_atom_table(stp, ERTS_ATOM_ENC_UTF8)) {
+ goto error;
+ }
+ } else {
+ define_file(stp, "atom table", ATOM_CHUNK);
+ if (!load_atom_table(stp, ERTS_ATOM_ENC_LATIN1)) {
+ goto error;
+ }
}
define_file(stp, "export table", EXP_CHUNK);
if (!stub_read_export_table(stp)) {
@@ -5916,37 +6748,38 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
* Allocate memory for the stub module.
*/
- code_size = ((WORDS_PER_FUNCTION+1)*n + MI_FUNCTIONS + 2) * sizeof(BeamInstr);
- code_size += stp->chunks[ATTR_CHUNK].size;
- code_size += stp->chunks[COMPILE_CHUNK].size;
- code = erts_alloc_fnf(ERTS_ALC_T_CODE, code_size);
- if (!code) {
+ code_size = (offsetof(BeamCodeHeader,functions)
+ + ((n+1) * sizeof(BeamInstr*))
+ + (WORDS_PER_FUNCTION*n + 1) * sizeof(BeamInstr)
+ + stp->chunks[ATTR_CHUNK].size
+ + stp->chunks[COMPILE_CHUNK].size
+ + MD5_SIZE);
+ code_hdr = erts_alloc_fnf(ERTS_ALC_T_CODE, code_size);
+ if (!code_hdr) {
goto error;
}
/*
- * Initialize code area.
+ * Initialize code header.
*/
- code[MI_NUM_FUNCTIONS] = n;
- code[MI_ATTR_PTR] = 0;
- code[MI_ATTR_SIZE] = 0;
- code[MI_ATTR_SIZE_ON_HEAP] = 0;
- code[MI_COMPILE_PTR] = 0;
- code[MI_COMPILE_SIZE] = 0;
- code[MI_COMPILE_SIZE_ON_HEAP] = 0;
- code[MI_LITERALS_START] = 0;
- code[MI_LITERALS_END] = 0;
- code[MI_LITERALS_OFF_HEAP] = 0;
- code[MI_ON_LOAD_FUNCTION_PTR] = 0;
- ci = MI_FUNCTIONS + n + 1;
+ code_hdr->num_functions = n;
+ code_hdr->attr_ptr = NULL;
+ code_hdr->attr_size = 0;
+ code_hdr->attr_size_on_heap = 0;
+ code_hdr->compile_ptr = NULL;
+ code_hdr->compile_size = 0;
+ code_hdr->compile_size_on_heap = 0;
+ code_hdr->literal_area = NULL;
+ code_hdr->on_load_function_ptr = NULL;
+ code_hdr->line_table = NULL;
+ code_hdr->md5_ptr = NULL;
/*
* Make stubs for all functions.
*/
- ptrs = code + MI_FUNCTIONS;
- fp = code + ci;
+ fp = code_base = (BeamInstr*) &code_hdr->functions[n+1];
for (i = 0; i < n; i++) {
Eterm* listp;
Eterm tuple;
@@ -5989,21 +6822,18 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
* Set the pointer and make the stub. Put a return instruction
* as the body until we know what kind of trap we should put there.
*/
- ptrs[i] = (BeamInstr) fp;
-#ifdef HIPE
- op = (Eterm) BeamOpCode(op_hipe_trap_call); /* Might be changed later. */
-#else
- op = (Eterm) BeamOpCode(op_move_return_nr);
-#endif
- fp = make_stub(fp, Mod, func, arity, (Uint)native_address, op);
+ code_hdr->functions[i] = (ErtsCodeInfo*)fp;
+ op = BeamOpCodeAddr(op_hipe_trap_call); /* Might be changed later. */
+ fp = make_stub((ErtsCodeInfo*)fp, hipe_stp->module, func, arity,
+ (Uint)native_address, op);
}
/*
* Insert the last pointer and the int_code_end instruction.
*/
- ptrs[i] = (BeamInstr) fp;
- *fp++ = (BeamInstr) BeamOp(op_int_code_end);
+ code_hdr->functions[i] = (ErtsCodeInfo*)fp;
+ *fp++ = BeamOpCodeAddr(op_int_code_end);
/*
* Copy attributes and compilation information.
@@ -6011,25 +6841,44 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
info = (byte *) fp;
info = stub_copy_info(stp, ATTR_CHUNK, info,
- code+MI_ATTR_PTR,
- code+MI_ATTR_SIZE,
- code+MI_ATTR_SIZE_ON_HEAP);
+ &code_hdr->attr_ptr,
+ &code_hdr->attr_size,
+ &code_hdr->attr_size_on_heap);
if (info == NULL) {
goto error;
}
info = stub_copy_info(stp, COMPILE_CHUNK, info,
- code+MI_COMPILE_PTR,
- code+MI_COMPILE_SIZE,
- code+MI_COMPILE_SIZE_ON_HEAP);
+ &code_hdr->compile_ptr,
+ &code_hdr->compile_size,
+ &code_hdr->compile_size_on_heap);
if (info == NULL) {
goto error;
}
+ {
+ byte *tmp = NULL;
+ byte *md5 = NULL;
+ if ((md5 = erts_get_aligned_binary_bytes(MD5Bin, &tmp)) != NULL) {
+ sys_memcpy(info, md5, MD5_SIZE);
+ code_hdr->md5_ptr = info;
+ }
+ erts_free_aligned_binary_bytes(tmp);
+ }
+
+ /*
+ * Initialise HiPE module
+ */
+ hipe_code->text_segment = hipe_stp->text_segment;
+ hipe_code->text_segment_size = hipe_stp->text_segment_size;
+ hipe_code->data_segment = hipe_stp->data_segment;
+ hipe_code->first_hipe_ref = hipe_stp->new_hipe_refs;
+ hipe_code->first_hipe_sdesc = hipe_stp->new_hipe_sdesc;
/*
* Insert the module in the module table.
*/
- rval = insert_new_code(p, 0, p->group_leader, Mod, code, code_size);
+ rval = stub_insert_new_code(p, 0, p->group_leader, hipe_stp->module,
+ code_hdr, code_size, hipe_code);
if (rval != NIL) {
goto error;
}
@@ -6038,25 +6887,76 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
* Export all stub functions and insert the correct type of HiPE trap.
*/
- fp = code + ci;
+ fp = code_base;
for (i = 0; i < n; i++) {
- stub_final_touch(stp, fp);
+ stub_final_touch(stp, (ErtsCodeInfo*)fp);
fp += WORDS_PER_FUNCTION;
}
if (patch_funentries(Patchlist)) {
+ Eterm mod = hipe_stp->module;
+ /* Prevent code from being freed */
+ hipe_stp->text_segment = 0;
+ hipe_stp->data_segment = 0;
+ hipe_stp->new_hipe_refs = NULL;
+ hipe_stp->new_hipe_sdesc = NULL;
+
erts_free_aligned_binary_bytes(temp_alloc);
free_loader_state(magic);
- return Mod;
+ hipe_free_loader_state(hipe_stp);
+
+ return mod;
}
error:
+ erts_free(ERTS_ALC_T_HIPE_LL, hipe_code);
erts_free_aligned_binary_bytes(temp_alloc);
free_loader_state(magic);
BIF_ERROR(p, BADARG);
}
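+
+/*
+ * Commit a HiPE patch load: attach the native code segments kept in
+ * the HiPE loader state to the already loaded module. Returns 1 on
+ * success and 0 if the magic binary or the module is invalid.
+ */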
+int erts_commit_hipe_patch_load(Eterm hipe_magic_bin)
+{
+ Binary* hipe_magic;
+ HipeLoaderState* hipe_stp;
+ HipeModule *hipe_code;
+ Module* modp;
+
+ if (!is_internal_magic_ref(hipe_magic_bin) ||
+ !(hipe_magic = erts_magic_ref2bin(hipe_magic_bin),
+ hipe_stp = hipe_get_loader_state(hipe_magic)) ||
+ hipe_stp->module == NIL || hipe_stp->text_segment == 0) {
+ return 0;
+ }
+
+ modp = erts_get_module(hipe_stp->module, erts_active_code_ix());
+ if (!modp)
+ return 0;
+
+ /*
+ * Initialise HiPE module
+ */
+ hipe_code = erts_alloc(ERTS_ALC_T_HIPE_LL, sizeof(*hipe_code));
+ hipe_code->text_segment = hipe_stp->text_segment;
+ hipe_code->text_segment_size = hipe_stp->text_segment_size;
+ hipe_code->data_segment = hipe_stp->data_segment;
+ hipe_code->first_hipe_ref = hipe_stp->new_hipe_refs;
+ hipe_code->first_hipe_sdesc = hipe_stp->new_hipe_sdesc;
+
+ modp->curr.hipe_code = hipe_code;
+
+ /* Prevent code from being freed */
+ hipe_stp->text_segment = 0;
+ hipe_stp->data_segment = 0;
+ hipe_stp->new_hipe_refs = NULL;
+ hipe_stp->new_hipe_sdesc = NULL;
+
+ return 1;
+}
+
#undef WORDS_PER_FUNCTION
+#endif /* HIPE */
+
static int safe_mul(UWord a, UWord b, UWord* resp)
{
@@ -6070,3 +6970,46 @@ static int safe_mul(UWord a, UWord b, UWord* resp)
}
}
+#ifdef ENABLE_DBG_TRACE_MFA
+
+#define MFA_MAX 10
+Eterm dbg_trace_m[MFA_MAX];
+Eterm dbg_trace_f[MFA_MAX];
+Uint dbg_trace_a[MFA_MAX];
+unsigned int dbg_trace_ix = 0;
+
+void dbg_set_traced_mfa(const char* m, const char* f, Uint a)
+{
+ unsigned i = dbg_trace_ix++;
+ ASSERT(i < MFA_MAX);
+ dbg_trace_m[i] = am_atom_put(m, strlen(m));
+ dbg_trace_f[i] = am_atom_put(f, strlen(f));
+ dbg_trace_a[i] = a;
+}
+
+int dbg_is_traced_mfa(Eterm m, Eterm f, Uint a)
+{
+ unsigned int i;
+ for (i = 0; i < dbg_trace_ix; ++i) {
+ if (m == dbg_trace_m[i] &&
+ (!f || (f == dbg_trace_f[i] && a == dbg_trace_a[i]))) {
+
+ return i+1;
+ }
+ }
+ return 0;
+}
+
+void dbg_vtrace_mfa(unsigned ix, const char* format, ...)
+{
+ va_list arglist;
+ va_start(arglist, format);
+ ASSERT(--ix < MFA_MAX);
+ erts_fprintf(stderr, "MFA TRACE %T:%T/%u: ",
+ dbg_trace_m[ix], dbg_trace_f[ix], (int)dbg_trace_a[ix]);
+
+ erts_vfprintf(stderr, format, arglist);
+ va_end(arglist);
+}
+
+#endif /* ENABLE_DBG_TRACE_MFA */
diff --git a/erts/emulator/beam/beam_load.h b/erts/emulator/beam/beam_load.h
index bd22b0c4de..156c3c45e2 100644
--- a/erts/emulator/beam/beam_load.h
+++ b/erts/emulator/beam/beam_load.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -26,30 +27,17 @@
Eterm beam_make_current_old(Process *c_p, ErtsProcLocks c_p_locks,
Eterm module);
-
typedef struct gen_op_entry {
char* name;
int arity;
int specific;
int num_specific;
int transform;
- int min_window;
} GenOpEntry;
-extern GenOpEntry gen_opc[];
-
-#ifdef NO_JUMP_TABLE
-#define BeamOp(Op) (Op)
-#else
-extern void** beam_ops;
-#define BeamOp(Op) beam_ops[(Op)]
-#endif
-
+extern const GenOpEntry gen_opc[];
-extern BeamInstr beam_debug_apply[];
-extern BeamInstr* em_call_error_handler;
-extern BeamInstr* em_apply_bif;
-extern BeamInstr* em_call_nif;
+struct ErtsLiteralArea_;
/*
* The following variables keep a sorted list of address ranges for
@@ -59,68 +47,88 @@ extern BeamInstr* em_call_nif;
/* Total code size in bytes */
extern Uint erts_total_code_size;
-/*
- * Index into start of code chunks which contains additional information
- * about the loaded module.
- *
- * First number of functions.
- */
-
-#define MI_NUM_FUNCTIONS 0
-
-/*
- * The attributes retrieved by Mod:module_info(attributes).
- */
-
-#define MI_ATTR_PTR 1
-#define MI_ATTR_SIZE 2
-#define MI_ATTR_SIZE_ON_HEAP 3
-
-/*
- * The compilation information retrieved by Mod:module_info(compile).
- */
-
-#define MI_COMPILE_PTR 4
-#define MI_COMPILE_SIZE 5
-#define MI_COMPILE_SIZE_ON_HEAP 6
-
-/*
- * Literal area (constant pool).
- */
-#define MI_LITERALS_START 7
-#define MI_LITERALS_END 8
-#define MI_LITERALS_OFF_HEAP 9
+typedef struct BeamCodeLineTab_ BeamCodeLineTab;
/*
- * Pointer to the on_load function (or NULL if none).
- */
-#define MI_ON_LOAD_FUNCTION_PTR 10
-
-/*
- * Pointer to the line table (or NULL if none).
- */
-#define MI_LINE_TABLE 11
-
-/*
- * Start of function pointer table. This table contains pointers to
- * all functions in the module plus an additional pointer just beyond
- * the end of the last function.
- *
- * The actual loaded code (for the first function) start just beyond
- * this table.
+ * Header of code chunks which contains additional information
+ * about the loaded module.
*/
-
-#define MI_FUNCTIONS 12
+typedef struct beam_code_header {
+ /*
+ * Number of functions.
+ */
+ UWord num_functions;
+
+ /*
+ * The attributes retrieved by Mod:module_info(attributes).
+ */
+ byte* attr_ptr;
+ UWord attr_size;
+ UWord attr_size_on_heap;
+
+ /*
+ * The compilation information retrieved by Mod:module_info(compile).
+ */
+ byte* compile_ptr;
+ UWord compile_size;
+ UWord compile_size_on_heap;
+
+ /*
+ * Literal area (constant pool).
+ */
+ struct ErtsLiteralArea_ *literal_area;
+
+ /*
+ * Pointer to the on_load function (or NULL if none).
+ */
+ BeamInstr* on_load_function_ptr;
+
+ /*
+ * Pointer to the line table (or NULL if none).
+ */
+ BeamCodeLineTab* line_table;
+
+ /*
+ * Pointer to the module MD5 sum (16 bytes)
+ */
+ byte* md5_ptr;
+
+ /*
+ * Start of function pointer table. This table contains pointers to
+ * all functions in the module plus an additional pointer just beyond
+ * the end of the last function.
+ *
+ * The actual loaded code (for the first function) start just beyond
+ * this table.
+ */
+ ErtsCodeInfo* functions[1];
+
+} BeamCodeHeader;
+
+# define BEAM_NIF_MIN_FUNC_SZ 4
+
+void erts_release_literal_area(struct ErtsLiteralArea_* literal_area);
+int erts_is_module_native(BeamCodeHeader* code);
+int erts_is_function_native(ErtsCodeInfo*);
+void erts_beam_bif_load_init(void);
+struct erl_fun_entry;
+void erts_purge_state_add_fun(struct erl_fun_entry *fe);
+Export *erts_suspend_process_on_pending_purge_lambda(Process *c_p,
+ struct erl_fun_entry*);
/*
* Layout of the line table.
*/
-
-#define MI_LINE_FNAME_PTR 0
-#define MI_LINE_LOC_TAB 1
-#define MI_LINE_LOC_SIZE 2
-#define MI_LINE_FUNC_TAB 3
+struct BeamCodeLineTab_ {
+    Eterm* fname_ptr;                /* Pointer to the table of file name atoms */
+    int loc_size;                    /* Entry size in the location table: 2 or 4 bytes */
+    union {
+        Uint16* p2;
+        Uint32* p4;
+    } loc_tab;                       /* Line/file location for each instruction address */
+    const BeamInstr** func_tab[1];   /* Per-function pointers into the instruction-address table */
+};
#define LINE_INVALID_LOCATION (0)
@@ -134,4 +142,34 @@ extern Uint erts_total_code_size;
#define LOC_FILE(Loc) ((Loc) >> 24)
#define LOC_LINE(Loc) ((Loc) & ((1 << 24)-1))
+
+/*
+ * MFA event debug "tracing" usage:
+ *
+ * #define ENABLE_DBG_TRACE_MFA
+ * call dbg_set_traced_mfa("mymod","myfunc",arity)
+ * for the function(s) to trace, in some init function.
+ *
+ * Run and get stderr printouts when interesting things happen to your MFA.
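+ *
+ * For example (hypothetical target MFA):
+ *     dbg_set_traced_mfa("lists", "reverse", 1);
+ * prints a line on stderr for each DBG_TRACE_MFA* hit on lists:reverse/1.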
+ */
+#ifdef ENABLE_DBG_TRACE_MFA
+
+void dbg_set_traced_mfa(const char* m, const char* f, Uint a);
+int dbg_is_traced_mfa(Eterm m, Eterm f, Uint a);
+void dbg_vtrace_mfa(unsigned ix, const char* format, ...);
+#define DBG_TRACE_MFA(M,F,A,FMT, ...) do {\
+ unsigned ix;\
+ if ((ix=dbg_is_traced_mfa(M,F,A))) \
+ dbg_vtrace_mfa(ix, FMT"\n", ##__VA_ARGS__);\
+ }while(0)
+
+#define DBG_TRACE_MFA_P(MFA, FMT, ...) \
+ DBG_TRACE_MFA((MFA)->module, (MFA)->function, (MFA)->arity, FMT, ##__VA_ARGS__)
+
+#else
+# define dbg_set_traced_mfa(M,F,A)
+# define DBG_TRACE_MFA(M,F,A,FMT, ...)
+# define DBG_TRACE_MFA_P(MFA,FMT, ...)
+#endif /* ENABLE_DBG_TRACE_MFA */
+
#endif /* _BEAM_LOAD_H */
diff --git a/erts/emulator/beam/beam_ranges.c b/erts/emulator/beam/beam_ranges.c
index 0f2d5d0c2a..01bda7f3c1 100644
--- a/erts/emulator/beam/beam_ranges.c
+++ b/erts/emulator/beam/beam_ranges.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2012. All Rights Reserved.
+ * Copyright Ericsson AB 2012-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -28,16 +29,25 @@
typedef struct {
BeamInstr* start; /* Pointer to start of module. */
- erts_smp_atomic_t end; /* (BeamInstr*) Points one word beyond last function in module. */
+ erts_atomic_t end; /* (BeamInstr*) Points one word beyond last function in module. */
} Range;
+/*
+ * Used for crash dumping of literals. The size of erts_dump_lit_areas is
+ * always twice the number of active ranges (to allow for literals in both
+ * current and old code).
+ */
+
+ErtsLiteralArea** erts_dump_lit_areas;
+Uint erts_dump_num_lit_areas;
+
/* Range 'end' needs to be atomic as we purge modules
   by setting end=start in the active code_ix */
-#define RANGE_END(R) ((BeamInstr*)erts_smp_atomic_read_nob(&(R)->end))
+#define RANGE_END(R) ((BeamInstr*)erts_atomic_read_nob(&(R)->end))
static Range* find_range(BeamInstr* pc);
-static void lookup_loc(FunctionInfo* fi, BeamInstr* pc,
- BeamInstr* modp, int idx);
+static void lookup_loc(FunctionInfo* fi, const BeamInstr* pc,
+ BeamCodeHeader*, int idx);
/*
* The following variables keep a sorted list of address ranges for
@@ -48,10 +58,11 @@ struct ranges {
Range* modules; /* Sorted lists of module addresses. */
Sint n; /* Number of range entries. */
Sint allocated; /* Number of allocated entries. */
- erts_smp_atomic_t mid; /* Cached search start point */
+ erts_atomic_t mid; /* Cached search start point */
};
static struct ranges r[ERTS_NUM_CODE_IX];
-static erts_smp_atomic_t mem_used;
+static erts_atomic_t mem_used;
+static Range* write_ptr;
#ifdef HARD_DEBUG
static void check_consistency(struct ranges* p)
@@ -71,61 +82,110 @@ static void check_consistency(struct ranges* p)
# define CHECK(r)
#endif /* HARD_DEBUG */
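+/* Comparator for qsort(): orders Range entries by ascending start address. */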
+static int
+rangecompare(Range* a, Range* b)
+{
+ if (a->start < b->start) {
+ return -1;
+ } else if (a->start == b->start) {
+ return 0;
+ } else {
+ return 1;
+ }
+}
void
erts_init_ranges(void)
{
Sint i;
- erts_smp_atomic_init_nob(&mem_used, 0);
+ erts_atomic_init_nob(&mem_used, 0);
for (i = 0; i < ERTS_NUM_CODE_IX; i++) {
r[i].modules = 0;
r[i].n = 0;
r[i].allocated = 0;
- erts_smp_atomic_init_nob(&r[i].mid, 0);
+ erts_atomic_init_nob(&r[i].mid, 0);
}
+
+ erts_dump_num_lit_areas = 8;
+ erts_dump_lit_areas = (ErtsLiteralArea **)
+ erts_alloc(ERTS_ALC_T_CRASH_DUMP,
+ erts_dump_num_lit_areas * sizeof(ErtsLiteralArea*));
}
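+
+/*
+ * Prepare the staging area: make room for all ranges in the active
+ * index plus 'num_new' modules about to be staged. New entries are
+ * appended at 'write_ptr' by erts_update_ranges() and sorted into
+ * place when staging is committed.
+ */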
void
-erts_start_staging_ranges(void)
+erts_start_staging_ranges(int num_new)
{
+ ErtsCodeIndex src = erts_active_code_ix();
ErtsCodeIndex dst = erts_staging_code_ix();
+ Sint need;
if (r[dst].modules) {
- erts_smp_atomic_add_nob(&mem_used, -r[dst].allocated);
+ erts_atomic_add_nob(&mem_used, -r[dst].allocated);
erts_free(ERTS_ALC_T_MODULE_REFS, r[dst].modules);
- r[dst].modules = NULL;
}
+
+ need = r[dst].allocated = r[src].n + num_new;
+ erts_atomic_add_nob(&mem_used, need);
+ write_ptr = erts_alloc(ERTS_ALC_T_MODULE_REFS,
+ need * sizeof(Range));
+ r[dst].modules = write_ptr;
}
void
erts_end_staging_ranges(int commit)
{
- ErtsCodeIndex dst = erts_staging_code_ix();
-
- if (commit && r[dst].modules == NULL) {
+ if (commit) {
Sint i;
- Sint n;
-
- /* No modules added, just clone src and remove purged code. */
ErtsCodeIndex src = erts_active_code_ix();
+ ErtsCodeIndex dst = erts_staging_code_ix();
+ Range* mp;
+ Sint num_inserted;
- erts_smp_atomic_add_nob(&mem_used, r[src].n);
- r[dst].modules = erts_alloc(ERTS_ALC_T_MODULE_REFS,
- r[src].n * sizeof(Range));
- r[dst].allocated = r[src].n;
- n = 0;
+ mp = r[dst].modules;
+ num_inserted = write_ptr - mp;
for (i = 0; i < r[src].n; i++) {
Range* rp = r[src].modules+i;
if (rp->start < RANGE_END(rp)) {
/* Only insert a module that has not been purged. */
- r[dst].modules[n] = *rp;
- n++;
+ write_ptr->start = rp->start;
+ erts_atomic_init_nob(&write_ptr->end,
+ (erts_aint_t)(RANGE_END(rp)));
+ write_ptr++;
}
}
- r[dst].n = n;
- erts_smp_atomic_set_nob(&r[dst].mid,
- (erts_aint_t) (r[dst].modules + n / 2));
+
+ /*
+ * There are num_inserted new range entries (unsorted) at the
+ * beginning of the modules array, followed by the old entries
+ * (sorted). We must now sort the entire array.
+ */
+
+ r[dst].n = write_ptr - mp;
+ if (num_inserted > 1) {
+ qsort(mp, r[dst].n, sizeof(Range),
+ (int (*)(const void *, const void *)) rangecompare);
+ } else if (num_inserted == 1) {
+ /* Sift the new range into place. This is faster than qsort(). */
+ Range t = mp[0];
+ for (i = 0; i < r[dst].n-1 && t.start > mp[i+1].start; i++) {
+ mp[i] = mp[i+1];
+ }
+ mp[i] = t;
+ }
+ r[dst].modules = mp;
+ CHECK(&r[dst]);
+ erts_atomic_set_nob(&r[dst].mid,
+ (erts_aint_t) (r[dst].modules +
+ r[dst].n / 2));
+
+ if (r[dst].allocated * 2 > erts_dump_num_lit_areas) {
+ erts_dump_num_lit_areas *= 2;
+ erts_dump_lit_areas = (ErtsLiteralArea **)
+ erts_realloc(ERTS_ALC_T_CRASH_DUMP,
+ (void *) erts_dump_lit_areas,
+ erts_dump_num_lit_areas * sizeof(ErtsLiteralArea*));
+ }
}
}
@@ -134,95 +194,42 @@ erts_update_ranges(BeamInstr* code, Uint size)
{
ErtsCodeIndex dst = erts_staging_code_ix();
ErtsCodeIndex src = erts_active_code_ix();
- Sint i;
- Sint n;
- Sint need;
if (src == dst) {
ASSERT(!erts_initialized);
/*
- * During start-up of system, the indices are the same.
- * Handle this by faking a source area.
+	 * During start-up of the system, the indices are the same
+ * and erts_start_staging_ranges() has not been called.
*/
- src = (src+1) % ERTS_NUM_CODE_IX;
- if (r[src].modules) {
- erts_smp_atomic_add_nob(&mem_used, -r[src].allocated);
- erts_free(ERTS_ALC_T_MODULE_REFS, r[src].modules);
- }
- r[src] = r[dst];
- r[dst].modules = 0;
- }
-
- CHECK(&r[src]);
-
- ASSERT(r[dst].modules == NULL);
- need = r[dst].allocated = r[src].n + 1;
- erts_smp_atomic_add_nob(&mem_used, need);
- r[dst].modules = (Range *) erts_alloc(ERTS_ALC_T_MODULE_REFS,
- need * sizeof(Range));
- n = 0;
- for (i = 0; i < r[src].n; i++) {
- Range* rp = r[src].modules+i;
- if (code < rp->start) {
- r[dst].modules[n].start = code;
- erts_smp_atomic_init_nob(&r[dst].modules[n].end,
- (erts_aint_t)(((byte *)code) + size));
- ASSERT(!n || RANGE_END(&r[dst].modules[n-1]) < code);
- n++;
- break;
- }
- if (rp->start < RANGE_END(rp)) {
- /* Only insert a module that has not been purged. */
- r[dst].modules[n].start = rp->start;
- erts_smp_atomic_init_nob(&r[dst].modules[n].end,
- (erts_aint_t)(RANGE_END(rp)));
- ASSERT(!n || RANGE_END(&r[dst].modules[n-1]) < rp->start);
- n++;
+ if (r[dst].modules == NULL) {
+ Sint need = 128;
+ erts_atomic_add_nob(&mem_used, need);
+ r[dst].modules = erts_alloc(ERTS_ALC_T_MODULE_REFS,
+ need * sizeof(Range));
+ r[dst].allocated = need;
+ write_ptr = r[dst].modules;
}
}
- while (i < r[src].n) {
- Range* rp = r[src].modules+i;
- if (rp->start < RANGE_END(rp)) {
- /* Only insert a module that has not been purged. */
- r[dst].modules[n].start = rp->start;
- erts_smp_atomic_init_nob(&r[dst].modules[n].end,
- (erts_aint_t)(RANGE_END(rp)));
- ASSERT(!n || RANGE_END(&r[dst].modules[n-1]) < rp->start);
- n++;
- }
- i++;
- }
-
- if (n == 0 || code > r[dst].modules[n-1].start) {
- r[dst].modules[n].start = code;
- erts_smp_atomic_init_nob(&r[dst].modules[n].end,
- (erts_aint_t)(((byte *)code) + size));
- ASSERT(!n || RANGE_END(&r[dst].modules[n-1]) < code);
- n++;
- }
-
- ASSERT(n <= r[src].n+1);
- r[dst].n = n;
- erts_smp_atomic_set_nob(&r[dst].mid,
- (erts_aint_t) (r[dst].modules + n / 2));
-
- CHECK(&r[dst]);
- CHECK(&r[src]);
+ ASSERT(r[dst].modules);
+ write_ptr->start = code;
+ erts_atomic_init_nob(&(write_ptr->end),
+ (erts_aint_t)(((byte *)code) + size));
+ write_ptr++;
}
void
erts_remove_from_ranges(BeamInstr* code)
{
Range* rp = find_range(code);
- erts_smp_atomic_set_nob(&rp->end, (erts_aint_t)rp->start);
+ erts_atomic_set_nob(&rp->end, (erts_aint_t)rp->start);
}
UWord
erts_ranges_sz(void)
{
- return erts_smp_atomic_read_nob(&mem_used) * sizeof(Range);
+ return erts_atomic_read_nob(&mem_used) * sizeof(Range);
}
/*
@@ -236,32 +243,33 @@ erts_ranges_sz(void)
void
erts_lookup_function_info(FunctionInfo* fi, BeamInstr* pc, int full_info)
{
- BeamInstr** low;
- BeamInstr** high;
- BeamInstr** mid;
+ ErtsCodeInfo** low;
+ ErtsCodeInfo** high;
+ ErtsCodeInfo** mid;
Range* rp;
+ BeamCodeHeader* hdr;
- fi->current = NULL;
+ fi->mfa = NULL;
fi->needed = 5;
fi->loc = LINE_INVALID_LOCATION;
rp = find_range(pc);
if (rp == 0) {
return;
}
+ hdr = (BeamCodeHeader*) rp->start;
- low = (BeamInstr **) (rp->start + MI_FUNCTIONS);
- high = low + rp->start[MI_NUM_FUNCTIONS];
+ low = hdr->functions;
+ high = low + hdr->num_functions;
while (low < high) {
mid = low + (high-low) / 2;
- if (pc < mid[0]) {
+ if (pc < (BeamInstr*)(mid[0])) {
high = mid;
- } else if (pc < mid[1]) {
- fi->current = mid[0]+2;
+ } else if (pc < (BeamInstr*)(mid[1])) {
+ fi->mfa = &mid[0]->mfa;
if (full_info) {
- BeamInstr** fp = (BeamInstr **) (rp->start +
- MI_FUNCTIONS);
+ ErtsCodeInfo** fp = hdr->functions;
int idx = mid - fp;
- lookup_loc(fi, pc, rp->start, idx);
+ lookup_loc(fi, pc, hdr, idx);
}
return;
} else {
@@ -276,16 +284,16 @@ find_range(BeamInstr* pc)
ErtsCodeIndex active = erts_active_code_ix();
Range* low = r[active].modules;
Range* high = low + r[active].n;
- Range* mid = (Range *) erts_smp_atomic_read_nob(&r[active].mid);
+ Range* mid = (Range *) erts_atomic_read_nob(&r[active].mid);
CHECK(&r[active]);
while (low < high) {
if (pc < mid->start) {
high = mid;
- } else if (pc > RANGE_END(mid)) {
+ } else if (pc >= RANGE_END(mid)) {
low = mid + 1;
} else {
- erts_smp_atomic_set_nob(&r[active].mid, (erts_aint_t) mid);
+ erts_atomic_set_nob(&r[active].mid, (erts_aint_t) mid);
return mid;
}
mid = low + (high-low) / 2;
@@ -294,39 +302,34 @@ find_range(BeamInstr* pc)
}
static void
-lookup_loc(FunctionInfo* fi, BeamInstr* orig_pc, BeamInstr* modp, int idx)
+lookup_loc(FunctionInfo* fi, const BeamInstr* pc,
+ BeamCodeHeader* code_hdr, int idx)
{
- Eterm* line = (Eterm *) modp[MI_LINE_TABLE];
- Eterm* low;
- Eterm* high;
- Eterm* mid;
- Eterm pc;
+ BeamCodeLineTab* lt = code_hdr->line_table;
+ const BeamInstr** low;
+ const BeamInstr** high;
+ const BeamInstr** mid;
- if (line == 0) {
+ if (lt == NULL) {
return;
}
- pc = (Eterm) (BeamInstr) orig_pc;
- fi->fname_ptr = (Eterm *) (BeamInstr) line[MI_LINE_FNAME_PTR];
- low = (Eterm *) (BeamInstr) line[MI_LINE_FUNC_TAB+idx];
- high = (Eterm *) (BeamInstr) line[MI_LINE_FUNC_TAB+idx+1];
+ fi->fname_ptr = lt->fname_ptr;
+ low = lt->func_tab[idx];
+ high = lt->func_tab[idx+1];
while (high > low) {
mid = low + (high-low) / 2;
if (pc < mid[0]) {
high = mid;
} else if (pc < mid[1]) {
int file;
- int index = mid - (Eterm *) (BeamInstr) line[MI_LINE_FUNC_TAB];
+ int index = mid - lt->func_tab[0];
- if (line[MI_LINE_LOC_SIZE] == 2) {
- Uint16* loc_table =
- (Uint16 *) (BeamInstr) line[MI_LINE_LOC_TAB];
- fi->loc = loc_table[index];
+ if (lt->loc_size == 2) {
+ fi->loc = lt->loc_tab.p2[index];
} else {
- Uint32* loc_table =
- (Uint32 *) (BeamInstr) line[MI_LINE_LOC_TAB];
- ASSERT(line[MI_LINE_LOC_SIZE] == 4);
- fi->loc = loc_table[index];
+ ASSERT(lt->loc_size == 4);
+ fi->loc = lt->loc_tab.p4[index];
}
if (fi->loc == LINE_INVALID_LOCATION) {
return;
@@ -335,7 +338,7 @@ lookup_loc(FunctionInfo* fi, BeamInstr* orig_pc, BeamInstr* modp, int idx)
file = LOC_FILE(fi->loc);
if (file == 0) {
/* Special case: Module name with ".erl" appended */
- Atom* mod_atom = atom_tab(atom_val(fi->current[0]));
+ Atom* mod_atom = atom_tab(atom_val(fi->mfa->module));
fi->needed += 2*(mod_atom->len+4);
} else {
Atom* ap = atom_tab(atom_val((fi->fname_ptr)[file-1]));
diff --git a/erts/emulator/beam/benchmark.c b/erts/emulator/beam/benchmark.c
deleted file mode 100644
index 8613131176..0000000000
--- a/erts/emulator/beam/benchmark.c
+++ /dev/null
@@ -1,363 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2002-2012. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
-
-#include "sys.h"
-#include "global.h"
-#include "benchmark.h"
-
-#ifdef BM_COUNTERS
-unsigned long long processes_busy;
-unsigned long long processes_spawned;
-unsigned long long messages_sent;
-unsigned long long messages_copied;
-unsigned long long messages_ego;
-unsigned long long minor_gc;
-unsigned long long major_gc;
-#endif /* BM_COUNTERS */
-
-#ifdef BM_TIMERS
-
-#if (defined(__i386__) || defined(__x86_64__)) && USE_PERFCTR
-
-#include "libperfctr.h"
-struct vperfctr *system_clock;
-double cpu_khz;
-BM_NEW_TIMER(start);
-
-static double get_hrvtime(void)
-{
- unsigned long long ticks;
- double milli_seconds;
-
- ticks = vperfctr_read_tsc(system_clock);
- milli_seconds = (double)ticks / cpu_khz;
- return milli_seconds;
-}
-
-static void stop_hrvtime(void)
-{
- if(system_clock)
- {
- vperfctr_stop(system_clock);
- vperfctr_close(system_clock);
- system_clock = NULL;
- }
-}
-
-#else /* not perfctr, assuming Solaris */
-#include <time.h>
-BM_TIMER_T system_clock;
-#endif
-
-unsigned long local_pause_times[MAX_PAUSE_TIME];
-unsigned long pause_times[MAX_PAUSE_TIME];
-unsigned long pause_times_old[MAX_PAUSE_TIME];
-
-BM_TIMER_T mmu;
-BM_TIMER_T mmu_counter;
-
-BM_NEW_TIMER(timer);
-BM_NEW_TIMER(system);
-BM_NEW_TIMER(gc);
-BM_NEW_TIMER(minor_gc);
-BM_NEW_TIMER(major_gc);
-BM_NEW_TIMER(minor_global_gc);
-BM_NEW_TIMER(major_global_gc);
-BM_NEW_TIMER(send);
-BM_NEW_TIMER(copy);
-BM_NEW_TIMER(size);
-BM_NEW_TIMER(max_minor);
-BM_NEW_TIMER(max_major);
-BM_NEW_TIMER(max_global_minor);
-BM_NEW_TIMER(max_global_major);
-BM_NEW_TIMER(misc0);
-BM_NEW_TIMER(misc1);
-BM_NEW_TIMER(misc2);
-#endif /* BM_TIMERS */
-
-#ifdef BM_HEAP_SIZES
-unsigned long long max_used_heap;
-unsigned long long max_allocated_heap;
-unsigned long long max_used_global_heap;
-unsigned long long max_allocated_global_heap;
-#endif /* BM_HEAP_SIZES */
-
-#ifdef BM_MESSAGE_SIZES
-unsigned long long words_sent;
-unsigned long long words_copied;
-unsigned long long words_prealloc;
-unsigned long long message_sizes[1000];
-#endif /* BM_MESSAGE_SIZES */
-
-/*****
- * The following functions have to be defined, but they only have bodies
- * when certain macros are defined.
- */
-
-void init_benchmarking()
-{
-#ifdef BM_TIMERS
-#if (defined(__i386__) || defined(__x86_64__)) && USE_PERFCTR
- /* pass `--with-perfctr=/path/to/perfctr' when configuring */
- struct perfctr_info info;
- struct vperfctr_control control;
- int i;
-
- system_clock = vperfctr_open();
- if (system_clock != NULL)
- {
- if (vperfctr_info(system_clock,&info) >= 0)
- {
- cpu_khz = (double)info.cpu_khz;
- if (info.cpu_features & PERFCTR_FEATURE_RDTSC)
- {
- memset(&control,0,sizeof control);
- control.cpu_control.tsc_on = 1;
- }
- }
- if (vperfctr_control(system_clock,&control) < 0)
- {
- vperfctr_close(system_clock);
- system_clock = NULL;
- }
- }
-
- for (i = 0; i < 1000; i++)
- {
- BM_START_TIMER(system);
- BM_STOP_TIMER(system);
- }
-
- timer_time = system_time / 1000;
- start_time = 0;
-#else
- int i;
- for (i = 0; i < 1000; i++)
- {
- BM_START_TIMER(system);
- BM_STOP_TIMER(system);
- }
- timer_time = system_time / 1000;
-#endif
-
- for (i = 0; i < MAX_PAUSE_TIME; i++) {
- local_pause_times[i] = 0;
- pause_times[i] = 0;
- pause_times_old[i] = 0;
- }
-
- mmu = 0;
- mmu_counter = 0;
-
- BM_MMU_INIT();
-#endif /* BM_TIMERS */
-
-#ifdef BM_COUNTERS
- processes_busy = 0;
- processes_spawned = 0;
- messages_sent = 0;
- messages_copied = 0;
- messages_ego = 0;
- minor_gc = 0;
- major_gc = 0;
-#endif /* BM_COUNTERS */
-
-#ifdef BM_HEAP_SIZES
- max_used_heap = 0;
- max_allocated_heap = 0;
- max_used_global_heap = 0;
- max_allocated_global_heap = 0;
-#endif /* BM_HEAP_SIZES */
-
-#ifdef BM_MESSAGE_SIZES
- words_sent = 0;
- words_copied = 0;
- words_prealloc = 0;
- {
- int i;
- for (i = 0; i < 1000; i++)
- message_sizes[i] = 0;
- }
-#endif /* BM_MESSAGE_SIZES */
-}
-
-void save_statistics()
-{
-#ifdef BM_STATISTICS
- FILE *file = fopen(BM_STATISTICS_FILE,"a");
- long i = 0;
-
- if (file)
- {
- erts_fprintf(file,"-------------------------------------------------------------------------\n");
- erts_fprintf(file,"The counters are reset at system start and are sums over the entire node.\n");
- erts_fprintf(file,"You may reset them manually using the BIFs in the module hipe_bifs.\n");
- erts_fprintf(file,"All times are given in milliseconds.\n");
- erts_fprintf(file,"-------------------------------------------------------------------------\n");
-
- erts_fprintf(file,"Node: %T\n",erts_this_node->sysname);
-
-#ifdef BM_COUNTERS
- erts_fprintf(file,"Number of processes spawned: %lld\n",processes_spawned);
- erts_fprintf(file,"Number of local minor GCs: %lld\n",minor_gc);
- erts_fprintf(file,"Number of local major GCs: %lld\n",major_gc);
- erts_fprintf(file,"Number of messages sent: %lld\n",messages_sent);
- erts_fprintf(file,"Number of messages copied: %lld\n",messages_copied);
- erts_fprintf(file,"Number of messages sent to self: %lld\n",messages_ego);
-#endif /* BM_COUNTERS */
-
-#ifdef BM_MESSAGE_SIZES
- erts_fprintf(file,"Number of words sent: %lld\n",words_sent);
- erts_fprintf(file,"Number of words copied: %lld\n",words_copied);
- erts_fprintf(file,"Number of words preallocated: %lld\n",words_prealloc);
-#endif /* BM_MESSAGE_SIZES */
-
-#ifdef BM_HEAP_SIZES
- erts_fprintf(file,"Biggest local heap used (in words): %lld\n",max_used_heap);
- erts_fprintf(file,"Biggest local heap allocated (in words): %lld\n",max_allocated_heap);
- erts_fprintf(file,"Biggest global heap used (in words): %lld\n",max_used_global_heap);
- erts_fprintf(file,"Biggest global heap allocated (in words): %lld\n",max_allocated_global_heap);
-#endif /* BM_HEAP_SIZES */
-
-#ifdef BM_TIMERS
- erts_fprintf(file,"--- The total active system time is the sum of all times below ---\n");
- BM_TIME_PRINTER("Mutator time",system_time);
- BM_TIME_PRINTER("Time spent in send (excluding size & copy)",send_time);
- BM_TIME_PRINTER("Time spent in size",size_time);
- BM_TIME_PRINTER("Time spent in copy",copy_time);
- BM_TIME_PRINTER("Time spent in local minor GC",minor_gc_time);
- BM_TIME_PRINTER("Time spent in local major GC",major_gc_time);
- BM_TIME_PRINTER("Time spent in global minor GC",minor_global_gc_time);
- BM_TIME_PRINTER("Time spent in global major GC",major_global_gc_time);
- erts_fprintf(file,"---\n");
- BM_TIME_PRINTER("Maximum time spent in one separate local minor GC",max_minor_time);
- BM_TIME_PRINTER("Maximum time spent in one separate local major GC",max_major_time);
- BM_TIME_PRINTER("Maximum time spent in one separate global minor GC",max_global_minor_time);
- BM_TIME_PRINTER("Maximum time spent in one separate global major GC",max_global_major_time);
-#endif /* BM_TIMERS */
-
-#if 0
- /* Save a log file for import into excel */
-
- long long total_time, n;
- long left, right, mid;
-
-#ifdef BM_COUNTERS
- erts_fprintf(file,"Spawns\tLocalGC\tMAGC\tMessages\tMutator_t\tLocalGC_t\tMAGC_t\tLocMaxP\tLocMeanP\tLocGeoMP\tMAMaxP\tMAMeanP\tMAGeoMP\t\tCMAGC\tCMAGC_t\n");
- erts_fprintf(file,"%lld\t%lld\t%lld\t%lld\t",
- processes_spawned,
- minor_garbage_cols + major_garbage_cols,
- minor_global_garbage_cols + major_global_garbage_cols,
- messages_sent);
-#endif /* BM_COUNTERS */
-
-#ifdef BM_TIMERS
- erts_fprintf(file,"%lld\t%lld\t%lld\t",
- (long long)(system_time + send_time + size_time + copy_time),
- (long long)(minor_gc_time + major_gc_time),
- (long long)(minor_global_gc_time + major_global_gc_time));
-
- total_time = 0; n = 0;
- left = 0; right = 0; mid = 0;
- for (i = 0; i < MAX_PAUSE_TIME; i++) {
- total_time += local_pause_times[i] * i;
- n += local_pause_times[i];
- if (i > mid)
- right += local_pause_times[i];
- while(right > left) {
- left += local_pause_times[mid++];
- right -= local_pause_times[mid];
- }
- }
- erts_fprintf(file,"%lld\t%lld\t%ld\t",
- (long long)((max_minor_time > max_major_time ?
- max_minor_time :
- max_major_time)*1000),
- total_time / n,
- mid);
-
- total_time = 0; n = 0;
- left = 0; right = 0; mid = 0;
- for (i = 0; i < MAX_PAUSE_TIME; i++) {
- if (pause_times[i] > 0) {
- total_time += pause_times[i] * i;
- n += pause_times[i];
- if (i > mid)
- right += pause_times[i];
- while(right > left) {
- left += pause_times[mid++];
- right -= pause_times[mid];
- }
- }
- }
- erts_fprintf(file,"%lld\t%lld\t%ld\t",
- (long long)((max_global_minor_time > max_global_major_time ?
- max_global_minor_time :
- max_global_major_time)*1000),
- (n > 0 ? total_time / n : 0),
- mid);
-
- erts_fprintf(file,"\t%lld\t%lld\n",n,total_time);
-
- erts_fprintf(file,"\nMinor:\n");
- for (i = 0; i < MAX_PAUSE_TIME; i++) {
- if (i < 1000 || pause_times[i] > 0) {
- erts_fprintf(file,"%d\t%ld\n",i,pause_times[i]);
- }
- }
-
- fprintf(file,"Major:\n");
- for (i = 0; i < MAX_PAUSE_TIME; i++) {
- if (pause_times_old[i] > 0) {
- fprintf(file,"%d\t%ld\n",i,pause_times_old[i]);
- }
- }
-#endif /* BM_TIMERS */
-
-#ifdef BM_TIMERS
- total_time = 0; n = 0;
- left = 0; right = 0; mid = 0;
- fprintf(file,"\nLocal:\n");
- for (i = 0; i < MAX_PAUSE_TIME; i++) {
- if (local_pause_times[i] > 0) {
- erts_fprintf(file,"%d\t%ld\n",i,local_pause_times[i]);
- total_time += local_pause_times[i] * i;
- n += local_pause_times[i];
- if (i > mid)
- right += local_pause_times[i];
- while(right > left) {
- left += local_pause_times[mid++];
- right -= local_pause_times[mid];
- }
- }
- }
- erts_fprintf(file,"Mid: %ld Mean: %ld\n",(long)mid,
- (long)(n > 0 ? total_time / n : 0));
-#endif
-#endif /* 0 */
- fclose(file);
- }
- else
- fprintf(stderr,"Sorry... Can not write to %s!\n\r",BM_STATISTICS_FILE);
-#endif /* BM_STATISTICS */
-}
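
The #if 0 block in the deleted save_statistics() computed, among other
things, a weighted median of the pause-time histogram via the
left/right/mid walk. A cleaned-up reconstruction of that calculation,
for reference (hist[i] counts pauses of length i):

/* Keep the histogram weight on both sides of mid balanced; mid ends up
 * at (approximately) the median pause time. */
long weighted_median(const unsigned long *hist, long n)
{
    long i, mid = 0;
    unsigned long left = 0, right = 0;

    for (i = 0; i < n; i++) {
        if (i > mid)
            right += hist[i];           /* new weight lands on the right */
        while (right > left) {          /* rebalance by advancing mid */
            left += hist[mid++];
            right -= hist[mid];
        }
    }
    return mid;
}
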
diff --git a/erts/emulator/beam/benchmark.h b/erts/emulator/beam/benchmark.h
deleted file mode 100644
index 766edaac42..0000000000
--- a/erts/emulator/beam/benchmark.h
+++ /dev/null
@@ -1,329 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2002-2012. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-#ifndef __BENCHMARK_H__
-#define __BENCHMARK_H__
-
-/* The define __BENCHMARK__ is the master switch to turn on and off
- * benchmarking. This will enable the benchmark-BIFs in hipe_bif1.c.
- * Documentation for the BIFs is in hipe_bif1.c, and that is where you
- * will find the information about how to actually get some data out
- * from these timers and counters.
- */
-/* #define __BENCHMARK__ */
-
-#ifdef __BENCHMARK__
-/*
- * The defines below enable different parts of the benchmarking.
- * Counters and timers that are disabled always report zero in
- * the BIFs.
- */
-
-/* BM_TIMERS keeps track of the time spent in different parts of the
- * system. It only measures actual active time, not time spent in idle
- * mode. These timers require hardware support. For Linux, use the
- * package perfctr from user.it.uu.se/~mikpe/linux/perfctr. If this
- * package is not specified when configuring the system
- * (--with-perfctr=PATH), the Solaris hrtime_t will be used.
- * To add new timers look below.
- */
-#define BM_TIMERS
-
-/* BM_COUNTERS count all kinds of events that occur in the system.
- * Among other things it counts the number of messages, the number of
- * garbage collections, the number of processes spawned, etc.
- * To add new counters look below.
- */
-#define BM_COUNTERS
-
-/* BM_MESSAGE_SIZES keeps a log of the size of all messages sent in
- * the system. This introduces a time overhead for the shared heap
- * system since all message sizes have to be calculated at send.
- */
-/* #define BM_MESSAGE_SIZES */
-
-/* BM_HEAP_SIZES goes through all processes at garbage collection time
- * to sum their allocated and used heap sizes. In anything other than a
- * shared heap system, this will cost.
- */
-/* #define BM_HEAP_SIZES */
-
-/* BM_STATISTICS saves an entry in the file BM_STATISTICS_FILE. This
- * is done for each erlang node at exit time.
- */
-/* #define BM_STATISTICS */
-
-#endif /* __BENCHMARK__ */
-
-
-#ifdef BM_STATISTICS
-# define BM_STATISTICS_FILE "/tmp/erlang_statistics.joppe.log"
-#endif /* BM_STATISTICS */
-
-
-/************ There are no more settings below this line *************/
-
-/*
- * Maintenance and how to add new stuff is documented by the code
- * below ;-)
- */
-
-#ifdef BM_COUNTERS
-/*********************************************************************
- * To add new counters:
- *
- * Add the variable here AND in benchmark.c. Use the macro
- * BM_COUNT(var) in the code where you want to increase it.
- *
- */
-extern unsigned long long processes_busy;
-extern unsigned long long processes_spawned;
-extern unsigned long long messages_sent;
-extern unsigned long long messages_copied;
-extern unsigned long long messages_ego;
-extern unsigned long long minor_gc;
-extern unsigned long long major_gc;
-
-#define BM_COUNT(var) (var)++;
-
-#define BM_EGO_COUNT(send,rec) { \
- if ((send) == (rec)) \
- BM_COUNT(messages_ego); }
-
-#define BM_LAZY_COPY_START long long gcs = minor_global_gc + major_global_gc;
-#define BM_LAZY_COPY_STOP { gcs = (minor_global_gc + major_global_gc) - gcs; \
- if (gcs > gc_in_copy) gc_in_copy = gcs; }
-
-#else /* !BM_COUNTERS */
-# define BM_COUNT(var)
-# define BM_EGO_COUNT(send,rec)
-# define BM_LAZY_COPY_START
-# define BM_LAZY_COPY_STOP
-#endif /* BM_COUNTERS */
-
-
-#ifdef BM_TIMERS
-/*********************************************************************
- * To add new timers:
- *
- * Add the variable below using the form extern BM_TIMER_T blah_time.
- * Also add them in benchmark.c using the macro NEW_TIMER(blah). Use
- * the macro BM_SWAP_TIMER(from,blah) ... BM_SWAP_TIMER(blah,to) to
- * start and stop the new timer. Note that you have to know what
- * timer is running at the place where you want to insert your new
- * timer to be able to stop and start (from,to) it.
- *
- * You can use the macros BM_STOP_TIMER(blah) and BM_START_TIMER(blah)
- * around code that should not be timed at all. As above, you have to
- * know what timer to start and stop. The system timer is running at
- * most places in the emulator. Only the garbage collector and the
- * message sending has its own timers at the moment.
- *
- * The timer_time used when stopping timers is the time it takes to
- * start and stop the timers, calculated in init_benchmarking(). If it
- * is not there, the time it takes to do this will actually be
- * substantial compared to some small times in the system we want to
- * measure (send time in shared heap for instance).
- */
-
-#if (defined(__i386__) || defined(__x86_64__)) && USE_PERFCTR
-#include "libperfctr.h"
-
-#define BM_TIMER_T double
-
-extern struct vperfctr *system_clock;
-extern double cpu_khz;
-extern BM_TIMER_T start_time;
-
-#define BM_START_TIMER(t) start_time = \
- (BM_TIMER_T)vperfctr_read_tsc(system_clock) / \
- cpu_khz;
-
-#define BM_STOP_TIMER(t) do { \
- BM_TIMER_T tmp = ((BM_TIMER_T)vperfctr_read_tsc(system_clock) / cpu_khz); \
- tmp -= (start_time + timer_time); \
- t##_time += (tmp > 0 ? tmp : 0); \
-} while(0)
-
-#define BM_TIME_PRINTER(str,time) do { \
- int min,sec,milli,micro; \
- BM_TIMER_T tmp = (time) * 1000; \
- micro = (uint)(tmp - ((int)(tmp / 1000)) * 1000); \
- tmp /= 1000; \
- milli = (uint)(tmp - ((int)(tmp / 1000)) * 1000); \
- tmp /= 1000; \
- sec = (uint)(tmp - ((int)(tmp / 60)) * 60); \
- min = (uint)tmp / 60; \
- erts_fprintf(file,str": %d:%02d.%03d %03d\n",min,sec,milli,micro); \
-} while(0)
-
-#else /* !USE_PERFCTR (Assuming Solaris) */
-
-#define BM_TIMER_T hrtime_t
-#define BM_START_TIMER(t) system_clock = sys_gethrtime()
-#define BM_STOP_TIMER(t) do { \
- BM_TIMER_T tmp = (sys_gethrtime() - system_clock) - timer_time; \
- t##_time += (tmp > 0 ? tmp : 0); \
-} while(0)
-
-#define BM_TIME_PRINTER(str,time) do { \
- int min,sec,milli,micro; \
- BM_TIMER_T tmp; \
- tmp = (time) / 1000; \
- micro = tmp % 1000; \
- tmp /= 1000; \
- milli = tmp % 1000; \
- tmp /= 1000; \
- sec = tmp % 60; \
- min = tmp / 60; \
- erts_fprintf(file,str": %d:%02d.%03d %03d\n",min,sec,milli,micro); \
-} while(0)
-
-extern BM_TIMER_T system_clock;
-#endif /* USE_PERFCTR */
-
-extern BM_TIMER_T timer_time;
-extern BM_TIMER_T system_time;
-extern BM_TIMER_T gc_time;
-extern BM_TIMER_T minor_gc_time;
-extern BM_TIMER_T major_gc_time;
-extern BM_TIMER_T minor_global_gc_time;
-extern BM_TIMER_T major_global_gc_time;
-extern BM_TIMER_T send_time;
-extern BM_TIMER_T copy_time;
-extern BM_TIMER_T size_time;
-extern BM_TIMER_T max_minor_time;
-extern BM_TIMER_T max_major_time;
-extern BM_TIMER_T max_global_minor_time;
-extern BM_TIMER_T max_global_major_time;
-extern BM_TIMER_T misc0_time;
-extern BM_TIMER_T misc1_time;
-extern BM_TIMER_T misc2_time;
-
-#define MAX_PAUSE_TIME 500000
-extern unsigned long local_pause_times[MAX_PAUSE_TIME];
-extern unsigned long pause_times[MAX_PAUSE_TIME];
-extern unsigned long pause_times_old[MAX_PAUSE_TIME];
-
-#define MMU_INTERVAL 5 /* milli seconds */
-extern BM_TIMER_T mmu_counter;
-extern BM_TIMER_T mmu;
-
-#define BM_NEW_TIMER(t) BM_TIMER_T t##_time = 0;
-#define BM_RESET_TIMER(t) t##_time = 0;
-#define BM_SWAP_TIMER(t1,t2) do { BM_STOP_TIMER(t1); BM_START_TIMER(t2); } while(0)
-#define BM_MMU_INIT() do { \
- BM_TIMER_T gc = gc_time; \
- while (gc > 0) { \
- if (gc > MMU_INTERVAL) { \
- gc -= MMU_INTERVAL - mmu_counter; \
- erts_printf("%d\n",(int)((mmu / MMU_INTERVAL) * 100)); \
- mmu_counter = 0; mmu = 0; \
- } else { \
- mmu_counter += gc; \
- if (mmu_counter >= MMU_INTERVAL) { \
- mmu_counter -= MMU_INTERVAL; \
- erts_printf("%d\n",(int)((mmu / MMU_INTERVAL) * 100)); \
- mmu = 0; \
- } \
- gc = 0; \
- } \
- } \
- BM_RESET_TIMER(system); \
- BM_RESET_TIMER(send); \
- BM_RESET_TIMER(copy); \
- BM_RESET_TIMER(size); \
-} while(0)
-
-#define BM_MMU_READ() do { \
- BM_TIMER_T mut = system_time + send_time + copy_time + size_time; \
- while (mut > 0) { \
- if (mut > MMU_INTERVAL) { \
- BM_TIMER_T tmp = MMU_INTERVAL - mmu_counter; \
- mmu += tmp; mut -= tmp; \
- erts_printf("%d\n",(int)((mmu / MMU_INTERVAL) * 100)); \
- mmu_counter = 0; mmu = 0; \
- } else { \
- mmu_counter += mut; mmu += mut; \
- if (mmu_counter >= MMU_INTERVAL) { \
- mmu_counter -= MMU_INTERVAL; \
- mmu -= mmu_counter; \
- erts_printf("%d\n",(int)((mmu / MMU_INTERVAL) * 100)); \
- mmu = mmu_counter; \
- } \
- mut = 0; \
- } \
- } \
-} while(0)
-
-#else /* !BM_TIMERS */
-# define BM_NEW_TIMER(t)
-# define BM_START_TIMER(t)
-# define BM_STOP_TIMER(t)
-# define BM_RESET_TIMER(t)
-# define BM_SWAP_TIMER(t1,t2)
-# define BM_TIME_PRINTER(str,time)
-# define BM_MMU_INIT()
-# define BM_MMU_READ()
-#endif /* BM_TIMERS */
-
-#ifdef BM_HEAP_SIZES
-extern unsigned long long max_used_heap;
-extern unsigned long long max_allocated_heap;
-extern unsigned long long max_used_global_heap;
-extern unsigned long long max_allocated_global_heap;
-#endif /* BM_HEAP_SIZES */
-
-#ifdef BM_MESSAGE_SIZES
-extern unsigned long long words_sent;
-extern unsigned long long words_copied;
-extern unsigned long long words_prealloc;
-extern unsigned long long message_sizes[1000];
-
-#define BM_MESSAGE_COPIED(size) { \
- words_copied += size; \
- BM_COUNT(messages_copied); }
-
-#define BM_PREALLOC_DATA(size) { \
- words_prealloc += size; }
-
-#define BM_MESSAGE(mess,send,rec) { \
- Uint msize = size_object(mess); \
- words_sent += msize; \
- if (msize < 1000) \
- message_sizes[msize]++; \
- else \
- message_sizes[999]++; \
- BM_EGO_COUNT(send,rec); \
- BM_COUNT(messages_sent); }
-
-#else /* !BM_MESSAGE_SIZES */
-
-#define BM_MESSAGE_COPIED(size) BM_COUNT(messages_copied);
-#define BM_PREALLOC_DATA(size)
-#define BM_MESSAGE(mess,send,rec) { \
- BM_EGO_COUNT(send,rec); \
- BM_COUNT(messages_sent); }
-
-#endif /* BM_MESSAGE_SIZES */
-
-void init_benchmarking(void);
-void save_statistics(void);
-
-#endif /* __BENCHMARK_H__ */
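
The deleted timer macros compensated for their own overhead:
init_benchmarking() timed 1000 empty start/stop pairs to derive
timer_time, and BM_STOP_TIMER subtracted that constant from every
measurement, clamping at zero. A standalone sketch of the same
calibration idea, using POSIX clock_gettime() in place of the
perfctr/hrtime clocks the original required:

#include <time.h>

static double timer_time;           /* overhead of one start/stop pair, ns */
static double start_ns;

static double now_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec * 1e9 + ts.tv_nsec;
}

void start_timer(void) { start_ns = now_ns(); }

double stop_timer(void)
{
    double elapsed = now_ns() - start_ns - timer_time;
    return elapsed > 0 ? elapsed : 0;   /* never report negative time */
}

void init_timers(void)
{
    int i;
    double total = 0;
    for (i = 0; i < 1000; i++) {        /* same 1000-pair warmup loop */
        start_timer();
        total += now_ns() - start_ns;   /* cost of an empty measurement */
    }
    timer_time = total / 1000;
}
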
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 06a1230ca0..4b11884f38 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -28,7 +29,9 @@
#include "global.h"
#include "erl_process.h"
#include "error.h"
+#define ERL_WANT_HIPE_BIF_WRAPPER__
#include "bif.h"
+#undef ERL_WANT_HIPE_BIF_WRAPPER__
#include "big.h"
#include "dist.h"
#include "erl_version.h"
@@ -40,15 +43,24 @@
#define ERTS_PTAB_WANT_BIF_IMPL__
#include "erl_ptab.h"
#include "erl_bits.h"
+#include "erl_bif_unique.h"
+#include "erl_map.h"
+#include "erl_msacc.h"
-static Export* flush_monitor_message_trap = NULL;
+Export *erts_await_result;
+static Export* flush_monitor_messages_trap = NULL;
static Export* set_cpu_topology_trap = NULL;
static Export* await_proc_exit_trap = NULL;
static Export* await_port_send_result_trap = NULL;
Export* erts_format_cpu_topology_trap = NULL;
+static Export dsend_continue_trap_export;
+Export *erts_convert_time_unit_trap = NULL;
+
+static Export *await_msacc_mod_trap = NULL;
+static erts_atomic32_t msacc;
static Export *await_sched_wall_time_mod_trap;
-static erts_smp_atomic32_t sched_wall_time;
+static erts_atomic32_t sched_wall_time;
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
@@ -62,7 +74,7 @@ BIF_RETTYPE spawn_3(BIF_ALIST_3)
ErlSpawnOpts so;
Eterm pid;
- so.flags = 0;
+ so.flags = erts_default_spo_flags;
pid = erl_create_process(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, &so);
if (is_non_value(pid)) {
BIF_ERROR(BIF_P, so.error_code);
@@ -86,14 +98,12 @@ static int insert_internal_link(Process* p, Eterm rpid)
ASSERT(is_internal_pid(rpid));
-#ifdef ERTS_SMP
if (IS_TRACED(p)
&& (ERTS_TRACE_FLAGS(p) & (F_TRACE_SOL|F_TRACE_SOL1))) {
rp_locks = ERTS_PROC_LOCKS_ALL;
}
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK);
-#endif
+ erts_proc_lock(p, ERTS_PROC_LOCK_LINK);
/* get a pointer to the process struct of the linked process */
rp = erts_pid2proc_opt(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK,
@@ -101,7 +111,7 @@ static int insert_internal_link(Process* p, Eterm rpid)
ERTS_P2P_FLG_ALLOW_OTHER_X);
if (!rp) {
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_LINK);
return 0;
}
@@ -109,15 +119,12 @@ static int insert_internal_link(Process* p, Eterm rpid)
erts_add_link(&ERTS_P_LINKS(p), LINK_PID, rp->common.id);
erts_add_link(&ERTS_P_LINKS(rp), LINK_PID, p->common.id);
- ASSERT(is_nil(ERTS_TRACER_PROC(p))
- || is_internal_pid(ERTS_TRACER_PROC(p))
- || is_internal_port(ERTS_TRACER_PROC(p)));
+ ASSERT(IS_TRACER_VALID(ERTS_TRACER(p)));
if (IS_TRACED(p)) {
if (ERTS_TRACE_FLAGS(p) & (F_TRACE_SOL|F_TRACE_SOL1)) {
ERTS_TRACE_FLAGS(rp) |= (ERTS_TRACE_FLAGS(p) & TRACEE_FLAGS);
- ERTS_TRACER_PROC(rp) = ERTS_TRACER_PROC(p); /* maybe steal */
-
+ erts_tracer_replace(&rp->common, ERTS_TRACER(p));
if (ERTS_TRACE_FLAGS(p) & F_TRACE_SOL1) { /* maybe override */
ERTS_TRACE_FLAGS(rp) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
ERTS_TRACE_FLAGS(p) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
@@ -126,13 +133,14 @@ static int insert_internal_link(Process* p, Eterm rpid)
}
}
if (IS_TRACED_FL(rp, F_TRACE_PROCS))
- trace_proc(p, rp, am_getting_linked, p->common.id);
+ trace_proc(p, p == rp ? rp_locks : ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK,
+ rp, am_getting_linked, p->common.id);
if (p == rp)
- erts_smp_proc_unlock(p, rp_locks & ~ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(p, rp_locks & ~ERTS_PROC_LOCK_MAIN);
else {
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(rp, rp_locks);
}
return 1;
@@ -145,7 +153,7 @@ BIF_RETTYPE link_1(BIF_ALIST_1)
DistEntry *dep;
if (IS_TRACED_FL(BIF_P, F_TRACE_PROCS)) {
- trace_proc(BIF_P, BIF_P, am_link, BIF_ARG_1);
+ trace_proc(BIF_P, ERTS_PROC_LOCK_MAIN, BIF_P, am_link, BIF_ARG_1);
}
/* check that the pid or port which is our argument is OK */
@@ -168,13 +176,13 @@ BIF_RETTYPE link_1(BIF_ALIST_1)
goto res_no_proc;
}
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
if (erts_add_link(&ERTS_P_LINKS(BIF_P), LINK_PID, BIF_ARG_1) >= 0)
send_link_signal = 1;
/* else: already linked */
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
if (send_link_signal) {
Eterm ref;
@@ -186,7 +194,7 @@ BIF_RETTYPE link_1(BIF_ALIST_1)
goto res_no_proc;
case ERTS_PORT_OP_SCHEDULED:
if (refp) {
- ASSERT(is_internal_ref(ref));
+ ASSERT(is_internal_ordinary_ref(ref));
BIF_TRAP3(await_port_send_result_trap, BIF_P, ref, am_true, am_true);
}
default:
@@ -202,11 +210,11 @@ BIF_RETTYPE link_1(BIF_ALIST_1)
if (is_external_pid(BIF_ARG_1)) {
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
/* We may earn time by checking first that we're not linked already */
if (erts_lookup_link(ERTS_P_LINKS(BIF_P), BIF_ARG_1) != NULL) {
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
BIF_RET(am_true);
}
else {
@@ -215,7 +223,7 @@ BIF_RETTYPE link_1(BIF_ALIST_1)
ErtsDSigData dsd;
dep = external_pid_dist_entry(BIF_ARG_1);
if (dep == erts_this_dist_entry) {
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
goto res_no_proc;
}
@@ -224,13 +232,13 @@ BIF_RETTYPE link_1(BIF_ALIST_1)
case ERTS_DSIG_PREP_NOT_ALIVE:
/* Let the dlink trap handle it */
case ERTS_DSIG_PREP_NOT_CONNECTED:
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
BIF_TRAP1(dlink_trap, BIF_P, BIF_ARG_1);
case ERTS_DSIG_PREP_CONNECTED:
/* We are connected. Setup link and send link signal */
- erts_smp_de_links_lock(dep);
+ erts_de_links_lock(dep);
erts_add_link(&ERTS_P_LINKS(BIF_P), LINK_PID, BIF_ARG_1);
lnk = erts_add_or_lookup_link(&(dep->nlinks),
@@ -239,9 +247,9 @@ BIF_RETTYPE link_1(BIF_ALIST_1)
ASSERT(lnk != NULL);
erts_add_link(&ERTS_LINK_ROOT(lnk), LINK_PID, BIF_ARG_1);
- erts_smp_de_links_unlock(dep);
- erts_smp_de_runlock(dep);
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
+ erts_de_links_unlock(dep);
+ erts_de_runlock(dep);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
code = erts_dsig_send_link(&dsd, BIF_P->common.id, BIF_ARG_1);
if (code == ERTS_DSIG_SEND_YIELD)
@@ -257,11 +265,11 @@ BIF_RETTYPE link_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
res_no_proc: {
- erts_aint32_t state = erts_smp_atomic32_read_nob(&BIF_P->state);
+ erts_aint32_t state = erts_atomic32_read_nob(&BIF_P->state);
if (state & ERTS_PSFLG_TRAP_EXIT) {
ErtsProcLocks locks = ERTS_PROC_LOCK_MAIN;
erts_deliver_exit_message(BIF_ARG_1, BIF_P, &locks, am_noproc, NIL);
- erts_smp_proc_unlock(BIF_P, ~ERTS_PROC_LOCK_MAIN & locks);
+ erts_proc_unlock(BIF_P, ~ERTS_PROC_LOCK_MAIN & locks);
BIF_RET(am_true);
}
else
@@ -269,77 +277,57 @@ res_no_proc: {
}
}
-#define ERTS_DEMONITOR_FALSE 2
-#define ERTS_DEMONITOR_TRUE 1
-#define ERTS_DEMONITOR_BADARG 0
-#define ERTS_DEMONITOR_YIELD_TRUE -1
-#define ERTS_DEMONITOR_INTERNAL_ERROR -2
-
-static int
+/* This function is allowed to return a range of values handled by demonitor/1-2,
+ * namely the atoms true, false, yield, internal_error, badarg, or THE_NON_VALUE.
+ */
+static Eterm
remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to)
{
ErtsDSigData dsd;
ErtsMonitor *dmon;
ErtsMonitor *mon;
int code;
- int res;
-#ifndef ERTS_SMP
- int stale_mon = 0;
-#endif
+ Eterm res = am_false;
- ERTS_SMP_LC_ASSERT((ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK)
+ ERTS_LC_ASSERT((ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK)
== erts_proc_lc_my_proc_locks(c_p));
code = erts_dsig_prepare(&dsd, dep, c_p, ERTS_DSP_RLOCK, 0);
switch (code) {
case ERTS_DSIG_PREP_NOT_ALIVE:
case ERTS_DSIG_PREP_NOT_CONNECTED:
-#ifndef ERTS_SMP
- /* XXX Is this possible? Shouldn't this link
- previously have been removed if the node
- had previously been disconnected. */
- ASSERT(0);
- stale_mon = 1;
-#endif
/*
* In the smp case this is possible if the node goes
* down just before the call to demonitor.
*/
if (dep) {
- erts_smp_de_links_lock(dep);
+ erts_de_links_lock(dep);
dmon = erts_remove_monitor(&dep->monitors, ref);
- erts_smp_de_links_unlock(dep);
+ erts_de_links_unlock(dep);
if (dmon)
erts_destroy_monitor(dmon);
}
mon = erts_remove_monitor(&ERTS_P_MONITORS(c_p), ref);
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_LINK);
- res = ERTS_DEMONITOR_TRUE;
+ res = am_true;
break;
case ERTS_DSIG_PREP_CONNECTED:
- erts_smp_de_links_lock(dep);
+ erts_de_links_lock(dep);
mon = erts_remove_monitor(&ERTS_P_MONITORS(c_p), ref);
dmon = erts_remove_monitor(&dep->monitors, ref);
- erts_smp_de_links_unlock(dep);
- erts_smp_de_runlock(dep);
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_LINK);
+ erts_de_links_unlock(dep);
+ erts_de_runlock(dep);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_LINK);
if (!dmon) {
-#ifndef ERTS_SMP
- /* XXX How is this possible? Shouldn't this link
- previously have been removed when the distributed
- end was removed. */
- ASSERT(0);
- stale_mon = 1;
-#endif
/*
* This is possible when smp support is enabled.
* 'DOWN' message just arrived.
*/
- res = ERTS_DEMONITOR_TRUE;
+ res = am_true;
}
else {
/*
@@ -351,33 +339,18 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to)
c_p->common.id,
(mon->name != NIL
? mon->name
- : mon->pid),
+ : mon->u.pid),
ref,
0);
- res = (code == ERTS_DSIG_SEND_YIELD
- ? ERTS_DEMONITOR_YIELD_TRUE
- : ERTS_DEMONITOR_TRUE);
+ res = (code == ERTS_DSIG_SEND_YIELD ? am_yield : am_true);
erts_destroy_monitor(dmon);
-
}
break;
default:
ASSERT(! "Invalid dsig prepare result");
- return ERTS_DEMONITOR_INTERNAL_ERROR;
+ return am_internal_error;
}
-#ifndef ERTS_SMP
- if (stale_mon) {
- erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp, "Stale process monitor %T to ", ref);
- if (is_atom(to))
- erts_dsprintf(dsbufp, "{%T, %T}", to, dep->sysname);
- else
- erts_dsprintf(dsbufp, "%T", to);
- erts_dsprintf(dsbufp, " found\n");
- erts_send_error_to_logger(c_p->group_leader, dsbufp);
- }
-#endif
/*
* We aren't allowed to destroy 'mon' until now, since 'to'
@@ -387,118 +360,158 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to)
lookup and remove */
erts_destroy_monitor(mon);
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
return res;
}
-static int demonitor(Process *c_p, Eterm ref)
+static ERTS_INLINE void
+demonitor_local_process(Process *c_p, Eterm ref, Eterm to, Eterm *res)
+{
+ Process *rp = erts_pid2proc_opt(c_p,
+ ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK,
+ to,
+ ERTS_PROC_LOCK_LINK,
+ ERTS_P2P_FLG_ALLOW_OTHER_X);
+ ErtsMonitor *mon = erts_remove_monitor(&ERTS_P_MONITORS(c_p), ref);
+
+ if (!mon)
+ *res = am_false;
+ else
+ {
+ *res = am_true;
+ erts_destroy_monitor(mon);
+ }
+ if (rp) {
+ ErtsMonitor *rmon;
+ rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref);
+ if (rp != c_p)
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ if (rmon != NULL)
+ erts_destroy_monitor(rmon);
+ }
+ else {
+ ERTS_ASSERT_IS_NOT_EXITING(c_p);
+ }
+}
+
+static ERTS_INLINE BIF_RETTYPE
+demonitor_local_port(Process *origin, Eterm ref, Eterm target)
{
- ErtsMonitor *mon = NULL; /* The monitor entry to delete */
- Process *rp; /* Local target process */
- Eterm to = NIL; /* Monitor link target */
- DistEntry *dep = NULL; /* Target's distribution entry */
- int deref_de = 0;
- int res;
- int unlock_link = 1;
+ BIF_RETTYPE res = am_false;
+ Port *port = erts_port_lookup_raw(target);
+
+ if (!port) {
+ BIF_ERROR(origin, BADARG);
+ }
+ erts_proc_unlock(origin, ERTS_PROC_LOCK_LINK);
+
+ if (port) {
+ Eterm trap_ref;
+ switch (erts_port_demonitor(origin, ERTS_PORT_DEMONITOR_NORMAL,
+ port, ref, &trap_ref)) {
+ case ERTS_PORT_OP_DROPPED:
+ case ERTS_PORT_OP_BADARG:
+ break;
+ case ERTS_PORT_OP_SCHEDULED:
+ BIF_TRAP3(await_port_send_result_trap, origin, trap_ref,
+ am_busy_port, am_true);
+ /* the busy_port atom will never be returned, because it cannot be
+ * returned from erts_port_(de)monitor; it is handled here in case a
+ * future internal API change makes it possible */
+ default:
+ break;
+ }
+ }
+ else {
+ ERTS_ASSERT_IS_NOT_EXITING(origin);
+ }
+ BIF_RET(res);
+}
+/* Can return the atoms true, false, yield, internal_error or badarg, or
+ * THE_NON_VALUE if an error occurred or a trap has been set up.
+ */
+static
+BIF_RETTYPE demonitor(Process *c_p, Eterm ref, Eterm *multip)
+{
+ ErtsMonitor *mon = NULL; /* The monitor entry to delete */
+ Eterm to = NIL; /* Monitor link target */
+ DistEntry *dep = NULL; /* Target's distribution entry */
+ BIF_RETTYPE res = am_false;
+ int unlock_link = 1;
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_LINK);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_LINK);
if (is_not_internal_ref(ref)) {
- res = ERTS_DEMONITOR_BADARG;
+ res = am_badarg;
goto done; /* Cannot be this monitor's ref */
}
mon = erts_lookup_monitor(ERTS_P_MONITORS(c_p), ref);
if (!mon) {
- res = ERTS_DEMONITOR_FALSE;
goto done;
}
- if (mon->type != MON_ORIGIN) {
- res = ERTS_DEMONITOR_BADARG;
- goto done;
- }
- to = mon->pid;
-
- if (is_atom(to)) {
- /* Monitoring a name at node to */
- ASSERT(is_node_name_atom(to));
- dep = erts_sysname_to_connected_dist_entry(to);
- ASSERT(dep != erts_this_dist_entry);
- if (dep)
- deref_de = 1;
- } else {
- ASSERT(is_pid(to));
- dep = pid_dist_entry(to);
- }
- if (dep != erts_this_dist_entry) {
- res = remote_demonitor(c_p, dep, ref, to);
- /* remote_demonitor() unlocks link lock on c_p */
- unlock_link = 0;
- }
- else { /* Local monitor */
- if (deref_de) {
- deref_de = 0;
- erts_deref_dist_entry(dep);
- }
- dep = NULL;
- rp = erts_pid2proc_opt(c_p,
- ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK,
- to,
- ERTS_PROC_LOCK_LINK,
- ERTS_P2P_FLG_ALLOW_OTHER_X);
- mon = erts_remove_monitor(&ERTS_P_MONITORS(c_p), ref);
-#ifndef ERTS_SMP
- ASSERT(mon);
-#else
- if (!mon)
- res = ERTS_DEMONITOR_FALSE;
- else
-#endif
- {
- res = ERTS_DEMONITOR_TRUE;
- erts_destroy_monitor(mon);
+ switch (mon->type) {
+ case MON_TIME_OFFSET:
+ *multip = am_true;
+ erts_demonitor_time_offset(ref);
+ res = am_true;
+ break;
+ case MON_ORIGIN:
+ to = mon->u.pid;
+ *multip = am_false;
+ if (is_atom(to)) {
+ /* Monitoring a name at node to */
+ ASSERT(is_node_name_atom(to));
+ dep = erts_sysname_to_connected_dist_entry(to);
+ ASSERT(dep != erts_this_dist_entry);
+ } else if (is_port(to)) {
+ if (port_dist_entry(to) != erts_this_dist_entry) {
+ goto badarg;
+ }
+ res = demonitor_local_port(c_p, ref, to);
+ unlock_link = 0;
+ goto done;
+ } else {
+ ASSERT(is_pid(to));
+ dep = pid_dist_entry(to);
}
- if (rp) {
- ErtsMonitor *rmon;
- rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref);
- if (rp != c_p)
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
- if (rmon != NULL)
- erts_destroy_monitor(rmon);
+ if (dep != erts_this_dist_entry) {
+ res = remote_demonitor(c_p, dep, ref, to);
+ /* remote_demonitor() unlocks link lock on c_p */
+ unlock_link = 0;
}
- else {
- ERTS_SMP_ASSERT_IS_NOT_EXITING(c_p);
+ else { /* Local monitor */
+ demonitor_local_process(c_p, ref, to, &res);
}
-
+ break;
+ default /* case */ :
+badarg:
+ res = am_badarg; /* will be converted to error by caller */
+ *multip = am_false;
+ break;
}
-
- done:
+done:
if (unlock_link)
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_LINK);
- if (deref_de) {
- ASSERT(dep);
- erts_deref_dist_entry(dep);
- }
-
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
- return res;
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
+ BIF_RET(res);
}
BIF_RETTYPE demonitor_1(BIF_ALIST_1)
{
- switch (demonitor(BIF_P, BIF_ARG_1)) {
- case ERTS_DEMONITOR_FALSE:
- case ERTS_DEMONITOR_TRUE:
- BIF_RET(am_true);
- case ERTS_DEMONITOR_YIELD_TRUE:
- ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
- case ERTS_DEMONITOR_BADARG:
- BIF_ERROR(BIF_P, BADARG);
- case ERTS_DEMONITOR_INTERNAL_ERROR:
+ Eterm multi;
+ switch (demonitor(BIF_P, BIF_ARG_1, &multi)) {
+ case am_false:
+ case am_true: BIF_RET(am_true);
+ case THE_NON_VALUE: BIF_RET(THE_NON_VALUE);
+ case am_yield: ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
+ case am_badarg: BIF_ERROR(BIF_P, BADARG);
+
+ case am_internal_error:
default:
ASSERT(! "demonitor(): internal error");
BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
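
The refactoring above replaces the private ERTS_DEMONITOR_* integer codes
with term values, so one return channel carries the success atoms, the
error atoms, and THE_NON_VALUE meaning "a trap has been set up; just
propagate". A compilable sketch of that result protocol; the Term type
and constants are illustrative, not the real ERTS term encoding:

typedef unsigned long Term;

#define THE_NON_VALUE ((Term)0)     /* never a valid term */
#define AM_TRUE       ((Term)1)
#define AM_FALSE      ((Term)2)
#define AM_YIELD      ((Term)3)
#define AM_BADARG     ((Term)4)

static Term demonitor_helper(void) { return AM_TRUE; }   /* stub */

/* Mirror of demonitor_1(): collapse the helper's richer protocol into
 * the BIF result. Returns 0 for a normal result (left in *res), 1 to
 * yield with true, -1 to raise badarg. */
static int demonitor_bif(Term *res)
{
    switch (demonitor_helper()) {
    case AM_FALSE:
    case AM_TRUE:       *res = AM_TRUE;       return 0;
    case THE_NON_VALUE: *res = THE_NON_VALUE; return 0;   /* trapped */
    case AM_YIELD:      *res = AM_TRUE;       return 1;
    case AM_BADARG:
    default:            return -1;
    }
}
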
@@ -507,10 +520,11 @@ BIF_RETTYPE demonitor_1(BIF_ALIST_1)
BIF_RETTYPE demonitor_2(BIF_ALIST_2)
{
- Eterm res = am_true;
- int info = 0;
- int flush = 0;
- Eterm list = BIF_ARG_2;
+ BIF_RETTYPE res = am_true;
+ Eterm multi = am_false;
+ int info = 0;
+ int flush = 0;
+ Eterm list = BIF_ARG_2;
while (is_list(list)) {
Eterm* consp = list_val(list);
@@ -530,20 +544,28 @@ BIF_RETTYPE demonitor_2(BIF_ALIST_2)
if (is_not_nil(list))
goto badarg;
- switch (demonitor(BIF_P, BIF_ARG_1)) {
- case ERTS_DEMONITOR_FALSE:
+ switch (demonitor(BIF_P, BIF_ARG_1, &multi)) {
+ case THE_NON_VALUE:
+ /* If other error occurred or trap has been set up - pass through */
+ BIF_RET(THE_NON_VALUE);
+ case am_false:
if (info)
res = am_false;
- if (flush)
- BIF_TRAP2(flush_monitor_message_trap, BIF_P, BIF_ARG_1, res);
- case ERTS_DEMONITOR_TRUE:
+ if (flush) {
+flush_messages:
+ BIF_TRAP3(flush_monitor_messages_trap, BIF_P,
+ BIF_ARG_1, multi, res);
+ }
+ case am_true:
+ if (multi == am_true && flush)
+ goto flush_messages;
BIF_RET(res);
- case ERTS_DEMONITOR_YIELD_TRUE:
+ case am_yield:
ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
- case ERTS_DEMONITOR_BADARG:
- badarg:
+ case am_badarg:
+badarg:
BIF_ERROR(BIF_P, BADARG);
- case ERTS_DEMONITOR_INTERNAL_ERROR:
+ case am_internal_error:
default:
ASSERT(! "demonitor(): internal error");
BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
@@ -564,7 +586,7 @@ erts_queue_monitor_message(Process *p,
Eterm reason_copy, ref_copy, item_copy;
Uint reason_size, ref_size, item_size, heap_size;
ErlOffHeap *ohp;
- ErlHeapFragment *bp;
+ ErtsMessage *msgp;
reason_size = IS_CONST(reason) ? 0 : size_object(reason);
item_size = IS_CONST(item) ? 0 : size_object(item);
@@ -572,11 +594,8 @@ erts_queue_monitor_message(Process *p,
heap_size = 6+reason_size+ref_size+item_size;
- hp = erts_alloc_message_heap(heap_size,
- &bp,
- &ohp,
- p,
- p_locksp);
+ msgp = erts_alloc_message_heap(p, p_locksp, heap_size,
+ &hp, &ohp);
reason_copy = (IS_CONST(reason)
? reason
@@ -587,86 +606,157 @@ erts_queue_monitor_message(Process *p,
ref_copy = copy_struct(ref, ref_size, &hp, ohp);
tup = TUPLE5(hp, am_DOWN, ref_copy, type, item_copy, reason_copy);
- erts_queue_message(p, p_locksp, bp, tup, NIL
-#ifdef USE_VM_PROBES
- , NIL
-#endif
- );
+ erts_queue_message(p, *p_locksp, msgp, tup, am_system);
}
-static BIF_RETTYPE
-local_pid_monitor(Process *p, Eterm target)
+static Eterm
+local_pid_monitor(Process *p, Eterm target, Eterm mon_ref, int boolean)
{
- BIF_RETTYPE ret;
- Eterm mon_ref;
- Process *rp;
+ Eterm ret = mon_ref;
+ Process *rp;
ErtsProcLocks p_locks = ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK;
- mon_ref = erts_make_ref(p);
- ERTS_BIF_PREP_RET(ret, mon_ref);
if (target == p->common.id) {
return ret;
}
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK);
+ erts_proc_lock(p, ERTS_PROC_LOCK_LINK);
rp = erts_pid2proc_opt(p, p_locks,
target, ERTS_PROC_LOCK_LINK,
ERTS_P2P_FLG_ALLOW_OTHER_X);
if (!rp) {
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_LINK);
p_locks &= ~ERTS_PROC_LOCK_LINK;
- erts_queue_monitor_message(p, &p_locks,
- mon_ref, am_process, target, am_noproc);
+ if (boolean)
+ ret = am_false;
+ else
+ erts_queue_monitor_message(p, &p_locks,
+ mon_ref, am_process, target, am_noproc);
}
else {
ASSERT(rp != p);
+ if (boolean)
+ ret = am_true;
+
erts_add_monitor(&ERTS_P_MONITORS(p), MON_ORIGIN, mon_ref, target, NIL);
erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, mon_ref, p->common.id, NIL);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
}
- erts_smp_proc_unlock(p, p_locks & ~ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(p, p_locks & ~ERTS_PROC_LOCK_MAIN);
return ret;
}
static BIF_RETTYPE
-local_name_monitor(Process *p, Eterm target_name)
+local_port_monitor(Process *origin, Eterm target)
{
- BIF_RETTYPE ret;
- Eterm mon_ref;
- ErtsProcLocks p_locks = ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK;
- Process *rp;
+ BIF_RETTYPE ref = erts_make_ref(origin);
+ Port *port = erts_sig_lookup_port(origin, target);
+ ErtsProcLocks p_locks = ERTS_PROC_LOCK_MAIN;
- mon_ref = erts_make_ref(p);
- ERTS_BIF_PREP_RET(ret, mon_ref);
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK);
- rp = erts_whereis_process(p, p_locks, target_name, ERTS_PROC_LOCK_LINK,
- ERTS_P2P_FLG_ALLOW_OTHER_X);
- if (!rp) {
- DeclareTmpHeap(lhp,3,p);
+ if (!port) {
+res_no_proc:
+ /* Send the DOWN message immediately. Ref is made on the fly because
+ * the caller has never seen it yet. */
+ erts_queue_monitor_message(origin, &p_locks, ref,
+ am_port, target, am_noproc);
+ }
+ else {
+ switch (erts_port_monitor(origin, port, target, &ref)) {
+ case ERTS_PORT_OP_DROPPED:
+ case ERTS_PORT_OP_BADARG:
+ goto res_no_proc;
+ case ERTS_PORT_OP_SCHEDULED:
+ BIF_TRAP3(await_port_send_result_trap, origin, ref,
+ am_busy_port, ref);
+ /* the busy_port atom will never be returned, because it cannot be
+ * returned from erts_port_monitor; it is handled here in case a
+ * future internal API change makes it possible */
+ default:
+ break;
+ }
+ }
+ erts_proc_unlock(origin, p_locks & ~ERTS_PROC_LOCK_MAIN);
+ BIF_RET(ref);
+}
+
+/* Type = process | port :: atom(), 1st argument passed to erlang:monitor/2
+ */
+static BIF_RETTYPE
+local_name_monitor(Process *self, Eterm type, Eterm target_name)
+{
+ BIF_RETTYPE ret = erts_make_ref(self);
+
+ ErtsProcLocks p_locks = ERTS_PROC_LOCK_MAIN | ERTS_PROC_LOCK_LINK;
+ Process *proc = NULL;
+ Port *port = NULL;
+
+ erts_proc_lock(self, ERTS_PROC_LOCK_LINK);
+
+ erts_whereis_name(self, p_locks, target_name,
+ &proc, ERTS_PROC_LOCK_LINK,
+ ERTS_P2P_FLG_ALLOW_OTHER_X,
+ &port, 0);
+
+ /* If the name is not registered,
+ * or if we asked for proc and got a port,
+ * or if we asked for port and got a proc,
+ * we just send the 'DOWN' message.
+ */
+ if ((!proc && !port) ||
+ (type == am_process && port) ||
+ (type == am_port && proc)) {
+ DeclareTmpHeap(lhp,3,self);
Eterm item;
- UseTmpHeap(3,p);
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
+ UseTmpHeap(3,self);
+
+ erts_proc_unlock(self, ERTS_PROC_LOCK_LINK);
p_locks &= ~ERTS_PROC_LOCK_LINK;
+
item = TUPLE2(lhp, target_name, erts_this_dist_entry->sysname);
- erts_queue_monitor_message(p, &p_locks,
- mon_ref, am_process, item, am_noproc);
- UnUseTmpHeap(3,p);
+ erts_queue_monitor_message(self, &p_locks,
+ ret,
+ type, /* = process|port :: atom() */
+ item, am_noproc);
+ UnUseTmpHeap(3,self);
+ }
+ else if (port) {
+ erts_proc_unlock(self, p_locks & ~ERTS_PROC_LOCK_MAIN);
+ p_locks &= ~ERTS_PROC_LOCK_MAIN;
+
+ switch (erts_port_monitor(self, port, target_name, &ret)) {
+ case ERTS_PORT_OP_DONE:
+ return ret;
+ case ERTS_PORT_OP_SCHEDULED: { /* Scheduled a signal */
+ ASSERT(is_internal_ordinary_ref(ret));
+ BIF_TRAP3(await_port_send_result_trap, self,
+ ret, am_true, ret);
+ /* bif_trap returns */
+ } break;
+ default:
+ goto badarg;
+ }
}
- else if (rp != p) {
- erts_add_monitor(&ERTS_P_MONITORS(p), MON_ORIGIN, mon_ref, rp->common.id,
- target_name);
- erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, mon_ref, p->common.id,
- target_name);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ else if (proc != self) {
+ erts_add_monitor(&ERTS_P_MONITORS(self), MON_ORIGIN, ret,
+ proc->common.id, target_name);
+ erts_add_monitor(&ERTS_P_MONITORS(proc), MON_TARGET, ret,
+ self->common.id, target_name);
+ erts_proc_unlock(proc, ERTS_PROC_LOCK_LINK);
}
- erts_smp_proc_unlock(p, p_locks & ~ERTS_PROC_LOCK_MAIN);
-
- return ret;
+ if (p_locks) {
+ erts_proc_unlock(self, p_locks & ~ERTS_PROC_LOCK_MAIN);
+ }
+ BIF_RET(ret);
+badarg:
+ if (p_locks) {
+ erts_proc_unlock(self, p_locks & ~ERTS_PROC_LOCK_MAIN);
+ }
+ BIF_ERROR(self, BADARG);
}
static BIF_RETTYPE
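
In the local_name_monitor() hunk above, a registered name can now resolve
to either a process or a port, and the 'DOWN' message is sent immediately
when the name is unregistered or resolves to the wrong kind of entity.
The condition is easy to misread, so here it is isolated as a small
truth-table function (names illustrative):

#include <stdbool.h>

typedef enum { MON_PROCESS, MON_PORT } MonType;

static bool down_immediately(MonType type, bool have_proc, bool have_port)
{
    if (!have_proc && !have_port)
        return true;    /* name not registered at all */
    if (type == MON_PROCESS && have_port)
        return true;    /* asked for a process, name names a port */
    if (type == MON_PORT && have_proc)
        return true;    /* asked for a port, name names a process */
    return false;       /* right kind found: set up a real monitor */
}
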
@@ -677,20 +767,20 @@ remote_monitor(Process *p, Eterm bifarg1, Eterm bifarg2,
BIF_RETTYPE ret;
int code;
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK);
+ erts_proc_lock(p, ERTS_PROC_LOCK_LINK);
code = erts_dsig_prepare(&dsd, dep, p, ERTS_DSP_RLOCK, 0);
switch (code) {
case ERTS_DSIG_PREP_NOT_ALIVE:
/* Let the dmonitor_p trap handle it */
case ERTS_DSIG_PREP_NOT_CONNECTED:
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_LINK);
ERTS_BIF_PREP_TRAP2(ret, dmonitor_p_trap, p, bifarg1, bifarg2);
break;
case ERTS_DSIG_PREP_CONNECTED:
if (!(dep->flags & DFLAG_DIST_MONITOR)
|| (byname && !(dep->flags & DFLAG_DIST_MONITOR_NAME))) {
- erts_smp_de_runlock(dep);
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
+ erts_de_runlock(dep);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_LINK);
ERTS_BIF_PREP_ERROR(ret, p, BADARG);
}
else {
@@ -709,16 +799,16 @@ remote_monitor(Process *p, Eterm bifarg1, Eterm bifarg2,
d_name = NIL;
}
- erts_smp_de_links_lock(dep);
+ erts_de_links_lock(dep);
erts_add_monitor(&ERTS_P_MONITORS(p), MON_ORIGIN, mon_ref, p_trgt,
p_name);
erts_add_monitor(&(dep->monitors), MON_TARGET, mon_ref, p->common.id,
d_name);
- erts_smp_de_links_unlock(dep);
- erts_smp_de_runlock(dep);
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
+ erts_de_links_unlock(dep);
+ erts_de_runlock(dep);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_LINK);
code = erts_dsig_send_monitor(&dsd, p->common.id, target, mon_ref);
if (code == ERTS_DSIG_SEND_YIELD)
@@ -733,7 +823,7 @@ remote_monitor(Process *p, Eterm bifarg1, Eterm bifarg2,
break;
}
- return ret;
+ BIF_RET(ret);
}
BIF_RETTYPE monitor_2(BIF_ALIST_2)
@@ -741,59 +831,77 @@ BIF_RETTYPE monitor_2(BIF_ALIST_2)
Eterm target = BIF_ARG_2;
BIF_RETTYPE ret;
DistEntry *dep = NULL;
- int deref_de = 0;
- /* Only process monitors are implemented */
+ /* Process, port and time_offset monitors are implemented */
- if (BIF_ARG_1 != am_process) {
- goto error;
+ switch (BIF_ARG_1) {
+ case am_time_offset: {
+ Eterm ref;
+ if (BIF_ARG_2 != am_clock_service) {
+ goto badarg;
+ }
+ ref = erts_make_ref(BIF_P);
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
+ erts_add_monitor(&ERTS_P_MONITORS(BIF_P), MON_TIME_OFFSET,
+ ref, am_clock_service, NIL);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
+ erts_monitor_time_offset(BIF_P->common.id, ref);
+ BIF_RET(ref);
+ }
+ case am_process:
+ case am_port:
+ break;
+ default:
+ goto badarg;
}
- if (is_internal_pid(target)) {
- local_pid:
- ret = local_pid_monitor(BIF_P, target);
- } else if (is_external_pid(target)) {
+ if (is_internal_pid(target) && BIF_ARG_1 == am_process) {
+local_pid:
+ ret = local_pid_monitor(BIF_P, target, erts_make_ref(BIF_P), 0);
+ } else if (is_external_pid(target) && BIF_ARG_1 == am_process) {
dep = external_pid_dist_entry(target);
if (dep == erts_this_dist_entry)
goto local_pid;
ret = remote_monitor(BIF_P, BIF_ARG_1, BIF_ARG_2, dep, target, 0);
+ } else if (is_internal_port(target) && BIF_ARG_1 == am_port) {
+local_port:
+ ret = local_port_monitor(BIF_P, target);
+ } else if (is_external_port(target) && BIF_ARG_1 == am_port) {
+ dep = external_port_dist_entry(target);
+ if (dep == erts_this_dist_entry) {
+ goto local_port;
+ }
+ goto badarg; /* Remote ports are not supported */
} else if (is_atom(target)) {
- ret = local_name_monitor(BIF_P, target);
+ ret = local_name_monitor(BIF_P, BIF_ARG_1, target);
} else if (is_tuple(target)) {
Eterm *tp = tuple_val(target);
Eterm remote_node;
Eterm name;
- if (arityval(*tp) != 2)
- goto error;
+ if (arityval(*tp) != 2) {
+ goto badarg;
+ }
remote_node = tp[2];
name = tp[1];
if (!is_atom(remote_node) || !is_atom(name)) {
- goto error;
+ goto badarg;
}
if (!erts_is_alive && remote_node != am_Noname) {
- goto error; /* Remote monitor from (this) undistributed node */
+ goto badarg; /* Remote monitor from (this) undistributed node */
}
dep = erts_sysname_to_connected_dist_entry(remote_node);
if (dep == erts_this_dist_entry) {
- deref_de = 1;
- ret = local_name_monitor(BIF_P, name);
+ ret = local_name_monitor(BIF_P, BIF_ARG_1, name);
} else {
- if (dep)
- deref_de = 1;
ret = remote_monitor(BIF_P, BIF_ARG_1, BIF_ARG_2, dep, name, 1);
}
} else {
- error:
+badarg:
ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
}
- if (deref_de) {
- deref_de = 0;
- erts_deref_dist_entry(dep);
- }
return ret;
}
-
/**********************************************************************/
/* this is a combination of the spawn and link BIFs */
@@ -802,7 +910,7 @@ BIF_RETTYPE spawn_link_3(BIF_ALIST_3)
ErlSpawnOpts so;
Eterm pid;
- so.flags = SPO_LINK;
+ so.flags = erts_default_spo_flags|SPO_LINK;
pid = erl_create_process(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, &so);
if (is_non_value(pid)) {
BIF_ERROR(BIF_P, so.error_code);
@@ -839,11 +947,13 @@ BIF_RETTYPE spawn_opt_1(BIF_ALIST_1)
/*
* Store default values for options.
*/
- so.flags = SPO_USE_ARGS;
+ so.flags = erts_default_spo_flags|SPO_USE_ARGS;
so.min_heap_size = H_MIN_SIZE;
so.min_vheap_size = BIN_VH_MIN_SIZE;
+ so.max_heap_size = H_MAX_SIZE;
+ so.max_heap_flags = H_MAX_FLAGS;
so.priority = PRIORITY_NORMAL;
- so.max_gen_gcs = (Uint16) erts_smp_atomic32_read_nob(&erts_max_gen_gcs);
+ so.max_gen_gcs = (Uint16) erts_atomic32_read_nob(&erts_max_gen_gcs);
so.scheduler = 0;
/*
@@ -874,6 +984,19 @@ BIF_RETTYPE spawn_opt_1(BIF_ALIST_1)
so.priority = PRIORITY_LOW;
else
goto error;
+ } else if (arg == am_message_queue_data) {
+ switch (val) {
+ case am_on_heap:
+ so.flags &= ~SPO_OFF_HEAP_MSGQ;
+ so.flags |= SPO_ON_HEAP_MSGQ;
+ break;
+ case am_off_heap:
+ so.flags &= ~SPO_ON_HEAP_MSGQ;
+ so.flags |= SPO_OFF_HEAP_MSGQ;
+ break;
+ default:
+ goto error;
+ }
} else if (arg == am_min_heap_size && is_small(val)) {
Sint min_heap_size = signed_val(val);
if (min_heap_size < 0) {
@@ -883,6 +1006,9 @@ BIF_RETTYPE spawn_opt_1(BIF_ALIST_1)
} else {
so.min_heap_size = erts_next_heap_size(min_heap_size, 0);
}
+ } else if (arg == am_max_heap_size) {
+ if (!erts_max_heap_size(val, &so.max_heap_size, &so.max_heap_flags))
+ goto error;
} else if (arg == am_min_bin_vheap_size && is_small(val)) {
Sint min_vheap_size = signed_val(val);
if (min_vheap_size < 0) {
@@ -916,6 +1042,10 @@ BIF_RETTYPE spawn_opt_1(BIF_ALIST_1)
goto error;
}
+ if (so.max_heap_size != 0 && so.max_heap_size < so.min_heap_size) {
+ goto error;
+ }
+
/*
* Spawn the process.
*/
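
The new option check treats a max_heap_size of 0 as "unlimited", so the
only rejected combination is a nonzero maximum below the minimum heap
size. A trivial standalone version of the check; 233 words is used here
only as an illustrative default minimum:

#include <stdio.h>

static int heap_limits_ok(unsigned min_heap_size, unsigned max_heap_size)
{
    if (max_heap_size != 0 && max_heap_size < min_heap_size)
        return 0;       /* cap would undercut the guaranteed minimum */
    return 1;
}

int main(void)
{
    printf("%d\n", heap_limits_ok(233, 0));     /* 1: unlimited        */
    printf("%d\n", heap_limits_ok(233, 1000));  /* 1: cap above min    */
    printf("%d\n", heap_limits_ok(233, 100));   /* 0: rejected, badarg */
    return 0;
}
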
@@ -945,6 +1075,7 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
Process *rp;
DistEntry *dep;
ErtsLink *l = NULL, *rl = NULL;
+ ErtsProcLocks cp_locks = ERTS_PROC_LOCK_MAIN;
/*
* SMP specific note concerning incoming exit signals:
@@ -959,19 +1090,17 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
*/
if (IS_TRACED_FL(BIF_P, F_TRACE_PROCS)) {
- trace_proc(BIF_P, BIF_P, am_unlink, BIF_ARG_1);
+ trace_proc(BIF_P, cp_locks, BIF_P, am_unlink, BIF_ARG_1);
}
if (is_internal_port(BIF_ARG_1)) {
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
-#ifdef ERTS_SMP
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
if (ERTS_PROC_PENDING_EXIT(BIF_P))
goto handle_pending_exit;
-#endif
l = erts_remove_link(&ERTS_P_LINKS(BIF_P), BIF_ARG_1);
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
if (l) {
Port *prt;
@@ -990,7 +1119,7 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
res = erts_port_unlink(BIF_P, prt, BIF_P->common.id, refp);
if (refp && res == ERTS_PORT_OP_SCHEDULED) {
- ASSERT(is_internal_ref(ref));
+ ASSERT(is_internal_ordinary_ref(ref));
BIF_TRAP3(await_port_send_result_trap, BIF_P, ref, am_true, am_true);
}
}
@@ -1013,14 +1142,12 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
/* Blind removal, we might have trapped or anything, this leaves
us in a state where monitors might be inconsistent, but the dist
code should take care of it. */
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
-#ifdef ERTS_SMP
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
if (ERTS_PROC_PENDING_EXIT(BIF_P))
goto handle_pending_exit;
-#endif
l = erts_remove_link(&ERTS_P_LINKS(BIF_P), BIF_ARG_1);
- erts_smp_proc_unlock(BIF_P,
+ erts_proc_unlock(BIF_P,
ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
if (l)
@@ -1062,22 +1189,20 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
/* Internal pid... */
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
+
+ cp_locks |= ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS;
/* get process struct */
- rp = erts_pid2proc_opt(BIF_P, (ERTS_PROC_LOCK_MAIN
- | ERTS_PROC_LOCK_LINK
- | ERTS_PROC_LOCK_STATUS),
+ rp = erts_pid2proc_opt(BIF_P, cp_locks,
BIF_ARG_1, ERTS_PROC_LOCK_LINK,
ERTS_P2P_FLG_ALLOW_OTHER_X);
-#ifdef ERTS_SMP
if (ERTS_PROC_PENDING_EXIT(BIF_P)) {
if (rp && rp != BIF_P)
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
goto handle_pending_exit;
}
-#endif
/* unlink and ignore errors */
l = erts_remove_link(&ERTS_P_LINKS(BIF_P), BIF_ARG_1);
@@ -1085,7 +1210,7 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
erts_destroy_link(l);
if (!rp) {
- ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P);
+ ERTS_ASSERT_IS_NOT_EXITING(BIF_P);
}
else {
rl = erts_remove_link(&ERTS_P_LINKS(rp), BIF_P->common.id);
@@ -1093,26 +1218,27 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
erts_destroy_link(rl);
if (IS_TRACED_FL(rp, F_TRACE_PROCS) && rl != NULL) {
- trace_proc(BIF_P, rp, am_getting_unlinked, BIF_P->common.id);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_STATUS);
+ cp_locks &= ~ERTS_PROC_LOCK_STATUS;
+ trace_proc(BIF_P, (ERTS_PROC_LOCK_MAIN | ERTS_PROC_LOCK_LINK),
+ rp, am_getting_unlinked, BIF_P->common.id);
}
if (rp != BIF_P)
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
}
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(BIF_P, cp_locks & ~ERTS_PROC_LOCK_MAIN);
BIF_RET(am_true);
-#ifdef ERTS_SMP
handle_pending_exit:
erts_handle_pending_exit(BIF_P, (ERTS_PROC_LOCK_MAIN
| ERTS_PROC_LOCK_LINK
| ERTS_PROC_LOCK_STATUS));
ASSERT(ERTS_PROC_IS_EXITING(BIF_P));
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
ERTS_BIF_EXITED(BIF_P);
-#endif
}
BIF_RETTYPE hibernate_3(BIF_ALIST_3)
@@ -1124,7 +1250,11 @@ BIF_RETTYPE hibernate_3(BIF_ALIST_3)
*/
Eterm reg[3];
- if (erts_hibernate(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, reg)) {
+ reg[0] = BIF_ARG_1;
+ reg[1] = BIF_ARG_2;
+ reg[2] = BIF_ARG_3;
+
+ if (erts_hibernate(BIF_P, reg)) {
/*
* If hibernate succeeded, TRAP. The process will be wait in a
* hibernated state if its state is inactive (!ERTS_PSFLG_ACTIVE);
@@ -1393,7 +1523,7 @@ BIF_RETTYPE exit_2(BIF_ALIST_2)
ERTS_BIF_CHK_EXITED(BIF_P);
if (refp && res == ERTS_PORT_OP_SCHEDULED) {
- ASSERT(is_internal_ref(ref));
+ ASSERT(is_internal_ordinary_ref(ref));
BIF_TRAP3(await_port_send_result_trap, BIF_P, ref, am_true, am_true);
}
@@ -1445,7 +1575,7 @@ BIF_RETTYPE exit_2(BIF_ALIST_2)
if (BIF_ARG_1 == BIF_P->common.id) {
rp_locks = ERTS_PROC_LOCKS_ALL;
rp = BIF_P;
- erts_smp_proc_lock(rp, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_lock(rp, ERTS_PROC_LOCKS_ALL_MINOR);
}
else {
rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
@@ -1467,12 +1597,10 @@ BIF_RETTYPE exit_2(BIF_ALIST_2)
NIL,
NULL,
BIF_P == rp ? ERTS_XSIG_FLG_NO_IGN_NORMAL : 0);
-#ifdef ERTS_SMP
if (rp == BIF_P)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
-#endif
+ erts_proc_unlock(rp, rp_locks);
/*
* We may have exited ourselves and may have to take action.
*/
@@ -1511,7 +1639,32 @@ static BIF_RETTYPE process_flag_aux(Process *BIF_P,
scb->n = 0;
}
- scb = ERTS_PROC_SET_SAVED_CALLS_BUF(rp, ERTS_PROC_LOCK_MAIN, scb);
+#ifdef HIPE
+ if (rp->flags & F_HIPE_MODE) {
+ ASSERT(!ERTS_PROC_GET_SAVED_CALLS_BUF(rp));
+ scb = ERTS_PROC_SET_SUSPENDED_SAVED_CALLS_BUF(rp, scb);
+ }
+ else
+#endif
+ {
+#ifdef HIPE
+ ASSERT(!ERTS_PROC_GET_SUSPENDED_SAVED_CALLS_BUF(rp));
+#endif
+ scb = ERTS_PROC_SET_SAVED_CALLS_BUF(rp, scb);
+ if (rp == BIF_P && ((scb && i == 0) || (!scb && i != 0))) {
+ /* Adjust fcalls to match save calls setting... */
+ if (i == 0)
+ BIF_P->fcalls += CONTEXT_REDS; /* disabled it */
+ else
+ BIF_P->fcalls -= CONTEXT_REDS; /* enabled it */
+
+ /*
+ * Make sure we reschedule immediately so the
+ * change takes effect at once.
+ */
+ ERTS_VBUMP_ALL_REDS(BIF_P);
+ }
+ }
if (!scb)
old_value = make_small(0);
@@ -1520,12 +1673,7 @@ static BIF_RETTYPE process_flag_aux(Process *BIF_P,
erts_free(ERTS_ALC_T_CALLS_BUF, (void *) scb);
}
- /* Make sure the process in question is rescheduled
- immediately, if it's us, so the call saving takes effect. */
- if (rp == BIF_P)
- BIF_RET2(old_value, CONTEXT_REDS);
- else
- BIF_RET(old_value);
+ BIF_RET(old_value);
}
error:
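The fcalls adjustment in the hunk above keeps the reduction accounting consistent when save_calls is toggled: a process with call saving enabled runs on a budget shifted by CONTEXT_REDS. A minimal sketch of that budget shift in plain C (struct and names here are illustrative, not the ERTS types):

    #include <stdio.h>

    #define CONTEXT_REDS 4000            /* reductions per time slice */

    struct proc { int fcalls; int save_calls; };

    /* Toggle call saving and shift the remaining reduction budget so
     * the work left in this slice stays the same, as the hunk above
     * does for BIF_P->fcalls. */
    static void set_save_calls(struct proc *p, int enable)
    {
        if (enable && !p->save_calls)
            p->fcalls -= CONTEXT_REDS;   /* enabled it */
        else if (!enable && p->save_calls)
            p->fcalls += CONTEXT_REDS;   /* disabled it */
        p->save_calls = enable;
    }

    int main(void)
    {
        struct proc p = { CONTEXT_REDS, 0 };
        set_save_calls(&p, 1);
        printf("fcalls after enable: %d\n", p.fcalls);   /* 0 */
        set_save_calls(&p, 0);
        printf("fcalls after disable: %d\n", p.fcalls);  /* 4000 */
        return 0;
    }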
@@ -1539,9 +1687,7 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2)
if (is_not_atom(BIF_ARG_2)) {
goto error;
}
- old_value = erts_proc_set_error_handler(BIF_P,
- ERTS_PROC_LOCK_MAIN,
- BIF_ARG_2);
+ old_value = erts_proc_set_error_handler(BIF_P, BIF_ARG_2);
BIF_RET(old_value);
}
else if (BIF_ARG_1 == am_priority) {
@@ -1566,18 +1712,19 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2)
* true. For more info, see implementation of
* erts_send_exit_signal().
*/
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCKS_XSIG_SEND);
if (trap_exit)
- state = erts_smp_atomic32_read_bor_mb(&BIF_P->state,
+ state = erts_atomic32_read_bor_mb(&BIF_P->state,
ERTS_PSFLG_TRAP_EXIT);
else
- state = erts_smp_atomic32_read_band_mb(&BIF_P->state,
+ state = erts_atomic32_read_band_mb(&BIF_P->state,
~ERTS_PSFLG_TRAP_EXIT);
-#ifdef ERTS_SMP
- if (ERTS_PROC_PENDING_EXIT(BIF_P)) {
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCKS_XSIG_SEND);
+
+ if (state & ERTS_PSFLG_PENDING_EXIT) {
erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN);
ERTS_BIF_EXITED(BIF_P);
}
-#endif
old_value = (state & ERTS_PSFLG_TRAP_EXIT) ? am_true : am_false;
BIF_RET(old_value);
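erts_atomic32_read_bor_mb and erts_atomic32_read_band_mb atomically set or clear bits and return the previous state word, which is why the code above can derive old_value from the same operation that flips ERTS_PSFLG_TRAP_EXIT. The equivalent pattern with C11 atomics (a sketch; the flag value is made up):

    #include <stdatomic.h>
    #include <stdio.h>

    #define PSFLG_TRAP_EXIT (1u << 3)    /* illustrative flag bit */

    int main(void)
    {
        atomic_uint state = 0;

        /* Set the flag; fetch_or returns the word *before* the update,
         * so one atomic op both updates and reports the old setting. */
        unsigned old = atomic_fetch_or(&state, PSFLG_TRAP_EXIT);
        printf("was set: %s\n", (old & PSFLG_TRAP_EXIT) ? "yes" : "no");

        /* Clear the flag; again the previous state comes back. */
        old = atomic_fetch_and(&state, ~PSFLG_TRAP_EXIT);
        printf("was set: %s\n", (old & PSFLG_TRAP_EXIT) ? "yes" : "no");
        return 0;
    }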
@@ -1595,19 +1742,17 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2)
if (sched == 0) {
new = NULL;
- state = erts_smp_atomic32_read_band_mb(&BIF_P->state,
+ state = erts_atomic32_read_band_mb(&BIF_P->state,
~ERTS_PSFLG_BOUND);
}
else {
new = erts_schedid2runq(sched);
-#ifdef ERTS_SMP
erts_atomic_set_nob(&BIF_P->run_queue, (erts_aint_t) new);
-#endif
- state = erts_smp_atomic32_read_bor_mb(&BIF_P->state,
+ state = erts_atomic32_read_bor_mb(&BIF_P->state,
ERTS_PSFLG_BOUND);
}
- curr = ERTS_GET_SCHEDULER_DATA_FROM_PROC(BIF_P)->run_queue;
+ curr = erts_proc_sched_data(BIF_P)->run_queue;
old = (ERTS_PSFLG_BOUND & state) ? curr : NULL;
ASSERT(!old || old == curr);
@@ -1652,6 +1797,29 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2)
}
BIF_RET(old_value);
}
+ else if (BIF_ARG_1 == am_max_heap_size) {
+ Eterm *hp;
+ Uint sz = 0, max_heap_size, max_heap_flags;
+
+ if (!erts_max_heap_size(BIF_ARG_2, &max_heap_size, &max_heap_flags))
+ goto error;
+
+ if ((max_heap_size < MIN_HEAP_SIZE(BIF_P) && max_heap_size != 0))
+ goto error;
+
+ erts_max_heap_size_map(MAX_HEAP_SIZE_GET(BIF_P), MAX_HEAP_SIZE_FLAGS_GET(BIF_P), NULL, &sz);
+ hp = HAlloc(BIF_P, sz);
+ old_value = erts_max_heap_size_map(MAX_HEAP_SIZE_GET(BIF_P), MAX_HEAP_SIZE_FLAGS_GET(BIF_P), &hp, NULL);
+ MAX_HEAP_SIZE_SET(BIF_P, max_heap_size);
+ MAX_HEAP_SIZE_FLAGS_SET(BIF_P, max_heap_flags);
+ BIF_RET(old_value);
+ }
+ else if (BIF_ARG_1 == am_message_queue_data) {
+ old_value = erts_change_message_queue_management(BIF_P, BIF_ARG_2);
+ if (is_non_value(old_value))
+ goto error;
+ BIF_RET(old_value);
+ }
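The max_heap_size branch above calls erts_max_heap_size_map twice: first with a NULL output pointer to accumulate the needed heap size, then again after HAlloc to build the old value. That measure-allocate-fill pattern in generic C (build_info is a hypothetical stand-in, not an ERTS function):

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical builder: with out == NULL it only reports the size
     * needed; with a buffer it writes the result.  Two calls, one walk. */
    static size_t build_info(char *out, size_t cap, int size, int flags)
    {
        return (size_t) snprintf(out, out ? cap : 0,
                                 "{size=%d,flags=%#x}", size, flags) + 1;
    }

    int main(void)
    {
        size_t sz = build_info(NULL, 0, 233, 0x3);   /* pass 1: measure */
        char *buf = malloc(sz);
        if (!buf)
            return 1;
        build_info(buf, sz, 233, 0x3);               /* pass 2: fill */
        puts(buf);
        free(buf);
        return 0;
    }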
else if (BIF_ARG_1 == am_sensitive) {
Uint is_sensitive;
if (BIF_ARG_2 == am_true) {
@@ -1661,7 +1829,7 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2)
} else {
goto error;
}
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR);
old_value = (ERTS_TRACE_FLAGS(BIF_P) & F_SENSITIVE
? am_true
: am_false);
@@ -1670,8 +1838,10 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2)
} else {
ERTS_TRACE_FLAGS(BIF_P) &= ~F_SENSITIVE;
}
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR);
- BIF_RET(old_value);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR);
+ /* make sure to bump all reds so that we are
+ rescheduled immediately and the setting takes effect */
+ BIF_RET2(old_value, CONTEXT_REDS);
}
else if (BIF_ARG_1 == am_monitor_nodes) {
/*
@@ -1710,15 +1880,19 @@ BIF_RETTYPE process_flag_3(BIF_ALIST_3)
Process *rp;
Eterm res;
- if ((rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
- BIF_ARG_1, ERTS_PROC_LOCK_MAIN)) == NULL) {
+ rp = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN,
+ BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
+ if (rp == ERTS_PROC_LOCK_BUSY)
+ ERTS_BIF_YIELD3(bif_export[BIF_process_flag_3], BIF_P,
+ BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+
+ if (!rp)
BIF_ERROR(BIF_P, BADARG);
- }
res = process_flag_aux(BIF_P, rp, BIF_ARG_2, BIF_ARG_3);
if (rp != BIF_P)
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
return res;
}
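erts_pid2proc_not_running can return ERTS_PROC_LOCK_BUSY, and the BIF then yields with its own arguments instead of blocking the scheduler on a contended lock. The same trylock-and-retreat shape with POSIX threads (a sketch; the ERTS version traps back into the scheduler rather than spinning in a loop):

    #include <errno.h>
    #include <pthread.h>
    #include <sched.h>
    #include <stdio.h>

    static pthread_mutex_t target = PTHREAD_MUTEX_INITIALIZER;

    /* Try to take the target's lock; on contention back off and let
     * the caller restart the whole operation, as the yielding BIF does. */
    static int lock_or_yield(void)
    {
        int err = pthread_mutex_trylock(&target);
        if (err == EBUSY) {
            sched_yield();           /* analogue of ERTS_BIF_YIELD3 */
            return 0;                /* caller retries from the start */
        }
        return err == 0;
    }

    int main(void)
    {
        while (!lock_or_yield())
            ;                        /* uncontended here: first try wins */
        puts("locked");
        pthread_mutex_unlock(&target);
        return 0;
    }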
@@ -1777,6 +1951,8 @@ BIF_RETTYPE whereis_1(BIF_ALIST_1)
* erlang:'!'/2
*/
+HIPE_WRAPPER_BIF_DISABLE_GC(ebif_bang, 2)
+
BIF_RETTYPE
ebif_bang_2(BIF_ALIST_2)
{
@@ -1795,34 +1971,35 @@ ebif_bang_2(BIF_ALIST_2)
#define SEND_USER_ERROR (-5)
#define SEND_INTERNAL_ERROR (-6)
#define SEND_AWAIT_RESULT (-7)
+#define SEND_YIELD_CONTINUE (-8)
-Sint do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp);
static Sint remote_send(Process *p, DistEntry *dep,
- Eterm to, Eterm full_to, Eterm msg, int suspend)
+ Eterm to, Eterm full_to, Eterm msg,
+ ErtsSendContext* ctx)
{
Sint res;
int code;
- ErtsDSigData dsd;
ASSERT(is_atom(to) || is_external_pid(to));
- code = erts_dsig_prepare(&dsd, dep, p, ERTS_DSP_NO_LOCK, !suspend);
+ ctx->dep = dep;
+ code = erts_dsig_prepare(&ctx->dsd, dep, p, ERTS_DSP_NO_LOCK, !ctx->suspend);
switch (code) {
case ERTS_DSIG_PREP_NOT_ALIVE:
case ERTS_DSIG_PREP_NOT_CONNECTED:
res = SEND_TRAP;
break;
case ERTS_DSIG_PREP_WOULD_SUSPEND:
- ASSERT(!suspend);
+ ASSERT(!ctx->suspend);
res = SEND_YIELD;
break;
case ERTS_DSIG_PREP_CONNECTED: {
if (is_atom(to))
- code = erts_dsig_send_reg_msg(&dsd, to, msg);
+ code = erts_dsig_send_reg_msg(to, msg, ctx);
else
- code = erts_dsig_send_msg(&dsd, to, msg);
+ code = erts_dsig_send_msg(to, msg, ctx);
/*
* Note that reductions have been bumped on calling
* process by erts_dsig_send_reg_msg() or
@@ -1830,6 +2007,8 @@ static Sint remote_send(Process *p, DistEntry *dep,
*/
if (code == ERTS_DSIG_SEND_YIELD)
res = SEND_YIELD_RETURN;
+ else if (code == ERTS_DSIG_SEND_CONTINUE)
+ res = SEND_YIELD_CONTINUE;
else
res = 0;
break;
@@ -1840,7 +2019,7 @@ static Sint remote_send(Process *p, DistEntry *dep,
}
if (res >= 0) {
- if (IS_TRACED(p))
+ if (IS_TRACED_FL(p, F_TRACE_SEND))
trace_send(p, full_to, msg);
if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
save_calls(p, &exp_send);
@@ -1849,8 +2028,9 @@ static Sint remote_send(Process *p, DistEntry *dep,
return res;
}
-Sint
-do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
+static Sint
+do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext *ctx)
+{
Eterm portid;
Port *pt;
Process* rp;
@@ -1858,7 +2038,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
Eterm* tp;
if (is_internal_pid(to)) {
- if (IS_TRACED(p))
+ if (IS_TRACED_FL(p, F_TRACE_SEND))
trace_send(p, to, msg);
if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
save_calls(p, &exp_send);
@@ -1881,13 +2061,18 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
erts_send_error_to_logger(p->group_leader, dsbufp);
return 0;
}
- return remote_send(p, dep, to, to, msg, suspend);
+ return remote_send(p, dep, to, to, msg, ctx);
} else if (is_atom(to)) {
Eterm id = erts_whereis_name_to_id(p, to);
- rp = erts_proc_lookup(id);
- if (rp)
+ rp = erts_proc_lookup_raw(id);
+ if (rp) {
+ if (IS_TRACED_FL(p, F_TRACE_SEND))
+ trace_send(p, to, msg);
+ if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
+ save_calls(p, &exp_send);
goto send_message;
+ }
pt = erts_port_lookup(id,
(erts_port_synchronous_ops
@@ -1898,7 +2083,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
goto port_common;
}
- if (IS_TRACED(p))
+ if (IS_TRACED_FL(p, F_TRACE_SEND))
trace_send(p, to, msg);
if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
save_calls(p, &exp_send);
@@ -1929,23 +2114,32 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
port_common:
ret_val = 0;
-
+
if (pt) {
- int ps_flags = suspend ? 0 : ERTS_PORT_SIG_FLG_NOSUSPEND;
+ int ps_flags = ctx->suspend ? 0 : ERTS_PORT_SIG_FLG_NOSUSPEND;
*refp = NIL;
+ if (IS_TRACED_FL(p, F_TRACE_SEND)) /* trace once only !! */
+ trace_send(p, portid, msg);
+
+ if (have_seqtrace(SEQ_TRACE_TOKEN(p))) {
+ seq_trace_update_send(p);
+ seq_trace_output(SEQ_TRACE_TOKEN(p), msg,
+ SEQ_TRACE_SEND, portid, p);
+ }
+
switch (erts_port_command(p, ps_flags, pt, msg, refp)) {
case ERTS_PORT_OP_CALLER_EXIT:
/* We are exiting... */
return SEND_USER_ERROR;
case ERTS_PORT_OP_BUSY:
/* Nothing has been sent */
- if (suspend)
+ if (ctx->suspend)
erts_suspend(p, ERTS_PROC_LOCK_MAIN, pt);
return SEND_YIELD;
case ERTS_PORT_OP_BUSY_SCHEDULED:
/* Message was sent */
- if (suspend) {
+ if (ctx->suspend) {
erts_suspend(p, ERTS_PROC_LOCK_MAIN, pt);
ret_val = SEND_YIELD_RETURN;
break;
@@ -1953,7 +2147,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
/* Fall through */
case ERTS_PORT_OP_SCHEDULED:
if (is_not_nil(*refp)) {
- ASSERT(is_internal_ref(*refp));
+ ASSERT(is_internal_ordinary_ref(*refp));
ret_val = SEND_AWAIT_RESULT;
}
break;
@@ -1966,22 +2160,10 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
break;
}
}
-
- if (IS_TRACED(p)) /* trace once only !! */
- trace_send(p, portid, msg);
+
if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
save_calls(p, &exp_send);
-
- if (SEQ_TRACE_TOKEN(p) != NIL
-#ifdef USE_VM_PROBES
- && SEQ_TRACE_TOKEN(p) != am_have_dt_utag
-#endif
- ) {
- seq_trace_update_send(p);
- seq_trace_output(SEQ_TRACE_TOKEN(p), msg,
- SEQ_TRACE_SEND, portid, p);
- }
-
+
if (ERTS_PROC_IS_EXITING(p)) {
KILL_CATCHES(p); /* Must exit */
return SEND_USER_ERROR;
@@ -2003,8 +2185,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
if (dep == erts_this_dist_entry) {
Eterm id;
- erts_deref_dist_entry(dep);
- if (IS_TRACED(p))
+ if (IS_TRACED_FL(p, F_TRACE_SEND))
trace_send(p, to, msg);
if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
save_calls(p, &exp_send);
@@ -2025,12 +2206,15 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
return 0;
}
- ret = remote_send(p, dep, tp[1], to, msg, suspend);
- if (dep)
- erts_deref_dist_entry(dep);
+ ret = remote_send(p, dep, tp[1], to, msg, ctx);
+ if (ret == SEND_YIELD_CONTINUE) {
+ if (dep)
+ erts_ref_dist_entry(dep);
+ ctx->dep_to_deref = dep;
+ }
return ret;
} else {
- if (IS_TRACED(p)) /* XXX Is this really neccessary ??? */
+ if (IS_TRACED_FL(p, F_TRACE_SEND))
trace_send(p, to, msg);
if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
save_calls(p, &exp_send);
@@ -2040,17 +2224,15 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
send_message: {
ErtsProcLocks rp_locks = 0;
Sint res;
-#ifdef ERTS_SMP
if (p == rp)
rp_locks |= ERTS_PROC_LOCK_MAIN;
-#endif
/* send to local process */
res = erts_send_message(p, rp, &rp_locks, msg, 0);
if (erts_use_sender_punish)
res *= 4;
else
res = 0;
- erts_smp_proc_unlock(rp,
+ erts_proc_unlock(rp,
p == rp
? (rp_locks & ~ERTS_PROC_LOCK_MAIN)
: rp_locks);
@@ -2058,9 +2240,11 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
}
}
+HIPE_WRAPPER_BIF_DISABLE_GC(send, 3)
BIF_RETTYPE send_3(BIF_ALIST_3)
{
+ BIF_RETTYPE retval;
Eterm ref;
Process *p = BIF_P;
Eterm to = BIF_ARG_1;
@@ -2068,34 +2252,51 @@ BIF_RETTYPE send_3(BIF_ALIST_3)
Eterm opts = BIF_ARG_3;
int connect = !0;
- int suspend = !0;
Eterm l = opts;
Sint result;
-
+
+ DeclareTypedTmpHeap(ErtsSendContext, ctx, BIF_P);
+
+ ERTS_MSACC_PUSH_STATE_M_X();
+
+ UseTmpHeap(sizeof(ErtsSendContext)/sizeof(Eterm), BIF_P);
+
+ ctx->suspend = !0;
+ ctx->dep_to_deref = NULL;
+ ctx->return_term = am_ok;
+ ctx->dss.reds = (Sint) (ERTS_BIF_REDS_LEFT(p) * TERM_TO_BINARY_LOOP_FACTOR);
+ ctx->dss.phase = ERTS_DSIG_SEND_PHASE_INIT;
+
while (is_list(l)) {
if (CAR(list_val(l)) == am_noconnect) {
connect = 0;
} else if (CAR(list_val(l)) == am_nosuspend) {
- suspend = 0;
+ ctx->suspend = 0;
} else {
- BIF_ERROR(p, BADARG);
+ ERTS_BIF_PREP_ERROR(retval, p, BADARG);
+ goto done;
}
l = CDR(list_val(l));
}
if(!is_nil(l)) {
- BIF_ERROR(p, BADARG);
+ ERTS_BIF_PREP_ERROR(retval, p, BADARG);
+ goto done;
}
#ifdef DEBUG
ref = NIL;
#endif
- result = do_send(p, to, msg, suspend, &ref);
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_SEND);
+ result = do_send(p, to, msg, &ref, ctx);
+ ERTS_MSACC_POP_STATE_M_X();
+
if (result > 0) {
ERTS_VBUMP_REDS(p, result);
if (ERTS_IS_PROC_OUT_OF_REDS(p))
goto yield_return;
- BIF_RET(am_ok);
+ ERTS_BIF_PREP_RET(retval, am_ok);
+ goto done;
}
switch (result) {
@@ -2103,68 +2304,129 @@ BIF_RETTYPE send_3(BIF_ALIST_3)
/* May need to yield even though we do not bump reds here... */
if (ERTS_IS_PROC_OUT_OF_REDS(p))
goto yield_return;
- BIF_RET(am_ok);
+ ERTS_BIF_PREP_RET(retval, am_ok);
break;
case SEND_TRAP:
if (connect) {
- BIF_TRAP3(dsend3_trap, p, to, msg, opts);
+ ERTS_BIF_PREP_TRAP3(retval, dsend3_trap, p, to, msg, opts);
} else {
- BIF_RET(am_noconnect);
+ ERTS_BIF_PREP_RET(retval, am_noconnect);
}
break;
case SEND_YIELD:
- if (suspend) {
- ERTS_BIF_YIELD3(bif_export[BIF_send_3], p, to, msg, opts);
+ if (ctx->suspend) {
+ ERTS_BIF_PREP_YIELD3(retval,
+ bif_export[BIF_send_3], p, to, msg, opts);
} else {
- BIF_RET(am_nosuspend);
+ ERTS_BIF_PREP_RET(retval, am_nosuspend);
}
break;
case SEND_YIELD_RETURN:
- if (!suspend)
- BIF_RET(am_nosuspend);
+ if (!ctx->suspend) {
+ ERTS_BIF_PREP_RET(retval, am_nosuspend);
+ break;
+ }
yield_return:
- ERTS_BIF_YIELD_RETURN(p, am_ok);
+ ERTS_BIF_PREP_YIELD_RETURN(retval, p, am_ok);
+ break;
case SEND_AWAIT_RESULT:
- ASSERT(is_internal_ref(ref));
- BIF_TRAP3(await_port_send_result_trap, p, ref, am_nosuspend, am_ok);
+ ASSERT(is_internal_ordinary_ref(ref));
+ ERTS_BIF_PREP_TRAP3(retval, await_port_send_result_trap, p, ref, am_nosuspend, am_ok);
+ break;
case SEND_BADARG:
- BIF_ERROR(p, BADARG);
+ ERTS_BIF_PREP_ERROR(retval, p, BADARG);
break;
case SEND_USER_ERROR:
- BIF_ERROR(p, EXC_ERROR);
+ ERTS_BIF_PREP_ERROR(retval, p, EXC_ERROR);
break;
case SEND_INTERNAL_ERROR:
- BIF_ERROR(p, EXC_INTERNAL_ERROR);
+ ERTS_BIF_PREP_ERROR(retval, p, EXC_INTERNAL_ERROR);
+ break;
+ case SEND_YIELD_CONTINUE:
+ BUMP_ALL_REDS(p);
+ erts_set_gc_state(p, 0);
+ ERTS_BIF_PREP_TRAP1(retval, &dsend_continue_trap_export, p,
+ erts_dsend_export_trap_context(p, ctx));
break;
default:
- ASSERT(! "Illegal send result");
+ erts_exit(ERTS_ABORT_EXIT, "send_3 invalid result %d\n", (int)result);
break;
}
- ASSERT(! "Can not arrive here");
- BIF_ERROR(p, BADARG);
+
+done:
+ UnUseTmpHeap(sizeof(ErtsSendContext)/sizeof(Eterm), BIF_P);
+ return retval;
}
+HIPE_WRAPPER_BIF_DISABLE_GC(send, 2)
+
BIF_RETTYPE send_2(BIF_ALIST_2)
{
return erl_send(BIF_P, BIF_ARG_1, BIF_ARG_2);
}
+static BIF_RETTYPE dsend_continue_trap_1(BIF_ALIST_1)
+{
+ Binary* bin = erts_magic_ref2bin(BIF_ARG_1);
+ ErtsSendContext* ctx = (ErtsSendContext*) ERTS_MAGIC_BIN_DATA(bin);
+ Sint initial_reds = (Sint) (ERTS_BIF_REDS_LEFT(BIF_P) * TERM_TO_BINARY_LOOP_FACTOR);
+ int result;
+
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == erts_dsend_context_dtor);
+
+ ctx->dss.reds = initial_reds;
+ result = erts_dsig_send(&ctx->dsd, &ctx->dss);
+
+ switch (result) {
+ case ERTS_DSIG_SEND_OK:
+ erts_set_gc_state(BIF_P, 1);
+ BIF_RET(ctx->return_term);
+ break;
+ case ERTS_DSIG_SEND_YIELD: /*SEND_YIELD_RETURN*/
+ erts_set_gc_state(BIF_P, 1);
+ if (!ctx->suspend)
+ BIF_RET(am_nosuspend);
+ ERTS_BIF_YIELD_RETURN(BIF_P, ctx->return_term);
+
+ case ERTS_DSIG_SEND_CONTINUE: { /*SEND_YIELD_CONTINUE*/
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP1(&dsend_continue_trap_export, BIF_P, BIF_ARG_1);
+ }
+ default:
+ erts_exit(ERTS_ABORT_EXIT, "dsend_continue_trap invalid result %d\n", (int)result);
+ break;
+ }
+ ASSERT(! "Can not arrive here");
+ BIF_ERROR(BIF_P, BADARG);
+}
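dsend_continue_trap_1 is the resumption point for a suspended distributed send: it refills a per-slice reduction budget from what the process has left, runs erts_dsig_send on the saved context, and traps back to itself while the result is CONTINUE. The control structure boiled down to plain C (names invented; each loop iteration models one trap):

    #include <stdio.h>

    enum step { SEND_OK, SEND_CONTINUE };

    struct send_ctx { int items_left; int reds; };

    /* Do at most ctx->reds units of work, then either finish or ask to
     * be rescheduled, the shape of erts_dsig_send's CONTINUE protocol. */
    static enum step dsig_send_slice(struct send_ctx *ctx)
    {
        while (ctx->items_left > 0 && ctx->reds-- > 0)
            ctx->items_left--;
        return ctx->items_left == 0 ? SEND_OK : SEND_CONTINUE;
    }

    int main(void)
    {
        struct send_ctx ctx = { 10000, 0 };
        int traps = 0;

        for (;;) {                    /* each iteration models one trap */
            ctx.reds = 4000;          /* refill the slice budget */
            if (dsig_send_slice(&ctx) == SEND_OK)
                break;
            traps++;                  /* BIF_TRAP1 back to this function */
        }
        printf("finished after %d traps\n", traps);
        return 0;
    }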
+
Eterm erl_send(Process *p, Eterm to, Eterm msg)
{
+ Eterm retval;
Eterm ref;
Sint result;
-
+ DeclareTypedTmpHeap(ErtsSendContext, ctx, p);
+ ERTS_MSACC_PUSH_AND_SET_STATE_M_X(ERTS_MSACC_STATE_SEND);
+ UseTmpHeap(sizeof(ErtsSendContext)/sizeof(Eterm), p);
#ifdef DEBUG
ref = NIL;
#endif
+ ctx->suspend = !0;
+ ctx->dep_to_deref = NULL;
+ ctx->return_term = msg;
+ ctx->dss.reds = (Sint) (ERTS_BIF_REDS_LEFT(p) * TERM_TO_BINARY_LOOP_FACTOR);
+ ctx->dss.phase = ERTS_DSIG_SEND_PHASE_INIT;
+
+ result = do_send(p, to, msg, &ref, ctx);
+
+ ERTS_MSACC_POP_STATE_M_X();
- result = do_send(p, to, msg, !0, &ref);
-
if (result > 0) {
ERTS_VBUMP_REDS(p, result);
if (ERTS_IS_PROC_OUT_OF_REDS(p))
goto yield_return;
- BIF_RET(msg);
+ ERTS_BIF_PREP_RET(retval, msg);
+ goto done;
}
switch (result) {
@@ -2172,35 +2434,46 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg)
/* May need to yield even though we do not bump reds here... */
if (ERTS_IS_PROC_OUT_OF_REDS(p))
goto yield_return;
- BIF_RET(msg);
+ ERTS_BIF_PREP_RET(retval, msg);
break;
case SEND_TRAP:
- BIF_TRAP2(dsend2_trap, p, to, msg);
+ ERTS_BIF_PREP_TRAP2(retval, dsend2_trap, p, to, msg);
break;
case SEND_YIELD:
- ERTS_BIF_YIELD2(bif_export[BIF_send_2], p, to, msg);
+ ERTS_BIF_PREP_YIELD2(retval, bif_export[BIF_send_2], p, to, msg);
break;
case SEND_YIELD_RETURN:
yield_return:
- ERTS_BIF_YIELD_RETURN(p, msg);
+ ERTS_BIF_PREP_YIELD_RETURN(retval, p, msg);
+ break;
case SEND_AWAIT_RESULT:
- ASSERT(is_internal_ref(ref));
- BIF_TRAP3(await_port_send_result_trap, p, ref, msg, msg);
+ ASSERT(is_internal_ordinary_ref(ref));
+ ERTS_BIF_PREP_TRAP3(retval,
+ await_port_send_result_trap, p, ref, msg, msg);
+ break;
case SEND_BADARG:
- BIF_ERROR(p, BADARG);
+ ERTS_BIF_PREP_ERROR(retval, p, BADARG);
break;
case SEND_USER_ERROR:
- BIF_ERROR(p, EXC_ERROR);
+ ERTS_BIF_PREP_ERROR(retval, p, EXC_ERROR);
break;
case SEND_INTERNAL_ERROR:
- BIF_ERROR(p, EXC_INTERNAL_ERROR);
+ ERTS_BIF_PREP_ERROR(retval, p, EXC_INTERNAL_ERROR);
+ break;
+ case SEND_YIELD_CONTINUE:
+ BUMP_ALL_REDS(p);
+ erts_set_gc_state(p, 0);
+ ERTS_BIF_PREP_TRAP1(retval, &dsend_continue_trap_export, p,
+ erts_dsend_export_trap_context(p, ctx));
break;
default:
- ASSERT(! "Illegal send result");
+ erts_exit(ERTS_ABORT_EXIT, "invalid send result %d\n", (int)result);
break;
}
- ASSERT(! "Can not arrive here");
- BIF_ERROR(p, BADARG);
+
+done:
+ UnUseTmpHeap(sizeof(ErtsSendContext)/sizeof(Eterm), p);
+ return retval;
}
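send_3 and erl_send now route every outcome through ERTS_BIF_PREP_* and a shared done: label so that UnUseTmpHeap always runs before returning. A compact illustration of that single-exit cleanup discipline (generic C, not the ERTS macros):

    #include <stdio.h>
    #include <stdlib.h>

    /* Every path sets ret and jumps to done, so the temporary buffer is
     * released exactly once, the shape send_3/erl_send now follow. */
    static int send_sketch(int arg)
    {
        int ret;
        char *tmp = malloc(64);      /* stands in for UseTmpHeap */
        if (!tmp)
            return -1;

        if (arg < 0) {
            ret = -1;                /* ERTS_BIF_PREP_ERROR analogue */
            goto done;
        }
        ret = snprintf(tmp, 64, "sending %d", arg) > 0 ? 0 : -1;
    done:
        free(tmp);                   /* UnUseTmpHeap analogue */
        return ret;
    }

    int main(void)
    {
        printf("%d %d\n", send_sketch(7), send_sketch(-1));
        return 0;
    }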
/**********************************************************************/
@@ -2670,18 +2943,18 @@ BIF_RETTYPE atom_to_list_1(BIF_ALIST_1)
BIF_RETTYPE list_to_atom_1(BIF_ALIST_1)
{
Eterm res;
- char *buf = (char *) erts_alloc(ERTS_ALC_T_TMP, MAX_ATOM_CHARACTERS);
- int i = intlist_to_buf(BIF_ARG_1, buf, MAX_ATOM_CHARACTERS);
-
+ byte *buf = (byte *) erts_alloc(ERTS_ALC_T_TMP, MAX_ATOM_SZ_LIMIT);
+ Sint written;
+ int i = erts_unicode_list_to_buf(BIF_ARG_1, buf, MAX_ATOM_CHARACTERS,
+ &written);
if (i < 0) {
erts_free(ERTS_ALC_T_TMP, (void *) buf);
- i = erts_list_length(BIF_ARG_1);
- if (i > MAX_ATOM_CHARACTERS) {
+ if (i == -2) {
BIF_ERROR(BIF_P, SYSTEM_LIMIT);
}
BIF_ERROR(BIF_P, BADARG);
}
- res = erts_atom_put((byte *) buf, i, ERTS_ATOM_ENC_LATIN1, 1);
+ res = erts_atom_put(buf, written, ERTS_ATOM_ENC_UTF8, 1);
ASSERT(is_atom(res));
erts_free(ERTS_ALC_T_TMP, (void *) buf);
BIF_RET(res);
@@ -2691,17 +2964,18 @@ BIF_RETTYPE list_to_atom_1(BIF_ALIST_1)
BIF_RETTYPE list_to_existing_atom_1(BIF_ALIST_1)
{
- int i;
- char *buf = (char *) erts_alloc(ERTS_ALC_T_TMP, MAX_ATOM_CHARACTERS);
-
- if ((i = intlist_to_buf(BIF_ARG_1, buf, MAX_ATOM_CHARACTERS)) < 0) {
+ byte *buf = (byte *) erts_alloc(ERTS_ALC_T_TMP, MAX_ATOM_SZ_LIMIT);
+ Sint written;
+ int i = erts_unicode_list_to_buf(BIF_ARG_1, buf, MAX_ATOM_CHARACTERS,
+ &written);
+ if (i < 0) {
error:
erts_free(ERTS_ALC_T_TMP, (void *) buf);
BIF_ERROR(BIF_P, BADARG);
} else {
Eterm a;
- if (erts_atom_get(buf, i, &a, ERTS_ATOM_ENC_LATIN1)) {
+ if (erts_atom_get((char *) buf, written, &a, ERTS_ATOM_ENC_UTF8)) {
erts_free(ERTS_ALC_T_TMP, (void *) buf);
BIF_RET(a);
} else {
@@ -2751,170 +3025,20 @@ BIF_RETTYPE integer_to_list_1(BIF_ALIST_1)
/**********************************************************************/
-/* convert a list of ascii ascii integer value to an integer */
-
-
-#define LTI_BAD_STRUCTURE 0
-#define LTI_NO_INTEGER 1
-#define LTI_SOME_INTEGER 2
-#define LTI_ALL_INTEGER 3
-
-static int do_list_to_integer(Process *p, Eterm orig_list,
- Eterm *integer, Eterm *rest)
-{
- Sint i = 0;
- int skip = 0;
- int neg = 0;
- int n = 0;
- int m;
- int lg2;
- Eterm res;
- Eterm* hp;
- Eterm *hp_end;
- Eterm lst = orig_list;
- Eterm tail = lst;
- int error_res = LTI_BAD_STRUCTURE;
-
- if (is_nil(lst)) {
- error_res = LTI_NO_INTEGER;
- error:
- *rest = tail;
- *integer = make_small(0);
- return error_res;
- }
- if (is_not_list(lst))
- goto error;
-
- /* if first char is a '-' then it is a negative integer */
- if (CAR(list_val(lst)) == make_small('-')) {
- neg = 1;
- skip = 1;
- lst = CDR(list_val(lst));
- if (is_not_list(lst)) {
- tail = lst;
- error_res = LTI_NO_INTEGER;
- goto error;
- }
- } else if (CAR(list_val(lst)) == make_small('+')) {
- /* ignore plus */
- skip = 1;
- lst = CDR(list_val(lst));
- if (is_not_list(lst)) {
- tail = lst;
- error_res = LTI_NO_INTEGER;
- goto error;
- }
- }
-
- /* Calculate size and do type check */
-
- while(1) {
- if (is_not_small(CAR(list_val(lst)))) {
- break;
- }
- if (unsigned_val(CAR(list_val(lst))) < '0' ||
- unsigned_val(CAR(list_val(lst))) > '9') {
- break;
- }
- i = i * 10;
- i = i + unsigned_val(CAR(list_val(lst))) - '0';
- n++;
- lst = CDR(list_val(lst));
- if (is_nil(lst)) {
- break;
- }
- if (is_not_list(lst)) {
- break;
- }
- }
-
- tail = lst;
- if (!n) {
- error_res = LTI_NO_INTEGER;
- goto error;
- }
-
-
- /* If n <= 8 then we know it's a small int
- ** since 2^27 = 134217728. If n > 8 then we must
- ** construct a bignum and let that routine do the checking
- */
-
- if (n <= SMALL_DIGITS) { /* It must be small */
- if (neg) i = -i;
- res = make_small(i);
- } else {
- lg2 = (n+1)*230/69+1;
- m = (lg2+D_EXP-1)/D_EXP; /* number of digits */
- m = BIG_NEED_SIZE(m); /* number of words + thing */
-
- hp = HAlloc(p, m);
- hp_end = hp + m;
-
- lst = orig_list;
- if (skip)
- lst = CDR(list_val(lst));
-
- /* load first digits (at least one digit) */
- if ((i = (n % D_DECIMAL_EXP)) == 0)
- i = D_DECIMAL_EXP;
- n -= i;
- m = 0;
- while(i--) {
- m = 10*m + (unsigned_val(CAR(list_val(lst))) - '0');
- lst = CDR(list_val(lst));
- }
- res = small_to_big(m, hp); /* load first digits */
-
- while(n) {
- i = D_DECIMAL_EXP;
- n -= D_DECIMAL_EXP;
- m = 0;
- while(i--) {
- m = 10*m + (unsigned_val(CAR(list_val(lst))) - '0');
- lst = CDR(list_val(lst));
- }
- if (is_small(res))
- res = small_to_big(signed_val(res), hp);
- res = big_times_small(res, D_DECIMAL_BASE, hp);
- if (is_small(res))
- res = small_to_big(signed_val(res), hp);
- res = big_plus_small(res, m, hp);
- }
-
- if (is_big(res)) /* check if small */
- res = big_plus_small(res, 0, hp); /* includes conversion to small */
-
- if (neg) {
- if (is_small(res))
- res = make_small(-signed_val(res));
- else {
- Uint *big = big_val(res); /* point to thing */
- *big = bignum_header_neg(*big);
- }
- }
+/*
+ * Converts a list of ASCII base-10 digits to an integer, fully or partially.
+ * Returns the result and the remaining tail.
+ * On error returns {error,not_a_list} or {error,no_integer}.
+ */
- if (is_big(res)) {
- hp += (big_arity(res)+1);
- }
- HRelease(p,hp_end,hp);
- }
- *integer = res;
- *rest = tail;
- if (tail != NIL) {
- return LTI_SOME_INTEGER;
- }
- return LTI_ALL_INTEGER;
-}
-BIF_RETTYPE string_to_integer_1(BIF_ALIST_1)
+BIF_RETTYPE string_list_to_integer_1(BIF_ALIST_1)
{
Eterm res;
Eterm tail;
Eterm *hp;
/* must be a list */
- switch (do_list_to_integer(BIF_P,BIF_ARG_1,&res,&tail)) {
- /* HAlloc after do_list_to_integer as it
- might HAlloc itself (bignum) */
+ switch (erts_list_to_integer(BIF_P, BIF_ARG_1, 10, &res, &tail)) {
+ /* HAlloc after erts_list_to_integer as it might HAlloc itself (bignum) */
case LTI_BAD_STRUCTURE:
hp = HAlloc(BIF_P,3);
BIF_RET(TUPLE2(hp, am_error, am_not_a_list));
@@ -2929,13 +3053,14 @@ BIF_RETTYPE string_to_integer_1(BIF_ALIST_1)
BIF_RETTYPE list_to_integer_1(BIF_ALIST_1)
{
- /* Using do_list_to_integer is about twice as fast as using
+ /* Using erts_list_to_integer is about twice as fast as using
erts_chars_to_integer because we do not have to copy the
entire list */
Eterm res;
Eterm dummy;
/* must be a list */
- if (do_list_to_integer(BIF_P,BIF_ARG_1,&res,&dummy) != LTI_ALL_INTEGER) {
+ if (erts_list_to_integer(BIF_P, BIF_ARG_1, 10,
+ &res, &dummy) != LTI_ALL_INTEGER) {
BIF_ERROR(BIF_P,BADARG);
}
BIF_RET(res);
@@ -2943,14 +3068,12 @@ BIF_RETTYPE list_to_integer_1(BIF_ALIST_1)
BIF_RETTYPE list_to_integer_2(BIF_ALIST_2)
{
-
/* Bif implementation is about 50% faster than pure erlang,
and since we have erts_chars_to_integer now it is simpler
as well. This could be optimized further if we did not have to
copy the list to buf. */
- int i;
- Eterm res;
- char *buf = NULL;
+ Sint i;
+ Eterm res, dummy;
int base;
i = erts_list_length(BIF_ARG_1);
@@ -2958,31 +3081,16 @@ BIF_RETTYPE list_to_integer_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, BADARG);
base = signed_val(BIF_ARG_2);
-
+
if (base < 2 || base > 36)
BIF_ERROR(BIF_P, BADARG);
- /* Take fast path if base it 10 */
- if (base == 10)
- return list_to_integer_1(BIF_P,&BIF_ARG_1);
-
- buf = (char *) erts_alloc(ERTS_ALC_T_TMP, i + 1);
-
- if (intlist_to_buf(BIF_ARG_1, buf, i) < 0)
- goto list_to_integer_1_error;
- buf[i] = '\0'; /* null terminal */
-
- if ((res = erts_chars_to_integer(BIF_P,buf,i,base)) == THE_NON_VALUE)
- goto list_to_integer_1_error;
-
- erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ if (erts_list_to_integer(BIF_P, BIF_ARG_1, base,
+ &res, &dummy) != LTI_ALL_INTEGER) {
+ BIF_ERROR(BIF_P,BADARG);
+ }
BIF_RET(res);
-
- list_to_integer_1_error:
- erts_free(ERTS_ALC_T_TMP, (void *) buf);
- BIF_ERROR(BIF_P, BADARG);
-
- }
+}
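erts_list_to_integer reports whether it consumed the whole list (LTI_ALL_INTEGER) or stopped early (LTI_SOME_INTEGER), and list_to_integer/1,2 only accept a full parse. strtol exposes the same distinction through its end pointer; a sketch (overflow handling via errno omitted for brevity):

    #include <stdio.h>
    #include <stdlib.h>

    /* Parse s in the given base; succeed only if every character was
     * consumed, mirroring the LTI_ALL_INTEGER requirement above. */
    static int parse_all(const char *s, int base, long *out)
    {
        char *end;
        *out = strtol(s, &end, base);
        return end != s && *end == '\0';
    }

    int main(void)
    {
        long v;
        printf("%d\n", parse_all("ff", 16, &v));    /* 1, v == 255 */
        printf("%d\n", parse_all("12x", 10, &v));   /* 0: trailing junk */
        return 0;
    }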
/**********************************************************************/
@@ -3109,7 +3217,7 @@ BIF_RETTYPE float_to_binary_2(BIF_ALIST_2)
#define LOAD_E(xi,xim,xl,xlm) ((xi)=(xim), (xl)=(xlm))
#define STRING_TO_FLOAT_BUF_INC_SZ (128)
-BIF_RETTYPE string_to_float_1(BIF_ALIST_1)
+BIF_RETTYPE string_list_to_float_1(BIF_ALIST_1)
{
Eterm orig = BIF_ARG_1;
Eterm list = orig;
@@ -3288,7 +3396,7 @@ static BIF_RETTYPE do_charbuf_to_float(Process *BIF_P,char *buf) {
BIF_RETTYPE list_to_float_1(BIF_ALIST_1)
{
- int i;
+ Sint i;
Eterm res;
char *buf = NULL;
@@ -3405,7 +3513,7 @@ BIF_RETTYPE list_to_tuple_1(BIF_ALIST_1)
Eterm* cons;
Eterm res;
Eterm* hp;
- int len;
+ Sint len;
if ((len = erts_list_length(list)) < 0 || len > ERTS_MAX_TUPLE_SIZE) {
BIF_ERROR(BIF_P, BADARG);
@@ -3434,91 +3542,6 @@ BIF_RETTYPE self_0(BIF_ALIST_0)
/**********************************************************************/
-/*
- New representation of refs in R9, see erl_term.h
-
- In the first data word, only the usual 18 bits are used. Ordinarily,
- in "long refs" all words are used (in other words, practically never
- wrap around), but for compatibility with older nodes, "short refs"
- exist. Short refs come into being by being converted from the old
- external format for refs (tag REFERENCE_EXT). Short refs are
- converted back to the old external format.
-
- When converting a long ref to the external format in the case of
- preparing for sending to an older node, the ref is truncated by only
- using the first word (with 18 significant bits), and using the old tag
- REFERENCE_EXT.
-
- When comparing refs or different size, only the parts up to the length
- of the shorter operand are used. This has the desirable effect that a
- long ref sent to an old node and back will be treated as equal to
- the original, although some of the bits have been lost.
-
- The hash value for a ref always considers only the first word, since
- in the above scenario, the original and the copy should have the same
- hash value.
-*/
-
-static Uint32 reference0; /* Initialized in erts_init_bif */
-static Uint32 reference1;
-static Uint32 reference2;
-static erts_smp_spinlock_t make_ref_lock;
-static erts_smp_mtx_t ports_snapshot_mtx;
-erts_smp_atomic_t erts_dead_ports_ptr; /* To store dying ports during snapshot */
-
-void
-erts_make_ref_in_array(Uint32 ref[ERTS_MAX_REF_NUMBERS])
-{
- erts_smp_spin_lock(&make_ref_lock);
-
- reference0++;
- if (reference0 >= MAX_REFERENCE) {
- reference0 = 0;
- reference1++;
- if (reference1 == 0) {
- reference2++;
- }
- }
-
- ref[0] = reference0;
- ref[1] = reference1;
- ref[2] = reference2;
-
- erts_smp_spin_unlock(&make_ref_lock);
-}
-
-Eterm erts_make_ref_in_buffer(Eterm buffer[REF_THING_SIZE])
-{
- Eterm* hp = buffer;
- Uint32 ref[ERTS_MAX_REF_NUMBERS];
-
- erts_make_ref_in_array(ref);
- write_ref_thing(hp, ref[0], ref[1], ref[2]);
- return make_internal_ref(hp);
-}
-
-Eterm erts_make_ref(Process *p)
-{
- Eterm* hp;
- Uint32 ref[ERTS_MAX_REF_NUMBERS];
-
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));
-
- hp = HAlloc(p, REF_THING_SIZE);
-
- erts_make_ref_in_array(ref);
- write_ref_thing(hp, ref[0], ref[1], ref[2]);
-
- return make_internal_ref(hp);
-}
-
-BIF_RETTYPE make_ref_0(BIF_ALIST_0)
-{
- return erts_make_ref(BIF_P);
-}
-
-/**********************************************************************/
-
/* return the time of day */
BIF_RETTYPE time_0(BIF_ALIST_0)
@@ -3764,13 +3787,21 @@ BIF_RETTYPE now_0(BIF_ALIST_0)
/**********************************************************************/
-BIF_RETTYPE garbage_collect_0(BIF_ALIST_0)
+/*
+ * Pass atom 'minor' for a relaxed generational GC run. This is only a
+ * recommendation; a major run may still be chosen by the VM.
+ * Pass atom 'major' for the default behaviour - a major GC run (fullsweep).
+ */
+BIF_RETTYPE
+erts_internal_garbage_collect_1(BIF_ALIST_1)
{
- int reds;
-
- FLAGS(BIF_P) |= F_NEED_FULLSWEEP;
- reds = erts_garbage_collect(BIF_P, 0, NULL, 0);
- BIF_RET2(am_true, reds);
+ switch (BIF_ARG_1) {
+ case am_minor: break;
+ case am_major: FLAGS(BIF_P) |= F_NEED_FULLSWEEP; break;
+ default: BIF_ERROR(BIF_P, BADARG);
+ }
+ erts_garbage_collect(BIF_P, 0, NULL, 0);
+ return am_true;
}
/**********************************************************************/
@@ -3825,7 +3856,7 @@ BIF_RETTYPE erts_debug_display_1(BIF_ALIST_1)
erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(64);
pres = erts_dsprintf(dsbufp, "%.*T\n", INT_MAX, BIF_ARG_1);
if (pres < 0)
- erl_exit(1, "Failed to convert term to string: %d (%s)\n",
+ erts_exit(ERTS_ERROR_EXIT, "Failed to convert term to string: %d (%s)\n",
-pres, erl_errno_id(-pres));
hp = HAlloc(BIF_P, 2*dsbufp->str_len); /* we need length * 2 heap words */
res = buf_to_intlist(&hp, dsbufp->str, dsbufp->str_len, NIL);
@@ -3839,15 +3870,18 @@ BIF_RETTYPE display_string_1(BIF_ALIST_1)
{
Process* p = BIF_P;
Eterm string = BIF_ARG_1;
- int len = is_string(string);
- char *str;
+ Sint len = erts_unicode_list_to_buf_len(string);
+ Sint written;
+ byte *str;
+ int res;
- if (len <= 0) {
+ if (len < 0) {
BIF_ERROR(p, BADARG);
}
- str = (char *) erts_alloc(ERTS_ALC_T_TMP, sizeof(char)*(len + 1));
- if (intlist_to_buf(string, str, len) != len)
- erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
+ str = (byte *) erts_alloc(ERTS_ALC_T_TMP, sizeof(char)*(len + 1));
+ res = erts_unicode_list_to_buf(string, str, len, &written);
+ if (res != 0 || written != len)
+ erts_exit(ERTS_ERROR_EXIT, "%s:%d: Internal error (%d)\n", __FILE__, __LINE__, res);
str[len] = '\0';
erts_fprintf(stderr, "%s", str);
erts_free(ERTS_ALC_T_TMP, (void *) str);
@@ -3862,61 +3896,11 @@ BIF_RETTYPE display_nl_0(BIF_ALIST_0)
/**********************************************************************/
-/* stop the system */
-/* ARGSUSED */
-BIF_RETTYPE halt_0(BIF_ALIST_0)
-{
- VERBOSE(DEBUG_SYSTEM,("System halted by BIF halt()\n"));
- erl_halt(0);
- ERTS_BIF_YIELD1(bif_export[BIF_halt_1], BIF_P, am_undefined);
-}
-
-/**********************************************************************/
-
-#define HALT_MSG_SIZE 200
-static char halt_msg[HALT_MSG_SIZE];
-
-/* stop the system with exit code */
-/* ARGSUSED */
-BIF_RETTYPE halt_1(BIF_ALIST_1)
-{
- Sint code;
-
- if (is_small(BIF_ARG_1) && (code = signed_val(BIF_ARG_1)) >= 0) {
- VERBOSE(DEBUG_SYSTEM,("System halted by BIF halt(%T)\n", BIF_ARG_1));
- erl_halt((int)(- code));
- ERTS_BIF_YIELD1(bif_export[BIF_halt_1], BIF_P, am_undefined);
- }
- else if (ERTS_IS_ATOM_STR("abort", BIF_ARG_1)) {
- VERBOSE(DEBUG_SYSTEM,("System halted by BIF halt(%T)\n", BIF_ARG_1));
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erl_exit(ERTS_ABORT_EXIT, "");
- }
- else if (is_string(BIF_ARG_1) || BIF_ARG_1 == NIL) {
- int i;
-
- if ((i = intlist_to_buf(BIF_ARG_1, halt_msg, HALT_MSG_SIZE-1)) < 0) {
- goto error;
- }
- halt_msg[i] = '\0';
- VERBOSE(DEBUG_SYSTEM,("System halted by BIF halt(%T)\n", BIF_ARG_1));
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erl_exit(ERTS_DUMP_EXIT, "%s\n", halt_msg);
- }
- else
- goto error;
- return NIL; /* Pedantic (lint does not know about erl_exit) */
- error:
- BIF_ERROR(BIF_P, BADARG);
-}
-
-/**********************************************************************/
/* stop the system with exit code and flags */
-/* ARGSUSED */
BIF_RETTYPE halt_2(BIF_ALIST_2)
{
- Sint code;
+ Uint code;
Eterm optlist = BIF_ARG_2;
int flush = 1;
@@ -3943,39 +3927,44 @@ BIF_RETTYPE halt_2(BIF_ALIST_2)
if (is_not_nil(optlist))
goto error;
- if (is_small(BIF_ARG_1) && (code = signed_val(BIF_ARG_1)) >= 0) {
+ if (term_to_Uint_mask(BIF_ARG_1, &code)) {
+ int pos_int_code = (int) (code & INT_MAX);
VERBOSE(DEBUG_SYSTEM,
("System halted by BIF halt(%T, %T)\n", BIF_ARG_1, BIF_ARG_2));
if (flush) {
- erl_halt((int)(- code));
- ERTS_BIF_YIELD1(bif_export[BIF_halt_1], BIF_P, am_undefined);
+ erts_halt(pos_int_code);
+ ERTS_BIF_YIELD2(bif_export[BIF_halt_2], BIF_P, am_undefined, am_undefined);
}
else {
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erl_exit((int)(- code), "");
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_exit(pos_int_code, "");
}
}
else if (ERTS_IS_ATOM_STR("abort", BIF_ARG_1)) {
VERBOSE(DEBUG_SYSTEM,
("System halted by BIF halt(%T, %T)\n", BIF_ARG_1, BIF_ARG_2));
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erl_exit(ERTS_ABORT_EXIT, "");
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_exit(ERTS_ABORT_EXIT, "");
}
- else if (is_string(BIF_ARG_1) || BIF_ARG_1 == NIL) {
- int i;
+ else if (is_list(BIF_ARG_1) || BIF_ARG_1 == NIL) {
+# define HALT_MSG_SIZE 200
+ static byte halt_msg[4*HALT_MSG_SIZE+1];
+ Sint written;
- if ((i = intlist_to_buf(BIF_ARG_1, halt_msg, HALT_MSG_SIZE-1)) < 0) {
- goto error;
- }
- halt_msg[i] = '\0';
+ if (erts_unicode_list_to_buf(BIF_ARG_1, halt_msg, HALT_MSG_SIZE,
+ &written) == -1 ) {
+ goto error;
+ }
+ ASSERT(written >= 0 && written < sizeof(halt_msg));
+ halt_msg[written] = '\0';
VERBOSE(DEBUG_SYSTEM,
("System halted by BIF halt(%T, %T)\n", BIF_ARG_1, BIF_ARG_2));
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erl_exit(ERTS_DUMP_EXIT, "%s\n", halt_msg);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_exit(ERTS_DUMP_EXIT, "%s\n", halt_msg);
}
else
goto error;
- return NIL; /* Pedantic (lint does not know about erl_exit) */
+ return NIL; /* Pedantic (lint does not know about erts_exit) */
error:
BIF_ERROR(BIF_P, BADARG);
}
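The replacement halt message buffer is 4*HALT_MSG_SIZE+1 bytes because UTF-8 needs at most four bytes per code point, so HALT_MSG_SIZE characters always fit, plus a terminating NUL. A minimal encoder showing where the factor of four comes from (a sketch only; no validation of surrogates or out-of-range code points):

    #include <stdio.h>

    /* Encode one Unicode code point as UTF-8; returns the byte count.
     * The maximum is 4, which is why the halt buffer reserves
     * 4*HALT_MSG_SIZE bytes plus one for the terminating NUL. */
    static int utf8_encode(unsigned cp, unsigned char out[4])
    {
        if (cp < 0x80)    { out[0] = (unsigned char) cp; return 1; }
        if (cp < 0x800)   { out[0] = 0xC0 | (cp >> 6);
                            out[1] = 0x80 | (cp & 0x3F); return 2; }
        if (cp < 0x10000) { out[0] = 0xE0 | (cp >> 12);
                            out[1] = 0x80 | ((cp >> 6) & 0x3F);
                            out[2] = 0x80 | (cp & 0x3F); return 3; }
        out[0] = 0xF0 | (cp >> 18);
        out[1] = 0x80 | ((cp >> 12) & 0x3F);
        out[2] = 0x80 | ((cp >> 6) & 0x3F);
        out[3] = 0x80 | (cp & 0x3F);
        return 4;
    }

    int main(void)
    {
        unsigned char b[4];
        printf("'A' -> %d byte(s)\n", utf8_encode(0x41, b));        /* 1 */
        printf("U+1F600 -> %d byte(s)\n", utf8_encode(0x1F600, b)); /* 4 */
        return 0;
    }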
@@ -3984,16 +3973,19 @@ BIF_RETTYPE halt_2(BIF_ALIST_2)
BIF_RETTYPE function_exported_3(BIF_ALIST_3)
{
+ int arity;
if (is_not_atom(BIF_ARG_1) ||
is_not_atom(BIF_ARG_2) ||
is_not_small(BIF_ARG_3)) {
BIF_ERROR(BIF_P, BADARG);
}
- if (erts_find_function(BIF_ARG_1, BIF_ARG_2, signed_val(BIF_ARG_3),
- erts_active_code_ix()) == NULL) {
- BIF_RET(am_false);
+ arity = signed_val(BIF_ARG_3);
+ if (erts_find_function(BIF_ARG_1, BIF_ARG_2, arity,
+ erts_active_code_ix()) != NULL ||
+ erts_is_builtin(BIF_ARG_1, BIF_ARG_2, arity)) {
+ BIF_RET(am_true);
}
- BIF_RET(am_true);
+ BIF_RET(am_false);
}
/**********************************************************************/
@@ -4028,7 +4020,7 @@ term2list_dsprintf(Process *p, Eterm term)
erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(64);
pres = erts_dsprintf(dsbufp, "%T", term);
if (pres < 0)
- erl_exit(1, "Failed to convert term to list: %d (%s)\n",
+ erts_exit(ERTS_ERROR_EXIT, "Failed to convert term to list: %d (%s)\n",
-pres, erl_errno_id(-pres));
hp = HAlloc(p, 2*dsbufp->str_len); /* we need length * 2 heap words */
res = buf_to_intlist(&hp, dsbufp->str, dsbufp->str_len, NIL);
@@ -4040,6 +4032,7 @@ BIF_RETTYPE ref_to_list_1(BIF_ALIST_1)
{
if (is_not_ref(BIF_ARG_1))
BIF_ERROR(BIF_P, BADARG);
+ erts_magic_ref_save_bin(BIF_ARG_1);
BIF_RET(term2list_dsprintf(BIF_P, BIF_ARG_1));
}
@@ -4056,16 +4049,9 @@ BIF_RETTYPE make_fun_3(BIF_ALIST_3)
if (arity < 0) {
goto error;
}
-#if HALFWORD_HEAP
- hp = HAlloc(BIF_P, 3);
- hp[0] = HEADER_EXPORT;
- /* Yes, May be misaligned, but X86_64 will fix it... */
- *((Export **) (hp+1)) = erts_export_get_or_make_stub(BIF_ARG_1, BIF_ARG_2, (Uint) arity);
-#else
hp = HAlloc(BIF_P, 2);
hp[0] = HEADER_EXPORT;
hp[1] = (Eterm) erts_export_get_or_make_stub(BIF_ARG_1, BIF_ARG_2, (Uint) arity);
-#endif
BIF_RET(make_export(hp));
}
@@ -4109,7 +4095,7 @@ BIF_RETTYPE list_to_pid_1(BIF_ALIST_1)
{
Uint a = 0, b = 0, c = 0;
char* cp;
- int i;
+ Sint i;
DistEntry *dep = NULL;
char *buf = (char *) erts_alloc(ERTS_ALC_T_TMP, 65);
/*
@@ -4159,7 +4145,6 @@ BIF_RETTYPE list_to_pid_1(BIF_ALIST_1)
goto bad;
if(dep == erts_this_dist_entry) {
- erts_deref_dist_entry(dep);
BIF_RET(make_internal_pid(make_pid_data(c, b)));
}
else {
@@ -4170,6 +4155,7 @@ BIF_RETTYPE list_to_pid_1(BIF_ALIST_1)
goto bad;
enp = erts_find_or_insert_node(dep->sysname, dep->creation);
+ ASSERT(enp != erts_this_node);
etp = (ExternalThing *) HAlloc(BIF_P, EXTERNAL_THING_HEAD_SIZE + 1);
etp->header = make_external_pid_header(1);
@@ -4178,18 +4164,203 @@ BIF_RETTYPE list_to_pid_1(BIF_ALIST_1)
etp->data.ui[0] = make_pid_data(c, b);
MSO(BIF_P).first = (struct erl_off_heap_header*) etp;
- erts_deref_dist_entry(dep);
BIF_RET(make_external_pid(etp));
}
bad:
- if (dep)
- erts_deref_dist_entry(dep);
if (buf)
erts_free(ERTS_ALC_T_TMP, (void *) buf);
BIF_ERROR(BIF_P, BADARG);
}
+BIF_RETTYPE list_to_port_1(BIF_ALIST_1)
+{
+ /*
+ * A valid port identifier has the format
+ * "#Port<N.P>" where N is the node number and P is
+ * the port number. Both N and P are of type Uint32.
+ */
+ Uint32 n, p;
+ char* cp;
+ int i;
+ DistEntry *dep = NULL;
+ char buf[6 /* #Port< */
+ + (2)*(10 + 1) /* N.P> */
+ + 1 /* \0 */];
+
+ /* walk down the list and create a C string */
+ if ((i = intlist_to_buf(BIF_ARG_1, buf, sizeof(buf)-1)) < 0)
+ goto bad;
+
+ buf[i] = '\0'; /* null terminator */
+
+ cp = &buf[0];
+ if (strncmp("#Port<", cp, 6) != 0)
+ goto bad;
+
+ cp += 6; /* strlen("#Port<") */
+
+ if (sscanf(cp, "%u.%u>", (unsigned int*)&n, (unsigned int*)&p) < 2)
+ goto bad;
+
+ if (p > ERTS_MAX_PORT_NUMBER)
+ goto bad;
+
+ dep = erts_channel_no_to_dist_entry(n);
+
+ if (!dep)
+ goto bad;
+
+ if(dep == erts_this_dist_entry) {
+ BIF_RET(make_internal_port(p));
+ }
+ else {
+ ExternalThing *etp;
+ ErlNode *enp;
+
+ if (is_nil(dep->cid))
+ goto bad;
+
+ enp = erts_find_or_insert_node(dep->sysname, dep->creation);
+ ASSERT(enp != erts_this_node);
+
+ etp = (ExternalThing *) HAlloc(BIF_P, EXTERNAL_THING_HEAD_SIZE + 1);
+ etp->header = make_external_port_header(1);
+ etp->next = MSO(BIF_P).first;
+ etp->node = enp;
+ etp->data.ui[0] = p;
+
+ MSO(BIF_P).first = (struct erl_off_heap_header*) etp;
+ BIF_RET(make_external_port(etp));
+ }
+
+ bad:
+ BIF_ERROR(BIF_P, BADARG);
+}
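The parse above is strncmp plus sscanf over a fixed-size stack buffer sized for two 10-digit numbers. A standalone check of the same format, using only the standard library (mirrors the code above but is not it):

    #include <stdio.h>
    #include <string.h>

    /* Accepts strings like "#Port<0.42>"; returns 1 and fills n/p on
     * success, mirroring the buffer-and-sscanf parse in list_to_port_1. */
    static int parse_port(const char *s, unsigned *n, unsigned *p)
    {
        if (strncmp(s, "#Port<", 6) != 0)
            return 0;
        return sscanf(s + 6, "%u.%u>", n, p) == 2;
    }

    int main(void)
    {
        unsigned n, p;
        if (parse_port("#Port<0.42>", &n, &p))
            printf("node %u, port %u\n", n, p);
        return 0;
    }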
+
+BIF_RETTYPE list_to_ref_1(BIF_ALIST_1)
+{
+ /*
+ * A valid reference has the format
+ * "#Ref<N.X.Y.Z>" where N, X, Y, and Z are
+ * 32-bit integers (i.e., at most 10 decimal digits each).
+ */
+ Eterm *hp;
+ Eterm res;
+ Uint32 refn[ERTS_MAX_REF_NUMBERS];
+ int n = 0;
+ Uint ints[1 + ERTS_MAX_REF_NUMBERS] = {0};
+ char* cp;
+ Sint i;
+ DistEntry *dep = NULL;
+ char buf[5 /* #Ref< */
+ + (1 + ERTS_MAX_REF_NUMBERS)*(10 + 1) /* N.X.Y.Z> */
+ + 1 /* \0 */];
+
+ /* walk down the list and create a C string */
+ if ((i = intlist_to_buf(BIF_ARG_1, buf, sizeof(buf)-1)) < 0)
+ goto bad;
+
+ buf[i] = '\0'; /* null terminator */
+
+ cp = &buf[0];
+ if (*cp++ != '#') goto bad;
+ if (*cp++ != 'R') goto bad;
+ if (*cp++ != 'e') goto bad;
+ if (*cp++ != 'f') goto bad;
+ if (*cp++ != '<') goto bad;
+
+ for (i = 0; i < sizeof(ints)/sizeof(Uint); i++) {
+ if (*cp < '0' || *cp > '9') goto bad;
+
+ while (*cp >= '0' && *cp <= '9') {
+ ints[i] = 10*ints[i] + (*cp - '0');
+ cp++;
+ }
+
+ n++;
+ if (ints[i] > ~((Uint32) 0)) goto bad;
+ if (*cp == '>') break;
+ if (*cp++ != '.') goto bad;
+ }
+
+ if (*cp++ != '>') goto bad;
+ if (*cp != '\0') goto bad;
+
+ if (n < 2) goto bad;
+
+ for (n = 0; i > 0; i--)
+ refn[n++] = (Uint32) ints[i];
+
+ ASSERT(n <= ERTS_MAX_REF_NUMBERS);
+
+ dep = erts_channel_no_to_dist_entry(ints[0]);
+
+ if (!dep)
+ goto bad;
+
+ if(dep == erts_this_dist_entry) {
+ ErtsMagicBinary *mb;
+ Uint32 sid;
+ if (refn[0] > MAX_REFERENCE) goto bad;
+ if (n != ERTS_REF_NUMBERS) goto bad;
+ sid = erts_get_ref_numbers_thr_id(refn);
+ if (sid > erts_no_schedulers) goto bad;
+ mb = erts_magic_ref_lookup_bin(refn);
+ if (mb) {
+ hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
+ res = erts_mk_magic_ref(&hp, &BIF_P->off_heap,
+ (Binary *) mb);
+ }
+ else {
+ hp = HAlloc(BIF_P, ERTS_REF_THING_SIZE);
+ write_ref_thing(hp, refn[0], refn[1], refn[2]);
+ res = make_internal_ref(hp);
+ }
+ }
+ else {
+ ExternalThing *etp;
+ ErlNode *enp;
+ Uint hsz;
+ int j;
+
+ if (is_nil(dep->cid))
+ goto bad;
+
+ enp = erts_find_or_insert_node(dep->sysname, dep->creation);
+ ASSERT(enp != erts_this_node);
+
+ hsz = EXTERNAL_THING_HEAD_SIZE;
+#if defined(ARCH_64)
+ hsz += n/2 + 1;
+#else
+ hsz += n;
+#endif
+
+ etp = (ExternalThing *) HAlloc(BIF_P, hsz);
+ etp->header = make_external_ref_header(n/2);
+ etp->next = BIF_P->off_heap.first;
+ etp->node = enp;
+ i = 0;
+#if defined(ARCH_64)
+ etp->data.ui32[i++] = n;
+#endif
+ for (j = 0; j < n; j++) {
+ etp->data.ui32[i] = refn[j];
+ i++;
+ }
+
+ BIF_P->off_heap.first = (struct erl_off_heap_header*) etp;
+ res = make_external_ref(etp);
+ }
+
+ BIF_RET(res);
+
+ bad:
+ BIF_ERROR(BIF_P, BADARG);
+}
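On 64-bit builds the external-ref thing stores the 32-bit count followed by the n 32-bit ref numbers, so the count slot must be consumed (i incremented) before the numbers follow, and n/2 + 1 full words cover the 1 + n slots, which is what the hsz arithmetic above reserves. A standalone sketch of that packing (plain arrays; the pointer pun is for illustration and would need aliasing care in production code):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_REF_NUMBERS 5

    int main(void)
    {
        uint32_t refn[MAX_REF_NUMBERS] = { 7, 11, 13 };
        int n = 3, i = 0, j;
        /* n/2 + 1 64-bit words hold the 1 + n 32-bit slots used below. */
        uint64_t words[MAX_REF_NUMBERS / 2 + 1] = { 0 };
        uint32_t *ui32 = (uint32_t *) words;

        ui32[i++] = (uint32_t) n;            /* count goes in slot 0 */
        for (j = 0; j < n; j++)
            ui32[i++] = refn[j];             /* then the ref numbers */

        printf("%d numbers packed into %d words\n", n, n / 2 + 1);
        return 0;
    }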
+
+
/**********************************************************************/
BIF_RETTYPE group_leader_0(BIF_ALIST_0)
@@ -4251,16 +4422,38 @@ BIF_RETTYPE group_leader_2(BIF_ALIST_2)
new_member->group_leader = BIF_ARG_1;
else {
locks &= ~ERTS_PROC_LOCK_STATUS;
- erts_smp_proc_unlock(new_member, ERTS_PROC_LOCK_STATUS);
- new_member->group_leader = STORE_NC_IN_PROC(new_member,
- BIF_ARG_1);
+ erts_proc_unlock(new_member, ERTS_PROC_LOCK_STATUS);
+ if (new_member == BIF_P
+ || !(erts_atomic32_read_nob(&new_member->state)
+ & ERTS_PSFLG_DIRTY_RUNNING)) {
+ new_member->group_leader = STORE_NC_IN_PROC(new_member,
+ BIF_ARG_1);
+ }
+ else {
+ ErlHeapFragment *bp;
+ Eterm *hp;
+ /*
+ * The other process is executing on a dirty scheduler,
+ * so we are not allowed to write to its heap.
+ * Store the value in a heap fragment instead.
+ */
+
+ bp = new_message_buffer(NC_HEAP_SIZE(BIF_ARG_1));
+ hp = bp->mem;
+ new_member->group_leader = STORE_NC(&hp,
+ &new_member->off_heap,
+ BIF_ARG_1);
+ bp->next = new_member->mbuf;
+ new_member->mbuf = bp;
+ new_member->mbuf_sz += bp->used_size;
+ }
}
}
if (new_member == BIF_P)
locks &= ~ERTS_PROC_LOCK_MAIN;
if (locks)
- erts_smp_proc_unlock(new_member, locks);
+ erts_proc_unlock(new_member, locks);
if (await_x) {
/* Wait for new_member to terminate; then badarg */
@@ -4285,59 +4478,52 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
Sint n;
if (BIF_ARG_1 == am_multi_scheduling) {
- if (BIF_ARG_2 == am_block || BIF_ARG_2 == am_unblock) {
-#ifndef ERTS_SMP
- BIF_RET(am_disabled);
-#else
- if (erts_no_schedulers == 1)
- BIF_RET(am_disabled);
- else {
- switch (erts_block_multi_scheduling(BIF_P,
- ERTS_PROC_LOCK_MAIN,
- BIF_ARG_2 == am_block,
- 0)) {
- case ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED:
- BIF_RET(am_blocked);
- case ERTS_SCHDLR_SSPND_YIELD_DONE_MSCHED_BLOCKED:
- ERTS_BIF_YIELD_RETURN_X(BIF_P, am_blocked,
- am_multi_scheduling);
- case ERTS_SCHDLR_SSPND_DONE:
- BIF_RET(am_enabled);
- case ERTS_SCHDLR_SSPND_YIELD_RESTART:
- ERTS_VBUMP_ALL_REDS(BIF_P);
- BIF_TRAP2(bif_export[BIF_system_flag_2],
- BIF_P, BIF_ARG_1, BIF_ARG_2);
- case ERTS_SCHDLR_SSPND_YIELD_DONE:
- ERTS_BIF_YIELD_RETURN_X(BIF_P, am_enabled,
- am_multi_scheduling);
- case ERTS_SCHDLR_SSPND_EINVAL:
- goto error;
- default:
- ASSERT(0);
- BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
- break;
- }
- }
-#endif
+ if (BIF_ARG_2 == am_block || BIF_ARG_2 == am_unblock
+ || BIF_ARG_2 == am_block_normal || BIF_ARG_2 == am_unblock_normal) {
+ int block = (BIF_ARG_2 == am_block
+ || BIF_ARG_2 == am_block_normal);
+ int normal = (BIF_ARG_2 == am_block_normal
+ || BIF_ARG_2 == am_unblock_normal);
+ switch (erts_block_multi_scheduling(BIF_P,
+ ERTS_PROC_LOCK_MAIN,
+ block,
+ normal,
+ 0)) {
+ case ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED:
+ BIF_RET(am_blocked);
+ case ERTS_SCHDLR_SSPND_DONE_NMSCHED_BLOCKED:
+ BIF_RET(am_blocked_normal);
+ case ERTS_SCHDLR_SSPND_YIELD_DONE_MSCHED_BLOCKED:
+ ERTS_BIF_YIELD_RETURN_X(BIF_P, am_blocked,
+ am_multi_scheduling);
+ case ERTS_SCHDLR_SSPND_YIELD_DONE_NMSCHED_BLOCKED:
+ ERTS_BIF_YIELD_RETURN_X(BIF_P, am_blocked_normal,
+ am_multi_scheduling);
+ case ERTS_SCHDLR_SSPND_DONE:
+ BIF_RET(am_enabled);
+ case ERTS_SCHDLR_SSPND_YIELD_RESTART:
+ ERTS_VBUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(bif_export[BIF_system_flag_2],
+ BIF_P, BIF_ARG_1, BIF_ARG_2);
+ case ERTS_SCHDLR_SSPND_YIELD_DONE:
+ ERTS_BIF_YIELD_RETURN_X(BIF_P, am_enabled,
+ am_multi_scheduling);
+ case ERTS_SCHDLR_SSPND_EINVAL:
+ goto error;
+ default:
+ ASSERT(0);
+ BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
+ break;
+ }
}
} else if (BIF_ARG_1 == am_schedulers_online) {
-#ifndef ERTS_SMP
- if (BIF_ARG_2 != make_small(1))
- goto error;
- else
- BIF_RET(make_small(1));
-#else
Sint old_no;
if (!is_small(BIF_ARG_2))
goto error;
switch (erts_set_schedulers_online(BIF_P,
ERTS_PROC_LOCK_MAIN,
signed_val(BIF_ARG_2),
- &old_no
-#ifdef ERTS_DIRTY_SCHEDULERS
- , 0
-#endif
- )) {
+ &old_no, 0)) {
case ERTS_SCHDLR_SSPND_DONE:
BIF_RET(make_small(old_no));
case ERTS_SCHDLR_SSPND_YIELD_RESTART:
@@ -4354,7 +4540,6 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
break;
}
-#endif
} else if (BIF_ARG_1 == am_fullsweep_after) {
Uint16 nval;
Uint oval;
@@ -4362,7 +4547,7 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
goto error;
}
nval = (n > (Sint) ((Uint16) -1)) ? ((Uint16) -1) : ((Uint16) n);
- oval = (Uint) erts_smp_atomic32_xchg_nob(&erts_max_gen_gcs,
+ oval = (Uint) erts_atomic32_xchg_nob(&erts_max_gen_gcs,
(erts_aint32_t) nval);
BIF_RET(make_small(oval));
} else if (BIF_ARG_1 == am_min_heap_size) {
@@ -4372,13 +4557,13 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
goto error;
}
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
H_MIN_SIZE = erts_next_heap_size(n, 0);
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(make_small(oval));
} else if (BIF_ARG_1 == am_min_bin_vheap_size) {
@@ -4388,15 +4573,40 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
goto error;
}
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
BIN_VH_MIN_SIZE = erts_next_heap_size(n, 0);
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(make_small(oval));
+ } else if (BIF_ARG_1 == am_max_heap_size) {
+
+ Eterm *hp, old_value;
+ Uint sz = 0, max_heap_size, max_heap_flags;
+
+ if (!erts_max_heap_size(BIF_ARG_2, &max_heap_size, &max_heap_flags))
+ goto error;
+
+ if (max_heap_size < H_MIN_SIZE && max_heap_size != 0)
+ goto error;
+
+ erts_max_heap_size_map(H_MAX_SIZE, H_MAX_FLAGS, NULL, &sz);
+ hp = HAlloc(BIF_P, sz);
+ old_value = erts_max_heap_size_map(H_MAX_SIZE, H_MAX_FLAGS, &hp, NULL);
+
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
+
+ H_MAX_SIZE = max_heap_size;
+ H_MAX_FLAGS = max_heap_flags;
+
+ erts_thr_progress_unblock();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+
+ BIF_RET(old_value);
} else if (BIF_ARG_1 == am_display_items) {
int oval = display_items;
if (!is_small(BIF_ARG_2) || (n = signed_val(BIF_ARG_2)) < 0) {
@@ -4417,17 +4627,39 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
} else if (BIF_ARG_1 == am_trace_control_word) {
BIF_RET(db_set_trace_control_word(BIF_P, BIF_ARG_2));
} else if (BIF_ARG_1 == am_sequential_tracer) {
- Eterm old_value = erts_set_system_seq_tracer(BIF_P,
- ERTS_PROC_LOCK_MAIN,
- BIF_ARG_2);
- if (old_value != THE_NON_VALUE) {
- BIF_RET(old_value);
- }
+ ErtsTracer new_seq_tracer, old_seq_tracer;
+ Eterm ret;
+
+ if (BIF_ARG_2 == am_false)
+ new_seq_tracer = erts_tracer_nil;
+ else
+ new_seq_tracer = erts_term_to_tracer(THE_NON_VALUE, BIF_ARG_2);
+
+ if (new_seq_tracer == THE_NON_VALUE)
+ goto error;
+
+ old_seq_tracer = erts_set_system_seq_tracer(BIF_P,
+ ERTS_PROC_LOCK_MAIN,
+ new_seq_tracer);
+
+ ERTS_TRACER_CLEAR(&new_seq_tracer);
+
+ if (old_seq_tracer == THE_NON_VALUE)
+ goto error;
+
+ if (ERTS_TRACER_IS_NIL(old_seq_tracer))
+ BIF_RET(am_false);
+
+ ret = erts_tracer_to_term(BIF_P, old_seq_tracer);
+
+ ERTS_TRACER_CLEAR(&old_seq_tracer);
+
+ BIF_RET(ret);
} else if (BIF_ARG_1 == make_small(1)) {
int i, max;
- ErlMessage* mp;
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ ErtsMessage* mp;
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
max = erts_ptab_max(&erts_proc);
for (i = 0; i < max; i++) {
@@ -4440,7 +4672,7 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
#endif
p->seq_trace_clock = 0;
p->seq_trace_lastcnt = 0;
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p);
+ ERTS_MSGQ_MV_INQ2PRIVQ(p);
mp = p->msg.first;
while(mp != NULL) {
#ifdef USE_VM_PROBES
@@ -4453,23 +4685,22 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
}
}
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(am_true);
} else if (BIF_ARG_1 == am_scheduler_wall_time) {
if (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false) {
erts_aint32_t new = BIF_ARG_2 == am_true ? 1 : 0;
- erts_aint32_t old = erts_smp_atomic32_xchg_nob(&sched_wall_time,
+ erts_aint32_t old = erts_atomic32_xchg_nob(&sched_wall_time,
new);
- Eterm ref = erts_sched_wall_time_request(BIF_P, 1, new);
+ Eterm ref = erts_sched_wall_time_request(BIF_P, 1, new, 0, 0);
ASSERT(is_value(ref));
BIF_TRAP2(await_sched_wall_time_mod_trap,
BIF_P,
ref,
old ? am_true : am_false);
}
-#if defined(ERTS_SMP) && defined(ERTS_DIRTY_SCHEDULERS)
} else if (BIF_ARG_1 == am_dirty_cpu_schedulers_online) {
Sint old_no;
if (!is_small(BIF_ARG_2))
@@ -4495,6 +4726,52 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
break;
}
+ } else if (BIF_ARG_1 == am_time_offset
+ && ERTS_IS_ATOM_STR("finalize", BIF_ARG_2)) {
+ ErtsTimeOffsetState res;
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ res = erts_finalize_time_offset();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ switch (res) {
+ case ERTS_TIME_OFFSET_PRELIMINARY: {
+ DECL_AM(preliminary);
+ BIF_RET(AM_preliminary);
+ }
+ case ERTS_TIME_OFFSET_FINAL: {
+ DECL_AM(final);
+ BIF_RET(AM_final);
+ }
+ case ERTS_TIME_OFFSET_VOLATILE: {
+ DECL_AM(volatile);
+ BIF_RET(AM_volatile);
+ }
+ default:
+ ERTS_INTERNAL_ERROR("Unknown state");
+ }
+#ifdef ERTS_ENABLE_MSACC
+ } else if (BIF_ARG_1 == am_microstate_accounting) {
+ Eterm threads;
+ if (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false) {
+ erts_aint32_t new = BIF_ARG_2 == am_true ? ERTS_MSACC_ENABLE : ERTS_MSACC_DISABLE;
+ erts_aint32_t old = erts_atomic32_xchg_nob(&msacc, new);
+ Eterm ref = erts_msacc_request(BIF_P, new, &threads);
+ if (is_non_value(ref))
+ BIF_RET(old ? am_true : am_false);
+ BIF_TRAP3(await_msacc_mod_trap,
+ BIF_P,
+ ref,
+ old ? am_true : am_false,
+ threads);
+ } else if (BIF_ARG_2 == am_reset) {
+ Eterm ref = erts_msacc_request(BIF_P, ERTS_MSACC_RESET, &threads);
+ erts_aint32_t old = erts_atomic32_read_nob(&msacc);
+ ASSERT(is_value(ref));
+ BIF_TRAP3(await_msacc_mod_trap,
+ BIF_P,
+ ref,
+ old ? am_true : am_false,
+ threads);
+ }
#endif
} else if (ERTS_IS_ATOM_STR("scheduling_statistics", BIF_ARG_1)) {
int what;
@@ -4506,9 +4783,9 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
what = ERTS_SCHED_STAT_MODIFY_CLEAR;
else
goto error;
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_sched_stat_modify(what);
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(am_true);
} else if (ERTS_IS_ATOM_STR("internal_cpu_topology", BIF_ARG_1)) {
Eterm res = erts_set_cpu_topology(BIF_P, BIF_ARG_2);
@@ -4537,25 +4814,6 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
/**********************************************************************/
-BIF_RETTYPE hash_2(BIF_ALIST_2)
-{
- Uint32 hash;
- Sint range;
-
- if (is_not_small(BIF_ARG_2)) {
- BIF_ERROR(BIF_P, BADARG);
- }
- if ((range = signed_val(BIF_ARG_2)) <= 0) { /* [1..MAX_SMALL] */
- BIF_ERROR(BIF_P, BADARG);
- }
-#if defined(ARCH_64) && !HALFWORD_HEAP
- if (range > ((1L << 27) - 1))
- BIF_ERROR(BIF_P, BADARG);
-#endif
- hash = make_broken_hash(BIF_ARG_1);
- BIF_RET(make_small(1 + (hash % range))); /* [1..range] */
-}
-
BIF_RETTYPE phash_2(BIF_ALIST_2)
{
Uint32 hash;
@@ -4620,7 +4878,7 @@ BIF_RETTYPE phash2_2(BIF_ALIST_2)
/*
* Return either a small or a big. Use the heap for bigs if there is room.
*/
-#if defined(ARCH_64) && !HALFWORD_HEAP
+#if defined(ARCH_64)
BIF_RET(make_small(final_hash));
#else
if (IS_USMALL(0, final_hash)) {
@@ -4647,7 +4905,7 @@ BIF_RETTYPE bump_reductions_1(BIF_ALIST_1)
}
BIF_RETTYPE erts_internal_cmp_term_2(BIF_ALIST_2) {
- int res = CMP_TERM(BIF_ARG_1,BIF_ARG_2);
+ Sint res = CMP_TERM(BIF_ARG_1,BIF_ARG_2);
/* ensure -1, 0, 1 result */
if (res < 0) {
@@ -4660,29 +4918,25 @@ BIF_RETTYPE erts_internal_cmp_term_2(BIF_ALIST_2) {
/*
* Processes doing yield on return in a bif ends up in bif_return_trap().
*/
-static BIF_RETTYPE bif_return_trap(
-#ifdef DEBUG
- BIF_ALIST_2
-#else
- BIF_ALIST_1
-#endif
- )
+static BIF_RETTYPE bif_return_trap(BIF_ALIST_2)
{
-#ifdef DEBUG
+ Eterm res = BIF_ARG_1;
+
switch (BIF_ARG_2) {
- case am_multi_scheduling:
-#ifdef ERTS_SMP
- erts_dbg_multi_scheduling_return_trap(BIF_P, BIF_ARG_1);
-#endif
- break;
- case am_schedulers_online:
+ case am_multi_scheduling: {
+ int msb = erts_is_multi_scheduling_blocked();
+ if (msb > 0)
+ res = am_blocked;
+ else if (msb < 0)
+ res = am_blocked_normal;
+ else
+ ERTS_INTERNAL_ERROR("Unexpected multi scheduling block state");
break;
+ }
default:
break;
}
-#endif
-
- BIF_RET(BIF_ARG_1);
+ BIF_RET(res);
}
/*
@@ -4703,22 +4957,22 @@ static ERTS_INLINE int
skip_current_msgq(Process *c_p)
{
int res;
-#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
erts_proc_lc_chk_only_proc_main(c_p);
#endif
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
if (ERTS_PROC_PENDING_EXIT(c_p)) {
KILL_CATCHES(c_p);
c_p->freason = EXC_EXIT;
res = 0;
}
else {
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
+ ERTS_MSGQ_MV_INQ2PRIVQ(c_p);
c_p->msg.save = c_p->msg.last;
res = 1;
}
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
return res;
}
@@ -4767,46 +5021,45 @@ erts_bif_prep_await_proc_exit_apply_trap(Process *c_p,
Export bif_return_trap_export;
void erts_init_trap_export(Export* ep, Eterm m, Eterm f, Uint a,
- Eterm (*bif)(BIF_ALIST_0))
+ Eterm (*bif)(BIF_ALIST))
{
int i;
sys_memset((void *) ep, 0, sizeof(Export));
for (i=0; i<ERTS_NUM_CODE_IX; i++) {
- ep->addressv[i] = &ep->code[3];
+ ep->addressv[i] = ep->beam;
}
- ep->code[0] = m;
- ep->code[1] = f;
- ep->code[2] = a;
- ep->code[3] = (BeamInstr) em_apply_bif;
- ep->code[4] = (BeamInstr) bif;
+ ep->info.mfa.module = m;
+ ep->info.mfa.function = f;
+ ep->info.mfa.arity = a;
+ ep->beam[0] = BeamOpCodeAddr(op_apply_bif);
+ ep->beam[1] = (BeamInstr) bif;
}
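The hunk above is the gist of the export-entry layout change: the MFA moves into a nested info struct, the two instruction words move into beam[], and addressv[] now points at beam rather than at &code[3]. A minimal standalone sketch of the idea (hypothetical types; the real Export has more fields and ERTS_NUM_CODE_IX address entries):

    #include <stdio.h>

    typedef unsigned long BeamInstr;

    struct mfa { const char *module, *function; unsigned arity; };

    struct export {
        void *addressv[2];            /* one entry per code index */
        struct { struct mfa mfa; } info;
        BeamInstr beam[2];            /* beam[0] = opcode, beam[1] = BIF pointer */
    };

    int main(void)
    {
        struct export ep = { { 0, 0 }, { { "erlang", "bif_return_trap", 2 } }, { 0, 0 } };
        int i;
        ep.beam[0] = 0xB1F;           /* stand-in for BeamOpCodeAddr(op_apply_bif) */
        ep.beam[1] = (BeamInstr) 0;   /* stand-in for the C BIF function pointer */
        for (i = 0; i < 2; i++)
            ep.addressv[i] = ep.beam; /* was &ep->code[3] before this change */
        printf("%s:%s/%u dispatches via %p\n", ep.info.mfa.module,
               ep.info.mfa.function, ep.info.mfa.arity, ep.addressv[0]);
        return 0;
    }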
void erts_init_bif(void)
{
- reference0 = 0;
- reference1 = 0;
- reference2 = 0;
-
- erts_smp_spinlock_init(&make_ref_lock, "make_ref");
- erts_smp_mtx_init(&ports_snapshot_mtx, "ports_snapshot");
- erts_smp_atomic_init_nob(&erts_dead_ports_ptr, (erts_aint_t) NULL);
-
/*
- * bif_return_trap/1 is a hidden BIF that bifs that need to
- * yield the calling process traps to. The only thing it does:
- * return the value passed as argument.
+     * bif_return_trap/2 is a hidden BIF that BIFs trap to when
+     * they need to yield the calling process.
*/
- erts_init_trap_export(&bif_return_trap_export, am_erlang, am_bif_return_trap,
-#ifdef DEBUG
- 2
-#else
- 1
-#endif
- , &bif_return_trap);
+ erts_init_trap_export(&bif_return_trap_export,
+ am_erlang, am_bif_return_trap, 2,
+ &bif_return_trap);
- flush_monitor_message_trap = erts_export_put(am_erlang,
- am_flush_monitor_message,
- 2);
+ erts_await_result = erts_export_put(am_erts_internal,
+ am_await_result,
+ 1);
+
+ erts_init_trap_export(&dsend_continue_trap_export,
+ am_erts_internal, am_dsend_continue_trap, 1,
+ dsend_continue_trap_1);
+
+ flush_monitor_messages_trap = erts_export_put(am_erts_internal,
+ am_flush_monitor_messages,
+ 3);
+
+ erts_convert_time_unit_trap = erts_export_put(am_erlang,
+ am_convert_time_unit,
+ 3);
set_cpu_topology_trap = erts_export_put(am_erlang,
am_set_cpu_topology,
@@ -4818,10 +5071,310 @@ void erts_init_bif(void)
await_port_send_result_trap
= erts_export_put(am_erts_internal, am_await_port_send_result, 3);
await_sched_wall_time_mod_trap
- = erts_export_put(am_erlang, am_await_sched_wall_time_modifications, 2);
- erts_smp_atomic32_init_nob(&sched_wall_time, 0);
+ = erts_export_put(am_erlang, am_await_sched_wall_time_modifications, 2);
+ await_msacc_mod_trap
+ = erts_export_put(am_erts_internal, am_await_microstate_accounting_modifications, 3);
+
+ erts_atomic32_init_nob(&sched_wall_time, 0);
+ erts_atomic32_init_nob(&msacc, ERTS_MSACC_IS_ENABLED());
+}
+
+/*
+ * Scheduling of BIFs via NifExport...
+ */
+#define ERTS_WANT_NFUNC_SCHED_INTERNALS__
+#include "erl_nfunc_sched.h"
+
+#define ERTS_SCHED_BIF_TRAP_MARKER ((void *) (UWord) 1)
+
+static ERTS_INLINE void
+schedule(Process *c_p, Process *dirty_shadow_proc,
+ ErtsCodeMFA *mfa, BeamInstr *pc,
+ ErtsBifFunc dfunc, void *ifunc,
+ Eterm module, Eterm function,
+ int argc, Eterm *argv)
+{
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p));
+ (void) erts_nif_export_schedule(c_p, dirty_shadow_proc,
+ mfa, pc, BeamOpCodeAddr(op_apply_bif),
+ dfunc, ifunc,
+ module, function,
+ argc, argv);
+}
+
+
+static BIF_RETTYPE dirty_bif_result(BIF_ALIST_1)
+{
+ NifExport *nep = (NifExport *) ERTS_PROC_GET_NIF_TRAP_EXPORT(BIF_P);
+ erts_nif_export_restore(BIF_P, nep, BIF_ARG_1);
+ BIF_RET(BIF_ARG_1);
+}
+
+static BIF_RETTYPE dirty_bif_trap(BIF_ALIST)
+{
+ NifExport *nep = (NifExport *) ERTS_PROC_GET_NIF_TRAP_EXPORT(BIF_P);
+
+ /*
+     * Arity and argument registers were already set
+     * correctly by the call to dirty_bif_trap()...
+ */
+
+ ASSERT(BIF_P->arity == nep->exp.info.mfa.arity);
+
+ erts_nif_export_restore(BIF_P, nep, THE_NON_VALUE);
+
+ BIF_P->i = (BeamInstr *) nep->func;
+ BIF_P->freason = TRAP;
+ return THE_NON_VALUE;
+}
+
+static BIF_RETTYPE dirty_bif_exception(BIF_ALIST_2)
+{
+ Eterm freason;
+
+ ASSERT(is_small(BIF_ARG_1));
+
+ freason = signed_val(BIF_ARG_1);
+
+ /* Restore orig info for error and clear nif export in handle_error() */
+ freason |= EXF_RESTORE_NIF;
+
+ BIF_P->fvalue = BIF_ARG_2;
+
+ BIF_ERROR(BIF_P, freason);
+}
+
+
+static BIF_RETTYPE call_bif(Process *c_p, Eterm *reg, BeamInstr *I);
+
+BIF_RETTYPE
+erts_schedule_bif(Process *proc,
+ Eterm *argv,
+ BeamInstr *i,
+ ErtsBifFunc bif,
+ ErtsSchedType sched_type,
+ Eterm mod,
+ Eterm func,
+ int argc)
+{
+ Process *c_p, *dirty_shadow_proc;
+ ErtsCodeMFA *mfa;
+
+ if (proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
+ dirty_shadow_proc = proc;
+ c_p = proc->next;
+ ASSERT(c_p->common.id == dirty_shadow_proc->common.id);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ }
+ else
+ {
+ dirty_shadow_proc = NULL;
+ c_p = proc;
+ }
+
+ if (!ERTS_PROC_IS_EXITING(c_p)) {
+ Export *exp;
+ BifFunction dbif, ibif;
+ BeamInstr *pc;
+
+ /*
+ * dbif - direct bif
+ * ibif - indirect bif
+ */
+
+ erts_aint32_t set, mask;
+ mask = (ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_IO_PROC);
+ switch (sched_type) {
+ case ERTS_SCHED_DIRTY_CPU:
+ set = ERTS_PSFLG_DIRTY_CPU_PROC;
+ dbif = bif;
+ ibif = NULL;
+ break;
+ case ERTS_SCHED_DIRTY_IO:
+ set = ERTS_PSFLG_DIRTY_IO_PROC;
+ dbif = bif;
+ ibif = NULL;
+ break;
+ case ERTS_SCHED_NORMAL:
+ default:
+ set = 0;
+ dbif = call_bif;
+ ibif = bif;
+ break;
+ }
+
+ (void) erts_atomic32_read_bset_nob(&c_p->state, mask, set);
+
+ if (i == NULL) {
+ ERTS_INTERNAL_ERROR("Missing instruction pointer");
+ }
+#ifdef HIPE
+ else if (proc->flags & F_HIPE_MODE) {
+ /* Pointer to bif export in i */
+ exp = (Export *) i;
+ pc = c_p->cp;
+ mfa = &exp->info.mfa;
+ }
+#endif
+ else if (BeamIsOpCode(*i, op_call_bif_e)) {
+ /* Pointer to bif export in i+1 */
+ exp = (Export *) i[1];
+ pc = i;
+ mfa = &exp->info.mfa;
+ }
+ else if (BeamIsOpCode(*i, op_apply_bif)) {
+ /* Pointer to bif in i+1, and mfa in i-3 */
+ pc = c_p->cp;
+ mfa = erts_code_to_codemfa(i);
+ }
+ else {
+ ERTS_INTERNAL_ERROR("erts_schedule_bif() called "
+ "from unexpected instruction");
+ }
+ ASSERT(bif);
+
+ if (argc < 0) { /* reschedule original call */
+ mod = mfa->module;
+ func = mfa->function;
+ argc = (int) mfa->arity;
+ }
+
+ schedule(c_p, dirty_shadow_proc, mfa, pc, dbif, ibif,
+ mod, func, argc, argv);
+ }
+
+ if (dirty_shadow_proc)
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ return THE_NON_VALUE;
}
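The dbif/ibif split above decides how the scheduled BIF will eventually run: on a dirty scheduler the BIF itself is installed as the direct function, while on a normal scheduler the call_bif wrapper runs first and forwards to the BIF. A standalone sketch of that selection (toy types, not ERTS code):

    #include <stdio.h>

    typedef int (*bif_fn)(int);

    static int my_bif(int x) { return x + 1; }

    /* On a normal scheduler the wrapper runs first and forwards to the
     * real BIF; on a dirty scheduler the real BIF is installed directly. */
    static bif_fn g_indirect;
    static int call_bif_wrapper(int x) { return g_indirect(x); }

    static void pick(int dirty, bif_fn bif, bif_fn *dbif, bif_fn *ibif)
    {
        if (dirty) { *dbif = bif;              *ibif = NULL; }
        else       { *dbif = call_bif_wrapper; *ibif = bif;  }
    }

    int main(void)
    {
        bif_fn dbif, ibif;
        pick(0, my_bif, &dbif, &ibif);    /* normal scheduling */
        g_indirect = ibif;
        printf("%d\n", dbif(41));         /* wrapper -> my_bif -> 42 */
        pick(1, my_bif, &dbif, &ibif);    /* dirty scheduling */
        printf("%d\n", dbif(41));         /* my_bif directly -> 42 */
        return 0;
    }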
+static BIF_RETTYPE
+call_bif(Process *c_p, Eterm *reg, BeamInstr *I)
+{
+ NifExport *nep = ERTS_I_BEAM_OP_TO_NIF_EXPORT(I);
+ ErtsBifFunc bif = (ErtsBifFunc) nep->func;
+ BIF_RETTYPE ret;
+
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()));
+
+ nep->func = ERTS_SCHED_BIF_TRAP_MARKER;
+
+ ASSERT(bif);
+
+ ret = (*bif)(c_p, reg, I);
+
+ if (is_value(ret))
+ erts_nif_export_restore(c_p, nep, ret);
+ else if (c_p->freason != TRAP)
+ c_p->freason |= EXF_RESTORE_NIF; /* restore in handle_error() */
+ else if (nep->func == ERTS_SCHED_BIF_TRAP_MARKER) {
+ /* BIF did an ordinary trap... */
+ erts_nif_export_restore(c_p, nep, ret);
+ }
+ /* else:
+ * BIF rescheduled itself using erts_schedule_bif().
+ */
+
+ return ret;
+}
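The ERTS_SCHED_BIF_TRAP_MARKER dance above is a sentinel trick: the marker is planted in nep->func before the BIF runs; a BIF that reschedules itself overwrites that slot, so finding the marker intact afterwards means the BIF did an ordinary trap. A standalone sketch (hypothetical names):

    #include <stdio.h>

    #define TRAP_MARKER ((void *) 1)   /* like ERTS_SCHED_BIF_TRAP_MARKER */

    struct sched_state { void *func; };

    /* A BIF that reschedules itself overwrites func; a BIF that does an
     * ordinary trap leaves the planted marker untouched. */
    static void bif_that_ordinary_traps(struct sched_state *st) { (void) st; }
    static void bif_that_reschedules(struct sched_state *st)
    { st->func = (void *) bif_that_reschedules; }

    static const char *classify(struct sched_state *st,
                                void (*bif)(struct sched_state *))
    {
        st->func = TRAP_MARKER;        /* plant the sentinel before the call */
        bif(st);
        return st->func == TRAP_MARKER ? "ordinary trap" : "rescheduled itself";
    }

    int main(void)
    {
        struct sched_state st;
        printf("%s\n", classify(&st, bif_that_ordinary_traps));
        printf("%s\n", classify(&st, bif_that_reschedules));
        return 0;
    }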
+
+
+int
+erts_call_dirty_bif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm *reg)
+{
+ BIF_RETTYPE result;
+ int exiting;
+ Process *dirty_shadow_proc;
+ ErtsBifFunc bf;
+ NifExport *nep;
+#ifdef DEBUG
+ Eterm *c_p_htop;
+ erts_aint32_t state;
+
+ ASSERT(!c_p->scheduler_data);
+ state = erts_atomic32_read_nob(&c_p->state);
+ ASSERT((state & ERTS_PSFLG_DIRTY_RUNNING)
+ && !(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)));
+ ASSERT(esdp);
+
+#endif
+
+ nep = ERTS_I_BEAM_OP_TO_NIF_EXPORT(I);
+ ASSERT(nep == ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p));
+
+ nep->func = ERTS_SCHED_BIF_TRAP_MARKER;
+
+ bf = (ErtsBifFunc) I[1];
+
+ erts_atomic32_read_band_mb(&c_p->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_IO_PROC));
+
+ dirty_shadow_proc = erts_make_dirty_shadow_proc(esdp, c_p);
+
+ dirty_shadow_proc->freason = c_p->freason;
+ dirty_shadow_proc->fvalue = c_p->fvalue;
+ dirty_shadow_proc->ftrace = c_p->ftrace;
+ dirty_shadow_proc->cp = c_p->cp;
+ dirty_shadow_proc->i = c_p->i;
+
+#ifdef DEBUG
+ c_p_htop = c_p->htop;
+#endif
+
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ result = (*bf)(dirty_shadow_proc, reg, I);
+
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ ASSERT(c_p_htop == c_p->htop);
+ ASSERT(dirty_shadow_proc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
+ ASSERT(dirty_shadow_proc->next == c_p);
+
+ exiting = ERTS_PROC_IS_EXITING(c_p);
+
+ if (!exiting) {
+ if (is_value(result))
+ schedule(c_p, dirty_shadow_proc, NULL, NULL, dirty_bif_result,
+ NULL, am_erts_internal, am_dirty_bif_result, 1, &result);
+ else if (dirty_shadow_proc->freason != TRAP) {
+ Eterm argv[2];
+ ASSERT(dirty_shadow_proc->freason <= MAX_SMALL);
+ argv[0] = make_small(dirty_shadow_proc->freason);
+ argv[1] = dirty_shadow_proc->fvalue;
+ schedule(c_p, dirty_shadow_proc, NULL, NULL,
+ dirty_bif_exception, NULL, am_erts_internal,
+ am_dirty_bif_exception, 2, argv);
+ }
+ else if (nep->func == ERTS_SCHED_BIF_TRAP_MARKER) {
+ /* Dirty BIF did an ordinary trap... */
+ ASSERT(!(erts_atomic32_read_nob(&c_p->state)
+ & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)));
+ schedule(c_p, dirty_shadow_proc, NULL, NULL,
+ dirty_bif_trap, (void *) dirty_shadow_proc->i,
+ am_erts_internal, am_dirty_bif_trap,
+ dirty_shadow_proc->arity, reg);
+ }
+ /* else:
+ * BIF rescheduled itself using erts_schedule_bif().
+ */
+ c_p->freason = dirty_shadow_proc->freason;
+ c_p->fvalue = dirty_shadow_proc->fvalue;
+ c_p->ftrace = dirty_shadow_proc->ftrace;
+ c_p->cp = dirty_shadow_proc->cp;
+ c_p->i = dirty_shadow_proc->i;
+ c_p->arity = dirty_shadow_proc->arity;
+ }
+
+ erts_flush_dirty_shadow_proc(dirty_shadow_proc);
+
+ return exiting;
+}
+
+
+
#ifdef HARDDEBUG
/*
You'll need this line in bif.tab to be able to use this debug bif
@@ -5076,12 +5629,10 @@ BIF_RETTYPE dt_restore_tag_1(BIF_ALIST_1)
SEQ_TRACE_TOKEN(BIF_P) = am_have_dt_utag;
}
}
-#else
+#else
if (BIF_ARG_1 != am_true) {
BIF_ERROR(BIF_P,BADARG);
}
#endif
BIF_RET(am_true);
}
-
-
diff --git a/erts/emulator/beam/bif.h b/erts/emulator/beam/bif.h
index 51b77a95ed..a2bc883dbe 100644
--- a/erts/emulator/beam/bif.h
+++ b/erts/emulator/beam/bif.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -20,20 +21,42 @@
#ifndef __BIF_H__
#define __BIF_H__
+extern Export *erts_await_result;
extern Export* erts_format_cpu_topology_trap;
+extern Export *erts_convert_time_unit_trap;
#define BIF_RETTYPE Eterm
#define BIF_P A__p
-#define BIF_ALIST_0 Process* A__p, Eterm* BIF__ARGS
-#define BIF_ALIST_1 Process* A__p, Eterm* BIF__ARGS
-#define BIF_ALIST_2 Process* A__p, Eterm* BIF__ARGS
-#define BIF_ALIST_3 Process* A__p, Eterm* BIF__ARGS
+#define BIF_ALIST Process* A__p, Eterm* BIF__ARGS, BeamInstr *A__I
+#define BIF_CALL_ARGS A__p, BIF__ARGS, A__I
+
+#define BIF_ALIST_0 BIF_ALIST
+#define BIF_ALIST_1 BIF_ALIST
+#define BIF_ALIST_2 BIF_ALIST
+#define BIF_ALIST_3 BIF_ALIST
+#define BIF_ALIST_4 BIF_ALIST
#define BIF_ARG_1 (BIF__ARGS[0])
#define BIF_ARG_2 (BIF__ARGS[1])
#define BIF_ARG_3 (BIF__ARGS[2])
+#define BIF_ARG_4 (BIF__ARGS[3])
+
+#define BIF_I A__I
+
+/* NBIF_* is for bif calls from native code... */
+
+#define NBIF_ALIST Process* A__p, Eterm* BIF__ARGS
+#define NBIF_CALL_ARGS A__p, BIF__ARGS
+
+#define NBIF_ALIST_0 NBIF_ALIST
+#define NBIF_ALIST_1 NBIF_ALIST
+#define NBIF_ALIST_2 NBIF_ALIST
+#define NBIF_ALIST_3 NBIF_ALIST
+#define NBIF_ALIST_4 NBIF_ALIST
+
+typedef BIF_RETTYPE (*ErtsBifFunc)(BIF_ALIST);
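With this change every BIF, regardless of arity, shares one C signature: process pointer, argument array, and the instruction pointer (which is what lets a BIF reschedule itself via erts_schedule_bif). A standalone sketch of dispatching through such a uniform signature (toy types, not the ERTS definitions):

    #include <stdio.h>

    typedef unsigned long Eterm;
    typedef unsigned long BeamInstr;
    struct process { int dummy; };

    /* Uniform BIF signature: (process, argument array, instruction ptr),
     * so the arity-specific BIF_ALIST_N macros can all expand to it. */
    typedef Eterm (*bif_fn)(struct process *, Eterm *, BeamInstr *);

    static Eterm plus_bif(struct process *p, Eterm *args, BeamInstr *i)
    {
        (void) p; (void) i;           /* i would let the BIF reschedule itself */
        return args[0] + args[1];
    }

    int main(void)
    {
        struct process p;
        Eterm args[2] = { 40, 2 };
        bif_fn f = plus_bif;
        printf("%lu\n", f(&p, args, NULL)); /* -> 42 */
        return 0;
    }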
#define ERTS_IS_PROC_OUT_OF_REDS(p) \
((p)->fcalls > 0 \
@@ -47,27 +70,30 @@ extern Export* erts_format_cpu_topology_trap;
(p)->fcalls = 0; \
else \
(p)->fcalls = -CONTEXT_REDS; \
+ ASSERT(ERTS_BIF_REDS_LEFT((p)) == 0); \
} while(0)
-
-#define ERTS_VBUMP_ALL_REDS(p) \
+#define ERTS_VBUMP_ALL_REDS_INTERNAL(p, fcalls) \
do { \
if (!ERTS_PROC_GET_SAVED_CALLS_BUF((p))) { \
- if ((p)->fcalls > 0) \
- ERTS_PROC_GET_SCHDATA((p))->virtual_reds += (p)->fcalls; \
- (p)->fcalls = 0; \
+ if ((fcalls) > 0) \
+ erts_proc_sched_data((p))->virtual_reds += (fcalls); \
+ (fcalls) = 0; \
} \
else { \
- if ((p)->fcalls > -CONTEXT_REDS) \
- ERTS_PROC_GET_SCHDATA((p))->virtual_reds \
- += ((p)->fcalls - (-CONTEXT_REDS)); \
- (p)->fcalls = -CONTEXT_REDS; \
+ if ((fcalls) > -CONTEXT_REDS) \
+ erts_proc_sched_data((p))->virtual_reds \
+ += ((fcalls) - (-CONTEXT_REDS)); \
+ (fcalls) = -CONTEXT_REDS; \
} \
} while(0)
+#define ERTS_VBUMP_ALL_REDS(p) \
+ ERTS_VBUMP_ALL_REDS_INTERNAL((p), (p)->fcalls)
+
#define BUMP_REDS(p, gc) do { \
ASSERT(p); \
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));\
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));\
(p)->fcalls -= (gc); \
if ((p)->fcalls < 0) { \
if (!ERTS_PROC_GET_SAVED_CALLS_BUF((p))) \
@@ -83,32 +109,56 @@ do { \
if (!ERTS_PROC_GET_SAVED_CALLS_BUF((p))) { \
if ((p)->fcalls >= reds) { \
(p)->fcalls -= reds; \
- ERTS_PROC_GET_SCHDATA((p))->virtual_reds += reds; \
+ erts_proc_sched_data((p))->virtual_reds += reds; \
} \
else { \
if ((p)->fcalls > 0) \
- ERTS_PROC_GET_SCHDATA((p))->virtual_reds += (p)->fcalls;\
+ erts_proc_sched_data((p))->virtual_reds += (p)->fcalls; \
(p)->fcalls = 0; \
} \
} \
else { \
if ((p)->fcalls >= reds - CONTEXT_REDS) { \
(p)->fcalls -= reds; \
- ERTS_PROC_GET_SCHDATA((p))->virtual_reds += reds; \
+ erts_proc_sched_data((p))->virtual_reds += reds; \
} \
else { \
if ((p)->fcalls > -CONTEXT_REDS) \
- ERTS_PROC_GET_SCHDATA((p))->virtual_reds \
+ erts_proc_sched_data((p))->virtual_reds \
+= (p)->fcalls - (-CONTEXT_REDS); \
(p)->fcalls = -CONTEXT_REDS; \
} \
} \
} while(0)
-#define ERTS_BIF_REDS_LEFT(p) \
+#define ERTS_VBUMP_LEAVE_REDS_INTERNAL(P, Reds, FCalls) \
+ do { \
+ if (ERTS_PROC_GET_SAVED_CALLS_BUF((P))) { \
+ int nreds__ = ((int)(Reds)) - CONTEXT_REDS; \
+ if ((FCalls) > nreds__) { \
+ erts_proc_sched_data((P))->virtual_reds \
+ += (FCalls) - nreds__; \
+ (FCalls) = nreds__; \
+ } \
+ } \
+ else { \
+ if ((FCalls) > (Reds)) { \
+ erts_proc_sched_data((P))->virtual_reds \
+ += (FCalls) - (Reds); \
+ (FCalls) = (Reds); \
+ } \
+ } \
+ } while (0)
+
+#define ERTS_VBUMP_LEAVE_REDS(P, Reds) \
+ ERTS_VBUMP_LEAVE_REDS_INTERNAL(P, Reds, (P)->fcalls)
+
+#define ERTS_REDS_LEFT(p, FCalls) \
(ERTS_PROC_GET_SAVED_CALLS_BUF((p)) \
- ? ((p)->fcalls > -CONTEXT_REDS ? ((p)->fcalls - (-CONTEXT_REDS)) : 0)\
- : ((p)->fcalls > 0 ? (p)->fcalls : 0))
+ ? ((FCalls) > -CONTEXT_REDS ? ((FCalls) - (-CONTEXT_REDS)) : 0) \
+ : ((FCalls) > 0 ? (FCalls) : 0))
+
+#define ERTS_BIF_REDS_LEFT(p) ERTS_REDS_LEFT(p, p->fcalls)
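The reduction bookkeeping in these macros follows one rule: fcalls normally counts down from CONTEXT_REDS, but when call saving is active it counts down from 0 toward -CONTEXT_REDS, and any surplus a process gives up is credited to virtual_reds. A runnable sketch of the ERTS_VBUMP_LEAVE_REDS_INTERNAL arithmetic (standalone, hypothetical names):

    #include <stdio.h>

    #define CONTEXT_REDS 4000

    static long virtual_reds;

    /* "Leave" at most reds reductions in fcalls; move the surplus into
     * virtual_reds.  With call saving the budget is offset by -CONTEXT_REDS. */
    static void vbump_leave_reds(int saved_calls, long *fcalls, long reds)
    {
        long limit = saved_calls ? reds - CONTEXT_REDS : reds;
        if (*fcalls > limit) {
            virtual_reds += *fcalls - limit;
            *fcalls = limit;
        }
    }

    int main(void)
    {
        long fcalls = 3500;                 /* 3500 real reductions left */
        vbump_leave_reds(0, &fcalls, 100);  /* keep only 100 of them */
        printf("fcalls=%ld virtual=%ld\n", fcalls, virtual_reds); /* 100, 3400 */
        return 0;
    }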
#define BIF_RET2(x, gc) do { \
BUMP_REDS(BIF_P, (gc)); \
@@ -124,12 +174,85 @@ do { \
return THE_NON_VALUE; \
} while(0)
+#define ERTS_BIF_ERROR_TRAPPED0(Proc, Reason, Bif) \
+do { \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = &(Bif)->info.mfa; \
+ return THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_ERROR_TRAPPED1(Proc, Reason, Bif, A0) \
+do { \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = &(Bif)->info.mfa; \
+ reg[0] = (Eterm) (A0); \
+ return THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_ERROR_TRAPPED2(Proc, Reason, Bif, A0, A1) \
+do { \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = &(Bif)->info.mfa; \
+ reg[0] = (Eterm) (A0); \
+ reg[1] = (Eterm) (A1); \
+ return THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_ERROR_TRAPPED3(Proc, Reason, Bif, A0, A1, A2) \
+do { \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = &(Bif)->info.mfa; \
+ reg[0] = (Eterm) (A0); \
+ reg[1] = (Eterm) (A1); \
+ reg[2] = (Eterm) (A2); \
+ return THE_NON_VALUE; \
+} while (0)
+
#define ERTS_BIF_PREP_ERROR(Ret, Proc, Reason) \
do { \
(Proc)->freason = (Reason); \
(Ret) = THE_NON_VALUE; \
} while (0)
+#define ERTS_BIF_PREP_ERROR_TRAPPED0(Ret, Proc, Reason, Bif) \
+do { \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = &(Bif)->info.mfa; \
+ (Ret) = THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_PREP_ERROR_TRAPPED1(Ret, Proc, Reason, Bif, A0) \
+do { \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = &(Bif)->info.mfa; \
+ reg[0] = (Eterm) (A0); \
+ (Ret) = THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_PREP_ERROR_TRAPPED2(Ret, Proc, Reason, Bif, A0, A1) \
+do { \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = &(Bif)->info.mfa; \
+ reg[0] = (Eterm) (A0); \
+ reg[1] = (Eterm) (A1); \
+ (Ret) = THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_PREP_ERROR_TRAPPED3(Ret, Proc, Reason, Bif, A0, A1, A2) \
+do { \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = &(Bif)->info.mfa; \
+ reg[0] = (Eterm) (A0); \
+ reg[1] = (Eterm) (A1); \
+ reg[2] = (Eterm) (A2); \
+ (Ret) = THE_NON_VALUE; \
+} while (0)
#define ERTS_BIF_PREP_TRAP0(Ret, Trap, Proc) \
do { \
@@ -141,7 +264,7 @@ do { \
#define ERTS_BIF_PREP_TRAP1(Ret, Trap, Proc, A0) \
do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->arity = 1; \
reg[0] = (Eterm) (A0); \
(Proc)->i = (BeamInstr*) ((Trap)->addressv[erts_active_code_ix()]); \
@@ -151,7 +274,7 @@ do { \
#define ERTS_BIF_PREP_TRAP2(Ret, Trap, Proc, A0, A1) \
do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->arity = 2; \
reg[0] = (Eterm) (A0); \
reg[1] = (Eterm) (A1); \
@@ -162,7 +285,7 @@ do { \
#define ERTS_BIF_PREP_TRAP3(Ret, Trap, Proc, A0, A1, A2) \
do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->arity = 3; \
reg[0] = (Eterm) (A0); \
reg[1] = (Eterm) (A1); \
@@ -174,7 +297,7 @@ do { \
#define ERTS_BIF_PREP_TRAP3_NO_RET(Trap, Proc, A0, A1, A2)\
do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->arity = 3; \
reg[0] = (Eterm) (A0); \
reg[1] = (Eterm) (A1); \
@@ -191,7 +314,7 @@ do { \
} while(0)
#define BIF_TRAP1(Trap_, p, A0) do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((p))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((p))->x_reg_array; \
(p)->arity = 1; \
reg[0] = (A0); \
(p)->i = (BeamInstr*) ((Trap_)->addressv[erts_active_code_ix()]); \
@@ -200,7 +323,7 @@ do { \
} while(0)
#define BIF_TRAP2(Trap_, p, A0, A1) do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((p))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((p))->x_reg_array; \
(p)->arity = 2; \
reg[0] = (A0); \
reg[1] = (A1); \
@@ -210,7 +333,7 @@ do { \
} while(0)
#define BIF_TRAP3(Trap_, p, A0, A1, A2) do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((p))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((p))->x_reg_array; \
(p)->arity = 3; \
reg[0] = (A0); \
reg[1] = (A1); \
@@ -234,37 +357,20 @@ do { \
} while(0)
extern Export bif_return_trap_export;
-#ifdef DEBUG
-#define ERTS_BIF_PREP_YIELD_RETURN_X(RET, P, VAL, DEBUG_VAL) \
-do { \
- ERTS_VBUMP_ALL_REDS(P); \
- ERTS_BIF_PREP_TRAP2(RET, &bif_return_trap_export, (P), (VAL), \
- (DEBUG_VAL)); \
-} while (0)
-#else
-#define ERTS_BIF_PREP_YIELD_RETURN_X(RET, P, VAL, DEBUG_VAL) \
+#define ERTS_BIF_PREP_YIELD_RETURN_X(RET, P, VAL, OP) \
do { \
ERTS_VBUMP_ALL_REDS(P); \
- ERTS_BIF_PREP_TRAP1(RET, &bif_return_trap_export, (P), (VAL)); \
+ ERTS_BIF_PREP_TRAP2(RET, &bif_return_trap_export, (P), (VAL), (OP));\
} while (0)
-#endif
#define ERTS_BIF_PREP_YIELD_RETURN(RET, P, VAL) \
ERTS_BIF_PREP_YIELD_RETURN_X(RET, (P), (VAL), am_undefined)
-#ifdef DEBUG
-#define ERTS_BIF_YIELD_RETURN_X(P, VAL, DEBUG_VAL) \
+#define ERTS_BIF_YIELD_RETURN_X(P, VAL, OP) \
do { \
ERTS_VBUMP_ALL_REDS(P); \
- BIF_TRAP2(&bif_return_trap_export, (P), (VAL), (DEBUG_VAL)); \
+ BIF_TRAP2(&bif_return_trap_export, (P), (VAL), (OP)); \
} while (0)
-#else
-#define ERTS_BIF_YIELD_RETURN_X(P, VAL, DEBUG_VAL) \
-do { \
- ERTS_VBUMP_ALL_REDS(P); \
- BIF_TRAP1(&bif_return_trap_export, (P), (VAL)); \
-} while (0)
-#endif
#define ERTS_BIF_RETURN_YIELD(P) ERTS_VBUMP_ALL_REDS((P))
@@ -392,6 +498,89 @@ erts_bif_prep_await_proc_exit_apply_trap(Process *c_p,
Eterm args[],
int nargs);
+int erts_call_dirty_bif(ErtsSchedulerData *esdp, Process *c_p,
+ BeamInstr *I, Eterm *reg);
+
+BIF_RETTYPE
+erts_schedule_bif(Process *proc,
+ Eterm *argv,
+ BeamInstr *i,
+ ErtsBifFunc dbf,
+ ErtsSchedType sched_type,
+ Eterm mod,
+ Eterm func,
+ int argc);
+
+ERTS_GLB_INLINE BIF_RETTYPE
+erts_reschedule_bif(Process *proc,
+ Eterm *argv,
+ BeamInstr *i,
+ ErtsBifFunc dbf,
+ ErtsSchedType sched_type);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE BIF_RETTYPE
+erts_reschedule_bif(Process *proc,
+ Eterm *argv,
+ BeamInstr *i,
+ ErtsBifFunc dbf,
+ ErtsSchedType sched_type)
+{
+ return erts_schedule_bif(proc, argv, i, dbf, sched_type,
+ THE_NON_VALUE, THE_NON_VALUE, -1);
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#ifdef ERL_WANT_HIPE_BIF_WRAPPER__
+
+#ifndef HIPE
+
+#define HIPE_WRAPPER_BIF_DISABLE_GC(BIF_NAME, ARITY)
+
+#else
+
+#include "erl_fun.h"
+#include "hipe_mode_switch.h"
+
+/*
+ * HiPE wrappers used by native code for BIFs that disable GC while trapping.
+ * When adding a wrapper here, also register its usage in ../hipe/hipe_bif_list.m4.
+ *
+ * Problem:
+ * When native code calls a BIF that traps, hipe_mode_switch will push a
+ * "trap frame" on the Erlang stack in order to find its way from beam_emu
+ * back to the native caller when finally done. If GC is disabled and the
+ * stack/heap is full, there is no place to push the "trap frame".
+ *
+ * Solution:
+ * We reserve space on the stack for the "trap frame" here, before the BIF
+ * is called. If the BIF does not trap, the space is reclaimed here before
+ * returning. If the BIF traps, hipe_push_beam_trap_frame() will detect that
+ * a "trap frame" is already reserved and use it.
+ */
+
+
+#define HIPE_WRAPPER_BIF_DISABLE_GC(BIF_NAME, ARITY) \
+BIF_RETTYPE \
+nbif_impl_hipe_wrapper_ ## BIF_NAME ## _ ## ARITY (NBIF_ALIST); \
+BIF_RETTYPE \
+nbif_impl_hipe_wrapper_ ## BIF_NAME ## _ ## ARITY (NBIF_ALIST) \
+{ \
+ BIF_RETTYPE res; \
+ hipe_reserve_beam_trap_frame(BIF_P, BIF__ARGS, ARITY); \
+ res = nbif_impl_ ## BIF_NAME ## _ ## ARITY (NBIF_CALL_ARGS); \
+ if (is_value(res) || BIF_P->freason != TRAP) { \
+ hipe_unreserve_beam_trap_frame(BIF_P); \
+ } \
+ return res; \
+}
+
+#endif
+
+#endif /* ERL_WANT_HIPE_BIF_WRAPPER__ */
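A standalone sketch of the reserve/unreserve protocol the wrapper comment describes (toy convention: a negative return stands in for a trap; this is not the real HiPE calling sequence):

    #include <stdio.h>

    static int reserved;

    static void reserve_frame(void)   { reserved = 1; }
    static void unreserve_frame(void) { reserved = 0; }

    /* Reserve room for the trap frame before the BIF runs; release it only
     * if the BIF returned normally, otherwise the trap path reuses it. */
    static long wrapped_call(long (*bif)(long), long arg, int *trapped)
    {
        long res;
        reserve_frame();
        res = bif(arg);
        *trapped = (res < 0);          /* toy convention: negative == trap */
        if (!*trapped)
            unreserve_frame();         /* no trap: reclaim the space */
        return res;
    }

    static long ok_bif(long x)   { return x * 2; }
    static long trap_bif(long x) { (void) x; return -1; }

    int main(void)
    {
        int trapped;
        printf("%ld reserved=%d\n", wrapped_call(ok_bif, 21, &trapped), reserved);
        printf("%ld reserved=%d\n", wrapped_call(trap_bif, 21, &trapped), reserved);
        return 0;
    }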
+
#include "erl_bif_table.h"
#endif
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index 2d888862bf..f7b4451890 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -1,18 +1,19 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1996-2013. All Rights Reserved.
+# Copyright Ericsson AB 1996-2017. All Rights Reserved.
#
-# The contents of this file are subject to the Erlang Public License,
-# Version 1.1, (the "License"); you may not use this file except in
-# compliance with the License. You should have received a copy of the
-# Erlang Public License along with this software. If not, it can be
-# retrieved online at http://www.erlang.org/.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
#
-# Software distributed under the License is distributed on an "AS IS"
-# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-# the License for the specific language governing rights and limitations
-# under the License.
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
#
# %CopyrightEnd%
#
@@ -22,22 +23,24 @@
#
# Lines starting with '#' are ignored.
#
-# <bif-decl> ::= "bif" <bif> <C-name>* | "ubif" <bif> <C-name>*
+# <bif-decl> ::= "bif" <bif> <C-name>* |
+# "ubif" <bif> <C-name>* |
+# "gcbif" <bif> <C-name>*
# <bif> ::= <module> ":" <name> "/" <arity>
#
-# "ubif" is an unwrapped bif, i.e. a bif without a trace wrapper,
-# or rather; the trace entry point in the export entry is the same
-# as the normal entry point, and no trace wrapper is generated.
+# ubif: Use for operators and for guard BIFs that never build anything
+#       on the heap (such as tuple_size/1).
+#
+# gcbif: Use for guard BIFs that may build on the heap (such as abs/1).
#
-# Important: Use "ubif" for guard BIFs and operators; use "bif" for ordinary BIFs.
+# bif: Use for all other BIFs.
#
# Add new BIFs to the end of the file.
#
-# Note: Guards BIFs require special support in the compiler (to be able to actually
-# call them from within a guard).
+# Note: Guard BIFs usually require special support in the compiler.
#
-ubif erlang:abs/1
+gcbif erlang:abs/1
bif erlang:adler32/1
bif erlang:adler32/2
bif erlang:adler32_combine/3
@@ -45,6 +48,7 @@ bif erlang:apply/3
bif erlang:atom_to_list/1
bif erlang:binary_to_list/1
bif erlang:binary_to_list/3
+bif erlang:binary_to_term/1
bif erlang:crc32/1
bif erlang:crc32/2
bif erlang:crc32_combine/3
@@ -60,18 +64,16 @@ bif erlang:exit/1
bif erlang:exit/2
bif erlang:external_size/1
bif erlang:external_size/2
-ubif erlang:float/1
+gcbif erlang:float/1
bif erlang:float_to_list/1
bif erlang:float_to_list/2
bif erlang:fun_info/2
-bif erlang:garbage_collect/0
+bif erts_internal:garbage_collect/1
bif erlang:get/0
bif erlang:get/1
bif erlang:get_keys/1
bif erlang:group_leader/0
bif erlang:group_leader/2
-bif erlang:halt/0
-bif erlang:halt/1
bif erlang:halt/2
bif erlang:phash/2
bif erlang:phash2/1
@@ -79,18 +81,22 @@ bif erlang:phash2/2
ubif erlang:hd/1
bif erlang:integer_to_list/1
bif erlang:is_alive/0
-ubif erlang:length/1
+gcbif erlang:length/1
bif erlang:link/1
bif erlang:list_to_atom/1
bif erlang:list_to_binary/1
bif erlang:list_to_float/1
bif erlang:list_to_integer/1
bif erlang:list_to_pid/1
+bif erlang:list_to_port/1
+bif erlang:list_to_ref/1
bif erlang:list_to_tuple/1
bif erlang:loaded/0
bif erlang:localtime/0
bif erlang:localtime_to_universaltime/2
bif erlang:make_ref/0
+bif erlang:unique_integer/0
+bif erlang:unique_integer/1
bif erlang:md5/1
bif erlang:md5_init/0
bif erlang:md5_update/2
@@ -103,8 +109,15 @@ ubif erlang:node/1
ubif erlang:node/0
bif erlang:nodes/1
bif erlang:now/0
+bif erlang:monotonic_time/0
+bif erlang:monotonic_time/1
+bif erlang:system_time/0
+bif erlang:system_time/1
+bif erlang:time_offset/0
+bif erlang:time_offset/1
+bif erlang:timestamp/0
-bif erlang:open_port/2
+bif erts_internal:open_port/2
bif erlang:pid_to_list/1
bif erlang:ports/0
@@ -114,14 +127,13 @@ bif erlang:process_flag/3
bif erlang:process_info/1
bif erlang:process_info/2
bif erlang:processes/0
-bif erlang:purge_module/1
bif erlang:put/2
bif erlang:register/2
bif erlang:registered/0
-ubif erlang:round/1
+gcbif erlang:round/1
ubif erlang:self/0
bif erlang:setelement/3
-ubif erlang:size/1
+gcbif erlang:size/1
bif erlang:spawn/3
bif erlang:spawn_link/3
bif erlang:split_binary/2
@@ -131,7 +143,7 @@ bif erlang:term_to_binary/2
bif erlang:throw/1
bif erlang:time/0
ubif erlang:tl/1
-ubif erlang:trunc/1
+gcbif erlang:trunc/1
bif erlang:tuple_to_list/1
bif erlang:universaltime/0
bif erlang:universaltime_to_localtime/1
@@ -142,6 +154,12 @@ bif erlang:spawn_opt/1
bif erlang:setnode/2
bif erlang:setnode/3
bif erlang:dist_exit/3
+bif erlang:dist_get_stat/1
+bif erlang:dist_ctrl_input_handler/2
+bif erlang:dist_ctrl_put_data/2
+bif erlang:dist_ctrl_get_data/1
+bif erlang:dist_ctrl_get_data_notification/1
+
# Static native functions in erts_internal
bif erts_internal:port_info/1
@@ -151,21 +169,31 @@ bif erts_internal:port_command/3
bif erts_internal:port_control/3
bif erts_internal:port_close/1
bif erts_internal:port_connect/2
-bif erts_internal:binary_to_term/1
-bif erts_internal:binary_to_term/2
bif erts_internal:request_system_task/3
-bif erts_internal:check_process_code/2
+bif erts_internal:request_system_task/4
+bif erts_internal:check_process_code/1
+
+bif erts_internal:map_to_tuple_keys/1
+bif erts_internal:term_type/1
+bif erts_internal:map_hashmap_children/1
+
+bif erts_internal:time_unit/0
+bif erts_internal:perf_counter_unit/0
+
+bif erts_internal:is_system_process/1
+
+bif erts_internal:system_check/1
+bif erts_internal:release_literal_area_switch/0
# inet_db support
bif erlang:port_set_data/2
bif erlang:port_get_data/1
# Tracing & debugging.
-bif erlang:trace_pattern/2
-bif erlang:trace_pattern/3
-bif erlang:trace/3
+bif erts_internal:trace_pattern/3
+bif erts_internal:trace/3
bif erlang:trace_info/2
bif erlang:trace_delivered/1
bif erlang:seq_trace/2
@@ -194,15 +222,20 @@ bif math:erf/1
bif math:erfc/1
bif math:exp/1
bif math:log/1
+bif math:log2/1
bif math:log10/1
bif math:sqrt/1
bif math:atan2/2
bif math:pow/2
bif erlang:start_timer/3
+bif erlang:start_timer/4
bif erlang:send_after/3
+bif erlang:send_after/4
bif erlang:cancel_timer/1
+bif erlang:cancel_timer/2
bif erlang:read_timer/1
+bif erlang:read_timer/2
bif erlang:make_tuple/2
bif erlang:append_element/2
@@ -299,7 +332,7 @@ bif erlang:match_spec_test/3
# Bifs in ets module.
#
-bif ets:all/0
+bif ets:internal_request_all/0
bif ets:new/2
bif ets:delete/1
bif ets:delete/2
@@ -335,6 +368,7 @@ bif ets:select_reverse/1
bif ets:select_reverse/2
bif ets:select_reverse/3
bif ets:select_delete/2
+bif ets:select_replace/2
bif ets:match_spec_compile/1
bif ets:match_spec_run_r/3
@@ -347,6 +381,9 @@ bif os:getenv/0
bif os:getenv/1
bif os:getpid/0
bif os:timestamp/0
+bif os:system_time/0
+bif os:system_time/1
+bif os:perf_counter/0
#
# Bifs in the erl_ddll module (the module actually does not exist)
@@ -363,6 +400,7 @@ bif erl_ddll:demonitor/1
#
# Bifs in the re module
#
+bif re:version/0
bif re:compile/1
bif re:compile/2
bif re:run/2
@@ -391,6 +429,9 @@ bif erts_debug:set_internal_state/2
bif erts_debug:display/1
bif erts_debug:dist_ext_to_term/2
bif erts_debug:instructions/0
+bif erts_debug:dirty_cpu/2
+bif erts_debug:dirty_io/2
+bif erts_debug:dirty/3
#
# Monitor testing bif's...
@@ -402,7 +443,10 @@ bif erts_debug:dump_links/1
#
# Lock counter bif's
#
-bif erts_debug:lock_counters/1
+bif erts_debug:lcnt_control/2
+bif erts_debug:lcnt_control/1
+bif erts_debug:lcnt_collect/0
+bif erts_debug:lcnt_clear/0
#
# New Bifs in R8.
@@ -426,8 +470,8 @@ bif error_logger:warning_map/0
bif erlang:get_module_info/1
bif erlang:get_module_info/2
ubif erlang:is_boolean/1
-bif string:to_integer/1
-bif string:to_float/1
+bif string:list_to_integer/1
+bif string:list_to_float/1
bif erlang:make_fun/3
bif erlang:iolist_size/1
bif erlang:iolist_to_binary/1
@@ -438,8 +482,8 @@ bif erlang:list_to_existing_atom/1
#
ubif erlang:is_bitstring/1
ubif erlang:tuple_size/1
-ubif erlang:byte_size/1
-ubif erlang:bit_size/1
+gcbif erlang:byte_size/1
+gcbif erlang:bit_size/1
bif erlang:list_to_bitstring/1
bif erlang:bitstring_to_list/1
@@ -480,14 +524,19 @@ bif erlang:call_on_load_function/1
bif erlang:finish_after_on_load/2
#
+# New Bifs in R13B04
+#
+bif erlang:binary_to_term/2
+
+#
# The binary match bifs (New in R14A - EEP9)
#
#
# The searching/splitting/substituting thingies
#
-ubif erlang:binary_part/2
-ubif erlang:binary_part/3
+gcbif erlang:binary_part/2
+gcbif erlang:binary_part/3
bif binary:compile_pattern/1
bif binary:match/2
@@ -573,13 +622,13 @@ bif io:printable_range/0
bif os:unsetenv/1
#
-# New in R17A
+# New in 17.0
#
bif re:inspect/2
ubif erlang:is_map/1
-ubif erlang:map_size/1
+gcbif erlang:map_size/1
bif maps:to_list/1
bif maps:find/2
bif maps:get/2
@@ -595,8 +644,50 @@ bif maps:values/1
bif erts_internal:cmp_term/2
+bif ets:take/2
+
+#
+# New in 17.1
+#
+
+bif erlang:fun_info_mfa/1
+
+# New in 18.0
+#
+
+bif erlang:get_keys/0
+bif ets:update_counter/4
+bif erts_debug:map_info/1
+
+#
+# New in 19.0
+#
+
+bif erts_internal:is_process_executing_dirty/1
+bif erts_internal:check_dirty_process_code/2
+bif erts_internal:purge_module/2
+bif binary:split/2
+bif binary:split/3
+bif erts_debug:size_shared/1
+bif erts_debug:copy_shared/1
+bif erlang:has_prepared_code_on_load/1
+
+bif maps:take/2
+
+#
+# New in 20.0
+#
+
+gcbif erlang:floor/1
+gcbif erlang:ceil/1
+bif math:floor/1
+bif math:ceil/1
+bif math:fmod/2
+bif os:set_signal/2
+bif erts_internal:maps_to_list/2
+
#
-# Obsolete
+# New in 20.1
#
-bif erlang:hash/2
+bif erlang:iolist_to_iovec/1
diff --git a/erts/emulator/beam/bif_instrs.tab b/erts/emulator/beam/bif_instrs.tab
new file mode 100644
index 0000000000..0932b8b985
--- /dev/null
+++ b/erts/emulator/beam/bif_instrs.tab
@@ -0,0 +1,539 @@
+// -*- c -*-
+//
+// %CopyrightBegin%
+//
+// Copyright Ericsson AB 2017. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// %CopyrightEnd%
+//
+
+// ================================================================
+// All guards with zero arguments have special instructions,
+// for example:
+//
+// self/0
+// node/0
+//
+// All other guard BIFs take one or two arguments.
+// ================================================================
+
+CALL_GUARD_BIF(BF, TmpReg, Dst) {
+ Eterm result;
+
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
+ c_p->fcalls = FCALLS;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ ERTS_CHK_MBUF_SZ(c_p);
+ result = (*$BF)(c_p, $TmpReg, I);
+ ERTS_CHK_MBUF_SZ(c_p);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_HOLE_CHECK(c_p);
+ FCALLS = c_p->fcalls;
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
+ if (ERTS_LIKELY(is_value(result))) {
+ $Dst = result;
+ $NEXT0();
+ }
+}
+
+// Guard BIF in head. On failure, ignore the error and jump
+// to the code for the next clause. We don't support tracing
+// of guard BIFs.
+
+bif1(Fail, Bif, Src, Dst) {
+ ErtsBifFunc bf;
+ Eterm tmp_reg[1];
+
+ tmp_reg[0] = $Src;
+ bf = (BifFunction) $Bif;
+ $CALL_GUARD_BIF(bf, tmp_reg, $Dst);
+
+ $FAIL($Fail);
+}
+
+//
+// Guard BIF in body. It can fail like any BIF. No trace support.
+//
+
+bif1_body(Bif, Src, Dst) {
+ ErtsBifFunc bf;
+ Eterm tmp_reg[1];
+
+ tmp_reg[0] = $Src;
+ bf = (BifFunction) $Bif;
+ $CALL_GUARD_BIF(bf, tmp_reg, $Dst);
+
+ reg[0] = tmp_reg[0];
+ SWAPOUT;
+ I = handle_error(c_p, I, reg, ubif2mfa((void *) bf));
+ goto post_error_handling;
+}
+
+//
+// Guard bif in guard with two arguments ('and'/2, 'or'/2, 'xor'/2).
+//
+
+i_bif2(Fail, Bif, Src1, Src2, Dst) {
+ Eterm tmp_reg[2];
+ ErtsBifFunc bf;
+
+ tmp_reg[0] = $Src1;
+ tmp_reg[1] = $Src2;
+ bf = (ErtsBifFunc) $Bif;
+ $CALL_GUARD_BIF(bf, tmp_reg, $Dst);
+ $FAIL($Fail);
+}
+
+//
+// Guard bif in body with two arguments ('and'/2, 'or'/2, 'xor'/2).
+//
+
+i_bif2_body(Bif, Src1, Src2, Dst) {
+ Eterm tmp_reg[2];
+ ErtsBifFunc bf;
+
+ tmp_reg[0] = $Src1;
+ tmp_reg[1] = $Src2;
+ bf = (ErtsBifFunc) $Bif;
+ $CALL_GUARD_BIF(bf, tmp_reg, $Dst);
+ reg[0] = tmp_reg[0];
+ reg[1] = tmp_reg[1];
+ SWAPOUT;
+ I = handle_error(c_p, I, reg, ubif2mfa((void *) bf));
+ goto post_error_handling;
+}
+
+//
+// Garbage-collecting BIF with one argument in either guard or body.
+//
+
+i_gc_bif1(Fail, Bif, Src, Live, Dst) {
+ typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint);
+ GcBifFunction bf;
+ Eterm result;
+ Uint live = (Uint) $Live;
+
+ x(live) = $Src;
+ bf = (GcBifFunction) $Bif;
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
+ c_p->fcalls = FCALLS;
+ SWAPOUT;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
+ ERTS_CHK_MBUF_SZ(c_p);
+ result = (*bf)(c_p, reg, live);
+ ERTS_CHK_MBUF_SZ(c_p);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ SWAPIN;
+ ERTS_HOLE_CHECK(c_p);
+ FCALLS = c_p->fcalls;
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
+ if (ERTS_LIKELY(is_value(result))) {
+ $REFRESH_GEN_DEST();
+ $Dst = result;
+ $NEXT0();
+ }
+ if (ERTS_LIKELY($Fail != 0)) { /* Handle error in guard. */
+ $JUMP($Fail);
+ }
+
+ /* Handle error in body. */
+ x(0) = x(live);
+ I = handle_error(c_p, I, reg, gcbif2mfa((void *) bf));
+ goto post_error_handling;
+}
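The GC BIF calling convention used above: the BIF receives the whole x-register array plus a live count, and the argument is parked at x(live) so the callee knows where to find it (and can treat it as a root across a collection). A standalone sketch (toy types, no real garbage collector):

    #include <stdio.h>

    typedef unsigned long Eterm;

    /* A GC BIF gets (xregs, live): x[0..live-1] are the live registers and
     * the argument has been staged at x(live) by the caller. */
    static Eterm gc_abs(Eterm *xregs, unsigned live)
    {
        long arg = (long) xregs[live];      /* argument parked at x(live) */
        /* ... a real implementation may garbage collect here, using
         * x[0..live] as part of the root set ... */
        return (Eterm)(arg < 0 ? -arg : arg);
    }

    int main(void)
    {
        Eterm x[8] = { 7, 8, 9 };           /* three live x registers */
        unsigned live = 3;
        x[live] = (Eterm) -42;              /* stage the argument */
        printf("%lu\n", gc_abs(x, live));   /* -> 42 */
        return 0;
    }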
+
+//
+// Garbage-collecting BIF with two arguments in either guard or body.
+//
+
+i_gc_bif2(Fail, Bif, Live, Src1, Src2, Dst) {
+ typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint);
+ GcBifFunction bf;
+ Eterm result;
+ Uint live = (Uint) $Live;
+
+ /*
+ * XXX This calling convention does not make sense. 'live'
+ * should point out the first argument, not the second
+ * (i.e. 'live' should not be incremented below).
+ */
+ x(live) = $Src1;
+ x(live+1) = $Src2;
+ live++;
+
+ bf = (GcBifFunction) $Bif;
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
+ c_p->fcalls = FCALLS;
+ SWAPOUT;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
+ ERTS_CHK_MBUF_SZ(c_p);
+ result = (*bf)(c_p, reg, live);
+ ERTS_CHK_MBUF_SZ(c_p);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ SWAPIN;
+ ERTS_HOLE_CHECK(c_p);
+ FCALLS = c_p->fcalls;
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
+ if (ERTS_LIKELY(is_value(result))) {
+ $REFRESH_GEN_DEST();
+ $Dst = result;
+ $NEXT0();
+ }
+
+ if (ERTS_LIKELY($Fail != 0)) { /* Handle error in guard. */
+ $JUMP($Fail);
+ }
+
+ /* Handle error in body. */
+ live--;
+ x(0) = x(live);
+ x(1) = x(live+1);
+ I = handle_error(c_p, I, reg, gcbif2mfa((void *) bf));
+ goto post_error_handling;
+}
+
+//
+// Garbage-collecting BIF with three arguments in either guard or body.
+//
+
+i_gc_bif3(Fail, Bif, Live, Src2, Src3, Dst) {
+ typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint);
+ GcBifFunction bf;
+ Eterm result;
+ Uint live = (Uint) $Live;
+
+ /*
+ * XXX This calling convention does not make sense. 'live'
+ * should point out the first argument, not the third
+ * (i.e. 'live' should not be incremented below).
+ */
+ x(live) = x(SCRATCH_X_REG);
+ x(live+1) = $Src2;
+ x(live+2) = $Src3;
+ live += 2;
+
+ bf = (GcBifFunction) $Bif;
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
+ c_p->fcalls = FCALLS;
+ SWAPOUT;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
+ ERTS_CHK_MBUF_SZ(c_p);
+ result = (*bf)(c_p, reg, live);
+ ERTS_CHK_MBUF_SZ(c_p);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ SWAPIN;
+ ERTS_HOLE_CHECK(c_p);
+ FCALLS = c_p->fcalls;
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
+ if (ERTS_LIKELY(is_value(result))) {
+ $REFRESH_GEN_DEST();
+ $Dst = result;
+ $NEXT0();
+ }
+
+ /* Handle error in guard. */
+ if (ERTS_LIKELY($Fail != 0)) {
+ $JUMP($Fail);
+ }
+
+ /* Handle error in body. */
+ live -= 2;
+ x(0) = x(live);
+ x(1) = x(live+1);
+ x(2) = x(live+2);
+ I = handle_error(c_p, I, reg, gcbif2mfa((void *) bf));
+ goto post_error_handling;
+}
+
+//
+// The most general BIF call. The BIF may build any amount of data
+// on the heap. The result is always returned in r(0).
+//
+call_bif(Exp) {
+ ErtsBifFunc bf;
+ Eterm result;
+ ErlHeapFragment *live_hf_end;
+ Export *export = (Export*) $Exp;
+
+    if (!((FCALLS - 1) > 0 || (FCALLS - 1) > neg_o_reds)) {
+ /* If we have run out of reductions, we do a context
+ switch before calling the bif */
+ c_p->arity = GET_BIF_ARITY(export);
+ c_p->current = &export->info.mfa;
+ goto context_switch3;
+ }
+
+ ERTS_MSACC_SET_BIF_STATE_CACHED_X(GET_BIF_MODULE(export),
+ GET_BIF_ADDRESS(export));
+
+ bf = GET_BIF_ADDRESS(export);
+
+ PRE_BIF_SWAPOUT(c_p);
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
+ c_p->fcalls = FCALLS - 1;
+ if (FCALLS <= 0) {
+ save_calls(c_p, export);
+ }
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ live_hf_end = c_p->mbuf;
+ ERTS_CHK_MBUF_SZ(c_p);
+ result = (*bf)(c_p, reg, I);
+ ERTS_CHK_MBUF_SZ(c_p);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ ERTS_HOLE_CHECK(c_p);
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
+ if (ERTS_IS_GC_DESIRED(c_p)) {
+ Uint arity = GET_BIF_ARITY(export);
+ result = erts_gc_after_bif_call_lhf(c_p, live_hf_end, result,
+ reg, arity);
+ E = c_p->stop;
+ }
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ HTOP = HEAP_TOP(c_p);
+ FCALLS = c_p->fcalls;
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
+    /* We have to update the cache if msacc is enabled, in order
+       to make sure no bookkeeping is done after it has been
+       disabled. We don't always do this as it is quite expensive. */
+ if (ERTS_MSACC_IS_ENABLED_CACHED_X()) {
+ ERTS_MSACC_UPDATE_CACHE_X();
+ }
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
+ if (ERTS_LIKELY(is_value(result))) {
+ r(0) = result;
+ CHECK_TERM(r(0));
+ $NEXT0();
+ } else if (c_p->freason == TRAP) {
+ SET_CP(c_p, I+2);
+ SET_I(c_p->i);
+ SWAPIN;
+ Dispatch();
+ }
+
+ /*
+ * Error handling. SWAPOUT is not needed because it was done above.
+ */
+ ASSERT(c_p->stop == E);
+ I = handle_error(c_p, I, reg, &export->info.mfa);
+ goto post_error_handling;
+}
+
+//
+// Send is almost a standard call-BIF with two arguments, except for:
+// 1. It cannot be traced.
+// 2. There is no pointer to the send_2 function stored in
+// the instruction.
+//
+
+send() {
+ Eterm result;
+
+ if (!(FCALLS > 0 || FCALLS > neg_o_reds)) {
+ /* If we have run out of reductions, we do a context
+ switch before calling the bif */
+ c_p->arity = 2;
+ c_p->current = NULL;
+ goto context_switch3;
+ }
+
+ PRE_BIF_SWAPOUT(c_p);
+ c_p->fcalls = FCALLS - 1;
+ result = erl_send(c_p, r(0), x(1));
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ HTOP = HEAP_TOP(c_p);
+ FCALLS = c_p->fcalls;
+ if (ERTS_LIKELY(is_value(result))) {
+ r(0) = result;
+ CHECK_TERM(r(0));
+ } else if (c_p->freason == TRAP) {
+ SET_CP(c_p, I+1);
+ SET_I(c_p->i);
+ SWAPIN;
+ Dispatch();
+ } else {
+ goto find_func_info;
+ }
+}
+
+call_nif := nif_bif.call_nif.epilogue;
+apply_bif := nif_bif.apply_bif.epilogue;
+
+nif_bif.head() {
+ Eterm nif_bif_result;
+ Eterm bif_nif_arity;
+ BifFunction vbf;
+ ErlHeapFragment *live_hf_end;
+ ErtsCodeMFA *codemfa;
+}
+
+nif_bif.call_nif() {
+ /*
+     * call_nif is always the first instruction in a function:
+ *
+ * I[-3]: Module
+ * I[-2]: Function
+ * I[-1]: Arity
+ * I[0]: &&call_nif
+ * I[1]: Function pointer to NIF function
+ * I[2]: Pointer to erl_module_nif
+ * I[3]: Function pointer to dirty NIF
+ *
+ * This layout is determined by the NifExport struct
+ */
+
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_NIF);
+
+ codemfa = erts_code_to_codemfa(I);
+
+ c_p->current = codemfa; /* current and vbf set to please handle_error */
+
+ DTRACE_NIF_ENTRY(c_p, codemfa);
+
+ HEAVY_SWAPOUT;
+
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ bif_nif_arity = codemfa->arity;
+ ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
+
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ {
+ typedef Eterm NifF(struct enif_environment_t*, int argc, Eterm argv[]);
+ NifF* fp = vbf = (NifF*) I[1];
+ struct enif_environment_t env;
+ ASSERT(c_p->scheduler_data);
+ live_hf_end = c_p->mbuf;
+ ERTS_CHK_MBUF_SZ(c_p);
+ erts_pre_nif(&env, c_p, (struct erl_module_nif*)I[2], NULL);
+ nif_bif_result = (*fp)(&env, bif_nif_arity, reg);
+ if (env.exception_thrown)
+ nif_bif_result = THE_NON_VALUE;
+ erts_post_nif(&env);
+ ERTS_CHK_MBUF_SZ(c_p);
+
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
+ ASSERT(!env.exiting);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ }
+
+ DTRACE_NIF_RETURN(c_p, codemfa);
+}
+
+nif_bif.apply_bif() {
+ /*
+ * At this point, I points to the code[0] in the export entry for
+ * the BIF:
+ *
+ * code[-3]: Module
+ * code[-2]: Function
+ * code[-1]: Arity
+ * code[0]: &&apply_bif
+ * code[1]: Function pointer to BIF function
+ */
+
+ if (!((FCALLS - 1) > 0 || (FCALLS - 1) > neg_o_reds)) {
+ /* If we have run out of reductions, we do a context
+ switch before calling the bif */
+ goto context_switch;
+ }
+
+ codemfa = erts_code_to_codemfa(I);
+
+ ERTS_MSACC_SET_BIF_STATE_CACHED_X(codemfa->module, (BifFunction)Arg(0));
+
+ /* In case we apply process_info/1,2 or load_nif/1 */
+ c_p->current = codemfa;
+ $SET_CP_I_ABS(I); /* In case we apply check_process_code/2. */
+ c_p->arity = 0; /* To allow garbage collection on ourselves
+ * (check_process_code/2).
+ */
+ DTRACE_BIF_ENTRY(c_p, codemfa);
+
+ SWAPOUT;
+ ERTS_DBG_CHK_REDS(c_p, FCALLS - 1);
+ c_p->fcalls = FCALLS - 1;
+ vbf = (BifFunction) Arg(0);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ bif_nif_arity = codemfa->arity;
+ ASSERT(bif_nif_arity <= 4);
+ ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ {
+ ErtsBifFunc bf = vbf;
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ live_hf_end = c_p->mbuf;
+ ERTS_CHK_MBUF_SZ(c_p);
+ nif_bif_result = (*bf)(c_p, reg, I);
+ ERTS_CHK_MBUF_SZ(c_p);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p) ||
+ is_non_value(nif_bif_result));
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ }
+    /* We have to update the cache if msacc is enabled, in order
+       to make sure no bookkeeping is done after it has been
+       disabled. We don't always do this as it is quite expensive. */
+ if (ERTS_MSACC_IS_ENABLED_CACHED_X())
+ ERTS_MSACC_UPDATE_CACHE_X();
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
+ DTRACE_BIF_RETURN(c_p, codemfa);
+}
+
+nif_bif.epilogue() {
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
+ ERTS_HOLE_CHECK(c_p);
+ if (ERTS_IS_GC_DESIRED(c_p)) {
+ nif_bif_result = erts_gc_after_bif_call_lhf(c_p, live_hf_end,
+ nif_bif_result,
+ reg, bif_nif_arity);
+ }
+ SWAPIN; /* There might have been a garbage collection. */
+ FCALLS = c_p->fcalls;
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
+ if (ERTS_LIKELY(is_value(nif_bif_result))) {
+ r(0) = nif_bif_result;
+ CHECK_TERM(r(0));
+ SET_I(c_p->cp);
+ c_p->cp = 0;
+ Goto(*I);
+ } else if (c_p->freason == TRAP) {
+ SET_I(c_p->i);
+ if (c_p->flags & F_HIBERNATE_SCHED) {
+ c_p->flags &= ~F_HIBERNATE_SCHED;
+ goto do_schedule;
+ }
+ Dispatch();
+ }
+ I = handle_error(c_p, c_p->cp, reg, c_p->current);
+ goto post_error_handling;
+}
diff --git a/erts/emulator/beam/big.c b/erts/emulator/beam/big.c
index 41a041eba6..5eaf262cd8 100644
--- a/erts/emulator/beam/big.c
+++ b/erts/emulator/beam/big.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2014. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -47,7 +48,7 @@
_t_dst = (dst)+((sz)-1); \
_t_src = (src)+((sz)-1); \
while(_t_sz--) *_t_dst-- = *_t_src--; \
- } \
+ } \
} while(0)
/* add a and b with carry in + out */
@@ -274,7 +275,9 @@
_b = _b << _s; \
_vn1 = _b >> H_EXP; \
_vn0 = _b & LO_MASK; \
- _un32 = (_a1 << _s) | ((_a0>>(D_EXP-_s)) & (-_s >> (D_EXP-1))); \
+	/* Skip the shift when _s == 0 to avoid undefined behaviour */ \
+ if (_s) _un32 = (_a1 << _s) | ((_a0>>(D_EXP-_s)) & (-_s >> (D_EXP-1))); \
+ else _un32 = _a1; \
_un10 = _a0 << _s; \
_un1 = _un10 >> H_EXP; \
_un0 = _un10 & LO_MASK; \
@@ -420,6 +423,25 @@
#endif
+/* Forward declarations of lookup tables (see below in this file) used in
+ * list-to-integer conversions for different bases. Also used in bignum printing.
+ */
+static const byte digits_per_sint_lookup[36-1];
+static const byte digits_per_small_lookup[36-1];
+static const Sint largest_power_of_base_lookup[36-1];
+
+static ERTS_INLINE byte get_digits_per_signed_int(Uint base) {
+ return digits_per_sint_lookup[base-2];
+}
+
+static ERTS_INLINE byte get_digits_per_small(Uint base) {
+ return digits_per_small_lookup[base-2];
+}
+
+static ERTS_INLINE Sint get_largest_power_of_base(Uint base) {
+ return largest_power_of_base_lookup[base-2];
+}
+
/*
** compare two number vectors
*/
@@ -1271,8 +1293,11 @@ static dsize_t I_bxor(ErtsDigit* x, dsize_t xl, short xsgn,
*r++ = ~c ^ *y++;
x++;
}
- while(xl--)
- *r++ = ~*x++;
+ while(xl--) {
+ DSUBb(*x,0,b,c);
+ *r++ = ~c;
+ x++;
+ }
}
else {
ErtsDigit b1, b2;
@@ -1290,7 +1315,9 @@ static dsize_t I_bxor(ErtsDigit* x, dsize_t xl, short xsgn,
x++; y++;
}
while(xl--) {
- *r++ = *x++;
+ DSUBb(*x,0,b1,c1);
+ *r++ = c1;
+ x++;
}
}
}
@@ -1484,20 +1511,8 @@ Eterm uint_to_big(Uint x, Eterm *y)
Eterm uword_to_big(UWord x, Eterm *y)
{
-#if HALFWORD_HEAP
- Uint upper = x >> 32;
- Uint lower = x & 0xFFFFFFFFUL;
- if (upper == 0) {
- *y = make_pos_bignum_header(1);
- } else {
- *y = make_pos_bignum_header(2);
- BIG_DIGIT(y, 1) = upper;
- }
- BIG_DIGIT(y, 0) = lower;
-#else
*y = make_pos_bignum_header(1);
BIG_DIGIT(y, 0) = x;
-#endif
return make_big(y);
}
@@ -1506,13 +1521,15 @@ Eterm uword_to_big(UWord x, Eterm *y)
*/
Eterm small_to_big(Sint x, Eterm *y)
{
+ Uint xu;
if (x >= 0) {
+ xu = x;
*y = make_pos_bignum_header(1);
} else {
- x = -x;
+ xu = -(Uint)x;
*y = make_neg_bignum_header(1);
}
- BIG_DIGIT(y, 0) = x;
+ BIG_DIGIT(y, 0) = xu;
return make_big(y);
}
@@ -1520,7 +1537,7 @@ Eterm small_to_big(Sint x, Eterm *y)
Eterm erts_uint64_to_big(Uint64 x, Eterm **hpp)
{
Eterm *hp = *hpp;
-#if defined(ARCH_32) || HALFWORD_HEAP
+#if defined(ARCH_32)
if (x >= (((Uint64) 1) << 32)) {
*hp = make_pos_bignum_header(2);
BIG_DIGIT(hp, 0) = (Uint) (x & ((Uint) 0xffffffff));
@@ -1540,21 +1557,24 @@ Eterm erts_uint64_to_big(Uint64 x, Eterm **hpp)
Eterm erts_sint64_to_big(Sint64 x, Eterm **hpp)
{
Eterm *hp = *hpp;
+ Uint64 ux;
int neg;
- if (x >= 0)
+ if (x >= 0) {
neg = 0;
+ ux = x;
+ }
else {
neg = 1;
- x = -x;
+ ux = -(Uint64)x;
}
-#if defined(ARCH_32) || HALFWORD_HEAP
- if (x >= (((Uint64) 1) << 32)) {
+#if defined(ARCH_32)
+ if (ux >= (((Uint64) 1) << 32)) {
if (neg)
*hp = make_neg_bignum_header(2);
else
*hp = make_pos_bignum_header(2);
- BIG_DIGIT(hp, 0) = (Uint) (x & ((Uint) 0xffffffff));
- BIG_DIGIT(hp, 1) = (Uint) ((x >> 32) & ((Uint) 0xffffffff));
+ BIG_DIGIT(hp, 0) = (Uint) (ux & ((Uint) 0xffffffff));
+ BIG_DIGIT(hp, 1) = (Uint) ((ux >> 32) & ((Uint) 0xffffffff));
*hpp += 3;
}
else
@@ -1564,12 +1584,52 @@ Eterm erts_sint64_to_big(Sint64 x, Eterm **hpp)
*hp = make_neg_bignum_header(1);
else
*hp = make_pos_bignum_header(1);
- BIG_DIGIT(hp, 0) = (Uint) x;
+ BIG_DIGIT(hp, 0) = (Uint) ux;
*hpp += 2;
}
return make_big(hp);
}
+Eterm
+erts_uint64_array_to_big(Uint **hpp, int neg, int len, Uint64 *array)
+{
+ Uint *headerp;
+ int i, pot_digits, digits;
+
+ headerp = *hpp;
+
+ pot_digits = digits = 0;
+ for (i = 0; i < len; i++) {
+#if defined(ARCH_32)
+ Uint low_val = array[i] & ((Uint) 0xffffffff);
+ Uint high_val = (array[i] >> 32) & ((Uint) 0xffffffff);
+ BIG_DIGIT(headerp, pot_digits) = low_val;
+ pot_digits++;
+ if (low_val)
+ digits = pot_digits;
+ BIG_DIGIT(headerp, pot_digits) = high_val;
+ pot_digits++;
+ if (high_val)
+ digits = pot_digits;
+#else
+ Uint val = array[i];
+ BIG_DIGIT(headerp, pot_digits) = val;
+ pot_digits++;
+ if (val)
+ digits = pot_digits;
+#endif
+ }
+
+ if (neg)
+ *headerp = make_neg_bignum_header(digits);
+ else
+ *headerp = make_pos_bignum_header(digits);
+
+ *hpp = headerp + 1 + digits;
+
+ return make_big(headerp);
+}
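Note how the builder above tracks both pot_digits and digits so that trailing all-zero words are excluded from the bignum header, keeping the result normalized. The same counting in a standalone toy:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t words[4] = { 0x1234, 0, 0x1, 0 }; /* low-order word first */
        int pot = 0, digits = 0;
        int i;
        for (i = 0; i < 4; i++) {
            pot++;
            if (words[i])
                digits = pot;          /* remember the last nonzero word */
        }
        printf("stored digits: %d of %d\n", digits, pot); /* 3 of 4 */
        return 0;
    }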
+
/*
** Convert a bignum to a double float
*/
@@ -1603,8 +1663,6 @@ big_to_double(Wterm x, double* resp)
/*
* Logic has been copied from erl_bif_guard.c and slightly
* modified to use a static instead of dynamic heap
- *
- * HALFWORD: Return relative term with 'heap' as base.
*/
Eterm
double_to_big(double x, Eterm *heap, Uint hsz)
@@ -1635,7 +1693,7 @@ double_to_big(double x, Eterm *heap, Uint hsz)
sz = BIG_NEED_SIZE(ds); /* number of words including arity */
hp = heap;
- res = make_big_rel(hp, heap);
+ res = make_big(hp);
xp = (ErtsDigit*) (hp + 1);
ASSERT(ds < hsz);
@@ -1685,8 +1743,10 @@ static Uint write_big(Wterm x, void (*write_func)(void *, char), void *arg)
short sign = BIG_SIGN(xp);
ErtsDigit rem;
Uint n = 0;
+ const Uint digits_per_Sint = get_digits_per_signed_int(10);
+ const Sint largest_pow_of_base = get_largest_power_of_base(10);
- if (xl == 1 && *dx < D_DECIMAL_BASE) {
+ if (xl == 1 && *dx < largest_pow_of_base) {
rem = *dx;
if (rem == 0) {
(*write_func)(arg, '0'); n++;
@@ -1704,7 +1764,7 @@ static Uint write_big(Wterm x, void (*write_func)(void *, char), void *arg)
MOVE_DIGITS(tmp, dx, xl);
while(1) {
- tmpl = D_div(tmp, tmpl, D_DECIMAL_BASE, tmp, &rem);
+ tmpl = D_div(tmp, tmpl, largest_pow_of_base, tmp, &rem);
if (tmpl == 1 && *tmp == 0) {
while(rem) {
(*write_func)(arg, (rem % 10)+'0'); n++;
@@ -1712,7 +1772,7 @@ static Uint write_big(Wterm x, void (*write_func)(void *, char), void *arg)
}
break;
} else {
- int i = D_DECIMAL_EXP;
+ Uint i = digits_per_Sint;
while(i--) {
(*write_func)(arg, (rem % 10)+'0'); n++;
rem /= 10;
@@ -1893,6 +1953,8 @@ Eterm bytes_to_big(byte *xp, dsize_t xsz, int xsgn, Eterm *r)
*rwp = d;
rwp++;
}
+ if (rsz > BIG_ARITY_MAX)
+ return NIL;
if (xsgn) {
*r = make_neg_bignum_header(rsz);
}
@@ -1978,6 +2040,32 @@ term_to_Uint(Eterm term, Uint *up)
}
}
+/* Same as term_to_Uint(), but also accepts larger bignums by
+   masking: only the lowest-order digit word is kept.
+ */
+int
+term_to_Uint_mask(Eterm term, Uint *up)
+{
+ if (is_small(term)) {
+ Sint i = signed_val(term);
+ if (i < 0) {
+ *up = BADARG;
+ return 0;
+ }
+ *up = (Uint) i;
+ return 1;
+ } else if (is_big(term) && !big_sign(term)) {
+ ErtsDigit* xr = big_v(term);
+
+ ERTS_CT_ASSERT(sizeof(ErtsDigit) == sizeof(Uint));
+ *up = (Uint)*xr; /* just pick first word */
+ return 1;
+ } else {
+ *up = BADARG;
+ return 0;
+ }
+}
+
int
term_to_UWord(Eterm term, UWord *up)
{
@@ -2183,21 +2271,6 @@ Eterm big_minus(Eterm x, Eterm y, Eterm *r)
}
/*
-** Subtract a digit from big number
-*/
-Eterm big_minus_small(Eterm x, Eterm y, Eterm *r)
-{
- Eterm* xp = big_val(x);
-
- if (BIG_SIGN(xp))
- return big_norm(r, D_add(BIG_V(xp),BIG_SIZE(xp), (ErtsDigit) y, BIG_V(r)),
- (short) BIG_SIGN(xp));
- else
- return big_norm(r, D_sub(BIG_V(xp),BIG_SIZE(xp), (ErtsDigit) y, BIG_V(r)),
- (short) BIG_SIGN(xp));
-}
-
-/*
** Multiply smallnums
*/
@@ -2329,16 +2402,6 @@ Eterm big_rem(Eterm x, Eterm y, Eterm *r)
}
}
-Eterm big_neg(Eterm x, Eterm *r)
-{
- Eterm* xp = big_val(x);
- dsize_t xsz = BIG_SIZE(xp);
- short xsgn = BIG_SIGN(xp);
-
- MOVE_DIGITS(BIG_V(r), BIG_V(xp), xsz);
- return big_norm(r, xsz, (short) !xsgn);
-}
-
Eterm big_band(Eterm x, Eterm y, Eterm *r)
{
Eterm* xp = big_val(x);
@@ -2486,63 +2549,100 @@ int term_equals_2pow32(Eterm x)
}
}
+static ERTS_INLINE int c2int_is_invalid_char(byte ch, int base) {
+ return (ch < '0'
+ || (ch > ('0' + base - 1)
+ && !(base > 10
+ && ((ch >= 'a' && ch < ('a' + base - 10))
+ || (ch >= 'A' && ch < ('A' + base - 10))))));
+}
-#define IS_VALID_CHARACTER(CHAR,BASE) \
- (CHAR < '0' \
- || (CHAR > ('0' + BASE - 1) \
- && !(BASE > 10 \
- && ((CHAR >= 'a' && CHAR < ('a' + BASE - 10)) \
- || (CHAR >= 'A' && CHAR < ('A' + BASE - 10))))))
-#define CHARACTER_FROM_BASE(CHAR) \
- ((CHAR <= '9') ? CHAR - '0' : 10 + ((CHAR <= 'Z') ? CHAR - 'A' : CHAR - 'a'))
-#define D_BASE_EXP(BASE) (d_base_exp_lookup[BASE-2])
-#define D_BASE_BASE(BASE) (d_base_base_lookup[BASE-2])
-#define LG2_LOOKUP(BASE) (lg2_lookup[base-2])
+static ERTS_INLINE byte c2int_digit_from_base(byte ch) {
+ return ch <= '9' ? ch - '0'
+ : (10 + (ch <= 'Z' ? ch - 'A' : ch - 'a'));
+}
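For example, with base 16 the two helpers accept '0'..'9', 'a'..'f' and 'A'..'F' and map them onto 0..15. A few illustrative assertions (assuming the two helpers above are in scope):

ASSERT(!c2int_is_invalid_char('9', 10) && c2int_digit_from_base('9') == 9);
ASSERT(c2int_is_invalid_char('a', 10));  /* letters only valid for base > 10 */
ASSERT(!c2int_is_invalid_char('F', 16) && c2int_digit_from_base('F') == 15);
ASSERT(!c2int_is_invalid_char('z', 36) && c2int_digit_from_base('z') == 35);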
/*
- * for i in 2..64 do
- * lg2_lookup[i-2] = log2(i)
- * end
- * How many bits are needed to store string of size n
+ * How many bits are needed to store one digit of a given base in binary
+ * Wo.Alpha formula: Table [log2[n], {n,2,36}]
*/
-const double lg2_lookup[] = { 1.0, 1.58496, 2, 2.32193, 2.58496, 2.80735, 3.0,
- 3.16993, 3.32193, 3.45943, 3.58496, 3.70044, 3.80735, 3.90689, 4.0,
- 4.08746, 4.16993, 4.24793, 4.32193, 4.39232, 4.45943, 4.52356, 4.58496,
- 4.64386, 4.70044, 4.75489, 4.80735, 4.85798, 4.90689, 4.9542, 5.0,
- 5.04439, 5.08746, 5.12928, 5.16993, 5.20945, 5.24793, 5.2854, 5.32193,
- 5.35755, 5.39232, 5.42626, 5.45943, 5.49185, 5.52356, 5.55459, 5.58496,
- 5.61471, 5.64386, 5.67243, 5.70044, 5.72792, 5.75489, 5.78136, 5.80735,
- 5.83289, 5.85798, 5.88264, 5.90689, 5.93074, 5.9542, 5.97728, 6.0 };
+static const double lg2_lookup[36-1] = {
+ 1.0, 1.58496, 2.0, 2.32193, 2.58496, 2.80735, 3.0, 3.16993, 3.32193,
+ 3.45943, 3.58496, 3.70044, 3.80735, 3.90689, 4.0, 4.08746, 4.16993, 4.24793,
+ 4.32193, 4.39232, 4.45943, 4.52356, 4.58496, 4.64386, 4.70044, 4.75489,
+ 4.80735, 4.85798, 4.90689, 4.9542, 5.0, 5.04439, 5.08746, 5.12928, 5.16993
+};
+static ERTS_INLINE double lookup_log2(Uint base) {
+ return lg2_lookup[base - 2];
+}
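As a worked example of how this table is used further down: a 20-digit decimal string needs at most (20+2)*3.32193 + 1 ~= 74 bits, which on a 64-bit build becomes (74+63)/64 = 2 digit words, i.e. 3 heap words once BIG_NEED_SIZE() adds the header.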
/*
- * for i in 2..64 do
- * d_base_exp_lookup[i-2] = 31 / lg2_lookup[i-2];
- * end
- * How many characters can fit in 31 bits
+ * How many digits can fit into a signed int (Sint) for a given base. We take
+ * one digit away just to be on the safe side (some corner cases).
*/
-const byte d_base_exp_lookup[] = { 31, 19, 15, 13, 11, 11, 10, 9, 9, 8, 8, 8, 8,
- 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5,
- 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
- 5, 5 };
+static const byte digits_per_sint_lookup[36-1] = {
+#if (SIZEOF_VOID_P == 4)
+ /* Wo.Alpha formula: Table [Trunc[31 / log[2,n]]-1, {n, 2, 36}] */
+ 30, 18, 14, 12, 10, 10, 9, 8, 8, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4
+#elif (SIZEOF_VOID_P == 8)
+ /* Wo.Alpha formula: Table [Trunc[63 / log[2,n]]-1, {n, 2, 36}] */
+ 62, 38, 30, 26, 23, 21, 20, 18, 17, 17, 16, 16, 15, 15, 14, 14, 14, 13, 13,
+ 13, 13, 12, 12, 12, 12, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11
+#else
+ #error "Please produce a lookup table for the new architecture"
+#endif
+};
/*
- * for i in 2..64 do
- * d_base_base_lookup[i-2] = pow(i,d_base_exp_lookup[i-2]);
- * end
- * How much can the characters which fit in 31 bit represent
+ * How many digits can fit into an Erlang small (SMALL_BITS-1 bits, the sign bit excluded)
*/
-const Uint d_base_base_lookup[] = { 2147483648u, 1162261467u, 1073741824u,
- 1220703125u, 362797056u, 1977326743u, 1073741824u, 387420489u,
- 1000000000u, 214358881u, 429981696u, 815730721u, 1475789056u,
- 170859375u, 268435456u, 410338673u, 612220032u, 893871739u, 1280000000u,
- 1801088541u, 113379904u, 148035889u, 191102976u, 244140625u, 308915776u,
- 387420489u, 481890304u, 594823321u, 729000000u, 887503681u, 1073741824u,
- 1291467969u, 1544804416u, 1838265625u, 60466176u, 69343957u, 79235168u,
- 90224199u, 102400000u, 115856201u, 130691232u, 147008443u, 164916224u,
- 184528125u, 205962976u, 229345007u, 254803968u, 282475249u, 312500000u,
- 345025251u, 380204032u, 418195493u, 459165024u, 503284375u, 550731776u,
- 601692057u, 656356768u, 714924299u, 777600000u, 844596301u, 916132832u,
- 992436543u, 1073741824u };
+static const byte digits_per_small_lookup[36-1] = {
+#if (SIZEOF_VOID_P == 4)
+ /* Wo.Alpha formula: Table [Trunc[27 / log[2,n]]-1, {n, 2, 36}] */
+ 27, 17, 13, 11, 10, 9, 9, 8, 8, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+#elif (SIZEOF_VOID_P == 8)
+ /* Wo.Alpha formula: Table [Trunc[59 / log[2,n]]-1, {n, 2, 36}] */
+ 59, 37, 29, 25, 22, 21, 19, 18, 17, 17, 16, 15, 15, 15, 14, 14, 14, 13, 13,
+ 13, 13, 13, 12, 12, 12, 12, 12, 12, 12, 11, 11, 11, 11, 11, 11
+#else
+ #error "Please produce a lookup table for the new architecture"
+#endif
+};
+
+/*
+ * Largest power of base which can be represented in a signed int (Sint).
+ * Calculated by base 2..36 to the power of corresponding element from
+ * digits_per_sint_lookup.
+ */
+static const Sint largest_power_of_base_lookup[36-1] = {
+#if (SIZEOF_VOID_P == 4)
+ /* Wo.Alpha formula: Table [Pow[n, Trunc[31 / log[2,n]]-1], {n, 2, 36}] */
+ 1073741824, 387420489, 268435456, 244140625, 60466176, 282475249, 134217728,
+ 43046721, 100000000, 19487171, 35831808, 62748517, 105413504, 11390625,
+ 16777216, 24137569, 34012224, 47045881, 64000000, 85766121, 5153632,
+    6436343, 7962624, 9765625, 11881376, 14348907, 17210368, 20511149, 24300000,
+ 28629151, 33554432, 39135393, 45435424, 52521875, 1679616
+#elif (SIZEOF_VOID_P == 8)
+ /* Wo.Alpha formula: Table [Pow[n, Trunc[63 / log[2,n]]-1], {n, 2, 36}]
+ * with LL added after each element manually */
+ 4611686018427387904LL, 1350851717672992089LL, 1152921504606846976LL,
+ 1490116119384765625LL, 789730223053602816LL, 558545864083284007LL,
+ 1152921504606846976LL, 150094635296999121LL, 100000000000000000LL,
+ 505447028499293771LL, 184884258895036416LL, 665416609183179841LL,
+ 155568095557812224LL, 437893890380859375LL, 72057594037927936LL,
+ 168377826559400929LL, 374813367582081024LL, 42052983462257059LL,
+ 81920000000000000LL, 154472377739119461LL, 282810057883082752LL,
+ 21914624432020321LL, 36520347436056576LL, 59604644775390625LL,
+ 95428956661682176LL, 150094635296999121LL, 232218265089212416LL,
+ 12200509765705829LL, 17714700000000000LL, 25408476896404831LL,
+ 36028797018963968LL, 50542106513726817LL, 70188843638032384LL,
+ 96549157373046875LL, 131621703842267136LL
+#else
+ #error "Please produce a lookup table for the new architecture"
+#endif
+};
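These tables can be regenerated mechanically. A possible sketch for the 64-bit variants, assuming 63 usable value bits in an Sint and the same truncate-minus-one safety rule described in the comments above:

#include <stdio.h>

int main(void)
{
    int base;
    for (base = 2; base <= 36; base++) {
        unsigned long long pow = 1, limit = 1ULL << 63;
        int digits = 0;
        while (pow <= limit / base) {  /* while base^(digits+1) <= 2^63 */
            pow *= base;
            digits++;
        }
        /* one digit is taken away "to be on the safe side" */
        printf("base %2d: digits_per_sint = %d, largest_power = %llu\n",
               base, digits - 1, pow / base);
    }
    return 0;
}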
Eterm erts_chars_to_integer(Process *BIF_P, char *bytes,
Uint size, const int base) {
@@ -2552,8 +2652,11 @@ Eterm erts_chars_to_integer(Process *BIF_P, char *bytes,
int neg = 0;
byte b;
Eterm *hp, *hp_end;
- int m;
+ Sint m;
int lg2;
+ const Uint digits_per_small = get_digits_per_small(base);
+ const Uint digits_per_Sint = get_digits_per_signed_int(base);
+ const Sint largest_pow_of_base = get_largest_power_of_base(base);
if (size == 0)
goto bytebuf_to_integer_1_error;
@@ -2568,55 +2671,69 @@ Eterm erts_chars_to_integer(Process *BIF_P, char *bytes,
size--;
}
- if (size < SMALL_DIGITS && base <= 10) {
- /* *
- * Take shortcut if we know that all chars are '0' < b < '9' and
- * fit in a small. This improves speed by about 10% over the generic
- * small case.
- * */
- while (size--) {
- b = *bytes++;
+ /* Trim leading zeroes */
+ if (size) {
+ while (*bytes == '0') {
+ bytes++;
+ size--;
+ if (!size) {
+ /* All zero! */
+ res = make_small(0);
+ goto bytebuf_to_integer_1_done;
+ }
+ }
+ }
- if (b < '0' || b > ('0'+base-1))
- goto bytebuf_to_integer_1_error;
+ if (size == 0)
+ goto bytebuf_to_integer_1_error;
- i = i * base + b - '0';
- }
+ if (size < digits_per_small) {
+ if (base <= 10) {
+	/*
+	 * Take a shortcut if we know that all chars are '0' <= b <= '9'
+	 * and the result fits in a small. This improves speed by about
+	 * 10% over the generic small case.
+	 */
+ while (size--) {
+ b = *bytes++;
- if (neg)
- i = -i;
- res = make_small(i);
- goto bytebuf_to_integer_1_done;
- }
+ if (b < '0' || b > ('0'+base-1))
+ goto bytebuf_to_integer_1_error;
- /*
- * Calculate the maximum number of bits which will
- * be needed to represent the binary
- */
- lg2 = ((size+2)*LG2_LOOKUP(base)+1);
+ i = i * base + b - '0';
+ }
- if (lg2 < SMALL_BITS) {
- /* Take shortcut if we know it will fit in a small.
- * This improves speed by about 30%.
- */
- while (size) {
- b = *bytes++;
- size--;
+ if (neg)
+ i = -i;
+ res = make_small(i);
+ goto bytebuf_to_integer_1_done;
+ }
- if (IS_VALID_CHARACTER(b,base))
- goto bytebuf_to_integer_1_error;
+ /* Take shortcut if we know it will fit in a small.
+ * This improves speed by about 30%.
+ */
+ while (size) {
+ b = *bytes++;
+ size--;
- i = i * base + CHARACTER_FROM_BASE(b);
+ if (c2int_is_invalid_char(b, base))
+ goto bytebuf_to_integer_1_error;
- }
-
- if (neg)
- i = -i;
- res = make_small(i);
- goto bytebuf_to_integer_1_done;
+ i = i * base + c2int_digit_from_base(b);
+ }
+ if (neg)
+ i = -i;
+ res = make_small(i);
+ goto bytebuf_to_integer_1_done;
}
+ /*
+ * Calculate the maximum number of bits which will
+     * be needed to represent the converted value
+ */
+ lg2 = ((size+2)*lookup_log2(base)+1);
+
/* Start calculating bignum */
m = (lg2 + D_EXP-1)/D_EXP;
m = BIG_NEED_SIZE(m);
@@ -2624,8 +2741,8 @@ Eterm erts_chars_to_integer(Process *BIF_P, char *bytes,
hp = HAlloc(BIF_P, m);
hp_end = hp + m;
- if ((i = (size % D_BASE_EXP(base))) == 0)
- i = D_BASE_EXP(base);
+ if ((i = (size % digits_per_Sint)) == 0)
+ i = digits_per_Sint;
n = size - i;
m = 0;
@@ -2633,43 +2750,40 @@ Eterm erts_chars_to_integer(Process *BIF_P, char *bytes,
while (i--) {
b = *bytes++;
- if (IS_VALID_CHARACTER(b,base)) {
+ if (c2int_is_invalid_char(b,base)) {
HRelease(BIF_P, hp_end, hp);
goto bytebuf_to_integer_1_error;
}
- m = base * m + CHARACTER_FROM_BASE(b);
+ m = base * m + c2int_digit_from_base(b);
}
res = small_to_big(m, hp);
while (n) {
- i = D_BASE_EXP(base);
- n -= D_BASE_EXP(base);
+ i = digits_per_Sint;
+ n -= digits_per_Sint;
m = 0;
while (i--) {
b = *bytes++;
- if (IS_VALID_CHARACTER(b,base)) {
+ if (c2int_is_invalid_char(b,base)) {
HRelease(BIF_P, hp_end, hp);
goto bytebuf_to_integer_1_error;
}
- m = base * m + CHARACTER_FROM_BASE(b);
+ m = base * m + c2int_digit_from_base(b);
}
if (is_small(res)) {
res = small_to_big(signed_val(res), hp);
}
- res = big_times_small(res, D_BASE_BASE(base), hp);
+ res = big_times_small(res, largest_pow_of_base, hp);
if (is_small(res)) {
res = small_to_big(signed_val(res), hp);
}
res = big_plus_small(res, m, hp);
}
- if (is_big(res)) /* check if small */
- res = big_plus_small(res, 0, hp); /* includes conversion to small */
-
if (neg) {
if (is_small(res))
res = make_small(-signed_val(res));
@@ -2679,8 +2793,12 @@ Eterm erts_chars_to_integer(Process *BIF_P, char *bytes,
}
}
- if (is_big(res)) {
- hp += (big_arity(res) + 1);
+ if (is_not_small(res)) {
+ res = big_plus_small(res, 0, hp); /* includes conversion to small */
+
+ if (is_not_small(res)) {
+ hp += (big_arity(res) + 1);
+ }
}
HRelease(BIF_P, hp_end, hp);
goto bytebuf_to_integer_1_done;
@@ -2690,5 +2808,166 @@ bytebuf_to_integer_1_error:
bytebuf_to_integer_1_done:
return res;
+}
+/* Converts a list of digits in the given 'base' to an integer, consuming the
+ * list sequentially. The result is stored in 'integer_out' and the remaining
+ * tail in 'tail_out'; the return code tells whether the list was consumed
+ * fully, partially, or not at all (an error).
+ */
+LTI_result_t erts_list_to_integer(Process *BIF_P, Eterm orig_list,
+ const Uint base,
+ Eterm *integer_out, Eterm *tail_out)
+{
+ Sint i = 0;
+ Uint ui = 0;
+ int skip = 0;
+ int neg = 0;
+ Sint n = 0;
+ Sint m;
+ int lg2;
+ Eterm res;
+ Eterm lst = orig_list;
+ Eterm tail = lst;
+ int error_res = LTI_BAD_STRUCTURE;
+ const Uint digits_per_small = get_digits_per_small(base);
+ const Uint digits_per_Sint = get_digits_per_signed_int(base);
+
+ if (is_nil(lst)) {
+ error_res = LTI_NO_INTEGER;
+ error:
+ *tail_out = tail;
+ *integer_out = make_small(0);
+ return error_res;
+ }
+ if (is_not_list(lst))
+ goto error;
+
+ /* if first char is a '-' then it is a negative integer */
+ if (CAR(list_val(lst)) == make_small('-')) {
+ neg = 1;
+ skip = 1;
+ lst = CDR(list_val(lst));
+ if (is_not_list(lst)) {
+ tail = lst;
+ error_res = LTI_NO_INTEGER;
+ goto error;
+ }
+ } else if (CAR(list_val(lst)) == make_small('+')) {
+ /* ignore plus */
+ skip = 1;
+ lst = CDR(list_val(lst));
+ if (is_not_list(lst)) {
+ tail = lst;
+ error_res = LTI_NO_INTEGER;
+ goto error;
+ }
+ }
+
+ /* Calculate size and do type check */
+
+ while(1) {
+ byte ch;
+ if (is_not_small(CAR(list_val(lst)))) {
+ break;
+ }
+ ch = unsigned_val(CAR(list_val(lst)));
+ if (c2int_is_invalid_char(ch, base)) {
+ break;
+ }
+ ui = ui * base;
+ ui = ui + c2int_digit_from_base(ch);
+ n++;
+ lst = CDR(list_val(lst));
+ if (is_nil(lst)) {
+ break;
+ }
+ if (is_not_list(lst)) {
+ break;
+ }
+ }
+
+ tail = lst;
+ if (!n) {
+ error_res = LTI_NO_INTEGER;
+ goto error;
+ }
+
+    /* If the digit count fits within digits_per_small, we know the result is
+     * a small int. Else we must construct a bignum and let that routine do
+     * the checking.
+     */
+
+ if (n <= digits_per_small) { /* It must be small */
+ i = neg ? -(Sint)ui : (Sint)ui;
+ res = make_small(i);
+ } else {
+ const Sint largest_pow_of_base = get_largest_power_of_base(base);
+ Eterm *hp;
+ Eterm *hp_end;
+
+ /* Convert from log_base to log2 using lookup table */
+ lg2 = ((n+2)*lookup_log2(base)+1);
+ m = (lg2+D_EXP-1)/D_EXP; /* number of digits */
+ m = BIG_NEED_SIZE(m); /* number of words + thing */
+
+ hp = HAlloc(BIF_P, m);
+ hp_end = hp + m;
+
+ lst = orig_list;
+ if (skip)
+ lst = CDR(list_val(lst));
+
+ /* load first digits (at least one digit) */
+ if ((i = (n % digits_per_Sint)) == 0)
+ i = digits_per_Sint;
+ n -= i;
+ m = 0;
+ while(i--) {
+ m *= base;
+ m += c2int_digit_from_base(unsigned_val(CAR(list_val(lst))));
+ lst = CDR(list_val(lst));
+ }
+ res = small_to_big(m, hp); /* load first digits */
+
+ while(n) {
+ i = digits_per_Sint;
+ n -= digits_per_Sint;
+ m = 0;
+ while(i--) {
+ m *= base;
+ m += c2int_digit_from_base(unsigned_val(CAR(list_val(lst))));
+ lst = CDR(list_val(lst));
+ }
+ if (is_small(res))
+ res = small_to_big(signed_val(res), hp);
+ res = big_times_small(res, largest_pow_of_base, hp);
+ if (is_small(res))
+ res = small_to_big(signed_val(res), hp);
+ res = big_plus_small(res, m, hp);
+ }
+
+ if (neg) {
+ if (is_small(res))
+ res = make_small(-signed_val(res));
+ else {
+ Uint *big = big_val(res); /* point to thing */
+ *big = bignum_header_neg(*big);
+ }
+ }
+
+ if (is_not_small(res)) {
+ res = big_plus_small(res, 0, hp); /* includes conversion to small */
+
+ if (is_not_small(res)) {
+ hp += (big_arity(res)+1);
+ }
+ }
+ HRelease(BIF_P, hp_end, hp);
+ }
+ *integer_out = res;
+ *tail_out = tail;
+ if (tail != NIL) {
+ return LTI_SOME_INTEGER;
+ }
+ return LTI_ALL_INTEGER;
}
diff --git a/erts/emulator/beam/big.h b/erts/emulator/beam/big.h
index d80111822e..7556205063 100644
--- a/erts/emulator/beam/big.h
+++ b/erts/emulator/beam/big.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2014. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -20,21 +21,12 @@
#ifndef __BIG_H__
#define __BIG_H__
-#ifndef __SYS_H__
#include "sys.h"
-#endif
-
-#ifndef __CONFIG_H__
-#include "erl_vm.h"
-#endif
-
-#ifndef __GLOBAL_H__
#include "global.h"
-#endif
typedef Uint ErtsDigit;
-#if ((SIZEOF_VOID_P == 4) || HALFWORD_HEAP) && defined(SIZEOF_LONG_LONG) && (SIZEOF_LONG_LONG == 8)
+#if (SIZEOF_VOID_P == 4) && defined(SIZEOF_LONG_LONG) && (SIZEOF_LONG_LONG == 8)
/* Assume 32-bit machine with long long support */
typedef Uint64 ErtsDoubleDigit;
typedef Uint16 ErtsHalfDigit;
@@ -53,9 +45,6 @@ typedef Uint32 ErtsHalfDigit;
#error "can not determine machine size"
#endif
-#define D_DECIMAL_EXP 9
-#define D_DECIMAL_BASE 1000000000
-
typedef Uint dsize_t; /* Vector size type */
#define D_EXP (ERTS_SIZEOF_ETERM*8)
@@ -81,29 +70,42 @@ typedef Uint dsize_t; /* Vector size type */
/* Check for small */
#define IS_USMALL(sgn,x) ((sgn) ? ((x) <= MAX_SMALL+1) : ((x) <= MAX_SMALL))
-#define IS_SSMALL(x) (((x) >= MIN_SMALL) && ((x) <= MAX_SMALL))
+
+/*
+ * It seems that both clang and gcc will generate sub-optimal code
+ * for the more obvious way to write the range check:
+ *
+ * #define IS_SSMALL(x) (((x) >= MIN_SMALL) && ((x) <= MAX_SMALL))
+ *
+ * Note that IS_SSMALL() may be used in the 32-bit emulator with
+ * a Uint64 argument. Therefore, we must test the size of the argument
+ * to ensure that the cast does not discard the high-order 32 bits.
+ */
+#define _IS_SSMALL32(x) (((Uint32) ((((x)) >> (SMALL_BITS-1)) + 1)) < 2)
+#define _IS_SSMALL64(x) (((Uint64) ((((x)) >> (SMALL_BITS-1)) + 1)) < 2)
+#define IS_SSMALL(x) (sizeof(x) == sizeof(Uint32) ? _IS_SSMALL32(x) : _IS_SSMALL64(x))
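The trick relies on arithmetic right shift: for any x within [MIN_SMALL, MAX_SMALL], x >> (SMALL_BITS-1) is 0 (non-negative) or -1 (negative), so adding 1 yields 0 or 1 and a single unsigned compare against 2 tests both bounds branch-free. A standalone sketch under those assumptions (SKETCH_SMALL_BITS is an illustrative stand-in for the real SMALL_BITS):

#include <assert.h>
#include <stdint.h>

#define SKETCH_SMALL_BITS 60          /* illustrative value only */
#define SKETCH_IS_SSMALL(x) \
    (((uint64_t) (((x) >> (SKETCH_SMALL_BITS-1)) + 1)) < 2)

int main(void)
{
    int64_t max_small = (INT64_C(1) << (SKETCH_SMALL_BITS-1)) - 1;
    assert( SKETCH_IS_SSMALL(max_small));        /* upper bound */
    assert( SKETCH_IS_SSMALL(-max_small - 1));   /* lower bound */
    assert(!SKETCH_IS_SSMALL(max_small + 1));    /* one past upper */
    assert(!SKETCH_IS_SSMALL(-max_small - 2));   /* one past lower */
    return 0;
}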
/* The heap size needed for a bignum */
#define BIG_NEED_SIZE(x) ((x) + 1)
+#define BIG_NEED_FOR_BITS(bits) BIG_NEED_SIZE(((bits)-1)/D_EXP + 1)
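For example, BIG_NEED_FOR_BITS(64) on a 64-bit build (D_EXP = 64) expands to BIG_NEED_SIZE((64-1)/64 + 1) = BIG_NEED_SIZE(1) = 2: one digit word plus the bignum header.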
#define BIG_UINT_HEAP_SIZE (1 + 1) /* always, since sizeof(Uint) <= sizeof(Eterm) */
-#if HALFWORD_HEAP
-#define BIG_UWORD_HEAP_SIZE(UW) (((UW) >> (sizeof(Uint) * 8)) ? 3 : 2)
-#else
#define BIG_UWORD_HEAP_SIZE(UW) BIG_UINT_HEAP_SIZE
-#endif
-#if defined(ARCH_32) || HALFWORD_HEAP
+#if defined(ARCH_32)
#define ERTS_UINT64_BIG_HEAP_SIZE__(X) \
((X) >= (((Uint64) 1) << 32) ? (1 + 2) : (1 + 1))
#define ERTS_SINT64_HEAP_SIZE(X) \
(IS_SSMALL((X)) \
? 0 \
- : ERTS_UINT64_BIG_HEAP_SIZE__((X) >= 0 ? (X) : -(X)))
+ : ERTS_UINT64_BIG_HEAP_SIZE__((X) >= 0 ? (X) : -(Uint64)(X)))
#define ERTS_UINT64_HEAP_SIZE(X) \
(IS_USMALL(0, (X)) ? 0 : ERTS_UINT64_BIG_HEAP_SIZE__((X)))
+#define ERTS_MAX_SINT64_HEAP_SIZE (1 + 2)
+#define ERTS_MAX_UINT64_HEAP_SIZE (1 + 2)
+#define ERTS_UINT64_ARRAY_TO_BIG_MAX_HEAP_SZ(LEN) (2*(LEN)+1)
#else
@@ -111,6 +113,9 @@ typedef Uint dsize_t; /* Vector size type */
(IS_SSMALL((X)) ? 0 : (1 + 1))
#define ERTS_UINT64_HEAP_SIZE(X) \
(IS_USMALL(0, (X)) ? 0 : (1 + 1))
+#define ERTS_MAX_SINT64_HEAP_SIZE (1 + 1)
+#define ERTS_MAX_UINT64_HEAP_SIZE (1 + 1)
+#define ERTS_UINT64_ARRAY_TO_BIG_MAX_HEAP_SZ(LEN) ((LEN)+1)
#endif
@@ -126,9 +131,7 @@ Eterm big_minus(Eterm, Eterm, Eterm*);
Eterm big_times(Eterm, Eterm, Eterm*);
Eterm big_div(Eterm, Eterm, Eterm*);
Eterm big_rem(Eterm, Eterm, Eterm*);
-Eterm big_neg(Eterm, Eterm*);
-Eterm big_minus_small(Eterm, Uint, Eterm*);
Eterm big_plus_small(Eterm, Uint, Eterm*);
Eterm big_times_small(Eterm, Uint, Eterm*);
@@ -153,9 +156,11 @@ Eterm bytes_to_big(byte*, dsize_t, int, Eterm*);
byte* big_to_bytes(Eterm, byte*);
int term_to_Uint(Eterm, Uint*);
+int term_to_Uint_mask(Eterm, Uint*);
int term_to_UWord(Eterm, UWord*);
int term_to_Sint(Eterm, Sint*);
#if HAVE_INT64
+Eterm erts_uint64_array_to_big(Uint **, int, int, Uint64 *);
int term_to_Uint64(Eterm, Uint64*);
int term_to_Sint64(Eterm, Sint64*);
#endif
@@ -168,5 +173,15 @@ Eterm erts_sint64_to_big(Sint64, Eterm **);
Eterm erts_chars_to_integer(Process *, char*, Uint, const int);
+/* How list_to_integer classifies the input, was it even a string? */
+typedef enum {
+ LTI_BAD_STRUCTURE = 0,
+ LTI_NO_INTEGER = 1,
+ LTI_SOME_INTEGER = 2,
+ LTI_ALL_INTEGER = 3
+} LTI_result_t;
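A hedged sketch of how a caller inside the emulator might consume this classification (the surrounding BIF context is illustrative, not part of the patch):

    Eterm integer, tail;

    switch (erts_list_to_integer(BIF_P, BIF_ARG_1, 10, &integer, &tail)) {
    case LTI_ALL_INTEGER:   /* whole list consumed; 'integer' holds result */
        BIF_RET(integer);
    case LTI_SOME_INTEGER:  /* numeric prefix converted; 'tail' is the rest */
    case LTI_NO_INTEGER:    /* no leading digits at all */
    case LTI_BAD_STRUCTURE: /* not a proper list */
    default:
        BIF_ERROR(BIF_P, BADARG);
    }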
+
+LTI_result_t erts_list_to_integer(Process *BIF_P, Eterm orig_list,
+ const Uint base,
+ Eterm *integer_out, Eterm *tail_out);
#endif
-
diff --git a/erts/emulator/beam/binary.c b/erts/emulator/beam/binary.c
index c7926f18af..ca3e48e205 100644
--- a/erts/emulator/beam/binary.c
+++ b/erts/emulator/beam/binary.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -26,29 +27,37 @@
#include "global.h"
#include "erl_process.h"
#include "error.h"
+#define ERL_WANT_HIPE_BIF_WRAPPER__
#include "bif.h"
+#undef ERL_WANT_HIPE_BIF_WRAPPER__
#include "big.h"
#include "erl_binary.h"
#include "erl_bits.h"
-#ifdef DEBUG
-static int list_to_bitstr_buf(Eterm obj, char* buf, Uint len);
-#else
-static int list_to_bitstr_buf(Eterm obj, char* buf);
-#endif
-static int bitstr_list_len(Eterm obj, Uint* num_bytes);
+#define L2B_B2L_MIN_EXEC_REDS (CONTEXT_REDS/4)
+#define L2B_B2L_RESCHED_REDS (CONTEXT_REDS/40)
+
+static Export binary_to_list_continue_export;
+static Export list_to_binary_continue_export;
+
+static BIF_RETTYPE binary_to_list_continue(BIF_ALIST_1);
+static BIF_RETTYPE list_to_binary_continue(BIF_ALIST_1);
void
erts_init_binary(void)
{
/* Verify Binary alignment... */
- if ((((UWord) &((Binary *) 0)->orig_bytes[0]) % ((UWord) 8)) != 0) {
- /* I assume that any compiler should be able to optimize this
- away. If not, this test is not very expensive... */
- erl_exit(ERTS_ABORT_EXIT,
- "Internal error: Address of orig_bytes[0] of a Binary"
- " is *not* 8-byte aligned\n");
- }
+ ERTS_CT_ASSERT((offsetof(Binary,orig_bytes) % 8) == 0);
+ ERTS_CT_ASSERT((offsetof(ErtsMagicBinary,u.aligned.data) % 8) == 0);
+
+ erts_init_trap_export(&binary_to_list_continue_export,
+ am_erts_internal, am_binary_to_list_continue, 1,
+ &binary_to_list_continue);
+
+ erts_init_trap_export(&list_to_binary_continue_export,
+ am_erts_internal, am_list_to_binary_continue, 1,
+ &list_to_binary_continue);
+
}
/*
@@ -75,9 +84,6 @@ new_binary(Process *p, byte *buf, Uint len)
* Allocate the binary struct itself.
*/
bptr = erts_bin_nrml_alloc(len);
- bptr->flags = 0;
- bptr->orig_size = len;
- erts_refc_init(&bptr->refc, 1);
if (buf != NULL) {
sys_memcpy(bptr->orig_bytes, buf, len);
}
@@ -95,7 +101,7 @@ new_binary(Process *p, byte *buf, Uint len)
pb->flags = 0;
/*
- * Miscellanous updates. Return the tagged binary.
+ * Miscellaneous updates. Return the tagged binary.
*/
OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm));
return make_binary(pb);
@@ -105,7 +111,7 @@ new_binary(Process *p, byte *buf, Uint len)
* When heap binary is not desired...
*/
-Eterm erts_new_mso_binary(Process *p, byte *buf, int len)
+Eterm erts_new_mso_binary(Process *p, byte *buf, Uint len)
{
ProcBin* pb;
Binary* bptr;
@@ -114,9 +120,6 @@ Eterm erts_new_mso_binary(Process *p, byte *buf, int len)
* Allocate the binary struct itself.
*/
bptr = erts_bin_nrml_alloc(len);
- bptr->flags = 0;
- bptr->orig_size = len;
- erts_refc_init(&bptr->refc, 1);
if (buf != NULL) {
sys_memcpy(bptr->orig_bytes, buf, len);
}
@@ -134,7 +137,7 @@ Eterm erts_new_mso_binary(Process *p, byte *buf, int len)
pb->flags = 0;
/*
- * Miscellanous updates. Return the tagged binary.
+ * Miscellaneous updates. Return the tagged binary.
*/
OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm));
return make_binary(pb);
@@ -169,7 +172,6 @@ erts_realloc_binary(Eterm bin, size_t size)
} else { /* REFC */
ProcBin* pb = (ProcBin *) bval;
Binary* newbin = erts_bin_realloc(pb->val, size);
- newbin->orig_size = size;
pb->val = newbin;
pb->size = size;
pb->bytes = (byte*) newbin->orig_bytes;
@@ -333,6 +335,132 @@ BIF_RETTYPE integer_to_binary_1(BIF_ALIST_1)
BIF_RET(res);
}
+#define ERTS_B2L_BYTES_PER_REDUCTION 256
+
+typedef struct {
+ Eterm res;
+ Eterm *hp;
+#ifdef DEBUG
+ Eterm *hp_end;
+#endif
+ byte *bytes;
+ Uint size;
+ Uint bitoffs;
+} ErtsB2LState;
+
+static int b2l_state_destructor(Binary *mbp)
+{
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == b2l_state_destructor);
+ return 1;
+}
+
+static BIF_RETTYPE
+binary_to_list_chunk(Process *c_p,
+ Eterm mb_eterm,
+ ErtsB2LState* sp,
+ int reds_left,
+ int gc_disabled)
+{
+ BIF_RETTYPE ret;
+ int bump_reds;
+ Uint size;
+ byte *bytes;
+
+ size = (reds_left + 1)*ERTS_B2L_BYTES_PER_REDUCTION;
+ if (size > sp->size)
+ size = sp->size;
+ bytes = sp->bytes + (sp->size - size);
+
+ bump_reds = (size - 1)/ERTS_B2L_BYTES_PER_REDUCTION + 1;
+ BUMP_REDS(c_p, bump_reds);
+
+ ASSERT(is_list(sp->res) || is_nil(sp->res));
+
+ sp->res = erts_bin_bytes_to_list(sp->res,
+ sp->hp,
+ bytes,
+ size,
+ sp->bitoffs);
+ sp->size -= size;
+ sp->hp += 2*size;
+
+ if (sp->size > 0) {
+
+ if (!gc_disabled)
+ erts_set_gc_state(c_p, 0);
+
+ ASSERT(c_p->flags & F_DISABLE_GC);
+ ASSERT(is_value(mb_eterm));
+ ERTS_BIF_PREP_TRAP1(ret,
+ &binary_to_list_continue_export,
+ c_p,
+ mb_eterm);
+ }
+ else {
+
+ ASSERT(sp->hp == sp->hp_end);
+ ASSERT(sp->size == 0);
+
+ if (!gc_disabled || !erts_set_gc_state(c_p, 1))
+ ERTS_BIF_PREP_RET(ret, sp->res);
+ else
+ ERTS_BIF_PREP_YIELD_RETURN(ret, c_p, sp->res);
+ ASSERT(!(c_p->flags & F_DISABLE_GC));
+ }
+
+ return ret;
+}
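Back-of-envelope, assuming the customary CONTEXT_REDS of 4000: a full time slice lets binary_to_list convert about 4000 * 256 = 1024000 bytes, i.e. roughly 1 MB per schedule; anything larger is processed in chunks whose state survives between traps in the magic binary.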
+
+static ERTS_INLINE BIF_RETTYPE
+binary_to_list(Process *c_p, Eterm *hp, Eterm tail, byte *bytes,
+ Uint size, Uint bitoffs, int reds_left, int one_chunk)
+{
+ if (one_chunk) {
+ Eterm res;
+ BIF_RETTYPE ret;
+ int bump_reds = (size - 1)/ERTS_B2L_BYTES_PER_REDUCTION + 1;
+ BUMP_REDS(c_p, bump_reds);
+ res = erts_bin_bytes_to_list(tail, hp, bytes, size, bitoffs);
+ ERTS_BIF_PREP_RET(ret, res);
+ return ret;
+ }
+ else {
+ Binary *mbp = erts_create_magic_binary(sizeof(ErtsB2LState),
+ b2l_state_destructor);
+ ErtsB2LState *sp = ERTS_MAGIC_BIN_DATA(mbp);
+ Eterm mb;
+
+ sp->res = tail;
+ sp->hp = hp;
+#ifdef DEBUG
+ sp->hp_end = sp->hp + 2*size;
+#endif
+ sp->bytes = bytes;
+ sp->size = size;
+ sp->bitoffs = bitoffs;
+
+ hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE);
+ mb = erts_mk_magic_ref(&hp, &MSO(c_p), mbp);
+ return binary_to_list_chunk(c_p, mb, sp, reds_left, 0);
+ }
+}
+
+static BIF_RETTYPE binary_to_list_continue(BIF_ALIST_1)
+{
+ Binary *mbp = erts_magic_ref2bin(BIF_ARG_1);
+
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == b2l_state_destructor);
+ ASSERT(BIF_P->flags & F_DISABLE_GC);
+
+ return binary_to_list_chunk(BIF_P,
+ BIF_ARG_1,
+ (ErtsB2LState*) ERTS_MAGIC_BIN_DATA(mbp),
+ ERTS_BIF_REDS_LEFT(BIF_P),
+ 1);
+}
+
+HIPE_WRAPPER_BIF_DISABLE_GC(binary_to_list, 1)
+
BIF_RETTYPE binary_to_list_1(BIF_ALIST_1)
{
Eterm real_bin;
@@ -340,11 +468,29 @@ BIF_RETTYPE binary_to_list_1(BIF_ALIST_1)
Uint size;
Uint bitsize;
Uint bitoffs;
+ int reds_left;
+ int one_chunk;
if (is_not_binary(BIF_ARG_1)) {
goto error;
}
+
size = binary_size(BIF_ARG_1);
+ reds_left = ERTS_BIF_REDS_LEFT(BIF_P);
+ one_chunk = size < reds_left*ERTS_B2L_BYTES_PER_REDUCTION;
+ if (!one_chunk) {
+ if (size < L2B_B2L_MIN_EXEC_REDS*ERTS_B2L_BYTES_PER_REDUCTION) {
+ if (reds_left <= L2B_B2L_RESCHED_REDS) {
+ /* Yield and do it with full context reds... */
+ ERTS_BIF_YIELD1(bif_export[BIF_binary_to_list_1],
+ BIF_P, BIF_ARG_1);
+ }
+ /* Allow a bit more reductions... */
+ one_chunk = 1;
+ reds_left = L2B_B2L_MIN_EXEC_REDS;
+ }
+ }
+
ERTS_GET_REAL_BIN(BIF_ARG_1, real_bin, offset, bitoffs, bitsize);
if (bitsize != 0) {
goto error;
@@ -354,14 +500,16 @@ BIF_RETTYPE binary_to_list_1(BIF_ALIST_1)
} else {
Eterm* hp = HAlloc(BIF_P, 2 * size);
byte* bytes = binary_bytes(real_bin)+offset;
-
- BIF_RET(erts_bin_bytes_to_list(NIL, hp, bytes, size, bitoffs));
+ return binary_to_list(BIF_P, hp, NIL, bytes, size,
+ bitoffs, reds_left, one_chunk);
}
error:
BIF_ERROR(BIF_P, BADARG);
}
+HIPE_WRAPPER_BIF_DISABLE_GC(binary_to_list, 3)
+
BIF_RETTYPE binary_to_list_3(BIF_ALIST_3)
{
byte* bytes;
@@ -372,6 +520,8 @@ BIF_RETTYPE binary_to_list_3(BIF_ALIST_3)
Uint start;
Uint stop;
Eterm* hp;
+ int reds_left;
+ int one_chunk;
if (is_not_binary(BIF_ARG_1)) {
goto error;
@@ -380,6 +530,21 @@ BIF_RETTYPE binary_to_list_3(BIF_ALIST_3)
goto error;
}
size = binary_size(BIF_ARG_1);
+ reds_left = ERTS_BIF_REDS_LEFT(BIF_P);
+ one_chunk = size < reds_left*ERTS_B2L_BYTES_PER_REDUCTION;
+ if (!one_chunk) {
+ if (size < L2B_B2L_MIN_EXEC_REDS*ERTS_B2L_BYTES_PER_REDUCTION) {
+ if (reds_left <= L2B_B2L_RESCHED_REDS) {
+ /* Yield and do it with full context reds... */
+ ERTS_BIF_YIELD3(bif_export[BIF_binary_to_list_3],
+ BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ }
+ /* Allow a bit more reductions... */
+ one_chunk = 1;
+ reds_left = L2B_B2L_MIN_EXEC_REDS;
+ }
+ }
+
ERTS_GET_BINARY_BYTES(BIF_ARG_1, bytes, bitoffs, bitsize);
if (start < 1 || start > size || stop < 1 ||
stop > size || stop < start ) {
@@ -387,12 +552,14 @@ BIF_RETTYPE binary_to_list_3(BIF_ALIST_3)
}
i = stop-start+1;
hp = HAlloc(BIF_P, 2*i);
- BIF_RET(erts_bin_bytes_to_list(NIL, hp, bytes+start-1, i, bitoffs));
-
+ return binary_to_list(BIF_P, hp, NIL, bytes+start-1, i,
+ bitoffs, reds_left, one_chunk);
error:
BIF_ERROR(BIF_P, BADARG);
}
+HIPE_WRAPPER_BIF_DISABLE_GC(bitstring_to_list, 1)
+
BIF_RETTYPE bitstring_to_list_1(BIF_ALIST_1)
{
Eterm real_bin;
@@ -403,11 +570,27 @@ BIF_RETTYPE bitstring_to_list_1(BIF_ALIST_1)
byte* bytes;
Eterm previous = NIL;
Eterm* hp;
+ int reds_left;
+ int one_chunk;
if (is_not_binary(BIF_ARG_1)) {
BIF_ERROR(BIF_P, BADARG);
}
size = binary_size(BIF_ARG_1);
+ reds_left = ERTS_BIF_REDS_LEFT(BIF_P);
+ one_chunk = size < reds_left*ERTS_B2L_BYTES_PER_REDUCTION;
+ if (!one_chunk) {
+ if (size < L2B_B2L_MIN_EXEC_REDS*ERTS_B2L_BYTES_PER_REDUCTION) {
+ if (reds_left <= L2B_B2L_RESCHED_REDS) {
+ /* Yield and do it with full context reds... */
+ ERTS_BIF_YIELD1(bif_export[BIF_bitstring_to_list_1],
+ BIF_P, BIF_ARG_1);
+ }
+ /* Allow a bit more reductions... */
+ one_chunk = 1;
+ reds_left = L2B_B2L_MIN_EXEC_REDS;
+ }
+ }
ERTS_GET_REAL_BIN(BIF_ARG_1, real_bin, offset, bitoffs, bitsize);
bytes = binary_bytes(real_bin)+offset;
if (bitsize == 0) {
@@ -431,124 +614,453 @@ BIF_RETTYPE bitstring_to_list_1(BIF_ALIST_1)
previous = CONS(hp, make_binary(last), previous);
hp += 2;
}
- BIF_RET(erts_bin_bytes_to_list(previous, hp, bytes, size, bitoffs));
+
+ return binary_to_list(BIF_P, hp, previous, bytes, size,
+ bitoffs, reds_left, one_chunk);
}
/* Turn a possibly deep list of ints (and binaries) into */
/* One large binary object */
-/*
- * This bif also exists in the binary module, under the name
- * binary:list_to_bin/1, why it's divided into interface and
- * implementation. Also the backend for iolist_to_binary_1.
- */
+typedef enum {
+ ERTS_L2B_OK,
+ ERTS_L2B_YIELD,
+ ERTS_L2B_TYPE_ERROR,
+ ERTS_L2B_OVERFLOW_ERROR
+} ErtsL2BResult;
-BIF_RETTYPE erts_list_to_binary_bif(Process *p, Eterm arg)
-{
+#define ERTS_L2B_STATE_INITER(C_P, ARG, BIF, SZFunc, TBufFunc) \
+ {ERTS_IOLIST2BUF_STATE_INITER((C_P), (ARG)), \
+ (ARG), THE_NON_VALUE, (BIF), (SZFunc), (TBufFunc)}
+
+#define ERTS_L2B_STATE_MOVE(TO, FROM) \
+ sys_memcpy((void *) (TO), (void *) (FROM), sizeof(ErtsL2BState))
+
+typedef struct ErtsL2BState_ ErtsL2BState;
+
+struct ErtsL2BState_ {
+ ErtsIOList2BufState buf;
+ Eterm arg;
Eterm bin;
- Eterm h,t;
- ErlDrvSizeT size;
- byte* bytes;
-#ifdef DEBUG
- ErlDrvSizeT offset;
-#endif
+ Export *bif;
+ int (*iolist_to_buf_size)(ErtsIOListState *);
+ ErlDrvSizeT (*iolist_to_buf)(ErtsIOList2BufState *);
+};
+
+static ERTS_INLINE ErtsL2BResult
+list_to_binary_engine(ErtsL2BState *sp)
+{
+ ErlDrvSizeT res;
+ Process *c_p = sp->buf.iolist.c_p;
+
+ /*
+ * have_size == 0 while sp->iolist_to_buf_size()
+ * has not finished the calculation.
+ */
+
+ if (!sp->buf.iolist.have_size) {
+ switch (sp->iolist_to_buf_size(&sp->buf.iolist)) {
+ case ERTS_IOLIST_YIELD:
+ return ERTS_L2B_YIELD;
+ case ERTS_IOLIST_OVERFLOW:
+ return ERTS_L2B_OVERFLOW_ERROR;
+ case ERTS_IOLIST_TYPE:
+ return ERTS_L2B_TYPE_ERROR;
+ case ERTS_IOLIST_OK:
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+
+ ASSERT(sp->buf.iolist.have_size);
+
+ /*
+ * Size calculated... Setup state for
+ * sp->iolist_to_buf_*()
+ */
+
+ sp->bin = new_binary(c_p,
+ (byte *) NULL,
+ sp->buf.iolist.size);
+
+ if (sp->buf.iolist.size == 0)
+ return ERTS_L2B_OK;
+
+ sp->buf.buf = (char *) binary_bytes(sp->bin);
+ sp->buf.len = sp->buf.iolist.size;
+ sp->buf.iolist.obj = sp->arg;
- if (is_nil(arg)) {
- BIF_RET(new_binary(p,(byte*)"",0));
+ if (sp->buf.iolist.reds_left <= 0) {
+ BUMP_ALL_REDS(c_p);
+ return ERTS_L2B_YIELD;
+ }
}
- if (is_not_list(arg)) {
- goto error;
+
+ ASSERT(sp->buf.iolist.size != 0);
+ ASSERT(is_value(sp->bin));
+ ASSERT(sp->buf.buf);
+
+ res = sp->iolist_to_buf(&sp->buf);
+
+ if (!ERTS_IOLIST_TO_BUF_FAILED(res)) {
+ ASSERT(res == 0);
+ return ERTS_L2B_OK;
}
- /* check for [binary()] case */
- h = CAR(list_val(arg));
- t = CDR(list_val(arg));
- if (is_binary(h) && is_nil(t) && !(
- HEADER_SUB_BIN == *(binary_val(h)) && (
- ((ErlSubBin *)binary_val(h))->bitoffs != 0 ||
- ((ErlSubBin *)binary_val(h))->bitsize != 0
- ))) {
- return h;
- }
- switch (erts_iolist_size(arg, &size)) {
- case ERTS_IOLIST_OVERFLOW: BIF_ERROR(p, SYSTEM_LIMIT);
- case ERTS_IOLIST_TYPE: goto error;
- default: ;
- }
- bin = new_binary(p, (byte *)NULL, size);
- bytes = binary_bytes(bin);
-#ifdef DEBUG
- offset =
-#endif
- erts_iolist_to_buf(arg, (char*) bytes, size);
- ASSERT(offset == 0);
- BIF_RET(bin);
+ switch (res) {
+ case ERTS_IOLIST_TO_BUF_YIELD:
+ return ERTS_L2B_YIELD;
+ case ERTS_IOLIST_TO_BUF_OVERFLOW:
+ return ERTS_L2B_OVERFLOW_ERROR;
+ case ERTS_IOLIST_TO_BUF_TYPE_ERROR:
+ return ERTS_L2B_TYPE_ERROR;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid return value from iolist_to_buf_yielding()");
+ return ERTS_L2B_TYPE_ERROR;
+ }
+}
+
+static int
+l2b_state_destructor(Binary *mbp)
+{
+ ErtsL2BState *sp = ERTS_MAGIC_BIN_DATA(mbp);
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == l2b_state_destructor);
+ DESTROY_SAVED_ESTACK(&sp->buf.iolist.estack);
+ return 1;
+}
+
+static ERTS_INLINE Eterm
+l2b_final_touch(Process *c_p, ErtsL2BState *sp)
+{
+ Eterm *hp;
+ ErlSubBin* sbin;
+ if (sp->buf.offset == 0)
+ return sp->bin;
+
+ hp = HAlloc(c_p, ERL_SUB_BIN_SIZE);
+ ASSERT(sp->buf.offset > 0);
+ sbin = (ErlSubBin *) hp;
+ sbin->thing_word = HEADER_SUB_BIN;
+ sbin->size = sp->buf.iolist.size-1;
+ sbin->offs = 0;
+ sbin->orig = sp->bin;
+ sbin->bitoffs = 0;
+ sbin->bitsize = sp->buf.offset;
+ sbin->is_writable = 0;
+ return make_binary(sbin);
+}
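For example, if the flattened result is 12 bits, the buffer was allocated as 2 whole bytes, buf.offset ends up as 4, and the sub binary built above trims the result to 1 byte plus 4 trailing bits.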
+
+static BIF_RETTYPE
+list_to_binary_chunk(Eterm mb_eterm,
+ ErtsL2BState* sp,
+ int reds_left,
+ int gc_disabled)
+{
+ Eterm err = BADARG;
+ BIF_RETTYPE ret;
+ Process *c_p = sp->buf.iolist.c_p;
+
+ sp->buf.iolist.reds_left = reds_left;
- error:
- BIF_ERROR(p, BADARG);
+ switch (list_to_binary_engine(sp)) {
+
+ case ERTS_L2B_OK: {
+ Eterm result = l2b_final_touch(c_p, sp);
+ if (!gc_disabled || !erts_set_gc_state(c_p, 1))
+ ERTS_BIF_PREP_RET(ret, result);
+ else
+ ERTS_BIF_PREP_YIELD_RETURN(ret, c_p, result);
+ ASSERT(!(c_p->flags & F_DISABLE_GC));
+ break;
+ }
+ case ERTS_L2B_YIELD:
+ if (!gc_disabled) {
+ /* first yield... */
+ Eterm *hp;
+ Binary *mbp = erts_create_magic_binary(sizeof(ErtsL2BState),
+ l2b_state_destructor);
+ ErtsL2BState *new_sp = ERTS_MAGIC_BIN_DATA(mbp);
+
+ ERTS_L2B_STATE_MOVE(new_sp, sp);
+ sp = new_sp;
+
+ hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE);
+ mb_eterm = erts_mk_magic_ref(&hp, &MSO(c_p), mbp);
+
+ ASSERT(is_value(mb_eterm));
+
+ erts_set_gc_state(c_p, 0);
+ }
+
+ ASSERT(c_p->flags & F_DISABLE_GC);
+
+ ERTS_BIF_PREP_TRAP1(ret,
+ &list_to_binary_continue_export,
+ c_p,
+ mb_eterm);
+ break;
+
+ case ERTS_L2B_OVERFLOW_ERROR:
+ err = SYSTEM_LIMIT;
+ /* fall through */
+
+ case ERTS_L2B_TYPE_ERROR:
+ if (!gc_disabled)
+ ERTS_BIF_PREP_ERROR(ret, c_p, err);
+ else {
+ if (erts_set_gc_state(c_p, 1))
+ ERTS_VBUMP_ALL_REDS(c_p);
+
+ ERTS_BIF_PREP_ERROR_TRAPPED1(ret,
+ c_p,
+ err,
+ sp->bif,
+ sp->arg);
+ }
+
+ ASSERT(!(c_p->flags & F_DISABLE_GC));
+ break;
+
+ default:
+ ERTS_INTERNAL_ERROR("Invalid return value from list_to_binary_engine()");
+ ERTS_BIF_PREP_ERROR(ret,c_p, EXC_INTERNAL_ERROR);
+ break;
+ }
+ return ret;
+}
+
+static BIF_RETTYPE list_to_binary_continue(BIF_ALIST_1)
+{
+ Binary *mbp = erts_magic_ref2bin(BIF_ARG_1);
+
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == l2b_state_destructor);
+ ASSERT(BIF_P->flags & F_DISABLE_GC);
+
+ return list_to_binary_chunk(BIF_ARG_1,
+ ERTS_MAGIC_BIN_DATA(mbp),
+ ERTS_BIF_REDS_LEFT(BIF_P),
+ 1);
}
+BIF_RETTYPE erts_list_to_binary_bif(Process *c_p, Eterm arg, Export *bif)
+{
+ int orig_reds_left = ERTS_BIF_REDS_LEFT(c_p);
+ BIF_RETTYPE ret;
+
+ if (orig_reds_left < L2B_B2L_MIN_EXEC_REDS) {
+ if (orig_reds_left <= L2B_B2L_RESCHED_REDS) {
+ /* Yield and do it with full context reds... */
+ ERTS_BIF_PREP_YIELD1(ret, bif, c_p, arg);
+ return ret;
+ }
+ /* Allow a bit more reductions... */
+ orig_reds_left = L2B_B2L_MIN_EXEC_REDS;
+ }
+
+ if (is_nil(arg))
+ ERTS_BIF_PREP_RET(ret, new_binary(c_p, (byte *) "", 0));
+ else if (is_not_list(arg))
+ ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
+ else {
+ /* check for [binary()] case */
+ Eterm h = CAR(list_val(arg));
+ Eterm t = CDR(list_val(arg));
+ if (is_binary(h)
+ && is_nil(t)
+ && !(HEADER_SUB_BIN == *(binary_val(h))
+ && (((ErlSubBin *)binary_val(h))->bitoffs != 0
+ || ((ErlSubBin *)binary_val(h))->bitsize != 0))) {
+ ERTS_BIF_PREP_RET(ret, h);
+ }
+ else {
+ ErtsL2BState state = ERTS_L2B_STATE_INITER(c_p,
+ arg,
+ bif,
+ erts_iolist_size_yielding,
+ erts_iolist_to_buf_yielding);
+
+ /*
+ * First try to do it all at once without having to use
+ * yielding iolist_to_buf().
+ */
+ state.buf.iolist.reds_left = orig_reds_left;
+ switch (erts_iolist_size_yielding(&state.buf.iolist)) {
+ case ERTS_IOLIST_OK: {
+ ErlDrvSizeT size = state.buf.iolist.size;
+ Eterm bin;
+ char *buf;
+
+ if (size == 0) {
+ ERTS_BIF_PREP_RET(ret, new_binary(c_p, (byte *) NULL, 0));
+ break; /* done */
+ }
+
+ bin = new_binary(c_p, (byte *) NULL, size);
+ buf = (char *) binary_bytes(bin);
+
+ if (size < ERTS_IOLIST_TO_BUF_BYTES_PER_RED*CONTEXT_REDS) {
+ /* An (over) estimation of reductions needed */
+ int reds_left = state.buf.iolist.reds_left;
+ int to_buf_reds = orig_reds_left - reds_left;
+ to_buf_reds += size/ERTS_IOLIST_TO_BUF_BYTES_PER_RED;
+ if (to_buf_reds <= reds_left) {
+ ErlDrvSizeT res;
+
+ res = erts_iolist_to_buf(arg, buf, size);
+ if (res == 0) {
+ BUMP_REDS(c_p, to_buf_reds);
+ ERTS_BIF_PREP_RET(ret, bin);
+ break; /* done */
+ }
+ if (!ERTS_IOLIST_TO_BUF_FAILED(res))
+                        ERTS_INTERNAL_ERROR("iolist_size/iolist_to_buf mismatch");
+ if (res == ERTS_IOLIST_TO_BUF_OVERFLOW)
+ goto overflow;
+ goto type_error;
+ }
+ }
+ /*
+             * Since the size has been computed, list_to_binary_chunk()
+             * expects the state to be prepared for iolist_to_buf.
+ */
+ state.bin = bin;
+ state.buf.buf = buf;
+ state.buf.len = size;
+ state.buf.iolist.obj = arg;
+ /* Fall through... */
+ }
+ case ERTS_IOLIST_YIELD:
+ ret = list_to_binary_chunk(THE_NON_VALUE,
+ &state,
+ state.buf.iolist.reds_left,
+ 0);
+ break;
+ case ERTS_IOLIST_OVERFLOW:
+ overflow:
+ ERTS_BIF_PREP_ERROR(ret, c_p, SYSTEM_LIMIT);
+ break;
+ case ERTS_IOLIST_TYPE:
+ type_error:
+ default:
+ ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
+ break;
+ }
+ }
+ }
+ return ret;
+}
+
+HIPE_WRAPPER_BIF_DISABLE_GC(list_to_binary, 1)
+
BIF_RETTYPE list_to_binary_1(BIF_ALIST_1)
{
- return erts_list_to_binary_bif(BIF_P, BIF_ARG_1);
+ return erts_list_to_binary_bif(BIF_P, BIF_ARG_1, bif_export[BIF_list_to_binary_1]);
}
-/* Turn a possibly deep list of ints (and binaries) into */
-/* One large binary object */
+HIPE_WRAPPER_BIF_DISABLE_GC(iolist_to_binary, 1)
BIF_RETTYPE iolist_to_binary_1(BIF_ALIST_1)
{
if (is_binary(BIF_ARG_1)) {
BIF_RET(BIF_ARG_1);
}
- return erts_list_to_binary_bif(BIF_P, BIF_ARG_1);
+ return erts_list_to_binary_bif(BIF_P, BIF_ARG_1, bif_export[BIF_iolist_to_binary_1]);
}
+static int bitstr_list_len(ErtsIOListState *);
+static ErlDrvSizeT list_to_bitstr_buf_yielding(ErtsIOList2BufState *);
+static ErlDrvSizeT list_to_bitstr_buf_not_yielding(ErtsIOList2BufState *);
+
+HIPE_WRAPPER_BIF_DISABLE_GC(list_to_bitstring, 1)
+
BIF_RETTYPE list_to_bitstring_1(BIF_ALIST_1)
{
- Eterm bin;
- Uint sz;
- int offset;
- byte* bytes;
- ErlSubBin* sb1;
- Eterm* hp;
-
- if (is_nil(BIF_ARG_1)) {
- BIF_RET(new_binary(BIF_P,(byte*)"",0));
- }
- if (is_not_list(BIF_ARG_1)) {
- error:
- BIF_ERROR(BIF_P, BADARG);
- }
- switch (bitstr_list_len(BIF_ARG_1, &sz)) {
- case ERTS_IOLIST_TYPE:
- goto error;
- case ERTS_IOLIST_OVERFLOW:
- BIF_ERROR(BIF_P, SYSTEM_LIMIT);
- }
- bin = new_binary(BIF_P, (byte *)NULL, sz);
- bytes = binary_bytes(bin);
-#ifdef DEBUG
- offset = list_to_bitstr_buf(BIF_ARG_1, (char*) bytes, sz);
-#else
- offset = list_to_bitstr_buf(BIF_ARG_1, (char*) bytes);
-#endif
- ASSERT(offset >= 0);
- if (offset > 0) {
- hp = HAlloc(BIF_P, ERL_SUB_BIN_SIZE);
- sb1 = (ErlSubBin *) hp;
- sb1->thing_word = HEADER_SUB_BIN;
- sb1->size = sz-1;
- sb1->offs = 0;
- sb1->orig = bin;
- sb1->bitoffs = 0;
- sb1->bitsize = offset;
- sb1->is_writable = 0;
- bin = make_binary(sb1);
+ BIF_RETTYPE ret;
+
+ if (is_nil(BIF_ARG_1))
+ ERTS_BIF_PREP_RET(ret, new_binary(BIF_P, (byte *) "", 0));
+ else if (is_not_list(BIF_ARG_1))
+ ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
+ else {
+ /* check for [bitstring()] case */
+ Eterm h = CAR(list_val(BIF_ARG_1));
+ Eterm t = CDR(list_val(BIF_ARG_1));
+ if (is_binary(h) && is_nil(t)) {
+ ERTS_BIF_PREP_RET(ret, h);
+ }
+ else {
+ ErtsL2BState state = ERTS_L2B_STATE_INITER(BIF_P,
+ BIF_ARG_1,
+ bif_export[BIF_list_to_bitstring_1],
+ bitstr_list_len,
+ list_to_bitstr_buf_yielding);
+ int orig_reds_left = ERTS_BIF_REDS_LEFT(BIF_P);
+
+ /*
+ * First try to do it all at once without having to use
+ * yielding list_to_bitstr_buf().
+ */
+ state.buf.iolist.reds_left = orig_reds_left;
+ switch (bitstr_list_len(&state.buf.iolist)) {
+ case ERTS_IOLIST_OK: {
+ ErlDrvSizeT size = state.buf.iolist.size;
+
+ state.bin = new_binary(BIF_P, (byte *) NULL, size);
+ state.buf.buf = (char *) binary_bytes(state.bin);
+ state.buf.len = size;
+ state.buf.iolist.obj = BIF_ARG_1;
+
+ if (size < ERTS_IOLIST_TO_BUF_BYTES_PER_RED*CONTEXT_REDS) {
+ /* An (over) estimation of reductions needed */
+ int reds_left = state.buf.iolist.reds_left;
+ int to_buf_reds = orig_reds_left - reds_left;
+ to_buf_reds += size/ERTS_IOLIST_TO_BUF_BYTES_PER_RED;
+ if (to_buf_reds <= reds_left) {
+ ErlDrvSizeT res;
+
+ res = list_to_bitstr_buf_not_yielding(&state.buf);
+ if (res == 0) {
+ Eterm res_bin = l2b_final_touch(BIF_P, &state);
+ BUMP_REDS(BIF_P, to_buf_reds);
+ ERTS_BIF_PREP_RET(ret, res_bin);
+ break; /* done */
+ }
+ if (!ERTS_IOLIST_TO_BUF_FAILED(res))
+                        ERTS_INTERNAL_ERROR("iolist_size/iolist_to_buf mismatch");
+ if (res == ERTS_IOLIST_TO_BUF_OVERFLOW)
+ goto overflow;
+ goto type_error;
+ }
+ }
+ /*
+             * Since the size has been computed, list_to_binary_chunk()
+             * expects the state to be prepared for list_to_bitstr_buf.
+ */
+
+ /* Fall through... */
+ }
+ case ERTS_IOLIST_YIELD:
+ ret = list_to_binary_chunk(THE_NON_VALUE,
+ &state,
+ state.buf.iolist.reds_left,
+ 0);
+ break;
+ case ERTS_IOLIST_OVERFLOW:
+ overflow:
+ ERTS_BIF_PREP_ERROR(ret, BIF_P, SYSTEM_LIMIT);
+ break;
+ case ERTS_IOLIST_TYPE:
+ type_error:
+ default:
+ ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
+ break;
+ }
+ }
}
-
- BIF_RET(bin);
+
+ return ret;
}
BIF_RETTYPE split_binary_2(BIF_ALIST_2)
@@ -605,123 +1117,353 @@ BIF_RETTYPE split_binary_2(BIF_ALIST_2)
* Local functions.
*/
+static int
+list_to_bitstr_buf_bcopy(ErtsIOList2BufState *state, Eterm obj, int *yield_countp);
+
/*
* The input list is assumed to be type-correct and the buffer is
* assumed to be of sufficient size. Those assumptions are verified in
* the DEBUG-built emulator.
*/
-static int
+static ErlDrvSizeT
+list_to_bitstr_buf(int yield_support, ErtsIOList2BufState *state)
+{
+
+#undef LIST_TO_BITSTR_BUF_BCOPY_DBG
+#undef LIST_TO_BITSTR_BUF_BCOPY
#ifdef DEBUG
-list_to_bitstr_buf(Eterm obj, char* buf, Uint len)
+#define LIST_TO_BITSTR_BUF_BCOPY_DBG \
+ len -= size + (offset>7);
#else
-list_to_bitstr_buf(Eterm obj, char* buf)
+#define LIST_TO_BITSTR_BUF_BCOPY_DBG
#endif
-{
- Eterm* objp;
- int offset = 0;
+#define LIST_TO_BITSTR_BUF_BCOPY(CONSP) \
+ do { \
+ byte* bptr; \
+ Uint bitsize; \
+ Uint bitoffs; \
+ Uint num_bits; \
+ size_t size = binary_size(obj); \
+ if (yield_support) { \
+ size_t max_size = ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT; \
+ if (yield_count > 0) \
+ max_size *= yield_count+1; \
+ if (size > max_size) { \
+ state->objp = CONSP; \
+ goto L_bcopy_yield; \
+ } \
+ if (size >= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT) { \
+ int cost = (int) size; \
+ cost /= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT; \
+ yield_count -= cost; \
+ } \
+ } \
+ ASSERT(size <= len); \
+ ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize); \
+ num_bits = 8*size+bitsize; \
+ copy_binary_to_buffer(buf, offset, bptr, bitoffs, num_bits); \
+ offset += bitsize; \
+ buf += size + (offset>7); \
+ LIST_TO_BITSTR_BUF_BCOPY_DBG; \
+ offset = offset & 7; \
+ } while(0)
+
+#ifdef DEBUG
+ ErlDrvSizeT len;
+#endif
+ Eterm obj;
+ char *buf;
+ Eterm *objp = NULL;
+ int offset;
+ int init_yield_count = 0, yield_count;
DECLARE_ESTACK(s);
- goto L_again;
-
- while (!ESTACK_ISEMPTY(s)) {
- obj = ESTACK_POP(s);
- L_again:
- if (is_list(obj)) {
- L_iter_list:
- objp = list_val(obj);
- obj = CAR(objp);
- if (is_byte(obj)) {
- ASSERT(len > 0);
- if (offset == 0) {
- *buf++ = unsigned_val(obj);
- } else {
- *buf = (char)((unsigned_val(obj) >> offset) |
- ((*buf >> (8-offset)) << (8-offset)));
- buf++;
- *buf = (unsigned_val(obj) << (8-offset));
- }
+
+ obj = state->iolist.obj;
+ buf = state->buf;
+ offset = state->offset;
#ifdef DEBUG
- len--;
+ len = state->len;
#endif
- } else if (is_binary(obj)) {
- byte* bptr;
- size_t size = binary_size(obj);
- Uint bitsize;
- Uint bitoffs;
- Uint num_bits;
-
- ASSERT(size <= len);
- ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
- num_bits = 8*size+bitsize;
- copy_binary_to_buffer(buf, offset, bptr, bitoffs, num_bits);
- offset += bitsize;
- buf += size + (offset>7);
+
+ if (!yield_support) {
+ yield_count = init_yield_count = 0; /* Shut up faulty warning... >:-( */
+ goto L_again;
+ }
+ else {
+
+ if (state->iolist.reds_left <= 0)
+ return ERTS_IOLIST_TO_BUF_YIELD;
+
+ ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
+ init_yield_count = (ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED
+ * state->iolist.reds_left);
+ yield_count = init_yield_count;
+
+ if (!state->iolist.estack.start)
+ goto L_again;
+ else {
+ int chk_stack;
+ /* Restart; restore state... */
+ ESTACK_RESTORE(s, &state->iolist.estack);
+
+ if (!state->bcopy.bptr)
+ chk_stack = 0;
+ else {
+ chk_stack = 1;
+ if (list_to_bitstr_buf_bcopy(state, THE_NON_VALUE, &yield_count)) {
+ /* Yield again... */
+ BUMP_ALL_REDS(state->iolist.c_p);
+ state->iolist.reds_left = 0;
+ ESTACK_SAVE(s, &state->iolist.estack);
+ return ERTS_IOLIST_TO_BUF_YIELD;
+ }
+ buf = state->buf;
+ offset = state->offset;
#ifdef DEBUG
- len -= size + (offset>7);
+ len = state->len;
#endif
- offset = offset & 7;
- } else if (is_list(obj)) {
- ESTACK_PUSH(s, CDR(objp));
- goto L_iter_list; /* on head */
- } else {
- ASSERT(is_nil(obj));
}
- obj = CDR(objp);
- if (is_list(obj)) {
- goto L_iter_list; /* on tail */
- } else if (is_binary(obj)) {
- byte* bptr;
- size_t size = binary_size(obj);
- Uint bitsize;
- Uint bitoffs;
- Uint num_bits;
-
- ASSERT(size <= len);
- ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
- num_bits = 8*size+bitsize;
- copy_binary_to_buffer(buf, offset, bptr, bitoffs, num_bits);
- offset += bitsize;
- buf += size+(offset>7);
+ objp = state->objp;
+ state->objp = NULL;
+
+ if (objp)
+ goto L_tail;
+ if (!chk_stack)
+ goto L_again;
+ /* check stack */
+ }
+ }
+
+ while (!ESTACK_ISEMPTY(s)) {
+ obj = ESTACK_POP(s);
+ L_again:
+ if (is_list(obj)) {
+ while (1) { /* Tail loop */
+ while (1) { /* Head loop */
+ if (yield_support && --yield_count <= 0)
+ goto L_yield;
+ objp = list_val(obj);
+ obj = CAR(objp);
+ if (is_byte(obj)) {
+ ASSERT(len > 0);
+ if (offset == 0) {
+ *buf++ = unsigned_val(obj);
+ } else {
+ *buf = (char)((unsigned_val(obj) >> offset) |
+ ((*buf >> (8-offset)) << (8-offset)));
+ buf++;
+ *buf = (unsigned_val(obj) << (8-offset));
+ }
#ifdef DEBUG
- len -= size+(offset>7);
+ len--;
#endif
- offset = offset & 7;
- } else {
- ASSERT(is_nil(obj));
+ } else if (is_binary(obj)) {
+ LIST_TO_BITSTR_BUF_BCOPY(objp);
+ } else if (is_list(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ continue; /* Head loop */
+ } else {
+ ASSERT(is_nil(obj));
+ }
+ break;
+ }
+
+ L_tail:
+
+ obj = CDR(objp);
+ if (is_list(obj)) {
+ continue; /* Tail loop */
+ } else if (is_binary(obj)) {
+ LIST_TO_BITSTR_BUF_BCOPY(NULL);
+ } else {
+ ASSERT(is_nil(obj));
+ }
+ break;
}
} else if (is_binary(obj)) {
- byte* bptr;
- size_t size = binary_size(obj);
- Uint bitsize;
- Uint bitoffs;
- Uint num_bits;
-
- ASSERT(size <= len);
- ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
- num_bits = 8*size+bitsize;
- copy_binary_to_buffer(buf, offset, bptr, bitoffs, num_bits);
- offset += bitsize;
- buf += size + (offset>7);
-#ifdef DEBUG
- len -= size + (offset>7);
-#endif
- offset = offset & 7;
+ LIST_TO_BITSTR_BUF_BCOPY(NULL);
} else {
+ if (yield_support && --yield_count <= 0)
+ goto L_yield;
ASSERT(is_nil(obj));
}
}
DESTROY_ESTACK(s);
- return offset;
+
+ if (yield_support) {
+ int reds;
+ CLEAR_SAVED_ESTACK(&state->iolist.estack);
+ reds = ((init_yield_count - yield_count - 1)
+ / ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED) + 1;
+ BUMP_REDS(state->iolist.c_p, reds);
+ state->iolist.reds_left -= reds;
+ if (state->iolist.reds_left < 0)
+ state->iolist.reds_left = 0;
+ }
+ state->buf = buf;
+ state->offset = offset;
+ return 0;
+
+L_bcopy_yield:
+
+ state->buf = buf;
+ state->offset = offset;
+#ifdef DEBUG
+ state->len = len;
+#endif
+
+ if (list_to_bitstr_buf_bcopy(state, obj, &yield_count) == 0)
+ ERTS_INTERNAL_ERROR("Missing yield");
+
+ BUMP_ALL_REDS(state->iolist.c_p);
+ state->iolist.reds_left = 0;
+ ESTACK_SAVE(s, &state->iolist.estack);
+ return ERTS_IOLIST_TO_BUF_YIELD;
+
+L_yield:
+
+ BUMP_ALL_REDS(state->iolist.c_p);
+ state->iolist.reds_left = 0;
+ state->iolist.obj = obj;
+ state->buf = buf;
+ state->offset = offset;
+ ESTACK_SAVE(s, &state->iolist.estack);
+#ifdef DEBUG
+ state->len = len;
+#endif
+ return ERTS_IOLIST_TO_BUF_YIELD;
+
+
+#undef LIST_TO_BITSTR_BUF_BCOPY_DBG
+#undef LIST_TO_BITSTR_BUF_BCOPY
+
+}
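The reduction bookkeeping above uses the ceiling-division idiom (x - 1)/d + 1 = ceil(x/d) for x >= 1, so any partially consumed batch of yield counts is charged as a whole reduction.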
+
+static ErlDrvSizeT
+list_to_bitstr_buf_yielding(ErtsIOList2BufState *state)
+{
+ return list_to_bitstr_buf(1, state);
+}
+
+static ErlDrvSizeT
+list_to_bitstr_buf_not_yielding(ErtsIOList2BufState *state)
+{
+ return list_to_bitstr_buf(0, state);
+}
+
+static int
+list_to_bitstr_buf_bcopy(ErtsIOList2BufState *state, Eterm obj, int *yield_countp)
+{
+ int res;
+ char *buf = state->buf;
+ char *next_buf;
+ int offset = state->offset;
+ int next_offset;
+#ifdef DEBUG
+ ErlDrvSizeT len = state->len;
+ ErlDrvSizeT next_len;
+#endif
+ byte* bptr;
+ size_t size;
+ size_t max_size;
+ Uint bitoffs;
+ Uint num_bits;
+ Uint bitsize;
+ int yield_count = *yield_countp;
+
+ if (state->bcopy.bptr) {
+ bptr = state->bcopy.bptr;
+ size = state->bcopy.size;
+ bitoffs = state->bcopy.bitoffs;
+ bitsize = state->bcopy.bitsize;
+ state->bcopy.bptr = NULL;
+ }
+ else {
+
+ ASSERT(is_binary(obj));
+
+ size = binary_size(obj);
+
+ ASSERT(size <= len);
+
+ ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
+ }
+
+ max_size = (size_t) ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT;
+ if (yield_count > 0)
+ max_size *= (size_t) (yield_count+1);
+
+ if (size <= max_size) {
+ if (size >= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT) {
+ int cost = (int) size;
+ cost /= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT;
+ yield_count -= cost;
+ }
+ next_offset = offset + bitsize;
+ next_buf = buf + size+(next_offset>7);
+#ifdef DEBUG
+	    next_len = len - (size + (next_offset>7));
+#endif
+ next_offset &= 7;
+ num_bits = 8*size+bitsize;
+ res = 0;
+ }
+ else {
+ ASSERT(0 < max_size && max_size < size);
+ yield_count = 0;
+ state->bcopy.bptr = bptr + max_size;
+ state->bcopy.bitoffs = bitoffs;
+ state->bcopy.bitsize = bitsize;
+ state->bcopy.size = size - max_size;
+ next_buf = buf + max_size;
+#ifdef DEBUG
+ next_len = len - max_size;
+#endif
+ next_offset = offset;
+ num_bits = 8*max_size;
+ size = max_size;
+ res = 1;
+ }
+
+ copy_binary_to_buffer(buf, offset, bptr, bitoffs, num_bits);
+
+ state->offset = next_offset;
+ state->buf = next_buf;
+#ifdef DEBUG
+ state->len = next_len;
+#endif
+ *yield_countp = yield_count;
+
+ return res;
}
static int
-bitstr_list_len(Eterm obj, Uint* num_bytes)
+bitstr_list_len(ErtsIOListState *state)
{
Eterm* objp;
- Uint len = 0;
- Uint offs = 0;
+ Eterm obj;
+ Uint len, offs;
+ int res, init_yield_count, yield_count;
DECLARE_ESTACK(s);
+
+ if (state->reds_left <= 0)
+ return ERTS_IOLIST_YIELD;
+
+ len = (Uint) state->size;
+ offs = state->offs;
+ obj = state->obj;
+
+ ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
+ init_yield_count = ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED;
+ init_yield_count *= state->reds_left;
+ yield_count = init_yield_count;
+ if (state->estack.start) {
+ /* Restart; restore estack... */
+ ESTACK_RESTORE(s, &state->estack);
+ }
+
goto L_again;
#define SAFE_ADD(Var, Val) \
@@ -748,46 +1490,55 @@ bitstr_list_len(Eterm obj, Uint* num_bytes)
obj = ESTACK_POP(s);
L_again:
if (is_list(obj)) {
- L_iter_list:
- objp = list_val(obj);
- /* Head */
- obj = CAR(objp);
- if (is_byte(obj)) {
- len++;
- if (len == 0) {
- goto L_overflow_error;
+ while (1) { /* Tail loop */
+ while (1) { /* Head loop */
+ if (--yield_count <= 0)
+ goto L_yield;
+ objp = list_val(obj);
+ /* Head */
+ obj = CAR(objp);
+ if (is_byte(obj)) {
+ len++;
+ if (len == 0) {
+ goto L_overflow_error;
+ }
+ } else if (is_binary(obj)) {
+ SAFE_ADD(len, binary_size(obj));
+ SAFE_ADD_BITSIZE(offs, obj);
+ } else if (is_list(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ continue; /* Head loop */
+ } else if (is_not_nil(obj)) {
+ goto L_type_error;
+ }
+ break;
}
- } else if (is_binary(obj)) {
- SAFE_ADD(len, binary_size(obj));
- SAFE_ADD_BITSIZE(offs, obj);
- } else if (is_list(obj)) {
- ESTACK_PUSH(s, CDR(objp));
- goto L_iter_list; /* on head */
- } else if (is_not_nil(obj)) {
- goto L_type_error;
+ /* Tail */
+ obj = CDR(objp);
+ if (is_list(obj))
+ continue; /* Tail loop */
+ else if (is_binary(obj)) {
+ SAFE_ADD(len, binary_size(obj));
+ SAFE_ADD_BITSIZE(offs, obj);
+ } else if (is_not_nil(obj)) {
+ goto L_type_error;
+ }
+ break;
}
- /* Tail */
- obj = CDR(objp);
- if (is_list(obj))
- goto L_iter_list; /* on tail */
- else if (is_binary(obj)) {
+ } else {
+ if (--yield_count <= 0)
+ goto L_yield;
+ if (is_binary(obj)) {
SAFE_ADD(len, binary_size(obj));
SAFE_ADD_BITSIZE(offs, obj);
} else if (is_not_nil(obj)) {
goto L_type_error;
}
- } else if (is_binary(obj)) {
- SAFE_ADD(len, binary_size(obj));
- SAFE_ADD_BITSIZE(offs, obj);
- } else if (is_not_nil(obj)) {
- goto L_type_error;
}
}
#undef SAFE_ADD
#undef SAFE_ADD_BITSIZE
- DESTROY_ESTACK(s);
-
/*
* Make sure that the number of bits in the bitstring will fit
* in an Uint to ensure that the binary can be matched using
@@ -800,15 +1551,42 @@ bitstr_list_len(Eterm obj, Uint* num_bytes)
if (len << 3 < len) {
goto L_overflow_error;
}
- *num_bytes = len;
- return ERTS_IOLIST_OK;
+ state->size = len;
- L_type_error:
- DESTROY_ESTACK(s);
- return ERTS_IOLIST_TYPE;
+ res = ERTS_IOLIST_OK;
+
+ L_return: {
+ int yc = init_yield_count - yield_count;
+ int reds;
+
+ DESTROY_ESTACK(s);
+ CLEAR_SAVED_ESTACK(&state->estack);
+
+ reds = (yc - 1)/ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED + 1;
+ BUMP_REDS(state->c_p, reds);
+ state->reds_left -= reds;
+ state->size = (ErlDrvSizeT) len;
+ state->have_size = 1;
+ return res;
+ }
L_overflow_error:
- DESTROY_ESTACK(s);
- return ERTS_IOLIST_OVERFLOW;
+ res = ERTS_IOLIST_OVERFLOW;
+ len = 0;
+ goto L_return;
+
+ L_type_error:
+ res = ERTS_IOLIST_TYPE;
+ len = 0;
+ goto L_return;
+
+ L_yield:
+ BUMP_ALL_REDS(state->c_p);
+ state->reds_left = 0;
+ state->size = len;
+ state->offs = offs;
+ state->obj = obj;
+ ESTACK_SAVE(s, &state->estack);
+ return ERTS_IOLIST_YIELD;
}
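The (len << 3 < len) test above is a multiply-by-8 overflow guard: if shifting the byte count left by 3 wraps the Uint below its original value, the total bit count is not representable and the conversion fails with a system_limit error.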
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index 7d4f52ee23..d9ee940662 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -36,30 +37,31 @@
#include "atom.h"
#include "beam_load.h"
#include "erl_instrument.h"
-#include "erl_bif_timer.h"
+#include "erl_hl_timer.h"
#include "erl_thr_progress.h"
/* Forward declarations -- should really appear somewhere else */
static void process_killer(void);
void do_break(void);
void erl_crash_dump_v(char *file, int line, char* fmt, va_list args);
-void erl_crash_dump(char* file, int line, char* fmt, ...);
#ifdef DEBUG
static void bin_check(void);
#endif
-static void print_garb_info(int to, void *to_arg, Process* p);
+static void print_garb_info(fmtfn_t to, void *to_arg, Process* p);
#ifdef OPPROF
static void dump_frequencies(void);
#endif
-static void dump_attributes(int to, void *to_arg, byte* ptr, int size);
+static void dump_attributes(fmtfn_t to, void *to_arg, byte* ptr, int size);
extern char* erts_system_version[];
+#define WRITE_BUFFER_SIZE (64*1024)
+
static void
-port_info(int to, void *to_arg)
+port_info(fmtfn_t to, void *to_arg)
{
int i, max = erts_ptab_max(&erts_port);
for (i = 0; i < max; i++) {
@@ -70,7 +72,7 @@ port_info(int to, void *to_arg)
}
void
-process_info(int to, void *to_arg)
+process_info(fmtfn_t to, void *to_arg)
{
int i, max = erts_ptab_max(&erts_proc);
for (i = 0; i < max; i++) {
@@ -103,21 +105,23 @@ process_killer(void)
erts_printf("(k)ill (n)ext (r)eturn:\n");
while(1) {
if ((j = sys_get_key(0)) <= 0)
- erl_exit(0, "");
+ erts_exit(0, "");
switch(j) {
case 'k': {
ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
erts_aint32_t state;
- erts_smp_proc_inc_refc(rp);
- erts_smp_proc_lock(rp, rp_locks);
- state = erts_smp_atomic32_read_acqb(&rp->state);
+ erts_proc_inc_refc(rp);
+ erts_proc_lock(rp, rp_locks);
+ state = erts_atomic32_read_acqb(&rp->state);
if (state & (ERTS_PSFLG_FREE
| ERTS_PSFLG_EXITING
| ERTS_PSFLG_ACTIVE
| ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_IN_RUNQ
| ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_RUNNING_SYS)) {
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
erts_printf("Can only kill WAITING processes this way\n");
}
else {
@@ -130,8 +134,8 @@ process_killer(void)
NULL,
0);
}
- erts_smp_proc_unlock(rp, rp_locks);
- erts_smp_proc_dec_refc(rp);
+ erts_proc_unlock(rp, rp_locks);
+ erts_proc_dec_refc(rp);
}
case 'n': br = 1; break;
case 'r': return;
@@ -145,14 +149,14 @@ process_killer(void)
typedef struct {
int is_first;
- int to;
+ fmtfn_t to;
void *to_arg;
} PrintMonitorContext;
static void doit_print_link(ErtsLink *lnk, void *vpcontext)
{
PrintMonitorContext *pcontext = vpcontext;
- int to = pcontext->to;
+ fmtfn_t to = pcontext->to;
void *to_arg = pcontext->to_arg;
if (pcontext->is_first) {
@@ -167,7 +171,7 @@ static void doit_print_link(ErtsLink *lnk, void *vpcontext)
static void doit_print_monitor(ErtsMonitor *mon, void *vpcontext)
{
PrintMonitorContext *pcontext = vpcontext;
- int to = pcontext->to;
+ fmtfn_t to = pcontext->to;
void *to_arg = pcontext->to_arg;
char *prefix = ", ";
@@ -176,26 +180,34 @@ static void doit_print_monitor(ErtsMonitor *mon, void *vpcontext)
prefix = "";
}
- if (mon->type == MON_ORIGIN) {
- if (is_atom(mon->pid)) { /* dist by name */
- ASSERT(is_node_name_atom(mon->pid));
+ switch (mon->type) {
+ case MON_ORIGIN:
+ if (is_atom(mon->u.pid)) { /* dist by name */
+ ASSERT(is_node_name_atom(mon->u.pid));
erts_print(to, to_arg, "%s{to,{%T,%T},%T}", prefix, mon->name,
- mon->pid, mon->ref);
- erts_print(to, to_arg,"}");
+ mon->u.pid, mon->ref);
} else if (is_atom(mon->name)){ /* local by name */
erts_print(to, to_arg, "%s{to,{%T,%T},%T}", prefix, mon->name,
erts_this_dist_entry->sysname, mon->ref);
} else { /* local and distributed by pid */
- erts_print(to, to_arg, "%s{to,%T,%T}", prefix, mon->pid, mon->ref);
+ erts_print(to, to_arg, "%s{to,%T,%T}", prefix, mon->u.pid, mon->ref);
}
- } else { /* MON_TARGET */
- erts_print(to, to_arg, "%s{from,%T,%T}", prefix, mon->pid, mon->ref);
+ break;
+ case MON_TARGET:
+ erts_print(to, to_arg, "%s{from,%T,%T}", prefix, mon->u.pid, mon->ref);
+ break;
+ case MON_NIF_TARGET: {
+ ErtsResource* rsrc = mon->u.resource;
+ erts_print(to, to_arg, "%s{from,{%T,%T},%T}", prefix, rsrc->type->module,
+ rsrc->type->name, mon->ref);
+ break;
+ }
}
}
/* Display info about an individual Erlang process */
void
-print_process_info(int to, void *to_arg, Process *p)
+print_process_info(fmtfn_t to, void *to_arg, Process *p)
{
time_t approx_started;
int garbing = 0;
@@ -209,26 +221,14 @@ print_process_info(int to, void *to_arg, Process *p)
/* Display the state */
erts_print(to, to_arg, "State: ");
- state = erts_smp_atomic32_read_acqb(&p->state);
- if (state & ERTS_PSFLG_FREE)
- erts_print(to, to_arg, "Non Existing\n"); /* Should never happen */
- else if (state & ERTS_PSFLG_EXITING)
- erts_print(to, to_arg, "Exiting\n");
- else if (state & ERTS_PSFLG_GC) {
- garbing = 1;
- running = 1;
- erts_print(to, to_arg, "Garbing\n");
- }
- else if (state & ERTS_PSFLG_SUSPENDED)
- erts_print(to, to_arg, "Suspended\n");
- else if (state & ERTS_PSFLG_RUNNING) {
- running = 1;
- erts_print(to, to_arg, "Running\n");
- }
- else if (state & ERTS_PSFLG_ACTIVE)
- erts_print(to, to_arg, "Scheduled\n");
- else
- erts_print(to, to_arg, "Waiting\n");
+ state = erts_atomic32_read_acqb(&p->state);
+ erts_dump_process_state(to, to_arg, state);
+ if (state & ERTS_PSFLG_GC) {
+ garbing = 1;
+ running = 1;
+ } else if (state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING))
+ running = 1;
/*
* If the process is registered as a global process, display the
@@ -241,9 +241,9 @@ print_process_info(int to, void *to_arg, Process *p)
* Display the initial function name
*/
erts_print(to, to_arg, "Spawned as: %T:%T/%bpu\n",
- p->initial[INITIAL_MOD],
- p->initial[INITIAL_FUN],
- p->initial[INITIAL_ARI]);
+ p->u.initial.module,
+ p->u.initial.function,
+ p->u.initial.arity);
if (p->current != NULL) {
if (running) {
@@ -252,20 +252,20 @@ print_process_info(int to, void *to_arg, Process *p)
erts_print(to, to_arg, "Current call: ");
}
erts_print(to, to_arg, "%T:%T/%bpu\n",
- p->current[0],
- p->current[1],
- p->current[2]);
+ p->current->module,
+ p->current->function,
+ p->current->arity);
}
erts_print(to, to_arg, "Spawned by: %T\n", p->parent);
approx_started = (time_t) p->approx_started;
erts_print(to, to_arg, "Started: %s", ctime(&approx_started));
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p);
+ ERTS_MSGQ_MV_INQ2PRIVQ(p);
erts_print(to, to_arg, "Message queue length: %d\n", p->msg.len);
/* display the message queue only if there is anything in it */
if (!ERTS_IS_CRASH_DUMPING && p->msg.first != NULL && !garbing) {
- ErlMessage* mp;
+ ErtsMessage* mp;
erts_print(to, to_arg, "Message queue: [");
for (mp = p->msg.first; mp; mp = mp->next)
erts_print(to, to_arg, mp->next ? "%T," : "%T", ERL_MESSAGE_TERM(mp));
@@ -301,16 +301,16 @@ print_process_info(int to, void *to_arg, Process *p)
erts_print(to, to_arg, "timeout");
else
erts_print(to, to_arg, "%T:%T/%bpu\n",
- scb->ct[j]->code[0],
- scb->ct[j]->code[1],
- scb->ct[j]->code[2]);
+ scb->ct[j]->info.mfa.module,
+ scb->ct[j]->info.mfa.function,
+ scb->ct[j]->info.mfa.arity);
}
erts_print(to, to_arg, "\n");
}
/* display the links only if there are any*/
if (ERTS_P_LINKS(p) || ERTS_P_MONITORS(p)) {
- PrintMonitorContext context = {1,to};
+ PrintMonitorContext context = {1, to, to_arg};
erts_print(to, to_arg,"Link list: [");
erts_doforall_links(ERTS_P_LINKS(p), &doit_print_link, &context);
erts_doforall_monitors(ERTS_P_MONITORS(p), &doit_print_monitor, &context);
@@ -336,7 +336,7 @@ print_process_info(int to, void *to_arg, Process *p)
erts_print(to, to_arg, "Heap unused: %bpu\n", (p->hend - p->htop));
erts_print(to, to_arg, "OldHeap unused: %bpu\n",
(OLD_HEAP(p) == NULL) ? 0 : (OLD_HEND(p) - OLD_HTOP(p)) );
- erts_print(to, to_arg, "Memory: %beu\n", erts_process_memory(p));
+ erts_print(to, to_arg, "Memory: %beu\n", erts_process_memory(p, !0));
if (garbing) {
print_garb_info(to, to_arg, p);
@@ -346,18 +346,21 @@ print_process_info(int to, void *to_arg, Process *p)
erts_program_counter_info(to, to_arg, p);
} else {
erts_print(to, to_arg, "Stack dump:\n");
-#ifdef ERTS_SMP
if (!garbing)
-#endif
erts_stack_dump(to, to_arg, p);
}
+
+ /* Display all states */
+ erts_print(to, to_arg, "Internal State: ");
+ erts_dump_extended_process_state(to, to_arg, state);
}
static void
-print_garb_info(int to, void *to_arg, Process* p)
+print_garb_info(fmtfn_t to, void *to_arg, Process* p)
{
/* ERTS_SMP: A scheduler is probably concurrently doing gc... */
-#ifndef ERTS_SMP
+ if (!ERTS_IS_CRASH_DUMPING)
+ return;
erts_print(to, to_arg, "New heap start: %bpX\n", p->heap);
erts_print(to, to_arg, "New heap top: %bpX\n", p->htop);
erts_print(to, to_arg, "Stack top: %bpX\n", p->stop);
@@ -365,11 +368,10 @@ print_garb_info(int to, void *to_arg, Process* p)
erts_print(to, to_arg, "Old heap start: %bpX\n", OLD_HEAP(p));
erts_print(to, to_arg, "Old heap top: %bpX\n", OLD_HTOP(p));
erts_print(to, to_arg, "Old heap end: %bpX\n", OLD_HEND(p));
-#endif
}
void
-info(int to, void *to_arg)
+info(fmtfn_t to, void *to_arg)
{
erts_memory(&to, to_arg, NULL, THE_NON_VALUE);
atom_info(to, to_arg);
@@ -384,13 +386,25 @@ info(int to, void *to_arg)
}
+static int code_size(struct erl_module_instance* modi)
+{
+ int size = modi->code_length;
+
+ if (modi->code_hdr) {
+ ErtsLiteralArea* lit = modi->code_hdr->literal_area;
+ if (lit)
+ size += (lit->end - lit->start) * sizeof(Eterm);
+ }
+ return size;
+}
+
void
-loaded(int to, void *to_arg)
+loaded(fmtfn_t to, void *to_arg)
{
int i;
int old = 0;
int cur = 0;
- BeamInstr* code;
+ BeamCodeHeader* code;
Module* modp;
ErtsCodeIndex code_ix;
@@ -401,13 +415,9 @@ loaded(int to, void *to_arg)
* Calculate and print totals.
*/
for (i = 0; i < module_code_size(code_ix); i++) {
- if ((modp = module_code(i, code_ix)) != NULL &&
- ((modp->curr.code_length != 0) ||
- (modp->old.code_length != 0))) {
- cur += modp->curr.code_length;
- if (modp->old.code_length != 0) {
- old += modp->old.code_length;
- }
+ if ((modp = module_code(i, code_ix)) != NULL) {
+ cur += code_size(&modp->curr);
+ old += code_size(&modp->old);
}
}
erts_print(to, to_arg, "Current code: %d\n", cur);
@@ -423,55 +433,49 @@ loaded(int to, void *to_arg)
/*
* Interactive dump; keep it brief.
*/
- if (modp != NULL &&
- ((modp->curr.code_length != 0) ||
- (modp->old.code_length != 0))) {
- erts_print(to, to_arg, "%T", make_atom(modp->module));
- cur += modp->curr.code_length;
- erts_print(to, to_arg, " %d", modp->curr.code_length );
- if (modp->old.code_length != 0) {
- erts_print(to, to_arg, " (%d old)",
- modp->old.code_length );
- old += modp->old.code_length;
- }
+ if (modp != NULL && ((modp->curr.code_length != 0) ||
+ (modp->old.code_length != 0))) {
+ erts_print(to, to_arg, "%T %d", make_atom(modp->module),
+ code_size(&modp->curr));
+ if (modp->old.code_length != 0)
+ erts_print(to, to_arg, " (%d old)", code_size(&modp->old));
erts_print(to, to_arg, "\n");
}
} else {
/*
* To crash dump; make it parseable.
*/
- if (modp != NULL &&
- ((modp->curr.code_length != 0) ||
- (modp->old.code_length != 0))) {
+ if (modp != NULL && ((modp->curr.code_length != 0) ||
+ (modp->old.code_length != 0))) {
erts_print(to, to_arg, "=mod:");
erts_print(to, to_arg, "%T", make_atom(modp->module));
erts_print(to, to_arg, "\n");
erts_print(to, to_arg, "Current size: %d\n",
- modp->curr.code_length);
- code = modp->curr.code;
- if (code != NULL && code[MI_ATTR_PTR]) {
+ code_size(&modp->curr));
+ code = modp->curr.code_hdr;
+ if (code != NULL && code->attr_ptr) {
erts_print(to, to_arg, "Current attributes: ");
- dump_attributes(to, to_arg, (byte *) code[MI_ATTR_PTR],
- code[MI_ATTR_SIZE]);
+ dump_attributes(to, to_arg, code->attr_ptr,
+ code->attr_size);
}
- if (code != NULL && code[MI_COMPILE_PTR]) {
+ if (code != NULL && code->compile_ptr) {
erts_print(to, to_arg, "Current compilation info: ");
- dump_attributes(to, to_arg, (byte *) code[MI_COMPILE_PTR],
- code[MI_COMPILE_SIZE]);
+ dump_attributes(to, to_arg, code->compile_ptr,
+ code->compile_size);
}
if (modp->old.code_length != 0) {
- erts_print(to, to_arg, "Old size: %d\n", modp->old.code_length);
- code = modp->old.code;
- if (code[MI_ATTR_PTR]) {
+ erts_print(to, to_arg, "Old size: %d\n", code_size(&modp->old));
+ code = modp->old.code_hdr;
+ if (code->attr_ptr) {
erts_print(to, to_arg, "Old attributes: ");
- dump_attributes(to, to_arg, (byte *) code[MI_ATTR_PTR],
- code[MI_ATTR_SIZE]);
+ dump_attributes(to, to_arg, code->attr_ptr,
+ code->attr_size);
}
- if (code[MI_COMPILE_PTR]) {
+ if (code->compile_ptr) {
erts_print(to, to_arg, "Old compilation info: ");
- dump_attributes(to, to_arg, (byte *) code[MI_COMPILE_PTR],
- code[MI_COMPILE_SIZE]);
+ dump_attributes(to, to_arg, code->compile_ptr,
+ code->compile_size);
}
}
}
@@ -482,7 +486,7 @@ loaded(int to, void *to_arg)
static void
-dump_attributes(int to, void *to_arg, byte* ptr, int size)
+dump_attributes(fmtfn_t to, void *to_arg, byte* ptr, int size)
{
while (size-- > 0) {
erts_print(to, to_arg, "%02X", *ptr++);
@@ -502,17 +506,19 @@ do_break(void)
halt immediately if break is called */
mode = erts_read_env("ERL_CONSOLE_MODE");
if (mode && strcmp(mode, "window") != 0)
- erl_exit(0, "");
+ erts_exit(0, "");
erts_free_read_env(mode);
#endif /* __WIN32__ */
+ ASSERT(erts_thr_progress_is_blocking());
+
erts_printf("\n"
"BREAK: (a)bort (c)ontinue (p)roc info (i)nfo (l)oaded\n"
" (v)ersion (k)ill (D)b-tables (d)istribution\n");
while (1) {
if ((i = sys_get_key(0)) <= 0)
- erl_exit(0, "");
+ erts_exit(0, "");
switch (i) {
case 'q':
case 'a':
@@ -522,9 +528,9 @@ do_break(void)
* The usual reason for a read error is Ctrl-C. Treat this as
* 'a' to avoid infinite loop.
*/
- erl_exit(0, "");
+ erts_exit(0, "");
case 'A': /* Halt generating crash dump */
- erl_exit(1, "Crash dump requested by user");
+ erts_exit(ERTS_ERROR_EXIT, "Crash dump requested by user");
case 'c':
return;
case 'p':
@@ -545,7 +551,9 @@ do_break(void)
erts_printf("Erlang (%s) emulator version "
ERLANG_VERSION "\n",
EMULATOR);
+#if ERTS_SAVED_COMPILE_TIME
erts_printf("Compiled on " ERLANG_COMPILE_DATE "\n");
+#endif
return;
case 'd':
distribution_info(ERTS_PRINT_STDOUT, NULL);
@@ -580,7 +588,7 @@ do_break(void)
#endif
#ifdef DEBUG
case 't':
- erts_p_slpq();
+ /* erts_p_slpq(); */
return;
case 'b':
bin_check();
@@ -650,7 +658,7 @@ bin_check(void)
erts_printf("%p orig_size: %bpd, norefs = %bpd\n",
bp->val,
bp->val->orig_size,
- erts_smp_atomic_read_nob(&bp->val->refc));
+ erts_refc_read(&bp->val->intern.refc, 1));
}
}
if (printed) {
@@ -664,13 +672,41 @@ bin_check(void)
#endif
+static Sint64 crash_dump_limit = ERTS_SINT64_MAX;
+static Sint64 crash_dump_written = 0;
+
+typedef struct LimitedWriterInfo_ {
+ fmtfn_t to;
+ void* to_arg;
+} LimitedWriterInfo;
+
+static int
+crash_dump_limited_writer(void* vfdp, char* buf, size_t len)
+{
+ const char stop_msg[] = "\n=abort:CRASH DUMP SIZE LIMIT REACHED\n";
+ LimitedWriterInfo* lwi = (LimitedWriterInfo *) vfdp;
+
+ crash_dump_written += len;
+ if (crash_dump_written <= crash_dump_limit) {
+ return lwi->to(lwi->to_arg, buf, len);
+ }
+
+ len -= (crash_dump_written - crash_dump_limit);
+ lwi->to(lwi->to_arg, buf, len);
+ lwi->to(lwi->to_arg, (char*)stop_msg, sizeof(stop_msg)-1);
+ if (lwi->to == &erts_write_fp) {
+ fclose((FILE *) lwi->to_arg);
+ }
+
+ /* We assume that the crash dump was requested from erts_exit_vv() */
+ erts_exit_epilogue();
+}
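
crash_dump_limited_writer keeps a running byte count and, once the limit is crossed, forwards only the part that still fits followed by an abort marker. A self-contained sketch of that truncation arithmetic (illustrative signature, not the fmtfn_t callback):

    #include <stdio.h>

    /* Illustrative size-limited writer: forwards bytes until a byte
     * budget is exhausted, then writes a truncation marker and stops. */
    static size_t written = 0;
    static size_t limit = 1024;

    static int limited_write(FILE *out, const char *buf, size_t len)
    {
        static const char stop_msg[] = "\n=abort:SIZE LIMIT REACHED\n";

        written += len;
        if (written <= limit)
            return fwrite(buf, 1, len, out) == len;

        /* Write only the part that still fits, then the marker. */
        len -= written - limit;
        fwrite(buf, 1, len, out);
        fwrite(stop_msg, 1, sizeof(stop_msg) - 1, out);
        return 0; /* caller should stop dumping */
    }
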
+
/* XXX THIS SHOULD BE IN SYSTEM !!!! */
void
erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
{
-#ifdef ERTS_SMP
ErtsThrPrgrData tpd_buf; /* in case we aren't a managed thread... */
-#endif
int fd;
size_t envsz;
time_t now;
@@ -680,28 +716,43 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
char* dumpname;
int secs;
int env_erl_crash_dump_seconds_set = 1;
+ int i;
+ fmtfn_t to = &erts_write_fd;
+ void* to_arg;
+ FILE* fp = 0;
+ LimitedWriterInfo lwi;
+ static char* write_buffer; /* 'static' to avoid a leak warning in valgrind */
if (ERTS_SOMEONE_IS_CRASH_DUMPING)
return;
-#ifdef ERTS_SMP
+ /* Order all managed threads to block; this has to be done
+ first to guarantee that this is the only thread generating
+ a crash dump. */
+ erts_thr_progress_fatal_error_block(&tpd_buf);
+
+#ifdef ERTS_SYS_SUSPEND_SIGNAL
/*
- * Wait for all managed threads to block. If all threads haven't blocked
- * after a minute, we go anyway and hope for the best...
- *
- * We do not release system again. We expect an exit() or abort() after
- * dump has been written.
+ * We suspend all scheduler threads so that we can dump some
+ * data about the currently running processes and schedulers.
+ * We have to be very careful when doing this as the schedulers
+ * could be anywhere.
*/
- erts_thr_progress_fatal_error_block(60000, &tpd_buf);
- /* Either worked or not... */
+ sys_init_suspend_handler();
+
+ for (i = 0; i < erts_no_schedulers; i++) {
+ erts_tid_t tid = ERTS_SCHEDULER_IX(i)->tid;
+ if (!erts_equal_tids(tid,erts_thr_self()))
+ sys_thr_suspend(tid);
+ }
- /* Allow us to pass certain places without locking... */
- erts_smp_atomic32_set_mb(&erts_writing_erl_crash_dump, 1);
- erts_smp_tsd_set(erts_is_crash_dumping_key, (void *) 1);
-#else
- erts_writing_erl_crash_dump = 1;
#endif
+ /* Allow us to pass certain places without locking... */
+ erts_atomic32_set_mb(&erts_writing_erl_crash_dump, 1);
+ erts_tsd_set(erts_is_crash_dumping_key, (void *) 1);
+
+
envsz = sizeof(env);
/* ERL_CRASH_DUMP_SECONDS not set
* if we have a heart port, break immediately
@@ -749,69 +800,162 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
return;
}
+ crash_dump_limit = ERTS_SINT64_MAX;
+ envsz = sizeof(env);
+ if (erts_sys_getenv__("ERL_CRASH_DUMP_BYTES", env, &envsz) == 0) {
+ Sint64 limit;
+ char* endptr;
+ errno = 0;
+ limit = ErtsStrToSint64(env, &endptr, 10);
+ if (errno == 0 && limit >= 0 && endptr != env && *endptr == 0) {
+ if (limit == 0)
+ return;
+ crash_dump_limit = limit;
+ to = &crash_dump_limited_writer;
+ }
+ }
+
if (erts_sys_getenv__("ERL_CRASH_DUMP",&dumpnamebuf[0],&dumpnamebufsize) != 0)
dumpname = "erl_crash.dump";
else
dumpname = &dumpnamebuf[0];
+
+ erts_fprintf(stderr,"\nCrash dump is being written to: %s...", dumpname);
fd = open(dumpname,O_WRONLY | O_CREAT | O_TRUNC,0640);
- if (fd < 0)
+ if (fd < 0)
return; /* Can't create the crash dump, skip it */
-
+
+ /*
+ * Wrap into a FILE* so that we can use buffered output. Set an
+ * explicit buffer to make sure the first write does not fail because
+ * of a failure to allocate a buffer.
+ */
+ write_buffer = (char *) erts_alloc_fnf(ERTS_ALC_T_TMP, WRITE_BUFFER_SIZE);
+ if (write_buffer && (fp = fdopen(fd, "w")) != NULL) {
+ setvbuf(fp, write_buffer, _IOFBF, WRITE_BUFFER_SIZE);
+ lwi.to = &erts_write_fp;
+ lwi.to_arg = (void*)fp;
+ } else {
+ lwi.to = &erts_write_fd;
+ lwi.to_arg = (void*)&fd;
+ }
+ if (to == &crash_dump_limited_writer) {
+ to_arg = (void *) &lwi;
+ } else {
+ to = lwi.to;
+ to_arg = lwi.to_arg;
+ }
+
time(&now);
- erts_fdprintf(fd, "=erl_crash_dump:0.3\n%s", ctime(&now));
+ erts_cbprintf(to, to_arg, "=erl_crash_dump:0.4\n%s", ctime(&now));
if (file != NULL)
- erts_fdprintf(fd, "The error occurred in file %s, line %d\n", file, line);
+ erts_cbprintf(to, to_arg, "The error occurred in file %s, line %d\n", file, line);
if (fmt != NULL && *fmt != '\0') {
- erts_fdprintf(fd, "Slogan: ");
- erts_vfdprintf(fd, fmt, args);
- }
- erts_fdprintf(fd, "System version: ");
- erts_print_system_version(fd, NULL, NULL);
- erts_fdprintf(fd, "%s\n", "Compiled: " ERLANG_COMPILE_DATE);
- erts_fdprintf(fd, "Taints: ");
- erts_print_nif_taints(fd, NULL);
- erts_fdprintf(fd, "Atoms: %d\n", atom_table_size());
- info(fd, NULL); /* General system info */
+ erts_cbprintf(to, to_arg, "Slogan: ");
+ erts_vcbprintf(to, to_arg, fmt, args);
+ }
+ erts_cbprintf(to, to_arg, "System version: ");
+ erts_print_system_version(to, to_arg, NULL);
+#if ERTS_SAVED_COMPILE_TIME
+ erts_cbprintf(to, to_arg, "%s\n", "Compiled: " ERLANG_COMPILE_DATE);
+#endif
+
+ erts_cbprintf(to, to_arg, "Taints: ");
+ erts_print_nif_taints(to, to_arg);
+ erts_cbprintf(to, to_arg, "Atoms: %d\n", atom_table_size());
+
+ /* We want to note which thread it was that called erts_exit */
+ if (erts_get_scheduler_data()) {
+ erts_cbprintf(to, to_arg, "Calling Thread: scheduler:%d\n",
+ erts_get_scheduler_data()->no);
+ } else {
+ if (!erts_thr_getname(erts_thr_self(), dumpnamebuf, MAXPATHLEN))
+ erts_cbprintf(to, to_arg, "Calling Thread: %s\n", dumpnamebuf);
+ else
+ erts_cbprintf(to, to_arg, "Calling Thread: %p\n", erts_thr_self());
+ }
+
+#if defined(ERTS_HAVE_TRY_CATCH)
+
+ /*
+ * erts_print_scheduler_info is not guaranteed to be safe to call
+ * here for all schedulers as we may have suspended a scheduler
+ * in the middle of updating the STACK_TOP and STACK_START
+ * variables and thus when scanning the stack we could get
+ * segmentation faults. We protect against this very unlikely
+ * scenario by using ERTS_SYS_TRY_CATCH.
+ */
+ for (i = 0; i < erts_no_schedulers; i++) {
+ ERTS_SYS_TRY_CATCH(
+ erts_print_scheduler_info(to, to_arg, ERTS_SCHEDULER_IX(i)),
+ erts_cbprintf(to, to_arg, "** crashed **\n"));
+ }
+#endif
+
+
+#ifdef ERTS_SYS_SUSPEND_SIGNAL
+
+ /* We resume all schedulers so that we are in a known safe state
+ when we write the rest of the crash dump */
+ for (i = 0; i < erts_no_schedulers; i++) {
+ erts_tid_t tid = ERTS_SCHEDULER_IX(i)->tid;
+ if (!erts_equal_tids(tid,erts_thr_self()))
+ sys_thr_resume(tid);
+ }
+#endif
+
+ /*
+ * Wait for all managed threads to block. If all threads haven't blocked
+ * after a minute, we go anyway and hope for the best...
+ *
+ * We do not release system again. We expect an exit() or abort() after
+ * dump has been written.
+ */
+ erts_thr_progress_fatal_error_wait(60000);
+ /* Either worked or not... */
+
+#ifndef ERTS_HAVE_TRY_CATCH
+ /* This is safe to call here, as all schedulers are blocked */
+ for (i = 0; i < erts_no_schedulers; i++) {
+ erts_print_scheduler_info(to, to_arg, ERTS_SCHEDULER_IX(i));
+ }
+#endif
+
+ info(to, to_arg); /* General system info */
if (erts_ptab_initialized(&erts_proc))
- process_info(fd, NULL); /* Info about each process and port */
- db_info(fd, NULL, 0);
- erts_print_bif_timer_info(fd, NULL);
- distribution_info(fd, NULL);
- erts_fdprintf(fd, "=loaded_modules\n");
- loaded(fd, NULL);
- erts_dump_fun_entries(fd, NULL);
- erts_deep_process_dump(fd, NULL);
- erts_fdprintf(fd, "=atoms\n");
- dump_atoms(fd, NULL);
+ process_info(to, to_arg); /* Info about each process and port */
+ db_info(to, to_arg, 0);
+ erts_print_bif_timer_info(to, to_arg);
+ distribution_info(to, to_arg);
+ erts_cbprintf(to, to_arg, "=loaded_modules\n");
+ loaded(to, to_arg);
+ erts_dump_fun_entries(to, to_arg);
+ erts_deep_process_dump(to, to_arg);
+ erts_cbprintf(to, to_arg, "=atoms\n");
+ dump_atoms(to, to_arg);
/* Keep the instrumentation data at the end of the dump */
if (erts_instr_memory_map || erts_instr_stat) {
- erts_fdprintf(fd, "=instr_data\n");
+ erts_cbprintf(to, to_arg, "=instr_data\n");
if (erts_instr_stat) {
- erts_fdprintf(fd, "=memory_status\n");
- erts_instr_dump_stat_to_fd(fd, 0);
+ erts_cbprintf(to, to_arg, "=memory_status\n");
+ erts_instr_dump_stat_to(to, to_arg, 0);
}
if (erts_instr_memory_map) {
- erts_fdprintf(fd, "=memory_map\n");
- erts_instr_dump_memory_map_to_fd(fd);
+ erts_cbprintf(to, to_arg, "=memory_map\n");
+ erts_instr_dump_memory_map_to(to, to_arg);
}
}
- erts_fdprintf(fd, "=end\n");
+ erts_cbprintf(to, to_arg, "=end\n");
+ if (fp) {
+ fclose(fp);
+ }
close(fd);
- erts_fprintf(stderr,"\nCrash dump was written to: %s\n", dumpname);
+ erts_fprintf(stderr,"done\n");
}
-void
-erl_crash_dump(char* file, int line, char* fmt, ...)
-{
- va_list args;
-
- va_start(args, fmt);
- erl_crash_dump_v(file, line, fmt, args);
- va_end(args);
-}
diff --git a/erts/emulator/beam/bs_instrs.tab b/erts/emulator/beam/bs_instrs.tab
new file mode 100644
index 0000000000..9f03b19731
--- /dev/null
+++ b/erts/emulator/beam/bs_instrs.tab
@@ -0,0 +1,1021 @@
+// -*- c -*-
+//
+// %CopyrightBegin%
+//
+// Copyright Ericsson AB 2017. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// %CopyrightEnd%
+//
+
+%if ARCH_64
+BS_SAFE_MUL(A, B, Fail, Dst) {
+ Uint64 res = ($A) * ($B);
+ if (res / $B != $A) {
+ $Fail;
+ }
+ $Dst = res;
+}
+%else
+BS_SAFE_MUL(A, B, Fail, Dst) {
+ Uint64 res = (Uint64)($A) * (Uint64)($B);
+ if ((res >> (8*sizeof(Uint))) != 0) {
+ $Fail;
+ }
+ $Dst = res;
+}
+%endif
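
The two BS_SAFE_MUL variants use different overflow tests: the 64-bit build multiplies and divides back, while the 32-bit build widens to 64 bits and inspects the high word. Both checks in plain C, as a sketch (in the generated code the unit operand is never zero, so the division guard below is extra caution):

    #include <stdint.h>

    /* Division-based check: a product that overflowed cannot divide
     * back to the original operand. */
    static int safe_mul_div(uint64_t a, uint64_t b, uint64_t *res)
    {
        uint64_t r = a * b;
        if (b != 0 && r / b != a)
            return 0;                /* overflow */
        *res = r;
        return 1;
    }

    /* Widening check for 32-bit words: compute in 64 bits and reject
     * anything that does not fit back into 32. */
    static int safe_mul_widen(uint32_t a, uint32_t b, uint32_t *res)
    {
        uint64_t r = (uint64_t)a * b;
        if (r >> 32)
            return 0;                /* overflow */
        *res = (uint32_t)r;
        return 1;
    }
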
+
+BS_GET_FIELD_SIZE(Bits, Unit, Fail, Dst) {
+ Sint signed_size;
+ Uint uint_size;
+ Uint temp_bits;
+
+ if (is_small($Bits)) {
+ signed_size = signed_val($Bits);
+ if (signed_size < 0) {
+ $Fail;
+ }
+ uint_size = (Uint) signed_size;
+ } else {
+ if (!term_to_Uint($Bits, &temp_bits)) {
+ $Fail;
+ }
+ uint_size = temp_bits;
+ }
+ $BS_SAFE_MUL(uint_size, $Unit, $Fail, $Dst);
+}
+
+BS_GET_UNCHECKED_FIELD_SIZE(Bits, Unit, Fail, Dst) {
+ Sint signed_size;
+ Uint uint_size;
+ Uint temp_bits;
+
+ if (is_small($Bits)) {
+ signed_size = signed_val($Bits);
+ if (signed_size < 0) {
+ $Fail;
+ }
+ uint_size = (Uint) signed_size;
+ } else {
+ if (!term_to_Uint($Bits, &temp_bits)) {
+ $Fail;
+ }
+ uint_size = temp_bits;
+ }
+ $Dst = uint_size * $Unit;
+}
+
+TEST_BIN_VHEAP(VNh, Nh, Live) {
+ Uint need = $Nh;
+ if (E - HTOP < need || MSO(c_p).overhead + $VNh >= BIN_VHEAP_SZ(c_p)) {
+ SWAPOUT;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ FCALLS -= erts_garbage_collect_nobump(c_p, need, reg, $Live, FCALLS);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ SWAPIN;
+ }
+ HEAP_SPACE_VERIFIED(need);
+}
+
+i_bs_get_binary_all2(Fail, Ms, Live, Unit, Dst) {
+ ErlBinMatchBuffer *_mb;
+ Eterm _result;
+
+ $GC_TEST(0, ERL_SUB_BIN_SIZE, $Live);
+ _mb = ms_matchbuffer($Ms);
+ if (((_mb->size - _mb->offset) % $Unit) == 0) {
+ LIGHT_SWAPOUT;
+ _result = erts_bs_get_binary_all_2(c_p, _mb);
+ LIGHT_SWAPIN;
+ HEAP_SPACE_VERIFIED(0);
+ ASSERT(is_value(_result));
+ $Dst = _result;
+ } else {
+ HEAP_SPACE_VERIFIED(0);
+ $FAIL($Fail);
+ }
+}
+
+i_bs_get_binary2(Fail, Ms, Live, Sz, Flags, Dst) {
+ ErlBinMatchBuffer *_mb;
+ Eterm _result;
+ Uint _size;
+ $BS_GET_FIELD_SIZE($Sz, (($Flags) >> 3), $FAIL($Fail), _size);
+ $GC_TEST(0, ERL_SUB_BIN_SIZE, $Live);
+ _mb = ms_matchbuffer($Ms);
+ LIGHT_SWAPOUT;
+ _result = erts_bs_get_binary_2(c_p, _size, $Flags, _mb);
+ LIGHT_SWAPIN;
+ HEAP_SPACE_VERIFIED(0);
+ if (is_non_value(_result)) {
+ $FAIL($Fail);
+ } else {
+ $Dst = _result;
+ }
+}
+
+i_bs_get_binary_imm2(Fail, Ms, Live, Sz, Flags, Dst) {
+ ErlBinMatchBuffer *_mb;
+ Eterm _result;
+ $GC_TEST(0, heap_bin_size(ERL_ONHEAP_BIN_LIMIT), $Live);
+ _mb = ms_matchbuffer($Ms);
+ LIGHT_SWAPOUT;
+ _result = erts_bs_get_binary_2(c_p, $Sz, $Flags, _mb);
+ LIGHT_SWAPIN;
+ HEAP_SPACE_VERIFIED(0);
+ if (is_non_value(_result)) {
+ $FAIL($Fail);
+ } else {
+ $Dst = _result;
+ }
+}
+
+i_bs_get_float2(Fail, Ms, Live, Sz, Flags, Dst) {
+ ErlBinMatchBuffer *_mb;
+ Eterm _result;
+ Sint _size;
+
+ if (!is_small($Sz) || (_size = unsigned_val($Sz)) > 64) {
+ $FAIL($Fail);
+ }
+ _size *= (($Flags) >> 3);
+ $GC_TEST(0, FLOAT_SIZE_OBJECT, $Live);
+ _mb = ms_matchbuffer($Ms);
+ LIGHT_SWAPOUT;
+ _result = erts_bs_get_float_2(c_p, _size, ($Flags), _mb);
+ LIGHT_SWAPIN;
+ HEAP_SPACE_VERIFIED(0);
+ if (is_non_value(_result)) {
+ $FAIL($Fail);
+ } else {
+ $Dst = _result;
+ }
+}
+
+i_bs_skip_bits2(Fail, Ms, Bits, Unit) {
+ ErlBinMatchBuffer *_mb;
+ size_t new_offset;
+ Uint _size;
+
+ _mb = ms_matchbuffer($Ms);
+ $BS_GET_FIELD_SIZE($Bits, $Unit, $FAIL($Fail), _size);
+ new_offset = _mb->offset + _size;
+ if (new_offset <= _mb->size) {
+ _mb->offset = new_offset;
+ } else {
+ $FAIL($Fail);
+ }
+}
+
+i_bs_skip_bits_all2(Fail, Ms, Unit) {
+ ErlBinMatchBuffer *_mb;
+ _mb = ms_matchbuffer($Ms);
+ if (((_mb->size - _mb->offset) % $Unit) == 0) {
+ _mb->offset = _mb->size;
+ } else {
+ $FAIL($Fail);
+ }
+}
+
+i_bs_skip_bits_imm2(Fail, Ms, Bits) {
+ ErlBinMatchBuffer *_mb;
+ size_t new_offset;
+ _mb = ms_matchbuffer($Ms);
+ new_offset = _mb->offset + ($Bits);
+ if (new_offset <= _mb->size) {
+ _mb->offset = new_offset;
+ } else {
+ $FAIL($Fail);
+ }
+}
+
+i_new_bs_put_binary(Fail, Sz, Flags, Src) {
+ Sint _size;
+ $BS_GET_UNCHECKED_FIELD_SIZE($Sz, (($Flags) >> 3), $BADARG($Fail), _size);
+ if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2(($Src), _size))) {
+ $BADARG($Fail);
+ }
+}
+
+i_new_bs_put_binary_all(Fail, Src, Unit) {
+ if (!erts_new_bs_put_binary_all(ERL_BITS_ARGS_2(($Src), ($Unit)))) {
+ $BADARG($Fail);
+ }
+}
+
+i_new_bs_put_binary_imm(Fail, Sz, Src) {
+ if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2(($Src), ($Sz)))) {
+ $BADARG($Fail);
+ }
+}
+
+i_new_bs_put_float(Fail, Sz, Flags, Src) {
+ Sint _size;
+ $BS_GET_UNCHECKED_FIELD_SIZE($Sz, (($Flags) >> 3), $BADARG($Fail), _size);
+ if (!erts_new_bs_put_float(c_p, ($Src), _size, ($Flags))) {
+ $BADARG($Fail);
+ }
+}
+
+i_new_bs_put_float_imm(Fail, Sz, Flags, Src) {
+ if (!erts_new_bs_put_float(c_p, ($Src), ($Sz), ($Flags))) {
+ $BADARG($Fail);
+ }
+}
+
+i_new_bs_put_integer(Fail, Sz, Flags, Src) {
+ Sint _size;
+ $BS_GET_UNCHECKED_FIELD_SIZE($Sz, (($Flags) >> 3), $BADARG($Fail), _size);
+ if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3(($Src), _size, ($Flags)))) {
+ $BADARG($Fail);
+ }
+}
+
+i_new_bs_put_integer_imm(Fail, Sz, Flags, Src) {
+ if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3(($Src), ($Sz), ($Flags)))) {
+ $BADARG($Fail);
+ }
+}
+
+#
+# i_bs_init*
+#
+
+i_bs_init_fail_heap := bs_init.fail_heap.verify.execute;
+i_bs_init_fail := bs_init.fail.verify.execute;
+i_bs_init := bs_init.plain.execute;
+i_bs_init_heap := bs_init.heap.execute;
+
+bs_init.head() {
+ Eterm BsOp1;
+ Eterm BsOp2;
+}
+
+bs_init.fail_heap(Size, HeapAlloc) {
+ BsOp1 = $Size;
+ BsOp2 = $HeapAlloc;
+}
+
+bs_init.fail(Size) {
+ BsOp1 = $Size;
+ BsOp2 = 0;
+}
+
+bs_init.plain(Size) {
+ BsOp1 = $Size;
+ BsOp2 = 0;
+}
+
+bs_init.heap(Size, HeapAlloc) {
+ BsOp1 = $Size;
+ BsOp2 = $HeapAlloc;
+}
+
+bs_init.verify(Fail) {
+ if (is_small(BsOp1)) {
+ Sint size = signed_val(BsOp1);
+ if (size < 0) {
+ $BADARG($Fail);
+ }
+ BsOp1 = (Eterm) size;
+ } else {
+ Uint bytes;
+
+ if (!term_to_Uint(BsOp1, &bytes)) {
+ c_p->freason = bytes;
+ $FAIL_HEAD_OR_BODY($Fail);
+ }
+ if ((bytes >> (8*sizeof(Uint)-3)) != 0) {
+ $SYSTEM_LIMIT($Fail);
+ }
+ BsOp1 = (Eterm) bytes;
+ }
+}
+
+bs_init.execute(Live, Dst) {
+ if (BsOp1 <= ERL_ONHEAP_BIN_LIMIT) {
+ ErlHeapBin* hb;
+ Uint bin_need;
+
+ bin_need = heap_bin_size(BsOp1);
+ erts_bin_offset = 0;
+ erts_writable_bin = 0;
+ $GC_TEST(0, bin_need+BsOp2+ERL_SUB_BIN_SIZE, $Live);
+ hb = (ErlHeapBin *) HTOP;
+ HTOP += bin_need;
+ hb->thing_word = header_heap_bin(BsOp1);
+ hb->size = BsOp1;
+ erts_current_bin = (byte *) hb->data;
+ $Dst = make_binary(hb);
+ } else {
+ Binary* bptr;
+ ProcBin* pb;
+
+ erts_bin_offset = 0;
+ erts_writable_bin = 0;
+ $TEST_BIN_VHEAP(BsOp1 / sizeof(Eterm),
+ BsOp2 + PROC_BIN_SIZE + ERL_SUB_BIN_SIZE, $Live);
+
+ /*
+ * Allocate the binary struct itself.
+ */
+ bptr = erts_bin_nrml_alloc(BsOp1);
+ erts_current_bin = (byte *) bptr->orig_bytes;
+
+ /*
+ * Now allocate the ProcBin on the heap.
+ */
+ pb = (ProcBin *) HTOP;
+ HTOP += PROC_BIN_SIZE;
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = BsOp1;
+ pb->next = MSO(c_p).first;
+ MSO(c_p).first = (struct erl_off_heap_header*) pb;
+ pb->val = bptr;
+ pb->bytes = (byte*) bptr->orig_bytes;
+ pb->flags = 0;
+
+ OH_OVERHEAD(&(MSO(c_p)), BsOp1 / sizeof(Eterm));
+
+ $Dst = make_binary(pb);
+ }
+}
+
+#
+# i_bs_init_bits*
+#
+
+i_bs_init_bits := bs_init_bits.plain.execute;
+i_bs_init_bits_heap := bs_init_bits.heap.execute;
+i_bs_init_bits_fail := bs_init_bits.fail.verify.execute;
+i_bs_init_bits_fail_heap := bs_init_bits.fail_heap.verify.execute;
+
+bs_init_bits.head() {
+ Eterm num_bits_term;
+ Uint num_bits;
+ Uint alloc;
+}
+
+bs_init_bits.plain(NumBits) {
+ num_bits = $NumBits;
+ alloc = 0;
+}
+
+bs_init_bits.heap(NumBits, Alloc) {
+ num_bits = $NumBits;
+ alloc = $Alloc;
+}
+
+bs_init_bits.fail(NumBitsTerm) {
+ num_bits_term = $NumBitsTerm;
+ alloc = 0;
+}
+
+bs_init_bits.fail_heap(NumBitsTerm, Alloc) {
+ num_bits_term = $NumBitsTerm;
+ alloc = $Alloc;
+}
+
+bs_init_bits.verify(Fail) {
+ if (is_small(num_bits_term)) {
+ Sint size = signed_val(num_bits_term);
+ if (size < 0) {
+ $BADARG($Fail);
+ }
+ num_bits = (Uint) size;
+ } else {
+ Uint bits;
+
+ if (!term_to_Uint(num_bits_term, &bits)) {
+ c_p->freason = bits;
+ $FAIL_HEAD_OR_BODY($Fail);
+ }
+ num_bits = (Uint) bits;
+ }
+}
+
+bs_init_bits.execute(Live, Dst) {
+ Eterm new_binary;
+ Uint num_bytes = ((Uint64)num_bits+(Uint64)7) >> 3;
+
+ if (num_bits & 7) {
+ alloc += ERL_SUB_BIN_SIZE;
+ }
+ if (num_bytes <= ERL_ONHEAP_BIN_LIMIT) {
+ alloc += heap_bin_size(num_bytes);
+ } else {
+ alloc += PROC_BIN_SIZE;
+ }
+ $test_heap(alloc, $Live);
+
+ /* num_bits = Number of bits to build
+ * num_bytes = Number of bytes to allocate in the binary
+ * alloc = Total number of words to allocate on heap
+ * Operands: NotUsed NotUsed Dst
+ */
+ if (num_bytes <= ERL_ONHEAP_BIN_LIMIT) {
+ ErlHeapBin* hb;
+
+ erts_bin_offset = 0;
+ erts_writable_bin = 0;
+ hb = (ErlHeapBin *) HTOP;
+ HTOP += heap_bin_size(num_bytes);
+ hb->thing_word = header_heap_bin(num_bytes);
+ hb->size = num_bytes;
+ erts_current_bin = (byte *) hb->data;
+ new_binary = make_binary(hb);
+
+ do_bits_sub_bin:
+ if (num_bits & 7) {
+ ErlSubBin* sb;
+
+ sb = (ErlSubBin *) HTOP;
+ HTOP += ERL_SUB_BIN_SIZE;
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->size = num_bytes - 1;
+ sb->bitsize = num_bits & 7;
+ sb->offs = 0;
+ sb->bitoffs = 0;
+ sb->is_writable = 0;
+ sb->orig = new_binary;
+ new_binary = make_binary(sb);
+ }
+ HEAP_SPACE_VERIFIED(0);
+ $Dst = new_binary;
+ } else {
+ Binary* bptr;
+ ProcBin* pb;
+
+ erts_bin_offset = 0;
+ erts_writable_bin = 0;
+
+ /*
+ * Allocate the binary struct itself.
+ */
+ bptr = erts_bin_nrml_alloc(num_bytes);
+ erts_current_bin = (byte *) bptr->orig_bytes;
+
+ /*
+ * Now allocate the ProcBin on the heap.
+ */
+ pb = (ProcBin *) HTOP;
+ HTOP += PROC_BIN_SIZE;
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = num_bytes;
+ pb->next = MSO(c_p).first;
+ MSO(c_p).first = (struct erl_off_heap_header*) pb;
+ pb->val = bptr;
+ pb->bytes = (byte*) bptr->orig_bytes;
+ pb->flags = 0;
+ OH_OVERHEAD(&(MSO(c_p)), pb->size / sizeof(Eterm));
+ new_binary = make_binary(pb);
+ goto do_bits_sub_bin;
+ }
+}
+
+bs_add(Fail, Src1, Src2, Unit, Dst) {
+ Eterm Op1 = $Src1;
+ Eterm Op2 = $Src2;
+ Uint unit = $Unit;
+
+ if (is_both_small(Op1, Op2)) {
+ Sint Arg1 = signed_val(Op1);
+ Sint Arg2 = signed_val(Op2);
+
+ if (Arg1 >= 0 && Arg2 >= 0) {
+ $BS_SAFE_MUL(Arg2, unit, $SYSTEM_LIMIT($Fail), Op1);
+ Op1 += Arg1;
+
+ store_bs_add_result:
+ if (Op1 <= MAX_SMALL) {
+ Op1 = make_small(Op1);
+ } else {
+ /*
+ * May generate a heap fragment, but in this
+ * particular case it is OK, since the value will be
+ * stored into an x register (the GC will scan x
+ * registers for references to heap fragments) and
+ * there is no risk that the value will be stored into a
+ * location that is not scanned for heap-fragment
+ * references (such as the heap).
+ */
+ SWAPOUT;
+ Op1 = erts_make_integer(Op1, c_p);
+ HTOP = HEAP_TOP(c_p);
+ }
+ $Dst = Op1;
+ $NEXT0();
+ }
+ $BADARG($Fail);
+ } else {
+ Uint a;
+ Uint b;
+ Uint c;
+
+ /*
+ * Now we know that one of the arguments is
+ * not a small. We must convert both arguments
+ * to Uints and check for errors at the same time.
+ *
+ * Error checking is tricky.
+ *
+ * If one of the arguments is not numeric or
+ * not positive, the error reason is BADARG.
+ *
+ * Otherwise if both arguments are numeric,
+ * but at least one argument does not fit in
+ * an Uint, the reason is SYSTEM_LIMIT.
+ */
+
+ if (!term_to_Uint(Op1, &a)) {
+ if (a == BADARG) {
+ $BADARG($Fail);
+ }
+ if (!term_to_Uint(Op2, &b)) {
+ c_p->freason = b;
+ $FAIL_HEAD_OR_BODY($Fail);
+ }
+ $SYSTEM_LIMIT($Fail);
+ } else if (!term_to_Uint(Op2, &b)) {
+ c_p->freason = b;
+ $FAIL_HEAD_OR_BODY($Fail);
+ }
+
+ /*
+ * The arguments are now correct and stored in a and b.
+ */
+
+ $BS_SAFE_MUL(b, unit, $SYSTEM_LIMIT($Fail), c);
+ Op1 = a + c;
+ if (Op1 < a) {
+ /*
+ * If the result is less than one of the
+ * arguments, there must have been an overflow.
+ */
+ $SYSTEM_LIMIT($Fail);
+ }
+ goto store_bs_add_result;
+ }
+ /* No fallthrough */
+ ASSERT(0);
+}
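
The arithmetic in bs_add boils down to two overflow checks: the multiply is verified by dividing back, and the add by testing whether the sum wrapped below one operand. A condensed sketch on plain unsigned integers (error classification only; BADARG for non-numeric or negative arguments is assumed to be handled earlier, as in the instruction):

    #include <stdint.h>

    enum add_err { ADD_OK, ADD_BADARG, ADD_SYSTEM_LIMIT };

    /* Sketch of the bs_add checks: multiply one operand by the unit
     * and add the other, classifying failures the way the instruction
     * does. */
    static enum add_err checked_bs_add(uint64_t a, uint64_t b,
                                       uint64_t unit, uint64_t *out)
    {
        uint64_t c = b * unit;
        if (unit != 0 && c / unit != b)
            return ADD_SYSTEM_LIMIT;  /* b * unit overflowed */

        *out = a + c;
        if (*out < a)                 /* sum wrapped around */
            return ADD_SYSTEM_LIMIT;
        return ADD_OK;
    }
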
+
+bs_put_string(Len, Ptr) {
+ erts_new_bs_put_string(ERL_BITS_ARGS_2((byte *) $Ptr, $Len));
+}
+
+i_bs_append(Fail, ExtraHeap, Live, Unit, Size, Dst) {
+ Uint live = $Live;
+ Uint res;
+
+ HEAVY_SWAPOUT;
+ reg[live] = x(SCRATCH_X_REG);
+ res = erts_bs_append(c_p, reg, live, $Size, $ExtraHeap, $Unit);
+ HEAVY_SWAPIN;
+ if (is_non_value(res)) {
+ /* c_p->freason is already set (to BADARG or SYSTEM_LIMIT). */
+ $FAIL_HEAD_OR_BODY($Fail);
+ }
+ $Dst = res;
+}
+
+i_bs_private_append(Fail, Unit, Size, Src, Dst) {
+ Eterm res;
+
+ res = erts_bs_private_append(c_p, $Src, $Size, $Unit);
+ if (is_non_value(res)) {
+ /* c_p->freason is already set (to BADARG or SYSTEM_LIMIT). */
+ $FAIL_HEAD_OR_BODY($Fail);
+ }
+ $Dst = res;
+}
+
+bs_init_writable() {
+ HEAVY_SWAPOUT;
+ r(0) = erts_bs_init_writable(c_p, r(0));
+ HEAVY_SWAPIN;
+}
+
+i_bs_utf8_size(Src, Dst) {
+ Eterm arg = $Src;
+ Eterm result;
+
+ /*
+ * Calculate the number of bytes needed to encode the source
+ * operand to UTF-8. If the source operand is invalid (e.g. wrong
+ * type or range) we return a nonsense integer result (0 or 4). We
+ * can get away with that because we KNOW that bs_put_utf8 will do
+ * full error checking.
+ */
+
+ if (arg < make_small(0x80UL)) {
+ result = make_small(1);
+ } else if (arg < make_small(0x800UL)) {
+ result = make_small(2);
+ } else if (arg < make_small(0x10000UL)) {
+ result = make_small(3);
+ } else {
+ result = make_small(4);
+ }
+ $Dst = result;
+}
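
The size calculation relies only on the magnitude thresholds of UTF-8: one byte below 0x80, two below 0x800, three below 0x10000, four otherwise. The same table on untagged code points, as a sketch (the instruction above compares tagged smalls directly):

    #include <stdint.h>

    /* Number of bytes needed to UTF-8-encode a code point; validation
     * is assumed to happen at encode time, as the comment above
     * explains. */
    static int utf8_size(uint32_t cp)
    {
        if (cp < 0x80)    return 1;
        if (cp < 0x800)   return 2;
        if (cp < 0x10000) return 3;
        return 4;
    }
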
+
+i_bs_put_utf8(Fail, Src) {
+ if (!erts_bs_put_utf8(ERL_BITS_ARGS_1($Src))) {
+ $BADARG($Fail);
+ }
+}
+
+i_bs_utf16_size(Src, Dst) {
+ Eterm arg = $Src;
+ Eterm result = make_small(2);
+
+ /*
+ * Calculate the number of bytes needed to encode the source
+ * operand to UTF-16. If the source operand is invalid (e.g. wrong
+ * type or range) we return a nonsense integer result (2 or 4). We
+ * can get away with that because we KNOW that bs_put_utf16 will do
+ * full error checking.
+ */
+
+ if (arg >= make_small(0x10000UL)) {
+ result = make_small(4);
+ }
+ $Dst = result;
+}
+
+bs_put_utf16(Fail, Flags, Src) {
+ if (!erts_bs_put_utf16(ERL_BITS_ARGS_2($Src, $Flags))) {
+ $BADARG($Fail);
+ }
+}
+
+// Validate a value about to be stored in a binary.
+i_bs_validate_unicode(Fail, Src) {
+ Eterm val = $Src;
+
+ /*
+ * There is no need to untag the integer, but it IS necessary
+ * to make sure it is small (if the term is a bignum, it could
+ * slip through the test, and there is no further test that
+ * would catch it, since bit syntax construction silently masks
+ * too big numbers).
+ */
+ if (is_not_small(val) || val > make_small(0x10FFFFUL) ||
+ (make_small(0xD800UL) <= val && val <= make_small(0xDFFFUL))) {
+ $BADARG($Fail);
+ }
+}
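
A valid Unicode scalar value is at most 0x10FFFF and not a UTF-16 surrogate (0xD800-0xDFFF); the instruction performs these comparisons on tagged smalls, which is sound because small tagging preserves order for non-negative integers. The untagged check, as a sketch:

    #include <stdint.h>

    /* Untagged version of the check above: a valid Unicode scalar
     * value is at most 0x10FFFF and not a UTF-16 surrogate. */
    static int is_valid_unicode_scalar(uint32_t cp)
    {
        return cp <= 0x10FFFFu && !(cp >= 0xD800u && cp <= 0xDFFFu);
    }
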
+
+// Validate a value that has been matched out.
+i_bs_validate_unicode_retract(Fail, Src, Ms) {
+ /*
+ * There is no need to untag the integer, but it IS necessary
+ * to make sure it is small (a bignum pointer could fall in
+ * the valid range).
+ */
+
+ Eterm i = $Src;
+ if (is_not_small(i) || i > make_small(0x10FFFFUL) ||
+ (make_small(0xD800UL) <= i && i <= make_small(0xDFFFUL))) {
+ Eterm ms = $Ms; /* Match context */
+ ErlBinMatchBuffer* mb;
+
+ /* Invalid value. Retract the position in the binary. */
+ mb = ms_matchbuffer(ms);
+ mb->offset -= 32;
+ $BADARG($Fail);
+ }
+}
+
+
+//
+// Matching of binaries.
+//
+
+i_bs_start_match2 := bs_start_match.fetch.execute;
+
+bs_start_match.head() {
+ Eterm context;
+}
+
+bs_start_match.fetch(Src) {
+ context = $Src;
+}
+
+bs_start_match.execute(Fail, Live, Slots, Dst) {
+ Uint slots;
+ Uint live;
+ Eterm header;
+ if (!is_boxed(context)) {
+ $FAIL($Fail);
+ }
+ header = *boxed_val(context);
+ slots = $Slots;
+ live = $Live;
+ if (header_is_bin_matchstate(header)) {
+ ErlBinMatchState* ms = (ErlBinMatchState *) boxed_val(context);
+ Uint actual_slots = HEADER_NUM_SLOTS(header);
+ ms->save_offset[0] = ms->mb.offset;
+ if (actual_slots < slots) {
+ ErlBinMatchState* dst;
+ Uint live = $Live;
+ Uint wordsneeded = ERL_BIN_MATCHSTATE_SIZE(slots);
+
+ $GC_TEST_PRESERVE(wordsneeded, live, context);
+ ms = (ErlBinMatchState *) boxed_val(context);
+ dst = (ErlBinMatchState *) HTOP;
+ *dst = *ms;
+ *HTOP = HEADER_BIN_MATCHSTATE(slots);
+ HTOP += wordsneeded;
+ HEAP_SPACE_VERIFIED(0);
+ $Dst = make_matchstate(dst);
+ }
+ } else if (is_binary_header(header)) {
+ Eterm result;
+ Uint wordsneeded = ERL_BIN_MATCHSTATE_SIZE(slots);
+ $GC_TEST_PRESERVE(wordsneeded, live, context);
+ HEAP_TOP(c_p) = HTOP;
+#ifdef DEBUG
+ c_p->stop = E; /* Needed for checking in HeapOnlyAlloc(). */
+#endif
+ result = erts_bs_start_match_2(c_p, context, slots);
+ HTOP = HEAP_TOP(c_p);
+ HEAP_SPACE_VERIFIED(0);
+ if (is_non_value(result)) {
+ $FAIL($Fail);
+ }
+ $Dst = result;
+ } else {
+ $FAIL($Fail);
+ }
+}
+
+bs_test_zero_tail2(Fail, Ctx) {
+ ErlBinMatchBuffer *_mb;
+ _mb = (ErlBinMatchBuffer*) ms_matchbuffer($Ctx);
+ if (_mb->size != _mb->offset) {
+ $FAIL($Fail);
+ }
+}
+
+bs_test_tail_imm2(Fail, Ctx, Offset) {
+ ErlBinMatchBuffer *_mb;
+ _mb = ms_matchbuffer($Ctx);
+ if (_mb->size - _mb->offset != $Offset) {
+ $FAIL($Fail);
+ }
+}
+
+bs_test_unit(Fail, Ctx, Unit) {
+ ErlBinMatchBuffer *_mb;
+ _mb = ms_matchbuffer($Ctx);
+ if ((_mb->size - _mb->offset) % $Unit) {
+ $FAIL($Fail);
+ }
+}
+
+bs_test_unit8(Fail, Ctx) {
+ ErlBinMatchBuffer *_mb;
+ _mb = ms_matchbuffer($Ctx);
+ if ((_mb->size - _mb->offset) & 7) {
+ $FAIL($Fail);
+ }
+}
+
+i_bs_get_integer_8(Ctx, Fail, Dst) {
+ Eterm _result;
+ ErlBinMatchBuffer* _mb = ms_matchbuffer($Ctx);
+
+ if (_mb->size - _mb->offset < 8) {
+ $FAIL($Fail);
+ }
+ if (BIT_OFFSET(_mb->offset) != 0) {
+ _result = erts_bs_get_integer_2(c_p, 8, 0, _mb);
+ } else {
+ _result = make_small(_mb->base[BYTE_OFFSET(_mb->offset)]);
+ _mb->offset += 8;
+ }
+ $Dst = _result;
+}
+
+i_bs_get_integer_16(Ctx, Fail, Dst) {
+ Eterm _result;
+ ErlBinMatchBuffer* _mb = ms_matchbuffer($Ctx);
+
+ if (_mb->size - _mb->offset < 16) {
+ $FAIL($Fail);
+ }
+ if (BIT_OFFSET(_mb->offset) != 0) {
+ _result = erts_bs_get_integer_2(c_p, 16, 0, _mb);
+ } else {
+ _result = make_small(get_int16(_mb->base+BYTE_OFFSET(_mb->offset)));
+ _mb->offset += 16;
+ }
+ $Dst = _result;
+}
+
+%if ARCH_64
+i_bs_get_integer_32(Ctx, Fail, Dst) {
+ Uint32 _integer;
+ ErlBinMatchBuffer* _mb = ms_matchbuffer($Ctx);
+
+ if (_mb->size - _mb->offset < 32) {
+ $FAIL($Fail);
+ }
+ if (BIT_OFFSET(_mb->offset) != 0) {
+ _integer = erts_bs_get_unaligned_uint32(_mb);
+ } else {
+ _integer = get_int32(_mb->base + _mb->offset/8);
+ }
+ _mb->offset += 32;
+ $Dst = make_small(_integer);
+}
+%endif
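
The aligned fast path above avoids the generic extraction routine whenever the match position is a whole number of bytes. A standalone sketch of a big-endian 32-bit read at an arbitrary bit offset, assuming MSB-first bit numbering within each byte (illustrative, not the erts_bs API):

    #include <stddef.h>
    #include <stdint.h>

    /* When the bit offset is a multiple of 8 the bytes can be read
     * directly; otherwise each output byte is stitched together from
     * two neighbouring input bytes. */
    static uint32_t get_u32_at_bit(const unsigned char *base, size_t bitoffs)
    {
        size_t byte = bitoffs >> 3;
        unsigned shift = bitoffs & 7;
        uint32_t r = 0;
        int i;

        if (shift == 0) {                 /* byte-aligned: direct reads */
            for (i = 0; i < 4; i++)
                r = (r << 8) | base[byte + i];
            return r;
        }
        for (i = 0; i < 4; i++) {         /* unaligned: stitch two bytes */
            unsigned char hi = base[byte + i] << shift;
            unsigned char lo = base[byte + i + 1] >> (8 - shift);
            r = (r << 8) | (unsigned char)(hi | lo);
        }
        return r;
    }
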
+
+i_bs_get_integer_imm := bs_get_integer.fetch.execute;
+i_bs_get_integer_small_imm := bs_get_integer.fetch_small.execute;
+
+bs_get_integer.head() {
+ Eterm Ms, Sz;
+}
+
+bs_get_integer.fetch(Ctx, Size, Live) {
+ Uint wordsneeded;
+ Ms = $Ctx;
+ Sz = $Size;
+ wordsneeded = 1+WSIZE(NBYTES(Sz));
+ $GC_TEST_PRESERVE(wordsneeded, $Live, Ms);
+}
+
+bs_get_integer.fetch_small(Ctx, Size) {
+ Ms = $Ctx;
+ Sz = $Size;
+}
+
+bs_get_integer.execute(Fail, Flags, Dst) {
+ ErlBinMatchBuffer* mb;
+ Eterm result;
+
+ mb = ms_matchbuffer(Ms);
+ LIGHT_SWAPOUT;
+ result = erts_bs_get_integer_2(c_p, Sz, $Flags, mb);
+ LIGHT_SWAPIN;
+ HEAP_SPACE_VERIFIED(0);
+ if (is_non_value(result)) {
+ $FAIL($Fail);
+ }
+ $Dst = result;
+}
+
+i_bs_get_integer(Fail, Live, FlagsAndUnit, Ms, Sz, Dst) {
+ Uint flags;
+ Uint size;
+ Eterm ms;
+ ErlBinMatchBuffer* mb;
+ Eterm result;
+
+ flags = $FlagsAndUnit;
+ ms = $Ms;
+ $BS_GET_FIELD_SIZE($Sz, (flags >> 3), $FAIL($Fail), size);
+ if (size >= SMALL_BITS) {
+ Uint wordsneeded;
+ /* Check bits size before potential gc.
+ * We do not want a gc and then realize we don't need
+ * the allocated space (i.e. if the op fails).
+ *
+ * Remember to re-acquire the matchbuffer after gc.
+ */
+
+ mb = ms_matchbuffer(ms);
+ if (mb->size - mb->offset < size) {
+ $FAIL($Fail);
+ }
+ wordsneeded = 1+WSIZE(NBYTES((Uint) size));
+ $GC_TEST_PRESERVE(wordsneeded, $Live, ms);
+ }
+ mb = ms_matchbuffer(ms);
+ LIGHT_SWAPOUT;
+ result = erts_bs_get_integer_2(c_p, size, flags, mb);
+ LIGHT_SWAPIN;
+ HEAP_SPACE_VERIFIED(0);
+ if (is_non_value(result)) {
+ $FAIL($Fail);
+ }
+ $Dst = result;
+}
+
+i_bs_get_utf8(Ctx, Fail, Dst) {
+ ErlBinMatchBuffer* mb = ms_matchbuffer($Ctx);
+ Eterm result = erts_bs_get_utf8(mb);
+
+ if (is_non_value(result)) {
+ $FAIL($Fail);
+ }
+ $Dst = result;
+}
+
+i_bs_get_utf16(Ctx, Fail, Flags, Dst) {
+ ErlBinMatchBuffer* mb = ms_matchbuffer($Ctx);
+ Eterm result = erts_bs_get_utf16(mb, $Flags);
+
+ if (is_non_value(result)) {
+ $FAIL($Fail);
+ }
+ $Dst = result;
+}
+
+bs_context_to_binary := ctx_to_bin.fetch.execute;
+i_bs_get_binary_all_reuse := ctx_to_bin.fetch_bin.execute;
+
+ctx_to_bin.head() {
+ Eterm context;
+ ErlBinMatchBuffer* mb;
+ Uint size;
+ Uint offs;
+}
+
+ctx_to_bin.fetch(Src) {
+ context = $Src;
+ if (is_boxed(context) &&
+ header_is_bin_matchstate(*boxed_val(context))) {
+ ErlBinMatchState* ms;
+ ms = (ErlBinMatchState *) boxed_val(context);
+ mb = &ms->mb;
+ offs = ms->save_offset[0];
+ size = mb->size - offs;
+ } else {
+ $NEXT0();
+ }
+}
+
+ctx_to_bin.fetch_bin(Src, Fail, Unit) {
+ context = $Src;
+ mb = ms_matchbuffer(context);
+ size = mb->size - mb->offset;
+ if (size % $Unit != 0) {
+ $FAIL($Fail);
+ }
+ offs = mb->offset;
+}
+
+ctx_to_bin.execute() {
+ Uint hole_size;
+ Uint orig = mb->orig;
+ ErlSubBin* sb = (ErlSubBin *) boxed_val(context);
+ hole_size = 1 + header_arity(sb->thing_word) - ERL_SUB_BIN_SIZE;
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->size = BYTE_OFFSET(size);
+ sb->bitsize = BIT_OFFSET(size);
+ sb->offs = BYTE_OFFSET(offs);
+ sb->bitoffs = BIT_OFFSET(offs);
+ sb->is_writable = 0;
+ sb->orig = orig;
+ if (hole_size) {
+ sb[1].thing_word = make_pos_bignum_header(hole_size-1);
+ }
+}
+
+i_bs_match_string(Ctx, Fail, Bits, Ptr) {
+ byte* bytes = (byte *) $Ptr;
+ Uint bits = $Bits;
+ ErlBinMatchBuffer* mb;
+ Uint offs;
+
+ mb = ms_matchbuffer($Ctx);
+ if (mb->size - mb->offset < bits) {
+ $FAIL($Fail);
+ }
+ offs = mb->offset & 7;
+ if (offs == 0 && (bits & 7) == 0) {
+ if (sys_memcmp(bytes, mb->base+(mb->offset>>3), bits>>3)) {
+ $FAIL($Fail);
+ }
+ } else if (erts_cmp_bits(bytes, 0, mb->base+(mb->offset>>3), mb->offset & 7, bits)) {
+ $FAIL($Fail);
+ }
+ mb->offset += bits;
+}
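
i_bs_match_string compares with sys_memcmp when both the offset and the length are byte-aligned, and falls back to a bit-level compare otherwise. The same split in a self-contained sketch (hypothetical helpers, not erts_cmp_bits):

    #include <stddef.h>
    #include <string.h>

    /* Extract one bit, MSB-first within each byte. */
    static int get_bit(const unsigned char *p, size_t bitoffs)
    {
        return (p[bitoffs >> 3] >> (7 - (bitoffs & 7))) & 1;
    }

    /* Compare nbits of pat (starting at bit 0) against base starting
     * at bitoffs; use memcmp when everything is byte-aligned. */
    static int match_bits(const unsigned char *pat,
                          const unsigned char *base, size_t bitoffs,
                          size_t nbits)
    {
        size_t i;
        if ((bitoffs & 7) == 0 && (nbits & 7) == 0)   /* aligned fast path */
            return memcmp(pat, base + (bitoffs >> 3), nbits >> 3) == 0;
        for (i = 0; i < nbits; i++)                   /* generic slow path */
            if (get_bit(pat, i) != get_bit(base, bitoffs + i))
                return 0;
        return 1;
    }
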
+
+i_bs_save2(Src, Slot) {
+ ErlBinMatchState* _ms = (ErlBinMatchState*) boxed_val((Eterm) $Src);
+ _ms->save_offset[$Slot] = _ms->mb.offset;
+}
+
+i_bs_restore2(Src, Slot) {
+ ErlBinMatchState* _ms = (ErlBinMatchState*) boxed_val((Eterm) $Src);
+ _ms->mb.offset = _ms->save_offset[$Slot];
+}
diff --git a/erts/emulator/beam/code_ix.c b/erts/emulator/beam/code_ix.c
index 4344558348..34e46f5f33 100644
--- a/erts/emulator/beam/code_ix.c
+++ b/erts/emulator/beam/code_ix.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2012. All Rights Reserved.
+ * Copyright Ericsson AB 2012-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -33,8 +34,8 @@
# define CIX_TRACE(text)
#endif
-erts_smp_atomic32_t the_active_code_index;
-erts_smp_atomic32_t the_staging_code_index;
+erts_atomic32_t the_active_code_index;
+erts_atomic32_t the_staging_code_index;
static Process* code_writing_process = NULL;
struct code_write_queue_item {
@@ -42,7 +43,7 @@ struct code_write_queue_item {
struct code_write_queue_item* next;
};
static struct code_write_queue_item* code_write_queue = NULL;
-static erts_smp_mtx_t code_write_permission_mtx;
+static erts_mtx_t code_write_permission_mtx;
#ifdef ERTS_ENABLE_LOCK_CHECK
static erts_tsd_key_t has_code_write_permission;
@@ -54,9 +55,10 @@ void erts_code_ix_init(void)
* single threaded with active and staging set both to zero.
* Preloading is finished by a commit that will set things straight.
*/
- erts_smp_atomic32_init_nob(&the_active_code_index, 0);
- erts_smp_atomic32_init_nob(&the_staging_code_index, 0);
- erts_smp_mtx_init(&code_write_permission_mtx, "code_write_permission");
+ erts_atomic32_init_nob(&the_active_code_index, 0);
+ erts_atomic32_init_nob(&the_staging_code_index, 0);
+ erts_mtx_init(&code_write_permission_mtx, "code_write_permission", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_tsd_key_create(&has_code_write_permission,
"erts_has_code_write_permission");
@@ -64,12 +66,12 @@ void erts_code_ix_init(void)
CIX_TRACE("init");
}
-void erts_start_staging_code_ix(void)
+void erts_start_staging_code_ix(int num_new)
{
beam_catches_start_staging();
export_start_staging();
module_start_staging();
- erts_start_staging_ranges();
+ erts_start_staging_ranges(num_new);
CIX_TRACE("start");
}
@@ -89,10 +91,11 @@ void erts_commit_staging_code_ix(void)
/* We need to take this lock as we are now making the staging export table active */
export_staging_lock();
ix = erts_staging_code_ix();
- erts_smp_atomic32_set_nob(&the_active_code_index, ix);
+ erts_atomic32_set_nob(&the_active_code_index, ix);
ix = (ix + 1) % ERTS_NUM_CODE_IX;
- erts_smp_atomic32_set_nob(&the_staging_code_index, ix);
+ erts_atomic32_set_nob(&the_staging_code_index, ix);
export_staging_unlock();
+ erts_tracer_nif_clear();
CIX_TRACE("activate");
}
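
erts_commit_staging_code_ix publishes the staging generation as active and then advances the staging slot modulo ERTS_NUM_CODE_IX. A minimal sketch of that rotation with C11 atomics (illustrative names, not the erts_atomic32 API):

    #include <stdatomic.h>

    #define NUM_IX 3

    /* Readers use the active index; a single committer flips staging
     * to active and advances the staging slot to the next generation. */
    static atomic_int active_ix = 0;
    static atomic_int staging_ix = 0;

    static void commit_staging(void)
    {
        int ix = atomic_load(&staging_ix);
        atomic_store(&active_ix, ix);                  /* publish */
        atomic_store(&staging_ix, (ix + 1) % NUM_IX);  /* next generation */
    }

    static int read_active(void)
    {
        return atomic_load_explicit(&active_ix, memory_order_relaxed);
    }
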
@@ -112,12 +115,10 @@ void erts_abort_staging_code_ix(void)
int erts_try_seize_code_write_permission(Process* c_p)
{
int success;
-#ifdef ERTS_SMP
- ASSERT(!erts_smp_thr_progress_is_blocking()); /* to avoid deadlock */
-#endif
+ ASSERT(!erts_thr_progress_is_blocking()); /* to avoid deadlock */
ASSERT(c_p != NULL);
- erts_smp_mtx_lock(&code_write_permission_mtx);
+ erts_mtx_lock(&code_write_permission_mtx);
success = (code_writing_process == NULL);
if (success) {
code_writing_process = c_p;
@@ -130,35 +131,35 @@ int erts_try_seize_code_write_permission(Process* c_p)
ASSERT(code_writing_process != c_p);
qitem = erts_alloc(ERTS_ALC_T_CODE_IX_LOCK_Q, sizeof(*qitem));
qitem->p = c_p;
- erts_smp_proc_inc_refc(c_p);
+ erts_proc_inc_refc(c_p);
qitem->next = code_write_queue;
code_write_queue = qitem;
erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
}
- erts_smp_mtx_unlock(&code_write_permission_mtx);
+ erts_mtx_unlock(&code_write_permission_mtx);
return success;
}
void erts_release_code_write_permission(void)
{
- erts_smp_mtx_lock(&code_write_permission_mtx);
- ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
+ erts_mtx_lock(&code_write_permission_mtx);
+ ERTS_LC_ASSERT(erts_has_code_write_permission());
while (code_write_queue != NULL) { /* unleash the entire herd */
struct code_write_queue_item* qitem = code_write_queue;
- erts_smp_proc_lock(qitem->p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(qitem->p, ERTS_PROC_LOCK_STATUS);
if (!ERTS_PROC_IS_EXITING(qitem->p)) {
erts_resume(qitem->p, ERTS_PROC_LOCK_STATUS);
}
- erts_smp_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS);
code_write_queue = qitem->next;
- erts_smp_proc_dec_refc(qitem->p);
+ erts_proc_dec_refc(qitem->p);
erts_free(ERTS_ALC_T_CODE_IX_LOCK_Q, qitem);
}
code_writing_process = NULL;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_tsd_set(has_code_write_permission, (void *) 0);
#endif
- erts_smp_mtx_unlock(&code_write_permission_mtx);
+ erts_mtx_unlock(&code_write_permission_mtx);
}
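
The seize/release pair above implements single-writer permission: contenders enqueue themselves and are suspended, and the releaser resumes the whole queue. A sketch of the same idea with a condition variable standing in for process suspension (pthread names, not the ERTS locking API, which suspends Erlang processes rather than blocking OS threads):

    #include <pthread.h>

    static pthread_mutex_t perm_mtx = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  perm_cv  = PTHREAD_COND_INITIALIZER;
    static int writer_busy = 0;

    static void seize_write_permission(void)
    {
        pthread_mutex_lock(&perm_mtx);
        while (writer_busy)                 /* wait our turn */
            pthread_cond_wait(&perm_cv, &perm_mtx);
        writer_busy = 1;
        pthread_mutex_unlock(&perm_mtx);
    }

    static void release_write_permission(void)
    {
        pthread_mutex_lock(&perm_mtx);
        writer_busy = 0;
        pthread_cond_broadcast(&perm_cv);   /* unleash the entire herd */
        pthread_mutex_unlock(&perm_mtx);
    }
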
#ifdef ERTS_ENABLE_LOCK_CHECK
diff --git a/erts/emulator/beam/code_ix.h b/erts/emulator/beam/code_ix.h
index 16ad900228..42976d2301 100644
--- a/erts/emulator/beam/code_ix.h
+++ b/erts/emulator/beam/code_ix.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2012-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2012-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -55,12 +56,55 @@
# endif
# include "sys.h"
#endif
+
+#include "beam_opcodes.h"
+
struct process;
#define ERTS_NUM_CODE_IX 3
typedef unsigned ErtsCodeIndex;
+typedef struct ErtsCodeMFA_ {
+ Eterm module;
+ Eterm function;
+ Uint arity;
+} ErtsCodeMFA;
+
+/*
+ * The ErtsCodeInfo structure is used both in the Export entry
+ * and in the code as the function header.
+ */
+
+/* If you change the size of this, you also have to update the code
+ in ops.tab to reflect the new func_info size */
+typedef struct ErtsCodeInfo_ {
+ BeamInstr op; /* OpCode(i_func_info) */
+ union {
+ struct generic_bp* gen_bp; /* Trace breakpoint */
+#ifdef HIPE
+ void (*ncallee)(void);
+ struct hipe_call_count* hcc;
+#endif
+ }u;
+ ErtsCodeMFA mfa;
+} ErtsCodeInfo;
+
+/* Get the code associated with an ErtsCodeInfo ptr. */
+ERTS_GLB_INLINE
+BeamInstr *erts_codeinfo_to_code(ErtsCodeInfo *ci);
+
+/* Get the ErtsCodeInfo from a code ptr. */
+ERTS_GLB_INLINE
+ErtsCodeInfo *erts_code_to_codeinfo(BeamInstr *I);
+
+/* Get the code associated with an ErtsCodeMFA ptr. */
+ERTS_GLB_INLINE
+BeamInstr *erts_codemfa_to_code(ErtsCodeMFA *mfa);
+
+/* Get the ErtsCodeMFA from a code ptr. */
+ERTS_GLB_INLINE
+ErtsCodeMFA *erts_code_to_codemfa(BeamInstr *I);
/* Called once at emulator initialization.
*/
@@ -99,7 +143,7 @@ void erts_release_code_write_permission(void);
* Must be followed by calls to either "end" and "commit" or "abort" before
* code write permission can be released.
*/
-void erts_start_staging_code_ix(void);
+void erts_start_staging_code_ix(int num_new);
/* End the staging.
* Preceded by "start" and must be followed by "commit".
@@ -120,20 +164,57 @@ void erts_abort_staging_code_ix(void);
int erts_has_code_write_permission(void);
#endif
-
+/* module/function/arity can be NIL/NIL/-1 when the MFA points to some
+   invalid code, for instance unloaded_fun. */
+#define ASSERT_MFA(MFA) \
+ ASSERT((is_atom((MFA)->module) || is_nil((MFA)->module)) && \
+ (is_atom((MFA)->function) || is_nil((MFA)->function)) && \
+ (((MFA)->arity >= 0 && (MFA)->arity < 1024) || (MFA)->arity == -1))
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-extern erts_smp_atomic32_t the_active_code_index;
-extern erts_smp_atomic32_t the_staging_code_index;
+ERTS_GLB_INLINE
+BeamInstr *erts_codeinfo_to_code(ErtsCodeInfo *ci)
+{
+ ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI) || !ci->op);
+ ASSERT_MFA(&ci->mfa);
+ return (BeamInstr*)(ci + 1);
+}
+
+ERTS_GLB_INLINE
+ErtsCodeInfo *erts_code_to_codeinfo(BeamInstr *I)
+{
+ ErtsCodeInfo *ci = ((ErtsCodeInfo *)(((char *)(I)) - sizeof(ErtsCodeInfo)));
+ ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI) || !ci->op);
+ ASSERT_MFA(&ci->mfa);
+ return ci;
+}
+
+ERTS_GLB_INLINE
+BeamInstr *erts_codemfa_to_code(ErtsCodeMFA *mfa)
+{
+ ASSERT_MFA(mfa);
+ return (BeamInstr*)(mfa + 1);
+}
+
+ERTS_GLB_INLINE
+ErtsCodeMFA *erts_code_to_codemfa(BeamInstr *I)
+{
+ ErtsCodeMFA *mfa = ((ErtsCodeMFA *)(((char *)(I)) - sizeof(ErtsCodeMFA)));
+ ASSERT_MFA(mfa);
+ return mfa;
+}
+
+extern erts_atomic32_t the_active_code_index;
+extern erts_atomic32_t the_staging_code_index;
ERTS_GLB_INLINE ErtsCodeIndex erts_active_code_ix(void)
{
- return erts_smp_atomic32_read_nob(&the_active_code_index);
+ return erts_atomic32_read_nob(&the_active_code_index);
}
ERTS_GLB_INLINE ErtsCodeIndex erts_staging_code_ix(void)
{
- return erts_smp_atomic32_read_nob(&the_staging_code_index);
+ return erts_atomic32_read_nob(&the_staging_code_index);
}
#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
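
The four accessors above are pure pointer arithmetic over one layout invariant: an ErtsCodeInfo header is immediately followed by its function's code, with the MFA last inside the header. A minimal standalone model of the same idiom (simplified stand-in types, not the real ERTS definitions):

    #include <assert.h>
    #include <stdio.h>

    typedef unsigned long Instr;                  /* stand-in for BeamInstr */

    typedef struct {                              /* stand-in for ErtsCodeInfo */
        Instr op;
        void *u;
        struct { unsigned long m, f, a; } mfa;    /* stand-in for ErtsCodeMFA */
    } Info;

    static Instr *info_to_code(Info *ci) { return (Instr *)(ci + 1); }
    static Info  *code_to_info(Instr *I) { return (Info *)((char *)I - sizeof(Info)); }

    int main(void)
    {
        /* A header with room for a few instructions right behind it. */
        union { Info i; unsigned char raw[sizeof(Info) + 4 * sizeof(Instr)]; } fn;
        Instr *code = info_to_code(&fn.i);

        assert((unsigned char *)code == fn.raw + sizeof(Info)); /* code follows header */
        assert(code_to_info(code) == &fn.i);                    /* and back again */
        puts("ok");
        return 0;
    }
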
diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c
index 50548850eb..10bf197405 100644
--- a/erts/emulator/beam/copy.c
+++ b/erts/emulator/beam/copy.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2012. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -21,6 +22,8 @@
# include "config.h"
#endif
+#define ERL_WANT_GC_INTERNALS__
+
#include "sys.h"
#include "erl_vm.h"
#include "global.h"
@@ -32,69 +35,86 @@
#include "erl_bits.h"
#include "dtrace-wrapper.h"
-static void move_one_frag(Eterm** hpp, Eterm* src, Uint src_sz, ErlOffHeap*);
+static void move_one_frag(Eterm** hpp, ErlHeapFragment*, ErlOffHeap*, int);
/*
* Copy object "obj" to process p.
*/
-Eterm
-copy_object(Eterm obj, Process* to)
+Eterm copy_object_x(Eterm obj, Process* to, Uint extra)
{
- Uint size = size_object(obj);
- Eterm* hp = HAlloc(to, size);
- Eterm res;
+ if (!is_immed(obj)) {
+ Uint size = size_object(obj);
+ Eterm* hp = HAllocX(to, size, extra);
+ Eterm res;
#ifdef USE_VM_PROBES
- if (DTRACE_ENABLED(copy_object)) {
- DTRACE_CHARBUF(proc_name, 64);
+ if (DTRACE_ENABLED(copy_object)) {
+ DTRACE_CHARBUF(proc_name, 64);
- erts_snprintf(proc_name, sizeof(DTRACE_CHARBUF_NAME(proc_name)),
- "%T", to->common.id);
- DTRACE2(copy_object, proc_name, size);
- }
+ erts_snprintf(proc_name, sizeof(DTRACE_CHARBUF_NAME(proc_name)),
+ "%T", to->common.id);
+ DTRACE2(copy_object, proc_name, size);
+ }
#endif
- res = copy_struct(obj, size, &hp, &to->off_heap);
+ res = copy_struct(obj, size, &hp, &to->off_heap);
#ifdef DEBUG
- if (eq(obj, res) == 0) {
- erl_exit(ERTS_ABORT_EXIT, "copy not equal to source\n");
- }
+ if (eq(obj, res) == 0) {
+ erts_exit(ERTS_ABORT_EXIT, "copy not equal to source\n");
+ }
#endif
- return res;
+ return res;
+ }
+ return obj;
}
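
copy_object_x generalizes the old entry point with an extra-heap-words argument and a fast path for immediates, which need no heap at all. Presumably the old name survives as a trivial wrapper in a header outside this hunk; hedged sketch:

    /* Hypothetical compatibility wrapper (assumed, not shown in this diff): */
    #define copy_object(obj, to) copy_object_x((obj), (to), 0)
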
/*
* Return the "flat" size of the object.
*/
-#if HALFWORD_HEAP
-Uint size_object_rel(Eterm obj, Eterm* base)
-#else
-Uint size_object(Eterm obj)
-#endif
+#define in_literal_purge_area(PTR) \
+ (lit_purge_ptr && ( \
+ (lit_purge_ptr <= (PTR) && \
+ (PTR) < (lit_purge_ptr + lit_purge_sz))))
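
in_literal_purge_area is a half-open pointer range test: literals are normally shared and skipped by the copy, except when they live in the area that is about to be purged and therefore must be copied out. A standalone model of the check (hypothetical names):

    #include <assert.h>
    #include <stddef.h>

    typedef unsigned long Eterm;

    /* Same shape as the macro above: a NULL base means no purge in progress,
     * and the range [base, base + sz) is half-open. */
    static int in_purge_area(const Eterm *base, size_t sz, const Eterm *p)
    {
        return base != NULL && base <= p && p < base + sz;
    }

    int main(void)
    {
        Eterm lit[8];
        assert( in_purge_area(lit, 8, &lit[0]));   /* first word: inside    */
        assert( in_purge_area(lit, 8, &lit[7]));   /* last word: inside     */
        assert(!in_purge_area(lit, 8, lit + 8));   /* one past end: outside */
        assert(!in_purge_area(NULL, 0, &lit[3]));  /* no purge area at all  */
        return 0;
    }
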
+
+Uint size_object_x(Eterm obj, erts_literal_area_t *litopt)
{
Uint sum = 0;
Eterm* ptr;
int arity;
-
+ Eterm *lit_purge_ptr = litopt ? litopt->lit_purge_ptr : NULL;
+ Uint lit_purge_sz = litopt ? litopt->lit_purge_sz : 0;
+#ifdef DEBUG
+ Eterm mypid = erts_get_current_pid();
+#endif
DECLARE_ESTACK(s);
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] size_object %p\n", mypid, obj));
+
for (;;) {
switch (primary_tag(obj)) {
case TAG_PRIMARY_LIST:
+ ptr = list_val(obj);
+ if (litopt && erts_is_literal(obj,ptr) && !in_literal_purge_area(ptr)) {
+ goto pop_next;
+ }
sum += 2;
- ptr = list_val_rel(obj,base);
obj = *ptr++;
if (!IS_CONST(obj)) {
ESTACK_PUSH(s, obj);
- }
+ }
obj = *ptr;
break;
case TAG_PRIMARY_BOXED:
{
- Eterm hdr = *boxed_val_rel(obj,base);
+ Eterm hdr;
+ ptr = boxed_val(obj);
+ if (litopt && erts_is_literal(obj,ptr) && !in_literal_purge_area(ptr)) {
+ goto pop_next;
+ }
+ hdr = *ptr;
ASSERT(is_header(hdr));
switch (hdr & _TAG_HEADER_MASK) {
case ARITYVAL_SUBTAG:
- ptr = tuple_val_rel(obj,base);
+ ptr = tuple_val(obj);
arity = header_arity(hdr);
sum += arity + 1;
if (arity == 0) { /* Empty tuple -- unusual. */
@@ -110,7 +130,7 @@ Uint size_object(Eterm obj)
break;
case FUN_SUBTAG:
{
- Eterm* bptr = fun_val_rel(obj,base);
+ Eterm* bptr = fun_val(obj);
ErlFunThing* funp = (ErlFunThing *) bptr;
unsigned eterms = 1 /* creator */ + funp->num_free;
unsigned sz = thing_arityval(hdr);
@@ -125,6 +145,52 @@ Uint size_object(Eterm obj)
obj = *bptr;
break;
}
+ case MAP_SUBTAG:
+ switch (MAP_HEADER_TYPE(hdr)) {
+ case MAP_HEADER_TAG_FLATMAP_HEAD :
+ {
+ Uint n;
+ flatmap_t *mp;
+ mp = (flatmap_t*)flatmap_val(obj);
+ ptr = (Eterm *)mp;
+ n = flatmap_get_size(mp) + 1;
+ sum += n + 2;
+ ptr += 2; /* hdr + size words */
+ while (n--) {
+ obj = *ptr++;
+ if (!IS_CONST(obj)) {
+ ESTACK_PUSH(s, obj);
+ }
+ }
+ goto pop_next;
+ }
+ case MAP_HEADER_TAG_HAMT_HEAD_BITMAP :
+ case MAP_HEADER_TAG_HAMT_HEAD_ARRAY :
+ case MAP_HEADER_TAG_HAMT_NODE_BITMAP :
+ {
+ Eterm *head;
+ Uint sz;
+ head = hashmap_val(obj);
+ sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ sum += 1 + sz + header_arity(hdr);
+ head += 1 + header_arity(hdr);
+
+ if (sz == 0) {
+ goto pop_next;
+ }
+ while(sz-- > 1) {
+ obj = head[sz];
+ if (!IS_CONST(obj)) {
+ ESTACK_PUSH(s, obj);
+ }
+ }
+ obj = head[0];
+ }
+ break;
+ default:
+ erts_exit(ERTS_ABORT_EXIT, "size_object: bad hashmap type %d\n", MAP_HEADER_TYPE(hdr));
+ }
+ break;
case SUB_BINARY_SUBTAG:
{
Eterm real_bin;
@@ -133,7 +199,7 @@ Uint size_object(Eterm obj)
Uint bitoffs;
Uint extra_bytes;
Eterm hdr;
- ERTS_GET_REAL_BIN_REL(obj, real_bin, offset, bitoffs, bitsize, base);
+ ERTS_GET_REAL_BIN(obj, real_bin, offset, bitoffs, bitsize);
if ((bitsize + bitoffs) > 8) {
sum += ERL_SUB_BIN_SIZE;
extra_bytes = 2;
@@ -143,35 +209,17 @@ Uint size_object(Eterm obj)
} else {
extra_bytes = 0;
}
- hdr = *binary_val_rel(real_bin,base);
+ hdr = *binary_val(real_bin);
if (thing_subtag(hdr) == REFC_BINARY_SUBTAG) {
sum += PROC_BIN_SIZE;
} else {
- sum += heap_bin_size(binary_size_rel(obj,base)+extra_bytes);
+ sum += heap_bin_size(binary_size(obj)+extra_bytes);
}
goto pop_next;
}
break;
- case MAP_SUBTAG:
- {
- Uint n;
- map_t *mp;
- mp = (map_t*)map_val_rel(obj,base);
- ptr = (Eterm *)mp;
- n = map_get_size(mp) + 1;
- sum += n + 2;
- ptr += 2; /* hdr + size words */
- while (n--) {
- obj = *ptr++;
- if (!IS_CONST(obj)) {
- ESTACK_PUSH(s, obj);
- }
- }
- goto pop_next;
- }
- break;
- case BIN_MATCHSTATE_SUBTAG:
- erl_exit(ERTS_ABORT_EXIT,
+ case BIN_MATCHSTATE_SUBTAG:
+ erts_exit(ERTS_ABORT_EXIT,
"size_object: matchstate term not allowed");
default:
sum += thing_arityval(hdr) + 1;
@@ -183,25 +231,380 @@ Uint size_object(Eterm obj)
pop_next:
if (ESTACK_ISEMPTY(s)) {
DESTROY_ESTACK(s);
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] size was: %u\n", mypid, sum));
return sum;
}
obj = ESTACK_POP(s);
break;
default:
- erl_exit(ERTS_ABORT_EXIT, "size_object: bad tag for %#x\n", obj);
+ erts_exit(ERTS_ABORT_EXIT, "size_object: bad tag for %#x\n", obj);
+ }
+ }
+}
+
+/*
+ * Machinery for recording sharing information
+ * Using a WSTACK, but not very transparently; consider refactoring
+ */
+
+#define DECLARE_BITSTORE(s) \
+ DECLARE_WSTACK(s); \
+ int WSTK_CONCAT(s,_bitoffs) = 0; \
+ int WSTK_CONCAT(s,_offset) = 0; \
+ UWord WSTK_CONCAT(s,_buffer) = 0
+
+#define DESTROY_BITSTORE(s) DESTROY_WSTACK(s)
+#define BITSTORE_PUT(s,i) \
+do { \
+ WSTK_CONCAT(s,_buffer) |= (UWord)(i) << WSTK_CONCAT(s,_bitoffs); \
+ WSTK_CONCAT(s,_bitoffs) += 2; \
+ if (WSTK_CONCAT(s,_bitoffs) >= 8*sizeof(UWord)) { \
+ WSTACK_PUSH(s, WSTK_CONCAT(s,_buffer)); \
+ WSTK_CONCAT(s,_bitoffs) = 0; \
+ WSTK_CONCAT(s,_buffer) = 0; \
+ } \
+} while(0)
+#define BITSTORE_CLOSE(s) \
+do { \
+ if (WSTK_CONCAT(s,_bitoffs) > 0) { \
+ WSTACK_PUSH(s, WSTK_CONCAT(s,_buffer)); \
+ WSTK_CONCAT(s,_bitoffs) = 0; \
+ } \
+} while(0)
+
+#define BITSTORE_FETCH(s,dst) \
+do { \
+ UWord result; \
+ if (WSTK_CONCAT(s,_bitoffs) <= 0) { \
+ WSTK_CONCAT(s,_buffer) = s.wstart[WSTK_CONCAT(s,_offset)]; \
+ WSTK_CONCAT(s,_offset)++; \
+ WSTK_CONCAT(s,_bitoffs) = 8*sizeof(UWord); \
+ } \
+ WSTK_CONCAT(s,_bitoffs) -= 2; \
+ result = WSTK_CONCAT(s,_buffer) & 3; \
+ WSTK_CONCAT(s,_buffer) >>= 2; \
+ (dst) = result; \
+} while(0)
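
BITSTORE_PUT packs one 2-bit primary tag per entry into machine words, flushing a word to the WSTACK whenever the buffer holds 8*sizeof(UWord) bits, and BITSTORE_FETCH replays the entries in the same LSB-first order. The arithmetic in isolation (a fixed array stands in for the WSTACK; illustrative only):

    #include <assert.h>

    typedef unsigned long UWord;
    #define WORD_BITS (8 * sizeof(UWord))

    static UWord store[4];                 /* stand-in for the WSTACK backing */
    static int   nwords;

    int main(void)
    {
        UWord buf = 0; int bitoffs = 0;    /* writer state, as in BITSTORE_PUT */
        UWord i;

        for (i = 0; i < WORD_BITS; i++) {  /* write 2*WORD_BITS bits in total */
            buf |= (i & 3) << bitoffs;
            bitoffs += 2;
            if (bitoffs >= (int)WORD_BITS) {
                store[nwords++] = buf;     /* word full: flush it */
                bitoffs = 0; buf = 0;
            }
        }
        if (bitoffs > 0)                   /* BITSTORE_CLOSE: flush remainder */
            store[nwords++] = buf;

        /* Reader state, as in BITSTORE_FETCH: refill, then peel off 2 bits. */
        {
            UWord rbuf = 0; int roffs = 0, ridx = 0;
            for (i = 0; i < WORD_BITS; i++) {
                if (roffs <= 0) { rbuf = store[ridx++]; roffs = (int)WORD_BITS; }
                roffs -= 2;
                assert((rbuf & 3) == (i & 3));
                rbuf >>= 2;
            }
        }
        return 0;
    }
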
+
+#define COUNT_OFF_HEAP (0)
+
+/*
+ * Return the real size of an object, and find sharing information.
+ * This currently returns the same result as erts_debug:size/1.
+ * It is debatable whether the size of subterms in constant pools
+ * should be counted or not.
+ */
+
+Uint size_shared(Eterm obj)
+{
+ Eterm saved_obj = obj;
+ Uint sum = 0;
+ Eterm* ptr;
+
+ DECLARE_EQUEUE(s);
+ DECLARE_BITSTORE(b);
+
+ for (;;) {
+ switch (primary_tag(obj)) {
+ case TAG_PRIMARY_LIST: {
+ Eterm head, tail;
+ ptr = list_val(obj);
+ /* we're not counting anything that's outside our heap */
+ if (!COUNT_OFF_HEAP && erts_is_literal(obj,ptr)) {
+ goto pop_next;
+ }
+ head = CAR(ptr);
+ tail = CDR(ptr);
+ /* if it's visited, don't count it */
+ if (primary_tag(tail) == TAG_PRIMARY_HEADER ||
+ primary_tag(head) == TAG_PRIMARY_HEADER) {
+ goto pop_next;
+ }
+ /* else make it visited now */
+ switch (primary_tag(tail)) {
+ case TAG_PRIMARY_LIST:
+ ptr[1] = (tail - TAG_PRIMARY_LIST) | TAG_PRIMARY_HEADER;
+ break;
+ case TAG_PRIMARY_IMMED1:
+ CAR(ptr) = (head - primary_tag(head)) | TAG_PRIMARY_HEADER;
+ CDR(ptr) = (tail - TAG_PRIMARY_IMMED1) | primary_tag(head);
+ break;
+ case TAG_PRIMARY_BOXED:
+ BITSTORE_PUT(b, primary_tag(head));
+ CAR(ptr) = (head - primary_tag(head)) | TAG_PRIMARY_HEADER;
+ CDR(ptr) = (tail - TAG_PRIMARY_BOXED) | TAG_PRIMARY_HEADER;
+ break;
+ }
+ /* and count it */
+ sum += 2;
+ if (!IS_CONST(head)) {
+ EQUEUE_PUT(s, head);
+ }
+ obj = tail;
+ break;
+ }
+ case TAG_PRIMARY_BOXED: {
+ Eterm hdr;
+ ptr = boxed_val(obj);
+ /* we're not counting anything that's outside our heap */
+ if (!COUNT_OFF_HEAP && erts_is_literal(obj,ptr)) {
+ goto pop_next;
+ }
+ hdr = *ptr;
+ /* if it's visited, don't count it */
+ if (primary_tag(hdr) != TAG_PRIMARY_HEADER) {
+ goto pop_next;
+ }
+ /* else make it visited now */
+ *ptr = (hdr - primary_tag(hdr)) + BOXED_VISITED;
+ /* and count it */
+ ASSERT(is_header(hdr));
+ switch (hdr & _TAG_HEADER_MASK) {
+ case ARITYVAL_SUBTAG: {
+ int arity = header_arity(hdr);
+ sum += arity + 1;
+ if (arity == 0) { /* Empty tuple -- unusual. */
+ goto pop_next;
+ }
+ while (arity-- > 0) {
+ obj = *++ptr;
+ if (!IS_CONST(obj)) {
+ EQUEUE_PUT(s, obj);
+ }
+ }
+ goto pop_next;
+ }
+ case FUN_SUBTAG: {
+ ErlFunThing* funp = (ErlFunThing *) ptr;
+ unsigned eterms = 1 /* creator */ + funp->num_free;
+ unsigned sz = thing_arityval(hdr);
+ sum += 1 /* header */ + sz + eterms;
+ ptr += 1 /* header */ + sz;
+ while (eterms-- > 0) {
+ obj = *ptr++;
+ if (!IS_CONST(obj)) {
+ EQUEUE_PUT(s, obj);
+ }
+ }
+ goto pop_next;
+ }
+ case SUB_BINARY_SUBTAG: {
+ ErlSubBin* sb = (ErlSubBin *) ptr;
+ Uint extra_bytes;
+ Eterm hdr;
+ ASSERT((sb->thing_word & ~BOXED_VISITED_MASK) == HEADER_SUB_BIN);
+ if (sb->bitsize + sb->bitoffs > 8) {
+ sum += ERL_SUB_BIN_SIZE;
+ extra_bytes = 2;
+ } else if (sb->bitsize + sb->bitoffs > 0) {
+ sum += ERL_SUB_BIN_SIZE;
+ extra_bytes = 1;
+ } else {
+ extra_bytes = 0;
+ }
+ ptr = binary_val(sb->orig);
+ hdr = (*ptr) & ~BOXED_VISITED_MASK;
+ if (thing_subtag(hdr) == REFC_BINARY_SUBTAG) {
+ sum += PROC_BIN_SIZE;
+ } else {
+ ASSERT(thing_subtag(hdr) == HEAP_BINARY_SUBTAG);
+ sum += heap_bin_size(binary_size(obj) + extra_bytes);
+ }
+ goto pop_next;
+ }
+ case MAP_SUBTAG:
+ switch (MAP_HEADER_TYPE(hdr)) {
+ case MAP_HEADER_TAG_FLATMAP_HEAD : {
+ flatmap_t *mp = (flatmap_t*)flatmap_val(obj);
+ Uint n = flatmap_get_size(mp) + 1;
+ ptr = (Eterm *)mp;
+ sum += n + 2;
+ ptr += 2; /* hdr + size words */
+ while (n--) {
+ obj = *ptr++;
+ if (!IS_CONST(obj)) {
+ EQUEUE_PUT(s, obj);
+ }
+ }
+ goto pop_next;
+ }
+ case MAP_HEADER_TAG_HAMT_HEAD_BITMAP :
+ case MAP_HEADER_TAG_HAMT_HEAD_ARRAY :
+ case MAP_HEADER_TAG_HAMT_NODE_BITMAP : {
+ Uint n = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ sum += 1 + n + header_arity(hdr);
+ ptr += 1 + header_arity(hdr);
+ while (n--) {
+ obj = *ptr++;
+ if (!IS_CONST(obj)) {
+ EQUEUE_PUT(s, obj);
+ }
+ }
+ goto pop_next;
+ }
+ default:
+ erts_exit(ERTS_ABORT_EXIT, "size_shared: bad hashmap type %d\n", MAP_HEADER_TYPE(hdr));
+ }
+ case BIN_MATCHSTATE_SUBTAG:
+ erts_exit(ERTS_ABORT_EXIT,
+ "size_shared: matchstate term not allowed");
+ default:
+ sum += thing_arityval(hdr) + 1;
+ goto pop_next;
+ }
+ break;
+ }
+ case TAG_PRIMARY_IMMED1:
+ pop_next:
+ if (EQUEUE_ISEMPTY(s)) {
+ goto cleanup;
+ }
+ obj = EQUEUE_GET(s);
+ break;
+ default:
+ erts_exit(ERTS_ABORT_EXIT, "size_shared: bad tag for %#x\n", obj);
+ }
+ }
+
+cleanup:
+ obj = saved_obj;
+ BITSTORE_CLOSE(b);
+ for (;;) {
+ switch (primary_tag(obj)) {
+ case TAG_PRIMARY_LIST: {
+ Eterm head, tail;
+ ptr = list_val(obj);
+ if (!COUNT_OFF_HEAP && erts_is_literal(obj,ptr)) {
+ goto cleanup_next;
+ }
+ head = CAR(ptr);
+ tail = CDR(ptr);
+ /* if not already clean, clean it up */
+ if (primary_tag(tail) == TAG_PRIMARY_HEADER) {
+ if (primary_tag(head) == TAG_PRIMARY_HEADER) {
+ Eterm saved;
+ BITSTORE_FETCH(b, saved);
+ CAR(ptr) = head = (head - TAG_PRIMARY_HEADER) | saved;
+ CDR(ptr) = tail = (tail - TAG_PRIMARY_HEADER) | TAG_PRIMARY_BOXED;
+ } else {
+ CDR(ptr) = tail = (tail - TAG_PRIMARY_HEADER) | TAG_PRIMARY_LIST;
+ }
+ } else if (primary_tag(head) == TAG_PRIMARY_HEADER) {
+ CAR(ptr) = head = (head - TAG_PRIMARY_HEADER) | primary_tag(tail);
+ CDR(ptr) = tail = (tail - primary_tag(tail)) | TAG_PRIMARY_IMMED1;
+ } else {
+ goto cleanup_next;
+ }
+ /* and its children too */
+ if (!IS_CONST(head)) {
+ EQUEUE_PUT_UNCHECKED(s, head);
+ }
+ obj = tail;
+ break;
+ }
+ case TAG_PRIMARY_BOXED: {
+ Eterm hdr;
+ ptr = boxed_val(obj);
+ if (!COUNT_OFF_HEAP && erts_is_literal(obj,ptr)) {
+ goto cleanup_next;
+ }
+ hdr = *ptr;
+ /* if not already clean, clean it up */
+ if (primary_tag(hdr) == TAG_PRIMARY_HEADER) {
+ goto cleanup_next;
+ }
+ else {
+ ASSERT(primary_tag(hdr) == BOXED_VISITED);
+ *ptr = hdr = (hdr - BOXED_VISITED) + TAG_PRIMARY_HEADER;
+ }
+ /* and its children too */
+ switch (hdr & _TAG_HEADER_MASK) {
+ case ARITYVAL_SUBTAG: {
+ int arity = header_arity(hdr);
+ if (arity == 0) { /* Empty tuple -- unusual. */
+ goto cleanup_next;
+ }
+ while (arity-- > 0) {
+ obj = *++ptr;
+ if (!IS_CONST(obj)) {
+ EQUEUE_PUT_UNCHECKED(s, obj);
+ }
+ }
+ goto cleanup_next;
+ }
+ case FUN_SUBTAG: {
+ ErlFunThing* funp = (ErlFunThing *) ptr;
+ unsigned eterms = 1 /* creator */ + funp->num_free;
+ unsigned sz = thing_arityval(hdr);
+ ptr += 1 /* header */ + sz;
+ while (eterms-- > 0) {
+ obj = *ptr++;
+ if (!IS_CONST(obj)) {
+ EQUEUE_PUT_UNCHECKED(s, obj);
+ }
+ }
+ goto cleanup_next;
+ }
+ case MAP_SUBTAG:
+ switch (MAP_HEADER_TYPE(hdr)) {
+ case MAP_HEADER_TAG_FLATMAP_HEAD : {
+ flatmap_t *mp = (flatmap_t *) ptr;
+ Uint n = flatmap_get_size(mp) + 1;
+ ptr += 2; /* hdr + size words */
+ while (n--) {
+ obj = *ptr++;
+ if (!IS_CONST(obj)) {
+ EQUEUE_PUT_UNCHECKED(s, obj);
+ }
+ }
+ goto cleanup_next;
+ }
+ case MAP_HEADER_TAG_HAMT_HEAD_BITMAP :
+ case MAP_HEADER_TAG_HAMT_HEAD_ARRAY :
+ case MAP_HEADER_TAG_HAMT_NODE_BITMAP : {
+ Uint n = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ ptr += 1 + header_arity(hdr);
+ while (n--) {
+ obj = *ptr++;
+ if (!IS_CONST(obj)) {
+ EQUEUE_PUT_UNCHECKED(s, obj);
+ }
+ }
+ goto cleanup_next;
+ }
+ default:
+ erts_exit(ERTS_ABORT_EXIT, "size_shared: bad hashmap type %d\n", MAP_HEADER_TYPE(hdr));
+ }
+ default:
+ goto cleanup_next;
+ }
+ break;
+ }
+ case TAG_PRIMARY_IMMED1:
+ cleanup_next:
+ if (EQUEUE_ISEMPTY(s)) {
+ goto all_clean;
+ }
+ obj = EQUEUE_GET(s);
+ break;
+ default:
+ erts_exit(ERTS_ABORT_EXIT, "size_shared: bad tag for %#x\n", obj);
}
}
+
+ all_clean:
+ /* Return the result */
+ DESTROY_EQUEUE(s);
+ DESTROY_BITSTORE(b);
+ return sum;
}
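
Both passes above rely on the same trick: a term is marked visited by rewriting the 2-bit primary tag of a live word to a reserved pattern, with the original tag either implied by the pattern or parked in the side bitstore, and the cleanup pass restores it. Because the marks are written into the term itself, size_shared must always run to completion and the term must not be read concurrently. The mark/restore arithmetic in isolation (hypothetical standalone model):

    #include <assert.h>

    typedef unsigned long Eterm;
    #define TAG_MASK   ((Eterm)3)
    #define TAG_HEADER ((Eterm)0)   /* TAG_PRIMARY_HEADER doubles as "visited" */

    int main(void)
    {
        Eterm slot  = 0x1000 | 1;                /* a tagged word, tag bits 01 */
        Eterm saved = slot & TAG_MASK;           /* park the tag on the side   */

        slot = (slot - saved) | TAG_HEADER;      /* mark: overwrite the tag    */
        assert((slot & TAG_MASK) == TAG_HEADER);

        slot = (slot - TAG_HEADER) | saved;      /* cleanup: restore the tag   */
        assert(slot == (Eterm)(0x1000 | 1));
        return 0;
    }
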
+
/*
* Copy a structure to a heap.
*/
-#if HALFWORD_HEAP
-Eterm copy_struct_rel(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap,
- Eterm* src_base, Eterm* dst_base)
-#else
-Eterm copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
-#endif
+Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint *bsz, erts_literal_area_t *litopt)
{
char* hstart;
Uint hsize;
@@ -216,19 +619,25 @@ Eterm copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
Eterm* argp;
Eterm* const_tuple;
Eterm hdr;
+ Eterm *hend;
int i;
+ Eterm *lit_purge_ptr = litopt ? litopt->lit_purge_ptr : NULL;
+ Uint lit_purge_sz = litopt ? litopt->lit_purge_sz : 0;
#ifdef DEBUG
Eterm org_obj = obj;
Uint org_sz = sz;
+ Eterm mypid = erts_get_current_pid();
#endif
if (IS_CONST(obj))
return obj;
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] copy_struct %p\n", mypid, obj));
+
DTRACE1(copy_struct, (int32_t)sz);
hp = htop = *hpp;
- hbot = htop + sz;
+ hbot = hend = htop + sz;
hstart = (char *)htop;
hsize = (char*) hbot - hstart;
const_tuple = 0;
@@ -237,11 +646,11 @@ Eterm copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
switch (primary_tag(obj)) {
case TAG_PRIMARY_LIST:
argp = &res;
- objp = list_val_rel(obj,src_base);
+ objp = list_val(obj);
goto L_copy_list;
case TAG_PRIMARY_BOXED: argp = &res; goto L_copy_boxed;
default:
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"%s, line %d: Internal error in copy_struct: 0x%08x\n",
__FILE__, __LINE__,obj);
}
@@ -249,25 +658,25 @@ Eterm copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
L_copy:
while (hp != htop) {
obj = *hp;
-
switch (primary_tag(obj)) {
case TAG_PRIMARY_IMMED1:
hp++;
break;
case TAG_PRIMARY_LIST:
- objp = list_val_rel(obj,src_base);
- #if !HALFWORD_HEAP || defined(DEBUG)
- if (in_area(objp,hstart,hsize)) {
- ASSERT(!HALFWORD_HEAP);
+ objp = list_val(obj);
+ if (ErtsInArea(objp,hstart,hsize)) {
hp++;
break;
}
- #endif
argp = hp++;
/* Fall through */
L_copy_list:
tailp = argp;
+ if (litopt && erts_is_literal(obj,objp) && !in_literal_purge_area(objp)) {
+ *tailp = obj;
+ goto L_copy;
+ }
for (;;) {
tp = tailp;
elem = CAR(objp);
@@ -275,55 +684,53 @@ Eterm copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
hbot -= 2;
CAR(hbot) = elem;
tailp = &CDR(hbot);
- }
- else {
+ } else {
CAR(htop) = elem;
- #if HALFWORD_HEAP
- CDR(htop) = CDR(objp);
- *tailp = make_list_rel(htop,dst_base);
- htop += 2;
- goto L_copy;
- #else
tailp = &CDR(htop);
htop += 2;
- #endif
}
- ASSERT(!HALFWORD_HEAP || tp < hp || tp >= hbot);
- *tp = make_list_rel(tailp - 1, dst_base);
+ *tp = make_list(tailp - 1);
obj = CDR(objp);
+
if (!is_list(obj)) {
break;
}
- objp = list_val_rel(obj,src_base);
+ objp = list_val(obj);
+
+ if (litopt && erts_is_literal(obj,objp) && !in_literal_purge_area(objp)) {
+ *tailp = obj;
+ goto L_copy;
+ }
}
switch (primary_tag(obj)) {
case TAG_PRIMARY_IMMED1: *tailp = obj; goto L_copy;
case TAG_PRIMARY_BOXED: argp = tailp; goto L_copy_boxed;
default:
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"%s, line %d: Internal error in copy_struct: 0x%08x\n",
__FILE__, __LINE__,obj);
}
-
+
case TAG_PRIMARY_BOXED:
- #if !HALFWORD_HEAP || defined(DEBUG)
- if (in_area(boxed_val_rel(obj,src_base),hstart,hsize)) {
- ASSERT(!HALFWORD_HEAP);
+ if (ErtsInArea(boxed_val(obj),hstart,hsize)) {
hp++;
break;
}
- #endif
argp = hp++;
L_copy_boxed:
- objp = boxed_val_rel(obj, src_base);
+ objp = boxed_val(obj);
+ if (litopt && erts_is_literal(obj,objp) && !in_literal_purge_area(objp)) {
+ *argp = obj;
+ break;
+ }
hdr = *objp;
switch (hdr & _TAG_HEADER_MASK) {
case ARITYVAL_SUBTAG:
{
int const_flag = 1; /* assume constant tuple */
i = arityval(hdr);
- *argp = make_tuple_rel(htop, dst_base);
+ *argp = make_tuple(htop);
tp = htop; /* tp is pointer to new arity value */
*htop++ = *objp++; /* copy arity value */
while (i--) {
@@ -338,15 +745,6 @@ Eterm copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
}
}
break;
- case MAP_SUBTAG:
- {
- i = map_get_size(objp) + 3;
- *argp = make_map_rel(htop, dst_base);
- while (i--) {
- *htop++ = *objp++;
- }
- }
- break;
case REFC_BINARY_SUBTAG:
{
ProcBin* pb;
@@ -361,9 +759,9 @@ Eterm copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
while (i--) {
*tp++ = *objp++;
}
- *argp = make_binary_rel(hbot, dst_base);
+ *argp = make_binary(hbot);
pb = (ProcBin*) hbot;
- erts_refc_inc(&pb->val->refc, 2);
+ erts_refc_inc(&pb->val->intern.refc, 2);
pb->next = off_heap->first;
pb->flags = 0;
off_heap->first = (struct erl_off_heap_header*) pb;
@@ -386,9 +784,9 @@ Eterm copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
extra_bytes = 1;
} else {
extra_bytes = 0;
- }
+ }
real_size = size+extra_bytes;
- objp = binary_val_rel(real_bin,src_base);
+ objp = binary_val(real_bin);
if (thing_subtag(*objp) == HEAP_BINARY_SUBTAG) {
ErlHeapBin* from = (ErlHeapBin *) objp;
ErlHeapBin* to;
@@ -401,7 +799,7 @@ Eterm copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
} else {
ProcBin* from = (ProcBin *) objp;
ProcBin* to;
-
+
ASSERT(thing_subtag(*objp) == REFC_BINARY_SUBTAG);
if (from->flags) {
erts_emasculate_writable_binary(from);
@@ -411,14 +809,14 @@ Eterm copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
to->thing_word = HEADER_PROC_BIN;
to->size = real_size;
to->val = from->val;
- erts_refc_inc(&to->val->refc, 2);
+ erts_refc_inc(&to->val->intern.refc, 2);
to->bytes = from->bytes + offset;
to->next = off_heap->first;
to->flags = 0;
off_heap->first = (struct erl_off_heap_header*) to;
OH_OVERHEAD(off_heap, to->size / sizeof(Eterm));
}
- *argp = make_binary_rel(hbot, dst_base);
+ *argp = make_binary(hbot);
if (extra_bytes != 0) {
ErlSubBin* res;
hbot -= ERL_SUB_BIN_SIZE;
@@ -430,7 +828,7 @@ Eterm copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
res->offs = 0;
res->is_writable = 0;
res->orig = *argp;
- *argp = make_binary_rel(hbot, dst_base);
+ *argp = make_binary(hbot);
}
break;
}
@@ -448,37 +846,70 @@ Eterm copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
funp->next = off_heap->first;
off_heap->first = (struct erl_off_heap_header*) funp;
erts_refc_inc(&funp->fe->refc, 2);
- *argp = make_fun_rel(tp, dst_base);
+ *argp = make_fun(tp);
}
break;
case EXTERNAL_PID_SUBTAG:
case EXTERNAL_PORT_SUBTAG:
case EXTERNAL_REF_SUBTAG:
{
- ExternalThing *etp = (ExternalThing *) htop;
-
- i = thing_arityval(hdr) + 1;
+ ExternalThing *etp = (ExternalThing *) objp;
+ erts_refc_inc(&etp->node->refc, 2);
+ }
+ L_off_heap_node_container_common:
+ {
+ struct erl_off_heap_header *ohhp;
+ ohhp = (struct erl_off_heap_header *) htop;
+ i = thing_arityval(hdr) + 1;
+ *argp = make_boxed(htop);
tp = htop;
while (i--) {
*htop++ = *objp++;
}
- etp->next = off_heap->first;
- off_heap->first = (struct erl_off_heap_header*)etp;
- erts_refc_inc(&etp->node->refc, 2);
+ ohhp->next = off_heap->first;
+ off_heap->first = ohhp;
- *argp = make_external_rel(tp, dst_base);
+ }
+ break;
+ case MAP_SUBTAG:
+ tp = htop;
+ switch (MAP_HEADER_TYPE(hdr)) {
+ case MAP_HEADER_TAG_FLATMAP_HEAD :
+ i = flatmap_get_size(objp) + 3;
+ *argp = make_flatmap(htop);
+ while (i--) {
+ *htop++ = *objp++;
+ }
+ break;
+ case MAP_HEADER_TAG_HAMT_HEAD_BITMAP :
+ case MAP_HEADER_TAG_HAMT_HEAD_ARRAY :
+ *htop++ = *objp++;
+ case MAP_HEADER_TAG_HAMT_NODE_BITMAP :
+ i = 1 + hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ while (i--) { *htop++ = *objp++; }
+ *argp = make_hashmap(tp);
+ break;
+ default:
+ erts_exit(ERTS_ABORT_EXIT, "copy_struct: bad hashmap type %d\n", MAP_HEADER_TYPE(hdr));
}
break;
case BIN_MATCHSTATE_SUBTAG:
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"copy_struct: matchstate term not allowed");
+ case REF_SUBTAG:
+ if (is_magic_ref_thing(objp)) {
+ ErtsMRefThing *mreft = (ErtsMRefThing *) objp;
+ erts_refc_inc(&mreft->mb->intern.refc, 2);
+ goto L_off_heap_node_container_common;
+ }
+ /* Fall through... */
default:
i = thing_arityval(hdr)+1;
hbot -= i;
tp = hbot;
- *argp = make_boxed_rel(hbot, dst_base);
+ *argp = make_boxed(hbot);
while (i--) {
*tp++ = *objp++;
}
@@ -494,22 +925,893 @@ Eterm copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
}
}
+ if (bsz) {
+ *hpp = htop;
+ *bsz = hend - hbot;
+ } else {
#ifdef DEBUG
- if (htop != hbot)
- erl_exit(ERTS_ABORT_EXIT,
- "Internal error in copy_struct() when copying %T:"
- " htop=%p != hbot=%p (sz=%beu)\n",
- org_obj, htop, hbot, org_sz);
+ if (!eq(org_obj, res)) {
+ erts_exit(ERTS_ABORT_EXIT,
+ "Internal error in copy_struct() when copying %T:"
+ " not equal to copy %T\n",
+ org_obj, res);
+ }
+ if (htop != hbot)
+ erts_exit(ERTS_ABORT_EXIT,
+ "Internal error in copy_struct() when copying %T:"
+ " htop=%p != hbot=%p (sz=%beu)\n",
+ org_obj, htop, hbot, org_sz);
#else
- if (htop > hbot) {
- erl_exit(ERTS_ABORT_EXIT,
- "Internal error in copy_struct(): htop, hbot overrun\n");
- }
+ if (htop > hbot) {
+ erts_exit(ERTS_ABORT_EXIT,
+ "Internal error in copy_struct(): htop, hbot overrun\n");
+ }
#endif
- *hpp = (Eterm *) (hstart+hsize);
+ *hpp = (Eterm *) (hstart+hsize);
+ }
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] result is at %p\n", mypid, res));
return res;
}
+
+/*
+ * Machinery for the table used by the sharing-preserving copier
+ * Using an ESTACK but not very transparently; consider refactoring
+ */
+
+#define DECLARE_SHTABLE(s) \
+ DECLARE_ESTACK(s); \
+ Uint ESTK_CONCAT(s,_offset) = 0
+#define DESTROY_SHTABLE(s) DESTROY_ESTACK(s)
+#define SHTABLE_INCR 4
+#define SHTABLE_NEXT(s) ESTK_CONCAT(s,_offset)
+#define SHTABLE_PUSH(s,x,y,b) \
+do { \
+ if (s.sp > s.end - SHTABLE_INCR) { \
+ erl_grow_estack(&(s), SHTABLE_INCR); \
+ } \
+ *s.sp++ = (x); \
+ *s.sp++ = (y); \
+ *s.sp++ = (Eterm) NULL; \
+ *s.sp++ = (Eterm) (b); \
+ ESTK_CONCAT(s,_offset) += SHTABLE_INCR; \
+} while(0)
+#define SHTABLE_X(s,e) (s.start[e])
+#define SHTABLE_Y(s,e) (s.start[(e)+1])
+#define SHTABLE_FWD(s,e) ((Eterm *) (s.start[(e)+2]))
+#define SHTABLE_FWD_UPD(s,e,p) (s.start[(e)+2] = (Eterm) (p))
+#define SHTABLE_REV(s,e) ((Eterm *) (s.start[(e)+3]))
+
+#define LIST_SHARED_UNPROCESSED ((Eterm) 0)
+#define LIST_SHARED_PROCESSED ((Eterm) 1)
+
+#define HEAP_ELEM_TO_BE_FILLED _unchecked_make_list(NULL)
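
Each SHTABLE_PUSH above appends a four-word record to the ESTACK; written as a struct for readability (the real code indexes a flat Eterm array, so the names here are illustrative):

    typedef unsigned long Eterm;

    typedef struct {
        Eterm  x;    /* SHTABLE_X: saved CAR, or saved boxed header             */
        Eterm  y;    /* SHTABLE_Y: saved CDR, or THE_NON_VALUE for a boxed term */
        Eterm *fwd;  /* SHTABLE_FWD: address of the copy, filled in pass two    */
        Eterm *rev;  /* SHTABLE_REV: back pointer to the original, mangled cell */
    } ShEntry;       /* SHTABLE_INCR == 4: one such record per push             */

    int main(void)
    {
        /* On an LP64 target all four fields are word sized, matching the
         * four-slot stride used by the macros above. */
        return sizeof(ShEntry) == 4 * sizeof(Eterm) ? 0 : 1;
    }
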
+
+
+/*
+ * Specialized macros for using/reusing the persistent state
+ */
+
+#define DECLARE_EQUEUE_INIT_INFO(q, info) \
+ UWord* EQUE_DEF_QUEUE(q) = info->queue_default; \
+ ErtsEQueue q = { \
+ EQUE_DEF_QUEUE(q), /* start */ \
+ EQUE_DEF_QUEUE(q), /* front */ \
+ EQUE_DEF_QUEUE(q), /* back */ \
+ 1, /* possibly_empty */ \
+ EQUE_DEF_QUEUE(q) + DEF_EQUEUE_SIZE, /* end */ \
+ ERTS_ALC_T_ESTACK /* alloc_type */ \
+ }
+
+#define DECLARE_EQUEUE_FROM_INFO(q, info) \
+ /* no EQUE_DEF_QUEUE(q), read-only */ \
+ ErtsEQueue q = { \
+ info->queue_start, /* start */ \
+ info->queue_start, /* front */ \
+ info->queue_start, /* back */ \
+ 1, /* possibly_empty */ \
+ info->queue_end, /* end */ \
+ info->queue_alloc_type /* alloc_type */ \
+ }
+
+#define DECLARE_BITSTORE_INIT_INFO(s, info) \
+ UWord* WSTK_DEF_STACK(s) = info->bitstore_default; \
+ ErtsWStack s = { \
+ WSTK_DEF_STACK(s), /* wstart */ \
+ WSTK_DEF_STACK(s), /* wsp */ \
+ WSTK_DEF_STACK(s) + DEF_WSTACK_SIZE, /* wend */ \
+ WSTK_DEF_STACK(s), /* wdflt */ \
+ ERTS_ALC_T_ESTACK /* alloc_type */ \
+ }; \
+ int WSTK_CONCAT(s,_bitoffs) = 0; \
+ /* no WSTK_CONCAT(s,_offset), write-only */ \
+ UWord WSTK_CONCAT(s,_buffer) = 0
+
+#define DECLARE_BITSTORE_FROM_INFO(s, info) \
+ /* no WSTK_DEF_STACK(s), read-only */ \
+ ErtsWStack s = { \
+ info->bitstore_start, /* wstart */ \
+ NULL, /* wsp, read-only */ \
+ NULL, /* wend, read-only */ \
+ NULL, /* wdef, read-only */ \
+ info->bitstore_alloc_type /* alloc_type */ \
+ }; \
+ int WSTK_CONCAT(s,_bitoffs) = 0; \
+ int WSTK_CONCAT(s,_offset) = 0; \
+ UWord WSTK_CONCAT(s,_buffer) = 0
+
+#define DECLARE_SHTABLE_INIT_INFO(s, info) \
+ Eterm* ESTK_DEF_STACK(s) = info->shtable_default; \
+ ErtsEStack s = { \
+ ESTK_DEF_STACK(s), /* start */ \
+ ESTK_DEF_STACK(s), /* sp */ \
+ ESTK_DEF_STACK(s) + DEF_ESTACK_SIZE, /* end */ \
+ ESTK_DEF_STACK(s), /* default */ \
+ ERTS_ALC_T_ESTACK /* alloc_type */ \
+ }; \
+ Uint ESTK_CONCAT(s,_offset) = 0
+
+#define DECLARE_SHTABLE_FROM_INFO(s, info) \
+ /* no ESTK_DEF_STACK(s), read-only */ \
+ ErtsEStack s = { \
+ info->shtable_start, /* start */ \
+ NULL, /* sp, read-only */ \
+ NULL, /* end, read-only */ \
+ NULL, /* def, read-only */ \
+ info->shtable_alloc_type /* alloc_type */ \
+ }; \
+ /* no ESTK_CONCAT(s,_offset), read-only */
+
+/*
+ * Copy object "obj" preserving sharing.
+ * First half: count size and calculate sharing.
+ */
+Uint copy_shared_calculate(Eterm obj, erts_shcopy_t *info)
+{
+ Uint sum;
+ Uint e;
+ unsigned sz;
+ Eterm* ptr;
+ Eterm *lit_purge_ptr = info->lit_purge_ptr;
+ Uint lit_purge_sz = info->lit_purge_sz;
+#ifdef DEBUG
+ Eterm mypid = erts_get_current_pid();
+#endif
+
+ DECLARE_EQUEUE_INIT_INFO(s, info);
+ DECLARE_BITSTORE_INIT_INFO(b, info);
+ DECLARE_SHTABLE_INIT_INFO(t, info);
+
+ /* step #0:
+ -------------------------------------------------------
+ get rid of the easy cases first:
+ - copying constants
+ - if not a proper process, do flat copy
+ */
+
+ if (IS_CONST(obj))
+ return 0;
+
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] copy_shared_calculate %p\n", mypid, obj));
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] message is %T\n", mypid, obj));
+
+ /* step #1:
+ -------------------------------------------------------
+ traverse the term and calculate the size;
+ when traversing, transform as you do in size_shared
+ but when you find shared objects:
+
+ a. add entry in the table, indexed by i
+ b. mark them:
+ b1. boxed terms, set header to (i | 11)
+ store (old header, NONV, NULL, backptr) in the entry
+ b2. cons cells, set CDR to NONV, set CAR to i
+ store (old CAR, old CDR, NULL, backptr) in the entry
+ */
+
+ sum = 0;
+
+ for (;;) {
+ switch (primary_tag(obj)) {
+ case TAG_PRIMARY_LIST: {
+ Eterm head, tail;
+ ptr = list_val(obj);
+ /* off heap list pointers are copied verbatim */
+ if (erts_is_literal(obj,ptr)) {
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] bypassed copying %p is %T\n", mypid, ptr, obj));
+ if (in_literal_purge_area(ptr))
+ info->literal_size += size_object(obj);
+ goto pop_next;
+ }
+ head = CAR(ptr);
+ tail = CDR(ptr);
+ /* if it's visited, don't count it;
+ if not already shared, make it shared and store it in the table */
+ if (primary_tag(tail) == TAG_PRIMARY_HEADER ||
+ primary_tag(head) == TAG_PRIMARY_HEADER) {
+ if (tail != THE_NON_VALUE) {
+ e = SHTABLE_NEXT(t);
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] tabling L %p\n", mypid, ptr));
+ SHTABLE_PUSH(t, head, tail, ptr);
+ CAR(ptr) = (e << _TAG_PRIMARY_SIZE) | LIST_SHARED_UNPROCESSED;
+ CDR(ptr) = THE_NON_VALUE;
+ }
+ goto pop_next;
+ }
+ /* else make it visited now */
+ switch (primary_tag(tail)) {
+ case TAG_PRIMARY_LIST:
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] mangling L/L %p\n", mypid, ptr));
+ CDR(ptr) = (tail - TAG_PRIMARY_LIST) | TAG_PRIMARY_HEADER;
+ break;
+ case TAG_PRIMARY_IMMED1:
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] mangling L/I %p\n", mypid, ptr));
+ CAR(ptr) = (head - primary_tag(head)) | TAG_PRIMARY_HEADER;
+ CDR(ptr) = (tail - TAG_PRIMARY_IMMED1) | primary_tag(head);
+ break;
+ case TAG_PRIMARY_BOXED:
+ BITSTORE_PUT(b, primary_tag(head));
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] mangling L/B %p\n", mypid, ptr));
+ CAR(ptr) = (head - primary_tag(head)) | TAG_PRIMARY_HEADER;
+ CDR(ptr) = (tail - TAG_PRIMARY_BOXED) | TAG_PRIMARY_HEADER;
+ break;
+ }
+ /* and count it */
+ sum += 2;
+ if (!IS_CONST(head)) {
+ EQUEUE_PUT(s, head);
+ }
+ obj = tail;
+ break;
+ }
+ case TAG_PRIMARY_BOXED: {
+ Eterm hdr;
+ ptr = boxed_val(obj);
+ /* off heap pointers to boxes are copied verbatim */
+ if (erts_is_literal(obj,ptr)) {
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] bypassed copying %p is %T\n", mypid, ptr, obj));
+ if (in_literal_purge_area(ptr))
+ info->literal_size += size_object(obj);
+ goto pop_next;
+ }
+ hdr = *ptr;
+ /* if it's visited, don't count it;
+ if not already shared, make it shared and store it in the table */
+ if (primary_tag(hdr) != TAG_PRIMARY_HEADER) {
+ if (primary_tag(hdr) == BOXED_VISITED) {
+ e = SHTABLE_NEXT(t);
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] tabling B %p\n", mypid, ptr));
+ SHTABLE_PUSH(t, hdr, THE_NON_VALUE, ptr);
+ *ptr = (e << _TAG_PRIMARY_SIZE) | BOXED_SHARED_UNPROCESSED;
+ }
+ goto pop_next;
+ }
+ /* else make it visited now */
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] mangling B %p\n", mypid, ptr));
+ *ptr = (hdr - primary_tag(hdr)) + BOXED_VISITED;
+ /* and count it */
+ ASSERT(is_header(hdr));
+ switch (hdr & _TAG_HEADER_MASK) {
+ case ARITYVAL_SUBTAG: {
+ int arity = header_arity(hdr);
+ sum += arity + 1;
+ if (arity == 0) { /* Empty tuple -- unusual. */
+ goto pop_next;
+ }
+ while (arity-- > 0) {
+ obj = *++ptr;
+ if (!IS_CONST(obj)) {
+ EQUEUE_PUT(s, obj);
+ }
+ }
+ goto pop_next;
+ }
+ case FUN_SUBTAG: {
+ ErlFunThing* funp = (ErlFunThing *) ptr;
+ unsigned eterms = 1 /* creator */ + funp->num_free;
+ sz = thing_arityval(hdr);
+ sum += 1 /* header */ + sz + eterms;
+ ptr += 1 /* header */ + sz;
+ while (eterms-- > 0) {
+ obj = *ptr++;
+ if (!IS_CONST(obj)) {
+ EQUEUE_PUT(s, obj);
+ }
+ }
+ goto pop_next;
+ }
+ case SUB_BINARY_SUBTAG: {
+ ErlSubBin* sb = (ErlSubBin *) ptr;
+ Eterm real_bin = sb->orig;
+ Uint bit_offset = sb->bitoffs;
+ Uint bit_size = sb->bitsize;
+ size_t size = sb->size;
+ Uint extra_bytes;
+ Eterm hdr;
+ if (bit_size + bit_offset > 8) {
+ sum += ERL_SUB_BIN_SIZE;
+ extra_bytes = 2;
+ } else if (bit_size + bit_offset > 0) {
+ sum += ERL_SUB_BIN_SIZE;
+ extra_bytes = 1;
+ } else {
+ extra_bytes = 0;
+ }
+ ASSERT(is_boxed(real_bin) &&
+ (((*boxed_val(real_bin)) &
+ (_TAG_HEADER_MASK - _BINARY_XXX_MASK - BOXED_VISITED_MASK))
+ == _TAG_HEADER_REFC_BIN));
+ hdr = *_unchecked_binary_val(real_bin) & ~BOXED_VISITED_MASK;
+ if (thing_subtag(hdr) == HEAP_BINARY_SUBTAG) {
+ sum += heap_bin_size(size+extra_bytes);
+ } else {
+ ASSERT(thing_subtag(hdr) == REFC_BINARY_SUBTAG);
+ sum += PROC_BIN_SIZE;
+ }
+ goto pop_next;
+ }
+ case MAP_SUBTAG:
+ switch (MAP_HEADER_TYPE(hdr)) {
+ case MAP_HEADER_TAG_FLATMAP_HEAD : {
+ flatmap_t *mp = (flatmap_t *) ptr;
+ Uint n = flatmap_get_size(mp) + 1;
+ sum += n + 2;
+ ptr += 2; /* hdr + size words */
+ while (n--) {
+ obj = *ptr++;
+ if (!IS_CONST(obj)) {
+ EQUEUE_PUT(s, obj);
+ }
+ }
+ goto pop_next;
+ }
+ case MAP_HEADER_TAG_HAMT_HEAD_BITMAP :
+ case MAP_HEADER_TAG_HAMT_HEAD_ARRAY :
+ case MAP_HEADER_TAG_HAMT_NODE_BITMAP : {
+ Uint n = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ sum += 1 + n + header_arity(hdr);
+ ptr += 1 + header_arity(hdr);
+
+ if (n == 0) {
+ goto pop_next;
+ }
+ while(n--) {
+ obj = *ptr++;
+ if (!IS_CONST(obj)) {
+ EQUEUE_PUT(s, obj);
+ }
+ }
+ goto pop_next;
+ }
+ default:
+ erts_exit(ERTS_ABORT_EXIT, "copy_shared_calculate: bad hashmap type %d\n", MAP_HEADER_TYPE(hdr));
+ }
+ case BIN_MATCHSTATE_SUBTAG:
+ erts_exit(ERTS_ABORT_EXIT,
+ "size_shared: matchstate term not allowed");
+ default:
+ sum += thing_arityval(hdr) + 1;
+ goto pop_next;
+ }
+ break;
+ }
+ case TAG_PRIMARY_IMMED1:
+ pop_next:
+ if (EQUEUE_ISEMPTY(s)) {
+ /* add sentinel to the table */
+ SHTABLE_PUSH(t, THE_NON_VALUE, THE_NON_VALUE, NULL);
+ /* store persistent info */
+ BITSTORE_CLOSE(b);
+ info->queue_start = s.start;
+ info->queue_end = s.end;
+ info->queue_alloc_type = s.alloc_type;
+ info->bitstore_start = b.wstart;
+ info->bitstore_alloc_type = b.alloc_type;
+ info->shtable_start = t.start;
+ info->shtable_alloc_type = t.alloc_type;
+ /* single point of return: the size of the object */
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] size was: %u\n", mypid, sum));
+ return sum + info->literal_size;
+ }
+ obj = EQUEUE_GET(s);
+ break;
+ default:
+ erts_exit(ERTS_ABORT_EXIT, "[pid=%T] size_shared: bad tag for %#x\n", obj);
+ }
+ }
+}
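
copy_shared_calculate and copy_shared_perform form a two-phase protocol: pass one sizes the term and records sharing in *info, the caller allocates that many words, and pass two copies through the same info. A hedged usage sketch; INITIALIZE_SHCOPY and DESTROY_SHCOPY are assumed to be the companion macros from the header, which is not part of this hunk:

    /* Sketch only: assumes INITIALIZE_SHCOPY/DESTROY_SHCOPY set up and tear
     * down the queue/bitstore/table state embedded in erts_shcopy_t. */
    static Eterm
    copy_preserving_sharing(Process *to, Eterm term)
    {
        erts_shcopy_t info;
        Uint size;
        Eterm *hp, res;

        if (is_immed(term))                          /* immediates copy by value */
            return term;

        INITIALIZE_SHCOPY(info);                     /* assumed macro */
        size = copy_shared_calculate(term, &info);   /* pass 1: size + marks */
        hp = HAlloc(to, size);                       /* caller allocates */
        res = copy_shared_perform(term, size, &info, &hp, &to->off_heap);
        DESTROY_SHCOPY(info);                        /* assumed macro */
        return res;
    }
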
+
+/*
+ * Copy object "obj" preserving sharing.
+ * Second half: copy and restore the object.
+ */
+Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
+ Eterm** hpp, ErlOffHeap* off_heap) {
+ Uint e;
+ unsigned sz;
+ Eterm* ptr;
+ Eterm* hp;
+ Eterm* hscan;
+ Eterm result;
+ Eterm* resp;
+ Eterm *hbot, *hend;
+ unsigned remaining;
+ Eterm *lit_purge_ptr = info->lit_purge_ptr;
+ Uint lit_purge_sz = info->lit_purge_sz;
+#ifdef DEBUG
+ Eterm mypid = erts_get_current_pid();
+ Eterm saved_obj = obj;
+#endif
+
+ DECLARE_EQUEUE_FROM_INFO(s, info);
+ DECLARE_BITSTORE_FROM_INFO(b, info);
+ DECLARE_SHTABLE_FROM_INFO(t, info);
+
+ /* step #0:
+ -------------------------------------------------------
+ get rid of the easy cases first:
+ - copying constants
+ - if not a proper process, do flat copy
+ */
+
+ if (IS_CONST(obj))
+ return obj;
+
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] copy_shared_perform %p\n", mypid, obj));
+
+ /* step #2: was performed before this function was called
+ -------------------------------------------------------
+ allocate new space
+ */
+
+ hscan = hp = *hpp;
+ hbot = hend = hp + size;
+
+ /* step #3:
+ -------------------------------------------------------
+ traverse the term a second time and when traversing:
+ a. if the object is marked as shared
+ a1. if the entry contains a forwarding ptr, use that
+ a2. otherwise, copy it to the new space and store the
+ forwarding ptr to the entry
+ b. otherwise, reverse-transform as you do in size_shared
+ and copy to the new space
+ */
+
+ resp = &result;
+ remaining = 0;
+ for (;;) {
+ switch (primary_tag(obj)) {
+ case TAG_PRIMARY_LIST: {
+ Eterm head, tail;
+ ptr = list_val(obj);
+ /* off heap list pointers are copied verbatim */
+ if (erts_is_literal(obj,ptr)) {
+ if (!in_literal_purge_area(ptr)) {
+ *resp = obj;
+ } else {
+ Uint bsz = 0;
+ *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz, NULL); /* copy literal */
+ hbot -= bsz;
+ }
+ goto cleanup_next;
+ }
+ head = CAR(ptr);
+ tail = CDR(ptr);
+ /* if it is shared */
+ if (tail == THE_NON_VALUE) {
+ e = head >> _TAG_PRIMARY_SIZE;
+ /* if it has been processed, just use the forwarding pointer */
+ if (primary_tag(head) == LIST_SHARED_PROCESSED) {
+ *resp = make_list(SHTABLE_FWD(t, e));
+ goto cleanup_next;
+ }
+ /* else, let's process it now,
+ copy it and keep the forwarding pointer */
+ else {
+ CAR(ptr) = (head - primary_tag(head)) + LIST_SHARED_PROCESSED;
+ head = SHTABLE_X(t, e);
+ tail = SHTABLE_Y(t, e);
+ ptr = &(SHTABLE_X(t, e));
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] tabled L %p is %p\n", mypid, ptr, SHTABLE_REV(t, e)));
+ SHTABLE_FWD_UPD(t, e, hp);
+ }
+ }
+ /* if not already clean, clean it up and copy it */
+ if (primary_tag(tail) == TAG_PRIMARY_HEADER) {
+ if (primary_tag(head) == TAG_PRIMARY_HEADER) {
+ Eterm saved;
+ BITSTORE_FETCH(b, saved);
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] unmangling L/B %p\n", mypid, ptr));
+ CAR(ptr) = head = (head - TAG_PRIMARY_HEADER) + saved;
+ CDR(ptr) = tail = (tail - TAG_PRIMARY_HEADER) + TAG_PRIMARY_BOXED;
+ } else {
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] unmangling L/L %p\n", mypid, ptr));
+ CDR(ptr) = tail = (tail - TAG_PRIMARY_HEADER) + TAG_PRIMARY_LIST;
+ }
+ } else if (primary_tag(head) == TAG_PRIMARY_HEADER) {
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] unmangling L/I %p\n", mypid, ptr));
+ CAR(ptr) = head = (head - TAG_PRIMARY_HEADER) | primary_tag(tail);
+ CDR(ptr) = tail = (tail - primary_tag(tail)) | TAG_PRIMARY_IMMED1;
+ } else {
+ ASSERT(0 && "cannot come here");
+ goto cleanup_next;
+ }
+ /* and its children too */
+ if (IS_CONST(head)) {
+ CAR(hp) = head;
+ } else {
+ EQUEUE_PUT_UNCHECKED(s, head);
+ CAR(hp) = HEAP_ELEM_TO_BE_FILLED;
+ }
+ *resp = make_list(hp);
+ resp = &(CDR(hp));
+ hp += 2;
+ obj = tail;
+ break;
+ }
+ case TAG_PRIMARY_BOXED: {
+ Eterm hdr;
+ ptr = boxed_val(obj);
+ /* off heap pointers to boxes are copied verbatim */
+ if (erts_is_literal(obj,ptr)) {
+ if (!in_literal_purge_area(ptr)) {
+ *resp = obj;
+ } else {
+ Uint bsz = 0;
+ *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz, NULL); /* copy literal */
+ hbot -= bsz;
+ }
+ goto cleanup_next;
+ }
+ hdr = *ptr;
+ /* clean it up, unless it's already clean or shared and processed */
+ switch (primary_tag(hdr)) {
+ case TAG_PRIMARY_HEADER:
+ ASSERT(0 && "cannot come here");
+ /* if it is shared and has been processed,
+ just use the forwarding pointer */
+ case BOXED_SHARED_PROCESSED:
+ e = hdr >> _TAG_PRIMARY_SIZE;
+ *resp = make_boxed(SHTABLE_FWD(t, e));
+ goto cleanup_next;
+ /* if it is shared but has not been processed yet, let's process
+ it now: copy it and keep the forwarding pointer */
+ case BOXED_SHARED_UNPROCESSED:
+ e = hdr >> _TAG_PRIMARY_SIZE;
+ *ptr = (hdr - primary_tag(hdr)) + BOXED_SHARED_PROCESSED;
+ hdr = SHTABLE_X(t, e);
+ ASSERT(primary_tag(hdr) == BOXED_VISITED);
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] tabled B %p is %p\n", mypid, ptr, SHTABLE_REV(t, e)));
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] unmangling B %p\n", mypid, ptr));
+ SHTABLE_X(t, e) = hdr = (hdr - BOXED_VISITED) + TAG_PRIMARY_HEADER;
+ SHTABLE_FWD_UPD(t, e, hp);
+ break;
+ case BOXED_VISITED:
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] unmangling B %p\n", mypid, ptr));
+ *ptr = hdr = (hdr - BOXED_VISITED) + TAG_PRIMARY_HEADER;
+ break;
+ }
+ /* and its children too */
+ switch (hdr & _TAG_HEADER_MASK) {
+ case ARITYVAL_SUBTAG: {
+ int arity = header_arity(hdr);
+ *resp = make_boxed(hp);
+ *hp++ = hdr;
+ while (arity-- > 0) {
+ obj = *++ptr;
+ if (IS_CONST(obj)) {
+ *hp++ = obj;
+ } else {
+ EQUEUE_PUT_UNCHECKED(s, obj);
+ *hp++ = HEAP_ELEM_TO_BE_FILLED;
+ }
+ }
+ goto cleanup_next;
+ }
+ case FUN_SUBTAG: {
+ ErlFunThing* funp = (ErlFunThing *) ptr;
+ unsigned eterms = 1 /* creator */ + funp->num_free;
+ sz = thing_arityval(hdr);
+ funp = (ErlFunThing *) hp;
+ *resp = make_fun(hp);
+ *hp++ = hdr;
+ ptr++;
+ while (sz-- > 0) {
+ *hp++ = *ptr++;
+ }
+ while (eterms-- > 0) {
+ obj = *ptr++;
+ if (IS_CONST(obj)) {
+ *hp++ = obj;
+ } else {
+ EQUEUE_PUT_UNCHECKED(s, obj);
+ *hp++ = HEAP_ELEM_TO_BE_FILLED;
+ }
+ }
+ funp->next = off_heap->first;
+ off_heap->first = (struct erl_off_heap_header*) funp;
+ erts_refc_inc(&funp->fe->refc, 2);
+ goto cleanup_next;
+ }
+ case MAP_SUBTAG:
+ *resp = make_flatmap(hp);
+ *hp++ = hdr;
+ switch (MAP_HEADER_TYPE(hdr)) {
+ case MAP_HEADER_TAG_FLATMAP_HEAD : {
+ flatmap_t *mp = (flatmap_t *) ptr;
+ Uint n = flatmap_get_size(mp) + 1;
+ *hp++ = *++ptr; /* keys */
+ while (n--) {
+ obj = *++ptr;
+ if (IS_CONST(obj)) {
+ *hp++ = obj;
+ } else {
+ EQUEUE_PUT_UNCHECKED(s, obj);
+ *hp++ = HEAP_ELEM_TO_BE_FILLED;
+ }
+ }
+ goto cleanup_next;
+ }
+ case MAP_HEADER_TAG_HAMT_HEAD_BITMAP :
+ case MAP_HEADER_TAG_HAMT_HEAD_ARRAY :
+ *hp++ = *++ptr; /* total map size */
+ case MAP_HEADER_TAG_HAMT_NODE_BITMAP : {
+ Uint n = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ while (n--) {
+ obj = *++ptr;
+ if (IS_CONST(obj)) {
+ *hp++ = obj;
+ } else {
+ EQUEUE_PUT_UNCHECKED(s, obj);
+ *hp++ = HEAP_ELEM_TO_BE_FILLED;
+ }
+ }
+ goto cleanup_next;
+ }
+ default:
+ erts_exit(ERTS_ABORT_EXIT, "copy_shared_perform: bad hashmap type %d\n", MAP_HEADER_TYPE(hdr));
+ }
+ case REFC_BINARY_SUBTAG: {
+ ProcBin* pb = (ProcBin *) ptr;
+ sz = thing_arityval(hdr);
+ if (pb->flags) {
+ erts_emasculate_writable_binary(pb);
+ }
+ pb = (ProcBin *) hp;
+ *resp = make_binary(hp);
+ *hp++ = hdr;
+ ptr++;
+ while (sz-- > 0) {
+ *hp++ = *ptr++;
+ }
+ erts_refc_inc(&pb->val->intern.refc, 2);
+ pb->next = off_heap->first;
+ pb->flags = 0;
+ off_heap->first = (struct erl_off_heap_header*) pb;
+ OH_OVERHEAD(off_heap, pb->size / sizeof(Eterm));
+ goto cleanup_next;
+ }
+ case SUB_BINARY_SUBTAG: {
+ ErlSubBin* sb = (ErlSubBin *) ptr;
+ Eterm real_bin = sb->orig;
+ Uint bit_offset = sb->bitoffs;
+ Uint bit_size = sb->bitsize;
+ Uint offset = sb->offs;
+ size_t size = sb->size;
+ Uint extra_bytes;
+ Uint real_size;
+ if ((bit_size + bit_offset) > 8) {
+ extra_bytes = 2;
+ } else if ((bit_size + bit_offset) > 0) {
+ extra_bytes = 1;
+ } else {
+ extra_bytes = 0;
+ }
+ real_size = size+extra_bytes;
+ ASSERT(is_boxed(real_bin) &&
+ (((*boxed_val(real_bin)) &
+ (_TAG_HEADER_MASK - _BINARY_XXX_MASK - BOXED_VISITED_MASK))
+ == _TAG_HEADER_REFC_BIN));
+ ptr = _unchecked_binary_val(real_bin);
+ *resp = make_binary(hp);
+ if (extra_bytes != 0) {
+ ErlSubBin* res = (ErlSubBin *) hp;
+ hp += ERL_SUB_BIN_SIZE;
+ res->thing_word = HEADER_SUB_BIN;
+ res->size = size;
+ res->bitsize = bit_size;
+ res->bitoffs = bit_offset;
+ res->offs = 0;
+ res->is_writable = 0;
+ res->orig = make_binary(hp);
+ }
+ if (thing_subtag(*ptr & ~BOXED_VISITED_MASK) == HEAP_BINARY_SUBTAG) {
+ ErlHeapBin* from = (ErlHeapBin *) ptr;
+ ErlHeapBin* to = (ErlHeapBin *) hp;
+ hp += heap_bin_size(real_size);
+ to->thing_word = header_heap_bin(real_size);
+ to->size = real_size;
+ sys_memcpy(to->data, ((byte *)from->data)+offset, real_size);
+ } else {
+ ProcBin* from = (ProcBin *) ptr;
+ ProcBin* to = (ProcBin *) hp;
+ ASSERT(thing_subtag(*ptr & ~BOXED_VISITED_MASK) == REFC_BINARY_SUBTAG);
+ if (from->flags) {
+ erts_emasculate_writable_binary(from);
+ }
+ hp += PROC_BIN_SIZE;
+ to->thing_word = HEADER_PROC_BIN;
+ to->size = real_size;
+ to->val = from->val;
+ erts_refc_inc(&to->val->intern.refc, 2);
+ to->bytes = from->bytes + offset;
+ to->next = off_heap->first;
+ to->flags = 0;
+ off_heap->first = (struct erl_off_heap_header*) to;
+ OH_OVERHEAD(off_heap, to->size / sizeof(Eterm));
+ }
+ goto cleanup_next;
+ }
+ case EXTERNAL_PID_SUBTAG:
+ case EXTERNAL_PORT_SUBTAG:
+ case EXTERNAL_REF_SUBTAG:
+ {
+ ExternalThing *etp = (ExternalThing *) ptr;
+ erts_refc_inc(&etp->node->refc, 2);
+ }
+ off_heap_node_container_common:
+ {
+ struct erl_off_heap_header *ohhp;
+ ohhp = (struct erl_off_heap_header *) hp;
+ sz = thing_arityval(hdr);
+ *resp = make_boxed(hp);
+ *hp++ = hdr;
+ ptr++;
+ while (sz-- > 0) {
+ *hp++ = *ptr++;
+ }
+ ohhp->next = off_heap->first;
+ off_heap->first = ohhp;
+ goto cleanup_next;
+ }
+ case REF_SUBTAG:
+ if (is_magic_ref_thing(ptr)) {
+ ErtsMRefThing *mreft = (ErtsMRefThing *) ptr;
+ erts_refc_inc(&mreft->mb->intern.refc, 2);
+ goto off_heap_node_container_common;
+ }
+ /* Fall through... */
+ default:
+ sz = thing_arityval(hdr);
+ *resp = make_boxed(hp);
+ *hp++ = hdr;
+ ptr++;
+ while (sz-- > 0) {
+ *hp++ = *ptr++;
+ }
+ goto cleanup_next;
+ }
+ break;
+ }
+ case TAG_PRIMARY_IMMED1:
+ *resp = obj;
+ cleanup_next:
+ if (EQUEUE_ISEMPTY(s)) {
+ goto all_clean;
+ }
+ obj = EQUEUE_GET(s);
+ for (;;) {
+ ASSERT(hscan < hp);
+ if (remaining == 0) {
+ if (*hscan == HEAP_ELEM_TO_BE_FILLED) {
+ resp = hscan;
+ hscan += 2;
+ break; /* scanning loop */
+ } else if (primary_tag(*hscan) == TAG_PRIMARY_HEADER) {
+ switch (*hscan & _TAG_HEADER_MASK) {
+ case ARITYVAL_SUBTAG:
+ remaining = header_arity(*hscan);
+ hscan++;
+ break;
+ case FUN_SUBTAG: {
+ ErlFunThing* funp = (ErlFunThing *) hscan;
+ hscan += 1 + thing_arityval(*hscan);
+ remaining = 1 + funp->num_free;
+ break;
+ }
+ case MAP_SUBTAG:
+ switch (MAP_HEADER_TYPE(*hscan)) {
+ case MAP_HEADER_TAG_FLATMAP_HEAD : {
+ flatmap_t *mp = (flatmap_t *) hscan;
+ remaining = flatmap_get_size(mp) + 1;
+ hscan += 2;
+ break;
+ }
+ case MAP_HEADER_TAG_HAMT_HEAD_BITMAP :
+ case MAP_HEADER_TAG_HAMT_HEAD_ARRAY :
+ case MAP_HEADER_TAG_HAMT_NODE_BITMAP :
+ remaining = hashmap_bitcount(MAP_HEADER_VAL(*hscan));
+ hscan += MAP_HEADER_ARITY(*hscan) + 1;
+ break;
+ default:
+ erts_exit(ERTS_ABORT_EXIT,
+ "copy_shared_perform: bad hashmap type %d\n",
+ MAP_HEADER_TYPE(*hscan));
+ }
+ break;
+ case SUB_BINARY_SUBTAG:
+ ASSERT(((ErlSubBin *) hscan)->bitoffs +
+ ((ErlSubBin *) hscan)->bitsize > 0);
+ hscan += ERL_SUB_BIN_SIZE;
+ break;
+ default:
+ hscan += 1 + thing_arityval(*hscan);
+ break;
+ }
+ } else {
+ hscan++;
+ }
+ } else if (*hscan == HEAP_ELEM_TO_BE_FILLED) {
+ resp = hscan++;
+ remaining--;
+ break; /* scanning loop */
+ } else {
+ hscan++;
+ remaining--;
+ }
+ }
+ ASSERT(resp < hp);
+ break;
+ default:
+ erts_exit(ERTS_ABORT_EXIT, "size_shared: bad tag for %#x\n", obj);
+ }
+ }
+
+ /* step #4:
+ -------------------------------------------------------
+ traverse the table and reverse-transform all stored entries
+ */
+
+all_clean:
+ for (e = 0; ; e += SHTABLE_INCR) {
+ ptr = SHTABLE_REV(t, e);
+ if (ptr == NULL)
+ break;
+ VERBOSE(DEBUG_SHCOPY, ("[copy] restoring shared: %x\n", ptr));
+ /* entry was a list */
+ if (SHTABLE_Y(t, e) != THE_NON_VALUE) {
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] untabling L %p\n", mypid, ptr));
+ CAR(ptr) = SHTABLE_X(t, e);
+ CDR(ptr) = SHTABLE_Y(t, e);
+ }
+ /* entry was boxed */
+ else {
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] untabling B %p\n", mypid, ptr));
+ *ptr = SHTABLE_X(t, e);
+ ASSERT(primary_tag(*ptr) == TAG_PRIMARY_HEADER);
+ }
+ }
+
+#ifdef DEBUG
+ if (eq(saved_obj, result) == 0) {
+ erts_fprintf(stderr, "original = %T\n", saved_obj);
+ erts_fprintf(stderr, "copy = %T\n", result);
+ erts_exit(ERTS_ABORT_EXIT, "copy (shared) not equal to source\n");
+ }
+#endif
+
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] original was %T\n", mypid, saved_obj));
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] copy is %T\n", mypid, result));
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] result is at %p\n", mypid, result));
+
+ ASSERT(hbot == hp);
+ ASSERT(size == ((hp - *hpp) + (hend - hbot)));
+ *hpp = hend;
+ return result;
+}
+
+
/*
* Copy a term that is guaranteed to be contained in a single
* heap block. The heap block is copied word by word, and any
@@ -519,21 +1821,12 @@ Eterm copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
*
* NOTE: Assumes that term is a tuple (ptr is an untagged tuple ptr).
*/
-#if HALFWORD_HEAP
-Eterm copy_shallow_rel(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap,
- Eterm* src_base)
-#else
Eterm copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
-#endif
{
Eterm* tp = ptr;
Eterm* hp = *hpp;
const Eterm res = make_tuple(hp);
-#if HALFWORD_HEAP
- const Sint offs = COMPRESS_POINTER(hp - (tp - src_base));
-#else
const Sint offs = (hp - tp) * sizeof(Eterm);
-#endif
while (sz--) {
Eterm val = *tp++;
@@ -554,7 +1847,7 @@ Eterm copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
case REFC_BINARY_SUBTAG:
{
ProcBin* pb = (ProcBin *) (tp-1);
- erts_refc_inc(&pb->val->refc, 2);
+ erts_refc_inc(&pb->val->intern.refc, 2);
OH_OVERHEAD(off_heap, pb->size / sizeof(Eterm));
}
goto off_heap_common;
@@ -565,11 +1858,6 @@ Eterm copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
erts_refc_inc(&funp->fe->refc, 2);
}
goto off_heap_common;
-
- case MAP_SUBTAG:
- *hp++ = *tp++;
- sz--;
- break;
case EXTERNAL_PID_SUBTAG:
case EXTERNAL_PORT_SUBTAG:
case EXTERNAL_REF_SUBTAG:
@@ -590,6 +1878,15 @@ Eterm copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
off_heap->first = ohh;
}
break;
+ case REF_SUBTAG: {
+ ErtsRefThing *rtp = (ErtsRefThing *) (tp - 1);
+ if (is_magic_ref_thing(rtp)) {
+ ErtsMRefThing *mreft = (ErtsMRefThing *) rtp;
+ erts_refc_inc(&mreft->mb->intern.refc, 2);
+ goto off_heap_common;
+ }
+ /* Fall through... */
+ }
default:
{
int tari = header_arity(val);
@@ -605,7 +1902,6 @@ Eterm copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
}
}
*hpp = hp;
-
return res;
}
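
copy_shallow relocates every pointer in the block by one constant byte offset: tagged pointers are byte addresses with the tag in the low bits, so adding the source-to-destination distance moves the pointer and leaves the tag intact. The relocation in isolation (standalone model):

    #include <assert.h>
    #include <stdint.h>

    typedef uintptr_t Eterm;
    #define TAG_MASK ((Eterm)3)

    int main(void)
    {
        Eterm src[4], dst[4];
        /* Byte distance, as in: offs = (hp - tp) * sizeof(Eterm) above. */
        intptr_t offs = (char *)dst - (char *)src;

        Eterm tagged = (Eterm)&src[2] | 2;     /* pointer into src, tag bits 10 */
        Eterm moved  = tagged + offs;          /* relocate: the tag rides along */

        assert((moved & TAG_MASK) == 2);
        assert((moved & ~TAG_MASK) == (Eterm)&dst[2]);
        return 0;
    }
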
@@ -614,18 +1910,24 @@ Eterm copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
* move markers.
 * Typically used to copy a multi-fragmented message (from a NIF).
*/
-void move_multi_frags(Eterm** hpp, ErlOffHeap* off_heap, ErlHeapFragment* first,
- Eterm* refs, unsigned nrefs)
+void erts_move_multi_frags(Eterm** hpp, ErlOffHeap* off_heap, ErlHeapFragment* first,
+ Eterm* refs, unsigned nrefs, int literals)
{
ErlHeapFragment* bp;
Eterm* hp_start = *hpp;
Eterm* hp_end;
Eterm* hp;
unsigned i;
+ Eterm literal_tag;
+
+#ifdef TAG_LITERAL_PTR
+ literal_tag = literals ? TAG_LITERAL_PTR : (Eterm) 0;
+#else
+ literal_tag = (Eterm) 0;
+#endif
for (bp=first; bp!=NULL; bp=bp->next) {
- move_one_frag(hpp, bp->mem, bp->used_size, off_heap);
- OH_OVERHEAD(off_heap, bp->off_heap.overhead);
+ move_one_frag(hpp, bp, off_heap, literals);
}
hp_end = *hpp;
for (hp=hp_start; hp<hp_end; ++hp) {
@@ -638,6 +1940,9 @@ void move_multi_frags(Eterm** hpp, ErlOffHeap* off_heap, ErlHeapFragment* first,
val = *ptr;
if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
+#ifdef TAG_LITERAL_PTR
+ val |= literal_tag;
+#endif
*hp = val;
}
break;
@@ -645,7 +1950,11 @@ void move_multi_frags(Eterm** hpp, ErlOffHeap* off_heap, ErlHeapFragment* first,
ptr = list_val(gval);
val = *ptr;
if (IS_MOVED_CONS(val)) {
- *hp = ptr[1];
+ val = ptr[1];
+#ifdef TAG_LITERAL_PTR
+ val |= literal_tag;
+#endif
+ *hp = val;
}
break;
case TAG_PRIMARY_HEADER:
@@ -656,15 +1965,15 @@ void move_multi_frags(Eterm** hpp, ErlOffHeap* off_heap, ErlHeapFragment* first,
}
}
for (i=0; i<nrefs; ++i) {
- refs[i] = follow_moved(refs[i]);
+ refs[i] = follow_moved(refs[i], literal_tag);
}
}
static void
-move_one_frag(Eterm** hpp, Eterm* src, Uint src_sz, ErlOffHeap* off_heap)
+move_one_frag(Eterm** hpp, ErlHeapFragment* frag, ErlOffHeap* off_heap, int literals)
{
- Eterm* ptr = src;
- Eterm* end = ptr + src_sz;
+ Eterm* ptr = frag->mem;
+ Eterm* end = ptr + frag->used_size;
Eterm dummy_ref;
Eterm* hp = *hpp;
@@ -676,8 +1985,11 @@ move_one_frag(Eterm** hpp, Eterm* src, Uint src_sz, ErlOffHeap* off_heap)
if (is_header(val)) {
struct erl_off_heap_header* hdr = (struct erl_off_heap_header*)hp;
ASSERT(ptr + header_arity(val) < end);
- MOVE_BOXED(ptr, val, hp, &dummy_ref);
+ move_boxed(&ptr, val, &hp, &dummy_ref);
switch (val & _HEADER_SUBTAG_MASK) {
+ case REF_SUBTAG:
+ if (is_ordinary_ref_thing(hdr))
+ break;
case REFC_BINARY_SUBTAG:
case FUN_SUBTAG:
case EXTERNAL_PID_SUBTAG:
@@ -690,10 +2002,11 @@ move_one_frag(Eterm** hpp, Eterm* src, Uint src_sz, ErlOffHeap* off_heap)
}
else { /* must be a cons cell */
ASSERT(ptr+1 < end);
- MOVE_CONS(ptr, val, hp, &dummy_ref);
+ move_cons(&ptr, val, &hp, &dummy_ref);
ptr += 2;
}
}
*hpp = hp;
+ OH_OVERHEAD(off_heap, frag->off_heap.overhead);
+ frag->off_heap.first = NULL;
}
-
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index ec07ddcd9c..bc168fc58d 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -24,6 +25,7 @@
/* define this to get a lot of debug output */
/* #define ERTS_DIST_MSG_DBG */
+/* #define ERTS_RAW_DIST_MSG_DBG */
#ifdef HAVE_CONFIG_H
# include "config.h"
@@ -44,6 +46,8 @@
#include "erl_thr_progress.h"
#include "dtrace-wrapper.h"
+#define DIST_CTL_DEFAULT_SIZE 64
+
/* Turn this on to get printouts of all distribution messages
* which go on the line
*/
@@ -65,9 +69,13 @@ static void bw(byte *buf, ErlDrvSizeT sz)
static void
dist_msg_dbg(ErtsDistExternal *edep, char *what, byte *buf, int sz)
{
+ ErtsHeapFactory factory;
+ DeclareTmpHeapNoproc(ctl_default,DIST_CTL_DEFAULT_SIZE);
+ Eterm* ctl = ctl_default;
byte *extp = edep->extp;
Eterm msg;
- Sint size = erts_decode_dist_ext_size(edep);
+ Sint ctl_len;
+ Sint size = ctl_len = erts_decode_dist_ext_size(edep);
if (size < 0) {
erts_fprintf(stderr,
"DIST MSG DEBUG: erts_decode_dist_ext_size(%s) failed:\n",
@@ -75,10 +83,9 @@ dist_msg_dbg(ErtsDistExternal *edep, char *what, byte *buf, int sz)
bw(buf, sz);
}
else {
- Eterm *hp;
ErlHeapFragment *mbuf = new_message_buffer(size);
- hp = mbuf->mem;
- msg = erts_decode_dist_ext(&hp, &mbuf->off_heap, edep);
+ erts_factory_static_init(&factory, ctl, ctl_len, &mbuf->off_heap);
+ msg = erts_decode_dist_ext(&factory, edep);
if (is_value(msg))
erts_fprintf(stderr, " %s: %T\n", what, msg);
else {
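
The factory-based decode above follows the two-pass pattern used throughout this patch: size the external term first, then build it through a heap factory. A condensed sketch with a caller-supplied buffer (the helper name is illustrative only, not part of the patch):

static Eterm decode_into(ErtsDistExternal *edep, Eterm *heap, Uint heap_size,
                         ErlOffHeap *off_heap)
{
    ErtsHeapFactory factory;
    Eterm term = THE_NON_VALUE;
    Sint sz = erts_decode_dist_ext_size(edep);       /* pass 1: size */
    if (sz >= 0 && (Uint) sz <= heap_size) {
        erts_factory_static_init(&factory, heap, sz, off_heap);
        term = erts_decode_dist_ext(&factory, edep); /* pass 2: build */
        erts_factory_close(&factory);
    }
    return term;                      /* THE_NON_VALUE on decode failure */
}
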
@@ -114,17 +121,17 @@ Export* dexit_trap = NULL;
Export* dmonitor_p_trap = NULL;
/* local variables */
-
+static Export *dist_ctrl_put_data_trap;
/* forward declarations */
static void clear_dist_entry(DistEntry*);
-static int dsig_send(ErtsDSigData *, Eterm, Eterm, int);
+static int dsig_send_ctl(ErtsDSigData* dsdp, Eterm ctl, int force_busy);
static void send_nodes_mon_msgs(Process *, Eterm, Eterm, Eterm, Eterm);
static void init_nodes_monitors(void);
-static erts_smp_atomic_t no_caches;
-static erts_smp_atomic_t no_nodes;
+static erts_atomic_t no_caches;
+static erts_atomic_t no_nodes;
struct {
Eterm reason;
@@ -137,8 +144,8 @@ delete_cache(ErtsAtomCache *cache)
{
if (cache) {
erts_free(ERTS_ALC_T_DCACHE, (void *) cache);
- ASSERT(erts_smp_atomic_read_nob(&no_caches) > 0);
- erts_smp_atomic_dec_nob(&no_caches);
+ ASSERT(erts_atomic_read_nob(&no_caches) > 0);
+ erts_atomic_dec_nob(&no_caches);
}
}
@@ -149,14 +156,12 @@ create_cache(DistEntry *dep)
int i;
ErtsAtomCache *cp;
- ERTS_SMP_LC_ASSERT(
- is_internal_port(dep->cid)
- && erts_lc_is_port_locked(erts_port_lookup_raw(dep->cid)));
+ ERTS_LC_ASSERT(is_nil(dep->cid));
ASSERT(!dep->cache);
dep->cache = cp = (ErtsAtomCache*) erts_alloc(ERTS_ALC_T_DCACHE,
sizeof(ErtsAtomCache));
- erts_smp_atomic_inc_nob(&no_caches);
+ erts_atomic_inc_nob(&no_caches);
for (i = 0; i < sizeof(cp->in_arr)/sizeof(cp->in_arr[0]); i++) {
cp->in_arr[i] = THE_NON_VALUE;
cp->out_arr[i] = THE_NON_VALUE;
@@ -165,15 +170,17 @@ create_cache(DistEntry *dep)
Uint erts_dist_cache_size(void)
{
- return (Uint) erts_smp_atomic_read_mb(&no_caches)*sizeof(ErtsAtomCache);
+ return (Uint) erts_atomic_read_mb(&no_caches)*sizeof(ErtsAtomCache);
}
static ErtsProcList *
-get_suspended_on_de(DistEntry *dep, Uint32 unset_qflgs)
+get_suspended_on_de(DistEntry *dep, erts_aint32_t unset_qflgs)
{
- ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&dep->qlock));
- dep->qflgs &= ~unset_qflgs;
- if (dep->qflgs & ERTS_DE_QFLG_EXIT) {
+ erts_aint32_t qflgs;
+ ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&dep->qlock));
+ qflgs = erts_atomic32_read_band_acqb(&dep->qflgs, ~unset_qflgs);
+ qflgs &= ~unset_qflgs;
+ if (qflgs & ERTS_DE_QFLG_EXIT) {
/* No resume when exit has been scheduled */
return NULL;
}
@@ -252,12 +259,12 @@ static void doit_monitor_net_exits(ErtsMonitor *mon, void *vnecp)
DistEntry *dep = ((NetExitsContext *) vnecp)->dep;
ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK;
- rp = erts_pid2proc(NULL, 0, mon->pid, rp_locks);
+ rp = erts_pid2proc(NULL, 0, mon->u.pid, rp_locks);
if (!rp)
goto done;
if (mon->type == MON_ORIGIN) {
- /* local pid is beeing monitored */
+ /* local pid is being monitored */
rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
/* ASSERT(rmon != NULL); nope, can happen during process exit */
if (rmon != NULL) {
@@ -271,21 +278,20 @@ static void doit_monitor_net_exits(ErtsMonitor *mon, void *vnecp)
rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
/* ASSERT(rmon != NULL); can happen during process exit */
if (rmon != NULL) {
+ ASSERT(rmon->type == MON_ORIGIN);
ASSERT(is_atom(rmon->name) || is_nil(rmon->name));
watched = (is_atom(rmon->name)
? TUPLE2(lhp, rmon->name, dep->sysname)
- : rmon->pid);
-#ifdef ERTS_SMP
+ : rmon->u.pid);
rp_locks |= ERTS_PROC_LOCKS_MSG_SEND;
- erts_smp_proc_lock(rp, ERTS_PROC_LOCKS_MSG_SEND);
-#endif
+ erts_proc_lock(rp, ERTS_PROC_LOCKS_MSG_SEND);
erts_queue_monitor_message(rp, &rp_locks, mon->ref, am_process,
watched, am_noconnection);
erts_destroy_monitor(rmon);
}
UnUseTmpHeapNoproc(3);
}
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
done:
erts_destroy_monitor(mon);
}
@@ -331,10 +337,10 @@ static void doit_link_net_exits_sub(ErtsLink *sublnk, void *vlnecp)
erts_destroy_link(rlnk);
if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) {
/* We didn't exit the process and it is traced */
- trace_proc(NULL, rp, am_getting_unlinked, sublnk->pid);
+ trace_proc(NULL, 0, rp, am_getting_unlinked, sublnk->pid);
}
}
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
}
done:
erts_destroy_link(sublnk);
@@ -372,10 +378,11 @@ static void doit_node_link_net_exits(ErtsLink *lnk, void *vnecp)
ASSERT(lnk->type == LINK_NODE);
if (is_internal_pid(lnk->pid)) {
ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK;
- rp = erts_pid2proc(NULL, 0, lnk->pid, rp_locks);
- if (!rp) {
+ ErlOffHeap *ohp;
+ rp = erts_proc_lookup(lnk->pid);
+ if (!rp)
goto done;
- }
+ erts_proc_lock(rp, rp_locks);
rlnk = erts_remove_link(&ERTS_P_LINKS(rp), name);
if (rlnk != NULL) {
ASSERT(is_atom(rlnk->pid) && (rlnk->type == LINK_NODE));
@@ -383,18 +390,16 @@ static void doit_node_link_net_exits(ErtsLink *lnk, void *vnecp)
}
n = ERTS_LINK_REFC(lnk);
for (i = 0; i < n; ++i) {
- ErlHeapFragment* bp;
- ErlOffHeap *ohp;
Eterm tup;
- Eterm *hp = erts_alloc_message_heap(3,&bp,&ohp,rp,&rp_locks);
+ Eterm *hp;
+ ErtsMessage *msgp;
+
+ msgp = erts_alloc_message_heap(rp, &rp_locks,
+ 3, &hp, &ohp);
tup = TUPLE2(hp, am_nodedown, name);
- erts_queue_message(rp, &rp_locks, bp, tup, NIL
-#ifdef USE_VM_PROBES
- , NIL
-#endif
- );
+ erts_queue_message(rp, rp_locks, msgp, tup, am_system);
}
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
}
done:
erts_destroy_link(lnk);
@@ -406,16 +411,16 @@ set_node_not_alive(void *unused)
ErlHeapFragment *bp;
Eterm nodename = erts_this_dist_entry->sysname;
- ASSERT(erts_smp_atomic_read_nob(&no_nodes) == 0);
+ ASSERT(erts_atomic_read_nob(&no_nodes) == 0);
- erts_smp_thr_progress_block();
+ erts_thr_progress_block();
erts_set_this_node(am_Noname, 0);
erts_is_alive = 0;
send_nodes_mon_msgs(NULL, am_nodedown, nodename, am_visible, nodedown.reason);
nodedown.reason = NIL;
bp = nodedown.bp;
nodedown.bp = NULL;
- erts_smp_thr_progress_unblock();
+ erts_thr_progress_unblock();
if (bp)
free_message_buffer(bp);
}
@@ -423,7 +428,7 @@ set_node_not_alive(void *unused)
static ERTS_INLINE void
dec_no_nodes(void)
{
- erts_aint_t no = erts_smp_atomic_dec_read_mb(&no_nodes);
+ erts_aint_t no = erts_atomic_dec_read_mb(&no_nodes);
ASSERT(no >= 0);
ASSERT(erts_get_scheduler_id()); /* Need to be a scheduler */
if (no == 0)
@@ -436,12 +441,40 @@ static ERTS_INLINE void
inc_no_nodes(void)
{
#ifdef DEBUG
- erts_aint_t no = erts_smp_atomic_read_nob(&no_nodes);
+ erts_aint_t no = erts_atomic_read_nob(&no_nodes);
ASSERT(erts_is_alive ? no > 0 : no == 0);
#endif
- erts_smp_atomic_inc_mb(&no_nodes);
+ erts_atomic_inc_mb(&no_nodes);
}
-
+
+static void
+kill_dist_ctrl_proc(void *vpid)
+{
+ Eterm pid = (Eterm) vpid;
+ ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
+ Process *rp = erts_pid2proc(NULL, 0, pid, rp_locks);
+ if (rp) {
+ erts_send_exit_signal(NULL, rp->common.id, rp, &rp_locks,
+ am_kill, NIL, NULL, 0);
+ if (rp_locks)
+ erts_proc_unlock(rp, rp_locks);
+ }
+}
+
+static void
+schedule_kill_dist_ctrl_proc(Eterm pid)
+{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ int sched_id = 1;
+ if (!esdp || ERTS_SCHEDULER_IS_DIRTY(esdp))
+ sched_id = 1;
+ else
+ sched_id = (int) esdp->no;
+ erts_schedule_misc_aux_work(sched_id,
+ kill_dist_ctrl_proc,
+ (void *) (UWord) pid);
+}
+
/*
* proc is currently running or exiting process.
*/
@@ -451,58 +484,62 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason)
if (dep == erts_this_dist_entry) { /* Net kernel has died (clean up!!) */
DistEntry *tdep;
- int no_dist_port = 0;
+ int no_dist_ctrl = 0;
Eterm nd_reason = (reason == am_no_network
? am_no_network
: am_net_kernel_terminated);
- erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
+ erts_rwmtx_rlock(&erts_dist_table_rwmtx);
for (tdep = erts_hidden_dist_entries; tdep; tdep = tdep->next)
- no_dist_port++;
+ no_dist_ctrl++;
for (tdep = erts_visible_dist_entries; tdep; tdep = tdep->next)
- no_dist_port++;
+ no_dist_ctrl++;
/* KILL all port controllers */
- if (no_dist_port == 0)
- erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
+ if (no_dist_ctrl == 0)
+ erts_rwmtx_runlock(&erts_dist_table_rwmtx);
else {
Eterm def_buf[128];
int i = 0;
- Eterm *dist_port;
+ Eterm *dist_ctrl;
- if (no_dist_port <= sizeof(def_buf)/sizeof(def_buf[0]))
- dist_port = &def_buf[0];
+ if (no_dist_ctrl <= sizeof(def_buf)/sizeof(def_buf[0]))
+ dist_ctrl = &def_buf[0];
else
- dist_port = erts_alloc(ERTS_ALC_T_TMP,
- sizeof(Eterm)*no_dist_port);
+ dist_ctrl = erts_alloc(ERTS_ALC_T_TMP,
+ sizeof(Eterm)*no_dist_ctrl);
for (tdep = erts_hidden_dist_entries; tdep; tdep = tdep->next) {
- ASSERT(is_internal_port(tdep->cid));
- dist_port[i++] = tdep->cid;
+ ASSERT(is_internal_port(tdep->cid) || is_internal_pid(tdep->cid));
+ dist_ctrl[i++] = tdep->cid;
}
for (tdep = erts_visible_dist_entries; tdep; tdep = tdep->next) {
- ASSERT(is_internal_port(tdep->cid));
- dist_port[i++] = tdep->cid;
+ ASSERT(is_internal_port(tdep->cid) || is_internal_pid(tdep->cid));
+ dist_ctrl[i++] = tdep->cid;
}
- erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
-
- for (i = 0; i < no_dist_port; i++) {
- Port *prt = erts_port_lookup(dist_port[i],
- ERTS_PORT_SFLGS_INVALID_LOOKUP);
- if (!prt)
- continue;
- ASSERT(erts_atomic32_read_nob(&prt->state)
- & ERTS_PORT_SFLG_DISTRIBUTION);
-
- erts_port_exit(NULL, ERTS_PORT_SIG_FLG_FORCE_SCHED,
- prt, dist_port[i], nd_reason, NULL);
+ erts_rwmtx_runlock(&erts_dist_table_rwmtx);
+
+ for (i = 0; i < no_dist_ctrl; i++) {
+ if (is_internal_pid(dist_ctrl[i]))
+ schedule_kill_dist_ctrl_proc(dist_ctrl[i]);
+ else {
+ Port *prt = erts_port_lookup(dist_ctrl[i],
+ ERTS_PORT_SFLGS_INVALID_LOOKUP);
+ if (prt) {
+ ASSERT(erts_atomic32_read_nob(&prt->state)
+ & ERTS_PORT_SFLG_DISTRIBUTION);
+
+ erts_port_exit(NULL, ERTS_PORT_SIG_FLG_FORCE_SCHED,
+ prt, dist_ctrl[i], nd_reason, NULL);
+ }
+ }
}
- if (dist_port != &def_buf[0])
- erts_free(ERTS_ALC_T_TMP, dist_port);
+ if (dist_ctrl != &def_buf[0])
+ erts_free(ERTS_ALC_T_TMP, dist_ctrl);
}
/*
- * When last dist port exits, node will be taken
+ * When last dist ctrl exits, node will be taken
* from alive to not alive.
*/
ASSERT(is_nil(nodedown.reason) && !nodedown.bp);
@@ -519,52 +556,51 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason)
&nodedown.bp->off_heap);
}
}
- else { /* Call from distribution port */
+ else { /* Call from distribution controller (port/process) */
NetExitsContext nec = {dep};
ErtsLink *nlinks;
ErtsLink *node_links;
ErtsMonitor *monitors;
Uint32 flags;
- erts_smp_atomic_set_mb(&dep->dist_cmd_scheduled, 1);
- erts_smp_de_rwlock(dep);
+ erts_atomic_set_mb(&dep->dist_cmd_scheduled, 1);
+ erts_de_rwlock(dep);
- ERTS_SMP_LC_ASSERT(is_internal_port(dep->cid)
- && erts_lc_is_port_locked(erts_port_lookup_raw(dep->cid)));
+ if (is_internal_port(dep->cid)) {
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(erts_port_lookup_raw(dep->cid)));
- if (erts_port_task_is_scheduled(&dep->dist_cmd))
- erts_port_task_abort(&dep->dist_cmd);
+ if (erts_port_task_is_scheduled(&dep->dist_cmd))
+ erts_port_task_abort(&dep->dist_cmd);
+ }
if (dep->status & ERTS_DE_SFLG_EXITING) {
#ifdef DEBUG
- erts_smp_mtx_lock(&dep->qlock);
- ASSERT(dep->qflgs & ERTS_DE_QFLG_EXIT);
- erts_smp_mtx_unlock(&dep->qlock);
+ ASSERT(erts_atomic32_read_nob(&dep->qflgs) & ERTS_DE_QFLG_EXIT);
#endif
}
else {
dep->status |= ERTS_DE_SFLG_EXITING;
- erts_smp_mtx_lock(&dep->qlock);
- ASSERT(!(dep->qflgs & ERTS_DE_QFLG_EXIT));
- dep->qflgs |= ERTS_DE_QFLG_EXIT;
- erts_smp_mtx_unlock(&dep->qlock);
+ erts_mtx_lock(&dep->qlock);
+ ASSERT(!(erts_atomic32_read_nob(&dep->qflgs) & ERTS_DE_QFLG_EXIT));
+ erts_atomic32_read_bor_relb(&dep->qflgs, ERTS_DE_QFLG_EXIT);
+ erts_mtx_unlock(&dep->qlock);
}
- erts_smp_de_links_lock(dep);
+ erts_de_links_lock(dep);
monitors = dep->monitors;
nlinks = dep->nlinks;
node_links = dep->node_links;
dep->monitors = NULL;
dep->nlinks = NULL;
dep->node_links = NULL;
- erts_smp_de_links_unlock(dep);
+ erts_de_links_unlock(dep);
nodename = dep->sysname;
flags = dep->flags;
erts_set_dist_entry_not_connected(dep);
- erts_smp_de_rwunlock(dep);
+ erts_de_rwunlock(dep);
erts_sweep_monitors(monitors, &doit_monitor_net_exits, (void *) &nec);
erts_sweep_links(nlinks, &doit_link_net_exits, (void *) &nec);
@@ -598,8 +634,8 @@ void init_dist(void)
nodedown.reason = NIL;
nodedown.bp = NULL;
- erts_smp_atomic_init_nob(&no_nodes, 0);
- erts_smp_atomic_init_nob(&no_caches, 0);
+ erts_atomic_init_nob(&no_nodes, 0);
+ erts_atomic_init_nob(&no_caches, 0);
/* Lookup/Install all references to trap functions */
dsend2_trap = trap_function(am_dsend,2);
@@ -611,6 +647,9 @@ void init_dist(void)
dgroup_leader_trap = trap_function(am_dgroup_leader,2);
dexit_trap = trap_function(am_dexit, 2);
dmonitor_p_trap = trap_function(am_dmonitor_p, 2);
+ dist_ctrl_put_data_trap = erts_export_put(am_erts_internal,
+ am_dist_ctrl_put_data,
+ 2);
}
#define ErtsDistOutputBuf2Binary(OB) \
@@ -622,9 +661,6 @@ alloc_dist_obuf(Uint size)
ErtsDistOutputBuf *obuf;
Uint obuf_size = sizeof(ErtsDistOutputBuf)+sizeof(byte)*(size-1);
Binary *bin = erts_bin_drv_alloc(obuf_size);
- bin->flags = BIN_FLAG_DRV;
- erts_refc_init(&bin->refc, 1);
- bin->orig_size = (SWord) obuf_size;
obuf = (ErtsDistOutputBuf *) &bin->orig_bytes[0];
#ifdef DEBUG
obuf->dbg_pattern = ERTS_DIST_OUTPUT_BUF_DBG_PATTERN;
@@ -638,8 +674,7 @@ free_dist_obuf(ErtsDistOutputBuf *obuf)
{
Binary *bin = ErtsDistOutputBuf2Binary(obuf);
ASSERT(obuf->dbg_pattern == ERTS_DIST_OUTPUT_BUF_DBG_PATTERN);
- if (erts_refc_dectest(&bin->refc, 0) == 0)
- erts_bin_free(bin);
+ erts_bin_release(bin);
}
static ERTS_INLINE Sint
@@ -656,19 +691,24 @@ static void clear_dist_entry(DistEntry *dep)
ErtsProcList *suspendees;
ErtsDistOutputBuf *obuf;
- erts_smp_de_rwlock(dep);
+ erts_de_rwlock(dep);
+ erts_atomic_set_nob(&dep->input_handler,
+ (erts_aint_t) NIL);
cache = dep->cache;
dep->cache = NULL;
#ifdef DEBUG
- erts_smp_de_links_lock(dep);
+ erts_de_links_lock(dep);
ASSERT(!dep->nlinks);
ASSERT(!dep->node_links);
ASSERT(!dep->monitors);
- erts_smp_de_links_unlock(dep);
+ erts_de_links_unlock(dep);
#endif
- erts_smp_mtx_lock(&dep->qlock);
+ erts_mtx_lock(&dep->qlock);
+
+ erts_atomic64_set_nob(&dep->in, 0);
+ erts_atomic64_set_nob(&dep->out, 0);
if (!dep->out_queue.last)
obuf = dep->finalized_out_queue.first;
@@ -677,17 +717,24 @@ static void clear_dist_entry(DistEntry *dep)
obuf = dep->out_queue.first;
}
+ if (dep->tmp_out_queue.first) {
+ dep->tmp_out_queue.last->next = obuf;
+ obuf = dep->tmp_out_queue.first;
+ }
+
dep->out_queue.first = NULL;
dep->out_queue.last = NULL;
+ dep->tmp_out_queue.first = NULL;
+ dep->tmp_out_queue.last = NULL;
dep->finalized_out_queue.first = NULL;
dep->finalized_out_queue.last = NULL;
dep->status = 0;
suspendees = get_suspended_on_de(dep, ERTS_DE_QFLGS_ALL);
- erts_smp_mtx_unlock(&dep->qlock);
- erts_smp_atomic_set_nob(&dep->dist_cmd_scheduled, 0);
+ erts_mtx_unlock(&dep->qlock);
+ erts_atomic_set_nob(&dep->dist_cmd_scheduled, 0);
dep->send = NULL;
- erts_smp_de_rwunlock(dep);
+ erts_de_rwunlock(dep);
erts_resume_processes(suspendees);
@@ -702,13 +749,57 @@ static void clear_dist_entry(DistEntry *dep)
}
if (obufsize) {
- erts_smp_mtx_lock(&dep->qlock);
- ASSERT(dep->qsize >= obufsize);
- dep->qsize -= obufsize;
- erts_smp_mtx_unlock(&dep->qlock);
+ erts_mtx_lock(&dep->qlock);
+ ASSERT(erts_atomic_read_nob(&dep->qsize) >= obufsize);
+ erts_atomic_add_nob(&dep->qsize,
+ (erts_aint_t) -obufsize);
+ erts_mtx_unlock(&dep->qlock);
+ }
+}
+
+int erts_dsend_context_dtor(Binary* ctx_bin)
+{
+ ErtsSendContext* ctx = ERTS_MAGIC_BIN_DATA(ctx_bin);
+ switch (ctx->dss.phase) {
+ case ERTS_DSIG_SEND_PHASE_MSG_SIZE:
+ DESTROY_SAVED_WSTACK(&ctx->dss.u.sc.wstack);
+ break;
+ case ERTS_DSIG_SEND_PHASE_MSG_ENCODE:
+ DESTROY_SAVED_WSTACK(&ctx->dss.u.ec.wstack);
+ break;
+ default:;
+ }
+ if (ctx->dss.phase >= ERTS_DSIG_SEND_PHASE_ALLOC && ctx->dss.obuf) {
+ free_dist_obuf(ctx->dss.obuf);
+ }
+ if (ctx->dep_to_deref)
+ erts_deref_dist_entry(ctx->dep_to_deref);
+
+ return 1;
+}
+
+Eterm erts_dsend_export_trap_context(Process* p, ErtsSendContext* ctx)
+{
+ struct exported_ctx {
+ ErtsSendContext ctx;
+ ErtsAtomCacheMap acm;
+ };
+ Binary* ctx_bin = erts_create_magic_binary(sizeof(struct exported_ctx),
+ erts_dsend_context_dtor);
+ struct exported_ctx* dst = ERTS_MAGIC_BIN_DATA(ctx_bin);
+ Eterm* hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+
+ sys_memcpy(&dst->ctx, ctx, sizeof(ErtsSendContext));
+ ASSERT(ctx->dss.ctl == make_tuple(ctx->ctl_heap));
+ dst->ctx.dss.ctl = make_tuple(dst->ctx.ctl_heap);
+ if (ctx->dss.acmp) {
+ sys_memcpy(&dst->acm, ctx->dss.acmp, sizeof(ErtsAtomCacheMap));
+ dst->ctx.dss.acmp = &dst->acm;
}
+ return erts_mk_magic_ref(&hp, &MSO(p), ctx_bin);
}
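
Once the sending process traps out, the saved context is recovered from the magic reference when it is scheduled back in. A hypothetical unpacking step (the argument name and the surrounding continuation BIF are assumptions, not part of this patch):

/* Hypothetical continuation inside the trap BIF: */
Binary *ctx_bin = erts_magic_ref2bin(magic_ref_arg);   /* assumed BIF argument */
ErtsSendContext *ctx = (ErtsSendContext *) ERTS_MAGIC_BIN_DATA(ctx_bin);
int result;

ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(ctx_bin) == erts_dsend_context_dtor);
result = erts_dsig_send(&ctx->dsd, &ctx->dss);         /* resume where we left off */
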
+
/*
* The erts_dsig_send_*() functions implemented below, sends asynchronous
* distributed signals to other Erlang nodes. Before sending a distributed
@@ -731,7 +822,7 @@ erts_dsig_send_link(ErtsDSigData *dsdp, Eterm local, Eterm remote)
int res;
UseTmpHeapNoproc(4);
- res = dsig_send(dsdp, ctl, THE_NON_VALUE, 0);
+ res = dsig_send_ctl(dsdp, ctl, 0);
UnUseTmpHeapNoproc(4);
return res;
}
@@ -744,13 +835,13 @@ erts_dsig_send_unlink(ErtsDSigData *dsdp, Eterm local, Eterm remote)
int res;
UseTmpHeapNoproc(4);
- res = dsig_send(dsdp, ctl, THE_NON_VALUE, 0);
+ res = dsig_send_ctl(dsdp, ctl, 0);
UnUseTmpHeapNoproc(4);
return res;
}
-/* A local process that's beeing monitored by a remote one exits. We send:
+/* A local process that's being monitored by a remote one exits. We send:
{DOP_MONITOR_P_EXIT, Local pid or name, Remote pid, ref, reason},
   which is rather sad as only the ref is needed, no pids... */
int
@@ -767,12 +858,12 @@ erts_dsig_send_m_exit(ErtsDSigData *dsdp, Eterm watcher, Eterm watched,
watched, watcher, ref, reason);
#ifdef DEBUG
- erts_smp_de_links_lock(dsdp->dep);
+ erts_de_links_lock(dsdp->dep);
ASSERT(!erts_lookup_monitor(dsdp->dep->monitors, ref));
- erts_smp_de_links_unlock(dsdp->dep);
+ erts_de_links_unlock(dsdp->dep);
#endif
- res = dsig_send(dsdp, ctl, THE_NON_VALUE, 1);
+ res = dsig_send_ctl(dsdp, ctl, 1);
UnUseTmpHeapNoproc(6);
return res;
}
@@ -793,7 +884,7 @@ erts_dsig_send_monitor(ErtsDSigData *dsdp, Eterm watcher, Eterm watched,
make_small(DOP_MONITOR_P),
watcher, watched, ref);
- res = dsig_send(dsdp, ctl, THE_NON_VALUE, 0);
+ res = dsig_send_ctl(dsdp, ctl, 0);
UnUseTmpHeapNoproc(5);
return res;
}
@@ -815,18 +906,17 @@ erts_dsig_send_demonitor(ErtsDSigData *dsdp, Eterm watcher,
make_small(DOP_DEMONITOR_P),
watcher, watched, ref);
- res = dsig_send(dsdp, ctl, THE_NON_VALUE, force);
+ res = dsig_send_ctl(dsdp, ctl, force);
UnUseTmpHeapNoproc(5);
return res;
}
int
-erts_dsig_send_msg(ErtsDSigData *dsdp, Eterm remote, Eterm message)
+erts_dsig_send_msg(Eterm remote, Eterm message, ErtsSendContext* ctx)
{
Eterm ctl;
- DeclareTmpHeapNoproc(ctl_heap,5);
Eterm token = NIL;
- Process *sender = dsdp->proc;
+ Process *sender = ctx->dsd.proc;
int res;
#ifdef USE_VM_PROBES
Sint tok_label = 0;
@@ -838,12 +928,7 @@ erts_dsig_send_msg(ErtsDSigData *dsdp, Eterm remote, Eterm message)
DTRACE_CHARBUF(receiver_name, 64);
#endif
- UseTmpHeapNoproc(5);
- if (SEQ_TRACE_TOKEN(sender) != NIL
-#ifdef USE_VM_PROBES
- && SEQ_TRACE_TOKEN(sender) != am_have_dt_utag
-#endif
- ) {
+ if (have_seqtrace(SEQ_TRACE_TOKEN(sender))) {
seq_trace_update_send(sender);
token = SEQ_TRACE_TOKEN(sender);
seq_trace_output(token, message, SEQ_TRACE_SEND, remote, sender);
@@ -852,13 +937,13 @@ erts_dsig_send_msg(ErtsDSigData *dsdp, Eterm remote, Eterm message)
*node_name = *sender_name = *receiver_name = '\0';
if (DTRACE_ENABLED(message_send) || DTRACE_ENABLED(message_send_remote)) {
erts_snprintf(node_name, sizeof(DTRACE_CHARBUF_NAME(node_name)),
- "%T", dsdp->dep->sysname);
+ "%T", ctx->dsd.dep->sysname);
erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)),
"%T", sender->common.id);
erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)),
"%T", remote);
msize = size_object(message);
- if (token != NIL && token != am_have_dt_utag) {
+ if (have_seqtrace(token)) {
tok_label = signed_val(SEQ_TRACE_T_LABEL(token));
tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token));
tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token));
@@ -866,27 +951,48 @@ erts_dsig_send_msg(ErtsDSigData *dsdp, Eterm remote, Eterm message)
}
#endif
- if (token != NIL)
- ctl = TUPLE4(&ctl_heap[0],
- make_small(DOP_SEND_TT), am_Cookie, remote, token);
- else
- ctl = TUPLE3(&ctl_heap[0], make_small(DOP_SEND), am_Cookie, remote);
+ if (token != NIL) {
+ Eterm el1, el2;
+ if (ctx->dep->flags & DFLAG_SEND_SENDER) {
+ el1 = make_small(DOP_SEND_SENDER_TT);
+ el2 = sender->common.id;
+ }
+ else {
+ el1 = make_small(DOP_SEND_TT);
+ el2 = am_Empty;
+ }
+ ctl = TUPLE4(&ctx->ctl_heap[0], el1, el2, remote, token);
+ }
+ else {
+ Eterm el1, el2;
+ if (ctx->dep->flags & DFLAG_SEND_SENDER) {
+ el1 = make_small(DOP_SEND_SENDER);
+ el2 = sender->common.id;
+ }
+ else {
+ el1 = make_small(DOP_SEND);
+ el2 = am_Empty;
+ }
+ ctl = TUPLE3(&ctx->ctl_heap[0], el1, el2, remote);
+ }
DTRACE6(message_send, sender_name, receiver_name,
msize, tok_label, tok_lastcnt, tok_serial);
DTRACE7(message_send_remote, sender_name, node_name, receiver_name,
msize, tok_label, tok_lastcnt, tok_serial);
- res = dsig_send(dsdp, ctl, message, 0);
- UnUseTmpHeapNoproc(5);
+ ctx->dss.ctl = ctl;
+ ctx->dss.msg = message;
+ ctx->dss.force_busy = 0;
+ res = erts_dsig_send(&ctx->dsd, &ctx->dss);
return res;
}
int
-erts_dsig_send_reg_msg(ErtsDSigData *dsdp, Eterm remote_name, Eterm message)
+erts_dsig_send_reg_msg(Eterm remote_name, Eterm message,
+ ErtsSendContext* ctx)
{
Eterm ctl;
- DeclareTmpHeapNoproc(ctl_heap,6);
Eterm token = NIL;
- Process *sender = dsdp->proc;
+ Process *sender = ctx->dsd.proc;
int res;
#ifdef USE_VM_PROBES
Sint tok_label = 0;
@@ -898,12 +1004,7 @@ erts_dsig_send_reg_msg(ErtsDSigData *dsdp, Eterm remote_name, Eterm message)
DTRACE_CHARBUF(receiver_name, 128);
#endif
- UseTmpHeapNoproc(6);
- if (SEQ_TRACE_TOKEN(sender) != NIL
-#ifdef USE_VM_PROBES
- && SEQ_TRACE_TOKEN(sender) != am_have_dt_utag
-#endif
- ) {
+ if (have_seqtrace(SEQ_TRACE_TOKEN(sender))) {
seq_trace_update_send(sender);
token = SEQ_TRACE_TOKEN(sender);
seq_trace_output(token, message, SEQ_TRACE_SEND, remote_name, sender);
@@ -912,13 +1013,13 @@ erts_dsig_send_reg_msg(ErtsDSigData *dsdp, Eterm remote_name, Eterm message)
*node_name = *sender_name = *receiver_name = '\0';
if (DTRACE_ENABLED(message_send) || DTRACE_ENABLED(message_send_remote)) {
erts_snprintf(node_name, sizeof(DTRACE_CHARBUF_NAME(node_name)),
- "%T", dsdp->dep->sysname);
+ "%T", ctx->dsd.dep->sysname);
erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)),
"%T", sender->common.id);
erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)),
"{%T,%s}", remote_name, node_name);
msize = size_object(message);
- if (token != NIL && token != am_have_dt_utag) {
+ if (have_seqtrace(token)) {
tok_label = signed_val(SEQ_TRACE_T_LABEL(token));
tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token));
tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token));
@@ -927,17 +1028,19 @@ erts_dsig_send_reg_msg(ErtsDSigData *dsdp, Eterm remote_name, Eterm message)
#endif
if (token != NIL)
- ctl = TUPLE5(&ctl_heap[0], make_small(DOP_REG_SEND_TT),
- sender->common.id, am_Cookie, remote_name, token);
+ ctl = TUPLE5(&ctx->ctl_heap[0], make_small(DOP_REG_SEND_TT),
+ sender->common.id, am_Empty, remote_name, token);
else
- ctl = TUPLE4(&ctl_heap[0], make_small(DOP_REG_SEND),
- sender->common.id, am_Cookie, remote_name);
+ ctl = TUPLE4(&ctx->ctl_heap[0], make_small(DOP_REG_SEND),
+ sender->common.id, am_Empty, remote_name);
DTRACE6(message_send, sender_name, receiver_name,
msize, tok_label, tok_lastcnt, tok_serial);
DTRACE7(message_send_remote, sender_name, node_name, receiver_name,
msize, tok_label, tok_lastcnt, tok_serial);
- res = dsig_send(dsdp, ctl, message, 0);
- UnUseTmpHeapNoproc(6);
+ ctx->dss.ctl = ctl;
+ ctx->dss.msg = message;
+ ctx->dss.force_busy = 0;
+ res = erts_dsig_send(&ctx->dsd, &ctx->dss);
return res;
}
@@ -961,11 +1064,7 @@ erts_dsig_send_exit_tt(ErtsDSigData *dsdp, Eterm local, Eterm remote,
#endif
UseTmpHeapNoproc(6);
- if (token != NIL
-#ifdef USE_VM_PROBES
- && token != am_have_dt_utag
-#endif
- ) {
+ if (have_seqtrace(token)) {
seq_trace_update_send(dsdp->proc);
seq_trace_output_exit(token, reason, SEQ_TRACE_SEND, remote, local);
ctl = TUPLE5(&ctl_heap[0],
@@ -984,7 +1083,7 @@ erts_dsig_send_exit_tt(ErtsDSigData *dsdp, Eterm local, Eterm remote,
"{%T,%s}", remote, node_name);
erts_snprintf(reason_str, sizeof(DTRACE_CHARBUF_NAME(reason_str)),
"%T", reason);
- if (token != NIL && token != am_have_dt_utag) {
+ if (have_seqtrace(token)) {
tok_label = signed_val(SEQ_TRACE_T_LABEL(token));
tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token));
tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token));
@@ -994,7 +1093,7 @@ erts_dsig_send_exit_tt(ErtsDSigData *dsdp, Eterm local, Eterm remote,
DTRACE7(process_exit_signal_remote, sender_name, node_name,
remote_name, reason_str, tok_label, tok_lastcnt, tok_serial);
    /* forced, i.e., ignore busy */
- res = dsig_send(dsdp, ctl, THE_NON_VALUE, 1);
+ res = dsig_send_ctl(dsdp, ctl, 1);
UnUseTmpHeapNoproc(6);
return res;
}
@@ -1010,7 +1109,7 @@ erts_dsig_send_exit(ErtsDSigData *dsdp, Eterm local, Eterm remote, Eterm reason)
ctl = TUPLE4(&ctl_heap[0],
make_small(DOP_EXIT), local, remote, reason);
    /* forced, i.e., ignore busy */
- res = dsig_send(dsdp, ctl, THE_NON_VALUE, 1);
+ res = dsig_send_ctl(dsdp, ctl, 1);
UnUseTmpHeapNoproc(5);
return res;
}
@@ -1026,7 +1125,7 @@ erts_dsig_send_exit2(ErtsDSigData *dsdp, Eterm local, Eterm remote, Eterm reason
ctl = TUPLE4(&ctl_heap[0],
make_small(DOP_EXIT2), local, remote, reason);
- res = dsig_send(dsdp, ctl, THE_NON_VALUE, 0);
+ res = dsig_send_ctl(dsdp, ctl, 0);
UnUseTmpHeapNoproc(5);
return res;
}
@@ -1043,7 +1142,7 @@ erts_dsig_send_group_leader(ErtsDSigData *dsdp, Eterm leader, Eterm remote)
ctl = TUPLE3(&ctl_heap[0],
make_small(DOP_GROUP_LEADER), leader, remote);
- res = dsig_send(dsdp, ctl, THE_NON_VALUE, 0);
+ res = dsig_send_ctl(dsdp, ctl, 0);
UnUseTmpHeapNoproc(4);
return res;
}
@@ -1091,7 +1190,6 @@ int erts_net_message(Port *prt,
byte *buf,
ErlDrvSizeT len)
{
-#define DIST_CTL_DEFAULT_SIZE 64
ErtsDistExternal ede;
byte *t;
Sint ctl_len;
@@ -1104,7 +1202,7 @@ int erts_net_message(Port *prt,
Process* rp;
DeclareTmpHeapNoproc(ctl_default,DIST_CTL_DEFAULT_SIZE);
Eterm* ctl = ctl_default;
- ErlOffHeap off_heap;
+ ErtsHeapFactory factory;
Eterm* hp;
Sint type;
Eterm token;
@@ -1113,25 +1211,25 @@ int erts_net_message(Port *prt,
ErtsLink *lnk;
Uint tuple_arity;
int res;
+ Uint32 connection_id;
#ifdef ERTS_DIST_MSG_DBG
ErlDrvSizeT orig_len = len;
#endif
UseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE);
- /* Thanks to Luke Gorrie */
- off_heap.first = NULL;
- off_heap.overhead = 0;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(!prt || erts_lc_is_port_locked(prt));
if (!erts_is_alive) {
UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE);
return 0;
}
- if (hlen != 0)
- goto data_error;
+
+
+ ASSERT(hlen == 0);
+
if (len == 0) { /* HANDLE TICK !!! */
UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE);
return 0;
@@ -1150,30 +1248,31 @@ int erts_net_message(Port *prt,
len--;
}
- if (len == 0) {
- PURIFY_MSG("data error");
- goto data_error;
- }
-
- res = erts_prepare_dist_ext(&ede, t, len, dep, dep->cache);
+ res = erts_prepare_dist_ext(&ede, t, len, dep, dep->cache, &connection_id);
- if (res >= 0)
- res = ctl_len = erts_decode_dist_ext_size(&ede);
- else {
+ switch (res) {
+ case ERTS_PREP_DIST_EXT_CLOSED:
+ return 0; /* Connection not alive; ignore signal... */
+ case ERTS_PREP_DIST_EXT_FAILED:
#ifdef ERTS_DIST_MSG_DBG
erts_fprintf(stderr, "DIST MSG DEBUG: erts_prepare_dist_ext() failed:\n");
bw(buf, orig_len);
#endif
- ctl_len = 0;
- }
-
- if (res < 0) {
+ goto data_error;
+ case ERTS_PREP_DIST_EXT_SUCCESS:
+ ctl_len = erts_decode_dist_ext_size(&ede);
+ if (ctl_len < 0) {
#ifdef ERTS_DIST_MSG_DBG
- erts_fprintf(stderr, "DIST MSG DEBUG: erts_decode_dist_ext_size(CTL) failed:\n");
- bw(buf, orig_len);
+ erts_fprintf(stderr, "DIST MSG DEBUG: erts_decode_dist_ext_size(CTL) failed:\n");
+ bw(buf, orig_len);
#endif
- PURIFY_MSG("data error");
- goto data_error;
+ PURIFY_MSG("data error");
+ goto data_error;
+ }
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Unexpected result from erts_prepare_dist_ext()");
+ break;
}
if (ctl_len > DIST_CTL_DEFAULT_SIZE) {
@@ -1181,14 +1280,15 @@ int erts_net_message(Port *prt,
}
hp = ctl;
- arg = erts_decode_dist_ext(&hp, &off_heap, &ede);
+ erts_factory_tmp_init(&factory, ctl, ctl_len, ERTS_ALC_T_DCTRL_BUF);
+ arg = erts_decode_dist_ext(&factory, &ede);
if (is_non_value(arg)) {
#ifdef ERTS_DIST_MSG_DBG
- erts_fprintf(stderr, "DIST MSG DEBUG: erts_dist_ext_size(CTL) failed:\n");
+ erts_fprintf(stderr, "DIST MSG DEBUG: erts_decode_dist_ext(CTL) failed:\n");
bw(buf, orig_len);
#endif
PURIFY_MSG("data error");
- goto data_error;
+ goto decode_error;
}
ctl_len = t - buf;
@@ -1203,6 +1303,7 @@ int erts_net_message(Port *prt,
}
token_size = 0;
+ token = NIL;
switch (type = unsigned_val(tuple[1])) {
case DOP_LINK:
@@ -1231,23 +1332,23 @@ int erts_net_message(Port *prt,
break;
}
- erts_smp_de_links_lock(dep);
+ erts_de_links_lock(dep);
res = erts_add_link(&ERTS_P_LINKS(rp), LINK_PID, from);
if (res < 0) {
/* It was already there! Lets skip the rest... */
- erts_smp_de_links_unlock(dep);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ erts_de_links_unlock(dep);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
break;
}
lnk = erts_add_or_lookup_link(&(dep->nlinks), LINK_PID, rp->common.id);
erts_add_link(&(ERTS_LINK_ROOT(lnk)), LINK_PID, from);
- erts_smp_de_links_unlock(dep);
+ erts_de_links_unlock(dep);
if (IS_TRACED_FL(rp, F_TRACE_PROCS))
- trace_proc(NULL, rp, am_getting_linked, from);
+ trace_proc(NULL, 0, rp, am_getting_linked, from);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
break;
case DOP_UNLINK: {
@@ -1270,10 +1371,10 @@ int erts_net_message(Port *prt,
lnk = erts_remove_link(&ERTS_P_LINKS(rp), from);
if (IS_TRACED_FL(rp, F_TRACE_PROCS) && lnk != NULL) {
- trace_proc(NULL, rp, am_getting_unlinked, from);
+ trace_proc(NULL, 0, rp, am_getting_unlinked, from);
}
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
erts_remove_dist_link(&dld, to, from, dep);
erts_destroy_dist_link(&dld);
@@ -1325,11 +1426,11 @@ int erts_net_message(Port *prt,
else {
if (is_atom(watched))
watched = rp->common.id;
- erts_smp_de_links_lock(dep);
+ erts_de_links_lock(dep);
erts_add_monitor(&(dep->monitors), MON_ORIGIN, ref, watched, name);
erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, ref, watcher, name);
- erts_smp_de_links_unlock(dep);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ erts_de_links_unlock(dep);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
}
break;
@@ -1351,14 +1452,14 @@ int erts_net_message(Port *prt,
goto invalid_message;
}
- erts_smp_de_links_lock(dep);
+ erts_de_links_lock(dep);
mon = erts_remove_monitor(&(dep->monitors),ref);
- erts_smp_de_links_unlock(dep);
+ erts_de_links_unlock(dep);
/* ASSERT(mon != NULL); can happen in case of broken dist message */
if (mon == NULL) {
break;
}
- watched = mon->pid;
+ watched = mon->u.pid;
erts_destroy_monitor(mon);
rp = erts_pid2proc_opt(NULL, 0,
watched, ERTS_PROC_LOCK_LINK,
@@ -1367,7 +1468,7 @@ int erts_net_message(Port *prt,
break;
}
mon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
ASSERT(mon != NULL);
if (mon == NULL) {
break;
@@ -1419,67 +1520,81 @@ int erts_net_message(Port *prt,
ErlOffHeap *ohp;
ASSERT(xsize);
heap_frag = erts_dist_ext_trailer(ede_copy);
- ERTS_INIT_HEAP_FRAG(heap_frag, token_size);
+ ERTS_INIT_HEAP_FRAG(heap_frag, token_size, token_size);
hp = heap_frag->mem;
ohp = &heap_frag->off_heap;
token = tuple[5];
token = copy_struct(token, token_size, &hp, ohp);
}
- erts_queue_dist_message(rp, &locks, ede_copy, token);
+ erts_queue_dist_message(rp, locks, ede_copy, token, from);
if (locks)
- erts_smp_proc_unlock(rp, locks);
+ erts_proc_unlock(rp, locks);
}
break;
+ case DOP_SEND_SENDER_TT: {
+ Uint xsize;
case DOP_SEND_TT:
+
if (tuple_arity != 4) {
goto invalid_message;
}
-
- token_size = size_object(tuple[4]);
- /* Fall through ... */
+
+ token = tuple[4];
+ token_size = size_object(token);
+ xsize = ERTS_HEAP_FRAG_SIZE(token_size);
+ goto send_common;
+
+ case DOP_SEND_SENDER:
case DOP_SEND:
+
+ token = NIL;
+ xsize = 0;
+ if (tuple_arity != 3)
+ goto invalid_message;
+
+ send_common:
+
/*
- * There is intentionally no testing of the cookie (it is always '')
- * from R9B and onwards.
+	 * If DOP_SEND_SENDER or DOP_SEND_SENDER_TT, element 2 contains
+ * the sender pid (i.e. DFLAG_SEND_SENDER is set); otherwise,
+ * the atom '' (empty cookie).
*/
+ ASSERT((type == DOP_SEND_SENDER || type == DOP_SEND_SENDER_TT)
+ ? (is_pid(tuple[2]) && (dep->flags & DFLAG_SEND_SENDER))
+ : tuple[2] == am_Empty);
+
#ifdef ERTS_DIST_MSG_DBG
dist_msg_dbg(&ede, "MSG", buf, orig_len);
#endif
- if (type != DOP_SEND_TT && tuple_arity != 3) {
- goto invalid_message;
- }
to = tuple[3];
if (is_not_pid(to)) {
goto invalid_message;
}
rp = erts_proc_lookup(to);
if (rp) {
- Uint xsize = type == DOP_SEND ? 0 : ERTS_HEAP_FRAG_SIZE(token_size);
ErtsProcLocks locks = 0;
ErtsDistExternal *ede_copy;
ede_copy = erts_make_dist_ext_copy(&ede, xsize);
- if (type == DOP_SEND) {
- token = NIL;
- } else {
+ if (is_not_nil(token)) {
ErlHeapFragment *heap_frag;
ErlOffHeap *ohp;
ASSERT(xsize);
heap_frag = erts_dist_ext_trailer(ede_copy);
- ERTS_INIT_HEAP_FRAG(heap_frag, token_size);
+ ERTS_INIT_HEAP_FRAG(heap_frag, token_size, token_size);
hp = heap_frag->mem;
ohp = &heap_frag->off_heap;
- token = tuple[4];
token = copy_struct(token, token_size, &hp, ohp);
}
- erts_queue_dist_message(rp, &locks, ede_copy, token);
+ erts_queue_dist_message(rp, locks, ede_copy, token, am_Empty);
if (locks)
- erts_smp_proc_unlock(rp, locks);
+ erts_proc_unlock(rp, locks);
}
break;
+ }
case DOP_MONITOR_P_EXIT: {
/* We are monitoring a process on the remote node which dies, we get
@@ -1503,7 +1618,7 @@ int erts_net_message(Port *prt,
goto invalid_message;
}
- erts_smp_de_links_lock(dep);
+ erts_de_links_lock(dep);
sysname = dep->sysname;
mon = erts_remove_monitor(&(dep->monitors), ref);
/*
@@ -1512,11 +1627,11 @@ int erts_net_message(Port *prt,
* removed info about monitor. In this case, do nothing
* and everything will be as it should.
*/
- erts_smp_de_links_unlock(dep);
+ erts_de_links_unlock(dep);
if (mon == NULL) {
break;
}
- rp = erts_pid2proc(NULL, 0, mon->pid, rp_locks);
+ rp = erts_pid2proc(NULL, 0, mon->u.pid, rp_locks);
erts_destroy_monitor(mon);
if (rp == NULL) {
@@ -1526,18 +1641,18 @@ int erts_net_message(Port *prt,
mon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref);
if (mon == NULL) {
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
break;
}
UseTmpHeapNoproc(3);
watched = (is_not_nil(mon->name)
? TUPLE2(&lhp[0], mon->name, sysname)
- : mon->pid);
+ : mon->u.pid);
erts_queue_monitor_message(rp, &rp_locks,
ref, am_process, watched, reason);
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
erts_destroy_monitor(mon);
UnUseTmpHeapNoproc(3);
break;
@@ -1598,10 +1713,14 @@ int erts_net_message(Port *prt,
ERTS_XSIG_FLG_IGN_KILL);
if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) {
/* We didn't exit the process and it is traced */
- trace_proc(NULL, rp, am_getting_unlinked, from);
+ if (rp_locks & ERTS_PROC_LOCKS_XSIG_SEND) {
+ erts_proc_unlock(rp, ERTS_PROC_LOCKS_XSIG_SEND);
+ rp_locks &= ~ERTS_PROC_LOCKS_XSIG_SEND;
+ }
+ trace_proc(NULL, 0, rp, am_getting_unlinked, from);
}
}
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
}
erts_remove_dist_link(&dld, to, from, dep);
if (lnk)
@@ -1643,7 +1762,7 @@ int erts_net_message(Port *prt,
token,
NULL,
0);
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
}
break;
}
@@ -1661,19 +1780,19 @@ int erts_net_message(Port *prt,
if (!rp)
break;
rp->group_leader = STORE_NC_IN_PROC(rp, from);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
break;
default:
goto invalid_message;
}
- erts_cleanup_offheap(&off_heap);
+ erts_factory_close(&factory);
if (ctl != ctl_default) {
erts_free(ERTS_ALC_T_DCTRL_BUF, (void *) ctl);
}
UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE);
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
return 0;
invalid_message:
{
@@ -1681,220 +1800,315 @@ int erts_net_message(Port *prt,
erts_dsprintf(dsbufp, "Invalid distribution message: %.200T", arg);
erts_send_error_to_logger_nogl(dsbufp);
}
- data_error:
+decode_error:
PURIFY_MSG("data error");
- erts_cleanup_offheap(&off_heap);
+ erts_factory_close(&factory);
if (ctl != ctl_default) {
erts_free(ERTS_ALC_T_DCTRL_BUF, (void *) ctl);
}
+data_error:
UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE);
- erts_deliver_port_exit(prt, dep->cid, am_killed, 0);
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ erts_kill_dist_connection(dep, connection_id);
+ ERTS_CHK_NO_PROC_LOCKS;
return -1;
}
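
For reference, the four send-style control messages dispatched in send_common above have these shapes; element 2 carries the sender pid only when DFLAG_SEND_SENDER has been negotiated, and is otherwise the empty-cookie atom '':

/* {DOP_SEND,           '',      ToPid}
 * {DOP_SEND_TT,        '',      ToPid, TraceToken}
 * {DOP_SEND_SENDER,    FromPid, ToPid}
 * {DOP_SEND_SENDER_TT, FromPid, ToPid, TraceToken}
 *
 * The message payload follows as a separate external term. */
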
-static int
-dsig_send(ErtsDSigData *dsdp, Eterm ctl, Eterm msg, int force_busy)
+static int dsig_send_ctl(ErtsDSigData* dsdp, Eterm ctl, int force_busy)
{
- Eterm cid;
- int suspended = 0;
- int resume = 0;
- Uint32 pass_through_size;
- Uint data_size, dhdr_ext_size;
- ErtsAtomCacheMap *acmp;
- ErtsDistOutputBuf *obuf;
- DistEntry *dep = dsdp->dep;
- Uint32 flags = dep->flags;
- Process *c_p = dsdp->proc;
-
- if (!c_p || dsdp->no_suspend)
- force_busy = 1;
+ struct erts_dsig_send_context ctx;
+ int ret;
+ ctx.ctl = ctl;
+ ctx.msg = THE_NON_VALUE;
+ ctx.force_busy = force_busy;
+ ctx.phase = ERTS_DSIG_SEND_PHASE_INIT;
+#ifdef DEBUG
+ ctx.reds = 1; /* provoke assert below (no reduction count without msg) */
+#endif
+ ret = erts_dsig_send(dsdp, &ctx);
+ ASSERT(ret != ERTS_DSIG_SEND_CONTINUE);
+ return ret;
+}
- ERTS_SMP_LC_ASSERT(!c_p
- || (ERTS_PROC_LOCK_MAIN
- == erts_proc_lc_my_proc_locks(c_p)));
+static ERTS_INLINE void
+notify_dist_data(Process *c_p, Eterm pid)
+{
+ Process *rp;
+ ErtsProcLocks rp_locks;
- if (!erts_is_alive)
- return ERTS_DSIG_SEND_OK;
+ ASSERT(erts_get_scheduler_data()
+ && !ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()));
+ ASSERT(is_internal_pid(pid));
- if (flags & DFLAG_DIST_HDR_ATOM_CACHE) {
- acmp = erts_get_atom_cache_map(c_p);
- pass_through_size = 0;
+ if (c_p && c_p->common.id == pid) {
+ rp = c_p;
+ rp_locks = ERTS_PROC_LOCK_MAIN;
}
else {
- acmp = NULL;
- pass_through_size = 1;
+ rp = erts_proc_lookup(pid);
+ rp_locks = 0;
}
-#ifdef ERTS_DIST_MSG_DBG
- erts_fprintf(stderr, ">>%s CTL: %T\n", pass_through_size ? "P" : " ", ctl);
- if (is_value(msg))
- erts_fprintf(stderr, " MSG: %T\n", msg);
-#endif
+ if (rp) {
+ ErtsMessage *mp = erts_alloc_message(0, NULL);
+ erts_queue_message(rp, rp_locks, mp, am_dist_data, am_system);
+ }
+}
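
notify_dist_data() is the producer half of a one-shot notification: as the ERTS_DE_QFLG_REQ_INFO handling further down shows, the flag is cleared when it fires, so the consumer must re-arm it each time. A sketch of the arming side, assuming it runs on behalf of the distribution-controller process (the BIF wrapping this is not part of this hunk):

/* Request a 'dist_data' message when the currently empty output
 * queue next becomes non-empty. */
erts_atomic32_read_bor_mb(&dep->qflgs, ERTS_DE_QFLG_REQ_INFO);
if (erts_atomic_read_nob(&dep->qsize) > 0) {
    /* Raced with an enqueue: take the notification ourselves. */
    erts_atomic32_read_band_mb(&dep->qflgs, ~ERTS_DE_QFLG_REQ_INFO);
    notify_dist_data(c_p, c_p->common.id);
}
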
- data_size = pass_through_size;
- erts_reset_atom_cache_map(acmp);
- data_size += erts_encode_dist_ext_size(ctl, flags, acmp);
- if (is_value(msg))
- data_size += erts_encode_dist_ext_size(msg, flags, acmp);
- erts_finalize_atom_cache_map(acmp, flags);
+int
+erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx)
+{
+ int retval;
+ Sint initial_reds = ctx->reds;
+ Eterm cid;
- dhdr_ext_size = erts_encode_ext_dist_header_size(acmp);
- data_size += dhdr_ext_size;
+ while (1) {
+ switch (ctx->phase) {
+ case ERTS_DSIG_SEND_PHASE_INIT:
+ ctx->flags = dsdp->dep->flags;
+ ctx->c_p = dsdp->proc;
- obuf = alloc_dist_obuf(data_size);
- obuf->ext_endp = &obuf->data[0] + pass_through_size + dhdr_ext_size;
+ if (!ctx->c_p || dsdp->no_suspend)
+ ctx->force_busy = 1;
- /* Encode internal version of dist header */
- obuf->extp = erts_encode_ext_dist_header_setup(obuf->ext_endp, acmp);
- /* Encode control message */
- erts_encode_dist_ext(ctl, &obuf->ext_endp, flags, acmp);
- if (is_value(msg)) {
- /* Encode message */
- erts_encode_dist_ext(msg, &obuf->ext_endp, flags, acmp);
- }
+ ERTS_LC_ASSERT(!ctx->c_p
+ || (ERTS_PROC_LOCK_MAIN
+ == erts_proc_lc_my_proc_locks(ctx->c_p)));
- ASSERT(obuf->extp < obuf->ext_endp);
- ASSERT(&obuf->data[0] <= obuf->extp - pass_through_size);
- ASSERT(obuf->ext_endp <= &obuf->data[0] + data_size);
+ if (!erts_is_alive)
+ return ERTS_DSIG_SEND_OK;
- data_size = obuf->ext_endp - obuf->extp;
+ if (ctx->flags & DFLAG_DIST_HDR_ATOM_CACHE) {
+ ctx->acmp = erts_get_atom_cache_map(ctx->c_p);
+ ctx->pass_through_size = 0;
+ }
+ else {
+ ctx->acmp = NULL;
+ ctx->pass_through_size = 1;
+ }
- /*
- * Signal encoded; now verify that the connection still exists,
- * and if so enqueue the signal and schedule it for send.
- */
- obuf->next = NULL;
- erts_smp_de_rlock(dep);
- cid = dep->cid;
- if (cid != dsdp->cid
- || dep->connection_id != dsdp->connection_id
- || dep->status & ERTS_DE_SFLG_EXITING) {
- /* Not the same connection as when we started; drop message... */
- erts_smp_de_runlock(dep);
- free_dist_obuf(obuf);
- }
- else {
- ErtsProcList *plp = NULL;
- erts_smp_mtx_lock(&dep->qlock);
- dep->qsize += size_obuf(obuf);
- if (dep->qsize >= erts_dist_buf_busy_limit)
- dep->qflgs |= ERTS_DE_QFLG_BUSY;
- if (!force_busy && (dep->qflgs & ERTS_DE_QFLG_BUSY)) {
- erts_smp_mtx_unlock(&dep->qlock);
-
- plp = erts_proclist_create(c_p);
- erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
- suspended = 1;
- erts_smp_mtx_lock(&dep->qlock);
- }
-
- /* Enqueue obuf on dist entry */
- if (dep->out_queue.last)
- dep->out_queue.last->next = obuf;
- else
- dep->out_queue.first = obuf;
- dep->out_queue.last = obuf;
+ #ifdef ERTS_DIST_MSG_DBG
+ erts_fprintf(stderr, ">>%s CTL: %T\n", ctx->pass_through_size ? "P" : " ", ctx->ctl);
+ if (is_value(ctx->msg))
+ erts_fprintf(stderr, " MSG: %T\n", ctx->msg);
+ #endif
+
+ ctx->data_size = ctx->pass_through_size;
+ erts_reset_atom_cache_map(ctx->acmp);
+ erts_encode_dist_ext_size(ctx->ctl, ctx->flags, ctx->acmp, &ctx->data_size);
+
+ if (is_value(ctx->msg)) {
+ ctx->u.sc.wstack.wstart = NULL;
+ ctx->u.sc.flags = ctx->flags;
+ ctx->u.sc.level = 0;
+ ctx->phase = ERTS_DSIG_SEND_PHASE_MSG_SIZE;
+ } else {
+ ctx->phase = ERTS_DSIG_SEND_PHASE_ALLOC;
+ }
+ break;
- if (!force_busy) {
- if (!(dep->qflgs & ERTS_DE_QFLG_BUSY)) {
- if (suspended)
- resume = 1; /* was busy when we started, but isn't now */
-#ifdef USE_VM_PROBES
- if (resume && DTRACE_ENABLED(dist_port_not_busy)) {
- DTRACE_CHARBUF(port_str, 64);
- DTRACE_CHARBUF(remote_str, 64);
-
- erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)),
- "%T", cid);
- erts_snprintf(remote_str, sizeof(DTRACE_CHARBUF_NAME(remote_str)),
- "%T", dep->sysname);
- DTRACE3(dist_port_not_busy, erts_this_node_sysname,
- port_str, remote_str);
- }
-#endif
+ case ERTS_DSIG_SEND_PHASE_MSG_SIZE:
+ if (erts_encode_dist_ext_size_int(ctx->msg, ctx, &ctx->data_size)) {
+ retval = ERTS_DSIG_SEND_CONTINUE;
+ goto done;
}
- else {
- /* Enqueue suspended process on dist entry */
- ASSERT(plp);
- erts_proclist_store_last(&dep->suspended, plp);
+
+	    ctx->phase = ERTS_DSIG_SEND_PHASE_ALLOC;
+	    /* fall through */
+ case ERTS_DSIG_SEND_PHASE_ALLOC:
+ erts_finalize_atom_cache_map(ctx->acmp, ctx->flags);
+
+ ctx->dhdr_ext_size = erts_encode_ext_dist_header_size(ctx->acmp);
+ ctx->data_size += ctx->dhdr_ext_size;
+
+ ctx->obuf = alloc_dist_obuf(ctx->data_size);
+ ctx->obuf->ext_endp = &ctx->obuf->data[0] + ctx->pass_through_size + ctx->dhdr_ext_size;
+
+ /* Encode internal version of dist header */
+ ctx->obuf->extp = erts_encode_ext_dist_header_setup(ctx->obuf->ext_endp, ctx->acmp);
+ /* Encode control message */
+ erts_encode_dist_ext(ctx->ctl, &ctx->obuf->ext_endp, ctx->flags, ctx->acmp, NULL, NULL);
+ if (is_value(ctx->msg)) {
+ ctx->u.ec.flags = ctx->flags;
+ ctx->u.ec.level = 0;
+ ctx->u.ec.wstack.wstart = NULL;
+ ctx->phase = ERTS_DSIG_SEND_PHASE_MSG_ENCODE;
+ } else {
+ ctx->phase = ERTS_DSIG_SEND_PHASE_FIN;
}
- }
+ break;
+
+ case ERTS_DSIG_SEND_PHASE_MSG_ENCODE:
+ if (erts_encode_dist_ext(ctx->msg, &ctx->obuf->ext_endp, ctx->flags, ctx->acmp, &ctx->u.ec, &ctx->reds)) {
+ retval = ERTS_DSIG_SEND_CONTINUE;
+ goto done;
+ }
+
+	    ctx->phase = ERTS_DSIG_SEND_PHASE_FIN;
+	    /* fall through */
+ case ERTS_DSIG_SEND_PHASE_FIN: {
+ DistEntry *dep = dsdp->dep;
+ int suspended = 0;
+ int resume = 0;
+
+ ASSERT(ctx->obuf->extp < ctx->obuf->ext_endp);
+ ASSERT(&ctx->obuf->data[0] <= ctx->obuf->extp - ctx->pass_through_size);
+ ASSERT(ctx->obuf->ext_endp <= &ctx->obuf->data[0] + ctx->data_size);
+
+ ctx->data_size = ctx->obuf->ext_endp - ctx->obuf->extp;
- erts_smp_mtx_unlock(&dep->qlock);
- erts_schedule_dist_command(NULL, dep);
- erts_smp_de_runlock(dep);
-
- if (resume) {
- erts_resume(c_p, ERTS_PROC_LOCK_MAIN);
- erts_proclist_destroy(plp);
/*
- * Note that the calling process still have to yield as if it
- * suspended. If not, the calling process could later be
- * erroneously scheduled when it shouldn't be.
+ * Signal encoded; now verify that the connection still exists,
+ * and if so enqueue the signal and schedule it for send.
*/
- }
- }
+ ctx->obuf->next = NULL;
+ erts_de_rlock(dep);
+ cid = dep->cid;
+ if (cid != dsdp->cid
+ || dep->connection_id != dsdp->connection_id
+ || dep->status & ERTS_DE_SFLG_EXITING) {
+ /* Not the same connection as when we started; drop message... */
+ erts_de_runlock(dep);
+ free_dist_obuf(ctx->obuf);
+ }
+ else {
+ Sint qsize;
+ erts_aint32_t qflgs;
+ ErtsProcList *plp = NULL;
+ Eterm notify_proc = NIL;
+ Sint obsz = size_obuf(ctx->obuf);
+
+ erts_mtx_lock(&dep->qlock);
+ qsize = erts_atomic_add_read_nob(&dep->qsize, (erts_aint_t) obsz);
+ ASSERT(qsize >= obsz);
+ qflgs = erts_atomic32_read_nob(&dep->qflgs);
+ if (!(qflgs & ERTS_DE_QFLG_BUSY) && qsize >= erts_dist_buf_busy_limit) {
+ erts_atomic32_read_bor_relb(&dep->qflgs, ERTS_DE_QFLG_BUSY);
+ qflgs |= ERTS_DE_QFLG_BUSY;
+ }
+ if (qsize == obsz && (qflgs & ERTS_DE_QFLG_REQ_INFO)) {
+ /* Previously empty queue and info requested... */
+ qflgs = erts_atomic32_read_band_mb(&dep->qflgs,
+ ~ERTS_DE_QFLG_REQ_INFO);
+ if (qflgs & ERTS_DE_QFLG_REQ_INFO) {
+ notify_proc = dep->cid;
+ ASSERT(is_internal_pid(notify_proc));
+ }
+ /* else: requester will send itself the message... */
+ qflgs &= ~ERTS_DE_QFLG_REQ_INFO;
+ }
+ if (!ctx->force_busy && (qflgs & ERTS_DE_QFLG_BUSY)) {
+ erts_mtx_unlock(&dep->qlock);
- if (c_p) {
- int reds;
- /*
- * Bump reductions on calling process.
- *
- * This is the reduction cost: Always a base cost of 8 reductions
- * plus 16 reductions per kilobyte generated external data.
- */
+ plp = erts_proclist_create(ctx->c_p);
+ erts_suspend(ctx->c_p, ERTS_PROC_LOCK_MAIN, NULL);
+ suspended = 1;
+ erts_mtx_lock(&dep->qlock);
+ }
- data_size >>= (10-4);
-#if defined(ARCH_64) && !HALFWORD_HEAP
- data_size &= 0x003fffffffffffff;
-#elif defined(ARCH_32) || HALFWORD_HEAP
- data_size &= 0x003fffff;
-#else
-# error "Ohh come on ... !?!"
-#endif
- reds = 8 + ((int) data_size > 1000000 ? 1000000 : (int) data_size);
- BUMP_REDS(c_p, reds);
+ /* Enqueue obuf on dist entry */
+ if (dep->out_queue.last)
+ dep->out_queue.last->next = ctx->obuf;
+ else
+ dep->out_queue.first = ctx->obuf;
+ dep->out_queue.last = ctx->obuf;
+
+ if (!ctx->force_busy) {
+ qflgs = erts_atomic32_read_nob(&dep->qflgs);
+ if (!(qflgs & ERTS_DE_QFLG_BUSY)) {
+ if (suspended)
+ resume = 1; /* was busy when we started, but isn't now */
+ #ifdef USE_VM_PROBES
+ if (resume && DTRACE_ENABLED(dist_port_not_busy)) {
+ DTRACE_CHARBUF(port_str, 64);
+ DTRACE_CHARBUF(remote_str, 64);
+
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)),
+ "%T", cid);
+ erts_snprintf(remote_str, sizeof(DTRACE_CHARBUF_NAME(remote_str)),
+ "%T", dep->sysname);
+ DTRACE3(dist_port_not_busy, erts_this_node_sysname,
+ port_str, remote_str);
+ }
+ #endif
+ }
+ else {
+ /* Enqueue suspended process on dist entry */
+ ASSERT(plp);
+ erts_proclist_store_last(&dep->suspended, plp);
+ }
+ }
+
+ erts_mtx_unlock(&dep->qlock);
+ if (is_internal_port(dep->cid))
+ erts_schedule_dist_command(NULL, dep);
+ erts_de_runlock(dep);
+ if (is_internal_pid(notify_proc))
+ notify_dist_data(ctx->c_p, notify_proc);
+
+ if (resume) {
+ erts_resume(ctx->c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proclist_destroy(plp);
+ /*
+		     * Note that the calling process still has to yield as if it
+ * suspended. If not, the calling process could later be
+ * erroneously scheduled when it shouldn't be.
+ */
+ }
+ }
+ ctx->obuf = NULL;
+
+ if (suspended) {
+ #ifdef USE_VM_PROBES
+ if (!resume && DTRACE_ENABLED(dist_port_busy)) {
+ DTRACE_CHARBUF(port_str, 64);
+ DTRACE_CHARBUF(remote_str, 64);
+ DTRACE_CHARBUF(pid_str, 16);
+
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)), "%T", cid);
+ erts_snprintf(remote_str, sizeof(DTRACE_CHARBUF_NAME(remote_str)),
+ "%T", dep->sysname);
+ erts_snprintf(pid_str, sizeof(DTRACE_CHARBUF_NAME(pid_str)),
+ "%T", ctx->c_p->common.id);
+ DTRACE4(dist_port_busy, erts_this_node_sysname,
+ port_str, remote_str, pid_str);
+ }
+ #endif
+ if (!resume && erts_system_monitor_flags.busy_dist_port)
+ monitor_generic(ctx->c_p, am_busy_dist_port, cid);
+ retval = ERTS_DSIG_SEND_YIELD;
+ } else {
+ retval = ERTS_DSIG_SEND_OK;
+ }
+ goto done;
+ }
+ default:
+ erts_exit(ERTS_ABORT_EXIT, "dsig_send invalid phase (%d)\n", (int)ctx->phase);
+ }
}
- if (suspended) {
-#ifdef USE_VM_PROBES
- if (!resume && DTRACE_ENABLED(dist_port_busy)) {
- DTRACE_CHARBUF(port_str, 64);
- DTRACE_CHARBUF(remote_str, 64);
- DTRACE_CHARBUF(pid_str, 16);
-
- erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)), "%T", cid);
- erts_snprintf(remote_str, sizeof(DTRACE_CHARBUF_NAME(remote_str)),
- "%T", dep->sysname);
- erts_snprintf(pid_str, sizeof(DTRACE_CHARBUF_NAME(pid_str)),
- "%T", c_p->common.id);
- DTRACE4(dist_port_busy, erts_this_node_sysname,
- port_str, remote_str, pid_str);
- }
-#endif
- if (!resume && erts_system_monitor_flags.busy_dist_port)
- monitor_generic(c_p, am_busy_dist_port, cid);
- return ERTS_DSIG_SEND_YIELD;
+done:
+ if (ctx->msg && ctx->c_p) {
+ BUMP_REDS(ctx->c_p, (initial_reds - ctx->reds) / TERM_TO_BINARY_LOOP_FACTOR);
}
- return ERTS_DSIG_SEND_OK;
+ return retval;
}
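
Message sends drive this state machine across multiple schedulings. A hedged sketch of a driver, assuming the context seeded in erts_dsig_send_msg() above and some continuation trap export (the trap name and reduction budget are illustrative, not part of this patch):

/* One scheduling slot of a yielding distributed send. */
Eterm magic;
ctx->dss.reds = 100;                          /* assumed per-slot budget */
switch (erts_dsig_send(&ctx->dsd, &ctx->dss)) {
case ERTS_DSIG_SEND_OK:                       /* fully encoded and enqueued */
    break;
case ERTS_DSIG_SEND_YIELD:                    /* enqueued; caller must yield */
    break;
case ERTS_DSIG_SEND_CONTINUE:                 /* reduction budget exhausted */
    magic = erts_dsend_export_trap_context(p, ctx);
    /* BIF_TRAP1(<assumed continue-trap export>, p, magic) */
    break;
}
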
-
static Uint
dist_port_command(Port *prt, ErtsDistOutputBuf *obuf)
{
int fpe_was_unmasked;
- Uint size = obuf->ext_endp - obuf->extp;
+ ErlDrvSizeT size;
+ char *bufp;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_CHK_NO_PROC_LOCKS;
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
- if (size > (Uint) INT_MAX)
- erl_exit(ERTS_ABORT_EXIT,
- "Absurdly large distribution output data buffer "
- "(%beu bytes) passed.\n",
- size);
+ if (!obuf) {
+ size = 0;
+ bufp = NULL;
+ }
+ else {
+ size = obuf->ext_endp - obuf->extp;
+ bufp = (char*) obuf->extp;
+ }
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(dist_output)) {
@@ -1909,11 +2123,10 @@ dist_port_command(Port *prt, ErtsDistOutputBuf *obuf)
remote_str, size);
}
#endif
+
prt->caller = NIL;
fpe_was_unmasked = erts_block_fpe();
- (*prt->drv_ptr->output)((ErlDrvData) prt->drv_data,
- (char*) obuf->extp,
- (int) size);
+ (*prt->drv_ptr->output)((ErlDrvData) prt->drv_data, bufp, size);
erts_unblock_fpe(fpe_was_unmasked);
return size;
}
@@ -1922,33 +2135,41 @@ static Uint
dist_port_commandv(Port *prt, ErtsDistOutputBuf *obuf)
{
int fpe_was_unmasked;
- Uint size = obuf->ext_endp - obuf->extp;
+ ErlDrvSizeT size;
SysIOVec iov[2];
ErlDrvBinary* bv[2];
ErlIOVec eiov;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
-
- if (size > (Uint) INT_MAX)
- erl_exit(ERTS_ABORT_EXIT,
- "Absurdly large distribution output data buffer "
- "(%beu bytes) passed.\n",
- size);
+ ERTS_CHK_NO_PROC_LOCKS;
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
iov[0].iov_base = NULL;
iov[0].iov_len = 0;
bv[0] = NULL;
- iov[1].iov_base = obuf->extp;
- iov[1].iov_len = size;
- bv[1] = Binary2ErlDrvBinary(ErtsDistOutputBuf2Binary(obuf));
+ if (!obuf) {
+ size = 0;
+ eiov.vsize = 1;
+ }
+ else {
+ size = obuf->ext_endp - obuf->extp;
+ eiov.vsize = 2;
+
+ iov[1].iov_base = obuf->extp;
+ iov[1].iov_len = size;
+ bv[1] = Binary2ErlDrvBinary(ErtsDistOutputBuf2Binary(obuf));
+ }
- eiov.vsize = 2;
eiov.size = size;
eiov.iov = iov;
eiov.binv = bv;
+ if (size > (Uint) INT_MAX)
+ erts_exit(ERTS_DUMP_EXIT,
+ "Absurdly large distribution output data buffer "
+ "(%beu bytes) passed.\n",
+ size);
+
ASSERT(prt->drv_ptr->outputv);
#ifdef USE_VM_PROBES
@@ -1973,9 +2194,9 @@ dist_port_commandv(Port *prt, ErtsDistOutputBuf *obuf)
}
-#if defined(ARCH_64) && !HALFWORD_HEAP
+#if defined(ARCH_64)
#define ERTS_PORT_REDS_MASK__ 0x003fffffffffffffL
-#elif defined(ARCH_32) || HALFWORD_HEAP
+#elif defined(ARCH_32)
#define ERTS_PORT_REDS_MASK__ 0x003fffff
#else
# error "Ohh come on ... !?!"
@@ -1996,28 +2217,25 @@ erts_dist_command(Port *prt, int reds_limit)
Sint reds = ERTS_PORT_REDS_DIST_CMD_START;
Uint32 status;
Uint32 flags;
- Sint obufsize = 0;
+ Sint qsize, obufsize = 0;
ErtsDistOutputQueue oq, foq;
DistEntry *dep = prt->dist_entry;
Uint (*send)(Port *prt, ErtsDistOutputBuf *obuf);
erts_aint32_t sched_flags;
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
-
- erts_refc_inc(&dep->refc, 1); /* Otherwise dist_entry might be
- removed if port command fails */
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
- erts_smp_atomic_set_mb(&dep->dist_cmd_scheduled, 0);
+ erts_atomic_set_mb(&dep->dist_cmd_scheduled, 0);
- erts_smp_de_rlock(dep);
+ erts_de_rlock(dep);
flags = dep->flags;
status = dep->status;
send = dep->send;
- erts_smp_de_runlock(dep);
+ erts_de_runlock(dep);
if (status & ERTS_DE_SFLG_EXITING) {
- erts_deliver_port_exit(prt, prt->common.id, am_killed, 0);
- erts_deref_dist_entry(dep);
+ erts_deliver_port_exit(prt, prt->common.id, am_killed, 0, 1);
return reds + ERTS_PORT_REDS_DIST_CMD_EXIT;
}
@@ -2031,19 +2249,19 @@ erts_dist_command(Port *prt, int reds_limit)
* a mess.
*/
- erts_smp_mtx_lock(&dep->qlock);
+ erts_mtx_lock(&dep->qlock);
oq.first = dep->out_queue.first;
oq.last = dep->out_queue.last;
dep->out_queue.first = NULL;
dep->out_queue.last = NULL;
- erts_smp_mtx_unlock(&dep->qlock);
+ erts_mtx_unlock(&dep->qlock);
foq.first = dep->finalized_out_queue.first;
foq.last = dep->finalized_out_queue.last;
dep->finalized_out_queue.first = NULL;
dep->finalized_out_queue.last = NULL;
- sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ sched_flags = erts_atomic32_read_nob(&prt->sched.flags);
if (reds > reds_limit)
goto preempted;
@@ -2051,21 +2269,21 @@ erts_dist_command(Port *prt, int reds_limit)
if (!(sched_flags & ERTS_PTS_FLG_BUSY_PORT) && foq.first) {
int preempt = 0;
do {
- Uint size;
- ErtsDistOutputBuf *fob;
-
- size = (*send)(prt, foq.first);
+ Uint size;
+ ErtsDistOutputBuf *fob;
+ size = (*send)(prt, foq.first);
+ erts_atomic64_inc_nob(&dep->out);
+ esdp->io.out += (Uint64) size;
#ifdef ERTS_RAW_DIST_MSG_DBG
- erts_fprintf(stderr, ">> ");
- bw(foq.first->extp, size);
+ erts_fprintf(stderr, ">> ");
+ bw(foq.first->extp, size);
#endif
- reds += ERTS_PORT_REDS_DIST_CMD_DATA(size);
- erts_smp_atomic_add_nob(&erts_bytes_out, size);
- fob = foq.first;
- obufsize += size_obuf(fob);
- foq.first = foq.first->next;
- free_dist_obuf(fob);
- sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ reds += ERTS_PORT_REDS_DIST_CMD_DATA(size);
+ fob = foq.first;
+ obufsize += size_obuf(fob);
+ foq.first = foq.first->next;
+ free_dist_obuf(fob);
+ sched_flags = erts_atomic32_read_nob(&prt->sched.flags);
preempt = reds > reds_limit || (sched_flags & ERTS_PTS_FLG_EXIT);
if (sched_flags & ERTS_PTS_FLG_BUSY_PORT)
break;
@@ -2125,32 +2343,34 @@ erts_dist_command(Port *prt, int reds_limit)
}
}
else {
+ int de_busy;
int preempt = 0;
while (oq.first && !preempt) {
- ErtsDistOutputBuf *fob;
- Uint size;
- oq.first->extp
- = erts_encode_ext_dist_header_finalize(oq.first->extp,
- dep->cache,
- flags);
- reds += ERTS_PORT_REDS_DIST_CMD_FINALIZE;
- if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE))
- *--oq.first->extp = PASS_THROUGH; /* Old node; 'pass through'
- needed */
- ASSERT(&oq.first->data[0] <= oq.first->extp
- && oq.first->extp < oq.first->ext_endp);
- size = (*send)(prt, oq.first);
+ ErtsDistOutputBuf *fob;
+ Uint size;
+ oq.first->extp
+ = erts_encode_ext_dist_header_finalize(oq.first->extp,
+ dep->cache,
+ flags);
+ reds += ERTS_PORT_REDS_DIST_CMD_FINALIZE;
+ if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE))
+ *--oq.first->extp = PASS_THROUGH; /* Old node; 'pass through'
+ needed */
+ ASSERT(&oq.first->data[0] <= oq.first->extp
+ && oq.first->extp < oq.first->ext_endp);
+ size = (*send)(prt, oq.first);
+ erts_atomic64_inc_nob(&dep->out);
+ esdp->io.out += (Uint64) size;
#ifdef ERTS_RAW_DIST_MSG_DBG
- erts_fprintf(stderr, ">> ");
- bw(oq.first->extp, size);
+ erts_fprintf(stderr, ">> ");
+ bw(oq.first->extp, size);
#endif
- reds += ERTS_PORT_REDS_DIST_CMD_DATA(size);
- erts_smp_atomic_add_nob(&erts_bytes_out, size);
- fob = oq.first;
- obufsize += size_obuf(fob);
- oq.first = oq.first->next;
- free_dist_obuf(fob);
- sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ reds += ERTS_PORT_REDS_DIST_CMD_DATA(size);
+ fob = oq.first;
+ obufsize += size_obuf(fob);
+ oq.first = oq.first->next;
+ free_dist_obuf(fob);
+ sched_flags = erts_atomic32_read_nob(&prt->sched.flags);
preempt = reds > reds_limit || (sched_flags & ERTS_PTS_FLG_EXIT);
if ((sched_flags & ERTS_PTS_FLG_BUSY_PORT) && oq.first && !preempt)
goto finalize_only;
@@ -2177,23 +2397,24 @@ erts_dist_command(Port *prt, int reds_limit)
* dist entry in a non-busy state and resume suspended
* processes.
*/
- erts_smp_mtx_lock(&dep->qlock);
- ASSERT(dep->qsize >= obufsize);
- dep->qsize -= obufsize;
+ erts_mtx_lock(&dep->qlock);
+ de_busy = !!(erts_atomic32_read_nob(&dep->qflgs) & ERTS_DE_QFLG_BUSY);
+ qsize = (Sint) erts_atomic_add_read_nob(&dep->qsize,
+ (erts_aint_t) -obufsize);
+ ASSERT(qsize >= 0);
obufsize = 0;
if (!(sched_flags & ERTS_PTS_FLG_BUSY_PORT)
- && (dep->qflgs & ERTS_DE_QFLG_BUSY)
- && dep->qsize < erts_dist_buf_busy_limit) {
+ && de_busy && qsize < erts_dist_buf_busy_limit) {
ErtsProcList *suspendees;
int resumed;
suspendees = get_suspended_on_de(dep, ERTS_DE_QFLG_BUSY);
- erts_smp_mtx_unlock(&dep->qlock);
+ erts_mtx_unlock(&dep->qlock);
resumed = erts_resume_processes(suspendees);
reds += resumed*ERTS_PORT_REDS_DIST_CMD_RESUMED;
}
else
- erts_smp_mtx_unlock(&dep->qlock);
+ erts_mtx_unlock(&dep->qlock);
}
ASSERT(!oq.first && !oq.last);
@@ -2202,10 +2423,15 @@ erts_dist_command(Port *prt, int reds_limit)
if (obufsize != 0) {
ASSERT(obufsize > 0);
- erts_smp_mtx_lock(&dep->qlock);
- ASSERT(dep->qsize >= obufsize);
- dep->qsize -= obufsize;
- erts_smp_mtx_unlock(&dep->qlock);
+ erts_mtx_lock(&dep->qlock);
+#ifdef DEBUG
+ qsize = (Sint) erts_atomic_add_read_nob(&dep->qsize,
+ (erts_aint_t) -obufsize);
+ ASSERT(qsize >= 0);
+#else
+ erts_atomic_add_nob(&dep->qsize, (erts_aint_t) -obufsize);
+#endif
+ erts_mtx_unlock(&dep->qlock);
}
ASSERT(foq.first || !foq.last);
@@ -2222,8 +2448,6 @@ erts_dist_command(Port *prt, int reds_limit)
if (reds > INT_MAX/2)
reds = INT_MAX/2;
- erts_deref_dist_entry(dep);
-
return reds;
preempted:
@@ -2259,9 +2483,9 @@ erts_dist_command(Port *prt, int reds_limit)
foq.last = NULL;
#ifdef DEBUG
- erts_smp_mtx_lock(&dep->qlock);
- ASSERT(dep->qsize == obufsize);
- erts_smp_mtx_unlock(&dep->qlock);
+ erts_mtx_lock(&dep->qlock);
+ ASSERT(erts_atomic_read_nob(&dep->qsize) == obufsize);
+ erts_mtx_unlock(&dep->qlock);
#endif
}
else {
@@ -2270,14 +2494,14 @@ erts_dist_command(Port *prt, int reds_limit)
* Unhandled buffers need to be put back first
* in out_queue.
*/
- erts_smp_mtx_lock(&dep->qlock);
- dep->qsize -= obufsize;
+ erts_mtx_lock(&dep->qlock);
+ erts_atomic_add_nob(&dep->qsize, -obufsize);
obufsize = 0;
oq.last->next = dep->out_queue.first;
dep->out_queue.first = oq.first;
if (!dep->out_queue.last)
dep->out_queue.last = oq.last;
- erts_smp_mtx_unlock(&dep->qlock);
+ erts_mtx_unlock(&dep->qlock);
}
erts_schedule_dist_command(prt, NULL);
@@ -2285,6 +2509,370 @@ erts_dist_command(Port *prt, int reds_limit)
goto done;
}
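
The send loop above is easier to see in isolation: work is paid for in reductions, and once the budget is exceeded the remaining buffers are pushed back on the queue and the port task is rescheduled instead of blocking a scheduler. A minimal stand-alone sketch of that shape (all names hypothetical, not ERTS APIs):

#include <stdio.h>

struct buf { struct buf *next; int cost; };

static int drain(struct buf **queue, int reds, int reds_limit)
{
    while (*queue && reds <= reds_limit) {
        struct buf *b = *queue;   /* the send + free of b would happen here */
        *queue = b->next;
        reds += b->cost;          /* pay for the work just done */
    }
    return reds;                  /* reds > reds_limit: preempted, buffers remain */
}

int main(void)
{
    struct buf b2 = { NULL, 700 }, b1 = { &b2, 700 };
    struct buf *q = &b1;
    int reds = drain(&q, 0, 500);
    printf("reds=%d, %s\n", reds, q ? "preempted" : "drained");
    return 0;
}
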
+#if 0
+
+int
+dist_data_finalize(Process *c_p, int reds_limit)
+{
+ int reds = 5;
+ DistEntry *dep = ;
+ ErtsDistOutputQueue oq, foq;
+ ErtsDistOutputBuf *ob;
+ int preempt;
+
+
+ erts_mtx_lock(&dep->qlock);
+ flags = dep->flags;
+ oq.first = dep->out_queue.first;
+ oq.last = dep->out_queue.last;
+ dep->out_queue.first = NULL;
+ dep->out_queue.last = NULL;
+ erts_mtx_unlock(&dep->qlock);
+
+ if (!oq.first) {
+ ASSERT(!oq.last);
+ oq.first = dep->tmp_out_queue.first;
+ oq.last = dep->tmp_out_queue.last;
+ }
+ else {
+ ErtsDistOutputBuf *f, *l;
+ ASSERT(oq.last);
+ if (dep->tmp_out_queue.last) {
+ dep->tmp_out_queue.last->next = oq.first;
+ oq.first = dep->tmp_out_queue.first;
+ }
+ }
+
+ if (!oq.first) {
+ /* Nothing to do... */
+ ASSERT(!oq.last);
+ return reds;
+ }
+
+ foq.first = dep->finalized_out_queue.first;
+ foq.last = dep->finalized_out_queue.last;
+
+ preempt = 0;
+ ob = oq.first;
+ ASSERT(ob);
+
+ do {
+ ob->extp = erts_encode_ext_dist_header_finalize(ob->extp,
+ dep->cache,
+ flags);
+ if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE))
+ *--ob->extp = PASS_THROUGH; /* Old node; 'pass through'
+ needed */
+ ASSERT(&ob->data[0] <= ob->extp && ob->extp < ob->ext_endp);
+ reds += ERTS_PORT_REDS_DIST_CMD_FINALIZE;
+ preempt = reds > reds_limit;
+ if (preempt)
+ break;
+ ob = ob->next;
+ } while (ob);
+ /*
+ * At least one buffer was finalized; if we got preempted,
+ * ob points to the last buffer that we finalized.
+ */
+ if (foq.last)
+ foq.last->next = oq.first;
+ else
+ foq.first = oq.first;
+ if (!preempt) {
+ /* All buffers finalized */
+ foq.last = oq.last;
+ oq.first = oq.last = NULL;
+ }
+ else {
+ /* Not all buffers finalized; split oq. */
+ foq.last = ob;
+ oq.first = ob->next;
+ if (oq.first)
+ ob->next = NULL;
+ else
+ oq.last = NULL;
+ }
+
+ dep->finalized_out_queue.first = foq.first;
+ dep->finalized_out_queue.last = foq.last;
+ dep->tmp_out_queue.first = oq.first;
+ dep->tmp_out_queue.last = oq.last;
+
+ return reds;
+}
+
+#endif
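
The disabled dist_data_finalize() above splices already-finalized buffers onto foq and, when preempted, splits the remainder back into the temporary queue at the last finalized buffer. A stand-alone sketch of that split, with hypothetical types:

#include <stdio.h>

struct node  { struct node *next; int id; };
struct queue { struct node *first, *last; };

/* Move q->first .. stop into done; leave the remainder in q. */
static void split_at(struct queue *q, struct node *stop, struct queue *done)
{
    if (done->last) done->last->next = q->first;
    else            done->first = q->first;
    done->last = stop;
    q->first   = stop->next;
    stop->next = NULL;
    if (!q->first) q->last = NULL;
}

int main(void)
{
    struct node c = {NULL, 3}, b = {&c, 2}, a = {&b, 1};
    struct queue q = {&a, &c}, done = {NULL, NULL};
    split_at(&q, &b, &done);      /* buffers 1 and 2 finalized, then preempted */
    printf("done ends at %d, next unfinalized is %d\n",
           done.last->id, q.first->id);
    return 0;
}
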
+
+BIF_RETTYPE
+dist_ctrl_get_data_notification_1(BIF_ALIST_1)
+{
+ DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(BIF_P);
+ erts_aint32_t qflgs;
+ erts_aint_t qsize;
+ Eterm receiver = NIL;
+
+ if (!dep)
+ BIF_ERROR(BIF_P, EXC_NOTSUP);
+
+ if (erts_dhandle_to_dist_entry(BIF_ARG_1) != dep)
+ BIF_ERROR(BIF_P, BADARG);
+
+ /*
+ * Caller is the only one that can consume from this queue
+ * and the only one that can set the req-info flag...
+ */
+
+ erts_de_rlock(dep);
+
+ ASSERT(dep->cid == BIF_P->common.id);
+
+ qflgs = erts_atomic32_read_acqb(&dep->qflgs);
+
+ if (!(qflgs & ERTS_DE_QFLG_REQ_INFO)) {
+ qsize = erts_atomic_read_acqb(&dep->qsize);
+ ASSERT(qsize >= 0);
+ if (qsize > 0)
+ receiver = BIF_P->common.id; /* Notify ourselves... */
+ else { /* Empty queue; set req-info flag... */
+ qflgs = erts_atomic32_read_bor_mb(&dep->qflgs,
+ ERTS_DE_QFLG_REQ_INFO);
+ qsize = erts_atomic_read_acqb(&dep->qsize);
+ ASSERT(qsize >= 0);
+ if (qsize > 0) {
+ qflgs = erts_atomic32_read_band_mb(&dep->qflgs,
+ ~ERTS_DE_QFLG_REQ_INFO);
+ if (qflgs & ERTS_DE_QFLG_REQ_INFO)
+ receiver = BIF_P->common.id; /* Notify ourselves... */
+ /* else: someone else will notify us... */
+ }
+ /* else: still empty queue... */
+ }
+ }
+ /* else: Already requested... */
+
+ erts_de_runlock(dep);
+
+ if (is_internal_pid(receiver))
+ notify_dist_data(BIF_P, receiver);
+
+ BIF_RET(am_ok);
+}
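
dist_ctrl_get_data_notification_1() avoids a lost wakeup with a set-then-recheck dance: if data races in between the first queue-size read and setting the req-info flag, the caller tries to take the flag back and notifies itself only if no enqueuer consumed it first. A stand-alone distillation using C11 atomics (every name here is a stand-in, not an ERTS API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_long qsize;
static atomic_uint qflgs;
#define QFLG_REQ_INFO 1u

/* Returns true if the caller should notify itself now; false if the
 * request is parked and a future enqueuer will send the notification. */
static bool request_data_notification(void)
{
    if (atomic_load(&qflgs) & QFLG_REQ_INFO)
        return false;                        /* already requested earlier */
    if (atomic_load(&qsize) > 0)
        return true;                         /* data already queued */
    atomic_fetch_or(&qflgs, QFLG_REQ_INFO);  /* park the request */
    if (atomic_load(&qsize) > 0) {
        /* Data raced in between the two reads: take the flag back, and
         * claim the notification only if no enqueuer consumed it first. */
        unsigned old = atomic_fetch_and(&qflgs, ~QFLG_REQ_INFO);
        if (old & QFLG_REQ_INFO)
            return true;
    }
    return false;
}

int main(void)
{
    atomic_store(&qsize, 1);
    puts(request_data_notification() ? "notify self" : "wait");
    return 0;
}
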
+
+BIF_RETTYPE
+dist_ctrl_put_data_2(BIF_ALIST_2)
+{
+ DistEntry *dep;
+ ErlDrvSizeT size;
+ Eterm input_handler;
+
+ if (is_binary(BIF_ARG_2))
+ size = binary_size(BIF_ARG_2);
+ else if (is_nil(BIF_ARG_2))
+ size = 0;
+ else if (is_list(BIF_ARG_2))
+ BIF_TRAP2(dist_ctrl_put_data_trap,
+ BIF_P, BIF_ARG_1, BIF_ARG_2);
+ else
+ BIF_ERROR(BIF_P, BADARG);
+
+ dep = erts_dhandle_to_dist_entry(BIF_ARG_1);
+ if (!dep)
+ BIF_ERROR(BIF_P, BADARG);
+
+ input_handler = (Eterm) erts_atomic_read_nob(&dep->input_handler);
+
+ if (input_handler != BIF_P->common.id)
+ BIF_ERROR(BIF_P, EXC_NOTSUP);
+
+ erts_atomic64_inc_nob(&dep->in);
+
+ if (size != 0) {
+ byte *data, *temp_alloc = NULL;
+
+ data = (byte *) erts_get_aligned_binary_bytes(BIF_ARG_2, &temp_alloc);
+ if (!data)
+ BIF_ERROR(BIF_P, BADARG);
+
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+
+ (void) erts_net_message(NULL, dep, NULL, 0, data, size);
+ /*
+ * We ignore any decode failures. On fatal failures the
+ * connection will be taken down by killing the
+ * distribution channel controller...
+ */
+
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+
+ BUMP_REDS(BIF_P, 5);
+
+ erts_free_aligned_binary_bytes(temp_alloc);
+
+ }
+
+ BIF_RET(am_ok);
+}
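
dist_ctrl_put_data_2() releases the caller's main process lock around the potentially long erts_net_message() decode and re-takes it before returning, so other schedulers can interact with the process in the meantime. A minimal sketch of that lock pattern, with a pthread mutex standing in for the process lock (hypothetical names throughout):

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

static pthread_mutex_t main_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stands in for erts_net_message(): a potentially long-running decode. */
static void decode_bytes(const unsigned char *data, size_t size)
{
    (void) data; (void) size;
}

/* Called with main_lock held, as a BIF holds the main process lock. */
static void put_data(const unsigned char *data, size_t size)
{
    pthread_mutex_unlock(&main_lock);
    decode_bytes(data, size);        /* decode failures are simply ignored */
    pthread_mutex_lock(&main_lock);  /* held again on return */
}

int main(void)
{
    unsigned char buf[4] = {131, 0, 0, 0};
    pthread_mutex_lock(&main_lock);
    put_data(buf, sizeof buf);
    pthread_mutex_unlock(&main_lock);
    puts("ok");
    return 0;
}
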
+
+BIF_RETTYPE
+dist_get_stat_1(BIF_ALIST_1)
+{
+ Sint64 read, write, pend;
+ Eterm res, *hp, **hpp;
+ Uint sz, *szp;
+ DistEntry *dep = erts_dhandle_to_dist_entry(BIF_ARG_1);
+
+ if (!dep)
+ BIF_ERROR(BIF_P, BADARG);
+
+ erts_de_rlock(dep);
+
+ read = (Sint64) erts_atomic64_read_nob(&dep->in);
+ write = (Sint64) erts_atomic64_read_nob(&dep->out);
+ pend = (Sint64) erts_atomic_read_nob(&dep->qsize);
+
+ erts_de_runlock(dep);
+
+ sz = 0;
+ szp = &sz;
+ hpp = NULL;
+
+ while (1) {
+ res = erts_bld_tuple(hpp, szp, 4,
+ am_ok,
+ erts_bld_sint64(hpp, szp, read),
+ erts_bld_sint64(hpp, szp, write),
+ pend ? am_true : am_false);
+ if (hpp)
+ break;
+ hp = HAlloc(BIF_P, sz);
+ hpp = &hp;
+ szp = NULL;
+ }
+
+ BIF_RET(res);
+}
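
The while (1) loop in dist_get_stat_1() is the usual ERTS two-pass build idiom: the first erts_bld_tuple() call runs with only a size pointer and counts heap words, HAlloc() reserves exactly that many, and the second pass writes the term. A generic stand-alone sketch of the idiom (names hypothetical):

#include <stdio.h>
#include <stdlib.h>

/* build() either counts words (szp != NULL) or emits them (hpp != NULL),
 * in the style of the erts_bld_* helpers. */
static void build(int **hpp, size_t *szp, const int *src, size_t n)
{
    size_t i;
    if (szp) { *szp += n; return; }   /* pass 1: sizing */
    for (i = 0; i < n; i++)           /* pass 2: emit into the heap */
        *(*hpp)++ = src[i];
}

int main(void)
{
    const int src[] = {1, 2, 3, 4};
    size_t sz = 0;
    int *heap, *hp;

    build(NULL, &sz, src, 4);         /* measure */
    heap = hp = malloc(sz * sizeof *heap);
    build(&hp, NULL, src, 4);         /* build */
    printf("%zu words built\n", sz);
    free(heap);
    return 0;
}
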
+
+BIF_RETTYPE
+dist_ctrl_input_handler_2(BIF_ALIST_2)
+{
+ DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(BIF_P);
+
+ if (!dep)
+ BIF_ERROR(BIF_P, EXC_NOTSUP);
+
+ if (erts_dhandle_to_dist_entry(BIF_ARG_1) != dep)
+ BIF_ERROR(BIF_P, BADARG);
+
+ if (is_not_internal_pid(BIF_ARG_2))
+ BIF_ERROR(BIF_P, BADARG);
+
+ erts_atomic_set_nob(&dep->input_handler,
+ (erts_aint_t) BIF_ARG_2);
+
+ BIF_RET(am_ok);
+}
+
+BIF_RETTYPE
+dist_ctrl_get_data_1(BIF_ALIST_1)
+{
+ DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(BIF_P);
+ int reds = 1;
+ ErtsDistOutputBuf *obuf;
+ Eterm *hp;
+ ProcBin *pb;
+ erts_aint_t qsize;
+
+ if (!dep)
+ BIF_ERROR(BIF_P, EXC_NOTSUP);
+
+ if (erts_dhandle_to_dist_entry(BIF_ARG_1) != dep)
+ BIF_ERROR(BIF_P, BADARG);
+
+ erts_de_rlock(dep);
+
+ if (dep->status & ERTS_DE_SFLG_EXITING)
+ goto return_none;
+
+ ASSERT(dep->cid == BIF_P->common.id);
+
+#if 0
+ if (dep->finalized_out_queue.first) {
+ obuf = dep->finalized_out_queue.first;
+ dep->finalized_out_queue.first = obuf->next;
+ if (!obuf->next)
+ dep->finalized_out_queue.last = NULL;
+ }
+ else
+#endif
+ {
+ if (!dep->tmp_out_queue.first) {
+ ASSERT(!dep->tmp_out_queue.last);
+ qsize = erts_atomic_read_acqb(&dep->qsize);
+ if (qsize > 0) {
+ erts_mtx_lock(&dep->qlock);
+ dep->tmp_out_queue.first = dep->out_queue.first;
+ dep->tmp_out_queue.last = dep->out_queue.last;
+ dep->out_queue.first = NULL;
+ dep->out_queue.last = NULL;
+ erts_mtx_unlock(&dep->qlock);
+ }
+ }
+
+ if (!dep->tmp_out_queue.first) {
+ ASSERT(!dep->tmp_out_queue.last);
+ return_none:
+ erts_de_runlock(dep);
+ BIF_RET(am_none);
+ }
+ else {
+ obuf = dep->tmp_out_queue.first;
+ dep->tmp_out_queue.first = obuf->next;
+ if (!obuf->next)
+ dep->tmp_out_queue.last = NULL;
+ }
+
+ obuf->extp = erts_encode_ext_dist_header_finalize(obuf->extp,
+ dep->cache,
+ dep->flags);
+ reds += ERTS_PORT_REDS_DIST_CMD_FINALIZE;
+ if (!(dep->flags & DFLAG_DIST_HDR_ATOM_CACHE))
+ *--obuf->extp = PASS_THROUGH; /* 'pass through' needed */
+ ASSERT(&obuf->data[0] <= obuf->extp
+ && obuf->extp < obuf->ext_endp);
+ }
+
+ erts_atomic64_inc_nob(&dep->out);
+
+ erts_de_runlock(dep);
+
+ hp = HAlloc(BIF_P, PROC_BIN_SIZE);
+ pb = (ProcBin *) (char *) hp;
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = obuf->ext_endp - obuf->extp;
+ pb->next = MSO(BIF_P).first;
+ MSO(BIF_P).first = (struct erl_off_heap_header*) pb;
+ pb->val = ErtsDistOutputBuf2Binary(obuf);
+ pb->bytes = (byte*) obuf->extp;
+ pb->flags = 0;
+
+ qsize = erts_atomic_add_read_nob(&dep->qsize, -size_obuf(obuf));
+ ASSERT(qsize >= 0);
+
+ if (qsize < erts_dist_buf_busy_limit/2
+ && (erts_atomic32_read_acqb(&dep->qflgs) & ERTS_DE_QFLG_BUSY)) {
+ ErtsProcList *resume_procs = NULL;
+ erts_mtx_lock(&dep->qlock);
+ resume_procs = get_suspended_on_de(dep, ERTS_DE_QFLG_BUSY);
+ erts_mtx_unlock(&dep->qlock);
+ if (resume_procs) {
+ int resumed = erts_resume_processes(resume_procs);
+ reds += resumed*ERTS_PORT_REDS_DIST_CMD_RESUMED;
+ }
+ }
+
+ BIF_RET2(make_binary(pb), reds);
+}
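
Here, as in erts_dist_command(), suspended producers are resumed only once the queue has drained below half of erts_dist_buf_busy_limit. The half-limit gives the busy flag hysteresis, so producers are not resumed and immediately re-suspended on every buffer. A stand-alone sketch (hypothetical names; the limit is illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define BUSY_LIMIT 65536L   /* stands in for erts_dist_buf_busy_limit */

static atomic_long q_size;
static atomic_bool q_busy;

static void on_enqueue(long nbytes)
{
    if (atomic_fetch_add(&q_size, nbytes) + nbytes >= BUSY_LIMIT)
        atomic_store(&q_busy, true);   /* new producers now suspend */
}

/* Returns true when the consumer should resume suspended producers. */
static bool on_dequeue(long nbytes)
{
    long left = atomic_fetch_sub(&q_size, nbytes) - nbytes;
    return left < BUSY_LIMIT / 2 && atomic_exchange(&q_busy, false);
}

int main(void)
{
    on_enqueue(70000);
    puts(on_dequeue(50000) ? "resume producers" : "still busy");
    return 0;
}
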
+
void
erts_dist_port_not_busy(Port *prt)
{
@@ -2307,54 +2895,56 @@ erts_dist_port_not_busy(Port *prt)
void
erts_kill_dist_connection(DistEntry *dep, Uint32 connection_id)
{
- erts_smp_de_rwlock(dep);
- if (is_internal_port(dep->cid)
- && connection_id == dep->connection_id
+ erts_de_rwlock(dep);
+ if (connection_id == dep->connection_id
&& !(dep->status & ERTS_DE_SFLG_EXITING)) {
dep->status |= ERTS_DE_SFLG_EXITING;
- erts_smp_mtx_lock(&dep->qlock);
- ASSERT(!(dep->qflgs & ERTS_DE_QFLG_EXIT));
- dep->qflgs |= ERTS_DE_QFLG_EXIT;
- erts_smp_mtx_unlock(&dep->qlock);
+ erts_mtx_lock(&dep->qlock);
+ ASSERT(!(erts_atomic32_read_nob(&dep->qflgs) & ERTS_DE_QFLG_EXIT));
+ erts_atomic32_read_bor_nob(&dep->qflgs, ERTS_DE_QFLG_EXIT);
+ erts_mtx_unlock(&dep->qlock);
- erts_schedule_dist_command(NULL, dep);
+ if (is_internal_port(dep->cid))
+ erts_schedule_dist_command(NULL, dep);
+ else if (is_internal_pid(dep->cid))
+ schedule_kill_dist_ctrl_proc(dep->cid);
}
- erts_smp_de_rwunlock(dep);
+ erts_de_rwunlock(dep);
}
struct print_to_data {
- int to;
+ fmtfn_t to;
void *arg;
};
static void doit_print_monitor_info(ErtsMonitor *mon, void *vptdp)
{
- int to = ((struct print_to_data *) vptdp)->to;
+ fmtfn_t to = ((struct print_to_data *) vptdp)->to;
void *arg = ((struct print_to_data *) vptdp)->arg;
Process *rp;
ErtsMonitor *rmon;
- rp = erts_proc_lookup(mon->pid);
+ rp = erts_proc_lookup(mon->u.pid);
if (!rp || (rmon = erts_lookup_monitor(ERTS_P_MONITORS(rp), mon->ref)) == NULL) {
- erts_print(to, arg, "Warning, stray monitor for: %T\n", mon->pid);
+ erts_print(to, arg, "Warning, stray monitor for: %T\n", mon->u.pid);
} else if (mon->type == MON_ORIGIN) {
/* Local pid is being monitored */
erts_print(to, arg, "Remotely monitored by: %T %T\n",
- mon->pid, rmon->pid);
+ mon->u.pid, rmon->u.pid);
} else {
- erts_print(to, arg, "Remote monitoring: %T ", mon->pid);
- if (is_not_atom(rmon->pid))
- erts_print(to, arg, "%T\n", rmon->pid);
+ erts_print(to, arg, "Remote monitoring: %T ", mon->u.pid);
+ if (is_not_atom(rmon->u.pid))
+ erts_print(to, arg, "%T\n", rmon->u.pid);
else
erts_print(to, arg, "{%T, %T}\n",
rmon->name,
- rmon->pid); /* which in this case is the
+ rmon->u.pid); /* which in this case is the
remote system name... */
}
}
-static void print_monitor_info(int to, void *arg, ErtsMonitor *mon)
+static void print_monitor_info(fmtfn_t to, void *arg, ErtsMonitor *mon)
{
struct print_to_data ptd = {to, arg};
erts_doforall_monitors(mon,&doit_print_monitor_info,&ptd);
@@ -2380,7 +2970,7 @@ static void doit_print_link_info(ErtsLink *lnk, void *vptdp)
}
}
-static void print_link_info(int to, void *arg, ErtsLink *lnk)
+static void print_link_info(fmtfn_t to, void *arg, ErtsLink *lnk)
{
struct print_to_data ptd = {to, arg};
erts_doforall_links(lnk, &doit_print_link_info, (void *) &ptd);
@@ -2401,7 +2991,7 @@ static void doit_print_nodelink_info(ErtsLink *lnk, void *vpcontext)
"Remote monitoring: %T %T\n", lnk->pid, pcontext->sysname);
}
-static void print_nodelink_info(int to, void *arg, ErtsLink *lnk, Eterm sysname)
+static void print_nodelink_info(fmtfn_t to, void *arg, ErtsLink *lnk, Eterm sysname)
{
PrintNodeLinkContext context = {{to, arg}, sysname};
erts_doforall_links(lnk, &doit_print_nodelink_info, &context);
@@ -2409,7 +2999,7 @@ static void print_nodelink_info(int to, void *arg, ErtsLink *lnk, Eterm sysname)
static int
-info_dist_entry(int to, void *arg, DistEntry *dep, int visible, int connected)
+info_dist_entry(fmtfn_t to, void *arg, DistEntry *dep, int visible, int connected)
{
if (visible && connected) {
@@ -2436,9 +3026,6 @@ info_dist_entry(int to, void *arg, DistEntry *dep, int visible, int connected)
}
erts_print(to, arg, "Name: %T", dep->sysname);
-#ifdef DEBUG
- erts_print(to, arg, " (refc=%d)", erts_refc_read(&dep->refc, 1));
-#endif
erts_print(to, arg, "\n");
if (!connected && is_nil(dep->cid)) {
if (dep->nlinks) {
@@ -2458,7 +3045,7 @@ info_dist_entry(int to, void *arg, DistEntry *dep, int visible, int connected)
return 0;
}
-int distribution_info(int to, void *arg) /* Called by break handler */
+int distribution_info(fmtfn_t to, void *arg) /* Called by break handler */
{
DistEntry *dep;
@@ -2483,7 +3070,9 @@ int distribution_info(int to, void *arg) /* Called by break handler */
}
for (dep = erts_not_connected_dist_entries; dep; dep = dep->next) {
- info_dist_entry(to, arg, dep, 0, 0);
+ if (dep != erts_this_dist_entry) {
+ info_dist_entry(to, arg, dep, 0, 0);
+ }
}
return(0);
@@ -2556,37 +3145,46 @@ BIF_RETTYPE setnode_2(BIF_ALIST_2)
goto error;
}
- net_kernel = erts_whereis_process(BIF_P, ERTS_PROC_LOCK_MAIN,
- am_net_kernel, ERTS_PROC_LOCK_MAIN, 0);
- if (!net_kernel)
+ net_kernel = erts_whereis_process(BIF_P,
+ ERTS_PROC_LOCK_MAIN,
+ am_net_kernel,
+ ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS,
+ 0);
+ if (!net_kernel || ERTS_PROC_GET_DIST_ENTRY(net_kernel))
goto error;
- /* By setting dist_entry==erts_this_dist_entry and DISTRIBUTION on
- net_kernel do_net_exist will be called when net_kernel
- is terminated !! */
- (void) ERTS_PROC_SET_DIST_ENTRY(net_kernel,
- ERTS_PROC_LOCK_MAIN,
- erts_this_dist_entry);
- erts_refc_inc(&erts_this_dist_entry->refc, 2);
+ /* By setting F_DISTRIBUTION on net_kernel,
+ * erts_do_net_exits will be called when net_kernel is terminated !! */
net_kernel->flags |= F_DISTRIBUTION;
- if (net_kernel != BIF_P)
- erts_smp_proc_unlock(net_kernel, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(net_kernel,
+ (ERTS_PROC_LOCK_STATUS
+ | ((net_kernel != BIF_P)
+ ? ERTS_PROC_LOCK_MAIN
+ : 0)));
#ifdef DEBUG
- erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
+ erts_rwmtx_rlock(&erts_dist_table_rwmtx);
ASSERT(!erts_visible_dist_entries && !erts_hidden_dist_entries);
- erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
+ erts_rwmtx_runlock(&erts_dist_table_rwmtx);
#endif
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
inc_no_nodes();
erts_set_this_node(BIF_ARG_1, (Uint32) creation);
erts_is_alive = 1;
send_nodes_mon_msgs(NULL, am_nodeup, BIF_ARG_1, am_visible, NIL);
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+
+ /*
+ * Note erts_this_dist_entry is changed by erts_set_this_node(),
+ * so we *need* to use the new one after erts_set_this_node()
+ * is called.
+ */
+ erts_ref_dist_entry(erts_this_dist_entry);
+ ERTS_PROC_SET_DIST_ENTRY(net_kernel, erts_this_dist_entry);
BIF_RET(am_true);
@@ -2617,18 +3215,18 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3)
Eterm ic, oc;
Eterm *tp;
DistEntry *dep = NULL;
+ ErtsProcLocks proc_unlock = 0;
+ Process *proc;
Port *pp = NULL;
- /* Prepare for success */
- ERTS_BIF_PREP_RET(ret, am_true);
-
/*
* Check and pick out arguments
*/
if (!is_node_name_atom(BIF_ARG_1) ||
- is_not_internal_port(BIF_ARG_2) ||
- (erts_this_node->sysname == am_Noname)) {
+ !(is_internal_port(BIF_ARG_2)
+ || is_internal_pid(BIF_ARG_2))
+ || (erts_this_node->sysname == am_Noname)) {
goto badarg;
}
@@ -2672,77 +3270,124 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3)
else if (!dep)
goto system_limit; /* Should never happen!!! */
- pp = erts_id2port_sflgs(BIF_ARG_2,
- BIF_P,
- ERTS_PROC_LOCK_MAIN,
- ERTS_PORT_SFLGS_INVALID_LOOKUP);
- erts_smp_de_rwlock(dep);
+ if (is_internal_pid(BIF_ARG_2)) {
+ if (BIF_P->common.id == BIF_ARG_2) {
+ proc_unlock = 0;
+ proc = BIF_P;
+ }
+ else {
+ proc_unlock = ERTS_PROC_LOCK_MAIN;
+ proc = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN,
+ BIF_ARG_2, proc_unlock);
+ }
+ erts_de_rwlock(dep);
- if (!pp || (erts_atomic32_read_nob(&pp->state)
- & ERTS_PORT_SFLG_EXITING))
- goto badarg;
+ if (!proc)
+ goto badarg;
+ else if (proc == ERTS_PROC_LOCK_BUSY) {
+ proc_unlock = 0;
+ goto yield;
+ }
- if ((pp->drv_ptr->flags & ERL_DRV_FLAG_SOFT_BUSY) == 0)
- goto badarg;
+ erts_proc_lock(proc, ERTS_PROC_LOCK_STATUS);
+ proc_unlock |= ERTS_PROC_LOCK_STATUS;
+
+ if (ERTS_PROC_GET_DIST_ENTRY(proc)) {
+ if (dep == ERTS_PROC_GET_DIST_ENTRY(proc)
+ && (proc->flags & F_DISTRIBUTION)
+ && dep->cid == BIF_ARG_2) {
+ ERTS_BIF_PREP_RET(ret, erts_make_dhandle(BIF_P, dep));
+ goto done;
+ }
+ goto badarg;
+ }
+
+ if (is_not_nil(dep->cid))
+ goto badarg;
- if (dep->cid == BIF_ARG_2 && pp->dist_entry == dep)
- goto done; /* Already set */
+ proc->flags |= F_DISTRIBUTION;
+ ERTS_PROC_SET_DIST_ENTRY(proc, dep);
+
+ proc_unlock &= ~ERTS_PROC_LOCK_STATUS;
+ erts_proc_unlock(proc, ERTS_PROC_LOCK_STATUS);
+
+ dep->send = NULL; /* Only for distr ports... */
- if (dep->status & ERTS_DE_SFLG_EXITING) {
- /* Suspend on dist entry waiting for the exit to finish */
- ErtsProcList *plp = erts_proclist_create(BIF_P);
- plp->next = NULL;
- erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
- erts_smp_mtx_lock(&dep->qlock);
- erts_proclist_store_last(&dep->suspended, plp);
- erts_smp_mtx_unlock(&dep->qlock);
- goto yield;
}
+ else {
- ASSERT(!(dep->status & ERTS_DE_SFLG_EXITING));
+ pp = erts_id2port_sflgs(BIF_ARG_2,
+ BIF_P,
+ ERTS_PROC_LOCK_MAIN,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP);
+ erts_de_rwlock(dep);
- if (pp->dist_entry || is_not_nil(dep->cid))
- goto badarg;
+ if (!pp || (erts_atomic32_read_nob(&pp->state)
+ & ERTS_PORT_SFLG_EXITING))
+ goto badarg;
- erts_atomic32_read_bor_nob(&pp->state, ERTS_PORT_SFLG_DISTRIBUTION);
+ if ((pp->drv_ptr->flags & ERL_DRV_FLAG_SOFT_BUSY) == 0)
+ goto badarg;
- /*
- * Dist-ports do not use the "busy port message queue" functionality, but
- * instead use "busy dist entry" functionality.
- */
- {
- ErlDrvSizeT disable = ERL_DRV_BUSY_MSGQ_DISABLED;
- erl_drv_busy_msgq_limits(ERTS_Port2ErlDrvPort(pp), &disable, NULL);
- }
+ if (dep->cid == BIF_ARG_2 && pp->dist_entry == dep) {
+ ERTS_BIF_PREP_RET(ret, erts_make_dhandle(BIF_P, dep));
+ goto done; /* Already set */
+ }
- pp->dist_entry = dep;
+ if (dep->status & ERTS_DE_SFLG_EXITING) {
+ /* Suspend on dist entry waiting for the exit to finish */
+ ErtsProcList *plp = erts_proclist_create(BIF_P);
+ plp->next = NULL;
+ erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
+ erts_mtx_lock(&dep->qlock);
+ erts_proclist_store_last(&dep->suspended, plp);
+ erts_mtx_unlock(&dep->qlock);
+ goto yield;
+ }
- dep->version = version;
- dep->creation = 0;
+ ASSERT(!(dep->status & ERTS_DE_SFLG_EXITING));
- ASSERT(pp->drv_ptr->outputv || pp->drv_ptr->output);
+ if (pp->dist_entry || is_not_nil(dep->cid))
+ goto badarg;
-#if 1
- dep->send = (pp->drv_ptr->outputv
- ? dist_port_commandv
- : dist_port_command);
-#else
- dep->send = dist_port_command;
-#endif
- ASSERT(dep->send);
+ erts_atomic32_read_bor_nob(&pp->state, ERTS_PORT_SFLG_DISTRIBUTION);
+
+ pp->dist_entry = dep;
+
+ ASSERT(pp->drv_ptr->outputv || pp->drv_ptr->output);
+
+ dep->send = (pp->drv_ptr->outputv
+ ? dist_port_commandv
+ : dist_port_command);
+ ASSERT(dep->send);
+
+ /*
+ * Dist-ports do not use the "busy port message queue" functionality, but
+ * instead use "busy dist entry" functionality.
+ */
+ {
+ ErlDrvSizeT disable = ERL_DRV_BUSY_MSGQ_DISABLED;
+ erl_drv_busy_msgq_limits(ERTS_Port2ErlDrvPort(pp), &disable, NULL);
+ }
+
+ }
+
+ dep->version = version;
+ dep->creation = 0;
#ifdef DEBUG
- erts_smp_mtx_lock(&dep->qlock);
- ASSERT(dep->qsize == 0);
- erts_smp_mtx_unlock(&dep->qlock);
+ ASSERT(erts_atomic_read_nob(&dep->qsize) == 0);
#endif
- erts_set_dist_entry_connected(dep, BIF_ARG_2, flags);
-
if (flags & DFLAG_DIST_HDR_ATOM_CACHE)
create_cache(dep);
- erts_smp_de_rwunlock(dep);
+ erts_set_dist_entry_connected(dep, BIF_ARG_2, flags);
+
+ erts_de_rwunlock(dep);
+
+ ERTS_BIF_PREP_RET(ret, erts_make_dhandle(BIF_P, dep));
+
dep = NULL; /* inc of refc transferred to port (dist_entry field) */
inc_no_nodes();
@@ -2755,13 +3400,16 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3)
done:
if (dep && dep != erts_this_dist_entry) {
- erts_smp_de_rwunlock(dep);
+ erts_de_rwunlock(dep);
erts_deref_dist_entry(dep);
}
if (pp)
erts_port_release(pp);
+ if (proc_unlock)
+ erts_proc_unlock(proc, proc_unlock);
+
return ret;
yield:
@@ -2807,7 +3455,7 @@ BIF_RETTYPE dist_exit_3(BIF_ALIST_3)
if (BIF_P->common.id == local) {
lp_locks = ERTS_PROC_LOCKS_ALL;
lp = BIF_P;
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR);
}
else {
lp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
@@ -2826,21 +3474,17 @@ BIF_RETTYPE dist_exit_3(BIF_ALIST_3)
NIL,
NULL,
0);
-#ifdef ERTS_SMP
if (lp == BIF_P)
lp_locks &= ~ERTS_PROC_LOCK_MAIN;
-#endif
- erts_smp_proc_unlock(lp, lp_locks);
+ erts_proc_unlock(lp, lp_locks);
if (lp == BIF_P) {
- erts_aint32_t state = erts_smp_atomic32_read_acqb(&BIF_P->state);
+ erts_aint32_t state = erts_atomic32_read_acqb(&BIF_P->state);
/*
* We may have exited current process and may have to take action.
*/
if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) {
-#ifdef ERTS_SMP
if (state & ERTS_PSFLG_PENDING_EXIT)
erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN);
-#endif
ERTS_BIF_EXITED(BIF_P);
}
}
@@ -2926,13 +3570,13 @@ BIF_RETTYPE nodes_1(BIF_ALIST_1)
length = 0;
- erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
+ erts_rwmtx_rlock(&erts_dist_table_rwmtx);
- ASSERT(erts_no_of_not_connected_dist_entries >= 0);
+ ASSERT(erts_no_of_not_connected_dist_entries > 0);
ASSERT(erts_no_of_hidden_dist_entries >= 0);
ASSERT(erts_no_of_visible_dist_entries >= 0);
if(not_connected)
- length += erts_no_of_not_connected_dist_entries;
+ length += (erts_no_of_not_connected_dist_entries - 1);
if(hidden)
length += erts_no_of_hidden_dist_entries;
if(visible)
@@ -2943,7 +3587,7 @@ BIF_RETTYPE nodes_1(BIF_ALIST_1)
result = NIL;
if (length == 0) {
- erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
+ erts_rwmtx_runlock(&erts_dist_table_rwmtx);
goto done;
}
@@ -2954,8 +3598,10 @@ BIF_RETTYPE nodes_1(BIF_ALIST_1)
#endif
if(not_connected)
for(dep = erts_not_connected_dist_entries; dep; dep = dep->next) {
- result = CONS(hp, dep->sysname, result);
- hp += 2;
+ if (dep != erts_this_dist_entry) {
+ result = CONS(hp, dep->sysname, result);
+ hp += 2;
+ }
}
if(hidden)
for(dep = erts_hidden_dist_entries; dep; dep = dep->next) {
@@ -2972,7 +3618,7 @@ BIF_RETTYPE nodes_1(BIF_ALIST_1)
hp += 2;
}
ASSERT(endp == hp);
- erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
+ erts_rwmtx_runlock(&erts_dist_table_rwmtx);
done:
UnUseTmpHeap(2,BIF_P);
@@ -3027,15 +3673,15 @@ monitor_node(Process* p, Eterm Node, Eterm Bool, Eterm Options)
if (dep == erts_this_dist_entry)
goto done;
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK);
- erts_smp_de_rlock(dep);
+ erts_proc_lock(p, ERTS_PROC_LOCK_LINK);
+ erts_de_rlock(dep);
if (ERTS_DE_IS_NOT_CONNECTED(dep)) {
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
- erts_smp_de_runlock(dep);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_LINK);
+ erts_de_runlock(dep);
goto do_trap;
}
- erts_smp_de_links_lock(dep);
- erts_smp_de_runlock(dep);
+ erts_de_links_lock(dep);
+ erts_de_runlock(dep);
if (Bool == am_true) {
ASSERT(dep->cid != NIL);
@@ -3062,11 +3708,10 @@ monitor_node(Process* p, Eterm Node, Eterm Bool, Eterm Options)
}
}
- erts_smp_de_links_unlock(dep);
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
+ erts_de_links_unlock(dep);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_LINK);
done:
- erts_deref_dist_entry(dep);
BIF_RET(am_true);
}
@@ -3095,9 +3740,9 @@ BIF_RETTYPE net_kernel_dflag_unicode_io_1(BIF_ALIST_1)
if (de == erts_this_dist_entry) {
BIF_RET(am_true);
}
- erts_smp_de_rlock(de);
+ erts_de_rlock(de);
f = de->flags;
- erts_smp_de_runlock(de);
+ erts_de_runlock(de);
BIF_RET(((f & DFLAG_UNICODE_IO) ? am_true : am_false));
}
@@ -3127,7 +3772,7 @@ struct ErtsNodesMonitor_ {
Uint16 no;
};
-static erts_smp_mtx_t nodes_monitors_mtx;
+static erts_mtx_t nodes_monitors_mtx;
static ErtsNodesMonitor *nodes_monitors;
static ErtsNodesMonitor *nodes_monitors_end;
@@ -3145,7 +3790,8 @@ static ErtsNodesMonitor *nodes_monitors_end;
static void
init_nodes_monitors(void)
{
- erts_smp_mtx_init(&nodes_monitors_mtx, "nodes_monitors");
+ erts_mtx_init(&nodes_monitors_mtx, "nodes_monitors", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
nodes_monitors = NULL;
nodes_monitors_end = NULL;
}
@@ -3186,11 +3832,16 @@ send_nodes_mon_msg(Process *rp,
Uint sz)
{
Eterm msg;
- ErlHeapFragment* bp;
+ Eterm *hp;
+ ErtsMessage *mp;
ErlOffHeap *ohp;
- Eterm *hp = erts_alloc_message_heap(sz, &bp, &ohp, rp, rp_locksp);
#ifdef DEBUG
- Eterm *hend = hp + sz;
+ Eterm *hend;
+#endif
+
+ mp = erts_alloc_message_heap(rp, rp_locksp, sz, &hp, &ohp);
+#ifdef DEBUG
+ hend = hp + sz;
#endif
if (!nmp->opts) {
@@ -3236,11 +3887,7 @@ send_nodes_mon_msg(Process *rp,
}
ASSERT(hend == hp);
- erts_queue_message(rp, rp_locksp, bp, msg, NIL
-#ifdef USE_VM_PROBES
- , NIL
-#endif
- );
+ erts_queue_message(rp, *rp_locksp, mp, msg, am_system);
}
static void
@@ -3269,10 +3916,10 @@ send_nodes_mon_msgs(Process *c_p, Eterm what, Eterm node, Eterm type, Eterm reas
}
#endif
- ERTS_SMP_LC_ASSERT(!c_p
+ ERTS_LC_ASSERT(!c_p
|| (erts_proc_lc_my_proc_locks(c_p)
== ERTS_PROC_LOCK_MAIN));
- erts_smp_mtx_lock(&nodes_monitors_mtx);
+ erts_mtx_lock(&nodes_monitors_mtx);
for (nmp = nodes_monitors; nmp; nmp = nmp->next) {
int i;
@@ -3297,7 +3944,7 @@ send_nodes_mon_msgs(Process *c_p, Eterm what, Eterm node, Eterm type, Eterm reas
continue;
break;
default:
- erl_exit(ERTS_ABORT_EXIT, "Bad node type found\n");
+ erts_exit(ERTS_ABORT_EXIT, "Bad node type found\n");
}
}
@@ -3305,7 +3952,7 @@ send_nodes_mon_msgs(Process *c_p, Eterm what, Eterm node, Eterm type, Eterm reas
if (rp) {
if (rp == c_p)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
}
rp = nmp->proc;
@@ -3332,10 +3979,10 @@ send_nodes_mon_msgs(Process *c_p, Eterm what, Eterm node, Eterm type, Eterm reas
if (rp) {
if (rp == c_p)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
}
- erts_smp_mtx_unlock(&nodes_monitors_mtx);
+ erts_mtx_unlock(&nodes_monitors_mtx);
}
static Eterm
@@ -3345,8 +3992,8 @@ insert_nodes_monitor(Process *c_p, Uint32 opts)
Eterm res = am_false;
ErtsNodesMonitor *xnmp, *nmp;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&nodes_monitors_mtx));
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN);
+ ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&nodes_monitors_mtx));
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN);
xnmp = c_p->nodes_monitors;
if (xnmp) {
@@ -3430,8 +4077,8 @@ remove_nodes_monitors(Process *c_p, Uint32 opts, int all)
Eterm res = am_false;
ErtsNodesMonitor *nmp;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&nodes_monitors_mtx));
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN);
+ ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&nodes_monitors_mtx));
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN);
nmp = c_p->nodes_monitors;
ASSERT(!nmp || !nmp->prev || nmp->prev->proc != c_p);
@@ -3473,23 +4120,23 @@ remove_nodes_monitors(Process *c_p, Uint32 opts, int all)
void
erts_delete_nodes_monitors(Process *c_p, ErtsProcLocks locks)
{
-#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
if (c_p) {
ErtsProcLocks might_unlock = locks & ~ERTS_PROC_LOCK_MAIN;
if (might_unlock)
erts_proc_lc_might_unlock(c_p, might_unlock);
}
#endif
- if (erts_smp_mtx_trylock(&nodes_monitors_mtx) == EBUSY) {
+ if (erts_mtx_trylock(&nodes_monitors_mtx) == EBUSY) {
ErtsProcLocks unlock_locks = locks & ~ERTS_PROC_LOCK_MAIN;
if (c_p && unlock_locks)
- erts_smp_proc_unlock(c_p, unlock_locks);
- erts_smp_mtx_lock(&nodes_monitors_mtx);
+ erts_proc_unlock(c_p, unlock_locks);
+ erts_mtx_lock(&nodes_monitors_mtx);
if (c_p && unlock_locks)
- erts_smp_proc_lock(c_p, unlock_locks);
+ erts_proc_lock(c_p, unlock_locks);
}
remove_nodes_monitors(c_p, 0, 1);
- erts_smp_mtx_unlock(&nodes_monitors_mtx);
+ erts_mtx_unlock(&nodes_monitors_mtx);
}
Eterm
@@ -3500,7 +4147,7 @@ erts_monitor_nodes(Process *c_p, Eterm on, Eterm olist)
Uint16 opts = (Uint16) 0;
ASSERT(c_p);
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
if (on != am_true && on != am_false)
return THE_NON_VALUE;
@@ -3556,14 +4203,14 @@ erts_monitor_nodes(Process *c_p, Eterm on, Eterm olist)
return THE_NON_VALUE;
}
- erts_smp_mtx_lock(&nodes_monitors_mtx);
+ erts_mtx_lock(&nodes_monitors_mtx);
if (on == am_true)
res = insert_nodes_monitor(c_p, opts);
else
res = remove_nodes_monitors(c_p, opts, 0);
- erts_smp_mtx_unlock(&nodes_monitors_mtx);
+ erts_mtx_unlock(&nodes_monitors_mtx);
return res;
}
@@ -3586,8 +4233,8 @@ erts_processes_monitoring_nodes(Process *c_p)
#endif
ASSERT(c_p);
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
- erts_smp_mtx_lock(&nodes_monitors_mtx);
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
+ erts_mtx_lock(&nodes_monitors_mtx);
sz = 0;
szp = &sz;
@@ -3606,7 +4253,7 @@ erts_processes_monitoring_nodes(Process *c_p)
case ERTS_NODES_MON_OPT_TYPES: type = am_all; break;
case ERTS_NODES_MON_OPT_TYPE_VISIBLE: type = am_visible; break;
case ERTS_NODES_MON_OPT_TYPE_HIDDEN: type = am_hidden; break;
- default: erl_exit(ERTS_ABORT_EXIT, "Bad node type found\n");
+ default: erts_exit(ERTS_ABORT_EXIT, "Bad node type found\n");
}
olist = erts_bld_cons(hpp, szp,
erts_bld_tuple(hpp, szp, 2,
@@ -3636,7 +4283,7 @@ erts_processes_monitoring_nodes(Process *c_p)
ASSERT(hp == hend);
- erts_smp_mtx_unlock(&nodes_monitors_mtx);
+ erts_mtx_unlock(&nodes_monitors_mtx);
return res;
}
diff --git a/erts/emulator/beam/dist.h b/erts/emulator/beam/dist.h
index f32b999198..d4765c50b8 100644
--- a/erts/emulator/beam/dist.h
+++ b/erts/emulator/beam/dist.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2014. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -22,6 +23,7 @@
#include "erl_process.h"
#include "erl_node_tables.h"
+#include "zlib.h"
#define DFLAG_PUBLISHED 0x01
#define DFLAG_ATOM_CACHE 0x02
@@ -41,6 +43,8 @@
#define DFLAG_INTERNAL_TAGS 0x8000
#define DFLAG_UTF8_ATOMS 0x10000
#define DFLAG_MAP_TAG 0x20000
+#define DFLAG_BIG_CREATION 0x40000
+#define DFLAG_SEND_SENDER 0x80000
/* All flags that should be enabled when term_to_binary/1 is used. */
#define TERM_TO_BINARY_DFLAGS (DFLAG_EXTENDED_REFERENCES \
@@ -49,7 +53,8 @@
| DFLAG_EXTENDED_PIDS_PORTS \
| DFLAG_EXPORT_PTR_TAG \
| DFLAG_BIT_BINARIES \
- | DFLAG_MAP_TAG)
+ | DFLAG_MAP_TAG \
+ | DFLAG_BIG_CREATION)
/* opcodes used in distribution messages */
#define DOP_LINK 1
@@ -70,6 +75,9 @@
#define DOP_DEMONITOR_P 20
#define DOP_MONITOR_P_EXIT 21
+#define DOP_SEND_SENDER 22
+#define DOP_SEND_SENDER_TT 23
+
/* distribution trap functions */
extern Export* dsend2_trap;
extern Export* dsend3_trap;
@@ -96,7 +104,7 @@ typedef struct {
} ErtsDSigData;
#define ERTS_DE_IS_NOT_CONNECTED(DEP) \
- (ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&(DEP)->rwmtx) \
+ (ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&(DEP)->rwmtx) \
|| erts_lc_rwmtx_is_rwlocked(&(DEP)->rwmtx)), \
(is_nil((DEP)->cid) || ((DEP)->status & ERTS_DE_SFLG_EXITING)))
@@ -111,8 +119,8 @@ extern int erts_is_alive;
* erts_dsig_prepare() prepares a send of a distributed signal.
* One of the values defined below are returned. If the returned
* value is another than ERTS_DSIG_PREP_CONNECTED, the
- * distributed signal cannot be sent before apropriate actions
- * have been taken. Apropriate actions would typically be setting
+ * distributed signal cannot be sent before appropriate actions
+ * have been taken. Appropriate actions would typically be setting
* up the connection.
*/
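
A hedged sketch of the caller protocol this comment describes; the ERTS_DSIG_PREP_* names come from this header, but the numeric values and everything else below are illustrative stand-ins:

#include <stdio.h>

#define ERTS_DSIG_PREP_NOT_CONNECTED  0   /* values illustrative */
#define ERTS_DSIG_PREP_CONNECTED      1
#define ERTS_DSIG_PREP_WOULD_SUSPEND  2

/* Stand-in for erts_dsig_prepare(): decides whether a signal may go out. */
static int prepare_send(int connected, int queue_busy, int no_suspend)
{
    if (!connected)
        return ERTS_DSIG_PREP_NOT_CONNECTED;
    if (no_suspend && queue_busy)
        return ERTS_DSIG_PREP_WOULD_SUSPEND;
    return ERTS_DSIG_PREP_CONNECTED;
}

int main(void)
{
    switch (prepare_send(1, 1, 1)) {
    case ERTS_DSIG_PREP_CONNECTED:     puts("send the signal");           break;
    case ERTS_DSIG_PREP_WOULD_SUSPEND: puts("busy: yield, do not block"); break;
    default:                           puts("set up the connection first");
    }
    return 0;
}
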
@@ -149,21 +157,18 @@ erts_dsig_prepare(ErtsDSigData *dsdp,
if (!dep)
return ERTS_DSIG_PREP_NOT_CONNECTED;
if (dspl == ERTS_DSP_RWLOCK)
- erts_smp_de_rwlock(dep);
+ erts_de_rwlock(dep);
else
- erts_smp_de_rlock(dep);
+ erts_de_rlock(dep);
if (ERTS_DE_IS_NOT_CONNECTED(dep)) {
failure = ERTS_DSIG_PREP_NOT_CONNECTED;
goto fail;
}
if (no_suspend) {
- failure = ERTS_DSIG_PREP_CONNECTED;
- erts_smp_mtx_lock(&dep->qlock);
- if (dep->qflgs & ERTS_DE_QFLG_BUSY)
+ if (erts_atomic32_read_acqb(&dep->qflgs) & ERTS_DE_QFLG_BUSY) {
failure = ERTS_DSIG_PREP_WOULD_SUSPEND;
- erts_smp_mtx_unlock(&dep->qlock);
- if (failure == ERTS_DSIG_PREP_WOULD_SUSPEND)
goto fail;
+ }
}
dsdp->proc = proc;
dsdp->dep = dep;
@@ -171,14 +176,14 @@ erts_dsig_prepare(ErtsDSigData *dsdp,
dsdp->connection_id = dep->connection_id;
dsdp->no_suspend = no_suspend;
if (dspl == ERTS_DSP_NO_LOCK)
- erts_smp_de_runlock(dep);
+ erts_de_runlock(dep);
return ERTS_DSIG_PREP_CONNECTED;
fail:
if (dspl == ERTS_DSP_RWLOCK)
- erts_smp_de_rwunlock(dep);
+ erts_de_rwunlock(dep);
else
- erts_smp_de_runlock(dep);
+ erts_de_runlock(dep);
return failure;
}
@@ -190,7 +195,7 @@ void erts_schedule_dist_command(Port *prt, DistEntry *dist_entry)
Eterm id;
if (prt) {
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
ASSERT((erts_atomic32_read_nob(&prt->state)
& ERTS_PORT_SFLGS_DEAD) == 0);
ASSERT(prt->dist_entry);
@@ -200,7 +205,7 @@ void erts_schedule_dist_command(Port *prt, DistEntry *dist_entry)
}
else {
ASSERT(dist_entry);
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&dist_entry->rwmtx)
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&dist_entry->rwmtx)
|| erts_lc_rwmtx_is_rwlocked(&dist_entry->rwmtx));
ASSERT(is_internal_port(dist_entry->cid));
@@ -208,7 +213,7 @@ void erts_schedule_dist_command(Port *prt, DistEntry *dist_entry)
id = dep->cid;
}
- if (!erts_smp_atomic_xchg_mb(&dep->dist_cmd_scheduled, 1))
+ if (!erts_atomic_xchg_mb(&dep->dist_cmd_scheduled, 1))
erts_port_task_schedule(id, &dep->dist_cmd, ERTS_PORT_TASK_DIST_CMD);
}
@@ -234,7 +239,7 @@ erts_remove_dist_link(ErtsDistLinkData *dldp,
Eterm rid,
DistEntry *dep)
{
- erts_smp_de_links_lock(dep);
+ erts_de_links_lock(dep);
dldp->d_lnk = erts_lookup_link(dep->nlinks, lid);
if (!dldp->d_lnk)
dldp->d_sub_lnk = NULL;
@@ -244,7 +249,7 @@ erts_remove_dist_link(ErtsDistLinkData *dldp,
? NULL
: erts_remove_link(&dep->nlinks, lid));
}
- erts_smp_de_links_unlock(dep);
+ erts_de_links_unlock(dep);
}
ERTS_GLB_INLINE int
@@ -264,17 +269,106 @@ erts_destroy_dist_link(ErtsDistLinkData *dldp)
#endif
+
+
+/* Define for testing */
+/* #define EXTREME_TTB_TRAPPING 1 */
+
+#ifndef EXTREME_TTB_TRAPPING
+#define TERM_TO_BINARY_LOOP_FACTOR 32
+#else
+#define TERM_TO_BINARY_LOOP_FACTOR 1
+#endif
+
+typedef enum { TTBSize, TTBEncode, TTBCompress } TTBState;
+typedef struct TTBSizeContext_ {
+ Uint flags;
+ int level;
+ Uint result;
+ Eterm obj;
+ ErtsWStack wstack;
+} TTBSizeContext;
+
+typedef struct TTBEncodeContext_ {
+ Uint flags;
+ int level;
+ byte* ep;
+ Eterm obj;
+ ErtsWStack wstack;
+ Binary *result_bin;
+} TTBEncodeContext;
+
+typedef struct {
+ Uint real_size;
+ Uint dest_len;
+ byte *dbytes;
+ Binary *result_bin;
+ Binary *destination_bin;
+ z_stream stream;
+} TTBCompressContext;
+
+typedef struct {
+ int alive;
+ TTBState state;
+ union {
+ TTBSizeContext sc;
+ TTBEncodeContext ec;
+ TTBCompressContext cc;
+ } s;
+} TTBContext;
+
+enum erts_dsig_send_phase {
+ ERTS_DSIG_SEND_PHASE_INIT,
+ ERTS_DSIG_SEND_PHASE_MSG_SIZE,
+ ERTS_DSIG_SEND_PHASE_ALLOC,
+ ERTS_DSIG_SEND_PHASE_MSG_ENCODE,
+ ERTS_DSIG_SEND_PHASE_FIN
+};
+
+struct erts_dsig_send_context {
+ enum erts_dsig_send_phase phase;
+ Sint reds;
+
+ Eterm ctl;
+ Eterm msg;
+ int force_busy;
+ Uint32 pass_through_size;
+ Uint data_size, dhdr_ext_size;
+ ErtsAtomCacheMap *acmp;
+ ErtsDistOutputBuf *obuf;
+ Uint32 flags;
+ Process *c_p;
+ union {
+ TTBSizeContext sc;
+ TTBEncodeContext ec;
+ }u;
+};
+
+typedef struct {
+ int suspend;
+
+ Eterm ctl_heap[6];
+ ErtsDSigData dsd;
+ DistEntry* dep_to_deref;
+ DistEntry *dep;
+ struct erts_dsig_send_context dss;
+
+ Eterm return_term;
+}ErtsSendContext;
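
The phase enum and context structs above exist so erts_dsig_send() can stop mid-encode when its reduction budget runs out and resume later from saved state. A stand-alone miniature of that shape (all names hypothetical; SEND_CONTINUE mirrors ERTS_DSIG_SEND_CONTINUE):

#include <stdio.h>
#include <stddef.h>

typedef enum { PH_INIT, PH_SIZE, PH_ALLOC, PH_ENCODE, PH_FIN } phase_t;
typedef struct { phase_t phase; int reds; size_t pos, size; } send_ctx;

#define SEND_OK       0
#define SEND_CONTINUE 2

static int send_step(send_ctx *c)
{
    while (c->reds > 0) {
        switch (c->phase) {
        case PH_INIT:   c->pos = 0;                c->phase = PH_SIZE;   break;
        case PH_SIZE:   c->size = 4096; c->reds--; c->phase = PH_ALLOC;  break;
        case PH_ALLOC:  c->reds--;                 c->phase = PH_ENCODE; break;
        case PH_ENCODE:                /* encode one bounded chunk */
            c->pos += 1024; c->reds--;
            if (c->pos >= c->size) c->phase = PH_FIN;
            break;
        case PH_FIN:    return SEND_OK;
        }
    }
    return SEND_CONTINUE;   /* budget gone: caller saves *c and traps */
}

int main(void)
{
    send_ctx c = { PH_INIT, 3, 0, 0 };
    while (send_step(&c) == SEND_CONTINUE)
        c.reds = 3;         /* rescheduled with a fresh reduction budget */
    puts("sent");
    return 0;
}
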
+
+
/*
* erts_dsig_send_* return values.
*/
#define ERTS_DSIG_SEND_OK 0
#define ERTS_DSIG_SEND_YIELD 1
+#define ERTS_DSIG_SEND_CONTINUE 2
extern int erts_dsig_send_link(ErtsDSigData *, Eterm, Eterm);
-extern int erts_dsig_send_msg(ErtsDSigData *, Eterm, Eterm);
+extern int erts_dsig_send_msg(Eterm, Eterm, ErtsSendContext*);
extern int erts_dsig_send_exit_tt(ErtsDSigData *, Eterm, Eterm, Eterm, Eterm);
extern int erts_dsig_send_unlink(ErtsDSigData *, Eterm, Eterm);
-extern int erts_dsig_send_reg_msg(ErtsDSigData *, Eterm, Eterm);
+extern int erts_dsig_send_reg_msg(Eterm, Eterm, ErtsSendContext*);
extern int erts_dsig_send_group_leader(ErtsDSigData *, Eterm, Eterm);
extern int erts_dsig_send_exit(ErtsDSigData *, Eterm, Eterm, Eterm);
extern int erts_dsig_send_exit2(ErtsDSigData *, Eterm, Eterm, Eterm);
@@ -282,6 +376,10 @@ extern int erts_dsig_send_demonitor(ErtsDSigData *, Eterm, Eterm, Eterm, int);
extern int erts_dsig_send_monitor(ErtsDSigData *, Eterm, Eterm, Eterm);
extern int erts_dsig_send_m_exit(ErtsDSigData *, Eterm, Eterm, Eterm, Eterm);
+extern int erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx);
+extern int erts_dsend_context_dtor(Binary*);
+extern Eterm erts_dsend_export_trap_context(Process* p, ErtsSendContext* ctx);
+
extern int erts_dist_command(Port *prt, int reds);
extern void erts_dist_port_not_busy(Port *prt);
extern void erts_kill_dist_connection(DistEntry *dep, Uint32);
diff --git a/erts/emulator/beam/dtrace-wrapper.h b/erts/emulator/beam/dtrace-wrapper.h
index 6ec0c91e21..15ea182976 100644
--- a/erts/emulator/beam/dtrace-wrapper.h
+++ b/erts/emulator/beam/dtrace-wrapper.h
@@ -1,19 +1,20 @@
/*
* %CopyrightBegin%
*
- * Copyright Dustin Sallings, Michal Ptaszek, Scott Lystig Fritchie 2011-2012.
+ * Copyright Dustin Sallings, Michal Ptaszek, Scott Lystig Fritchie 2011-2017.
* All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -73,7 +74,7 @@
#if defined(_SDT_PROBE) && !defined(STAP_PROBE11)
/* SLF: This is Ubuntu 11-style SystemTap hackery */
-/* work arround for missing STAP macro */
+/* workaround for missing STAP macro */
#define STAP_PROBE11(provider,name,arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10,arg11) \
_SDT_PROBE(provider, name, 11, \
(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10,arg11))
diff --git a/erts/emulator/beam/elib_memmove.c b/erts/emulator/beam/elib_memmove.c
index d2fe8649ed..2f45f69026 100644
--- a/erts/emulator/beam/elib_memmove.c
+++ b/erts/emulator/beam/elib_memmove.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
diff --git a/erts/emulator/beam/erl_afit_alloc.c b/erts/emulator/beam/erl_afit_alloc.c
index eca4e3b3bb..4ebe37ee1d 100644
--- a/erts/emulator/beam/erl_afit_alloc.c
+++ b/erts/emulator/beam/erl_afit_alloc.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -53,7 +54,7 @@ static void link_free_block (Allctr_t *, Block_t *);
static void unlink_free_block (Allctr_t *, Block_t *);
-static Eterm info_options (Allctr_t *, char *, int *,
+static Eterm info_options (Allctr_t *, char *, fmtfn_t *,
void *arg, Uint **, Uint *);
static void init_atoms (void);
@@ -226,7 +227,7 @@ add_2tup(Uint **hpp, Uint *szp, Eterm *lp, Eterm el1, Eterm el2)
static Eterm
info_options(Allctr_t *allctr,
char *prefix,
- int *print_to_p,
+ fmtfn_t *print_to_p,
void *print_to_arg,
Uint **hpp,
Uint *szp)
@@ -240,7 +241,7 @@ info_options(Allctr_t *allctr,
if (hpp || szp) {
if (!atoms_initialized)
- erl_exit(1, "%s:%d: Internal error: Atoms not initialized",
+ erts_exit(ERTS_ERROR_EXIT, "%s:%d: Internal error: Atoms not initialized",
__FILE__, __LINE__);;
res = NIL;
diff --git a/erts/emulator/beam/erl_afit_alloc.h b/erts/emulator/beam/erl_afit_alloc.h
index b90ac8f7c5..74258e284a 100644
--- a/erts/emulator/beam/erl_afit_alloc.h
+++ b/erts/emulator/beam/erl_afit_alloc.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 05ac24e04d..88285d8be6 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -29,6 +30,7 @@
#endif
#define ERTS_ALLOC_C__
#define ERTS_ALC_INTERNAL__
+#define ERTS_WANT_MEM_MAPPERS
#include "sys.h"
#define ERL_THREADS_EMU_INTERNAL__
#include "erl_threads.h"
@@ -39,12 +41,14 @@
#include "erl_instrument.h"
#include "erl_mseg.h"
#include "erl_monitors.h"
-#include "erl_bif_timer.h"
+#include "erl_hl_timer.h"
#include "erl_cpu_topology.h"
#include "erl_thr_queue.h"
+#include "erl_nfunc_sched.h"
#if defined(ERTS_ALC_T_DRV_SEL_D_STATE) || defined(ERTS_ALC_T_DRV_EV_D_STATE)
#include "erl_check_io.h"
#endif
+#include "erl_bif_unique.h"
#define GET_ERL_GF_ALLOC_IMPL
#include "erl_goodfit_alloc.h"
@@ -79,14 +83,6 @@
#define ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC ERTS_ALC_DEFAULT_ENABLED_ACUL_EHEAP_ALLOC
#define ERTS_ALC_DEFAULT_ACUL_LL_ALLOC ERTS_ALC_DEFAULT_ENABLED_ACUL_LL_ALLOC
-#ifndef ERTS_SMP
-# undef ERTS_ALC_DEFAULT_ACUL
-# define ERTS_ALC_DEFAULT_ACUL 0
-# undef ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC
-# define ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC 0
-# undef ERTS_ALC_DEFAULT_ACUL_LL_ALLOC
-# define ERTS_ALC_DEFAULT_ACUL_LL_ALLOC 0
-#endif
#ifdef DEBUG
static Uint install_debug_functions(void);
@@ -102,9 +98,9 @@ static Uint install_debug_functions(void);
static int lock_all_physical_memory = 0;
-ErtsAllocatorFunctions_t erts_allctrs[ERTS_ALC_A_MAX+1];
+ErtsAllocatorFunctions_t ERTS_WRITE_UNLIKELY(erts_allctrs[ERTS_ALC_A_MAX+1]);
ErtsAllocatorInfo_t erts_allctrs_info[ERTS_ALC_A_MAX+1];
-ErtsAllocatorThrSpec_t erts_allctr_thr_spec[ERTS_ALC_A_MAX+1];
+ErtsAllocatorThrSpec_t ERTS_WRITE_UNLIKELY(erts_allctr_thr_spec[ERTS_ALC_A_MAX+1]);
#define ERTS_MIN(A, B) ((A) < (B) ? (A) : (B))
#define ERTS_MAX(A, B) ((A) > (B) ? (A) : (B))
@@ -122,10 +118,6 @@ typedef union {
static ErtsAllocatorState_t std_alloc_state;
static ErtsAllocatorState_t ll_alloc_state;
-#if HALFWORD_HEAP
-static ErtsAllocatorState_t std_low_alloc_state;
-static ErtsAllocatorState_t ll_low_alloc_state;
-#endif
static ErtsAllocatorState_t sl_alloc_state;
static ErtsAllocatorState_t temp_alloc_state;
static ErtsAllocatorState_t eheap_alloc_state;
@@ -133,40 +125,34 @@ static ErtsAllocatorState_t binary_alloc_state;
static ErtsAllocatorState_t ets_alloc_state;
static ErtsAllocatorState_t driver_alloc_state;
static ErtsAllocatorState_t fix_alloc_state;
+static ErtsAllocatorState_t literal_alloc_state;
+#ifdef ERTS_ALC_A_EXEC
+static ErtsAllocatorState_t exec_alloc_state;
+#endif
+static ErtsAllocatorState_t test_alloc_state;
+
+enum {
+ ERTS_ALC_INFO_A_ALLOC_UTIL = ERTS_ALC_A_MAX + 1,
+ ERTS_ALC_INFO_A_MSEG_ALLOC,
+ ERTS_ALC_INFO_A_ERTS_MMAP,
+ ERTS_ALC_INFO_A_DISABLED_EXEC, /* fake a disabled "exec_alloc" */
+ ERTS_ALC_INFO_A_END
+};
typedef struct {
- erts_smp_atomic32_t refc;
+ erts_atomic32_t refc;
int only_sz;
int internal;
Uint req_sched;
Process *proc;
- Eterm ref;
- Eterm ref_heap[REF_THING_SIZE];
- int allocs[ERTS_ALC_A_MAX-ERTS_ALC_A_MIN+1+2];
+ ErtsIRefStorage iref;
+ int allocs[ERTS_ALC_INFO_A_END - ERTS_ALC_A_MIN + 1];
} ErtsAllocInfoReq;
-#define ERTS_ALC_INFO_A_ALLOC_UTIL (ERTS_ALC_A_MAX + 1)
-#define ERTS_ALC_INFO_A_MSEG_ALLOC (ERTS_ALC_A_MAX + 2)
-#define ERTS_ALC_INFO_A_MAX ERTS_ALC_INFO_A_MSEG_ALLOC
-
-#if !HALFWORD_HEAP
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(aireq,
- ErtsAllocInfoReq,
- 5,
- ERTS_ALC_T_AINFO_REQ)
-#else
-static ERTS_INLINE ErtsAllocInfoReq *
-aireq_alloc(void)
-{
- return erts_alloc(ERTS_ALC_T_AINFO_REQ, sizeof(ErtsAllocInfoReq));
-}
-
-static ERTS_INLINE void
-aireq_free(ErtsAllocInfoReq *ptr)
-{
- erts_free(ERTS_ALC_T_AINFO_REQ, ptr);
-}
-#endif
+ ErtsAllocInfoReq,
+ 5,
+ ERTS_ALC_T_AINFO_REQ)
ErtsAlcType_t erts_fix_core_allocator_ix;
@@ -180,6 +166,8 @@ enum allctr_type {
struct au_init {
int enable;
int thr_spec;
+ int disable_allowed;
+ int thr_spec_allowed;
int carrier_migration_allowed;
enum allctr_type atype;
struct {
@@ -228,13 +216,12 @@ typedef struct {
struct au_init ets_alloc;
struct au_init driver_alloc;
struct au_init fix_alloc;
-#if HALFWORD_HEAP
- struct au_init std_low_alloc;
- struct au_init ll_low_alloc;
-#endif
+ struct au_init literal_alloc;
+ struct au_init exec_alloc;
+ struct au_init test_alloc;
} erts_alc_hndl_args_init_t;
-#define ERTS_AU_INIT__ {0, 0, 1, GOODFIT, DEFAULT_ALLCTR_INIT, {1,1,1,1}}
+#define ERTS_AU_INIT__ {0, 0, 1, 1, 1, GOODFIT, DEFAULT_ALLCTR_INIT, {1,1,1,1}}
#define SET_DEFAULT_ALLOC_OPTS(IP) \
do { \
@@ -258,10 +245,6 @@ set_default_sl_alloc_opts(struct au_init *ip)
#endif
ip->init.util.ts = ERTS_ALC_MTA_SHORT_LIVED;
ip->init.util.rsbcst = 80;
-#if HALFWORD_HEAP
- ip->init.util.force = 1;
- ip->init.util.low_mem = 1;
-#endif
ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
}
@@ -297,9 +280,9 @@ set_default_ll_alloc_opts(struct au_init *ip)
ip->init.util.name_prefix = "ll_";
ip->init.util.alloc_no = ERTS_ALC_A_LONG_LIVED;
#ifndef SMALL_MEMORY
- ip->init.util.mmbcs = 2*1024*1024 - 40; /* Main carrier size */
+ ip->init.util.mmbcs = 2*1024*1024; /* Main carrier size */
#else
- ip->init.util.mmbcs = 1*1024*1024 - 40; /* Main carrier size */
+ ip->init.util.mmbcs = 1*1024*1024; /* Main carrier size */
#endif
ip->init.util.ts = ERTS_ALC_MTA_LONG_LIVED;
ip->init.util.asbcst = 0;
@@ -310,11 +293,99 @@ set_default_ll_alloc_opts(struct au_init *ip)
}
static void
+set_default_literal_alloc_opts(struct au_init *ip)
+{
+ SET_DEFAULT_ALLOC_OPTS(ip);
+ ip->enable = 1;
+ ip->thr_spec = 0;
+ ip->disable_allowed = 0;
+ ip->thr_spec_allowed = 0;
+ ip->carrier_migration_allowed = 0;
+ ip->atype = BESTFIT;
+ ip->init.bf.ao = 1;
+ ip->init.util.ramv = 0;
+ ip->init.util.mmsbc = 0;
+ ip->init.util.sbct = ~((UWord) 0);
+ ip->init.util.name_prefix = "literal_";
+ ip->init.util.alloc_no = ERTS_ALC_A_LITERAL;
+#ifndef SMALL_MEMORY
+ ip->init.util.mmbcs = 1024*1024; /* Main carrier size */
+#else
+ ip->init.util.mmbcs = 256*1024; /* Main carrier size */
+#endif
+ ip->init.util.ts = ERTS_ALC_MTA_LITERAL;
+ ip->init.util.asbcst = 0;
+ ip->init.util.rsbcst = 0;
+ ip->init.util.rsbcmt = 0;
+ ip->init.util.rmbcmt = 0;
+ ip->init.util.acul = 0;
+
+#if defined(ARCH_32)
+# if HAVE_ERTS_MSEG
+ ip->init.util.mseg_alloc = &erts_alcu_literal_32_mseg_alloc;
+ ip->init.util.mseg_realloc = &erts_alcu_literal_32_mseg_realloc;
+ ip->init.util.mseg_dealloc = &erts_alcu_literal_32_mseg_dealloc;
+# endif
+ ip->init.util.sys_alloc = &erts_alcu_literal_32_sys_alloc;
+ ip->init.util.sys_realloc = &erts_alcu_literal_32_sys_realloc;
+ ip->init.util.sys_dealloc = &erts_alcu_literal_32_sys_dealloc;
+#elif defined(ARCH_64)
+# ifdef ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION
+ ip->init.util.mseg_alloc = &erts_alcu_mmapper_mseg_alloc;
+ ip->init.util.mseg_realloc = &erts_alcu_mmapper_mseg_realloc;
+ ip->init.util.mseg_dealloc = &erts_alcu_mmapper_mseg_dealloc;
+ ip->init.util.mseg_mmapper = &erts_literal_mmapper;
+# endif
+#else
+# error Unknown architecture
+#endif
+}
+
+#ifdef ERTS_ALC_A_EXEC
+static void
+set_default_exec_alloc_opts(struct au_init *ip)
+{
+ SET_DEFAULT_ALLOC_OPTS(ip);
+ ip->enable = 1;
+ ip->thr_spec = 0;
+ ip->disable_allowed = 0;
+ ip->thr_spec_allowed = 0;
+ ip->carrier_migration_allowed = 0;
+ ip->atype = BESTFIT;
+ ip->init.bf.ao = 1;
+ ip->init.util.ramv = 0;
+ ip->init.util.mmsbc = 0;
+ ip->init.util.sbct = ~((UWord) 0);
+ ip->init.util.name_prefix = "exec_";
+ ip->init.util.alloc_no = ERTS_ALC_A_EXEC;
+ ip->init.util.mmbcs = 0; /* No main carrier */
+ ip->init.util.ts = ERTS_ALC_MTA_EXEC;
+ ip->init.util.asbcst = 0;
+ ip->init.util.rsbcst = 0;
+ ip->init.util.rsbcmt = 0;
+ ip->init.util.rmbcmt = 0;
+ ip->init.util.acul = 0;
+
+# ifdef ERTS_HAVE_EXEC_MMAPPER
+ ip->init.util.mseg_alloc = &erts_alcu_mmapper_mseg_alloc;
+ ip->init.util.mseg_realloc = &erts_alcu_mmapper_mseg_realloc;
+ ip->init.util.mseg_dealloc = &erts_alcu_mmapper_mseg_dealloc;
+ ip->init.util.mseg_mmapper = &erts_exec_mmapper;
+# else
+ ip->init.util.mseg_alloc = &erts_alcu_exec_mseg_alloc;
+ ip->init.util.mseg_realloc = &erts_alcu_exec_mseg_realloc;
+ ip->init.util.mseg_dealloc = &erts_alcu_exec_mseg_dealloc;
+# endif
+}
+#endif /* ERTS_ALC_A_EXEC */
+
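Both new allocators pin all blocks into their dedicated mappings: sbct = ~((UWord) 0) raises the single-block-carrier threshold to the maximum word value, so no block size can ever exceed it and everything stays in multiblock carriers served by the configured mmapper. A hedged sketch of that threshold test (illustrative only, not the alloc_util implementation):

    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t UWord;

    /* With sbct == ~0 this can never return true, so all blocks land
     * in multiblock carriers controlled by the literal/exec mmapper. */
    static int use_single_block_carrier(UWord blk_sz, UWord sbct)
    {
        return blk_sz > sbct;
    }

    int main(void)
    {
        UWord sbct = ~((UWord) 0);
        printf("%d\n", use_single_block_carrier((UWord) 1 << 30, sbct));
        return 0;
    }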
+static void
set_default_temp_alloc_opts(struct au_init *ip)
{
SET_DEFAULT_ALLOC_OPTS(ip);
ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
ip->thr_spec = 1;
+ ip->disable_allowed = 0;
ip->carrier_migration_allowed = 0;
ip->atype = AFIT;
ip->init.util.name_prefix = "temp_";
@@ -327,10 +398,6 @@ set_default_temp_alloc_opts(struct au_init *ip)
ip->init.util.ts = ERTS_ALC_MTA_TEMPORARY;
ip->init.util.rsbcst = 90;
ip->init.util.rmbcmt = 100;
-#if HALFWORD_HEAP
- ip->init.util.force = 1;
- ip->init.util.low_mem = 1;
-#endif
}
static void
@@ -349,10 +416,6 @@ set_default_eheap_alloc_opts(struct au_init *ip)
#endif
ip->init.util.ts = ERTS_ALC_MTA_EHEAP;
ip->init.util.rsbcst = 50;
-#if HALFWORD_HEAP
- ip->init.util.force = 1;
- ip->init.util.low_mem = 1;
-#endif
ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC;
}
@@ -431,7 +494,33 @@ set_default_fix_alloc_opts(struct au_init *ip,
ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
}
-#ifdef ERTS_SMP
+static void
+set_default_test_alloc_opts(struct au_init *ip)
+{
+ SET_DEFAULT_ALLOC_OPTS(ip);
+ ip->enable = 0; /* Disabled by default */
+ ip->thr_spec = -1 * erts_no_schedulers;
+ ip->atype = AOFIRSTFIT;
+ ip->init.aoff.flavor = AOFF_BF;
+ ip->init.util.name_prefix = "test_";
+ ip->init.util.alloc_no = ERTS_ALC_A_TEST;
+ ip->init.util.mmbcs = 0; /* Main carrier size */
+ ip->init.util.ts = ERTS_ALC_MTA_TEST;
+ ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
+
+ /* Use a constant minimal MBC size */
+#if ERTS_SA_MB_CARRIERS
+ ip->init.util.smbcs = ERTS_SACRR_UNIT_SZ;
+ ip->init.util.lmbcs = ERTS_SACRR_UNIT_SZ;
+ ip->init.util.sbct = ERTS_SACRR_UNIT_SZ;
+#else
+ ip->init.util.smbcs = 1 << 12;
+ ip->init.util.lmbcs = 1 << 12;
+ ip->init.util.sbct = 1 << 12;
+#endif
+}
+
+
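The negative thr_spec (-1 * erts_no_schedulers) appears to request the thread-preferred mode, one instance per scheduler with a shared fallback, as opposed to a positive count for strictly thread-specific instances; adjust_tpref() below applies the same negation. A sketch of the sign convention (an assumption drawn from adjust_tpref, not a documented API):

    typedef enum { THR_GLOBAL, THR_SPECIFIC, THR_PREFERRED } thr_mode;

    static thr_mode thr_spec_mode(int thr_spec)
    {
        if (thr_spec > 0) return THR_SPECIFIC;  /* one instance per thread  */
        if (thr_spec < 0) return THR_PREFERRED; /* per scheduler + fallback */
        return THR_GLOBAL;                      /* single locked instance   */
    }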
static void
adjust_tpref(struct au_init *ip, int no_sched)
@@ -454,7 +543,6 @@ adjust_tpref(struct au_init *ip, int no_sched)
}
}
-#endif
static void handle_args(int *, char **, erts_alc_hndl_args_init_t *);
@@ -483,7 +571,6 @@ static void adjust_fix_alloc_sizes(UWord extra_block_size)
if (extra_block_size && erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].enabled) {
int j;
-#ifdef ERTS_SMP
if (erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].thr_spec) {
int i;
ErtsAllocatorThrSpec_t* tspec;
@@ -499,7 +586,6 @@ static void adjust_fix_alloc_sizes(UWord extra_block_size)
}
}
else
-#endif
{
Allctr_t* allctr = erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra;
for (j=0; j < ERTS_ALC_NO_FIXED_SIZES; ++j) {
@@ -522,7 +608,6 @@ strategy_support_carrier_migration(struct au_init *auip)
static ERTS_INLINE void
adjust_carrier_migration_support(struct au_init *auip)
{
-#ifdef ERTS_SMP
if (auip->init.util.acul) {
auip->thr_spec = -1; /* Need thread preferred */
@@ -536,9 +621,6 @@ adjust_carrier_migration_support(struct au_init *auip)
auip->init.aoff.flavor = AOFF_BF;
}
}
-#else
- auip->init.util.acul = 0;
-#endif
}
void
@@ -559,22 +641,31 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_PROC)]
= sizeof(Process);
-#if !HALFWORD_HEAP
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MONITOR_SH)]
= ERTS_MONITOR_SH_SIZE * sizeof(Uint);
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_NLINK_SH)]
= ERTS_LINK_SH_SIZE * sizeof(Uint);
-#endif
- fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_DRV_EV_D_STATE)]
- = sizeof(ErtsDrvEventDataState);
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_DRV_SEL_D_STATE)]
= sizeof(ErtsDrvSelectDataState);
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_NIF_SEL_D_STATE)]
+ = sizeof(ErtsNifSelectDataState);
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MSG_REF)]
- = sizeof(ErlMessage);
-#ifdef ERTS_SMP
+ = sizeof(ErtsMessageRef);
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_THR_Q_EL_SL)]
= sizeof(ErtsThrQElement_t);
-#endif
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_LL_PTIMER)]
+ = erts_timer_type_size(ERTS_ALC_T_LL_PTIMER);
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_HL_PTIMER)]
+ = erts_timer_type_size(ERTS_ALC_T_HL_PTIMER);
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_BIF_TIMER)]
+ = erts_timer_type_size(ERTS_ALC_T_BIF_TIMER);
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_NIF_EXP_TRACE)]
+ = sizeof(NifExportTrace);
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MREF_NSCHED_ENT)]
+ = sizeof(ErtsNSchedMagicRefTableEntry);
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MINDIRECTION)]
+ = ERTS_MAGIC_BIN_UNALIGNED_SIZE(sizeof(ErtsMagicIndirectionWord));
+
#ifdef HARD_DEBUG
hdbg_init();
#endif
@@ -601,6 +692,11 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
set_default_driver_alloc_opts(&init.driver_alloc);
set_default_fix_alloc_opts(&init.fix_alloc,
fix_type_sizes);
+ set_default_literal_alloc_opts(&init.literal_alloc);
+#ifdef ERTS_ALC_A_EXEC
+ set_default_exec_alloc_opts(&init.exec_alloc);
+#endif
+ set_default_test_alloc_opts(&init.test_alloc);
if (argc && argv)
handle_args(argc, argv, &init);
@@ -611,24 +707,14 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
if (mlockall(MCL_CURRENT|MCL_FUTURE) != 0) {
int err = errno;
char *errstr = err ? strerror(err) : "unknown";
- erl_exit(-1, "Failed to lock physical memory: %s (%d)\n",
+ erts_exit(1, "Failed to lock physical memory: %s (%d)\n",
errstr, err);
}
#else
- erl_exit(-1, "Failed to lock physical memory: Not supported\n");
+ erts_exit(1, "Failed to lock physical memory: Not supported\n");
#endif
}
-#ifndef ERTS_SMP
- init.sl_alloc.thr_spec = 0;
- init.std_alloc.thr_spec = 0;
- init.ll_alloc.thr_spec = 0;
- init.eheap_alloc.thr_spec = 0;
- init.binary_alloc.thr_spec = 0;
- init.ets_alloc.thr_spec = 0;
- init.driver_alloc.thr_spec = 0;
- init.fix_alloc.thr_spec = 0;
-#endif
/* Make adjustments for carrier migration support */
init.temp_alloc.init.util.acul = 0;
@@ -640,6 +726,10 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
adjust_carrier_migration_support(&init.ets_alloc);
adjust_carrier_migration_support(&init.driver_alloc);
adjust_carrier_migration_support(&init.fix_alloc);
+ adjust_carrier_migration_support(&init.literal_alloc);
+#ifdef ERTS_ALC_A_EXEC
+ adjust_carrier_migration_support(&init.exec_alloc);
+#endif
if (init.erts_alloc_config) {
/* Adjust flags that erts_alloc_config won't like */
@@ -654,6 +744,10 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
init.ets_alloc.thr_spec = 0;
init.driver_alloc.thr_spec = 0;
init.fix_alloc.thr_spec = 0;
+ init.literal_alloc.thr_spec = 0;
+#ifdef ERTS_ALC_A_EXEC
+ init.exec_alloc.thr_spec = 0;
+#endif
/* No carrier migration */
init.temp_alloc.init.util.acul = 0;
@@ -665,9 +759,12 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
init.ets_alloc.init.util.acul = 0;
init.driver_alloc.init.util.acul = 0;
init.fix_alloc.init.util.acul = 0;
+ init.literal_alloc.init.util.acul = 0;
+#ifdef ERTS_ALC_A_EXEC
+ init.exec_alloc.init.util.acul = 0;
+#endif
}
-#ifdef ERTS_SMP
/* Only temp_alloc can use thread specific interface */
if (init.temp_alloc.thr_spec)
init.temp_alloc.thr_spec = erts_no_schedulers;
@@ -681,12 +778,12 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
adjust_tpref(&init.ets_alloc, erts_no_schedulers);
adjust_tpref(&init.driver_alloc, erts_no_schedulers);
adjust_tpref(&init.fix_alloc, erts_no_schedulers);
-
-#else
- /* No thread specific if not smp */
- init.temp_alloc.thr_spec = 0;
+ adjust_tpref(&init.literal_alloc, erts_no_schedulers);
+#ifdef ERTS_ALC_A_EXEC
+ adjust_tpref(&init.exec_alloc, erts_no_schedulers);
#endif
+
/*
* The following allocators cannot be run with afit strategy.
* Make sure they don't...
@@ -699,11 +796,13 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
refuse_af_strategy(&init.ets_alloc);
refuse_af_strategy(&init.driver_alloc);
refuse_af_strategy(&init.fix_alloc);
+ refuse_af_strategy(&init.literal_alloc);
+#ifdef ERTS_ALC_A_EXEC
+ refuse_af_strategy(&init.exec_alloc);
+#endif
-#ifdef ERTS_SMP
if (!init.temp_alloc.thr_spec)
refuse_af_strategy(&init.temp_alloc);
-#endif
erts_mtrace_pre_init();
#if HAVE_ERTS_MSEG
@@ -733,24 +832,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
erts_allctrs[ERTS_ALC_A_SYSTEM].free = erts_sys_free;
erts_allctrs_info[ERTS_ALC_A_SYSTEM].enabled = 1;
-#if HALFWORD_HEAP
- /* Init low memory variants by cloning */
- init.std_low_alloc = init.std_alloc;
- init.std_low_alloc.init.util.name_prefix = "std_low_";
- init.std_low_alloc.init.util.alloc_no = ERTS_ALC_A_STANDARD_LOW;
- init.std_low_alloc.init.util.force = 1;
- init.std_low_alloc.init.util.low_mem = 1;
-
- init.ll_low_alloc = init.ll_alloc;
- init.ll_low_alloc.init.util.name_prefix = "ll_low_";
- init.ll_low_alloc.init.util.alloc_no = ERTS_ALC_A_LONG_LIVED_LOW;
- init.ll_low_alloc.init.util.force = 1;
- init.ll_low_alloc.init.util.low_mem = 1;
-
- set_au_allocator(ERTS_ALC_A_STANDARD_LOW, &init.std_low_alloc, ncpu);
- set_au_allocator(ERTS_ALC_A_LONG_LIVED_LOW, &init.ll_low_alloc, ncpu);
-#endif /* HALFWORD */
-
set_au_allocator(ERTS_ALC_A_TEMPORARY, &init.temp_alloc, ncpu);
set_au_allocator(ERTS_ALC_A_SHORT_LIVED, &init.sl_alloc, ncpu);
set_au_allocator(ERTS_ALC_A_STANDARD, &init.std_alloc, ncpu);
@@ -760,16 +841,21 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
set_au_allocator(ERTS_ALC_A_ETS, &init.ets_alloc, ncpu);
set_au_allocator(ERTS_ALC_A_DRIVER, &init.driver_alloc, ncpu);
set_au_allocator(ERTS_ALC_A_FIXED_SIZE, &init.fix_alloc, ncpu);
+ set_au_allocator(ERTS_ALC_A_LITERAL, &init.literal_alloc, ncpu);
+#ifdef ERTS_ALC_A_EXEC
+ set_au_allocator(ERTS_ALC_A_EXEC, &init.exec_alloc, ncpu);
+#endif
+ set_au_allocator(ERTS_ALC_A_TEST, &init.test_alloc, ncpu);
for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
if (!erts_allctrs[i].alloc)
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"Missing alloc function for %s\n", ERTS_ALC_A2AD(i));
if (!erts_allctrs[i].realloc)
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"Missing realloc function for %s\n", ERTS_ALC_A2AD(i));
if (!erts_allctrs[i].free)
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"Missing free function for %s\n", ERTS_ALC_A2AD(i));
}
@@ -793,14 +879,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
start_au_allocator(ERTS_ALC_A_LONG_LIVED,
&init.ll_alloc,
&ll_alloc_state);
-#if HALFWORD_HEAP
- start_au_allocator(ERTS_ALC_A_LONG_LIVED_LOW,
- &init.ll_low_alloc,
- &ll_low_alloc_state);
- start_au_allocator(ERTS_ALC_A_STANDARD_LOW,
- &init.std_low_alloc,
- &std_low_alloc_state);
-#endif
start_au_allocator(ERTS_ALC_A_EHEAP,
&init.eheap_alloc,
&eheap_alloc_state);
@@ -820,13 +898,22 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
start_au_allocator(ERTS_ALC_A_FIXED_SIZE,
&init.fix_alloc,
&fix_alloc_state);
+ start_au_allocator(ERTS_ALC_A_LITERAL,
+ &init.literal_alloc,
+ &literal_alloc_state);
+#ifdef ERTS_ALC_A_EXEC
+ start_au_allocator(ERTS_ALC_A_EXEC,
+ &init.exec_alloc,
+ &exec_alloc_state);
+#endif
+ start_au_allocator(ERTS_ALC_A_TEST,
+ &init.test_alloc,
+ &test_alloc_state);
erts_mtrace_install_wrapper_functions();
extra_block_size += erts_instr_init(init.instr.stat, init.instr.map);
-#if !HALFWORD_HEAP
init_aireq_alloc();
-#endif
#ifdef DEBUG
extra_block_size += install_debug_functions();
@@ -843,7 +930,7 @@ erts_alloc_late_init(void)
static void *
erts_realloc_fixed_size(ErtsAlcType_t type, void *extra, void *p, Uint size)
{
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"Attempt to reallocate a block of the fixed size type %s\n",
ERTS_ALC_T2TD(type));
}
@@ -879,8 +966,6 @@ set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init, int ncpu)
return;
}
-#ifdef USE_THREADS
-#ifdef ERTS_SMP
if (init->thr_spec) {
if (init->thr_spec > 0) {
af->alloc = erts_alcu_alloc_thr_spec;
@@ -910,7 +995,6 @@ set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init, int ncpu)
ai->thr_spec = tspec->size;
}
else
-#endif
if (init->init.util.ts) {
af->alloc = erts_alcu_alloc_ts;
if (init->init.util.fix_type_size)
@@ -922,16 +1006,9 @@ set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init, int ncpu)
af->free = erts_alcu_free_ts;
}
else
-#endif
{
- af->alloc = erts_alcu_alloc;
- if (init->init.util.fix_type_size)
- af->realloc = erts_realloc_fixed_size;
- else if (init->init.util.ramv)
- af->realloc = erts_alcu_realloc_mv;
- else
- af->realloc = erts_alcu_realloc;
- af->free = erts_alcu_free;
+ erts_exit(ERTS_ABORT_EXIT, "%salloc is not thread safe\n",
+ init->init.util.name_prefix);
}
af->extra = NULL;
ai->alloc_util = 1;
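With the SMP-only runtime, an allocator that is neither thread-specific nor thread-safe is now a startup error instead of a silently unlocked instance. A small standalone sketch of the function-pointer wiring set_au_allocator() performs (names invented, malloc/free as stand-ins):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct {
        void *(*alloc)(size_t);
        void  (*dealloc)(void *);
    } alloc_fns;

    static void wire(alloc_fns *af, int thread_safe)
    {
        if (thread_safe) {
            af->alloc = malloc;   /* stand-in for erts_alcu_alloc_ts */
            af->dealloc = free;
        } else {
            fprintf(stderr, "allocator is not thread safe\n");
            abort();              /* mirrors erts_exit(ERTS_ABORT_EXIT, ...) */
        }
    }

    int main(void)
    {
        alloc_fns af;
        wire(&af, 1);
        af.dealloc(af.alloc(16));
        return 0;
    }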
@@ -965,7 +1042,7 @@ start_au_allocator(ErtsAlcType_t alctr_n,
* tspec->size)
+ ERTS_CACHE_LINE_SIZE - 1));
if (!states)
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"Failed to allocate allocator states for %salloc\n",
init->init.util.name_prefix);
tspec->allctr = (Allctr_t **) states;
@@ -993,7 +1070,7 @@ start_au_allocator(ErtsAlcType_t alctr_n,
(tot_fix_list_size
+ ERTS_CACHE_LINE_SIZE - 1));
if (!fix_lists)
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"Failed to allocate fix lists for %salloc\n",
init->init.util.name_prefix);
@@ -1004,7 +1081,7 @@ start_au_allocator(ErtsAlcType_t alctr_n,
}
for (i = 0; i < size; i++) {
- void *as;
+ Allctr_t *as;
atype = init->atype;
if (!init->thr_spec)
@@ -1041,22 +1118,22 @@ start_au_allocator(ErtsAlcType_t alctr_n,
switch (atype) {
case GOODFIT:
- as = (void *) erts_gfalc_start((GFAllctr_t *) as0,
+ as = erts_gfalc_start((GFAllctr_t *) as0,
&init->init.gf,
&init->init.util);
break;
case BESTFIT:
- as = (void *) erts_bfalc_start((BFAllctr_t *) as0,
+ as = erts_bfalc_start((BFAllctr_t *) as0,
&init->init.bf,
&init->init.util);
break;
case AFIT:
- as = (void *) erts_afalc_start((AFAllctr_t *) as0,
+ as = erts_afalc_start((AFAllctr_t *) as0,
&init->init.af,
&init->init.util);
break;
case AOFIRSTFIT:
- as = (void *) erts_aoffalc_start((AOFFAllctr_t *) as0,
+ as = erts_aoffalc_start((AOFFAllctr_t *) as0,
&init->init.aoff,
&init->init.util);
break;
@@ -1067,7 +1144,7 @@ start_au_allocator(ErtsAlcType_t alctr_n,
}
if (!as)
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"Failed to start %salloc\n", init->init.util.name_prefix);
ASSERT(as == (void *) as0);
@@ -1226,9 +1303,6 @@ get_acul_value(struct au_init *auip, char *param_end, char** argv, int* ip)
if (sys_strcmp(value, "de") == 0) {
switch (auip->init.util.alloc_no) {
case ERTS_ALC_A_LONG_LIVED:
-#if HALFWORD_HEAP
- case ERTS_ALC_A_LONG_LIVED_LOW:
-#endif
return ERTS_ALC_DEFAULT_ENABLED_ACUL_LL_ALLOC;
case ERTS_ALC_A_EHEAP:
return ERTS_ALC_DEFAULT_ENABLED_ACUL_EHEAP_ALLOC;
@@ -1306,9 +1380,17 @@ handle_au_arg(struct au_init *auip,
else
goto bad_switch;
break;
- case 'e':
- auip->enable = get_bool_value(sub_param+1, argv, ip);
+ case 'e': {
+ int e = get_bool_value(sub_param + 1, argv, ip);
+ if (!auip->disable_allowed && !e) {
+ if (!u_switch)
+ bad_value(param, sub_param + 1, "false");
+ else
+ ASSERT(auip->enable); /* ignore */
+ }
+ else auip->enable = e;
break;
+ }
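A "+M<S>e false" on an allocator with disable_allowed == 0 is now rejected outright, except when it arrives via the umbrella +Mu switch, where it is silently ignored so a blanket disable still works for the other allocators. A hedged sketch of the reject-vs-ignore rule:

    /* Returns the new enable value, or -1 to signal a bad +M<S>e value
     * (u_switch marks the umbrella +Mu form). Illustrative only. */
    static int apply_enable(int cur, int disable_allowed, int value, int u_switch)
    {
        if (!disable_allowed && !value)
            return u_switch ? cur : -1;
        return value;
    }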
case 'l':
if (has_prefix("lmbcs", sub_param)) {
auip->default_.lmbcs = 0;
@@ -1377,7 +1459,14 @@ handle_au_arg(struct au_init *auip,
case 't': {
int res = get_bool_value(sub_param+1, argv, ip);
if (res > 0) {
- auip->thr_spec = 1;
+ if (!auip->thr_spec_allowed) {
+ if (!u_switch)
+ bad_value(param, sub_param + 1, "true");
+ else
+ ASSERT(!auip->thr_spec); /* ignore */
+ }
+ else
+ auip->thr_spec = 1;
break;
}
else if (res == 0) {
@@ -1404,8 +1493,8 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
&init->ll_alloc,
&init->driver_alloc,
&init->fix_alloc,
- &init->sl_alloc,
- &init->temp_alloc
+ &init->sl_alloc
+ /* test_alloc not affected by +Mea??? or +Mu??? */
};
int aui_sz = (int) sizeof(aui)/sizeof(aui[0]);
char *arg;
@@ -1425,6 +1514,26 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
case 'B':
handle_au_arg(&init->binary_alloc, &argv[i][3], argv, &i, 0);
break;
+ case 'I':
+ if (has_prefix("scs", argv[i]+3)) {
+#if HAVE_ERTS_MSEG
+ init->mseg.literal_mmap.scs =
+#endif
+ get_mb_value(argv[i]+6, argv, &i);
+ }
+ else
+ handle_au_arg(&init->literal_alloc, &argv[i][3], argv, &i, 0);
+ break;
+ case 'X':
+ if (has_prefix("scs", argv[i]+3)) {
+#ifdef ERTS_HAVE_EXEC_MMAPPER
+ init->mseg.exec_mmap.scs =
+#endif
+ get_mb_value(argv[i]+6, argv, &i);
+ }
+ else
+ handle_au_arg(&init->exec_alloc, &argv[i][3], argv, &i, 0);
+ break;
case 'D':
handle_au_arg(&init->std_alloc, &argv[i][3], argv, &i, 0);
break;
@@ -1461,25 +1570,25 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
}
else if (has_prefix("scs", argv[i]+3)) {
#if HAVE_ERTS_MSEG
- init->mseg.mmap.scs =
+ init->mseg.dflt_mmap.scs =
#endif
get_mb_value(argv[i]+6, argv, &i);
}
else if (has_prefix("sco", argv[i]+3)) {
#if HAVE_ERTS_MSEG
- init->mseg.mmap.sco =
+ init->mseg.dflt_mmap.sco =
#endif
get_bool_value(argv[i]+6, argv, &i);
}
else if (has_prefix("scrpm", argv[i]+3)) {
#if HAVE_ERTS_MSEG
- init->mseg.mmap.scrpm =
+ init->mseg.dflt_mmap.scrpm =
#endif
get_bool_value(argv[i]+8, argv, &i);
}
else if (has_prefix("scrfsd", argv[i]+3)) {
#if HAVE_ERTS_MSEG
- init->mseg.mmap.scrfsd =
+ init->mseg.dflt_mmap.scrfsd =
#endif
get_amount_value(argv[i]+9, argv, &i);
}
@@ -1496,6 +1605,9 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
case 'T':
handle_au_arg(&init->temp_alloc, &argv[i][3], argv, &i, 0);
break;
+ case 'Z':
+ handle_au_arg(&init->test_alloc, &argv[i][3], argv, &i, 0);
+ break;
case 'Y': { /* sys_alloc */
if (has_prefix("tt", param+2)) {
/* set trim threshold */
@@ -1727,9 +1839,7 @@ erts_alloc_register_scheduler(void *vesdp)
int ix = (int) esdp->no;
int aix;
-#ifdef ERTS_DIRTY_SCHEDULERS
ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
-#endif
for (aix = ERTS_ALC_A_MIN; aix <= ERTS_ALC_A_MAX; aix++) {
ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[aix];
esdp->alloc_data.deallctr[aix] = NULL;
@@ -1747,7 +1857,6 @@ erts_alloc_register_scheduler(void *vesdp)
}
}
-#ifdef ERTS_SMP
void
erts_alloc_scheduler_handle_delayed_dealloc(void *vesdp,
int *need_thr_progress,
@@ -1776,12 +1885,10 @@ erts_alloc_scheduler_handle_delayed_dealloc(void *vesdp,
}
}
}
-#endif
erts_aint32_t
erts_alloc_fix_alloc_shrink(int ix, erts_aint32_t flgs)
{
-#ifdef ERTS_SMP
ErtsAllocatorThrSpec_t *tspec;
tspec = &erts_allctr_thr_spec[ERTS_ALC_A_FIXED_SIZE];
if (erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].thr_spec && tspec->enabled)
@@ -1789,11 +1896,6 @@ erts_alloc_fix_alloc_shrink(int ix, erts_aint32_t flgs)
if (ix == 0 && erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra)
return erts_alcu_fix_alloc_shrink(
erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra, flgs);
-#else
- if (ix == 1 && erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra)
- return erts_alcu_fix_alloc_shrink(
- erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra, flgs);
-#endif
return 0;
}
@@ -1858,7 +1960,7 @@ erts_alc_fatal_error(int error, int func, ErtsAlcType_t n, ...)
case ERTS_ALC_O_FREE: op_str = "free"; break;
default: op_str = "UNKNOWN"; break;
}
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"%s: %s operation not supported (memory type: \"%s\")\n",
allctr_str, op_str, t_str);
break;
@@ -1872,18 +1974,18 @@ erts_alc_fatal_error(int error, int func, ErtsAlcType_t n, ...)
va_start(argp, n);
size = va_arg(argp, Uint);
va_end(argp);
- erl_exit(1,
+ erts_exit(ERTS_DUMP_EXIT,
"%s: Cannot %s %lu bytes of memory (of type \"%s\").\n",
allctr_str, op, size, t_str);
break;
}
case ERTS_ALC_E_NOALLCTR:
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"erts_alloc: Unknown allocator type: %d\n",
ERTS_ALC_T2A(ERTS_ALC_N2T(n)));
break;
default:
- erl_exit(ERTS_ABORT_EXIT, "erts_alloc: Unknown error: %d\n", error);
+ erts_exit(ERTS_ABORT_EXIT, "erts_alloc: Unknown error: %d\n", error);
break;
}
}
@@ -1945,48 +2047,6 @@ alcu_size(ErtsAlcType_t ai, ErtsAlcUFixInfo_t *fi, int fisz)
return res;
}
-#if HALFWORD_HEAP
-static ERTS_INLINE int
-alcu_is_low(ErtsAlcType_t ai)
-{
- int is_low = 0;
- ASSERT(erts_allctrs_info[ai].enabled);
- ASSERT(erts_allctrs_info[ai].alloc_util);
-
- if (!erts_allctrs_info[ai].thr_spec) {
- Allctr_t *allctr = erts_allctrs_info[ai].extra;
- is_low = allctr->mseg_opt.low_mem;
- }
- else {
- ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[ai];
- int i;
-# ifdef DEBUG
- int found_one = 0;
-# endif
-
- ASSERT(tspec->enabled);
-
- for (i = tspec->size - 1; i >= 0; i--) {
- Allctr_t *allctr = tspec->allctr[i];
- if (allctr) {
-# ifdef DEBUG
- if (!found_one) {
- is_low = allctr->mseg_opt.low_mem;
- found_one = 1;
- }
- else ASSERT(is_low == allctr->mseg_opt.low_mem);
-# else
- is_low = allctr->mseg_opt.low_mem;
- break;
-# endif
- }
- }
- ASSERT(found_one);
- }
- return is_low;
-}
-#endif /* HALFWORD */
-
static ERTS_INLINE void
add_fix_values(UWord *ap, UWord *up, ErtsAlcUFixInfo_t *fi, ErtsAlcType_t type)
{
@@ -1998,7 +2058,7 @@ add_fix_values(UWord *ap, UWord *up, ErtsAlcUFixInfo_t *fi, ErtsAlcType_t type)
}
Eterm
-erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
+erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg)
{
/*
* NOTE! When updating this function, make sure to also update
@@ -2016,9 +2076,6 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
int code;
int ets;
int maximum;
-#if HALFWORD_HEAP
- int low;
-#endif
} want = {0};
struct {
UWord total;
@@ -2031,9 +2088,6 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
UWord code;
UWord ets;
UWord maximum;
-#if HALFWORD_HEAP
- UWord low;
-#endif
} size = {0};
Eterm atoms[sizeof(size)/sizeof(UWord)];
UWord *uintps[sizeof(size)/sizeof(UWord)];
@@ -2045,7 +2099,7 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
int only_one_value = 0;
ErtsAlcUFixInfo_t fi[ERTS_ALC_NO_FIXED_SIZES] = {{0,0}};
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
/* Figure out whats wanted... */
@@ -2092,11 +2146,6 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
atoms[length] = am_maximum;
uintps[length++] = &size.maximum;
}
-#if HALFWORD_HEAP
- want.low = 1;
- atoms[length] = am_low;
- uintps[length++] = &size.low;
-#endif
}
else {
DeclareTmpHeapNoproc(tmp_heap,2);
@@ -2190,15 +2239,6 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
return am_badarg;
}
break;
-#if HALFWORD_HEAP
- case am_low:
- if (!want.low) {
- want.low = 1;
- atoms[length] = am_low;
- uintps[length++] = &size.low;
- }
- break;
-#endif
default:
UnUseTmpHeapNoproc(2);
return am_badarg;
@@ -2210,11 +2250,12 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
return am_badarg;
}
- /* All alloc_util allocators *have* to be enabled */
+ /* All alloc_util allocators *have* to be enabled, except test_alloc */
for (ai = ERTS_ALC_A_MIN; ai <= ERTS_ALC_A_MAX; ai++) {
switch (ai) {
case ERTS_ALC_A_SYSTEM:
+ case ERTS_ALC_A_TEST:
break;
default:
if (!erts_allctrs_info[ai].enabled
@@ -2231,10 +2272,10 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
if (proc) {
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN
== erts_proc_lc_my_proc_locks(proc));
/* We'll need locks early in the lock order */
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
}
/* Calculate values needed... */
@@ -2255,6 +2296,8 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
* contain any allocated memory.
*/
continue;
+ case ERTS_ALC_A_TEST:
+ continue;
case ERTS_ALC_A_EHEAP:
save = &size.processes;
break;
@@ -2276,11 +2319,6 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
if (save)
*save = asz;
size.total += asz;
-#if HALFWORD_HEAP
- if (alcu_is_low(ai)) {
- size.low += asz;
- }
-#endif
}
}
}
@@ -2307,7 +2345,6 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
&size.processes_used,
fi,
ERTS_ALC_T_PROC);
-#if !HALFWORD_HEAP
add_fix_values(&size.processes,
&size.processes_used,
fi,
@@ -2317,11 +2354,26 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
&size.processes_used,
fi,
ERTS_ALC_T_NLINK_SH);
-#endif
add_fix_values(&size.processes,
&size.processes_used,
fi,
ERTS_ALC_T_MSG_REF);
+ add_fix_values(&size.processes,
+ &size.processes_used,
+ fi,
+ ERTS_ALC_T_LL_PTIMER);
+ add_fix_values(&size.processes,
+ &size.processes_used,
+ fi,
+ ERTS_ALC_T_HL_PTIMER);
+ add_fix_values(&size.processes,
+ &size.processes_used,
+ fi,
+ ERTS_ALC_T_BIF_TIMER);
+ add_fix_values(&size.processes,
+ &size.processes_used,
+ fi,
+ ERTS_ALC_T_NIF_EXP_TRACE);
}
if (want.atom || want.atom_used) {
@@ -2367,7 +2419,7 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
if (print_to_p) {
int i;
- int to = *print_to_p;
+ fmtfn_t to = *print_to_p;
void *arg = print_to_arg;
/* Print result... */
@@ -2381,7 +2433,7 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
Uint *hp;
Uint hsz;
- erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
if (only_one_value) {
ASSERT(length == 1);
@@ -2421,7 +2473,7 @@ struct aa_values {
};
Eterm
-erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
+erts_allocated_areas(fmtfn_t *print_to_p, void *print_to_arg, void *proc)
{
#define MAX_AA_VALUES (24)
struct aa_values values[MAX_AA_VALUES];
@@ -2430,11 +2482,11 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
Uint reserved_atom_space, atom_space;
if (proc) {
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN
== erts_proc_lc_my_proc_locks(proc));
/* We'll need locks early in the lock order */
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
}
i = 0;
@@ -2556,7 +2608,7 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
if (print_to_p) {
/* Print result... */
- int to = *print_to_p;
+ fmtfn_t to = *print_to_p;
void *arg = print_to_arg;
erts_print(to, arg, "=allocated_areas\n");
@@ -2586,7 +2638,7 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
Uint hsz;
Uint *hszp;
- erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
hpp = NULL;
hsz = 0;
@@ -2645,14 +2697,17 @@ erts_alloc_util_allocators(void *proc)
/*
* Currently all allocators except sys_alloc are
* alloc_util allocators.
+ * Also hide test_alloc, which is disabled by default
+ * and only intended for our own testing.
*/
- sz = ((ERTS_ALC_A_MAX + 1 - ERTS_ALC_A_MIN) - 1)*2;
+ sz = ((ERTS_ALC_A_MAX + 1 - ERTS_ALC_A_MIN) - 2)*2;
ASSERT(sz > 0);
hp = HAlloc((Process *) proc, sz);
res = NIL;
for (i = ERTS_ALC_A_MAX; i >= ERTS_ALC_A_MIN; i--) {
switch (i) {
case ERTS_ALC_A_SYSTEM:
+ case ERTS_ALC_A_TEST:
break;
default: {
char *alc_str = (char *) ERTS_ALC_A2AD(i);
@@ -2667,11 +2722,11 @@ erts_alloc_util_allocators(void *proc)
}
void
-erts_allocator_info(int to, void *arg)
+erts_allocator_info(fmtfn_t to, void *arg)
{
ErtsAlcType_t a;
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
for (a = ERTS_ALC_A_MIN; a <= ERTS_ALC_A_MAX; a++) {
int ai;
@@ -2724,18 +2779,22 @@ erts_allocator_info(int to, void *arg)
#if HAVE_ERTS_MSEG
{
struct erts_mmap_info_struct emis;
-#ifdef ERTS_SMP
int max = (int) erts_no_schedulers;
-#else
- int max = 0;
-#endif
int i;
for (i = 0; i <= max; i++) {
erts_print(to, arg, "=allocator:mseg_alloc[%d]\n", i);
- erts_mseg_info(i, &to, arg, 0, NULL, NULL);
+ erts_mseg_info(i, &to, arg, 0, 0, NULL, NULL);
}
- erts_print(to, arg, "=allocator:mseg_alloc.erts_mmap\n");
- erts_mmap_info(&to, arg, NULL, NULL, &emis);
+ erts_print(to, arg, "=allocator:erts_mmap.default_mmap\n");
+ erts_mmap_info(&erts_dflt_mmapper, &to, arg, NULL, NULL, &emis);
+#if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
+ erts_print(to, arg, "=allocator:erts_mmap.literal_mmap\n");
+ erts_mmap_info(&erts_literal_mmapper, &to, arg, NULL, NULL, &emis);
+#endif
+#ifdef ERTS_HAVE_EXEC_MMAPPER
+ erts_print(to, arg, "=allocator:erts_mmap.exec_mmap\n");
+ erts_mmap_info(&erts_exec_mmapper, &to, arg, NULL, NULL, &emis);
+#endif
}
#endif
@@ -2846,6 +2905,11 @@ erts_allocator_options(void *proc)
atoms[length] = am_atom_put("alloc_util", 10);
terms[length++] = erts_alcu_au_info_options(NULL, NULL, hpp, szp);
+#if HAVE_ERTS_MMAP
+ atoms[length] = ERTS_MAKE_AM("erts_mmap");
+ terms[length++] = erts_mmap_info_options(&erts_dflt_mmapper, NULL, NULL,
+ NULL, hpp, szp);
+#endif
{
Eterm o[3], v[3];
o[0] = am_atom_put("m", 1);
@@ -2882,7 +2946,12 @@ erts_allocator_options(void *proc)
#if ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
terms[length++] = am_atom_put("sys_aligned_alloc", 17);
#endif
-
+#if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
+ terms[length++] = ERTS_MAKE_AM("literal_mmap");
+#endif
+#ifdef ERTS_HAVE_EXEC_MMAPPER
+ terms[length++] = ERTS_MAKE_AM("exec_mmap");
+#endif
features = length ? erts_bld_list(hpp, szp, length, terms) : NIL;
#if defined(__GLIBC__)
@@ -2961,18 +3030,26 @@ reply_alloc_info(void *vair)
int global_instances = air->req_sched == sched_id;
ErtsProcLocks rp_locks;
Process *rp = air->proc;
- Eterm ref_copy = NIL, ai_list, msg;
- Eterm *hp = NULL, *hp_end = NULL, *hp_start = NULL;
+ Eterm ref_copy = NIL, ai_list, msg = NIL;
+ Eterm *hp = NULL, *hp_start = NULL, *hp_end = NULL;
Eterm **hpp;
Uint sz, *szp;
ErlOffHeap *ohp = NULL;
- ErlHeapFragment *bp = NULL;
- struct erts_mmap_info_struct emis;
+ ErtsMessage *mp = NULL;
+#if HAVE_ERTS_MMAP
+ struct erts_mmap_info_struct mmap_info_dflt;
+# if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
+ struct erts_mmap_info_struct mmap_info_literal;
+# endif
+# ifdef ERTS_HAVE_EXEC_MMAPPER
+ struct erts_mmap_info_struct mmap_info_exec;
+# endif
+#endif
int i;
Eterm (*info_func)(Allctr_t *,
int,
int,
- int *,
+ fmtfn_t *,
void *,
Uint **,
Uint *) = (air->only_sz
@@ -2988,9 +3065,10 @@ reply_alloc_info(void *vair)
while (1) {
if (hpp)
- ref_copy = STORE_NC(hpp, ohp, air->ref);
+ ref_copy = erts_iref_storage_make_ref(&air->iref,
+ hpp, ohp, 0);
else
- *szp += REF_THING_SIZE;
+ *szp += erts_iref_storage_heap_size(&air->iref);
ai_list = NIL;
for (i = 0; air->allocs[i] != ERTS_ALC_A_INVALID; i++);
@@ -3073,30 +3151,64 @@ reply_alloc_info(void *vair)
make_small(0),
ainfo);
break;
+ case ERTS_ALC_INFO_A_ERTS_MMAP:
+ alloc_atom = erts_bld_atom(hpp, szp, "erts_mmap");
+#if HAVE_ERTS_MMAP
+ ainfo = (air->only_sz ? NIL :
+ erts_mmap_info(&erts_dflt_mmapper, NULL, NULL,
+ hpp, szp, &mmap_info_dflt));
+ ainfo = erts_bld_tuple3(hpp, szp,
+ alloc_atom,
+ erts_bld_atom(hpp,szp,"default_mmap"),
+ ainfo);
+# if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
+ ai_list = erts_bld_cons(hpp, szp,
+ ainfo, ai_list);
+ ainfo = (air->only_sz ? NIL :
+ erts_mmap_info(&erts_literal_mmapper, NULL, NULL,
+ hpp, szp, &mmap_info_literal));
+ ainfo = erts_bld_tuple3(hpp, szp,
+ alloc_atom,
+ erts_bld_atom(hpp,szp,"literal_mmap"),
+ ainfo);
+# endif
+# ifdef ERTS_HAVE_EXEC_MMAPPER
+ ai_list = erts_bld_cons(hpp, szp,
+ ainfo, ai_list);
+ ainfo = (air->only_sz ? NIL :
+ erts_mmap_info(&erts_exec_mmapper, NULL, NULL,
+ hpp, szp, &mmap_info_exec));
+ ainfo = erts_bld_tuple3(hpp, szp,
+ alloc_atom,
+ erts_bld_atom(hpp,szp,"exec_mmap"),
+ ainfo);
+# endif
+#else /* !HAVE_ERTS_MMAP */
+ ainfo = erts_bld_tuple2(hpp, szp, alloc_atom,
+ am_false);
+#endif
+ break;
case ERTS_ALC_INFO_A_MSEG_ALLOC:
alloc_atom = erts_bld_atom(hpp, szp, "mseg_alloc");
#if HAVE_ERTS_MSEG
- ainfo = (air->only_sz
- ? NIL
- : erts_mseg_info(0, NULL, NULL, hpp != NULL,
- hpp, szp));
+ ainfo = erts_mseg_info(0, NULL, NULL, hpp != NULL,
+ air->only_sz, hpp, szp);
ainfo = erts_bld_tuple3(hpp, szp,
alloc_atom,
make_small(0),
ainfo);
- ai_list = erts_bld_cons(hpp, szp,
- ainfo, ai_list);
- ainfo = (air->only_sz ? NIL : erts_mmap_info(NULL, NULL, hpp, szp, &emis));
- ainfo = erts_bld_tuple3(hpp, szp,
- alloc_atom,
- erts_bld_atom(hpp,szp,"erts_mmap"),
- ainfo);
#else
ainfo = erts_bld_tuple2(hpp, szp, alloc_atom,
am_false);
#endif
break;
+#ifndef ERTS_ALC_A_EXEC
+ case ERTS_ALC_INFO_A_DISABLED_EXEC:
+ alloc_atom = erts_bld_atom(hpp, szp, "exec_alloc");
+ ainfo = erts_bld_tuple2(hpp, szp, alloc_atom, am_false);
+ break;
+#endif
default:
alloc_atom = erts_bld_atom(hpp, szp,
(char *) ERTS_ALC_A2AD(ai));
@@ -3114,7 +3226,7 @@ reply_alloc_info(void *vair)
make_small(0), ainfo);
}
else {
- erl_exit(ERTS_ABORT_EXIT, "%s:%d: internal error\n",
+ erts_exit(ERTS_ABORT_EXIT, "%s:%d: internal error\n",
__FILE__, __LINE__);
}
}
@@ -3123,15 +3235,15 @@ reply_alloc_info(void *vair)
}
switch (ai) {
case ERTS_ALC_A_SYSTEM:
- case ERTS_ALC_INFO_A_ALLOC_UTIL:
+ case ERTS_ALC_INFO_A_ALLOC_UTIL:
+ case ERTS_ALC_INFO_A_ERTS_MMAP:
+ case ERTS_ALC_INFO_A_DISABLED_EXEC:
break;
case ERTS_ALC_INFO_A_MSEG_ALLOC:
-#if HAVE_ERTS_MSEG && defined(ERTS_SMP)
+#if HAVE_ERTS_MSEG
alloc_atom = erts_bld_atom(hpp, szp, "mseg_alloc");
- ainfo = (air->only_sz
- ? NIL
- : erts_mseg_info(sched_id, NULL, NULL,
- hpp != NULL, hpp, szp));
+ ainfo = erts_mseg_info(sched_id, NULL, NULL,
+ hpp != NULL, air->only_sz, hpp, szp);
ainfo = erts_bld_tuple(hpp, szp, 3,
alloc_atom,
make_small(sched_id),
@@ -3167,33 +3279,28 @@ reply_alloc_info(void *vair)
if (hpp)
break;
- hp = erts_alloc_message_heap(sz, &bp, &ohp, rp, &rp_locks);
+ mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
hp_start = hp;
hp_end = hp + sz;
szp = NULL;
hpp = &hp;
}
- if (bp)
- bp = erts_resize_message_buffer(bp, hp - hp_start, &msg, 1);
- else {
- ASSERT(hp);
- HRelease(rp, hp_end, hp);
- }
- erts_queue_message(rp, &rp_locks, bp, msg, NIL
-#ifdef USE_VM_PROBES
- , NIL
-#endif
- );
+ if (hp != hp_end)
+ erts_shrink_message_heap(&mp, rp, hp_start, hp, hp_end, &msg, 1);
+
+ erts_queue_message(rp, rp_locks, mp, msg, am_system);
if (air->req_sched == sched_id)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
- erts_smp_proc_unlock(rp, rp_locks);
- erts_smp_proc_dec_refc(rp);
+ erts_proc_unlock(rp, rp_locks);
+ erts_proc_dec_refc(rp);
- if (erts_smp_atomic32_dec_read_nob(&air->refc) == 0)
+ if (erts_atomic32_dec_read_nob(&air->refc) == 0) {
+ erts_iref_storage_clean(&air->iref);
aireq_free(air);
+ }
}
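Each scheduler contributes its slice of the report; the last one to decrement refc also releases the stored ref via erts_iref_storage_clean() before freeing the request. A standalone C11 sketch of the last-thread-frees idiom:

    #include <stdatomic.h>
    #include <stdlib.h>

    typedef struct {
        atomic_int refc;   /* initialised to the number of responders */
    } request;

    static void reply_done(request *req)
    {
        /* fetch_sub returns the old value: 1 means we brought it to 0 */
        if (atomic_fetch_sub_explicit(&req->refc, 1,
                                      memory_order_acq_rel) == 1) {
            /* last responder: release resources, then the request */
            free(req);
        }
    }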
int
@@ -3204,9 +3311,8 @@ erts_request_alloc_info(struct process *c_p,
int internal)
{
ErtsAllocInfoReq *air = aireq_alloc();
- Eterm req_ai[ERTS_ALC_A_MAX+1+2] = {0};
+ Eterm req_ai[ERTS_ALC_INFO_A_END] = {0};
Eterm alist;
- Eterm *hp;
int airix = 0, ai;
air->req_sched = erts_get_scheduler_id();
@@ -3220,8 +3326,7 @@ erts_request_alloc_info(struct process *c_p,
if (is_not_internal_ref(ref))
return 0;
- hp = &air->ref_heap[0];
- air->ref = STORE_NC(&hp, NULL, ref);
+ erts_iref_storage_save(&air->iref, ref);
if (is_not_list(allocs))
return 0;
@@ -3240,6 +3345,16 @@ erts_request_alloc_info(struct process *c_p,
ai = ERTS_ALC_INFO_A_MSEG_ALLOC;
goto save_alloc;
}
+ if (erts_is_atom_str("erts_mmap", alloc, 0)) {
+ ai = ERTS_ALC_INFO_A_ERTS_MMAP;
+ goto save_alloc;
+ }
+#ifndef ERTS_ALC_A_EXEC
+ if (erts_is_atom_str("exec_alloc", alloc, 0)) {
+ ai = ERTS_ALC_INFO_A_DISABLED_EXEC;
+ goto save_alloc;
+ }
+#endif
if (erts_is_atom_str("alloc_util", alloc, 0)) {
ai = ERTS_ALC_INFO_A_ALLOC_UTIL;
save_alloc:
@@ -3261,18 +3376,16 @@ erts_request_alloc_info(struct process *c_p,
air->allocs[airix] = ERTS_ALC_A_INVALID;
- erts_smp_atomic32_init_nob(&air->refc,
+ erts_atomic32_init_nob(&air->refc,
(erts_aint32_t) erts_no_schedulers);
- erts_smp_proc_add_refc(c_p, (Sint32) erts_no_schedulers);
+ erts_proc_add_refc(c_p, (Sint) erts_no_schedulers);
-#ifdef ERTS_SMP
if (erts_no_schedulers > 1)
erts_schedule_multi_misc_aux_work(1,
erts_no_schedulers,
reply_alloc_info,
(void *) air);
-#endif
reply_alloc_info((void *) air);
@@ -3327,35 +3440,13 @@ void erts_allctr_wrapper_pre_unlock(void)
}
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
- * Deprecated functions *
- * *
- * These functions are still defined since "non-OTP linked in drivers" may *
- * contain (illegal) calls to them. *
-\* */
-
-/* --- DO *NOT* USE THESE FUNCTIONS --- */
-
-void *sys_alloc(Uint sz)
-{ return erts_alloc_fnf(ERTS_ALC_T_UNDEF, sz); }
-void *sys_realloc(void *ptr, Uint sz)
-{ return erts_realloc_fnf(ERTS_ALC_T_UNDEF, ptr, sz); }
-void sys_free(void *ptr)
-{ erts_free(ERTS_ALC_T_UNDEF, ptr); }
-void *safe_alloc(Uint sz)
-{ return erts_alloc(ERTS_ALC_T_UNDEF, sz); }
-void *safe_realloc(void *ptr, Uint sz)
-{ return erts_realloc(ERTS_ALC_T_UNDEF, ptr, sz); }
-
-
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* NOTE: erts_alc_test() is only supposed to be used for testing. *
* *
* Keep alloc_SUITE_data/allocator_test.h updated if changes are made *
* to erts_alc_test() *
\* */
-#define ERTS_ALC_TEST_ABORT erl_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error\n")
+#define ERTS_ALC_TEST_ABORT erts_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error\n")
UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
{
@@ -3369,35 +3460,29 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
case 0xf:
switch (op) {
case 0xf00:
-#ifdef USE_THREADS
if (((Allctr_t *) a1)->thread_safe)
return (UWord) erts_alcu_alloc_ts(ERTS_ALC_T_UNDEF,
(void *) a1,
(Uint) a2);
else
-#endif
return (UWord) erts_alcu_alloc(ERTS_ALC_T_UNDEF,
(void *) a1,
(Uint) a2);
case 0xf01:
-#ifdef USE_THREADS
if (((Allctr_t *) a1)->thread_safe)
return (UWord) erts_alcu_realloc_ts(ERTS_ALC_T_UNDEF,
(void *) a1,
(void *) a2,
(Uint) a3);
else
-#endif
return (UWord) erts_alcu_realloc(ERTS_ALC_T_UNDEF,
(void *) a1,
(void *) a2,
(Uint) a3);
case 0xf02:
-#ifdef USE_THREADS
if (((Allctr_t *) a1)->thread_safe)
erts_alcu_free_ts(ERTS_ALC_T_UNDEF, (void *) a1, (void *) a2);
else
-#endif
erts_alcu_free(ERTS_ALC_T_UNDEF, (void *) a1, (void *) a2);
return 0;
case 0xf03: {
@@ -3408,8 +3493,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
init.enable = 1;
init.atype = GOODFIT;
init.init.util.name_prefix = (char *) a1;
- init.init.util.ts = a2 ? 1 : 0;
-
+ init.init.util.ts = 1;
if ((char **) a3) {
char **argv = (char **) a3;
int i = 0;
@@ -3464,7 +3548,6 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
erts_alcu_stop((Allctr_t *) a1);
erts_free(ERTS_ALC_T_UNDEF, (void *) a1);
break;
-#ifdef USE_THREADS
case 0xf05: return (UWord) 1;
case 0xf06: return (UWord) ((Allctr_t *) a1)->thread_safe;
#ifdef ETHR_NO_FORKSAFETY
@@ -3534,12 +3617,42 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
ethr_thr_exit((void *) a1);
ERTS_ALC_TEST_ABORT;
break;
-#endif /* #ifdef USE_THREADS */
-#ifdef ERTS_SMP
case 0xf13: return (UWord) 1;
-#else
- case 0xf13: return (UWord) 0;
-#endif
+ case 0xf14: return (UWord) erts_alloc(ERTS_ALC_T_TEST, (Uint)a1);
+
+ case 0xf15: erts_free(ERTS_ALC_T_TEST, (void*)a1); return 0;
+
+ case 0xf16: {
+ Uint extra_hdr_sz = UNIT_CEILING((Uint)a1);
+ ErtsAllocatorThrSpec_t* ts = &erts_allctr_thr_spec[ERTS_ALC_A_TEST];
+ Uint offset = ts->allctr[0]->mbc_header_size;
+ void* orig_creating_mbc = ts->allctr[0]->creating_mbc;
+ void* orig_destroying_mbc = ts->allctr[0]->destroying_mbc;
+ void* new_creating_mbc = *(void**)a2; /* inout arg */
+ void* new_destroying_mbc = *(void**)a3; /* inout arg */
+ int i;
+
+ for (i=0; i < ts->size; i++) {
+ Allctr_t* ap = ts->allctr[i];
+ if (ap->mbc_header_size != offset
+ || ap->creating_mbc != orig_creating_mbc
+ || ap->destroying_mbc != orig_destroying_mbc
+ || ap->mbc_list.first != NULL)
+ return -1;
+ }
+ for (i=0; i < ts->size; i++) {
+ ts->allctr[i]->mbc_header_size += extra_hdr_sz;
+ ts->allctr[i]->creating_mbc = new_creating_mbc;
+ ts->allctr[i]->destroying_mbc = new_destroying_mbc;
+ }
+ *(void**)a2 = orig_creating_mbc;
+ *(void**)a3 = orig_destroying_mbc;
+ return offset;
+ }
+ case 0xf17: {
+ ErtsAllocatorThrSpec_t* ts = &erts_allctr_thr_spec[ERTS_ALC_A_TEST];
+ return ts->allctr[0]->largest_mbc_size;
+ }
default:
break;
}
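Test op 0xf16 grows every test-allocator MBC header by a1 bytes and installs interception callbacks for carrier creation/destruction; a2 and a3 are in-out pointers that deliver the new callbacks and return the originals so the suite can restore them later. A minimal sketch of that in-out swap (types invented; the void ** interface mirrors the code above):

    typedef void (*mbc_cb)(void *);

    static void swap_cb(mbc_cb *installed, void **inout)
    {
        mbc_cb orig = *installed;
        *installed = (mbc_cb) *inout; /* install the caller's callback */
        *inout = (void *) orig;       /* hand the original back */
    }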
@@ -3618,7 +3731,8 @@ hdbg_init(void)
hdbg_mblks[ERL_ALC_HDBG_MAX_MBLK-1].next = NULL;
free_hdbg_mblks = &hdbg_mblks[0];
used_hdbg_mblks = NULL;
- erts_mtx_init(&hdbg_mblk_mtx, "erts_alloc_hard_debug");
+ erts_mtx_init(&hdbg_mblk_mtx, "erts_alloc_hard_debug", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
}
static void *check_memory_fence(void *ptr,
@@ -3707,10 +3821,8 @@ void check_allocators(void)
ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) erts_allctrs[i].extra;
Allctr_t *allctr = real_af->extra;
Carrier_t *ct;
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_lock(&allctr->mutex);
-#endif
if (allctr->check_mbc) {
for (ct = allctr->mbc_list.first; ct; ct = ct->next) {
@@ -3718,10 +3830,8 @@ void check_allocators(void)
allctr->check_mbc(allctr,ct);
}
}
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
-#endif
}
}
}
@@ -3782,7 +3892,7 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
found_type = GET_TYPE_OF_PATTERN(pre_pattern);
if (pre_pattern != MK_PATTERN(n)) {
if ((FIXED_FENCE_PATTERN_MASK & pre_pattern) != FIXED_FENCE_PATTERN)
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"ERROR: Fence at beginning of memory block (p=0x%u) "
"clobbered.\n",
(UWord) ptr);
@@ -3799,12 +3909,12 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
char *op_str;
if ((FIXED_FENCE_PATTERN_MASK & post_pattern) != FIXED_FENCE_PATTERN)
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"ERROR: Fence at end of memory block (p=0x%u, sz=%u) "
"clobbered.\n",
(UWord) ptr, (UWord) sz);
if (found_type != GET_TYPE_OF_PATTERN(post_pattern))
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"ERROR: Fence around memory block (p=0x%u, sz=%u) "
"clobbered.\n",
(UWord) ptr, (UWord) sz);
@@ -3827,7 +3937,7 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
default: op_str = "???"; break;
}
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"ERROR: Memory block (p=0x%u, sz=%u) allocated as type \"%s\","
" but %s as type \"%s\".\n",
(UWord) ptr, (UWord) sz, ftype, op_str, otype);
@@ -3916,12 +4026,20 @@ debug_free(ErtsAlcType_t n, void *extra, void *ptr)
ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
void *dptr;
Uint size;
+ int free_pattern = n;
ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);
dptr = check_memory_fence(ptr, &size, n, ERTS_ALC_O_FREE);
- sys_memset((void *) dptr, n, size + FENCE_SZ);
+#ifdef ERTS_ALC_A_EXEC
+# if defined(__i386__) || defined(__x86_64__)
+ if (ERTS_ALC_T2A(ERTS_ALC_N2T(n)) == ERTS_ALC_A_EXEC) {
+ free_pattern = 0x0f; /* Illegal instruction */
+ }
+# endif
+#endif
+ sys_memset((void *) dptr, free_pattern, size + FENCE_SZ);
(*real_af->free)(n, real_af->extra, dptr);
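debug_free() still poisons the released block, but blocks from the executable allocator are now filled with 0x0f so a stale code pointer traps on an illegal instruction (per the comment above) instead of executing leftover bytes. Minimal sketch of the pattern choice:

    #include <string.h>

    /* Poison a freed block with a type-derived byte; executable
     * memory gets the trap byte instead (values illustrative). */
    static void poison(void *p, size_t sz, int type_byte, int is_exec)
    {
        memset(p, is_exec ? 0x0f : type_byte, sz);
    }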
@@ -3939,7 +4057,7 @@ static Uint
install_debug_functions(void)
{
int i;
- ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs));
+ ERTS_CT_ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs));
sys_memcpy((void *)real_allctrs,(void *)erts_allctrs,sizeof(erts_allctrs));
diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h
index d3109b9432..117f96a4ad 100644
--- a/erts/emulator/beam/erl_alloc.h
+++ b/erts/emulator/beam/erl_alloc.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -26,9 +27,8 @@
#include "erl_thr_progress.h"
#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY
#include "erl_alloc_util.h"
-#ifdef USE_THREADS
#include "erl_threads.h"
-#endif
+#include "erl_mmap.h"
#ifdef DEBUG
# undef ERTS_ALC_WANT_INLINE
@@ -42,9 +42,11 @@
#if ERTS_CAN_INLINE && ERTS_ALC_WANT_INLINE
# define ERTS_ALC_DO_INLINE 1
# define ERTS_ALC_INLINE static ERTS_INLINE
+# define ERTS_ALC_FORCE_INLINE static ERTS_FORCE_INLINE
#else
# define ERTS_ALC_DO_INLINE 0
# define ERTS_ALC_INLINE
+# define ERTS_ALC_FORCE_INLINE
#endif
#define ERTS_ALC_NO_FIXED_SIZES \
@@ -65,11 +67,11 @@ void *erts_sys_aligned_realloc(UWord alignment, void *ptr, UWord size, UWord old
void erts_sys_aligned_free(UWord alignment, void *ptr);
#endif
-Eterm erts_memory(int *, void *, void *, Eterm);
-Eterm erts_allocated_areas(int *, void *, void *);
+Eterm erts_memory(fmtfn_t *, void *, void *, Eterm);
+Eterm erts_allocated_areas(fmtfn_t *, void *, void *);
Eterm erts_alloc_util_allocators(void *proc);
-void erts_allocator_info(int, void *);
+void erts_allocator_info(fmtfn_t, void *);
Eterm erts_allocator_options(void *proc);
struct process;
@@ -150,12 +152,10 @@ void erts_allctr_wrapper_pre_lock(void);
void erts_allctr_wrapper_pre_unlock(void);
void erts_alloc_register_scheduler(void *vesdp);
-#ifdef ERTS_SMP
void erts_alloc_scheduler_handle_delayed_dealloc(void *vesdp,
int *need_thr_progress,
ErtsThrPrgrVal *thr_prgr_p,
int *more_work);
-#endif
erts_aint32_t erts_alloc_fix_alloc_shrink(int ix, erts_aint32_t flgs);
__decl_noreturn void erts_alloc_enomem(ErtsAlcType_t,Uint)
@@ -169,12 +169,11 @@ __decl_noreturn void erts_realloc_n_enomem(ErtsAlcType_t,void*,Uint)
__decl_noreturn void erts_alc_fatal_error(int,int,ErtsAlcType_t,...)
__noreturn;
-/* --- DO *NOT* USE THESE DEPRECATED FUNCTIONS --- Instead use: */
-void *safe_alloc(Uint) __deprecated; /* erts_alloc() */
-void *safe_realloc(void *, Uint) __deprecated; /* erts_realloc() */
-void sys_free(void *) __deprecated; /* erts_free() */
-void *sys_alloc(Uint ) __deprecated; /* erts_alloc_fnf() */
-void *sys_realloc(void *, Uint) __deprecated; /* erts_realloc_fnf() */
+#undef ERTS_HAVE_IS_IN_LITERAL_RANGE
+#if defined(ARCH_32) || defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
+# define ERTS_HAVE_IS_IN_LITERAL_RANGE
+#endif
+
/*
* erts_alloc[_fnf](), erts_realloc[_fnf](), erts_free() works as
@@ -203,6 +202,9 @@ void erts_free(ErtsAlcType_t type, void *ptr);
void *erts_alloc_fnf(ErtsAlcType_t type, Uint size);
void *erts_realloc_fnf(ErtsAlcType_t type, void *ptr, Uint size);
int erts_is_allctr_wrapper_prelocked(void);
+#ifdef ERTS_HAVE_IS_IN_LITERAL_RANGE
+int erts_is_in_literal_range(void* ptr);
+#endif
#endif /* #if !ERTS_ALC_DO_INLINE */
@@ -220,12 +222,14 @@ ERTS_ALC_INLINE
void *erts_alloc(ErtsAlcType_t type, Uint size)
{
void *res;
+ ERTS_MSACC_PUSH_AND_SET_STATE_X(ERTS_MSACC_STATE_ALLOC);
res = (*erts_allctrs[ERTS_ALC_T2A(type)].alloc)(
- ERTS_ALC_T2N(type),
- erts_allctrs[ERTS_ALC_T2A(type)].extra,
- size);
+ ERTS_ALC_T2N(type),
+ erts_allctrs[ERTS_ALC_T2A(type)].extra,
+ size);
if (!res)
erts_alloc_n_enomem(ERTS_ALC_T2N(type), size);
+ ERTS_MSACC_POP_STATE_X();
return res;
}
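erts_alloc()/erts_realloc()/erts_free() now bracket the underlying call with microstate-accounting push/pop, attributing allocator time to the "alloc" state; this is also why the fnf variants below grow a local res, since the pop has to happen before the return. A simplified standard-C sketch of the bracket (the real macros are more involved):

    #include <stddef.h>

    typedef struct { int state; } msacc;

    static void *alloc_accounted(msacc *m, void *(*fn)(size_t), size_t sz)
    {
        int saved = m->state;     /* push current state */
        void *res;
        m->state = 1 /* ALLOC */; /* account this scope */
        res = fn(sz);
        m->state = saved;         /* pop before returning, as in
                                     the reworked erts_alloc_fnf() */
        return res;
    }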
@@ -233,6 +237,7 @@ ERTS_ALC_INLINE
void *erts_realloc(ErtsAlcType_t type, void *ptr, Uint size)
{
void *res;
+ ERTS_MSACC_PUSH_AND_SET_STATE_X(ERTS_MSACC_STATE_ALLOC);
res = (*erts_allctrs[ERTS_ALC_T2A(type)].realloc)(
ERTS_ALC_T2N(type),
erts_allctrs[ERTS_ALC_T2A(type)].extra,
@@ -240,37 +245,48 @@ void *erts_realloc(ErtsAlcType_t type, void *ptr, Uint size)
size);
if (!res)
erts_realloc_n_enomem(ERTS_ALC_T2N(type), ptr, size);
+ ERTS_MSACC_POP_STATE_X();
return res;
}
ERTS_ALC_INLINE
void erts_free(ErtsAlcType_t type, void *ptr)
{
+ ERTS_MSACC_PUSH_AND_SET_STATE_X(ERTS_MSACC_STATE_ALLOC);
(*erts_allctrs[ERTS_ALC_T2A(type)].free)(
ERTS_ALC_T2N(type),
erts_allctrs[ERTS_ALC_T2A(type)].extra,
ptr);
+ ERTS_MSACC_POP_STATE_X();
}
ERTS_ALC_INLINE
void *erts_alloc_fnf(ErtsAlcType_t type, Uint size)
{
- return (*erts_allctrs[ERTS_ALC_T2A(type)].alloc)(
+ void *res;
+ ERTS_MSACC_PUSH_AND_SET_STATE_X(ERTS_MSACC_STATE_ALLOC);
+ res = (*erts_allctrs[ERTS_ALC_T2A(type)].alloc)(
ERTS_ALC_T2N(type),
erts_allctrs[ERTS_ALC_T2A(type)].extra,
size);
+ ERTS_MSACC_POP_STATE_X();
+ return res;
}
ERTS_ALC_INLINE
void *erts_realloc_fnf(ErtsAlcType_t type, void *ptr, Uint size)
{
- return (*erts_allctrs[ERTS_ALC_T2A(type)].realloc)(
+ void *res;
+ ERTS_MSACC_PUSH_AND_SET_STATE_X(ERTS_MSACC_STATE_ALLOC);
+ res = (*erts_allctrs[ERTS_ALC_T2A(type)].realloc)(
ERTS_ALC_T2N(type),
erts_allctrs[ERTS_ALC_T2A(type)].extra,
ptr,
size);
+ ERTS_MSACC_POP_STATE_X();
+ return res;
}
ERTS_ALC_INLINE
@@ -280,6 +296,28 @@ int erts_is_allctr_wrapper_prelocked(void)
&& !!erts_tsd_get(erts_allctr_prelock_tsd_key); /* by me */
}
+#ifdef ERTS_HAVE_IS_IN_LITERAL_RANGE
+
+ERTS_ALC_FORCE_INLINE
+int erts_is_in_literal_range(void* ptr)
+{
+#if defined(ARCH_32)
+ Uint ix = (UWord)ptr >> ERTS_MMAP_SUPERALIGNED_BITS;
+
+ return erts_literal_vspace_map[ix / ERTS_VSPACE_WORD_BITS]
+ & ((UWord)1 << (ix % ERTS_VSPACE_WORD_BITS));
+
+#elif defined(ARCH_64)
+ extern char* erts_literals_start;
+ extern UWord erts_literals_size;
+ return ErtsInArea(ptr, erts_literals_start, erts_literals_size);
+#else
+# error No ARCH_xx
+#endif
+}
+
+#endif /* ERTS_HAVE_IS_IN_LITERAL_RANGE */
+
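On 32-bit the literal check consults a bit map with one bit per superaligned chunk of the address space; on 64-bit it is a plain area test against the reserved literal range. A standalone sketch of the 32-bit bitmap path (chunk size assumed 256 KiB, i.e. 18 bits; the real value comes from ERTS_MMAP_SUPERALIGNED_BITS):

    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t UWord;
    #define SUPERALIGNED_BITS 18
    #define WORD_BITS (sizeof(UWord) * 8)

    static UWord vspace_map[((UWord) 1 << (32 - SUPERALIGNED_BITS))
                            / WORD_BITS + 1];

    static int in_literal_range(void *ptr)
    {
        UWord ix = (UWord) ptr >> SUPERALIGNED_BITS;
        return (vspace_map[ix / WORD_BITS] >> (ix % WORD_BITS)) & 1;
    }

    int main(void)
    {
        vspace_map[0] |= (UWord) 1 << 1;  /* mark chunk #1 as literal */
        printf("%d\n", in_literal_range((void *) ((UWord) 1 << 18)));
        return 0;
    }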
#endif /* #if ERTS_ALC_DO_INLINE || defined(ERTS_ALC_INTERNAL__) */
#define ERTS_ALC_GET_THR_IX() ((int) erts_get_scheduler_id())
@@ -296,54 +334,23 @@ erts_alloc_get_verify_unused_temp_alloc(Allctr_t **allctr);
(((((SZ) - 1) / ERTS_CACHE_LINE_SIZE) + 1) * ERTS_CACHE_LINE_SIZE)
#define ERTS_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \
-ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, \
- (void) 0, (void) 0, (void) 0)
-
-#define ERTS_SMP_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \
-static erts_smp_spinlock_t NAME##_lck; \
-ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, \
- erts_smp_spinlock_init(&NAME##_lck, #NAME "_alloc_lock"),\
- erts_smp_spin_lock(&NAME##_lck), \
- erts_smp_spin_unlock(&NAME##_lck))
-
-#ifdef ERTS_SMP
-
-#define ERTS_TS_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \
-ERTS_SMP_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT)
-
-#else /* !ERTS_SMP */
+ ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, (void) 0, (void) 0, (void) 0)
#define ERTS_TS_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \
-static erts_mtx_t NAME##_lck; \
-ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, \
- erts_mtx_init(NAME##_lck, #NAME "_alloc_lock"), \
- erts_mtx_lock(&NAME##_lck), \
- erts_mtx_unlock(&NAME##_lck))
-
-
-#endif
-
-#define ERTS_PALLOC_IMPL(NAME, TYPE, PASZ) \
-ERTS_PRE_ALLOC_IMPL(NAME, TYPE, PASZ, (void) 0, (void) 0, (void) 0)
+ERTS_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT)
#define ERTS_TS_PALLOC_IMPL(NAME, TYPE, PASZ) \
static erts_spinlock_t NAME##_lck; \
ERTS_PRE_ALLOC_IMPL(NAME, TYPE, PASZ, \
- erts_spinlock_init(&NAME##_lck, #NAME "_alloc_lock"),\
+ erts_spinlock_init(&NAME##_lck, #NAME "_alloc_lock", NIL, \
+ ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR),\
erts_spin_lock(&NAME##_lck), \
erts_spin_unlock(&NAME##_lck))
-#ifdef ERTS_SMP
-#define ERTS_SMP_PALLOC_IMPL(NAME, TYPE, PASZ) \
+#define ERTS_PALLOC_IMPL(NAME, TYPE, PASZ) \
ERTS_TS_PALLOC_IMPL(NAME, TYPE, PASZ)
-#else /* !ERTS_SMP */
-
-#define ERTS_SMP_PALLOC_IMPL(NAME, TYPE, PASZ) \
- ERTS_PALLOC_IMPL(NAME, TYPE, PASZ)
-
-#endif
#define ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, ILCK, LCK, ULCK) \
ERTS_PRE_ALLOC_IMPL(NAME##_pre, TYPE, PASZ, ILCK, LCK, ULCK) \
@@ -367,21 +374,11 @@ NAME##_free(TYPE *p) \
erts_free(ALCT, (void *) p); \
}
-#ifdef ERTS_SMP
#define ERTS_SCHED_PREF_PALLOC_IMPL(NAME, TYPE, PASZ) \
ERTS_SCHED_PREF_PRE_ALLOC_IMPL(NAME, TYPE, PASZ)
-#else
-#define ERTS_SCHED_PREF_PALLOC_IMPL(NAME, TYPE, PASZ) \
- ERTS_PRE_ALLOC_IMPL(NAME, TYPE, PASZ, (void) 0, (void) 0, (void) 0)
-#endif
-#ifdef ERTS_SMP
#define ERTS_SCHED_PREF_AUX(NAME, TYPE, PASZ) \
ERTS_SCHED_PREF_PRE_ALLOC_IMPL(NAME##_pre, TYPE, PASZ)
-#else
-#define ERTS_SCHED_PREF_AUX(NAME, TYPE, PASZ) \
-ERTS_PRE_ALLOC_IMPL(NAME##_pre, TYPE, PASZ, (void) 0, (void) 0, (void) 0)
-#endif
#define ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \
ERTS_SCHED_PREF_AUX(NAME, TYPE, PASZ) \
@@ -405,8 +402,34 @@ NAME##_free(TYPE *p) \
erts_free(ALCT, (void *) p); \
}
+#define ERTS_THR_PREF_AUX(NAME, TYPE, PASZ) \
+ERTS_THR_PREF_PRE_ALLOC_IMPL(NAME##_pre, TYPE, PASZ)
+
+#define ERTS_THR_PREF_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \
+ERTS_THR_PREF_AUX(NAME, TYPE, PASZ) \
+static void \
+init_##NAME##_alloc(int nthreads) \
+{ \
+ init_##NAME##_pre_alloc(nthreads); \
+} \
+static ERTS_INLINE TYPE * \
+NAME##_alloc(void) \
+{ \
+ TYPE *res = NAME##_pre_alloc(); \
+ if (!res) \
+ res = erts_alloc(ALCT, sizeof(TYPE)); \
+ return res; \
+} \
+static ERTS_INLINE void \
+NAME##_free(TYPE *p) \
+{ \
+ if (!NAME##_pre_free(p)) \
+ erts_free(ALCT, (void *) p); \
+}
+
+
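The generated NAME##_alloc first tries the per-thread pre-allocated pool and falls back to erts_alloc; NAME##_free reports whether the pool reclaimed the block, so foreign blocks go back to the general allocator. Sketch of that contract (pool_take/pool_give are stand-ins for the erts_sspa calls):

    #include <stdlib.h>

    extern void *pool_take(void);     /* NULL when the pool is empty */
    extern int   pool_give(void *p);  /* nonzero if the pool took p  */

    static void *quick_alloc(size_t sz)
    {
        void *res = pool_take();
        return res ? res : malloc(sz); /* malloc stands in for erts_alloc */
    }

    static void quick_free(void *p)
    {
        if (!pool_give(p))
            free(p);
    }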
#ifdef DEBUG
-#define ERTS_PRE_ALLOC_SIZE(SZ) 2
+#define ERTS_PRE_ALLOC_SIZE(SZ) ((SZ) < 1000 ? (SZ)/10 + 10 : 100)
#define ERTS_PRE_ALLOC_CLOBBER(P, T) memset((void *) (P), 0xfd, sizeof(T))
#else
#define ERTS_PRE_ALLOC_SIZE(SZ) ((SZ) > 1 ? (SZ) : 1)
@@ -485,7 +508,8 @@ init_##NAME##_alloc(void) \
{ \
sspa_data_##NAME##__ = \
erts_sspa_create(sizeof(union erts_sspa_##NAME##__), \
- ERTS_PRE_ALLOC_SIZE((PASZ))); \
+ ERTS_PRE_ALLOC_SIZE((PASZ)), \
+ 0, NULL); \
} \
\
static TYPE * \
@@ -507,6 +531,57 @@ NAME##_free(TYPE *p) \
(char *) p); \
}
+
+#define ERTS_THR_PREF_PRE_ALLOC_IMPL(NAME, TYPE, PASZ) \
+union erts_sspa_##NAME##__ { \
+ erts_sspa_blk_t next; \
+ TYPE type; \
+}; \
+ \
+static erts_sspa_data_t *sspa_data_##NAME##__; \
+ \
+static void \
+init_##NAME##_alloc(int nthreads) \
+{ \
+ sspa_data_##NAME##__ = \
+ erts_sspa_create(sizeof(union erts_sspa_##NAME##__), \
+ ERTS_PRE_ALLOC_SIZE((PASZ)), \
+ nthreads, \
+ #NAME); \
+} \
+ \
+void \
+erts_##NAME##_alloc_init_thread(void) \
+{ \
+ int id = erts_atomic_inc_read_nob(&sspa_data_##NAME##__->id_generator);\
+ if (id > sspa_data_##NAME##__->nthreads) { \
+ erts_exit(ERTS_ABORT_EXIT, \
+ "%s:%d:%s(): Too many threads for '" #NAME "'\n", \
+ __FILE__, __LINE__, __func__); \
+ } \
+ erts_tsd_set(sspa_data_##NAME##__->tsd_key, (void*)(SWord)id); \
+} \
+ \
+static TYPE * \
+NAME##_alloc(void) \
+{ \
+ int id = (int)(SWord)erts_tsd_get(sspa_data_##NAME##__->tsd_key); \
+ if (id == 0) \
+ return NULL; \
+ return (TYPE *) erts_sspa_alloc(sspa_data_##NAME##__, \
+ id-1); \
+} \
+ \
+static int \
+NAME##_free(TYPE *p) \
+{ \
+ int id = (int)(SWord)erts_tsd_get(sspa_data_##NAME##__->tsd_key); \
+ return erts_sspa_free(sspa_data_##NAME##__, \
+ id - 1, \
+ (char *) p); \
+}
+
+
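The per-thread slot used by the allocator above comes from an atomic id generator and is cached in thread-specific data; a thread that never registered reads id 0 and gets NULL back, which is what forces the erts_alloc() fallback in the quick-alloc wrapper. A self-contained C11 sketch of that registration scheme, with illustrative names only:

    #include <stdatomic.h>
    #include <threads.h>
    #include <stddef.h>

    #define NTHREADS 8

    static atomic_int id_generator;       /* hands out ids 1..NTHREADS */
    static thread_local int my_id;        /* 0 means "unregistered"    */

    /* Called exactly once per participating thread, playing the role
     * of erts_##NAME##_alloc_init_thread() above. */
    static int slot_register(void)
    {
        int id = atomic_fetch_add(&id_generator, 1) + 1;
        if (id > NTHREADS)
            return -1;                    /* too many threads */
        my_id = id;
        return 0;
    }

    /* Mirrors NAME##_alloc(): an unregistered thread gets NULL and
     * its caller must fall back to the general allocator. */
    static void *slot_alloc(void)
    {
        if (my_id == 0)
            return NULL;
        /* ...pop an entry from per-thread pool my_id - 1 (elided)... */
        return NULL;
    }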
#ifdef DEBUG
#define ERTS_ALC_DBG_BLK_SZ(PTR) (*(((UWord *) (PTR)) - 2))
#endif /* #ifdef DEBUG */
@@ -515,5 +590,3 @@ NAME##_free(TYPE *p) \
#undef ERTS_ALC_ATTRIBUTES
#endif /* #ifndef ERL_ALLOC_H__ */
-
-
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 17ac6316b7..2960272eab 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -1,18 +1,19 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2003-2014. All Rights Reserved.
+# Copyright Ericsson AB 2003-2017. All Rights Reserved.
#
-# The contents of this file are subject to the Erlang Public License,
-# Version 1.1, (the "License"); you may not use this file except in
-# compliance with the License. You should have received a copy of the
-# Erlang Public License along with this software. If not, it can be
-# retrieved online at http://www.erlang.org/.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
#
-# Software distributed under the License is distributed on an "AS IS"
-# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-# the License for the specific language governing rights and limitations
-# under the License.
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
#
# %CopyrightEnd%
#
@@ -52,19 +53,10 @@
#
# IMPORTANT! Only use 7-bit ascii text in this file!
-+if smp
-+disable threads_no_smp
-+else
-+if threads
-+enable threads_no_smp
-+else
-+disable threads_no_smp
-+endif
-+endif
# --- Allocator declarations -------------------------------------------------
#
-# If, and only if, the same thread performes *all* allocations,
+# If, and only if, the same thread performs *all* allocations,
# reallocations and deallocations of all memory types that are handled
# by a specific allocator (<ALLOCATOR> in type declaration), set
# <MULTI_THREAD> for this specific allocator to false; otherwise, set
@@ -76,8 +68,6 @@
allocator SYSTEM true sys_alloc
-+if smp
-
allocator TEMPORARY true temp_alloc
allocator SHORT_LIVED true sl_alloc
allocator STANDARD true std_alloc
@@ -85,33 +75,15 @@ allocator LONG_LIVED true ll_alloc
allocator EHEAP true eheap_alloc
allocator ETS true ets_alloc
allocator FIXED_SIZE true fix_alloc
-
-+if halfword
-allocator LONG_LIVED_LOW true ll_low_alloc
-allocator STANDARD_LOW true std_low_alloc
-+endif
-
-+else # Non smp build
-
-allocator TEMPORARY false temp_alloc
-allocator SHORT_LIVED false sl_alloc
-allocator STANDARD false std_alloc
-allocator LONG_LIVED false ll_alloc
-allocator EHEAP false eheap_alloc
-allocator ETS false ets_alloc
-allocator FIXED_SIZE false fix_alloc
-
-+if halfword
-allocator LONG_LIVED_LOW false ll_low_alloc
-allocator STANDARD_LOW false std_low_alloc
-+endif
-
+allocator LITERAL true literal_alloc
++if exec_alloc
+allocator EXEC true exec_alloc
+endif
allocator BINARY true binary_alloc
allocator DRIVER true driver_alloc
-
+allocator TEST true test_alloc
# --- Class declarations -----------------------------------------------------
#
@@ -159,13 +131,18 @@ type OLD_HEAP EHEAP PROCESSES old_heap
type HEAP_FRAG EHEAP PROCESSES heap_frag
type TMP_HEAP TEMPORARY PROCESSES tmp_heap
type MSG_REF FIXED_SIZE PROCESSES msg_ref
+type MSG EHEAP PROCESSES message
+type MSGQ_CHNG SHORT_LIVED PROCESSES messages_queue_change
type MSG_ROOTS TEMPORARY PROCESSES msg_roots
type ROOTSET TEMPORARY PROCESSES root_set
type LOADER_TMP TEMPORARY CODE loader_tmp
type PREPARED_CODE SHORT_LIVED CODE prepared_code
-type BIF_TIMER_TABLE LONG_LIVED SYSTEM bif_timer_table
-type SL_BIF_TIMER SHORT_LIVED PROCESSES bif_timer_sl
-type LL_BIF_TIMER STANDARD PROCESSES bif_timer_ll
+type TIMER_SERVICE LONG_LIVED SYSTEM timer_service
+type LL_PTIMER FIXED_SIZE PROCESSES ll_ptimer
+type HL_PTIMER FIXED_SIZE PROCESSES hl_ptimer
+type BIF_TIMER FIXED_SIZE PROCESSES bif_timer
+type TIMER_REQUEST SHORT_LIVED PROCESSES timer_request
+type BTM_YIELD_STATE SHORT_LIVED PROCESSES btm_yield_state
type REG_TABLE STANDARD SYSTEM reg_tab
type FUN_TABLE STANDARD CODE fun_tab
type DIST_TABLE STANDARD SYSTEM dist_tab
@@ -222,6 +199,7 @@ type DB_DMC_ERROR ETS ETS db_dmc_error
type DB_DMC_ERR_INFO ETS ETS db_dmc_error_info
type DB_TERM ETS ETS db_term
type DB_PROC_CLEANUP SHORT_LIVED ETS db_proc_cleanup_state
+type ETS_ALL_REQ SHORT_LIVED ETS ets_all_request
type INSTR_INFO LONG_LIVED SYSTEM instr_info
type LOGGER_DSBUF TEMPORARY SYSTEM logger_dsbuf
type TMP_DSBUF TEMPORARY SYSTEM tmp_dsbuf
@@ -259,7 +237,6 @@ type PRTSD STANDARD SYSTEM port_specific_data
type CPUDATA LONG_LIVED SYSTEM cpu_data
type TMP_CPU_IDS SHORT_LIVED SYSTEM tmp_cpu_ids
type EXT_TERM_DATA SHORT_LIVED PROCESSES external_term_data
-type ZLIB STANDARD SYSTEM zlib
type CPU_GRPS_MAP LONG_LIVED SYSTEM cpu_groups_map
type AUX_WORK_TMO LONG_LIVED SYSTEM aux_work_timeouts
type MISC_AUX_WORK_Q LONG_LIVED SYSTEM misc_aux_work_q
@@ -267,33 +244,34 @@ type CODE_IX_LOCK_Q SHORT_LIVED SYSTEM code_ix_lock_q
type PROC_INTERVAL LONG_LIVED SYSTEM process_interval
type BUSY_CALLER_TAB SHORT_LIVED SYSTEM busy_caller_table
type BUSY_CALLER SHORT_LIVED SYSTEM busy_caller
-type PORT_DATA_HEAP STANDARD SYSTEM port_data_heap
type PROC_SYS_TSK SHORT_LIVED PROCESSES proc_sys_task
type PROC_SYS_TSK_QS SHORT_LIVED PROCESSES proc_sys_task_queues
+type NEW_TIME_OFFSET SHORT_LIVED SYSTEM new_time_offset
+type IOB_REQ SHORT_LIVED SYSTEM io_bytes_request
+type TRACER_NIF LONG_LIVED SYSTEM tracer_nif
+type TRACE_MSG_QUEUE SHORT_LIVED SYSTEM trace_message_queue
+type SCHED_ASYNC_JOB SHORT_LIVED SYSTEM async_calls
+type DIRTY_START STANDARD PROCESSES dirty_start
+type DIRTY_SL SHORT_LIVED SYSTEM dirty_short_lived
+type MREF_NSCHED_ENT FIXED_SIZE SYSTEM nsched_magic_ref_entry
+type MREF_ENT STANDARD SYSTEM magic_ref_entry
+type MREF_TAB_BKTS STANDARD SYSTEM magic_ref_table_buckets
+type MREF_TAB LONG_LIVED SYSTEM magic_ref_table
+type MINDIRECTION FIXED_SIZE SYSTEM magic_indirection
+type BINARY_FIND SHORT_LIVED PROCESSES binary_find
+type OPEN_PORT_ENV TEMPORARY SYSTEM open_port_env
+type CRASH_DUMP STANDARD SYSTEM crash_dump
-+if threads_no_smp
-# Need thread safe allocs, but std_alloc and fix_alloc are not;
-# use driver_alloc which is...
-type THR_Q_EL DRIVER SYSTEM thr_q_element
-type THR_Q_EL_SL DRIVER SYSTEM sl_thr_q_element
-type MISC_AUX_WORK DRIVER SYSTEM misc_aux_work
-+else
type THR_Q_EL STANDARD SYSTEM thr_q_element
type THR_Q_EL_SL FIXED_SIZE SYSTEM sl_thr_q_element
type MISC_AUX_WORK SHORT_LIVED SYSTEM misc_aux_work
-+endif
type THR_Q STANDARD SYSTEM thr_queue
type THR_Q_SL SHORT_LIVED SYSTEM short_lived_thr_queue
type THR_Q_LL LONG_LIVED SYSTEM long_lived_thr_queue
-+if smp
type ASYNC SHORT_LIVED SYSTEM async
-+else
-# sl_alloc is not thread safe in non smp build; therefore, we use driver_alloc
-type ASYNC DRIVER SYSTEM async
-+endif
+type ZLIB STANDARD SYSTEM zlib
-+if smp
type PORT_LOCK STANDARD SYSTEM port_lock
type DRIVER_LOCK STANDARD SYSTEM driver_lock
type XPORTS_LIST SHORT_LIVED SYSTEM extra_port_list
@@ -302,40 +280,30 @@ type RUNQ_BLNS LONG_LIVED SYSTEM run_queue_balancing
type THR_PRGR_IDATA LONG_LIVED SYSTEM thr_prgr_internal_data
type THR_PRGR_DATA LONG_LIVED SYSTEM thr_prgr_data
type T_THR_PRGR_DATA SHORT_LIVED SYSTEM temp_thr_prgr_data
-+endif
+type RELEASE_LAREA SHORT_LIVED SYSTEM release_literal_area
#
# Types used for special emulators
#
-+if threads
-
type ETHR_STD STANDARD SYSTEM ethread_standard
type ETHR_SL SHORT_LIVED SYSTEM ethread_short_lived
type ETHR_LL LONG_LIVED SYSTEM ethread_long_lived
-+endif
-
-+if shared_heap
-
-type STACK STANDARD PROCESSES stack
-type ACTIVE_PROCS STANDARD PROCESSES active_procs
-
-+endif
-
-+if smp
-type SL_PTIMER SHORT_LIVED SYSTEM ptimer_sl
-type LL_PTIMER STANDARD SYSTEM ptimer_ll
type SYS_MSG_Q SHORT_LIVED PROCESSES system_messages_queue
type FP_EXCEPTION LONG_LIVED SYSTEM fp_exception
type LL_MPATHS LONG_LIVED SYSTEM ll_migration_paths
type SL_MPATHS SHORT_LIVED SYSTEM sl_migration_paths
-+endif
+if hipe
-# Currently most hipe code use this type.
-type HIPE SYSTEM SYSTEM hipe_data
+type HIPE_LL LONG_LIVED SYSTEM hipe_long_lived
+type HIPE_SL SHORT_LIVED SYSTEM hipe_short_lived
+type HIPE_STK STANDARD SYSTEM hipe_nstack
+
++if exec_alloc
+type HIPE_EXEC EXEC CODE hipe_code
++endif
+endif
@@ -345,45 +313,38 @@ type SSB SHORT_LIVED PROCESSES ssb
+endif
++if lcnt
-+if halfword
-
-type DDLL_PROCESS STANDARD_LOW SYSTEM ddll_processes
-type MONITOR_LH STANDARD_LOW PROCESSES monitor_lh
-type NLINK_LH STANDARD_LOW PROCESSES nlink_lh
-type CODE LONG_LIVED_LOW CODE code
-type DB_HEIR_DATA STANDARD_LOW ETS db_heir_data
-type DB_MS_PSDO_PROC LONG_LIVED_LOW ETS db_match_pseudo_proc
-type SCHDLR_DATA LONG_LIVED_LOW SYSTEM scheduler_data
-type LL_TEMP_TERM LONG_LIVED_LOW SYSTEM ll_temp_term
+type LCNT_CARRIER STANDARD SYSTEM lcnt_lock_info_carrier
+type LCNT_VECTOR SHORT_LIVED SYSTEM lcnt_sample_vector
-type EXPORT LONG_LIVED_LOW CODE export_entry
-type MONITOR_SH STANDARD_LOW PROCESSES monitor_sh
-type NLINK_SH STANDARD_LOW PROCESSES nlink_sh
-type AINFO_REQ STANDARD_LOW SYSTEM alloc_info_request
-type SCHED_WTIME_REQ STANDARD_LOW SYSTEM sched_wall_time_request
-type GC_INFO_REQ STANDARD_LOW SYSTEM gc_info_request
++endif
-+else # "fullword"
+type DEBUG SHORT_LIVED SYSTEM debugging
type DDLL_PROCESS STANDARD SYSTEM ddll_processes
type MONITOR_LH STANDARD PROCESSES monitor_lh
type NLINK_LH STANDARD PROCESSES nlink_lh
type CODE LONG_LIVED CODE code
+type LITERAL LITERAL CODE literal
+type LITERAL_REF SHORT_LIVED CODE literal_area_ref
+type PURGE_DATA SHORT_LIVED CODE purge_data
type DB_HEIR_DATA STANDARD ETS db_heir_data
type DB_MS_PSDO_PROC LONG_LIVED ETS db_match_pseudo_proc
type SCHDLR_DATA LONG_LIVED SYSTEM scheduler_data
type LL_TEMP_TERM LONG_LIVED SYSTEM ll_temp_term
+type NIF_TRAP_EXPORT STANDARD PROCESSES nif_trap_export_entry
+type NIF_EXP_TRACE FIXED_SIZE PROCESSES nif_export_trace
type EXPORT LONG_LIVED CODE export_entry
type MONITOR_SH FIXED_SIZE PROCESSES monitor_sh
type NLINK_SH FIXED_SIZE PROCESSES nlink_sh
type AINFO_REQ SHORT_LIVED SYSTEM alloc_info_request
type SCHED_WTIME_REQ SHORT_LIVED SYSTEM sched_wall_time_request
type GC_INFO_REQ SHORT_LIVED SYSTEM gc_info_request
-
-+endif
-
+type PORT_DATA_HEAP STANDARD SYSTEM port_data_heap
+type MSACC DRIVER SYSTEM microstate_accounting
+type SYS_CHECK_REQ SHORT_LIVED SYSTEM system_check_request
#
# Types used by system specific code
@@ -392,8 +353,8 @@ type GC_INFO_REQ SHORT_LIVED SYSTEM gc_info_request
type TEMP_TERM TEMPORARY SYSTEM temp_term
type DRV_TAB LONG_LIVED SYSTEM drv_tab
type DRV_EV_STATE LONG_LIVED SYSTEM driver_event_state
-type DRV_EV_D_STATE FIXED_SIZE SYSTEM driver_event_data_state
type DRV_SEL_D_STATE FIXED_SIZE SYSTEM driver_select_data_state
+type NIF_SEL_D_STATE FIXED_SIZE SYSTEM enif_select_data_state
type FD_LIST SHORT_LIVED SYSTEM fd_list
type POLLSET LONG_LIVED SYSTEM pollset
type POLLSET_UPDREQ SHORT_LIVED SYSTEM pollset_update_req
@@ -411,24 +372,12 @@ type CS_PROG_PATH LONG_LIVED SYSTEM cs_prog_path
type ENVIRONMENT TEMPORARY SYSTEM environment
type PUTENV_STR SYSTEM SYSTEM putenv_string
type PRT_REP_EXIT STANDARD SYSTEM port_report_exit
+type SYS_BLOCKING STANDARD SYSTEM sys_blocking
-+endif
-
-+if ose
-
-type SYS_READ_BUF TEMPORARY SYSTEM sys_read_buf
-type FD_TAB LONG_LIVED SYSTEM fd_tab
-type FD_ENTRY_BUF STANDARD SYSTEM fd_entry_buf
-type FD_SIG_LIST SHORT_LIVED SYSTEM fd_sig_list
-type DRV_EV STANDARD SYSTEM driver_event
-type CS_PROG_PATH LONG_LIVED SYSTEM cs_prog_path
-type ENVIRONMENT TEMPORARY SYSTEM environment
-type PUTENV_STR SYSTEM SYSTEM putenv_string
-type PRT_REP_EXIT STANDARD SYSTEM port_report_exit
+type SYS_WRITE_BUF TEMPORARY SYSTEM sys_write_buf
+endif
-
+if win32
type DRV_DATA_BUF SYSTEM SYSTEM drv_data_buf
@@ -440,4 +389,7 @@ type CON_VPRINTF_BUF TEMPORARY SYSTEM con_vprintf_buf
+endif
+# This type should only be used for testing
+type TEST TEST SYSTEM testing
+
# ----------------------------------------------------------------------------
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index 45f0cc4312..4d4bddb93f 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -51,6 +52,7 @@
#ifdef ERTS_ENABLE_LOCK_COUNT
#include "erl_lock_count.h"
#endif
+#include "lttng-wrapper.h"
#if defined(ERTS_ALLOC_UTIL_HARD_DEBUG) && defined(__GNUC__)
#warning "* * * * * * * * * *"
@@ -205,7 +207,7 @@ MBC after deallocating first block:
ASSERT(((UWord)(F) & (~FLG_MASK|THIS_FREE_BLK_HDR_FLG|PREV_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \
(B)->bhdr = ((Sz) | (F)), \
(B)->u.carrier = (C))
-
+
# define IS_MBC_FIRST_ABLK(AP,B) \
((((UWord)(B) & ~ERTS_SACRR_UNIT_MASK) == MBC_HEADER_SIZE(AP)) \
&& ((B)->bhdr & MBC_ABLK_OFFSET_MASK) == 0)
@@ -303,9 +305,6 @@ MBC after deallocating first block:
# define ERTS_ALC_CPOOL_DEBUG
#endif
-#ifndef ERTS_SMP
-# undef ERTS_ALC_CPOOL_DEBUG
-#endif
#ifdef ERTS_ALC_CPOOL_DEBUG
# define ERTS_ALC_CPOOL_ASSERT(A) \
@@ -320,13 +319,8 @@ MBC after deallocating first block:
# define ERTS_ALC_CPOOL_ASSERT(A) ((void) 1)
#endif
-#ifdef ERTS_SMP
#define ERTS_ALC_IS_CPOOL_ENABLED(A) ((A)->cpool.util_limit)
-#else
-#define ERTS_ALC_IS_CPOOL_ENABLED(A) (0)
-#endif
-#ifdef ERTS_SMP
#define ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON 1000
#define ERTS_ALC_CPOOL_ALLOC_OP_INC 8
@@ -365,29 +359,16 @@ do { \
} \
} while (0)
-#else
-#define ERTS_ALC_CPOOL_ALLOC_OP(A)
-#define ERTS_ALC_CPOOL_REALLOC_OP(A)
-#define ERTS_ALC_CPOOL_FREE_OP(A)
-#endif
#define ERTS_CRR_ALCTR_FLG_IN_POOL (((erts_aint_t) 1) << 0)
#define ERTS_CRR_ALCTR_FLG_BUSY (((erts_aint_t) 1) << 1)
#define ERTS_CRR_ALCTR_FLG_MASK (ERTS_CRR_ALCTR_FLG_IN_POOL | \
ERTS_CRR_ALCTR_FLG_BUSY)
-#ifdef ERTS_SMP
#define SBC_HEADER_SIZE \
- (UNIT_CEILING(sizeof(Carrier_t) \
- - sizeof(ErtsAlcCPoolData_t) \
- + ABLK_HDR_SZ) \
+ (UNIT_CEILING(offsetof(Carrier_t, cpool) \
+ + ABLK_HDR_SZ) \
- ABLK_HDR_SZ)
-#else
-#define SBC_HEADER_SIZE \
- (UNIT_CEILING(sizeof(Carrier_t) \
- + ABLK_HDR_SZ) \
- - ABLK_HDR_SZ)
-#endif
#define MBC_HEADER_SIZE(AP) ((AP)->mbc_header_size)
@@ -401,7 +382,7 @@ do { \
#define SET_CARRIER_HDR(C, Sz, F, AP) \
(ASSERT(((Sz) & FLG_MASK) == 0), (C)->chdr = ((Sz) | (F)), \
- erts_smp_atomic_init_nob(&(C)->allctr, (erts_aint_t) (AP)))
+ erts_atomic_init_nob(&(C)->allctr, (erts_aint_t) (AP)))
#define BLK_TO_SBC(B) \
((Carrier_t *) (((char *) (B)) - SBC_HEADER_SIZE))
@@ -597,15 +578,11 @@ do { \
(AP)->mbcs.blocks.curr.size -= (CRR)->cpool.blocks_size; \
} while (0)
-#ifdef ERTS_SMP
#define STAT_MBC_BLK_ALLOC_CRR(CRR, BSZ) \
do { \
(CRR)->cpool.blocks++; \
(CRR)->cpool.blocks_size += (BSZ); \
} while (0)
-#else
-#define STAT_MBC_BLK_ALLOC_CRR(CRR, BSZ) ((void) (CRR)) /* Get rid of warning */
-#endif
#define STAT_MBC_BLK_ALLOC(AP, CRR, BSZ, FLGS) \
do { \
@@ -625,7 +602,6 @@ stat_cpool_mbc_blk_free(Allctr_t *allctr,
Carrier_t **busy_pcrr_pp,
UWord blksz)
{
-#ifdef ERTS_SMP
ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks > 0);
crr->cpool.blocks--;
@@ -650,9 +626,6 @@ stat_cpool_mbc_blk_free(Allctr_t *allctr,
#endif
return 1;
-#else
- return 0;
-#endif
}
#define STAT_MBC_BLK_FREE(AP, CRR, BPCRRPP, BSZ, FLGS) \
@@ -688,12 +661,7 @@ do { \
#endif
#ifdef DEBUG
-#ifdef USE_THREADS
-# ifdef ERTS_SMP
# define IS_ACTUALLY_BLOCKING (erts_thr_progress_is_blocking())
-# else
-# define IS_ACTUALLY_BLOCKING 0
-# endif
#define ERTS_ALCU_DBG_CHK_THR_ACCESS(A) \
do { \
if (!(A)->thread_safe && !IS_ACTUALLY_BLOCKING) { \
@@ -702,7 +670,7 @@ do { \
(A)->debug.saved_tid = 1; \
} \
else { \
- ERTS_SMP_LC_ASSERT( \
+ ERTS_LC_ASSERT( \
ethr_equal_tids((A)->debug.tid, erts_thr_self())); \
} \
} \
@@ -710,16 +678,13 @@ do { \
#else
#define ERTS_ALCU_DBG_CHK_THR_ACCESS(A)
#endif
-#else
-#define ERTS_ALCU_DBG_CHK_THR_ACCESS(A)
-#endif
static void make_name_atoms(Allctr_t *allctr);
static Block_t *create_carrier(Allctr_t *, Uint, UWord);
static void destroy_carrier(Allctr_t *, Block_t *, Carrier_t **);
static void mbc_free(Allctr_t *allctr, void *p, Carrier_t **busy_pcrr_pp);
-static void dealloc_block(Allctr_t *, void *, int);
+static void dealloc_block(Allctr_t *, void *, ErtsAlcFixList_t *, int);
/* internal data... */
@@ -751,12 +716,77 @@ internal_free(void *ptr)
#endif
+#ifdef ARCH_32
+
+/*
+ * Bit vector for the entire 32-bit virtual address space
+ * with one bit for each super aligned memory segment.
+ */
+
+#define VSPACE_MAP_BITS (1 << (32 - ERTS_MMAP_SUPERALIGNED_BITS))
+#define VSPACE_MAP_SZ (VSPACE_MAP_BITS / ERTS_VSPACE_WORD_BITS)
+
+static ERTS_INLINE void set_bit(UWord* map, Uint ix)
+{
+ ASSERT(ix / ERTS_VSPACE_WORD_BITS < VSPACE_MAP_SZ);
+ map[ix / ERTS_VSPACE_WORD_BITS]
+ |= ((UWord)1 << (ix % ERTS_VSPACE_WORD_BITS));
+}
+
+static ERTS_INLINE void clr_bit(UWord* map, Uint ix)
+{
+ ASSERT(ix / ERTS_VSPACE_WORD_BITS < VSPACE_MAP_SZ);
+ map[ix / ERTS_VSPACE_WORD_BITS]
+ &= ~((UWord)1 << (ix % ERTS_VSPACE_WORD_BITS));
+}
+
+static ERTS_INLINE int is_bit_set(UWord* map, Uint ix)
+{
+ ASSERT(ix / ERTS_VSPACE_WORD_BITS < VSPACE_MAP_SZ);
+ return map[ix / ERTS_VSPACE_WORD_BITS]
+ & ((UWord)1 << (ix % ERTS_VSPACE_WORD_BITS));
+}
+
+UWord erts_literal_vspace_map[VSPACE_MAP_SZ];
+
+static void set_literal_range(void* start, Uint size)
+{
+ Uint ix = (UWord)start >> ERTS_MMAP_SUPERALIGNED_BITS;
+ Uint n = size >> ERTS_MMAP_SUPERALIGNED_BITS;
+
+ ASSERT(!((UWord)start & ERTS_INV_SUPERALIGNED_MASK));
+ ASSERT(!((UWord)size & ERTS_INV_SUPERALIGNED_MASK));
+ ASSERT(n);
+ while (n--) {
+ ASSERT(!is_bit_set(erts_literal_vspace_map, ix));
+ set_bit(erts_literal_vspace_map, ix);
+ ix++;
+ }
+}
+
+static void clear_literal_range(void* start, Uint size)
+{
+ Uint ix = (UWord)start >> ERTS_MMAP_SUPERALIGNED_BITS;
+ Uint n = size >> ERTS_MMAP_SUPERALIGNED_BITS;
+
+ ASSERT(!((UWord)start & ERTS_INV_SUPERALIGNED_MASK));
+ ASSERT(!((UWord)size & ERTS_INV_SUPERALIGNED_MASK));
+ ASSERT(n);
+ while (n--) {
+ ASSERT(is_bit_set(erts_literal_vspace_map, ix));
+ clr_bit(erts_literal_vspace_map, ix);
+ ix++;
+ }
+}
+
+#endif /* ARCH_32 */
+
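The cost of this table is easy to bound: one bit per superaligned segment across the whole 32-bit space. For illustration, if ERTS_MMAP_SUPERALIGNED_BITS were 18 (256 KiB segments; the actual value is platform-dependent), VSPACE_MAP_BITS would be 2^(32-18) = 16384 bits, i.e. 512 32-bit words, so the whole 4 GiB address space is tracked by a 2 KiB static table.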
/* mseg ... */
#if HAVE_ERTS_MSEG
-static ERTS_INLINE void *
-alcu_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags)
+static void*
+erts_alcu_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags)
{
void *res;
UWord size = (UWord) *size_p;
@@ -766,8 +796,9 @@ alcu_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags)
return res;
}
-static ERTS_INLINE void *
-alcu_mseg_realloc(Allctr_t *allctr, void *seg, Uint old_size, Uint *new_size_p)
+static void*
+erts_alcu_mseg_realloc(Allctr_t *allctr, void *seg,
+ Uint old_size, Uint *new_size_p)
{
void *res;
UWord new_size = (UWord) *new_size_p;
@@ -778,19 +809,162 @@ alcu_mseg_realloc(Allctr_t *allctr, void *seg, Uint old_size, Uint *new_size_p)
return res;
}
-static ERTS_INLINE void
-alcu_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size, Uint flags)
+static void
+erts_alcu_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size, Uint flags)
{
erts_mseg_dealloc_opt(allctr->alloc_no, seg, (UWord) size, flags, &allctr->mseg_opt);
INC_CC(allctr->calls.mseg_dealloc);
}
-#endif
-static ERTS_INLINE void *
-alcu_sys_alloc(Allctr_t *allctr, Uint size, int superalign)
+#if defined(ARCH_32)
+
+void*
+erts_alcu_literal_32_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags)
+{
+ void* res;
+ Uint sz = ERTS_SUPERALIGNED_CEILING(*size_p);
+ ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL &&
+ allctr->t == 0);
+ ERTS_LC_ASSERT(allctr->thread_safe);
+
+ res = erts_alcu_mseg_alloc(allctr, &sz, flags);
+ if (res) {
+ set_literal_range(res, sz);
+ *size_p = sz;
+ }
+ return res;
+}
+
+void*
+erts_alcu_literal_32_mseg_realloc(Allctr_t *allctr, void *seg,
+ Uint old_size, Uint *new_size_p)
+{
+ void* res;
+ Uint new_sz = ERTS_SUPERALIGNED_CEILING(*new_size_p);
+ ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL &&
+ allctr->t == 0);
+ ERTS_LC_ASSERT(allctr->thread_safe);
+
+ if (seg && old_size)
+ clear_literal_range(seg, old_size);
+ res = erts_alcu_mseg_realloc(allctr, seg, old_size, &new_sz);
+ if (res) {
+ set_literal_range(res, new_sz);
+ *new_size_p = new_sz;
+ }
+ return res;
+}
+
+void
+erts_alcu_literal_32_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size,
+ Uint flags)
+{
+ ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL &&
+ allctr->t == 0);
+ ERTS_LC_ASSERT(allctr->thread_safe);
+
+ erts_alcu_mseg_dealloc(allctr, seg, size, flags);
+
+ clear_literal_range(seg, size);
+}
+
+#elif defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
+
+/* For allocators that have their own mmapper (super carrier),
+ * like literal_alloc and exec_alloc on amd64
+ */
+void*
+erts_alcu_mmapper_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags)
+{
+ void* res;
+ UWord size = (UWord) *size_p;
+ Uint32 mmap_flags = ERTS_MMAPFLG_SUPERCARRIER_ONLY;
+ if (flags & ERTS_MSEG_FLG_2POW)
+ mmap_flags |= ERTS_MMAPFLG_SUPERALIGNED;
+
+ res = erts_mmap(allctr->mseg_mmapper, mmap_flags, &size);
+ *size_p = (Uint)size;
+ INC_CC(allctr->calls.mseg_alloc);
+ return res;
+}
+
+void*
+erts_alcu_mmapper_mseg_realloc(Allctr_t *allctr, void *seg,
+ Uint old_size, Uint *new_size_p)
{
void *res;
+ UWord new_size = (UWord) *new_size_p;
+ res = erts_mremap(allctr->mseg_mmapper, ERTS_MSEG_FLG_NONE, seg, old_size, &new_size);
+ *new_size_p = (Uint) new_size;
+ INC_CC(allctr->calls.mseg_realloc);
+ return res;
+}
+
+void
+erts_alcu_mmapper_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size,
+ Uint flags)
+{
+ Uint32 mmap_flags = ERTS_MMAPFLG_SUPERCARRIER_ONLY;
+ if (flags & ERTS_MSEG_FLG_2POW)
+ mmap_flags |= ERTS_MMAPFLG_SUPERALIGNED;
+
+ erts_munmap(allctr->mseg_mmapper, mmap_flags, seg, (UWord)size);
+ INC_CC(allctr->calls.mseg_dealloc);
+}
+#endif /* ARCH_64 && ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION */
+
+#if defined(ERTS_ALC_A_EXEC) && !defined(ERTS_HAVE_EXEC_MMAPPER)
+
+/*
+ * For exec_alloc on non-amd64 that just needs memory with PROT_EXEC
+ */
+void*
+erts_alcu_exec_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags)
+{
+ void* res = erts_alcu_mseg_alloc(allctr, size_p, flags);
+
+ if (res) {
+ int r = mprotect(res, *size_p, PROT_EXEC | PROT_READ | PROT_WRITE);
+ ASSERT(r == 0); (void)r;
+ }
+ return res;
+}
+
+void*
+erts_alcu_exec_mseg_realloc(Allctr_t *allctr, void *seg,
+ Uint old_size, Uint *new_size_p)
+{
+ void *res;
+
+ if (seg && old_size) {
+ int r = mprotect(seg, old_size, PROT_READ | PROT_WRITE);
+ ASSERT(r == 0); (void)r;
+ }
+ res = erts_alcu_mseg_realloc(allctr, seg, old_size, new_size_p);
+ if (res) {
+ int r = mprotect(res, *new_size_p, PROT_EXEC | PROT_READ | PROT_WRITE);
+ ASSERT(r == 0); (void)r;
+ }
+ return res;
+}
+
+void
+erts_alcu_exec_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size, Uint flags)
+{
+ int r = mprotect(seg, size, PROT_READ | PROT_WRITE);
+ ASSERT(r == 0); (void)r;
+ erts_alcu_mseg_dealloc(allctr, seg, size, flags);
+}
+#endif /* ERTS_ALC_A_EXEC && !ERTS_HAVE_EXEC_MMAPPER */
+
+#endif /* HAVE_ERTS_MSEG */
+
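The exec_alloc fallback above leans on POSIX mprotect() to add and drop PROT_EXEC around the generic mseg calls. A minimal standalone illustration of that dance on one anonymous page (a demo, not ERTS code; error handling trimmed):

    #include <sys/mman.h>
    #include <unistd.h>
    #include <stddef.h>

    int demo_exec_page(void)
    {
        size_t pgsz = (size_t) sysconf(_SC_PAGESIZE);
        void *p = mmap(NULL, pgsz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        /* ...emit machine code into p... */
        if (mprotect(p, pgsz, PROT_EXEC | PROT_READ | PROT_WRITE) != 0)
            return -1;
        /* ...run it...; then make it plain data again before release */
        mprotect(p, pgsz, PROT_READ | PROT_WRITE);
        return munmap(p, pgsz);
    }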
+static void*
+erts_alcu_sys_alloc(Allctr_t *allctr, Uint* size_p, int superalign)
+{
+ void *res;
+ const Uint size = *size_p;
#if ERTS_SA_MB_CARRIERS && ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
if (superalign)
res = erts_sys_aligned_alloc(ERTS_SACRR_UNIT_SZ, size);
@@ -803,10 +977,11 @@ alcu_sys_alloc(Allctr_t *allctr, Uint size, int superalign)
return res;
}
-static ERTS_INLINE void *
-alcu_sys_realloc(Allctr_t *allctr, void *ptr, Uint size, Uint old_size, int superalign)
+static void*
+erts_alcu_sys_realloc(Allctr_t *allctr, void *ptr, Uint *size_p, Uint old_size, int superalign)
{
void *res;
+ const Uint size = *size_p;
#if ERTS_SA_MB_CARRIERS && ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
if (superalign)
@@ -824,8 +999,8 @@ alcu_sys_realloc(Allctr_t *allctr, void *ptr, Uint size, Uint old_size, int supe
return res;
}
-static ERTS_INLINE void
-alcu_sys_free(Allctr_t *allctr, void *ptr, int superalign)
+static void
+erts_alcu_sys_dealloc(Allctr_t *allctr, void *ptr, Uint size, int superalign)
{
#if ERTS_SA_MB_CARRIERS && ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
if (superalign)
@@ -838,6 +1013,59 @@ alcu_sys_free(Allctr_t *allctr, void *ptr, int superalign)
erts_mtrace_crr_free(allctr->alloc_no, ERTS_ALC_A_SYSTEM, ptr);
}
+#ifdef ARCH_32
+
+void*
+erts_alcu_literal_32_sys_alloc(Allctr_t *allctr, Uint* size_p, int superalign)
+{
+ void* res;
+ Uint size = ERTS_SUPERALIGNED_CEILING(*size_p);
+ ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL &&
+ allctr->t == 0);
+ ERTS_LC_ASSERT(allctr->thread_safe);
+
+ res = erts_alcu_sys_alloc(allctr, &size, 1);
+ if (res) {
+ set_literal_range(res, size);
+ *size_p = size;
+ }
+ return res;
+}
+
+void*
+erts_alcu_literal_32_sys_realloc(Allctr_t *allctr, void *ptr, Uint* size_p, Uint old_size, int superalign)
+{
+ void* res;
+ Uint size = ERTS_SUPERALIGNED_CEILING(*size_p);
+
+ ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL &&
+ allctr->t == 0);
+ ERTS_LC_ASSERT(allctr->thread_safe);
+
+ if (ptr && old_size)
+ clear_literal_range(ptr, old_size);
+ res = erts_alcu_sys_realloc(allctr, ptr, &size, old_size, 1);
+ if (res) {
+ set_literal_range(res, size);
+ *size_p = size;
+ }
+ return res;
+}
+
+void
+erts_alcu_literal_32_sys_dealloc(Allctr_t *allctr, void *ptr, Uint size, int superalign)
+{
+ ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL &&
+ allctr->t == 0);
+ ERTS_LC_ASSERT(allctr->thread_safe);
+
+ erts_alcu_sys_dealloc(allctr, ptr, size, 1);
+
+ clear_literal_range(ptr, size);
+}
+
+#endif /* ARCH_32 */
+
static Uint
get_next_mbc_size(Allctr_t *allctr)
{
@@ -927,7 +1155,88 @@ unlink_carrier(CarrierList_t *cl, Carrier_t *crr)
}
}
-#ifdef ERTS_SMP
+
+#ifdef DEBUG
+static int is_in_list(ErtsDoubleLink_t* sentinel, ErtsDoubleLink_t* node)
+{
+ ErtsDoubleLink_t* p;
+
+ ASSERT(node != sentinel);
+ for (p = sentinel->next; p != sentinel; p = p->next) {
+ if (p == node)
+ return 1;
+ }
+ return 0;
+}
+#endif /* DEBUG */
+
+static ERTS_INLINE void
+link_edl_after(ErtsDoubleLink_t* after_me, ErtsDoubleLink_t* node)
+{
+ ErtsDoubleLink_t* before_me = after_me->next;
+ ASSERT(node != after_me && node != before_me);
+ node->next = before_me;
+ node->prev = after_me;
+ before_me->prev = node;
+ after_me->next = node;
+}
+
+static ERTS_INLINE void
+link_edl_before(ErtsDoubleLink_t* before_me, ErtsDoubleLink_t* node)
+{
+ ErtsDoubleLink_t* after_me = before_me->prev;
+ ASSERT(node != before_me && node != after_me);
+ node->next = before_me;
+ node->prev = after_me;
+ before_me->prev = node;
+ after_me->next = node;
+}
+
+static ERTS_INLINE void
+unlink_edl(ErtsDoubleLink_t* node)
+{
+ node->next->prev = node->prev;
+ node->prev->next = node->next;
+}
+
+static ERTS_INLINE void
+relink_edl_before(ErtsDoubleLink_t* before_me, ErtsDoubleLink_t* node)
+{
+ if (node != before_me && node != before_me->prev) {
+ unlink_edl(node);
+ link_edl_before(before_me, node);
+ }
+}
+
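These helpers implement an intrusive, circular doubly-linked list whose sentinel is its own neighbour when the list is empty, so linking and unlinking never branch on NULL or end-of-list. The same idea in standalone form, using a stand-in type rather than ErtsDoubleLink_t:

    typedef struct dlink { struct dlink *next, *prev; } dlink;

    /* An empty list is a sentinel pointing at itself. */
    static void dl_init(dlink *sentinel)
    {
        sentinel->next = sentinel->prev = sentinel;
    }

    /* Insert node right after after_me; cf. link_edl_after() above. */
    static void dl_link_after(dlink *after_me, dlink *node)
    {
        node->next = after_me->next;
        node->prev = after_me;
        after_me->next->prev = node;
        after_me->next = node;
    }

    /* Remove node; no special case needed, cf. unlink_edl() above. */
    static void dl_unlink(dlink *node)
    {
        node->next->prev = node->prev;
        node->prev->next = node->next;
    }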
+static ERTS_INLINE int is_abandoned(Carrier_t *crr)
+{
+ return crr->cpool.abandoned.next != NULL;
+}
+
+static ERTS_INLINE void
+link_abandoned_carrier(ErtsDoubleLink_t* list, Carrier_t *crr)
+{
+ ASSERT(!is_abandoned(crr));
+
+ link_edl_after(list, &crr->cpool.abandoned);
+
+ ASSERT(crr->cpool.abandoned.next != &crr->cpool.abandoned);
+ ASSERT(crr->cpool.abandoned.prev != &crr->cpool.abandoned);
+}
+
+static ERTS_INLINE void
+unlink_abandoned_carrier(Carrier_t *crr)
+{
+ ASSERT(is_in_list(&crr->cpool.orig_allctr->cpool.pooled_list,
+ &crr->cpool.abandoned) ||
+ is_in_list(&crr->cpool.orig_allctr->cpool.traitor_list,
+ &crr->cpool.abandoned));
+
+ unlink_edl(&crr->cpool.abandoned);
+
+ crr->cpool.abandoned.next = NULL;
+ crr->cpool.abandoned.prev = NULL;
+}
static ERTS_INLINE void
clear_busy_pool_carrier(Allctr_t *allctr, Carrier_t *crr)
@@ -946,16 +1255,15 @@ clear_busy_pool_carrier(Allctr_t *allctr, Carrier_t *crr)
erts_aint_t old_val = new_val|ERTS_CRR_ALCTR_FLG_BUSY;
ERTS_ALC_CPOOL_ASSERT(old_val
- == erts_smp_atomic_xchg_relb(&crr->allctr,
+ == erts_atomic_xchg_relb(&crr->allctr,
new_val));
}
#else
- erts_smp_atomic_set_relb(&crr->allctr, new_val);
+ erts_atomic_set_relb(&crr->allctr, new_val);
#endif
}
}
-#endif
#if 0
#define ERTS_DBG_CHK_FIX_LIST(A, FIX, IX, B) \
@@ -979,24 +1287,24 @@ chk_fix_list(Allctr_t *allctr, ErtsAlcFixList_t *fix, int ix, int before)
static void *mbc_alloc(Allctr_t *allctr, Uint size);
-#ifdef ERTS_SMP
typedef struct {
ErtsAllctrDDBlock_t ddblock__; /* must be first */
ErtsAlcType_t fix_type;
} ErtsAllctrFixDDBlock_t;
-#endif
+
+#define ERTS_ALC_FIX_NO_UNUSE (((ErtsAlcType_t) 1) << ERTS_ALC_N_BITS)
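ERTS_ALC_FIX_NO_UNUSE parks a flag in the first bit above the ERTS_ALC_N_BITS-wide type number, so a single fix_type word can carry both the allocation type and a do-not-decrement-'used' marker. The tagging in isolation (made-up width and names):

    #define N_BITS   10                 /* stand-in for ERTS_ALC_N_BITS */
    #define NO_UNUSE (1u << N_BITS)     /* flag above the type field    */

    static unsigned tag(unsigned type)     { return type | NO_UNUSE; }
    static unsigned untag(unsigned tagged) { return tagged & ~NO_UNUSE; }
    static int marked(unsigned tagged)     { return (tagged & NO_UNUSE) != 0; }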
static ERTS_INLINE void
dealloc_fix_block(Allctr_t *allctr,
ErtsAlcType_t type,
void *ptr,
+ ErtsAlcFixList_t *fix,
int dec_cc_on_redirect)
{
-#ifdef ERTS_SMP
/* May be redirected... */
- ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type = type;
-#endif
- dealloc_block(allctr, ptr, dec_cc_on_redirect);
+ ASSERT((type & ERTS_ALC_FIX_NO_UNUSE) == 0);
+ ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type = type | ERTS_ALC_FIX_NO_UNUSE;
+ dealloc_block(allctr, ptr, fix, dec_cc_on_redirect);
}
static ERTS_INLINE void
@@ -1029,12 +1337,10 @@ fix_cpool_check_shrink(Allctr_t *allctr,
fix->u.cpool.shrink_list = 0;
else {
void *p;
-#ifdef ERTS_SMP
if (busy_pcrr_pp) {
clear_busy_pool_carrier(allctr, *busy_pcrr_pp);
*busy_pcrr_pp = NULL;
}
-#endif
fix->u.cpool.shrink_list--;
p = fix->list;
fix->list = *((void **) p);
@@ -1042,8 +1348,7 @@ fix_cpool_check_shrink(Allctr_t *allctr,
if (fix->u.cpool.min_list_size > fix->list_size)
fix->u.cpool.min_list_size = fix->list_size;
- fix->u.cpool.allocated--;
- dealloc_fix_block(allctr, type, p, 0);
+ dealloc_fix_block(allctr, type, p, fix, 0);
}
}
}
@@ -1058,6 +1363,7 @@ fix_cpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size)
&& type <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
fix = &allctr->fix[type - ERTS_ALC_N_MIN_A_FIXED_SIZE];
+ ASSERT(size == fix->type_size);
res = fix->list;
if (res) {
@@ -1069,8 +1375,6 @@ fix_cpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size)
fix_cpool_check_shrink(allctr, type, fix, NULL);
return res;
}
- if (size < 2*sizeof(UWord))
- size += sizeof(UWord);
if (size >= allctr->sbc_threshold) {
Block_t *blk;
blk = create_carrier(allctr, size, CFLG_SBC);
@@ -1089,7 +1393,8 @@ static ERTS_INLINE void
fix_cpool_free(Allctr_t *allctr,
ErtsAlcType_t type,
void *p,
- Carrier_t **busy_pcrr_pp)
+ Carrier_t **busy_pcrr_pp,
+ int unuse)
{
ErtsAlcFixList_t *fix;
@@ -1097,8 +1402,9 @@ fix_cpool_free(Allctr_t *allctr,
&& type <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
fix = &allctr->fix[type - ERTS_ALC_N_MIN_A_FIXED_SIZE];
-
- fix->u.cpool.used--;
+
+ if (unuse)
+ fix->u.cpool.used--;
if ((!busy_pcrr_pp || !*busy_pcrr_pp)
&& !fix->u.cpool.shrink_list
@@ -1127,10 +1433,8 @@ fix_cpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
int ix, o;
int flush = flgs == 0;
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_lock(&allctr->mutex);
-#endif
for (ix = 0; ix < ERTS_ALC_NO_FIXED_SIZES; ix++) {
ErtsAlcFixList_t *fix = &allctr->fix[ix];
@@ -1156,8 +1460,7 @@ fix_cpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
fix->list = *((void **) ptr);
fix->list_size--;
fix->u.cpool.shrink_list--;
- fix->u.cpool.allocated--;
- dealloc_fix_block(allctr, type, ptr, 0);
+ dealloc_fix_block(allctr, type, ptr, fix, 0);
}
if (fix->u.cpool.min_list_size > fix->list_size)
fix->u.cpool.min_list_size = fix->list_size;
@@ -1171,10 +1474,8 @@ fix_cpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
if (all_empty)
sched_fix_shrink(allctr, 0);
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
-#endif
return res;
}
@@ -1189,6 +1490,7 @@ fix_nocpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size)
&& type <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
fix = &allctr->fix[type - ERTS_ALC_N_MIN_A_FIXED_SIZE];
+ ASSERT(size == fix->type_size);
ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);
fix->u.nocpool.used++;
@@ -1211,8 +1513,6 @@ fix_nocpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size)
ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
return res;
}
- if (size < 2*sizeof(UWord))
- size += sizeof(UWord);
if (fix->u.nocpool.limit < fix->u.nocpool.used)
fix->u.nocpool.limit = fix->u.nocpool.used;
if (fix->u.nocpool.max_used < fix->u.nocpool.used)
@@ -1287,10 +1587,8 @@ fix_nocpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
int ix, o;
int flush = flgs == 0;
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_lock(&allctr->mutex);
-#endif
for (ix = 0; ix < ERTS_ALC_NO_FIXED_SIZES; ix++) {
ErtsAlcFixList_t *fix = &allctr->fix[ix];
@@ -1318,7 +1616,7 @@ fix_nocpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
ptr = fix->list;
fix->list = *((void **) ptr);
fix->list_size--;
- dealloc_block(allctr, ptr, 0);
+ dealloc_block(allctr, ptr, NULL, 0);
fix->u.nocpool.allocated--;
}
if (fix->list_size != 0) {
@@ -1332,10 +1630,8 @@ fix_nocpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
if (all_empty)
sched_fix_shrink(allctr, 0);
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
-#endif
return res;
}
@@ -1351,7 +1647,16 @@ erts_alcu_fix_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
static void dealloc_carrier(Allctr_t *allctr, Carrier_t *crr, int superaligned);
-#ifdef ERTS_SMP
+static ERTS_INLINE void
+dealloc_mbc(Allctr_t *allctr, Carrier_t *crr)
+{
+ ASSERT(IS_MB_CARRIER(crr));
+ if (allctr->destroying_mbc)
+ allctr->destroying_mbc(allctr, crr);
+
+ dealloc_carrier(allctr, crr, 1);
+}
+
static ERTS_INLINE Allctr_t*
get_pref_allctr(void *extra)
@@ -1361,7 +1666,7 @@ get_pref_allctr(void *extra)
pref_ix = ERTS_ALC_GET_THR_IX();
- ASSERT(sizeof(UWord) == sizeof(Allctr_t *));
+ ERTS_CT_ASSERT(sizeof(UWord) == sizeof(Allctr_t *));
ASSERT(0 <= pref_ix && pref_ix < tspec->size);
return tspec->allctr[pref_ix];
@@ -1392,7 +1697,7 @@ get_used_allctr(Allctr_t *pref_allctr, int pref_lock, void *p, UWord *sizep,
crr = BLK_TO_SBC(blk);
if (sizep)
*sizep = SBC_BLK_SZ(blk) - ABLK_HDR_SZ;
- iallctr = erts_smp_atomic_read_dirty(&crr->allctr);
+ iallctr = erts_atomic_read_dirty(&crr->allctr);
}
else {
crr = ABLK_TO_MBC(blk);
@@ -1400,10 +1705,10 @@ get_used_allctr(Allctr_t *pref_allctr, int pref_lock, void *p, UWord *sizep,
if (sizep)
*sizep = MBC_ABLK_SZ(blk) - ABLK_HDR_SZ;
if (!ERTS_ALC_IS_CPOOL_ENABLED(pref_allctr))
- iallctr = erts_smp_atomic_read_dirty(&crr->allctr);
+ iallctr = erts_atomic_read_dirty(&crr->allctr);
else {
int locked_pref_allctr = 0;
- iallctr = erts_smp_atomic_read_ddrb(&crr->allctr);
+ iallctr = erts_atomic_read_ddrb(&crr->allctr);
if (ERTS_ALC_TS_PREF_LOCK_IF_USED == pref_lock
&& pref_allctr->thread_safe) {
@@ -1419,7 +1724,7 @@ get_used_allctr(Allctr_t *pref_allctr, int pref_lock, void *p, UWord *sizep,
erts_aint_t act;
ERTS_ALC_CPOOL_ASSERT(!(iallctr & ERTS_CRR_ALCTR_FLG_BUSY));
- act = erts_smp_atomic_cmpxchg_ddrb(&crr->allctr,
+ act = erts_atomic_cmpxchg_ddrb(&crr->allctr,
iallctr|ERTS_CRR_ALCTR_FLG_BUSY,
iallctr);
if (act == iallctr) {
@@ -1665,11 +1970,13 @@ handle_delayed_fix_dealloc(Allctr_t *allctr, void *ptr)
type = ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type;
- ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= type
- && type <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
+ ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE
+ <= (type & ~ERTS_ALC_FIX_NO_UNUSE));
+ ASSERT((type & ~ERTS_ALC_FIX_NO_UNUSE)
+ <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr))
- fix_nocpool_free(allctr, type, ptr);
+ fix_nocpool_free(allctr, (type & ~ERTS_ALC_FIX_NO_UNUSE), ptr);
else {
Block_t *blk = UMEM2BLK(ptr);
Carrier_t *busy_pcrr_p;
@@ -1684,7 +1991,9 @@ handle_delayed_fix_dealloc(Allctr_t *allctr, void *ptr)
NULL, &busy_pcrr_p);
if (used_allctr == allctr) {
doit:
- fix_cpool_free(allctr, type, ptr, &busy_pcrr_p);
+ fix_cpool_free(allctr, (type & ~ERTS_ALC_FIX_NO_UNUSE),
+ ptr, &busy_pcrr_p,
+ !(type & ERTS_ALC_FIX_NO_UNUSE));
clear_busy_pool_carrier(allctr, busy_pcrr_p);
}
else {
@@ -1775,13 +2084,25 @@ handle_delayed_dealloc(Allctr_t *allctr,
* data has been overwritten by the queue.
*/
Carrier_t *crr = FIRST_BLK_TO_MBC(allctr, blk);
+
+ /* Restore word overwritten by the dd-queue as it will be read
+ * if this carrier is pulled from dc_list by cpool_fetch()
+ */
+ ERTS_ALC_CPOOL_ASSERT(FBLK_TO_MBC(blk) != crr);
+ ERTS_CT_ASSERT(sizeof(ErtsAllctrDDBlock_t) == sizeof(void*));
+#ifdef MBC_ABLK_OFFSET_BITS
+ blk->u.carrier = crr;
+#else
+ blk->carrier = crr;
+#endif
+
ERTS_ALC_CPOOL_ASSERT(ERTS_ALC_IS_CPOOL_ENABLED(allctr));
ERTS_ALC_CPOOL_ASSERT(allctr == crr->cpool.orig_allctr);
ERTS_ALC_CPOOL_ASSERT(((erts_aint_t) allctr)
- != (erts_smp_atomic_read_nob(&crr->allctr)
+ != (erts_atomic_read_nob(&crr->allctr)
& ~ERTS_CRR_ALCTR_FLG_MASK));
- erts_smp_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr));
+ erts_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr));
schedule_dealloc_carrier(allctr, crr);
}
@@ -1792,7 +2113,7 @@ handle_delayed_dealloc(Allctr_t *allctr,
if (fix)
handle_delayed_fix_dealloc(allctr, ptr);
else
- dealloc_block(allctr, ptr, 1);
+ dealloc_block(allctr, ptr, NULL, 1);
}
}
@@ -1827,9 +2148,7 @@ enqueue_dealloc_other_instance(ErtsAlcType_t type,
erts_alloc_notify_delayed_dealloc(allctr->ix);
}
-#endif
-#ifdef ERTS_SMP
static void
set_new_allctr_abandon_limit(Allctr_t *allctr);
static void
@@ -1891,26 +2210,28 @@ erts_alcu_check_delayed_dealloc(Allctr_t *allctr,
thr_prgr_p,
more_work);
}
-#endif
#define ERTS_ALCU_HANDLE_DD_IN_OP(Allctr, Locked) \
handle_delayed_dealloc((Allctr), (Locked), 1, \
ERTS_ALCU_DD_OPS_LIM_LOW, NULL, NULL, NULL)
static void
-dealloc_block(Allctr_t *allctr, void *ptr, int dec_cc_on_redirect)
+dealloc_block(Allctr_t *allctr, void *ptr, ErtsAlcFixList_t *fix, int dec_cc_on_redirect)
{
Block_t *blk = UMEM2BLK(ptr);
- ERTS_SMP_LC_ASSERT(!allctr->thread_safe
+ ERTS_LC_ASSERT(!allctr->thread_safe
|| erts_lc_mtx_is_locked(&allctr->mutex));
- if (IS_SBC_BLK(blk))
+ if (IS_SBC_BLK(blk)) {
destroy_carrier(allctr, blk, NULL);
-#ifndef ERTS_SMP
- else
- mbc_free(allctr, ptr, NULL);
-#else
+ if (fix && ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
+ ErtsAlcType_t type = ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type;
+ if (!(type & ERTS_ALC_FIX_NO_UNUSE))
+ fix->u.cpool.used--;
+ fix->u.cpool.allocated--;
+ }
+ }
else if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr))
mbc_free(allctr, ptr, NULL);
else {
@@ -1919,6 +2240,12 @@ dealloc_block(Allctr_t *allctr, void *ptr, int dec_cc_on_redirect)
used_allctr = get_used_allctr(allctr, ERTS_ALC_TS_PREF_LOCK_NO, ptr,
NULL, &busy_pcrr_p);
if (used_allctr == allctr) {
+ if (fix) {
+ ErtsAlcType_t type = ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type;
+ if (!(type & ERTS_ALC_FIX_NO_UNUSE))
+ fix->u.cpool.used--;
+ fix->u.cpool.allocated--;
+ }
mbc_free(allctr, ptr, &busy_pcrr_p);
clear_busy_pool_carrier(allctr, busy_pcrr_p);
}
@@ -1934,7 +2261,6 @@ dealloc_block(Allctr_t *allctr, void *ptr, int dec_cc_on_redirect)
erts_alloc_notify_delayed_dealloc(used_allctr->ix);
}
}
-#endif
}
/* Multi block carrier alloc/realloc/free ... */
@@ -1957,7 +2283,7 @@ mbc_alloc_block(Allctr_t *allctr, Uint size, Uint *blk_szp)
if (!blk) {
blk = create_carrier(allctr, get_blk_sz, CFLG_MBC);
-#if !HALFWORD_HEAP && !ERTS_SUPER_ALIGNED_MSEG_ONLY
+#if !ERTS_SUPER_ALIGNED_MSEG_ONLY
if (!blk) {
/* Emergency! We couldn't create the carrier as we wanted.
Try to place it in a sys_alloced sbc. */
@@ -2182,9 +2508,7 @@ mbc_free(Allctr_t *allctr, void *p, Carrier_t **busy_pcrr_pp)
else {
(*allctr->link_free_block)(allctr, blk);
HARD_CHECK_BLK_CARRIER(allctr, blk);
-#ifdef ERTS_SMP
check_abandon_carrier(allctr, blk, busy_pcrr_pp);
-#endif
}
}
@@ -2218,10 +2542,8 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,
return NULL;
#else /* !MBC_REALLOC_ALWAYS_MOVES */
-#ifdef ERTS_SMP
if (busy_pcrr_pp && *busy_pcrr_pp)
goto realloc_move; /* Don't want to use carrier in pool */
-#endif
get_blk_sz = blk_sz = UMEMSZ2BLKSZ(allctr, size);
@@ -2342,9 +2664,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,
HARD_CHECK_BLK_CARRIER(allctr, blk);
-#ifdef ERTS_SMP
check_abandon_carrier(allctr, nxt_blk, NULL);
-#endif
return p;
}
@@ -2456,9 +2776,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,
if (cand_blk_sz < get_blk_sz) {
/* We won't fit in cand_blk; get a new one */
-#ifdef ERTS_SMP
realloc_move:
-#endif
#endif /* !MBC_REALLOC_ALWAYS_MOVES */
new_p = mbc_alloc(allctr, size);
@@ -2560,13 +2878,11 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,
#endif /* !MBC_REALLOC_ALWAYS_MOVES */
}
-#ifdef ERTS_SMP
#define ERTS_ALC_MAX_DEALLOC_CARRIER 10
-#define ERTS_ALC_CPOOL_MAX_FETCH_INSPECT 10
+#define ERTS_ALC_CPOOL_MAX_FETCH_INSPECT 20
+#define ERTS_ALC_CPOOL_MAX_TRAITOR_INSPECT 10
#define ERTS_ALC_CPOOL_CHECK_LIMIT_COUNT 100
-#define ERTS_ALC_CPOOL_MAX_NO_CARRIERS 5
-#define ERTS_ALC_CPOOL_INSERT_ALLOWED_OFFSET 100
#define ERTS_ALC_CPOOL_MAX_FAILED_STAT_READS 3
#define ERTS_ALC_CPOOL_PTR_MOD_MRK (((erts_aint_t) 1) << 0)
@@ -2732,7 +3048,7 @@ cpool_insert(Allctr_t *allctr, Carrier_t *crr)
ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_INVALID /* testcase */
|| erts_thr_progress_is_managed_thread());
- ERTS_ALC_CPOOL_ASSERT(erts_smp_atomic_read_nob(&crr->allctr)
+ ERTS_ALC_CPOOL_ASSERT(erts_atomic_read_nob(&crr->allctr)
== (erts_aint_t) allctr);
erts_atomic_add_nob(&allctr->cpool.stat.blocks_size,
@@ -2743,9 +3059,6 @@ cpool_insert(Allctr_t *allctr, Carrier_t *crr)
(erts_aint_t) CARRIER_SZ(crr));
erts_atomic_inc_nob(&allctr->cpool.stat.no_carriers);
- erts_smp_atomic_set_nob(&crr->allctr,
- ((erts_aint_t) allctr)|ERTS_CRR_ALCTR_FLG_IN_POOL);
-
/*
* We search in 'next' direction and begin by passing
* one element before trying to insert. This in order to
@@ -2804,6 +3117,10 @@ cpool_insert(Allctr_t *allctr, Carrier_t *crr)
cpool_set_mod_marked(&cpd2p->prev,
(erts_aint_t) &crr->cpool,
(erts_aint_t) cpd1p);
+
+ erts_atomic_set_wb(&crr->allctr,
+ ((erts_aint_t) allctr)|ERTS_CRR_ALCTR_FLG_IN_POOL);
+ LTTNG3(carrier_pool_put, ERTS_ALC_A2AD(allctr->alloc_no), allctr->ix, CARRIER_SZ(crr));
}
static void
@@ -2904,72 +3221,206 @@ cpool_delete(Allctr_t *allctr, Allctr_t *prev_allctr, Carrier_t *crr)
static Carrier_t *
cpool_fetch(Allctr_t *allctr, UWord size)
{
- int i;
+ int i, i_stop, has_passed_sentinel;
Carrier_t *crr;
ErtsAlcCPoolData_t *cpdp;
- ErtsAlcCPoolData_t *sentinel = &carrier_pool[allctr->alloc_no].sentinel;
+ ErtsAlcCPoolData_t *cpool_entrance;
+ ErtsAlcCPoolData_t *sentinel;
+ ErtsDoubleLink_t* dl;
+ ErtsDoubleLink_t* first_old_traitor;
ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_INVALID /* testcase */
|| erts_thr_progress_is_managed_thread());
- i = 0;
+ i = ERTS_ALC_CPOOL_MAX_FETCH_INSPECT;
+ first_old_traitor = allctr->cpool.traitor_list.next;
+ cpool_entrance = NULL;
- /* First; check our own pending dealloc carrier list... */
- crr = allctr->cpool.dc_list.last;
- while (crr && i < ERTS_ALC_CPOOL_MAX_FETCH_INSPECT) {
- if (erts_atomic_read_nob(&crr->cpool.max_size) >= size) {
- unlink_carrier(&allctr->cpool.dc_list, crr);
-#ifdef ERTS_ALC_CPOOL_DEBUG
- ERTS_ALC_CPOOL_ASSERT(erts_smp_atomic_xchg_nob(&crr->allctr,
- ((erts_aint_t) allctr))
- == (((erts_aint_t) allctr) & ~ERTS_CRR_ALCTR_FLG_MASK));
-#else
- erts_smp_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr));
-#endif
- return crr;
+ LTTNG3(carrier_pool_get, ERTS_ALC_A2AD(allctr->alloc_no), allctr->ix, (unsigned long)size);
+ /*
+ * Search my own pooled_list,
+ * i.e. my abandoned carriers that were in the pool last time I checked.
+ */
+
+ dl = allctr->cpool.pooled_list.next;
+ while(dl != &allctr->cpool.pooled_list) {
+ erts_aint_t exp, act;
+ crr = (Carrier_t *) (((char *) dl) - offsetof(Carrier_t, cpool.abandoned));
+
+ ASSERT(!is_in_list(&allctr->cpool.traitor_list, dl));
+ ASSERT(crr->cpool.orig_allctr == allctr);
+ dl = dl->next;
+ exp = erts_atomic_read_rb(&crr->allctr);
+ if ((exp & ERTS_CRR_ALCTR_FLG_MASK) == ERTS_CRR_ALCTR_FLG_IN_POOL
+ && erts_atomic_read_nob(&crr->cpool.max_size) >= size) {
+ /* Try to fetch it... */
+ act = erts_atomic_cmpxchg_mb(&crr->allctr,
+ (erts_aint_t) allctr,
+ exp);
+ if (act == exp) {
+ cpool_delete(allctr, ((Allctr_t *) (act & ~ERTS_CRR_ALCTR_FLG_MASK)), crr);
+ unlink_abandoned_carrier(crr);
+
+ /* Move sentinel to continue next search from here */
+ relink_edl_before(dl, &allctr->cpool.pooled_list);
+ return crr;
+ }
+ exp = act;
+ }
+ if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) {
+ if (!cpool_entrance)
+ cpool_entrance = &crr->cpool;
+ }
+ else { /* Not in pool, move to traitor_list */
+ unlink_abandoned_carrier(crr);
+ link_abandoned_carrier(&allctr->cpool.traitor_list, crr);
+ }
+ if (--i <= 0) {
+ /* Move sentinel to continue next search from here */
+ relink_edl_before(dl, &allctr->cpool.pooled_list);
+ return NULL;
}
- crr = crr->prev;
- i++;
}
- /* ... then the pool ... */
+ /* Now search traitor_list.
+ * i.e. carriers employed by other allocators last time I checked.
+ * They might have been abandoned since then.
+ */
+
+ i_stop = (i < ERTS_ALC_CPOOL_MAX_TRAITOR_INSPECT ?
+ 0 : i - ERTS_ALC_CPOOL_MAX_TRAITOR_INSPECT);
+ dl = first_old_traitor;
+ while(dl != &allctr->cpool.traitor_list) {
+ erts_aint_t exp, act;
+ crr = (Carrier_t *) (((char *) dl) - offsetof(Carrier_t, cpool.abandoned));
+ ASSERT(dl != &allctr->cpool.pooled_list);
+ ASSERT(crr->cpool.orig_allctr == allctr);
+ dl = dl->next;
+ exp = erts_atomic_read_rb(&crr->allctr);
+ if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) {
+ if (!(exp & ERTS_CRR_ALCTR_FLG_BUSY)
+ && erts_atomic_read_nob(&crr->cpool.max_size) >= size) {
+ /* Try to fetch it... */
+ act = erts_atomic_cmpxchg_mb(&crr->allctr,
+ (erts_aint_t) allctr,
+ exp);
+ if (act == exp) {
+ cpool_delete(allctr, ((Allctr_t *) (act & ~ERTS_CRR_ALCTR_FLG_MASK)), crr);
+ unlink_abandoned_carrier(crr);
+
+ /* Move sentinel to continue next search from here */
+ relink_edl_before(dl, &allctr->cpool.traitor_list);
+ return crr;
+ }
+ exp = act;
+ }
+ if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) {
+ if (!cpool_entrance)
+ cpool_entrance = &crr->cpool;
+
+ /* Move to pooled_list */
+ unlink_abandoned_carrier(crr);
+ link_abandoned_carrier(&allctr->cpool.pooled_list, crr);
+ }
+ }
+ if (--i <= i_stop) {
+ /* Move sentinel to continue next search from here */
+ relink_edl_before(dl, &allctr->cpool.traitor_list);
+ if (i > 0)
+ break;
+ else
+ return NULL;
+ }
+ }
/*
- * We search in 'prev' direction and begin by passing
- * one element before trying to fetch. This in order to
- * avoid contention with threads inserting elements.
+ * Finally search the shared pool and try employ foreign carriers
*/
- cpdp = cpool_aint2cpd(cpool_read(&sentinel->prev));
- if (cpdp == sentinel)
- return NULL;
+ sentinel = &carrier_pool[allctr->alloc_no].sentinel;
+ if (cpool_entrance) {
+ /* We saw a pooled carrier above, use it as entrance into the pool
+ */
+ cpdp = cpool_entrance;
+ }
+ else {
+ /* No pooled carrier seen above. Start search at cpool sentinel,
+ * but begin by passing one element before trying to fetch.
+ * This is in order to avoid contention with threads inserting elements.
+ */
+ cpool_entrance = sentinel;
+ cpdp = cpool_aint2cpd(cpool_read(&cpool_entrance->prev));
+ if (cpdp == sentinel)
+ goto check_dc_list;
+ }
- while (i < ERTS_ALC_CPOOL_MAX_FETCH_INSPECT) {
+ has_passed_sentinel = 0;
+ while (1) {
erts_aint_t exp;
cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev));
- if (cpdp == sentinel) {
+ if (cpdp == cpool_entrance) {
+ if (cpool_entrance == sentinel) {
+ cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev));
+ if (cpdp == sentinel)
+ break;
+ }
+ i = 0; /* Last one to inspect */
+ }
+ else if (cpdp == sentinel) {
+ if (has_passed_sentinel) {
+ /* We've been here before; cpool_entrance must have been removed */
+ break;
+ }
cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev));
if (cpdp == sentinel)
- return NULL;
- i = ERTS_ALC_CPOOL_MAX_FETCH_INSPECT; /* Last one to inspect */
+ break;
+ has_passed_sentinel = 1;
}
- crr = (Carrier_t *) (((char *) cpdp) - offsetof(Carrier_t, cpool));
- exp = erts_smp_atomic_read_rb(&crr->allctr);
- if (((exp & (ERTS_CRR_ALCTR_FLG_IN_POOL|ERTS_CRR_ALCTR_FLG_BUSY))
- == ERTS_CRR_ALCTR_FLG_IN_POOL)
+ crr = (Carrier_t *)(((char *)cpdp) - offsetof(Carrier_t, cpool));
+ exp = erts_atomic_read_rb(&crr->allctr);
+ if (((exp & (ERTS_CRR_ALCTR_FLG_MASK)) == ERTS_CRR_ALCTR_FLG_IN_POOL)
&& (erts_atomic_read_nob(&cpdp->max_size) >= size)) {
erts_aint_t act;
/* Try to fetch it... */
- act = erts_smp_atomic_cmpxchg_mb(&crr->allctr,
+ act = erts_atomic_cmpxchg_mb(&crr->allctr,
(erts_aint_t) allctr,
exp);
if (act == exp) {
cpool_delete(allctr, ((Allctr_t *) (act & ~ERTS_CRR_ALCTR_FLG_MASK)), crr);
+ if (crr->cpool.orig_allctr == allctr) {
+ unlink_abandoned_carrier(crr);
+ }
return crr;
}
}
- i++;
+ if (--i <= 0)
+ return NULL;
+ }
+
+check_dc_list:
+ /* Last; check our own pending dealloc carrier list... */
+ crr = allctr->cpool.dc_list.last;
+ while (crr) {
+ if (erts_atomic_read_nob(&crr->cpool.max_size) >= size) {
+ Block_t* blk;
+ unlink_carrier(&allctr->cpool.dc_list, crr);
+#ifdef ERTS_ALC_CPOOL_DEBUG
+ ERTS_ALC_CPOOL_ASSERT(erts_atomic_xchg_nob(&crr->allctr,
+ ((erts_aint_t) allctr))
+ == (((erts_aint_t) allctr) & ~ERTS_CRR_ALCTR_FLG_MASK));
+#else
+ erts_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr));
+#endif
+ blk = MBC_TO_FIRST_BLK(allctr, crr);
+ ASSERT(FBLK_TO_MBC(blk) == crr);
+ allctr->link_free_block(allctr, blk);
+ return crr;
+ }
+ crr = crr->prev;
+ if (--i <= 0)
+ return NULL;
}
+
return NULL;
}
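To summarize, cpool_fetch() now probes four places under one shared inspection budget instead of scanning the shared pool blindly. A condensed outline with stand-in types (the real control flow is above):

    typedef void Carrier, Allctr;

    static Carrier *cpool_fetch_outline(Allctr *a, unsigned long size)
    {
        int budget = 20;   /* ERTS_ALC_CPOOL_MAX_FETCH_INSPECT */

        /* 1. Own pooled_list: carriers this allocator abandoned that
         *    were still in the pool at the last check; fetches use a
         *    cmpxchg on crr->allctr, and entries found to have left
         *    the pool are demoted to the traitor_list. */
        /* 2. Own traitor_list: carriers last seen employed by another
         *    allocator; at most ERTS_ALC_CPOOL_MAX_TRAITOR_INSPECT
         *    (10) of the remaining budget is spent here. */
        /* 3. The shared pool, entered at a carrier remembered from
         *    steps 1-2 (cpool_entrance) when one was seen, otherwise
         *    at the sentinel, walking 'prev' links away from
         *    inserting threads. */
        /* 4. Own dc_list of carriers pending deallocation, reusing
         *    one rather than freeing it. */

        (void) a; (void) size; (void) budget;
        return 0;   /* nothing suitable within the budget */
    }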
@@ -2993,7 +3444,7 @@ check_pending_dealloc_carrier(Allctr_t *allctr,
dcrr = crr;
crr = crr->next;
- dealloc_carrier(allctr, dcrr, 1);
+ dealloc_mbc(allctr, dcrr);
i++;
} while (crr && i < ERTS_ALC_MAX_DEALLOC_CARRIER);
@@ -3024,18 +3475,20 @@ static void
schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr)
{
Allctr_t *orig_allctr;
+ Block_t *blk;
int check_pending_dealloc;
erts_aint_t max_size;
+ ASSERT(IS_MB_CARRIER(crr));
+
if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
- dealloc_carrier(allctr, crr, 1);
+ dealloc_mbc(allctr, crr);
return;
}
orig_allctr = crr->cpool.orig_allctr;
if (allctr != orig_allctr) {
- Block_t *blk = MBC_TO_FIRST_BLK(allctr, crr);
int cinit = orig_allctr->dd.ix - allctr->dd.ix;
/*
@@ -3052,13 +3505,14 @@ schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr)
* since the block is an mbc block that is free and last
* in the carrier.
*/
+ blk = MBC_TO_FIRST_BLK(allctr, crr);
ERTS_ALC_CPOOL_ASSERT(IS_FREE_LAST_MBC_BLK(blk));
ERTS_ALC_CPOOL_ASSERT(IS_MBC_FIRST_ABLK(allctr, blk));
ERTS_ALC_CPOOL_ASSERT(crr == FBLK_TO_MBC(blk));
ERTS_ALC_CPOOL_ASSERT(crr == FIRST_BLK_TO_MBC(allctr, blk));
ERTS_ALC_CPOOL_ASSERT(((erts_aint_t) allctr)
- == (erts_smp_atomic_read_nob(&crr->allctr)
+ == (erts_atomic_read_nob(&crr->allctr)
& ~ERTS_CRR_ALCTR_FLG_MASK));
if (ddq_enqueue(&orig_allctr->dd.q, BLK2UMEM(blk), cinit))
@@ -3066,13 +3520,18 @@ schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr)
return;
}
+ if (is_abandoned(crr))
+ unlink_abandoned_carrier(crr);
+
if (crr->cpool.thr_prgr == ERTS_THR_PRGR_INVALID
|| erts_thr_progress_has_reached(crr->cpool.thr_prgr)) {
- dealloc_carrier(allctr, crr, 1);
+ dealloc_mbc(allctr, crr);
return;
}
- max_size = (erts_aint_t) allctr->largest_fblk_in_mbc(allctr, crr);
+ blk = MBC_TO_FIRST_BLK(allctr, crr);
+ ASSERT(IS_FREE_LAST_MBC_BLK(blk));
+ max_size = (erts_aint_t) MBC_FBLK_SZ(blk);
erts_atomic_set_nob(&crr->cpool.max_size, max_size);
crr->next = NULL;
@@ -3112,6 +3571,8 @@ cpool_init_carrier_data(Allctr_t *allctr, Carrier_t *crr)
limit = (csz/100)*allctr->cpool.util_limit;
crr->cpool.abandon_limit = limit;
}
+ crr->cpool.abandoned.next = NULL;
+ crr->cpool.abandoned.prev = NULL;
}
static void
@@ -3142,6 +3603,9 @@ abandon_carrier(Allctr_t *allctr, Carrier_t *crr)
STAT_MBC_CPOOL_INSERT(allctr, crr);
unlink_carrier(&allctr->mbc_list, crr);
+ if (crr->cpool.orig_allctr == allctr) {
+ link_abandoned_carrier(&allctr->cpool.pooled_list, crr);
+ }
allctr->remove_mbc(allctr, crr);
@@ -3199,7 +3663,6 @@ cpool_read_stat(Allctr_t *allctr, UWord *nocp, UWord *cszp, UWord *nobp, UWord *
}
-#endif /* ERTS_SMP */
#ifdef DEBUG
@@ -3259,8 +3722,7 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
int is_mseg = 0;
#endif
- if (HALFWORD_HEAP
- || (ERTS_SUPER_ALIGNED_MSEG_ONLY && (flags & CFLG_MBC))
+ if ((ERTS_SUPER_ALIGNED_MSEG_ONLY && (flags & CFLG_MBC))
|| !allow_sys_alloc_carriers) {
flags |= CFLG_FORCE_MSEG;
flags &= ~CFLG_FORCE_SYS_ALLOC;
@@ -3268,15 +3730,39 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
return NULL;
#endif
}
+ flags |= allctr->crr_set_flgs;
+ flags &= ~allctr->crr_clr_flgs;
ASSERT((flags & CFLG_SBC && !(flags & CFLG_MBC))
|| (flags & CFLG_MBC && !(flags & CFLG_SBC)));
ASSERT(!(flags & CFLG_FORCE_MSEG && flags & CFLG_FORCE_SYS_ALLOC));
- blk_sz = UMEMSZ2BLKSZ(allctr, umem_sz);
+ if (umem_sz > (ERTS_UINT_MAX - ERTS_UINT_MAX/100)) {
+ /* Do an overly conservative _overflow_ check here so we don't
+ * have to deal with it from here on. I guess we could be more accurate
+ * but I don't think the need to allocate over 99% of the address space
+ * will ever arise on any machine, whether 32- or 64-bit.
+ */
+ return NULL;
+ }
+
+ if (flags & CFLG_MAIN_CARRIER) {
+ ASSERT(flags & CFLG_MBC);
+ ASSERT(flags & CFLG_NO_CPOOL);
+ ASSERT(umem_sz == allctr->main_carrier_size);
+ ERTS_UNDEF(blk_sz, 0);
+
+ if (allctr->main_carrier_size < allctr->min_mbc_size)
+ allctr->main_carrier_size = allctr->min_mbc_size;
+ crr_sz = bcrr_sz = allctr->main_carrier_size;
+ }
+ else {
+ ERTS_UNDEF(bcrr_sz, 0);
+ ERTS_UNDEF(crr_sz, 0);
+ blk_sz = UMEMSZ2BLKSZ(allctr, umem_sz);
+ }
-#ifdef ERTS_SMP
allctr->cpool.disable_abandon = ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON;
if ((flags & (CFLG_MBC|CFLG_NO_CPOOL)) == CFLG_MBC
@@ -3292,7 +3778,6 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
return blk;
}
}
-#endif
#if HAVE_ERTS_MSEG
@@ -3320,13 +3805,15 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
mseg_flags = ERTS_MSEG_FLG_NONE;
}
else {
- crr_sz = (*allctr->get_next_mbc_size)(allctr);
- if (crr_sz < MBC_HEADER_SIZE(allctr) + blk_sz)
- crr_sz = MBC_HEADER_SIZE(allctr) + blk_sz;
- mseg_flags = ERTS_MSEG_FLG_2POW;
+ if (!(flags & CFLG_MAIN_CARRIER)) {
+ crr_sz = (*allctr->get_next_mbc_size)(allctr);
+ if (crr_sz < MBC_HEADER_SIZE(allctr) + blk_sz)
+ crr_sz = MBC_HEADER_SIZE(allctr) + blk_sz;
+ }
+ mseg_flags = ERTS_MSEG_FLG_2POW;
}
- crr = (Carrier_t *) alcu_mseg_alloc(allctr, &crr_sz, mseg_flags);
+ crr = (Carrier_t *) allctr->mseg_alloc(allctr, &crr_sz, mseg_flags);
if (!crr) {
have_tried_mseg = 1;
if (!(have_tried_sys_alloc || flags & CFLG_FORCE_MSEG))
@@ -3358,23 +3845,22 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
if (flags & CFLG_SBC) {
bcrr_sz = blk_sz + SBC_HEADER_SIZE;
}
- else {
+ else if (!(flags & CFLG_MAIN_CARRIER)) {
bcrr_sz = MBC_HEADER_SIZE(allctr) + blk_sz;
- if (!(flags & CFLG_MAIN_CARRIER)
- && bcrr_sz < allctr->smallest_mbc_size)
- bcrr_sz = allctr->smallest_mbc_size;
+ if (bcrr_sz < allctr->smallest_mbc_size)
+ bcrr_sz = allctr->smallest_mbc_size;
}
crr_sz = (flags & CFLG_FORCE_SIZE
? UNIT_CEILING(bcrr_sz)
: SYS_ALLOC_CARRIER_CEILING(bcrr_sz));
- crr = (Carrier_t *) alcu_sys_alloc(allctr, crr_sz, flags & CFLG_MBC);
+ crr = (Carrier_t *) allctr->sys_alloc(allctr, &crr_sz, flags & CFLG_MBC);
if (!crr) {
if (crr_sz > UNIT_CEILING(bcrr_sz)) {
crr_sz = UNIT_CEILING(bcrr_sz);
- crr = (Carrier_t *) alcu_sys_alloc(allctr, crr_sz, flags & CFLG_MBC);
+ crr = (Carrier_t *) allctr->sys_alloc(allctr, &crr_sz, flags & CFLG_MBC);
}
if (!crr) {
#if HAVE_ERTS_MSEG
@@ -3421,9 +3907,7 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
allctr->main_carrier = crr;
}
-#ifdef ERTS_SMP
cpool_init_carrier_data(allctr, crr);
-#endif
link_carrier(&allctr->mbc_list, crr);
@@ -3433,6 +3917,21 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
}
+#ifdef USE_LTTNG_VM_TRACEPOINTS
+ if (LTTNG_ENABLED(carrier_create)) {
+ lttng_decl_carrier_stats(mbc_stats);
+ lttng_decl_carrier_stats(sbc_stats);
+ LTTNG_CARRIER_STATS_TO_LTTNG_STATS(&(allctr->mbcs), mbc_stats);
+ LTTNG_CARRIER_STATS_TO_LTTNG_STATS(&(allctr->sbcs), sbc_stats);
+ LTTNG5(carrier_create,
+ ERTS_ALC_A2AD(allctr->alloc_no),
+ allctr->ix,
+ crr_sz,
+ mbc_stats,
+ sbc_stats);
+ }
+#endif
+
DEBUG_SAVE_ALIGNMENT(crr);
return blk;
}
@@ -3473,7 +3972,7 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
new_crr_sz = new_blk_sz + SBC_HEADER_SIZE;
new_crr_sz = ERTS_SACRR_UNIT_CEILING(new_crr_sz);
- new_crr = (Carrier_t *) alcu_mseg_realloc(allctr,
+ new_crr = (Carrier_t *) allctr->mseg_realloc(allctr,
old_crr,
old_crr_sz,
&new_crr_sz);
@@ -3488,11 +3987,6 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
DEBUG_SAVE_ALIGNMENT(new_crr);
return new_blk;
}
-#if HALFWORD_HEAP
- /* Old carrier unchanged; restore stat */
- STAT_MSEG_SBC_ALLOC(allctr, old_crr_sz, old_blk_sz);
- return NULL;
-#endif
create_flags |= CFLG_FORCE_SYS_ALLOC; /* since mseg_realloc()
failed */
}
@@ -3503,7 +3997,7 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
(void *) BLK2UMEM(old_blk),
MIN(new_blk_sz, old_blk_sz) - ABLK_HDR_SZ);
unlink_carrier(&allctr->sbc_list, old_crr);
- alcu_mseg_dealloc(allctr, old_crr, old_crr_sz, ERTS_MSEG_FLG_NONE);
+ allctr->mseg_dealloc(allctr, old_crr, old_crr_sz, ERTS_MSEG_FLG_NONE);
}
else {
/* Old carrier unchanged; restore stat */
@@ -3520,9 +4014,9 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
? UNIT_CEILING(new_bcrr_sz)
: SYS_ALLOC_CARRIER_CEILING(new_bcrr_sz));
- new_crr = (Carrier_t *) alcu_sys_realloc(allctr,
+ new_crr = (Carrier_t *) allctr->sys_realloc(allctr,
(void *) old_crr,
- new_crr_sz,
+ &new_crr_sz,
old_crr_sz,
0);
if (new_crr) {
@@ -3541,9 +4035,9 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
else if (new_crr_sz > UNIT_CEILING(new_bcrr_sz)) {
new_crr_sz = new_blk_sz + SBC_HEADER_SIZE;
new_crr_sz = UNIT_CEILING(new_crr_sz);
- new_crr = (Carrier_t *) alcu_sys_realloc(allctr,
+ new_crr = (Carrier_t *) allctr->sys_realloc(allctr,
(void *) old_crr,
- new_crr_sz,
+ &new_crr_sz,
old_crr_sz,
0);
if (new_crr)
@@ -3564,7 +4058,7 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
(void *) BLK2UMEM(old_blk),
MIN(new_blk_sz, old_blk_sz) - ABLK_HDR_SZ);
unlink_carrier(&allctr->sbc_list, old_crr);
- alcu_sys_free(allctr, old_crr, 0);
+ allctr->sys_dealloc(allctr, old_crr, CARRIER_SZ(old_crr), 0);
}
else {
/* Old carrier unchanged; restore... */
@@ -3580,13 +4074,13 @@ dealloc_carrier(Allctr_t *allctr, Carrier_t *crr, int superaligned)
{
#if HAVE_ERTS_MSEG
if (IS_MSEG_CARRIER(crr))
- alcu_mseg_dealloc(allctr, crr, CARRIER_SZ(crr),
+ allctr->mseg_dealloc(allctr, crr, CARRIER_SZ(crr),
(superaligned
? ERTS_MSEG_FLG_2POW
: ERTS_MSEG_FLG_NONE));
else
#endif
- alcu_sys_free(allctr, crr, superaligned);
+ allctr->sys_dealloc(allctr, crr, CARRIER_SZ(crr), superaligned);
}
static void
@@ -3626,24 +4120,24 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp)
ASSERT(IS_LAST_BLK(blk));
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- (*allctr->link_free_block)(allctr, blk, 0);
+ (*allctr->link_free_block)(allctr, blk);
HARD_CHECK_BLK_CARRIER(allctr, blk);
- (*allctr->unlink_free_block)(allctr, blk, 0);
+ (*allctr->unlink_free_block)(allctr, blk);
#endif
}
#endif
- if (allctr->destroying_mbc)
- (*allctr->destroying_mbc)(allctr, crr);
-
-#ifdef ERTS_SMP
if (busy_pcrr_pp && *busy_pcrr_pp) {
ERTS_ALC_CPOOL_ASSERT(*busy_pcrr_pp == crr);
*busy_pcrr_pp = NULL;
+ ERTS_ALC_CPOOL_ASSERT(erts_atomic_read_nob(&crr->allctr)
+ == (((erts_aint_t) allctr)
+ | ERTS_CRR_ALCTR_FLG_IN_POOL
+ | ERTS_CRR_ALCTR_FLG_BUSY));
+ erts_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr));
cpool_delete(allctr, allctr, crr);
}
else
-#endif
{
unlink_carrier(&allctr->mbc_list, crr);
#if HAVE_ERTS_MSEG
@@ -3654,13 +4148,27 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp)
else
#endif
STAT_SYS_ALLOC_MBC_FREE(allctr, crr_sz);
+
+ if (allctr->remove_mbc)
+ allctr->remove_mbc(allctr, crr);
}
-#ifdef ERTS_SMP
- schedule_dealloc_carrier(allctr, crr);
-#else
- dealloc_carrier(allctr, crr, 1);
+#ifdef USE_LTTNG_VM_TRACEPOINTS
+ if (LTTNG_ENABLED(carrier_destroy)) {
+ lttng_decl_carrier_stats(mbc_stats);
+ lttng_decl_carrier_stats(sbc_stats);
+ LTTNG_CARRIER_STATS_TO_LTTNG_STATS(&(allctr->mbcs), mbc_stats);
+ LTTNG_CARRIER_STATS_TO_LTTNG_STATS(&(allctr->sbcs), sbc_stats);
+ LTTNG5(carrier_destroy,
+ ERTS_ALC_A2AD(allctr->alloc_no),
+ allctr->ix,
+ crr_sz,
+ mbc_stats,
+ sbc_stats);
+ }
#endif
+
+ schedule_dealloc_carrier(allctr, crr);
}
}
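
[Editor's note] destroy_carrier above relies on the carrier's allctr word carrying both an owner pointer and state flags (IN_POOL, BUSY) in its low bits. A hedged sketch of that tagged-pointer pattern, with illustrative names:

    #include <stdint.h>

    /* carriers are assumed at least 4-byte aligned, so the two
       low pointer bits are free to carry flags */
    #define CRR_FLG_IN_POOL ((uintptr_t)1 << 0)
    #define CRR_FLG_BUSY    ((uintptr_t)1 << 1)
    #define CRR_FLG_MASK    (CRR_FLG_IN_POOL | CRR_FLG_BUSY)

    static void *owner_of(uintptr_t word) {
        return (void *)(word & ~CRR_FLG_MASK);  /* strip flag bits */
    }

    static uintptr_t flags_of(uintptr_t word) {
        return word & CRR_FLG_MASK;
    }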
@@ -3676,9 +4184,6 @@ static struct {
Eterm e;
Eterm t;
Eterm ramv;
-#if HALFWORD_HEAP
- Eterm low;
-#endif
Eterm sbct;
#if HAVE_ERTS_MSEG
Eterm asbcst;
@@ -3706,9 +4211,7 @@ static struct {
Eterm fix_types;
Eterm mbcs;
-#ifdef ERTS_SMP
Eterm mbcs_pool;
-#endif
Eterm sbcs;
Eterm sys_alloc_carriers_size;
@@ -3769,9 +4272,6 @@ init_atoms(Allctr_t *allctr)
AM_INIT(e);
AM_INIT(t);
AM_INIT(ramv);
-#if HALFWORD_HEAP
- AM_INIT(low);
-#endif
AM_INIT(sbct);
#if HAVE_ERTS_MSEG
AM_INIT(asbcst);
@@ -3799,9 +4299,7 @@ init_atoms(Allctr_t *allctr)
AM_INIT(fix_types);
AM_INIT(mbcs);
-#ifdef ERTS_SMP
AM_INIT(mbcs_pool);
-#endif
AM_INIT(sbcs);
AM_INIT(sys_alloc_carriers_size);
@@ -3932,7 +4430,7 @@ add_fix_types(Allctr_t *allctr, int internal, Uint **hpp, Uint *szp,
static Eterm
sz_info_fix(Allctr_t *allctr,
int internal,
- int *print_to_p,
+ fmtfn_t *print_to_p,
void *print_to_arg,
Uint **hpp,
Uint *szp)
@@ -3953,7 +4451,7 @@ sz_info_fix(Allctr_t *allctr,
UWord used = fix->type_size * fix->u.cpool.used;
if (print_to_p) {
- int to = *print_to_p;
+ fmtfn_t to = *print_to_p;
void *arg = print_to_arg;
erts_print(to,
arg,
@@ -3981,7 +4479,7 @@ sz_info_fix(Allctr_t *allctr,
UWord used = fix->type_size*fix->u.nocpool.used;
if (print_to_p) {
- int to = *print_to_p;
+ fmtfn_t to = *print_to_p;
void *arg = print_to_arg;
erts_print(to,
arg,
@@ -4007,7 +4505,7 @@ static Eterm
sz_info_carriers(Allctr_t *allctr,
CarriersStats_t *cs,
char *prefix,
- int *print_to_p,
+ fmtfn_t *print_to_p,
void *print_to_arg,
Uint **hpp,
Uint *szp)
@@ -4016,7 +4514,7 @@ sz_info_carriers(Allctr_t *allctr,
UWord curr_size = cs->curr.norm.mseg.size + cs->curr.norm.sys_alloc.size;
if (print_to_p) {
- int to = *print_to_p;
+ fmtfn_t to = *print_to_p;
void *arg = print_to_arg;
erts_print(to,
arg,
@@ -4051,13 +4549,12 @@ sz_info_carriers(Allctr_t *allctr,
return res;
}
-#ifdef ERTS_SMP
static Eterm
info_cpool(Allctr_t *allctr,
int sz_only,
char *prefix,
- int *print_to_p,
+ fmtfn_t *print_to_p,
void *print_to_arg,
Uint **hpp,
Uint *szp)
@@ -4074,7 +4571,7 @@ info_cpool(Allctr_t *allctr,
}
if (print_to_p) {
- int to = *print_to_p;
+ fmtfn_t to = *print_to_p;
void *arg = print_to_arg;
if (!sz_only)
erts_print(to, arg, "%sblocks: %bpu\n", prefix, nob);
@@ -4105,13 +4602,12 @@ info_cpool(Allctr_t *allctr,
return res;
}
-#endif /* ERTS_SMP */
static Eterm
info_carriers(Allctr_t *allctr,
CarriersStats_t *cs,
char *prefix,
- int *print_to_p,
+ fmtfn_t *print_to_p,
void *print_to_arg,
Uint **hpp,
Uint *szp)
@@ -4123,7 +4619,7 @@ info_carriers(Allctr_t *allctr,
curr_size = cs->curr.norm.mseg.size + cs->curr.norm.sys_alloc.size;
if (print_to_p) {
- int to = *print_to_p;
+ fmtfn_t to = *print_to_p;
void *arg = print_to_arg;
erts_print(to,
arg,
@@ -4232,7 +4728,7 @@ make_name_atoms(Allctr_t *allctr)
size_t prefix_len = strlen(allctr->name_prefix);
if (prefix_len > MAX_ATOM_CHARACTERS + sizeof(realloc) - 1)
- erl_exit(1,"Too long allocator name: %salloc\n",allctr->name_prefix);
+ erts_exit(ERTS_ERROR_EXIT,"Too long allocator name: %salloc\n",allctr->name_prefix);
memcpy((void *) buf, (void *) allctr->name_prefix, prefix_len);
@@ -4249,7 +4745,7 @@ make_name_atoms(Allctr_t *allctr)
static Eterm
info_calls(Allctr_t *allctr,
- int *print_to_p,
+ fmtfn_t *print_to_p,
void *print_to_arg,
Uint **hpp,
Uint *szp)
@@ -4266,7 +4762,7 @@ info_calls(Allctr_t *allctr,
erts_print(TO, TOA, "%s%s calls: %b64u\n",PRFX,NAME,CC)
char *prefix = allctr->name_prefix;
- int to = *print_to_p;
+ fmtfn_t to = *print_to_p;
void *arg = print_to_arg;
PRINT_CC_5(to, arg, prefix, "alloc", allctr->calls.this_alloc);
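
[Editor's note] The recurring signature change in these hunks swaps the raw file-descriptor argument (int *) for a formatting callback (fmtfn_t *). A sketch of a compatible callback, assuming fmtfn_t has the write-through shape int (*)(void *, char *, size_t):

    #include <stdio.h>
    #include <stddef.h>

    typedef int (*fmtfn_t)(void *, char *, size_t);  /* assumed shape */

    /* write the formatted chunk to the FILE* passed as arg */
    static int print_to_stream(void *arg, char *buf, size_t len) {
        return (int)fwrite(buf, 1, len, (FILE *)arg);
    }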
@@ -4342,7 +4838,7 @@ info_calls(Allctr_t *allctr,
static Eterm
info_options(Allctr_t *allctr,
- int *print_to_p,
+ fmtfn_t *print_to_p,
void *print_to_arg,
Uint **hpp,
Uint *szp)
@@ -4360,11 +4856,7 @@ info_options(Allctr_t *allctr,
return res;
}
-#ifdef ERTS_SMP
acul = allctr->cpool.util_limit;
-#else
- acul = 0;
-#endif
if (print_to_p) {
char topt[21]; /* Enough for any 64-bit integer */
@@ -4377,9 +4869,6 @@ info_options(Allctr_t *allctr,
"option e: true\n"
"option t: %s\n"
"option ramv: %s\n"
-#if HALFWORD_HEAP
- "option low: %s\n"
-#endif
"option sbct: %beu\n"
#if HAVE_ERTS_MSEG
"option asbcst: %bpu\n"
@@ -4398,9 +4887,6 @@ info_options(Allctr_t *allctr,
"option acul: %d\n",
topt,
allctr->ramv ? "true" : "false",
-#if HALFWORD_HEAP
- allctr->mseg_opt.low_mem ? "true" : "false",
-#endif
allctr->sbc_threshold,
#if HAVE_ERTS_MSEG
allctr->mseg_opt.abs_shrink_th,
@@ -4463,9 +4949,6 @@ info_options(Allctr_t *allctr,
add_2tup(hpp, szp, &res,
am.sbct,
bld_uint(hpp, szp, allctr->sbc_threshold));
-#if HALFWORD_HEAP
- add_2tup(hpp, szp, &res, am.low, allctr->mseg_opt.low_mem ? am_true : am_false);
-#endif
add_2tup(hpp, szp, &res, am.ramv, allctr->ramv ? am_true : am_false);
add_2tup(hpp, szp, &res, am.t, (allctr->t ? am_true : am_false));
add_2tup(hpp, szp, &res, am.e, am_true);
@@ -4503,7 +4986,7 @@ reset_max_values(CarriersStats_t *cs)
\* */
Eterm
-erts_alcu_au_info_options(int *print_to_p, void *print_to_arg,
+erts_alcu_au_info_options(fmtfn_t *print_to_p, void *print_to_arg,
Uint **hpp, Uint *szp)
{
Eterm res = THE_NON_VALUE;
@@ -4546,7 +5029,7 @@ erts_alcu_au_info_options(int *print_to_p, void *print_to_arg,
Eterm
erts_alcu_info_options(Allctr_t *allctr,
- int *print_to_p,
+ fmtfn_t *print_to_p,
void *print_to_arg,
Uint **hpp,
Uint *szp)
@@ -4556,19 +5039,15 @@ erts_alcu_info_options(Allctr_t *allctr,
if (hpp || szp)
ensure_atoms_initialized(allctr);
-#ifdef USE_THREADS
if (allctr->thread_safe) {
erts_allctr_wrapper_pre_lock();
erts_mtx_lock(&allctr->mutex);
}
-#endif
res = info_options(allctr, print_to_p, print_to_arg, hpp, szp);
-#ifdef USE_THREADS
if (allctr->thread_safe) {
erts_mtx_unlock(&allctr->mutex);
erts_allctr_wrapper_pre_unlock();
}
-#endif
return res;
}
@@ -4578,15 +5057,13 @@ Eterm
erts_alcu_sz_info(Allctr_t *allctr,
int internal,
int begin_max_period,
- int *print_to_p,
+ fmtfn_t *print_to_p,
void *print_to_arg,
Uint **hpp,
Uint *szp)
{
Eterm res, mbcs, sbcs, fix = THE_NON_VALUE;
-#ifdef ERTS_SMP
Eterm mbcs_pool;
-#endif
res = THE_NON_VALUE;
@@ -4601,16 +5078,14 @@ erts_alcu_sz_info(Allctr_t *allctr,
if (hpp || szp)
ensure_atoms_initialized(allctr);
-#ifdef USE_THREADS
if (allctr->thread_safe) {
erts_allctr_wrapper_pre_lock();
erts_mtx_lock(&allctr->mutex);
}
-#endif
ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
- /* Update sbc values not continously updated */
+ /* Update sbc values not continuously updated */
allctr->sbcs.blocks.curr.no
= allctr->sbcs.curr.norm.mseg.no + allctr->sbcs.curr.norm.sys_alloc.no;
allctr->sbcs.blocks.max.no = allctr->sbcs.max.no;
@@ -4622,23 +5097,19 @@ erts_alcu_sz_info(Allctr_t *allctr,
fix = sz_info_fix(allctr, internal, print_to_p, print_to_arg, hpp, szp);
mbcs = sz_info_carriers(allctr, &allctr->mbcs, "mbcs ", print_to_p,
print_to_arg, hpp, szp);
-#ifdef ERTS_SMP
if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
mbcs_pool = info_cpool(allctr, 1, "mbcs_pool ", print_to_p,
print_to_arg, hpp, szp);
else
mbcs_pool = THE_NON_VALUE; /* shut up annoying warning... */
-#endif
sbcs = sz_info_carriers(allctr, &allctr->sbcs, "sbcs ", print_to_p,
print_to_arg, hpp, szp);
if (hpp || szp) {
res = NIL;
add_2tup(hpp, szp, &res, am.sbcs, sbcs);
-#ifdef ERTS_SMP
if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
add_2tup(hpp, szp, &res, am.mbcs_pool, mbcs_pool);
-#endif
add_2tup(hpp, szp, &res, am.mbcs, mbcs);
add_fix_types(allctr, internal, hpp, szp, &res, fix);
}
@@ -4649,12 +5120,10 @@ erts_alcu_sz_info(Allctr_t *allctr,
}
-#ifdef USE_THREADS
if (allctr->thread_safe) {
erts_mtx_unlock(&allctr->mutex);
erts_allctr_wrapper_pre_unlock();
}
-#endif
return res;
}
@@ -4664,15 +5133,13 @@ Eterm
erts_alcu_info(Allctr_t *allctr,
int internal,
int begin_max_period,
- int *print_to_p,
+ fmtfn_t *print_to_p,
void *print_to_arg,
Uint **hpp,
Uint *szp)
{
Eterm res, sett, mbcs, sbcs, calls, fix = THE_NON_VALUE;
-#ifdef ERTS_SMP
Eterm mbcs_pool;
-#endif
res = THE_NON_VALUE;
@@ -4687,16 +5154,14 @@ erts_alcu_info(Allctr_t *allctr,
if (hpp || szp)
ensure_atoms_initialized(allctr);
-#ifdef USE_THREADS
if (allctr->thread_safe) {
erts_allctr_wrapper_pre_lock();
erts_mtx_lock(&allctr->mutex);
}
-#endif
ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
- /* Update sbc values not continously updated */
+ /* Update sbc values not continuously updated */
allctr->sbcs.blocks.curr.no
= allctr->sbcs.curr.norm.mseg.no + allctr->sbcs.curr.norm.sys_alloc.no;
allctr->sbcs.blocks.max.no = allctr->sbcs.max.no;
@@ -4717,13 +5182,11 @@ erts_alcu_info(Allctr_t *allctr,
fix = sz_info_fix(allctr, internal, print_to_p, print_to_arg, hpp, szp);
mbcs = info_carriers(allctr, &allctr->mbcs, "mbcs ", print_to_p,
print_to_arg, hpp, szp);
-#ifdef ERTS_SMP
if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
mbcs_pool = info_cpool(allctr, 0, "mbcs_pool ", print_to_p,
print_to_arg, hpp, szp);
else
mbcs_pool = THE_NON_VALUE; /* shut up annoying warning... */
-#endif
sbcs = info_carriers(allctr, &allctr->sbcs, "sbcs ", print_to_p,
print_to_arg, hpp, szp);
calls = info_calls(allctr, print_to_p, print_to_arg, hpp, szp);
@@ -4733,10 +5196,8 @@ erts_alcu_info(Allctr_t *allctr,
add_2tup(hpp, szp, &res, am.calls, calls);
add_2tup(hpp, szp, &res, am.sbcs, sbcs);
-#ifdef ERTS_SMP
if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
add_2tup(hpp, szp, &res, am.mbcs_pool, mbcs_pool);
-#endif
add_2tup(hpp, szp, &res, am.mbcs, mbcs);
add_fix_types(allctr, internal, hpp, szp, &res, fix);
add_2tup(hpp, szp, &res, am.options, sett);
@@ -4752,12 +5213,10 @@ erts_alcu_info(Allctr_t *allctr,
}
-#ifdef USE_THREADS
if (allctr->thread_safe) {
erts_mtx_unlock(&allctr->mutex);
erts_allctr_wrapper_pre_unlock();
}
-#endif
return res;
}
@@ -4767,10 +5226,8 @@ void
erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size, ErtsAlcUFixInfo_t *fi, int fisz)
{
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_lock(&allctr->mutex);
-#endif
size->carriers = allctr->mbcs.curr.norm.mseg.size;
size->carriers += allctr->mbcs.curr.norm.sys_alloc.size;
@@ -4780,14 +5237,12 @@ erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size, ErtsAlcUFixInfo_t *
size->blocks = allctr->mbcs.blocks.curr.size;
size->blocks += allctr->sbcs.blocks.curr.size;
-#ifdef ERTS_SMP
if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
UWord csz, bsz;
cpool_read_stat(allctr, NULL, &csz, NULL, &bsz);
size->blocks += bsz;
size->carriers += csz;
}
-#endif
if (fi) {
int ix;
@@ -4809,10 +5264,8 @@ erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size, ErtsAlcUFixInfo_t *
}
}
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
-#endif
}
/* ----------------------------------------------------------------------- */
@@ -4827,7 +5280,7 @@ do_erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size)
ASSERT(allctr);
- ERTS_SMP_LC_ASSERT(!allctr->thread_safe
+ ERTS_LC_ASSERT(!allctr->thread_safe
|| erts_lc_mtx_is_locked(&allctr->mutex));
ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
@@ -4860,13 +5313,13 @@ do_erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size)
void *erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size)
{
void *res;
+ ASSERT(!"This is not thread safe");
res = do_erts_alcu_alloc(type, extra, size);
DEBUG_CHECK_ALIGNMENT(res);
return res;
}
-#ifdef USE_THREADS
void *
erts_alcu_alloc_ts(ErtsAlcType_t type, void *extra, Uint size)
@@ -4882,7 +5335,6 @@ erts_alcu_alloc_ts(ErtsAlcType_t type, void *extra, Uint size)
return res;
}
-#ifdef ERTS_SMP
void *
erts_alcu_alloc_thr_spec(ErtsAlcType_t type, void *extra, Uint size)
@@ -4922,21 +5374,17 @@ erts_alcu_alloc_thr_pref(ErtsAlcType_t type, void *extra, Uint size)
if (pref_allctr->thread_safe)
erts_mtx_lock(&pref_allctr->mutex);
-#ifdef ERTS_SMP
ASSERT(pref_allctr->dd.use);
ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1);
-#endif
ERTS_ALCU_DBG_CHK_THR_ACCESS(pref_allctr);
res = do_erts_alcu_alloc(type, pref_allctr, size);
-#ifdef ERTS_SMP
if (!res && ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1)) {
/* Cleaned up a bit more; try one more time... */
res = do_erts_alcu_alloc(type, pref_allctr, size);
}
-#endif
if (pref_allctr->thread_safe)
erts_mtx_unlock(&pref_allctr->mutex);
@@ -4947,9 +5395,7 @@ erts_alcu_alloc_thr_pref(ErtsAlcType_t type, void *extra, Uint size)
return res;
}
-#endif
-#endif
/* ------------------------------------------------------------------------- */
@@ -4962,7 +5408,7 @@ do_erts_alcu_free(ErtsAlcType_t type, void *extra, void *p,
ASSERT(allctr);
- ERTS_SMP_LC_ASSERT(!allctr->thread_safe
+ ERTS_LC_ASSERT(!allctr->thread_safe
|| erts_lc_mtx_is_locked(&allctr->mutex));
ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
@@ -4973,7 +5419,7 @@ do_erts_alcu_free(ErtsAlcType_t type, void *extra, void *p,
if (allctr->fix) {
if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
- fix_cpool_free(allctr, type, p, busy_pcrr_pp);
+ fix_cpool_free(allctr, type, p, busy_pcrr_pp, 1);
else
fix_nocpool_free(allctr, type, p);
}
@@ -4992,7 +5438,6 @@ void erts_alcu_free(ErtsAlcType_t type, void *extra, void *p)
do_erts_alcu_free(type, extra, p, NULL);
}
-#ifdef USE_THREADS
void
erts_alcu_free_ts(ErtsAlcType_t type, void *extra, void *p)
@@ -5003,7 +5448,6 @@ erts_alcu_free_ts(ErtsAlcType_t type, void *extra, void *p)
erts_mtx_unlock(&allctr->mutex);
}
-#ifdef ERTS_SMP
void
erts_alcu_free_thr_spec(ErtsAlcType_t type, void *extra, void *p)
@@ -5053,9 +5497,7 @@ erts_alcu_free_thr_pref(ErtsAlcType_t type, void *extra, void *p)
}
}
-#endif
-#endif
/* ------------------------------------------------------------------------- */
@@ -5075,7 +5517,7 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
ASSERT(allctr);
- ERTS_SMP_LC_ASSERT(!allctr->thread_safe
+ ERTS_LC_ASSERT(!allctr->thread_safe
|| erts_lc_mtx_is_locked(&allctr->mutex));
ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
@@ -5150,11 +5592,7 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
Block_t *new_blk;
if(IS_SBC_BLK(blk)) {
do_carrier_resize:
-#if HALFWORD_HEAP
- new_blk = resize_carrier(allctr, blk, size, CFLG_SBC | CFLG_FORCE_MSEG);
-#else
new_blk = resize_carrier(allctr, blk, size, CFLG_SBC);
-#endif
res = new_blk ? BLK2UMEM(new_blk) : NULL;
}
else if (alcu_flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE)
@@ -5208,7 +5646,6 @@ erts_alcu_realloc_mv(ErtsAlcType_t type, void *extra, void *p, Uint size)
}
-#ifdef USE_THREADS
void *
erts_alcu_realloc_ts(ErtsAlcType_t type, void *extra, void *ptr, Uint size)
@@ -5247,7 +5684,6 @@ erts_alcu_realloc_mv_ts(ErtsAlcType_t type, void *extra, void *p, Uint size)
return res;
}
-#ifdef ERTS_SMP
void *
erts_alcu_realloc_thr_spec(ErtsAlcType_t type, void *extra,
@@ -5328,9 +5764,7 @@ realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size,
Allctr_t *pref_allctr, *used_allctr;
UWord old_user_size;
Carrier_t *busy_pcrr_p;
-#ifdef ERTS_SMP
int retried;
-#endif
if (!p)
return erts_alcu_alloc_thr_pref(type, extra, size);
@@ -5340,12 +5774,10 @@ realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size,
if (pref_allctr->thread_safe)
erts_mtx_lock(&pref_allctr->mutex);
-#ifdef ERTS_SMP
ASSERT(pref_allctr->dd.use);
ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1);
retried = 0;
restart:
-#endif
used_allctr = get_used_allctr(pref_allctr, ERTS_ALC_TS_PREF_LOCK_NO,
p, &old_user_size, &busy_pcrr_p);
@@ -5361,13 +5793,11 @@ restart:
0,
&busy_pcrr_p);
clear_busy_pool_carrier(used_allctr, busy_pcrr_p);
-#ifdef ERTS_SMP
if (!res && !retried && ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1)) {
/* Cleaned up a bit more; try one more time... */
retried = 1;
goto restart;
}
-#endif
if (pref_allctr->thread_safe)
erts_mtx_unlock(&pref_allctr->mutex);
}
@@ -5422,9 +5852,7 @@ erts_alcu_realloc_mv_thr_pref(ErtsAlcType_t type, void *extra,
return realloc_thr_pref(type, extra, p, size, 1);
}
-#endif
-#endif
/* ------------------------------------------------------------------------- */
@@ -5434,7 +5862,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
/* erts_alcu_start assumes that allctr has been zeroed */
if (((UWord)allctr & ERTS_CRR_ALCTR_FLG_MASK) != 0) {
- erl_exit(ERTS_ABORT_EXIT, "%s:%d:erts_alcu_start: Alignment error\n",
+ erts_exit(ERTS_ABORT_EXIT, "%s:%d:erts_alcu_start: Alignment error\n",
__FILE__, __LINE__);
}
@@ -5445,14 +5873,9 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
sys_memcpy((void *) &allctr->mseg_opt,
(void *) &erts_mseg_default_opt,
sizeof(ErtsMsegOpt_t));
-#ifdef ERTS_SMP
if (init->tspec || init->tpref)
allctr->mseg_opt.sched_spec = 1;
-#endif
-# if HALFWORD_HEAP
- allctr->mseg_opt.low_mem = init->low_mem;
-# endif
-#endif
+#endif /* HAVE_ERTS_MSEG */
allctr->name_prefix = init->name_prefix;
if (!allctr->name_prefix)
@@ -5509,7 +5932,6 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
goto error;
allctr->min_block_size = UNIT_CEILING(allctr->min_block_size
+ sizeof(FreeBlkFtr_t));
-#if ERTS_SMP
if (init->tpref) {
Uint sz = ABLK_HDR_SZ;
sz += (init->fix ?
@@ -5519,6 +5941,10 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->min_block_size = sz;
}
+ allctr->cpool.pooled_list.next = &allctr->cpool.pooled_list;
+ allctr->cpool.pooled_list.prev = &allctr->cpool.pooled_list;
+ allctr->cpool.traitor_list.next = &allctr->cpool.traitor_list;
+ allctr->cpool.traitor_list.prev = &allctr->cpool.traitor_list;
allctr->cpool.dc_list.first = NULL;
allctr->cpool.dc_list.last = NULL;
allctr->cpool.abandon_limit = 0;
@@ -5529,7 +5955,6 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
erts_atomic_init_nob(&allctr->cpool.stat.no_carriers, 0);
allctr->cpool.check_limit_count = ERTS_ALC_CPOOL_CHECK_LIMIT_COUNT;
allctr->cpool.util_limit = init->ts ? 0 : init->acul;
-#endif
allctr->sbc_threshold = init->sbct;
#ifndef ARCH_64
@@ -5553,26 +5978,16 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->mseg_opt.abs_shrink_th = ~((UWord) 0) / 100;
#endif
-#ifdef USE_THREADS
if (init->ts) {
allctr->thread_safe = 1;
-
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_mtx_init_x_opt(&allctr->mutex,
- "alcu_allocator",
- make_small(allctr->alloc_no),
- ERTS_LCNT_LT_ALLOC,1);
-#else
- erts_mtx_init_x(&allctr->mutex,
- "alcu_allocator",
- make_small(allctr->alloc_no),1);
-#endif /*ERTS_ENABLE_LOCK_COUNT*/
-
+
+ erts_mtx_init(&allctr->mutex, "alcu_allocator", make_small(allctr->alloc_no),
+ ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
+
#ifdef DEBUG
allctr->debug.saved_tid = 0;
#endif
}
-#endif
if(!allctr->get_free_block
|| !allctr->link_free_block
@@ -5585,36 +6000,68 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
if (allctr->mbc_header_size < sizeof(Carrier_t))
goto error;
-#ifdef ERTS_SMP
allctr->dd.use = 0;
if (init->tpref) {
allctr->dd.use = 1;
init_dd_queue(&allctr->dd.q);
allctr->dd.ix = init->ix;
}
-#endif
allctr->mbc_header_size = (UNIT_CEILING(allctr->mbc_header_size
+ ABLK_HDR_SZ)
- ABLK_HDR_SZ);
+ if (init->sys_alloc) {
+ ASSERT(init->sys_realloc && init->sys_dealloc);
+ allctr->sys_alloc = init->sys_alloc;
+ allctr->sys_realloc = init->sys_realloc;
+ allctr->sys_dealloc = init->sys_dealloc;
+ }
+ else {
+ ASSERT(!init->sys_realloc && !init->sys_dealloc);
+ allctr->sys_alloc = &erts_alcu_sys_alloc;
+ allctr->sys_realloc = &erts_alcu_sys_realloc;
+ allctr->sys_dealloc = &erts_alcu_sys_dealloc;
+ }
+#if HAVE_ERTS_MSEG
+ if (init->mseg_alloc) {
+ ASSERT(init->mseg_realloc && init->mseg_dealloc);
+ allctr->mseg_alloc = init->mseg_alloc;
+ allctr->mseg_realloc = init->mseg_realloc;
+ allctr->mseg_dealloc = init->mseg_dealloc;
+ allctr->mseg_mmapper = init->mseg_mmapper;
+ }
+ else {
+ ASSERT(!init->mseg_realloc && !init->mseg_dealloc);
+ allctr->mseg_alloc = &erts_alcu_mseg_alloc;
+ allctr->mseg_realloc = &erts_alcu_mseg_realloc;
+ allctr->mseg_dealloc = &erts_alcu_mseg_dealloc;
+ }
+ /* If a custom carrier alloc function is specified, make sure it's used */
+ if (init->mseg_alloc && !init->sys_alloc) {
+ allctr->crr_set_flgs = CFLG_FORCE_MSEG;
+ allctr->crr_clr_flgs = CFLG_FORCE_SYS_ALLOC;
+ }
+ else if (!init->mseg_alloc && init->sys_alloc) {
+ allctr->crr_set_flgs = CFLG_FORCE_SYS_ALLOC;
+ allctr->crr_clr_flgs = CFLG_FORCE_MSEG;
+ }
+#endif
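
[Editor's note] The wiring above enforces an all-or-none rule per callback family (sys_* and mseg_*), and when only one family is customized it forces every carrier onto that family. The selection logic, restated as a self-contained sketch with illustrative flag names:

    enum { FORCE_MSEG = 1 << 0, FORCE_SYS = 1 << 1 };

    static void pick_forced_family(int custom_mseg, int custom_sys,
                                   unsigned *set_flags, unsigned *clr_flags) {
        *set_flags = *clr_flags = 0;
        if (custom_mseg && !custom_sys) {
            *set_flags = FORCE_MSEG; *clr_flags = FORCE_SYS;
        } else if (!custom_mseg && custom_sys) {
            *set_flags = FORCE_SYS;  *clr_flags = FORCE_MSEG;
        }
        /* both or neither custom: leave per-request flags untouched */
    }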
+
if (allctr->main_carrier_size) {
Block_t *blk;
blk = create_carrier(allctr,
allctr->main_carrier_size,
- CFLG_MBC
+ (ERTS_SUPER_ALIGNED_MSEG_ONLY
+ ? CFLG_FORCE_MSEG : CFLG_FORCE_SYS_ALLOC)
+ | CFLG_MBC
| CFLG_FORCE_SIZE
| CFLG_NO_CPOOL
-#if !HALFWORD_HEAP && !ERTS_SUPER_ALIGNED_MSEG_ONLY
- | CFLG_FORCE_SYS_ALLOC
-#endif
| CFLG_MAIN_CARRIER);
if (!blk) {
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_destroy(&allctr->mutex);
-#endif
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"Failed to create main carrier for %salloc\n",
init->name_prefix);
}
@@ -5633,9 +6080,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->fix[i].type_size = init->fix_type_size[i];
allctr->fix[i].list_size = 0;
allctr->fix[i].list = NULL;
-#ifdef ERTS_SMP
ASSERT(allctr->fix[i].type_size >= sizeof(ErtsAllctrFixDDBlock_t));
-#endif
if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
allctr->fix[i].u.cpool.min_list_size = 0;
allctr->fix[i].u.cpool.shrink_list = 0;
@@ -5655,10 +6100,8 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
error:
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_destroy(&allctr->mutex);
-#endif
return 0;
@@ -5676,10 +6119,8 @@ erts_alcu_stop(Allctr_t *allctr)
while (allctr->mbc_list.first)
destroy_carrier(allctr, MBC_TO_FIRST_BLK(allctr, allctr->mbc_list.first), NULL);
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_destroy(&allctr->mutex);
-#endif
}
@@ -5688,15 +6129,13 @@ erts_alcu_stop(Allctr_t *allctr)
void
erts_alcu_init(AlcUInit_t *init)
{
-#ifdef ERTS_SMP
int i;
for (i = 0; i <= ERTS_ALC_A_MAX; i++) {
ErtsAlcCPoolData_t *sentinel = &carrier_pool[i].sentinel;
erts_atomic_init_nob(&sentinel->next, (erts_aint_t) sentinel);
erts_atomic_init_nob(&sentinel->prev, (erts_aint_t) sentinel);
}
-#endif
- ASSERT(SBC_BLK_SZ_MASK == MBC_FBLK_SZ_MASK); /* see BLK_SZ */
+ ERTS_CT_ASSERT(SBC_BLK_SZ_MASK == MBC_FBLK_SZ_MASK); /* see BLK_SZ */
#if HAVE_ERTS_MSEG
ASSERT(erts_mseg_unit_size() == ERTS_SACRR_UNIT_SZ);
max_mseg_carriers = init->mmc;
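
[Editor's note] The sentinel initialization above makes each per-type carrier pool an empty circular list whose links are atomic words holding node addresses. A hedged sketch using C11 atomics as stand-ins for the erts_atomic API:

    #include <stdatomic.h>
    #include <stdint.h>

    typedef struct { atomic_uintptr_t next, prev; } PoolNode;

    /* an empty pool is a sentinel pointing at itself */
    static void pool_init(PoolNode *sentinel) {
        atomic_init(&sentinel->next, (uintptr_t)sentinel);
        atomic_init(&sentinel->prev, (uintptr_t)sentinel);
    }

    static int pool_is_empty(PoolNode *sentinel) {
        return atomic_load_explicit(&sentinel->next, memory_order_relaxed)
               == (uintptr_t)sentinel;
    }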
@@ -5710,7 +6149,8 @@ erts_alcu_init(AlcUInit_t *init)
carrier_alignment = sizeof(Unit_t);
#endif
- erts_mtx_init(&init_atoms_mtx, "alcu_init_atoms");
+ erts_mtx_init(&init_atoms_mtx, "alcu_init_atoms", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
atoms_initialized = 0;
initialized = 1;
@@ -5759,10 +6199,9 @@ erts_alcu_test(UWord op, UWord a1, UWord a2)
case 0x019: return (UWord) PREV_BLK((Block_t *) a1);
case 0x01a: return (UWord) IS_MBC_FIRST_BLK((Allctr_t*)a1, (Block_t *) a2);
case 0x01b: return (UWord) sizeof(Unit_t);
- case 0x01c: return (unsigned long) BLK_TO_MBC((Block_t*) a1);
+ case 0x01c: return (UWord) BLK_TO_MBC((Block_t*) a1);
case 0x01d: ((Allctr_t*) a1)->add_mbc((Allctr_t*)a1, (Carrier_t*)a2); break;
case 0x01e: ((Allctr_t*) a1)->remove_mbc((Allctr_t*)a1, (Carrier_t*)a2); break;
-#ifdef ERTS_SMP
case 0x01f: return (UWord) sizeof(ErtsAlcCrrPool_t);
case 0x020:
SET_CARRIER_HDR((Carrier_t *) a2, 0, SCH_SYS_ALLOC|SCH_MBC, (Allctr_t *) a1);
@@ -5776,13 +6215,15 @@ erts_alcu_test(UWord op, UWord a1, UWord a2)
return (UWord) a2;
case 0x023: return (UWord) cpool_is_empty((Allctr_t *) a1);
case 0x024: return (UWord) cpool_dbg_is_in_pool((Allctr_t *) a1, (Carrier_t *) a2);
+ case 0x025: /* UMEM2BLK_TEST */
+#ifdef DEBUG
+# ifdef HARD_DEBUG
+ return (UWord)UMEM2BLK(a1-3*sizeof(UWord));
+# else
+ return (UWord)UMEM2BLK(a1-2*sizeof(UWord));
+# endif
#else
- case 0x01f: return (UWord) 0;
- case 0x020: return (UWord) 0;
- case 0x021: return (UWord) 0;
- case 0x022: return (UWord) 0;
- case 0x023: return (UWord) 0;
- case 0x024: return (UWord) 0;
+ return (UWord)UMEM2BLK(a1);
#endif
default: ASSERT(0); return ~((UWord) 0);
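
[Editor's note] Case 0x025 above compensates for the extra fence words that debug builds place in front of the user area before mapping back to the block header. A hedged sketch of that adjustment; the pad widths are illustrative:

    #include <stdint.h>

    #ifdef HARD_DEBUG
    # define DBG_PAD_WORDS 3    /* header fence plus hard-debug words */
    #elif defined(DEBUG)
    # define DBG_PAD_WORDS 2
    #else
    # define DBG_PAD_WORDS 0
    #endif

    /* step back over the debug fence before the user-to-block mapping */
    static void *adjust_for_debug_fence(void *umem) {
        return (char *)umem - DBG_PAD_WORDS * sizeof(uintptr_t);
    }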
@@ -5820,7 +6261,7 @@ erts_alcu_verify_unused(Allctr_t *allctr)
if (no) {
UWord sz = allctr->sbcs.blocks.curr.size;
sz += allctr->mbcs.blocks.curr.size;
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"%salloc() used when expected to be unused!\n"
"Total amount of blocks allocated: %bpu\n"
"Total amount of bytes allocated: %bpu\n",
@@ -5831,13 +6272,9 @@ erts_alcu_verify_unused(Allctr_t *allctr)
void
erts_alcu_verify_unused_ts(Allctr_t *allctr)
{
-#ifdef USE_THREADS
erts_mtx_lock(&allctr->mutex);
-#endif
erts_alcu_verify_unused(allctr);
-#ifdef USE_THREADS
erts_mtx_unlock(&allctr->mutex);
-#endif
}
#ifdef DEBUG
@@ -5860,11 +6297,6 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk)
ASSERT(SBC2BLK(allctr, sbc) == iblk);
ASSERT(CARRIER_SZ(sbc) - SBC_HEADER_SIZE >= SBC_BLK_SZ(iblk));
-#if HAVE_ERTS_MSEG
- if (IS_MSEG_CARRIER(sbc)) {
- ASSERT(CARRIER_SZ(sbc) % ERTS_SACRR_UNIT_SZ == 0);
- }
-#endif
crr = sbc;
cl = &allctr->sbc_list;
}
@@ -5973,3 +6405,45 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk)
#endif /* ERTS_ALLOC_UTIL_HARD_DEBUG */
+#ifdef ERTS_ENABLE_LOCK_COUNT
+
+static void lcnt_enable_allocator_lock_count(Allctr_t *allocator, int enable) {
+ if(!allocator->thread_safe) {
+ return;
+ }
+
+ if(enable) {
+ erts_lcnt_install_new_lock_info(&allocator->mutex.lcnt,
+ "alcu_allocator", make_small(allocator->alloc_no),
+ ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
+ } else {
+ erts_lcnt_uninstall(&allocator->mutex.lcnt);
+ }
+}
+
+static void lcnt_update_thread_spec_locks(ErtsAllocatorThrSpec_t *tspec, int enable) {
+ if(tspec->enabled) {
+ int i;
+
+ for(i = 0; i < tspec->size; i++) {
+ lcnt_enable_allocator_lock_count(tspec->allctr[i], enable);
+ }
+ }
+}
+
+void erts_lcnt_update_allocator_locks(int enable) {
+ int i;
+
+ for(i = ERTS_ALC_A_MIN; i < ERTS_ALC_A_MAX; i++) {
+ ErtsAllocatorInfo_t *ai = &erts_allctrs_info[i];
+
+ if(ai->enabled && ai->alloc_util) {
+ if(ai->thr_spec) {
+ lcnt_update_thread_spec_locks((ErtsAllocatorThrSpec_t*)ai->extra, enable);
+ } else {
+ lcnt_enable_allocator_lock_count((Allctr_t*)ai->extra, enable);
+ }
+ }
+ }
+}
+#endif /* ERTS_ENABLE_LOCK_COUNT */
diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h
index 7be6b1ed9d..faeb5ef368 100644
--- a/erts/emulator/beam/erl_alloc_util.h
+++ b/erts/emulator/beam/erl_alloc_util.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -23,6 +24,11 @@
#define ERTS_ALCU_VSN_STR "3.0"
#include "erl_alloc_types.h"
+#define ERL_THREADS_EMU_INTERNAL__
+#include "erl_threads.h"
+
+#include "erl_mseg.h"
+#include "lttng-wrapper.h"
#define ERTS_AU_PREF_ALLOC_BITS 11
#define ERTS_AU_MAX_PREF_ALLOC_INSTANCES (1 << ERTS_AU_PREF_ALLOC_BITS)
@@ -44,7 +50,6 @@ typedef struct {
int tspec;
int tpref;
int ramv;
- int low_mem; /* HALFWORD only */
UWord sbct;
UWord asbcst;
UWord rsbcst;
@@ -60,6 +65,16 @@ typedef struct {
void *fix;
size_t *fix_type_size;
+
+#if HAVE_ERTS_MSEG
+ void* (*mseg_alloc)(Allctr_t*, Uint *size_p, Uint flags);
+ void* (*mseg_realloc)(Allctr_t*, void *seg, Uint old_size, Uint *new_size_p);
+ void (*mseg_dealloc)(Allctr_t*, void *seg, Uint size, Uint flags);
+ ErtsMemMapper *mseg_mmapper;
+#endif
+ void* (*sys_alloc)(Allctr_t *allctr, Uint *size_p, int superalign);
+ void* (*sys_realloc)(Allctr_t *allctr, void *ptr, Uint *size_p, Uint old_size, int superalign);
+ void (*sys_dealloc)(Allctr_t *allctr, void *ptr, Uint size, int superalign);
} AllctrInit_t;
typedef struct {
@@ -89,7 +104,6 @@ typedef struct {
0, /* (bool) tspec: thread specific */\
0, /* (bool) tpref: thread preferred */\
0, /* (bool) ramv: realloc always moves */\
- 0, /* (bool) low_mem: HALFWORD only */\
512*1024, /* (bytes) sbct: sbc threshold */\
2*1024*1024, /* (amount) asbcst: abs sbc shrink threshold */\
20, /* (%) rsbcst: rel sbc shrink threshold */\
@@ -124,7 +138,6 @@ typedef struct {
0, /* (bool) tspec: thread specific */\
0, /* (bool) tpref: thread preferred */\
0, /* (bool) ramv: realloc always moves */\
- 0, /* (bool) low_mem: HALFWORD only */\
64*1024, /* (bytes) sbct: sbc threshold */\
2*1024*1024, /* (amount) asbcst: abs sbc shrink threshold */\
20, /* (%) rsbcst: rel sbc shrink threshold */\
@@ -147,12 +160,10 @@ void * erts_alcu_alloc(ErtsAlcType_t, void *, Uint);
void * erts_alcu_realloc(ErtsAlcType_t, void *, void *, Uint);
void * erts_alcu_realloc_mv(ErtsAlcType_t, void *, void *, Uint);
void erts_alcu_free(ErtsAlcType_t, void *, void *);
-#ifdef USE_THREADS
void * erts_alcu_alloc_ts(ErtsAlcType_t, void *, Uint);
void * erts_alcu_realloc_ts(ErtsAlcType_t, void *, void *, Uint);
void * erts_alcu_realloc_mv_ts(ErtsAlcType_t, void *, void *, Uint);
void erts_alcu_free_ts(ErtsAlcType_t, void *, void *);
-#ifdef ERTS_SMP
void * erts_alcu_alloc_thr_spec(ErtsAlcType_t, void *, Uint);
void * erts_alcu_realloc_thr_spec(ErtsAlcType_t, void *, void *, Uint);
void * erts_alcu_realloc_mv_thr_spec(ErtsAlcType_t, void *, void *, Uint);
@@ -161,34 +172,57 @@ void * erts_alcu_alloc_thr_pref(ErtsAlcType_t, void *, Uint);
void * erts_alcu_realloc_thr_pref(ErtsAlcType_t, void *, void *, Uint);
void * erts_alcu_realloc_mv_thr_pref(ErtsAlcType_t, void *, void *, Uint);
void erts_alcu_free_thr_pref(ErtsAlcType_t, void *, void *);
-#endif
-#endif
-Eterm erts_alcu_au_info_options(int *, void *, Uint **, Uint *);
-Eterm erts_alcu_info_options(Allctr_t *, int *, void *, Uint **, Uint *);
-Eterm erts_alcu_sz_info(Allctr_t *, int, int, int *, void *, Uint **, Uint *);
-Eterm erts_alcu_info(Allctr_t *, int, int, int *, void *, Uint **, Uint *);
+Eterm erts_alcu_au_info_options(fmtfn_t *, void *, Uint **, Uint *);
+Eterm erts_alcu_info_options(Allctr_t *, fmtfn_t *, void *, Uint **, Uint *);
+Eterm erts_alcu_sz_info(Allctr_t *, int, int, fmtfn_t *, void *, Uint **, Uint *);
+Eterm erts_alcu_info(Allctr_t *, int, int, fmtfn_t *, void *, Uint **, Uint *);
void erts_alcu_init(AlcUInit_t *);
void erts_alcu_current_size(Allctr_t *, AllctrSize_t *,
ErtsAlcUFixInfo_t *, int);
-#ifdef ERTS_SMP
void erts_alcu_check_delayed_dealloc(Allctr_t *, int, int *, ErtsThrPrgrVal *, int *);
-#endif
erts_aint32_t erts_alcu_fix_alloc_shrink(Allctr_t *, erts_aint32_t);
+#ifdef ARCH_32
+extern UWord erts_literal_vspace_map[];
+# define ERTS_VSPACE_WORD_BITS (sizeof(UWord)*8)
#endif
+#if HAVE_ERTS_MSEG
+# if defined(ARCH_32)
+void* erts_alcu_literal_32_mseg_alloc(Allctr_t*, Uint *size_p, Uint flags);
+void* erts_alcu_literal_32_mseg_realloc(Allctr_t*, void *seg, Uint old_size, Uint *new_size_p);
+void erts_alcu_literal_32_mseg_dealloc(Allctr_t*, void *seg, Uint size, Uint flags);
+
+# elif defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
+void* erts_alcu_mmapper_mseg_alloc(Allctr_t*, Uint *size_p, Uint flags);
+void* erts_alcu_mmapper_mseg_realloc(Allctr_t*, void *seg, Uint old_size, Uint *new_size_p);
+void erts_alcu_mmapper_mseg_dealloc(Allctr_t*, void *seg, Uint size, Uint flags);
+# endif
+
+# if defined(ERTS_ALC_A_EXEC) && !defined(ERTS_HAVE_EXEC_MMAPPER)
+void* erts_alcu_exec_mseg_alloc(Allctr_t*, Uint *size_p, Uint flags);
+void* erts_alcu_exec_mseg_realloc(Allctr_t*, void *seg, Uint old_size, Uint *new_size_p);
+void erts_alcu_exec_mseg_dealloc(Allctr_t*, void *seg, Uint size, Uint flags);
+# endif
+#endif /* HAVE_ERTS_MSEG */
+
+#ifdef ARCH_32
+void* erts_alcu_literal_32_sys_alloc(Allctr_t*, Uint *size_p, int superalign);
+void* erts_alcu_literal_32_sys_realloc(Allctr_t*, void *ptr, Uint *size_p, Uint old_size, int superalign);
+void erts_alcu_literal_32_sys_dealloc(Allctr_t*, void *ptr, Uint size, int superalign);
+#endif
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_update_allocator_locks(int enable);
+#endif
+
+#endif /* !ERL_ALLOC_UTIL__ */
+
#if defined(GET_ERL_ALLOC_UTIL_IMPL) && !defined(ERL_ALLOC_UTIL_IMPL__)
#define ERL_ALLOC_UTIL_IMPL__
#define ERTS_ALCU_FLG_FAIL_REALLOC_MOVE (((Uint32) 1) << 0)
-#ifdef USE_THREADS
-#define ERL_THREADS_EMU_INTERNAL__
-#include "erl_threads.h"
-#endif
-
-#include "erl_mseg.h"
-
#undef ERTS_ALLOC_UTIL_HARD_DEBUG
#ifdef DEBUG
# if 0
@@ -196,10 +230,6 @@ erts_aint32_t erts_alcu_fix_alloc_shrink(Allctr_t *, erts_aint32_t);
# endif
#endif
-#undef MIN
-#undef MAX
-#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
-#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
#define FLOOR(X, I) (((X)/(I))*(I))
#define CEILING(X, I) ((((X) - 1)/(I) + 1)*(I))
@@ -222,7 +252,7 @@ erts_aint32_t erts_alcu_fix_alloc_shrink(Allctr_t *, erts_aint32_t);
#if ERTS_HAVE_MSEG_SUPER_ALIGNED \
|| (!HAVE_ERTS_MSEG && ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC)
-# ifndef MSEG_ALIGN_BITS
+# ifdef MSEG_ALIGN_BITS
# define ERTS_SUPER_ALIGN_BITS MSEG_ALIGN_BITS
# else
# define ERTS_SUPER_ALIGN_BITS 18
@@ -266,34 +296,36 @@ erts_aint32_t erts_alcu_fix_alloc_shrink(Allctr_t *, erts_aint32_t);
typedef union {char c[ERTS_ALLOC_ALIGN_BYTES]; long l; double d;} Unit_t;
-#ifdef ERTS_SMP
+
+typedef struct ErtsDoubleLink_t_ {
+ struct ErtsDoubleLink_t_ *next;
+ struct ErtsDoubleLink_t_ *prev;
+} ErtsDoubleLink_t;
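
[Editor's note] A hedged sketch of how such a link node is manipulated against the self-linked sentinels that erts_alcu_start sets up for pooled_list and traitor_list; helper names are illustrative, and NULL links are assumed to mean "not on any list":

    typedef struct DL_ { struct DL_ *next, *prev; } DL;

    static void dl_init_sentinel(DL *s) { s->next = s->prev = s; }

    static void dl_insert_after(DL *pos, DL *n) {
        n->next = pos->next; n->prev = pos;
        pos->next->prev = n; pos->next = n;
    }

    static void dl_remove(DL *n) {
        n->prev->next = n->next; n->next->prev = n->prev;
        n->next = n->prev = NULL;   /* mark as unlinked */
    }

    static int dl_is_empty(const DL *s) { return s->next == s; }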
typedef struct {
erts_atomic_t next;
erts_atomic_t prev;
- Allctr_t *orig_allctr;
+ Allctr_t *orig_allctr; /* read-only while carrier is alive */
ErtsThrPrgrVal thr_prgr;
erts_atomic_t max_size;
UWord abandon_limit;
UWord blocks;
UWord blocks_size;
+ ErtsDoubleLink_t abandoned; /* node in pooled_list or traitor_list */
} ErtsAlcCPoolData_t;
-#endif
typedef struct Carrier_t_ Carrier_t;
struct Carrier_t_ {
UWord chdr;
Carrier_t *next;
Carrier_t *prev;
- erts_smp_atomic_t allctr;
-#ifdef ERTS_SMP
+ erts_atomic_t allctr;
ErtsAlcCPoolData_t cpool; /* Overwritten by block if sbc */
-#endif
};
#define ERTS_ALC_CARRIER_TO_ALLCTR(C) \
- ((Allctr_t *) (erts_smp_atomic_read_nob(&(C)->allctr) & ~FLG_MASK))
+ ((Allctr_t *) (erts_atomic_read_nob(&(C)->allctr) & ~FLG_MASK))
typedef struct {
Carrier_t *first;
@@ -374,7 +406,18 @@ typedef struct {
} blocks;
} CarriersStats_t;
-#ifdef ERTS_SMP
+#ifdef USE_LTTNG_VM_TRACEPOINTS
+#define LTTNG_CARRIER_STATS_TO_LTTNG_STATS(CSP, LSP) \
+ do { \
+ (LSP)->carriers.size = (CSP)->curr.norm.mseg.size \
+ + (CSP)->curr.norm.sys_alloc.size; \
+ (LSP)->carriers.no = (CSP)->curr.norm.mseg.no \
+ + (CSP)->curr.norm.sys_alloc.no; \
+ (LSP)->blocks.size = (CSP)->blocks.curr.size; \
+ (LSP)->blocks.no = (CSP)->blocks.curr.no; \
+ } while (0)
+#endif
+
typedef union ErtsAllctrDDBlock_t_ ErtsAllctrDDBlock_t;
@@ -417,7 +460,6 @@ typedef struct {
} head;
} ErtsAllctrDDQueue_t;
-#endif
typedef struct {
size_t type_size;
@@ -440,7 +482,6 @@ typedef struct {
} ErtsAlcFixList_t;
struct Allctr_t_ {
-#ifdef ERTS_SMP
struct {
/*
* We want the queue at the beginning of
@@ -451,7 +492,6 @@ struct Allctr_t_ {
int use;
int ix;
} dd;
-#endif
/* Allocator name prefix */
char * name_prefix;
@@ -494,13 +534,19 @@ struct Allctr_t_ {
Uint min_mbc_size;
Uint min_mbc_first_free_size;
Uint min_block_size;
+ UWord crr_set_flgs;
+ UWord crr_clr_flgs;
/* Carriers */
CarrierList_t mbc_list;
CarrierList_t sbc_list;
-#ifdef ERTS_SMP
struct {
- CarrierList_t dc_list;
+ /* pooled_list, traitor_list and dc_list contain only
+ carriers _created_ by this allocator */
+ ErtsDoubleLink_t pooled_list;
+ ErtsDoubleLink_t traitor_list;
+ CarrierList_t dc_list;
+
UWord abandon_limit;
int disable_abandon;
int check_limit_count;
@@ -512,7 +558,6 @@ struct Allctr_t_ {
erts_atomic_t no_carriers;
} stat;
} cpool;
-#endif
/* Main carrier (if there is one) */
Carrier_t * main_carrier;
@@ -522,7 +567,7 @@ struct Allctr_t_ {
Block_t *, Uint);
void (*link_free_block) (Allctr_t *, Block_t *);
void (*unlink_free_block) (Allctr_t *, Block_t *);
- Eterm (*info_options) (Allctr_t *, char *, int *,
+ Eterm (*info_options) (Allctr_t *, char *, fmtfn_t *,
void *, Uint **, Uint *);
Uint (*get_next_mbc_size) (Allctr_t *);
@@ -534,6 +579,16 @@ struct Allctr_t_ {
void (*remove_mbc) (Allctr_t *, Carrier_t *);
UWord (*largest_fblk_in_mbc) (Allctr_t *, Carrier_t *);
+#if HAVE_ERTS_MSEG
+ void* (*mseg_alloc)(Allctr_t*, Uint *size_p, Uint flags);
+ void* (*mseg_realloc)(Allctr_t*, void *seg, Uint old_size, Uint *new_size_p);
+ void (*mseg_dealloc)(Allctr_t*, void *seg, Uint size, Uint flags);
+ ErtsMemMapper *mseg_mmapper;
+#endif
+ void* (*sys_alloc)(Allctr_t *allctr, Uint *size_p, int superalign);
+ void* (*sys_realloc)(Allctr_t *allctr, void *ptr, Uint *size_p, Uint old_size, int superalign);
+ void (*sys_dealloc)(Allctr_t *allctr, void *ptr, Uint size, int superalign);
+
void (*init_atoms) (void);
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
@@ -545,7 +600,6 @@ struct Allctr_t_ {
int fix_shrink_scheduled;
ErtsAlcFixList_t *fix;
-#ifdef USE_THREADS
/* Mutex for this allocator */
erts_mtx_t mutex;
int thread_safe;
@@ -554,7 +608,6 @@ struct Allctr_t_ {
Allctr_t *next;
} ts_list;
-#endif
int atoms_initialized;
@@ -577,13 +630,11 @@ struct Allctr_t_ {
CarriersStats_t mbcs;
#ifdef DEBUG
-#ifdef USE_THREADS
struct {
int saved_tid;
erts_tid_t tid;
} debug;
#endif
-#endif
};
int erts_alcu_start(Allctr_t *, AllctrInit_t *);
@@ -600,7 +651,6 @@ void erts_alcu_assert_failed(char* expr, char* file, int line, char *func);
int is_sbc_blk(Block_t*);
#endif
-
#endif /* #if defined(GET_ERL_ALLOC_UTIL_IMPL)
&& !defined(ERL_ALLOC_UTIL_IMPL__) */
diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.c b/erts/emulator/beam/erl_ao_firstfit_alloc.c
index 396aa88e0b..05ba1f9891 100644
--- a/erts/emulator/beam/erl_ao_firstfit_alloc.c
+++ b/erts/emulator/beam/erl_ao_firstfit_alloc.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -122,7 +123,7 @@ struct AOFF_Carrier_t_ {
AOFF_RBTree_t rbt_node; /* My node in the carrier tree */
AOFF_RBTree_t* root; /* Root of my block tree */
};
-#define RBT_NODE_TO_MBC(PTR) ((AOFF_Carrier_t*)((char*)(PTR) - offsetof(AOFF_Carrier_t, rbt_node)))
+#define RBT_NODE_TO_MBC(PTR) ErtsContainerStruct((PTR), AOFF_Carrier_t, rbt_node)
/*
To support carrier migration we keep two kinds of rb-trees:
@@ -208,7 +209,9 @@ static Block_t* aoff_get_free_block(Allctr_t *, Uint, Block_t *, Uint);
static void aoff_link_free_block(Allctr_t *, Block_t*);
static void aoff_unlink_free_block(Allctr_t *allctr, Block_t *del);
static void aoff_creating_mbc(Allctr_t*, Carrier_t*);
+#ifdef DEBUG
static void aoff_destroying_mbc(Allctr_t*, Carrier_t*);
+#endif
static void aoff_add_mbc(Allctr_t*, Carrier_t*);
static void aoff_remove_mbc(Allctr_t*, Carrier_t*);
static UWord aoff_largest_fblk_in_mbc(Allctr_t*, Carrier_t*);
@@ -221,7 +224,7 @@ static AOFF_RBTree_t* rbt_search(AOFF_RBTree_t* root, Uint size);
static int rbt_assert_is_member(AOFF_RBTree_t* root, AOFF_RBTree_t* node);
#endif
-static Eterm info_options(Allctr_t *, char *, int *, void *, Uint **, Uint *);
+static Eterm info_options(Allctr_t *, char *, fmtfn_t *, void *, Uint **, Uint *);
static void init_atoms(void);
@@ -270,7 +273,11 @@ erts_aoffalc_start(AOFFAllctr_t *alc,
allctr->get_next_mbc_size = NULL;
allctr->creating_mbc = aoff_creating_mbc;
+#ifdef DEBUG
allctr->destroying_mbc = aoff_destroying_mbc;
+#else
+ allctr->destroying_mbc = NULL;
+#endif
allctr->add_mbc = aoff_add_mbc;
allctr->remove_mbc = aoff_remove_mbc;
allctr->largest_fblk_in_mbc = aoff_largest_fblk_in_mbc;
@@ -884,17 +891,18 @@ static void aoff_creating_mbc(Allctr_t *allctr, Carrier_t *carrier)
HARD_CHECK_TREE(NULL, 0, *root, 0);
}
+#define IS_CRR_IN_TREE(CRR,ROOT) \
+ ((CRR)->rbt_node.parent || (ROOT) == &(CRR)->rbt_node)
+
+#ifdef DEBUG
static void aoff_destroying_mbc(Allctr_t *allctr, Carrier_t *carrier)
{
AOFFAllctr_t *alc = (AOFFAllctr_t *) allctr;
AOFF_Carrier_t *crr = (AOFF_Carrier_t*) carrier;
- AOFF_RBTree_t *root = alc->mbc_root;
- if (crr->rbt_node.parent || &crr->rbt_node == root) {
- aoff_remove_mbc(allctr, carrier);
- }
- /*else already removed */
+ ASSERT(!IS_CRR_IN_TREE(crr, alc->mbc_root));
}
+#endif
static void aoff_add_mbc(Allctr_t *allctr, Carrier_t *carrier)
{
@@ -902,6 +910,7 @@ static void aoff_add_mbc(Allctr_t *allctr, Carrier_t *carrier)
AOFF_Carrier_t *crr = (AOFF_Carrier_t*) carrier;
AOFF_RBTree_t **root = &alc->mbc_root;
+ ASSERT(!IS_CRR_IN_TREE(crr, *root));
HARD_CHECK_TREE(NULL, 0, *root, 0);
/* Link carrier in address order tree
@@ -918,6 +927,10 @@ static void aoff_remove_mbc(Allctr_t *allctr, Carrier_t *carrier)
AOFF_RBTree_t **root = &alc->mbc_root;
ASSERT(allctr == ERTS_ALC_CARRIER_TO_ALLCTR(carrier));
+
+ if (!IS_CRR_IN_TREE(crr,*root))
+ return;
+
HARD_CHECK_TREE(NULL, 0, *root, 0);
rbt_delete(root, &crr->rbt_node);
@@ -1001,7 +1014,7 @@ add_2tup(Uint **hpp, Uint *szp, Eterm *lp, Eterm el1, Eterm el2)
static Eterm
info_options(Allctr_t *allctr,
char *prefix,
- int *print_to_p,
+ fmtfn_t *print_to_p,
void *print_to_arg,
Uint **hpp,
Uint *szp)
@@ -1022,7 +1035,7 @@ info_options(Allctr_t *allctr,
if (hpp || szp) {
if (!atoms_initialized)
- erl_exit(1, "%s:%d: Internal error: Atoms not initialized",
+ erts_exit(ERTS_ERROR_EXIT, "%s:%d: Internal error: Atoms not initialized",
__FILE__, __LINE__);
res = NIL;
diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.h b/erts/emulator/beam/erl_ao_firstfit_alloc.h
index 25b344c6a8..7349c6ab19 100644
--- a/erts/emulator/beam/erl_ao_firstfit_alloc.h
+++ b/erts/emulator/beam/erl_ao_firstfit_alloc.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
diff --git a/erts/emulator/beam/erl_arith.c b/erts/emulator/beam/erl_arith.c
index 5150a8a507..b6625db0d3 100644
--- a/erts/emulator/beam/erl_arith.c
+++ b/erts/emulator/beam/erl_arith.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -41,15 +42,8 @@
# define MAX(x, y) (((x) > (y)) ? (x) : (y))
#endif
-#if !HEAP_ON_C_STACK
-# define DECLARE_TMP(VariableName,N,P) \
- Eterm *VariableName = ((ERTS_PROC_GET_SCHDATA(P)->erl_arith_tmp_heap) + (2 * N))
-#else
-# define DECLARE_TMP(VariableName,N,P) \
- Eterm VariableName[2]
-#endif
-# define ARG_IS_NOT_TMP(Arg,Tmp) ((Arg) != make_big((Tmp)))
-
+#define DECLARE_TMP(VariableName,N,P) Eterm VariableName[2]
+#define ARG_IS_NOT_TMP(Arg,Tmp) ((Arg) != make_big((Tmp)))
static Eterm shift(Process* p, Eterm arg1, Eterm arg2, int right);
@@ -120,7 +114,7 @@ BIF_RETTYPE intdiv_2(BIF_ALIST_2)
}
if (is_both_small(BIF_ARG_1,BIF_ARG_2)){
Sint ires = signed_val(BIF_ARG_1) / signed_val(BIF_ARG_2);
- if (MY_IS_SSMALL(ires))
+ if (IS_SSMALL(ires))
BIF_RET(make_small(ires));
}
BIF_RET(erts_int_div(BIF_P, BIF_ARG_1, BIF_ARG_2));
@@ -282,8 +276,12 @@ shift(Process* p, Eterm arg1, Eterm arg2, int right)
goto do_bsl;
} else if (is_small(arg1) || is_big(arg1)) {
/*
- * N bsl PositiveBigNum is too large to represent.
+ * N bsl PositiveBigNum is too large to represent,
+ * unless N is 0.
*/
+ if (arg1 == make_small(0)) {
+ BIF_RET(arg1);
+ }
BIF_ERROR(p, SYSTEM_LIMIT);
}
/* Fall through if the left argument is not an integer. */
@@ -342,8 +340,7 @@ erts_mixed_plus(Process* p, Eterm arg1, Eterm arg2)
switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
ires = signed_val(arg1) + signed_val(arg2);
- ASSERT(MY_IS_SSMALL(ires) == IS_SSMALL(ires));
- if (MY_IS_SSMALL(ires)) {
+ if (IS_SSMALL(ires)) {
return make_small(ires);
} else {
hp = HAlloc(p, 2);
@@ -488,8 +485,7 @@ erts_mixed_minus(Process* p, Eterm arg1, Eterm arg2)
switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
ires = signed_val(arg1) - signed_val(arg2);
- ASSERT(MY_IS_SSMALL(ires) == IS_SSMALL(ires));
- if (MY_IS_SSMALL(ires)) {
+ if (IS_SSMALL(ires)) {
return make_small(ires);
} else {
hp = HAlloc(p, 2);
@@ -1183,8 +1179,7 @@ erts_gc_mixed_plus(Process* p, Eterm* reg, Uint live)
switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
ires = signed_val(arg1) + signed_val(arg2);
- ASSERT(MY_IS_SSMALL(ires) == IS_SSMALL(ires));
- if (MY_IS_SSMALL(ires)) {
+ if (IS_SSMALL(ires)) {
return make_small(ires);
} else {
if (ERTS_NEED_GC(p, 2)) {
@@ -1351,8 +1346,7 @@ erts_gc_mixed_minus(Process* p, Eterm* reg, Uint live)
switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
ires = signed_val(arg1) - signed_val(arg2);
- ASSERT(MY_IS_SSMALL(ires) == IS_SSMALL(ires));
- if (MY_IS_SSMALL(ires)) {
+ if (IS_SSMALL(ires)) {
return make_small(ires);
} else {
if (ERTS_NEED_GC(p, 2)) {
@@ -2048,3 +2042,8 @@ Eterm erts_gc_bnot(Process* p, Eterm* reg, Uint live)
}
return result;
}
+
+/* Fetched through a function call so the compiler cannot
+ * constant-fold the value. */
+double erts_get_positive_zero_float(void) {
+ return 0.0;
+}
diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c
index decae6b2ca..3ceb2fd368 100644
--- a/erts/emulator/beam/erl_async.c
+++ b/erts/emulator/beam/erl_async.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -27,14 +28,12 @@
#include "erl_thr_queue.h"
#include "erl_async.h"
#include "dtrace-wrapper.h"
+#include "lttng-wrapper.h"
#define ERTS_MAX_ASYNC_READY_CALLS_IN_SEQ 20
#define ERTS_ASYNC_PRINT_JOB 0
-#if !defined(ERTS_SMP) && defined(USE_THREADS) && !ERTS_USE_ASYNC_READY_Q
-# error "Need async ready queue in non-smp case"
-#endif
typedef struct _erl_async {
DE_Handle* hndl; /* The DE_Handle is needed when port is gone */
@@ -44,16 +43,13 @@ typedef struct _erl_async {
ErlDrvPDL pdl;
void (*async_invoke)(void*);
void (*async_free)(void*);
-#if ERTS_USE_ASYNC_READY_Q
Uint sched_id;
union {
ErtsThrQPrepEnQ_t *prep_enq;
ErtsThrQFinDeQ_t fin_deq;
} q;
-#endif
} ErtsAsync;
-#if ERTS_USE_ASYNC_READY_Q
/*
* We can do without the enqueue mutex since it isn't needed for
@@ -92,7 +88,6 @@ typedef union {
char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncReadyQ))];
} ErtsAlgndAsyncReadyQ;
-#endif /* ERTS_USE_ASYNC_READY_Q */
typedef struct {
ErtsThrQ_t thr_q;
@@ -117,12 +112,10 @@ typedef struct {
char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncInit))];
} init;
ErtsAlgndAsyncQ *queue;
-#if ERTS_USE_ASYNC_READY_Q
ErtsAlgndAsyncReadyQ *ready_queue;
-#endif
} ErtsAsyncData;
-#if defined(USE_THREADS) && defined(USE_VM_PROBES)
+#if defined(USE_VM_PROBES)
/*
* Some compilers, e.g. GCC 4.2.1 and -O3, will optimize away DTrace
@@ -138,15 +131,6 @@ int erts_async_thread_suggested_stack_size; /* Initialized by erl_init.c */
static ErtsAsyncData *async;
-#ifndef USE_THREADS
-
-void
-erts_init_async(void)
-{
-
-}
-
-#else
static void *async_main(void *);
@@ -156,7 +140,6 @@ async_q(int i)
return &async->queue[i].aq;
}
-#if ERTS_USE_ASYNC_READY_Q
static ERTS_INLINE ErtsAsyncReadyQ *
async_ready_q(Uint sched_id)
@@ -164,27 +147,21 @@ async_ready_q(Uint sched_id)
return &async->ready_queue[((int)sched_id)-1].arq;
}
-#endif
-
void
erts_init_async(void)
{
async = NULL;
if (erts_async_max_threads > 0) {
-#if ERTS_USE_ASYNC_READY_Q
ErtsThrQInit_t qinit = ERTS_THR_Q_INIT_DEFAULT;
-#endif
erts_thr_opts_t thr_opts = ERTS_THR_OPTS_DEFAULT_INITER;
- char *ptr;
+ char *ptr, thr_name[16];
size_t tot_size = 0;
int i;
tot_size += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncData));
tot_size += sizeof(ErtsAlgndAsyncQ)*erts_async_max_threads;
-#if ERTS_USE_ASYNC_READY_Q
tot_size += sizeof(ErtsAlgndAsyncReadyQ)*erts_no_schedulers;
-#endif
ptr = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_ASYNC_DATA,
tot_size);
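The sizing code above adds up every sub-structure first and then slices one permanent, cache-aligned block with pointer bumps, so the header, the per-thread queues and the per-scheduler ready queues all come from a single allocation. A generic sketch of the pattern, with malloc standing in for erts_alloc_permanent_cache_aligned and made-up structure sizes:

    #include <stdlib.h>
    #include <string.h>

    #define CACHE_LINE 64
    #define ALIGN_UP(sz) (((sz) + CACHE_LINE - 1) & ~(size_t)(CACHE_LINE - 1))

    typedef struct { int fields[16]; } Header;  /* hypothetical */
    typedef struct { int fields[32]; } Queue;   /* hypothetical */

    int main(void)
    {
        size_t nqueues = 4;
        size_t tot_size = 0;
        char *ptr;

        tot_size += ALIGN_UP(sizeof(Header));
        tot_size += sizeof(Queue) * nqueues;

        ptr = malloc(tot_size);            /* one block for everything */
        if (ptr == NULL)
            return 1;

        Header *hdr = (Header *) ptr;      /* slice 1: the header */
        ptr += ALIGN_UP(sizeof(Header));
        Queue *queues = (Queue *) ptr;     /* slice 2: the queue array */

        memset(hdr, 0, sizeof *hdr);
        memset(queues, 0, sizeof(Queue) * nqueues);

        free(hdr);                         /* hdr is the block's start */
        return 0;
    }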
@@ -193,14 +170,14 @@ erts_init_async(void)
ptr += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncData));
async->init.data.no_initialized = 0;
- erts_mtx_init(&async->init.data.mtx, "async_init_mtx");
+ erts_mtx_init(&async->init.data.mtx, "async_init_mtx", NIL,
+ ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
erts_cnd_init(&async->init.data.cnd);
erts_atomic_init_nob(&async->init.data.id, 0);
async->queue = (ErtsAlgndAsyncQ *) ptr;
ptr += sizeof(ErtsAlgndAsyncQ)*erts_async_max_threads;
-#if ERTS_USE_ASYNC_READY_Q
qinit.live.queue = ERTS_THR_Q_LIVE_LONG;
qinit.live.objects = ERTS_THR_Q_LIVE_SHORT;
@@ -212,14 +189,14 @@ erts_init_async(void)
for (i = 1; i <= erts_no_schedulers; i++) {
ErtsAsyncReadyQ *arq = async_ready_q(i);
#if ERTS_USE_ASYNC_READY_ENQ_MTX
- erts_mtx_init(&arq->x.data.enq_mtx, "async_enq_mtx");
+ erts_mtx_init(&arq->x.data.enq_mtx, "async_enq_mtx", make_small(i),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
#endif
erts_thr_q_finalize_dequeue_state_init(&arq->fin_deq);
qinit.arg = (void *) (SWord) i;
erts_thr_q_initialize(&arq->thr_q, &qinit);
}
-#endif
/* Create async threads... */
@@ -227,23 +204,16 @@ erts_init_async(void)
thr_opts.suggested_stack_size
= erts_async_thread_suggested_stack_size;
-#ifdef ETHR_HAVE_THREAD_NAMES
- thr_opts.name = malloc(sizeof(char)*(strlen("async_XXXX")+1));
-#endif
+ thr_opts.name = thr_name;
for (i = 0; i < erts_async_max_threads; i++) {
ErtsAsyncQ *aq = async_q(i);
-#ifdef ETHR_HAVE_THREAD_NAMES
- sprintf(thr_opts.name, "async_%d", i+1);
-#endif
+ erts_snprintf(thr_opts.name, 16, "async_%d", i+1);
erts_thr_create(&aq->thr_id, async_main, (void*) aq, &thr_opts);
}
-#ifdef ETHR_HAVE_THREAD_NAMES
- free(thr_opts.name);
-#endif
/* Wait for async threads to initialize... */
erts_mtx_lock(&async->init.data.mtx);
@@ -257,7 +227,6 @@ erts_init_async(void)
}
}
-#if ERTS_USE_ASYNC_READY_Q
void *
erts_get_async_ready_queue(Uint sched_id)
@@ -265,7 +234,6 @@ erts_get_async_ready_queue(Uint sched_id)
 return async ? (void *) async_ready_q(sched_id) : NULL;
}
-#endif
static ERTS_INLINE void async_add(ErtsAsync *a, ErtsAsyncQ* q)
{
@@ -274,10 +242,8 @@ static ERTS_INLINE void async_add(ErtsAsync *a, ErtsAsyncQ* q)
#endif
if (is_internal_port(a->port)) {
-#if ERTS_USE_ASYNC_READY_Q
ErtsAsyncReadyQ *arq = async_ready_q(a->sched_id);
a->q.prep_enq = erts_thr_q_prepare_enqueue(&arq->thr_q);
-#endif
/* make sure the driver will stay around */
if (a->hndl)
erts_ddll_reference_referenced_driver(a->hndl);
@@ -288,6 +254,13 @@ static ERTS_INLINE void async_add(ErtsAsync *a, ErtsAsyncQ* q)
#endif
erts_thr_q_enqueue(&q->thr_q, a);
+#ifdef USE_LTTNG_VM_TRACEPOINTS
+ if (LTTNG_ENABLED(aio_pool_put)) {
+ lttng_decl_portbuf(port_str);
+ lttng_portid_to_str(a->port, port_str);
+ LTTNG2(aio_pool_put, port_str, -1);
+ }
+#endif
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(aio_pool_add)) {
DTRACE_CHARBUF(port_str, 16);
@@ -306,10 +279,8 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
erts_tse_t *tse,
ErtsThrQPrepEnQ_t **prep_enq)
{
-#if ERTS_USE_ASYNC_READY_Q
int saved_fin_deq = 0;
ErtsThrQFinDeQ_t fin_deq;
-#endif
#ifdef USE_VM_PROBES
int len;
#endif
@@ -318,11 +289,17 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
ErtsAsync *a = (ErtsAsync *) erts_thr_q_dequeue(q);
if (a) {
-#if ERTS_USE_ASYNC_READY_Q
*prep_enq = a->q.prep_enq;
erts_thr_q_get_finalize_dequeue_data(q, &a->q.fin_deq);
if (saved_fin_deq)
erts_thr_q_append_finalize_dequeue_data(&a->q.fin_deq, &fin_deq);
+#ifdef USE_LTTNG_VM_TRACEPOINTS
+ if (LTTNG_ENABLED(aio_pool_get)) {
+ lttng_decl_portbuf(port_str);
+ int length = erts_thr_q_length_dirty(q);
+ lttng_portid_to_str(a->port, port_str);
+ LTTNG2(aio_pool_get, port_str, length);
+ }
#endif
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(aio_pool_get)) {
@@ -343,7 +320,6 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
erts_tse_reset(tse);
-#if ERTS_USE_ASYNC_READY_Q
chk_fin_deq:
if (erts_thr_q_get_finalize_dequeue_data(q, &tmp_fin_deq)) {
if (!saved_fin_deq) {
@@ -353,13 +329,11 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
erts_thr_q_append_finalize_dequeue_data(&fin_deq,
&tmp_fin_deq);
}
-#endif
switch (erts_thr_q_inspect(q, 1)) {
case ERTS_THR_Q_DIRTY:
break;
case ERTS_THR_Q_NEED_THR_PRGR:
-#ifdef ERTS_SMP
{
ErtsThrPrgrVal prgr = erts_thr_q_need_thr_progress(q);
erts_thr_progress_wakeup(NULL, prgr);
@@ -371,17 +345,14 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
erts_tse_wait(tse);
break;
}
-#endif
case ERTS_THR_Q_CLEAN:
-#if ERTS_USE_ASYNC_READY_Q
if (saved_fin_deq) {
if (erts_thr_q_finalize_dequeue(&fin_deq))
goto chk_fin_deq;
else
saved_fin_deq = 0;
}
-#endif
erts_tse_wait(tse);
break;
@@ -397,29 +368,26 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
static ERTS_INLINE void call_async_ready(ErtsAsync *a)
{
-#if ERTS_USE_ASYNC_READY_Q
Port *p = erts_id2port_sflgs(a->port,
NULL,
0,
ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
-#else
- Port *p = erts_thr_id2port_sflgs(a->port,
- ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
-#endif
if (!p) {
- if (a->async_free)
+ if (a->async_free) {
+ ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_PORT);
a->async_free(a->async_data);
+ ERTS_MSACC_POP_STATE();
+ }
}
else {
if (async_ready(p, a->async_data)) {
- if (a->async_free)
+ if (a->async_free) {
+ ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_PORT);
a->async_free(a->async_data);
+ ERTS_MSACC_POP_STATE();
+ }
}
-#if ERTS_USE_ASYNC_READY_Q
erts_port_release(p);
-#else
- erts_thr_port_release(p);
-#endif
}
if (a->pdl)
driver_pdl_dec_refc(a->pdl);
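The ERTS_MSACC_PUSH_AND_SET_STATE / ERTS_MSACC_POP_STATE pairs added above follow a save-set-restore discipline, so the async_free callback is accounted as port time without clobbering the caller's accounting state. A sketch of that discipline with hypothetical MY_* macros, not the real ERTS_MSACC_* definitions:

    #include <stdio.h>

    static int current_state = 0;          /* 0 = OTHER, 1 = PORT */

    #define MY_PUSH_AND_SET_STATE(s) int saved_state__ = current_state; \
                                     current_state = (s)
    #define MY_POP_STATE()           current_state = saved_state__

    static void do_port_work(void) { printf("state=%d\n", current_state); }

    int main(void)
    {
        MY_PUSH_AND_SET_STATE(1);  /* account the callback as port time */
        do_port_work();            /* prints state=1 */
        MY_POP_STATE();            /* restore the caller's state */
        printf("state=%d\n", current_state);  /* prints state=0 */
        return 0;
    }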
@@ -429,7 +397,6 @@ static ERTS_INLINE void call_async_ready(ErtsAsync *a)
static ERTS_INLINE void async_reply(ErtsAsync *a, ErtsThrQPrepEnQ_t *prep_enq)
{
-#if ERTS_USE_ASYNC_READY_Q
ErtsAsyncReadyQ *arq;
#if ERTS_ASYNC_PRINT_JOB
@@ -448,12 +415,6 @@ static ERTS_INLINE void async_reply(ErtsAsync *a, ErtsThrQPrepEnQ_t *prep_enq)
erts_mtx_unlock(&arq->x.data.enq_mtx);
#endif
-#else /* ERTS_USE_ASYNC_READY_Q */
-
- call_async_ready(a);
- erts_free(ERTS_ALC_T_ASYNC, (void *) a);
-
-#endif /* ERTS_USE_ASYNC_READY_Q */
}
@@ -467,7 +428,8 @@ static erts_tse_t *async_thread_init(ErtsAsyncQ *aq)
{
ErtsThrQInit_t qinit = ERTS_THR_Q_INIT_DEFAULT;
erts_tse_t *tse = erts_tse_fetch();
-#ifdef ERTS_SMP
+ ERTS_DECLARE_DUMMY(Uint no);
+
ErtsThrPrgrCallbacks callbacks;
callbacks.arg = (void *) tse;
@@ -476,24 +438,23 @@ static erts_tse_t *async_thread_init(ErtsAsyncQ *aq)
callbacks.wait = NULL;
erts_thr_progress_register_unmanaged_thread(&callbacks);
-#endif
qinit.live.queue = ERTS_THR_Q_LIVE_LONG;
qinit.live.objects = ERTS_THR_Q_LIVE_SHORT;
qinit.arg = (void *) tse;
qinit.notify = async_wakeup;
-#if ERTS_USE_ASYNC_READY_Q
qinit.auto_finalize_dequeue = 0;
-#endif
erts_thr_q_initialize(&aq->thr_q, &qinit);
/* Inform main thread that we are done initializing... */
erts_mtx_lock(&async->init.data.mtx);
- async->init.data.no_initialized++;
+ no = async->init.data.no_initialized++;
erts_cnd_signal(&async->init.data.cnd);
erts_mtx_unlock(&async->init.data.mtx);
+ erts_msacc_init_thread("async", no, 0);
+
return tse;
}
@@ -501,6 +462,7 @@ static void *async_main(void* arg)
{
ErtsAsyncQ *aq = (ErtsAsyncQ *) arg;
erts_tse_t *tse = async_thread_init(aq);
+ ERTS_MSACC_DECLARE_CACHE();
while (1) {
ErtsThrQPrepEnQ_t *prep_enq;
@@ -508,11 +470,14 @@ static void *async_main(void* arg)
if (is_nil(a->port))
break; /* Time to die */
+ ERTS_MSACC_UPDATE_CACHE();
+
#if ERTS_ASYNC_PRINT_JOB
erts_fprintf(stderr, "<- %ld\n", a->async_id);
#endif
-
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_PORT);
a->async_invoke(a->async_data);
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_OTHER);
async_reply(a, prep_enq);
}
@@ -520,12 +485,10 @@ static void *async_main(void* arg)
return NULL;
}
-#endif /* USE_THREADS */
void
erts_exit_flush_async(void)
{
-#ifdef USE_THREADS
int i;
ErtsAsync a;
a.port = NIL;
@@ -539,11 +502,8 @@ erts_exit_flush_async(void)
async_add(&a, async_q(i));
for (i = 0; i < erts_async_max_threads; i++)
erts_thr_join(async->queue[i].aq.thr_id, NULL);
-#endif
}
-#if defined(USE_THREADS) && ERTS_USE_ASYNC_READY_Q
-
int erts_check_async_ready(void *varq)
{
ErtsAsyncReadyQ *arq = (ErtsAsyncReadyQ *) varq;
@@ -584,18 +544,15 @@ int erts_async_ready_clean(void *varq, void *val)
case ERTS_THR_Q_DIRTY:
return ERTS_ASYNC_READY_DIRTY;
case ERTS_THR_Q_NEED_THR_PRGR:
-#ifdef ERTS_SMP
*((ErtsThrPrgrVal *) val)
= erts_thr_q_need_thr_progress(&arq->thr_q);
return ERTS_ASYNC_READY_NEED_THR_PRGR;
-#endif
case ERTS_THR_Q_CLEAN:
break;
}
return ERTS_ASYNC_READY_CLEAN;
}
-#endif
/*
** Generate a fair async key from an ErlDrvPort
@@ -633,25 +590,22 @@ long driver_async(ErlDrvPort ix, unsigned int* key,
Port* prt;
long id;
unsigned int qix;
-#if ERTS_USE_ASYNC_READY_Q
Uint sched_id;
+ ERTS_MSACC_PUSH_STATE();
sched_id = erts_get_scheduler_id();
if (!sched_id)
sched_id = 1;
-#endif
prt = erts_drvport2port(ix);
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return -1;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
a = (ErtsAsync*) erts_alloc(ERTS_ALC_T_ASYNC, sizeof(ErtsAsync));
-#if ERTS_USE_ASYNC_READY_Q
a->sched_id = sched_id;
-#endif
a->hndl = (DE_Handle*)prt->drv_ptr->handle;
a->port = prt->common.id;
a->pdl = NULL;
@@ -681,7 +635,6 @@ long driver_async(ErlDrvPort ix, unsigned int* key,
(*key % erts_async_max_threads) : 0;
*key = qix;
}
-#ifdef USE_THREADS
if (erts_async_max_threads > 0) {
if (prt->port_data_lock) {
driver_pdl_inc_refc(prt->port_data_lock);
@@ -690,13 +643,17 @@ long driver_async(ErlDrvPort ix, unsigned int* key,
async_add(a, async_q(qix));
return id;
}
-#endif
-
+
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_PORT);
(*a->async_invoke)(a->async_data);
+ ERTS_MSACC_POP_STATE();
if (async_ready(prt, a->async_data)) {
- if (a->async_free != NULL)
+ if (a->async_free != NULL) {
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_PORT);
(*a->async_free)(a->async_data);
+ ERTS_MSACC_POP_STATE();
+ }
}
erts_free(ERTS_ALC_T_ASYNC, (void *) a);
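The tail of driver_async above is the fallback path: when erts_async_max_threads is 0 the invoke and free callbacks run synchronously on the calling scheduler, which is why they get the same MSACC port-state wrapping as the threaded path. A sketch of that dispatch-or-run-inline shape with hypothetical names, not the erl_driver API:

    #include <stdio.h>

    typedef struct { void (*invoke)(void *); void *data; } Job;

    static int max_threads = 0;        /* pretend +A0: no async pool */

    static void enqueue(Job *j) { printf("queued\n"); (void) j; }

    static void submit(Job *j)
    {
        if (max_threads > 0) {
            enqueue(j);                /* normal case: hand to a worker */
            return;
        }
        j->invoke(j->data);            /* fallback: do the work inline */
    }

    static void work(void *arg) { printf("ran inline: %s\n", (char *) arg); }

    int main(void)
    {
        Job j = { work, "hello" };
        submit(&j);                    /* prints "ran inline: hello" */
        return 0;
    }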
diff --git a/erts/emulator/beam/erl_async.h b/erts/emulator/beam/erl_async.h
index 95374a8fc9..70ef247e0a 100644
--- a/erts/emulator/beam/erl_async.h
+++ b/erts/emulator/beam/erl_async.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2011. All Rights Reserved.
+ * Copyright Ericsson AB 2011-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -26,41 +27,14 @@ extern int erts_async_max_threads;
#define ERTS_ASYNC_THREAD_MAX_STACK_SIZE 8192 /* Kilo words */
extern int erts_async_thread_suggested_stack_size;
-#ifdef USE_THREADS
-
-#ifdef ERTS_SMP
-/*
- * With smp support we can choose to have, or not to
- * have an async ready queue.
- */
-#define ERTS_USE_ASYNC_READY_Q 1
-#endif
-
-#ifndef ERTS_SMP
-/* In non-smp case we *need* the async ready queue */
-# undef ERTS_USE_ASYNC_READY_Q
-# define ERTS_USE_ASYNC_READY_Q 1
-#endif
-
-#ifndef ERTS_USE_ASYNC_READY_Q
-# define ERTS_USE_ASYNC_READY_Q 0
-#endif
-
-#if ERTS_USE_ASYNC_READY_Q
int erts_check_async_ready(void *);
int erts_async_ready_clean(void *, void *);
void *erts_get_async_ready_queue(Uint sched_id);
#define ERTS_ASYNC_READY_CLEAN 0
#define ERTS_ASYNC_READY_DIRTY 1
-#ifdef ERTS_SMP
#define ERTS_ASYNC_READY_NEED_THR_PRGR 2
-#endif
-#endif /* ERTS_USE_ASYNC_READY_Q */
-
-#endif /* USE_THREADS */
void erts_init_async(void);
void erts_exit_flush_async(void);
-
#endif /* ERL_ASYNC_H__ */
diff --git a/erts/emulator/beam/erl_bestfit_alloc.c b/erts/emulator/beam/erl_bestfit_alloc.c
index 59c14899a2..6173c408e1 100644
--- a/erts/emulator/beam/erl_bestfit_alloc.c
+++ b/erts/emulator/beam/erl_bestfit_alloc.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -103,7 +104,7 @@ static void bf_link_free_block (Allctr_t *, Block_t *);
static ERTS_INLINE void bf_unlink_free_block (Allctr_t *, Block_t *);
-static Eterm info_options (Allctr_t *, char *, int *,
+static Eterm info_options (Allctr_t *, char *, fmtfn_t *,
void *, Uint **, Uint *);
static void init_atoms (void);
@@ -920,7 +921,7 @@ add_2tup(Uint **hpp, Uint *szp, Eterm *lp, Eterm el1, Eterm el2)
static Eterm
info_options(Allctr_t *allctr,
char *prefix,
- int *print_to_p,
+ fmtfn_t *print_to_p,
void *print_to_arg,
Uint **hpp,
Uint *szp)
@@ -939,7 +940,7 @@ info_options(Allctr_t *allctr,
if (hpp || szp) {
if (!atoms_initialized)
- erl_exit(1, "%s:%d: Internal error: Atoms not initialized",
+ erts_exit(ERTS_ERROR_EXIT, "%s:%d: Internal error: Atoms not initialized",
 __FILE__, __LINE__);
res = NIL;
diff --git a/erts/emulator/beam/erl_bestfit_alloc.h b/erts/emulator/beam/erl_bestfit_alloc.h
index 870439e886..3a5f51f5dc 100644
--- a/erts/emulator/beam/erl_bestfit_alloc.h
+++ b/erts/emulator/beam/erl_bestfit_alloc.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c
index ff775691b3..4cafa499a9 100644
--- a/erts/emulator/beam/erl_bif_binary.c
+++ b/erts/emulator/beam/erl_bif_binary.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -32,10 +33,13 @@
#include "global.h"
#include "erl_process.h"
#include "error.h"
+#define ERL_WANT_HIPE_BIF_WRAPPER__
#include "bif.h"
+#undef ERL_WANT_HIPE_BIF_WRAPPER__
#include "big.h"
#include "erl_binary.h"
#include "erl_bits.h"
+#include "erl_bif_unique.h"
/*
@@ -51,10 +55,8 @@
/* Init and local variables */
-static Export binary_match_trap_export;
-static BIF_RETTYPE binary_match_trap(BIF_ALIST_3);
-static Export binary_matches_trap_export;
-static BIF_RETTYPE binary_matches_trap(BIF_ALIST_3);
+static Export binary_find_trap_export;
+static BIF_RETTYPE binary_find_trap(BIF_ALIST_3);
static Export binary_longest_prefix_trap_export;
static BIF_RETTYPE binary_longest_prefix_trap(BIF_ALIST_3);
static Export binary_longest_suffix_trap_export;
@@ -66,19 +68,15 @@ static BIF_RETTYPE binary_copy_trap(BIF_ALIST_2);
static Uint max_loop_limit;
static BIF_RETTYPE
-binary_match(Process *p, Eterm arg1, Eterm arg2, Eterm arg3);
+binary_match(Process *p, Eterm arg1, Eterm arg2, Eterm arg3, Uint flags);
static BIF_RETTYPE
-binary_matches(Process *p, Eterm arg1, Eterm arg2, Eterm arg3);
+binary_split(Process *p, Eterm arg1, Eterm arg2, Eterm arg3);
void erts_init_bif_binary(void)
{
- erts_init_trap_export(&binary_match_trap_export,
- am_erlang, am_binary_match_trap, 3,
- &binary_match_trap);
-
- erts_init_trap_export(&binary_matches_trap_export,
- am_erlang, am_binary_matches_trap, 3,
- &binary_matches_trap);
+ erts_init_trap_export(&binary_find_trap_export,
+ am_erlang, am_binary_find_trap, 3,
+ &binary_find_trap);
erts_init_trap_export(&binary_longest_prefix_trap_export,
am_erlang, am_binary_longest_prefix_trap, 3,
@@ -173,6 +171,16 @@ static void *my_alloc(MyAllocator *my, Uint size)
#define ALPHABET_SIZE 256
+typedef struct _findall_data {
+ Uint pos;
+ Uint len;
+#ifdef HARDDEBUG
+ Uint id;
+#endif
+ Eterm epos;
+ Eterm elen;
+} FindallData;
+
typedef struct _ac_node {
#ifdef HARDDEBUG
Uint32 id; /* To identify h pointer targets when
@@ -210,6 +218,103 @@ typedef struct _bm_data {
Sint badshift[ALPHABET_SIZE];
} BMData;
+typedef struct _ac_find_all_state {
+ ACNode *q;
+ Uint pos;
+ Uint len;
+ Uint m;
+ Uint allocated;
+ FindallData *out;
+} ACFindAllState;
+
+typedef struct _ac_find_first_state {
+ ACNode *q;
+ Uint pos;
+ Uint len;
+ ACNode *candidate;
+ Uint candidate_start;
+} ACFindFirstState;
+
+typedef struct _bm_find_all_state {
+ Sint pos;
+ Sint len;
+ Uint m;
+ Uint allocated;
+ FindallData *out;
+} BMFindAllState;
+
+typedef struct _bm_find_first_state {
+ Sint pos;
+ Sint len;
+} BMFindFirstState;
+
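+/* Result codes shared by the BM and AC search drivers. C enums count
+ * upward from the first initializer, so BF_RESTART is -3, BF_NOT_FOUND
+ * is -2, BF_BADARG is -1 and BF_OK is 0. */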
+typedef enum _bf_return {
+ BF_RESTART = -3,
+ BF_NOT_FOUND,
+ BF_BADARG,
+ BF_OK
+} BFReturn;
+
+typedef struct _binary_find_all_context {
+ ErtsHeapFactory factory;
+ Eterm term;
+ Sint head;
+ Sint tail;
+ Uint end_pos;
+ Uint size;
+ FindallData *data;
+ union {
+ ACFindAllState ac;
+ BMFindAllState bm;
+ } d;
+} BinaryFindAllContext;
+
+typedef struct _binary_find_first_context {
+ Uint pos;
+ Uint len;
+ union {
+ ACFindFirstState ac;
+ BMFindFirstState bm;
+ } d;
+} BinaryFindFirstContext;
+
+typedef struct _binary_find_context BinaryFindContext;
+
+typedef struct _binary_find_search {
+ void (*init) (BinaryFindContext *);
+ BFReturn (*find) (BinaryFindContext *, byte *);
+ void (*done) (BinaryFindContext *);
+} BinaryFindSearch;
+
+typedef Eterm (*BinaryFindResult)(Process *, Eterm, BinaryFindContext **);
+
+typedef enum _binary_find_state {
+ BFSearch,
+ BFResult,
+ BFDone
+} BinaryFindState;
+
+struct _binary_find_context {
+ Eterm pat_type;
+ Eterm pat_term;
+ Binary *pat_bin;
+ Uint flags;
+ Uint hsstart;
+ Uint hsend;
+ int loop_factor;
+ int exported;
+ Uint reds;
+ BinaryFindState state;
+ Eterm trap_term;
+ BinaryFindSearch *search;
+ BinaryFindResult not_found;
+ BinaryFindResult found;
+ union {
+ BinaryFindAllContext fa;
+ BinaryFindFirstContext ff;
+ } u;
+};
+
#ifdef HARDDEBUG
static void dump_bm_data(BMData *bm);
static void dump_ac_trie(ACTrie *act);
@@ -231,23 +336,16 @@ static void dump_ac_node(ACNode *node, int indent, int ch);
MYALIGN(sizeof(ACTrie))) /* Structure */
-#ifndef MAX
-#define MAX(A,B) (((A) > (B)) ? (A) : (B))
-#endif
-
-#ifndef MIN
-#define MIN(A,B) (((A) > (B)) ? (B) : (A))
-#endif
/*
* Callback for the magic binary
*/
-static void cleanup_my_data_ac(Binary *bp)
+static int cleanup_my_data_ac(Binary *bp)
{
- return;
+ return 1;
}
-static void cleanup_my_data_bm(Binary *bp)
+static int cleanup_my_data_bm(Binary *bp)
{
- return;
+ return 1;
}
/*
@@ -310,8 +408,8 @@ static BMData *create_bmdata(MyAllocator *my, byte *x, Uint len,
/*
* Aho Corasick - Build a Trie and fill in the failure functions
* when all strings are added.
- * The algorithm is nicely described by Dieter B�hler of University of
- * T�bingen:
+ * The algorithm is nicely described by Dieter Bühler of University of
+ * Tübingen:
* http://www-sr.informatik.uni-tuebingen.de/~buehler/AC/AC.html
*/
@@ -382,7 +480,7 @@ static void ac_compute_failure_functions(ACTrie *act, ACNode **qbuff)
qbuff[qt++] = child;
/* Search for correct failure function, follow the parent's
failure function until you find a similar transition
- funtion to this child's */
+ function to this child's */
r = parent->h;
while (r != NULL && r->g[i] == NULL) {
r = r->h;
@@ -423,32 +521,25 @@ static void ac_compute_failure_functions(ACTrie *act, ACNode **qbuff)
* Basic AC finds the first end before the first start...
*
*/
-typedef struct {
- ACNode *q;
- Uint pos;
- Uint len;
- ACNode *candidate;
- Uint candidate_start;
-} ACFindFirstState;
-
-
-static void ac_init_find_first_match(ACFindFirstState *state, ACTrie *act, Sint startpos, Uint len)
+static void ac_init_find_first_match(BinaryFindContext *ctx)
{
+ ACFindFirstState *state = &(ctx->u.ff.d.ac);
+ ACTrie *act = ERTS_MAGIC_BIN_DATA(ctx->pat_bin);
state->q = act->root;
- state->pos = startpos;
- state->len = len;
+ state->pos = ctx->hsstart;
+ state->len = ctx->hsend;
state->candidate = NULL;
state->candidate_start = 0;
}
-#define AC_OK 0
-#define AC_NOT_FOUND -1
-#define AC_RESTART -2
#define AC_LOOP_FACTOR 10
-static int ac_find_first_match(ACFindFirstState *state, byte *haystack,
- Uint *mpos, Uint *mlen, Uint *reductions)
+static BFReturn ac_find_first_match(BinaryFindContext *ctx, byte *haystack)
{
+ ACFindFirstState *state = &(ctx->u.ff.d.ac);
+ Uint *mpos = &(ctx->u.ff.pos);
+ Uint *mlen = &(ctx->u.ff.len);
+ Uint *reductions = &(ctx->reds);
ACNode *q = state->q;
Uint i = state->pos;
ACNode *candidate = state->candidate, *r;
@@ -464,7 +555,7 @@ static int ac_find_first_match(ACFindFirstState *state, byte *haystack,
state->len = len;
state->candidate = candidate;
state->candidate_start = candidate_start;
- return AC_RESTART;
+ return BF_RESTART;
}
while (q->g[haystack[i]] == NULL && q->h != q) {
@@ -494,66 +585,33 @@ static int ac_find_first_match(ACFindFirstState *state, byte *haystack,
}
*reductions = reds;
if (!candidate) {
- return AC_NOT_FOUND;
+ return BF_NOT_FOUND;
}
#ifdef HARDDEBUG
dump_ac_node(candidate,0,'?');
#endif
*mpos = candidate_start;
*mlen = candidate->d;
- return AC_OK;
+ return BF_OK;
}
-typedef struct _findall_data {
- Uint pos;
- Uint len;
-#ifdef HARDDEBUG
- Uint id;
-#endif
- Eterm epos;
- Eterm elen;
-} FindallData;
-
-typedef struct {
- ACNode *q;
- Uint pos;
- Uint len;
- Uint m;
- Uint allocated;
- FindallData *out;
-} ACFindAllState;
-
-static void ac_init_find_all(ACFindAllState *state, ACTrie *act, Sint startpos, Uint len)
+static void ac_init_find_all(BinaryFindContext *ctx)
{
+ ACFindAllState *state = &(ctx->u.fa.d.ac);
+ ACTrie *act = ERTS_MAGIC_BIN_DATA(ctx->pat_bin);
state->q = act->root;
- state->pos = startpos;
- state->len = len;
+ state->pos = ctx->hsstart;
+ state->len = ctx->hsend;
state->m = 0;
state->allocated = 0;
state->out = NULL;
}
-static void ac_restore_find_all(ACFindAllState *state, char *buff)
-{
- memcpy(state,buff,sizeof(ACFindAllState));
- if (state->allocated > 0) {
- state->out = erts_alloc(ERTS_ALC_T_TMP, sizeof(FindallData) * (state->allocated));
- memcpy(state->out,buff+sizeof(ACFindAllState),sizeof(FindallData)*state->m);
- } else {
- state->out = NULL;
- }
-}
-
-static void ac_serialize_find_all(ACFindAllState *state, char *buff)
-{
- memcpy(buff,state,sizeof(ACFindAllState));
- memcpy(buff+sizeof(ACFindAllState),state->out,sizeof(FindallData)*state->m);
-}
-
-static void ac_clean_find_all(ACFindAllState *state)
+static void ac_clean_find_all(BinaryFindContext *ctx)
{
+ ACFindAllState *state = &(ctx->u.fa.d.ac);
if (state->out != NULL) {
- erts_free(ERTS_ALC_T_TMP, state->out);
+ erts_free(ERTS_ALC_T_BINARY_FIND, state->out);
}
#ifdef HARDDEBUG
state->out = NULL;
@@ -561,16 +619,14 @@ static void ac_clean_find_all(ACFindAllState *state)
#endif
}
-#define SIZEOF_AC_SERIALIZED_FIND_ALL_STATE(S) \
- (sizeof(ACFindAllState)+(sizeof(FindallData)*(S).m))
-
/*
 * Differs from the find_first function in that it stores all matches and the
 * values are returned only in the state.
*/
-static int ac_find_all_non_overlapping(ACFindAllState *state, byte *haystack,
- Uint *reductions)
+static BFReturn ac_find_all_non_overlapping(BinaryFindContext *ctx, byte *haystack)
{
+ ACFindAllState *state = &(ctx->u.fa.d.ac);
+ Uint *reductions = &(ctx->reds);
ACNode *q = state->q;
Uint i = state->pos;
Uint rstart;
@@ -581,7 +637,6 @@ static int ac_find_all_non_overlapping(ACFindAllState *state, byte *haystack,
FindallData *out = state->out;
register Uint reds = *reductions;
-
while (i < len) {
if (--reds == 0) {
state->q = q;
@@ -590,7 +645,7 @@ static int ac_find_all_non_overlapping(ACFindAllState *state, byte *haystack,
state->m = m;
state->allocated = allocated;
state->out = out;
- return AC_RESTART;
+ return BF_RESTART;
}
while (q->g[haystack[i]] == NULL && q->h != q) {
q = q->h;
@@ -628,11 +683,11 @@ static int ac_find_all_non_overlapping(ACFindAllState *state, byte *haystack,
if (m >= allocated) {
if (!allocated) {
allocated = 10;
- out = erts_alloc(ERTS_ALC_T_TMP,
+ out = erts_alloc(ERTS_ALC_T_BINARY_FIND,
sizeof(FindallData) * allocated);
} else {
allocated *= 2;
- out = erts_realloc(ERTS_ALC_T_TMP, out,
+ out = erts_realloc(ERTS_ALC_T_BINARY_FIND, out,
sizeof(FindallData) *
allocated);
}
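Both the BM and AC collectors grow their FindallData arrays the same way: start at 10 slots and double on overflow, giving amortized O(1) appends. A generic sketch of that growth policy, with plain realloc standing in for erts_realloc with ERTS_ALC_T_BINARY_FIND:

    #include <stdlib.h>

    typedef struct { unsigned pos, len; } Hit;

    static Hit *push_hit(Hit *out, unsigned *m, unsigned *allocated, Hit h)
    {
        if (*m >= *allocated) {
            *allocated = *allocated ? *allocated * 2 : 10;
            out = realloc(out, sizeof(Hit) * *allocated);
            if (out == NULL)
                abort();
        }
        out[(*m)++] = h;
        return out;
    }

    int main(void)
    {
        Hit *out = NULL;
        unsigned m = 0, allocated = 0;
        unsigned i;
        for (i = 0; i < 100; i++) {
            Hit h = { i, 1 };
            out = push_hit(out, &m, &allocated, h); /* grows 10, 20, 40... */
        }
        free(out);
        return 0;
    }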
@@ -659,7 +714,7 @@ static int ac_find_all_non_overlapping(ACFindAllState *state, byte *haystack,
*reductions = reds;
state->m = m;
state->out = out;
- return (m == 0) ? AC_NOT_FOUND : AC_OK;
+ return (m == 0) ? BF_NOT_FOUND : BF_OK;
}
/*
@@ -746,27 +801,22 @@ static void compute_goodshifts(BMData *bmd)
erts_free(ERTS_ALC_T_TMP, suffixes);
}
-typedef struct {
- Sint pos;
- Sint len;
-} BMFindFirstState;
-
-#define BM_OK 0 /* used only for find_all */
-#define BM_NOT_FOUND -1
-#define BM_RESTART -2
#define BM_LOOP_FACTOR 10 /* Should we have a higher value? */
-static void bm_init_find_first_match(BMFindFirstState *state, Sint startpos,
- Uint len)
+static void bm_init_find_first_match(BinaryFindContext *ctx)
{
- state->pos = startpos;
- state->len = (Sint) len;
+ BMFindFirstState *state = &(ctx->u.ff.d.bm);
+ state->pos = ctx->hsstart;
+ state->len = ctx->hsend;
}
-
-static Sint bm_find_first_match(BMFindFirstState *state, BMData *bmd,
- byte *haystack, Uint *reductions)
+static BFReturn bm_find_first_match(BinaryFindContext *ctx, byte *haystack)
{
+ BMFindFirstState *state = &(ctx->u.ff.d.bm);
+ BMData *bmd = ERTS_MAGIC_BIN_DATA(ctx->pat_bin);
+ Uint *mpos = &(ctx->u.ff.pos);
+ Uint *mlen = &(ctx->u.ff.len);
+ Uint *reductions = &(ctx->reds);
Sint blen = bmd->len;
Sint len = state->len;
Sint *gs = bmd->goodshift;
@@ -779,61 +829,37 @@ static Sint bm_find_first_match(BMFindFirstState *state, BMData *bmd,
while (j <= len - blen) {
if (--reds == 0) {
state->pos = j;
- return BM_RESTART;
+ return BF_RESTART;
}
for (i = blen - 1; i >= 0 && needle[i] == haystack[i + j]; --i)
;
if (i < 0) { /* found */
*reductions = reds;
- return j;
+ *mpos = (Uint) j;
+ *mlen = (Uint) blen;
+ return BF_OK;
}
j += MAX(gs[i],bs[haystack[i+j]] - blen + 1 + i);
}
*reductions = reds;
- return BM_NOT_FOUND;
+ return BF_NOT_FOUND;
}
-typedef struct {
- Sint pos;
- Sint len;
- Uint m;
- Uint allocated;
- FindallData *out;
-} BMFindAllState;
-
-static void bm_init_find_all(BMFindAllState *state, Sint startpos, Uint len)
+static void bm_init_find_all(BinaryFindContext *ctx)
{
- state->pos = startpos;
- state->len = (Sint) len;
+ BMFindAllState *state = &(ctx->u.fa.d.bm);
+ state->pos = ctx->hsstart;
+ state->len = ctx->hsend;
state->m = 0;
state->allocated = 0;
state->out = NULL;
}
-static void bm_restore_find_all(BMFindAllState *state, char *buff)
-{
- memcpy(state,buff,sizeof(BMFindAllState));
- if (state->allocated > 0) {
- state->out = erts_alloc(ERTS_ALC_T_TMP, sizeof(FindallData) *
- (state->allocated));
- memcpy(state->out,buff+sizeof(BMFindAllState),
- sizeof(FindallData)*state->m);
- } else {
- state->out = NULL;
- }
-}
-
-static void bm_serialize_find_all(BMFindAllState *state, char *buff)
-{
- memcpy(buff,state,sizeof(BMFindAllState));
- memcpy(buff+sizeof(BMFindAllState),state->out,
- sizeof(FindallData)*state->m);
-}
-
-static void bm_clean_find_all(BMFindAllState *state)
+static void bm_clean_find_all(BinaryFindContext *ctx)
{
+ BMFindAllState *state = &(ctx->u.fa.d.bm);
if (state->out != NULL) {
- erts_free(ERTS_ALC_T_TMP, state->out);
+ erts_free(ERTS_ALC_T_BINARY_FIND, state->out);
}
#ifdef HARDDEBUG
state->out = NULL;
@@ -841,17 +867,15 @@ static void bm_clean_find_all(BMFindAllState *state)
#endif
}
-#define SIZEOF_BM_SERIALIZED_FIND_ALL_STATE(S) \
- (sizeof(BMFindAllState)+(sizeof(FindallData)*(S).m))
-
/*
 * Differs from the find_first function in that it stores all matches and the
* values are returned only in the state.
*/
-static Sint bm_find_all_non_overlapping(BMFindAllState *state,
- BMData *bmd, byte *haystack,
- Uint *reductions)
+static BFReturn bm_find_all_non_overlapping(BinaryFindContext *ctx, byte *haystack)
{
+ BMFindAllState *state = &(ctx->u.fa.d.bm);
+ BMData *bmd = ERTS_MAGIC_BIN_DATA(ctx->pat_bin);
+ Uint *reductions = &(ctx->reds);
Sint blen = bmd->len;
Sint len = state->len;
Sint *gs = bmd->goodshift;
@@ -870,7 +894,7 @@ static Sint bm_find_all_non_overlapping(BMFindAllState *state,
state->m = m;
state->allocated = allocated;
state->out = out;
- return BM_RESTART;
+ return BF_RESTART;
}
for (i = blen - 1; i >= 0 && needle[i] == haystack[i + j]; --i)
;
@@ -878,10 +902,11 @@ static Sint bm_find_all_non_overlapping(BMFindAllState *state,
if (m >= allocated) {
if (!allocated) {
allocated = 10;
- out = erts_alloc(ERTS_ALC_T_TMP, sizeof(FindallData) * allocated);
+ out = erts_alloc(ERTS_ALC_T_BINARY_FIND,
+ sizeof(FindallData) * allocated);
} else {
allocated *= 2;
- out = erts_realloc(ERTS_ALC_T_TMP, out,
+ out = erts_realloc(ERTS_ALC_T_BINARY_FIND, out,
sizeof(FindallData) * allocated);
}
}
@@ -896,7 +921,7 @@ static Sint bm_find_all_non_overlapping(BMFindAllState *state,
state->m = m;
state->out = out;
*reductions = reds;
- return (m == 0) ? BM_NOT_FOUND : BM_OK;
+ return (m == 0) ? BF_NOT_FOUND : BF_OK;
}
/*
@@ -1016,276 +1041,165 @@ BIF_RETTYPE binary_compile_pattern_1(BIF_ALIST_1)
if (do_binary_match_compile(BIF_ARG_1,&tag,&bin)) {
BIF_ERROR(BIF_P,BADARG);
}
- hp = HAlloc(BIF_P, PROC_BIN_SIZE+3);
- ret = erts_mk_magic_binary_term(&hp, &MSO(BIF_P), bin);
+ hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE+3);
+ ret = erts_mk_magic_ref(&hp, &MSO(BIF_P), bin);
ret = TUPLE2(hp, tag, ret);
BIF_RET(ret);
}
-#define DO_BIN_MATCH_OK 0
-#define DO_BIN_MATCH_BADARG -1
-#define DO_BIN_MATCH_RESTART -2
-
-static int do_binary_match(Process *p, Eterm subject, Uint hsstart, Uint hsend,
- Eterm type, Binary *bin, Eterm state_term,
- Eterm *res_term)
-{
- byte *bytes;
- Uint bitoffs, bitsize;
- byte *temp_alloc = NULL;
-
- ERTS_GET_BINARY_BYTES(subject, bytes, bitoffs, bitsize);
- if (bitsize != 0) {
- goto badarg;
- }
- if (bitoffs != 0) {
- bytes = erts_get_aligned_binary_bytes(subject, &temp_alloc);
- }
- if (state_term != NIL) {
- Eterm *ptr = big_val(state_term);
- type = ptr[1];
- }
-
- if (type == am_bm) {
- BMData *bm;
- Sint pos;
- Eterm ret;
- Eterm *hp;
- BMFindFirstState state;
- Uint reds = get_reds(p, BM_LOOP_FACTOR);
- Uint save_reds = reds;
+#define BF_FLAG_GLOBAL 0x01
+#define BF_FLAG_SPLIT_TRIM 0x02
+#define BF_FLAG_SPLIT_TRIM_ALL 0x04
- bm = (BMData *) ERTS_MAGIC_BIN_DATA(bin);
-#ifdef HARDDEBUG
- dump_bm_data(bm);
-#endif
- if (state_term == NIL) {
- bm_init_find_first_match(&state, hsstart, hsend);
- } else {
- Eterm *ptr = big_val(state_term);
- memcpy(&state,ptr+2,sizeof(state));
- }
-#ifdef HARDDEBUG
- erts_printf("(bm) state->pos = %ld, state->len = %lu\n",state.pos,
- state.len);
-#endif
- pos = bm_find_first_match(&state, bm, bytes, &reds);
- if (pos == BM_NOT_FOUND) {
- ret = am_nomatch;
- } else if (pos == BM_RESTART) {
- int x = (sizeof(BMFindFirstState) / sizeof(Eterm)) +
- !!(sizeof(BMFindFirstState) % sizeof(Eterm));
+static void bf_context_init(BinaryFindContext *ctx, BinaryFindResult not_found,
+ BinaryFindResult single, BinaryFindResult global,
+ Binary *pat_bin);
+static BinaryFindContext *bf_context_export(Process *p, BinaryFindContext *src);
+static int bf_context_destructor(Binary *ctx_bin);
#ifdef HARDDEBUG
- erts_printf("Trap bm!\n");
+static void bf_context_dump(BinaryFindContext *ctx);
#endif
- hp = HAlloc(p,x+2);
- hp[0] = make_pos_bignum_header(x+1);
- hp[1] = type;
- memcpy(hp+2,&state,sizeof(state));
- *res_term = make_big(hp);
- erts_free_aligned_binary_bytes(temp_alloc);
- return DO_BIN_MATCH_RESTART;
- } else {
- Eterm erlen = erts_make_integer((Uint) bm->len, p);
- ret = erts_make_integer(pos,p);
- hp = HAlloc(p,3);
- ret = TUPLE2(hp, ret, erlen);
- }
- erts_free_aligned_binary_bytes(temp_alloc);
- BUMP_REDS(p, (save_reds - reds) / BM_LOOP_FACTOR);
- *res_term = ret;
- return DO_BIN_MATCH_OK;
- } else if (type == am_ac) {
- ACTrie *act;
- Uint pos, rlen;
- int acr;
- ACFindFirstState state;
- Eterm ret;
- Eterm *hp;
- Uint reds = get_reds(p, AC_LOOP_FACTOR);
- Uint save_reds = reds;
- act = (ACTrie *) ERTS_MAGIC_BIN_DATA(bin);
-#ifdef HARDDEBUG
- dump_ac_trie(act);
-#endif
- if (state_term == NIL) {
- ac_init_find_first_match(&state, act, hsstart, hsend);
- } else {
- Eterm *ptr = big_val(state_term);
- memcpy(&state,ptr+2,sizeof(state));
- }
- acr = ac_find_first_match(&state, bytes, &pos, &rlen, &reds);
- if (acr == AC_NOT_FOUND) {
- ret = am_nomatch;
- } else if (acr == AC_RESTART) {
- int x = (sizeof(state) / sizeof(Eterm)) +
- !!(sizeof(ACFindFirstState) % sizeof(Eterm));
-#ifdef HARDDEBUG
- erts_printf("Trap ac!\n");
-#endif
- hp = HAlloc(p,x+2);
- hp[0] = make_pos_bignum_header(x+1);
- hp[1] = type;
- memcpy(hp+2,&state,sizeof(state));
- *res_term = make_big(hp);
- erts_free_aligned_binary_bytes(temp_alloc);
- return DO_BIN_MATCH_RESTART;
- } else {
- Eterm epos = erts_make_integer(pos,p);
- Eterm erlen = erts_make_integer(rlen,p);
- hp = HAlloc(p,3);
- ret = TUPLE2(hp, epos, erlen);
+static BinaryFindSearch bf_search_ac_global = {
+ ac_init_find_all,
+ ac_find_all_non_overlapping,
+ ac_clean_find_all
+};
+
+static BinaryFindSearch bf_search_ac_single = {
+ ac_init_find_first_match,
+ ac_find_first_match,
+ NULL
+};
+
+static BinaryFindSearch bf_search_bm_global = {
+ bm_init_find_all,
+ bm_find_all_non_overlapping,
+ bm_clean_find_all
+};
+
+static BinaryFindSearch bf_search_bm_single = {
+ bm_init_find_first_match,
+ bm_find_first_match,
+ NULL
+};
+
+static void bf_context_init(BinaryFindContext *ctx, BinaryFindResult not_found,
+ BinaryFindResult single, BinaryFindResult global,
+ Binary *pat_bin)
+{
+ ctx->exported = 0;
+ ctx->state = BFSearch;
+ ctx->not_found = not_found;
+ if (ctx->flags & BF_FLAG_GLOBAL) {
+ ctx->found = global;
+ if (ctx->pat_type == am_bm) {
+ ctx->search = &bf_search_bm_global;
+ ctx->loop_factor = BM_LOOP_FACTOR;
+ } else if (ctx->pat_type == am_ac) {
+ ctx->search = &bf_search_ac_global;
+ ctx->loop_factor = AC_LOOP_FACTOR;
+ }
+ } else {
+ ctx->found = single;
+ if (ctx->pat_type == am_bm) {
+ ctx->search = &bf_search_bm_single;
+ ctx->loop_factor = BM_LOOP_FACTOR;
+ } else if (ctx->pat_type == am_ac) {
+ ctx->search = &bf_search_ac_single;
+ ctx->loop_factor = AC_LOOP_FACTOR;
}
- erts_free_aligned_binary_bytes(temp_alloc);
- BUMP_REDS(p, (save_reds - reds) / AC_LOOP_FACTOR);
- *res_term = ret;
- return DO_BIN_MATCH_OK;
}
- badarg:
- return DO_BIN_MATCH_BADARG;
+ ctx->trap_term = THE_NON_VALUE;
+ ctx->pat_bin = pat_bin;
+ ctx->search->init(ctx);
}
-static int do_binary_matches(Process *p, Eterm subject, Uint hsstart,
- Uint hsend, Eterm type, Binary *bin,
- Eterm state_term, Eterm *res_term)
+static BinaryFindContext *bf_context_export(Process *p, BinaryFindContext *src)
{
- byte *bytes;
- Uint bitoffs, bitsize;
- byte *temp_alloc = NULL;
+ Binary *ctx_bin;
+ BinaryFindContext *ctx;
+ Eterm *hp;
- ERTS_GET_BINARY_BYTES(subject, bytes, bitoffs, bitsize);
- if (bitsize != 0) {
- goto badarg;
- }
- if (bitoffs != 0) {
- bytes = erts_get_aligned_binary_bytes(subject, &temp_alloc);
- }
- if (state_term != NIL) {
- Eterm *ptr = big_val(state_term);
- type = ptr[1];
+ ASSERT(src->exported == 0);
+ ctx_bin = erts_create_magic_binary(sizeof(BinaryFindContext),
+ bf_context_destructor);
+ ctx = ERTS_MAGIC_BIN_DATA(ctx_bin);
+ sys_memcpy(ctx, src, sizeof(BinaryFindContext));
+ if (ctx->pat_bin != NULL && ctx->pat_term == THE_NON_VALUE) {
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE * 2);
+ ctx->pat_term = erts_mk_magic_ref(&hp, &MSO(p), ctx->pat_bin);
+ } else {
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
}
+ ctx->trap_term = erts_mk_magic_ref(&hp, &MSO(p), ctx_bin);
+ ctx->exported = 1;
+ return ctx;
+}
- if (type == am_bm) {
- BMData *bm;
- Sint pos;
- Eterm ret,tpl;
- Eterm *hp;
- BMFindAllState state;
- Uint reds = get_reds(p, BM_LOOP_FACTOR);
- Uint save_reds = reds;
+static int bf_context_destructor(Binary *ctx_bin)
+{
+ BinaryFindContext *ctx;
- bm = (BMData *) ERTS_MAGIC_BIN_DATA(bin);
-#ifdef HARDDEBUG
- dump_bm_data(bm);
-#endif
- if (state_term == NIL) {
- bm_init_find_all(&state, hsstart, hsend);
- } else {
- Eterm *ptr = big_val(state_term);
- bm_restore_find_all(&state,(char *) (ptr+2));
+ ctx = ERTS_MAGIC_BIN_DATA(ctx_bin);
+ if (ctx->state != BFDone) {
+ if (ctx->search->done != NULL) {
+ ctx->search->done(ctx);
}
+ ctx->state = BFDone;
+ }
+ return 1;
+}
- pos = bm_find_all_non_overlapping(&state, bm, bytes, &reds);
- if (pos == BM_NOT_FOUND) {
- ret = NIL;
- } else if (pos == BM_RESTART) {
- int x =
- (SIZEOF_BM_SERIALIZED_FIND_ALL_STATE(state) / sizeof(Eterm)) +
- !!(SIZEOF_BM_SERIALIZED_FIND_ALL_STATE(state) % sizeof(Eterm));
#ifdef HARDDEBUG
- erts_printf("Trap bm!\n");
-#endif
- hp = HAlloc(p,x+2);
- hp[0] = make_pos_bignum_header(x+1);
- hp[1] = type;
- bm_serialize_find_all(&state, (char *) (hp+2));
- *res_term = make_big(hp);
- erts_free_aligned_binary_bytes(temp_alloc);
- bm_clean_find_all(&state);
- return DO_BIN_MATCH_RESTART;
- } else {
- FindallData *fad = state.out;
- int i;
- for (i = 0; i < state.m; ++i) {
- fad[i].epos = erts_make_integer(fad[i].pos,p);
- fad[i].elen = erts_make_integer(fad[i].len,p);
- }
- hp = HAlloc(p,state.m * (3 + 2));
- ret = NIL;
- for (i = state.m - 1; i >= 0; --i) {
- tpl = TUPLE2(hp, fad[i].epos, fad[i].elen);
- hp +=3;
- ret = CONS(hp,tpl,ret);
- hp += 2;
- }
- }
- erts_free_aligned_binary_bytes(temp_alloc);
- bm_clean_find_all(&state);
- BUMP_REDS(p, (save_reds - reds) / BM_LOOP_FACTOR);
- *res_term = ret;
- return DO_BIN_MATCH_OK;
- } else if (type == am_ac) {
+static void bf_context_dump(BinaryFindContext *ctx)
+{
+ if (ctx->pat_type == am_bm) {
+ BMData *bm;
+ bm = ERTS_MAGIC_BIN_DATA(ctx->pat_bin);
+ dump_bm_data(bm);
+ } else {
ACTrie *act;
- int acr;
- ACFindAllState state;
- Eterm ret,tpl;
- Eterm *hp;
- Uint reds = get_reds(p, AC_LOOP_FACTOR);
- Uint save_reds = reds;
-
- act = (ACTrie *) ERTS_MAGIC_BIN_DATA(bin);
-#ifdef HARDDEBUG
+ act = ERTS_MAGIC_BIN_DATA(ctx->pat_bin);
dump_ac_trie(act);
+ }
+}
#endif
- if (state_term == NIL) {
- ac_init_find_all(&state, act, hsstart, hsend);
- } else {
- Eterm *ptr = big_val(state_term);
- ac_restore_find_all(&state,(char *) (ptr+2));
+
+static Eterm do_match_not_found_result(Process *p, Eterm subject, BinaryFindContext **ctxp);
+static Eterm do_match_single_result(Process *p, Eterm subject, BinaryFindContext **ctxp);
+static Eterm do_match_global_result(Process *p, Eterm subject, BinaryFindContext **ctxp);
+static Eterm do_split_not_found_result(Process *p, Eterm subject, BinaryFindContext **ctxp);
+static Eterm do_split_single_result(Process *p, Eterm subject, BinaryFindContext **ctxp);
+static Eterm do_split_global_result(Process *p, Eterm subject, BinaryFindContext **ctxp);
+
+static BFReturn maybe_binary_match_compile(BinaryFindContext *ctx, Eterm arg, Binary **pat_bin)
+{
+ Eterm *tp;
+ ctx->pat_term = THE_NON_VALUE;
+ if (is_tuple(arg)) {
+ tp = tuple_val(arg);
+ if (arityval(*tp) != 2 || is_not_atom(tp[1])) {
+ return BF_BADARG;
}
- acr = ac_find_all_non_overlapping(&state, bytes, &reds);
- if (acr == AC_NOT_FOUND) {
- ret = NIL;
- } else if (acr == AC_RESTART) {
- int x =
- (SIZEOF_AC_SERIALIZED_FIND_ALL_STATE(state) / sizeof(Eterm)) +
- !!(SIZEOF_AC_SERIALIZED_FIND_ALL_STATE(state) % sizeof(Eterm));
-#ifdef HARDDEBUG
- erts_printf("Trap ac!\n");
-#endif
- hp = HAlloc(p,x+2);
- hp[0] = make_pos_bignum_header(x+1);
- hp[1] = type;
- ac_serialize_find_all(&state, (char *) (hp+2));
- *res_term = make_big(hp);
- erts_free_aligned_binary_bytes(temp_alloc);
- ac_clean_find_all(&state);
- return DO_BIN_MATCH_RESTART;
- } else {
- FindallData *fad = state.out;
- int i;
- for (i = 0; i < state.m; ++i) {
- fad[i].epos = erts_make_integer(fad[i].pos,p);
- fad[i].elen = erts_make_integer(fad[i].len,p);
- }
- hp = HAlloc(p,state.m * (3 + 2));
- ret = NIL;
- for (i = state.m - 1; i >= 0; --i) {
- tpl = TUPLE2(hp, fad[i].epos, fad[i].elen);
- hp +=3;
- ret = CONS(hp,tpl,ret);
- hp += 2;
- }
+ if (((tp[1] != am_bm) && (tp[1] != am_ac)) ||
+ !is_internal_magic_ref(tp[2])) {
+ return BF_BADARG;
}
- erts_free_aligned_binary_bytes(temp_alloc);
- ac_clean_find_all(&state);
- BUMP_REDS(p, (save_reds - reds) / AC_LOOP_FACTOR);
- *res_term = ret;
- return DO_BIN_MATCH_OK;
+ *pat_bin = erts_magic_ref2bin(tp[2]);
+ if ((tp[1] == am_bm &&
+ ERTS_MAGIC_BIN_DESTRUCTOR(*pat_bin) != cleanup_my_data_bm) ||
+ (tp[1] == am_ac &&
+ ERTS_MAGIC_BIN_DESTRUCTOR(*pat_bin) != cleanup_my_data_ac)) {
+ *pat_bin = NULL;
+ return BF_BADARG;
+ }
+ ctx->pat_type = tp[1];
+ ctx->pat_term = tp[2];
+ } else if (do_binary_match_compile(arg, &(ctx->pat_type), pat_bin) != 0) {
+ return BF_BADARG;
}
- badarg:
- return DO_BIN_MATCH_BADARG;
+ return BF_OK;
}
static int parse_match_opts_list(Eterm l, Eterm bin, Uint *posp, Uint *endp)
@@ -1293,7 +1207,7 @@ static int parse_match_opts_list(Eterm l, Eterm bin, Uint *posp, Uint *endp)
Eterm *tp;
Uint pos;
Sint len;
- if (l == ((Eterm) 0) || l == NIL) {
+ if (l == THE_NON_VALUE || l == NIL) {
/* Invalid term or NIL, we're called from binary_match(es)_2 or
 have no options */
*posp = 0;
@@ -1324,9 +1238,9 @@ static int parse_match_opts_list(Eterm l, Eterm bin, Uint *posp, Uint *endp)
goto badarg;
}
if (len < 0) {
- Sint lentmp = -len;
+ Uint lentmp = -(Uint)len;
/* overflow */
- if (lentmp == len || lentmp < 0 || -lentmp != len) {
+ if ((Sint)lentmp < 0) {
goto badarg;
}
len = lentmp;
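The rewritten negative-length check above is worth unpacking: negating through Uint is always well-defined, and the only input that `(Sint)lentmp < 0` rejects is the most negative Sint, whose magnitude is not representable; every other negative length is folded from the scope {pos, -n} into {pos - n, n}. A sketch with long/unsigned long standing in for Sint/Uint; the signed cast back mirrors the ERTS idiom and assumes two's complement:

    #include <limits.h>
    #include <stdio.h>

    static int normalize(unsigned long *pos, long *len)
    {
        if (*len < 0) {
            unsigned long lentmp = -(unsigned long) *len; /* well-defined */
            if ((long) lentmp < 0)
                return 1;          /* only LONG_MIN survives negation */
            *len = (long) lentmp;
            *pos -= (unsigned long) *len;
        }
        return 0;
    }

    int main(void)
    {
        unsigned long pos = 10;
        long len = -3;
        printf("%d %lu %ld\n", normalize(&pos, &len), pos, len); /* 0 7 3 */
        len = LONG_MIN;
        printf("%d\n", normalize(&pos, &len));                   /* 1 */
        return 0;
    }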
@@ -1351,187 +1265,605 @@ static int parse_match_opts_list(Eterm l, Eterm bin, Uint *posp, Uint *endp)
}
}
-static BIF_RETTYPE binary_match_trap(BIF_ALIST_3)
+static int parse_split_opts_list(Eterm l, Eterm bin, Uint *posp, Uint *endp, Uint *optp)
{
- int runres;
- Eterm result;
- Binary *bin = ((ProcBin *) binary_val(BIF_ARG_3))->val;
- runres = do_binary_match(BIF_P,BIF_ARG_1,0,0,NIL,bin,BIF_ARG_2,&result);
- if (runres == DO_BIN_MATCH_OK) {
- BIF_RET(result);
+ Eterm *tp;
+ Uint pos;
+ Sint len;
+ *optp = 0;
+ *posp = 0;
+ *endp = binary_size(bin);
+ if (l == THE_NON_VALUE || l == NIL) {
+ return 0;
+ } else if (is_list(l)) {
+ while(is_list(l)) {
+ Eterm t = CAR(list_val(l));
+ Uint orig_size;
+ if (is_atom(t)) {
+ if (t == am_global) {
+ *optp |= BF_FLAG_GLOBAL;
+ l = CDR(list_val(l));
+ continue;
+ }
+ if (t == am_trim) {
+ *optp |= BF_FLAG_SPLIT_TRIM;
+ l = CDR(list_val(l));
+ continue;
+ }
+ if (t == am_trim_all) {
+ *optp |= BF_FLAG_SPLIT_TRIM_ALL;
+ l = CDR(list_val(l));
+ continue;
+ }
+ }
+ if (!is_tuple(t)) {
+ goto badarg;
+ }
+ tp = tuple_val(t);
+ if (arityval(*tp) != 2) {
+ goto badarg;
+ }
+ if (tp[1] != am_scope || is_not_tuple(tp[2])) {
+ goto badarg;
+ }
+ tp = tuple_val(tp[2]);
+ if (arityval(*tp) != 2) {
+ goto badarg;
+ }
+ if (!term_to_Uint(tp[1], &pos)) {
+ goto badarg;
+ }
+ if (!term_to_Sint(tp[2], &len)) {
+ goto badarg;
+ }
+ if (len < 0) {
+ Uint lentmp = -(Uint)len;
+ /* overflow */
+ if ((Sint)lentmp < 0) {
+ goto badarg;
+ }
+ len = lentmp;
+ pos -= len;
+ }
+ /* overflow */
+ if ((pos + len) < pos || (len > 0 && (pos + len) == pos)) {
+ goto badarg;
+ }
+ *endp = len + pos;
+ *posp = pos;
+ if ((orig_size = binary_size(bin)) < pos ||
+ orig_size < (*endp)) {
+ goto badarg;
+ }
+ l = CDR(list_val(l));
+ }
+ return 0;
} else {
- BUMP_ALL_REDS(BIF_P);
- BIF_TRAP3(&binary_match_trap_export, BIF_P, BIF_ARG_1, result,
- BIF_ARG_3);
+ badarg:
+ return 1;
}
}
-static BIF_RETTYPE binary_matches_trap(BIF_ALIST_3)
+static BFReturn do_binary_find(Process *p, Eterm subject, BinaryFindContext **ctxp,
+ Binary *pat_bin, Binary *ctx_bin, Eterm *res_term)
{
- int runres;
- Eterm result;
- Binary *bin = ((ProcBin *) binary_val(BIF_ARG_3))->val;
- runres = do_binary_matches(BIF_P,BIF_ARG_1,0,0,NIL,bin,BIF_ARG_2,&result);
- if (runres == DO_BIN_MATCH_OK) {
- BIF_RET(result);
+ BinaryFindContext *ctx;
+ int is_first_call;
+ Uint initial_reds;
+ BFReturn runres;
+
+ if (ctx_bin == NULL) {
+ is_first_call = 1;
+ ctx = *ctxp;
} else {
- BUMP_ALL_REDS(BIF_P);
- BIF_TRAP3(&binary_matches_trap_export, BIF_P, BIF_ARG_1, result,
- BIF_ARG_3);
+ is_first_call = 0;
+ ctx = ERTS_MAGIC_BIN_DATA(ctx_bin);
+ ctx->pat_bin = pat_bin;
+ *ctxp = ctx;
}
-}
-BIF_RETTYPE binary_match_3(BIF_ALIST_3)
-{
- return binary_match(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ initial_reds = ctx->reds = get_reds(p, ctx->loop_factor);
+
+ switch (ctx->state) {
+ case BFSearch: {
+ byte *bytes;
+ Uint bitoffs, bitsize;
+ byte *temp_alloc = NULL;
+
+ ERTS_GET_BINARY_BYTES(subject, bytes, bitoffs, bitsize);
+ if (bitsize != 0) {
+ goto badarg;
+ }
+ if (bitoffs != 0) {
+ bytes = erts_get_aligned_binary_bytes(subject, &temp_alloc);
+ }
+#ifdef HARDDEBUG
+ bf_context_dump(ctx);
+#endif
+ runres = ctx->search->find(ctx, bytes);
+ if (runres == BF_NOT_FOUND) {
+ *res_term = ctx->not_found(p, subject, &ctx);
+ *ctxp = ctx;
+ } else if (runres == BF_RESTART) {
+#ifdef HARDDEBUG
+ if (ctx->pat_type == am_ac) {
+ erts_printf("Trap ac!\n");
+ } else {
+ erts_printf("Trap bm!\n");
+ }
+#endif
+ if (is_first_call) {
+ ctx = bf_context_export(p, ctx);
+ *ctxp = ctx;
+ erts_set_gc_state(p, 0);
+ }
+ erts_free_aligned_binary_bytes(temp_alloc);
+ *res_term = THE_NON_VALUE;
+ BUMP_ALL_REDS(p);
+ return BF_RESTART;
+ } else {
+ *res_term = ctx->found(p, subject, &ctx);
+ *ctxp = ctx;
+ }
+ erts_free_aligned_binary_bytes(temp_alloc);
+ if (*res_term == THE_NON_VALUE) {
+ if (is_first_call) {
+ erts_set_gc_state(p, 0);
+ }
+ BUMP_ALL_REDS(p);
+ return BF_RESTART;
+ }
+ if (ctx->search->done != NULL) {
+ ctx->search->done(ctx);
+ }
+ ctx->state = BFDone;
+ if (!is_first_call) {
+ erts_set_gc_state(p, 1);
+ }
+ BUMP_REDS(p, (initial_reds - ctx->reds) / ctx->loop_factor);
+ return BF_OK;
+ }
+ case BFResult: {
+ *res_term = ctx->found(p, subject, &ctx);
+ *ctxp = ctx;
+ if (*res_term == THE_NON_VALUE) {
+ if (is_first_call) {
+ erts_set_gc_state(p, 0);
+ }
+ BUMP_ALL_REDS(p);
+ return BF_RESTART;
+ }
+ if (ctx->search->done != NULL) {
+ ctx->search->done(ctx);
+ }
+ ctx->state = BFDone;
+ if (!is_first_call) {
+ erts_set_gc_state(p, 1);
+ }
+ BUMP_REDS(p, (initial_reds - ctx->reds) / ctx->loop_factor);
+ return BF_OK;
+ }
+ default:
+ ASSERT(!"Unknown state in do_binary_find");
+ }
+
+badarg:
+ if (!is_first_call) {
+ if (ctx->search->done != NULL) {
+ ctx->search->done(ctx);
+ }
+ ctx->state = BFDone;
+ erts_set_gc_state(p, 1);
+ }
+ return BF_BADARG;
}
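do_binary_find above is a standard ERTS yielding BIF body: it converts the process's remaining reductions into a search budget, and when the budget runs out it exports its state into a magic binary and reports BF_RESTART so the BIF can trap back into itself. A stripped-down sketch of that resume loop with hypothetical names; in reality the emulator, not a while loop, performs the re-entry:

    #include <stdio.h>

    typedef struct { unsigned pos; } Ctx;
    enum { DONE, RESTART };

    static int scan(Ctx *ctx, const char *hay, unsigned budget,
                    unsigned *hits)
    {
        unsigned i = ctx->pos;
        for (; hay[i] != '\0'; i++) {
            if (budget-- == 0) {    /* out of reductions: yield */
                ctx->pos = i;       /* persist progress in the context */
                return RESTART;
            }
            if (hay[i] == 'x')
                (*hits)++;
        }
        return DONE;
    }

    int main(void)
    {
        Ctx ctx = { 0 };
        unsigned hits = 0;
        const char *hay = "axbxcxdxex";
        while (scan(&ctx, hay, 3, &hits) == RESTART)
            ;                       /* emulator would reschedule here */
        printf("%u\n", hits);       /* prints 5 */
        return 0;
    }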
static BIF_RETTYPE
-binary_match(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
+binary_match(Process *p, Eterm arg1, Eterm arg2, Eterm arg3, Uint flags)
{
- Uint hsstart;
- Uint hsend;
- Eterm *tp;
- Eterm type;
- Binary *bin;
- Eterm bin_term = NIL;
+ BinaryFindContext c_buff;
+ BinaryFindContext *ctx = &c_buff;
+ Binary *pat_bin;
int runres;
Eterm result;
- if (is_not_binary(arg1)) {
+ if (is_not_binary(arg1) || binary_bitsize(arg1) != 0) {
goto badarg;
}
- if (parse_match_opts_list(arg3,arg1,&hsstart,&hsend)) {
+ ctx->flags = flags;
+ if (parse_match_opts_list(arg3, arg1, &(ctx->hsstart), &(ctx->hsend))) {
goto badarg;
}
- if (hsend == 0) {
- BIF_RET(am_nomatch);
+ if (ctx->hsend == 0) {
+ result = do_match_not_found_result(p, arg1, &ctx);
+ BIF_RET(result);
}
- if (is_tuple(arg2)) {
- tp = tuple_val(arg2);
- if (arityval(*tp) != 2 || is_not_atom(tp[1])) {
- goto badarg;
- }
- if (((tp[1] != am_bm) && (tp[1] != am_ac)) ||
- !ERTS_TERM_IS_MAGIC_BINARY(tp[2])) {
- goto badarg;
- }
- type = tp[1];
- bin = ((ProcBin *) binary_val(tp[2]))->val;
- if (type == am_bm &&
- ERTS_MAGIC_BIN_DESTRUCTOR(bin) != cleanup_my_data_bm) {
- goto badarg;
- }
- if (type == am_ac &&
- ERTS_MAGIC_BIN_DESTRUCTOR(bin) != cleanup_my_data_ac) {
- goto badarg;
- }
- bin_term = tp[2];
- } else if (do_binary_match_compile(arg2,&type,&bin)) {
+ if (maybe_binary_match_compile(ctx, arg2, &pat_bin) != BF_OK) {
goto badarg;
}
- runres = do_binary_match(p,arg1,hsstart,hsend,type,bin,NIL,&result);
- if (runres == DO_BIN_MATCH_RESTART && bin_term == NIL) {
- Eterm *hp = HAlloc(p, PROC_BIN_SIZE);
- bin_term = erts_mk_magic_binary_term(&hp, &MSO(p), bin);
- } else if (bin_term == NIL) {
- erts_bin_free(bin);
+ bf_context_init(ctx, do_match_not_found_result, do_match_single_result,
+ do_match_global_result, pat_bin);
+ runres = do_binary_find(p, arg1, &ctx, pat_bin, NULL, &result);
+ if (runres == BF_OK && ctx->pat_term == THE_NON_VALUE) {
+ erts_bin_free(pat_bin);
}
switch (runres) {
- case DO_BIN_MATCH_OK:
+ case BF_OK:
BIF_RET(result);
- case DO_BIN_MATCH_RESTART:
- BUMP_ALL_REDS(p);
- BIF_TRAP3(&binary_match_trap_export, p, arg1, result, bin_term);
+ case BF_RESTART:
+ ASSERT(result == THE_NON_VALUE && ctx->trap_term != result && ctx->pat_term != result);
+ BIF_TRAP3(&binary_find_trap_export, p, arg1, ctx->trap_term, ctx->pat_term);
default:
goto badarg;
}
- badarg:
- BIF_ERROR(p,BADARG);
+badarg:
+ BIF_ERROR(p, BADARG);
+}
+
+BIF_RETTYPE binary_match_2(BIF_ALIST_2)
+{
+ return binary_match(BIF_P, BIF_ARG_1, BIF_ARG_2, THE_NON_VALUE, 0);
+}
+
+BIF_RETTYPE binary_match_3(BIF_ALIST_3)
+{
+ return binary_match(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, 0);
+}
+
+BIF_RETTYPE binary_matches_2(BIF_ALIST_2)
+{
+ return binary_match(BIF_P, BIF_ARG_1, BIF_ARG_2, THE_NON_VALUE, BF_FLAG_GLOBAL);
}
BIF_RETTYPE binary_matches_3(BIF_ALIST_3)
{
- return binary_matches(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ return binary_match(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, BF_FLAG_GLOBAL);
}
static BIF_RETTYPE
-binary_matches(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
+binary_split(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
{
- Uint hsstart, hsend;
- Eterm *tp;
- Eterm type;
- Binary *bin;
- Eterm bin_term = NIL;
+ BinaryFindContext c_buff;
+ BinaryFindContext *ctx = &c_buff;
+ Binary *pat_bin;
int runres;
Eterm result;
- if (is_not_binary(arg1)) {
+ if (is_not_binary(arg1) || binary_bitsize(arg1) != 0) {
goto badarg;
}
- if (parse_match_opts_list(arg3,arg1,&hsstart,&hsend)) {
+ if (parse_split_opts_list(arg3, arg1, &(ctx->hsstart), &(ctx->hsend), &(ctx->flags))) {
goto badarg;
}
- if (hsend == 0) {
- BIF_RET(NIL);
+ if (ctx->hsend == 0) {
+ result = do_split_not_found_result(p, arg1, &ctx);
+ BIF_RET(result);
}
- if (is_tuple(arg2)) {
- tp = tuple_val(arg2);
- if (arityval(*tp) != 2 || is_not_atom(tp[1])) {
- goto badarg;
- }
- if (((tp[1] != am_bm) && (tp[1] != am_ac)) ||
- !ERTS_TERM_IS_MAGIC_BINARY(tp[2])) {
- goto badarg;
- }
- type = tp[1];
- bin = ((ProcBin *) binary_val(tp[2]))->val;
- if (type == am_bm &&
- ERTS_MAGIC_BIN_DESTRUCTOR(bin) != cleanup_my_data_bm) {
- goto badarg;
- }
- if (type == am_ac &&
- ERTS_MAGIC_BIN_DESTRUCTOR(bin) != cleanup_my_data_ac) {
- goto badarg;
- }
- bin_term = tp[2];
- } else if (do_binary_match_compile(arg2,&type,&bin)) {
+ if (maybe_binary_match_compile(ctx, arg2, &pat_bin) != BF_OK) {
goto badarg;
}
- runres = do_binary_matches(p,arg1,hsstart,hsend,type,bin,
- NIL,&result);
- if (runres == DO_BIN_MATCH_RESTART && bin_term == NIL) {
- Eterm *hp = HAlloc(p, PROC_BIN_SIZE);
- bin_term = erts_mk_magic_binary_term(&hp, &MSO(p), bin);
- } else if (bin_term == NIL) {
- erts_bin_free(bin);
+ bf_context_init(ctx, do_split_not_found_result, do_split_single_result,
+ do_split_global_result, pat_bin);
+ runres = do_binary_find(p, arg1, &ctx, pat_bin, NULL, &result);
+ if (runres == BF_OK && ctx->pat_term == THE_NON_VALUE) {
+ erts_bin_free(pat_bin);
}
switch (runres) {
- case DO_BIN_MATCH_OK:
+ case BF_OK:
BIF_RET(result);
- case DO_BIN_MATCH_RESTART:
- BUMP_ALL_REDS(p);
- BIF_TRAP3(&binary_matches_trap_export, p, arg1, result,
- bin_term);
+ case BF_RESTART:
+ ASSERT(result == THE_NON_VALUE && ctx->trap_term != result && ctx->pat_term != result);
+ BIF_TRAP3(&binary_find_trap_export, p, arg1, ctx->trap_term, ctx->pat_term);
default:
goto badarg;
}
- badarg:
- BIF_ERROR(p,BADARG);
+badarg:
+ BIF_ERROR(p, BADARG);
}
+BIF_RETTYPE binary_split_2(BIF_ALIST_2)
+{
+ return binary_split(BIF_P, BIF_ARG_1, BIF_ARG_2, THE_NON_VALUE);
+}
-BIF_RETTYPE binary_match_2(BIF_ALIST_2)
+BIF_RETTYPE binary_split_3(BIF_ALIST_3)
{
- return binary_match(BIF_P,BIF_ARG_1,BIF_ARG_2,((Eterm) 0));
+ return binary_split(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
}
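
The refactoring above collapses binary:match/2,3, binary:matches/2,3 and binary:split/2,3 into thin wrappers over one generic driver: do_binary_find() runs the search, bf_context_init() registers a triple of result callbacks (not-found, single match, global match) that decide what term to build, and BF_FLAG_GLOBAL selects find-all behaviour. A minimal, self-contained sketch of that dispatch pattern (plain C, not ERTS code; all names are illustrative):

    #include <stdio.h>
    #include <string.h>

    typedef struct FindCtx FindCtx;
    struct FindCtx {
        const char *hit;                   /* first match, or NULL */
        const char *(*not_found)(FindCtx *);
        const char *(*single)(FindCtx *);
    };

    static const char *nf(FindCtx *c)  { (void)c; return "nomatch"; }
    static const char *one(FindCtx *c) { return c->hit; }

    /* The shared search loop; result shaping is delegated to callbacks. */
    static const char *find_driver(FindCtx *c, const char *hay, const char *needle)
    {
        c->hit = strstr(hay, needle);
        return c->hit ? c->single(c) : c->not_found(c);
    }

    int main(void)
    {
        FindCtx c = { NULL, nf, one };
        printf("%s\n", find_driver(&c, "hello world", "wor")); /* "world"   */
        printf("%s\n", find_driver(&c, "hello world", "xyz")); /* "nomatch" */
        return 0;
    }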
+static Eterm do_match_not_found_result(Process *p, Eterm subject, BinaryFindContext **ctxp)
+{
+ if ((*ctxp)->flags & BF_FLAG_GLOBAL) {
+ return NIL;
+ } else {
+ return am_nomatch;
+ }
+}
-BIF_RETTYPE binary_matches_2(BIF_ALIST_2)
+static Eterm do_match_single_result(Process *p, Eterm subject, BinaryFindContext **ctxp)
+{
+ BinaryFindContext *ctx = (*ctxp);
+ BinaryFindFirstContext *ff = &(ctx->u.ff);
+ Eterm erlen;
+ Eterm *hp;
+ Eterm ret;
+
+ erlen = erts_make_integer((Uint)(ff->len), p);
+ ret = erts_make_integer(ff->pos, p);
+ hp = HAlloc(p, 3);
+ ret = TUPLE2(hp, ret, erlen);
+
+ return ret;
+}
+
+static Eterm do_match_global_result(Process *p, Eterm subject, BinaryFindContext **ctxp)
+{
+ BinaryFindContext *ctx = (*ctxp);
+ BinaryFindAllContext *fa = &(ctx->u.fa);
+ FindallData *fad;
+ Eterm tpl;
+ Sint i;
+ register Uint reds = ctx->reds;
+
+ if (ctx->state == BFSearch) {
+ if (ctx->pat_type == am_ac) {
+ fa->data = fa->d.ac.out;
+ fa->size = fa->d.ac.m;
+ } else {
+ fa->data = fa->d.bm.out;
+ fa->size = fa->d.bm.m;
+ }
+ fa->tail = fa->size - 1;
+ fa->head = 0;
+ fa->end_pos = 0;
+ fa->term = NIL;
+ if (ctx->exported == 0 && ((fa->size * 2) >= reds)) {
+ ctx = bf_context_export(p, ctx);
+ *ctxp = ctx;
+ fa = &(ctx->u.fa);
+ }
+ erts_factory_proc_prealloc_init(&(fa->factory), p, fa->size * (3 + 2));
+ ctx->state = BFResult;
+ }
+
+ fad = fa->data;
+
+ if (fa->end_pos == 0) {
+ for (i = fa->head; i < fa->size; ++i) {
+ if (--reds == 0) {
+ ASSERT(ctx->exported == 1);
+ fa->head = i;
+ ctx->reds = reds;
+ return THE_NON_VALUE;
+ }
+ fad[i].epos = erts_make_integer(fad[i].pos, p);
+ fad[i].elen = erts_make_integer(fad[i].len, p);
+ }
+ fa->end_pos = 1;
+ fa->head = fa->tail;
+ }
+
+ for (i = fa->head; i >= 0; --i) {
+ if (--reds == 0) {
+ ASSERT(ctx->exported == 1);
+ fa->head = i;
+ ctx->reds = reds;
+ return THE_NON_VALUE;
+ }
+ tpl = TUPLE2(fa->factory.hp, fad[i].epos, fad[i].elen);
+ fa->factory.hp += 3;
+ fa->term = CONS(fa->factory.hp, tpl, fa->term);
+ fa->factory.hp += 2;
+ }
+ ctx->reds = reds;
+ erts_factory_close(&(fa->factory));
+
+ return fa->term;
+}
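
do_match_global_result() above is written to be resumable: it charges one reduction per element, and when the budget hits zero it stores its loop position in the (exported) context and returns THE_NON_VALUE so the caller can trap and re-enter later. A standalone sketch of that yield/resume shape, assuming a fixed per-slice budget (not ERTS code):

    #include <stdio.h>

    typedef struct { int head; unsigned reds; long sum; } Ctx;

    static int sum_step(Ctx *ctx, const int *v, int n)
    {
        int i;
        for (i = ctx->head; i < n; i++) {
            if (ctx->reds == 0) {     /* budget exhausted: yield */
                ctx->head = i;        /* remember where to resume */
                return 0;
            }
            ctx->reds--;
            ctx->sum += v[i];
        }
        return 1;                     /* done */
    }

    int main(void)
    {
        int v[10] = {1,2,3,4,5,6,7,8,9,10};
        Ctx ctx = {0, 4, 0};          /* tiny budget to force yields */
        while (!sum_step(&ctx, v, 10))
            ctx.reds = 4;             /* "trap": grant a fresh budget */
        printf("%ld\n", ctx.sum);     /* 55 */
        return 0;
    }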
+
+static Eterm do_split_not_found_result(Process *p, Eterm subject, BinaryFindContext **ctxp)
+{
+ BinaryFindContext *ctx = (*ctxp);
+ Eterm *hp;
+ Eterm ret;
+
+ if (ctx->flags & (BF_FLAG_SPLIT_TRIM | BF_FLAG_SPLIT_TRIM_ALL)
+ && binary_size(subject) == 0) {
+ return NIL;
+ }
+ hp = HAlloc(p, 2);
+ ret = CONS(hp, subject, NIL);
+ return ret;
+}
+
+static Eterm do_split_single_result(Process *p, Eterm subject, BinaryFindContext **ctxp)
{
- return binary_matches(BIF_P,BIF_ARG_1,BIF_ARG_2,((Eterm) 0));
+ BinaryFindContext *ctx = (*ctxp);
+ BinaryFindFirstContext *ff = &(ctx->u.ff);
+ Sint pos;
+ Sint len;
+ size_t orig_size;
+ Eterm orig;
+ Uint offset;
+ Uint bit_offset;
+ Uint bit_size;
+ ErlSubBin *sb1;
+ ErlSubBin *sb2;
+ Eterm *hp;
+ Eterm ret;
+
+ pos = ff->pos;
+ len = ff->len;
+
+ orig_size = binary_size(subject);
+
+ if ((ctx->flags & (BF_FLAG_SPLIT_TRIM | BF_FLAG_SPLIT_TRIM_ALL)) &&
+ (orig_size - pos - len) == 0) {
+ if (pos == 0) {
+ ret = NIL;
+ } else {
+ hp = HAlloc(p, (ERL_SUB_BIN_SIZE + 2));
+ ERTS_GET_REAL_BIN(subject, orig, offset, bit_offset, bit_size);
+ sb1 = (ErlSubBin *) hp;
+ sb1->thing_word = HEADER_SUB_BIN;
+ sb1->size = pos;
+ sb1->offs = offset;
+ sb1->orig = orig;
+ sb1->bitoffs = bit_offset;
+ sb1->bitsize = bit_size;
+ sb1->is_writable = 0;
+ hp += ERL_SUB_BIN_SIZE;
+
+ ret = CONS(hp, make_binary(sb1), NIL);
+ hp += 2;
+ }
+ } else {
+ if ((ctx->flags & BF_FLAG_SPLIT_TRIM_ALL) && (pos == 0)) {
+ hp = HAlloc(p, 1 * (ERL_SUB_BIN_SIZE + 2));
+ ERTS_GET_REAL_BIN(subject, orig, offset, bit_offset, bit_size);
+ sb1 = NULL;
+ } else {
+ hp = HAlloc(p, 2 * (ERL_SUB_BIN_SIZE + 2));
+ ERTS_GET_REAL_BIN(subject, orig, offset, bit_offset, bit_size);
+ sb1 = (ErlSubBin *) hp;
+ sb1->thing_word = HEADER_SUB_BIN;
+ sb1->size = pos;
+ sb1->offs = offset;
+ sb1->orig = orig;
+ sb1->bitoffs = bit_offset;
+ sb1->bitsize = 0;
+ sb1->is_writable = 0;
+ hp += ERL_SUB_BIN_SIZE;
+ }
+
+ sb2 = (ErlSubBin *) hp;
+ sb2->thing_word = HEADER_SUB_BIN;
+ sb2->size = orig_size - pos - len;
+ sb2->offs = offset + pos + len;
+ sb2->orig = orig;
+ sb2->bitoffs = bit_offset;
+ sb2->bitsize = bit_size;
+ sb2->is_writable = 0;
+ hp += ERL_SUB_BIN_SIZE;
+
+ ret = CONS(hp, make_binary(sb2), NIL);
+ hp += 2;
+ if (sb1 != NULL) {
+ ret = CONS(hp, make_binary(sb1), ret);
+ hp += 2;
+ }
+ }
+ return ret;
+}
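
Note that neither half produced by do_split_single_result() copies any payload: both results are ErlSubBin headers recording {orig, offs, size} into the subject binary. A hedged sketch of the same slice-by-reference idea, with a hypothetical Slice type standing in for ErlSubBin:

    #include <stdio.h>
    #include <stddef.h>

    typedef struct { const unsigned char *orig; size_t offs, size; } Slice;

    /* Split around a match at [pos, pos+len) without copying bytes. */
    static void split_at(const unsigned char *buf, size_t size,
                         size_t pos, size_t len, Slice out[2])
    {
        out[0] = (Slice){ buf, 0, pos };                      /* before match */
        out[1] = (Slice){ buf, pos + len, size - pos - len }; /* after match  */
    }

    int main(void)
    {
        const unsigned char s[] = "abc--def";
        Slice parts[2];
        split_at(s, 8, 3, 2, parts);   /* split around "--" at pos 3, len 2 */
        printf("%.*s | %.*s\n",
               (int)parts[0].size, s + parts[0].offs,
               (int)parts[1].size, s + parts[1].offs);  /* abc | def */
        return 0;
    }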
+
+static Eterm do_split_global_result(Process *p, Eterm subject, BinaryFindContext **ctxp)
+{
+ BinaryFindContext *ctx = (*ctxp);
+ BinaryFindAllContext *fa = &(ctx->u.fa);
+ FindallData *fad;
+ Eterm orig;
+ size_t orig_size;
+ Uint offset;
+ Uint bit_offset;
+ Uint bit_size;
+ ErlSubBin *sb;
+ Uint do_trim;
+ Sint i;
+ register Uint reds = ctx->reds;
+
+ if (ctx->state == BFSearch) {
+ if (ctx->pat_type == am_ac) {
+ fa->data = fa->d.ac.out;
+ fa->size = fa->d.ac.m;
+ } else {
+ fa->data = fa->d.bm.out;
+ fa->size = fa->d.bm.m;
+ }
+ fa->tail = fa->size - 1;
+ fa->head = fa->tail;
+ orig_size = binary_size(subject);
+ fa->end_pos = (Uint)(orig_size);
+ fa->term = NIL;
+ if (ctx->exported == 0 && ((fa->head + 1) >= reds)) {
+ ctx = bf_context_export(p, ctx);
+ *ctxp = ctx;
+ fa = &(ctx->u.fa);
+ }
+ erts_factory_proc_prealloc_init(&(fa->factory), p, (fa->size + 1) * (ERL_SUB_BIN_SIZE + 2));
+ ctx->state = BFResult;
+ }
+
+ ERTS_GET_REAL_BIN(subject, orig, offset, bit_offset, bit_size);
+ ASSERT(bit_size == 0);
+ fad = fa->data;
+ do_trim = ctx->flags & (BF_FLAG_SPLIT_TRIM | BF_FLAG_SPLIT_TRIM_ALL);
+
+ for (i = fa->head; i >= 0; --i) {
+ if (--reds == 0) {
+ ASSERT(ctx->exported == 1);
+ fa->head = i;
+ ctx->reds = reds;
+ if (!do_trim && (ctx->flags & BF_FLAG_SPLIT_TRIM)) {
+ ctx->flags &= ~BF_FLAG_SPLIT_TRIM;
+ }
+ return THE_NON_VALUE;
+ }
+ sb = (ErlSubBin *)(fa->factory.hp);
+ sb->size = fa->end_pos - (fad[i].pos + fad[i].len);
+ if (!(sb->size == 0 && do_trim)) {
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->offs = offset + fad[i].pos + fad[i].len;
+ sb->orig = orig;
+ sb->bitoffs = bit_offset;
+ sb->bitsize = 0;
+ sb->is_writable = 0;
+ fa->factory.hp += ERL_SUB_BIN_SIZE;
+ fa->term = CONS(fa->factory.hp, make_binary(sb), fa->term);
+ fa->factory.hp += 2;
+ do_trim &= ~BF_FLAG_SPLIT_TRIM;
+ }
+ fa->end_pos = fad[i].pos;
+ }
+
+ fa->head = i;
+ ctx->reds = reds;
+
+ sb = (ErlSubBin *)(fa->factory.hp);
+ sb->size = fad[0].pos;
+ if (!(sb->size == 0 && do_trim)) {
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->offs = offset;
+ sb->orig = orig;
+ sb->bitoffs = bit_offset;
+ sb->bitsize = 0;
+ sb->is_writable = 0;
+ fa->factory.hp += ERL_SUB_BIN_SIZE;
+ fa->term = CONS(fa->factory.hp, make_binary(sb), fa->term);
+ fa->factory.hp += 2;
+ }
+ erts_factory_close(&(fa->factory));
+
+ return fa->term;
}
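
do_split_global_result() walks the matches from last to first, so each fragment can be prepended to the accumulated list in constant time while end_pos tracks where the previous (later) fragment began; plain trim therefore only drops trailing empty fragments before the first kept one. A self-contained sketch of that back-to-front construction (illustrative names, not ERTS code):

    #include <stdio.h>

    typedef struct Node { int from, to; struct Node *next; } Node;

    int main(void)
    {
        int cuts[] = {3, 7};               /* match positions, ascending */
        int ncuts = 2, size = 10;
        static Node pool[3];
        Node *list = NULL;
        int end = size, i;

        for (i = ncuts - 1; i >= 0; i--) { /* last match first */
            pool[i + 1] = (Node){ cuts[i] + 1, end, list };
            list = &pool[i + 1];
            end = cuts[i];
        }
        pool[0] = (Node){ 0, end, list };  /* leading fragment */
        list = &pool[0];

        for (; list; list = list->next)
            printf("[%d,%d) ", list->from, list->to);
        printf("\n");                      /* [0,3) [4,7) [8,10) */
        return 0;
    }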
+static BIF_RETTYPE binary_find_trap(BIF_ALIST_3)
+{
+ int runres;
+ Eterm result;
+ Binary *ctx_bin = erts_magic_ref2bin(BIF_ARG_2);
+ Binary *pat_bin = erts_magic_ref2bin(BIF_ARG_3);
+ BinaryFindContext *ctx = NULL;
+
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(ctx_bin) == bf_context_destructor);
+ runres = do_binary_find(BIF_P, BIF_ARG_1, &ctx, pat_bin, ctx_bin, &result);
+ if (runres == BF_OK) {
+ ASSERT(result != THE_NON_VALUE);
+ BIF_RET(result);
+ } else {
+ ASSERT(result == THE_NON_VALUE && ctx->trap_term != result && ctx->pat_term != result);
+ BIF_TRAP3(&binary_find_trap_export, BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ }
+}
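
binary_find_trap() is the re-entry point for the traps set up above: the search context and compiled pattern survive the yield inside magic binaries, and erts_magic_ref2bin() recovers them from the trap arguments. A minimal trampoline sketch of that suspend/re-enter protocol (not the ERTS scheduler; names are illustrative):

    #include <stdio.h>

    typedef enum { BF_OK_SKETCH, BF_RESTART_SKETCH } BfRes;

    typedef struct { int pos, limit; } FindState;

    static BfRes find_some(FindState *st, int budget, int *result)
    {
        while (budget-- > 0) {
            if (st->pos == st->limit) { *result = st->pos; return BF_OK_SKETCH; }
            st->pos++;
        }
        return BF_RESTART_SKETCH;      /* "trap": come back later */
    }

    int main(void)
    {
        FindState st = {0, 10};
        int res, rounds = 0;
        while (find_some(&st, 3, &res) == BF_RESTART_SKETCH)
            rounds++;                  /* scheduler re-enters the trap BIF */
        printf("result=%d after %d traps\n", res, rounds); /* result=10 after 3 traps */
        return 0;
    }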
BIF_RETTYPE erts_binary_part(Process *p, Eterm binary, Eterm epos, Eterm elen)
{
@@ -1555,9 +1887,9 @@ BIF_RETTYPE erts_binary_part(Process *p, Eterm binary, Eterm epos, Eterm elen)
goto badarg;
}
if (len < 0) {
- Sint lentmp = -len;
+ Uint lentmp = -(Uint)len;
/* overflow */
- if (lentmp == len || lentmp < 0 || -lentmp != len) {
+ if ((Sint)lentmp < 0) {
goto badarg;
}
len = lentmp;
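
This hunk (repeated twice more below) replaces an overflow test that negated a signed value directly, which is undefined behaviour when len is the most negative Sint. The new form negates in unsigned arithmetic, which is defined for every input, and rejects exactly the one value whose negation does not fit back into Sint. A standalone demonstration of the idiom (a sketch; intptr_t stands in for Sint):

    #include <stdio.h>
    #include <stdint.h>

    static int checked_negate(intptr_t len, intptr_t *out)
    {
        uintptr_t tmp = -(uintptr_t)len;   /* defined for every input */
        if ((intptr_t)tmp < 0)             /* only INTPTR_MIN lands here */
            return 0;                      /* would overflow: badarg */
        *out = (intptr_t)tmp;
        return 1;
    }

    int main(void)
    {
        intptr_t r;
        printf("%d\n", checked_negate(-5, &r));         /* 1, r == 5  */
        printf("%d\n", checked_negate(INTPTR_MIN, &r)); /* 0: rejected */
        return 0;
    }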
@@ -1644,9 +1976,9 @@ BIF_RETTYPE erts_gc_binary_part(Process *p, Eterm *reg, Eterm live, int range_is
goto badarg;
}
if (len < 0) {
- Sint lentmp = -len;
+ Uint lentmp = -(Uint)len;
/* overflow */
- if (lentmp == len || lentmp < 0 || -lentmp != len) {
+ if ((Sint)lentmp < 0) {
goto badarg;
}
len = lentmp;
@@ -1822,7 +2154,7 @@ static int do_search_backward(CommonData *cd, Uint *posp, Uint *redsp)
}
}
-static void cleanup_common_data(Binary *bp)
+static int cleanup_common_data(Binary *bp)
{
int i;
CommonData *cd;
@@ -1839,7 +2171,7 @@ static void cleanup_common_data(Binary *bp)
break;
}
}
- return;
+ return 1;
}
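
This hunk, like the cleanup_copy_bin_state() change further down, tracks a signature change for magic-binary destructors: they now return int instead of void, with a nonzero return telling the runtime the container may be freed. A sketch of the shape of that contract (hypothetical framework, not the ERTS API):

    #include <stdio.h>
    #include <stdlib.h>

    typedef int (*destructor_t)(void *resource);

    static int my_cleanup(void *resource)
    {
        free(resource);
        return 1;                       /* fully cleaned up: free container */
    }

    static void release(void *resource, destructor_t dtor)
    {
        if (dtor(resource))
            printf("container freed\n");
        else
            printf("cleanup deferred\n");
    }

    int main(void)
    {
        release(malloc(16), my_cleanup);
        return 0;
    }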
static BIF_RETTYPE do_longest_common(Process *p, Eterm list, int direction)
@@ -1942,8 +2274,8 @@ static BIF_RETTYPE do_longest_common(Process *p, Eterm list, int direction)
cd[i].type = CL_TYPE_HEAP;
}
}
- hp = HAlloc(p, PROC_BIN_SIZE);
- bin_term = erts_mk_magic_binary_term(&hp, &MSO(p), mb);
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+ bin_term = erts_mk_magic_ref(&hp, &MSO(p), mb);
BUMP_ALL_REDS(p);
BIF_TRAP3(trapper, p, bin_term, epos,list);
}
@@ -1970,8 +2302,7 @@ static BIF_RETTYPE do_longest_common_trap(Process *p, Eterm bin_term, Eterm curr
#else
term_to_Uint(current_pos, &pos);
#endif
- ASSERT(ERTS_TERM_IS_MAGIC_BINARY(bin_term));
- bin = ((ProcBin *) binary_val(bin_term))->val;
+ bin = erts_magic_ref2bin(bin_term);
cd = (CommonData *) ERTS_MAGIC_BIN_DATA(bin);
if (direction == DIRECTION_PREFIX) {
trapper = &binary_longest_prefix_trap_export;
@@ -2213,9 +2544,9 @@ static BIF_RETTYPE binary_bin_to_list_common(Process *p,
goto badarg;
}
if (len < 0) {
- Sint lentmp = -len;
+ Uint lentmp = -(Uint)len;
/* overflow */
- if (lentmp == len || lentmp < 0 || -lentmp != len) {
+ if ((Sint)lentmp < 0) {
goto badarg;
}
len = lentmp;
@@ -2294,18 +2625,11 @@ BIF_RETTYPE binary_bin_to_list_1(BIF_ALIST_1)
BIF_ERROR(BIF_P,BADARG);
}
-/*
- * Ok, erlang:list_to_binary does not interrupt, and we really don't want
- * an alternative implementation for the exact same thing, why we
- * have descided to use the old non-restarting implementation for now.
- * In reality, there are seldom many iterations involved in doing this, so the
- * problem of long-running bifs is not really that big in this case.
- * So, for now we use the old implementation also in the module binary.
- */
+HIPE_WRAPPER_BIF_DISABLE_GC(binary_list_to_bin, 1)
BIF_RETTYPE binary_list_to_bin_1(BIF_ALIST_1)
{
- return erts_list_to_binary_bif(BIF_P, BIF_ARG_1);
+ return erts_list_to_binary_bif(BIF_P, BIF_ARG_1, bif_export[BIF_binary_list_to_bin_1]);
}
typedef struct {
@@ -2326,7 +2650,7 @@ typedef struct {
#define BINARY_COPY_LOOP_FACTOR 100
-static void cleanup_copy_bin_state(Binary *bp)
+static int cleanup_copy_bin_state(Binary *bp)
{
CopyBinState *cbs = (CopyBinState *) ERTS_MAGIC_BIN_DATA(bp);
if (cbs->result != NULL) {
@@ -2346,6 +2670,7 @@ static void cleanup_copy_bin_state(Binary *bp)
break;
}
cbs->source_type = BC_TYPE_EMPTY;
+ return 1;
}
/*
@@ -2431,9 +2756,6 @@ static BIF_RETTYPE do_binary_copy(Process *p, Eterm bin, Eterm en)
}
cbs->result = erts_bin_nrml_alloc(target_size); /* Always offheap
if trapping */
- cbs->result->flags = 0;
- cbs->result->orig_size = target_size;
- erts_refc_init(&(cbs->result->refc), 1);
t = (byte *) cbs->result->orig_bytes; /* No offset or anything */
pos = 0;
i = 0;
@@ -2445,8 +2767,8 @@ static BIF_RETTYPE do_binary_copy(Process *p, Eterm bin, Eterm en)
cbs->source_size = size;
cbs->result_pos = pos;
cbs->times_left = n-i;
- hp = HAlloc(p,PROC_BIN_SIZE);
- trap_term = erts_mk_magic_binary_term(&hp, &MSO(p), mb);
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+ trap_term = erts_mk_magic_ref(&hp, &MSO(p), mb);
BUMP_ALL_REDS(p);
BIF_TRAP2(&binary_copy_trap_export, p, bin, trap_term);
} else {
@@ -2481,7 +2803,7 @@ BIF_RETTYPE binary_copy_trap(BIF_ALIST_2)
Uint reds = get_reds(BIF_P, BINARY_COPY_LOOP_FACTOR);
byte *t;
Uint pos;
- Binary *mb = ((ProcBin *) binary_val(BIF_ARG_2))->val;
+ Binary *mb = erts_magic_ref2bin(BIF_ARG_2);
CopyBinState *cbs = (CopyBinState *) ERTS_MAGIC_BIN_DATA(mb);
Uint opos;
@@ -2555,7 +2877,6 @@ BIF_RETTYPE binary_referenced_byte_size_1(BIF_ALIST_1)
}
pb = (ProcBin *) binary_val(bin);
if (pb->thing_word == HEADER_PROC_BIN) {
- /* XXX:PaN - Halfword - orig_size is a long, we should handle that */
res = erts_make_integer((Uint) pb->val->orig_size, BIF_P);
} else { /* heap binary */
res = erts_make_integer((Uint) ((ErlHeapBin *) pb)->size, BIF_P);
@@ -2573,7 +2894,7 @@ BIF_RETTYPE binary_referenced_byte_size_1(BIF_ALIST_1)
#endif
static int get_need(Uint u) {
-#if defined(ARCH_64) && !HALFWORD_HEAP
+#if defined(ARCH_64)
if (u > 0xFFFFFFFFUL) {
if (u > 0xFFFFFFFFFFFFUL) {
if (u > 0xFFFFFFFFFFFFFFUL) {
diff --git a/erts/emulator/beam/erl_bif_chksum.c b/erts/emulator/beam/erl_bif_chksum.c
index 4302fe8f79..9417803e14 100644
--- a/erts/emulator/beam/erl_bif_chksum.c
+++ b/erts/emulator/beam/erl_bif_chksum.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2008-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2008-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c
index 56cd2ba04f..f673ef3194 100644
--- a/erts/emulator/beam/erl_bif_ddll.c
+++ b/erts/emulator/beam/erl_bif_ddll.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -45,14 +46,9 @@
#include "big.h"
#include "dist.h"
#include "erl_version.h"
+#include "erl_bif_unique.h"
#include "dtrace-wrapper.h"
-
-#ifdef ERTS_SMP
-#define DDLL_SMP 1
-#else
-#define DDLL_SMP 0
-#endif
-
+#include "lttng-wrapper.h"
/*
* Local types
@@ -104,18 +100,18 @@ static void dereference_all_processes(DE_Handle *dh);
static void restore_process_references(DE_Handle *dh);
static void ddll_no_more_references(void *vdh);
-#define lock_drv_list() erts_smp_rwmtx_rwlock(&erts_driver_list_lock)
-#define unlock_drv_list() erts_smp_rwmtx_rwunlock(&erts_driver_list_lock)
+#define lock_drv_list() erts_rwmtx_rwlock(&erts_driver_list_lock)
+#define unlock_drv_list() erts_rwmtx_rwunlock(&erts_driver_list_lock)
#define assert_drv_list_locked() \
- ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&erts_driver_list_lock) \
- || erts_smp_lc_rwmtx_is_rlocked(&erts_driver_list_lock))
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_driver_list_lock) \
+ || erts_lc_rwmtx_is_rlocked(&erts_driver_list_lock))
#define assert_drv_list_rwlocked() \
- ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&erts_driver_list_lock))
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_driver_list_lock))
#define assert_drv_list_rlocked() \
- ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rlocked(&erts_driver_list_lock))
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&erts_driver_list_lock))
#define assert_drv_list_not_locked() \
- ERTS_SMP_LC_ASSERT(!erts_smp_lc_rwmtx_is_rwlocked(&erts_driver_list_lock) \
- && !erts_smp_lc_rwmtx_is_rlocked(&erts_driver_list_lock))
+ ERTS_LC_ASSERT(!erts_lc_rwmtx_is_rwlocked(&erts_driver_list_lock) \
+ && !erts_lc_rwmtx_is_rlocked(&erts_driver_list_lock))
#define FREE_PORT_FLAGS (ERTS_PORT_SFLGS_DEAD & (~ERTS_PORT_SFLG_INITIALIZING))
@@ -131,13 +127,13 @@ kill_ports_driver_unloaded(DE_Handle *dh)
if (!prt)
continue;
- ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER;
+ ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER;
state = erts_atomic32_read_nob(&prt->state);
if (state & FREE_PORT_FLAGS)
continue;
- erts_smp_port_lock(prt);
+ erts_port_lock(prt);
state = erts_atomic32_read_nob(&prt->state);
if (!(state & ERTS_PORT_SFLGS_DEAD) && prt->drv_ptr->handle == dh)
@@ -277,10 +273,8 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3)
path[path_len++] = '/';
sys_strcpy(path+path_len,name);
-#if DDLL_SMP
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
lock_drv_list();
-#endif
if ((drv = lookup_driver(name)) != NULL) {
if (drv->handle == NULL) {
/* static_driver */
@@ -401,24 +395,18 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3)
erts_ddll_reference_driver(dh);
ASSERT(dh->status == ERL_DE_RELOAD);
dh->status = ERL_DE_FORCE_RELOAD;
-#if DDLL_SMP
unlock_drv_list();
-#endif
kill_ports_driver_unloaded(dh);
/* Dereference, eventually causing driver destruction */
-#if DDLL_SMP
lock_drv_list();
-#endif
erts_ddll_dereference_driver(dh);
}
-#if DDLL_SMP
erts_ddll_reference_driver(dh);
unlock_drv_list();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
lock_drv_list();
erts_ddll_dereference_driver(dh);
-#endif
BIF_P->flags |= F_USING_DDLL;
if (monitor) {
@@ -429,18 +417,14 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3)
hp = HAlloc(BIF_P, 3);
t = TUPLE2(hp, am_ok, ok_term);
}
-#if DDLL_SMP
unlock_drv_list();
-#endif
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) path);
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P));
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P));
BIF_RET(t);
soft_error:
-#if DDLL_SMP
unlock_drv_list();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
-#endif
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
if (do_build_load_error) {
soft_error_term = build_load_error(BIF_P, build_this_load_error);
}
@@ -449,11 +433,11 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3)
t = TUPLE2(hp, am_error, soft_error_term);
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) path);
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P));
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P));
BIF_RET(t);
error:
assert_drv_list_not_locked();
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P));
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P));
if (path != NULL) {
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) path);
}
@@ -515,7 +499,7 @@ Eterm erl_ddll_try_unload_2(BIF_ALIST_2)
Eterm l;
int kill_ports = 0;
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
for(l = options; is_list(l); l = CDR(list_val(l))) {
Eterm opt = CAR(list_val(l));
@@ -548,9 +532,7 @@ Eterm erl_ddll_try_unload_2(BIF_ALIST_2)
goto error;
}
-#if DDLL_SMP
lock_drv_list();
-#endif
if ((drv = lookup_driver(name)) == NULL) {
soft_error_term = am_not_loaded;
@@ -594,7 +576,7 @@ Eterm erl_ddll_try_unload_2(BIF_ALIST_2)
dh->reload_full_path = dh->reload_driver_name = NULL;
dh->reload_flags = 0;
}
- if (erts_smp_atomic32_read_nob(&dh->port_count) > 0) {
+ if (erts_atomic32_read_nob(&dh->port_count) > 0) {
++kill_ports;
}
dh->status = ERL_DE_UNLOAD;
@@ -605,23 +587,17 @@ done:
/* Avoid closing the driver by referencing it */
erts_ddll_reference_driver(dh);
dh->status = ERL_DE_FORCE_UNLOAD;
-#if DDLL_SMP
unlock_drv_list();
-#endif
kill_ports_driver_unloaded(dh);
-#if DDLL_SMP
lock_drv_list();
-#endif
erts_ddll_dereference_driver(dh);
}
-#if DDLL_SMP
erts_ddll_reference_driver(dh);
unlock_drv_list();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
lock_drv_list();
erts_ddll_dereference_driver(dh);
-#endif
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
BIF_P->flags |= F_USING_DDLL;
if (monitor > 0) {
@@ -635,17 +611,13 @@ done:
if (kill_ports > 1) {
ERTS_BIF_CHK_EXITED(BIF_P); /* May be exited by port killing */
}
-#if DDLL_SMP
unlock_drv_list();
-#endif
BIF_RET(t);
soft_error:
-#if DDLL_SMP
unlock_drv_list();
-#endif
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
hp = HAlloc(BIF_P, 3);
t = TUPLE2(hp, am_error, soft_error_term);
BIF_RET(t);
@@ -655,7 +627,7 @@ soft_error:
if (name != NULL) {
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
}
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_ERROR(BIF_P, BADARG);
}
@@ -694,9 +666,7 @@ BIF_RETTYPE erl_ddll_loaded_drivers_0(BIF_ALIST_0)
int need = 3;
Eterm res = NIL;
erts_driver_t *drv;
-#if DDLL_SMP
lock_drv_list();
-#endif
for (drv = driver_list; drv; drv = drv->next) {
need += sys_strlen(drv->name)*2+2;
}
@@ -709,9 +679,7 @@ BIF_RETTYPE erl_ddll_loaded_drivers_0(BIF_ALIST_0)
}
res = TUPLE2(hp,am_ok,res);
/* hp += 3 */
-#if DDLL_SMP
unlock_drv_list();
-#endif
BIF_RET(res);
}
@@ -733,9 +701,7 @@ BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2)
Eterm *hp;
int i;
Uint filter;
-#if DDLL_SMP
int have_lock = 0;
-#endif
if ((name = pick_list_or_atom(name_term)) == NULL) {
goto error;
@@ -745,10 +711,8 @@ BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2)
goto error;
}
-#if DDLL_SMP
lock_drv_list();
have_lock = 1;
-#endif
if ((drv = lookup_driver(name)) == NULL) {
goto error;
}
@@ -778,7 +742,7 @@ BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2)
} else if (drv->handle->status == ERL_DE_PERMANENT) {
res = am_permanent;
} else {
- res = make_small(erts_smp_atomic32_read_nob(&drv->handle->port_count));
+ res = make_small(erts_atomic32_read_nob(&drv->handle->port_count));
}
goto done;
case am_linked_in_driver:
@@ -824,9 +788,7 @@ BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2)
hp += 2;
}
done:
-#if DDLL_SMP
unlock_drv_list();
-#endif
if (pei)
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, pei);
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
@@ -835,11 +797,9 @@ BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2)
if (name != NULL) {
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
}
-#if DDLL_SMP
if (have_lock) {
unlock_drv_list();
}
-#endif
BIF_ERROR(p,BADARG);
}
@@ -896,13 +856,9 @@ BIF_RETTYPE erl_ddll_format_error_int_1(BIF_ALIST_1)
if (errdesc_to_code(code_term,&errint) != 0) {
goto error;
}
-#if DDLL_SMP
lock_drv_list();
-#endif
errstring = erts_ddll_error(errint);
-#if DDLL_SMP
unlock_drv_list();
-#endif
break;
}
if (errstring == NULL) {
@@ -965,7 +921,7 @@ Eterm erts_ddll_monitor_driver(Process *p,
void erts_ddll_remove_monitor(Process *p, Eterm ref, ErtsProcLocks plocks)
{
erts_driver_t *drv;
- erts_smp_proc_unlock(p, plocks);
+ erts_proc_unlock(p, plocks);
lock_drv_list();
drv = driver_list;
while (drv != NULL) {
@@ -990,7 +946,7 @@ void erts_ddll_remove_monitor(Process *p, Eterm ref, ErtsProcLocks plocks)
}
done:
unlock_drv_list();
- erts_smp_proc_lock(p, plocks);
+ erts_proc_lock(p, plocks);
}
/*
@@ -999,7 +955,7 @@ void erts_ddll_remove_monitor(Process *p, Eterm ref, ErtsProcLocks plocks)
void erts_ddll_proc_dead(Process *p, ErtsProcLocks plocks)
{
erts_driver_t *drv;
- erts_smp_proc_unlock(p, plocks);
+ erts_proc_unlock(p, plocks);
lock_drv_list();
drv = driver_list;
while (drv != NULL) {
@@ -1037,18 +993,14 @@ void erts_ddll_proc_dead(Process *p, ErtsProcLocks plocks)
dh->status = ERL_DE_UNLOAD;
}
if (!left
- && erts_smp_atomic32_read_nob(&drv->handle->port_count) > 0) {
+ && erts_atomic32_read_nob(&drv->handle->port_count) > 0) {
if (kill_ports) {
DE_Handle *dh = drv->handle;
erts_ddll_reference_driver(dh);
dh->status = ERL_DE_FORCE_UNLOAD;
-#if DDLL_SMP
unlock_drv_list();
-#endif
kill_ports_driver_unloaded(dh);
-#if DDLL_SMP
lock_drv_list(); /* Needed for future list operations */
-#endif
drv = drv->next; /* before allowing destruction */
erts_ddll_dereference_driver(dh);
} else {
@@ -1062,7 +1014,7 @@ void erts_ddll_proc_dead(Process *p, ErtsProcLocks plocks)
}
}
unlock_drv_list();
- erts_smp_proc_lock(p, plocks);
+ erts_proc_lock(p, plocks);
}
void erts_ddll_lock_driver(DE_Handle *dh, char *name)
{
@@ -1090,16 +1042,16 @@ void erts_ddll_lock_driver(DE_Handle *dh, char *name)
void erts_ddll_increment_port_count(DE_Handle *dh)
{
assert_drv_list_locked();
- erts_smp_atomic32_inc_nob(&dh->port_count);
+ erts_atomic32_inc_nob(&dh->port_count);
}
void erts_ddll_decrement_port_count(DE_Handle *dh)
{
assert_drv_list_locked();
-#if DEBUG
- ASSERT(erts_smp_atomic32_dec_read_nob(&dh->port_count) >= 0);
+#ifdef DEBUG
+ ASSERT(erts_atomic32_dec_read_nob(&dh->port_count) >= 0);
#else
- erts_smp_atomic32_dec_nob(&dh->port_count);
+ erts_atomic32_dec_nob(&dh->port_count);
#endif
}
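
Besides dropping the smp_ prefix, this hunk fixes the conditional itself: #if DEBUG evaluates the macro's value and breaks when DEBUG is defined without one, whereas #ifdef DEBUG only tests for its existence. A standalone illustration:

    #include <stdio.h>

    #define DEBUG            /* defined, but with no value */

    int main(void)
    {
    #ifdef DEBUG
        puts("debug build"); /* taken: the macro is defined */
    #endif
        /* "#if DEBUG" here would not compile with this definition,
         * since it expands to "#if" with an empty expression. */
        return 0;
    }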
@@ -1278,10 +1230,8 @@ static Eterm notify_when_loaded(Process *p, Eterm name_term, char *name, ErtsPro
Eterm immediate_type = NIL;
erts_driver_t *drv;
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & plocks);
-#if DDLL_SMP
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & plocks);
lock_drv_list();
-#endif
if ((drv = lookup_driver(name)) == NULL) {
immediate_tag = am_unloaded;
immediate_type = am_DOWN;
@@ -1307,24 +1257,18 @@ static Eterm notify_when_loaded(Process *p, Eterm name_term, char *name, ErtsPro
case ERL_DE_FORCE_RELOAD:
break;
default:
- erl_exit(1,"Internal error, unknown state %u in dynamic driver.", drv->handle->status);
+ erts_exit(ERTS_ERROR_EXIT,"Internal error, unknown state %u in dynamic driver.", drv->handle->status);
}
p->flags |= F_USING_DDLL;
r = add_monitor(p, drv->handle, ERL_DE_PROC_AWAIT_LOAD);
-#if DDLL_SMP
unlock_drv_list();
-#endif
BIF_RET(r);
immediate:
r = erts_make_ref(p);
-#if DDLL_SMP
- erts_smp_proc_unlock(p, plocks);
-#endif
+ erts_proc_unlock(p, plocks);
notify_proc(p, r, name_term, immediate_type, immediate_tag, 0);
-#if DDLL_SMP
unlock_drv_list();
- erts_smp_proc_lock(p, plocks);
-#endif
+ erts_proc_lock(p, plocks);
BIF_RET(r);
}
@@ -1335,10 +1279,8 @@ static Eterm notify_when_unloaded(Process *p, Eterm name_term, char *name, ErtsP
Eterm immediate_type = NIL;
erts_driver_t *drv;
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & plocks);
-#if DDLL_SMP
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & plocks);
lock_drv_list();
-#endif
if ((drv = lookup_driver(name)) == NULL) {
immediate_tag = am_unloaded;
immediate_type = am_DOWN;
@@ -1352,20 +1294,14 @@ static Eterm notify_when_unloaded(Process *p, Eterm name_term, char *name, ErtsP
p->flags |= F_USING_DDLL;
r = add_monitor(p, drv->handle, flag);
-#if DDLL_SMP
unlock_drv_list();
-#endif
BIF_RET(r);
immediate:
r = erts_make_ref(p);
-#if DDLL_SMP
- erts_smp_proc_unlock(p, plocks);
-#endif
+ erts_proc_unlock(p, plocks);
notify_proc(p, r, name_term, immediate_type, immediate_tag, 0);
-#if DDLL_SMP
unlock_drv_list();
- erts_smp_proc_lock(p, plocks);
-#endif
+ erts_proc_lock(p, plocks);
BIF_RET(r);
}
@@ -1473,8 +1409,10 @@ static void add_proc_loaded_deref(DE_Handle *dh, Process *proc)
static Eterm copy_ref(Eterm ref, Eterm *hp)
{
- RefThing *ptr = ref_thing_ptr(ref);
- memcpy(hp, ptr, sizeof(RefThing));
+ ErtsORefThing *ptr;
+ ASSERT(is_internal_ordinary_ref(ref));
+ ptr = ordinary_ref_thing_ptr(ref);
+ memcpy(hp, ptr, sizeof(ErtsORefThing));
return (make_internal_ref(hp));
}
@@ -1567,8 +1505,8 @@ static int do_load_driver_entry(DE_Handle *dh, char *path, char *name)
res = ERL_DE_LOAD_ERROR_BAD_NAME;
goto error;
}
- erts_smp_atomic_init_nob(&(dh->refc), (erts_aint_t) 0);
- erts_smp_atomic32_init_nob(&dh->port_count, 0);
+ erts_atomic_init_nob(&(dh->refc), (erts_aint_t) 0);
+ erts_atomic32_init_nob(&dh->port_count, 0);
dh->full_path = erts_alloc(ERTS_ALC_T_DDLL_HANDLE, sys_strlen(path) + 1);
sys_strcpy(dh->full_path, path);
dh->flags = 0;
@@ -1617,6 +1555,7 @@ static int do_unload_driver_entry(DE_Handle *dh, Eterm *save_name)
if (q->finish) {
int fpe_was_unmasked = erts_block_fpe();
DTRACE1(driver_finish, q->name);
+ LTTNG1(driver_finish, q->name);
(*(q->finish))();
erts_unblock_fpe(fpe_was_unmasked);
}
@@ -1638,7 +1577,7 @@ static int load_driver_entry(DE_Handle **dhp, char *path, char *name)
dh->handle = NULL;
dh->procs = NULL;
- erts_smp_atomic32_init_nob(&dh->port_count, 0);
+ erts_atomic32_init_nob(&dh->port_count, 0);
erts_refc_init(&(dh->refc), (erts_aint_t) 0);
dh->status = -1;
dh->reload_full_path = NULL;
@@ -1705,38 +1644,37 @@ static void notify_proc(Process *proc, Eterm ref, Eterm driver_name, Eterm type,
Eterm mess;
Eterm r;
Eterm *hp;
- ErlHeapFragment *bp;
- ErlOffHeap *ohp;
+ ErtsMessage *mp;
ErtsProcLocks rp_locks = 0;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ErlOffHeap *ohp;
+ ERTS_CHK_NO_PROC_LOCKS;
assert_drv_list_rwlocked();
if (errcode != 0) {
int need = load_error_need(errcode);
Eterm e;
- hp = erts_alloc_message_heap(6 /* tuple */ + 3 /* Error tuple */ +
- REF_THING_SIZE + need, &bp, &ohp,
- proc, &rp_locks);
+ mp = erts_alloc_message_heap(proc, &rp_locks,
+ (6 /* tuple */ + 3 /* Error tuple */ +
+ ERTS_REF_THING_SIZE + need),
+ &hp, &ohp);
r = copy_ref(ref,hp);
- hp += REF_THING_SIZE;
+ hp += ERTS_REF_THING_SIZE;
e = build_load_error_hp(hp, errcode);
hp += need;
mess = TUPLE2(hp,tag,e);
hp += 3;
mess = TUPLE5(hp,type,r,am_driver,driver_name,mess);
} else {
- hp = erts_alloc_message_heap(6 /* tuple */ + REF_THING_SIZE, &bp, &ohp, proc, &rp_locks);
+ mp = erts_alloc_message_heap(proc, &rp_locks,
+ 6 /* tuple */ + ERTS_REF_THING_SIZE,
+ &hp, &ohp);
r = copy_ref(ref,hp);
- hp += REF_THING_SIZE;
+ hp += ERTS_REF_THING_SIZE;
mess = TUPLE5(hp,type,r,am_driver,driver_name,tag);
}
- erts_queue_message(proc, &rp_locks, bp, mess, am_undefined
-#ifdef USE_VM_PROBES
- , NIL
-#endif
- );
- erts_smp_proc_unlock(proc, rp_locks);
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ erts_queue_message(proc, rp_locks, mp, mess, am_system);
+ erts_proc_unlock(proc, rp_locks);
+ ERTS_CHK_NO_PROC_LOCKS;
}
static void notify_all(DE_Handle *dh, char *name, Uint awaiting, Eterm type, Eterm tag)
@@ -1808,7 +1746,7 @@ static Eterm build_load_error(Process *p, int code)
{
int need = load_error_need(code);
Eterm *hp = NULL;
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));
if (need) {
hp = HAlloc(p,need);
}
diff --git a/erts/emulator/beam/erl_bif_guard.c b/erts/emulator/beam/erl_bif_guard.c
index bbd8aa31d9..8a5c6ada6c 100644
--- a/erts/emulator/beam/erl_bif_guard.c
+++ b/erts/emulator/beam/erl_bif_guard.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -140,6 +141,39 @@ BIF_RETTYPE trunc_1(BIF_ALIST_1)
BIF_RET(res);
}
+BIF_RETTYPE floor_1(BIF_ALIST_1)
+{
+ Eterm res;
+ FloatDef f;
+
+ if (is_not_float(BIF_ARG_1)) {
+ if (is_integer(BIF_ARG_1))
+ BIF_RET(BIF_ARG_1);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ GET_DOUBLE(BIF_ARG_1, f);
+ res = double_to_integer(BIF_P, floor(f.fd));
+ BIF_RET(res);
+}
+
+BIF_RETTYPE ceil_1(BIF_ALIST_1)
+{
+ Eterm res;
+ FloatDef f;
+
+ /* check arg */
+ if (is_not_float(BIF_ARG_1)) {
+ if (is_integer(BIF_ARG_1))
+ BIF_RET(BIF_ARG_1);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ /* get the float */
+ GET_DOUBLE(BIF_ARG_1, f);
+
+ res = double_to_integer(BIF_P, ceil(f.fd));
+ BIF_RET(res);
+}
+
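floor/1 and ceil/1 are new guard BIFs; integer arguments pass through unchanged, and float arguments get the C99 floor()/ceil() semantics before conversion to an integer term. A quick standalone check of the semantics they inherit (compile with -lm):

    #include <stdio.h>
    #include <math.h>

    int main(void)
    {
        printf("%g %g\n", floor(-1.5), ceil(-1.5)); /* -2 -1 */
        printf("%g %g\n", floor(1.5), ceil(1.5));   /*  1  2 */
        return 0;
    }
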
BIF_RETTYPE round_1(BIF_ALIST_1)
{
Eterm res;
@@ -156,7 +190,7 @@ BIF_RETTYPE round_1(BIF_ALIST_1)
GET_DOUBLE(BIF_ARG_1, f);
/* round it and return the resultant integer */
- res = double_to_integer(BIF_P, (f.fd > 0.0) ? f.fd + 0.5 : f.fd - 0.5);
+ res = double_to_integer(BIF_P, round(f.fd));
BIF_RET(res);
}
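
Replacing the add-0.5 idiom with round() is a correctness fix, not just a cleanup: for values where the addition itself rounds up, the old code rounded the wrong way, while round() rounds half away from zero without that edge case. A standalone demonstration (compile with -lm):

    #include <stdio.h>
    #include <math.h>

    int main(void)
    {
        double x = 0.49999999999999994;    /* largest double below 0.5 */
        printf("%.17g\n", x + 0.5);        /* 1 -- the addition rounds up  */
        printf("%.17g\n", floor(x + 0.5)); /* old idiom: 1 (wrong)         */
        printf("%.17g\n", round(x));       /* new idiom: 0 (correct)       */
        return 0;
    }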
@@ -459,23 +493,25 @@ Eterm erts_gc_byte_size_1(Process* p, Eterm* reg, Uint live)
Eterm erts_gc_map_size_1(Process* p, Eterm* reg, Uint live)
{
Eterm arg = reg[live];
- if (is_map(arg)) {
- map_t *mp = (map_t*)map_val(arg);
- Uint size = map_get_size(mp);
- if (IS_USMALL(0, size)) {
- return make_small(size);
- } else {
- Eterm* hp;
- if (ERTS_NEED_GC(p, BIG_UINT_HEAP_SIZE)) {
- erts_garbage_collect(p, BIG_UINT_HEAP_SIZE, reg, live);
- }
- hp = p->htop;
- p->htop += BIG_UINT_HEAP_SIZE;
- return uint_to_big(size, hp);
- }
- } else {
- BIF_ERROR(p, BADARG);
- }
+ if (is_flatmap(arg)) {
+ flatmap_t *mp = (flatmap_t*)flatmap_val(arg);
+ return make_small(flatmap_get_size(mp));
+ } else if (is_hashmap(arg)) {
+ Eterm* hp;
+ Uint size;
+ size = hashmap_size(arg);
+ if (IS_USMALL(0, size)) {
+ return make_small(size);
+ }
+ if (ERTS_NEED_GC(p, BIG_UINT_HEAP_SIZE)) {
+ erts_garbage_collect(p, BIG_UINT_HEAP_SIZE, reg, live);
+ }
+ hp = p->htop;
+ p->htop += BIG_UINT_HEAP_SIZE;
+ return uint_to_big(size, hp);
+ }
+ p->fvalue = arg;
+ BIF_ERROR(p, BADMAP);
}
Eterm erts_gc_abs_1(Process* p, Eterm* reg, Uint live)
@@ -594,8 +630,7 @@ Eterm erts_gc_round_1(Process* p, Eterm* reg, Uint live)
}
GET_DOUBLE(arg, f);
- return gc_double_to_integer(p, (f.fd > 0.0) ? f.fd + 0.5 : f.fd - 0.5,
- reg, live);
+ return gc_double_to_integer(p, round(f.fd), reg, live);
}
Eterm erts_gc_trunc_1(Process* p, Eterm* reg, Uint live)
@@ -618,6 +653,38 @@ Eterm erts_gc_trunc_1(Process* p, Eterm* reg, Uint live)
reg, live);
}
+Eterm erts_gc_floor_1(Process* p, Eterm* reg, Uint live)
+{
+ Eterm arg;
+ FloatDef f;
+
+ arg = reg[live];
+ if (is_not_float(arg)) {
+ if (is_integer(arg)) {
+ return arg;
+ }
+ BIF_ERROR(p, BADARG);
+ }
+ GET_DOUBLE(arg, f);
+ return gc_double_to_integer(p, floor(f.fd), reg, live);
+}
+
+Eterm erts_gc_ceil_1(Process* p, Eterm* reg, Uint live)
+{
+ Eterm arg;
+ FloatDef f;
+
+ arg = reg[live];
+ if (is_not_float(arg)) {
+ if (is_integer(arg)) {
+ return arg;
+ }
+ BIF_ERROR(p, BADARG);
+ }
+ GET_DOUBLE(arg, f);
+ return gc_double_to_integer(p, ceil(f.fd), reg, live);
+}
+
static Eterm
gc_double_to_integer(Process* p, double x, Eterm* reg, Uint live)
{
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
old mode 100755
new mode 100644
index 2adba9b240..27bbf70c0b
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -21,12 +22,14 @@
# include "config.h"
#endif
+#define ERTS_WANT_MEM_MAPPERS
#include "sys.h"
#include "erl_vm.h"
#include "global.h"
#include "erl_process.h"
#include "error.h"
#include "erl_driver.h"
+#include "erl_nif.h"
#include "bif.h"
#include "big.h"
#include "erl_version.h"
@@ -41,8 +44,12 @@
#include "erl_cpu_topology.h"
#include "erl_async.h"
#include "erl_thr_progress.h"
+#include "erl_bif_unique.h"
+#include "erl_map.h"
+#include "erl_check_io.h"
#define ERTS_PTAB_WANT_DEBUG_FUNCS__
#include "erl_ptab.h"
+#include "erl_time.h"
#ifdef HIPE
#include "hipe_arch.h"
#endif
@@ -58,9 +65,12 @@
static Export* alloc_info_trap = NULL;
static Export* alloc_sizes_trap = NULL;
+static Export* gather_io_bytes_trap = NULL;
static Export *gather_sched_wall_time_res_trap;
+static Export *gather_msacc_res_trap;
static Export *gather_gc_info_res_trap;
+static Export *gather_system_check_res_trap;
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
@@ -69,9 +79,6 @@ static char otp_version[] = ERLANG_OTP_VERSION;
static char erts_system_version[] = ("Erlang/OTP " ERLANG_OTP_RELEASE
"%s"
" [erts-" ERLANG_VERSION "]"
-#if !HEAP_ON_C_STACK && !HALFWORD_HEAP
- " [no-c-stack-objects]"
-#endif
#ifndef OTP_RELEASE
#ifdef ERLANG_GIT_VERSION
" [source-" ERLANG_GIT_VERSION "]"
@@ -80,27 +87,17 @@ static char erts_system_version[] = ("Erlang/OTP " ERLANG_OTP_RELEASE
#endif
#endif
#ifdef ARCH_64
-#if HALFWORD_HEAP
- " [64-bit halfword]"
-#else
" [64-bit]"
#endif
-#endif
-#ifdef ERTS_SMP
" [smp:%beu:%beu]"
-#endif
-#ifdef USE_THREADS
-#ifdef ERTS_DIRTY_SCHEDULERS
" [ds:%beu:%beu:%beu]"
+#if defined(ERTS_DIRTY_SCHEDULERS_TEST)
+ " [dirty-schedulers-TEST]"
#endif
" [async-threads:%d]"
-#endif
#ifdef HIPE
" [hipe]"
#endif
-#ifdef ERTS_ENABLE_KERNEL_POLL
- " [kernel-poll:%s]"
-#endif
#ifdef ET_DEBUG
#if ET_DEBUG
" [type-assertions]"
@@ -115,6 +112,9 @@ static char erts_system_version[] = ("Erlang/OTP " ERLANG_OTP_RELEASE
#ifdef ERTS_ENABLE_LOCK_COUNT
" [lock-counting]"
#endif
+#ifdef ERTS_OPCODE_COUNTER_SUPPORT
+ " [instruction-counting]"
+#endif
#ifdef PURIFY
" [purify-compiled]"
#endif
@@ -124,12 +124,18 @@ static char erts_system_version[] = ("Erlang/OTP " ERLANG_OTP_RELEASE
#ifdef ERTS_FRMPTR
" [frame-pointer]"
#endif
+#ifdef USE_LTTNG
+ " [lttng]"
+#endif
#ifdef USE_DTRACE
" [dtrace]"
#endif
#ifdef USE_SYSTEMTAP
" [systemtap]"
#endif
+#ifdef SHCOPY
+ " [sharing-preserving]"
+#endif
"\n");
#define ASIZE(a) (sizeof(a)/sizeof(a[0]))
@@ -164,7 +170,33 @@ bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
if (szp)
*szp += 4+2;
if (hpp) {
- Uint refc = (Uint) erts_smp_atomic_read_nob(&pb->val->refc);
+ Uint refc = (Uint) erts_refc_read(&pb->val->intern.refc, 1);
+ tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
+ res = CONS(*hpp + 4, tuple, res);
+ *hpp += 4+2;
+ }
+ }
+ }
+ return res;
+}
+
+static Eterm
+bld_magic_ref_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
+{
+ struct erl_off_heap_header* ohh;
+ Eterm res = NIL;
+ Eterm tuple;
+
+ for (ohh = oh->first; ohh; ohh = ohh->next) {
+ if (is_ref_thing_header((*((Eterm *) ohh)))) {
+ ErtsMRefThing *mrtp = (ErtsMRefThing *) ohh;
+ Eterm val = erts_bld_uword(hpp, szp, (UWord) mrtp->mb);
+ Eterm orig_size = erts_bld_uint(hpp, szp, mrtp->mb->orig_size);
+
+ if (szp)
+ *szp += 4+2;
+ if (hpp) {
+ Uint refc = (Uint) erts_refc_read(&mrtp->mb->intern.refc, 1);
tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
res = CONS(*hpp + 4, tuple, res);
*hpp += 4+2;
@@ -189,8 +221,10 @@ bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
static void do_calc_mon_size(ErtsMonitor *mon, void *vpsz)
{
Uint *psz = vpsz;
- *psz += IS_CONST(mon->ref) ? 0 : NC_HEAP_SIZE(mon->ref);
- *psz += IS_CONST(mon->pid) ? 0 : NC_HEAP_SIZE(mon->pid);
+ *psz += NC_HEAP_SIZE(mon->ref);
+ *psz += (mon->type == MON_NIF_TARGET ?
+ erts_resource_ref_size(mon->u.resource) :
+ (is_immed(mon->u.pid) ? 0 : NC_HEAP_SIZE(mon->u.pid)));
*psz += 8; /* CONS + 5-tuple */
}
@@ -205,12 +239,11 @@ static void do_make_one_mon_element(ErtsMonitor *mon, void * vpmlc)
{
MonListContext *pmlc = vpmlc;
Eterm tup;
- Eterm r = (IS_CONST(mon->ref)
- ? mon->ref
- : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->ref));
- Eterm p = (IS_CONST(mon->pid)
- ? mon->pid
- : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->pid));
+ Eterm r = STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->ref);
+ Eterm p = (mon->type == MON_NIF_TARGET ?
+ erts_bld_resource_ref(&(pmlc->hp), &MSO(pmlc->p), mon->u.resource)
+ : (is_immed(mon->u.pid) ? mon->u.pid
+ : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->u.pid)));
tup = TUPLE5(pmlc->hp, pmlc->tag, make_small(mon->type), r, p, mon->name);
pmlc->hp += 6;
pmlc->res = CONS(pmlc->hp, tup, pmlc->res);
@@ -249,7 +282,7 @@ make_monitor_list(Process *p, ErtsMonitor *root)
static void do_calc_lnk_size(ErtsLink *lnk, void *vpsz)
{
Uint *psz = vpsz;
- *psz += IS_CONST(lnk->pid) ? 0 : NC_HEAP_SIZE(lnk->pid);
+ *psz += is_immed(lnk->pid) ? 0 : NC_HEAP_SIZE(lnk->pid);
if (lnk->type != LINK_NODE && ERTS_LINK_ROOT(lnk) != NULL) {
/* Node links use this pointer as ref counter... */
erts_doforall_links(ERTS_LINK_ROOT(lnk),&do_calc_lnk_size,vpsz);
@@ -269,7 +302,7 @@ static void do_make_one_lnk_element(ErtsLink *lnk, void * vpllc)
LnkListContext *pllc = vpllc;
Eterm tup;
Eterm old_res, targets = NIL;
- Eterm p = (IS_CONST(lnk->pid)
+ Eterm p = (is_immed(lnk->pid)
? lnk->pid
: STORE_NC(&(pllc->hp), &MSO(pllc->p), lnk->pid));
if (lnk->type == LINK_NODE) {
@@ -307,22 +340,18 @@ make_link_list(Process *p, ErtsLink *root, Eterm tail)
}
int
-erts_print_system_version(int to, void *arg, Process *c_p)
+erts_print_system_version(fmtfn_t to, void *arg, Process *c_p)
{
int i, rc = -1;
char *rc_str = "";
char rc_buf[100];
char *ov = otp_version;
-#ifdef ERTS_SMP
Uint total, online, active;
-#ifdef ERTS_DIRTY_SCHEDULERS
Uint dirty_cpu, dirty_cpu_onln, dirty_io;
- (void) erts_schedulers_state(&total, &online, &active, &dirty_cpu, &dirty_cpu_onln, &dirty_io, 0);
-#else
- (void) erts_schedulers_state(&total, &online, &active, NULL, NULL, NULL, 0);
-#endif
-#endif
+ erts_schedulers_state(&total, &online, &active,
+ &dirty_cpu, &dirty_cpu_onln, NULL,
+ &dirty_io, NULL);
for (i = 0; i < sizeof(otp_version)-4; i++) {
if (ov[i] == '-' && ov[i+1] == 'r' && ov[i+2] == 'c')
rc = atoi(&ov[i+3]);
@@ -337,24 +366,24 @@ erts_print_system_version(int to, void *arg, Process *c_p)
}
return erts_print(to, arg, erts_system_version,
rc_str
-#ifdef ERTS_SMP
, total, online
-#ifdef ERTS_DIRTY_SCHEDULERS
, dirty_cpu, dirty_cpu_onln, dirty_io
-#endif
-#endif
-#ifdef USE_THREADS
, erts_async_max_threads
-#endif
-#ifdef ERTS_ENABLE_KERNEL_POLL
- , erts_use_kernel_poll ? "true" : "false"
-#endif
);
}
typedef struct {
- Eterm entity;
+ /* {Entity,Node} = {monitor.Name,monitor.Pid} for external by name
+ * {Entity,Node} = {monitor.Pid,NIL} for external/internal by pid
+ * {Entity,Node} = {monitor.Name,erlang:node()} for internal by name
+ * {Entity,Node} = {monitor.resource,MON_NIF_TARGET}*/
+ union {
+ Eterm term;
+ ErtsResource* resource;
+ }entity;
Eterm node;
+ /* pid is the actual target being monitored, whether by pid, port or name */
+ Eterm pid;
} MonitorInfo;
typedef struct {
@@ -397,7 +426,7 @@ static void collect_one_link(ErtsLink *lnk, void *vmicp)
if (!(lnk->type == LINK_PID)) {
return;
}
- micp->mi[micp->mi_i].entity = lnk->pid;
+ micp->mi[micp->mi_i].entity.term = lnk->pid;
micp->sz += 2 + NC_HEAP_SIZE(lnk->pid);
micp->mi_i++;
}
@@ -410,23 +439,29 @@ static void collect_one_origin_monitor(ErtsMonitor *mon, void *vmicp)
return;
}
EXTEND_MONITOR_INFOS(micp);
- if (is_atom(mon->pid)) { /* external by name */
- micp->mi[micp->mi_i].entity = mon->name;
- micp->mi[micp->mi_i].node = mon->pid;
- micp->sz += 3; /* need one 2-tuple */
- } else if (is_external_pid(mon->pid)) { /* external by pid */
- micp->mi[micp->mi_i].entity = mon->pid;
- micp->mi[micp->mi_i].node = NIL;
- micp->sz += NC_HEAP_SIZE(mon->pid);
+ if (is_atom(mon->u.pid)) { /* external by name */
+ micp->mi[micp->mi_i].entity.term = mon->name;
+ micp->mi[micp->mi_i].node = mon->u.pid;
+ micp->sz += 3; /* need one 2-tuple */
+ } else if (is_external_pid(mon->u.pid)) { /* external by pid */
+ micp->mi[micp->mi_i].entity.term = mon->u.pid;
+ micp->mi[micp->mi_i].node = NIL;
+ micp->sz += NC_HEAP_SIZE(mon->u.pid);
} else if (!is_nil(mon->name)) { /* internal by name */
- micp->mi[micp->mi_i].entity = mon->name;
- micp->mi[micp->mi_i].node = erts_this_dist_entry->sysname;
- micp->sz += 3; /* need one 2-tuple */
+ micp->mi[micp->mi_i].entity.term = mon->name;
+ micp->mi[micp->mi_i].node = erts_this_dist_entry->sysname;
+ micp->sz += 3; /* need one 2-tuple */
} else { /* internal by pid */
- micp->mi[micp->mi_i].entity = mon->pid;
- micp->mi[micp->mi_i].node = NIL;
+ micp->mi[micp->mi_i].entity.term = mon->u.pid;
+ micp->mi[micp->mi_i].node = NIL;
/* no additional heap space needed */
}
+
+ /* Always keep the pid at hand, to assist with figuring out if it is
+ * a port or a process when we monitored by name and process_info is
+ * requested. See: erl_bif_info.c:process_info_aux section for am_monitors */
+ micp->mi[micp->mi_i].pid = mon->u.pid;
+
micp->mi_i++;
micp->sz += 2 + 3; /* For a cons cell and a 2-tuple */
}
@@ -435,15 +470,24 @@ static void collect_one_target_monitor(ErtsMonitor *mon, void *vmicp)
{
MonitorInfoCollection *micp = vmicp;
- if (mon->type != MON_TARGET) {
- return;
+ if (mon->type != MON_TARGET && mon->type != MON_NIF_TARGET) {
+ return;
}
EXTEND_MONITOR_INFOS(micp);
- micp->mi[micp->mi_i].node = NIL;
- micp->mi[micp->mi_i].entity = mon->pid;
- micp->sz += (NC_HEAP_SIZE(mon->pid) + 2 /* cons */);
+
+ if (mon->type == MON_NIF_TARGET) {
+ micp->mi[micp->mi_i].entity.resource = mon->u.resource;
+ micp->mi[micp->mi_i].node = make_small(MON_NIF_TARGET);
+ micp->sz += erts_resource_ref_size(mon->u.resource);
+ }
+ else {
+ micp->mi[micp->mi_i].entity.term = mon->u.pid;
+ micp->mi[micp->mi_i].node = NIL;
+ micp->sz += NC_HEAP_SIZE(mon->u.pid);
+ }
+ micp->sz += 2; /* cons */;
micp->mi_i++;
}
@@ -536,6 +580,7 @@ pi_locks(Eterm info)
switch (info) {
case am_status:
case am_priority:
+ case am_trap_exit:
return ERTS_PROC_LOCK_STATUS;
case am_links:
case am_monitors:
@@ -586,9 +631,13 @@ static Eterm pi_args[] = {
am_suspending,
am_min_heap_size,
am_min_bin_vheap_size,
+ am_max_heap_size,
am_current_location,
am_current_stacktrace,
-};
+ am_message_queue_data,
+ am_garbage_collection_info,
+ am_magic_ref
+};
#define ERTS_PI_ARGS ((int) (sizeof(pi_args)/sizeof(Eterm)))
@@ -633,8 +682,12 @@ pi_arg2ix(Eterm arg)
case am_suspending: return 26;
case am_min_heap_size: return 27;
case am_min_bin_vheap_size: return 28;
- case am_current_location: return 29;
- case am_current_stacktrace: return 30;
+ case am_max_heap_size: return 29;
+ case am_current_location: return 30;
+ case am_current_stacktrace: return 31;
+ case am_message_queue_data: return 32;
+ case am_garbage_collection_info: return 33;
+ case am_magic_ref: return 34;
default: return -1;
}
}
@@ -663,18 +716,12 @@ static Eterm pi_1_keys[] = {
#define ERTS_PI_1_NO_OF_KEYS (sizeof(pi_1_keys)/sizeof(Eterm))
static Eterm pi_1_keys_list;
-#if HEAP_ON_C_STACK
static Eterm pi_1_keys_list_heap[2*ERTS_PI_1_NO_OF_KEYS];
-#endif
static void
process_info_init(void)
{
-#if HEAP_ON_C_STACK
Eterm *hp = &pi_1_keys_list_heap[0];
-#else
- Eterm *hp = erts_alloc(ERTS_ALC_T_LL_TEMP_TERM,sizeof(Eterm)*2*ERTS_PI_1_NO_OF_KEYS);
-#endif
int i;
pi_1_keys_list = NIL;
@@ -698,7 +745,6 @@ process_info_init(void)
static ERTS_INLINE Process *
pi_pid2proc(Process *c_p, Eterm pid, ErtsProcLocks info_locks)
{
-#ifdef ERTS_SMP
/*
* If the main lock is needed, we use erts_pid2proc_not_running()
* instead of erts_pid2proc() for two reasons:
@@ -716,16 +762,16 @@ pi_pid2proc(Process *c_p, Eterm pid, ErtsProcLocks info_locks)
return erts_pid2proc_not_running(c_p, ERTS_PROC_LOCK_MAIN,
pid, info_locks);
else
-#endif
return erts_pid2proc(c_p, ERTS_PROC_LOCK_MAIN,
pid, info_locks);
}
-BIF_RETTYPE
+static BIF_RETTYPE
process_info_aux(Process *BIF_P,
Process *rp,
+ ErtsProcLocks rp_locks,
Eterm rpid,
Eterm item,
int always_wrap);
@@ -816,10 +862,32 @@ process_info_list(Process *c_p, Eterm pid, Eterm list, int always_wrap,
*fail_type = ERTS_PI_FAIL_TYPE_AWAIT_EXIT;
goto done;
}
- else if (!(locks & ERTS_PROC_LOCK_STATUS)) {
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+ else {
+ ErtsProcLocks unlock_locks = 0;
+
+ if (c_p == rp)
+ locks |= ERTS_PROC_LOCK_MAIN;
+
+ if (!(locks & ERTS_PROC_LOCK_STATUS))
+ unlock_locks |= ERTS_PROC_LOCK_STATUS;
+
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ /*
+ * Move the in-queue into the private queue and
+ * release msgq lock, enabling others to
+ * send messages to the process while it
+ * is being inspected...
+ */
+ ASSERT(locks & ERTS_PROC_LOCK_MAIN);
+ ERTS_MSGQ_MV_INQ2PRIVQ(rp);
+ locks &= ~ERTS_PROC_LOCK_MSGQ;
+ unlock_locks |= ERTS_PROC_LOCK_MSGQ;
+ }
+
+ if (unlock_locks)
+ erts_proc_unlock(rp, unlock_locks);
+
}
-
/*
* We always handle 'messages' first if it should be part
@@ -831,7 +899,7 @@ process_info_list(Process *c_p, Eterm pid, Eterm list, int always_wrap,
if (want_messages) {
ix = pi_arg2ix(am_messages);
ASSERT(part_res[ix] == THE_NON_VALUE);
- part_res[ix] = process_info_aux(c_p, rp, pid, am_messages, always_wrap);
+ part_res[ix] = process_info_aux(c_p, rp, locks, pid, am_messages, always_wrap);
ASSERT(part_res[ix] != THE_NON_VALUE);
}
@@ -839,7 +907,7 @@ process_info_list(Process *c_p, Eterm pid, Eterm list, int always_wrap,
ix = res_elem_ix[res_elem_ix_ix];
if (part_res[ix] == THE_NON_VALUE) {
arg = pi_ix2arg(ix);
- part_res[ix] = process_info_aux(c_p, rp, pid, arg, always_wrap);
+ part_res[ix] = process_info_aux(c_p, rp, locks, pid, arg, always_wrap);
ASSERT(part_res[ix] != THE_NON_VALUE);
}
}
@@ -875,7 +943,7 @@ process_info_list(Process *c_p, Eterm pid, Eterm list, int always_wrap,
if (c_p == rp)
locks &= ~ERTS_PROC_LOCK_MAIN;
if (locks && rp)
- erts_smp_proc_unlock(rp, locks);
+ erts_proc_unlock(rp, locks);
if (res_elem_ix != &def_res_elem_ix_buf[0])
erts_free(ERTS_ALC_T_TMP, res_elem_ix);
@@ -906,7 +974,7 @@ BIF_RETTYPE process_info_1(BIF_ALIST_1)
case ERTS_PI_FAIL_TYPE_AWAIT_EXIT:
ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_undefined);
default:
- erl_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error", __FILE__, __LINE__);
+ erts_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error", __FILE__, __LINE__);
}
}
@@ -946,7 +1014,7 @@ BIF_RETTYPE process_info_2(BIF_ALIST_2)
case ERTS_PI_FAIL_TYPE_AWAIT_EXIT:
ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_undefined);
default:
- erl_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error",
+ erts_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error",
__FILE__, __LINE__);
}
}
@@ -966,22 +1034,42 @@ BIF_RETTYPE process_info_2(BIF_ALIST_2)
ERTS_BIF_YIELD2(bif_export[BIF_process_info_2], BIF_P,
BIF_ARG_1, BIF_ARG_2);
else if (rp != BIF_P && ERTS_PROC_PENDING_EXIT(rp)) {
- erts_smp_proc_unlock(rp, info_locks|ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(rp, info_locks|ERTS_PROC_LOCK_STATUS);
ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_undefined);
}
else {
+ ErtsProcLocks unlock_locks = 0;
+
+ if (BIF_P == rp)
+ info_locks |= ERTS_PROC_LOCK_MAIN;
+
if (!(info_locks & ERTS_PROC_LOCK_STATUS))
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
- res = process_info_aux(BIF_P, rp, pid, BIF_ARG_2, 0);
+ unlock_locks |= ERTS_PROC_LOCK_STATUS;
+
+ if (info_locks & ERTS_PROC_LOCK_MSGQ) {
+ /*
+ * Move the in-queue into the private queue and
+ * release msgq lock, enabling others to
+ * send messages to the process while it
+ * is being inspected...
+ */
+ ASSERT(info_locks & ERTS_PROC_LOCK_MAIN);
+ ERTS_MSGQ_MV_INQ2PRIVQ(rp);
+ info_locks &= ~ERTS_PROC_LOCK_MSGQ;
+ unlock_locks |= ERTS_PROC_LOCK_MSGQ;
+ }
+
+ if (unlock_locks)
+ erts_proc_unlock(rp, unlock_locks);
+
+ res = process_info_aux(BIF_P, rp, info_locks, pid, BIF_ARG_2, 0);
}
ASSERT(is_value(res));
-#ifdef ERTS_SMP
if (BIF_P == rp)
info_locks &= ~ERTS_PROC_LOCK_MAIN;
if (rp && info_locks)
- erts_smp_proc_unlock(rp, info_locks);
-#endif
+ erts_proc_unlock(rp, info_locks);
ASSERT(!(BIF_P->flags & F_P2PNR_RESCHED));
BIF_RET(res);
@@ -990,6 +1078,7 @@ BIF_RETTYPE process_info_2(BIF_ALIST_2)
Eterm
process_info_aux(Process *BIF_P,
Process *rp,
+ ErtsProcLocks rp_locks,
Eterm rpid,
Eterm item,
int always_wrap)
@@ -1048,174 +1137,68 @@ process_info_aux(Process *BIF_P,
case am_initial_call:
hp = HAlloc(BIF_P, 3+4);
res = TUPLE3(hp,
- rp->initial[INITIAL_MOD],
- rp->initial[INITIAL_FUN],
- make_small(rp->initial[INITIAL_ARI]));
+ rp->u.initial.module,
+ rp->u.initial.function,
+ make_small(rp->u.initial.arity));
hp += 4;
break;
case am_status:
- res = erts_process_status(BIF_P, ERTS_PROC_LOCK_MAIN, rp, rpid);
+ res = erts_process_status(rp, rpid);
ASSERT(res != am_undefined);
hp = HAlloc(BIF_P, 3);
break;
case am_messages: {
- ErlMessage* mp;
- int n;
-
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
- n = rp->msg.len;
- if (n == 0 || ERTS_TRACE_FLAGS(rp) & F_SENSITIVE) {
+ if (rp->msg.len == 0 || ERTS_TRACE_FLAGS(rp) & F_SENSITIVE) {
hp = HAlloc(BIF_P, 3);
} else {
- int remove_bad_messages = 0;
- struct {
- Uint copy_struct_size;
- ErlMessage* msgp;
- } *mq = erts_alloc(ERTS_ALC_T_TMP, n*sizeof(*mq));
- Sint i = 0;
- Uint heap_need = 3;
+ ErtsMessageInfo *mip;
+ Sint i;
+ Uint heap_need;
+#ifdef DEBUG
Eterm *hp_end;
+#endif
- for (mp = rp->msg.first; mp; mp = mp->next) {
- heap_need += 2;
- mq[i].msgp = mp;
- if (rp != BIF_P) {
- Eterm msg = ERL_MESSAGE_TERM(mq[i].msgp);
- if (is_value(msg)) {
- mq[i].copy_struct_size = (is_immed(msg)? 0 :
- size_object(msg));
- }
- else if (mq[i].msgp->data.attached) {
- mq[i].copy_struct_size
- = erts_msg_attached_data_size(mq[i].msgp);
- }
- else {
- /* Bad distribution message; ignore */
- remove_bad_messages = 1;
- mq[i].copy_struct_size = 0;
- }
- heap_need += mq[i].copy_struct_size;
- }
- else {
- mq[i].copy_struct_size = 0;
- if (mp->data.attached)
- heap_need += erts_msg_attached_data_size(mp);
- }
- i++;
- }
+ mip = erts_alloc(ERTS_ALC_T_TMP,
+ rp->msg.len*sizeof(ErtsMessageInfo));
- hp = HAlloc(BIF_P, heap_need);
+ /*
+ * Note that the message queue may shrink when calling
+ * erts_prep_msgq_for_inspection() since it removes
+ * corrupt distribution messages.
+ */
+ heap_need = erts_prep_msgq_for_inspection(BIF_P, rp, rp_locks, mip);
+ heap_need += 3; /* top 2-tuple */
+ heap_need += rp->msg.len*2; /* Cons cells */
+
+ hp = HAlloc(BIF_P, heap_need); /* heap_need is exact */
+#ifdef DEBUG
hp_end = hp + heap_need;
- ASSERT(i == n);
- for (i--; i >= 0; i--) {
- Eterm msg = ERL_MESSAGE_TERM(mq[i].msgp);
- if (rp != BIF_P) {
- if (is_value(msg)) {
- if (mq[i].copy_struct_size)
- msg = copy_struct(msg,
- mq[i].copy_struct_size,
- &hp,
- &MSO(BIF_P));
- }
- else if (mq[i].msgp->data.attached) {
- ErlHeapFragment *hfp;
- /*
- * Decode it into a message buffer and attach it
- * to the message instead of the attached external
- * term.
- *
- * Note that we may not pass a process pointer
- * to erts_msg_distext2heap(), since it would then
- * try to alter locks on that process.
- */
- msg = erts_msg_distext2heap(
- NULL, NULL, &hfp, &ERL_MESSAGE_TOKEN(mq[i].msgp),
- mq[i].msgp->data.dist_ext);
-
- ERL_MESSAGE_TERM(mq[i].msgp) = msg;
- mq[i].msgp->data.heap_frag = hfp;
-
- if (is_non_value(msg)) {
- ASSERT(!mq[i].msgp->data.heap_frag);
- /* Bad distribution message; ignore */
- remove_bad_messages = 1;
- continue;
- }
- else {
- /* Make our copy of the message */
- ASSERT(size_object(msg) == hfp->used_size);
- msg = copy_struct(msg,
- hfp->used_size,
- &hp,
- &MSO(BIF_P));
- }
- }
- else {
- /* Bad distribution message; ignore */
- remove_bad_messages = 1;
- continue;
- }
- }
- else {
- if (mq[i].msgp->data.attached) {
- /* Decode it on the heap */
- erts_move_msg_attached_data_to_heap(&hp,
- &MSO(BIF_P),
- mq[i].msgp);
- msg = ERL_MESSAGE_TERM(mq[i].msgp);
- ASSERT(!mq[i].msgp->data.attached);
- if (is_non_value(msg)) {
- /* Bad distribution message; ignore */
- remove_bad_messages = 1;
- continue;
- }
- }
- }
-
+#endif
+
+ /* Build list of messages... */
+ for (i = rp->msg.len - 1, res = NIL; i >= 0; i--) {
+ Eterm msg = ERL_MESSAGE_TERM(mip[i].msgp);
+ Uint sz = mip[i].size;
+
+ if (sz != 0)
+ msg = copy_struct(msg, sz, &hp, &BIF_P->off_heap);
+
res = CONS(hp, msg, res);
hp += 2;
}
- HRelease(BIF_P, hp_end, hp+3);
- erts_free(ERTS_ALC_T_TMP, mq);
- if (remove_bad_messages) {
- ErlMessage **mpp;
- /*
- * We need to remove bad distribution messages from
- * the queue, so that the value returned for
- * 'message_queue_len' is consistent with the value
- * returned for 'messages'.
- */
- mpp = &rp->msg.first;
- mp = rp->msg.first;
- while (mp) {
- if (is_value(ERL_MESSAGE_TERM(mp))) {
- mpp = &mp->next;
- mp = mp->next;
- }
- else {
- ErlMessage* bad_mp = mp;
- ASSERT(!mp->data.attached);
- if (rp->msg.save == &mp->next)
- rp->msg.save = mpp;
- if (rp->msg.last == &mp->next)
- rp->msg.last = mpp;
- *mpp = mp->next;
- mp = mp->next;
- rp->msg.len--;
- free_message(bad_mp);
- }
- }
- }
+
+ ASSERT(hp_end == hp + 3);
+
+ erts_free(ERTS_ALC_T_TMP, mip);
}
break;
}
case am_message_queue_len:
hp = HAlloc(BIF_P, 3);
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
res = make_small(rp->msg.len);
break;
@@ -1231,7 +1214,7 @@ process_info_aux(Process *BIF_P,
hp = HAlloc(BIF_P, 3 + mic.sz);
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
- item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
+ item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity.term);
res = CONS(hp, item, res);
hp += 2;
}
@@ -1241,37 +1224,49 @@ process_info_aux(Process *BIF_P,
case am_monitors: {
MonitorInfoCollection mic;
- int i;
+ int i;
INIT_MONITOR_INFOS(mic);
- erts_doforall_monitors(ERTS_P_MONITORS(rp),&collect_one_origin_monitor,&mic);
- hp = HAlloc(BIF_P, 3 + mic.sz);
+ erts_doforall_monitors(ERTS_P_MONITORS(rp),
+ &collect_one_origin_monitor, &mic);
+ hp = HAlloc(BIF_P, 3 + mic.sz);
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
- if (is_atom(mic.mi[i].entity)) {
+ if (is_atom(mic.mi[i].entity.term)) {
/* Monitor by name.
- * Build {process, {Name, Node}} and cons it.
+ * Build {process|port, {Name, Node}} and cons it.
*/
Eterm t1, t2;
-
- t1 = TUPLE2(hp, mic.mi[i].entity, mic.mi[i].node);
+ /* If pid is an atom, then it is a remote named monitor, which
+  * has to be a process. */
+ Eterm m_type = is_port(mic.mi[i].pid) ? am_port : am_process;
+ ASSERT(is_pid(mic.mi[i].pid)
+ || is_port(mic.mi[i].pid)
+ || is_atom(mic.mi[i].pid));
+
+ t1 = TUPLE2(hp, mic.mi[i].entity.term, mic.mi[i].node);
hp += 3;
- t2 = TUPLE2(hp, am_process, t1);
+ t2 = TUPLE2(hp, m_type, t1);
hp += 3;
res = CONS(hp, t2, res);
- hp += 2;
+ hp += 2;
}
else {
- /* Monitor by pid. Build {process, Pid} and cons it. */
+ /* Monitor by pid. Build {process|port, Pid} and cons it. */
Eterm t;
- Eterm pid = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
- t = TUPLE2(hp, am_process, pid);
+ Eterm pid = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity.term);
+
+ Eterm m_type = is_port(mic.mi[i].pid) ? am_port : am_process;
+ ASSERT(is_pid(mic.mi[i].pid)
+ || is_port(mic.mi[i].pid));
+
+ t = TUPLE2(hp, m_type, pid);
hp += 3;
res = CONS(hp, t, res);
- hp += 2;
+ hp += 2;
}
}
- DESTROY_MONITOR_INFOS(mic);
+ DESTROY_MONITOR_INFOS(mic);
break;
}
@@ -1286,7 +1281,12 @@ process_info_aux(Process *BIF_P,
res = NIL;
for (i = 0; i < mic.mi_i; ++i) {
- item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
+ if (mic.mi[i].node == make_small(MON_NIF_TARGET)) {
+ item = erts_bld_resource_ref(&hp, &MSO(BIF_P), mic.mi[i].entity.resource);
+ }
+ else {
+ item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity.term);
+ }
res = CONS(hp, item, res);
hp += 2;
}
@@ -1356,7 +1356,7 @@ process_info_aux(Process *BIF_P,
break;
case am_trap_exit: {
- erts_aint32_t state = erts_smp_atomic32_read_nob(&rp->state);
+ erts_aint32_t state = erts_atomic32_read_nob(&rp->state);
hp = HAlloc(BIF_P, 3);
if (state & ERTS_PSFLG_TRAP_EXIT)
res = am_true;
@@ -1402,8 +1402,20 @@ process_info_aux(Process *BIF_P,
break;
}
+ case am_max_heap_size: {
+ Uint hsz = 3;
+ (void) erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp),
+ MAX_HEAP_SIZE_FLAGS_GET(rp),
+ NULL, &hsz);
+ hp = HAlloc(BIF_P, hsz);
+ res = erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp),
+ MAX_HEAP_SIZE_FLAGS_GET(rp),
+ &hp, NULL);
+ break;
+ }
+
case am_total_heap_size: {
- ErlMessage *mp;
+ ErtsMessage *mp;
Uint total_heap_size;
Uint hsz = 3;
@@ -1413,11 +1425,10 @@ process_info_aux(Process *BIF_P,
total_heap_size += rp->mbuf_sz;
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
-
- for (mp = rp->msg.first; mp; mp = mp->next)
- if (mp->data.attached)
- total_heap_size += erts_msg_attached_data_size(mp);
+ if (rp->flags & F_ON_HEAP_MSGQ)
+ for (mp = rp->msg.first; mp; mp = mp->next)
+ if (mp->data.attached)
+ total_heap_size += erts_msg_attached_data_size(mp);
(void) erts_bld_uint(NULL, &hsz, total_heap_size);
hp = HAlloc(BIF_P, hsz);
@@ -1436,7 +1447,7 @@ process_info_aux(Process *BIF_P,
case am_memory: { /* Memory consumed in bytes */
Uint hsz = 3;
- Uint size = erts_process_memory(rp);
+ Uint size = erts_process_memory(rp, 0);
(void) erts_bld_uint(NULL, &hsz, size);
hp = HAlloc(BIF_P, hsz);
res = erts_bld_uint(&hp, NULL, size);
@@ -1446,8 +1457,12 @@ process_info_aux(Process *BIF_P,
case am_garbage_collection: {
DECL_AM(minor_gcs);
Eterm t;
+ Uint map_sz = 0;
- hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2 + 3+2 + 3); /* last "3" is for outside tuple */
+ erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp), MAX_HEAP_SIZE_FLAGS_GET(rp), NULL, &map_sz);
+
+ hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2 + 3+2 + 3+2 + map_sz + 3);
+ /* last "3" is for outside tuple */
t = TUPLE2(hp, AM_minor_gcs, make_small(GEN_GCS(rp))); hp += 3;
res = CONS(hp, t, NIL); hp += 2;
@@ -1458,9 +1473,40 @@ process_info_aux(Process *BIF_P,
res = CONS(hp, t, res); hp += 2;
t = TUPLE2(hp, am_min_bin_vheap_size, make_small(MIN_VHEAP_SIZE(rp))); hp += 3;
res = CONS(hp, t, res); hp += 2;
+
+ t = erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp), MAX_HEAP_SIZE_FLAGS_GET(rp), &hp, NULL);
+
+ t = TUPLE2(hp, am_max_heap_size, t); hp += 3;
+ res = CONS(hp, t, res); hp += 2;
break;
}
+ case am_garbage_collection_info: {
+ Uint sz = 0, actual_sz = 0;
+
+ if (rp == BIF_P) {
+ sz += ERTS_PROCESS_GC_INFO_MAX_SIZE;
+ } else {
+ erts_process_gc_info(rp, &sz, NULL, 0, 0);
+ sz += 3;
+ }
+
+ hp = HAlloc(BIF_P, sz);
+ res = erts_process_gc_info(rp, &actual_sz, &hp, 0, 0);
+
+ /* We may have some extra space, fill with 0 tuples */
+ if (actual_sz <= sz - 3) {
+ for (; actual_sz < sz - 3; hp++, actual_sz++)
+ hp[0] = make_arityval(0);
+ } else {
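+ /* No room left in this block for the outer 2-tuple; pad it out
+  * completely and allocate the tuple separately. */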
+ for (; actual_sz < sz; hp++, actual_sz++)
+ hp[0] = make_arityval(0);
+ hp = HAlloc(BIF_P, 3);
+ }
+
+ break;
+ }
+
case am_group_leader: {
int sz = NC_HEAP_SIZE(rp->group_leader);
hp = HAlloc(BIF_P, 3 + sz);
@@ -1515,7 +1561,7 @@ process_info_aux(Process *BIF_P,
}
case am_last_calls: {
- struct saved_calls *scb = ERTS_PROC_GET_SAVED_CALLS_BUF(BIF_P);
+ struct saved_calls *scb = ERTS_PROC_GET_SAVED_CALLS_BUF(rp);
if (!scb) {
hp = HAlloc(BIF_P, 3);
res = am_false;
@@ -1545,9 +1591,9 @@ process_info_aux(Process *BIF_P,
term = am_timeout;
else {
term = TUPLE3(hp,
- scb->ct[j]->code[0],
- scb->ct[j]->code[1],
- make_small(scb->ct[j]->code[2]));
+ scb->ct[j]->info.mfa.module,
+ scb->ct[j]->info.mfa.function,
+ make_small(scb->ct[j]->info.mfa.arity));
hp += 4;
}
list = CONS(hp, term, list);
@@ -1562,6 +1608,30 @@ process_info_aux(Process *BIF_P,
break;
}
+ case am_message_queue_data:
+ switch (rp->flags & (F_OFF_HEAP_MSGQ|F_ON_HEAP_MSGQ)) {
+ case F_OFF_HEAP_MSGQ:
+ res = am_off_heap;
+ break;
+ case F_ON_HEAP_MSGQ:
+ res = am_on_heap;
+ break;
+ default:
+ res = am_error;
+ ERTS_INTERNAL_ERROR("Inconsistent message queue management state");
+ break;
+ }
+ hp = HAlloc(BIF_P, 3);
+ break;
+
+ case am_magic_ref: {
+ Uint sz = 3;
+ (void) bld_magic_ref_bin_list(NULL, &sz, &MSO(rp));
+ hp = HAlloc(BIF_P, sz);
+ res = bld_magic_ref_bin_list(&hp, NULL, &MSO(rp));
+ break;
+ }
+
default:
return THE_NON_VALUE; /* will produce badarg */
@@ -1580,10 +1650,10 @@ current_function(Process* BIF_P, Process* rp, Eterm** hpp, int full_info)
if (rp->current == NULL) {
erts_lookup_function_info(&fi, rp->i, full_info);
- rp->current = fi.current;
+ rp->current = fi.mfa;
} else if (full_info) {
erts_lookup_function_info(&fi, rp->i, full_info);
- if (fi.current == NULL) {
+ if (fi.mfa == NULL) {
/* Use the current function without location info */
erts_set_current_function(&fi, rp->current);
}
@@ -1599,9 +1669,9 @@ current_function(Process* BIF_P, Process* rp, Eterm** hpp, int full_info)
* instead if it can be looked up.
*/
erts_lookup_function_info(&fi2, rp->cp, full_info);
- if (fi2.current) {
+ if (fi2.mfa) {
fi = fi2;
- rp->current = fi2.current;
+ rp->current = fi2.mfa;
}
}
@@ -1616,8 +1686,9 @@ current_function(Process* BIF_P, Process* rp, Eterm** hpp, int full_info)
hp = erts_build_mfa_item(&fi, hp, am_true, &res);
} else {
hp = HAlloc(BIF_P, 3+4);
- res = TUPLE3(hp, rp->current[0],
- rp->current[1], make_small(rp->current[2]));
+ res = TUPLE3(hp, rp->current->module,
+ rp->current->function,
+ make_small(rp->current->arity));
hp += 4;
}
*hpp = hp;
@@ -1638,11 +1709,11 @@ current_stacktrace(Process* p, Process* rp, Eterm** hpp)
Eterm mfa;
Eterm res = NIL;
- depth = 8;
+ depth = erts_backtrace_depth;
sz = offsetof(struct StackTrace, trace) + sizeof(BeamInstr *)*depth;
s = (struct StackTrace *) erts_alloc(ERTS_ALC_T_TMP, sz);
s->depth = 0;
- if (rp->i) {
+ if (depth > 0 && rp->i) {
s->trace[s->depth++] = rp->i;
depth--;
}
@@ -1658,7 +1729,7 @@ current_stacktrace(Process* p, Process* rp, Eterm** hpp)
heap_size = 3;
for (i = 0; i < depth; i++) {
erts_lookup_function_info(stkp, s->trace[i], 1);
- if (stkp->current) {
+ if (stkp->mfa) {
heap_size += stkp->needed + 2;
stkp++;
}
@@ -1736,12 +1807,12 @@ info_1_tuple(Process* BIF_P, /* Pointer to current process. */
if (arity == 2) {
Eterm res = THE_NON_VALUE;
char *buf;
- int len = is_string(*tp);
+ Sint len = is_string(*tp);
if (len <= 0)
return res;
buf = (char *) erts_alloc(ERTS_ALC_T_TMP, len+1);
if (intlist_to_buf(*tp, buf, len) != len)
- erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
+ erts_exit(ERTS_ERROR_EXIT, "%s:%d: Internal error\n", __FILE__, __LINE__);
buf[len] = '\0';
res = erts_instr_dump_memory_map(buf) ? am_true : am_false;
erts_free(ERTS_ALC_T_TMP, (void *) buf);
@@ -1755,12 +1826,12 @@ info_1_tuple(Process* BIF_P, /* Pointer to current process. */
else {
Eterm res = THE_NON_VALUE;
char *buf;
- int len = is_string(tp[1]);
+ Sint len = is_string(tp[1]);
if (len <= 0)
return res;
buf = (char *) erts_alloc(ERTS_ALC_T_TMP, len+1);
if (intlist_to_buf(tp[1], buf, len) != len)
- erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
+ erts_exit(ERTS_ERROR_EXIT, "%s:%d: Internal error\n", __FILE__, __LINE__);
buf[len] = '\0';
res = erts_instr_dump_stat(buf, 1) ? am_true : am_false;
erts_free(ERTS_ALC_T_TMP, (void *) buf);
@@ -2045,27 +2116,20 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
Uint arity = *tp++;
return info_1_tuple(BIF_P, tp, arityval(arity));
} else if (BIF_ARG_1 == am_scheduler_id) {
-#ifdef ERTS_SMP
- ASSERT(BIF_P->scheduler_data);
- BIF_RET(make_small(BIF_P->scheduler_data->no));
-#else
- BIF_RET(make_small(1));
-#endif
+ ErtsSchedulerData *esdp = erts_proc_sched_data(BIF_P);
+ BIF_RET(make_small(esdp->no));
} else if (BIF_ARG_1 == am_compat_rel) {
ASSERT(erts_compat_rel > 0);
BIF_RET(make_small(erts_compat_rel));
} else if (BIF_ARG_1 == am_multi_scheduling) {
-#ifndef ERTS_SMP
- BIF_RET(am_disabled);
-#else
- if (erts_no_schedulers == 1)
- BIF_RET(am_disabled);
- else {
- BIF_RET(erts_is_multi_scheduling_blocked()
- ? am_blocked
- : am_enabled);
+ {
+ int msb = erts_is_multi_scheduling_blocked();
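+ /* msb == 0: not blocked, msb > 0: blocked, msb < 0: only normal
+  * schedulers blocked (reported as blocked_normal below). */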
+ BIF_RET(!msb
+ ? am_enabled
+ : (msb > 0
+ ? am_blocked
+ : am_blocked_normal));
}
-#endif
} else if (BIF_ARG_1 == am_build_type) {
#if defined(DEBUG)
ERTS_DECL_AM(debug);
@@ -2098,6 +2162,50 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(am_opt);
#endif
BIF_RET(res);
+ } else if (BIF_ARG_1 == am_time_offset) {
+ switch (erts_time_offset_state()) {
+ case ERTS_TIME_OFFSET_PRELIMINARY: {
+ ERTS_DECL_AM(preliminary);
+ BIF_RET(AM_preliminary);
+ }
+ case ERTS_TIME_OFFSET_FINAL: {
+ ERTS_DECL_AM(final);
+ BIF_RET(AM_final);
+ }
+ case ERTS_TIME_OFFSET_VOLATILE: {
+ ERTS_DECL_AM(volatile);
+ BIF_RET(AM_volatile);
+ }
+ default:
+ ERTS_INTERNAL_ERROR("Invalid time offset state");
+ }
+ } else if (ERTS_IS_ATOM_STR("os_monotonic_time_source", BIF_ARG_1)) {
+ BIF_RET(erts_monotonic_time_source(BIF_P));
+ } else if (ERTS_IS_ATOM_STR("os_system_time_source", BIF_ARG_1)) {
+ BIF_RET(erts_system_time_source(BIF_P));
+ } else if (ERTS_IS_ATOM_STR("time_correction", BIF_ARG_1)) {
+ BIF_RET(erts_has_time_correction() ? am_true : am_false);
+ } else if (ERTS_IS_ATOM_STR("start_time", BIF_ARG_1)) {
+ BIF_RET(erts_get_monotonic_start_time(BIF_P));
+ } else if (ERTS_IS_ATOM_STR("end_time", BIF_ARG_1)) {
+ BIF_RET(erts_get_monotonic_end_time(BIF_P));
+ } else if (ERTS_IS_ATOM_STR("time_warp_mode", BIF_ARG_1)) {
+ switch (erts_time_warp_mode()) {
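+ /* Maps the emulator's +C setting to one of the atoms
+  * no_time_warp | single_time_warp | multi_time_warp. */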
+ case ERTS_NO_TIME_WARP_MODE: {
+ ERTS_DECL_AM(no_time_warp);
+ BIF_RET(AM_no_time_warp);
+ }
+ case ERTS_SINGLE_TIME_WARP_MODE: {
+ ERTS_DECL_AM(single_time_warp);
+ BIF_RET(AM_single_time_warp);
+ }
+ case ERTS_MULTI_TIME_WARP_MODE: {
+ ERTS_DECL_AM(multi_time_warp);
+ BIF_RET(AM_multi_time_warp);
+ }
+ default:
+ ERTS_INTERNAL_ERROR("Invalid time warp mode");
+ }
} else if (BIF_ARG_1 == am_allocated_areas) {
res = erts_allocated_areas(NULL, NULL, BIF_P);
BIF_RET(res);
@@ -2125,15 +2233,15 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
res = build_snifs_term(&hp, NULL, NIL);
BIF_RET(res);
} else if (BIF_ARG_1 == am_sequential_tracer) {
- val = erts_get_system_seq_tracer();
- ASSERT(is_internal_pid(val) || is_internal_port(val) || val==am_false);
+ ErtsTracer seq_tracer = erts_get_system_seq_tracer();
+ val = erts_tracer_to_term(BIF_P, seq_tracer);
hp = HAlloc(BIF_P, 3);
res = TUPLE2(hp, am_sequential_tracer, val);
BIF_RET(res);
} else if (BIF_ARG_1 == am_garbage_collection){
- Uint val = (Uint) erts_smp_atomic32_read_nob(&erts_max_gen_gcs);
+ Uint val = (Uint) erts_atomic32_read_nob(&erts_max_gen_gcs);
Eterm tup;
- hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2);
+ hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2 + 3+2);
tup = TUPLE2(hp, am_fullsweep_after, make_small(val)); hp += 3;
res = CONS(hp, tup, NIL); hp += 2;
@@ -2144,9 +2252,12 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
tup = TUPLE2(hp, am_min_bin_vheap_size, make_small(BIN_VH_MIN_SIZE)); hp += 3;
res = CONS(hp, tup, res); hp += 2;
+ tup = TUPLE2(hp, am_max_heap_size, make_small(H_MAX_SIZE)); hp += 3;
+ res = CONS(hp, tup, res); hp += 2;
+
BIF_RET(res);
} else if (BIF_ARG_1 == am_fullsweep_after){
- Uint val = (Uint) erts_smp_atomic32_read_nob(&erts_max_gen_gcs);
+ Uint val = (Uint) erts_atomic32_read_nob(&erts_max_gen_gcs);
hp = HAlloc(BIF_P, 3);
res = TUPLE2(hp, am_fullsweep_after, make_small(val));
BIF_RET(res);
@@ -2154,6 +2265,12 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
hp = HAlloc(BIF_P, 3);
res = TUPLE2(hp, am_min_heap_size,make_small(H_MIN_SIZE));
BIF_RET(res);
+ } else if (BIF_ARG_1 == am_max_heap_size) {
+ Uint sz = 0;
+ erts_max_heap_size_map(H_MAX_SIZE, H_MAX_FLAGS, NULL, &sz);
+ hp = HAlloc(BIF_P, sz);
+ res = erts_max_heap_size_map(H_MAX_SIZE, H_MAX_FLAGS, &hp, NULL);
+ BIF_RET(res);
} else if (BIF_ARG_1 == am_min_bin_vheap_size) {
hp = HAlloc(BIF_P, 3);
res = TUPLE2(hp, am_min_bin_vheap_size,make_small(BIN_VH_MIN_SIZE));
@@ -2173,8 +2290,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
erts_dsprintf_buf_t *dsbufp = erts_create_info_dsbuf(0);
/* Need to be the only thread running... */
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
if (BIF_ARG_1 == am_info)
info(ERTS_PRINT_DSBUF, (void *) dsbufp);
@@ -2185,8 +2302,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
else
distribution_info(ERTS_PRINT_DSBUF, (void *) dsbufp);
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
ASSERT(dsbufp && dsbufp->str);
res = new_binary(BIF_P, (byte *) dsbufp->str, dsbufp->str_len);
@@ -2195,9 +2312,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
} else if (ERTS_IS_ATOM_STR("dist_ctrl", BIF_ARG_1)) {
DistEntry *dep;
i = 0;
- /* Need to be the only thread running... */
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_rwmtx_rlock(&erts_dist_table_rwmtx);
for (dep = erts_visible_dist_entries; dep; dep = dep->next)
++i;
for (dep = erts_hidden_dist_entries; dep; dep = dep->next)
@@ -2220,8 +2335,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
res = CONS(hp, tpl, res);
hp += 2;
}
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_rwmtx_runlock(&erts_dist_table_rwmtx);
BIF_RET(res);
} else if (BIF_ARG_1 == am_system_version) {
erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
@@ -2247,16 +2361,10 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(erts_allocator_options((void *) BIF_P));
}
else if (BIF_ARG_1 == am_thread_pool_size) {
-#ifdef USE_THREADS
extern int erts_async_max_threads;
-#endif
int n;
-#ifdef USE_THREADS
n = erts_async_max_threads;
-#else
- n = 0;
-#endif
BIF_RET(make_small(n));
}
else if (BIF_ARG_1 == am_alloc_util_allocators) {
@@ -2300,12 +2408,12 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
for (i = num_instructions-1; i >= 0; i--) {
res = erts_bld_cons(hpp, hszp,
erts_bld_tuple(hpp, hszp, 2,
- erts_atom_put(opc[i].name,
+ erts_atom_put((byte *)opc[i].name,
strlen(opc[i].name),
ERTS_ATOM_ENC_LATIN1,
1),
erts_bld_uint(hpp, hszp,
- opc[i].count)),
+ erts_instr_count[i])),
res);
}
@@ -2324,7 +2432,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
#endif
BIF_RET(res);
-#endif /* #ifndef ERTS_SMP */
+#endif /* #ifndef ERTS_OPCODE_COUNTER_SUPPORT */
} else if (BIF_ARG_1 == am_wordsize) {
return make_small(sizeof(Eterm));
} else if (BIF_ARG_1 == am_endian) {
@@ -2404,11 +2512,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(res);
#endif
} else if (BIF_ARG_1 == am_threads) {
-#ifdef USE_THREADS
return am_true;
-#else
- return am_false;
-#endif
} else if (BIF_ARG_1 == am_creation) {
return make_small(erts_this_node->creation);
} else if (BIF_ARG_1 == am_break_ignored) {
@@ -2459,12 +2563,15 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
ERL_DRV_EXTENDED_MINOR_VERSION);
hp = HAlloc(BIF_P, 2*n);
BIF_RET(buf_to_intlist(&hp, buf, n, NIL));
+ } else if (ERTS_IS_ATOM_STR("nif_version", BIF_ARG_1)) {
+ char buf[42];
+ int n = erts_snprintf(buf, 42, "%d.%d",
+ ERL_NIF_MAJOR_VERSION,
+ ERL_NIF_MINOR_VERSION);
+ hp = HAlloc(BIF_P, 2*n);
+ BIF_RET(buf_to_intlist(&hp, buf, n, NIL));
} else if (ERTS_IS_ATOM_STR("smp_support", BIF_ARG_1)) {
-#ifdef ERTS_SMP
BIF_RET(am_true);
-#else
- BIF_RET(am_false);
-#endif
} else if (ERTS_IS_ATOM_STR("scheduler_bind_type", BIF_ARG_1)) {
BIF_RET(erts_bound_schedulers_term(BIF_P));
} else if (ERTS_IS_ATOM_STR("scheduler_bindings", BIF_ARG_1)) {
@@ -2476,84 +2583,93 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
res = make_small(erts_no_schedulers);
BIF_RET(res);
} else if (ERTS_IS_ATOM_STR("schedulers_state", BIF_ARG_1)) {
-#ifndef ERTS_SMP
- Eterm *hp = HAlloc(BIF_P, 4);
- res = TUPLE3(hp, make_small(1), make_small(1), make_small(1));
+ Eterm *hp;
+ Uint total, online, active;
+ erts_schedulers_state(&total, &online, &active,
+ NULL, NULL, NULL, NULL, NULL);
+ hp = HAlloc(BIF_P, 4);
+ res = TUPLE3(hp,
+ make_small(total),
+ make_small(online),
+ make_small(active));
BIF_RET(res);
-#else
+ } else if (ERTS_IS_ATOM_STR("schedulers_state", BIF_ARG_1)) {
+ Eterm *hp;
Uint total, online, active;
- switch (erts_schedulers_state(&total,
- &online,
- &active,
- NULL,
- NULL,
- NULL,
- 1)) {
- case ERTS_SCHDLR_SSPND_DONE: {
- Eterm *hp = HAlloc(BIF_P, 4);
- res = TUPLE3(hp,
- make_small(total),
- make_small(online),
- make_small(active));
- BIF_RET(res);
+ erts_schedulers_state(&total, &online, &active,
+ NULL, NULL, NULL, NULL, NULL);
+ hp = HAlloc(BIF_P, 4);
+ res = TUPLE3(hp,
+ make_small(total),
+ make_small(online),
+ make_small(active));
+ BIF_RET(res);
+ } else if (ERTS_IS_ATOM_STR("all_schedulers_state", BIF_ARG_1)) {
+ Eterm *hp, tpl;
+ Uint sz, total, online, active,
+ dirty_cpu_total, dirty_cpu_online, dirty_cpu_active,
+ dirty_io_total, dirty_io_active;
+ erts_schedulers_state(&total, &online, &active,
+ &dirty_cpu_total, &dirty_cpu_online, &dirty_cpu_active,
+ &dirty_io_total, &dirty_io_active);
+
+ sz = 2+5;
+ if (dirty_cpu_total)
+ sz += 2+5;
+ if (dirty_io_total)
+ sz += 2+5;
+
+ hp = HAlloc(BIF_P, sz);
+
+ res = NIL;
+ if (dirty_io_total) {
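+ /* Dirty IO schedulers cannot be taken offline; online equals
+  * total, hence dirty_io_total appears twice below. */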
+ tpl = TUPLE4(hp,
+ am_dirty_io,
+ make_small(dirty_io_total),
+ make_small(dirty_io_total),
+ make_small(dirty_io_active));
+ hp += 5;
+ res = CONS(hp, tpl, res);
+ hp += 2;
}
- case ERTS_SCHDLR_SSPND_YIELD_RESTART:
- ERTS_VBUMP_ALL_REDS(BIF_P);
- BIF_TRAP1(bif_export[BIF_system_info_1],
- BIF_P, BIF_ARG_1);
- default:
- ASSERT(0);
- BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
+ if (dirty_cpu_total) {
+ tpl = TUPLE4(hp,
+ am_dirty_cpu,
+ make_small(dirty_cpu_total),
+ make_small(dirty_cpu_online),
+ make_small(dirty_cpu_active));
+ hp += 5;
+ res = CONS(hp, tpl, res);
+ hp += 2;
}
-#endif
+ tpl = TUPLE4(hp,
+ am_normal,
+ make_small(total),
+ make_small(online),
+ make_small(active));
+ hp += 5;
+ res = CONS(hp, tpl, res);
+ BIF_RET(res);
} else if (ERTS_IS_ATOM_STR("schedulers_online", BIF_ARG_1)) {
-#ifndef ERTS_SMP
- BIF_RET(make_small(1));
-#else
- Uint total, online, active;
- switch (erts_schedulers_state(&total, &online, &active, NULL, NULL, NULL, 1)) {
- case ERTS_SCHDLR_SSPND_DONE:
- BIF_RET(make_small(online));
- case ERTS_SCHDLR_SSPND_YIELD_RESTART:
- ERTS_VBUMP_ALL_REDS(BIF_P);
- BIF_TRAP1(bif_export[BIF_system_info_1],
- BIF_P, BIF_ARG_1);
- default:
- ASSERT(0);
- BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
- }
-#endif
+ Uint online;
+ erts_schedulers_state(NULL, &online, NULL, NULL, NULL, NULL, NULL, NULL);
+ BIF_RET(make_small(online));
} else if (ERTS_IS_ATOM_STR("schedulers_active", BIF_ARG_1)) {
-#ifndef ERTS_SMP
- BIF_RET(make_small(1));
-#else
- Uint total, online, active;
- switch (erts_schedulers_state(&total, &online, &active, NULL, NULL, NULL, 1)) {
- case ERTS_SCHDLR_SSPND_DONE:
- BIF_RET(make_small(active));
- case ERTS_SCHDLR_SSPND_YIELD_RESTART:
- ERTS_VBUMP_ALL_REDS(BIF_P);
- BIF_TRAP1(bif_export[BIF_system_info_1],
- BIF_P, BIF_ARG_1);
- default:
- ASSERT(0);
- BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
- }
-#endif
-#if defined(ERTS_SMP) && defined(ERTS_DIRTY_SCHEDULERS)
+ Uint active;
+ erts_schedulers_state(NULL, NULL, &active, NULL, NULL, NULL, NULL, NULL);
+ BIF_RET(make_small(active));
} else if (ERTS_IS_ATOM_STR("dirty_cpu_schedulers", BIF_ARG_1)) {
Uint dirty_cpu;
- erts_schedulers_state(NULL, NULL, NULL, &dirty_cpu, NULL, NULL, 1);
+ erts_schedulers_state(NULL, NULL, NULL, &dirty_cpu, NULL, NULL, NULL, NULL);
BIF_RET(make_small(dirty_cpu));
} else if (ERTS_IS_ATOM_STR("dirty_cpu_schedulers_online", BIF_ARG_1)) {
Uint dirty_cpu_onln;
- erts_schedulers_state(NULL, NULL, NULL, NULL, &dirty_cpu_onln, NULL, 1);
+ erts_schedulers_state(NULL, NULL, NULL, NULL, &dirty_cpu_onln, NULL, NULL, NULL);
BIF_RET(make_small(dirty_cpu_onln));
} else if (ERTS_IS_ATOM_STR("dirty_io_schedulers", BIF_ARG_1)) {
Uint dirty_io;
- erts_schedulers_state(NULL, NULL, NULL, NULL, NULL, &dirty_io, 1);
+ erts_schedulers_state(NULL, NULL, NULL, NULL, NULL, NULL, &dirty_io, NULL);
BIF_RET(make_small(dirty_io));
-#endif
} else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) {
res = make_small(erts_no_run_queues);
BIF_RET(res);
@@ -2574,7 +2690,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(make_small(CONTEXT_REDS));
} else if (ERTS_IS_ATOM_STR("kernel_poll", BIF_ARG_1)) {
#ifdef ERTS_ENABLE_KERNEL_POLL
- BIF_RET(erts_use_kernel_poll ? am_true : am_false);
+ BIF_RET(am_true);
#else
BIF_RET(am_false);
#endif
@@ -2599,14 +2715,15 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
} else if (ERTS_IS_ATOM_STR("check_io", BIF_ARG_1)) {
BIF_RET(erts_check_io_info(BIF_P));
} else if (ERTS_IS_ATOM_STR("multi_scheduling_blockers", BIF_ARG_1)) {
-#ifndef ERTS_SMP
- BIF_RET(NIL);
-#else
if (erts_no_schedulers == 1)
BIF_RET(NIL);
else
- BIF_RET(erts_multi_scheduling_blockers(BIF_P));
-#endif
+ BIF_RET(erts_multi_scheduling_blockers(BIF_P, 0));
+ } else if (ERTS_IS_ATOM_STR("normal_multi_scheduling_blockers", BIF_ARG_1)) {
+ if (erts_no_schedulers == 1)
+ BIF_RET(NIL);
+ else
+ BIF_RET(erts_multi_scheduling_blockers(BIF_P, 1));
} else if (ERTS_IS_ATOM_STR("modified_timing_level", BIF_ARG_1)) {
BIF_RET(ERTS_USE_MODIFIED_TIMING()
? make_small(erts_modified_timing_level)
@@ -2630,6 +2747,15 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
res = erts_bld_uint(&hp, NULL, erts_dist_buf_busy_limit);
BIF_RET(res);
+ } else if (ERTS_IS_ATOM_STR("delayed_node_table_gc", BIF_ARG_1)) {
+ Uint hsz = 0;
+ Uint dntgc = erts_delayed_node_table_gc();
+ if (dntgc == ERTS_NODE_TAB_DELAY_GC_INFINITY)
+ BIF_RET(am_infinity);
+ (void) erts_bld_uint(NULL, &hsz, dntgc);
+ hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
+ res = erts_bld_uint(&hp, NULL, dntgc);
+ BIF_RET(res);
} else if (ERTS_IS_ATOM_STR("ethread_info", BIF_ARG_1)) {
BIF_RET(erts_get_ethread_info(BIF_P));
}
@@ -2646,6 +2772,9 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
#elif defined(USE_SYSTEMTAP)
DECL_AM(systemtap);
BIF_RET(AM_systemtap);
+#elif defined(USE_LTTNG)
+ DECL_AM(lttng);
+ BIF_RET(AM_lttng);
#else
BIF_RET(am_none);
#endif
@@ -2657,12 +2786,21 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(am_false);
#endif
}
-#ifdef ERTS_SMP
else if (ERTS_IS_ATOM_STR("thread_progress", BIF_ARG_1)) {
erts_thr_progress_dbg_print_state();
BIF_RET(am_true);
}
-#endif
+ else if (BIF_ARG_1 == am_message_queue_data) {
+ switch (erts_default_spo_flags & (SPO_ON_HEAP_MSGQ|SPO_OFF_HEAP_MSGQ)) {
+ case SPO_OFF_HEAP_MSGQ:
+ BIF_RET(am_off_heap);
+ case SPO_ON_HEAP_MSGQ:
+ BIF_RET(am_on_heap);
+ default:
+ ERTS_INTERNAL_ERROR("Inconsistent message queue management state");
+ BIF_RET(am_error);
+ }
+ }
else if (ERTS_IS_ATOM_STR("compile_info",BIF_ARG_1)) {
Uint sz;
Eterm res = NIL, tup, text;
@@ -2691,6 +2829,36 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
else if (ERTS_IS_ATOM_STR("ets_limit",BIF_ARG_1)) {
BIF_RET(make_small(erts_db_get_max_tabs()));
}
+ else if (ERTS_IS_ATOM_STR("atom_limit",BIF_ARG_1)) {
+ BIF_RET(make_small(erts_get_atom_limit()));
+ }
+ else if (ERTS_IS_ATOM_STR("atom_count",BIF_ARG_1)) {
+ BIF_RET(make_small(atom_table_size()));
+ }
+ else if (ERTS_IS_ATOM_STR("tolerant_timeofday",BIF_ARG_1)) {
+ if (erts_has_time_correction()
+ && erts_time_offset_state() == ERTS_TIME_OFFSET_FINAL) {
+ BIF_RET(am_enabled);
+ }
+ BIF_RET(am_disabled);
+ }
+ else if (ERTS_IS_ATOM_STR("eager_check_io",BIF_ARG_1)) {
+ BIF_RET(am_true);
+ }
+ else if (ERTS_IS_ATOM_STR("literal_test",BIF_ARG_1)) {
+#ifdef ERTS_HAVE_IS_IN_LITERAL_RANGE
+#ifdef ARCH_64
+ DECL_AM(range);
+ BIF_RET(AM_range);
+#else /* ARCH_32 */
+ DECL_AM(range_bitmask);
+ BIF_RET(AM_range_bitmask);
+#endif /* ARCH_32 */
+#else /* ! ERTS_HAVE_IS_IN_LITERAL_RANGE */
+ DECL_AM(tag);
+ BIF_RET(AM_tag);
+#endif
+ }
BIF_ERROR(BIF_P, BADARG);
}
@@ -2708,11 +2876,12 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
*/
Eterm
-erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt, Eterm item)
+erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
+ Eterm item)
{
Eterm res = THE_NON_VALUE;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
if (item == am_id) {
if (hpp)
@@ -2737,7 +2906,7 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt, Eterm ite
if (hpp) {
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
- item = STORE_NC(hpp, ohp, mic.mi[i].entity);
+ item = STORE_NC(hpp, ohp, mic.mi[i].entity.term);
res = CONS(*hpp, item, res);
*hpp += 2;
}
@@ -2756,8 +2925,8 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt, Eterm ite
Eterm item;
INIT_MONITOR_INFOS(mic);
-
- erts_doforall_monitors(ERTS_P_MONITORS(prt), &collect_one_origin_monitor, &mic);
+ erts_doforall_monitors(ERTS_P_MONITORS(prt),
+ &collect_one_origin_monitor, &mic);
if (szp)
*szp += mic.sz;
@@ -2766,14 +2935,16 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt, Eterm ite
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
Eterm t;
- item = STORE_NC(hpp, ohp, mic.mi[i].entity);
- t = TUPLE2(*hpp, am_process, item);
+ Eterm m_type;
+
+ item = STORE_NC(hpp, ohp, mic.mi[i].entity.term);
+ m_type = is_port(item) ? am_port : am_process;
+ t = TUPLE2(*hpp, m_type, item);
*hpp += 3;
res = CONS(*hpp, t, res);
*hpp += 2;
}
- }
-
+ } /* hpp */
DESTROY_MONITOR_INFOS(mic);
if (szp) {
@@ -2781,6 +2952,33 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt, Eterm ite
goto done;
}
}
+ else if (item == am_monitored_by) {
+ MonitorInfoCollection mic;
+ int i;
+ Eterm item;
+
+ INIT_MONITOR_INFOS(mic);
+ erts_doforall_monitors(ERTS_P_MONITORS(prt),
+ &collect_one_target_monitor, &mic);
+ if (szp)
+ *szp += mic.sz;
+
+ if (hpp) {
+ res = NIL;
+ for (i = 0; i < mic.mi_i; ++i) {
+ ASSERT(mic.mi[i].node == NIL);
+ item = STORE_NC(hpp, ohp, mic.mi[i].entity.term);
+ res = CONS(*hpp, item, res);
+ *hpp += 2;
+ }
+ } /* hpp */
+ DESTROY_MONITOR_INFOS(mic);
+
+ if (szp) {
+ res = am_true;
+ goto done;
+ }
+ }
else if (item == am_name) {
int count = sys_strlen(prt->name);
@@ -2874,9 +3072,6 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt, Eterm ite
}
else if (ERTS_IS_ATOM_STR("locking", item)) {
if (hpp) {
-#ifndef ERTS_SMP
- res = am_false;
-#else
if (erts_atomic32_read_nob(&prt->state)
& ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) {
DECL_AM(port_level);
@@ -2890,7 +3085,6 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt, Eterm ite
& ERL_DRV_FLAG_USE_PORT_LOCKING));
res = AM_driver_level;
}
-#endif
}
if (szp) {
res = am_true;
@@ -2903,7 +3097,7 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt, Eterm ite
goto done;
}
res = ((ERTS_PTS_FLG_PARALLELISM &
- erts_smp_atomic32_read_nob(&prt->sched.flags))
+ erts_atomic32_read_nob(&prt->sched.flags))
? am_true
: am_false);
}
@@ -2979,7 +3173,7 @@ fun_info_2(BIF_ALIST_2)
}
break;
case am_refc:
- val = erts_make_integer(erts_smp_atomic_read_nob(&funp->fe->refc), p);
+ val = erts_make_integer(erts_atomic_read_nob(&funp->fe->refc), p);
hp = HAlloc(p, 3);
break;
case am_arity:
@@ -3006,7 +3200,7 @@ fun_info_2(BIF_ALIST_2)
break;
case am_module:
hp = HAlloc(p, 3);
- val = exp->code[0];
+ val = exp->info.mfa.module;
break;
case am_new_index:
hp = HAlloc(p, 3);
@@ -3034,11 +3228,11 @@ fun_info_2(BIF_ALIST_2)
break;
case am_arity:
hp = HAlloc(p, 3);
- val = make_small(exp->code[2]);
+ val = make_small(exp->info.mfa.arity);
break;
case am_name:
hp = HAlloc(p, 3);
- val = exp->code[1];
+ val = exp->info.mfa.function;
break;
default:
goto error;
@@ -3050,6 +3244,27 @@ fun_info_2(BIF_ALIST_2)
return TUPLE2(hp, what, val);
}
+BIF_RETTYPE
+fun_info_mfa_1(BIF_ALIST_1)
+{
+ Process* p = BIF_P;
+ Eterm fun = BIF_ARG_1;
+ Eterm* hp;
+
+ if (is_fun(fun)) {
+ ErlFunThing* funp = (ErlFunThing *) fun_val(fun);
+ hp = HAlloc(p, 4);
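+ /* fe->address points just past the fun's func_info header, so
+  * address[-2] is the function-name atom used as the F in {M,F,A}. */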
+ BIF_RET(TUPLE3(hp,funp->fe->module,funp->fe->address[-2],make_small(funp->arity)));
+ } else if (is_export(fun)) {
+ Export* exp = (Export *) ((UWord) (export_val(fun))[1]);
+ hp = HAlloc(p, 4);
+ BIF_RET(TUPLE3(hp,exp->info.mfa.module,
+ exp->info.mfa.function,
+ make_small(exp->info.mfa.arity)));
+ }
+ BIF_ERROR(p, BADARG);
+}
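+/* Illustrative use (name as registered in bif.tab assumed):
+ *   erlang:fun_info_mfa(fun lists:sort/1) -> {lists,sort,1}
+ * Works for both local funs and external (fun M:F/A) funs. */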
+
BIF_RETTYPE is_process_alive_1(BIF_ALIST_1)
{
if(is_internal_pid(BIF_ARG_1)) {
@@ -3063,7 +3278,7 @@ BIF_RETTYPE is_process_alive_1(BIF_ALIST_1)
BIF_RET(am_false);
}
else {
- if (erts_smp_atomic32_read_acqb(&rp->state)
+ if (erts_atomic32_read_acqb(&rp->state)
& (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING))
ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_false);
else
@@ -3098,7 +3313,7 @@ BIF_RETTYPE process_display_2(BIF_ALIST_2)
BIF_ARG_1, BIF_ARG_2);
if (rp != BIF_P && ERTS_PROC_PENDING_EXIT(rp)) {
Eterm args[2] = {BIF_ARG_1, BIF_ARG_2};
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCKS_ALL);
+ erts_proc_unlock(rp, ERTS_PROC_LOCKS_ALL);
ERTS_BIF_AWAIT_X_APPLY_TRAP(BIF_P,
BIF_ARG_1,
am_erlang,
@@ -3107,15 +3322,12 @@ BIF_RETTYPE process_display_2(BIF_ALIST_2)
2);
}
erts_stack_dump(ERTS_PRINT_STDERR, NULL, rp);
-#ifdef ERTS_SMP
- erts_smp_proc_unlock(rp, (BIF_P == rp
+ erts_proc_unlock(rp, (BIF_P == rp
? ERTS_PROC_LOCKS_ALL_MINOR
: ERTS_PROC_LOCKS_ALL));
-#endif
BIF_RET(am_true);
}
-
/* this is a general call which return some possibly useful information */
BIF_RETTYPE statistics_1(BIF_ALIST_1)
@@ -3124,10 +3336,70 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1)
Eterm* hp;
if (BIF_ARG_1 == am_scheduler_wall_time) {
- res = erts_sched_wall_time_request(BIF_P, 0, 0);
+ res = erts_sched_wall_time_request(BIF_P, 0, 0, 1, 0);
+ if (is_non_value(res))
+ BIF_RET(am_undefined);
+ BIF_TRAP1(gather_sched_wall_time_res_trap, BIF_P, res);
+ } else if (BIF_ARG_1 == am_scheduler_wall_time_all) {
+ res = erts_sched_wall_time_request(BIF_P, 0, 0, 1, 1);
if (is_non_value(res))
BIF_RET(am_undefined);
BIF_TRAP1(gather_sched_wall_time_res_trap, BIF_P, res);
+ } else if ((BIF_ARG_1 == am_total_active_tasks)
+ | (BIF_ARG_1 == am_total_run_queue_lengths)
+ | (BIF_ARG_1 == am_total_active_tasks_all)
+ | (BIF_ARG_1 == am_total_run_queue_lengths_all)) {
+ Uint no = erts_run_queues_len(NULL, 0,
+ ((BIF_ARG_1 == am_total_active_tasks)
+ | (BIF_ARG_1 == am_total_active_tasks_all)),
+ ((BIF_ARG_1 == am_total_active_tasks_all)
+ | (BIF_ARG_1 == am_total_run_queue_lengths_all)));
+ if (IS_USMALL(0, no))
+ res = make_small(no);
+ else {
+ Eterm *hp = HAlloc(BIF_P, BIG_UINT_HEAP_SIZE);
+ res = uint_to_big(no, hp);
+ }
+ BIF_RET(res);
+ } else if ((BIF_ARG_1 == am_active_tasks)
+ | (BIF_ARG_1 == am_run_queue_lengths)
+ | (BIF_ARG_1 == am_active_tasks_all)
+ | (BIF_ARG_1 == am_run_queue_lengths_all)) {
+ Eterm res, *hp, **hpp;
+ Uint sz, *szp;
+ int incl_dirty_io = ((BIF_ARG_1 == am_active_tasks_all)
+ | (BIF_ARG_1 == am_run_queue_lengths_all));
+ int no_qs = (erts_no_run_queues + ERTS_NUM_DIRTY_CPU_RUNQS +
+ (incl_dirty_io ? ERTS_NUM_DIRTY_IO_RUNQS : 0));
+ Uint *qszs = erts_alloc(ERTS_ALC_T_TMP,sizeof(Uint)*no_qs*2);
+ (void) erts_run_queues_len(qszs, 0,
+ ((BIF_ARG_1 == am_active_tasks)
+ | (BIF_ARG_1 == am_active_tasks_all)),
+ incl_dirty_io);
+ sz = 0;
+ szp = &sz;
+ hpp = NULL;
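+ /* Standard two-pass erts_bld_*() idiom: with hpp == NULL the calls
+  * only accumulate the required heap size in sz; the second pass
+  * then writes the terms into the allocated heap. */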
+ while (1) {
+ int i;
+ for (i = 0; i < no_qs; i++)
+ qszs[no_qs+i] = erts_bld_uint(hpp, szp, qszs[i]);
+ res = erts_bld_list(hpp, szp, no_qs, &qszs[no_qs]);
+ if (hpp) {
+ erts_free(ERTS_ALC_T_TMP, qszs);
+ BIF_RET(res);
+ }
+ hp = HAlloc(BIF_P, sz);
+ szp = NULL;
+ hpp = &hp;
+ }
+#ifdef ERTS_ENABLE_MSACC
+ } else if (BIF_ARG_1 == am_microstate_accounting) {
+ Eterm threads;
+ res = erts_msacc_request(BIF_P, ERTS_MSACC_GATHER, &threads);
+ if (is_non_value(res))
+ BIF_RET(am_undefined);
+ BIF_TRAP2(gather_msacc_res_trap, BIF_P, res, threads);
+#endif
} else if (BIF_ARG_1 == am_context_switches) {
Eterm cs = erts_make_integer(erts_get_total_context_switches(), BIF_P);
hp = HAlloc(BIF_P, 3);
@@ -3167,51 +3439,44 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1)
res = TUPLE2(hp, b1, b2);
BIF_RET(res);
} else if (BIF_ARG_1 == am_runtime) {
- UWord u1, u2, dummy;
+ ErtsMonotonicTime u1, u2;
Eterm b1, b2;
- elapsed_time_both(&u1,&dummy,&u2,&dummy);
- b1 = erts_make_integer(u1,BIF_P);
- b2 = erts_make_integer(u2,BIF_P);
- hp = HAlloc(BIF_P,3);
+ Uint hsz;
+ erts_runtime_elapsed_both(&u1, NULL, &u2, NULL);
+ hsz = 3; /* 2-tuple */
+ (void) erts_bld_monotonic_time(NULL, &hsz, u1);
+ (void) erts_bld_monotonic_time(NULL, &hsz, u2);
+ hp = HAlloc(BIF_P, hsz);
+ b1 = erts_bld_monotonic_time(&hp, NULL, u1);
+ b2 = erts_bld_monotonic_time(&hp, NULL, u2);
res = TUPLE2(hp, b1, b2);
BIF_RET(res);
} else if (BIF_ARG_1 == am_run_queue) {
- res = erts_run_queues_len(NULL);
+ res = erts_run_queues_len(NULL, 1, 0, 0);
BIF_RET(make_small(res));
} else if (BIF_ARG_1 == am_wall_clock) {
- UWord w1, w2;
+ ErtsMonotonicTime w1, w2;
Eterm b1, b2;
- wall_clock_elapsed_time_both(&w1, &w2);
- b1 = erts_make_integer((Uint) w1,BIF_P);
- b2 = erts_make_integer((Uint) w2,BIF_P);
- hp = HAlloc(BIF_P,3);
+ Uint hsz;
+ erts_wall_clock_elapsed_both(&w1, &w2);
+ hsz = 3; /* 2-tuple */
+ (void) erts_bld_monotonic_time(NULL, &hsz, w1);
+ (void) erts_bld_monotonic_time(NULL, &hsz, w2);
+ hp = HAlloc(BIF_P, hsz);
+ b1 = erts_bld_monotonic_time(&hp, NULL, w1);
+ b2 = erts_bld_monotonic_time(&hp, NULL, w2);
res = TUPLE2(hp, b1, b2);
BIF_RET(res);
} else if (BIF_ARG_1 == am_io) {
- Eterm r1, r2;
- Eterm in, out;
- Uint hsz = 9;
- Uint bytes_in = (Uint) erts_smp_atomic_read_nob(&erts_bytes_in);
- Uint bytes_out = (Uint) erts_smp_atomic_read_nob(&erts_bytes_out);
-
- (void) erts_bld_uint(NULL, &hsz, bytes_in);
- (void) erts_bld_uint(NULL, &hsz, bytes_out);
- hp = HAlloc(BIF_P, hsz);
- in = erts_bld_uint(&hp, NULL, bytes_in);
- out = erts_bld_uint(&hp, NULL, bytes_out);
-
- r1 = TUPLE2(hp, am_input, in);
- hp += 3;
- r2 = TUPLE2(hp, am_output, out);
- hp += 3;
- BIF_RET(TUPLE2(hp, r1, r2));
+ Eterm ref = erts_request_io_bytes(BIF_P);
+ BIF_TRAP2(gather_io_bytes_trap, BIF_P, ref, make_small(erts_no_schedulers));
}
else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) {
Eterm res, *hp, **hpp;
Uint sz, *szp;
- int no_qs = erts_no_run_queues;
+ int no_qs = erts_no_run_queues + ERTS_NUM_DIRTY_RUNQS;
Uint *qszs = erts_alloc(ERTS_ALC_T_TMP,sizeof(Uint)*no_qs*2);
- (void) erts_run_queues_len(qszs);
+ (void) erts_run_queues_len(qszs, 0, 0, 1);
sz = 0;
szp = &sz;
hpp = NULL;
@@ -3237,7 +3502,12 @@ BIF_RETTYPE error_logger_warning_map_0(BIF_ALIST_0)
BIF_RET(erts_error_logger_warnings);
}
-static erts_smp_atomic_t available_internal_state;
+static erts_atomic_t available_internal_state;
+
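+/* Destructor for the test-only magic binaries created via the
+ * "magic_ref" debug commands below; there is nothing to clean up. */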
+static int empty_magic_ref_destructor(Binary *bin)
+{
+ return 1;
+}
BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
{
@@ -3245,7 +3515,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
* NOTE: Only supposed to be used for testing, and debugging.
*/
- if (!erts_smp_atomic_read_nob(&available_internal_state)) {
+ if (!erts_atomic_read_nob(&available_internal_state)) {
BIF_ERROR(BIF_P, EXC_UNDEF);
}
@@ -3277,20 +3547,43 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
else if (ERTS_IS_ATOM_STR("DbTable_words", BIF_ARG_1)) {
/* Used by ets_SUITE (stdlib) */
size_t words = (sizeof(DbTable) + sizeof(Uint) - 1)/sizeof(Uint);
- BIF_RET(make_small((Uint) words));
+ Eterm* hp = HAlloc(BIF_P, 3);
+ BIF_RET(TUPLE2(hp, make_small((Uint) words),
+ erts_ets_hash_sizeof_ext_segtab()));
}
else if (ERTS_IS_ATOM_STR("check_io_debug", BIF_ARG_1)) {
- /* Used by (emulator) */
- int res;
+ /* Used by driver_SUITE (emulator) */
+ Uint sz, *szp;
+ Eterm res, *hp, **hpp;
+ int no_errors;
+ ErtsCheckIoDebugInfo ciodi = {0};
#ifdef HAVE_ERTS_CHECK_IO_DEBUG
- erts_smp_proc_unlock(BIF_P,ERTS_PROC_LOCK_MAIN);
- res = erts_check_io_debug();
- erts_smp_proc_lock(BIF_P,ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(BIF_P,ERTS_PROC_LOCK_MAIN);
+ no_errors = erts_check_io_debug(&ciodi);
+ erts_proc_lock(BIF_P,ERTS_PROC_LOCK_MAIN);
#else
- res = 0;
+ no_errors = 0;
#endif
- ASSERT(res >= 0);
- BIF_RET(erts_make_integer((Uint) res, BIF_P));
+ sz = 0;
+ szp = &sz;
+ hpp = NULL;
+ while (1) {
+ res = erts_bld_tuple(hpp, szp, 4,
+ erts_bld_uint(hpp, szp,
+ (Uint) no_errors),
+ erts_bld_uint(hpp, szp,
+ (Uint) ciodi.no_used_fds),
+ erts_bld_uint(hpp, szp,
+ (Uint) ciodi.no_driver_select_structs),
+ erts_bld_uint(hpp, szp,
+ (Uint) ciodi.no_enif_select_structs));
+ if (hpp)
+ break;
+ hp = HAlloc(BIF_P, sz);
+ szp = NULL;
+ hpp = &hp;
+ }
+ BIF_RET(res);
}
else if (ERTS_IS_ATOM_STR("process_info_args", BIF_ARG_1)) {
/* Used by process_SUITE (emulator) */
@@ -3317,9 +3610,9 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
}
else if (ERTS_IS_ATOM_STR("nbalance", BIF_ARG_1)) {
Uint n;
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
n = erts_debug_nbalance();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(erts_make_integer(n, BIF_P));
}
else if (ERTS_IS_ATOM_STR("available_internal_state", BIF_ARG_1)) {
@@ -3334,16 +3627,60 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
}
else if (ERTS_IS_ATOM_STR("memory", BIF_ARG_1)) {
Eterm res;
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
res = erts_memory(NULL, NULL, BIF_P, THE_NON_VALUE);
- erts_smp_thr_progress_unblock();
+ erts_thr_progress_unblock();
BIF_RET(res);
}
else if (ERTS_IS_ATOM_STR("mmap", BIF_ARG_1)) {
BIF_RET(erts_mmap_debug_info(BIF_P));
}
+ else if (ERTS_IS_ATOM_STR("unique_monotonic_integer_state", BIF_ARG_1)) {
+ BIF_RET(erts_debug_get_unique_monotonic_integer_state(BIF_P));
+ }
+ else if (ERTS_IS_ATOM_STR("min_unique_monotonic_integer", BIF_ARG_1)) {
+ Sint64 value = erts_get_min_unique_monotonic_integer();
+ if (IS_SSMALL(value))
+ BIF_RET(make_small(value));
+ else {
+ Uint hsz = ERTS_SINT64_HEAP_SIZE(value);
+ Eterm *hp = HAlloc(BIF_P, hsz);
+ BIF_RET(erts_sint64_to_big(value, &hp));
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("min_unique_integer", BIF_ARG_1)) {
+ Sint64 value = erts_get_min_unique_integer();
+ if (IS_SSMALL(value))
+ BIF_RET(make_small(value));
+ else {
+ Uint hsz = ERTS_SINT64_HEAP_SIZE(value);
+ Eterm *hp = HAlloc(BIF_P, hsz);
+ BIF_RET(erts_sint64_to_big(value, &hp));
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("stack_check", BIF_ARG_1)) {
+ UWord size;
+ char c;
+ if (erts_is_above_stack_limit(&c))
+ size = erts_check_stack_recursion_downwards(&c);
+ else
+ size = erts_check_stack_recursion_upwards(&c);
+ if (IS_SSMALL(size))
+ BIF_RET(make_small(size));
+ else {
+ Uint hsz = BIG_UWORD_HEAP_SIZE(size);
+ Eterm *hp = HAlloc(BIF_P, hsz);
+ BIF_RET(uword_to_big(size, hp));
+ }
+ } else if (ERTS_IS_ATOM_STR("scheduler_dump", BIF_ARG_1)) {
+#if defined(ERTS_HAVE_TRY_CATCH) && defined(ERTS_SYS_SUSPEND_SIGNAL)
+ BIF_RET(am_true);
+#else
+ BIF_RET(am_false);
+#endif
+ }
}
else if (is_tuple(BIF_ARG_1)) {
Eterm* tp = tuple_val(BIF_ARG_1);
@@ -3353,10 +3690,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
/* Used by timer process_SUITE, timer_bif_SUITE, and
node_container_SUITE (emulator) */
if (is_internal_pid(tp[2])) {
- BIF_RET(erts_process_status(BIF_P,
- ERTS_PROC_LOCK_MAIN,
- NULL,
- tp[2]));
+ BIF_RET(erts_process_status(NULL, tp[2]));
}
}
else if (ERTS_IS_ATOM_STR("link_list", tp[1])) {
@@ -3370,11 +3704,11 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
tp[2],
ERTS_PROC_LOCK_LINK);
if (!p) {
- ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P);
+ ERTS_ASSERT_IS_NOT_EXITING(BIF_P);
BIF_RET(am_undefined);
}
res = make_link_list(BIF_P, ERTS_P_LINKS(p), NIL);
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_LINK);
BIF_RET(res);
}
else if(is_internal_port(tp[2])) {
@@ -3393,11 +3727,10 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
DistEntry *dep = erts_find_dist_entry(tp[2]);
if(dep) {
Eterm subres;
- erts_smp_de_links_lock(dep);
+ erts_de_links_lock(dep);
subres = make_link_list(BIF_P, dep->nlinks, NIL);
subres = make_link_list(BIF_P, dep->node_links, subres);
- erts_smp_de_links_unlock(dep);
- erts_deref_dist_entry(dep);
+ erts_de_links_unlock(dep);
BIF_RET(subres);
} else {
BIF_RET(am_undefined);
@@ -3415,20 +3748,19 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
tp[2],
ERTS_PROC_LOCK_LINK);
if (!p) {
- ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P);
+ ERTS_ASSERT_IS_NOT_EXITING(BIF_P);
BIF_RET(am_undefined);
}
res = make_monitor_list(BIF_P, ERTS_P_MONITORS(p));
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_LINK);
BIF_RET(res);
} else if(is_node_name_atom(tp[2])) {
DistEntry *dep = erts_find_dist_entry(tp[2]);
if(dep) {
Eterm ml;
- erts_smp_de_links_lock(dep);
+ erts_de_links_lock(dep);
ml = make_monitor_list(BIF_P, dep->monitors);
- erts_smp_de_links_unlock(dep);
- erts_deref_dist_entry(dep);
+ erts_de_links_unlock(dep);
BIF_RET(ml);
} else {
BIF_RET(am_undefined);
@@ -3443,7 +3775,6 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
else {
Uint cno = dist_entry_channel_no(dep);
res = make_small(cno);
- erts_deref_dist_entry(dep);
}
BIF_RET(res);
}
@@ -3455,7 +3786,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
}
else {
Eterm res = ERTS_PROC_PENDING_EXIT(rp) ? am_true : am_false;
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
BIF_RET(res);
}
}
@@ -3505,15 +3836,14 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
DFLAG_BIT_BINARIES);
BIF_RET(erts_term_to_binary(BIF_P, tp[2], 0, dflags));
}
- else if (ERTS_IS_ATOM_STR("dist_port", tp[1])) {
+ else if (ERTS_IS_ATOM_STR("dist_ctrl", tp[1])) {
Eterm res = am_undefined;
DistEntry *dep = erts_sysname_to_connected_dist_entry(tp[2]);
if (dep) {
- erts_smp_de_rlock(dep);
- if (is_internal_port(dep->cid))
+ erts_de_rlock(dep);
+ if (is_internal_port(dep->cid) || is_internal_pid(dep->cid))
res = dep->cid;
- erts_smp_de_runlock(dep);
- erts_deref_dist_entry(dep);
+ erts_de_runlock(dep);
}
BIF_RET(res);
}
@@ -3538,6 +3868,86 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
BIF_RET(erts_debug_reader_groups_map(BIF_P, (int) groups));
}
+ else if (ERTS_IS_ATOM_STR("internal_hash", tp[1])) {
+ Uint hash = (Uint) make_internal_hash(tp[2], 0);
+ Uint hsz = 0;
+ Eterm* hp;
+ erts_bld_uint(NULL, &hsz, hash);
+ hp = HAlloc(BIF_P,hsz);
+ return erts_bld_uint(&hp, NULL, hash);
+ }
+ else if (ERTS_IS_ATOM_STR("atom", tp[1])) {
+ Uint ix;
+ if (!term_to_Uint(tp[2], &ix))
+ BIF_ERROR(BIF_P, BADARG);
+ while (ix >= atom_table_size()) {
+ char tmp[20];
+ erts_snprintf(tmp, sizeof(tmp), "am%x", atom_table_size());
+ erts_atom_put((byte *) tmp, strlen(tmp), ERTS_ATOM_ENC_LATIN1, 1);
+ }
+ return make_atom(ix);
+ }
+ else if (ERTS_IS_ATOM_STR("magic_ref", tp[1])) {
+ Binary *bin;
+ UWord bin_addr, refc;
+ Eterm bin_addr_term, refc_term, test_type;
+ Uint sz;
+ Eterm *hp;
+ if (!is_internal_magic_ref(tp[2])) {
+ if (is_internal_ordinary_ref(tp[2])) {
+ ErtsORefThing *rtp;
+ rtp = (ErtsORefThing *) internal_ref_val(tp[2]);
+ if (erts_is_ref_numbers_magic(rtp->num))
+ BIF_RET(am_true);
+ }
+ BIF_RET(am_false);
+ }
+ bin = erts_magic_ref2bin(tp[2]);
+ refc = erts_refc_read(&bin->intern.refc, 1);
+ bin_addr = (UWord) bin;
+ sz = 4;
+ erts_bld_uword(NULL, &sz, bin_addr);
+ erts_bld_uword(NULL, &sz, refc);
+ hp = HAlloc(BIF_P, sz);
+ bin_addr_term = erts_bld_uword(&hp, NULL, bin_addr);
+ refc_term = erts_bld_uword(&hp, NULL, refc);
+ test_type = (ERTS_MAGIC_BIN_DESTRUCTOR(bin) == empty_magic_ref_destructor
+ ? am_true : am_false);
+ BIF_RET(TUPLE3(hp, bin_addr_term, refc_term, test_type));
+ }
+
+ break;
+ }
+ case 3: {
+ if (ERTS_IS_ATOM_STR("check_time_config", tp[1])) {
+ int res, time_correction;
+ ErtsTimeWarpMode time_warp_mode;
+ if (tp[2] == am_true)
+ time_correction = !0;
+ else if (tp[2] == am_false)
+ time_correction = 0;
+ else
+ break;
+ if (ERTS_IS_ATOM_STR("no_time_warp", tp[3]))
+ time_warp_mode = ERTS_NO_TIME_WARP_MODE;
+ else if (ERTS_IS_ATOM_STR("single_time_warp", tp[3]))
+ time_warp_mode = ERTS_SINGLE_TIME_WARP_MODE;
+ else if (ERTS_IS_ATOM_STR("multi_time_warp", tp[3]))
+ time_warp_mode = ERTS_MULTI_TIME_WARP_MODE;
+ else
+ break;
+ res = erts_check_time_adj_support(time_correction,
+ time_warp_mode);
+ BIF_RET(res ? am_true : am_false);
+ }
+ else if (ERTS_IS_ATOM_STR("make_unique_integer", tp[1])) {
+ Eterm res = erts_debug_make_unique_integer(BIF_P,
+ tp[2],
+ tp[3]);
+ if (is_non_value(res))
+ break;
+ BIF_RET(res);
+ }
break;
}
default:
@@ -3547,8 +3957,51 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
}
-static erts_smp_atomic_t hipe_test_reschedule_flag;
+BIF_RETTYPE erts_internal_is_system_process_1(BIF_ALIST_1)
+{
+ if (is_internal_pid(BIF_ARG_1)) {
+ Process *rp = erts_proc_lookup(BIF_ARG_1);
+ if (rp && (rp->static_flags & ERTS_STC_FLG_SYSTEM_PROC))
+ BIF_RET(am_true);
+ BIF_RET(am_false);
+ }
+
+ if (is_external_pid(BIF_ARG_1)
+ && external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry) {
+ BIF_RET(am_false);
+ }
+
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+BIF_RETTYPE erts_internal_system_check_1(BIF_ALIST_1)
+{
+ Eterm res;
+ if (ERTS_IS_ATOM_STR("schedulers", BIF_ARG_1)) {
+ res = erts_system_check_request(BIF_P);
+ if (is_non_value(res))
+ BIF_RET(am_undefined);
+ BIF_TRAP1(gather_system_check_res_trap, BIF_P, res);
+ }
+
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+static erts_atomic_t hipe_test_reschedule_flag;
+#if defined(VALGRIND) && defined(__GNUC__)
+/* Force noinline for valgrind suppression */
+static void broken_halt_test(Eterm bif_arg_2) __attribute__((noinline));
+#endif
+
+static void broken_halt_test(Eterm bif_arg_2)
+{
+ /* Ugly ugly code used by bif_SUITE:erlang_halt/1 */
+#if defined(ERTS_HAVE_TRY_CATCH)
+ erts_get_scheduler_data()->run_queue = NULL;
+#endif
+ erts_exit(ERTS_DUMP_EXIT, "%T", bif_arg_2);
+}
BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
{
@@ -3558,7 +4011,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
if (ERTS_IS_ATOM_STR("available_internal_state", BIF_ARG_1)
&& (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false)) {
erts_aint_t on = (erts_aint_t) (BIF_ARG_2 == am_true);
- erts_aint_t prev_on = erts_smp_atomic_xchg_nob(&available_internal_state, on);
+ erts_aint_t prev_on = erts_atomic_xchg_nob(&available_internal_state, on);
if (on) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp, "Process %T ", BIF_P->common.id);
@@ -3574,7 +4027,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
BIF_RET(prev_on ? am_true : am_false);
}
- if (!erts_smp_atomic_read_nob(&available_internal_state)) {
+ if (!erts_atomic_read_nob(&available_internal_state)) {
BIF_ERROR(BIF_P, EXC_UNDEF);
}
@@ -3598,13 +4051,13 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
Sint ms;
if (term_to_Sint(BIF_ARG_2, &ms) != 0) {
if (ms > 0) {
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
if (block)
- erts_smp_thr_progress_block();
+ erts_thr_progress_block();
while (erts_milli_sleep((long) ms) != 0);
if (block)
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
}
BIF_RET(am_true);
}
@@ -3613,9 +4066,9 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
Sint ms;
if (term_to_Sint(BIF_ARG_2, &ms) != 0) {
if (ms > 0) {
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
while (erts_milli_sleep((long) ms) != 0);
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
}
BIF_RET(am_true);
}
@@ -3645,9 +4098,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
BIF_RET(am_false);
}
else {
- FLAGS(rp) |= F_FORCE_GC;
- if (BIF_P != rp)
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
+ ERTS_FORCE_GC(BIF_P);
BIF_RET(am_true);
}
}
@@ -3685,10 +4136,8 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
BIF_RET(AM_dead);
}
-#ifdef ERTS_SMP
if (BIF_P == rp)
rp_locks |= ERTS_PROC_LOCK_MAIN;
-#endif
xres = erts_send_exit_signal(NULL, /* NULL in order to
force a pending exit
when we send to our
@@ -3700,11 +4149,9 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
NIL,
NULL,
0);
-#ifdef ERTS_SMP
if (BIF_P == rp)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
-#endif
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
if (xres > 1) {
DECL_AM(message);
BIF_RET(AM_message);
@@ -3766,14 +4213,14 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
}
else if (ERTS_IS_ATOM_STR("hipe_test_reschedule_suspend", BIF_ARG_1)) {
/* Used by hipe test suites */
- erts_aint_t flag = erts_smp_atomic_read_nob(&hipe_test_reschedule_flag);
+ erts_aint_t flag = erts_atomic_read_nob(&hipe_test_reschedule_flag);
if (!flag && BIF_ARG_2 != am_false) {
- erts_smp_atomic_set_nob(&hipe_test_reschedule_flag, 1);
+ erts_atomic_set_nob(&hipe_test_reschedule_flag, 1);
erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
ERTS_BIF_YIELD2(bif_export[BIF_erts_debug_set_internal_state_2],
BIF_P, BIF_ARG_1, BIF_ARG_2);
}
- erts_smp_atomic_set_nob(&hipe_test_reschedule_flag, !flag);
+ erts_atomic_set_nob(&hipe_test_reschedule_flag, !flag);
BIF_RET(NIL);
}
else if (ERTS_IS_ATOM_STR("hipe_test_reschedule_resume", BIF_ARG_1)) {
@@ -3784,7 +4231,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
if (rp) {
erts_resume(rp, ERTS_PROC_LOCK_STATUS);
res = am_true;
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
}
BIF_RET(res);
}
@@ -3793,7 +4240,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
BIF_RET(am_true);
}
else if (ERTS_IS_ATOM_STR("abort", BIF_ARG_1)) {
- erl_exit(ERTS_ABORT_EXIT, "%T\n", BIF_ARG_2);
+ erts_exit(ERTS_ABORT_EXIT, "%T\n", BIF_ARG_2);
}
else if (ERTS_IS_ATOM_STR("kill_dist_connection", BIF_ARG_1)) {
DistEntry *dep = erts_sysname_to_connected_dist_entry(BIF_ARG_2);
@@ -3801,16 +4248,14 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
BIF_RET(am_false);
else {
Uint32 con_id;
- erts_smp_de_rlock(dep);
+ erts_de_rlock(dep);
con_id = dep->connection_id;
- erts_smp_de_runlock(dep);
+ erts_de_runlock(dep);
erts_kill_dist_connection(dep, con_id);
- erts_deref_dist_entry(dep);
BIF_RET(am_true);
}
}
else if (ERTS_IS_ATOM_STR("not_running_optimization", BIF_ARG_1)) {
-#ifdef ERTS_SMP
int old_use_opt, use_opt;
switch (BIF_ARG_2) {
case am_true:
@@ -3823,271 +4268,460 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, BADARG);
}
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
old_use_opt = !erts_disable_proc_not_running_opt;
erts_disable_proc_not_running_opt = !use_opt;
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(old_use_opt ? am_true : am_false);
-#else
- BIF_ERROR(BIF_P, EXC_NOTSUP);
-#endif
}
else if (ERTS_IS_ATOM_STR("wait", BIF_ARG_1)) {
if (ERTS_IS_ATOM_STR("deallocations", BIF_ARG_2)) {
- if (erts_debug_wait_deallocations(BIF_P)) {
+ int flag = ERTS_DEBUG_WAIT_COMPLETED_DEALLOCATIONS;
+ if (erts_debug_wait_completed(BIF_P, flag)) {
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_ok);
+ }
+ }
+ if (ERTS_IS_ATOM_STR("timer_cancellations", BIF_ARG_2)) {
+ int flag = ERTS_DEBUG_WAIT_COMPLETED_TIMER_CANCELLATIONS;
+ if (erts_debug_wait_completed(BIF_P, flag)) {
ERTS_BIF_YIELD_RETURN(BIF_P, am_ok);
}
}
}
+ else if (ERTS_IS_ATOM_STR("broken_halt", BIF_ARG_1)) {
+ broken_halt_test(BIF_ARG_2);
+ }
+ else if (ERTS_IS_ATOM_STR("unique_monotonic_integer_state", BIF_ARG_1)) {
+ int res = erts_debug_set_unique_monotonic_integer_state(BIF_ARG_2);
+ BIF_RET(res ? am_true : am_false);
+ }
+ else if (ERTS_IS_ATOM_STR("node_tab_delayed_delete", BIF_ARG_1)) {
+ /* node_container_SUITE */
+ Sint64 msecs;
+ if (term_to_Sint64(BIF_ARG_2, &msecs)) {
+	    /* A negative value restores the original value... */
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_debug_test_node_tab_delayed_delete(msecs);
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ BIF_RET(am_ok);
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("fill_heap", BIF_ARG_1)) {
+ UWord left = HeapWordsLeft(BIF_P);
+ if (left > 1) {
+ Eterm* hp = HAlloc(BIF_P, left);
+ *hp = make_pos_bignum_header(left - 1);
+ }
+ if (BIF_ARG_2 == am_true) {
+ FLAGS(BIF_P) |= F_NEED_FULLSWEEP;
+ }
+ BIF_RET(am_ok);
+ }
+ else if (ERTS_IS_ATOM_STR("make", BIF_ARG_1)) {
+ if (ERTS_IS_ATOM_STR("magic_ref", BIF_ARG_2)) {
+ Binary *bin = erts_create_magic_binary(0, empty_magic_ref_destructor);
+ UWord bin_addr = (UWord) bin;
+ Eterm bin_addr_term, magic_ref, res;
+ Eterm *hp;
+ Uint sz = ERTS_MAGIC_REF_THING_SIZE + 3;
+ erts_bld_uword(NULL, &sz, bin_addr);
+ hp = HAlloc(BIF_P, sz);
+ bin_addr_term = erts_bld_uword(&hp, NULL, bin_addr);
+ magic_ref = erts_mk_magic_ref(&hp, &BIF_P->off_heap, bin);
+ res = TUPLE2(hp, magic_ref, bin_addr_term);
+ BIF_RET(res);
+ }
+ }
}
BIF_ERROR(BIF_P, BADARG);
}
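
The fill_heap helper above is easy to misread: writing a positive-bignum header whose arity spans all remaining free words turns the entire unused heap into one dead term, so the next heap allocation must either garbage collect or spill into a heap fragment. In outline, with explanatory comments added (same macros as above; a sketch, not a drop-in replacement):

    UWord left = HeapWordsLeft(p);
    if (left > 1) {
        Eterm *hp = HAlloc(p, left);            /* claim every free word      */
        *hp = make_pos_bignum_header(left - 1); /* one header spanning it all */
    }
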
#ifdef ERTS_ENABLE_LOCK_COUNT
+
+typedef struct {
+ /* info->location_count may increase between size calculation and term
+     * building, so we cap it at the value sampled in lcnt_build_sample_vector.
+ *
+ * Shrinking is safe though. */
+ int max_location_count;
+ erts_lcnt_lock_info_t *info;
+} lcnt_sample_t;
+
+typedef struct lcnt_sample_vector_ {
+ lcnt_sample_t *elements;
+ size_t size;
+} lcnt_sample_vector_t;
+
+static lcnt_sample_vector_t lcnt_build_sample_vector(erts_lcnt_lock_info_list_t *list) {
+ erts_lcnt_lock_info_t *iterator;
+ lcnt_sample_vector_t result;
+ size_t allocated_entries;
+
+ allocated_entries = 64;
+ result.size = 0;
+
+ result.elements = erts_alloc(ERTS_ALC_T_LCNT_VECTOR,
+ allocated_entries * sizeof(lcnt_sample_t));
+
+ iterator = NULL;
+ while(erts_lcnt_iterate_list(list, &iterator)) {
+ erts_lcnt_retain_lock_info(iterator);
+
+ result.elements[result.size].max_location_count = iterator->location_count;
+ result.elements[result.size].info = iterator;
+
+ result.size++;
+
+ if(result.size >= allocated_entries) {
+ allocated_entries *= 2;
+
+ result.elements = erts_realloc(ERTS_ALC_T_LCNT_VECTOR, result.elements,
+ allocated_entries * sizeof(lcnt_sample_t));
+ }
+ }
+
+ return result;
+}
+
+static void lcnt_destroy_sample_vector(lcnt_sample_vector_t *vector) {
+ size_t i;
+
+ for(i = 0; i < vector->size; i++) {
+ erts_lcnt_release_lock_info(vector->elements[i].info);
+ }
+
+ erts_free(ERTS_ALC_T_LCNT_VECTOR, vector->elements);
+}
+
+/* The size of an integer is not guaranteed to be constant since we're walking
+ * over live data, and it may cross into bignum territory between the size
+ * calculation and the actual build. This takes care of that by always assuming
+ * the worst case, but the result needs to be fixed up with HRelease once the
+ * final term has been built. */
+static ERTS_INLINE Eterm bld_unstable_uint64(Uint **hpp, Uint *szp, Uint64 ui) {
+ Eterm res = THE_NON_VALUE;
+
+ if(szp) {
+ *szp += ERTS_UINT64_HEAP_SIZE(~((Uint64) 0));
+ }
+
+ if(hpp) {
+ if (IS_USMALL(0, ui)) {
+ res = make_small(ui);
+ } else {
+ res = erts_uint64_to_big(ui, hpp);
+ }
+ }
+
+ return res;
+}
+
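All the builders below follow the two-pass (hpp, szp) convention used throughout ERTS: called with only szp they accumulate a (worst-case) size, and called with only hpp they actually write the term. A condensed sketch of the calling pattern (the real instance is erts_debug_lcnt_collect_0 further down; variable names are illustrative):

    Uint size = 0;
    Eterm *hp_start, *hp_end, term;

    /* Pass 1: accumulate a worst-case size estimate. */
    (void) lcnt_build_result_term(NULL, &size, &duration,
                                  &current, &deleted, NIL);

    /* Pass 2: reserve the worst case, build, then hand back the slack. */
    hp_end = hp_start = HAlloc(p, size);
    term = lcnt_build_result_term(&hp_end, NULL, &duration,
                                  &current, &deleted, NIL);
    HRelease(p, hp_start + size, hp_end);
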
static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_stats_t *stats, Eterm res) {
- Uint tries = 0, colls = 0;
- unsigned long timer_s = 0, timer_ns = 0, timer_n = 0;
- unsigned int line = 0;
-
+ unsigned int i;
+ const char *file;
+
Eterm af, uil;
Eterm uit, uic;
Eterm uits, uitns, uitn;
Eterm tt, tstat, tloc, t;
-
+ Eterm thist, vhist[ERTS_LCNT_HISTOGRAM_SLOT_SIZE];
+
/* term:
- * [{{file, line}, {tries, colls, {seconds, nanoseconds, n_blocks}}}]
- */
-
- tries = (Uint) ethr_atomic_read(&stats->tries);
- colls = (Uint) ethr_atomic_read(&stats->colls);
-
- line = stats->line;
- timer_s = stats->timer.s;
- timer_ns = stats->timer.ns;
- timer_n = stats->timer_n;
-
- af = erts_atom_put(stats->file, strlen(stats->file), ERTS_ATOM_ENC_LATIN1, 1);
- uil = erts_bld_uint( hpp, szp, line);
+ * [{{file, line},
+     *  {tries, colls, {seconds, nanoseconds, n_blocks}},
+ * { .. histogram .. }] */
+
+ file = stats->file ? stats->file : "undefined";
+
+ af = erts_atom_put((byte *)file, strlen(file), ERTS_ATOM_ENC_LATIN1, 1);
+ uil = erts_bld_uint( hpp, szp, stats->line);
tloc = erts_bld_tuple(hpp, szp, 2, af, uil);
-
- uit = erts_bld_uint( hpp, szp, tries);
- uic = erts_bld_uint( hpp, szp, colls);
-
- uits = erts_bld_uint( hpp, szp, timer_s);
- uitns = erts_bld_uint( hpp, szp, timer_ns);
- uitn = erts_bld_uint( hpp, szp, timer_n);
+
+ uit = bld_unstable_uint64(hpp, szp, (Uint)ethr_atomic_read(&stats->attempts));
+ uic = bld_unstable_uint64(hpp, szp, (Uint)ethr_atomic_read(&stats->collisions));
+
+ uits = bld_unstable_uint64(hpp, szp, stats->total_time_waited.s);
+ uitns = bld_unstable_uint64(hpp, szp, stats->total_time_waited.ns);
+ uitn = bld_unstable_uint64(hpp, szp, stats->times_waited);
tt = erts_bld_tuple(hpp, szp, 3, uits, uitns, uitn);
tstat = erts_bld_tuple(hpp, szp, 3, uit, uic, tt);
-
- t = erts_bld_tuple(hpp, szp, 2, tloc, tstat);
-
- res = erts_bld_cons( hpp, szp, t, res);
+
+ for(i = 0; i < ERTS_LCNT_HISTOGRAM_SLOT_SIZE; i++) {
+ vhist[i] = bld_unstable_uint64(hpp, szp, stats->wait_time_histogram.ns[i]);
+ }
+
+ thist = erts_bld_tuplev(hpp, szp, ERTS_LCNT_HISTOGRAM_SLOT_SIZE, vhist);
+
+ t = erts_bld_tuple(hpp, szp, 3, tloc, tstat, thist);
+ res = erts_bld_cons( hpp, szp, t, res);
return res;
}
-static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_t *lock, Eterm res) {
+static Eterm lcnt_pretty_print_lock_id(erts_lcnt_lock_info_t *info) {
+ Eterm id = info->id;
+
+ if((info->flags & ERTS_LOCK_FLAGS_MASK_TYPE) == ERTS_LOCK_TYPE_PROCLOCK) {
+ /* Use registered names as id's for process locks if available. Thread
+ * progress is delayed since we may be running on a dirty scheduler. */
+ ErtsThrPrgrDelayHandle delay_handle;
+ Process *process;
+
+ delay_handle = erts_thr_progress_unmanaged_delay();
+
+ process = erts_proc_lookup(info->id);
+ if (process && process->common.u.alive.reg) {
+ id = process->common.u.alive.reg->name;
+ }
+
+ erts_thr_progress_unmanaged_continue(delay_handle);
+ } else if(info->flags & ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR) {
+ if(is_small(id) && !sys_strcmp(info->name, "alcu_allocator")) {
+ const char *name = (const char*)ERTS_ALC_A2AD(signed_val(id));
+ id = erts_atom_put((byte*)name, strlen(name), ERTS_ATOM_ENC_LATIN1, 1);
+ }
+ }
+
+ return id;
+}
+
+static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, lcnt_sample_t *sample, Eterm res) {
+ erts_lcnt_lock_info_t *info = sample->info;
+
Eterm name, type, id, stats = NIL, t;
- Process *proc = NULL;
- char *ltype;
+ const char *lock_desc;
int i;
+
+ /* term: [{name, id, type, stats()}] */
+
+ ASSERT(info->name);
- /* term:
- * [{name, id, type, stats()}]
- */
-
- ASSERT(lock->name);
-
- ltype = erts_lcnt_lock_type(lock->flag);
-
- ASSERT(ltype);
-
- type = erts_atom_put(ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1);
- name = erts_atom_put(lock->name, strlen(lock->name), ERTS_ATOM_ENC_LATIN1, 1);
-
- if (lock->flag & ERTS_LCNT_LT_ALLOC) {
- /* use allocator types names as id's for allocator locks */
- ltype = (char *) ERTS_ALC_A2AD(signed_val(lock->id));
- id = erts_atom_put(ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1);
- } else if (lock->flag & ERTS_LCNT_LT_PROCLOCK) {
- /* use registered names as id's for process locks if available */
- proc = erts_proc_lookup(lock->id);
- if (proc && proc->common.u.alive.reg) {
- id = proc->common.u.alive.reg->name;
- } else {
- /* otherwise use process id */
- id = lock->id;
- }
+ lock_desc = erts_lock_flags_get_type_name(info->flags);
+
+ type = erts_atom_put((byte*)lock_desc, strlen(lock_desc), ERTS_ATOM_ENC_LATIN1, 1);
+ name = erts_atom_put((byte*)info->name, strlen(info->name), ERTS_ATOM_ENC_LATIN1, 1);
+
+ /* Only attempt to resolve ids when actually emitting the term. This ought
+ * to be safe since all immediates are the same size. */
+ if(hpp != NULL) {
+ id = lcnt_pretty_print_lock_id(info);
} else {
- id = lock->id;
+ id = NIL;
}
-
- for (i = 0; i < lock->n_stats; i++) {
- stats = lcnt_build_lock_stats_term(hpp, szp, &(lock->stats[i]), stats);
+
+ for(i = 0; i < MIN(info->location_count, sample->max_location_count); i++) {
+ stats = lcnt_build_lock_stats_term(hpp, szp, &(info->location_stats[i]), stats);
}
-
- t = erts_bld_tuple(hpp, szp, 4, name, id, type, stats);
-
- res = erts_bld_cons( hpp, szp, t, res);
+
+ t = erts_bld_tuple(hpp, szp, 4, name, id, type, stats);
+ res = erts_bld_cons(hpp, szp, t, res);
return res;
}
-static Eterm lcnt_build_result_term(Eterm **hpp, Uint *szp, erts_lcnt_data_t *data, Eterm res) {
+static Eterm lcnt_build_result_term(Eterm **hpp, Uint *szp, erts_lcnt_time_t *duration,
+ lcnt_sample_vector_t *current_locks,
+ lcnt_sample_vector_t *deleted_locks, Eterm res) {
+ const char *str_duration = "duration";
+ const char *str_locks = "locks";
+
Eterm dts, dtns, tdt, adur, tdur, aloc, lloc = NIL, tloc;
- erts_lcnt_lock_t *lock = NULL;
- char *str_duration = "duration";
- char *str_locks = "locks";
-
- /* term:
- * [{'duration', {seconds, nanoseconds}}, {'locks', locks()}]
- */
-
+ size_t i;
+
+ /* term: [{'duration', {seconds, nanoseconds}}, {'locks', locks()}] */
+
/* duration tuple */
- dts = erts_bld_uint( hpp, szp, data->duration.s);
- dtns = erts_bld_uint( hpp, szp, data->duration.ns);
+ dts = bld_unstable_uint64(hpp, szp, duration->s);
+ dtns = bld_unstable_uint64(hpp, szp, duration->ns);
tdt = erts_bld_tuple(hpp, szp, 2, dts, dtns);
-
- adur = erts_atom_put(str_duration, strlen(str_duration), ERTS_ATOM_ENC_LATIN1, 1);
+
+ adur = erts_atom_put((byte *)str_duration, strlen(str_duration), ERTS_ATOM_ENC_LATIN1, 1);
tdur = erts_bld_tuple(hpp, szp, 2, adur, tdt);
/* lock tuple */
-
- aloc = erts_atom_put(str_locks, strlen(str_locks), ERTS_ATOM_ENC_LATIN1, 1);
-
- for (lock = data->current_locks->head; lock != NULL ; lock = lock->next ) {
- lloc = lcnt_build_lock_term(hpp, szp, lock, lloc);
+ aloc = erts_atom_put((byte *)str_locks, strlen(str_locks), ERTS_ATOM_ENC_LATIN1, 1);
+
+ for(i = 0; i < current_locks->size; i++) {
+ lloc = lcnt_build_lock_term(hpp, szp, &current_locks->elements[i], lloc);
}
-
- for (lock = data->deleted_locks->head; lock != NULL ; lock = lock->next ) {
- lloc = lcnt_build_lock_term(hpp, szp, lock, lloc);
+
+ for(i = 0; i < deleted_locks->size; i++) {
+ lloc = lcnt_build_lock_term(hpp, szp, &deleted_locks->elements[i], lloc);
}
-
+
tloc = erts_bld_tuple(hpp, szp, 2, aloc, lloc);
-
- res = erts_bld_cons( hpp, szp, tloc, res);
- res = erts_bld_cons( hpp, szp, tdur, res);
+
+ res = erts_bld_cons(hpp, szp, tloc, res);
+ res = erts_bld_cons(hpp, szp, tdur, res);
+
+ return res;
+}
+
+static struct {
+ const char *name;
+ erts_lock_flags_t flag;
+} lcnt_category_map[] = {
+ {"allocator", ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR},
+ {"db", ERTS_LOCK_FLAGS_CATEGORY_DB},
+ {"debug", ERTS_LOCK_FLAGS_CATEGORY_DEBUG},
+ {"distribution", ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION},
+ {"generic", ERTS_LOCK_FLAGS_CATEGORY_GENERIC},
+ {"io", ERTS_LOCK_FLAGS_CATEGORY_IO},
+ {"process", ERTS_LOCK_FLAGS_CATEGORY_PROCESS},
+ {"scheduler", ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER},
+ {NULL, 0}
+ };
+
+static erts_lock_flags_t lcnt_atom_to_lock_category(Eterm atom) {
+ int i = 0;
+
+ for(i = 0; lcnt_category_map[i].name != NULL; i++) {
+ if(erts_is_atom_str(lcnt_category_map[i].name, atom, 0)) {
+ return lcnt_category_map[i].flag;
+ }
+ }
+
+ return 0;
+}
+
+static Eterm lcnt_build_category_list(Eterm **hpp, Uint *szp, erts_lock_flags_t mask) {
+ Eterm res;
+ int i;
+
+ res = NIL;
+
+ for(i = 0; lcnt_category_map[i].name != NULL; i++) {
+ if(mask & lcnt_category_map[i].flag) {
+ Eterm category = erts_atom_put((byte*)lcnt_category_map[i].name,
+ strlen(lcnt_category_map[i].name),
+ ERTS_ATOM_ENC_UTF8, 0);
+
+ res = erts_bld_cons(hpp, szp, category, res);
+ }
+ }
return res;
-}
+}
+
#endif
-BIF_RETTYPE erts_debug_lock_counters_1(BIF_ALIST_1)
+BIF_RETTYPE erts_debug_lcnt_clear_0(BIF_ALIST_0)
{
-#ifdef ERTS_ENABLE_LOCK_COUNT
- Eterm res = NIL;
-#endif
+#ifndef ERTS_ENABLE_LOCK_COUNT
+ BIF_RET(am_error);
+#else
+ erts_lcnt_clear_counters();
+ BIF_RET(am_ok);
+#endif
+}
- if (BIF_ARG_1 == am_enabled) {
-#ifdef ERTS_ENABLE_LOCK_COUNT
- BIF_RET(am_true);
+BIF_RETTYPE erts_debug_lcnt_collect_0(BIF_ALIST_0)
+{
+#ifndef ERTS_ENABLE_LOCK_COUNT
+ BIF_RET(am_error);
#else
- BIF_RET(am_false);
+ lcnt_sample_vector_t current_locks, deleted_locks;
+ erts_lcnt_data_t data;
+
+ Eterm *term_heap_start, *term_heap_end;
+ Uint term_heap_size = 0;
+ Eterm result;
+
+ data = erts_lcnt_get_data();
+
+ current_locks = lcnt_build_sample_vector(data.current_locks);
+ deleted_locks = lcnt_build_sample_vector(data.deleted_locks);
+
+ lcnt_build_result_term(NULL, &term_heap_size, &data.duration,
+ &current_locks, &deleted_locks, NIL);
+
+ term_heap_start = HAlloc(BIF_P, term_heap_size);
+ term_heap_end = term_heap_start;
+
+ result = lcnt_build_result_term(&term_heap_end, NULL,
+ &data.duration, &current_locks, &deleted_locks, NIL);
+
+ HRelease(BIF_P, term_heap_start + term_heap_size, term_heap_end);
+
+ lcnt_destroy_sample_vector(&current_locks);
+ lcnt_destroy_sample_vector(&deleted_locks);
+
+ BIF_RET(result);
#endif
- }
+}
+
+BIF_RETTYPE erts_debug_lcnt_control_1(BIF_ALIST_1)
+{
#ifdef ERTS_ENABLE_LOCK_COUNT
+ if(ERTS_IS_ATOM_STR("mask", BIF_ARG_1)) {
+ erts_lock_flags_t mask;
+ Eterm *term_heap_block;
+ Uint term_heap_size;
- else if (BIF_ARG_1 == am_info) {
- erts_lcnt_data_t *data;
- Uint hsize = 0;
- Uint *szp;
- Eterm* hp;
+ mask = erts_lcnt_get_category_mask();
+ term_heap_size = 0;
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ lcnt_build_category_list(NULL, &term_heap_size, mask);
- erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_SUSPEND);
- data = erts_lcnt_get_data();
+ term_heap_block = HAlloc(BIF_P, term_heap_size);
- /* calculate size */
+ BIF_RET(lcnt_build_category_list(&term_heap_block, NULL, mask));
+ } else if(ERTS_IS_ATOM_STR("copy_save", BIF_ARG_1)) {
+ if(erts_lcnt_get_preserve_info()) {
+ BIF_RET(am_true);
+ }
- szp = &hsize;
- lcnt_build_result_term(NULL, szp, data, NIL);
+ BIF_RET(am_false);
+ }
+#endif
+ BIF_ERROR(BIF_P, BADARG);
+}
- /* alloc and build */
+BIF_RETTYPE erts_debug_lcnt_control_2(BIF_ALIST_2)
+{
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ if(ERTS_IS_ATOM_STR("mask", BIF_ARG_1)) {
+ erts_lock_flags_t category_mask = 0;
+ Eterm categories = BIF_ARG_2;
- hp = HAlloc(BIF_P, hsize);
+ if(!(is_list(categories) || is_nil(categories))) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
- res = lcnt_build_result_term(&hp, NULL, data, res);
-
- erts_lcnt_clear_rt_opt(ERTS_LCNT_OPT_SUSPEND);
+ while(is_list(categories)) {
+ Eterm *cell = list_val(categories);
+ erts_lock_flags_t category;
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
-
- BIF_RET(res);
- } else if (BIF_ARG_1 == am_clear) {
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ category = lcnt_atom_to_lock_category(CAR(cell));
- erts_lcnt_clear_counters();
+ if(!category) {
+ Eterm *hp = HAlloc(BIF_P, 4);
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ BIF_RET(TUPLE3(hp, am_error, am_badarg, CAR(cell)));
+ }
- BIF_RET(am_ok);
- } else if (is_tuple(BIF_ARG_1)) {
- Eterm* tp = tuple_val(BIF_ARG_1);
+ category_mask |= category;
+ categories = CDR(cell);
+ }
- switch (arityval(tp[0])) {
- case 2: {
- int opt = 0;
- int val = 0;
- if (ERTS_IS_ATOM_STR("copy_save", tp[1])) {
- opt = ERTS_LCNT_OPT_COPYSAVE;
- } else if (ERTS_IS_ATOM_STR("process_locks", tp[1])) {
- opt = ERTS_LCNT_OPT_PROCLOCK;
- } else if (ERTS_IS_ATOM_STR("port_locks", tp[1])) {
- opt = ERTS_LCNT_OPT_PORTLOCK;
- } else if (ERTS_IS_ATOM_STR("suspend", tp[1])) {
- opt = ERTS_LCNT_OPT_SUSPEND;
- } else if (ERTS_IS_ATOM_STR("location", tp[1])) {
- opt = ERTS_LCNT_OPT_LOCATION;
- } else {
- BIF_ERROR(BIF_P, BADARG);
- }
- if (tp[2] == am_true) {
- val = 1;
- } else if (tp[2] == am_false) {
- val = 0;
- } else {
- BIF_ERROR(BIF_P, BADARG);
- }
+ erts_lcnt_set_category_mask(category_mask);
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
-
- if (val) {
- res = erts_lcnt_set_rt_opt(opt) ? am_true : am_false;
- } else {
- res = erts_lcnt_clear_rt_opt(opt) ? am_true : am_false;
- }
-#ifdef ERTS_SMP
- if (res != tp[2]) {
- if (opt == ERTS_LCNT_OPT_PORTLOCK) {
- erts_lcnt_enable_io_lock_count(val);
- } else if (opt == ERTS_LCNT_OPT_PROCLOCK) {
- erts_lcnt_enable_proc_lock_count(val);
- }
- }
-#endif
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
- BIF_RET(res);
- break;
- }
-
- default:
- break;
- }
- }
+ BIF_RET(am_ok);
+ } else if(BIF_ARG_2 == am_true || BIF_ARG_2 == am_false) {
+ int enabled = (BIF_ARG_2 == am_true);
+
+ if(ERTS_IS_ATOM_STR("copy_save", BIF_ARG_1)) {
+ erts_lcnt_set_preserve_info(enabled);
-#endif
+ BIF_RET(am_ok);
+ }
+ }
+#endif
BIF_ERROR(BIF_P, BADARG);
}
@@ -4102,21 +4736,24 @@ static void os_info_init(void)
os_flavor(buf, 1024);
flav = erts_atom_put((byte *) buf, strlen(buf), ERTS_ATOM_ENC_LATIN1, 1);
erts_free(ERTS_ALC_T_TMP, (void *) buf);
- hp = erts_alloc(ERTS_ALC_T_LL_TEMP_TERM, (3+4)*sizeof(Eterm));
+ hp = erts_alloc(ERTS_ALC_T_LITERAL, (3+4)*sizeof(Eterm));
os_type_tuple = TUPLE2(hp, type, flav);
+ erts_set_literal_tag(&os_type_tuple, hp, 3);
+
hp += 3;
os_version(&major, &minor, &build);
os_version_tuple = TUPLE3(hp,
make_small(major),
make_small(minor),
make_small(build));
+ erts_set_literal_tag(&os_version_tuple, hp, 4);
}
void
erts_bif_info_init(void)
{
- erts_smp_atomic_init_nob(&available_internal_state, 0);
- erts_smp_atomic_init_nob(&hipe_test_reschedule_flag, 0);
+ erts_atomic_init_nob(&available_internal_state, 0);
+ erts_atomic_init_nob(&hipe_test_reschedule_flag, 0);
alloc_info_trap = erts_export_put(am_erlang, am_alloc_info, 1);
alloc_sizes_trap = erts_export_put(am_erlang, am_alloc_sizes, 1);
@@ -4124,6 +4761,12 @@ erts_bif_info_init(void)
= erts_export_put(am_erlang, am_gather_sched_wall_time_result, 1);
gather_gc_info_res_trap
= erts_export_put(am_erlang, am_gather_gc_info_result, 1);
+ gather_io_bytes_trap
+ = erts_export_put(am_erts_internal, am_gather_io_bytes, 2);
+ gather_msacc_res_trap
+ = erts_export_put(am_erts_internal, am_gather_microstate_accounting_result, 2);
+ gather_system_check_res_trap
+ = erts_export_put(am_erts_internal, am_gather_system_check_result, 1);
process_info_init();
os_info_init();
}
diff --git a/erts/emulator/beam/erl_bif_lists.c b/erts/emulator/beam/erl_bif_lists.c
index 820ed2385d..73d327da3e 100644
--- a/erts/emulator/beam/erl_bif_lists.c
+++ b/erts/emulator/beam/erl_bif_lists.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2011. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -39,32 +40,93 @@ static BIF_RETTYPE append(Process* p, Eterm A, Eterm B)
Eterm list;
Eterm copy;
Eterm last;
- size_t need;
- Eterm* hp;
- int i;
+ Eterm* hp = NULL;
+ Sint i;
- if ((i = erts_list_length(A)) < 0) {
- BIF_ERROR(p, BADARG);
+ list = A;
+
+ if (is_nil(list)) {
+ BIF_RET(B);
}
- if (i == 0) {
- BIF_RET(B);
- } else if (is_nil(B)) {
- BIF_RET(A);
+
+ if (is_not_list(list)) {
+ BIF_ERROR(p, BADARG);
}
- need = 2*i;
- hp = HAlloc(p, need);
- list = A;
+    /* Optimistically build the appended list directly on the heap first. */
+
+ if ((i = HeapWordsLeft(p) / 2) < 4) {
+ goto list_tail;
+ }
+
+ hp = HEAP_TOP(p);
copy = last = CONS(hp, CAR(list_val(list)), make_list(hp+2));
list = CDR(list_val(list));
- hp += 2;
+ hp += 2;
+ i -= 2; /* don't use the last 2 words (extra i--;) */
+
+ while(i-- && is_list(list)) {
+ Eterm* listp = list_val(list);
+ last = CONS(hp, CAR(listp), make_list(hp+2));
+ list = CDR(listp);
+ hp += 2;
+ }
+
+    /* If A is proper and B is NIL, return A as-is and don't update HTOP */
+
+ if (is_nil(list) && is_nil(B)) {
+ BIF_RET(A);
+ }
+
+ if (is_nil(list)) {
+ HEAP_TOP(p) = hp;
+ CDR(list_val(last)) = B;
+ BIF_RET(copy);
+ }
+
+list_tail:
+
+ if ((i = erts_list_length(list)) < 0) {
+ BIF_ERROR(p, BADARG);
+ }
+
+ /* remaining list was proper and B is NIL */
+ if (is_nil(B)) {
+ BIF_RET(A);
+ }
+
+ if (hp) {
+	/* Note: fall-through case; the copied cells are already
+	 * written on the heap, but the last 2 words of the heap
+	 * are not written yet.
+	 */
+ Eterm *hp_save = hp;
+ ASSERT(i != 0);
+ HEAP_TOP(p) = hp + 2;
+ if (i == 1) {
+ hp[0] = CAR(list_val(list));
+ hp[1] = B;
+ BIF_RET(copy);
+ }
+ hp = HAlloc(p, 2*(i - 1));
+ last = CONS(hp_save, CAR(list_val(list)), make_list(hp));
+ } else {
+ hp = HAlloc(p, 2*i);
+ copy = last = CONS(hp, CAR(list_val(list)), make_list(hp+2));
+ hp += 2;
+ }
+
+ list = CDR(list_val(list));
i--;
+
+ ASSERT(i > -1);
while(i--) {
- Eterm* listp = list_val(list);
- last = CONS(hp, CAR(listp), make_list(hp+2));
- list = CDR(listp);
- hp += 2;
+ Eterm* listp = list_val(list);
+ last = CONS(hp, CAR(listp), make_list(hp+2));
+ list = CDR(listp);
+ hp += 2;
}
+
CDR(list_val(last)) = B;
BIF_RET(copy);
}
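
Whichever path is taken above, the observable semantics of append/2 stay the same: A's cells are copied, the copy's final CDR is set to B, and B itself is shared rather than copied. A standalone sketch of that semantic in plain C (ordinary malloc'd cells, not the ERTS heap discipline; error handling elided):

    #include <stdlib.h>

    struct cell { int car; struct cell *cdr; };

    /* Copy list a and point the copy's last cdr at b; b is shared, not copied. */
    static struct cell *append(const struct cell *a, struct cell *b)
    {
        struct cell *copy = NULL, **tail = &copy;

        for (; a != NULL; a = a->cdr) {
            struct cell *c = malloc(sizeof *c);
            c->car = a->car;
            *tail = c;              /* link the new cell in      */
            tail = &c->cdr;         /* the next cell goes here   */
        }
        *tail = b;                  /* patch the final cdr to B  */
        return copy;
    }
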
@@ -98,9 +160,9 @@ static Eterm subtract(Process* p, Eterm A, Eterm B)
Eterm small_vec[SMALL_VEC_SIZE]; /* Preallocated memory for small lists */
Eterm* vec_p;
Eterm* vp;
- int i;
- int n;
- int m;
+ Sint i;
+ Sint n;
+ Sint m;
if ((n = erts_list_length(A)) < 0) {
BIF_ERROR(p, BADARG);
@@ -390,7 +452,7 @@ keyfind(int Bif, Process* p, Eterm Key, Eterm Pos, Eterm List)
Eterm *tuple_ptr = tuple_val(term);
if (pos <= arityval(*tuple_ptr)) {
Eterm element = tuple_ptr[pos];
- if (CMP(Key, element) == 0) {
+ if (CMP_EQ(Key, element)) {
return term;
}
}
diff --git a/erts/emulator/beam/erl_bif_op.c b/erts/emulator/beam/erl_bif_op.c
index 37dd6457db..a594ec1493 100644
--- a/erts/emulator/beam/erl_bif_op.c
+++ b/erts/emulator/beam/erl_bif_op.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -88,22 +89,22 @@ BIF_RETTYPE not_1(BIF_ALIST_1)
BIF_RETTYPE sgt_2(BIF_ALIST_2)
{
- BIF_RET(cmp_gt(BIF_ARG_1, BIF_ARG_2) ? am_true : am_false);
+ BIF_RET(CMP_GT(BIF_ARG_1, BIF_ARG_2) ? am_true : am_false);
}
BIF_RETTYPE sge_2(BIF_ALIST_2)
{
- BIF_RET(cmp_ge(BIF_ARG_1, BIF_ARG_2) ? am_true : am_false);
+ BIF_RET(CMP_GE(BIF_ARG_1, BIF_ARG_2) ? am_true : am_false);
}
BIF_RETTYPE slt_2(BIF_ALIST_2)
{
- BIF_RET(cmp_lt(BIF_ARG_1, BIF_ARG_2) ? am_true : am_false);
+ BIF_RET(CMP_LT(BIF_ARG_1, BIF_ARG_2) ? am_true : am_false);
}
BIF_RETTYPE sle_2(BIF_ALIST_2)
{
- BIF_RET(cmp_le(BIF_ARG_1, BIF_ARG_2) ? am_true : am_false);
+ BIF_RET(CMP_LE(BIF_ARG_1, BIF_ARG_2) ? am_true : am_false);
}
BIF_RETTYPE seq_2(BIF_ALIST_2)
@@ -113,7 +114,7 @@ BIF_RETTYPE seq_2(BIF_ALIST_2)
BIF_RETTYPE seqeq_2(BIF_ALIST_2)
{
- BIF_RET(cmp_eq(BIF_ARG_1, BIF_ARG_2) ? am_true : am_false);
+ BIF_RET(CMP_EQ(BIF_ARG_1, BIF_ARG_2) ? am_true : am_false);
}
BIF_RETTYPE sneq_2(BIF_ALIST_2)
@@ -123,7 +124,7 @@ BIF_RETTYPE sneq_2(BIF_ALIST_2)
BIF_RETTYPE sneqeq_2(BIF_ALIST_2)
{
- BIF_RET(cmp_ne(BIF_ARG_1, BIF_ARG_2) ? am_true : am_false);
+ BIF_RET(CMP_NE(BIF_ARG_1, BIF_ARG_2) ? am_true : am_false);
}
BIF_RETTYPE is_atom_1(BIF_ALIST_1)
@@ -257,9 +258,9 @@ Eterm erl_is_function(Process* p, Eterm arg1, Eterm arg2)
BIF_RET(am_true);
}
} else if (is_export(arg1)) {
- Export* exp = (Export *) EXPAND_POINTER((export_val(arg1))[1]);
+ Export* exp = (Export *) (export_val(arg1)[1]);
- if (exp->code[2] == (Uint) arity) {
+ if (exp->info.mfa.arity == (Uint) arity) {
BIF_RET(am_true);
}
}
diff --git a/erts/emulator/beam/erl_bif_os.c b/erts/emulator/beam/erl_bif_os.c
index e07c622928..910325a2f4 100644
--- a/erts/emulator/beam/erl_bif_os.c
+++ b/erts/emulator/beam/erl_bif_os.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2012. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -36,6 +37,8 @@
#include "dist.h"
#include "erl_version.h"
+static int check_env_name(char *name);
+
/*
* Return the pid for the Erlang process in the host OS.
*/
@@ -100,8 +103,10 @@ BIF_RETTYPE os_getenv_1(BIF_ALIST_1)
key_str = erts_convert_filename_to_native(BIF_ARG_1,buf,STATIC_BUF_SIZE,
ERTS_ALC_T_TMP,1,0,&len);
- if (!key_str) {
- BIF_ERROR(p, BADARG);
+ if (!check_env_name(key_str)) {
+ if (key_str && key_str != &buf[0])
+ erts_free(ERTS_ALC_T_TMP, key_str);
+ BIF_ERROR(p, BADARG);
}
if (key_str != &buf[0])
@@ -142,25 +147,20 @@ BIF_RETTYPE os_putenv_2(BIF_ALIST_2)
{
char def_buf_key[STATIC_BUF_SIZE];
char def_buf_value[STATIC_BUF_SIZE];
- char *key_buf, *value_buf;
+ char *key_buf = NULL, *value_buf = NULL;
key_buf = erts_convert_filename_to_native(BIF_ARG_1,def_buf_key,
STATIC_BUF_SIZE,
ERTS_ALC_T_TMP,0,0,NULL);
- if (!key_buf) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ if (!check_env_name(key_buf))
+ goto badarg;
+
value_buf = erts_convert_filename_to_native(BIF_ARG_2,def_buf_value,
STATIC_BUF_SIZE,
ERTS_ALC_T_TMP,1,0,
NULL);
- if (!value_buf) {
- if (key_buf != def_buf_key) {
- erts_free(ERTS_ALC_T_TMP, key_buf);
- }
- BIF_ERROR(BIF_P, BADARG);
- }
-
+ if (!value_buf)
+ goto badarg;
if (erts_sys_putenv(key_buf, value_buf)) {
if (key_buf != def_buf_key) {
@@ -178,6 +178,13 @@ BIF_RETTYPE os_putenv_2(BIF_ALIST_2)
erts_free(ERTS_ALC_T_TMP, value_buf);
}
BIF_RET(am_true);
+
+badarg:
+ if (key_buf && key_buf != def_buf_key)
+ erts_free(ERTS_ALC_T_TMP, key_buf);
+ if (value_buf && value_buf != def_buf_value)
+ erts_free(ERTS_ALC_T_TMP, value_buf);
+ BIF_ERROR(BIF_P, BADARG);
}
BIF_RETTYPE os_unsetenv_1(BIF_ALIST_1)
@@ -187,18 +194,57 @@ BIF_RETTYPE os_unsetenv_1(BIF_ALIST_1)
key_buf = erts_convert_filename_to_native(BIF_ARG_1,buf,STATIC_BUF_SIZE,
ERTS_ALC_T_TMP,0,0,NULL);
- if (!key_buf) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ if (!check_env_name(key_buf))
+ goto badarg;
+
+ if (erts_sys_unsetenv(key_buf))
+ goto badarg;
- if (erts_sys_unsetenv(key_buf)) {
- if (key_buf != buf) {
- erts_free(ERTS_ALC_T_TMP, key_buf);
- }
- BIF_ERROR(BIF_P, BADARG);
- }
if (key_buf != buf) {
erts_free(ERTS_ALC_T_TMP, key_buf);
}
BIF_RET(am_true);
+
+badarg:
+ if (key_buf && key_buf != buf)
+ erts_free(ERTS_ALC_T_TMP, key_buf);
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+BIF_RETTYPE os_set_signal_2(BIF_ALIST_2) {
+ if (is_atom(BIF_ARG_1) && ((BIF_ARG_2 == am_ignore) ||
+ (BIF_ARG_2 == am_default) ||
+ (BIF_ARG_2 == am_handle))) {
+ if (!erts_set_signal(BIF_ARG_1, BIF_ARG_2))
+ goto error;
+
+ BIF_RET(am_ok);
+ }
+
+error:
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+static int
+check_env_name(char *raw_name)
+{
+ byte *c = (byte *) raw_name;
+ int encoding;
+
+ if (!c)
+ return 0;
+
+ encoding = erts_get_native_filename_encoding();
+
+ if (erts_raw_env_char_is_7bit_ascii_char('\0', c, encoding))
+ return 0; /* Do not allow empty name... */
+
+    /* Verify that there are no '=' characters in the variable name... */
+ do {
+ if (erts_raw_env_char_is_7bit_ascii_char('=', c, encoding))
+ return 0;
+ c = erts_raw_env_next_char(c, encoding);
+ } while (!erts_raw_env_char_is_7bit_ascii_char('\0', c, encoding));
+
+ return 1; /* Seems ok... */
}
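
A standalone analogue of the rule check_env_name enforces, assuming a plain 7-bit ASCII encoding instead of the encoding-aware raw-env helpers (name_ok is a hypothetical simplification, not ERTS code): a name must be non-empty and must not contain '='.

    #include <string.h>

    static int name_ok(const char *name)
    {
        return name != NULL               /* conversion did not fail     */
            && name[0] != '\0'            /* empty names are not allowed */
            && strchr(name, '=') == NULL; /* '=' may not appear in names */
    }

So name_ok("PATH") is accepted, while name_ok("") and name_ok("FOO=BAR") are rejected, matching the checks above.
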
diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c
index afb33c1cdb..c4a4dd5863 100644
--- a/erts/emulator/beam/erl_bif_port.c
+++ b/erts/emulator/beam/erl_bif_port.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2001-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2001-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -40,19 +41,20 @@
#include "external.h"
#include "packet_parser.h"
#include "erl_bits.h"
+#include "erl_bif_unique.h"
#include "dtrace-wrapper.h"
static Port *open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump);
-static byte* convert_environment(Process* p, Eterm env);
+static char* convert_environment(Eterm env);
static char **convert_args(Eterm);
static void free_args(char **);
char *erts_default_arg0 = "default";
-BIF_RETTYPE open_port_2(BIF_ALIST_2)
+BIF_RETTYPE erts_internal_open_port_2(BIF_ALIST_2)
{
Port *port;
- Eterm port_id;
+ Eterm res;
char *str;
int err_type, err_num;
@@ -60,27 +62,63 @@ BIF_RETTYPE open_port_2(BIF_ALIST_2)
if (!port) {
if (err_type == -3) {
ASSERT(err_num == BADARG || err_num == SYSTEM_LIMIT);
- BIF_ERROR(BIF_P, err_num);
+ if (err_num == BADARG)
+ res = am_badarg;
+ else if (err_num == SYSTEM_LIMIT)
+ res = am_system_limit;
+ else
+ /* this is only here to silence gcc, it should not happen */
+ BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
} else if (err_type == -2) {
str = erl_errno_id(err_num);
+ res = erts_atom_put((byte *) str, strlen(str), ERTS_ATOM_ENC_LATIN1, 1);
} else {
- str = "einval";
+ res = am_einval;
}
- BIF_P->fvalue = erts_atom_put((byte *) str, strlen(str), ERTS_ATOM_ENC_LATIN1, 1);
- BIF_ERROR(BIF_P, EXC_ERROR);
+ BIF_RET(res);
}
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
+ if (port->drv_ptr->flags & ERL_DRV_FLAG_USE_INIT_ACK) {
+
+ /* Copied from erl_port_task.c */
+ port->async_open_port = erts_alloc(ERTS_ALC_T_PRTSD,
+ sizeof(*port->async_open_port));
+ erts_make_ref_in_array(port->async_open_port->ref);
+ port->async_open_port->to = BIF_P->common.id;
+
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCKS_MSG_RECEIVE | ERTS_PROC_LOCK_LINK);
+ if (ERTS_PROC_PENDING_EXIT(BIF_P)) {
+ /* need to exit caller instead */
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCKS_MSG_RECEIVE | ERTS_PROC_LOCK_LINK);
+ KILL_CATCHES(BIF_P);
+ BIF_P->freason = EXC_EXIT;
+ erts_port_release(port);
+ BIF_RET(am_badarg);
+ }
+
+ ERTS_MSGQ_MV_INQ2PRIVQ(BIF_P);
+ BIF_P->msg.save = BIF_P->msg.last;
+
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCKS_MSG_RECEIVE);
+
+ res = erts_proc_store_ref(BIF_P, port->async_open_port->ref);
+ } else {
+ res = port->common.id;
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
+ }
- port_id = port->common.id;
erts_add_link(&ERTS_P_LINKS(port), LINK_PID, BIF_P->common.id);
- erts_add_link(&ERTS_P_LINKS(BIF_P), LINK_PID, port_id);
+ erts_add_link(&ERTS_P_LINKS(BIF_P), LINK_PID, port->common.id);
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
+ if (IS_TRACED_FL(BIF_P, F_TRACE_PROCS))
+ trace_proc(BIF_P, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK, BIF_P,
+ am_link, port->common.id);
+
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
erts_port_release(port);
- BIF_RET(port_id);
+ BIF_RET(res);
}
static ERTS_INLINE Port *
@@ -101,6 +139,12 @@ sig_lookup_port(Process *c_p, Eterm id_or_name)
return lookup_port(c_p, id_or_name, ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
}
+/* Non-inline copy of sig_lookup_port to be exported */
+Port *erts_sig_lookup_port(Process *c_p, Eterm id_or_name)
+{
+ return lookup_port(c_p, id_or_name, ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
+}
+
static ERTS_INLINE Port *
data_lookup_port(Process *c_p, Eterm id_or_name)
{
@@ -170,7 +214,7 @@ BIF_RETTYPE erts_internal_port_command_3(BIF_ALIST_3)
ASSERT(!(flags & ERTS_PORT_SIG_FLG_FORCE));
/* Fall through... */
case ERTS_PORT_OP_SCHEDULED:
- ASSERT(is_internal_ref(ref));
+ ASSERT(is_internal_ordinary_ref(ref));
ERTS_BIF_PREP_RET(res, ref);
break;
case ERTS_PORT_OP_DONE:
@@ -216,7 +260,7 @@ BIF_RETTYPE erts_internal_port_call_3(BIF_ALIST_3)
retval = am_badarg;
break;
case ERTS_PORT_OP_SCHEDULED:
- ASSERT(is_internal_ref(retval));
+ ASSERT(is_internal_ordinary_ref(retval));
break;
case ERTS_PORT_OP_DONE:
ASSERT(is_not_internal_ref(retval));
@@ -227,12 +271,10 @@ BIF_RETTYPE erts_internal_port_call_3(BIF_ALIST_3)
break;
}
- state = erts_smp_atomic32_read_acqb(&BIF_P->state);
+ state = erts_atomic32_read_acqb(&BIF_P->state);
if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) {
-#ifdef ERTS_SMP
if (state & ERTS_PSFLG_PENDING_EXIT)
erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN);
-#endif
ERTS_BIF_EXITED(BIF_P);
}
@@ -266,7 +308,7 @@ BIF_RETTYPE erts_internal_port_control_3(BIF_ALIST_3)
retval = am_badarg;
break;
case ERTS_PORT_OP_SCHEDULED:
- ASSERT(is_internal_ref(retval));
+ ASSERT(is_internal_ordinary_ref(retval));
break;
case ERTS_PORT_OP_DONE:
ASSERT(is_not_internal_ref(retval));
@@ -277,12 +319,10 @@ BIF_RETTYPE erts_internal_port_control_3(BIF_ALIST_3)
break;
}
- state = erts_smp_atomic32_read_acqb(&BIF_P->state);
+ state = erts_atomic32_read_acqb(&BIF_P->state);
if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) {
-#ifdef ERTS_SMP
if (state & ERTS_PSFLG_PENDING_EXIT)
erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN);
-#endif
ERTS_BIF_EXITED(BIF_P);
}
@@ -306,14 +346,13 @@ BIF_RETTYPE erts_internal_port_close_1(BIF_ALIST_1)
if (!prt)
BIF_RET(am_badarg);
-
- switch (erts_port_exit(BIF_P, 0, prt, prt->common.id, am_normal, &ref)) {
+ switch (erts_port_exit(BIF_P, 0, prt, BIF_P->common.id, am_normal, &ref)) {
case ERTS_PORT_OP_CALLER_EXIT:
case ERTS_PORT_OP_BADARG:
case ERTS_PORT_OP_DROPPED:
BIF_RET(am_badarg);
case ERTS_PORT_OP_SCHEDULED:
- ASSERT(is_internal_ref(ref));
+ ASSERT(is_internal_ordinary_ref(ref));
BIF_RET(ref);
case ERTS_PORT_OP_DONE:
BIF_RET(am_true);
@@ -340,13 +379,13 @@ BIF_RETTYPE erts_internal_port_connect_2(BIF_ALIST_2)
ref = NIL;
#endif
- switch (erts_port_connect(BIF_P, 0, prt, prt->common.id, BIF_ARG_2, &ref)) {
+ switch (erts_port_connect(BIF_P, 0, prt, BIF_P->common.id, BIF_ARG_2, &ref)) {
case ERTS_PORT_OP_CALLER_EXIT:
case ERTS_PORT_OP_BADARG:
case ERTS_PORT_OP_DROPPED:
BIF_RET(am_badarg);
case ERTS_PORT_OP_SCHEDULED:
- ASSERT(is_internal_ref(ref));
+ ASSERT(is_internal_ordinary_ref(ref));
BIF_RET(ref);
break;
case ERTS_PORT_OP_DONE:
@@ -385,7 +424,7 @@ BIF_RETTYPE erts_internal_port_info_1(BIF_ALIST_1)
case ERTS_PORT_OP_DROPPED:
BIF_RET(am_undefined);
case ERTS_PORT_OP_SCHEDULED:
- ASSERT(is_internal_ref(retval));
+ ASSERT(is_internal_ordinary_ref(retval));
BIF_RET(retval);
case ERTS_PORT_OP_DONE:
ASSERT(is_not_internal_ref(retval));
@@ -424,7 +463,7 @@ BIF_RETTYPE erts_internal_port_info_2(BIF_ALIST_2)
case ERTS_PORT_OP_DROPPED:
BIF_RET(am_undefined);
case ERTS_PORT_OP_SCHEDULED:
- ASSERT(is_internal_ref(retval));
+ ASSERT(is_internal_ordinary_ref(retval));
BIF_RET(retval);
case ERTS_PORT_OP_DONE:
ASSERT(is_not_internal_ref(retval));
@@ -468,39 +507,35 @@ cleanup_old_port_data(erts_aint_t data)
ASSERT(is_immed((Eterm) data));
}
else {
-#ifdef ERTS_SMP
ErtsPortDataHeap *pdhp = (ErtsPortDataHeap *) data;
size_t size;
- ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER;
- size = sizeof(ErtsPortDataHeap) + pdhp->hsize*(sizeof(Eterm) - 1);
+ ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER;
+ size = sizeof(ErtsPortDataHeap) + (pdhp->hsize-1)*sizeof(Eterm);
erts_schedule_thr_prgr_later_cleanup_op(free_port_data_heap,
(void *) pdhp,
&pdhp->later_op,
size);
-#else
- free_port_data_heap((void *) data);
-#endif
}
}
void
erts_init_port_data(Port *prt)
{
- erts_smp_atomic_init_nob(&prt->data, (erts_aint_t) am_undefined);
+ erts_atomic_init_nob(&prt->data, (erts_aint_t) am_undefined);
}
void
erts_cleanup_port_data(Port *prt)
{
ASSERT(erts_atomic32_read_nob(&prt->state) & ERTS_PORT_SFLGS_INVALID_LOOKUP);
- cleanup_old_port_data(erts_smp_atomic_read_nob(&prt->data));
- erts_smp_atomic_set_nob(&prt->data, (erts_aint_t) THE_NON_VALUE);
+ cleanup_old_port_data(erts_atomic_xchg_nob(&prt->data,
+ (erts_aint_t) NULL));
}
Uint
erts_port_data_size(Port *prt)
{
- erts_aint_t data = erts_smp_atomic_read_ddrb(&prt->data);
+ erts_aint_t data = erts_atomic_read_ddrb(&prt->data);
if ((data & 0x3) != 0) {
ASSERT(is_immed((Eterm) (UWord) data));
@@ -508,14 +543,14 @@ erts_port_data_size(Port *prt)
}
else {
ErtsPortDataHeap *pdhp = (ErtsPortDataHeap *) data;
- return (Uint) sizeof(ErtsPortDataHeap) + pdhp->hsize*(sizeof(Eterm)-1);
+ return (Uint) sizeof(ErtsPortDataHeap) + (pdhp->hsize-1)*sizeof(Eterm);
}
}
ErlOffHeap *
erts_port_data_offheap(Port *prt)
{
- erts_aint_t data = erts_smp_atomic_read_ddrb(&prt->data);
+ erts_aint_t data = erts_atomic_read_ddrb(&prt->data);
if ((data & 0x3) != 0) {
ASSERT(is_immed((Eterm) (UWord) data));
@@ -550,19 +585,26 @@ BIF_RETTYPE port_set_data_2(BIF_ALIST_2)
hsize = size_object(BIF_ARG_2);
pdhp = erts_alloc(ERTS_ALC_T_PORT_DATA_HEAP,
- sizeof(ErtsPortDataHeap) + hsize*(sizeof(Eterm)-1));
+ sizeof(ErtsPortDataHeap) + (hsize-1)*sizeof(Eterm));
hp = &pdhp->heap[0];
pdhp->off_heap.first = NULL;
pdhp->off_heap.overhead = 0;
+ pdhp->hsize = hsize;
pdhp->data = copy_struct(BIF_ARG_2, hsize, &hp, &pdhp->off_heap);
data = (erts_aint_t) pdhp;
ASSERT((data & 0x3) == 0);
}
- data = erts_smp_atomic_xchg_wb(&prt->data, data);
+ data = erts_atomic_xchg_wb(&prt->data, data);
+ if (data == (erts_aint_t)NULL) {
+ /* Port terminated by racing thread */
+ data = erts_atomic_xchg_wb(&prt->data, data);
+ ASSERT(data != (erts_aint_t)NULL);
+ cleanup_old_port_data(data);
+ BIF_ERROR(BIF_P, BADARG);
+ }
cleanup_old_port_data(data);
-
BIF_RET(am_true);
}
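
The exchange dance in port_set_data_2 deserves a note: the new data is published with a single atomic exchange and the previous value is freed, while a NULL result signals that a racing port terminator already tore the slot down, in which case the just-installed value is swapped back out and released before the BIF fails. A standalone analogue in C11 atomics (illustrative only; it frees eagerly instead of using the ERTS thread-progress deferred cleanup, and ignores the immediate-value encoding):

    #include <stdatomic.h>
    #include <stdlib.h>

    typedef struct { _Atomic(void *) data; } slot_t;

    /* Returns 0 on success, -1 if the slot was already torn down. */
    static int slot_set(slot_t *s, void *new_data)
    {
        void *old = atomic_exchange(&s->data, new_data);
        if (old == NULL) {
            /* Lost the race: restore the NULL sentinel and reclaim our value. */
            free(atomic_exchange(&s->data, NULL));
            return -1;
        }
        free(old);                  /* dispose of whatever was there before */
        return 0;
    }
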
@@ -580,7 +622,9 @@ BIF_RETTYPE port_get_data_1(BIF_ALIST_1)
if (!prt)
BIF_ERROR(BIF_P, BADARG);
- data = erts_smp_atomic_read_ddrb(&prt->data);
+ data = erts_atomic_read_ddrb(&prt->data);
+ if (data == (erts_aint_t)NULL)
+ BIF_ERROR(BIF_P, BADARG); /* Port terminated by racing thread */
if ((data & 0x3) != 0) {
res = (Eterm) (UWord) data;
@@ -607,7 +651,7 @@ BIF_RETTYPE port_get_data_1(BIF_ALIST_1)
static Port *
open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump)
{
- int i;
+ Sint i;
Eterm option;
Uint arity;
Eterm* tp;
@@ -674,11 +718,11 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump)
goto badarg;
}
} else if (option == am_env) {
- byte* bytes;
- if ((bytes = convert_environment(p, *tp)) == NULL) {
+ if (opts.envir) /* ignore previous env option... */
+ erts_free(ERTS_ALC_T_OPEN_PORT_ENV, opts.envir);
+ opts.envir = convert_environment(*tp);
+ if (!opts.envir)
goto badarg;
- }
- opts.envir = (char *) bytes;
} else if (option == am_args) {
char **av;
char **oav = opts.argv;
@@ -781,7 +825,7 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump)
} else {
name_buf = (char *) erts_alloc(ERTS_ALC_T_TMP, i + 1);
if (intlist_to_buf(name, name_buf, i) != i)
- erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
+ erts_exit(ERTS_ERROR_EXIT, "%s:%d: Internal error\n", __FILE__, __LINE__);
name_buf[i] = '\0';
}
driver = &vanilla_driver;
@@ -869,11 +913,11 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump)
}
if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(p, am_out);
+ trace_sched(p, ERTS_PROC_LOCK_MAIN, am_out);
}
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
port = erts_open_driver(driver, p->common.id, name_buf, &opts, err_typep, err_nump);
#ifdef USE_VM_PROBES
@@ -886,21 +930,22 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump)
DTRACE3(port_open, process_str, name_buf, port_str);
}
#endif
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+
+ if (port && IS_TRACED_FL(port, F_TRACE_PORTS))
+ trace_port(port, am_getting_linked, p->common.id);
+
+ erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+
+ if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
+ trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in);
+ }
if (!port) {
DEBUGF(("open_driver returned (%d:%d)\n",
err_typep ? *err_typep : 4711,
err_nump ? *err_nump : 4711));
- if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(p, am_in);
- }
goto do_return;
}
-
- if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(p, am_in);
- }
if (linebuf && port->linebuf == NULL){
port->linebuf = allocate_linebuf(linebuf);
@@ -911,6 +956,8 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump)
erts_atomic32_read_bor_relb(&port->state, sflgs);
do_return:
+ if (opts.envir)
+ erts_free(ERTS_ALC_T_OPEN_PORT_ENV, opts.envir);
if (name_buf)
erts_free(ERTS_ALC_T_TMP, (void *) name_buf);
if (opts.argv) {
@@ -935,14 +982,16 @@ static char **convert_args(Eterm l)
{
char **pp;
char *b;
- int n;
- int i = 0;
+ Sint n;
+ Sint i = 0;
Eterm str;
if (is_not_list(l) && is_not_nil(l)) {
return NULL;
}
n = erts_list_length(l);
+ if (n < 0)
+ return NULL;
/* We require at least one element in argv[0] + NULL at end */
pp = erts_alloc(ERTS_ALC_T_TMP, (n + 2) * sizeof(char **));
pp[i++] = erts_default_arg0;
@@ -974,74 +1023,129 @@ static void free_args(char **av)
}
erts_free(ERTS_ALC_T_TMP, av);
}
-
-static byte* convert_environment(Process* p, Eterm env)
+#ifdef DEBUG
+#define ERTS_CONV_ENV_BUF_EXTRA 2
+#else
+#define ERTS_CONV_ENV_BUF_EXTRA 1024
+#endif
+
+static char* convert_environment(Eterm env)
{
- Eterm all;
- Eterm* temp_heap;
- Eterm* hp;
- Uint heap_size;
- int n;
- Sint size;
+ /*
+ * Returns environment buffer in memory allocated
+ * as ERTS_ALC_T_OPEN_PORT_ENV. Caller *needs*
+ * to deallocate...
+ */
+
+ Sint size, alloc_size;
byte* bytes;
int encoding = erts_get_native_filename_encoding();
- if ((n = erts_list_length(env)) < 0) {
- return NULL;
- }
- heap_size = 2*(5*n+1);
- temp_heap = hp = (Eterm *) erts_alloc(ERTS_ALC_T_TMP, heap_size*sizeof(Eterm));
- bytes = NULL; /* Indicating error */
+ alloc_size = ERTS_CONV_ENV_BUF_EXTRA;
+ bytes = erts_alloc(ERTS_ALC_T_OPEN_PORT_ENV,
+ alloc_size);
+ size = 0;
- /*
- * All errors below are handled by jumping to 'done', to ensure that the memory
- * gets deallocated. Do NOT return directly from this function.
- */
+    /* ERTS_CONV_ENV_BUF_EXTRA >= 2 is needed for the end delimiter... */
+ ERTS_CT_ASSERT(ERTS_CONV_ENV_BUF_EXTRA >= 2);
- all = CONS(hp, make_small(0), NIL);
- hp += 2;
+ while (is_list(env)) {
+ Sint var_sz, val_sz, need;
+ byte *str, *limit;
+ Eterm tmp, *tp, *consp;
- while(is_list(env)) {
- Eterm tmp;
- Eterm* tp;
+ consp = list_val(env);
+ tmp = CAR(consp);
+ if (is_not_tuple_arity(tmp, 2))
+ goto error;
- tmp = CAR(list_val(env));
- if (is_not_tuple_arity(tmp, 2)) {
- goto done;
- }
tp = tuple_val(tmp);
- tmp = CONS(hp, make_small(0), NIL);
- hp += 2;
- if (tp[2] != am_false) {
- tmp = CONS(hp, tp[2], tmp);
- hp += 2;
- }
- tmp = CONS(hp, make_small('='), tmp);
- hp += 2;
- tmp = CONS(hp, tp[1], tmp);
- hp += 2;
- all = CONS(hp, tmp, all);
- hp += 2;
- env = CDR(list_val(env));
- }
- if (is_not_nil(env)) {
- goto done;
- }
- if ((size = erts_native_filename_need(all,encoding)) < 0) {
- goto done;
+ /* Check encoding of env variable... */
+ if (is_not_list(tp[1]))
+ goto error;
+ var_sz = erts_native_filename_need(tp[1], encoding);
+ if (var_sz <= 0)
+ goto error;
+ /* Check encoding of value... */
+ if (tp[2] == am_false || is_nil(tp[2]))
+ val_sz = 0;
+ else if (is_not_list(tp[2]))
+ goto error;
+ else {
+ val_sz = erts_native_filename_need(tp[2], encoding);
+ if (val_sz < 0)
+ goto error;
+ }
+
+ /* Ensure enough memory... */
+ need = size;
+ need += var_sz + val_sz;
+ /* '=' and '\0' */
+ need += 2 * erts_raw_env_7bit_ascii_char_need(encoding);
+ if (need > alloc_size) {
+ alloc_size = (need - alloc_size) + alloc_size;
+ alloc_size += ERTS_CONV_ENV_BUF_EXTRA;
+ bytes = erts_realloc(ERTS_ALC_T_OPEN_PORT_ENV,
+ bytes, alloc_size);
+ }
+
+ /* Write environment variable name... */
+ str = bytes + size;
+ erts_native_filename_put(tp[1], encoding, str);
+ /* empty variable name is not allowed... */
+ if (erts_raw_env_char_is_7bit_ascii_char('\0', str, encoding))
+ goto error;
+
+ /*
+ * Drop null characters at the end and verify that we do
+ * not have any '=' characters in the name...
+ */
+ limit = str + var_sz;
+ while (str < limit) {
+ if (erts_raw_env_char_is_7bit_ascii_char('\0', str, encoding))
+ break;
+ if (erts_raw_env_char_is_7bit_ascii_char('=', str, encoding))
+ goto error;
+ str = erts_raw_env_next_char(str, encoding);
+ }
+
+ /* Write the equals sign... */
+ str = erts_raw_env_7bit_ascii_char_put('=', str, encoding);
+
+ /* Write the value... */
+ if (val_sz > 0) {
+ limit = str + val_sz;
+ erts_native_filename_put(tp[2], encoding, str);
+ while (str < limit) {
+ if (erts_raw_env_char_is_7bit_ascii_char('\0', str, encoding))
+ break;
+ str = erts_raw_env_next_char(str, encoding);
+ }
+ }
+
+ /* Delimit... */
+ str = erts_raw_env_7bit_ascii_char_put('\0', str, encoding);
+
+ size = str - bytes;
+ ASSERT(size <= alloc_size);
+
+ env = CDR(consp);
}
- /*
- * Put the result in a binary (no risk for a memory leak that way).
- */
- (void) erts_new_heap_binary(p, NULL, size, &bytes);
- erts_native_filename_put(all,encoding,bytes);
+ /* End delimit... */
+ (void) erts_raw_env_7bit_ascii_char_put('\0', &bytes[size], encoding);
+
+ if (is_nil(env))
+ return (char *) bytes;
+
+error:
+
+ if (bytes)
+ erts_free(ERTS_ALC_T_OPEN_PORT_ENV, bytes);
- done:
- erts_free(ERTS_ALC_T_TMP, temp_heap);
- return bytes;
+ return (char *) NULL; /* error... */
}
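
The buffer convert_environment returns is the conventional doubly-NUL-terminated environment block: each entry is NAME=VALUE followed by '\0' (a value of false or [] yields an empty VALUE), and one extra '\0' terminates the block. A standalone sketch of consuming such a block, assuming a plain 8-bit encoding (the entries are made-up examples, not from the source):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* The string literal's own terminator supplies the final '\0'. */
        const char env[] = "HOME=/tmp\0DEBUG=\0";
        const char *p = env;

        while (*p != '\0') {          /* an empty entry ends the block    */
            printf("%s\n", p);
            p += strlen(p) + 1;       /* skip the entry and its delimiter */
        }
        return 0;
    }
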
/* ------------ decode_packet() and friends: */
@@ -1159,7 +1263,7 @@ static Eterm http_bld_uri(struct packet_callback_args* pca,
return erts_bld_tuple(hpp, szp, 3, am_scheme, s1, s2);
default:
- erl_exit(1, "%s, line %d: type=%u\n", __FILE__, __LINE__, uri->type);
+ erts_exit(ERTS_ERROR_EXIT, "%s, line %d: type=%u\n", __FILE__, __LINE__, uri->type);
}
}
@@ -1319,7 +1423,8 @@ BIF_RETTYPE decode_packet_3(BIF_ALIST_3)
ErlSubBin* rest;
Eterm res;
Eterm options;
- int code;
+ int code;
+ char delimiter = '\n';
if (!is_binary(BIF_ARG_2) ||
(!is_list(BIF_ARG_3) && !is_nil(BIF_ARG_3))) {
@@ -1360,6 +1465,11 @@ BIF_RETTYPE decode_packet_3(BIF_ALIST_3)
case am_line_length:
trunc_len = val;
goto next_option;
+ case am_line_delimiter:
+ if (type == TCP_PB_LINE_LF && val <= 255) {
+ delimiter = (char)val;
+ goto next_option;
+ }
}
}
}
@@ -1380,7 +1490,7 @@ BIF_RETTYPE decode_packet_3(BIF_ALIST_3)
pca.aligned_ptr = bin_ptr;
}
packet_sz = packet_get_length(type, (char*)pca.aligned_ptr, pca.bin_sz,
- max_plen, trunc_len, &http_state);
+ max_plen, trunc_len, delimiter, &http_state);
if (!(packet_sz > 0 && packet_sz <= pca.bin_sz)) {
if (packet_sz < 0) {
goto error;
diff --git a/erts/emulator/beam/erl_bif_re.c b/erts/emulator/beam/erl_bif_re.c
index 448c6f6f6d..bc819505e7 100644
--- a/erts/emulator/beam/erl_bif_re.c
+++ b/erts/emulator/beam/erl_bif_re.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2008-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2008-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -63,12 +64,43 @@ static void erts_erts_pcre_stack_free(void *ptr) {
erts_free(ERTS_ALC_T_RE_STACK,ptr);
}
+#define ERTS_PCRE_STACK_MARGIN (10*1024)
+
+# define ERTS_STACK_LIMIT ((char *) ethr_get_stacklimit())
+
+static int
+stack_guard_downwards(void)
+{
+ char *limit = ERTS_STACK_LIMIT;
+ char c;
+
+ ASSERT(limit);
+
+ return erts_check_below_limit(&c, limit + ERTS_PCRE_STACK_MARGIN);
+}
+
+static int
+stack_guard_upwards(void)
+{
+ char *limit = ERTS_STACK_LIMIT;
+ char c;
+
+ ASSERT(limit);
+
+ return erts_check_above_limit(&c, limit - ERTS_PCRE_STACK_MARGIN);
+}
+
void erts_init_bif_re(void)
{
+ char c;
erts_pcre_malloc = &erts_erts_pcre_malloc;
erts_pcre_free = &erts_erts_pcre_free;
erts_pcre_stack_malloc = &erts_erts_pcre_stack_malloc;
erts_pcre_stack_free = &erts_erts_pcre_stack_free;
+ if ((char *) erts_ptr_id(&c) > ERTS_STACK_LIMIT)
+ erts_pcre_stack_guard = stack_guard_downwards;
+ else
+ erts_pcre_stack_guard = stack_guard_upwards;
default_table = NULL; /* ISO8859-1 default, forced into pcre */
max_loop_limit = CONTEXT_REDS * LOOP_FACTOR;
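
erts_init_bif_re chooses between the two guards by probing which way the stack grows: it compares the address of a local variable against the thread's known stack limit, with erts_ptr_id keeping the compiler from reasoning the comparison away. A standalone sketch of the same probe, comparing a caller's local against a callee's (illustrative; comparing unrelated object pointers is formally implementation-defined, which is why the real code anchors one end at a known limit):

    #include <stdio.h>

    static int grows_downwards(char *outer)
    {
        char inner;
        return &inner < outer;    /* is the deeper frame at a lower address? */
    }

    int main(void)
    {
        char outer;
        printf("stack grows %s\n",
               grows_downwards(&outer) ? "downwards" : "upwards");
        return 0;
    }
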
@@ -99,7 +131,7 @@ Sint erts_re_set_loop_limit(Sint limit)
static int term_to_int(Eterm term, int *sp)
{
-#if defined(ARCH_64) && !HALFWORD_HEAP
+#if defined(ARCH_64)
if (is_small(term)) {
Uint x = signed_val(term);
@@ -150,7 +182,7 @@ static int term_to_int(Eterm term, int *sp)
static Eterm make_signed_integer(int x, Process *p)
{
-#if defined(ARCH_64) && !HALFWORD_HEAP
+#if defined(ARCH_64)
return make_small(x);
#else
Eterm* hp;
@@ -476,6 +508,17 @@ build_compile_result(Process *p, Eterm error_tag, pcre *result, int errcode, con
* Compile BIFs
*/
+BIF_RETTYPE
+re_version_0(BIF_ALIST_0)
+{
+ Eterm ret;
+ size_t version_size = 0;
+ byte *version = (byte *) erts_pcre_version();
+ version_size = strlen((const char *) version);
+ ret = new_binary(BIF_P, version, version_size);
+ BIF_RET(ret);
+}
+
static BIF_RETTYPE
re_compile(Process* p, Eterm arg1, Eterm arg2)
{
@@ -599,10 +642,11 @@ static void cleanup_restart_context(RestartContext *rc)
}
}
-static void cleanup_restart_context_bin(Binary *bp)
+static int cleanup_restart_context_bin(Binary *bp)
{
RestartContext *rc = ERTS_MAGIC_BIN_DATA(bp);
cleanup_restart_context(rc);
+ return 1;
}
/*
@@ -629,9 +673,15 @@ static Eterm build_exec_return(Process *p, int rc, RestartContext *restartp, Ete
}
} else {
ReturnInfo *ri;
- ReturnInfo defri = {RetIndex,0,{0}};
+ ReturnInfo defri;
if (restartp->ret_info == NULL) {
+	    /* The gcc shipped with OpenBSD 5.8 for some reason generates
+	       bad code if the initialization above is done inline with
+	       the struct declaration, so don't do that. */
+ defri.type = RetIndex;
+ defri.num_spec = 0;
+ defri.v[0] = 0;
ri = &defri;
} else {
ri = restartp->ret_info;
@@ -1312,17 +1362,17 @@ handle_iolist:
Binary *mbp = erts_create_magic_binary(sizeof(RestartContext),
cleanup_restart_context_bin);
RestartContext *restartp = ERTS_MAGIC_BIN_DATA(mbp);
- Eterm magic_bin;
+ Eterm magic_ref;
Eterm *hp;
memcpy(restartp,&restart,sizeof(RestartContext));
BUMP_ALL_REDS(p);
- hp = HAlloc(p, PROC_BIN_SIZE);
- magic_bin = erts_mk_magic_binary_term(&hp, &MSO(p), mbp);
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+ magic_ref = erts_mk_magic_ref(&hp, &MSO(p), mbp);
BIF_TRAP3(&re_exec_trap_export,
p,
arg1,
arg2 /* To avoid GC of precompiled code, XXX: not utilized yet */,
- magic_bin);
+ magic_ref);
}
res = build_exec_return(p, rc, &restart, arg1);
@@ -1359,9 +1409,7 @@ static BIF_RETTYPE re_exec_trap(BIF_ALIST_3)
Uint loop_limit_tmp;
Eterm res;
- ASSERT(ERTS_TERM_IS_MAGIC_BINARY(BIF_ARG_3));
-
- mbp = ((ProcBin *) binary_val(BIF_ARG_3))->val;
+ mbp = erts_magic_ref2bin(BIF_ARG_3);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp)
== cleanup_restart_context_bin);
diff --git a/erts/emulator/beam/erl_bif_timer.c b/erts/emulator/beam/erl_bif_timer.c
deleted file mode 100644
index 03ac97283c..0000000000
--- a/erts/emulator/beam/erl_bif_timer.c
+++ /dev/null
@@ -1,705 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2012. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
-
-#include "erl_bif_timer.h"
-#include "global.h"
-#include "bif.h"
-#include "error.h"
-#include "big.h"
-#include "erl_thr_progress.h"
-
-/****************************************************************************
-** BIF Timer support
-****************************************************************************/
-
-#define BTM_FLG_SL_TIMER (((Uint32) 1) << 0)
-#define BTM_FLG_CANCELED (((Uint32) 1) << 1)
-#define BTM_FLG_HEAD (((Uint32) 1) << 2)
-#define BTM_FLG_BYNAME (((Uint32) 1) << 3)
-#define BTM_FLG_WRAP (((Uint32) 1) << 4)
-
-struct ErtsBifTimer_ {
- struct {
- union {
- ErtsBifTimer **head;
- ErtsBifTimer *prev;
- } u;
- ErtsBifTimer *next;
- } tab;
- union {
- Eterm name;
- struct {
- ErtsBifTimer *prev;
- ErtsBifTimer *next;
- Process *ess;
- } proc;
- } receiver;
- ErlTimer tm;
- ErlHeapFragment* bp;
- Uint32 flags;
- Eterm message;
- Uint32 ref_numbers[ERTS_REF_NUMBERS];
-};
-
-#ifdef SMALL_MEMORY
-#define TIMER_HASH_VEC_SZ 3331
-#define BTM_PREALC_SZ 10
-#else
-#define TIMER_HASH_VEC_SZ 10007
-#define BTM_PREALC_SZ 100
-#endif
-static ErtsBifTimer **bif_timer_tab;
-static Uint no_bif_timers;
-
-
-static erts_smp_rwmtx_t bif_timer_lock;
-
-#define erts_smp_safe_btm_rwlock(P, L) \
- safe_btm_lock((P), (L), 1)
-#define erts_smp_safe_btm_rlock(P, L) \
- safe_btm_lock((P), (L), 0)
-#define erts_smp_btm_rwlock() \
- erts_smp_rwmtx_rwlock(&bif_timer_lock)
-#define erts_smp_btm_tryrwlock() \
- erts_smp_rwmtx_tryrwlock(&bif_timer_lock)
-#define erts_smp_btm_rwunlock() \
- erts_smp_rwmtx_rwunlock(&bif_timer_lock)
-#define erts_smp_btm_rlock() \
- erts_smp_rwmtx_rlock(&bif_timer_lock)
-#define erts_smp_btm_tryrlock() \
- erts_smp_rwmtx_tryrlock(&bif_timer_lock)
-#define erts_smp_btm_runlock() \
- erts_smp_rwmtx_runlock(&bif_timer_lock)
-#define erts_smp_btm_lock_init() \
- erts_smp_rwmtx_init(&bif_timer_lock, "bif_timers")
-
-
-static ERTS_INLINE int
-safe_btm_lock(Process *c_p, ErtsProcLocks c_p_locks, int rw_lock)
-{
- ASSERT(c_p && c_p_locks);
-#ifdef ERTS_SMP
- if ((rw_lock ? erts_smp_btm_tryrwlock() : erts_smp_btm_tryrlock()) != EBUSY)
- return 0;
- erts_smp_proc_unlock(c_p, c_p_locks);
- if (rw_lock)
- erts_smp_btm_rwlock();
- else
- erts_smp_btm_rlock();
- erts_smp_proc_lock(c_p, c_p_locks);
- if (ERTS_PROC_IS_EXITING(c_p)) {
- if (rw_lock)
- erts_smp_btm_rwunlock();
- else
- erts_smp_btm_runlock();
- return 1;
- }
-#endif
- return 0;
-}
-
-ERTS_SCHED_PREF_PALLOC_IMPL(btm_pre, ErtsBifTimer, BTM_PREALC_SZ)
-
-static ERTS_INLINE int
-get_index(Uint32 *ref_numbers, Uint32 len)
-{
- Uint32 hash;
- /* len can potentially be larger than ERTS_REF_NUMBERS
- if it has visited another node... */
- if (len > ERTS_REF_NUMBERS)
- len = ERTS_REF_NUMBERS;
-
-#if ERTS_REF_NUMBERS != 3
-#error "ERTS_REF_NUMBERS changed. Update me..."
-#endif
- switch (len) {
- case 3: if (!ref_numbers[2]) len = 2;
- case 2: if (!ref_numbers[1]) len = 1;
- default: break;
- }
-
- ASSERT(1 <= len && len <= ERTS_REF_NUMBERS);
-
- hash = block_hash((byte *) ref_numbers, len * sizeof(Uint32), 0x08d12e65);
- return (int) (hash % ((Uint32) TIMER_HASH_VEC_SZ));
-}
-
-static Eterm
-create_ref(Uint *hp, Uint32 *ref_numbers, Uint32 len)
-{
- Uint32 *datap;
- int i;
-
-
- if (len > ERTS_MAX_REF_NUMBERS) {
- /* Such large refs should not be able to appear in the emulator */
- erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
- }
-
-#if defined(ARCH_64) && !HALFWORD_HEAP
- hp[0] = make_ref_thing_header(len/2 + 1);
- datap = (Uint32 *) &hp[1];
- *(datap++) = len;
-#else
- hp[0] = make_ref_thing_header(len);
- datap = (Uint32 *) &hp[1];
-#endif
-
- for (i = 0; i < len; i++)
- datap[i] = ref_numbers[i];
-
- return make_internal_ref(hp);
-}
-
-static int
-eq_non_standard_ref_numbers(Uint32 *rn1, Uint32 len1, Uint32 *rn2, Uint32 len2)
-{
-#if defined(ARCH_64) && !HALFWORD_HEAP
-#define MAX_REF_HEAP_SZ (1+(ERTS_MAX_REF_NUMBERS/2+1))
-#else
-#define MAX_REF_HEAP_SZ (1+ERTS_MAX_REF_NUMBERS)
-#endif
- DeclareTmpHeapNoproc(r1_hp,(MAX_REF_HEAP_SZ * 2));
- Eterm *r2_hp = r1_hp +MAX_REF_HEAP_SZ;
-
- return eq(create_ref(r1_hp, rn1, len1), create_ref(r2_hp, rn2, len2));
-#undef MAX_REF_HEAP_SZ
-}
-
-static ERTS_INLINE int
-eq_ref_numbers(Uint32 *rn1, Uint32 len1, Uint32 *rn2, Uint32 len2)
-{
- int res;
- if (len1 != ERTS_REF_NUMBERS || len2 != ERTS_REF_NUMBERS) {
- /* Can potentially happen, but will never... */
- return eq_non_standard_ref_numbers(rn1, len1, rn2, len2);
- }
-
-#if ERTS_REF_NUMBERS != 3
-#error "ERTS_REF_NUMBERS changed. Update me..."
-#endif
- res = rn1[0] == rn2[0] && rn1[1] == rn2[1] && rn1[2] == rn2[2];
-
- ASSERT(res
- ? eq_non_standard_ref_numbers(rn1, len1, rn2, len2)
- : !eq_non_standard_ref_numbers(rn1, len1, rn2, len2));
-
- return res;
-}
-
-static ERTS_INLINE ErtsBifTimer *
-tab_find(Eterm ref)
-{
- Uint32 *ref_numbers = internal_ref_numbers(ref);
- Uint32 ref_numbers_len = internal_ref_no_of_numbers(ref);
- int ix = get_index(ref_numbers, ref_numbers_len);
- ErtsBifTimer* btm;
-
- for (btm = bif_timer_tab[ix]; btm; btm = btm->tab.next)
- if (eq_ref_numbers(ref_numbers, ref_numbers_len,
- btm->ref_numbers, ERTS_REF_NUMBERS))
- return btm;
- return NULL;
-}
-
-static ERTS_INLINE void
-tab_remove(ErtsBifTimer* btm)
-{
- if (btm->flags & BTM_FLG_HEAD) {
- *btm->tab.u.head = btm->tab.next;
- if (btm->tab.next) {
- btm->tab.next->flags |= BTM_FLG_HEAD;
- btm->tab.next->tab.u.head = btm->tab.u.head;
- }
- }
- else {
- btm->tab.u.prev->tab.next = btm->tab.next;
- if (btm->tab.next)
- btm->tab.next->tab.u.prev = btm->tab.u.prev;
- }
- btm->flags |= BTM_FLG_CANCELED;
- ASSERT(no_bif_timers > 0);
- no_bif_timers--;
-}
-
-static ERTS_INLINE void
-tab_insert(ErtsBifTimer* btm)
-{
- int ix = get_index(btm->ref_numbers, ERTS_REF_NUMBERS);
- ErtsBifTimer* btm_list = bif_timer_tab[ix];
-
- if (btm_list) {
- btm_list->flags &= ~BTM_FLG_HEAD;
- btm_list->tab.u.prev = btm;
- }
-
- btm->flags |= BTM_FLG_HEAD;
- btm->tab.u.head = &bif_timer_tab[ix];
- btm->tab.next = btm_list;
- bif_timer_tab[ix] = btm;
- no_bif_timers++;
-}
-
-static ERTS_INLINE void
-link_proc(Process *p, ErtsBifTimer* btm)
-{
- btm->receiver.proc.ess = p;
- btm->receiver.proc.prev = NULL;
- btm->receiver.proc.next = p->u.bif_timers;
- if (p->u.bif_timers)
- p->u.bif_timers->receiver.proc.prev = btm;
- p->u.bif_timers = btm;
-}
-
-static ERTS_INLINE void
-unlink_proc(ErtsBifTimer* btm)
-{
- if (btm->receiver.proc.prev)
- btm->receiver.proc.prev->receiver.proc.next = btm->receiver.proc.next;
- else
- btm->receiver.proc.ess->u.bif_timers = btm->receiver.proc.next;
- if (btm->receiver.proc.next)
- btm->receiver.proc.next->receiver.proc.prev = btm->receiver.proc.prev;
-}
-
-static void
-bif_timer_cleanup(ErtsBifTimer* btm)
-{
- ASSERT(btm);
-
- if (btm->bp)
- free_message_buffer(btm->bp);
-
- if (!btm_pre_free(btm)) {
- if (btm->flags & BTM_FLG_SL_TIMER)
- erts_free(ERTS_ALC_T_SL_BIF_TIMER, (void *) btm);
- else
- erts_free(ERTS_ALC_T_LL_BIF_TIMER, (void *) btm);
- }
-}
-
-static void
-bif_timer_timeout(ErtsBifTimer* btm)
-{
- ASSERT(btm);
-
-
- erts_smp_btm_rwlock();
-
- if (btm->flags & BTM_FLG_CANCELED) {
- /*
- * A concurrent cancel is ongoing. Do not send the timeout message,
- * but clean up here since the cancel callback won't be called.
- */
-#ifndef ERTS_SMP
- ASSERT(0);
-#endif
- }
- else {
- ErtsProcLocks rp_locks = 0;
- Process* rp;
-
- tab_remove(btm);
-
- ASSERT(!erts_get_current_process());
-
- if (btm->flags & BTM_FLG_BYNAME)
- rp = erts_whereis_process(NULL, 0, btm->receiver.name, 0, 0);
- else {
- rp = btm->receiver.proc.ess;
- unlink_proc(btm);
- }
-
- if (rp) {
- Eterm message;
- ErlHeapFragment *bp;
-
- bp = btm->bp;
- btm->bp = NULL; /* Prevent cleanup of message buffer... */
-
- if (!(btm->flags & BTM_FLG_WRAP))
- message = btm->message;
- else {
-#if ERTS_REF_NUMBERS != 3
-#error "ERTS_REF_NUMBERS changed. Update me..."
-#endif
- Eterm ref;
- Uint *hp;
- Uint wrap_size = REF_THING_SIZE + 4;
- message = btm->message;
-
- if (!bp) {
- ErlOffHeap *ohp;
- ASSERT(is_immed(message));
- hp = erts_alloc_message_heap(wrap_size,
- &bp,
- &ohp,
- rp,
- &rp_locks);
- } else {
- Eterm old_size = bp->used_size;
- bp = erts_resize_message_buffer(bp, old_size + wrap_size,
- &message, 1);
- hp = &bp->mem[0] + old_size;
- }
-
- write_ref_thing(hp,
- btm->ref_numbers[0],
- btm->ref_numbers[1],
- btm->ref_numbers[2]);
- ref = make_internal_ref(hp);
- hp += REF_THING_SIZE;
- message = TUPLE3(hp, am_timeout, ref, message);
- }
-
- erts_queue_message(rp, &rp_locks, bp, message, NIL
-#ifdef USE_VM_PROBES
- , NIL
-#endif
- );
- erts_smp_proc_unlock(rp, rp_locks);
- }
- }
-
- erts_smp_btm_rwunlock();
-
- bif_timer_cleanup(btm);
-}
-
-static Eterm
-setup_bif_timer(Uint32 xflags,
- Process *c_p,
- Eterm time,
- Eterm receiver,
- Eterm message)
-{
- Process *rp;
- ErtsBifTimer* btm;
- Uint timeout;
- Eterm ref;
- Uint32 *ref_numbers;
-
- if (!term_to_Uint(time, &timeout))
- return THE_NON_VALUE;
-#if defined(ARCH_64) && !HALFWORD_HEAP
- if ((timeout >> 32) != 0)
- return THE_NON_VALUE;
-#endif
- if (is_not_internal_pid(receiver) && is_not_atom(receiver))
- return THE_NON_VALUE;
-
- ref = erts_make_ref(c_p);
-
- if (is_atom(receiver))
- rp = NULL;
- else {
- rp = erts_pid2proc(c_p, ERTS_PROC_LOCK_MAIN,
- receiver, ERTS_PROC_LOCK_MSGQ);
- if (!rp)
- return ref;
- }
-
- if (timeout < ERTS_ALC_MIN_LONG_LIVED_TIME) {
- if (timeout < 1000) {
- btm = btm_pre_alloc();
- if (!btm)
- goto sl_timer_alloc;
- btm->flags = 0;
- }
- else {
- sl_timer_alloc:
- btm = (ErtsBifTimer *) erts_alloc(ERTS_ALC_T_SL_BIF_TIMER,
- sizeof(ErtsBifTimer));
- btm->flags = BTM_FLG_SL_TIMER;
- }
- }
- else {
- btm = (ErtsBifTimer *) erts_alloc(ERTS_ALC_T_LL_BIF_TIMER,
- sizeof(ErtsBifTimer));
- btm->flags = 0;
- }
-
- if (rp) {
- link_proc(rp, btm);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MSGQ);
- }
- else {
- ASSERT(is_atom(receiver));
- btm->receiver.name = receiver;
- btm->flags |= BTM_FLG_BYNAME;
- }
-
- btm->flags |= xflags;
-
- ref_numbers = internal_ref_numbers(ref);
- ASSERT(internal_ref_no_of_numbers(ref) == 3);
-#if ERTS_REF_NUMBERS != 3
-#error "ERTS_REF_NUMBERS changed. Update me..."
-#endif
- btm->ref_numbers[0] = ref_numbers[0];
- btm->ref_numbers[1] = ref_numbers[1];
- btm->ref_numbers[2] = ref_numbers[2];
-
- ASSERT(eq_ref_numbers(btm->ref_numbers, ERTS_REF_NUMBERS,
- ref_numbers, ERTS_REF_NUMBERS));
-
- if (is_immed(message)) {
- btm->bp = NULL;
- btm->message = message;
- }
- else {
- ErlHeapFragment* bp;
- Eterm* hp;
- Uint size;
-
- size = size_object(message);
- btm->bp = bp = new_message_buffer(size);
- hp = bp->mem;
- btm->message = copy_struct(message, size, &hp, &bp->off_heap);
- }
-
- tab_insert(btm);
- ASSERT(btm == tab_find(ref));
- btm->tm.active = 0; /* MUST be initialized */
- erts_set_timer(&btm->tm,
- (ErlTimeoutProc) bif_timer_timeout,
- (ErlCancelProc) bif_timer_cleanup,
- (void *) btm,
- timeout);
- return ref;
-}
-
-/* send_after(Time, Pid, Message) -> Ref */
-BIF_RETTYPE send_after_3(BIF_ALIST_3)
-{
- Eterm res;
-
- if (erts_smp_safe_btm_rwlock(BIF_P, ERTS_PROC_LOCK_MAIN))
- ERTS_BIF_EXITED(BIF_P);
-
- res = setup_bif_timer(0, BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
-
- erts_smp_btm_rwunlock();
-
- if (is_non_value(res)) {
- BIF_ERROR(BIF_P, BADARG);
- }
- else {
- ASSERT(is_internal_ref(res));
- BIF_RET(res);
- }
-}
-
-/* start_timer(Time, Pid, Message) -> Ref */
-BIF_RETTYPE start_timer_3(BIF_ALIST_3)
-{
- Eterm res;
-
- if (erts_smp_safe_btm_rwlock(BIF_P, ERTS_PROC_LOCK_MAIN))
- ERTS_BIF_EXITED(BIF_P);
-
- res = setup_bif_timer(BTM_FLG_WRAP, BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
-
- erts_smp_btm_rwunlock();
-
- if (is_non_value(res)) {
- BIF_ERROR(BIF_P, BADARG);
- }
- else {
- ASSERT(is_internal_ref(res));
- BIF_RET(res);
- }
-}
-
-/* cancel_timer(Ref) -> false | RemainingTime */
-BIF_RETTYPE cancel_timer_1(BIF_ALIST_1)
-{
- Eterm res;
- ErtsBifTimer *btm;
-
- if (is_not_internal_ref(BIF_ARG_1)) {
- if (is_ref(BIF_ARG_1)) {
- BIF_RET(am_false);
- }
- BIF_ERROR(BIF_P, BADARG);
- }
-
- if (erts_smp_safe_btm_rwlock(BIF_P, ERTS_PROC_LOCK_MAIN))
- ERTS_BIF_EXITED(BIF_P);
-
- btm = tab_find(BIF_ARG_1);
- if (!btm || btm->flags & BTM_FLG_CANCELED) {
- erts_smp_btm_rwunlock();
- res = am_false;
- }
- else {
- Uint left = erts_time_left(&btm->tm);
- if (!(btm->flags & BTM_FLG_BYNAME)) {
- erts_smp_proc_lock(btm->receiver.proc.ess, ERTS_PROC_LOCK_MSGQ);
- unlink_proc(btm);
- erts_smp_proc_unlock(btm->receiver.proc.ess, ERTS_PROC_LOCK_MSGQ);
- }
- tab_remove(btm);
- ASSERT(!tab_find(BIF_ARG_1));
- erts_cancel_timer(&btm->tm);
- erts_smp_btm_rwunlock();
- res = erts_make_integer(left, BIF_P);
- }
-
- BIF_RET(res);
-}
-
-/* read_timer(Ref) -> false | RemainingTime */
-BIF_RETTYPE read_timer_1(BIF_ALIST_1)
-{
- Eterm res;
- ErtsBifTimer *btm;
-
- if (is_not_internal_ref(BIF_ARG_1)) {
- if (is_ref(BIF_ARG_1)) {
- BIF_RET(am_false);
- }
- BIF_ERROR(BIF_P, BADARG);
- }
-
- if (erts_smp_safe_btm_rlock(BIF_P, ERTS_PROC_LOCK_MAIN))
- ERTS_BIF_EXITED(BIF_P);
-
- btm = tab_find(BIF_ARG_1);
- if (!btm || btm->flags & BTM_FLG_CANCELED) {
- res = am_false;
- }
- else {
- Uint left = erts_time_left(&btm->tm);
- res = erts_make_integer(left, BIF_P);
- }
-
- erts_smp_btm_runlock();
-
- BIF_RET(res);
-}
-
-void
-erts_print_bif_timer_info(int to, void *to_arg)
-{
- int i;
- int lock = !ERTS_IS_CRASH_DUMPING;
-
- if (lock)
- erts_smp_btm_rlock();
-
- for (i = 0; i < TIMER_HASH_VEC_SZ; i++) {
- ErtsBifTimer *btm;
- for (btm = bif_timer_tab[i]; btm; btm = btm->tab.next) {
- Eterm receiver = (btm->flags & BTM_FLG_BYNAME
- ? btm->receiver.name
- : btm->receiver.proc.ess->common.id);
- erts_print(to, to_arg, "=timer:%T\n", receiver);
- erts_print(to, to_arg, "Message: %T\n", btm->message);
- erts_print(to, to_arg, "Time left: %u\n",
- erts_time_left(&btm->tm));
- }
- }
-
- if (lock)
- erts_smp_btm_runlock();
-}
-
-
-void
-erts_cancel_bif_timers(Process *p, ErtsProcLocks plocks)
-{
- ErtsBifTimer *btm;
-
- if (erts_smp_btm_tryrwlock() == EBUSY) {
- erts_smp_proc_unlock(p, plocks);
- erts_smp_btm_rwlock();
- erts_smp_proc_lock(p, plocks);
- }
-
- btm = p->u.bif_timers;
- while (btm) {
- ErtsBifTimer *tmp_btm;
- ASSERT(!(btm->flags & BTM_FLG_CANCELED));
- tab_remove(btm);
- tmp_btm = btm;
- btm = btm->receiver.proc.next;
- erts_cancel_timer(&tmp_btm->tm);
- }
-
- p->u.bif_timers = NULL;
-
- erts_smp_btm_rwunlock();
-}
-
-void erts_bif_timer_init(void)
-{
- int i;
- no_bif_timers = 0;
- init_btm_pre_alloc();
- erts_smp_btm_lock_init();
- bif_timer_tab = erts_alloc(ERTS_ALC_T_BIF_TIMER_TABLE,
- sizeof(ErtsBifTimer *)*TIMER_HASH_VEC_SZ);
- for (i = 0; i < TIMER_HASH_VEC_SZ; ++i)
- bif_timer_tab[i] = NULL;
-}
-
-Uint
-erts_bif_timer_memory_size(void)
-{
- Uint res;
- int lock = !ERTS_IS_CRASH_DUMPING;
-
- if (lock)
- erts_smp_btm_rlock();
-
- res = (sizeof(ErtsBifTimer *)*TIMER_HASH_VEC_SZ
- + no_bif_timers*sizeof(ErtsBifTimer));
-
- if (lock)
- erts_smp_btm_runlock();
-
- return res;
-}
-
-
-void
-erts_bif_timer_foreach(void (*func)(Eterm, Eterm, ErlHeapFragment *, void *),
- void *arg)
-{
- int i;
-
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
-
- for (i = 0; i < TIMER_HASH_VEC_SZ; i++) {
- ErtsBifTimer *btm;
- for (btm = bif_timer_tab[i]; btm; btm = btm->tab.next) {
- (*func)((btm->flags & BTM_FLG_BYNAME
- ? btm->receiver.name
- : btm->receiver.proc.ess->common.id),
- btm->message,
- btm->bp,
- arg);
- }
- }
-}
diff --git a/erts/emulator/beam/erl_bif_timer.h b/erts/emulator/beam/erl_bif_timer.h
deleted file mode 100644
index 1197c176f5..0000000000
--- a/erts/emulator/beam/erl_bif_timer.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-
-#ifndef ERL_BIF_TIMER_H__
-#define ERL_BIF_TIMER_H__
-
-typedef struct ErtsBifTimer_ ErtsBifTimer;
-
-#include "sys.h"
-#include "erl_process.h"
-#include "erl_message.h"
-
-Uint erts_bif_timer_memory_size(void);
-void erts_print_bif_timer_info(int to, void *to_arg);
-void erts_cancel_bif_timers(Process *p, ErtsProcLocks plocks);
-void erts_bif_timer_init(void);
-void erts_bif_timer_foreach(void (*func)(Eterm,Eterm,ErlHeapFragment *,void *),
- void *arg);
-#endif
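/* Editorial note: erl_bif_timer.c and erl_bif_timer.h are deleted in
 * full here. The send_after/start_timer/cancel_timer/read_timer BIFs
 * themselves survive; they are presumably served by the newer timer
 * implementation elsewhere in the tree (erl_hl_timer.c is the likely
 * home; an assumption, not confirmed by this diff). */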
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 06fbbea123..22942b40c4 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -38,6 +39,7 @@
#include "beam_bp.h"
#include "erl_binary.h"
#include "erl_thr_progress.h"
+#include "erl_bif_unique.h"
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
@@ -50,7 +52,7 @@ static int erts_default_trace_pattern_is_on;
static Binary *erts_default_match_spec;
static Binary *erts_default_meta_match_spec;
static struct trace_pattern_flags erts_default_trace_pattern_flags;
-static Eterm erts_default_meta_tracer_pid;
+static ErtsTracer erts_default_meta_tracer;
static struct { /* Protected by code write permission */
int current;
@@ -58,26 +60,25 @@ static struct { /* Protected by code write permission */
int local;
BpFunctions f; /* Local functions */
BpFunctions e; /* Export entries */
-#ifdef ERTS_SMP
Process* stager;
ErtsThrPrgrLaterOp lop;
-#endif
} finish_bp;
static Eterm
trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist);
-#ifdef ERTS_SMP
+static int
+erts_set_tracing_event_pattern(Eterm event, Binary*, int on);
+
static void smp_bp_finisher(void* arg);
-#endif
static BIF_RETTYPE
system_monitor(Process *p, Eterm monitor_pid, Eterm list);
static void new_seq_trace_token(Process* p); /* help func for seq_trace_2*/
-static int already_traced(Process *p, Process *tracee_p, Eterm tracer);
-static int port_already_traced(Process *p, Port *tracee_port, Eterm tracer);
static Eterm trace_info_pid(Process* p, Eterm pid_spec, Eterm key);
static Eterm trace_info_func(Process* p, Eterm pid_spec, Eterm key);
static Eterm trace_info_on_load(Process* p, Eterm key);
+static Eterm trace_info_event(Process* p, Eterm event, Eterm key);
+
static void reset_bif_trace(void);
static void setup_bif_trace(void);
@@ -85,28 +86,34 @@ static void install_exp_breakpoints(BpFunctions* f);
static void uninstall_exp_breakpoints(BpFunctions* f);
static void clean_export_entries(BpFunctions* f);
+ErtsTracingEvent erts_send_tracing[ERTS_NUM_BP_IX];
+ErtsTracingEvent erts_receive_tracing[ERTS_NUM_BP_IX];
+
void
erts_bif_trace_init(void)
{
+ int i;
+
erts_default_trace_pattern_is_on = 0;
erts_default_match_spec = NULL;
erts_default_meta_match_spec = NULL;
erts_default_trace_pattern_flags = erts_trace_pattern_flags_off;
- erts_default_meta_tracer_pid = NIL;
+ erts_default_meta_tracer = erts_tracer_nil;
+
+ for (i=0; i<ERTS_NUM_BP_IX; i++) {
+ erts_send_tracing[i].on = 1;
+ erts_send_tracing[i].match_spec = NULL;
+ erts_receive_tracing[i].on = 1;
+ erts_receive_tracing[i].match_spec = NULL;
+ }
}
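/* Editorial note: starting the send/receive events with on = 1 and a
 * NULL match_spec means "match every message" by default; once a
 * process carries the corresponding trace flag, a match spec installed
 * via trace_pattern only narrows this down. The trace_info_event()
 * added further below reads the same fields back. */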
/*
* Turn on/off call tracing for the given function(s).
*/
-
-Eterm
-trace_pattern_2(BIF_ALIST_2)
-{
- return trace_pattern(BIF_P, BIF_ARG_1, BIF_ARG_2, NIL);
-}
Eterm
-trace_pattern_3(BIF_ALIST_3)
+erts_internal_trace_pattern_3(BIF_ALIST_3)
{
return trace_pattern(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
}
@@ -114,7 +121,6 @@ trace_pattern_3(BIF_ALIST_3)
static Eterm
trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
{
- DeclareTmpHeap(mfa,3,p); /* Not really heap here, but might be when setting pattern */
int i;
int matches = -1;
int specified = 0;
@@ -123,11 +129,10 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
Eterm l;
struct trace_pattern_flags flags = erts_trace_pattern_flags_off;
int is_global;
- Process *meta_tracer_proc = p;
- Eterm meta_tracer_pid = p->common.id;
+ ErtsTracer meta_tracer = erts_tracer_nil;
if (!erts_try_seize_code_write_permission(p)) {
- ERTS_BIF_YIELD3(bif_export[BIF_trace_pattern_3], p, MFA, Pattern, flaglist);
+ ERTS_BIF_YIELD3(bif_export[BIF_erts_internal_trace_pattern_3], p, MFA, Pattern, flaglist);
}
finish_bp.current = -1;
@@ -144,45 +149,28 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
on = 1;
} else if (Pattern == am_restart) {
match_prog_set = NULL;
- on = erts_break_reset;
+ on = ERTS_BREAK_RESTART;
} else if (Pattern == am_pause) {
match_prog_set = NULL;
- on = erts_break_stop;
- } else if ((match_prog_set = erts_match_set_compile(p, Pattern)) != NULL) {
- MatchSetRef(match_prog_set);
- on = 1;
- } else{
- goto error;
+ on = ERTS_BREAK_PAUSE;
+ } else {
+ match_prog_set = erts_match_set_compile(p, Pattern, MFA);
+ if (match_prog_set) {
+ MatchSetRef(match_prog_set);
+ on = 1;
+ } else {
+ goto error;
+ }
}
is_global = 0;
for(l = flaglist; is_list(l); l = CDR(list_val(l))) {
if (is_tuple(CAR(list_val(l)))) {
- Eterm *tp = tuple_val(CAR(list_val(l)));
-
- if (arityval(tp[0]) != 2 || tp[1] != am_meta) {
- goto error;
- }
- meta_tracer_pid = tp[2];
- if (is_internal_pid(meta_tracer_pid)) {
- meta_tracer_proc = erts_pid2proc(NULL, 0, meta_tracer_pid, 0);
- if (!meta_tracer_proc) {
- goto error;
- }
- } else if (is_internal_port(meta_tracer_pid)) {
- Port *meta_tracer_port;
- meta_tracer_proc = NULL;
- meta_tracer_port = (erts_port_lookup(
- meta_tracer_pid,
- ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP));
- if (!meta_tracer_port)
- goto error;
- } else {
- goto error;
- }
- if (is_global) {
- goto error;
- }
+ meta_tracer = erts_term_to_tracer(am_meta, CAR(list_val(l)));
+ if (meta_tracer == THE_NON_VALUE) {
+ meta_tracer = erts_tracer_nil;
+ goto error;
+ }
flags.breakpoint = 1;
flags.meta = 1;
} else {
@@ -200,6 +188,8 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
}
flags.breakpoint = 1;
flags.meta = 1;
+ if (ERTS_TRACER_IS_NIL(meta_tracer))
+ meta_tracer = erts_term_to_tracer(THE_NON_VALUE, p->common.id);
break;
case am_global:
if (flags.breakpoint) {
@@ -250,14 +240,11 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
MatchSetUnref(erts_default_meta_match_spec);
erts_default_meta_match_spec = match_prog_set;
MatchSetRef(erts_default_meta_match_spec);
- erts_default_meta_tracer_pid = meta_tracer_pid;
- if (meta_tracer_proc) {
- ERTS_TRACE_FLAGS(meta_tracer_proc) |= F_TRACER;
- }
+ erts_tracer_update(&erts_default_meta_tracer, meta_tracer);
} else if (! flags.breakpoint) {
MatchSetUnref(erts_default_meta_match_spec);
erts_default_meta_match_spec = NULL;
- erts_default_meta_tracer_pid = NIL;
+ ERTS_TRACER_CLEAR(&erts_default_meta_tracer);
}
if (erts_default_trace_pattern_flags.breakpoint &&
flags.breakpoint) {
@@ -316,53 +303,53 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
}
matches = 0;
} else if (is_tuple(MFA)) {
+ ErtsCodeMFA mfa;
Eterm *tp = tuple_val(MFA);
if (tp[0] != make_arityval(3)) {
goto error;
}
- mfa[0] = tp[1];
- mfa[1] = tp[2];
- mfa[2] = tp[3];
- if (!is_atom(mfa[0]) || !is_atom(mfa[1]) ||
- (!is_small(mfa[2]) && mfa[2] != am_Underscore)) {
+ if (!is_atom(tp[1]) || !is_atom(tp[2]) ||
+ (!is_small(tp[3]) && tp[3] != am_Underscore)) {
goto error;
}
- for (i = 0; i < 3 && mfa[i] != am_Underscore; i++, specified++) {
+ for (i = 0; i < 3 && tp[i+1] != am_Underscore; i++, specified++) {
/* Empty loop body */
}
for (i = specified; i < 3; i++) {
- if (mfa[i] != am_Underscore) {
+ if (tp[i+1] != am_Underscore) {
goto error;
}
}
- if (is_small(mfa[2])) {
- mfa[2] = signed_val(mfa[2]);
- }
-
- if (meta_tracer_proc) {
- ERTS_TRACE_FLAGS(meta_tracer_proc) |= F_TRACER;
+ mfa.module = tp[1];
+ mfa.function = tp[2];
+ if (specified == 3) {
+ mfa.arity = signed_val(tp[3]);
}
- matches = erts_set_trace_pattern(p, mfa, specified,
+ matches = erts_set_trace_pattern(p, &mfa, specified,
match_prog_set, match_prog_set,
- on, flags, meta_tracer_pid, 0);
+ on, flags, meta_tracer, 0);
+ } else if (is_atom(MFA)) {
+ if (is_global || flags.breakpoint || on > ERTS_BREAK_SET) {
+ goto error;
+ }
+ matches = erts_set_tracing_event_pattern(MFA, match_prog_set, on);
}
error:
MatchSetUnref(match_prog_set);
- UnUseTmpHeap(3,p);
-#ifdef ERTS_SMP
+ ERTS_TRACER_CLEAR(&meta_tracer);
+
if (finish_bp.current >= 0) {
ASSERT(matches >= 0);
ASSERT(finish_bp.stager == NULL);
finish_bp.stager = p;
erts_schedule_thr_prgr_later_op(smp_bp_finisher, NULL, &finish_bp.lop);
- erts_smp_proc_inc_refc(p);
+ erts_proc_inc_refc(p);
erts_suspend(p, ERTS_PROC_LOCK_MAIN, NULL);
ERTS_BIF_YIELD_RETURN(p, make_small(matches));
}
-#endif
erts_release_code_write_permission();
@@ -374,7 +361,6 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
}
}
-#ifdef ERTS_SMP
static void smp_bp_finisher(void* null)
{
if (erts_finish_breakpointing()) { /* Not done */
@@ -387,25 +373,24 @@ static void smp_bp_finisher(void* null)
finish_bp.stager = NULL;
#endif
erts_release_code_write_permission();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
if (!ERTS_PROC_IS_EXITING(p)) {
erts_resume(p, ERTS_PROC_LOCK_STATUS);
}
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
- erts_smp_proc_dec_refc(p);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_dec_refc(p);
}
}
-#endif /* ERTS_SMP */
void
erts_get_default_trace_pattern(int *trace_pattern_is_on,
Binary **match_spec,
Binary **meta_match_spec,
struct trace_pattern_flags *trace_pattern_flags,
- Eterm *meta_tracer_pid)
+ ErtsTracer *meta_tracer)
{
- ERTS_SMP_LC_ASSERT(erts_has_code_write_permission() ||
- erts_smp_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(erts_has_code_write_permission() ||
+ erts_thr_progress_is_blocking());
if (trace_pattern_is_on)
*trace_pattern_is_on = erts_default_trace_pattern_is_on;
if (match_spec)
@@ -414,14 +399,14 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on,
*meta_match_spec = erts_default_meta_match_spec;
if (trace_pattern_flags)
*trace_pattern_flags = erts_default_trace_pattern_flags;
- if (meta_tracer_pid)
- *meta_tracer_pid = erts_default_meta_tracer_pid;
+ if (meta_tracer)
+ *meta_tracer = erts_default_meta_tracer;
}
int erts_is_default_trace_enabled(void)
{
- ERTS_SMP_LC_ASSERT(erts_has_code_write_permission() ||
- erts_smp_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(erts_has_code_write_permission() ||
+ erts_thr_progress_is_blocking());
return erts_default_trace_pattern_is_on;
}
@@ -429,6 +414,9 @@ Uint
erts_trace_flag2bit(Eterm flag)
{
switch (flag) {
+ case am_timestamp: return F_NOW_TS;
+ case am_strict_monotonic_timestamp: return F_STRICT_MON_TS;
+ case am_monotonic_timestamp: return F_MON_TS;
case am_all: return TRACEE_FLAGS;
case am_send: return F_TRACE_SEND;
case am_receive: return F_TRACE_RECEIVE;
@@ -437,7 +425,6 @@ erts_trace_flag2bit(Eterm flag)
case am_set_on_first_spawn: return F_TRACE_SOS1;
case am_set_on_link: return F_TRACE_SOL;
case am_set_on_first_link: return F_TRACE_SOL1;
- case am_timestamp: return F_TIMESTAMP;
case am_running: return F_TRACE_SCHED;
case am_exiting: return F_TRACE_SCHED_EXIT;
case am_garbage_collection: return F_TRACE_GC;
@@ -461,12 +448,12 @@ erts_trace_flag2bit(Eterm flag)
** occurred in the argument list.
*/
int
-erts_trace_flags(Eterm List,
- Uint *pMask, Eterm *pTracer, int *pCpuTimestamp)
+erts_trace_flags(Eterm List,
+ Uint *pMask, ErtsTracer *pTracer, int *pCpuTimestamp)
{
Eterm list = List;
Uint mask = 0;
- Eterm tracer = NIL;
+ ErtsTracer tracer = erts_tracer_nil;
int cpu_timestamp = 0;
while (is_list(list)) {
@@ -479,80 +466,100 @@ erts_trace_flags(Eterm List,
cpu_timestamp = !0;
#endif
} else if (is_tuple(item)) {
- Eterm* tp = tuple_val(item);
-
- if (arityval(tp[0]) != 2 || tp[1] != am_tracer) goto error;
- if (is_internal_pid(tp[2]) || is_internal_port(tp[2])) {
- tracer = tp[2];
- } else goto error;
+ tracer = erts_term_to_tracer(am_tracer, item);
+ if (tracer == THE_NON_VALUE)
+ goto error;
} else goto error;
list = CDR(list_val(list));
}
if (is_not_nil(list)) goto error;
- if (pMask && mask) *pMask = mask;
- if (pTracer && tracer != NIL) *pTracer = tracer;
- if (pCpuTimestamp && cpu_timestamp) *pCpuTimestamp = cpu_timestamp;
+ if (pMask && mask) *pMask = mask;
+ if (pTracer && !ERTS_TRACER_IS_NIL(tracer)) *pTracer = tracer;
+ if (pCpuTimestamp && cpu_timestamp) *pCpuTimestamp = cpu_timestamp;
return !0;
error:
return 0;
}
-Eterm trace_3(BIF_ALIST_3)
+static ERTS_INLINE int
+start_trace(Process *c_p, ErtsTracer tracer,
+ ErtsPTabElementCommon *common,
+ int on, int mask)
+{
+ /* We can use the common part of both port and proc without checking
+ which one it is; in the code below, port is used for both. */
+ Port *port = (Port*)common;
+
+ /*
+ * The SMP build assumes that either the system is blocked or:
+ * * main lock is held on c_p
+ * * all locks are held on port common
+ */
+
+ if (!ERTS_TRACER_IS_NIL(tracer)) {
+ if ((ERTS_TRACE_FLAGS(port) & TRACEE_FLAGS)
+ && !ERTS_TRACER_COMPARE(ERTS_TRACER(port), tracer)) {
+ /* This tracee is already being traced, and not by the
+ * tracer to be */
+ if (erts_is_tracer_enabled(ERTS_TRACER(port), common)) {
+ /* The tracer is still in use */
+ return 1;
+ }
+ /* Current tracer now invalid */
+ }
+ }
+
+ if (on)
+ ERTS_TRACE_FLAGS(port) |= mask;
+ else
+ ERTS_TRACE_FLAGS(port) &= ~mask;
+
+ if ((ERTS_TRACE_FLAGS(port) & TRACEE_FLAGS) == 0) {
+ tracer = erts_tracer_nil;
+ erts_tracer_replace(common, erts_tracer_nil);
+ } else if (!ERTS_TRACER_IS_NIL(tracer))
+ erts_tracer_replace(common, tracer);
+
+ return 0;
+}
+
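/* Editorial note on start_trace() above: it returns 1 when the tracee
 * already has a different, still-enabled tracer (the caller then skips
 * it) and 0 once the trace flags and tracer have been updated. Both
 * call sites below follow the same shape:
 *
 *   if (start_trace(p, tracer, &tracee->common, on, mask)) {
 *       ... release the tracee ...
 *       goto already_traced;
 *   }
 */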
+Eterm erts_internal_trace_3(BIF_ALIST_3)
{
Process* p = BIF_P;
Eterm pid_spec = BIF_ARG_1;
Eterm how = BIF_ARG_2;
Eterm list = BIF_ARG_3;
int on;
- Eterm tracer = NIL;
+ ErtsTracer tracer = erts_tracer_nil;
int matches = 0;
Uint mask = 0;
int cpu_ts = 0;
-#ifdef ERTS_SMP
int system_blocked = 0;
-#endif
if (! erts_trace_flags(list, &mask, &tracer, &cpu_ts)) {
BIF_ERROR(p, BADARG);
}
if (!erts_try_seize_code_write_permission(BIF_P)) {
- ERTS_BIF_YIELD3(bif_export[BIF_trace_3], BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ ERTS_BIF_YIELD3(bif_export[BIF_erts_internal_trace_3],
+ BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
}
- if (is_nil(tracer) || is_internal_pid(tracer)) {
- Process *tracer_proc = erts_pid2proc(p,
- ERTS_PROC_LOCK_MAIN,
- is_nil(tracer) ? p->common.id : tracer,
- ERTS_PROC_LOCKS_ALL);
- if (!tracer_proc)
- goto error;
- ERTS_TRACE_FLAGS(tracer_proc) |= F_TRACER;
- erts_smp_proc_unlock(tracer_proc,
- (tracer_proc == p
- ? ERTS_PROC_LOCKS_ALL_MINOR
- : ERTS_PROC_LOCKS_ALL));
- } else if (is_internal_port(tracer)) {
- Port *tracer_port = erts_id2port_sflgs(tracer,
- p,
- ERTS_PROC_LOCK_MAIN,
- ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
- if (!tracer_port)
- goto error;
- ERTS_TRACE_FLAGS(tracer_port) |= F_TRACER;
- erts_port_release(tracer_port);
- } else
- goto error;
-
switch (how) {
case am_false:
on = 0;
break;
case am_true:
on = 1;
- if (is_nil(tracer))
- tracer = p->common.id;
+ if (ERTS_TRACER_IS_NIL(tracer))
+ tracer = erts_term_to_tracer(am_tracer, p->common.id);
+
+ if (tracer == THE_NON_VALUE) {
+ tracer = erts_tracer_nil;
+ goto error;
+ }
+
break;
default:
goto error;
@@ -571,34 +578,20 @@ Eterm trace_3(BIF_ALIST_3)
}
#endif
- if (pid_spec == tracer)
- goto error;
-
tracee_port = erts_id2port_sflgs(pid_spec,
p,
ERTS_PROC_LOCK_MAIN,
ERTS_PORT_SFLGS_INVALID_LOOKUP);
+
if (!tracee_port)
goto error;
-
- if (tracer != NIL && port_already_traced(p, tracee_port, tracer)) {
+
+ if (start_trace(p, tracer, &tracee_port->common, on, mask)) {
erts_port_release(tracee_port);
goto already_traced;
- }
-
- if (on)
- ERTS_TRACE_FLAGS(tracee_port) |= mask;
- else
- ERTS_TRACE_FLAGS(tracee_port) &= ~mask;
-
- if (!ERTS_TRACE_FLAGS(tracee_port))
- ERTS_TRACER_PROC(tracee_port) = NIL;
- else if (tracer != NIL)
- ERTS_TRACER_PROC(tracee_port) = tracer;
-
- erts_port_release(tracee_port);
-
- matches = 1;
+ }
+ erts_port_release(tracee_port);
+ matches = 1;
} else if (is_pid(pid_spec)) {
Process *tracee_p;
@@ -611,33 +604,19 @@ Eterm trace_3(BIF_ALIST_3)
* and not about to be tracing.
*/
- if (pid_spec == tracer)
- goto error;
-
tracee_p = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
pid_spec, ERTS_PROC_LOCKS_ALL);
if (!tracee_p)
goto error;
- if (tracer != NIL && already_traced(p, tracee_p, tracer)) {
- erts_smp_proc_unlock(tracee_p,
+ if (start_trace(tracee_p, tracer, &tracee_p->common, on, mask)) {
+ erts_proc_unlock(tracee_p,
(tracee_p == p
? ERTS_PROC_LOCKS_ALL_MINOR
: ERTS_PROC_LOCKS_ALL));
goto already_traced;
- }
-
- if (on)
- ERTS_TRACE_FLAGS(tracee_p) |= mask;
- else
- ERTS_TRACE_FLAGS(tracee_p) &= ~mask;
-
- if ((ERTS_TRACE_FLAGS(tracee_p) & TRACEE_FLAGS) == 0)
- ERTS_TRACER_PROC(tracee_p) = NIL;
- else if (tracer != NIL)
- ERTS_TRACER_PROC(tracee_p) = tracer;
-
- erts_smp_proc_unlock(tracee_p,
+ }
+ erts_proc_unlock(tracee_p,
(tracee_p == p
? ERTS_PROC_LOCKS_ALL_MINOR
: ERTS_PROC_LOCKS_ALL));
@@ -651,7 +630,7 @@ Eterm trace_3(BIF_ALIST_3)
if (pid_spec == am_all) {
if (on) {
if (!erts_cpu_timestamp) {
-#ifdef HAVE_CLOCK_GETTIME
+#ifdef HAVE_CLOCK_GETTIME_CPU_TIME
/*
Perhaps clock_gettime was found during config
on a different machine than this. We check
@@ -678,7 +657,7 @@ Eterm trace_3(BIF_ALIST_3)
if (erts_start_now_cpu() < 0) {
goto error;
}
-#endif /* HAVE_CLOCK_GETTIME */
+#endif /* HAVE_CLOCK_GETTIME_CPU_TIME */
erts_cpu_timestamp = !0;
}
}
@@ -688,24 +667,31 @@ Eterm trace_3(BIF_ALIST_3)
}
#endif
- if (pid_spec == am_all || pid_spec == am_existing) {
+ if (pid_spec == am_all || pid_spec == am_existing ||
+ pid_spec == am_ports || pid_spec == am_processes ||
+ pid_spec == am_existing_ports || pid_spec == am_existing_processes
+ ) {
int i;
int procs = 0;
int ports = 0;
int mods = 0;
if (mask & (ERTS_PROC_TRACEE_FLAGS & ~ERTS_TRACEE_MODIFIER_FLAGS))
- procs = 1;
+ procs = pid_spec != am_ports && pid_spec != am_existing_ports;
if (mask & (ERTS_PORT_TRACEE_FLAGS & ~ERTS_TRACEE_MODIFIER_FLAGS))
- ports = 1;
- if (mask & ERTS_TRACEE_MODIFIER_FLAGS)
- mods = 1;
-
-#ifdef ERTS_SMP
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ ports = pid_spec != am_processes && pid_spec != am_existing_processes;
+ if (mask & ERTS_TRACEE_MODIFIER_FLAGS) {
+ if (pid_spec == am_ports || pid_spec == am_existing_ports)
+ ports = 1;
+ else if (pid_spec == am_processes || pid_spec == am_existing_processes)
+ procs = 1;
+ else
+ mods = 1;
+ }
+
+ erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
system_blocked = 1;
-#endif
ok = 1;
if (procs || mods) {
@@ -715,23 +701,8 @@ Eterm trace_3(BIF_ALIST_3)
Process* tracee_p = erts_pix2proc(i);
if (! tracee_p)
continue;
- if (tracer != NIL) {
- if (tracee_p->common.id == tracer)
- continue;
- if (already_traced(NULL, tracee_p, tracer))
- continue;
- }
- if (on) {
- ERTS_TRACE_FLAGS(tracee_p) |= mask;
- } else {
- ERTS_TRACE_FLAGS(tracee_p) &= ~mask;
- }
- if(!(ERTS_TRACE_FLAGS(tracee_p) & TRACEE_FLAGS)) {
- ERTS_TRACER_PROC(tracee_p) = NIL;
- } else if (tracer != NIL) {
- ERTS_TRACER_PROC(tracee_p) = tracer;
- }
- matches++;
+ if (!start_trace(p, tracer, &tracee_p->common, on, mask))
+ matches++;
}
}
if (ports || mods) {
@@ -745,33 +716,26 @@ Eterm trace_3(BIF_ALIST_3)
state = erts_atomic32_read_nob(&tracee_port->state);
if (state & ERTS_PORT_SFLGS_DEAD)
continue;
- if (tracer != NIL) {
- if (tracee_port->common.id == tracer)
- continue;
- if (port_already_traced(NULL, tracee_port, tracer))
- continue;
- }
-
- if (on) ERTS_TRACE_FLAGS(tracee_port) |= mask;
- else ERTS_TRACE_FLAGS(tracee_port) &= ~mask;
-
- if (!(ERTS_TRACE_FLAGS(tracee_port) & TRACEE_FLAGS)) {
- ERTS_TRACER_PROC(tracee_port) = NIL;
- } else if (tracer != NIL) {
- ERTS_TRACER_PROC(tracee_port) = tracer;
- }
- /* matches are not counted for ports since it would violate compatibility */
- /* This could be a reason to modify this function or make a new one. */
+ if (!start_trace(p, tracer, &tracee_port->common, on, mask))
+ matches++;
}
}
}
- if (pid_spec == am_all || pid_spec == am_new) {
- Uint def_flags = mask;
- Eterm def_tracer = tracer;
+ if (pid_spec == am_all || pid_spec == am_new
+ || pid_spec == am_ports || pid_spec == am_processes
+ || pid_spec == am_new_ports || pid_spec == am_new_processes
+ ) {
ok = 1;
- erts_change_default_tracing(on, &def_flags, &def_tracer);
+ if (mask & ERTS_PROC_TRACEE_FLAGS &&
+ pid_spec != am_ports && pid_spec != am_new_ports)
+ erts_change_default_proc_tracing(
+ on, mask & ERTS_PROC_TRACEE_FLAGS, tracer);
+ if (mask & ERTS_PORT_TRACEE_FLAGS &&
+ pid_spec != am_processes && pid_spec != am_new_processes)
+ erts_change_default_port_tracing(
+ on, mask & ERTS_PORT_TRACEE_FLAGS, tracer);
#ifdef HAVE_ERTS_NOW_CPU
if (cpu_ts && !on) {
@@ -790,13 +754,12 @@ Eterm trace_3(BIF_ALIST_3)
goto error;
}
-#ifdef ERTS_SMP
if (system_blocked) {
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
}
-#endif
erts_release_code_write_permission();
+ ERTS_TRACER_CLEAR(&tracer);
BIF_RET(make_small(matches));
@@ -806,99 +769,17 @@ Eterm trace_3(BIF_ALIST_3)
error:
-#ifdef ERTS_SMP
+ ERTS_TRACER_CLEAR(&tracer);
+
if (system_blocked) {
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
}
-#endif
erts_release_code_write_permission();
BIF_ERROR(p, BADARG);
}
-/* Check that the process to be traced is not already traced
- * by a valid other tracer than the tracer to be.
- */
-static int port_already_traced(Process *c_p, Port *tracee_port, Eterm tracer)
-{
- /*
- * SMP build assumes that either system is blocked or:
- * * main lock is held on c_p
- * * all locks are held on port tracee_p
- */
- if ((ERTS_TRACE_FLAGS(tracee_port) & TRACEE_FLAGS)
- && ERTS_TRACER_PROC(tracee_port) != tracer) {
- /* This tracee is already being traced, and not by the
- * tracer to be */
- if (is_internal_port(ERTS_TRACER_PROC(tracee_port))) {
- if (!erts_is_valid_tracer_port(ERTS_TRACER_PROC(tracee_port))) {
- /* Current trace port now invalid
- * - discard it and approve the new. */
- goto remove_tracer;
- } else
- return 1;
- }
- else if(is_internal_pid(ERTS_TRACER_PROC(tracee_port))) {
- Process *tracer_p = erts_proc_lookup(ERTS_TRACER_PROC(tracee_port));
- if (!tracer_p) {
- /* Current trace process now invalid
- * - discard it and approve the new. */
- goto remove_tracer;
- } else
- return 1;
- }
- else {
- remove_tracer:
- ERTS_TRACE_FLAGS(tracee_port) &= ~TRACEE_FLAGS;
- ERTS_TRACER_PROC(tracee_port) = NIL;
- }
- }
- return 0;
-}
-
-/* Check that the process to be traced is not already traced
- * by a valid other tracer than the tracer to be.
- */
-static int already_traced(Process *c_p, Process *tracee_p, Eterm tracer)
-{
- /*
- * SMP build assumes that either system is blocked or:
- * * main lock is held on c_p
- * all locks are held on tracee_p
- */
- if ((ERTS_TRACE_FLAGS(tracee_p) & TRACEE_FLAGS)
- && ERTS_TRACER_PROC(tracee_p) != tracer) {
- /* This tracee is already being traced, and not by the
- * tracer to be */
- if (is_internal_port(ERTS_TRACER_PROC(tracee_p))) {
- if (!erts_is_valid_tracer_port(ERTS_TRACER_PROC(tracee_p))) {
- /* Current trace port now invalid
- * - discard it and approve the new. */
- goto remove_tracer;
- } else
- return 1;
- }
- else if(is_internal_pid(ERTS_TRACER_PROC(tracee_p))) {
- Process *tracer_p;
-
- tracer_p = erts_proc_lookup(ERTS_TRACER_PROC(tracee_p));
- if (!tracer_p) {
- /* Current trace process now invalid
- * - discard it and approve the new. */
- goto remove_tracer;
- } else
- return 1;
- }
- else {
- remove_tracer:
- ERTS_TRACE_FLAGS(tracee_p) &= ~TRACEE_FLAGS;
- ERTS_TRACER_PROC(tracee_p) = NIL;
- }
- }
- return 0;
-}
-
/*
* Return information about a process or an external function being traced.
*/
@@ -916,7 +797,9 @@ Eterm trace_info_2(BIF_ALIST_2)
if (What == am_on_load) {
res = trace_info_on_load(p, Key);
- } else if (is_atom(What) || is_pid(What)) {
+ } else if (What == am_send || What == am_receive) {
+ res = trace_info_event(p, What, Key);
+ } else if (is_atom(What) || is_pid(What) || is_port(What)) {
res = trace_info_pid(p, What, Key);
} else if (is_tuple(What)) {
res = trace_info_func(p, What, Key);
@@ -932,41 +815,54 @@ static Eterm
trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
{
Eterm tracer;
- Uint trace_flags;
+ Uint trace_flags = am_false;
Eterm* hp;
- if (pid_spec == am_new) {
- erts_get_default_tracing(&trace_flags, &tracer);
+ if (pid_spec == am_new || pid_spec == am_new_processes) {
+ ErtsTracer def_tracer;
+ erts_get_default_proc_tracing(&trace_flags, &def_tracer);
+ tracer = erts_tracer_to_term(p, def_tracer);
+ ERTS_TRACER_CLEAR(&def_tracer);
+ } else if (pid_spec == am_new_ports) {
+ ErtsTracer def_tracer;
+ erts_get_default_port_tracing(&trace_flags, &def_tracer);
+ tracer = erts_tracer_to_term(p, def_tracer);
+ ERTS_TRACER_CLEAR(&def_tracer);
+ } else if (is_internal_port(pid_spec)) {
+ Port *tracee;
+ tracee = erts_id2port_sflgs(pid_spec, p, ERTS_PROC_LOCK_MAIN,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP);
+
+ if (!tracee)
+ return am_undefined;
+
+ if (!ERTS_TRACER_IS_NIL(ERTS_TRACER(tracee)))
+ erts_is_tracer_proc_enabled(NULL, 0, &tracee->common);
+
+ tracer = erts_tracer_to_term(p, ERTS_TRACER(tracee));
+ trace_flags = ERTS_TRACE_FLAGS(tracee);
+
+ erts_port_release(tracee);
+
} else if (is_internal_pid(pid_spec)) {
- Process *tracee;
- tracee = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
- pid_spec, ERTS_PROC_LOCKS_ALL);
+ Process *tracee = erts_pid2proc_not_running(p, ERTS_PROC_LOCK_MAIN,
+ pid_spec, ERTS_PROC_LOCK_MAIN);
+
+ if (tracee == ERTS_PROC_LOCK_BUSY)
+ ERTS_BIF_YIELD2(bif_export[BIF_trace_info_2], p, pid_spec, key);
- if (!tracee) {
+ if (!tracee)
return am_undefined;
- } else {
- tracer = ERTS_TRACER_PROC(tracee);
- trace_flags = ERTS_TRACE_FLAGS(tracee);
- }
- if (is_internal_pid(tracer)) {
- if (!erts_proc_lookup(tracer)) {
- reset_tracer:
- ERTS_TRACE_FLAGS(tracee) &= ~TRACEE_FLAGS;
- trace_flags = ERTS_TRACE_FLAGS(tracee);
- tracer = ERTS_TRACER_PROC(tracee) = NIL;
- }
- }
- else if (is_internal_port(tracer)) {
- if (!erts_is_valid_tracer_port(tracer))
- goto reset_tracer;
- }
-#ifdef ERTS_SMP
- erts_smp_proc_unlock(tracee,
- (tracee == p
- ? ERTS_PROC_LOCKS_ALL_MINOR
- : ERTS_PROC_LOCKS_ALL));
-#endif
+ if (!ERTS_TRACER_IS_NIL(ERTS_TRACER(tracee)))
+ erts_is_tracer_proc_enabled(tracee, ERTS_PROC_LOCK_MAIN,
+ &tracee->common);
+
+ tracer = erts_tracer_to_term(p, ERTS_TRACER(tracee));
+ trace_flags = ERTS_TRACE_FLAGS(tracee);
+
+ if (tracee != p)
+ erts_proc_unlock(tracee, ERTS_PROC_LOCK_MAIN);
} else if (is_external_pid(pid_spec)
&& external_pid_dist_entry(pid_spec) == erts_this_dist_entry) {
return am_undefined;
@@ -976,7 +872,7 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
}
if (key == am_flags) {
- int num_flags = 19; /* MAXIMUM number of flags. */
+ int num_flags = 21; /* MAXIMUM number of flags. */
Uint needed = 3+2*num_flags;
Eterm flag_list = NIL;
Eterm* limit;
@@ -994,6 +890,9 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
#endif
hp = HAlloc(p, needed);
limit = hp+needed;
+ FLAG(F_NOW_TS, am_timestamp);
+ FLAG(F_STRICT_MON_TS, am_strict_monotonic_timestamp);
+ FLAG(F_MON_TS, am_monotonic_timestamp);
FLAG(F_TRACE_SEND, am_send);
FLAG(F_TRACE_RECEIVE, am_receive);
FLAG(F_TRACE_SOS, am_set_on_spawn);
@@ -1005,7 +904,6 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
FLAG(F_TRACE_SCHED, am_running);
FLAG(F_TRACE_SCHED_EXIT, am_exiting);
FLAG(F_TRACE_GC, am_garbage_collection);
- FLAG(F_TIMESTAMP, am_timestamp);
FLAG(F_TRACE_ARITY_ONLY, am_arity);
FLAG(F_TRACE_RETURN_TO, am_return_to);
FLAG(F_TRACE_SILENT, am_silent);
@@ -1018,8 +916,10 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
HRelease(p,limit,hp+3);
return TUPLE2(hp, key, flag_list);
} else if (key == am_tracer) {
- hp = HAlloc(p, 3);
- return TUPLE2(hp, key, tracer); /* Local pid or port */
+ if (tracer == am_false)
+ tracer = NIL;
+ hp = HAlloc(p, 3);
+ return TUPLE2(hp, key, tracer);
} else {
goto error;
}
@@ -1048,41 +948,42 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
*/
static int function_is_traced(Process *p,
Eterm mfa[3],
- Binary **ms, /* out */
- Binary **ms_meta, /* out */
- Eterm *tracer_pid_meta, /* out */
- Uint *count, /* out */
- Eterm *call_time) /* out */
+ Binary **ms, /* out */
+ Binary **ms_meta, /* out */
+ ErtsTracer *tracer_pid_meta, /* out */
+ Uint *count, /* out */
+ Eterm *call_time) /* out */
{
Export e;
Export* ep;
BeamInstr* pc;
+ ErtsCodeInfo *ci;
/* First look for an export entry */
- e.code[0] = mfa[0];
- e.code[1] = mfa[1];
- e.code[2] = mfa[2];
+ e.info.mfa.module = mfa[0];
+ e.info.mfa.function = mfa[1];
+ e.info.mfa.arity = mfa[2];
if ((ep = export_get(&e)) != NULL) {
- pc = ep->code+3;
+ pc = ep->beam;
if (ep->addressv[erts_active_code_ix()] == pc &&
- *pc != (BeamInstr) em_call_error_handler) {
+ ! BeamIsOpCode(*pc, op_call_error_handler)) {
int r = 0;
- ASSERT(*pc == (BeamInstr) em_apply_bif ||
- *pc == (BeamInstr) BeamOp(op_i_generic_breakpoint));
+ ASSERT(BeamIsOpCode(*pc, op_apply_bif) ||
+ BeamIsOpCode(*pc, op_i_generic_breakpoint));
- if (erts_is_trace_break(pc, ms, 0)) {
+ if (erts_is_trace_break(&ep->info, ms, 0)) {
return FUNC_TRACE_GLOBAL_TRACE;
}
- if (erts_is_trace_break(pc, ms, 1)) {
+ if (erts_is_trace_break(&ep->info, ms, 1)) {
r |= FUNC_TRACE_LOCAL_TRACE;
}
- if (erts_is_mtrace_break(pc, ms_meta, tracer_pid_meta)) {
+ if (erts_is_mtrace_break(&ep->info, ms_meta, tracer_pid_meta)) {
r |= FUNC_TRACE_META_TRACE;
}
- if (erts_is_time_break(p, pc, call_time)) {
+ if (erts_is_time_break(p, &ep->info, call_time)) {
r |= FUNC_TRACE_TIME_TRACE;
}
return r ? r : FUNC_TRACE_UNTRACED;
@@ -1090,15 +991,15 @@ static int function_is_traced(Process *p,
}
/* OK, now look for breakpoint tracing */
- if ((pc = erts_find_local_func(mfa)) != NULL) {
+ if ((ci = erts_find_local_func(&e.info.mfa)) != NULL) {
int r =
- (erts_is_trace_break(pc, ms, 1)
+ (erts_is_trace_break(ci, ms, 1)
? FUNC_TRACE_LOCAL_TRACE : 0)
- | (erts_is_mtrace_break(pc, ms_meta, tracer_pid_meta)
+ | (erts_is_mtrace_break(ci, ms_meta, tracer_pid_meta)
? FUNC_TRACE_META_TRACE : 0)
- | (erts_is_count_break(pc, count)
+ | (erts_is_count_break(ci, count)
? FUNC_TRACE_COUNT_TRACE : 0)
- | (erts_is_time_break(p, pc, call_time)
+ | (erts_is_time_break(p, ci, call_time)
? FUNC_TRACE_TIME_TRACE : 0);
return r ? r : FUNC_TRACE_UNTRACED;
@@ -1117,7 +1018,7 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
Eterm traced = am_false;
Eterm match_spec = am_false;
Eterm retval = am_false;
- Eterm meta = am_false;
+ ErtsTracer meta = erts_tracer_nil;
Eterm call_time = NIL;
int r;
@@ -1138,21 +1039,20 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
mfa[1] = tp[2];
mfa[2] = signed_val(tp[3]);
-#ifdef ERTS_SMP
if ( (key == am_call_time) || (key == am_all)) {
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
}
-#endif
+ erts_mtx_lock(&erts_dirty_bp_ix_mtx);
+
r = function_is_traced(p, mfa, &ms, &ms_meta, &meta, &count, &call_time);
-#ifdef ERTS_SMP
+ erts_mtx_unlock(&erts_dirty_bp_ix_mtx);
if ( (key == am_call_time) || (key == am_all)) {
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
}
-#endif
switch (r) {
case FUNC_TRACE_NOEXIST:
@@ -1187,7 +1087,10 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
retval = match_spec;
break;
case am_meta:
- retval = meta;
+ retval = erts_tracer_to_term(p, meta);
+ if (retval == am_false)
+ /* backwards compatibility */
+ retval = NIL;
break;
case am_meta_match_spec:
if (r & FUNC_TRACE_META_TRACE) {
@@ -1210,7 +1113,8 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
}
break;
case am_all: {
- Eterm match_spec_meta = am_false, c = am_false, t, ct = am_false;
+ Eterm match_spec_meta = am_false, c = am_false, t, ct = am_false,
+ m = am_false;
if (ms) {
match_spec = MatchSetGetSource(ms);
@@ -1229,6 +1133,9 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
if (r & FUNC_TRACE_TIME_TRACE) {
ct = call_time;
}
+
+ m = erts_tracer_to_term(p, meta);
+
hp = HAlloc(p, (3+2)*6);
retval = NIL;
t = TUPLE2(hp, am_call_count, c); hp += 3;
@@ -1237,7 +1144,7 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
retval = CONS(hp, t, retval); hp += 2;
t = TUPLE2(hp, am_meta_match_spec, match_spec_meta); hp += 3;
retval = CONS(hp, t, retval); hp += 2;
- t = TUPLE2(hp, am_meta, meta); hp += 3;
+ t = TUPLE2(hp, am_meta, m); hp += 3;
retval = CONS(hp, t, retval); hp += 2;
t = TUPLE2(hp, am_match_spec, match_spec); hp += 3;
retval = CONS(hp, t, retval); hp += 2;
@@ -1300,7 +1207,8 @@ trace_info_on_load(Process* p, Eterm key)
case am_meta:
hp = HAlloc(p, 3);
if (erts_default_trace_pattern_flags.meta) {
- return TUPLE2(hp, key, erts_default_meta_tracer_pid);
+ ASSERT(!ERTS_TRACER_IS_NIL(erts_default_meta_tracer));
+ return TUPLE2(hp, key, erts_tracer_to_term(p, erts_default_meta_tracer));
} else {
return TUPLE2(hp, key, am_false);
}
@@ -1339,7 +1247,7 @@ trace_info_on_load(Process* p, Eterm key)
}
case am_all:
{
- Eterm match_spec = am_false, meta_match_spec = am_false, r = NIL, t;
+ Eterm match_spec = am_false, meta_match_spec = am_false, r = NIL, t, m;
if (erts_default_trace_pattern_flags.local ||
(! erts_default_trace_pattern_flags.breakpoint)) {
@@ -1357,6 +1265,8 @@ trace_info_on_load(Process* p, Eterm key)
MatchSetGetSource(erts_default_meta_match_spec);
meta_match_spec = copy_object(meta_match_spec, p);
}
+ m = (erts_default_trace_pattern_flags.meta
+ ? erts_tracer_to_term(p, erts_default_meta_tracer) : am_false);
hp = HAlloc(p, (3+2)*5 + 3);
t = TUPLE2(hp, am_call_count,
(erts_default_trace_pattern_flags.call_count
@@ -1364,9 +1274,7 @@ trace_info_on_load(Process* p, Eterm key)
r = CONS(hp, t, r); hp += 2;
t = TUPLE2(hp, am_meta_match_spec, meta_match_spec); hp += 3;
r = CONS(hp, t, r); hp += 2;
- t = TUPLE2(hp, am_meta,
- (erts_default_trace_pattern_flags.meta
- ? erts_default_meta_tracer_pid : am_false)); hp += 3;
+ t = TUPLE2(hp, am_meta, m); hp += 3;
r = CONS(hp, t, r); hp += 2;
t = TUPLE2(hp, am_match_spec, match_spec); hp += 3;
r = CONS(hp, t, r); hp += 2;
@@ -1382,16 +1290,52 @@ trace_info_on_load(Process* p, Eterm key)
}
}
+static Eterm
+trace_info_event(Process* p, Eterm event, Eterm key)
+{
+ ErtsTracingEvent* te;
+ Eterm retval;
+ Eterm* hp;
+
+ switch (event) {
+ case am_send: te = erts_send_tracing; break;
+ case am_receive: te = erts_receive_tracing; break;
+ default:
+ goto error;
+ }
+
+ if (key != am_match_spec)
+ goto error;
+
+ te = &te[erts_active_bp_ix()];
+
+ if (te->on) {
+ if (!te->match_spec)
+ retval = am_true;
+ else
+ retval = copy_object(MatchSetGetSource(te->match_spec), p);
+ }
+ else
+ retval = am_false;
+
+ hp = HAlloc(p, 3);
+ return TUPLE2(hp, key, retval);
+
+ error:
+ BIF_ERROR(p, BADARG);
+}
+
+
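/* Editorial note: with trace_info_event() in place, erlang:trace_info/2
 * accepts the event atoms directly (an inferred Erlang-level view, not
 * shown in this diff):
 *
 *   erlang:trace_info(send, match_spec).
 *   %% -> {match_spec, true | false | MatchSpecSource}
 */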
#undef FUNC_TRACE_NOEXIST
#undef FUNC_TRACE_UNTRACED
#undef FUNC_TRACE_GLOBAL_TRACE
#undef FUNC_TRACE_LOCAL_TRACE
int
-erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
+erts_set_trace_pattern(Process*p, ErtsCodeMFA *mfa, int specified,
Binary* match_prog_set, Binary *meta_match_prog_set,
int on, struct trace_pattern_flags flags,
- Eterm meta_tracer_pid, int is_blocking)
+ ErtsTracer meta_tracer, int is_blocking)
{
const ErtsCodeIndex code_ix = erts_active_code_ix();
int matches = 0;
@@ -1408,22 +1352,23 @@ erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
n = finish_bp.e.matched;
for (i = 0; i < n; i++) {
- BeamInstr* pc = fp[i].pc;
- Export* ep = (Export *)(((char *)(pc-3)) - offsetof(Export, code));
+ ErtsCodeInfo *ci = fp[i].ci;
+ BeamInstr* pc = erts_codeinfo_to_code(ci);
+ Export* ep = ErtsContainerStruct(ci, Export, info);
if (on && !flags.breakpoint) {
/* Turn on global call tracing */
if (ep->addressv[code_ix] != pc) {
fp[i].mod->curr.num_traced_exports++;
#ifdef DEBUG
- pc[-5] = (BeamInstr) BeamOp(op_i_func_info_IaaI);
+ ep->info.op = BeamOpCodeAddr(op_i_func_info_IaaI);
#endif
- pc[0] = (BeamInstr) BeamOp(op_jump_f);
- pc[1] = (BeamInstr) ep->addressv[code_ix];
+ ep->beam[0] = BeamOpCodeAddr(op_trace_jump_W);
+ ep->beam[1] = (BeamInstr) ep->addressv[code_ix];
}
- erts_set_call_trace_bif(pc, match_prog_set, 0);
+ erts_set_call_trace_bif(ci, match_prog_set, 0);
if (ep->addressv[code_ix] != pc) {
- pc[0] = (BeamInstr) BeamOp(op_i_generic_breakpoint);
+ ep->beam[0] = BeamOpCodeAddr(op_i_generic_breakpoint);
}
} else if (!on && flags.breakpoint) {
/* Turn off breakpoint tracing -- nothing to do here. */
@@ -1432,9 +1377,9 @@ erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
* Turn off global tracing, either explicitly or implicitly
* before turning on breakpoint tracing.
*/
- erts_clear_call_trace_bif(pc, 0);
- if (pc[0] == (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
- pc[0] = (BeamInstr) BeamOp(op_jump_f);
+ erts_clear_call_trace_bif(ci, 0);
+ if (BeamIsOpCode(ep->beam[0], op_i_generic_breakpoint)) {
+ ep->beam[0] = BeamOpCodeAddr(op_trace_jump_W);
}
}
}
@@ -1444,68 +1389,76 @@ erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
*/
for (i = 0; i < BIF_SIZE; ++i) {
Export *ep = bif_export[i];
- int j;
-
+
if (!ExportIsBuiltIn(ep)) {
continue;
}
-
+
if (bif_table[i].f == bif_table[i].traced) {
/* Trace wrapper same as regular function - untraceable */
continue;
}
-
- for (j = 0; j < specified && mfa[j] == ep->code[j]; j++) {
- /* Empty loop body */
- }
- if (j == specified) {
- BeamInstr* pc = (BeamInstr *)bif_export[i]->code + 3;
- if (! flags.breakpoint) { /* Export entry call trace */
- if (on) {
- erts_clear_call_trace_bif(pc, 1);
- erts_clear_mtrace_bif(pc);
- erts_set_call_trace_bif(pc, match_prog_set, 0);
- } else { /* off */
- erts_clear_call_trace_bif(pc, 0);
- }
- matches++;
- } else { /* Breakpoint call trace */
- int m = 0;
-
- if (on) {
- if (flags.local) {
- erts_clear_call_trace_bif(pc, 0);
- erts_set_call_trace_bif(pc, match_prog_set, 1);
- m = 1;
- }
- if (flags.meta) {
- erts_set_mtrace_bif(pc, meta_match_prog_set,
- meta_tracer_pid);
- m = 1;
- }
- if (flags.call_time) {
- erts_set_time_trace_bif(pc, on);
- /* I don't want to remove any other tracers */
- m = 1;
- }
- } else { /* off */
- if (flags.local) {
- erts_clear_call_trace_bif(pc, 1);
- m = 1;
- }
- if (flags.meta) {
- erts_clear_mtrace_bif(pc);
- m = 1;
- }
- if (flags.call_time) {
- erts_clear_time_trace_bif(pc);
- m = 1;
- }
- }
- matches += m;
- }
- }
+ switch (specified) {
+ case 3:
+ if (mfa->arity != ep->info.mfa.arity)
+ continue;
+ case 2:
+ if (mfa->function != ep->info.mfa.function)
+ continue;
+ case 1:
+ if (mfa->module != ep->info.mfa.module)
+ continue;
+ case 0:
+ break;
+ default:
+ ASSERT(0);
+ }
+
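/* Editorial note: the switch above relies on deliberate fall-through
 * from case 3 down to case 0, so `specified` selects how much of the
 * module/function/arity triple must match the export entry; it is a
 * prefix match, not an exact one. */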
+ if (! flags.breakpoint) { /* Export entry call trace */
+ if (on) {
+ erts_clear_call_trace_bif(&ep->info, 1);
+ erts_clear_mtrace_bif(&ep->info);
+ erts_set_call_trace_bif(&ep->info, match_prog_set, 0);
+ } else { /* off */
+ erts_clear_call_trace_bif(&ep->info, 0);
+ }
+ matches++;
+ } else { /* Breakpoint call trace */
+ int m = 0;
+
+ if (on) {
+ if (flags.local) {
+ erts_clear_call_trace_bif(&ep->info, 0);
+ erts_set_call_trace_bif(&ep->info, match_prog_set, 1);
+ m = 1;
+ }
+ if (flags.meta) {
+ erts_set_mtrace_bif(&ep->info, meta_match_prog_set,
+ meta_tracer);
+ m = 1;
+ }
+ if (flags.call_time) {
+ erts_set_time_trace_bif(&ep->info, on);
+ /* I don't want to remove any other tracers */
+ m = 1;
+ }
+ } else { /* off */
+ if (flags.local) {
+ erts_clear_call_trace_bif(&ep->info, 1);
+ m = 1;
+ }
+ if (flags.meta) {
+ erts_clear_mtrace_bif(&ep->info);
+ m = 1;
+ }
+ if (flags.call_time) {
+ erts_clear_time_trace_bif(&ep->info);
+ m = 1;
+ }
+ }
+ matches += m;
+ }
}
/*
@@ -1521,7 +1474,7 @@ erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
}
if (flags.meta) {
erts_set_mtrace_break(&finish_bp.f, meta_match_prog_set,
- meta_tracer_pid);
+ meta_tracer);
}
if (flags.call_count) {
erts_set_count_break(&finish_bp.f, on);
@@ -1549,17 +1502,13 @@ erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
finish_bp.install = on;
finish_bp.local = flags.breakpoint;
-#ifdef ERTS_SMP
if (is_blocking) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
-#endif
+ ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
while (erts_finish_breakpointing()) {
/* Empty loop body */
}
-#ifdef ERTS_SMP
finish_bp.current = -1;
}
-#endif
if (flags.breakpoint) {
matches += finish_bp.f.matched;
@@ -1570,12 +1519,52 @@ erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
}
int
+erts_set_tracing_event_pattern(Eterm event, Binary* match_spec, int on)
+{
+ ErtsBpIndex ix = erts_staging_bp_ix();
+ ErtsTracingEvent* st;
+
+ switch (event) {
+ case am_send: st = &erts_send_tracing[ix]; break;
+ case am_receive: st = &erts_receive_tracing[ix]; break;
+ default: return -1;
+ }
+
+ MatchSetUnref(st->match_spec);
+
+ st->on = on;
+ st->match_spec = match_spec;
+ MatchSetRef(match_spec);
+
+ finish_bp.current = 1; /* prepare phase not needed for event trace */
+ finish_bp.install = on;
+ finish_bp.e.matched = 0;
+ finish_bp.e.matching = NULL;
+ finish_bp.f.matched = 0;
+ finish_bp.f.matching = NULL;
+
+ return 1;
+}
+
+static void
+consolidate_event_tracing(ErtsTracingEvent te[])
+{
+ ErtsTracingEvent* src = &te[erts_active_bp_ix()];
+ ErtsTracingEvent* dst = &te[erts_staging_bp_ix()];
+
+ MatchSetUnref(dst->match_spec);
+ dst->on = src->on;
+ dst->match_spec = src->match_spec;
+ MatchSetRef(dst->match_spec);
+}
+
+int
erts_finish_breakpointing(void)
{
- ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
+ ERTS_LC_ASSERT(erts_has_code_write_permission());
/*
- * Memory barriers will be issued for all processes *before*
+ * Memory barriers will be issued for all schedulers *before*
* each of the stages below. (Unless the other schedulers
* are blocked, in which case memory barriers will be issued
* when they are awakened.)
@@ -1644,6 +1633,8 @@ erts_finish_breakpointing(void)
erts_consolidate_bp_data(&finish_bp.f, 1);
erts_bp_free_matched_functions(&finish_bp.e);
erts_bp_free_matched_functions(&finish_bp.f);
+ consolidate_event_tracing(erts_send_tracing);
+ consolidate_event_tracing(erts_receive_tracing);
return 0;
default:
ASSERT(0);
@@ -1658,13 +1649,11 @@ install_exp_breakpoints(BpFunctions* f)
BpFunction* fp = f->matching;
Uint ne = f->matched;
Uint i;
- Uint offset = offsetof(Export, code) + 3*sizeof(BeamInstr);
for (i = 0; i < ne; i++) {
- BeamInstr* pc = fp[i].pc;
- Export* ep = (Export *) (((char *)pc)-offset);
+ Export* ep = ErtsContainerStruct(fp[i].ci, Export, info);
- ep->addressv[code_ix] = pc;
+ ep->addressv[code_ix] = ep->beam;
}
}
@@ -1675,17 +1664,15 @@ uninstall_exp_breakpoints(BpFunctions* f)
BpFunction* fp = f->matching;
Uint ne = f->matched;
Uint i;
- Uint offset = offsetof(Export, code) + 3*sizeof(BeamInstr);
for (i = 0; i < ne; i++) {
- BeamInstr* pc = fp[i].pc;
- Export* ep = (Export *) (((char *)pc)-offset);
+ Export* ep = ErtsContainerStruct(fp[i].ci, Export, info);
- if (ep->addressv[code_ix] != pc) {
+ if (ep->addressv[code_ix] != ep->beam) {
continue;
}
- ASSERT(*pc == (BeamInstr) BeamOp(op_jump_f));
- ep->addressv[code_ix] = (BeamInstr *) ep->code[4];
+ ASSERT(BeamIsOpCode(ep->beam[0], op_trace_jump_W));
+ ep->addressv[code_ix] = (BeamInstr *) ep->beam[1];
}
}
@@ -1696,18 +1683,16 @@ clean_export_entries(BpFunctions* f)
BpFunction* fp = f->matching;
Uint ne = f->matched;
Uint i;
- Uint offset = offsetof(Export, code) + 3*sizeof(BeamInstr);
for (i = 0; i < ne; i++) {
- BeamInstr* pc = fp[i].pc;
- Export* ep = (Export *) (((char *)pc)-offset);
+ Export* ep = ErtsContainerStruct(fp[i].ci, Export, info);
- if (ep->addressv[code_ix] == pc) {
+ if (ep->addressv[code_ix] == ep->beam) {
continue;
}
- if (*pc == (BeamInstr) BeamOp(op_jump_f)) {
- ep->code[3] = (BeamInstr) 0;
- ep->code[4] = (BeamInstr) 0;
+ if (BeamIsOpCode(ep->beam[0], op_trace_jump_W)) {
+ ep->beam[0] = (BeamInstr) 0;
+ ep->beam[1] = (BeamInstr) 0;
}
}
}
@@ -1719,11 +1704,11 @@ setup_bif_trace(void)
for (i = 0; i < BIF_SIZE; ++i) {
Export *ep = bif_export[i];
- GenericBp* g = (GenericBp *) ep->fake_op_func_info_for_hipe[1];
+ GenericBp* g = ep->info.u.gen_bp;
if (g) {
if (ExportIsBuiltIn(ep)) {
- ASSERT(ep->code[4]);
- ep->code[4] = (BeamInstr) bif_table[i].traced;
+ ASSERT(ep->beam[1]);
+ ep->beam[1] = (BeamInstr) bif_table[i].traced;
}
}
}
@@ -1737,12 +1722,11 @@ reset_bif_trace(void)
for (i = 0; i < BIF_SIZE; ++i) {
Export *ep = bif_export[i];
- BeamInstr* pc = ep->code+3;
- GenericBp* g = (GenericBp *) pc[-4];
+ GenericBp* g = ep->info.u.gen_bp;
if (g && g->data[active].flags == 0) {
if (ExportIsBuiltIn(ep)) {
- ASSERT(ep->code[4]);
- ep->code[4] = (BeamInstr) bif_table[i].f;
+ ASSERT(ep->beam[1]);
+ ep->beam[1] = (BeamInstr) bif_table[i].f;
}
}
}
@@ -1796,7 +1780,11 @@ Eterm erts_seq_trace(Process *p, Eterm arg1, Eterm arg2,
} else if (arg1 == am_print) {
current_flag = SEQ_TRACE_PRINT;
} else if (arg1 == am_timestamp) {
- current_flag = SEQ_TRACE_TIMESTAMP;
+ current_flag = SEQ_TRACE_NOW_TS;
+ } else if (arg1 == am_strict_monotonic_timestamp) {
+ current_flag = SEQ_TRACE_STRICT_MON_TS;
+ } else if (arg1 == am_monotonic_timestamp) {
+ current_flag = SEQ_TRACE_MON_TS;
}
else
current_flag = 0;
@@ -1877,11 +1865,7 @@ new_seq_trace_token(Process* p)
{
Eterm* hp;
- if (SEQ_TRACE_TOKEN(p) == NIL
-#ifdef USE_VM_PROBES
- || SEQ_TRACE_TOKEN(p) == am_have_dt_utag
-#endif
- ) {
+ if (have_no_seqtrace(SEQ_TRACE_TOKEN(p))) {
hp = HAlloc(p, 6);
SEQ_TRACE_TOKEN(p) = TUPLE5(hp, make_small(0), /* Flags */
make_small(0), /* Label */
@@ -1901,13 +1885,11 @@ BIF_RETTYPE erl_seq_trace_info(Process *p, Eterm item)
BIF_ERROR(p, BADARG);
}
- if (SEQ_TRACE_TOKEN(p) == NIL
-#ifdef USE_VM_PROBES
- || SEQ_TRACE_TOKEN(p) == am_have_dt_utag
-#endif
- ) {
- if ((item == am_send) || (item == am_receive) ||
- (item == am_print) || (item == am_timestamp)) {
+ if (have_no_seqtrace(SEQ_TRACE_TOKEN(p))) {
+ if ((item == am_send) || (item == am_receive) ||
+ (item == am_print) || (item == am_timestamp)
+ || (item == am_monotonic_timestamp)
+ || (item == am_strict_monotonic_timestamp)) {
hp = HAlloc(p,3);
res = TUPLE2(hp, item, am_false);
BIF_RET(res);
@@ -1925,7 +1907,11 @@ BIF_RETTYPE erl_seq_trace_info(Process *p, Eterm item)
} else if (item == am_print) {
current_flag = SEQ_TRACE_PRINT;
} else if (item == am_timestamp) {
- current_flag = SEQ_TRACE_TIMESTAMP;
+ current_flag = SEQ_TRACE_NOW_TS;
+ } else if (item == am_strict_monotonic_timestamp) {
+ current_flag = SEQ_TRACE_STRICT_MON_TS;
+ } else if (item == am_monotonic_timestamp) {
+ current_flag = SEQ_TRACE_MON_TS;
} else {
current_flag = 0;
}
@@ -1962,11 +1948,7 @@ BIF_RETTYPE seq_trace_info_1(BIF_ALIST_1)
*/
BIF_RETTYPE seq_trace_print_1(BIF_ALIST_1)
{
- if (SEQ_TRACE_TOKEN(BIF_P) == NIL
-#ifdef USE_VM_PROBES
- || SEQ_TRACE_TOKEN(BIF_P) == am_have_dt_utag
-#endif
- ) {
+ if (have_no_seqtrace(SEQ_TRACE_TOKEN(BIF_P))) {
BIF_RET(am_false);
}
seq_trace_update_send(BIF_P);
@@ -1985,11 +1967,7 @@ BIF_RETTYPE seq_trace_print_1(BIF_ALIST_1)
*/
BIF_RETTYPE seq_trace_print_2(BIF_ALIST_2)
{
- if (SEQ_TRACE_TOKEN(BIF_P) == NIL
-#ifdef USE_VM_PROBES
- || SEQ_TRACE_TOKEN(BIF_P) == am_have_dt_utag
-#endif
- ) {
+ if (have_no_seqtrace(SEQ_TRACE_TOKEN(BIF_P))) {
BIF_RET(am_false);
}
if (!(is_atom(BIF_ARG_1) || is_small(BIF_ARG_1))) {
@@ -2004,24 +1982,20 @@ BIF_RETTYPE seq_trace_print_2(BIF_ALIST_2)
}
void erts_system_monitor_clear(Process *c_p) {
-#ifdef ERTS_SMP
if (c_p) {
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
}
-#endif
erts_set_system_monitor(NIL);
erts_system_monitor_long_gc = 0;
erts_system_monitor_long_schedule = 0;
erts_system_monitor_large_heap = 0;
erts_system_monitor_flags.busy_port = 0;
erts_system_monitor_flags.busy_dist_port = 0;
-#ifdef ERTS_SMP
if (c_p) {
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
-#endif
}
@@ -2131,8 +2105,8 @@ system_monitor(Process *p, Eterm monitor_pid, Eterm list)
int busy_port, busy_dist_port;
system_blocked = 1;
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
if (!erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, monitor_pid, 0))
goto error;
@@ -2171,16 +2145,16 @@ system_monitor(Process *p, Eterm monitor_pid, Eterm list)
erts_system_monitor_flags.busy_port = !!busy_port;
erts_system_monitor_flags.busy_dist_port = !!busy_dist_port;
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
BIF_RET(prev);
}
error:
if (system_blocked) {
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
}
BIF_ERROR(p, BADARG);
@@ -2189,23 +2163,19 @@ system_monitor(Process *p, Eterm monitor_pid, Eterm list)
/* Begin: Trace for System Profiling */
void erts_system_profile_clear(Process *c_p) {
-#ifdef ERTS_SMP
if (c_p) {
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
}
-#endif
erts_set_system_profile(NIL);
erts_system_profile_flags.scheduler = 0;
erts_system_profile_flags.runnable_procs = 0;
erts_system_profile_flags.runnable_ports = 0;
erts_system_profile_flags.exclusive = 0;
-#ifdef ERTS_SMP
if (c_p) {
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
-#endif
}
static Eterm system_profile_get(Process *p) {
@@ -2235,6 +2205,7 @@ static Eterm system_profile_get(Process *p) {
if (erts_system_profile_flags.exclusive) {
res = CONS(hp, am_exclusive, res); hp += 2;
}
+
return TUPLE2(hp, system_profile, res);
}
}
@@ -2253,6 +2224,7 @@ BIF_RETTYPE system_profile_2(BIF_ALIST_2)
int system_blocked = 0;
Process *profiler_p = NULL;
Port *profiler_port = NULL;
+ int ts;
if (profiler == am_undefined || list == NIL) {
prev = system_profile_get(p);
@@ -2265,8 +2237,8 @@ BIF_RETTYPE system_profile_2(BIF_ALIST_2)
int scheduler, runnable_procs, runnable_ports, exclusive;
system_blocked = 1;
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
/* Check if valid process, no locks are taken */
@@ -2284,7 +2256,8 @@ BIF_RETTYPE system_profile_2(BIF_ALIST_2)
goto error;
}
- for (scheduler = 0, runnable_ports = 0, runnable_procs = 0, exclusive = 0;
+ for (ts = ERTS_TRACE_FLG_NOW_TIMESTAMP, scheduler = 0,
+ runnable_ports = 0, runnable_procs = 0, exclusive = 0;
is_list(list);
list = CDR(list_val(list))) {
@@ -2297,6 +2270,12 @@ BIF_RETTYPE system_profile_2(BIF_ALIST_2)
exclusive = !0;
} else if (t == am_scheduler) {
scheduler = !0;
+ } else if (t == am_timestamp) {
+ ts = ERTS_TRACE_FLG_NOW_TIMESTAMP;
+ } else if (t == am_strict_monotonic_timestamp) {
+ ts = ERTS_TRACE_FLG_STRICT_MONOTONIC_TIMESTAMP;
+ } else if (t == am_monotonic_timestamp) {
+ ts = ERTS_TRACE_FLG_MONOTONIC_TIMESTAMP;
} else goto error;
}
if (is_not_nil(list)) goto error;
@@ -2309,9 +2288,9 @@ BIF_RETTYPE system_profile_2(BIF_ALIST_2)
erts_system_profile_flags.runnable_ports = !!runnable_ports;
erts_system_profile_flags.runnable_procs = !!runnable_procs;
erts_system_profile_flags.exclusive = !!exclusive;
-
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ erts_system_profile_ts_type = ts;
+ erts_thr_progress_unblock();
+ erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
BIF_RET(prev);
@@ -2319,58 +2298,83 @@ BIF_RETTYPE system_profile_2(BIF_ALIST_2)
error:
if (system_blocked) {
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
}
BIF_ERROR(p, BADARG);
}
/* End: Trace for System Profiling */
-BIF_RETTYPE
-trace_delivered_1(BIF_ALIST_1)
+/* trace_delivered sends an aux work message to all schedulers,
+ and when every scheduler has acknowledged that it has seen
+ the message, a reply is sent to the requesting process.
+
+ IMPORTANT: We have to make sure that all messages sent
+ using enif_send have been delivered before we send the message
+ to the caller.
+
+ There used to be a separate implementation for when only a pid
+ is passed in, but since this is not performance-critical code
+ we now use the same approach for both.
+*/
+
+typedef struct {
+ Process *proc;
+ Eterm ref;
+ Eterm ref_heap[ERTS_REF_THING_SIZE];
+ Eterm target;
+ erts_atomic32_t refc;
+} ErtsTraceDeliveredAll;
+
+static void
+reply_trace_delivered_all(void *vtdarp)
{
- DECL_AM(trace_delivered);
-#ifdef ERTS_SMP
- ErlHeapFragment *bp;
-#else
- ErtsProcLocks locks = 0;
-#endif
- Eterm *hp;
- Eterm msg, ref, msg_ref;
- Process *p;
- if (BIF_ARG_1 == am_all) {
- p = NULL;
- } else if (! (p = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
- BIF_ARG_1, ERTS_PROC_LOCKS_ALL))) {
- if (is_not_internal_pid(BIF_ARG_1)) {
- BIF_ERROR(BIF_P, BADARG);
- }
- }
-
- ref = erts_make_ref(BIF_P);
+ ErtsTraceDeliveredAll *tdarp = (ErtsTraceDeliveredAll *) vtdarp;
-#ifdef ERTS_SMP
- bp = new_message_buffer(REF_THING_SIZE + 4);
- hp = &bp->mem[0];
- msg_ref = STORE_NC(&hp, &bp->off_heap, ref);
-#else
- hp = HAlloc(BIF_P, 4);
- msg_ref = ref;
-#endif
+ if (erts_atomic32_dec_read_nob(&tdarp->refc) == 0) {
+ Eterm ref_copy, msg;
+ Process *rp = tdarp->proc;
+ Eterm *hp = NULL;
+ ErlOffHeap *ohp;
+ ErlHeapFragment *bp;
+ bp = new_message_buffer(4 + NC_HEAP_SIZE(tdarp->ref));
+ hp = &bp->mem[0];
+ ohp = &bp->off_heap;
- msg = TUPLE3(hp, AM_trace_delivered, BIF_ARG_1, msg_ref);
+ ref_copy = STORE_NC(&hp, ohp, tdarp->ref);
+ msg = TUPLE3(hp, am_trace_delivered, tdarp->target, ref_copy);
-#ifdef ERTS_SMP
- erts_send_sys_msg_proc(BIF_P->common.id, BIF_P->common.id, msg, bp);
- if (p)
- erts_smp_proc_unlock(p,
- (BIF_P == p
- ? ERTS_PROC_LOCKS_ALL_MINOR
- : ERTS_PROC_LOCKS_ALL));
-#else
- erts_send_message(BIF_P, BIF_P, &locks, msg, ERTS_SND_FLG_NO_SEQ_TRACE);
-#endif
+ erts_send_sys_msg_proc(rp->common.id, rp->common.id, msg, bp);
- BIF_RET(ref);
+ erts_free(ERTS_ALC_T_MISC_AUX_WORK, vtdarp);
+ erts_proc_dec_refc(rp);
+ }
+}
+
+BIF_RETTYPE
+trace_delivered_1(BIF_ALIST_1)
+{
+
+ if (BIF_ARG_1 == am_all || is_internal_pid(BIF_ARG_1)) {
+ Eterm *hp, ref;
+ ErtsTraceDeliveredAll *tdarp =
+ erts_alloc(ERTS_ALC_T_MISC_AUX_WORK, sizeof(ErtsTraceDeliveredAll));
+
+ tdarp->proc = BIF_P;
+ ref = erts_make_ref(BIF_P);
+ hp = &tdarp->ref_heap[0];
+ tdarp->ref = STORE_NC(&hp, NULL, ref);
+ tdarp->target = BIF_ARG_1;
+ erts_atomic32_init_nob(&tdarp->refc,
+ (erts_aint32_t) erts_no_schedulers);
+ erts_proc_add_refc(BIF_P, 1);
+ erts_schedule_multi_misc_aux_work(0,
+ erts_no_schedulers,
+ reply_trace_delivered_all,
+ (void *) tdarp);
+ BIF_RET(ref);
+ } else {
+ BIF_ERROR(BIF_P, BADARG);
+ }
}
diff --git a/erts/emulator/beam/erl_bif_unique.c b/erts/emulator/beam/erl_bif_unique.c
new file mode 100644
index 0000000000..19d46537f9
--- /dev/null
+++ b/erts/emulator/beam/erl_bif_unique.c
@@ -0,0 +1,847 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2014-2017. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#define ERL_BIF_UNIQUE_C__
+#include "sys.h"
+#include "erl_vm.h"
+#include "erl_alloc.h"
+#include "export.h"
+#include "bif.h"
+#include "erl_bif_unique.h"
+#include "hash.h"
+#include "erl_binary.h"
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Reference                                                          *
+\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+static union {
+ erts_atomic64_t count;
+ char align__[ERTS_CACHE_LINE_SIZE];
+} global_reference erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+
+
+/*
+ * ref[0] indicates the thread creating the reference as follows:
+ *
+ * - ref[0] == 0 => Non-scheduler thread;
+ * - else; ref[0] <= erts_no_schedulers =>
+ * ordinary scheduler with id == ref[0];
+ * - else; ref[0] <= erts_no_schedulers
+ * + erts_no_dirty_cpu_schedulers =>
+ * dirty cpu scheduler with id == 'ref[0] - erts_no_schedulers';
+ * - else =>
+ * dirty io scheduler with id == 'ref[0]
+ * - erts_no_schedulers
+ * - erts_no_dirty_cpu_schedulers'
+ */
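+
+/* A minimal illustrative sketch (not part of the original source):
+ * classifying the thread id stored in ref[0] according to the rules
+ * in the comment above. The helper name is hypothetical. */
+static const char *
+classify_ref_thr_id(Uint32 thr_id)
+{
+    if (thr_id == 0)
+        return "non-scheduler thread";
+    if (thr_id <= erts_no_schedulers)
+        return "ordinary scheduler";
+    if (thr_id <= erts_no_schedulers + erts_no_dirty_cpu_schedulers)
+        return "dirty cpu scheduler";
+    return "dirty io scheduler";
+}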
+
+#ifdef DEBUG
+static Uint32 max_thr_id;
+#endif
+
+static void init_magic_ref_tables(void);
+
+static Uint64 ref_init_value;
+
+static void
+init_reference(void)
+{
+ SysTimeval tv;
+ sys_gettimeofday(&tv);
+ ref_init_value = 0;
+ ref_init_value |= (Uint64) tv.tv_sec;
+ ref_init_value |= ((Uint64) tv.tv_usec) << 32;
+ ref_init_value *= (Uint64) 268438039;
+ ref_init_value += (Uint64) tv.tv_usec;
+#ifdef DEBUG
+ max_thr_id = (Uint32) erts_no_schedulers;
+ max_thr_id += (Uint32) erts_no_dirty_cpu_schedulers;
+ max_thr_id += (Uint32) erts_no_dirty_io_schedulers;
+#endif
+ erts_atomic64_init_nob(&global_reference.count,
+ (erts_aint64_t) ref_init_value);
+ init_magic_ref_tables();
+}
+
+static ERTS_INLINE void
+global_make_ref_in_array(Uint32 thr_id, Uint32 ref[ERTS_REF_NUMBERS])
+{
+ Uint64 value;
+
+ value = (Uint64) erts_atomic64_inc_read_mb(&global_reference.count);
+
+ erts_set_ref_numbers(ref, thr_id, value);
+}
+
+static ERTS_INLINE void
+make_ref_in_array(Uint32 ref[ERTS_REF_NUMBERS])
+{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ if (esdp)
+ erts_sched_make_ref_in_array(esdp, ref);
+ else
+ global_make_ref_in_array(0, ref);
+}
+
+void
+erts_make_ref_in_array(Uint32 ref[ERTS_REF_NUMBERS])
+{
+ make_ref_in_array(ref);
+}
+
+void
+erts_make_magic_ref_in_array(Uint32 ref[ERTS_REF_NUMBERS])
+{
+ make_ref_in_array(ref);
+ ASSERT(!(ref[1] & ERTS_REF1_MAGIC_MARKER_BIT__));
+ ref[1] |= ERTS_REF1_MAGIC_MARKER_BIT__;
+}
+
+Eterm erts_make_ref_in_buffer(Eterm buffer[ERTS_REF_THING_SIZE])
+{
+ Eterm* hp = buffer;
+ Uint32 ref[ERTS_REF_NUMBERS];
+
+ make_ref_in_array(ref);
+ write_ref_thing(hp, ref[0], ref[1], ref[2]);
+ return make_internal_ref(hp);
+}
+
+Eterm erts_make_ref(Process *c_p)
+{
+ Eterm* hp;
+ Uint32 ref[ERTS_REF_NUMBERS];
+
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p));
+
+ hp = HAlloc(c_p, ERTS_REF_THING_SIZE);
+
+ make_ref_in_array(ref);
+ write_ref_thing(hp, ref[0], ref[1], ref[2]);
+
+ return make_internal_ref(hp);
+}
+
+/*
+ * Magic reference tables
+ */
+
+typedef struct {
+ HashBucket hash;
+ ErtsMagicBinary *mb;
+ Uint64 value;
+ Uint32 thr_id;
+} ErtsMagicRefTableEntry;
+
+typedef struct {
+ erts_rwmtx_t rwmtx;
+ Hash hash;
+ char name[32];
+} ErtsMagicRefTable;
+
+typedef struct {
+ union {
+ ErtsMagicRefTable table;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsMagicRefTable))];
+ } u;
+} ErtsAlignedMagicRefTable;
+
+ErtsAlignedMagicRefTable *magic_ref_table;
+
+ErtsMagicBinary *
+erts_magic_ref_lookup_bin__(Uint32 refn[ERTS_REF_NUMBERS])
+{
+ ErtsMagicRefTableEntry tmpl;
+ ErtsMagicRefTableEntry *tep;
+ ErtsMagicBinary *mb;
+ ErtsMagicRefTable *tblp;
+
+ ASSERT(erts_is_ref_numbers_magic(refn));
+
+ tmpl.value = erts_get_ref_numbers_value(refn);
+ tmpl.thr_id = erts_get_ref_numbers_thr_id(refn);
+ if (tmpl.thr_id > erts_no_schedulers)
+ tblp = &magic_ref_table[0].u.table;
+ else
+ tblp = &magic_ref_table[tmpl.thr_id].u.table;
+
+ erts_rwmtx_rlock(&tblp->rwmtx);
+
+ tep = (ErtsMagicRefTableEntry *) hash_get(&tblp->hash, &tmpl);
+ if (!tep)
+ mb = NULL;
+ else {
+ erts_aint_t refc;
+ mb = tep->mb;
+ refc = erts_refc_inc_unless(&mb->intern.refc, 0, 0);
+ if (refc == 0)
+ mb = NULL;
+ }
+
+ erts_rwmtx_runlock(&tblp->rwmtx);
+
+ return mb;
+}
+
+void
+erts_magic_ref_save_bin__(Eterm ref)
+{
+ ErtsMagicRefTableEntry tmpl;
+ ErtsMagicRefTableEntry *tep;
+ ErtsMRefThing *mrtp;
+ ErtsMagicRefTable *tblp;
+ Uint32 *refn;
+
+ ASSERT(is_internal_magic_ref(ref));
+
+ mrtp = (ErtsMRefThing *) internal_ref_val(ref);
+ refn = mrtp->mb->refn;
+
+ tmpl.value = erts_get_ref_numbers_value(refn);
+ tmpl.thr_id = erts_get_ref_numbers_thr_id(refn);
+
+ if (tmpl.thr_id > erts_no_schedulers)
+ tblp = &magic_ref_table[0].u.table;
+ else
+ tblp = &magic_ref_table[tmpl.thr_id].u.table;
+
+ erts_rwmtx_rlock(&tblp->rwmtx);
+
+ tep = (ErtsMagicRefTableEntry *) hash_get(&tblp->hash, &tmpl);
+
+ erts_rwmtx_runlock(&tblp->rwmtx);
+
+ if (!tep) {
+ ErtsMagicRefTableEntry *used_tep;
+
+ ASSERT(tmpl.value == erts_get_ref_numbers_value(refn));
+ ASSERT(tmpl.thr_id == erts_get_ref_numbers_thr_id(refn));
+
+ if (tblp != &magic_ref_table[0].u.table) {
+ tep = erts_alloc(ERTS_ALC_T_MREF_NSCHED_ENT,
+ sizeof(ErtsNSchedMagicRefTableEntry));
+ }
+ else {
+ tep = erts_alloc(ERTS_ALC_T_MREF_ENT,
+ sizeof(ErtsMagicRefTableEntry));
+ tep->thr_id = tmpl.thr_id;
+ }
+
+ tep->value = tmpl.value;
+ tep->mb = mrtp->mb;
+
+ erts_rwmtx_rwlock(&tblp->rwmtx);
+
+ used_tep = hash_put(&tblp->hash, tep);
+
+ erts_rwmtx_rwunlock(&tblp->rwmtx);
+
+ if (used_tep != tep) {
+ if (tblp != &magic_ref_table[0].u.table)
+ erts_free(ERTS_ALC_T_MREF_NSCHED_ENT, (void *) tep);
+ else
+ erts_free(ERTS_ALC_T_MREF_ENT, (void *) tep);
+ }
+ }
+}
+
+void
+erts_magic_ref_remove_bin(Uint32 refn[ERTS_REF_NUMBERS])
+{
+ ErtsMagicRefTableEntry tmpl;
+ ErtsMagicRefTableEntry *tep;
+ ErtsMagicRefTable *tblp;
+
+ tmpl.value = erts_get_ref_numbers_value(refn);
+ tmpl.thr_id = erts_get_ref_numbers_thr_id(refn);
+
+ if (tmpl.thr_id > erts_no_schedulers)
+ tblp = &magic_ref_table[0].u.table;
+ else
+ tblp = &magic_ref_table[tmpl.thr_id].u.table;
+
+ erts_rwmtx_rlock(&tblp->rwmtx);
+
+ tep = (ErtsMagicRefTableEntry *) hash_get(&tblp->hash, &tmpl);
+
+ erts_rwmtx_runlock(&tblp->rwmtx);
+
+ if (tep) {
+
+ ASSERT(tmpl.value == erts_get_ref_numbers_value(refn));
+ ASSERT(tmpl.thr_id == erts_get_ref_numbers_thr_id(refn));
+
+ erts_rwmtx_rwlock(&tblp->rwmtx);
+
+ tep = hash_remove(&tblp->hash, &tmpl);
+ ASSERT(tep);
+
+ erts_rwmtx_rwunlock(&tblp->rwmtx);
+
+ if (tblp != &magic_ref_table[0].u.table)
+ erts_free(ERTS_ALC_T_MREF_NSCHED_ENT, (void *) tep);
+ else
+ erts_free(ERTS_ALC_T_MREF_ENT, (void *) tep);
+ }
+}
+
+static int nsched_mreft_cmp(void *ve1, void *ve2)
+{
+ ErtsNSchedMagicRefTableEntry *e1 = ve1;
+ ErtsNSchedMagicRefTableEntry *e2 = ve2;
+ return e1->value != e2->value;
+}
+
+static int non_nsched_mreft_cmp(void *ve1, void *ve2)
+{
+ ErtsMagicRefTableEntry *e1 = ve1;
+ ErtsMagicRefTableEntry *e2 = ve2;
+ return e1->value != e2->value || e1->thr_id != e2->thr_id;
+}
+
+static HashValue nsched_mreft_hash(void *ve)
+{
+ ErtsNSchedMagicRefTableEntry *e = ve;
+ return (HashValue) e->value;
+}
+
+static HashValue non_nsched_mreft_hash(void *ve)
+{
+ ErtsMagicRefTableEntry *e = ve;
+ HashValue h;
+ h = (HashValue) e->thr_id;
+ h *= 268440163;
+ h += (HashValue) e->value;
+ return h;
+}
+
+static void *mreft_alloc(void *ve)
+{
+ /*
+ * We allocate the element before
+ * hash_put() and pass it in as the
+ * template, which we then get back
+ * as input...
+ */
+ return ve;
+}
+
+static void mreft_free(void *ve)
+{
+ /*
+ * We free the element ourselves
+ * after hash_remove()...
+ */
+}
+
+static void *mreft_meta_alloc(int i, size_t size)
+{
+ return erts_alloc(ERTS_ALC_T_MREF_TAB_BKTS, size);
+}
+
+static void mreft_meta_free(int i, void *ptr)
+{
+ erts_free(ERTS_ALC_T_MREF_TAB_BKTS, ptr);
+}
+
+static void
+init_magic_ref_tables(void)
+{
+ HashFunctions hash_funcs;
+ int i;
+ ErtsMagicRefTable *tblp;
+
+ magic_ref_table = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_MREF_TAB,
+ (sizeof(ErtsAlignedMagicRefTable)
+ * (erts_no_schedulers + 1)));
+
+ hash_funcs.hash = non_nsched_mreft_hash;
+ hash_funcs.cmp = non_nsched_mreft_cmp;
+
+ hash_funcs.alloc = mreft_alloc;
+ hash_funcs.free = mreft_free;
+ hash_funcs.meta_alloc = mreft_meta_alloc;
+ hash_funcs.meta_free = mreft_meta_free;
+ hash_funcs.meta_print = erts_print;
+
+ tblp = &magic_ref_table[0].u.table;
+ erts_snprintf(&tblp->name[0], sizeof(tblp->name),
+ "magic_ref_table_0");
+ hash_init(0, &tblp->hash, &tblp->name[0], 1, hash_funcs);
+ erts_rwmtx_init(&tblp->rwmtx, "magic_ref_table", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+
+ hash_funcs.hash = nsched_mreft_hash;
+ hash_funcs.cmp = nsched_mreft_cmp;
+
+ for (i = 1; i <= erts_no_schedulers; i++) {
+ ErtsMagicRefTable *tblp = &magic_ref_table[i].u.table;
+ erts_snprintf(&tblp->name[0], sizeof(tblp->name),
+ "magic_ref_table_%d", i);
+ hash_init(0, &tblp->hash, &tblp->name[0], 1, hash_funcs);
+ erts_rwmtx_init(&tblp->rwmtx, "magic_ref_table", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+ }
+}
+
+void erts_ref_bin_free(ErtsMagicBinary *mb)
+{
+ erts_bin_free((Binary *) mb);
+}
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Unique Integer                                                     *
+\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+static struct {
+ union {
+ struct {
+ int left_shift;
+ int right_shift;
+ Uint64 mask;
+ Uint64 val0_max;
+ } o;
+ char align__[ERTS_CACHE_LINE_SIZE];
+ } r;
+ union {
+ erts_atomic64_t val1;
+ char align__[ERTS_CACHE_LINE_SIZE];
+ } w;
+} unique_data erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+
+static void
+init_unique_integer(void)
+{
+ int bits;
+ unique_data.r.o.val0_max = (Uint64) erts_no_schedulers;
+ unique_data.r.o.val0_max += (Uint64) erts_no_dirty_cpu_schedulers;
+ unique_data.r.o.val0_max += (Uint64) erts_no_dirty_io_schedulers;
+ bits = erts_fit_in_bits_int64(unique_data.r.o.val0_max);
+ unique_data.r.o.left_shift = bits;
+ unique_data.r.o.right_shift = 64 - bits;
+ unique_data.r.o.mask = (((Uint64) 1) << bits) - 1;
+ erts_atomic64_init_nob(&unique_data.w.val1, -1);
+}
+
+#define ERTS_MAX_UNIQUE_INT_HEAP_SIZE ERTS_UINT64_ARRAY_TO_BIG_MAX_HEAP_SZ(2)
+
+static ERTS_INLINE Eterm
+bld_unique_integer_term(Eterm **hpp, Uint *szp,
+ Uint64 val0, Uint64 val1,
+ int positive)
+{
+ Uint hsz;
+ Uint64 unique_val[2];
+
+ unique_val[0] = ((Uint64) val0);
+ unique_val[0] |= ((Uint64) val1) << unique_data.r.o.left_shift;
+ unique_val[1] = ((Uint64) val1) >> unique_data.r.o.right_shift;
+ unique_val[1] &= unique_data.r.o.mask;
+
+ if (positive) {
+ unique_val[0]++;
+ if (unique_val[0] == 0)
+ unique_val[1]++;
+ }
+ else {
+ ASSERT(MIN_SMALL < 0);
+ if (unique_val[1] == 0
+ && unique_val[0] < ((Uint64) -1*((Sint64) MIN_SMALL))) {
+ Sint64 s_unique_val = (Sint64) unique_val[0];
+ s_unique_val += MIN_SMALL;
+ ASSERT(MIN_SMALL <= s_unique_val && s_unique_val < 0);
+ if (szp)
+ *szp = 0;
+ if (!hpp)
+ return THE_NON_VALUE;
+ return make_small((Sint) s_unique_val);
+ }
+ if (unique_val[0] < ((Uint64) -1*((Sint64) MIN_SMALL))) {
+ ASSERT(unique_val[1] != 0);
+ unique_val[1] -= 1;
+ }
+ unique_val[0] += MIN_SMALL;
+ }
+
+ if (!unique_val[1]) {
+ if (unique_val[0] <= MAX_SMALL) {
+ if (szp)
+ *szp = 0;
+ if (!hpp)
+ return THE_NON_VALUE;
+ return make_small((Uint) unique_val[0]);
+ }
+
+ if (szp)
+ *szp = ERTS_UINT64_HEAP_SIZE(unique_val[0]);
+ if (!hpp)
+ return THE_NON_VALUE;
+ return erts_uint64_to_big(unique_val[0], hpp);
+ }
+ else {
+ Eterm tmp, *tmp_hp, res;
+ DeclareTmpHeapNoproc(local_heap, 2*ERTS_MAX_UNIQUE_INT_HEAP_SIZE);
+
+ UseTmpHeapNoproc(2*ERTS_MAX_UNIQUE_INT_HEAP_SIZE);
+
+ tmp_hp = local_heap;
+
+ tmp = erts_uint64_array_to_big(&tmp_hp, 0, 2, unique_val);
+ ASSERT(is_big(tmp));
+
+ hsz = big_arity(tmp) + 1;
+
+ ASSERT(hsz <= ERTS_MAX_UNIQUE_INT_HEAP_SIZE);
+
+ if (szp)
+ *szp = hsz;
+
+ if (!hpp)
+ res = THE_NON_VALUE;
+ else {
+ int hix;
+ Eterm *hp = *hpp;
+ tmp_hp = big_val(tmp);
+ for (hix = 0; hix < hsz; hix++)
+ hp[hix] = tmp_hp[hix];
+
+ *hpp = hp + hsz;
+ res = make_big(hp);
+ }
+
+ UnUseTmpHeapNoproc(2*ERTS_MAX_UNIQUE_INT_HEAP_SIZE);
+
+ return res;
+ }
+}
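+
+/* Hedged standalone sketch (not part of the original source) of the
+ * bit packing done in bld_unique_integer_term() above: the 64-bit
+ * counter 'val1' is shifted up past the thread id 'val0', spreading
+ * a (64 + bits)-bit value over two 64-bit words. */
+static void
+pack_unique_words(Uint64 val0, Uint64 val1, int bits, Uint64 out[2])
+{
+    Uint64 mask = (((Uint64) 1) << bits) - 1;
+    out[0] = val0 | (val1 << bits);        /* low 64 bits */
+    out[1] = (val1 >> (64 - bits)) & mask; /* remaining high bits */
+}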
+
+static ERTS_INLINE Eterm unique_integer_bif(Process *c_p, int positive)
+{
+ ErtsSchedulerData *esdp;
+ Uint64 thr_id, unique;
+ Uint hsz;
+ Eterm *hp;
+
+ esdp = erts_proc_sched_data(c_p);
+ thr_id = (Uint64) esdp->thr_id;
+ unique = esdp->unique++;
+ bld_unique_integer_term(NULL, &hsz, thr_id, unique, positive);
+ hp = hsz ? HAlloc(c_p, hsz) : NULL;
+ return bld_unique_integer_term(&hp, NULL, thr_id, unique, positive);
+}
+
+Uint
+erts_raw_unique_integer_heap_size(Uint64 val[ERTS_UNIQUE_INT_RAW_VALUES],
+ int positive)
+{
+ Uint sz;
+ bld_unique_integer_term(NULL, &sz, val[0], val[1], positive);
+ return sz;
+}
+
+Eterm
+erts_raw_make_unique_integer(Eterm **hpp, Uint64 val[ERTS_UNIQUE_INT_RAW_VALUES],
+ int positive)
+{
+ return bld_unique_integer_term(hpp, NULL, val[0], val[1], positive);
+}
+
+void
+erts_raw_get_unique_integer(Uint64 val[ERTS_UNIQUE_INT_RAW_VALUES])
+{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ if (esdp) {
+ val[0] = (Uint64) esdp->thr_id;
+ val[1] = esdp->unique++;
+ }
+ else {
+ val[0] = (Uint64) 0;
+ val[1] = (Uint64) erts_atomic64_inc_read_nob(&unique_data.w.val1);
+ }
+}
+
+
+Sint64
+erts_get_min_unique_integer(void)
+{
+ return (Sint64) MIN_SMALL;
+}
+
+/* --- Debug --- */
+
+Eterm
+erts_debug_make_unique_integer(Process *c_p, Eterm etval0, Eterm etval1)
+{
+ Uint64 val0, val1;
+ Uint hsz;
+ Eterm res, *hp, *end_hp;
+
+ if (!term_to_Uint64(etval0, &val0))
+ return THE_NON_VALUE;
+
+ if (!term_to_Uint64(etval1, &val1))
+ return THE_NON_VALUE;
+
+ bld_unique_integer_term(NULL, &hsz, val0, val1, 0);
+
+ hp = HAlloc(c_p, hsz);
+ end_hp = hp + hsz;
+
+ res = bld_unique_integer_term(&hp, NULL, val0, val1, 0);
+ if (hp != end_hp)
+ ERTS_INTERNAL_ERROR("Heap allocation error");
+
+ return res;
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Strict Monotonic Counter                                           *
+\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+static struct {
+ union {
+ erts_atomic64_t value;
+ char align__[ERTS_CACHE_LINE_SIZE];
+ } w;
+} raw_unique_monotonic_integer erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+
+#if defined(ARCH_32)
+# define ERTS_UNIQUE_MONOTONIC_OFFSET ERTS_SINT64_MIN
+#else
+# define ERTS_UNIQUE_MONOTONIC_OFFSET MIN_SMALL
+#endif
+
+static void
+init_unique_monotonic_integer(void)
+{
+ erts_atomic64_init_nob(&raw_unique_monotonic_integer.w.value,
+ (erts_aint64_t) -1);
+}
+
+static ERTS_INLINE Uint64
+get_raw_unique_monotonic_integer(void)
+{
+ return (Uint64) erts_atomic64_inc_read_mb(&raw_unique_monotonic_integer.w.value);
+}
+
+static ERTS_INLINE Uint
+get_unique_monotonic_integer_heap_size(Uint64 raw, int positive)
+{
+ if (positive) {
+ Uint64 value = raw+1;
+ return ERTS_UINT64_HEAP_SIZE(value);
+ }
+ else {
+ Sint64 value = ((Sint64) raw) + ERTS_UNIQUE_MONOTONIC_OFFSET;
+ if (IS_SSMALL(value))
+ return 0;
+#if defined(ARCH_32)
+ return ERTS_SINT64_HEAP_SIZE(value);
+#else
+ return ERTS_UINT64_HEAP_SIZE((Uint64) value);
+#endif
+ }
+}
+
+static ERTS_INLINE Eterm
+make_unique_monotonic_integer_value(Eterm *hp, Uint hsz, Uint64 raw, int positive)
+{
+ Eterm res;
+#ifdef DEBUG
+ Eterm *end_hp = hp + hsz;
+#endif
+
+ if (positive) {
+ Uint64 value = raw+1;
+ res = hsz ? erts_uint64_to_big(value, &hp) : make_small(value);
+ }
+ else {
+ Sint64 value = ((Sint64) raw) + ERTS_UNIQUE_MONOTONIC_OFFSET;
+ if (hsz == 0)
+ res = make_small(value);
+ else {
+#if defined(ARCH_32)
+ res = erts_sint64_to_big(value, &hp);
+#else
+ res = erts_uint64_to_big((Uint64) value, &hp);
+#endif
+ }
+ }
+
+ ASSERT(end_hp == hp);
+
+ return res;
+}
+
+static ERTS_INLINE Eterm
+unique_monotonic_integer_bif(Process *c_p, int positive)
+{
+ Uint64 raw;
+ Uint hsz;
+ Eterm *hp;
+
+ raw = get_raw_unique_monotonic_integer();
+ hsz = get_unique_monotonic_integer_heap_size(raw, positive);
+ hp = hsz ? HAlloc(c_p, hsz) : NULL;
+ return make_unique_monotonic_integer_value(hp, hsz, raw, positive);
+}
+
+Sint64
+erts_raw_get_unique_monotonic_integer(void)
+{
+ return get_raw_unique_monotonic_integer();
+}
+
+Uint
+erts_raw_unique_monotonic_integer_heap_size(Sint64 raw, int positive)
+{
+ return get_unique_monotonic_integer_heap_size(raw, positive);
+}
+
+Eterm
+erts_raw_make_unique_monotonic_integer_value(Eterm **hpp, Sint64 raw, int positive)
+{
+ Uint hsz = get_unique_monotonic_integer_heap_size(raw, positive);
+ Eterm res = make_unique_monotonic_integer_value(*hpp, hsz, raw, positive);
+ *hpp += hsz;
+ return res;
+}
+
+Sint64
+erts_get_min_unique_monotonic_integer(void)
+{
+ return ERTS_UNIQUE_MONOTONIC_OFFSET;
+}
+
+/* --- Debug --- */
+
+int
+erts_debug_set_unique_monotonic_integer_state(Eterm et_value)
+{
+ Sint64 value;
+
+ if (!term_to_Sint64(et_value, &value)) {
+ Uint64 uvalue;
+ if (!term_to_Uint64(et_value, &uvalue))
+ return 0;
+ value = (Sint64) uvalue;
+ }
+
+ erts_atomic64_set_mb(&raw_unique_monotonic_integer.w.value,
+ (erts_aint64_t) value);
+ return 1;
+}
+
+Eterm
+erts_debug_get_unique_monotonic_integer_state(Process *c_p)
+{
+ Uint64 value;
+ Eterm hsz, *hp;
+
+ value = (Uint64) erts_atomic64_read_mb(&raw_unique_monotonic_integer.w.value);
+
+ if (IS_USMALL(0, value))
+ return make_small(value);
+ hsz = ERTS_UINT64_HEAP_SIZE(value);
+ hp = HAlloc(c_p, hsz);
+ return erts_uint64_to_big(value, &hp);
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Initialization                                                     *
+\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+void
+erts_bif_unique_init(void)
+{
+ init_reference();
+ init_unique_monotonic_integer();
+ init_unique_integer();
+}
+
+void
+erts_sched_bif_unique_init(ErtsSchedulerData *esdp)
+{
+ esdp->unique = (Uint64) 0;
+ esdp->ref = (Uint64) ref_init_value;
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * The BIFs                                                           *
+\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+
+BIF_RETTYPE make_ref_0(BIF_ALIST_0)
+{
+ BIF_RETTYPE res;
+ Eterm* hp;
+
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P));
+
+ hp = HAlloc(BIF_P, ERTS_REF_THING_SIZE);
+
+ res = erts_sched_make_ref_in_buffer(erts_proc_sched_data(BIF_P), hp);
+
+ BIF_RET(res);
+}
+
+BIF_RETTYPE unique_integer_0(BIF_ALIST_0)
+{
+ BIF_RET(unique_integer_bif(BIF_P, 0));
+}
+
+BIF_RETTYPE unique_integer_1(BIF_ALIST_1)
+{
+ Eterm modlist = BIF_ARG_1;
+ int monotonic = 0;
+ int positive = 0;
+ BIF_RETTYPE res;
+
+ while (is_list(modlist)) {
+ Eterm *consp = list_val(modlist);
+ switch (CAR(consp)) {
+ case am_monotonic:
+ monotonic = 1;
+ break;
+ case am_positive:
+ positive = 1;
+ break;
+ default:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ modlist = CDR(consp);
+ }
+
+ if (is_not_nil(modlist))
+ BIF_ERROR(BIF_P, BADARG);
+
+ if (monotonic)
+ res = unique_monotonic_integer_bif(BIF_P, positive);
+ else
+ res = unique_integer_bif(BIF_P, positive);
+
+ BIF_RET(res);
+}
diff --git a/erts/emulator/beam/erl_bif_unique.h b/erts/emulator/beam/erl_bif_unique.h
new file mode 100644
index 0000000000..9aa631fde9
--- /dev/null
+++ b/erts/emulator/beam/erl_bif_unique.h
@@ -0,0 +1,440 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2014-2017. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERTS_BIF_UNIQUE_H__
+#define ERTS_BIF_UNIQUE_H__
+
+#include "erl_term.h"
+#include "erl_process.h"
+#include "big.h"
+#define ERTS_BINARY_TYPES_ONLY__
+#include "erl_binary.h"
+#undef ERTS_BINARY_TYPES_ONLY__
+
+void erts_bif_unique_init(void);
+void erts_sched_bif_unique_init(ErtsSchedulerData *esdp);
+
+/* reference */
+Eterm erts_make_ref(Process *);
+Eterm erts_make_ref_in_buffer(Eterm buffer[ERTS_REF_THING_SIZE]);
+void erts_make_ref_in_array(Uint32 ref[ERTS_REF_NUMBERS]);
+void erts_make_magic_ref_in_array(Uint32 ref[ERTS_REF_NUMBERS]);
+void erts_magic_ref_remove_bin(Uint32 refn[ERTS_REF_NUMBERS]);
+void erts_magic_ref_save_bin__(Eterm ref);
+ErtsMagicBinary *erts_magic_ref_lookup_bin__(Uint32 refn[ERTS_REF_NUMBERS]);
+
+
+/* strict monotonic counter */
+
+#define ERTS_MAX_UNIQUE_MONOTONIC_INTEGER_HEAP_SIZE ERTS_MAX_UINT64_HEAP_SIZE
+
+/*
+ * Note that a raw value is an intermediate value that does
+ * not necessarily correspond to the end result.
+ */
+Sint64 erts_raw_get_unique_monotonic_integer(void);
+Uint erts_raw_unique_monotonic_integer_heap_size(Sint64 raw, int positive);
+Eterm erts_raw_make_unique_monotonic_integer_value(Eterm **hpp, Sint64 raw,
+ int positive);
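+
+/* Hedged usage sketch (not part of the original header): the intended
+ * call sequence for the raw API above -- fetch a raw value, size the
+ * heap, then build the final term. The helper name is hypothetical. */
+static ERTS_INLINE Eterm
+make_monotonic_term(Process *c_p, int positive)
+{
+    Sint64 raw = erts_raw_get_unique_monotonic_integer();
+    Uint hsz = erts_raw_unique_monotonic_integer_heap_size(raw, positive);
+    Eterm *hp = hsz ? HAlloc(c_p, hsz) : NULL;
+    return erts_raw_make_unique_monotonic_integer_value(&hp, raw, positive);
+}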
+
+Sint64 erts_get_min_unique_monotonic_integer(void);
+
+int erts_debug_set_unique_monotonic_integer_state(Eterm et_value);
+Eterm erts_debug_get_unique_monotonic_integer_state(Process *c_p);
+
+/* unique integer */
+#define ERTS_UNIQUE_INT_RAW_VALUES 2
+#define ERTS_MAX_UNIQUE_INT_HEAP_SIZE ERTS_UINT64_ARRAY_TO_BIG_MAX_HEAP_SZ(2)
+
+Uint erts_raw_unique_integer_heap_size(Uint64 val[ERTS_UNIQUE_INT_RAW_VALUES],
+ int positive);
+Eterm erts_raw_make_unique_integer(Eterm **hpp,
+ Uint64 val[ERTS_UNIQUE_INT_RAW_VALUES],
+ int positive);
+void erts_raw_get_unique_integer(Uint64 val[ERTS_UNIQUE_INT_RAW_VALUES]);
+Sint64 erts_get_min_unique_integer(void);
+
+Eterm erts_debug_make_unique_integer(Process *c_p,
+ Eterm etval0,
+ Eterm etval1);
+
+
+ERTS_GLB_INLINE void erts_set_ref_numbers(Uint32 ref[ERTS_REF_NUMBERS],
+ Uint32 thr_id, Uint64 value);
+ERTS_GLB_INLINE Uint32 erts_get_ref_numbers_thr_id(Uint32 ref[ERTS_REF_NUMBERS]);
+ERTS_GLB_INLINE int erts_is_ref_numbers_magic(Uint32 ref[ERTS_REF_NUMBERS]);
+ERTS_GLB_INLINE Uint64 erts_get_ref_numbers_value(Uint32 ref[ERTS_REF_NUMBERS]);
+ERTS_GLB_INLINE void erts_sched_make_ref_in_array(ErtsSchedulerData *esdp,
+ Uint32 ref[ERTS_REF_NUMBERS]);
+ERTS_GLB_INLINE void erts_sched_make_magic_ref_in_array(ErtsSchedulerData *esdp,
+ Uint32 ref[ERTS_REF_NUMBERS]);
+ERTS_GLB_INLINE Eterm erts_sched_make_ref_in_buffer(ErtsSchedulerData *esdp,
+ Eterm buffer[ERTS_REF_THING_SIZE]);
+ERTS_GLB_INLINE Eterm erts_mk_magic_ref(Eterm **hpp, ErlOffHeap *ohp, Binary *mbp);
+ERTS_GLB_INLINE Binary *erts_magic_ref2bin(Eterm mref);
+ERTS_GLB_INLINE void erts_magic_ref_save_bin(Eterm ref);
+ERTS_GLB_INLINE ErtsMagicBinary *erts_magic_ref_lookup_bin(Uint32 ref[ERTS_REF_NUMBERS]);
+
+#define ERTS_REF1_MAGIC_MARKER_BIT_NO__ \
+ (_REF_NUM_SIZE-1)
+#define ERTS_REF1_MAGIC_MARKER_BIT__ \
+ (((Uint32) 1) << ERTS_REF1_MAGIC_MARKER_BIT_NO__)
+#define ERTS_REF1_THR_ID_MASK__ \
+ (ERTS_REF1_MAGIC_MARKER_BIT__-1)
+#define ERTS_REF1_NUM_MASK__ \
+ (~(ERTS_REF1_THR_ID_MASK__|ERTS_REF1_MAGIC_MARKER_BIT__))
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void
+erts_set_ref_numbers(Uint32 ref[ERTS_REF_NUMBERS], Uint32 thr_id, Uint64 value)
+{
+ /*
+ * We cannot use the thread id in the first 18-bit word, since
+ * the hash/phash/phash2 BIFs only hash on this word. If
+ * we did, we would get really poor hash values. Instead
+ * we have to shuffle the bits a bit.
+ */
+ ASSERT(thr_id == (thr_id & ((Uint32) ERTS_REF1_THR_ID_MASK__)));
+ ref[0] = (Uint32) (value & ((Uint64) REF_MASK));
+ ref[1] = (((Uint32) (value & ((Uint64) ERTS_REF1_NUM_MASK__)))
+ | (thr_id & ((Uint32) ERTS_REF1_THR_ID_MASK__)));
+ ref[2] = (Uint32) ((value >> 32) & ((Uint64) 0xffffffff));
+}
+
+ERTS_GLB_INLINE Uint32
+erts_get_ref_numbers_thr_id(Uint32 ref[ERTS_REF_NUMBERS])
+{
+ return ref[1] & ((Uint32) ERTS_REF1_THR_ID_MASK__);
+}
+
+ERTS_GLB_INLINE int
+erts_is_ref_numbers_magic(Uint32 ref[ERTS_REF_NUMBERS])
+{
+ return !!(ref[1] & ERTS_REF1_MAGIC_MARKER_BIT__);
+}
+
+ERTS_GLB_INLINE Uint64
+erts_get_ref_numbers_value(Uint32 ref[ERTS_REF_NUMBERS])
+{
+ ERTS_CT_ASSERT((ERTS_REF1_NUM_MASK__ | REF_MASK) == 0xffffffff);
+ ERTS_CT_ASSERT((ERTS_REF1_NUM_MASK__ & REF_MASK) == 0);
+
+ return (((((Uint64) ref[2]) & ((Uint64) 0xffffffff)) << 32)
+ | (((Uint64) ref[1]) & ((Uint64) ERTS_REF1_NUM_MASK__))
+ | (((Uint64) ref[0]) & ((Uint64) REF_MASK)));
+}
+
+ERTS_GLB_INLINE void
+erts_sched_make_ref_in_array(ErtsSchedulerData *esdp,
+ Uint32 ref[ERTS_REF_NUMBERS])
+{
+ Uint64 value;
+
+ ASSERT(esdp);
+ value = esdp->ref++;
+ erts_set_ref_numbers(ref, (Uint32) esdp->thr_id, value);
+}
+
+ERTS_GLB_INLINE void
+erts_sched_make_magic_ref_in_array(ErtsSchedulerData *esdp,
+ Uint32 ref[ERTS_REF_NUMBERS])
+{
+ erts_sched_make_ref_in_array(esdp, ref);
+ ASSERT(!(ref[1] & ERTS_REF1_MAGIC_MARKER_BIT__));
+ ref[1] |= ERTS_REF1_MAGIC_MARKER_BIT__;
+}
+
+ERTS_GLB_INLINE Eterm
+erts_sched_make_ref_in_buffer(ErtsSchedulerData *esdp,
+ Eterm buffer[ERTS_REF_THING_SIZE])
+{
+ Eterm* hp = buffer;
+ Uint32 ref[ERTS_REF_NUMBERS];
+
+ erts_sched_make_ref_in_array(esdp, ref);
+ write_ref_thing(hp, ref[0], ref[1], ref[2]);
+ return make_internal_ref(hp);
+}
+
+ERTS_GLB_INLINE Eterm
+erts_mk_magic_ref(Eterm **hpp, ErlOffHeap *ohp, Binary *bp)
+{
+ Eterm *hp = *hpp;
+ ASSERT(bp->intern.flags & BIN_FLAG_MAGIC);
+ write_magic_ref_thing(hp, ohp, (ErtsMagicBinary *) bp);
+ *hpp += ERTS_MAGIC_REF_THING_SIZE;
+ erts_refc_inc(&bp->intern.refc, 1);
+ OH_OVERHEAD(ohp, bp->orig_size / sizeof(Eterm));
+ return make_internal_ref(hp);
+}
+
+ERTS_GLB_INLINE Binary *
+erts_magic_ref2bin(Eterm mref)
+{
+ ErtsMRefThing *mrtp;
+ ASSERT(is_internal_magic_ref(mref));
+ mrtp = (ErtsMRefThing *) internal_ref_val(mref);
+ return (Binary *) mrtp->mb;
+}
+
+/*
+ * Save the magic binary of a ref when the
+ * ref is exposed to the outside world...
+ */
+ERTS_GLB_INLINE void
+erts_magic_ref_save_bin(Eterm ref)
+{
+ if (is_internal_magic_ref(ref))
+ erts_magic_ref_save_bin__(ref);
+}
+
+/*
+ * Look up the magic binary of a magic ref
+ * when the ref comes from the outside world...
+ */
+ERTS_GLB_INLINE ErtsMagicBinary *
+erts_magic_ref_lookup_bin(Uint32 ref[ERTS_REF_NUMBERS])
+{
+ if (!erts_is_ref_numbers_magic(ref))
+ return NULL;
+ return erts_magic_ref_lookup_bin__(ref);
+}
+
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+
+/*
+ * Storage of internal refs in misc structures...
+ */
+
+#include "erl_message.h"
+
+#if ERTS_REF_NUMBERS != 3
+# error fix this...
+#endif
+
+ERTS_GLB_INLINE int erts_internal_ref_number_cmp(Uint32 num1[ERTS_REF_NUMBERS],
+ Uint32 num2[ERTS_REF_NUMBERS]);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE int
+erts_internal_ref_number_cmp(Uint32 num1[ERTS_REF_NUMBERS],
+ Uint32 num2[ERTS_REF_NUMBERS])
+{
+ if (num1[2] != num2[2])
+ return (int) ((Sint64) num1[2] - (Sint64) num2[2]);
+ if (num1[1] != num2[1])
+ return (int) ((Sint64) num1[1] - (Sint64) num2[1]);
+ if (num1[0] != num2[0])
+ return (int) ((Sint64) num1[0] - (Sint64) num2[0]);
+ return 0;
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+/* Iref storage for all internal references... */
+typedef struct {
+ Uint32 is_magic;
+ union {
+ ErtsMagicBinary *mb;
+ Uint32 num[ERTS_REF_NUMBERS];
+ } u;
+} ErtsIRefStorage;
+
+void erts_ref_bin_free(ErtsMagicBinary *mb);
+
+ERTS_GLB_INLINE void erts_iref_storage_save(ErtsIRefStorage *iref, Eterm ref);
+ERTS_GLB_INLINE void erts_iref_storage_clean(ErtsIRefStorage *iref);
+ERTS_GLB_INLINE Uint erts_iref_storage_heap_size(ErtsIRefStorage *iref);
+ERTS_GLB_INLINE Eterm erts_iref_storage_make_ref(ErtsIRefStorage *iref,
+ Eterm **hpp, ErlOffHeap *ohp,
+ int clean_storage);
+ERTS_GLB_INLINE int erts_iref_storage_cmp(ErtsIRefStorage *iref1,
+ ErtsIRefStorage *iref2);
+
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void
+erts_iref_storage_save(ErtsIRefStorage *iref, Eterm ref)
+{
+ Eterm *hp;
+
+ ERTS_CT_ASSERT(ERTS_REF_NUMBERS == 3);
+ ASSERT(is_internal_ref(ref));
+
+ hp = boxed_val(ref);
+
+ if (is_ordinary_ref_thing(hp)) {
+ ErtsORefThing *rtp = (ErtsORefThing *) hp;
+ iref->is_magic = 0;
+ iref->u.num[0] = rtp->num[0];
+ iref->u.num[1] = rtp->num[1];
+ iref->u.num[2] = rtp->num[2];
+ }
+ else {
+ ErtsMRefThing *mrtp = (ErtsMRefThing *) hp;
+ ASSERT(is_magic_ref_thing(hp));
+ iref->is_magic = 1;
+ iref->u.mb = mrtp->mb;
+ erts_refc_inc(&mrtp->mb->intern.refc, 1);
+ }
+}
+
+ERTS_GLB_INLINE void
+erts_iref_storage_clean(ErtsIRefStorage *iref)
+{
+ if (iref->is_magic && erts_refc_dectest(&iref->u.mb->intern.refc, 0) == 0)
+ erts_ref_bin_free(iref->u.mb);
+#ifdef DEBUG
+ memset((void *) iref, 0xf, sizeof(ErtsIRefStorage));
+#endif
+}
+
+ERTS_GLB_INLINE Uint
+erts_iref_storage_heap_size(ErtsIRefStorage *iref)
+{
+ return iref->is_magic ? ERTS_MAGIC_REF_THING_SIZE : ERTS_REF_THING_SIZE;
+}
+
+ERTS_GLB_INLINE Eterm
+erts_iref_storage_make_ref(ErtsIRefStorage *iref,
+ Eterm **hpp, ErlOffHeap *ohp,
+ int clean_storage)
+{
+ Eterm *hp = *hpp;
+ if (!iref->is_magic) {
+ write_ref_thing(hp, iref->u.num[0], iref->u.num[1],
+ iref->u.num[2]);
+ *hpp += ERTS_REF_THING_SIZE;
+ }
+ else {
+ write_magic_ref_thing(hp, ohp, iref->u.mb);
+ OH_OVERHEAD(ohp, iref->u.mb->orig_size / sizeof(Eterm));
+ *hpp += ERTS_MAGIC_REF_THING_SIZE;
+ /*
+ * If we clean storage, the term inherits the
+ * refc increment of the cleaned storage...
+ */
+ if (!clean_storage)
+ erts_refc_inc(&iref->u.mb->intern.refc, 1);
+ }
+
+#ifdef DEBUG
+ if (clean_storage)
+ memset((void *) iref, 0xf, sizeof(ErtsIRefStorage));
+#endif
+
+ return make_internal_ref(hp);
+}
+
+ERTS_GLB_INLINE int
+erts_iref_storage_cmp(ErtsIRefStorage *iref1,
+ ErtsIRefStorage *iref2)
+{
+ Uint32 *num1 = iref1->is_magic ? iref1->u.mb->refn : iref1->u.num;
+ Uint32 *num2 = iref2->is_magic ? iref2->u.mb->refn : iref2->u.num;
+ return erts_internal_ref_number_cmp(num1, num2);
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+
+/* OIref storage for ordinary internal references only... */
+typedef struct {
+ Uint32 num[ERTS_REF_NUMBERS];
+} ErtsOIRefStorage;
+
+ERTS_GLB_INLINE void erts_oiref_storage_save(ErtsOIRefStorage *oiref,
+ Eterm ref);
+ERTS_GLB_INLINE Eterm erts_oiref_storage_make_ref(ErtsOIRefStorage *oiref,
+ Eterm **hpp);
+ERTS_GLB_INLINE int erts_oiref_storage_cmp(ErtsOIRefStorage *oiref1,
+ ErtsOIRefStorage *oiref2);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void
+erts_oiref_storage_save(ErtsOIRefStorage *oiref, Eterm ref)
+{
+ ErtsORefThing *rtp;
+ ERTS_CT_ASSERT(ERTS_REF_NUMBERS == 3);
+ ASSERT(is_internal_ordinary_ref(ref));
+
+ rtp = (ErtsORefThing *) internal_ref_val(ref);
+
+ oiref->num[0] = rtp->num[0];
+ oiref->num[1] = rtp->num[1];
+ oiref->num[2] = rtp->num[2];
+}
+
+ERTS_GLB_INLINE Eterm
+erts_oiref_storage_make_ref(ErtsOIRefStorage *oiref, Eterm **hpp)
+{
+ Eterm *hp = *hpp;
+ ERTS_CT_ASSERT(ERTS_REF_NUMBERS == 3);
+ write_ref_thing(hp, oiref->num[0], oiref->num[1], oiref->num[2]);
+ *hpp += ERTS_REF_THING_SIZE;
+ return make_internal_ref(hp);
+}
+
+ERTS_GLB_INLINE int
+erts_oiref_storage_cmp(ErtsOIRefStorage *oiref1,
+ ErtsOIRefStorage *oiref2)
+{
+ return erts_internal_ref_number_cmp(oiref1->num, oiref2->num);
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+
+ERTS_GLB_INLINE Eterm
+erts_proc_store_ref(Process *c_p, Uint32 ref[ERTS_MAX_REF_NUMBERS]);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE Eterm
+erts_proc_store_ref(Process *c_p, Uint32 ref[ERTS_MAX_REF_NUMBERS])
+{
+ Eterm *hp = HAlloc(c_p, ERTS_REF_THING_SIZE);
+ write_ref_thing(hp, ref[0], ref[1], ref[2]);
+ return make_internal_ref(hp);
+}
+
+#endif
+
+#endif /* ERTS_BIF_UNIQUE_H__ */
+
+#if (defined(ERTS_ALLOC_C__) || defined(ERL_BIF_UNIQUE_C__)) \
+ && !defined(ERTS_BIF_UNIQUE_H__FIX_ALLOC_TYPES__)
+#define ERTS_BIF_UNIQUE_H__FIX_ALLOC_TYPES__
+
+#include "hash.h"
+
+typedef struct {
+ HashBucket hash;
+ ErtsMagicBinary *mb;
+ Uint64 value;
+} ErtsNSchedMagicRefTableEntry;
+
+#endif
diff --git a/erts/emulator/beam/erl_binary.h b/erts/emulator/beam/erl_binary.h
index 819b19e566..05007e864e 100644
--- a/erts/emulator/beam/erl_binary.h
+++ b/erts/emulator/beam/erl_binary.h
@@ -1,50 +1,170 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
-#ifndef __ERL_BINARY_H
-#define __ERL_BINARY_H
+#ifndef ERL_BINARY_H__TYPES__
+#define ERL_BINARY_H__TYPES__
-#include "erl_threads.h"
-#include "bif.h"
+/*
+** Just like the driver binary, but with initial flags.
+** Note that the two structures Binary and ErlDrvBinary HAVE to
+** be equal except for extra fields at the beginning of the struct.
+** ErlDrvBinary is defined in erl_driver.h.
+** When driver_alloc_binary is called, a Binary is allocated, but
+** the pointer returned is to the address of the first element that
+** also occurs in the ErlDrvBinary struct (driver.*binary takes care of this).
+** The driver need never know about additions to the internal Binary of the
+** emulator. One should however NEVER be sloppy when mixing ErlDrvBinary
+** and Binary; the macros below can convert one type to the other, as the
+** two are in reality identical.
+*/
+
+#ifdef ARCH_32
+ /* *DO NOT USE* only for alignment. */
+#define ERTS_BINARY_STRUCT_ALIGNMENT Uint32 align__;
+#else
+#define ERTS_BINARY_STRUCT_ALIGNMENT
+#endif
+
+/* Add fields in binary_internals, otherwise the drivers crash */
+struct binary_internals {
+ UWord flags;
+ erts_refc_t refc;
+ ERTS_BINARY_STRUCT_ALIGNMENT
+};
+
+
+typedef struct binary {
+ struct binary_internals intern;
+ SWord orig_size;
+ char orig_bytes[1]; /* to be continued */
+} Binary;
+
+#define ERTS_SIZEOF_Binary(Sz) \
+ (offsetof(Binary,orig_bytes) + (Sz))
+
+#if ERTS_REF_NUMBERS != 3
+#error "Update ErtsMagicBinary"
+#endif
+
+typedef struct magic_binary ErtsMagicBinary;
+struct magic_binary {
+ struct binary_internals intern;
+ SWord orig_size;
+ int (*destructor)(Binary *);
+ Uint32 refn[ERTS_REF_NUMBERS];
+ ErtsAlcType_t alloc_type;
+ union {
+ struct {
+ ERTS_BINARY_STRUCT_ALIGNMENT
+ char data[1];
+ } aligned;
+ struct {
+ char data[1];
+ } unaligned;
+ } u;
+};
+
+#define ERTS_MAGIC_BIN_BYTES_TO_ALIGN \
+ (offsetof(ErtsMagicBinary,u.aligned.data) - \
+ offsetof(ErtsMagicBinary,u.unaligned.data))
+
+typedef union {
+ Binary binary;
+ ErtsMagicBinary magic_binary;
+ struct {
+ struct binary_internals intern;
+ ErlDrvBinary binary;
+ } driver;
+} ErtsBinary;
/*
- * Maximum number of bytes to place in a heap binary.
+ * 'Binary' alignment:
+ * Address of orig_bytes[0] of a Binary should always be 8-byte aligned.
+ * It is assumed that the flags, refc, and orig_size fields are 4 bytes on
+ * 32-bit architectures and 8 bytes on 64-bit architectures.
*/
-#define ERL_ONHEAP_BIN_LIMIT 64
+#define ERTS_MAGIC_BIN_REFN(BP) \
+ ((ErtsBinary *) (BP))->magic_binary.refn
+#define ERTS_MAGIC_BIN_ATYPE(BP) \
+ ((ErtsBinary *) (BP))->magic_binary.alloc_type
+#define ERTS_MAGIC_DATA_OFFSET \
+ (offsetof(ErtsMagicBinary,u.aligned.data) - offsetof(Binary,orig_bytes))
+#define ERTS_MAGIC_BIN_DESTRUCTOR(BP) \
+ ((ErtsBinary *) (BP))->magic_binary.destructor
+#define ERTS_MAGIC_BIN_DATA(BP) \
+ ((void *) ((ErtsBinary *) (BP))->magic_binary.u.aligned.data)
+#define ERTS_MAGIC_BIN_DATA_SIZE(BP) \
+ ((BP)->orig_size - ERTS_MAGIC_DATA_OFFSET)
+#define ERTS_MAGIC_BIN_ORIG_SIZE(Sz) \
+ (ERTS_MAGIC_DATA_OFFSET + (Sz))
+#define ERTS_MAGIC_BIN_SIZE(Sz) \
+ (offsetof(ErtsMagicBinary,u.aligned.data) + (Sz))
+#define ERTS_MAGIC_BIN_FROM_DATA(DATA) \
+ ((ErtsBinary*)((char*)(DATA) - offsetof(ErtsMagicBinary,u.aligned.data)))
+
+/* On 32-bit arch these macro variants will save memory
+ by not forcing 8-byte alignment for the magic payload.
+*/
+#define ERTS_MAGIC_BIN_UNALIGNED_DATA(BP) \
+ ((void *) ((ErtsBinary *) (BP))->magic_binary.u.unaligned.data)
+#define ERTS_MAGIC_UNALIGNED_DATA_OFFSET \
+ (offsetof(ErtsMagicBinary,u.unaligned.data) - offsetof(Binary,orig_bytes))
+#define ERTS_MAGIC_BIN_UNALIGNED_DATA_SIZE(BP) \
+ ((BP)->orig_size - ERTS_MAGIC_UNALIGNED_DATA_OFFSET)
+#define ERTS_MAGIC_BIN_UNALIGNED_ORIG_SIZE(Sz) \
+ (ERTS_MAGIC_UNALIGNED_DATA_OFFSET + (Sz))
+#define ERTS_MAGIC_BIN_UNALIGNED_SIZE(Sz) \
+ (offsetof(ErtsMagicBinary,u.unaligned.data) + (Sz))
+#define ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(DATA) \
+ ((ErtsBinary*)((char*)(DATA) - offsetof(ErtsMagicBinary,u.unaligned.data)))
+
+
+#define Binary2ErlDrvBinary(B) (&((ErtsBinary *) (B))->driver.binary)
+#define ErlDrvBinary2Binary(D) ((Binary *) \
+ (((char *) (D)) \
+ - offsetof(ErtsBinary, driver.binary)))
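+
+/* Hedged sketch (not part of the original header): since ErlDrvBinary
+ * overlays the tail of Binary, the two conversion macros above are
+ * exact inverses; this hypothetical helper illustrates the round trip. */
+static ERTS_INLINE int
+binary_roundtrip_ok(Binary *bp)
+{
+    ErlDrvBinary *dbp = Binary2ErlDrvBinary(bp);
+    return ErlDrvBinary2Binary(dbp) == bp;
+}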
+
+/* A "magic" binary flag */
+#define BIN_FLAG_MAGIC 1
+#define BIN_FLAG_USR1 2 /* Reserved for use by different modules to mark */
+#define BIN_FLAG_USR2 4 /* certain binaries as special (used by ets) */
+#define BIN_FLAG_DRV 8
+
+#endif /* ERL_BINARY_H__TYPES__ */
+
+#if !defined(ERL_BINARY_H__) && !defined(ERTS_BINARY_TYPES_ONLY__)
+#define ERL_BINARY_H__
+
+#include "erl_threads.h"
+#include "bif.h"
+#include "erl_bif_unique.h"
+#include "erl_bits.h"
/*
- * This structure represents a SUB_BINARY.
- *
- * Note: The last field (orig) is not counted in arityval in the header.
- * This simplifies garbage collection.
+ * Maximum number of bytes to place in a heap binary.
*/
-typedef struct erl_sub_bin {
- Eterm thing_word; /* Subtag SUB_BINARY_SUBTAG. */
- Uint size; /* Binary size in bytes. */
- Uint offs; /* Offset into original binary. */
- byte bitsize;
- byte bitoffs;
- byte is_writable; /* The underlying binary is writable */
- Eterm orig; /* Original binary (REFC or HEAP binary). */
-} ErlSubBin;
+#define ERL_ONHEAP_BIN_LIMIT 64
#define ERL_SUB_BIN_SIZE (sizeof(ErlSubBin)/sizeof(Eterm))
#define HEADER_SUB_BIN _make_header(ERL_SUB_BIN_SIZE-2,_TAG_HEADER_SUB_BIN)
@@ -71,7 +191,6 @@ typedef struct erl_heap_bin {
*/
#define binary_size(Bin) (binary_val(Bin)[1])
-#define binary_size_rel(Bin,BasePtr) (binary_val_rel(Bin,BasePtr)[1])
#define binary_bitsize(Bin) \
((*binary_val(Bin) == HEADER_SUB_BIN) ? \
@@ -94,12 +213,9 @@ typedef struct erl_heap_bin {
* Bitsize: output variable (Uint)
*/
-#define ERTS_GET_BINARY_BYTES(Bin,Bytep,Bitoffs,Bitsize) \
- ERTS_GET_BINARY_BYTES_REL(Bin,Bytep,Bitoffs,Bitsize,NULL)
-
-#define ERTS_GET_BINARY_BYTES_REL(Bin,Bytep,Bitoffs,Bitsize,BasePtr) \
+#define ERTS_GET_BINARY_BYTES(Bin,Bytep,Bitoffs,Bitsize) \
do { \
- Eterm* _real_bin = binary_val_rel(Bin,BasePtr); \
+ Eterm* _real_bin = binary_val(Bin); \
Uint _offs = 0; \
Bitoffs = Bitsize = 0; \
if (*_real_bin == HEADER_SUB_BIN) { \
@@ -107,7 +223,7 @@ do { \
_offs = _sb->offs; \
Bitoffs = _sb->bitoffs; \
Bitsize = _sb->bitsize; \
- _real_bin = binary_val_rel(_sb->orig,BasePtr); \
+ _real_bin = binary_val(_sb->orig); \
} \
if (*_real_bin == HEADER_PROC_BIN) { \
Bytep = ((ProcBin *) _real_bin)->bytes + _offs; \
@@ -130,11 +246,8 @@ do { \
*/
#define ERTS_GET_REAL_BIN(Bin, RealBin, ByteOffset, BitOffset, BitSize) \
- ERTS_GET_REAL_BIN_REL(Bin, RealBin, ByteOffset, BitOffset, BitSize, NULL)
-
-#define ERTS_GET_REAL_BIN_REL(Bin, RealBin, ByteOffset, BitOffset, BitSize, BasePtr) \
do { \
- ErlSubBin* _sb = (ErlSubBin *) binary_val_rel(Bin,BasePtr); \
+ ErlSubBin* _sb = (ErlSubBin *) binary_val(Bin); \
if (_sb->thing_word == HEADER_SUB_BIN) { \
RealBin = _sb->orig; \
ByteOffset = _sb->offs; \
@@ -166,11 +279,22 @@ Eterm erts_bin_bytes_to_list(Eterm previous, Eterm* hp, byte* bytes, Uint size,
* Common implementation for erlang:list_to_binary/1 and binary:list_to_bin/1
*/
-BIF_RETTYPE erts_list_to_binary_bif(Process *p, Eterm arg);
+BIF_RETTYPE erts_list_to_binary_bif(Process *p, Eterm arg, Export *bif);
BIF_RETTYPE erts_gc_binary_part(Process *p, Eterm *reg, Eterm live, int range_is_tuple);
BIF_RETTYPE erts_binary_part(Process *p, Eterm binary, Eterm epos, Eterm elen);
+typedef union {
+ /*
+ * These two are almost always of
+ * the same size, but when fallback
+ * atomics are used they might
+ * differ in size.
+ */
+ erts_atomic_t smp_atomic_word;
+ erts_atomic_t atomic_word;
+} ErtsMagicIndirectionWord;
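+
+The union reserves one payload slot big enough for either atomic representation: sizeof a union is the size of its largest member, so the slot is safe regardless of which flavor the build uses. A trivial standalone illustration (demo types, not ERTS code):

    #include <assert.h>

    typedef union { unsigned int narrow; unsigned long long wide; } demo_word;

    int main(void) {
        /* the union is at least as large as its largest member */
        assert(sizeof(demo_word) >= sizeof(unsigned long long));
        return 0;
    }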
+
#if defined(__i386__) || !defined(__GNUC__)
/*
* Doubles aren't required to be 8-byte aligned on intel x86.
@@ -194,8 +318,16 @@ ERTS_GLB_INLINE Binary *erts_bin_nrml_alloc(Uint size);
ERTS_GLB_INLINE Binary *erts_bin_realloc_fnf(Binary *bp, Uint size);
ERTS_GLB_INLINE Binary *erts_bin_realloc(Binary *bp, Uint size);
ERTS_GLB_INLINE void erts_bin_free(Binary *bp);
+ERTS_GLB_INLINE void erts_bin_release(Binary *bp);
+ERTS_GLB_INLINE Binary *erts_create_magic_binary_x(Uint size,
+ int (*destructor)(Binary *),
+ ErtsAlcType_t alloc_type,
+ int unaligned);
ERTS_GLB_INLINE Binary *erts_create_magic_binary(Uint size,
- void (*destructor)(Binary *));
+ int (*destructor)(Binary *));
+ERTS_GLB_INLINE Binary *erts_create_magic_indirection(int (*destructor)(Binary *));
+ERTS_GLB_INLINE erts_atomic_t *erts_binary_to_magic_indirection(Binary *bp);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -231,35 +363,61 @@ erts_free_aligned_binary_bytes(byte* buf)
# define CHICKEN_PAD (sizeof(void*) - 1)
#endif
+/* Initializes 'flags', 'orig_size' and 'refc' (to 1). */
ERTS_GLB_INLINE Binary *
erts_bin_drv_alloc_fnf(Uint size)
{
Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
- void *res;
+ Binary *res;
+
+ if (bsize < size) /* overflow */
+ return NULL;
res = erts_alloc_fnf(ERTS_ALC_T_DRV_BINARY, bsize);
ERTS_CHK_BIN_ALIGNMENT(res);
- return (Binary *) res;
+ if (res) {
+ res->orig_size = size;
+ res->intern.flags = BIN_FLAG_DRV;
+ erts_refc_init(&res->intern.refc, 1);
+ }
+ return res;
}
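
The `bsize < size` test works because bsize is size plus a constant header and pad: with unsigned arithmetic the sum wraps modulo 2^N exactly when it would exceed the type's range, leaving a result smaller than either operand. A standalone sketch of the same check (names illustrative):

    #include <stdint.h>

    /* returns nonzero iff size + header wraps around */
    static int would_overflow(uint64_t size, uint64_t header) {
        return size + header < size;
    }

    /* would_overflow(UINT64_MAX - 8, 24) == 1: the fnf variant returns
     * NULL instead of handing back a block smaller than requested. */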
+/* Initializes 'flags', 'orig_size' and 'refc' (to 1). */
ERTS_GLB_INLINE Binary *
erts_bin_drv_alloc(Uint size)
{
Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
- void *res;
+ Binary *res;
+
+ if (bsize < size) /* overflow */
+ erts_alloc_enomem(ERTS_ALC_T_DRV_BINARY, size);
res = erts_alloc(ERTS_ALC_T_DRV_BINARY, bsize);
ERTS_CHK_BIN_ALIGNMENT(res);
- return (Binary *) res;
+ res->orig_size = size;
+ res->intern.flags = BIN_FLAG_DRV;
+ erts_refc_init(&res->intern.refc, 1);
+ return res;
}
+/* Initializes 'flags', 'orig_size' and 'refc' (to 1). */
ERTS_GLB_INLINE Binary *
erts_bin_nrml_alloc(Uint size)
{
Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
- void *res;
+ Binary *res;
+
+ if (bsize < size) /* overflow */
+ erts_alloc_enomem(ERTS_ALC_T_BINARY, size);
res = erts_alloc(ERTS_ALC_T_BINARY, bsize);
ERTS_CHK_BIN_ALIGNMENT(res);
- return (Binary *) res;
+ res->orig_size = size;
+ res->intern.flags = 0;
+ erts_refc_init(&res->intern.refc, 1);
+ return res;
}
ERTS_GLB_INLINE Binary *
@@ -267,12 +425,15 @@ erts_bin_realloc_fnf(Binary *bp, Uint size)
{
Binary *nbp;
Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
- ASSERT((bp->flags & BIN_FLAG_MAGIC) == 0);
- if (bp->flags & BIN_FLAG_DRV)
- nbp = erts_realloc_fnf(ERTS_ALC_T_DRV_BINARY, (void *) bp, bsize);
- else
- nbp = erts_realloc_fnf(ERTS_ALC_T_BINARY, (void *) bp, bsize);
+ ErtsAlcType_t type = (bp->intern.flags & BIN_FLAG_DRV) ? ERTS_ALC_T_DRV_BINARY
+ : ERTS_ALC_T_BINARY;
+ ASSERT((bp->intern.flags & BIN_FLAG_MAGIC) == 0);
+ if (bsize < size) /* overflow */
+ return NULL;
+ nbp = erts_realloc_fnf(type, (void *) bp, bsize);
ERTS_CHK_BIN_ALIGNMENT(nbp);
+ if (nbp)
+ nbp->orig_size = size;
return nbp;
}
@@ -281,47 +442,93 @@ erts_bin_realloc(Binary *bp, Uint size)
{
Binary *nbp;
Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
- ASSERT((bp->flags & BIN_FLAG_MAGIC) == 0);
- if (bp->flags & BIN_FLAG_DRV)
- nbp = erts_realloc_fnf(ERTS_ALC_T_DRV_BINARY, (void *) bp, bsize);
- else
- nbp = erts_realloc_fnf(ERTS_ALC_T_BINARY, (void *) bp, bsize);
+ ErtsAlcType_t type = (bp->intern.flags & BIN_FLAG_DRV) ? ERTS_ALC_T_DRV_BINARY
+ : ERTS_ALC_T_BINARY;
+ ASSERT((bp->intern.flags & BIN_FLAG_MAGIC) == 0);
+ if (bsize < size) /* overflow */
+ erts_realloc_enomem(type, bp, size);
+ nbp = erts_realloc_fnf(type, (void *) bp, bsize);
if (!nbp)
- erts_realloc_n_enomem(ERTS_ALC_T2N(bp->flags & BIN_FLAG_DRV
- ? ERTS_ALC_T_DRV_BINARY
- : ERTS_ALC_T_BINARY),
- bp,
- bsize);
+ erts_realloc_enomem(type, bp, bsize);
ERTS_CHK_BIN_ALIGNMENT(nbp);
+ nbp->orig_size = size;
return nbp;
}
ERTS_GLB_INLINE void
erts_bin_free(Binary *bp)
{
- if (bp->flags & BIN_FLAG_MAGIC)
- ERTS_MAGIC_BIN_DESTRUCTOR(bp)(bp);
- if (bp->flags & BIN_FLAG_DRV)
+ if (bp->intern.flags & BIN_FLAG_MAGIC) {
+ if (!ERTS_MAGIC_BIN_DESTRUCTOR(bp)(bp)) {
+ /* Destructor took control of the deallocation */
+ return;
+ }
+ erts_magic_ref_remove_bin(ERTS_MAGIC_BIN_REFN(bp));
+ erts_free(ERTS_MAGIC_BIN_ATYPE(bp), (void *) bp);
+ }
+ else if (bp->intern.flags & BIN_FLAG_DRV)
erts_free(ERTS_ALC_T_DRV_BINARY, (void *) bp);
else
erts_free(ERTS_ALC_T_BINARY, (void *) bp);
}
+ERTS_GLB_INLINE void
+erts_bin_release(Binary *bp)
+{
+ if (erts_refc_dectest(&bp->intern.refc, 0) == 0) {
+ erts_bin_free(bp);
+ }
+}
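+
+erts_bin_release folds the common "decrement refc, free on zero" sequence into one call; the erl_bits.c hunk later in this patch swaps an open-coded pair for it. Side by side:

    /* before */
    if (erts_refc_dectest(&binp->intern.refc, 0) == 0)
        erts_bin_free(binp);

    /* after */
    erts_bin_release(binp);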
+
ERTS_GLB_INLINE Binary *
-erts_create_magic_binary(Uint size, void (*destructor)(Binary *))
+erts_create_magic_binary_x(Uint size, int (*destructor)(Binary *),
+ ErtsAlcType_t alloc_type,
+ int unaligned)
{
- Uint bsize = ERTS_MAGIC_BIN_SIZE(size);
- Binary* bptr = erts_alloc_fnf(ERTS_ALC_T_BINARY, bsize);
+ Uint bsize = unaligned ? ERTS_MAGIC_BIN_UNALIGNED_SIZE(size)
+ : ERTS_MAGIC_BIN_SIZE(size);
+ Binary* bptr = erts_alloc_fnf(alloc_type, bsize);
+ ASSERT(bsize > size);
if (!bptr)
- erts_alloc_n_enomem(ERTS_ALC_T2N(ERTS_ALC_T_BINARY), bsize);
+ erts_alloc_n_enomem(ERTS_ALC_T2N(alloc_type), bsize);
ERTS_CHK_BIN_ALIGNMENT(bptr);
- bptr->flags = BIN_FLAG_MAGIC;
- bptr->orig_size = ERTS_MAGIC_BIN_ORIG_SIZE(size);
- erts_refc_init(&bptr->refc, 0);
+ bptr->intern.flags = BIN_FLAG_MAGIC;
+ bptr->orig_size = unaligned ? ERTS_MAGIC_BIN_UNALIGNED_ORIG_SIZE(size)
+ : ERTS_MAGIC_BIN_ORIG_SIZE(size);
+ erts_refc_init(&bptr->intern.refc, 0);
ERTS_MAGIC_BIN_DESTRUCTOR(bptr) = destructor;
+ ERTS_MAGIC_BIN_ATYPE(bptr) = alloc_type;
+ erts_make_magic_ref_in_array(ERTS_MAGIC_BIN_REFN(bptr));
return bptr;
}
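
Note the changed destructor contract visible in erts_bin_free above: a magic-binary destructor now returns int, nonzero meaning "proceed with deallocation" and zero meaning the destructor took ownership of the memory. A hedged sketch of a conforming destructor (MyState and my_state_cleanup are illustrative, not ERTS APIs):

    static int my_magic_destructor(Binary *bp)
    {
        MyState *state = ERTS_MAGIC_BIN_DATA(bp); /* aligned payload */
        my_state_cleanup(state);  /* release resources held in the payload */
        return 1;                 /* let erts_bin_free() free the Binary */
    }

    Binary *mbp = erts_create_magic_binary(sizeof(MyState),
                                           my_magic_destructor);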
+ERTS_GLB_INLINE Binary *
+erts_create_magic_binary(Uint size, int (*destructor)(Binary *))
+{
+ return erts_create_magic_binary_x(size, destructor,
+ ERTS_ALC_T_BINARY, 0);
+}
+
+ERTS_GLB_INLINE Binary *
+erts_create_magic_indirection(int (*destructor)(Binary *))
+{
+ return erts_create_magic_binary_x(sizeof(ErtsMagicIndirectionWord),
+ destructor,
+ ERTS_ALC_T_MINDIRECTION,
+ 1); /* Not 64-bit aligned,
+ but word aligned */
+}
+
+ERTS_GLB_INLINE erts_atomic_t *
+erts_binary_to_magic_indirection(Binary *bp)
+{
+ ErtsMagicIndirectionWord *mip;
+ ASSERT(bp->intern.flags & BIN_FLAG_MAGIC);
+ ASSERT(ERTS_MAGIC_BIN_ATYPE(bp) == ERTS_ALC_T_MINDIRECTION);
+ mip = ERTS_MAGIC_BIN_UNALIGNED_DATA(bp);
+ return &mip->smp_atomic_word;
+}
+
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-#endif
+#endif /* !ERL_BINARY_H__ */
diff --git a/erts/emulator/beam/erl_bits.c b/erts/emulator/beam/erl_bits.c
index 73765772c8..3a16913473 100644
--- a/erts/emulator/beam/erl_bits.c
+++ b/erts/emulator/beam/erl_bits.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -31,15 +32,6 @@
#include "erl_bits.h"
#include "erl_binary.h"
-#ifdef MAX
-#undef MAX
-#endif
-#define MAX(x,y) (((x)>(y))?(x):(y))
-#ifdef MIN
-#undef MIN
-#endif
-#define MIN(x,y) (((x)<(y))?(x):(y))
-
#if defined(WORDS_BIGENDIAN)
# define BIT_ENDIAN_MACHINE 0
#else
@@ -63,30 +55,19 @@
static byte get_bit(byte b, size_t a_offs);
-#if defined(ERTS_SMP)
/* the state resides in the current process' scheduler data */
-#elif defined(ERL_BITS_REENTRANT)
-/* reentrant API but with a hidden single global state, for testing only */
-struct erl_bits_state ErlBitsState_;
-#else
-/* non-reentrant API with a single global state */
-struct erl_bits_state ErlBitsState;
-#endif
#define byte_buf (ErlBitsState.byte_buf_)
#define byte_buf_len (ErlBitsState.byte_buf_len_)
-static erts_smp_atomic_t bits_bufs_size;
+static erts_atomic_t bits_bufs_size;
Uint
erts_bits_bufs_size(void)
{
- return (Uint) erts_smp_atomic_read_nob(&bits_bufs_size);
+ return (Uint) erts_atomic_read_nob(&bits_bufs_size);
}
-#if !defined(ERTS_SMP)
-static
-#endif
void
erts_bits_init_state(ERL_BITS_PROTO_0)
{
@@ -96,24 +77,22 @@ erts_bits_init_state(ERL_BITS_PROTO_0)
erts_bin_offset = 0;
}
-#if defined(ERTS_SMP)
void
erts_bits_destroy_state(ERL_BITS_PROTO_0)
{
erts_free(ERTS_ALC_T_BITS_BUF, byte_buf);
}
-#endif
void
erts_init_bits(void)
{
- erts_smp_atomic_init_nob(&bits_bufs_size, 0);
-#if defined(ERTS_SMP)
+ ERTS_CT_ASSERT(offsetof(Binary,orig_bytes) % 8 == 0);
+ ERTS_CT_ASSERT(offsetof(ErtsMagicBinary,u.aligned.data) % 8 == 0);
+ ERTS_CT_ASSERT(offsetof(ErtsBinary,driver.binary.orig_bytes)
+ == offsetof(Binary,orig_bytes));
+
+ erts_atomic_init_nob(&bits_bufs_size, 0);
/* erl_process.c calls erts_bits_init_state() on all state instances */
-#else
- ERL_BITS_DECLARE_STATEP;
- erts_bits_init_state(ERL_BITS_ARGS_0);
-#endif
}
/*****************************************************************
@@ -165,6 +144,26 @@ erts_bs_start_match_2(Process *p, Eterm Binary, Uint Max)
return make_matchstate(ms);
}
+#ifdef DEBUG
+# define CHECK_MATCH_BUFFER(MB) check_match_buffer(MB)
+
+static void check_match_buffer(ErlBinMatchBuffer* mb)
+{
+ Eterm realbin;
+ Uint byteoffs;
+ byte* bytes;
+ byte bitoffs, bitsz;
+ ProcBin* pb;
+ ERTS_GET_REAL_BIN(mb->orig, realbin, byteoffs, bitoffs, bitsz);
+ bytes = binary_bytes(realbin) + byteoffs;
+ ERTS_ASSERT(mb->base >= bytes && mb->base <= (bytes + binary_size(mb->orig)));
+ pb = (ProcBin *) boxed_val(realbin);
+ if (pb->thing_word == HEADER_PROC_BIN)
+ ERTS_ASSERT(pb->flags == 0);
+}
+#else
+# define CHECK_MATCH_BUFFER(MB)
+#endif
+
Eterm
erts_bs_get_integer_2(Process *p, Uint num_bits, unsigned flags, ErlBinMatchBuffer* mb)
{
@@ -185,6 +184,7 @@ erts_bs_get_integer_2(Process *p, Uint num_bits, unsigned flags, ErlBinMatchBuff
return SMALL_ZERO;
}
+ CHECK_MATCH_BUFFER(mb);
if (mb->size - mb->offset < num_bits) { /* Asked for too many bits. */
return THE_NON_VALUE;
}
@@ -252,7 +252,7 @@ erts_bs_get_integer_2(Process *p, Uint num_bits, unsigned flags, ErlBinMatchBuff
* Simply shift whole bytes into the result.
*/
switch (BYTE_OFFSET(n)) {
-#if defined(ARCH_64) && !HALFWORD_HEAP
+#if defined(ARCH_64)
case 7: w = (w << 8) | *bp++;
case 6: w = (w << 8) | *bp++;
case 5: w = (w << 8) | *bp++;
@@ -357,7 +357,7 @@ erts_bs_get_integer_2(Process *p, Uint num_bits, unsigned flags, ErlBinMatchBuff
case 3:
v32 = LSB[0] + (LSB[1]<<8) + (LSB[2]<<16);
goto big_small;
-#if !defined(ARCH_64) || HALFWORD_HEAP
+#if !defined(ARCH_64)
case 4:
v32 = (LSB[0] + (LSB[1]<<8) + (LSB[2]<<16) + (LSB[3]<<24));
if (!IS_USMALL(sgn, v32)) {
@@ -403,7 +403,10 @@ erts_bs_get_integer_2(Process *p, Uint num_bits, unsigned flags, ErlBinMatchBuff
words_needed = 1+WSIZE(bytes);
hp = HeapOnlyAlloc(p, words_needed);
res = bytes_to_big(LSB, bytes, sgn, hp);
- if (is_small(res)) {
+ if (is_nil(res)) {
+ p->htop = hp;
+ res = THE_NON_VALUE;
+ } else if (is_small(res)) {
p->htop = hp;
} else if ((actual = bignum_header_arity(*hp)+1) < words_needed) {
p->htop = hp + actual;
@@ -422,6 +425,7 @@ erts_bs_get_binary_2(Process *p, Uint num_bits, unsigned flags, ErlBinMatchBuffe
{
ErlSubBin* sb;
+ CHECK_MATCH_BUFFER(mb);
if (mb->size - mb->offset < num_bits) { /* Asked for too many bits. */
return THE_NON_VALUE;
}
@@ -453,6 +457,7 @@ erts_bs_get_float_2(Process *p, Uint num_bits, unsigned flags, ErlBinMatchBuffer
byte* fptr;
FloatDef f;
+ CHECK_MATCH_BUFFER(mb);
if (num_bits == 0) {
f.fd = 0.0;
hp = HeapOnlyAlloc(p, FLOAT_SIZE_OBJECT);
@@ -506,6 +511,8 @@ erts_bs_get_binary_all_2(Process *p, ErlBinMatchBuffer* mb)
{
ErlSubBin* sb;
Uint size;
+
+ CHECK_MATCH_BUFFER(mb);
size = mb->size-mb->offset;
sb = (ErlSubBin *) HeapOnlyAlloc(p, ERL_SUB_BIN_SIZE);
sb->thing_word = HEADER_SUB_BIN;
@@ -719,7 +726,7 @@ static void
ERTS_INLINE need_byte_buf(ERL_BITS_PROTO_1(int need))
{
if (byte_buf_len < need) {
- erts_smp_atomic_add_nob(&bits_bufs_size, need - byte_buf_len);
+ erts_atomic_add_nob(&bits_bufs_size, need - byte_buf_len);
byte_buf_len = need;
byte_buf = erts_realloc(ERTS_ALC_T_BITS_BUF, byte_buf, byte_buf_len);
}
@@ -1287,7 +1294,14 @@ erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term,
goto badarg;
}
}
+
+ if((ERTS_UINT_MAX - build_size_in_bits) < erts_bin_offset) {
+ c_p->freason = SYSTEM_LIMIT;
+ return THE_NON_VALUE;
+ }
+
used_size_in_bits = erts_bin_offset + build_size_in_bits;
+
sb->is_writable = 0; /* Make sure that no one else can write. */
pb->size = NBYTES(used_size_in_bits);
pb->flags |= PB_ACTIVE_WRITER;
@@ -1299,7 +1313,6 @@ erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term,
if (binp->orig_size < pb->size) {
Uint new_size = 2*pb->size;
binp = erts_bin_realloc(binp, new_size);
- binp->orig_size = new_size;
pb->val = binp;
pb->bytes = (byte *) binp->orig_bytes;
}
@@ -1362,18 +1375,27 @@ erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term,
goto badarg;
}
}
- used_size_in_bits = erts_bin_offset + build_size_in_bits;
- used_size_in_bytes = NBYTES(used_size_in_bits);
- bin_size = 2*used_size_in_bytes;
+
+ if((ERTS_UINT_MAX - build_size_in_bits) < erts_bin_offset) {
+ c_p->freason = SYSTEM_LIMIT;
+ return THE_NON_VALUE;
+ }
+
+ used_size_in_bits = erts_bin_offset + build_size_in_bits;
+ used_size_in_bytes = NBYTES(used_size_in_bits);
+
+ if(used_size_in_bits < (ERTS_UINT_MAX / 2)) {
+ bin_size = 2 * used_size_in_bytes;
+ } else {
+ bin_size = NBYTES(ERTS_UINT_MAX);
+ }
+
bin_size = (bin_size < 256) ? 256 : bin_size;
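
The sizing logic above grows the backing buffer geometrically: double the bytes actually needed, clamp the doubling once it could overflow, and never go below 256 bytes. The same policy in isolation (a sketch; NBYTES and ERTS_UINT_MAX as used above):

    static Uint grow_policy(Uint used_size_in_bits)
    {
        Uint used_bytes = NBYTES(used_size_in_bits);
        Uint bin_size = (used_size_in_bits < ERTS_UINT_MAX / 2)
                            ? 2 * used_bytes
                            : NBYTES(ERTS_UINT_MAX);
        return (bin_size < 256) ? 256 : bin_size;
    }

    /* grow_policy(8*100) == 256 (floor), grow_policy(8*700) == 1400 (2x) */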
/*
* Allocate the binary data struct itself.
*/
bptr = erts_bin_nrml_alloc(bin_size);
- bptr->flags = 0;
- bptr->orig_size = bin_size;
- erts_refc_init(&bptr->refc, 1);
erts_current_bin = (byte *) bptr->orig_bytes;
/*
@@ -1457,6 +1479,12 @@ erts_bs_private_append(Process* p, Eterm bin, Eterm build_size_term, Uint unit)
* Calculate new size in bytes.
*/
erts_bin_offset = 8*sb->size + sb->bitsize;
+
+ if((ERTS_UINT_MAX - build_size_in_bits) < erts_bin_offset) {
+ p->freason = SYSTEM_LIMIT;
+ return THE_NON_VALUE;
+ }
+
pos_in_bits_after_build = erts_bin_offset + build_size_in_bits;
pb->size = (pos_in_bits_after_build+7) >> 3;
pb->flags |= PB_ACTIVE_WRITER;
@@ -1475,7 +1503,6 @@ erts_bs_private_append(Process* p, Eterm bin, Eterm build_size_term, Uint unit)
* is safe to reallocate it.
*/
binp = erts_bin_realloc(binp, new_size);
- binp->orig_size = new_size;
pb->val = binp;
pb->bytes = (byte *) binp->orig_bytes;
} else {
@@ -1488,16 +1515,11 @@ erts_bs_private_append(Process* p, Eterm bin, Eterm build_size_term, Uint unit)
* binary and copy the contents of the old binary into it.
*/
Binary* bptr = erts_bin_nrml_alloc(new_size);
- bptr->flags = 0;
- bptr->orig_size = new_size;
- erts_refc_init(&bptr->refc, 1);
sys_memcpy(bptr->orig_bytes, binp->orig_bytes, binp->orig_size);
pb->flags |= PB_IS_WRITABLE | PB_ACTIVE_WRITER;
pb->val = bptr;
pb->bytes = (byte *) bptr->orig_bytes;
- if (erts_refc_dectest(&binp->refc, 0) == 0) {
- erts_bin_free(binp);
- }
+ erts_bin_release(binp);
}
}
erts_current_bin = pb->bytes;
@@ -1537,9 +1559,6 @@ erts_bs_init_writable(Process* p, Eterm sz)
* Allocate the binary data struct itself.
*/
bptr = erts_bin_nrml_alloc(bin_size);
- bptr->flags = 0;
- bptr->orig_size = bin_size;
- erts_refc_init(&bptr->refc, 1);
/*
* Now allocate the ProcBin on the heap.
@@ -1585,9 +1604,7 @@ erts_emasculate_writable_binary(ProcBin* pb)
 /* Our allocators are 8-byte aligned, i.e., shrinking by
 less than 8 bytes will have no real effect */
if (unused >= 8) {
- Uint new_size = pb->size;
binp = erts_bin_realloc(binp, pb->size);
- binp->orig_size = new_size;
pb->val = binp;
pb->bytes = (byte *) binp->orig_bytes;
}
@@ -1602,6 +1619,7 @@ erts_bs_get_unaligned_uint32(ErlBinMatchBuffer* mb)
byte* LSB;
byte* MSB;
+ CHECK_MATCH_BUFFER(mb);
ASSERT((mb->offset & 7) != 0);
ASSERT(mb->size - mb->offset >= 32);
@@ -1616,7 +1634,7 @@ erts_bs_get_unaligned_uint32(ErlBinMatchBuffer* mb)
return LSB[0] | (LSB[1]<<8) | (LSB[2]<<16) | (LSB[3]<<24);
}
-void
+static void
erts_align_utf8_bytes(ErlBinMatchBuffer* mb, byte* buf)
{
Uint bits = mb->size - mb->offset;
@@ -1661,6 +1679,8 @@ erts_bs_get_utf8(ErlBinMatchBuffer* mb)
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,9,9,9,9,9,9,9,9
};
+ CHECK_MATCH_BUFFER(mb);
+
if ((remaining_bits = mb->size - mb->offset) < 8) {
return THE_NON_VALUE;
}
@@ -1745,6 +1765,7 @@ erts_bs_get_utf16(ErlBinMatchBuffer* mb, Uint flags)
return THE_NON_VALUE;
}
+ CHECK_MATCH_BUFFER(mb);
/*
* Set up the pointer to the source bytes.
*/
diff --git a/erts/emulator/beam/erl_bits.h b/erts/emulator/beam/erl_bits.h
index 388d943755..b9d141d585 100644
--- a/erts/emulator/beam/erl_bits.h
+++ b/erts/emulator/beam/erl_bits.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2011. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -21,6 +22,23 @@
#define __ERL_BITS_H__
/*
+ * This structure represents a SUB_BINARY.
+ *
+ * Note: The last field (orig) is not counted in arityval in the header.
+ * This simplifies garbage collection.
+ */
+
+typedef struct erl_sub_bin {
+ Eterm thing_word; /* Subtag SUB_BINARY_SUBTAG. */
+ Uint size; /* Binary size in bytes. */
+ Uint offs; /* Offset into original binary. */
+ byte bitsize;
+ byte bitoffs;
+ byte is_writable; /* The underlying binary is writable */
+ Eterm orig; /* Original binary (REFC or HEAP binary). */
+} ErlSubBin;
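+
+A sub-binary is just a window into another binary: offs/size select whole bytes while bitoffs/bitsize handle the non-byte-aligned remainder. Purely as illustration, a 3-byte window starting at byte 2 of an existing binary term orig would be described as (heap allocation and term tagging elided):

    ErlSubBin sb;
    sb.thing_word  = HEADER_SUB_BIN;
    sb.size        = 3;     /* bytes visible through the window */
    sb.offs        = 2;     /* skip the first two bytes of 'orig' */
    sb.bitsize     = 0;     /* no trailing bit fragment */
    sb.bitoffs     = 0;     /* byte-aligned start */
    sb.is_writable = 0;
    sb.orig        = orig;  /* the underlying REFC or HEAP binary */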
+
+/*
* This structure represents a binary to be matched.
*/
@@ -66,31 +84,14 @@ typedef struct erl_bin_match_struct{
#define ms_matchbuffer(_Ms) &(((ErlBinMatchState*) boxed_val(_Ms))->mb)
-#if defined(ERTS_SMP)
-#define ERL_BITS_REENTRANT
-#else
-/* uncomment to test the reentrant API in the non-SMP runtime system */
-/* #define ERL_BITS_REENTRANT */
-#endif
-
-#ifdef ERL_BITS_REENTRANT
-
/*
* Reentrant API with the state passed as a parameter.
* (Except when the current Process* already is a parameter.)
*/
-#ifdef ERTS_SMP
/* the state resides in the current process' scheduler data */
#define ERL_BITS_DECLARE_STATEP struct erl_bits_state *EBS
-#define ERL_BITS_RELOAD_STATEP(P) do{EBS = &(P)->scheduler_data->erl_bits_state;}while(0)
-#define ERL_BITS_DEFINE_STATEP(P) struct erl_bits_state *EBS = &(P)->scheduler_data->erl_bits_state
-#else
-/* reentrant API but with a hidden single global state, for testing only */
-extern struct erl_bits_state ErlBitsState_;
-#define ERL_BITS_DECLARE_STATEP struct erl_bits_state *EBS = &ErlBitsState_
-#define ERL_BITS_RELOAD_STATEP(P) do{}while(0)
-#define ERL_BITS_DEFINE_STATEP(P) ERL_BITS_DECLARE_STATEP
-#endif
+#define ERL_BITS_RELOAD_STATEP(P) do{EBS = &erts_proc_sched_data((P))->erl_bits_state;}while(0)
+#define ERL_BITS_DEFINE_STATEP(P) struct erl_bits_state *EBS = &erts_proc_sched_data((P))->erl_bits_state
#define ErlBitsState (*EBS)
#define ERL_BITS_PROTO_0 struct erl_bits_state *EBS
@@ -102,26 +103,6 @@ extern struct erl_bits_state ErlBitsState_;
#define ERL_BITS_ARGS_2(ARG1,ARG2) EBS, ARG1, ARG2
#define ERL_BITS_ARGS_3(ARG1,ARG2,ARG3) EBS, ARG1, ARG2, ARG3
-#else /* ERL_BITS_REENTRANT */
-
-/*
- * Non-reentrant API with a single global state.
- */
-extern struct erl_bits_state ErlBitsState;
-#define ERL_BITS_DECLARE_STATEP /*empty*/
-#define ERL_BITS_RELOAD_STATEP(P) do{}while(0)
-#define ERL_BITS_DEFINE_STATEP(P) /*empty*/
-
-#define ERL_BITS_PROTO_0 void
-#define ERL_BITS_PROTO_1(PARM1) PARM1
-#define ERL_BITS_PROTO_2(PARM1,PARM2) PARM1, PARM2
-#define ERL_BITS_PROTO_3(PARM1,PARM2,PARM3) PARM1, PARM2, PARM3
-#define ERL_BITS_ARGS_0 /*empty*/
-#define ERL_BITS_ARGS_1(ARG1) ARG1
-#define ERL_BITS_ARGS_2(ARG1,ARG2) ARG1, ARG2
-#define ERL_BITS_ARGS_3(ARG1,ARG2,ARG3) ARG1, ARG2, ARG3
-
-#endif /* ERL_BITS_REENTRANT */
#define erts_bin_offset (ErlBitsState.erts_bin_offset_)
#define erts_current_bin (ErlBitsState.erts_current_bin_)
@@ -140,10 +121,8 @@ extern struct erl_bits_state ErlBitsState;
} while (0)
void erts_init_bits(void); /* Initialization once. */
-#ifdef ERTS_SMP
void erts_bits_init_state(ERL_BITS_PROTO_0);
void erts_bits_destroy_state(ERL_BITS_PROTO_0);
-#endif
/*
@@ -184,7 +163,6 @@ void erts_new_bs_put_string(ERL_BITS_PROTO_2(byte* iptr, Uint num_bytes));
Uint erts_bits_bufs_size(void);
Uint32 erts_bs_get_unaligned_uint32(ErlBinMatchBuffer* mb);
-void erts_align_utf8_bytes(ErlBinMatchBuffer* mb, byte* buf);
Eterm erts_bs_get_utf8(ErlBinMatchBuffer* mb);
Eterm erts_bs_get_utf16(ErlBinMatchBuffer* mb, Uint flags);
Eterm erts_bs_append(Process* p, Eterm* reg, Uint live, Eterm build_size_term,
diff --git a/erts/emulator/beam/erl_cpu_topology.c b/erts/emulator/beam/erl_cpu_topology.c
index f594cb9392..6f8d2f8c35 100644
--- a/erts/emulator/beam/erl_cpu_topology.c
+++ b/erts/emulator/beam/erl_cpu_topology.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -59,7 +60,7 @@ static int max_main_threads;
static int reader_groups;
static ErtsCpuBindData *scheduler2cpu_map;
-static erts_smp_rwmtx_t cpuinfo_rwmtx;
+static erts_rwmtx_t cpuinfo_rwmtx;
typedef enum {
ERTS_CPU_BIND_UNDEFINED,
@@ -130,13 +131,11 @@ static erts_cpu_groups_map_t *reader_groups_map;
#define ERTS_MAX_CPU_TOPOLOGY_ID ((int) 0xffff)
-#ifdef ERTS_SMP
static void cpu_bind_order_sort(erts_cpu_topology_t *cpudata,
int size,
ErtsCpuBindOrder bind_order,
int mk_seq);
static void write_schedulers_bind_change(erts_cpu_topology_t *cpudata, int size);
-#endif
static void reader_groups_callback(int, ErtsSchedulerData *, int, void *);
static erts_cpu_groups_map_t *add_cpu_groups(int groups,
@@ -401,7 +400,7 @@ cpu_bind_order_sort(erts_cpu_topology_t *cpudata,
break;
default:
cmp_func = NULL;
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"Bad cpu bind type: %d\n",
(int) cpu_bind_order);
break;
@@ -433,7 +432,6 @@ processor_order_cmp(const void *vx, const void *vy)
return 0;
}
-#ifdef ERTS_SMP
void
erts_sched_check_cpu_bind_prep_suspend(ErtsSchedulerData *esdp)
{
@@ -443,7 +441,7 @@ erts_sched_check_cpu_bind_prep_suspend(ErtsSchedulerData *esdp)
int cgcc_ix;
/* Unbind from cpu */
- erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rwlock(&cpuinfo_rwmtx);
if (scheduler2cpu_map[esdp->no].bound_id >= 0
&& erts_unbind_from_cpu(cpuinfo) == 0) {
esdp->cpu_id = scheduler2cpu_map[esdp->no].bound_id = -1;
@@ -462,7 +460,7 @@ erts_sched_check_cpu_bind_prep_suspend(ErtsSchedulerData *esdp)
}
}
ASSERT(no_cpu_groups_callbacks == cgcc_ix);
- erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rwunlock(&cpuinfo_rwmtx);
for (cgcc_ix = 0; cgcc_ix < no_cpu_groups_callbacks; cgcc_ix++)
cgcc[cgcc_ix].callback(1,
@@ -480,7 +478,7 @@ erts_sched_check_cpu_bind_prep_suspend(ErtsSchedulerData *esdp)
void
erts_sched_check_cpu_bind_post_suspend(ErtsSchedulerData *esdp)
{
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(esdp->run_queue));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(esdp->run_queue));
if (esdp->no <= max_main_threads)
erts_thr_set_main_status(1, (int) esdp->no);
@@ -489,7 +487,6 @@ erts_sched_check_cpu_bind_post_suspend(ErtsSchedulerData *esdp)
(void) ERTS_RUNQ_FLGS_SET(esdp->run_queue, ERTS_RUNQ_FLG_CHK_CPU_BIND);
}
-#endif
void
erts_sched_check_cpu_bind(ErtsSchedulerData *esdp)
@@ -498,8 +495,8 @@ erts_sched_check_cpu_bind(ErtsSchedulerData *esdp)
erts_cpu_groups_map_t *cgm;
erts_cpu_groups_callback_list_t *cgcl;
erts_cpu_groups_callback_call_t *cgcc;
- erts_smp_runq_unlock(esdp->run_queue);
- erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx);
+ erts_runq_unlock(esdp->run_queue);
+ erts_rwmtx_rwlock(&cpuinfo_rwmtx);
cpu_id = scheduler2cpu_map[esdp->no].bind_id;
if (cpu_id >= 0 && cpu_id != scheduler2cpu_map[esdp->no].bound_id) {
res = erts_bind_to_cpu(cpuinfo, cpu_id);
@@ -542,7 +539,7 @@ erts_sched_check_cpu_bind(ErtsSchedulerData *esdp)
}
ASSERT(no_cpu_groups_callbacks == cgcc_ix);
- erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rwunlock(&cpuinfo_rwmtx);
for (cgcc_ix = 0; cgcc_ix < no_cpu_groups_callbacks; cgcc_ix++)
cgcc[cgcc_ix].callback(0,
@@ -552,10 +549,9 @@ erts_sched_check_cpu_bind(ErtsSchedulerData *esdp)
erts_free(ERTS_ALC_T_TMP, cgcc);
- erts_smp_runq_lock(esdp->run_queue);
+ erts_runq_lock(esdp->run_queue);
}
-#ifdef ERTS_SMP
void
erts_sched_init_check_cpu_bind(ErtsSchedulerData *esdp)
{
@@ -564,7 +560,7 @@ erts_sched_init_check_cpu_bind(ErtsSchedulerData *esdp)
erts_cpu_groups_callback_list_t *cgcl;
erts_cpu_groups_callback_call_t *cgcc;
- erts_smp_rwmtx_rlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rlock(&cpuinfo_rwmtx);
cgcc = erts_alloc(ERTS_ALC_T_TMP,
(no_cpu_groups_callbacks
@@ -580,7 +576,7 @@ erts_sched_init_check_cpu_bind(ErtsSchedulerData *esdp)
}
ASSERT(no_cpu_groups_callbacks == cgcc_ix);
- erts_smp_rwmtx_runlock(&cpuinfo_rwmtx);
+ erts_rwmtx_runlock(&cpuinfo_rwmtx);
for (cgcc_ix = 0; cgcc_ix < no_cpu_groups_callbacks; cgcc_ix++)
cgcc[cgcc_ix].callback(0,
@@ -593,7 +589,6 @@ erts_sched_init_check_cpu_bind(ErtsSchedulerData *esdp)
if (esdp->no <= max_main_threads)
erts_thr_set_main_status(1, (int) esdp->no);
}
-#endif
static void
write_schedulers_bind_change(erts_cpu_topology_t *cpudata, int size)
@@ -601,13 +596,13 @@ write_schedulers_bind_change(erts_cpu_topology_t *cpudata, int size)
int s_ix = 1;
int cpu_ix;
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx));
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx));
if (cpu_bind_order != ERTS_CPU_BIND_NONE && size) {
cpu_bind_order_sort(cpudata, size, cpu_bind_order, 1);
- for (cpu_ix = 0; cpu_ix < size && cpu_ix < erts_no_schedulers; cpu_ix++)
+ for (cpu_ix = 0; cpu_ix < size && s_ix <= erts_no_schedulers; cpu_ix++)
if (erts_is_cpu_available(cpuinfo, cpudata[cpu_ix].logical))
scheduler2cpu_map[s_ix++].bind_id = cpudata[cpu_ix].logical;
}
@@ -701,9 +696,9 @@ Eterm
erts_bound_schedulers_term(Process *c_p)
{
ErtsCpuBindOrder order;
- erts_smp_rwmtx_rlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rlock(&cpuinfo_rwmtx);
order = cpu_bind_order;
- erts_smp_rwmtx_runlock(&cpuinfo_rwmtx);
+ erts_rwmtx_runlock(&cpuinfo_rwmtx);
return bound_schedulers_term(order);
}
@@ -716,7 +711,7 @@ erts_bind_schedulers(Process *c_p, Eterm how)
int cpudata_size;
ErtsCpuBindOrder old_cpu_bind_order;
- erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rwlock(&cpuinfo_rwmtx);
if (erts_bind_to_cpu(cpuinfo, -1) == -ENOTSUP) {
if (cpu_bind_order == ERTS_CPU_BIND_NONE
@@ -772,7 +767,7 @@ erts_bind_schedulers(Process *c_p, Eterm how)
done:
- erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rwunlock(&cpuinfo_rwmtx);
if (notify)
erts_sched_notify_check_cpu_bind();
@@ -792,9 +787,9 @@ erts_sched_bind_atthrcreate_child(int unbind)
{
int res = 0;
if (unbind) {
- erts_smp_rwmtx_rlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rlock(&cpuinfo_rwmtx);
res = erts_unbind_from_cpu(cpuinfo);
- erts_smp_rwmtx_runlock(&cpuinfo_rwmtx);
+ erts_rwmtx_runlock(&cpuinfo_rwmtx);
}
return res;
}
@@ -811,7 +806,7 @@ erts_sched_bind_atfork_prepare(void)
ErtsSchedulerData *esdp = erts_get_scheduler_data();
int unbind = esdp != NULL && erts_is_scheduler_bound(esdp);
if (unbind)
- erts_smp_rwmtx_rlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rlock(&cpuinfo_rwmtx);
return unbind;
}
@@ -819,29 +814,18 @@ int
erts_sched_bind_atfork_child(int unbind)
{
if (unbind) {
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&cpuinfo_rwmtx)
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&cpuinfo_rwmtx)
|| erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx));
return erts_unbind_from_cpu(cpuinfo);
}
return 0;
}
-char *
-erts_sched_bind_atvfork_child(int unbind)
-{
- if (unbind) {
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&cpuinfo_rwmtx)
- || erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx));
- return erts_get_unbind_from_cpu_str(cpuinfo);
- }
- return "false";
-}
-
void
erts_sched_bind_atfork_parent(int unbind)
{
if (unbind)
- erts_smp_rwmtx_runlock(&cpuinfo_rwmtx);
+ erts_rwmtx_runlock(&cpuinfo_rwmtx);
}
Eterm
@@ -875,9 +859,9 @@ erts_fake_scheduler_bindings(Process *p, Eterm how)
return res;
}
- erts_smp_rwmtx_rlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rlock(&cpuinfo_rwmtx);
create_tmp_cpu_topology_copy(&cpudata, &cpudata_size);
- erts_smp_rwmtx_runlock(&cpuinfo_rwmtx);
+ erts_rwmtx_runlock(&cpuinfo_rwmtx);
if (!cpudata || fake_cpu_bind_order == ERTS_CPU_BIND_NONE)
ERTS_BIF_PREP_RET(res, am_false);
@@ -940,12 +924,12 @@ erts_get_schedulers_binds(Process *c_p)
Eterm res = make_tuple(hp);
*(hp++) = make_arityval(erts_no_schedulers);
- erts_smp_rwmtx_rlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rlock(&cpuinfo_rwmtx);
for (ix = 1; ix <= erts_no_schedulers; ix++)
*(hp++) = (scheduler2cpu_map[ix].bound_id >= 0
? make_small(scheduler2cpu_map[ix].bound_id)
: AM_unbound);
- erts_smp_rwmtx_runlock(&cpuinfo_rwmtx);
+ erts_rwmtx_runlock(&cpuinfo_rwmtx);
return res;
}
@@ -1356,7 +1340,7 @@ erts_set_cpu_topology(Process *c_p, Eterm term)
int cpudata_size = 0;
Eterm res;
- erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rwlock(&cpuinfo_rwmtx);
res = get_cpu_topology_term(c_p, ERTS_GET_USED_CPU_TOPOLOGY);
if (term == am_undefined) {
if (user_cpudata)
@@ -1377,7 +1361,7 @@ erts_set_cpu_topology(Process *c_p, Eterm term)
}
else if (is_not_list(term)) {
error:
- erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rwunlock(&cpuinfo_rwmtx);
res = THE_NON_VALUE;
goto done;
}
@@ -1471,7 +1455,7 @@ erts_set_cpu_topology(Process *c_p, Eterm term)
write_schedulers_bind_change(cpudata, cpudata_size);
- erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rwunlock(&cpuinfo_rwmtx);
erts_sched_notify_check_cpu_bind();
done:
@@ -1589,7 +1573,7 @@ get_cpu_topology_term(Process *c_p, int type)
}
break;
default:
- erl_exit(ERTS_ABORT_EXIT, "Bad cpu topology type: %d\n", type);
+ erts_exit(ERTS_ABORT_EXIT, "Bad cpu topology type: %d\n", type);
break;
}
@@ -1625,7 +1609,7 @@ erts_get_cpu_topology_term(Process *c_p, Eterm which)
{
Eterm res;
int type;
- erts_smp_rwmtx_rlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rlock(&cpuinfo_rwmtx);
if (ERTS_IS_ATOM_STR("used", which))
type = ERTS_GET_USED_CPU_TOPOLOGY;
else if (ERTS_IS_ATOM_STR("detected", which))
@@ -1638,7 +1622,7 @@ erts_get_cpu_topology_term(Process *c_p, Eterm which)
res = THE_NON_VALUE;
else
res = get_cpu_topology_term(c_p, type);
- erts_smp_rwmtx_runlock(&cpuinfo_rwmtx);
+ erts_rwmtx_runlock(&cpuinfo_rwmtx);
return res;
}
@@ -1656,9 +1640,9 @@ get_logical_processors(int *conf, int *onln, int *avail)
void
erts_get_logical_processors(int *conf, int *onln, int *avail)
{
- erts_smp_rwmtx_rlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rlock(&cpuinfo_rwmtx);
get_logical_processors(conf, onln, avail);
- erts_smp_rwmtx_runlock(&cpuinfo_rwmtx);
+ erts_rwmtx_runlock(&cpuinfo_rwmtx);
}
void
@@ -1716,8 +1700,9 @@ erts_init_cpu_topology(void)
{
int ix;
- erts_smp_rwmtx_init(&cpuinfo_rwmtx, "cpu_info");
- erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx);
+ erts_rwmtx_init(&cpuinfo_rwmtx, "cpu_info", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+ erts_rwmtx_rwlock(&cpuinfo_rwmtx);
scheduler2cpu_map = erts_alloc(ERTS_ALC_T_CPUDATA,
(sizeof(ErtsCpuBindData)
@@ -1735,13 +1720,13 @@ erts_init_cpu_topology(void)
NULL);
if (cpu_bind_order == ERTS_CPU_BIND_NONE)
- erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rwunlock(&cpuinfo_rwmtx);
else {
erts_cpu_topology_t *cpudata;
int cpudata_size;
create_tmp_cpu_topology_copy(&cpudata, &cpudata_size);
write_schedulers_bind_change(cpudata, cpudata_size);
- erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rwunlock(&cpuinfo_rwmtx);
erts_sched_notify_check_cpu_bind();
destroy_tmp_cpu_topology_copy(cpudata);
}
@@ -1751,7 +1736,7 @@ int
erts_update_cpu_info(void)
{
int changed;
- erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rwlock(&cpuinfo_rwmtx);
changed = erts_cpu_info_update(cpuinfo);
if (changed) {
erts_cpu_topology_t *cpudata;
@@ -1784,7 +1769,7 @@ erts_update_cpu_info(void)
write_schedulers_bind_change(cpudata, cpudata_size);
destroy_tmp_cpu_topology_copy(cpudata);
}
- erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rwunlock(&cpuinfo_rwmtx);
if (changed)
erts_sched_notify_check_cpu_bind();
return changed;
@@ -1801,7 +1786,7 @@ reader_groups_callback(int suspending,
void *unused)
{
if (reader_groups && esdp->no <= max_main_threads)
- erts_smp_rwmtx_set_reader_group(suspending ? 0 : group+1);
+ erts_rwmtx_set_reader_group(suspending ? 0 : group+1);
}
static Eterm get_cpu_groups_map(Process *c_p,
@@ -1830,9 +1815,9 @@ Eterm
erts_get_reader_groups_map(Process *c_p)
{
Eterm res;
- erts_smp_rwmtx_rlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rlock(&cpuinfo_rwmtx);
res = get_cpu_groups_map(c_p, reader_groups_map, 1);
- erts_smp_rwmtx_runlock(&cpuinfo_rwmtx);
+ erts_rwmtx_runlock(&cpuinfo_rwmtx);
return res;
}
@@ -1966,7 +1951,7 @@ cpu_group_insert(erts_cpu_groups_map_t *map,
ix = 0;
} while (ix != start);
- erl_exit(ERTS_ABORT_EXIT, "Reader groups map full\n");
+ erts_exit(ERTS_ABORT_EXIT, "Reader groups map full\n");
}
@@ -2212,7 +2197,7 @@ add_cpu_groups(int groups,
erts_cpu_groups_callback_list_t *cgcl;
erts_cpu_groups_map_t *cgm;
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx));
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx));
if (use_groups > max_main_threads)
use_groups = max_main_threads;
@@ -2253,52 +2238,13 @@ add_cpu_groups(int groups,
return cgm;
}
-static void
-remove_cpu_groups(erts_cpu_groups_callback_t callback, void *arg)
-{
- erts_cpu_groups_map_t *prev_cgm, *cgm;
- erts_cpu_groups_callback_list_t *prev_cgcl, *cgcl;
-
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx));
-
- no_cpu_groups_callbacks--;
-
- prev_cgm = NULL;
- for (cgm = cpu_groups_maps; cgm; cgm = cgm->next) {
- prev_cgcl = NULL;
- for (cgcl = cgm->callback_list; cgcl; cgcl = cgcl->next) {
- if (cgcl->callback == callback && cgcl->arg == arg) {
- if (prev_cgcl)
- prev_cgcl->next = cgcl->next;
- else
- cgm->callback_list = cgcl->next;
- erts_free(ERTS_ALC_T_CPU_GRPS_MAP, cgcl);
- if (!cgm->callback_list) {
- if (prev_cgm)
- prev_cgm->next = cgm->next;
- else
- cpu_groups_maps = cgm->next;
- if (cgm->array)
- erts_free(ERTS_ALC_T_CPU_GRPS_MAP, cgm->array);
- erts_free(ERTS_ALC_T_CPU_GRPS_MAP, cgm);
- }
- return;
- }
- prev_cgcl = cgcl;
- }
- prev_cgm = cgm;
- }
-
- erl_exit(ERTS_ABORT_EXIT, "Cpu groups not found\n");
-}
-
static int
cpu_groups_lookup(erts_cpu_groups_map_t *map,
ErtsSchedulerData *esdp)
{
int start, logical, ix;
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&cpuinfo_rwmtx)
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&cpuinfo_rwmtx)
|| erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx));
if (esdp->cpu_id < 0)
@@ -2319,33 +2265,15 @@ cpu_groups_lookup(erts_cpu_groups_map_t *map,
ix = 0;
} while (ix != start);
- erl_exit(ERTS_ABORT_EXIT, "Logical cpu id %d not found\n", logical);
+ erts_exit(ERTS_ABORT_EXIT, "Logical cpu id %d not found\n", logical);
}
static void
update_cpu_groups_maps(void)
{
erts_cpu_groups_map_t *cgm;
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx));
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx));
for (cgm = cpu_groups_maps; cgm; cgm = cgm->next)
make_cpu_groups_map(cgm, 0);
}
-
-void
-erts_add_cpu_groups(int groups,
- erts_cpu_groups_callback_t callback,
- void *arg)
-{
- erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx);
- add_cpu_groups(groups, callback, arg);
- erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx);
-}
-
-void erts_remove_cpu_groups(erts_cpu_groups_callback_t callback,
- void *arg)
-{
- erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx);
- remove_cpu_groups(callback, arg);
- erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx);
-}
diff --git a/erts/emulator/beam/erl_cpu_topology.h b/erts/emulator/beam/erl_cpu_topology.h
index b502258dae..88bcad79ab 100644
--- a/erts/emulator/beam/erl_cpu_topology.h
+++ b/erts/emulator/beam/erl_cpu_topology.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -59,11 +60,9 @@ int erts_init_scheduler_bind_type_string(char *how);
int erts_init_cpu_topology_string(char *topology_str);
void erts_sched_check_cpu_bind(ErtsSchedulerData *esdp);
-#ifdef ERTS_SMP
void erts_sched_init_check_cpu_bind(ErtsSchedulerData *esdp);
void erts_sched_check_cpu_bind_prep_suspend(ErtsSchedulerData *esdp);
void erts_sched_check_cpu_bind_post_suspend(ErtsSchedulerData *esdp);
-#endif
int erts_update_cpu_info(void);
@@ -84,22 +83,14 @@ void erts_sched_bind_atthrcreate_parent(int unbind);
int erts_sched_bind_atfork_prepare(void);
int erts_sched_bind_atfork_child(int unbind);
-char *erts_sched_bind_atvfork_child(int unbind);
void erts_sched_bind_atfork_parent(int unbind);
Eterm erts_fake_scheduler_bindings(Process *p, Eterm how);
Eterm erts_debug_cpu_groups_map(Process *c_p, int groups);
-
typedef void (*erts_cpu_groups_callback_t)(int,
ErtsSchedulerData *,
int,
void *);
-void erts_add_cpu_groups(int groups,
- erts_cpu_groups_callback_t callback,
- void *arg);
-void erts_remove_cpu_groups(erts_cpu_groups_callback_t callback,
- void *arg);
-
#endif
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index a5d67571e2..3ba0886464 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -1,26 +1,25 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2014. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
/*
- * This file contains the bif interface functions and
- * the handling of the "meta tables" ie the tables of
- * db tables.
+ * This file contains the 'ets' bif interface functions.
*/
/*
@@ -42,9 +41,10 @@
#include "erl_db.h"
#include "bif.h"
#include "big.h"
+#include "erl_binary.h"
-erts_smp_atomic_t erts_ets_misc_mem_size;
+erts_atomic_t erts_ets_misc_mem_size;
/*
** Utility macros
@@ -61,85 +61,237 @@ enum DbIterSafety {
ITER_SAFE_LOCKED, /* Safe while table is locked, not between trap calls */
ITER_SAFE /* No need to fixate at all */
};
-#ifdef ERTS_SMP
# define ITERATION_SAFETY(Proc,Tab) \
((IS_TREE_TABLE((Tab)->common.status) || ONLY_WRITER(Proc,Tab)) ? ITER_SAFE \
: (((Tab)->common.status & DB_FINE_LOCKED) ? ITER_UNSAFE : ITER_SAFE_LOCKED))
-#else
-# define ITERATION_SAFETY(Proc,Tab) \
- ((IS_TREE_TABLE((Tab)->common.status) || ONLY_WRITER(Proc,Tab)) \
- ? ITER_SAFE : ITER_SAFE_LOCKED)
-#endif
#define DID_TRAP(P,Ret) (!is_value(Ret) && ((P)->freason == TRAP))
+/*
+ * "fixed_tabs": list of all fixed tables for a process
+ */
+#ifdef DEBUG
+static int fixed_tabs_find(DbFixation* first, DbFixation* fix);
+#endif
-/*
-** The main meta table, containing all ets tables.
-*/
-#ifdef ERTS_SMP
+static void fixed_tabs_insert(Process* p, DbFixation* fix)
+{
+ DbFixation* first = erts_psd_get(p, ERTS_PSD_ETS_FIXED_TABLES);
+
+ if (!first) {
+ fix->tabs.next = fix->tabs.prev = fix;
+ erts_psd_set(p, ERTS_PSD_ETS_FIXED_TABLES, fix);
+ }
+ else {
+ ASSERT(!fixed_tabs_find(first, fix));
+ fix->tabs.prev = first->tabs.prev;
+ fix->tabs.next = first;
+ fix->tabs.prev->tabs.next = fix;
+ first->tabs.prev = fix;
+ }
+}
+
+static void fixed_tabs_delete(Process *p, DbFixation* fix)
+{
+ if (fix->tabs.next == fix) {
+ DbFixation* old;
+ ASSERT(fix->tabs.prev == fix);
+ old = erts_psd_set(p, ERTS_PSD_ETS_FIXED_TABLES, NULL);
+ ASSERT(old == fix); (void)old;
+ }
+ else {
+ DbFixation *first = (DbFixation*) erts_psd_get(p, ERTS_PSD_ETS_FIXED_TABLES);
-#define ERTS_META_MAIN_TAB_LOCK_TAB_BITS 8
-#define ERTS_META_MAIN_TAB_LOCK_TAB_SIZE (1 << ERTS_META_MAIN_TAB_LOCK_TAB_BITS)
-#define ERTS_META_MAIN_TAB_LOCK_TAB_MASK (ERTS_META_MAIN_TAB_LOCK_TAB_SIZE - 1)
+ ASSERT(fixed_tabs_find(first, fix));
+ fix->tabs.prev->tabs.next = fix->tabs.next;
+ fix->tabs.next->tabs.prev = fix->tabs.prev;
-typedef union {
- erts_smp_rwmtx_t rwmtx;
- byte cache_line_align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(
- sizeof(erts_smp_rwmtx_t))];
-} erts_meta_main_tab_lock_t;
+ if (fix == first)
+ erts_psd_set(p, ERTS_PSD_ETS_FIXED_TABLES, fix->tabs.next);
+ }
+}
-static erts_meta_main_tab_lock_t *meta_main_tab_locks;
+#ifdef DEBUG
+static int fixed_tabs_find(DbFixation* first, DbFixation* fix)
+{
+ DbFixation* p;
+ if (!first) {
+ first = (DbFixation*) erts_psd_get(fix->procs.p, ERTS_PSD_ETS_FIXED_TABLES);
+ }
+ p = first;
+ do {
+ if (p == fix)
+ return 1;
+ ASSERT(p->procs.p == fix->procs.p);
+ ASSERT(p->tabs.next->tabs.prev == p);
+ p = p->tabs.next;
+ } while (p != first);
+ return 0;
+}
#endif
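
fixed_tabs_insert/delete above maintain an intrusive circular doubly-linked list whose head is stashed in a process-specific-data slot: an empty list is a NULL slot, and a single element points at itself. The same discipline in a generic sketch (not ERTS code):

    typedef struct node { struct node *next, *prev; } node;

    static void circ_insert(node **headp, node *n)
    {
        if (!*headp) {              /* empty: element points at itself */
            n->next = n->prev = n;
            *headp = n;
        }
        else {                      /* splice in just before the head */
            n->prev = (*headp)->prev;
            n->next = *headp;
            n->prev->next = n;
            (*headp)->prev = n;
        }
    }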
-static struct {
- union {
- DbTable *tb; /* Only directly readable if slot is ALIVE */
- UWord next_free; /* (index<<2)|1 if slot is FREE */
- }u;
-} *meta_main_tab;
-/* A slot in meta_main_tab can have three states:
- * FREE : Free to use for new table. Part of linked free-list.
- * ALIVE: Contains a table
- * DEAD : Contains a table that is being removed.
+
+/*
+ * fixing_procs: tree of all processes fixating a table
*/
-#define IS_SLOT_FREE(i) (meta_main_tab[(i)].u.next_free & 1)
-#define IS_SLOT_DEAD(i) (meta_main_tab[(i)].u.next_free & 2)
-#define IS_SLOT_ALIVE(i) (!(meta_main_tab[(i)].u.next_free & (1|2)))
-#define GET_NEXT_FREE_SLOT(i) (meta_main_tab[(i)].u.next_free >> 2)
-#define SET_NEXT_FREE_SLOT(i,next) (meta_main_tab[(i)].u.next_free = ((next)<<2)|1)
-#define MARK_SLOT_DEAD(i) (meta_main_tab[(i)].u.next_free |= 2)
-#define GET_ANY_SLOT_TAB(i) ((DbTable*)(meta_main_tab[(i)].u.next_free & ~(1|2))) /* dead or alive */
-
-static ERTS_INLINE erts_smp_rwmtx_t *
-get_meta_main_tab_lock(unsigned slot)
-{
-#ifdef ERTS_SMP
- return &meta_main_tab_locks[slot & ERTS_META_MAIN_TAB_LOCK_TAB_MASK].rwmtx;
+#define ERTS_RBT_PREFIX fixing_procs
+#define ERTS_RBT_T DbFixation
+#define ERTS_RBT_KEY_T Process*
+#define ERTS_RBT_FLAGS_T int
+#define ERTS_RBT_INIT_EMPTY_TNODE(T) \
+ do { \
+ (T)->procs.parent = NULL; \
+ (T)->procs.right = NULL; \
+ (T)->procs.left = NULL; \
+ } while (0)
+#define ERTS_RBT_IS_RED(T) ((T)->procs.is_red)
+#define ERTS_RBT_SET_RED(T) ((T)->procs.is_red = 1)
+#define ERTS_RBT_IS_BLACK(T) (!(T)->procs.is_red)
+#define ERTS_RBT_SET_BLACK(T) ((T)->procs.is_red = 0)
+#define ERTS_RBT_GET_FLAGS(T) ((T)->procs.is_red)
+#define ERTS_RBT_SET_FLAGS(T, F) ((T)->procs.is_red = (F))
+#define ERTS_RBT_GET_PARENT(T) ((T)->procs.parent)
+#define ERTS_RBT_SET_PARENT(T, P) ((T)->procs.parent = (P))
+#define ERTS_RBT_GET_RIGHT(T) ((T)->procs.right)
+#define ERTS_RBT_SET_RIGHT(T, R) ((T)->procs.right = (R))
+#define ERTS_RBT_GET_LEFT(T) ((T)->procs.left)
+#define ERTS_RBT_SET_LEFT(T, L) ((T)->procs.left = (L))
+#define ERTS_RBT_GET_KEY(T) ((T)->procs.p)
+#define ERTS_RBT_IS_LT(KX, KY) ((KX) < (KY))
+#define ERTS_RBT_IS_EQ(KX, KY) ((KX) == (KY))
+
+#define ERTS_RBT_WANT_INSERT
+#define ERTS_RBT_WANT_LOOKUP
+#define ERTS_RBT_WANT_DELETE
+#define ERTS_RBT_WANT_FOREACH
+#define ERTS_RBT_WANT_FOREACH_DESTROY
+#define ERTS_RBT_UNDEF
+
+#include "erl_rbtree.h"
+
+#ifdef HARDDEBUG
+# error Do something useful with CHECK_TABLES maybe
#else
- return NULL;
+# define CHECK_TABLES()
#endif
+
+
+static void
+send_ets_transfer_message(Process *c_p, Process *proc,
+ ErtsProcLocks *locks,
+ DbTable *tb, Eterm heir_data);
+static void schedule_free_dbtable(DbTable* tb);
+static void delete_sched_table(Process *c_p, DbTable *tb);
+
+static void table_dec_refc(DbTable *tb, erts_aint_t min_val)
+{
+ if (erts_refc_dectest(&tb->common.refc, min_val) == 0)
+ schedule_free_dbtable(tb);
+}
+
+static int
+db_table_tid_destructor(Binary *unused)
+{
+ return 1;
+}
+
+static ERTS_INLINE void
+make_btid(DbTable *tb)
+{
+ Binary *btid = erts_create_magic_indirection(db_table_tid_destructor);
+ erts_atomic_t *tbref = erts_binary_to_magic_indirection(btid);
+ erts_atomic_init_nob(tbref, (erts_aint_t) tb);
+ tb->common.btid = btid;
+ /*
+ * Table and magic indirection refer to each other,
+ * and the table is referred to once by being alive...
+ */
+ erts_refc_init(&tb->common.refc, 2);
+ erts_refc_inc(&btid->intern.refc, 1);
+}
+
+static ERTS_INLINE DbTable* btid2tab(Binary* btid)
+{
+ erts_atomic_t *tbref = erts_binary_to_magic_indirection(btid);
+ return (DbTable *) erts_atomic_read_nob(tbref);
+}
+
+static DbTable *
+tid2tab(Eterm tid)
+{
+ DbTable *tb;
+ Binary *btid;
+ erts_atomic_t *tbref;
+ if (!is_internal_magic_ref(tid))
+ return NULL;
+
+ btid = erts_magic_ref2bin(tid);
+ if (ERTS_MAGIC_BIN_DESTRUCTOR(btid) != db_table_tid_destructor)
+ return NULL;
+
+ tbref = erts_binary_to_magic_indirection(btid);
+ tb = (DbTable *) erts_atomic_read_nob(tbref);
+
+ ASSERT(!tb || tb->common.btid == btid);
+
+ return tb;
+}
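+
+Resolving a table identifier therefore goes magic ref -> magic binary -> indirection word -> DbTable pointer, and the pointer is atomically cleared on deletion (see tid_clear below), so a stale tid resolves to NULL rather than a dangling table. A sketch of a typical call site ('tid_term' is illustrative):

    DbTable *tb = tid2tab(tid_term);
    if (!tb) {
        /* not a table id, or the table was already deleted: badarg */
    }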
+
+static ERTS_INLINE int
+is_table_alive(DbTable *tb)
+{
+ erts_atomic_t *tbref;
+ DbTable *rtb;
+
+ tbref = erts_binary_to_magic_indirection(tb->common.btid);
+ rtb = (DbTable *) erts_atomic_read_nob(tbref);
+
+ ASSERT(!rtb || rtb == tb);
+
+ return !!rtb;
+}
+
+static ERTS_INLINE int
+is_table_named(DbTable *tb)
+{
+ return tb->common.type & DB_NAMED_TABLE;
+}
+
+
+static ERTS_INLINE void
+tid_clear(Process *c_p, DbTable *tb)
+{
+ DbTable *rtb;
+ Binary *btid = tb->common.btid;
+ erts_atomic_t *tbref = erts_binary_to_magic_indirection(btid);
+ rtb = (DbTable *) erts_atomic_xchg_nob(tbref, (erts_aint_t) NULL);
+ ASSERT(!rtb || tb == rtb);
+ if (rtb) {
+ table_dec_refc(tb, 1);
+ delete_sched_table(c_p, tb);
+ }
+}
+
+static ERTS_INLINE Eterm
+make_tid(Process *c_p, DbTable *tb)
+{
+ Eterm *hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE);
+ return erts_mk_magic_ref(&hp, &c_p->off_heap, tb->common.btid);
}
-static erts_smp_spinlock_t meta_main_tab_main_lock;
-static Uint meta_main_tab_first_free; /* Index of first free slot */
-static int meta_main_tab_cnt; /* Number of active tables */
-static int meta_main_tab_top; /* Highest ever used slot + 1 */
-static Uint meta_main_tab_slot_mask; /* The slot index part of an unnamed table id */
-static Uint meta_main_tab_seq_incr;
-static Uint meta_main_tab_seq_cnt = 0; /* To give unique(-ish) table identifiers */
/*
** The meta hash table of all NAMED ets tables
*/
-#ifdef ERTS_SMP
# define META_NAME_TAB_LOCK_CNT 16
union {
- erts_smp_rwmtx_t lck;
+ erts_rwmtx_t lck;
byte _cache_line_alignment[64];
}meta_name_tab_rwlocks[META_NAME_TAB_LOCK_CNT];
-#endif
static struct meta_name_tab_entry {
union {
Eterm name_atom;
@@ -155,13 +307,11 @@ static unsigned meta_name_tab_mask;
static ERTS_INLINE
struct meta_name_tab_entry* meta_name_tab_bucket(Eterm name,
- erts_smp_rwmtx_t** lockp)
+ erts_rwmtx_t** lockp)
{
unsigned bix = atom_val(name) & meta_name_tab_mask;
struct meta_name_tab_entry* bucket = &meta_name_tab[bix];
-#ifdef ERTS_SMP
*lockp = &meta_name_tab_rwlocks[bix % META_NAME_TAB_LOCK_CNT].lck;
-#endif
return bucket;
}
@@ -180,8 +330,6 @@ int user_requested_db_max_tabs;
int erts_ets_realloc_always_moves;
int erts_ets_always_compress;
static int db_max_tabs;
-static DbTable *meta_pid_to_tab; /* Pid mapped to owned tables */
-static DbTable *meta_pid_to_fixed_tab; /* Pid mapped to fixed tables */
static Eterm ms_delete_all;
static Eterm ms_delete_all_buff[8]; /* To compare with for deletion
of all objects */
@@ -194,15 +342,13 @@ static void fix_table_locked(Process* p, DbTable* tb);
static void unfix_table_locked(Process* p, DbTable* tb, db_lock_kind_t* kind);
static void set_heir(Process* me, DbTable* tb, Eterm heir, UWord heir_data);
static void free_heir_data(DbTable*);
-static void free_fixations_locked(DbTable *tb);
+static SWord free_fixations_locked(Process* p, DbTable *tb);
-static int free_table_cont(Process *p,
- DbTable *tb,
- int first,
- int clean_meta_tab);
-static void print_table(int to, void *to_arg, int show, DbTable* tb);
+static SWord free_table_continue(Process *p, DbTable *tb, SWord reds);
+static void print_table(fmtfn_t to, void *to_arg, int show, DbTable* tb);
static BIF_RETTYPE ets_select_delete_1(BIF_ALIST_1);
static BIF_RETTYPE ets_select_count_1(BIF_ALIST_1);
+static BIF_RETTYPE ets_select_replace_1(BIF_ALIST_1);
static BIF_RETTYPE ets_select_trap_1(BIF_ALIST_1);
static BIF_RETTYPE ets_delete_trap(BIF_ALIST_1);
static Eterm table_info(Process* p, DbTable* tb, Eterm What);
@@ -217,6 +363,7 @@ static BIF_RETTYPE ets_select3(Process* p, Eterm arg1, Eterm arg2, Eterm arg3);
*/
Export ets_select_delete_continue_exp;
Export ets_select_count_continue_exp;
+Export ets_select_replace_continue_exp;
Export ets_select_continue_exp;
/*
@@ -229,28 +376,19 @@ free_dbtable(void *vtb)
{
DbTable *tb = (DbTable *) vtb;
#ifdef HARDDEBUG
- if (erts_smp_atomic_read_nob(&tb->common.memory_size) != sizeof(DbTable)) {
+ if (erts_atomic_read_nob(&tb->common.memory_size) != sizeof(DbTable)) {
erts_fprintf(stderr, "ets: free_dbtable memory remain=%ld fix=%x\n",
- erts_smp_atomic_read_nob(&tb->common.memory_size)-sizeof(DbTable),
+ erts_atomic_read_nob(&tb->common.memory_size)-sizeof(DbTable),
tb->common.fixations);
}
- erts_fprintf(stderr, "ets: free_dbtable(%T) deleted!!!\r\n",
- tb->common.id);
-
- erts_fprintf(stderr, "ets: free_dbtable: meta_pid_to_tab common.memory_size = %ld\n",
- erts_smp_atomic_read_nob(&meta_pid_to_tab->common.memory_size));
- print_table(ERTS_PRINT_STDOUT, NULL, 1, meta_pid_to_tab);
-
-
- erts_fprintf(stderr, "ets: free_dbtable: meta_pid_to_fixed_tab common.memory_size = %ld\n",
- erts_smp_atomic_read_nob(&meta_pid_to_fixed_tab->common.memory_size));
- print_table(ERTS_PRINT_STDOUT, NULL, 1, meta_pid_to_fixed_tab);
-#endif
-#ifdef ERTS_SMP
- erts_smp_rwmtx_destroy(&tb->common.rwlock);
- erts_smp_mtx_destroy(&tb->common.fixlock);
#endif
+ erts_rwmtx_destroy(&tb->common.rwlock);
+ erts_mtx_destroy(&tb->common.fixlock);
ASSERT(is_immed(tb->common.heir_data));
+
+ if (tb->common.btid)
+ erts_bin_release(tb->common.btid);
+
erts_db_free(ERTS_ALC_T_DB_TABLE, tb, (void *) tb, sizeof(DbTable));
}
@@ -259,44 +397,190 @@ static void schedule_free_dbtable(DbTable* tb)
/*
* NON-SMP case: Caller is *not* allowed to access the *tb
* structure after this function has returned!
- * SMP case: Caller is allowed to access the *tb structure
- * until the bif has returned (we typically
- * need to unlock the table lock after this
- * function has returned).
+ * SMP case: Caller is allowed to access the *common* part of the *tb
+ * structure until the bif has returned (we typically need to
+ * unlock the table lock after this function has returned).
+ * Caller is *not* allowed to access the specialized part
+ * (hash or tree) of *tb after this function has returned.
*/
- ASSERT(erts_refc_read(&tb->common.ref, 0) == 0);
+ ASSERT(erts_refc_read(&tb->common.refc, 0) == 0);
+ ASSERT(erts_refc_read(&tb->common.fix_count, 0) == 0);
erts_schedule_thr_prgr_later_cleanup_op(free_dbtable,
(void *) tb,
&tb->release.data,
sizeof(DbTable));
}
-static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock,
- char *rwname, char* fixname)
+static ERTS_INLINE void
+save_sched_table(Process *c_p, DbTable *tb)
{
-#ifdef ERTS_SMP
- erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
- if (use_frequent_read_lock)
- rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
+ DbTable *first;
+
+ ASSERT(esdp);
+ esdp->ets_tables.count++;
+ erts_refc_inc(&tb->common.refc, 1);
+
+ first = esdp->ets_tables.clist;
+ if (!first) {
+ tb->common.all.next = tb->common.all.prev = tb;
+ esdp->ets_tables.clist = tb;
+ }
+ else {
+ tb->common.all.prev = first->common.all.prev;
+ tb->common.all.next = first;
+ tb->common.all.prev->common.all.next = tb;
+ first->common.all.prev = tb;
+ }
+}
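
The per-scheduler table list built above is a circular doubly-linked ring: a
new table is linked in just before the current first element, or becomes its
own neighbour when the ring is empty. A minimal standalone sketch of the same
insert discipline (the ring_node type is hypothetical):

    struct ring_node { struct ring_node *next, *prev; };

    /* Insert n just before *firstp, i.e. at the logical tail; an empty
     * ring makes n its own neighbour, as in save_sched_table() above. */
    static void ring_insert(struct ring_node **firstp, struct ring_node *n)
    {
        struct ring_node *first = *firstp;
        if (!first) {
            n->next = n->prev = n;
            *firstp = n;
        } else {
            n->prev = first->prev;
            n->next = first;
            n->prev->next = n;
            first->prev = n;
        }
    }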
+
+static ERTS_INLINE void
+remove_sched_table(ErtsSchedulerData *esdp, DbTable *tb)
+{
+ ErtsEtsAllYieldData *eaydp;
+ ASSERT(esdp);
+ ASSERT(erts_get_ref_numbers_thr_id(ERTS_MAGIC_BIN_REFN(tb->common.btid))
+ == (Uint32) esdp->no);
+
+ ASSERT(esdp->ets_tables.count > 0);
+ esdp->ets_tables.count--;
+
+ eaydp = ERTS_SCHED_AUX_YIELD_DATA(esdp, ets_all);
+ if (eaydp->ongoing) {
+	    /* ets:all() op processes the list from last to first... */
+ if (eaydp->tab == tb) {
+ if (eaydp->tab == esdp->ets_tables.clist)
+ eaydp->tab = NULL;
+ else
+ eaydp->tab = tb->common.all.prev;
+ }
+ }
+
+ if (tb->common.all.next == tb) {
+ ASSERT(tb->common.all.prev == tb);
+ ASSERT(esdp->ets_tables.clist == tb);
+ esdp->ets_tables.clist = NULL;
+ }
+ else {
+#ifdef DEBUG
+ DbTable *tmp = esdp->ets_tables.clist;
+ do {
+ if (tmp == tb) break;
+ tmp = tmp->common.all.next;
+ } while (tmp != esdp->ets_tables.clist);
+ ASSERT(tmp == tb);
#endif
-#ifdef ERTS_SMP
- erts_smp_rwmtx_init_opt_x(&tb->common.rwlock, &rwmtx_opt,
- rwname, tb->common.the_name);
- erts_smp_mtx_init_x(&tb->common.fixlock, fixname, tb->common.the_name);
- tb->common.is_thread_safe = !(tb->common.status & DB_FINE_LOCKED);
+ tb->common.all.prev->common.all.next = tb->common.all.next;
+ tb->common.all.next->common.all.prev = tb->common.all.prev;
+
+ if (esdp->ets_tables.clist == tb)
+ esdp->ets_tables.clist = tb->common.all.next;
+
+ }
+
+ table_dec_refc(tb, 0);
+}
+
+static void
+scheduled_remove_sched_table(void *vtb)
+{
+ remove_sched_table(erts_get_scheduler_data(), (DbTable *) vtb);
+}
+
+static void
+delete_sched_table(Process *c_p, DbTable *tb)
+{
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
+ Uint32 sid;
+
+ ASSERT(esdp);
+
+ ASSERT(tb->common.btid);
+ sid = erts_get_ref_numbers_thr_id(ERTS_MAGIC_BIN_REFN(tb->common.btid));
+ ASSERT(1 <= sid && sid <= erts_no_schedulers);
+ if (sid == (Uint32) esdp->no)
+ remove_sched_table(esdp, tb);
+ else
+ erts_schedule_misc_aux_work((int) sid, scheduled_remove_sched_table, tb);
+}
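
delete_sched_table() above follows a scheduler-affinity rule: the ring is only
mutated on the scheduler whose id is embedded in the table's btid reference,
and other schedulers ship the unlink over as aux work. A hedged sketch of that
dispatch shape, with schedule_aux_work as a hypothetical stand-in for
erts_schedule_misc_aux_work:

    /* Run fn(arg) on the owning scheduler: inline when already there,
     * otherwise via that scheduler's aux-work queue. */
    static void run_on_owner(int owner_sid, int my_sid,
                             void (*fn)(void *), void *arg)
    {
        if (owner_sid == my_sid)
            fn(arg);
        else
            schedule_aux_work(owner_sid, fn, arg); /* hypothetical */
    }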
+
+static ERTS_INLINE void
+save_owned_table(Process *c_p, DbTable *tb)
+{
+ DbTable *first;
+
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
+
+ first = (DbTable*) erts_psd_get(c_p, ERTS_PSD_ETS_OWNED_TABLES);
+
+ erts_refc_inc(&tb->common.refc, 1);
+
+ if (!first) {
+ tb->common.owned.next = tb->common.owned.prev = tb;
+ erts_psd_set(c_p, ERTS_PSD_ETS_OWNED_TABLES, tb);
+ }
+ else {
+ tb->common.owned.prev = first->common.owned.prev;
+ tb->common.owned.next = first;
+ tb->common.owned.prev->common.owned.next = tb;
+ first->common.owned.prev = tb;
+ }
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
+}
+
+static ERTS_INLINE void
+delete_owned_table(Process *p, DbTable *tb)
+{
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ if (tb->common.owned.next == tb) {
+ DbTable* old;
+ ASSERT(tb->common.owned.prev == tb);
+ old = erts_psd_set(p, ERTS_PSD_ETS_OWNED_TABLES, NULL);
+ ASSERT(old == tb); (void)old;
+ }
+ else {
+ DbTable *first = (DbTable*) erts_psd_get(p, ERTS_PSD_ETS_OWNED_TABLES);
+#ifdef DEBUG
+ DbTable *tmp = first;
+ do {
+ if (tmp == tb) break;
+ tmp = tmp->common.owned.next;
+ } while (tmp != first);
+ ASSERT(tmp == tb);
#endif
+ tb->common.owned.prev->common.owned.next = tb->common.owned.next;
+ tb->common.owned.next->common.owned.prev = tb->common.owned.prev;
+
+ if (tb == first)
+ erts_psd_set(p, ERTS_PSD_ETS_OWNED_TABLES, tb->common.owned.next);
+ }
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+
+ table_dec_refc(tb, 1);
+}
+
+static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock)
+{
+ erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
+ if (use_frequent_read_lock)
+ rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ;
+ if (erts_ets_rwmtx_spin_count >= 0)
+ rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count;
+ erts_rwmtx_init_opt(&tb->common.rwlock, &rwmtx_opt, "db_tab",
+ tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
+ erts_mtx_init(&tb->common.fixlock, "db_tab_fix",
+ tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
+ tb->common.is_thread_safe = !(tb->common.status & DB_FINE_LOCKED);
}
static ERTS_INLINE void db_lock(DbTable* tb, db_lock_kind_t kind)
{
-#ifdef ERTS_SMP
- ASSERT(tb != meta_pid_to_tab && tb != meta_pid_to_fixed_tab);
if (tb->common.type & DB_FINE_LOCKED) {
if (kind == LCK_WRITE) {
- erts_smp_rwmtx_rwlock(&tb->common.rwlock);
+ erts_rwmtx_rwlock(&tb->common.rwlock);
tb->common.is_thread_safe = 1;
} else {
- erts_smp_rwmtx_rlock(&tb->common.rwlock);
+ erts_rwmtx_rlock(&tb->common.rwlock);
ASSERT(!tb->common.is_thread_safe);
}
}
@@ -305,14 +589,13 @@ static ERTS_INLINE void db_lock(DbTable* tb, db_lock_kind_t kind)
switch (kind) {
case LCK_WRITE:
case LCK_WRITE_REC:
- erts_smp_rwmtx_rwlock(&tb->common.rwlock);
+ erts_rwmtx_rwlock(&tb->common.rwlock);
break;
default:
- erts_smp_rwmtx_rlock(&tb->common.rwlock);
+ erts_rwmtx_rlock(&tb->common.rwlock);
}
ASSERT(tb->common.is_thread_safe);
}
-#endif
}
static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind)
@@ -322,18 +605,15 @@ static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind)
* DbTable structure. That is, ONLY the SMP case is allowed
* to follow the tb pointer!
*/
-#ifdef ERTS_SMP
- ASSERT(tb != meta_pid_to_tab && tb != meta_pid_to_fixed_tab);
-
if (tb->common.type & DB_FINE_LOCKED) {
if (kind == LCK_WRITE) {
ASSERT(tb->common.is_thread_safe);
tb->common.is_thread_safe = 0;
- erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
+ erts_rwmtx_rwunlock(&tb->common.rwlock);
}
else {
ASSERT(!tb->common.is_thread_safe);
- erts_smp_rwmtx_runlock(&tb->common.rwlock);
+ erts_rwmtx_runlock(&tb->common.rwlock);
}
}
else {
@@ -341,27 +621,12 @@ static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind)
switch (kind) {
case LCK_WRITE:
case LCK_WRITE_REC:
- erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
+ erts_rwmtx_rwunlock(&tb->common.rwlock);
break;
default:
- erts_smp_rwmtx_runlock(&tb->common.rwlock);
+ erts_rwmtx_runlock(&tb->common.rwlock);
}
}
-#endif
-}
-
-
-static ERTS_INLINE void db_meta_lock(DbTable* tb, db_lock_kind_t kind)
-{
- ASSERT(tb == meta_pid_to_tab || tb == meta_pid_to_fixed_tab);
- ASSERT(kind != LCK_WRITE);
- /* As long as we only lock for READ we don't have to lock at all. */
-}
-
-static ERTS_INLINE void db_meta_unlock(DbTable* tb, db_lock_kind_t kind)
-{
- ASSERT(tb == meta_pid_to_tab || tb == meta_pid_to_fixed_tab);
- ASSERT(kind != LCK_WRITE);
}
static ERTS_INLINE
@@ -371,8 +636,8 @@ DbTable* db_get_table_aux(Process *p,
db_lock_kind_t kind,
int meta_already_locked)
{
- DbTable *tb = NULL;
- erts_smp_rwmtx_t *mtl = NULL;
+ DbTable *tb;
+ erts_rwmtx_t *mtl = NULL;
/*
* IMPORTANT: Only scheduler threads are allowed
@@ -381,32 +646,16 @@ DbTable* db_get_table_aux(Process *p,
*/
ASSERT(erts_get_scheduler_data());
- if (is_small(id)) {
- Uint slot = unsigned_val(id) & meta_main_tab_slot_mask;
- if (!meta_already_locked) {
- mtl = get_meta_main_tab_lock(slot);
- erts_smp_rwmtx_rlock(mtl);
- }
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
- else {
- erts_smp_rwmtx_t *test_mtl = get_meta_main_tab_lock(slot);
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(test_mtl)
- || erts_lc_rwmtx_is_rwlocked(test_mtl));
- }
-#endif
- if (slot < db_max_tabs && IS_SLOT_ALIVE(slot))
- tb = meta_main_tab[slot].u.tb;
- }
- else if (is_atom(id)) {
+ if (is_atom(id)) {
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(id,&mtl);
if (!meta_already_locked)
- erts_smp_rwmtx_rlock(mtl);
+ erts_rwmtx_rlock(mtl);
else{
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(mtl)
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(mtl)
|| erts_lc_rwmtx_is_rwlocked(mtl));
mtl = NULL;
}
-
+ tb = NULL;
if (bucket->pu.tb != NULL) {
if (is_atom(bucket->u.name_atom)) { /* single */
if (bucket->u.name_atom == id)
@@ -424,17 +673,19 @@ DbTable* db_get_table_aux(Process *p,
}
}
}
+ else
+ tb = tid2tab(id);
+
if (tb) {
db_lock(tb, kind);
- if (tb->common.id != id
- || ((tb->common.status & what) == 0
- && p->common.id != tb->common.owner)) {
+ if ((tb->common.status & what) == 0
+ && p->common.id != tb->common.owner) {
db_unlock(tb, kind);
tb = NULL;
}
}
if (mtl)
- erts_smp_rwmtx_runlock(mtl);
+ erts_rwmtx_runlock(mtl);
return tb;
}
@@ -447,27 +698,15 @@ DbTable* db_get_table(Process *p,
return db_get_table_aux(p, id, what, kind, 0);
}
-/* Requires meta_main_tab_locks[slot] locked.
-*/
-static ERTS_INLINE void free_slot(int slot)
-{
- ASSERT(!IS_SLOT_FREE(slot));
- erts_smp_spin_lock(&meta_main_tab_main_lock);
- SET_NEXT_FREE_SLOT(slot,meta_main_tab_first_free);
- meta_main_tab_first_free = slot;
- meta_main_tab_cnt--;
- erts_smp_spin_unlock(&meta_main_tab_main_lock);
-}
-
static int insert_named_tab(Eterm name_atom, DbTable* tb, int have_lock)
{
int ret = 0;
- erts_smp_rwmtx_t* rwlock;
+ erts_rwmtx_t* rwlock;
struct meta_name_tab_entry* new_entry;
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom,
&rwlock);
if (!have_lock)
- erts_smp_rwmtx_rwlock(rwlock);
+ erts_rwmtx_rwlock(rwlock);
if (bucket->pu.tb == NULL) { /* empty */
new_entry = bucket;
@@ -515,26 +754,25 @@ static int insert_named_tab(Eterm name_atom, DbTable* tb, int have_lock)
done:
if (!have_lock)
- erts_smp_rwmtx_rwunlock(rwlock);
+ erts_rwmtx_rwunlock(rwlock);
return ret;
}
static int remove_named_tab(DbTable *tb, int have_lock)
{
int ret = 0;
- erts_smp_rwmtx_t* rwlock;
- Eterm name_atom = tb->common.id;
+ erts_rwmtx_t* rwlock;
+ Eterm name_atom = tb->common.the_name;
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom,
&rwlock);
-#ifdef ERTS_SMP
- if (!have_lock && erts_smp_rwmtx_tryrwlock(rwlock) == EBUSY) {
+ ASSERT(is_table_named(tb));
+ if (!have_lock && erts_rwmtx_tryrwlock(rwlock) == EBUSY) {
db_unlock(tb, LCK_WRITE);
- erts_smp_rwmtx_rwlock(rwlock);
+ erts_rwmtx_rwlock(rwlock);
db_lock(tb, LCK_WRITE);
}
-#endif
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(rwlock));
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(rwlock));
if (bucket->pu.tb == NULL) {
goto done;
@@ -587,7 +825,7 @@ static int remove_named_tab(DbTable *tb, int have_lock)
done:
if (!have_lock)
- erts_smp_rwmtx_rwunlock(rwlock);
+ erts_rwmtx_rwunlock(rwlock);
return ret;
}
@@ -596,11 +834,11 @@ done:
*/
static ERTS_INLINE void local_fix_table(DbTable* tb)
{
- erts_refc_inc(&tb->common.ref, 1);
+ erts_refc_inc(&tb->common.fix_count, 1);
}
static ERTS_INLINE void local_unfix_table(DbTable* tb)
{
- if (erts_refc_dectest(&tb->common.ref, 0) == 0) {
+ if (erts_refc_dectest(&tb->common.fix_count, 0) == 0) {
ASSERT(IS_HASH_TABLE(tb->common.status));
db_unfix_table_hash(&(tb->hash));
}
@@ -619,7 +857,7 @@ BIF_RETTYPE ets_safe_fixtable_2(BIF_ALIST_2)
erts_fprintf(stderr,
"ets:safe_fixtable(%T,%T); Process: %T, initial: %T:%T/%bpu\n",
BIF_ARG_1, BIF_ARG_2, BIF_P->common.id,
- BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]);
+ BIF_P->u.initial[0], BIF_P->u.initial[1], BIF_P->u.initial[2]);
#endif
kind = (BIF_ARG_2 == am_true) ? LCK_READ : LCK_WRITE_REC;
@@ -752,6 +990,31 @@ BIF_RETTYPE ets_prev_2(BIF_ALIST_2)
BIF_RET(ret);
}
+/*
+** take(Tab, Key)
+*/
+BIF_RETTYPE ets_take_2(BIF_ALIST_2)
+{
+ DbTable* tb;
+#ifdef DEBUG
+ int cret;
+#endif
+ Eterm ret;
+ CHECK_TABLES();
+
+ tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC);
+ if (!tb) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+#ifdef DEBUG
+ cret =
+#endif
+ tb->common.meth->db_take(BIF_P, tb, BIF_ARG_2, &ret);
+ ASSERT(cret == DB_ERROR_NONE);
+ db_unlock(tb, LCK_WRITE_REC);
+ BIF_RET(ret);
+}
+
/*
** update_element(Tab, Key, {Pos, Value})
** update_element(Tab, Key, [{Pos, Value}])
@@ -779,7 +1042,7 @@ BIF_RETTYPE ets_update_element_3(BIF_ALIST_3)
list = BIF_ARG_3;
}
- if (!tb->common.meth->db_lookup_dbterm(tb, BIF_ARG_2, &handle)) {
+ if (!tb->common.meth->db_lookup_dbterm(BIF_P, tb, BIF_ARG_2, THE_NON_VALUE, &handle)) {
cret = DB_ERROR_BADKEY;
goto bail_out;
}
@@ -818,7 +1081,7 @@ BIF_RETTYPE ets_update_element_3(BIF_ALIST_3)
}
finalize:
- tb->common.meth->db_finalize_dbterm(&handle);
+ tb->common.meth->db_finalize_dbterm(cret, &handle);
bail_out:
UnUseTmpHeap(2,BIF_P);
@@ -837,14 +1100,8 @@ bail_out:
}
}
-/*
-** update_counter(Tab, Key, Incr)
-** update_counter(Tab, Key, {Upop})
-** update_counter(Tab, Key, [{Upop}])
-** Upop = {Pos,Incr} | {Pos,Incr,Threshold,WarpTo}
-** Returns new value(s) (integer or [integer])
-*/
-BIF_RETTYPE ets_update_counter_3(BIF_ALIST_3)
+static BIF_RETTYPE
+do_update_counter(Process *p, Eterm arg1, Eterm arg2, Eterm arg3, Eterm arg4)
{
DbTable* tb;
int cret = DB_ERROR_BADITEM;
@@ -854,7 +1111,7 @@ BIF_RETTYPE ets_update_counter_3(BIF_ALIST_3)
Eterm* ret_list_currp = NULL;
Eterm* ret_list_prevp = NULL;
Eterm iter;
- DeclareTmpHeap(cell,5,BIF_P);
+ DeclareTmpHeap(cell, 5, p);
Eterm *tuple = cell+2;
DbUpdateHandle handle;
Uint halloc_size = 0; /* overestimated heap usage */
@@ -862,28 +1119,29 @@ BIF_RETTYPE ets_update_counter_3(BIF_ALIST_3)
Eterm* hstart;
Eterm* hend;
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
+ if ((tb = db_get_table(p, arg1, DB_WRITE, LCK_WRITE_REC)) == NULL) {
+ BIF_ERROR(p, BADARG);
}
- UseTmpHeap(5,BIF_P);
+ UseTmpHeap(5, p);
if (!(tb->common.status & (DB_SET | DB_ORDERED_SET))) {
goto bail_out;
}
- if (is_integer(BIF_ARG_3)) { /* Incr */
- upop_list = CONS(cell, TUPLE2(tuple, make_small(tb->common.keypos+1),
- BIF_ARG_3), NIL);
+ if (is_integer(arg3)) { /* Incr */
+ upop_list = CONS(cell,
+ TUPLE2(tuple, make_small(tb->common.keypos+1), arg3),
+ NIL);
}
- else if (is_tuple(BIF_ARG_3)) { /* {Upop} */
- upop_list = CONS(cell, BIF_ARG_3, NIL);
+ else if (is_tuple(arg3)) { /* {Upop} */
+ upop_list = CONS(cell, arg3, NIL);
}
else { /* [{Upop}] (probably) */
- upop_list = BIF_ARG_3;
+ upop_list = arg3;
ret_list_prevp = &ret;
}
- if (!tb->common.meth->db_lookup_dbterm(tb, BIF_ARG_2, &handle)) {
+ if (!tb->common.meth->db_lookup_dbterm(p, tb, arg2, arg4, &handle)) {
goto bail_out; /* key not found */
}
@@ -956,13 +1214,13 @@ BIF_RETTYPE ets_update_counter_3(BIF_ALIST_3)
if (ret_list_prevp) { /* Prepare to return a list */
ret = NIL;
halloc_size += list_size;
- hstart = HAlloc(BIF_P, halloc_size);
+ hstart = HAlloc(p, halloc_size);
ret_list_currp = hstart;
htop = hstart + list_size;
hend = hstart + halloc_size;
}
else {
- hstart = htop = HAlloc(BIF_P, halloc_size);
+ hstart = htop = HAlloc(p, halloc_size);
}
hend = hstart + halloc_size;
@@ -1009,26 +1267,54 @@ BIF_RETTYPE ets_update_counter_3(BIF_ALIST_3)
(is_list(ret) && (list_val(ret)+list_size)==ret_list_currp));
ASSERT(htop <= hend);
- HRelease(BIF_P,hend,htop);
+ HRelease(p, hend, htop);
finalize:
- tb->common.meth->db_finalize_dbterm(&handle);
+ tb->common.meth->db_finalize_dbterm(cret, &handle);
bail_out:
- UnUseTmpHeap(5,BIF_P);
+ UnUseTmpHeap(5, p);
db_unlock(tb, LCK_WRITE_REC);
switch (cret) {
case DB_ERROR_NONE:
BIF_RET(ret);
case DB_ERROR_SYSRES:
- BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+ BIF_ERROR(p, SYSTEM_LIMIT);
default:
- BIF_ERROR(BIF_P, BADARG);
+ BIF_ERROR(p, BADARG);
break;
}
}
+/*
+** update_counter(Tab, Key, Incr)
+** update_counter(Tab, Key, Upop)
+** update_counter(Tab, Key, [{Upop}])
+** Upop = {Pos,Incr} | {Pos,Incr,Threshold,WarpTo}
+** Returns new value(s) (integer or [integer])
+*/
+BIF_RETTYPE ets_update_counter_3(BIF_ALIST_3)
+{
+ return do_update_counter(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, THE_NON_VALUE);
+}
+
+/*
+** update_counter(Tab, Key, Incr, Default)
+** update_counter(Tab, Key, Upop, Default)
+** update_counter(Tab, Key, [{Upop}], Default)
+** Upop = {Pos,Incr} | {Pos,Incr,Threshold,WarpTo}
+** Returns new value(s) (integer or [integer])
+*/
+BIF_RETTYPE ets_update_counter_4(BIF_ALIST_4)
+{
+ if (is_not_tuple(BIF_ARG_4)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ return do_update_counter(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, BIF_ARG_4);
+}
+
+
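
For reference, the documented semantics of an Upop {Pos,Incr,Threshold,WarpTo}
are: add Incr to the counter at position Pos, and if the result crosses
Threshold in the direction of Incr, set the counter to WarpTo instead. A
minimal sketch of that rule in isolation (plain machine integers only; the
real code also handles bignum counters):

    /* Warp when the incremented value crosses Threshold in the
     * direction of the increment. */
    static long upop_apply(long old, long incr, long threshold, long warp_to)
    {
        long res = old + incr;
        if (incr >= 0 ? res > threshold : res < threshold)
            res = warp_to;
        return res;
    }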
/*
** The put BIF
*/
@@ -1042,7 +1328,7 @@ BIF_RETTYPE ets_insert_2(BIF_ALIST_2)
CHECK_TABLES();
- /* Write lock table if more than one object to keep atomicy */
+ /* Write lock table if more than one object to keep atomicity */
kind = ((is_list(BIF_ARG_2) && CDR(list_val(BIF_ARG_2)) != NIL)
? LCK_WRITE : LCK_WRITE_REC);
@@ -1112,7 +1398,7 @@ BIF_RETTYPE ets_insert_new_2(BIF_ALIST_2)
Eterm lookup_ret;
DbTableMethod* meth;
- /* More than one object, use LCK_WRITE to keep atomicy */
+ /* More than one object, use LCK_WRITE to keep atomicity */
kind = LCK_WRITE;
tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, kind);
if (tb == NULL) {
@@ -1192,13 +1478,14 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
{
DbTable* tb;
Eterm ret;
- erts_smp_rwmtx_t *lck1, *lck2;
+ Eterm old_name;
+ erts_rwmtx_t *lck1, *lck2;
#ifdef HARDDEBUG
erts_fprintf(stderr,
"ets:rename(%T,%T); Process: %T, initial: %T:%T/%bpu\n",
BIF_ARG_1, BIF_ARG_2, BIF_P->common.id,
- BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]);
+ BIF_P->u.initial[0], BIF_P->u.initial[1], BIF_P->u.initial[2]);
#endif
@@ -1208,58 +1495,63 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
(void) meta_name_tab_bucket(BIF_ARG_2, &lck1);
- if (is_small(BIF_ARG_1)) {
- Uint slot = unsigned_val(BIF_ARG_1) & meta_main_tab_slot_mask;
- lck2 = get_meta_main_tab_lock(slot);
- }
- else if (is_atom(BIF_ARG_1)) {
- (void) meta_name_tab_bucket(BIF_ARG_1, &lck2);
+ if (is_atom(BIF_ARG_1)) {
+ old_name = BIF_ARG_1;
+ named_tab:
+ (void) meta_name_tab_bucket(old_name, &lck2);
if (lck1 == lck2)
lck2 = NULL;
else if (lck1 > lck2) {
- erts_smp_rwmtx_t *tmp = lck1;
+ erts_rwmtx_t *tmp = lck1;
lck1 = lck2;
lck2 = tmp;
}
}
else {
- BIF_ERROR(BIF_P, BADARG);
+ tb = tid2tab(BIF_ARG_1);
+ if (!tb)
+ BIF_ERROR(BIF_P, BADARG);
+ else {
+ if (is_table_named(tb)) {
+ old_name = tb->common.the_name;
+ goto named_tab;
+ }
+ lck2 = NULL;
+ }
}
- erts_smp_rwmtx_rwlock(lck1);
+ erts_rwmtx_rwlock(lck1);
if (lck2)
- erts_smp_rwmtx_rwlock(lck2);
+ erts_rwmtx_rwlock(lck2);
tb = db_get_table_aux(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE, 1);
if (!tb)
goto badarg;
- if (is_not_atom(tb->common.id)) { /* Not a named table */
- tb->common.the_name = BIF_ARG_2;
- goto done;
- }
-
- if (!insert_named_tab(BIF_ARG_2, tb, 1))
- goto badarg;
-
- if (!remove_named_tab(tb, 1))
- erl_exit(1,"Could not find named tab %s", tb->common.id);
+ if (is_table_named(tb)) {
+ if (!insert_named_tab(BIF_ARG_2, tb, 1))
+ goto badarg;
- tb->common.id = tb->common.the_name = BIF_ARG_2;
+ if (!remove_named_tab(tb, 1))
+	    erts_exit(ERTS_ERROR_EXIT, "Could not find named tab %T", tb->common.the_name);
+ ret = BIF_ARG_2;
+ }
+ else { /* Not a named table */
+ ret = BIF_ARG_1;
+ }
+ tb->common.the_name = BIF_ARG_2;
- done:
- ret = tb->common.id;
db_unlock(tb, LCK_WRITE);
- erts_smp_rwmtx_rwunlock(lck1);
+ erts_rwmtx_rwunlock(lck1);
if (lck2)
- erts_smp_rwmtx_rwunlock(lck2);
+ erts_rwmtx_rwunlock(lck2);
BIF_RET(ret);
badarg:
if (tb)
db_unlock(tb, LCK_WRITE);
- erts_smp_rwmtx_rwunlock(lck1);
+ erts_rwmtx_rwunlock(lck1);
if (lck2)
- erts_smp_rwmtx_rwunlock(lck2);
+ erts_rwmtx_rwunlock(lck2);
BIF_ERROR(BIF_P, BADARG);
}
@@ -1272,7 +1564,6 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
BIF_RETTYPE ets_new_2(BIF_ALIST_2)
{
DbTable* tb = NULL;
- int slot;
Eterm list;
Eterm val;
Eterm ret;
@@ -1281,15 +1572,11 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
Uint32 status;
Sint keypos;
int is_named, is_compressed;
-#ifdef ERTS_SMP
int is_fine_locked, frequent_read;
-#endif
#ifdef DEBUG
int cret;
#endif
- DeclareTmpHeap(meta_tuple,3,BIF_P);
DbTableMethod* meth;
- erts_smp_rwmtx_t *mmtl;
if (is_not_atom(BIF_ARG_1)) {
BIF_ERROR(BIF_P, BADARG);
@@ -1298,13 +1585,11 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, BADARG);
}
- status = DB_NORMAL | DB_SET | DB_PROTECTED;
+ status = DB_SET | DB_PROTECTED;
keypos = 1;
is_named = 0;
-#ifdef ERTS_SMP
is_fine_locked = 0;
frequent_read = 0;
-#endif
heir = am_none;
heir_data = (UWord) am_undefined;
is_compressed = erts_ets_always_compress;
@@ -1324,7 +1609,6 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
status |= DB_ORDERED_SET;
status &= ~(DB_SET | DB_BAG | DB_DUPLICATE_BAG);
}
- /*TT*/
else if (is_tuple(val)) {
Eterm *tp = tuple_val(val);
if (arityval(tp[0]) == 2) {
@@ -1333,30 +1617,18 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
keypos = signed_val(tp[2]);
}
else if (tp[1] == am_write_concurrency) {
-#ifdef ERTS_SMP
if (tp[2] == am_true) {
is_fine_locked = 1;
} else if (tp[2] == am_false) {
is_fine_locked = 0;
} else break;
-#else
- if ((tp[2] != am_true) && (tp[2] != am_false)) {
- break;
- }
-#endif
}
else if (tp[1] == am_read_concurrency) {
-#ifdef ERTS_SMP
if (tp[2] == am_true) {
frequent_read = 1;
} else if (tp[2] == am_false) {
frequent_read = 0;
} else break;
-#else
- if ((tp[2] != am_true) && (tp[2] != am_false)) {
- break;
- }
-#endif
}
else if (tp[1] == am_heir && tp[2] == am_none) {
@@ -1382,6 +1654,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
}
else if (val == am_named_table) {
is_named = 1;
+ status |= DB_NAMED_TABLE;
}
else if (val == am_compressed) {
is_compressed = 1;
@@ -1397,11 +1670,9 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
}
if (IS_HASH_TABLE(status)) {
meth = &db_hash;
-#ifdef ERTS_SMP
if (is_fine_locked && !(status & DB_PRIVATE)) {
status |= DB_FINE_LOCKED;
}
-#endif
}
else if (IS_TREE_TABLE(status)) {
meth = &db_tree;
@@ -1410,10 +1681,8 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, BADARG);
}
-#ifdef ERTS_SMP
if (frequent_read && !(status & DB_PRIVATE))
status |= DB_FREQ_READ;
-#endif
/* we create table outside any table lock
 * and take the unusual cost of destroying the table if it
@@ -1422,30 +1691,27 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
{
DbTable init_tb;
- erts_smp_atomic_init_nob(&init_tb.common.memory_size, 0);
+ erts_atomic_init_nob(&init_tb.common.memory_size, 0);
tb = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE,
&init_tb, sizeof(DbTable));
- erts_smp_atomic_init_nob(&tb->common.memory_size,
- erts_smp_atomic_read_nob(&init_tb.common.memory_size));
+ erts_atomic_init_nob(&tb->common.memory_size,
+ erts_atomic_read_nob(&init_tb.common.memory_size));
}
tb->common.meth = meth;
tb->common.the_name = BIF_ARG_1;
tb->common.status = status;
-#ifdef ERTS_SMP
tb->common.type = status & ERTS_ETS_TABLE_TYPES;
/* Note, 'type' is *read only* from now on... */
-#endif
- erts_refc_init(&tb->common.ref, 0);
- db_init_lock(tb, status & (DB_FINE_LOCKED|DB_FREQ_READ),
- "db_tab", "db_tab_fix");
+ erts_refc_init(&tb->common.fix_count, 0);
+ db_init_lock(tb, status & (DB_FINE_LOCKED|DB_FREQ_READ));
tb->common.keypos = keypos;
tb->common.owner = BIF_P->common.id;
set_heir(BIF_P, tb, heir, heir_data);
- erts_smp_atomic_init_nob(&tb->common.nitems, 0);
+ erts_atomic_init_nob(&tb->common.nitems, 0);
- tb->common.fixations = NULL;
+ tb->common.fixing_procs = NULL;
tb->common.compress = is_compressed;
#ifdef DEBUG
@@ -1454,87 +1720,36 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
meth->db_create(BIF_P, tb);
ASSERT(cret == DB_ERROR_NONE);
- erts_smp_spin_lock(&meta_main_tab_main_lock);
-
- if (meta_main_tab_cnt >= db_max_tabs) {
- erts_smp_spin_unlock(&meta_main_tab_main_lock);
- erts_send_error_to_logger_str(BIF_P->group_leader,
- "** Too many db tables **\n");
- free_heir_data(tb);
- tb->common.meth->db_free_table(tb);
- free_dbtable((void *) tb);
- BIF_ERROR(BIF_P, SYSTEM_LIMIT);
- }
-
- slot = meta_main_tab_first_free;
- ASSERT(slot>=0 && slot<db_max_tabs);
- meta_main_tab_first_free = GET_NEXT_FREE_SLOT(slot);
- meta_main_tab_cnt++;
- if (slot >= meta_main_tab_top) {
- ASSERT(slot == meta_main_tab_top);
- meta_main_tab_top = slot + 1;
- }
-
- if (is_named) {
- ret = BIF_ARG_1;
- }
- else {
- ret = make_small(slot | meta_main_tab_seq_cnt);
- meta_main_tab_seq_cnt += meta_main_tab_seq_incr;
- ASSERT((unsigned_val(ret) & meta_main_tab_slot_mask) == slot);
- }
- erts_smp_spin_unlock(&meta_main_tab_main_lock);
+ make_btid(tb);
- tb->common.id = ret;
- tb->common.slot = slot; /* store slot for erase */
+ if (is_named)
+ ret = BIF_ARG_1;
+ else
+ ret = make_tid(BIF_P, tb);
- mmtl = get_meta_main_tab_lock(slot);
- erts_smp_rwmtx_rwlock(mmtl);
- meta_main_tab[slot].u.tb = tb;
- ASSERT(IS_SLOT_ALIVE(slot));
- erts_smp_rwmtx_rwunlock(mmtl);
+ save_sched_table(BIF_P, tb);
if (is_named && !insert_named_tab(BIF_ARG_1, tb, 0)) {
- mmtl = get_meta_main_tab_lock(slot);
- erts_smp_rwmtx_rwlock(mmtl);
- free_slot(slot);
- erts_smp_rwmtx_rwunlock(mmtl);
+ tid_clear(BIF_P, tb);
db_lock(tb,LCK_WRITE);
free_heir_data(tb);
tb->common.meth->db_free_table(tb);
- schedule_free_dbtable(tb);
db_unlock(tb,LCK_WRITE);
+ table_dec_refc(tb, 0);
BIF_ERROR(BIF_P, BADARG);
}
BIF_P->flags |= F_USING_DB; /* So we can remove tb if p dies */
+ save_owned_table(BIF_P, tb);
#ifdef HARDDEBUG
erts_fprintf(stderr,
"ets:new(%T,%T)=%T; Process: %T, initial: %T:%T/%bpu\n",
BIF_ARG_1, BIF_ARG_2, ret, BIF_P->common.id,
- BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]);
- erts_fprintf(stderr, "ets: new: meta_pid_to_tab common.memory_size = %ld\n",
- erts_smp_atomic_read_nob(&meta_pid_to_tab->common.memory_size));
- erts_fprintf(stderr, "ets: new: meta_pid_to_fixed_tab common.memory_size = %ld\n",
- erts_smp_atomic_read_nob(&meta_pid_to_fixed_tab->common.memory_size));
+ BIF_P->u.initial[0], BIF_P->u.initial[1], BIF_P->u.initial[2]);
#endif
- UseTmpHeap(3,BIF_P);
-
- db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
- if (db_put_hash(meta_pid_to_tab,
- TUPLE2(meta_tuple,
- BIF_P->common.id,
- make_small(slot)),
- 0) != DB_ERROR_NONE) {
- erl_exit(1,"Could not update ets metadata.");
- }
- db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
-
- UnUseTmpHeap(3,BIF_P);
-
BIF_RET(ret);
}
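
With the meta main table and its slots gone, an unnamed table identifier is a
magic reference whose referred-to binary is the table's btid. A hedged sketch
of what the tid2tab() lookup used above must amount to (the actual helper may
differ in detail):

    static DbTable *tid2tab_sketch(Eterm tid)
    {
        Binary *btid;
        if (!is_internal_magic_ref(tid))
            return NULL;           /* not a table identifier at all */
        btid = erts_magic_ref2bin(tid);
        return btid2tab(btid);     /* NULL once the table is deleted */
    }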
@@ -1639,15 +1854,15 @@ BIF_RETTYPE ets_lookup_element_3(BIF_ALIST_3)
*/
BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
{
- int trap;
+ SWord initial_reds = ERTS_BIF_REDS_LEFT(BIF_P);
+ SWord reds = initial_reds;
DbTable* tb;
- erts_smp_rwmtx_t *mmtl;
#ifdef HARDDEBUG
erts_fprintf(stderr,
"ets:delete(%T); Process: %T, initial: %T:%T/%bpu\n",
BIF_ARG_1, BIF_P->common.id,
- BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]);
+ BIF_P->u.initial[0], BIF_P->u.initial[1], BIF_P->u.initial[2]);
#endif
CHECK_TABLES();
@@ -1664,7 +1879,6 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
tb->common.status |= DB_DELETE;
if (tb->common.owner != BIF_P->common.id) {
- DeclareTmpHeap(meta_tuple,3,BIF_P);
/*
* The table is being deleted by a process other than its owner.
@@ -1672,68 +1886,47 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
* current process will be killed (e.g. by an EXIT signal), we will
* now transfer the ownership to the current process.
*/
- UseTmpHeap(3,BIF_P);
- db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
- db_erase_bag_exact2(meta_pid_to_tab, tb->common.owner,
- make_small(tb->common.slot));
-
- BIF_P->flags |= F_USING_DB;
- tb->common.owner = BIF_P->common.id;
-
- db_put_hash(meta_pid_to_tab,
- TUPLE2(meta_tuple,
- BIF_P->common.id,
- make_small(tb->common.slot)),
- 0);
- db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
- UnUseTmpHeap(3,BIF_P);
- }
- mmtl = get_meta_main_tab_lock(tb->common.slot);
-#ifdef ERTS_SMP
- if (erts_smp_rwmtx_tryrwlock(mmtl) == EBUSY) {
- /*
- * We keep our increased refc over this op in order to
- * prevent the table from disapearing.
- */
- db_unlock(tb, LCK_WRITE);
- erts_smp_rwmtx_rwlock(mmtl);
- db_lock(tb, LCK_WRITE);
+ Process *rp = erts_proc_lookup_raw(tb->common.owner);
+ /*
+ * Process 'rp' might be exiting, but our table lock prevents it
+ * from terminating as it cannot complete erts_db_process_exiting().
+ */
+ ASSERT(!(ERTS_PSFLG_FREE & erts_atomic32_read_nob(&rp->state)));
+
+ delete_owned_table(rp, tb);
+ BIF_P->flags |= F_USING_DB;
+ tb->common.owner = BIF_P->common.id;
+ save_owned_table(BIF_P, tb);
}
-#endif
- /* We must keep the slot, to be found by db_proc_dead() if process dies */
- MARK_SLOT_DEAD(tb->common.slot);
- erts_smp_rwmtx_rwunlock(mmtl);
- if (is_atom(tb->common.id))
+
+ tid_clear(BIF_P, tb);
+
+ if (is_table_named(tb))
remove_named_tab(tb, 0);
/* disable inheritance */
free_heir_data(tb);
tb->common.heir = am_none;
- free_fixations_locked(tb);
-
- trap = free_table_cont(BIF_P, tb, 1, 1);
+ reds -= free_fixations_locked(BIF_P, tb);
db_unlock(tb, LCK_WRITE);
- if (trap) {
+
+ if (free_table_continue(BIF_P, tb, reds) < 0) {
/*
* Package the DbTable* pointer into a bignum so that it can be safely
* passed through a trap. We used to pass the DbTable* pointer directly
	 * (it looks like a continuation pointer), but that will crash the
* emulator if this BIF is call traced.
*/
-#if HALFWORD_HEAP
- Eterm *hp = HAlloc(BIF_P, 3);
- hp[0] = make_pos_bignum_header(2);
- *((UWord *) (UWord) (hp+1)) = (UWord) tb;
-#else
Eterm *hp = HAlloc(BIF_P, 2);
hp[0] = make_pos_bignum_header(1);
hp[1] = (Eterm) tb;
-#endif
+ BUMP_ALL_REDS(BIF_P);
BIF_TRAP1(&ets_delete_continue_exp, BIF_P, make_big(hp));
}
else {
+ BUMP_REDS(BIF_P, (initial_reds - reds));
BIF_RET(am_true);
}
}
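
The bignum trick described above wraps the raw DbTable pointer in an arity-1
positive bignum so it can travel through the trap as an ordinary term. Both
directions of the round trip, assuming a word-sized Eterm as in the
non-HALFWORD branch kept here:

    /* Wrap: one header word plus one digit carrying the pointer. */
    Eterm *hp = HAlloc(BIF_P, 2);
    hp[0] = make_pos_bignum_header(1);
    hp[1] = (Eterm) tb;
    Eterm cont = make_big(hp);

    /* Unwrap on the other side of the trap: */
    DbTable *tb2 = (DbTable *) big_val(cont)[1];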
@@ -1745,7 +1938,6 @@ BIF_RETTYPE ets_give_away_3(BIF_ALIST_3)
{
Process* to_proc = NULL;
ErtsProcLocks to_locks = ERTS_PROC_LOCK_MAIN;
- DeclareTmpHeap(buf,5,BIF_P);
Eterm to_pid = BIF_ARG_2;
Eterm from_pid;
DbTable* tb = NULL;
@@ -1767,32 +1959,20 @@ BIF_RETTYPE ets_give_away_3(BIF_ALIST_3)
goto badarg; /* or should we be idempotent? return false maybe */
}
- UseTmpHeap(5,BIF_P);
- db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
- db_erase_bag_exact2(meta_pid_to_tab, tb->common.owner,
- make_small(tb->common.slot));
-
+ delete_owned_table(BIF_P, tb);
to_proc->flags |= F_USING_DB;
tb->common.owner = to_pid;
-
- db_put_hash(meta_pid_to_tab,
- TUPLE2(buf,to_pid,make_small(tb->common.slot)),
- 0);
- db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
+ save_owned_table(to_proc, tb);
db_unlock(tb,LCK_WRITE);
- erts_send_message(BIF_P, to_proc, &to_locks,
- TUPLE4(buf, am_ETS_TRANSFER,
- tb->common.id,
- from_pid,
- BIF_ARG_3),
- 0);
- erts_smp_proc_unlock(to_proc, to_locks);
+ send_ets_transfer_message(BIF_P, to_proc, &to_locks,
+ tb, BIF_ARG_3);
+ erts_proc_unlock(to_proc, to_locks);
UnUseTmpHeap(5,BIF_P);
BIF_RET(am_true);
badarg:
- if (to_proc != NULL && to_proc != BIF_P) erts_smp_proc_unlock(to_proc, to_locks);
+ if (to_proc != NULL && to_proc != BIF_P) erts_proc_unlock(to_proc, to_locks);
if (tb != NULL) db_unlock(tb, LCK_WRITE);
BIF_ERROR(BIF_P, BADARG);
}
@@ -2016,7 +2196,7 @@ BIF_RETTYPE ets_select_delete_2(BIF_ALIST_2)
if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL) {
BIF_ERROR(BIF_P, BADARG);
}
- nitems = erts_smp_atomic_read_nob(&tb->common.nitems);
+ nitems = erts_atomic_read_nob(&tb->common.nitems);
tb->common.meth->db_delete_all_objects(BIF_P, tb);
db_unlock(tb, LCK_WRITE);
BIF_RET(erts_make_integer(nitems,BIF_P));
@@ -2029,7 +2209,7 @@ BIF_RETTYPE ets_select_delete_2(BIF_ALIST_2)
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select_delete(BIF_P, tb, BIF_ARG_2, &ret);
+ cret = tb->common.meth->db_select_delete(BIF_P, tb, BIF_ARG_1, BIF_ARG_2, &ret);
if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
fix_table_locked(BIF_P,tb);
@@ -2056,46 +2236,252 @@ BIF_RETTYPE ets_select_delete_2(BIF_ALIST_2)
return result;
}
-/*
-** Return a list of tables on this node
-*/
-BIF_RETTYPE ets_all_0(BIF_ALIST_0)
+/*
+ * ets:all/0
+ *
+ * ets:all() calls ets:internal_request_all/0 which
+ * requests information about all tables from
+ * each scheduler thread. Each scheduler replies
+ * to the calling process with information about
+ * existing tables created on that specific scheduler.
+ */
+
+struct ErtsEtsAllReq_ {
+ erts_atomic32_t refc;
+ Process *proc;
+ ErtsOIRefStorage ref;
+ ErtsEtsAllReqList list[1]; /* one per scheduler */
+};
+
+#define ERTS_ETS_ALL_REQ_SIZE \
+ (sizeof(ErtsEtsAllReq) \
+ + (sizeof(ErtsEtsAllReqList) \
+ * (erts_no_schedulers - 1)))
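
ERTS_ETS_ALL_REQ_SIZE uses the classic one-element trailing-array ("struct
hack") sizing: list[1] already accounts for one scheduler, so only
erts_no_schedulers - 1 extra cells are added at allocation time. The same
arithmetic in a self-contained form (struct req is a stand-in type):

    #include <stdlib.h>

    struct req { int refc; struct { void *next, *prev; } list[1]; };

    /* Room for nsched trailing cells; the struct itself carries one. */
    static struct req *alloc_req(size_t nsched)
    {
        return malloc(sizeof(struct req)
                      + sizeof(((struct req *)0)->list[0]) * (nsched - 1));
    }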
+
+typedef struct {
+ ErtsEtsAllReq *ongoing;
+ ErlHeapFragment *hfrag;
+ DbTable *tab;
+ ErtsEtsAllReq *queue;
+} ErtsEtsAllData;
+
+/* Tables handled before yielding */
+#define ERTS_ETS_ALL_TB_YCNT 200
+/*
+ * Min yield count required before starting
+ * Minimum yield count required before starting
+ * an operation that will itself need to yield.
+#define ERTS_ETS_ALL_TB_YCNT_START 10
+
+#ifdef DEBUG
+/* Test yielding... */
+#undef ERTS_ETS_ALL_TB_YCNT
+#undef ERTS_ETS_ALL_TB_YCNT_START
+#define ERTS_ETS_ALL_TB_YCNT 10
+#define ERTS_ETS_ALL_TB_YCNT_START 1
+#endif
+
+static int
+ets_all_reply(ErtsSchedulerData *esdp, ErtsEtsAllReq **reqpp,
+ ErlHeapFragment **hfragpp, DbTable **tablepp,
+ int *yield_count_p)
{
- DbTable* tb;
- Eterm previous;
- int i;
- Eterm* hp;
- Eterm* hendp;
- int t_tabs_cnt;
- int t_top;
-
- erts_smp_spin_lock(&meta_main_tab_main_lock);
- t_tabs_cnt = meta_main_tab_cnt;
- t_top = meta_main_tab_top;
- erts_smp_spin_unlock(&meta_main_tab_main_lock);
-
- hp = HAlloc(BIF_P, 2*t_tabs_cnt);
- hendp = hp + 2*t_tabs_cnt;
-
- previous = NIL;
- for(i = 0; i < t_top; i++) {
- erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(i);
- erts_smp_rwmtx_rlock(mmtl);
- if (IS_SLOT_ALIVE(i)) {
- if (hp == hendp) {
- /* Racing table creator, grab some more heap space */
- t_tabs_cnt = 10;
- hp = HAlloc(BIF_P, 2*t_tabs_cnt);
- hendp = hp + 2*t_tabs_cnt;
- }
- tb = meta_main_tab[i].u.tb;
- previous = CONS(hp, tb->common.id, previous);
- hp += 2;
- }
- erts_smp_rwmtx_runlock(mmtl);
+ ErtsEtsAllReq *reqp = *reqpp;
+ ErlHeapFragment *hfragp = *hfragpp;
+ int ycount = *yield_count_p;
+ DbTable *tb, *first;
+ Uint sz;
+ Eterm list, msg, ref, *hp;
+ ErlOffHeap *ohp;
+ ErtsMessage *mp;
+
+ /*
+     * - save_sched_table() inserts at the end of the circular list.
+     *
+     * - This function scans from the end, so the number of tables
+     *   to scan won't grow even if we yield.
+     *
+     * - remove_sched_table() updates the yield cursor if it removes
+     *   the table we yielded on.
+ */
+
+ if (hfragp) {
+ /* Restart of a yielded operation... */
+ ASSERT(hfragp->used_size < hfragp->alloc_size);
+ ohp = &hfragp->off_heap;
+ hp = &hfragp->mem[hfragp->used_size];
+ list = *hp;
+ hfragp->used_size = hfragp->alloc_size;
+ first = esdp->ets_tables.clist;
+ tb = *tablepp;
+ }
+ else {
+ /* A new operation... */
+ ASSERT(!*tablepp);
+
+ /* Max heap size needed... */
+ sz = esdp->ets_tables.count;
+ sz *= ERTS_MAGIC_REF_THING_SIZE + 2;
+ sz += 3 + ERTS_REF_THING_SIZE;
+ hfragp = new_message_buffer(sz);
+
+ hp = &hfragp->mem[0];
+ ohp = &hfragp->off_heap;
+ list = NIL;
+ first = esdp->ets_tables.clist;
+ tb = first ? first->common.all.prev : NULL;
+ }
+
+ if (tb) {
+ while (1) {
+ if (is_table_alive(tb)) {
+ Eterm tid;
+ if (is_table_named(tb))
+ tid = tb->common.the_name;
+ else
+ tid = erts_mk_magic_ref(&hp, ohp, tb->common.btid);
+ list = CONS(hp, tid, list);
+ hp += 2;
+ }
+
+ if (tb == first)
+ break;
+
+ tb = tb->common.all.prev;
+
+ if (--ycount <= 0) {
+ sz = hp - &hfragp->mem[0];
+ ASSERT(hfragp->alloc_size > sz + 1);
+ *hp = list;
+ hfragp->used_size = sz;
+ *hfragpp = hfragp;
+ *reqpp = reqp;
+ *tablepp = tb;
+ *yield_count_p = 0;
+ return 1; /* Yield! */
+ }
+ }
+ }
+
+ ref = erts_oiref_storage_make_ref(&reqp->ref, &hp);
+ msg = TUPLE2(hp, ref, list);
+ hp += 3;
+
+ sz = hp - &hfragp->mem[0];
+ ASSERT(sz <= hfragp->alloc_size);
+
+ hfragp = erts_resize_message_buffer(hfragp, sz, &msg, 1);
+
+ mp = erts_alloc_message(0, NULL);
+ mp->data.heap_frag = hfragp;
+
+ erts_queue_message(reqp->proc, 0, mp, msg, am_system);
+
+ erts_proc_dec_refc(reqp->proc);
+
+ if (erts_atomic32_dec_read_nob(&reqp->refc) == 0)
+ erts_free(ERTS_ALC_T_ETS_ALL_REQ, reqp);
+
+ *reqpp = NULL;
+ *hfragpp = NULL;
+ *tablepp = NULL;
+ *yield_count_p = ycount;
+
+ return 0;
+}
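
ets_all_reply() is thus a resumable scan: it walks the ring backwards under a
yield budget, and when the budget runs out it stashes the cursor (and its
partial CONS list, in the spare heap-fragment cell) before reporting that it
yielded. The control shape reduced to a sketch, reusing the hypothetical
ring_node type from the earlier example:

    struct scan_state { struct ring_node *cursor; };

    /* Returns 1 when yielding (cursor saved), 0 when the scan is done. */
    static int scan_some(struct scan_state *st, struct ring_node *stop,
                         int budget, void (*visit)(struct ring_node *))
    {
        struct ring_node *n = st->cursor;
        for (;;) {
            visit(n);
            if (n == stop)
                return 0;       /* reached the first element: done */
            n = n->prev;
            if (--budget <= 0) {
                st->cursor = n; /* resume from here on the next call */
                return 1;
            }
        }
    }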
+
+int
+erts_handle_yielded_ets_all_request(ErtsSchedulerData *esdp,
+ ErtsEtsAllYieldData *eaydp)
+{
+ int ix = (int) esdp->no - 1;
+ int yc = ERTS_ETS_ALL_TB_YCNT;
+
+ while (1) {
+ if (!eaydp->ongoing) {
+ ErtsEtsAllReq *ongoing;
+
+ if (!eaydp->queue)
+ return 0; /* All work completed! */
+
+ if (yc < ERTS_ETS_ALL_TB_YCNT_START && yc > esdp->ets_tables.count)
+ return 1; /* Yield! */
+
+ eaydp->ongoing = ongoing = eaydp->queue;
+ if (ongoing->list[ix].next == ongoing)
+ eaydp->queue = NULL;
+ else {
+ ongoing->list[ix].next->list[ix].prev = ongoing->list[ix].prev;
+ ongoing->list[ix].prev->list[ix].next = ongoing->list[ix].next;
+ eaydp->queue = ongoing->list[ix].next;
+ }
+ ASSERT(!eaydp->hfrag);
+ ASSERT(!eaydp->tab);
+ }
+
+ if (ets_all_reply(esdp, &eaydp->ongoing, &eaydp->hfrag, &eaydp->tab, &yc))
+ return 1; /* Yield! */
+ }
+}
+
+static void
+handle_ets_all_request(void *vreq)
+{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ErtsEtsAllYieldData *eayp = ERTS_SCHED_AUX_YIELD_DATA(esdp, ets_all);
+ ErtsEtsAllReq *req = (ErtsEtsAllReq *) vreq;
+
+ if (!eayp->ongoing && !eayp->queue) {
+ /* No ets:all() operations ongoing... */
+ ErlHeapFragment *hf = NULL;
+ DbTable *tb = NULL;
+ int yc = ERTS_ETS_ALL_TB_YCNT;
+ if (ets_all_reply(esdp, &req, &hf, &tb, &yc)) {
+ /* Yielded... */
+ ASSERT(hf);
+ eayp->ongoing = req;
+ eayp->hfrag = hf;
+ eayp->tab = tb;
+ erts_notify_new_aux_yield_work(esdp);
+ }
+ }
+ else {
+ /* Ongoing ets:all() operations; queue up this request... */
+ int ix = (int) esdp->no - 1;
+ if (!eayp->queue) {
+ req->list[ix].next = req;
+ req->list[ix].prev = req;
+ eayp->queue = req;
+ }
+ else {
+ req->list[ix].next = eayp->queue;
+ req->list[ix].prev = eayp->queue->list[ix].prev;
+ eayp->queue->list[ix].prev = req;
+ req->list[ix].prev->list[ix].next = req;
+ }
}
- HRelease(BIF_P, hendp, hp);
- BIF_RET(previous);
+}
+
+BIF_RETTYPE ets_internal_request_all_0(BIF_ALIST_0)
+{
+ Eterm ref = erts_make_ref(BIF_P);
+ ErtsEtsAllReq *req = erts_alloc(ERTS_ALC_T_ETS_ALL_REQ,
+ ERTS_ETS_ALL_REQ_SIZE);
+ erts_atomic32_init_nob(&req->refc,
+ (erts_aint32_t) erts_no_schedulers);
+ erts_oiref_storage_save(&req->ref, ref);
+ req->proc = BIF_P;
+ erts_proc_add_refc(BIF_P, (Sint) erts_no_schedulers);
+
+ if (erts_no_schedulers > 1)
+ erts_schedule_multi_misc_aux_work(1,
+ erts_no_schedulers,
+ handle_ets_all_request,
+ (void *) req);
+
+ handle_ets_all_request((void *) req);
+ BIF_RET(ref);
}
@@ -2200,7 +2586,7 @@ ets_select3(Process* p, Eterm arg1, Eterm arg2, Eterm arg3)
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select_chunk(p, tb,
+ cret = tb->common.meth->db_select_chunk(p, tb, arg1,
arg2, chunk_size,
0 /* not reversed */,
&ret);
@@ -2369,8 +2755,7 @@ ets_select2(Process* p, Eterm arg1, Eterm arg2)
local_fix_table(tb);
}
- cret = tb->common.meth->db_select(p, tb, arg2,
- 0, &ret);
+ cret = tb->common.meth->db_select(p, tb, arg1, arg2, 0, &ret);
if (DID_TRAP(p,ret) && safety != ITER_SAFE) {
fix_table_locked(p, tb);
@@ -2461,7 +2846,7 @@ BIF_RETTYPE ets_select_count_2(BIF_ALIST_2)
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select_count(BIF_P,tb,BIF_ARG_2, &ret);
+ cret = tb->common.meth->db_select_count(BIF_P,tb, BIF_ARG_1, BIF_ARG_2, &ret);
if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
fix_table_locked(BIF_P, tb);
@@ -2487,6 +2872,103 @@ BIF_RETTYPE ets_select_count_2(BIF_ALIST_2)
return result;
}
+/*
+ ** This is for trapping, cannot be called directly.
+ */
+static BIF_RETTYPE ets_select_replace_1(BIF_ALIST_1)
+{
+ Process *p = BIF_P;
+ Eterm a1 = BIF_ARG_1;
+ BIF_RETTYPE result;
+ DbTable* tb;
+ int cret;
+ Eterm ret;
+ Eterm *tptr;
+ db_lock_kind_t kind = LCK_WRITE_REC;
+
+ CHECK_TABLES();
+ ASSERT(is_tuple(a1));
+ tptr = tuple_val(a1);
+ ASSERT(arityval(*tptr) >= 1);
+
+ if ((tb = db_get_table(p, tptr[1], DB_WRITE, kind)) == NULL) {
+ BIF_ERROR(p,BADARG);
+ }
+
+ cret = tb->common.meth->db_select_replace_continue(p,tb,a1,&ret);
+
+ if(!DID_TRAP(p,ret) && ITERATION_SAFETY(p,tb) != ITER_SAFE) {
+ unfix_table_locked(p, tb, &kind);
+ }
+
+ db_unlock(tb, kind);
+
+ switch (cret) {
+ case DB_ERROR_NONE:
+ ERTS_BIF_PREP_RET(result, ret);
+ break;
+ default:
+ ERTS_BIF_PREP_ERROR(result, p, BADARG);
+ break;
+ }
+ erts_match_set_release_result(p);
+
+ return result;
+}
+
+
+BIF_RETTYPE ets_select_replace_2(BIF_ALIST_2)
+{
+ BIF_RETTYPE result;
+ DbTable* tb;
+ int cret;
+ Eterm ret;
+ enum DbIterSafety safety;
+
+ CHECK_TABLES();
+
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC)) == NULL) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if (tb->common.status & DB_BAG) {
+	/* A bag implementation presented both semantic-consistency
+	   and performance issues */
+ db_unlock(tb, LCK_WRITE_REC);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ safety = ITERATION_SAFETY(BIF_P,tb);
+ if (safety == ITER_UNSAFE) {
+ local_fix_table(tb);
+ }
+ cret = tb->common.meth->db_select_replace(BIF_P, tb, BIF_ARG_1, BIF_ARG_2, &ret);
+
+ if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
+ fix_table_locked(BIF_P,tb);
+ }
+ if (safety == ITER_UNSAFE) {
+ local_unfix_table(tb);
+ }
+ db_unlock(tb, LCK_WRITE_REC);
+
+ switch (cret) {
+ case DB_ERROR_NONE:
+ ERTS_BIF_PREP_RET(result, ret);
+ break;
+ case DB_ERROR_SYSRES:
+ ERTS_BIF_PREP_ERROR(result, BIF_P, SYSTEM_LIMIT);
+ break;
+ default:
+ ERTS_BIF_PREP_ERROR(result, BIF_P, BADARG);
+ break;
+ }
+
+ erts_match_set_release_result(BIF_P);
+
+ return result;
+}
+
BIF_RETTYPE ets_select_reverse_3(BIF_ALIST_3)
{
@@ -2515,7 +2997,7 @@ BIF_RETTYPE ets_select_reverse_3(BIF_ALIST_3)
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select_chunk(BIF_P,tb,
+ cret = tb->common.meth->db_select_chunk(BIF_P,tb, BIF_ARG_1,
BIF_ARG_2, chunk_size,
1 /* reversed */, &ret);
if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
@@ -2565,7 +3047,7 @@ BIF_RETTYPE ets_select_reverse_2(BIF_ALIST_2)
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select(BIF_P,tb,BIF_ARG_2,
+ cret = tb->common.meth->db_select(BIF_P,tb, BIF_ARG_1, BIF_ARG_2,
1 /*reversed*/, &ret);
if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
@@ -2642,7 +3124,9 @@ BIF_RETTYPE ets_match_object_3(BIF_ALIST_3)
BIF_RETTYPE ets_info_1(BIF_ALIST_1)
{
static Eterm fields[] = {am_protection, am_keypos, am_type, am_named_table,
- am_node, am_size, am_name, am_heir, am_owner, am_memory, am_compressed};
+ am_node, am_size, am_name, am_heir, am_owner, am_memory, am_compressed,
+ am_write_concurrency,
+ am_read_concurrency};
Eterm results[sizeof(fields)/sizeof(Eterm)];
DbTable* tb;
Eterm res;
@@ -2654,7 +3138,7 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1)
*/
if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ)) == NULL) {
- if (is_atom(BIF_ARG_1) || is_small(BIF_ARG_1)) {
+ if (is_atom(BIF_ARG_1) || is_ref(BIF_ARG_1)) {
BIF_RET(am_undefined);
}
BIF_ERROR(BIF_P, BADARG);
@@ -2678,7 +3162,7 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1)
if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ)) == NULL
|| tb->common.owner != owner) {
if (BIF_P != rp)
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
if (is_atom(BIF_ARG_1) || is_small(BIF_ARG_1)) {
BIF_RET(am_undefined);
}
@@ -2692,7 +3176,7 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1)
db_unlock(tb, LCK_READ);
/*if (rp != NULL && rp != BIF_P)
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);*/
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);*/
hp = HAlloc(BIF_P, 5*sizeof(fields)/sizeof(Eterm));
res = NIL;
@@ -2716,7 +3200,7 @@ BIF_RETTYPE ets_info_2(BIF_ALIST_2)
Eterm ret = THE_NON_VALUE;
if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ)) == NULL) {
- if (is_atom(BIF_ARG_1) || is_small(BIF_ARG_1)) {
+ if (is_atom(BIF_ARG_1) || is_ref(BIF_ARG_1)) {
BIF_RET(am_undefined);
}
BIF_ERROR(BIF_P, BADARG);
@@ -2732,7 +3216,7 @@ BIF_RETTYPE ets_info_2(BIF_ALIST_2)
BIF_RETTYPE ets_is_compiled_ms_1(BIF_ALIST_1)
{
- if (erts_db_is_compiled_ms(BIF_ARG_1)) {
+ if (erts_db_get_match_prog_binary(BIF_ARG_1)) {
BIF_RET(am_true);
} else {
BIF_RET(am_false);
@@ -2747,9 +3231,9 @@ BIF_RETTYPE ets_match_spec_compile_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
}
- hp = HAlloc(BIF_P, PROC_BIN_SIZE);
+ hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
- BIF_RET(erts_mk_magic_binary_term(&hp, &MSO(BIF_P), mp));
+ BIF_RET(erts_db_make_match_prog_ref(BIF_P, mp, &hp));
}
BIF_RETTYPE ets_match_spec_run_r_3(BIF_ALIST_3)
@@ -2758,24 +3242,18 @@ BIF_RETTYPE ets_match_spec_run_r_3(BIF_ALIST_3)
int i = 0;
Eterm *hp;
Eterm lst;
- ProcBin *bp;
Binary *mp;
Eterm res;
Uint32 dummy;
- if (!(is_list(BIF_ARG_1) || BIF_ARG_1 == NIL) || !is_binary(BIF_ARG_2)) {
+ if (!(is_list(BIF_ARG_1) || BIF_ARG_1 == NIL)) {
error:
BIF_ERROR(BIF_P, BADARG);
}
- bp = (ProcBin*) binary_val(BIF_ARG_2);
- if (thing_subtag(bp->thing_word) != REFC_BINARY_SUBTAG) {
- goto error;
- }
- mp = bp->val;
- if (!IsMatchProgBinary(mp)) {
+ mp = erts_db_get_match_prog_binary(BIF_ARG_2);
+ if (!mp)
goto error;
- }
if (BIF_ARG_1 == NIL) {
BIF_RET(BIF_ARG_3);
@@ -2786,7 +3264,8 @@ BIF_RETTYPE ets_match_spec_run_r_3(BIF_ALIST_3)
BIF_TRAP3(bif_export[BIF_ets_match_spec_run_r_3],
BIF_P,lst,BIF_ARG_2,ret);
}
- res = db_prog_match(BIF_P, mp, CAR(list_val(lst)), NULL, NULL, 0,
+ res = db_prog_match(BIF_P, BIF_P,
+ mp, CAR(list_val(lst)), NULL, 0,
ERTS_PAM_COPY_RESULT, &dummy);
if (is_value(res)) {
hp = HAlloc(BIF_P, 2);
@@ -2805,39 +3284,66 @@ BIF_RETTYPE ets_match_spec_run_r_3(BIF_ALIST_3)
** External interface (NOT BIF's)
*/
+int erts_ets_rwmtx_spin_count = -1;
/* Init the db */
-void init_db(void)
+void init_db(ErtsDbSpinCount db_spin_count)
{
- DbTable init_tb;
int i;
Eterm *hp;
unsigned bits;
size_t size;
-#ifdef ERTS_SMP
- erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
- rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
- rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
-
- meta_main_tab_locks =
- erts_alloc_permanent_cache_aligned(ERTS_ALC_T_DB_TABLES,
- sizeof(erts_meta_main_tab_lock_t)
- * ERTS_META_MAIN_TAB_LOCK_TAB_SIZE);
+ int max_spin_count = (1 << 15) - 1; /* internal limit */
+ erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED;
- for (i = 0; i < ERTS_META_MAIN_TAB_LOCK_TAB_SIZE; i++) {
- erts_smp_rwmtx_init_opt_x(&meta_main_tab_locks[i].rwmtx, &rwmtx_opt,
- "meta_main_tab_slot", make_small(i));
+ switch (db_spin_count) {
+ case ERTS_DB_SPNCNT_NONE:
+ erts_ets_rwmtx_spin_count = 0;
+ break;
+ case ERTS_DB_SPNCNT_VERY_LOW:
+ erts_ets_rwmtx_spin_count = 100;
+ break;
+ case ERTS_DB_SPNCNT_LOW:
+ erts_ets_rwmtx_spin_count = 200;
+ erts_ets_rwmtx_spin_count += erts_no_schedulers * 50;
+ if (erts_ets_rwmtx_spin_count > 1000)
+ erts_ets_rwmtx_spin_count = 1000;
+ break;
+ case ERTS_DB_SPNCNT_HIGH:
+ erts_ets_rwmtx_spin_count = 2000;
+ erts_ets_rwmtx_spin_count += erts_no_schedulers * 100;
+ if (erts_ets_rwmtx_spin_count > 15000)
+ erts_ets_rwmtx_spin_count = 15000;
+ break;
+ case ERTS_DB_SPNCNT_VERY_HIGH:
+ erts_ets_rwmtx_spin_count = 15000;
+ erts_ets_rwmtx_spin_count += erts_no_schedulers * 500;
+ if (erts_ets_rwmtx_spin_count > max_spin_count)
+ erts_ets_rwmtx_spin_count = max_spin_count;
+ break;
+ case ERTS_DB_SPNCNT_EXTREMELY_HIGH:
+ erts_ets_rwmtx_spin_count = max_spin_count;
+ break;
+ case ERTS_DB_SPNCNT_NORMAL:
+ default:
+ erts_ets_rwmtx_spin_count = -1;
+ break;
}
- erts_smp_spinlock_init(&meta_main_tab_main_lock, "meta_main_tab_main");
+
+ if (erts_ets_rwmtx_spin_count >= 0)
+ rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count;
+
for (i=0; i<META_NAME_TAB_LOCK_CNT; i++) {
- erts_smp_rwmtx_init_opt_x(&meta_name_tab_rwlocks[i].lck, &rwmtx_opt,
- "meta_name_tab", make_small(i));
+ erts_rwmtx_init_opt(&meta_name_tab_rwlocks[i].lck, &rwmtx_opt,
+ "meta_name_tab", make_small(i),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DB);
}
-#endif
- erts_smp_atomic_init_nob(&erts_ets_misc_mem_size, 0);
+ erts_atomic_init_nob(&erts_ets_misc_mem_size, 0);
db_initialize_util();
if (user_requested_db_max_tabs < DB_DEF_MAX_TABS)
@@ -2847,23 +3353,9 @@ void init_db(void)
bits = erts_fit_in_bits_int32(db_max_tabs-1);
if (bits > SMALL_BITS) {
- erl_exit(1,"Max limit for ets tabled too high %u (max %u).",
+	erts_exit(ERTS_ERROR_EXIT,"Max limit for ets tables too high %u (max %u).",
db_max_tabs, ((Uint)1)<<SMALL_BITS);
}
- meta_main_tab_slot_mask = (((Uint)1)<<bits) - 1;
- meta_main_tab_seq_incr = (((Uint)1)<<bits);
-
- size = sizeof(*meta_main_tab)*db_max_tabs;
- meta_main_tab = erts_db_alloc_nt(ERTS_ALC_T_DB_TABLES, size);
- ERTS_ETS_MISC_MEM_ADD(size);
-
- meta_main_tab_cnt = 0;
- meta_main_tab_top = 0;
- for (i=1; i<db_max_tabs; i++) {
- SET_NEXT_FREE_SLOT(i-1,i);
- }
- SET_NEXT_FREE_SLOT(db_max_tabs-1, (Uint)-1);
- meta_main_tab_first_free = 0;
meta_name_tab_mask = (((Uint) 1)<<(bits-1)) - 1; /* At least half the size of main tab */
size = sizeof(struct meta_name_tab_entry)*(meta_name_tab_mask+1);
@@ -2878,70 +3370,6 @@ void init_db(void)
db_initialize_hash();
db_initialize_tree();
- /*TT*/
- /* Create meta table invertion. */
- erts_smp_atomic_init_nob(&init_tb.common.memory_size, 0);
- meta_pid_to_tab = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE,
- &init_tb,
- sizeof(DbTable));
- erts_smp_atomic_init_nob(&meta_pid_to_tab->common.memory_size,
- erts_smp_atomic_read_nob(&init_tb.common.memory_size));
-
- meta_pid_to_tab->common.id = NIL;
- meta_pid_to_tab->common.the_name = am_true;
- meta_pid_to_tab->common.status = (DB_NORMAL | DB_BAG | DB_PUBLIC | DB_FINE_LOCKED);
-#ifdef ERTS_SMP
- meta_pid_to_tab->common.type
- = meta_pid_to_tab->common.status & ERTS_ETS_TABLE_TYPES;
- /* Note, 'type' is *read only* from now on... */
- meta_pid_to_tab->common.is_thread_safe = 0;
-#endif
- meta_pid_to_tab->common.keypos = 1;
- meta_pid_to_tab->common.owner = NIL;
- erts_smp_atomic_init_nob(&meta_pid_to_tab->common.nitems, 0);
- meta_pid_to_tab->common.slot = -1;
- meta_pid_to_tab->common.meth = &db_hash;
- meta_pid_to_tab->common.compress = 0;
-
- erts_refc_init(&meta_pid_to_tab->common.ref, 0);
- /* Neither rwlock or fixlock used
- db_init_lock(meta_pid_to_tab, "meta_pid_to_tab", "meta_pid_to_tab_FIX");*/
-
- if (db_create_hash(NULL, meta_pid_to_tab) != DB_ERROR_NONE) {
- erl_exit(1,"Unable to create ets metadata tables.");
- }
-
- erts_smp_atomic_set_nob(&init_tb.common.memory_size, 0);
- meta_pid_to_fixed_tab = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE,
- &init_tb,
- sizeof(DbTable));
- erts_smp_atomic_init_nob(&meta_pid_to_fixed_tab->common.memory_size,
- erts_smp_atomic_read_nob(&init_tb.common.memory_size));
-
- meta_pid_to_fixed_tab->common.id = NIL;
- meta_pid_to_fixed_tab->common.the_name = am_true;
- meta_pid_to_fixed_tab->common.status = (DB_NORMAL | DB_BAG | DB_PUBLIC | DB_FINE_LOCKED);
-#ifdef ERTS_SMP
- meta_pid_to_fixed_tab->common.type
- = meta_pid_to_fixed_tab->common.status & ERTS_ETS_TABLE_TYPES;
- /* Note, 'type' is *read only* from now on... */
- meta_pid_to_fixed_tab->common.is_thread_safe = 0;
-#endif
- meta_pid_to_fixed_tab->common.keypos = 1;
- meta_pid_to_fixed_tab->common.owner = NIL;
- erts_smp_atomic_init_nob(&meta_pid_to_fixed_tab->common.nitems, 0);
- meta_pid_to_fixed_tab->common.slot = -1;
- meta_pid_to_fixed_tab->common.meth = &db_hash;
- meta_pid_to_fixed_tab->common.compress = 0;
-
- erts_refc_init(&meta_pid_to_fixed_tab->common.ref, 0);
- /* Neither rwlock or fixlock used
- db_init_lock(meta_pid_to_fixed_tab, "meta_pid_to_fixed_tab", "meta_pid_to_fixed_tab_FIX");*/
-
- if (db_create_hash(NULL, meta_pid_to_fixed_tab) != DB_ERROR_NONE) {
- erl_exit(1,"Unable to create ets metadata tables.");
- }
-
/* Non visual BIF to trap to. */
erts_init_trap_export(&ets_select_delete_continue_exp,
am_ets, am_atom_put("delete_trap",11), 1,
@@ -2953,6 +3381,11 @@ void init_db(void)
&ets_select_count_1);
/* Non visual BIF to trap to. */
+ erts_init_trap_export(&ets_select_replace_continue_exp,
+			  am_ets, am_atom_put("replace_trap",12), 1,
+ &ets_select_replace_1);
+
+ /* Non visual BIF to trap to. */
erts_init_trap_export(&ets_select_continue_exp,
am_ets, am_atom_put("select_trap",11), 1,
&ets_select_trap_1);
@@ -2970,81 +3403,18 @@ void init_db(void)
ms_delete_all = CONS(hp, ms_delete_all,NIL);
}
-#define ARRAY_CHUNK 100
-
-typedef enum {
- ErtsDbProcCleanupProgressTables,
- ErtsDbProcCleanupProgressFixations,
- ErtsDbProcCleanupProgressDone,
-} ErtsDbProcCleanupProgress;
-
-typedef enum {
- ErtsDbProcCleanupOpGetTables,
- ErtsDbProcCleanupOpDeleteTables,
- ErtsDbProcCleanupOpGetFixations,
- ErtsDbProcCleanupOpDeleteFixations,
- ErtsDbProcCleanupOpDone
-} ErtsDbProcCleanupOperation;
-
-typedef struct {
- ErtsDbProcCleanupProgress progress;
- ErtsDbProcCleanupOperation op;
- struct {
- Eterm arr[ARRAY_CHUNK];
- int size;
- int ix;
- int clean_ix;
- } slots;
-} ErtsDbProcCleanupState;
-
-
-static void
-proc_exit_cleanup_tables_meta_data(Eterm pid, ErtsDbProcCleanupState *state)
-{
- ASSERT(state->slots.clean_ix <= state->slots.ix);
- if (state->slots.clean_ix < state->slots.ix) {
- db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
- if (state->slots.size < ARRAY_CHUNK
- && state->slots.ix == state->slots.size) {
- Eterm dummy;
- db_erase_hash(meta_pid_to_tab,pid,&dummy);
- }
- else {
- int ix;
- /* Need to erase each explicitly */
- for (ix = state->slots.clean_ix; ix < state->slots.ix; ix++)
- db_erase_bag_exact2(meta_pid_to_tab,
- pid,
- state->slots.arr[ix]);
- }
- db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
- state->slots.clean_ix = state->slots.ix;
- }
+void
+erts_ets_sched_spec_data_init(ErtsSchedulerData *esdp)
+{
+ ErtsEtsAllYieldData *eaydp = ERTS_SCHED_AUX_YIELD_DATA(esdp, ets_all);
+ eaydp->ongoing = NULL;
+ eaydp->hfrag = NULL;
+ eaydp->tab = NULL;
+ eaydp->queue = NULL;
+ esdp->ets_tables.clist = NULL;
+ esdp->ets_tables.count = 0;
}
-static void
-proc_exit_cleanup_fixations_meta_data(Eterm pid, ErtsDbProcCleanupState *state)
-{
- ASSERT(state->slots.clean_ix <= state->slots.ix);
- if (state->slots.clean_ix < state->slots.ix) {
- db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
- if (state->slots.size < ARRAY_CHUNK
- && state->slots.ix == state->slots.size) {
- Eterm dummy;
- db_erase_hash(meta_pid_to_fixed_tab,pid,&dummy);
- }
- else {
- int ix;
- /* Need to erase each explicitly */
- for (ix = state->slots.clean_ix; ix < state->slots.ix; ix++)
- db_erase_bag_exact2(meta_pid_to_fixed_tab,
- pid,
- state->slots.arr[ix]);
- }
- db_meta_unlock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
- state->slots.clean_ix = state->slots.ix;
- }
-}
/* In: Table LCK_WRITE
** Return TRUE : ok, table not mine and NOT locked anymore.
@@ -3054,7 +3424,6 @@ static int give_away_to_heir(Process* p, DbTable* tb)
{
Process* to_proc;
ErtsProcLocks to_locks = ERTS_PROC_LOCK_MAIN;
- DeclareTmpHeap(buf,5,p);
Eterm to_pid;
UWord heir_data;
@@ -3075,14 +3444,14 @@ retry:
if (tb->common.owner != p->common.id) {
if (to_proc != NULL ) {
- erts_smp_proc_unlock(to_proc, to_locks);
+ erts_proc_unlock(to_proc, to_locks);
}
db_unlock(tb,LCK_WRITE);
return !0; /* ok, someone already gave my table away */
}
if (tb->common.heir != to_pid) { /* someone changed the heir */
if (to_proc != NULL ) {
- erts_smp_proc_unlock(to_proc, to_locks);
+ erts_proc_unlock(to_proc, to_locks);
}
if (to_pid == p->common.id || to_pid == am_none) {
return 0; /* no real heir, table still mine */
@@ -3095,22 +3464,15 @@ retry:
}
if (to_proc->common.u.alive.started_interval
!= tb->common.heir_started_interval) {
- erts_smp_proc_unlock(to_proc, to_locks);
+ erts_proc_unlock(to_proc, to_locks);
return 0; /* heir dead and pid reused, table still mine */
}
- UseTmpHeap(5,p);
- db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
- db_erase_bag_exact2(meta_pid_to_tab, tb->common.owner,
- make_small(tb->common.slot));
+ delete_owned_table(p, tb);
to_proc->flags |= F_USING_DB;
tb->common.owner = to_pid;
-
- db_put_hash(meta_pid_to_tab,
- TUPLE2(buf,to_pid,make_small(tb->common.slot)),
- 0);
- db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
- UnUseTmpHeap(5,p);
+ save_owned_table(to_proc, tb);
+
db_unlock(tb,LCK_WRITE);
heir_data = tb->common.heir_data;
if (!is_immed(heir_data)) {
@@ -3118,17 +3480,88 @@ retry:
ASSERT(arityval(*tpv) == 1);
heir_data = tpv[1];
}
- erts_send_message(p, to_proc, &to_locks,
- TUPLE4(buf,
- am_ETS_TRANSFER,
- tb->common.id,
- p->common.id,
- heir_data),
- 0);
- erts_smp_proc_unlock(to_proc, to_locks);
+ send_ets_transfer_message(p, to_proc, &to_locks, tb, heir_data);
+ erts_proc_unlock(to_proc, to_locks);
return !0;
}
+static void
+send_ets_transfer_message(Process *c_p, Process *proc,
+ ErtsProcLocks *locks,
+ DbTable *tb, Eterm heir_data)
+{
+ Uint hsz, hd_sz;
+ ErtsMessage *mp;
+ Eterm *hp;
+ ErlOffHeap *ohp;
+ Eterm tid, hd_copy, msg, sender;
+
+ hsz = 5;
+ if (!is_table_named(tb))
+ hsz += ERTS_MAGIC_REF_THING_SIZE;
+ if (is_immed(heir_data))
+ hd_sz = 0;
+ else {
+ hd_sz = size_object(heir_data);
+ hsz += hd_sz;
+ }
+
+ mp = erts_alloc_message_heap(proc, locks, hsz, &hp, &ohp);
+ if (is_table_named(tb))
+ tid = tb->common.the_name;
+ else
+ tid = erts_mk_magic_ref(&hp, ohp, tb->common.btid);
+ if (!hd_sz)
+ hd_copy = heir_data;
+ else
+ hd_copy = copy_struct(heir_data, hd_sz, &hp, ohp);
+ sender = c_p->common.id;
+ msg = TUPLE4(hp, am_ETS_TRANSFER, tid, sender, hd_copy);
+ erts_queue_message(proc, *locks, mp, msg, sender);
+}
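/*
 * A note on the allocation pattern above: send_ets_transfer_message()
 * uses the usual ERTS two-phase message build -- first sum the heap
 * words every copied term needs, then allocate once and write the
 * terms in order. A minimal stand-alone sketch of the sizing step,
 * with illustrative names only (not ERTS API, guarded from compilation):
 */
#if 0
#include <stddef.h>

static size_t transfer_msg_heap_size(int named, int heir_is_immed,
                                     size_t magic_ref_sz, size_t heir_sz)
{
    size_t hsz = 5;              /* TUPLE4: header word + 4 elements */
    if (!named)
        hsz += magic_ref_sz;     /* magic ref thing for the table id */
    if (!heir_is_immed)
        hsz += heir_sz;          /* deep copy of the heir data */
    return hsz;
}
#endif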
+
+
+/* Auto-release fixation from exiting process */
+static SWord proc_cleanup_fixed_table(Process* p, DbFixation* fix)
+{
+ DbTable* tb = btid2tab(fix->tabs.btid);
+ SWord work = 0;
+
+ ASSERT(fix->procs.p == p); (void)p;
+ if (tb) {
+ db_lock(tb, LCK_WRITE_REC);
+ if (!(tb->common.status & DB_DELETE)) {
+ erts_aint_t diff;
+ erts_mtx_lock(&tb->common.fixlock);
+
+ ASSERT(fixing_procs_rbt_lookup(tb->common.fixing_procs, p));
+
+ diff = -((erts_aint_t) fix->counter);
+ erts_refc_add(&tb->common.fix_count,diff,0);
+ fix->counter = 0;
+
+ fixing_procs_rbt_delete(&tb->common.fixing_procs, fix);
+
+ erts_mtx_unlock(&tb->common.fixlock);
+ if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)) {
+ work += db_unfix_table_hash(&(tb->hash));
+ }
+
+ ASSERT(sizeof(DbFixation) == ERTS_ALC_DBG_BLK_SZ(fix));
+ ERTS_DB_ALC_MEM_UPDATE_(tb, sizeof(DbFixation), 0);
+ }
+ db_unlock(tb, LCK_WRITE_REC);
+ }
+
+ erts_bin_release(fix->tabs.btid);
+ erts_free(ERTS_ALC_T_DB_FIXATION, fix);
+ ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
+ ++work;
+
+ return work;
+}
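/*
 * Note: rather than decrementing fix_count once per fixation,
 * proc_cleanup_fixed_table() subtracts the whole per-process counter
 * in one atomic add (diff = -counter) and zeroes fix->counter while
 * still holding the fixlock, so no other thread can observe a
 * half-torn-down fixation.
 */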
+
+
/*
* erts_db_process_exiting() is called when a process terminates.
* It returns 0 when completely done, and !0 when it wants to
@@ -3136,279 +3569,160 @@ retry:
* yielding.
*/
#define ERTS_DB_INTERNAL_ERROR(LSTR) \
- erl_exit(ERTS_ABORT_EXIT, "%s:%d:erts_db_process_exiting(): " LSTR "\n", \
+ erts_exit(ERTS_ABORT_EXIT, "%s:%d:erts_db_process_exiting(): " LSTR "\n", \
__FILE__, __LINE__)
int
erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
{
- ErtsDbProcCleanupState *state = (ErtsDbProcCleanupState *) c_p->u.terminate;
+ typedef struct {
+ enum {
+ GET_OWNED_TABLE,
+ FREE_OWNED_TABLE,
+ UNFIX_TABLES,
+ }op;
+ DbTable *tb;
+ } CleanupState;
+ CleanupState *state = (CleanupState *) c_p->u.terminate;
Eterm pid = c_p->common.id;
- ErtsDbProcCleanupState default_state;
- int ret;
+ CleanupState default_state;
+ SWord initial_reds = ERTS_BIF_REDS_LEFT(c_p);
+ SWord reds = initial_reds;
if (!state) {
state = &default_state;
- state->progress = ErtsDbProcCleanupProgressTables;
- state->op = ErtsDbProcCleanupOpGetTables;
+ state->op = GET_OWNED_TABLE;
+ state->tb = NULL;
}
- while (!0) {
+ do {
switch (state->op) {
- case ErtsDbProcCleanupOpGetTables:
- state->slots.size = ARRAY_CHUNK;
- db_meta_lock(meta_pid_to_tab, LCK_READ);
- ret = db_get_element_array(meta_pid_to_tab,
- pid,
- 2,
- state->slots.arr,
- &state->slots.size);
- db_meta_unlock(meta_pid_to_tab, LCK_READ);
- if (ret == DB_ERROR_BADKEY) {
- /* Done with tables; now fixations */
- state->progress = ErtsDbProcCleanupProgressFixations;
- state->op = ErtsDbProcCleanupOpGetFixations;
- break;
- } else if (ret != DB_ERROR_NONE) {
- ERTS_DB_INTERNAL_ERROR("Inconsistent ets table metadata");
- }
-
- state->slots.ix = 0;
- state->slots.clean_ix = 0;
- state->op = ErtsDbProcCleanupOpDeleteTables;
- /* Fall through */
-
- case ErtsDbProcCleanupOpDeleteTables:
+ case GET_OWNED_TABLE: {
+ DbTable* tb;
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
+ tb = (DbTable*) erts_psd_get(c_p, ERTS_PSD_ETS_OWNED_TABLES);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
+
+ if (!tb) {
+ /* Done with owned tables; now fixations */
+ state->op = UNFIX_TABLES;
+ break;
+ }
- while (state->slots.ix < state->slots.size) {
- DbTable *tb = NULL;
- Sint ix = unsigned_val(state->slots.arr[state->slots.ix]);
- erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(ix);
- erts_smp_rwmtx_rlock(mmtl);
- if (!IS_SLOT_FREE(ix)) {
- tb = GET_ANY_SLOT_TAB(ix);
- ASSERT(tb);
- }
- erts_smp_rwmtx_runlock(mmtl);
- if (tb) {
- int do_yield;
- db_lock(tb, LCK_WRITE);
- /* Ownership may have changed since
- we looked up the table. */
- if (tb->common.owner != pid) {
- do_yield = 0;
- db_unlock(tb, LCK_WRITE);
- }
- else if (tb->common.heir != am_none
- && tb->common.heir != pid
- && give_away_to_heir(c_p, tb)) {
- do_yield = 0;
- }
- else {
- int first_call;
-#ifdef HARDDEBUG
- erts_fprintf(stderr,
- "erts_db_process_exiting(); Table: %T, "
- "Process: %T\n",
- tb->common.id, pid);
-#endif
- first_call = (tb->common.status & DB_DELETE) == 0;
- if (first_call) {
- /* Clear all access bits. */
- tb->common.status &= ~(DB_PROTECTED
- | DB_PUBLIC
- | DB_PRIVATE);
- tb->common.status |= DB_DELETE;
-
- if (is_atom(tb->common.id))
- remove_named_tab(tb, 0);
-
- free_heir_data(tb);
- free_fixations_locked(tb);
- }
-
- do_yield = free_table_cont(c_p, tb, first_call, 0);
- db_unlock(tb, LCK_WRITE);
- }
- if (do_yield)
- goto yield;
- }
- state->slots.ix++;
- if (ERTS_BIF_REDS_LEFT(c_p) <= 0)
- goto yield;
- }
+ ASSERT(tb != state->tb);
+ state->tb = tb;
+ db_lock(tb, LCK_WRITE);
+ /*
+ * Ownership may have changed since we looked up the table.
+ */
+ if (tb->common.owner != pid) {
+ db_unlock(tb, LCK_WRITE);
+ break;
+ }
+ if (tb->common.heir != am_none
+ && tb->common.heir != pid
+ && give_away_to_heir(c_p, tb)) {
+ break;
+ }
+ tid_clear(c_p, tb);
+ /* Clear all access bits. */
+ tb->common.status &= ~(DB_PROTECTED | DB_PUBLIC | DB_PRIVATE);
+ tb->common.status |= DB_DELETE;
+
+ if (is_table_named(tb))
+ remove_named_tab(tb, 0);
+
+ free_heir_data(tb);
+ reds -= free_fixations_locked(c_p, tb);
+ db_unlock(tb, LCK_WRITE);
+ state->op = FREE_OWNED_TABLE;
+ break;
+ }
+ case FREE_OWNED_TABLE:
+ reds = free_table_continue(c_p, state->tb, reds);
+ if (reds < 0)
+ goto yield;
- proc_exit_cleanup_tables_meta_data(pid, state);
- state->op = ErtsDbProcCleanupOpGetTables;
- break;
+ state->op = GET_OWNED_TABLE;
+ break;
- case ErtsDbProcCleanupOpGetFixations:
- state->slots.size = ARRAY_CHUNK;
- db_meta_lock(meta_pid_to_fixed_tab, LCK_READ);
- ret = db_get_element_array(meta_pid_to_fixed_tab,
- pid,
- 2,
- state->slots.arr,
- &state->slots.size);
- db_meta_unlock(meta_pid_to_fixed_tab, LCK_READ);
-
- if (ret == DB_ERROR_BADKEY) {
- /* Done */
- state->progress = ErtsDbProcCleanupProgressDone;
- state->op = ErtsDbProcCleanupOpDone;
- break;
- } else if (ret != DB_ERROR_NONE) {
- ERTS_DB_INTERNAL_ERROR("Inconsistent ets fix table metadata");
- }
+ case UNFIX_TABLES: {
+ DbFixation* fix;
- state->slots.ix = 0;
- state->slots.clean_ix = 0;
- state->op = ErtsDbProcCleanupOpDeleteFixations;
- /* Fall through */
+ fix = (DbFixation*) erts_psd_get(c_p, ERTS_PSD_ETS_FIXED_TABLES);
- case ErtsDbProcCleanupOpDeleteFixations:
+ if (!fix) {
+ /* Done */
- while (state->slots.ix < state->slots.size) {
- DbTable *tb = NULL;
- Sint ix = unsigned_val(state->slots.arr[state->slots.ix]);
- erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(ix);
- erts_smp_rwmtx_rlock(mmtl);
- if (IS_SLOT_ALIVE(ix)) {
- tb = meta_main_tab[ix].u.tb;
- ASSERT(tb);
- }
- erts_smp_rwmtx_runlock(mmtl);
- if (tb) {
- int reds;
- DbFixation** pp;
-
- db_lock(tb, LCK_WRITE_REC);
- #ifdef ERTS_SMP
- erts_smp_mtx_lock(&tb->common.fixlock);
- #endif
- reds = 10;
-
- for (pp = &tb->common.fixations; *pp != NULL;
- pp = &(*pp)->next) {
- if ((*pp)->pid == pid) {
- DbFixation* fix = *pp;
- erts_aint_t diff = -((erts_aint_t) fix->counter);
- erts_refc_add(&tb->common.ref,diff,0);
- *pp = fix->next;
- erts_db_free(ERTS_ALC_T_DB_FIXATION,
- tb, fix, sizeof(DbFixation));
- ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
- break;
- }
- }
- #ifdef ERTS_SMP
- erts_smp_mtx_unlock(&tb->common.fixlock);
- #endif
- if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)) {
- db_unfix_table_hash(&(tb->hash));
- reds += 40;
- }
- db_unlock(tb, LCK_WRITE_REC);
- BUMP_REDS(c_p, reds);
- }
- state->slots.ix++;
- if (ERTS_BIF_REDS_LEFT(c_p) <= 0)
- goto yield;
- }
+ if (state != &default_state)
+ erts_free(ERTS_ALC_T_DB_PROC_CLEANUP, state);
+ c_p->u.terminate = NULL;
- proc_exit_cleanup_fixations_meta_data(pid, state);
- state->op = ErtsDbProcCleanupOpGetFixations;
- break;
-
- case ErtsDbProcCleanupOpDone:
+ BUMP_REDS(c_p, (initial_reds - reds));
+ return 0;
+ }
- if (state != &default_state)
- erts_free(ERTS_ALC_T_DB_PROC_CLEANUP, state);
- c_p->u.terminate = NULL;
- return 0;
+ fixed_tabs_delete(c_p, fix);
+ reds -= proc_cleanup_fixed_table(c_p, fix);
+ break;
+ }
default:
ERTS_DB_INTERNAL_ERROR("Bad internal state");
- }
- }
-
- yield:
+ }
- switch (state->progress) {
- case ErtsDbProcCleanupProgressTables:
- proc_exit_cleanup_tables_meta_data(pid, state);
- break;
- case ErtsDbProcCleanupProgressFixations:
- proc_exit_cleanup_fixations_meta_data(pid, state);
- break;
- default:
- break;
- }
+ } while (reds > 0);
- ASSERT(c_p->u.terminate == (void *) state
- || state == &default_state);
+ yield:
if (state == &default_state) {
c_p->u.terminate = erts_alloc(ERTS_ALC_T_DB_PROC_CLEANUP,
- sizeof(ErtsDbProcCleanupState));
- sys_memcpy(c_p->u.terminate,
- (void*) state,
- sizeof(ErtsDbProcCleanupState));
+ sizeof(CleanupState));
+ sys_memcpy(c_p->u.terminate, (void*) state, sizeof(CleanupState));
}
+ else
+ ASSERT(state == c_p->u.terminate);
return !0;
}
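/*
 * A sketch of the reduction-budget protocol used above: consume the
 * reductions the scheduler granted, persist the state machine in the
 * process when the budget runs out, and return !0 so the caller
 * re-schedules the cleanup. All names below are illustrative only
 * (guarded from compilation, not ERTS code):
 */
#if 0
typedef struct { int op; void *cursor; } SketchState;

static int sketch_do_one_step(SketchState *st);  /* hypothetical worker */

static int sketch_cleanup(SketchState *st, long *reds)
{
    while (*reds > 0) {
        if (!sketch_do_one_step(st))
            return 0;            /* completely done */
        --*reds;                 /* charge one reduction per unit */
    }
    return 1;                    /* budget spent: save state and yield */
}
#endif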
+
/* SMP note: table only need to be LCK_READ locked */
static void fix_table_locked(Process* p, DbTable* tb)
{
DbFixation *fix;
- DeclareTmpHeap(meta_tuple,3,p);
-#ifdef ERTS_SMP
- erts_smp_mtx_lock(&tb->common.fixlock);
-#endif
- erts_refc_inc(&tb->common.ref,1);
- fix = tb->common.fixations;
- if (fix == NULL) {
- get_now(&(tb->common.megasec),
- &(tb->common.sec),
- &(tb->common.microsec));
+ erts_mtx_lock(&tb->common.fixlock);
+ erts_refc_inc(&tb->common.fix_count,1);
+ fix = tb->common.fixing_procs;
+ if (fix == NULL) {
+ tb->common.time.monotonic
+ = erts_get_monotonic_time(erts_proc_sched_data(p));
+ tb->common.time.offset = erts_get_time_offset();
}
else {
- for (; fix != NULL; fix = fix->next) {
- if (fix->pid == p->common.id) {
- ++(fix->counter);
-#ifdef ERTS_SMP
- erts_smp_mtx_unlock(&tb->common.fixlock);
-#endif
- return;
- }
+ fix = fixing_procs_rbt_lookup(fix, p);
+ if (fix) {
+ ASSERT(fixed_tabs_find(NULL, fix));
+ ++(fix->counter);
+
+ erts_mtx_unlock(&tb->common.fixlock);
+ return;
}
}
fix = (DbFixation *) erts_db_alloc(ERTS_ALC_T_DB_FIXATION,
tb, sizeof(DbFixation));
ERTS_ETS_MISC_MEM_ADD(sizeof(DbFixation));
- fix->pid = p->common.id;
+ fix->tabs.btid = tb->common.btid;
+ erts_refc_inc(&fix->tabs.btid->intern.refc, 2);
+ fix->procs.p = p;
fix->counter = 1;
- fix->next = tb->common.fixations;
- tb->common.fixations = fix;
-#ifdef ERTS_SMP
- erts_smp_mtx_unlock(&tb->common.fixlock);
-#endif
- p->flags |= F_USING_DB;
- UseTmpHeap(3,p);
- db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
- if (db_put_hash(meta_pid_to_fixed_tab,
- TUPLE2(meta_tuple,
- p->common.id,
- make_small(tb->common.slot)),
- 0) != DB_ERROR_NONE) {
- UnUseTmpHeap(3,p);
- erl_exit(1,"Could not insert ets metadata in safe_fixtable.");
- }
- UnUseTmpHeap(3,p);
- db_meta_unlock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
+ fixing_procs_rbt_insert(&tb->common.fixing_procs, fix);
+
+ erts_mtx_unlock(&tb->common.fixlock);
+ p->flags |= F_USING_DB;
+
+ fixed_tabs_insert(p, fix);
}
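/*
 * Note: after fix_table_locked() a DbFixation is linked into two
 * structures at once -- the table's fixing_procs red-black tree (keyed
 * by fixing process) and the process's own fixed_tabs list -- and it
 * holds an extra reference on the table's btid so the binary outlives
 * whichever side is torn down first: unfix_table_locked(),
 * free_fixations_op() on table delete, or proc_cleanup_fixed_table()
 * on process exit.
 */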
/* SMP note: May re-lock table
@@ -3416,77 +3730,115 @@ static void fix_table_locked(Process* p, DbTable* tb)
static void unfix_table_locked(Process* p, DbTable* tb,
db_lock_kind_t* kind_p)
{
- DbFixation** pp;
+ DbFixation* fix;
+
+ erts_mtx_lock(&tb->common.fixlock);
+ fix = fixing_procs_rbt_lookup(tb->common.fixing_procs, p);
+
+ if (fix) {
+ erts_refc_dec(&tb->common.fix_count,0);
+ --(fix->counter);
+ ASSERT(fix->counter >= 0);
+ if (fix->counter == 0) {
+ fixing_procs_rbt_delete(&tb->common.fixing_procs, fix);
+ erts_mtx_unlock(&tb->common.fixlock);
+ fixed_tabs_delete(p, fix);
+
+ erts_refc_dec(&fix->tabs.btid->intern.refc, 1);
-#ifdef ERTS_SMP
- erts_smp_mtx_lock(&tb->common.fixlock);
-#endif
- for (pp = &tb->common.fixations; *pp != NULL; pp = &(*pp)->next) {
- if ((*pp)->pid == p->common.id) {
- DbFixation* fix = *pp;
- erts_refc_dec(&tb->common.ref,0);
- --(fix->counter);
- ASSERT(fix->counter >= 0);
- if (fix->counter > 0) {
- break;
- }
- *pp = fix->next;
-#ifdef ERTS_SMP
- erts_smp_mtx_unlock(&tb->common.fixlock);
-#endif
- db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
- db_erase_bag_exact2(meta_pid_to_fixed_tab,
- p->common.id, make_small(tb->common.slot));
- db_meta_unlock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
erts_db_free(ERTS_ALC_T_DB_FIXATION,
tb, (void *) fix, sizeof(DbFixation));
ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
goto unlocked;
}
}
-#ifdef ERTS_SMP
- erts_smp_mtx_unlock(&tb->common.fixlock);
-#endif
+ erts_mtx_unlock(&tb->common.fixlock);
unlocked:
if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)
- && erts_smp_atomic_read_nob(&tb->hash.fixdel) != (erts_aint_t)NULL) {
-#ifdef ERTS_SMP
+ && erts_atomic_read_nob(&tb->hash.fixdel) != (erts_aint_t)NULL) {
if (*kind_p == LCK_READ && tb->common.is_thread_safe) {
/* Must have write lock while purging pseudo-deleted (OTP-8166) */
- erts_smp_rwmtx_runlock(&tb->common.rwlock);
- erts_smp_rwmtx_rwlock(&tb->common.rwlock);
+ erts_rwmtx_runlock(&tb->common.rwlock);
+ erts_rwmtx_rwlock(&tb->common.rwlock);
*kind_p = LCK_WRITE;
if (tb->common.status & DB_DELETE) return;
}
-#endif
db_unfix_table_hash(&(tb->hash));
}
}
-/* Assume that tb is WRITE locked */
-static void free_fixations_locked(DbTable *tb)
+struct free_fixations_ctx
{
- DbFixation *fix;
- DbFixation *next_fix;
+ Process* p;
+ DbTable* tb;
+ SWord cnt;
+};
+
+static void free_fixations_op(DbFixation* fix, void* vctx)
+{
+ struct free_fixations_ctx* ctx = (struct free_fixations_ctx*) vctx;
+ erts_aint_t diff;
+
+ ASSERT(!btid2tab(fix->tabs.btid));
+ ASSERT(fix->counter > 0);
+ ASSERT(ctx->tb->common.status & DB_DELETE);
+
+ diff = -((erts_aint_t) fix->counter);
+ erts_refc_add(&ctx->tb->common.fix_count, diff, 0);
- fix = tb->common.fixations;
- while (fix != NULL) {
- erts_aint_t diff = -((erts_aint_t) fix->counter);
- erts_refc_add(&tb->common.ref,diff,0);
- next_fix = fix->next;
- db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
- db_erase_bag_exact2(meta_pid_to_fixed_tab,
- fix->pid,
- make_small(tb->common.slot));
- db_meta_unlock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
- erts_db_free(ERTS_ALC_T_DB_FIXATION,
- tb, (void *) fix, sizeof(DbFixation));
- ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
+ if (fix->procs.p != ctx->p) { /* Fixated by other process */
+ fix->counter = 0;
- fix = next_fix;
+ /* Fake memory stats for table */
+ ASSERT(sizeof(DbFixation) == ERTS_ALC_DBG_BLK_SZ(fix));
+ ERTS_DB_ALC_MEM_UPDATE_(ctx->tb, sizeof(DbFixation), 0);
+
+ erts_schedule_ets_free_fixation(fix->procs.p->common.id, fix);
+ /*
+ * Either the sys task is scheduled and erts_db_execute_free_fixation()
+ * will remove 'fix', or the process exits, drops the sys task, and
+ * proc_cleanup_fixed_table() removes 'fix'.
+ */
+ }
+ else
+ {
+ fixed_tabs_delete(fix->procs.p, fix);
+
+ erts_bin_release(fix->tabs.btid);
+
+ erts_db_free(ERTS_ALC_T_DB_FIXATION,
+ ctx->tb, (void *) fix, sizeof(DbFixation));
+ ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
}
- tb->common.fixations = NULL;
+ ctx->cnt++;
+}
+
+int erts_db_execute_free_fixation(Process* p, DbFixation* fix)
+{
+ ASSERT(fix->counter == 0);
+ fixed_tabs_delete(p, fix);
+
+ erts_bin_release(fix->tabs.btid);
+
+ erts_free(ERTS_ALC_T_DB_FIXATION, fix);
+ ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
+ return 1;
+}
+
+static SWord free_fixations_locked(Process* p, DbTable *tb)
+{
+ struct free_fixations_ctx ctx;
+
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock));
+
+ ctx.p = p;
+ ctx.tb = tb;
+ ctx.cnt = 0;
+ fixing_procs_rbt_foreach_destroy(&tb->common.fixing_procs,
+ free_fixations_op, &ctx);
+ tb->common.fixing_procs = NULL;
+ return ctx.cnt;
}
static void set_heir(Process* me, DbTable* tb, Eterm heir, UWord heir_data)
@@ -3550,58 +3902,40 @@ static void free_heir_data(DbTable* tb)
static BIF_RETTYPE ets_delete_trap(BIF_ALIST_1)
{
- Process *p = BIF_P;
+ SWord initial_reds = ERTS_BIF_REDS_LEFT(BIF_P);
+ SWord reds = initial_reds;
Eterm cont = BIF_ARG_1;
- int trap;
Eterm* ptr = big_val(cont);
DbTable *tb = *((DbTable **) (UWord) (ptr + 1));
-#if HALFWORD_HEAP
- ASSERT(*ptr == make_pos_bignum_header(2));
-#else
ASSERT(*ptr == make_pos_bignum_header(1));
-#endif
- db_lock(tb, LCK_WRITE);
- trap = free_table_cont(p, tb, 0, 1);
- db_unlock(tb, LCK_WRITE);
- if (trap) {
- BIF_TRAP1(&ets_delete_continue_exp, p, cont);
+ if (free_table_continue(BIF_P, tb, reds) < 0) {
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP1(&ets_delete_continue_exp, BIF_P, cont);
}
else {
+ BUMP_REDS(BIF_P, (initial_reds - reds));
BIF_RET(am_true);
}
}
/*
- * free_table_cont() returns 0 when done and !0 when more work is needed.
+ * free_table_continue() returns reductions left
+ * done if >= 0
+ * yield if < 0
*/
-static int free_table_cont(Process *p,
- DbTable *tb,
- int first,
- int clean_meta_tab)
+static SWord free_table_continue(Process *p, DbTable *tb, SWord reds)
{
- Eterm result;
- erts_smp_rwmtx_t *mmtl;
+ reds = tb->common.meth->db_free_table_continue(tb, reds);
-#ifdef HARDDEBUG
- if (!first) {
- erts_fprintf(stderr,"ets: free_table_cont %T (continue)\r\n",
- tb->common.id);
- }
-#endif
-
- result = tb->common.meth->db_free_table_continue(tb);
-
- if (result == 0) {
+ if (reds < 0) {
#ifdef HARDDEBUG
erts_fprintf(stderr,"ets: free_table_cont %T (continue begin)\r\n",
tb->common.id);
#endif
/* More work to be done. Let other processes work and call us again. */
- BUMP_ALL_REDS(p);
- return !0;
}
else {
#ifdef HARDDEBUG
@@ -3609,35 +3943,37 @@ static int free_table_cont(Process *p,
tb->common.id);
#endif
/* Completely done - we will not get called again. */
- mmtl = get_meta_main_tab_lock(tb->common.slot);
-#ifdef ERTS_SMP
- if (erts_smp_rwmtx_tryrwlock(mmtl) == EBUSY) {
- erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
- erts_smp_rwmtx_rwlock(mmtl);
- erts_smp_rwmtx_rwlock(&tb->common.rwlock);
- }
-#endif
- free_slot(tb->common.slot);
- erts_smp_rwmtx_rwunlock(mmtl);
-
- if (clean_meta_tab) {
- db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
- db_erase_bag_exact2(meta_pid_to_tab,tb->common.owner,
- make_small(tb->common.slot));
- db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
- }
- schedule_free_dbtable(tb);
- BUMP_REDS(p, 100);
- return 0;
+ delete_owned_table(p, tb);
+ table_dec_refc(tb, 0);
}
+ return reds;
+}
+
+struct fixing_procs_info_ctx
+{
+ Process* p;
+ Eterm list;
+};
+
+static void fixing_procs_info_op(DbFixation* fix, void* vctx)
+{
+ struct fixing_procs_info_ctx* ctx = (struct fixing_procs_info_ctx*) vctx;
+ Eterm* hp;
+ Eterm tpl;
+
+ hp = HAllocX(ctx->p, 5, 100);
+ tpl = TUPLE2(hp, fix->procs.p->common.id, make_small(fix->counter));
+ hp += 3;
+ ctx->list = CONS(hp, tpl, ctx->list);
}
static Eterm table_info(Process* p, DbTable* tb, Eterm What)
{
Eterm ret = THE_NON_VALUE;
+ int use_monotonic;
if (What == am_size) {
- ret = make_small(erts_smp_atomic_read_nob(&tb->common.nitems));
+ ret = make_small(erts_atomic_read_nob(&tb->common.nitems));
} else if (What == am_type) {
if (tb->common.status & DB_SET) {
ret = am_set;
@@ -3650,7 +3986,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
ret = am_bag;
}
} else if (What == am_memory) {
- Uint words = (Uint) ((erts_smp_atomic_read_nob(&tb->common.memory_size)
+ Uint words = (Uint) ((erts_atomic_read_nob(&tb->common.memory_size)
+ sizeof(Uint)
- 1)
/ sizeof(Uint));
@@ -3666,6 +4002,10 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
ret = am_protected;
else if (tb->common.status & DB_PUBLIC)
ret = am_public;
+ } else if (What == am_write_concurrency) {
+ ret = tb->common.status & DB_FINE_LOCKED ? am_true : am_false;
+ } else if (What == am_read_concurrency) {
+ ret = tb->common.status & DB_FREQ_READ ? am_true : am_false;
} else if (What == am_name) {
ret = tb->common.the_name;
} else if (What == am_keypos) {
@@ -3673,7 +4013,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
} else if (What == am_node) {
ret = erts_this_dist_entry->sysname;
} else if (What == am_named_table) {
- ret = is_atom(tb->common.id) ? am_true : am_false;
+ ret = is_table_named(tb) ? am_true : am_false;
} else if (What == am_compressed) {
ret = tb->common.compress ? am_true : am_false;
}
@@ -3688,39 +4028,53 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
ret = am_true;
else
ret = am_false;
- } else if (What == am_atom_put("safe_fixed",10)) {
-#ifdef ERTS_SMP
- erts_smp_mtx_lock(&tb->common.fixlock);
-#endif
+ } else if ((use_monotonic
+ = ERTS_IS_ATOM_STR("safe_fixed_monotonic_time",
+ What))
+ || ERTS_IS_ATOM_STR("safe_fixed", What)) {
+ erts_mtx_lock(&tb->common.fixlock);
if (IS_FIXED(tb)) {
Uint need;
Eterm *hp;
- Eterm tpl, lst;
- DbFixation *fix;
- need = 7;
- for (fix = tb->common.fixations; fix != NULL; fix = fix->next) {
- need += 5;
+ Eterm time;
+ Sint64 mtime;
+ struct fixing_procs_info_ctx ctx;
+
+ need = 3;
+ if (use_monotonic) {
+ mtime = (Sint64) tb->common.time.monotonic;
+ mtime += ERTS_MONOTONIC_OFFSET_NATIVE;
+ if (!IS_SSMALL(mtime))
+ need += ERTS_SINT64_HEAP_SIZE(mtime);
+ }
+ else {
+ mtime = 0;
+ need += 4;
}
+ ctx.p = p;
+ ctx.list = NIL;
+ fixing_procs_rbt_foreach(tb->common.fixing_procs,
+ fixing_procs_info_op,
+ &ctx);
+
hp = HAlloc(p, need);
- lst = NIL;
- for (fix = tb->common.fixations; fix != NULL; fix = fix->next) {
- tpl = TUPLE2(hp,fix->pid,make_small(fix->counter));
- hp += 3;
- lst = CONS(hp,tpl,lst);
- hp += 2;
+ if (use_monotonic)
+ time = (IS_SSMALL(mtime)
+ ? make_small(mtime)
+ : erts_sint64_to_big(mtime, &hp));
+ else {
+ Uint ms, s, us;
+ erts_make_timestamp_value(&ms, &s, &us,
+ tb->common.time.monotonic,
+ tb->common.time.offset);
+ time = TUPLE3(hp, make_small(ms), make_small(s), make_small(us));
+ hp += 4;
}
- tpl = TUPLE3(hp,
- make_small(tb->common.megasec),
- make_small(tb->common.sec),
- make_small(tb->common.microsec));
- hp += 4;
- ret = TUPLE2(hp, tpl, lst);
+ ret = TUPLE2(hp, time, ctx.list);
} else {
ret = am_false;
}
-#ifdef ERTS_SMP
- erts_smp_mtx_unlock(&tb->common.fixlock);
-#endif
+ erts_mtx_unlock(&tb->common.fixlock);
} else if (What == am_atom_put("stats",5)) {
if (IS_HASH_TABLE(tb->common.status)) {
FloatDef f;
@@ -3744,11 +4098,11 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
std_dev_exp = make_float(hp);
PUT_DOUBLE(f, hp);
hp += FLOAT_SIZE_OBJECT;
- ret = TUPLE7(hp, make_small(erts_smp_atomic_read_nob(&tb->hash.nactive)),
+ ret = TUPLE7(hp, make_small(erts_atomic_read_nob(&tb->hash.nactive)),
avg, std_dev_real, std_dev_exp,
make_small(stats.min_chain_len),
make_small(stats.max_chain_len),
- make_small(db_kept_items_hash(&tb->hash)));
+ make_small(stats.kept_items));
}
else {
ret = am_false;
@@ -3757,59 +4111,92 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
return ret;
}
-static void print_table(int to, void *to_arg, int show, DbTable* tb)
+static void print_table(fmtfn_t to, void *to_arg, int show, DbTable* tb)
{
- erts_print(to, to_arg, "Table: %T\n", tb->common.id);
+ Eterm tid;
+ Eterm heap[ERTS_MAGIC_REF_THING_SIZE];
+
+ if (is_table_named(tb)) {
+ tid = tb->common.the_name;
+ } else {
+ ErlOffHeap oh;
+ ERTS_INIT_OFF_HEAP(&oh);
+ write_magic_ref_thing(heap, &oh, (ErtsMagicBinary *) tb->common.btid);
+ tid = make_internal_ref(heap);
+ }
+
+ erts_print(to, to_arg, "Table: %T\n", tid);
erts_print(to, to_arg, "Name: %T\n", tb->common.the_name);
tb->common.meth->db_print(to, to_arg, show, tb);
- erts_print(to, to_arg, "Objects: %d\n", (int)erts_smp_atomic_read_nob(&tb->common.nitems));
+ erts_print(to, to_arg, "Objects: %d\n", (int)erts_atomic_read_nob(&tb->common.nitems));
erts_print(to, to_arg, "Words: %bpu\n",
- (Uint) ((erts_smp_atomic_read_nob(&tb->common.memory_size)
+ (Uint) ((erts_atomic_read_nob(&tb->common.memory_size)
+ sizeof(Uint)
- 1)
/ sizeof(Uint)));
+ erts_print(to, to_arg, "Type: %T\n", table_info(NULL, tb, am_type));
+ erts_print(to, to_arg, "Protection: %T\n", table_info(NULL, tb, am_protection));
+ erts_print(to, to_arg, "Compressed: %T\n", table_info(NULL, tb, am_compressed));
+ erts_print(to, to_arg, "Write Concurrency: %T\n", table_info(NULL, tb, am_write_concurrency));
+ erts_print(to, to_arg, "Read Concurrency: %T\n", table_info(NULL, tb, am_read_concurrency));
}
-void db_info(int to, void *to_arg, int show) /* Called by break handler */
+typedef struct {
+ fmtfn_t to;
+ void *to_arg;
+ int show;
+} ErtsPrintDbInfo;
+
+static void
+db_info_print(DbTable *tb, void *vpdbip)
{
- int i;
- for (i=0; i < db_max_tabs; i++)
- if (IS_SLOT_ALIVE(i)) {
- erts_print(to, to_arg, "=ets:%T\n", meta_main_tab[i].u.tb->common.owner);
- erts_print(to, to_arg, "Slot: %d\n", i);
- print_table(to, to_arg, show, meta_main_tab[i].u.tb);
- }
-#ifdef DEBUG
- erts_print(to, to_arg, "=internal_ets: Process to table index\n");
- print_table(to, to_arg, show, meta_pid_to_tab);
- erts_print(to, to_arg, "=internal_ets: Process to fixation index\n");
- print_table(to, to_arg, show, meta_pid_to_fixed_tab);
-#endif
+ ErtsPrintDbInfo *pdbip = (ErtsPrintDbInfo *) vpdbip;
+ erts_print(pdbip->to, pdbip->to_arg, "=ets:%T\n", tb->common.owner);
+ erts_print(pdbip->to, pdbip->to_arg, "Slot: %bpu\n", (Uint) tb);
+ print_table(pdbip->to, pdbip->to_arg, pdbip->show, tb);
+}
+
+void db_info(fmtfn_t to, void *to_arg, int show) /* Called by break handler */
+{
+ ErtsPrintDbInfo pdbi;
+
+ pdbi.to = to;
+ pdbi.to_arg = to_arg;
+ pdbi.show = show;
+
+ erts_db_foreach_table(db_info_print, &pdbi);
}
Uint
erts_get_ets_misc_mem_size(void)
{
- ERTS_SMP_MEMORY_BARRIER;
+ ERTS_THR_MEMORY_BARRIER;
/* Memory not allocated in ets_alloc */
- return (Uint) erts_smp_atomic_read_nob(&erts_ets_misc_mem_size);
+ return (Uint) erts_atomic_read_nob(&erts_ets_misc_mem_size);
}
/* SMP Note: May only be used when system is locked */
void
erts_db_foreach_table(void (*func)(DbTable *, void *), void *arg)
{
- int i, j;
- j = 0;
- for(i = 0; (i < db_max_tabs && j < meta_main_tab_cnt); i++) {
- if (IS_SLOT_ALIVE(i)) {
- j++;
- (*func)(meta_main_tab[i].u.tb, arg);
- }
+ int ix;
+
+ ASSERT(erts_thr_progress_is_blocking());
+
+ for (ix = 0; ix < erts_no_schedulers; ix++) {
+ ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(ix);
+ DbTable *first = esdp->ets_tables.clist;
+ if (first) {
+ DbTable *tb = first;
+ do {
+ if (is_table_alive(tb))
+ (*func)(tb, arg);
+ tb = tb->common.all.next;
+ } while (tb != first);
+ }
}
- ASSERT(j == meta_main_tab_cnt);
}
/* SMP Note: May only be used when system is locked */
@@ -3859,23 +4246,47 @@ erts_ets_colliding_names(Process* p, Eterm name, Uint cnt)
return list;
}
+#ifdef ERTS_ENABLE_LOCK_COUNT
-#ifdef HARDDEBUG /* Here comes some debug functions */
+void erts_lcnt_enable_db_lock_count(DbTable *tb, int enable) {
+ if(enable) {
+ erts_lcnt_install_new_lock_info(&tb->common.rwlock.lcnt, "db_tab",
+ tb->common.the_name, ERTS_LOCK_TYPE_RWMUTEX | ERTS_LOCK_FLAGS_CATEGORY_DB);
+ erts_lcnt_install_new_lock_info(&tb->common.fixlock.lcnt, "db_tab_fix",
+ tb->common.the_name, ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_DB);
+ } else {
+ erts_lcnt_uninstall(&tb->common.rwlock.lcnt);
+ erts_lcnt_uninstall(&tb->common.fixlock.lcnt);
+ }
-void db_check_tables(void)
-{
-#ifdef ERTS_SMP
- return;
-#else
- int i;
+ if(IS_HASH_TABLE(tb->common.status)) {
+ erts_lcnt_enable_db_hash_lock_count(&tb->hash, enable);
+ }
+}
- for (i = 0; i < db_max_tabs; i++) {
- if (IS_SLOT_ALIVE(i)) {
- DbTable* tb = meta_main_tab[i].t;
- tb->common.meth->db_check_table(tb);
- }
+static void lcnt_update_db_locks_per_sched(void *enable) {
+ ErtsSchedulerData *esdp;
+ DbTable *head;
+
+ esdp = erts_get_scheduler_data();
+ head = esdp->ets_tables.clist;
+
+ if(head) {
+ DbTable *iterator = head;
+
+ do {
+ if(is_table_alive(iterator)) {
+ erts_lcnt_enable_db_lock_count(iterator, !!enable);
+ }
+
+ iterator = iterator->common.all.next;
+ } while (iterator != head);
}
-#endif
}
-#endif /* HARDDEBUG */
+void erts_lcnt_update_db_locks(int enable) {
+ erts_schedule_multi_misc_aux_work(0, erts_no_schedulers,
+ &lcnt_update_db_locks_per_sched, (void*)(UWord)enable);
+}
+
+#endif /* ERTS_ENABLE_LOCK_COUNT */
diff --git a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h
index 5b4681fc90..318e90cb28 100644
--- a/erts/emulator/beam/erl_db.h
+++ b/erts/emulator/beam/erl_db.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -23,8 +24,37 @@
*
*/
-#ifndef __DB_H__
-#define __DB_H__
+#ifndef ERTS_DB_SCHED_SPEC_TYPES__
+#define ERTS_DB_SCHED_SPEC_TYPES__
+
+union db_table;
+typedef union db_table DbTable;
+
+typedef struct ErtsEtsAllReq_ ErtsEtsAllReq;
+
+typedef struct {
+ ErtsEtsAllReq *next;
+ ErtsEtsAllReq *prev;
+} ErtsEtsAllReqList;
+
+typedef struct {
+ ErtsEtsAllReq *ongoing;
+ ErlHeapFragment *hfrag;
+ DbTable *tab;
+ ErtsEtsAllReq *queue;
+} ErtsEtsAllYieldData;
+
+typedef struct {
+ Uint count;
+ DbTable *clist;
+} ErtsEtsTables;
+
+#endif /* ERTS_DB_SCHED_SPEC_TYPES__ */
+
+#ifndef ERTS_ONLY_SCHED_SPEC_ETS_DATA
+
+#ifndef ERL_DB_H__
+#define ERL_DB_H__
#include "sys.h"
#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY
@@ -45,6 +75,12 @@ typedef struct {
ErtsThrPrgrLaterOp data;
} DbTableRelease;
+struct ErtsSchedulerData_;
+int erts_handle_yielded_ets_all_request(struct ErtsSchedulerData_ *esdp,
+ ErtsEtsAllYieldData *eadp);
+
+void erts_ets_sched_spec_data_init(struct ErtsSchedulerData_ *esdp);
+
/*
* So, the structure for a database table, NB this is only
* interesting in db.c.
@@ -61,28 +97,45 @@ union db_table {
"ERL_MAX_ETS_TABLES" */
#define ERL_MAX_ETS_TABLES_ENV "ERL_MAX_ETS_TABLES"
-void init_db(void);
+typedef enum {
+ ERTS_DB_SPNCNT_NONE,
+ ERTS_DB_SPNCNT_VERY_LOW,
+ ERTS_DB_SPNCNT_LOW,
+ ERTS_DB_SPNCNT_NORMAL,
+ ERTS_DB_SPNCNT_HIGH,
+ ERTS_DB_SPNCNT_VERY_HIGH,
+ ERTS_DB_SPNCNT_EXTREMELY_HIGH
+} ErtsDbSpinCount;
+
+void init_db(ErtsDbSpinCount);
int erts_db_process_exiting(Process *, ErtsProcLocks);
-void db_info(int, void *, int);
+int erts_db_execute_free_fixation(Process*, DbFixation*);
+void db_info(fmtfn_t, void *, int);
void erts_db_foreach_table(void (*)(DbTable *, void *), void *);
void erts_db_foreach_offheap(DbTable *,
void (*func)(ErlOffHeap *, void *),
void *);
+extern int erts_ets_rwmtx_spin_count;
extern int user_requested_db_max_tabs; /* set in erl_init */
extern int erts_ets_realloc_always_moves; /* set in erl_init */
extern int erts_ets_always_compress; /* set in erl_init */
extern Export ets_select_delete_continue_exp;
extern Export ets_select_count_continue_exp;
+extern Export ets_select_replace_continue_exp;
extern Export ets_select_continue_exp;
-extern erts_smp_atomic_t erts_ets_misc_mem_size;
+extern erts_atomic_t erts_ets_misc_mem_size;
Eterm erts_ets_colliding_names(Process*, Eterm name, Uint cnt);
-
Uint erts_db_get_max_tabs(void);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_enable_db_lock_count(DbTable *tb, int enable);
+void erts_lcnt_update_db_locks(int enable);
#endif
+#endif /* ERL_DB_H__ */
+
#if defined(ERTS_WANT_DB_INTERNAL__) && !defined(ERTS_HAVE_DB_INTERNAL__)
#define ERTS_HAVE_DB_INTERNAL__
@@ -98,11 +151,11 @@ do { \
erts_aint_t sz__ = (((erts_aint_t) (ALLOC_SZ)) \
- ((erts_aint_t) (FREE_SZ))); \
ASSERT((TAB)); \
- erts_smp_atomic_add_nob(&(TAB)->common.memory_size, sz__); \
+ erts_atomic_add_nob(&(TAB)->common.memory_size, sz__); \
} while (0)
#define ERTS_ETS_MISC_MEM_ADD(SZ) \
- erts_smp_atomic_add_nob(&erts_ets_misc_mem_size, (SZ));
+ erts_atomic_add_nob(&erts_ets_misc_mem_size, (SZ));
ERTS_GLB_INLINE void *erts_db_alloc(ErtsAlcType_t type,
DbTable *tab,
@@ -239,7 +292,7 @@ erts_db_free(ErtsAlcType_t type, DbTable *tab, void *ptr, Uint size)
ERTS_DB_ALC_MEM_UPDATE_(tab, size, 0);
ASSERT(((void *) tab) != ptr
- || erts_smp_atomic_read_nob(&tab->common.memory_size) == 0);
+ || erts_atomic_read_nob(&tab->common.memory_size) == 0);
erts_free(type, ptr);
}
@@ -255,7 +308,6 @@ erts_db_free_nt(ErtsAlcType_t type, void *ptr, Uint size)
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-#undef ERTS_DB_ALC_MEM_UPDATE_
-
#endif /* #if defined(ERTS_WANT_DB_INTERNAL__) && !defined(ERTS_HAVE_DB_INTERNAL__) */
+#endif /* !ERTS_ONLY_SCHED_SPEC_ETS_DATA */
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index 06dac8f161..5d49b2ea14 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2012. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -39,7 +40,7 @@
** DB_FINE_LOCKED set. The table variable is_thread_safe will then indicate
** if operations need to obtain fine grained locks or not. Some operations
** will for example always use exclusive table lock to guarantee
-** a higher level of atomicy.
+** a higher level of atomicity.
*/
/* FIXATION:
@@ -83,46 +84,47 @@
#include "erl_db_hash.h"
-#ifdef MYDEBUG /* Will fail test case ets_SUITE:memory */
-# define IF_DEBUG(x) x
-# define MY_ASSERT(x) ASSERT(x)
-#else
-# define IF_DEBUG(x)
-# define MY_ASSERT(x)
-#endif
-
/*
* The following symbols can be manipulated to "tune" the linear hash array
*/
-#define CHAIN_LEN 6 /* Medium bucket chain len */
+#define GROW_LIMIT(NACTIVE) ((NACTIVE)*1)
+#define SHRINK_LIMIT(NACTIVE) ((NACTIVE) / 2)
+
+/*
+** We want the first mandatory segment to be small (to reduce the minimal
+** footprint) and the extra segments to be larger (to reduce the number of
+** alloc/free calls).
+*/
+
+/* Number of slots in first segment */
+#define FIRST_SEGSZ_EXP 8
+#define FIRST_SEGSZ (1 << FIRST_SEGSZ_EXP)
+#define FIRST_SEGSZ_MASK (FIRST_SEGSZ - 1)
-/* Number of slots per segment */
-#define SEGSZ_EXP 8
-#define SEGSZ (1 << SEGSZ_EXP)
-#define SEGSZ_MASK (SEGSZ-1)
+/* Number of slots per extra segment */
+#define EXT_SEGSZ_EXP 11
+#define EXT_SEGSZ (1 << EXT_SEGSZ_EXP)
+#define EXT_SEGSZ_MASK (EXT_SEGSZ-1)
-#define NSEG_1 2 /* Size of first segment table (must be at least 2) */
+#define NSEG_1 (ErtsSizeofMember(DbTableHash,first_segtab) / sizeof(struct segment*))
#define NSEG_2 256 /* Size of second segment table */
#define NSEG_INC 128 /* Number of segments to grow after that */
-#ifdef ERTS_SMP
# define DB_USING_FINE_LOCKING(TB) (((TB))->common.type & DB_FINE_LOCKED)
-#else
-# define DB_USING_FINE_LOCKING(TB) 0
-#endif
#ifdef ETHR_ORDERED_READ_DEPEND
-#define SEGTAB(tb) ((struct segment**) erts_smp_atomic_read_nob(&(tb)->segtab))
+#define SEGTAB(tb) ((struct segment**) erts_atomic_read_nob(&(tb)->segtab))
#else
#define SEGTAB(tb) \
(DB_USING_FINE_LOCKING(tb) \
- ? ((struct segment**) erts_smp_atomic_read_ddrb(&(tb)->segtab)) \
- : ((struct segment**) erts_smp_atomic_read_nob(&(tb)->segtab)))
+ ? ((struct segment**) erts_atomic_read_ddrb(&(tb)->segtab)) \
+ : ((struct segment**) erts_atomic_read_nob(&(tb)->segtab)))
#endif
-#define NACTIVE(tb) ((int)erts_smp_atomic_read_nob(&(tb)->nactive))
-#define NITEMS(tb) ((int)erts_smp_atomic_read_nob(&(tb)->common.nitems))
+#define NACTIVE(tb) ((int)erts_atomic_read_nob(&(tb)->nactive))
+#define NITEMS(tb) ((int)erts_atomic_read_nob(&(tb)->common.nitems))
+
+#define SLOT_IX_TO_SEG_IX(i) (((i)+(EXT_SEGSZ-FIRST_SEGSZ)) >> EXT_SEGSZ_EXP)
-#define BUCKET(tb, i) SEGTAB(tb)[(i) >> SEGSZ_EXP]->buckets[(i) & SEGSZ_MASK]
+#define BUCKET(tb, i) SEGTAB(tb)[SLOT_IX_TO_SEG_IX(i)]->buckets[(i) & EXT_SEGSZ_MASK]
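/*
 * Worked example of the slot mapping above, with FIRST_SEGSZ = 256
 * (2^8) and EXT_SEGSZ = 2048 (2^11), so EXT_SEGSZ-FIRST_SEGSZ = 1792:
 *
 *   slot 0    -> seg (0    + 1792) >> 11 = 0, bucket 0    & 2047 = 0
 *   slot 255  -> seg 0, bucket 255     (small first segment is full)
 *   slot 256  -> seg 1, bucket 256
 *   slot 2047 -> seg 1, bucket 2047
 *   slot 2048 -> seg (2048 + 1792) >> 11 = 1, bucket 2048 & 2047 = 0
 *   slot 2304 -> seg 2, bucket 256
 *
 * Each extra segment thus serves a full EXT_SEGSZ worth of slots: the
 * 256 bucket offsets that the masking skips at the start of a segment's
 * range are reused by the last 256 slots of that range, so the small
 * first segment leaves no holes.
 */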
/*
* When deleting a table, the number of records to delete.
@@ -136,19 +138,22 @@
static ERTS_INLINE Uint hash_to_ix(DbTableHash* tb, HashValue hval)
{
Uint mask = (DB_USING_FINE_LOCKING(tb)
- ? erts_smp_atomic_read_acqb(&tb->szm)
- : erts_smp_atomic_read_nob(&tb->szm));
+ ? erts_atomic_read_acqb(&tb->szm)
+ : erts_atomic_read_nob(&tb->szm));
Uint ix = hval & mask;
- if (ix >= erts_smp_atomic_read_nob(&tb->nactive)) {
+ if (ix >= erts_atomic_read_nob(&tb->nactive)) {
ix &= mask>>1;
- ASSERT(ix < erts_smp_atomic_read_nob(&tb->nactive));
+ ASSERT(ix < erts_atomic_read_nob(&tb->nactive));
}
return ix;
}
/* Remember a slot containing a pseudo-deleted item (INVALID_HASH)
-*/
-static ERTS_INLINE void add_fixed_deletion(DbTableHash* tb, int ix)
+ * Return false if we were raced by an unfixing thread
+ * and the object should be deleted for real.
+ */
+static ERTS_INLINE int add_fixed_deletion(DbTableHash* tb, int ix,
+ erts_aint_t fixated_by_me)
{
erts_aint_t was_next;
erts_aint_t exp_next;
@@ -157,14 +162,20 @@ static ERTS_INLINE void add_fixed_deletion(DbTableHash* tb, int ix)
sizeof(FixedDeletion));
ERTS_ETS_MISC_MEM_ADD(sizeof(FixedDeletion));
fixd->slot = ix;
- was_next = erts_smp_atomic_read_acqb(&tb->fixdel);
+ was_next = erts_atomic_read_acqb(&tb->fixdel);
do { /* Lockless atomic insertion in linked list: */
- exp_next = was_next;
+ if (NFIXED(tb) <= fixated_by_me) {
+ erts_db_free(ERTS_ALC_T_DB_FIX_DEL, (DbTable*)tb,
+ fixd, sizeof(FixedDeletion));
+ return 0; /* raced by unfixer */
+ }
+ exp_next = was_next;
fixd->next = (FixedDeletion*) exp_next;
- was_next = erts_smp_atomic_cmpxchg_relb(&tb->fixdel,
- (erts_aint_t) fixd,
- exp_next);
+ was_next = erts_atomic_cmpxchg_mb(&tb->fixdel,
+ (erts_aint_t) fixd,
+ exp_next);
}while (was_next != exp_next);
+ return 1;
}
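/*
 * The do/while above is the classic lockless list push, plus one
 * ETS-specific twist: the NFIXED re-check inside the loop backs out if
 * an unfixing thread won the race. The bare push pattern, as a
 * portable C11 sketch (illustrative only, not ERTS code):
 */
#if 0
#include <stdatomic.h>

struct node { struct node *next; };

static void lockless_push(_Atomic(struct node *) *head, struct node *n)
{
    struct node *was = atomic_load_explicit(head, memory_order_acquire);
    do {
        n->next = was;  /* link to the head we last observed */
    } while (!atomic_compare_exchange_weak_explicit(
                 head, &was, n,
                 memory_order_release, memory_order_acquire));
}
#endif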
@@ -174,63 +185,57 @@ static ERTS_INLINE void add_fixed_deletion(DbTableHash* tb, int ix)
/* optimised version of make_hash (normal case? atomic key) */
#define MAKE_HASH(term) \
((is_atom(term) ? (atom_tab(atom_val(term))->slot.bucket.hvalue) : \
- make_hash2(term)) % MAX_HASH)
+ make_internal_hash(term, 0)) % MAX_HASH)
-#ifdef ERTS_SMP
# define DB_HASH_LOCK_MASK (DB_HASH_LOCK_CNT-1)
# define GET_LOCK(tb,hval) (&(tb)->locks->lck_vec[(hval) & DB_HASH_LOCK_MASK].lck)
+# define GET_LOCK_MAYBE(tb,hval) ((tb)->common.is_thread_safe ? NULL : GET_LOCK(tb,hval))
/* Fine grained read lock */
-static ERTS_INLINE erts_smp_rwmtx_t* RLOCK_HASH(DbTableHash* tb, HashValue hval)
+static ERTS_INLINE erts_rwmtx_t* RLOCK_HASH(DbTableHash* tb, HashValue hval)
{
if (tb->common.is_thread_safe) {
return NULL;
} else {
- erts_smp_rwmtx_t* lck = GET_LOCK(tb,hval);
+ erts_rwmtx_t* lck = GET_LOCK(tb,hval);
ASSERT(tb->common.type & DB_FINE_LOCKED);
- erts_smp_rwmtx_rlock(lck);
+ erts_rwmtx_rlock(lck);
return lck;
}
}
/* Fine grained write lock */
-static ERTS_INLINE erts_smp_rwmtx_t* WLOCK_HASH(DbTableHash* tb, HashValue hval)
+static ERTS_INLINE erts_rwmtx_t* WLOCK_HASH(DbTableHash* tb, HashValue hval)
{
if (tb->common.is_thread_safe) {
return NULL;
} else {
- erts_smp_rwmtx_t* lck = GET_LOCK(tb,hval);
+ erts_rwmtx_t* lck = GET_LOCK(tb,hval);
ASSERT(tb->common.type & DB_FINE_LOCKED);
- erts_smp_rwmtx_rwlock(lck);
+ erts_rwmtx_rwlock(lck);
return lck;
}
}
-static ERTS_INLINE void RUNLOCK_HASH(erts_smp_rwmtx_t* lck)
+static ERTS_INLINE void RUNLOCK_HASH(erts_rwmtx_t* lck)
{
if (lck != NULL) {
- erts_smp_rwmtx_runlock(lck);
+ erts_rwmtx_runlock(lck);
}
}
-static ERTS_INLINE void WUNLOCK_HASH(erts_smp_rwmtx_t* lck)
+static ERTS_INLINE void WUNLOCK_HASH(erts_rwmtx_t* lck)
{
if (lck != NULL) {
- erts_smp_rwmtx_rwunlock(lck);
+ erts_rwmtx_rwunlock(lck);
}
}
-#else /* ERTS_SMP */
-# define RLOCK_HASH(tb,hval) NULL
-# define WLOCK_HASH(tb,hval) NULL
-# define RUNLOCK_HASH(lck) ((void)lck)
-# define WUNLOCK_HASH(lck) ((void)lck)
-#endif /* ERTS_SMP */
#ifdef ERTS_ENABLE_LOCK_CHECK
# define IFN_EXCL(tb,cmd) (((tb)->common.is_thread_safe) || (cmd))
-# define IS_HASH_RLOCKED(tb,hval) IFN_EXCL(tb,erts_smp_lc_rwmtx_is_rlocked(GET_LOCK(tb,hval)))
-# define IS_HASH_WLOCKED(tb,lck) IFN_EXCL(tb,erts_smp_lc_rwmtx_is_rwlocked(lck))
-# define IS_TAB_WLOCKED(tb) erts_smp_lc_rwmtx_is_rwlocked(&(tb)->common.rwlock)
+# define IS_HASH_RLOCKED(tb,hval) IFN_EXCL(tb,erts_lc_rwmtx_is_rlocked(GET_LOCK(tb,hval)))
+# define IS_HASH_WLOCKED(tb,lck) IFN_EXCL(tb,erts_lc_rwmtx_is_rwlocked(lck))
+# define IS_TAB_WLOCKED(tb) erts_lc_rwmtx_is_rwlocked(&(tb)->common.rwlock)
#else
# define IS_HASH_RLOCKED(tb,hval) (1)
# define IS_HASH_WLOCKED(tb,hval) (1)
@@ -243,33 +248,25 @@ static ERTS_INLINE void WUNLOCK_HASH(erts_smp_rwmtx_t* lck)
** Slot READ locks updated accordingly, unlocked if EOT.
*/
static ERTS_INLINE Sint next_slot(DbTableHash* tb, Uint ix,
- erts_smp_rwmtx_t** lck_ptr)
+ erts_rwmtx_t** lck_ptr)
{
-#ifdef ERTS_SMP
ix += DB_HASH_LOCK_CNT;
if (ix < NACTIVE(tb)) return ix;
RUNLOCK_HASH(*lck_ptr);
ix = (ix + 1) & DB_HASH_LOCK_MASK;
if (ix != 0) *lck_ptr = RLOCK_HASH(tb,ix);
return ix;
-#else
- return (++ix < NACTIVE(tb)) ? ix : 0;
-#endif
}
/* Same as next_slot but with WRITE locking */
static ERTS_INLINE Sint next_slot_w(DbTableHash* tb, Uint ix,
- erts_smp_rwmtx_t** lck_ptr)
+ erts_rwmtx_t** lck_ptr)
{
-#ifdef ERTS_SMP
ix += DB_HASH_LOCK_CNT;
if (ix < NACTIVE(tb)) return ix;
WUNLOCK_HASH(*lck_ptr);
ix = (ix + 1) & DB_HASH_LOCK_MASK;
if (ix != 0) *lck_ptr = WLOCK_HASH(tb,ix);
return ix;
-#else
- return next_slot(tb,ix,lck_ptr);
-#endif
}
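/*
 * Note on the iteration order above: slots i, i+DB_HASH_LOCK_CNT,
 * i+2*DB_HASH_LOCK_CNT, ... all map to the same striped rwlock, so a
 * full scan walks one stripe at a time and only swaps locks when it
 * crosses into the next stripe. For example, with DB_HASH_LOCK_CNT = 64
 * and 256 active slots, lock 0 is taken once for slots 0, 64, 128, 192
 * rather than four separate times.
 */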
@@ -308,83 +305,52 @@ struct mp_info {
/* A table segment */
struct segment {
- HashDbTerm* buckets[SEGSZ];
-#ifdef MYDEBUG
- int is_ext_segment;
-#endif
+ HashDbTerm* buckets[1];
};
+#define SIZEOF_SEGMENT(N) \
+ (offsetof(struct segment,buckets) + sizeof(HashDbTerm*)*(N))
-/* A segment that also contains a segment table */
-struct ext_segment {
- struct segment s; /* The segment itself. Must be first */
-
+/* An extended segment table */
+struct ext_segtab {
+ ErtsThrPrgrLaterOp lop;
struct segment** prev_segtab; /* Used when table is shrinking */
- int nsegs; /* Size of segtab */
+ int prev_nsegs; /* Size of prev_segtab */
+ int nsegs; /* Size of this segtab */
struct segment* segtab[1]; /* The segment table */
};
-#define SIZEOF_EXTSEG(NSEGS) \
- (offsetof(struct ext_segment,segtab) + sizeof(struct segment*)*(NSEGS))
-
-#if defined(DEBUG) || defined(VALGRIND)
-# define EXTSEG(SEGTAB_PTR) \
- ((struct ext_segment*) (((char*)(SEGTAB_PTR)) - offsetof(struct ext_segment,segtab)))
-#endif
+#define SIZEOF_EXT_SEGTAB(NSEGS) \
+ (offsetof(struct ext_segtab,segtab) + sizeof(struct segment*)*(NSEGS))
static ERTS_INLINE void SET_SEGTAB(DbTableHash* tb,
struct segment** segtab)
{
if (DB_USING_FINE_LOCKING(tb))
- erts_smp_atomic_set_wb(&tb->segtab, (erts_aint_t) segtab);
+ erts_atomic_set_wb(&tb->segtab, (erts_aint_t) segtab);
else
- erts_smp_atomic_set_nob(&tb->segtab, (erts_aint_t) segtab);
-#ifdef VALGRIND
- tb->top_ptr_to_segment_with_active_segtab = EXTSEG(segtab);
-#endif
+ erts_atomic_set_nob(&tb->segtab, (erts_aint_t) segtab);
}
-
-/* How the table segments relate to each other:
-
- ext_segment: ext_segment: "plain" segment
- #=================# #================# #=============#
- | bucket[0] |<--+ +------->| bucket[256] | +->| bucket[512] |
- | bucket[1] | | | | [257] | | | [513] |
- : : | | : : | : :
- | bucket[255] | | | | [511] | | | [767] |
- |-----------------| | | |----------------| | #=============#
- | prev_segtab=NULL| | | +--<---prev_segtab | |
- | nsegs = 2 | | | | | nsegs = 256 | |
-+->| segtab[0] -->-------+---|---|--<---segtab[0] |<-+ |
-| | segtab[1] -->-----------+---|--<---segtab[1] | | |
-| #=================# | | segtab[2] -->-----|--+ ext_segment:
-| | : : | #================#
-+----------------<---------------+ | segtab[255] ->----|----->| bucket[255*256]|
- #================# | | |
- | : :
- | |----------------|
- +----<---prev_segtab |
- : :
-*/
-
+/* Used by select_replace on analyze_pattern */
+typedef int (*extra_match_validator_t)(int keypos, Eterm match, Eterm guard, Eterm body);
/*
** Forward decl's (static functions)
*/
-static struct ext_segment* alloc_ext_seg(DbTableHash* tb, unsigned seg_ix,
- struct segment** old_segtab);
-static int alloc_seg(DbTableHash *tb);
+static struct ext_segtab* alloc_ext_segtab(DbTableHash* tb, unsigned seg_ix);
+static void alloc_seg(DbTableHash *tb);
static int free_seg(DbTableHash *tb, int free_records);
-static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_smp_rwmtx_t** lck_ptr,
+static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_rwmtx_t** lck_ptr,
HashDbTerm *list);
static HashDbTerm* search_list(DbTableHash* tb, Eterm key,
HashValue hval, HashDbTerm *list);
-static void shrink(DbTableHash* tb, int nactive);
-static void grow(DbTableHash* tb, int nactive);
+static void shrink(DbTableHash* tb, int nitems);
+static void grow(DbTableHash* tb, int nitems);
static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2,
- DbTableHash*);
-static int analyze_pattern(DbTableHash *tb, Eterm pattern,
- struct mp_info *mpi);
+ Uint sz, DbTableHash*);
+static int analyze_pattern(DbTableHash *tb, Eterm pattern,
+ extra_match_validator_t extra_validator, /* Optional callback */
+ struct mp_info *mpi);
/*
* Method interface functions
@@ -408,31 +374,37 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object,Eterm *ret);
static int db_slot_hash(Process *p, DbTable *tbl,
Eterm slot_term, Eterm *ret);
-static int db_select_chunk_hash(Process *p, DbTable *tbl,
+static int db_select_chunk_hash(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Sint chunk_size,
int reverse, Eterm *ret);
-static int db_select_hash(Process *p, DbTable *tbl,
+static int db_select_hash(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, int reverse, Eterm *ret);
-static int db_select_count_hash(Process *p, DbTable *tbl,
- Eterm pattern, Eterm *ret);
-static int db_select_delete_hash(Process *p, DbTable *tbl,
- Eterm pattern, Eterm *ret);
-
-static int db_select_continue_hash(Process *p, DbTable *tbl,
+static int db_select_continue_hash(Process *p, DbTable *tbl,
Eterm continuation, Eterm *ret);
-static int db_select_count_continue_hash(Process *p, DbTable *tbl,
+static int db_select_count_hash(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret);
+static int db_select_count_continue_hash(Process *p, DbTable *tbl,
Eterm continuation, Eterm *ret);
+static int db_select_delete_hash(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret);
static int db_select_delete_continue_hash(Process *p, DbTable *tbl,
Eterm continuation, Eterm *ret);
-static void db_print_hash(int to,
+
+static int db_select_replace_hash(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret);
+static int db_select_replace_continue_hash(Process *p, DbTable *tbl,
+ Eterm continuation, Eterm *ret);
+
+static int db_take_hash(Process *, DbTable *, Eterm, Eterm *);
+static void db_print_hash(fmtfn_t to,
void *to_arg,
int show,
DbTable *tbl);
static int db_free_table_hash(DbTable *tbl);
-static int db_free_table_continue_hash(DbTable *tbl);
+static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds);
static void db_foreach_offheap_hash(DbTable *,
@@ -443,21 +415,22 @@ static int db_delete_all_objects_hash(Process* p, DbTable* tbl);
#ifdef HARDDEBUG
static void db_check_table_hash(DbTableHash *tb);
#endif
-static int db_lookup_dbterm_hash(DbTable *tbl, Eterm key, DbUpdateHandle* handle);
-static void db_finalize_dbterm_hash(DbUpdateHandle* handle);
+static int
+db_lookup_dbterm_hash(Process *p, DbTable *tbl, Eterm key, Eterm obj,
+ DbUpdateHandle* handle);
+static void
+db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle);
static ERTS_INLINE void try_shrink(DbTableHash* tb)
{
int nactive = NACTIVE(tb);
- if (nactive > SEGSZ && NITEMS(tb) < (nactive * CHAIN_LEN)
+ int nitems = NITEMS(tb);
+ if (nactive > FIRST_SEGSZ && nitems < SHRINK_LIMIT(nactive)
&& !IS_FIXED(tb)) {
- shrink(tb, nactive);
+ shrink(tb, nitems);
}
}
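/*
 * Note: GROW_LIMIT(n) = n and SHRINK_LIMIT(n) = n/2 form a resize
 * hysteresis -- grow once the average chain length exceeds one item
 * (nitems > nactive), shrink only when the table is less than half
 * full (nitems < nactive/2) -- so a table hovering near one size does
 * not thrash between grow() and shrink(). try_shrink() additionally
 * never goes below FIRST_SEGSZ active slots and is a no-op while the
 * table is fixed.
 */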
-#define EQ_REL(x,y,y_base) \
- (is_same(x,NULL,y,y_base) || (is_not_both_immed((x),(y)) && eq_rel((x),NULL,(y),y_base)))
-
/* Is this a live object (not pseudo-deleted) with the specified key?
*/
static ERTS_INLINE int has_live_key(DbTableHash* tb, HashDbTerm* b,
@@ -467,7 +440,7 @@ static ERTS_INLINE int has_live_key(DbTableHash* tb, HashDbTerm* b,
else {
Eterm itemKey = GETKEY(tb, b->dbterm.tpl);
ASSERT(!is_header(itemKey));
- return EQ_REL(key, itemKey, b->dbterm.tpl);
+ return EQ(key, itemKey);
}
}
@@ -480,7 +453,7 @@ static ERTS_INLINE int has_key(DbTableHash* tb, HashDbTerm* b,
else {
Eterm itemKey = GETKEY(tb, b->dbterm.tpl);
ASSERT(!is_header(itemKey));
- return EQ_REL(key, itemKey, b->dbterm.tpl);
+ return EQ(key, itemKey);
}
}
@@ -536,16 +509,14 @@ DbTableMethod db_hash =
db_select_delete_continue_hash,
db_select_count_hash,
db_select_count_continue_hash,
+ db_select_replace_hash,
+ db_select_replace_continue_hash,
+ db_take_hash,
db_delete_all_objects_hash,
db_free_table_hash,
db_free_table_continue_hash,
db_print_hash,
db_foreach_offheap_hash,
-#ifdef HARDDEBUG
- db_check_table_hash,
-#else
- NULL,
-#endif
db_lookup_dbterm_hash,
db_finalize_dbterm_hash
};
@@ -567,7 +538,7 @@ static void restore_fixdel(DbTableHash* tb, FixedDeletion* fixdel)
{
/*int tries = 0;*/
DEBUG_WAIT();
- if (erts_smp_atomic_cmpxchg_relb(&tb->fixdel,
+ if (erts_atomic_cmpxchg_relb(&tb->fixdel,
(erts_aint_t) fixdel,
(erts_aint_t) NULL) != (erts_aint_t) NULL) {
/* Oboy, must join lists */
@@ -576,13 +547,13 @@ static void restore_fixdel(DbTableHash* tb, FixedDeletion* fixdel)
erts_aint_t exp_tail;
while (last->next != NULL) last = last->next;
- was_tail = erts_smp_atomic_read_acqb(&tb->fixdel);
+ was_tail = erts_atomic_read_acqb(&tb->fixdel);
do { /* Lockless atomic list insertion */
exp_tail = was_tail;
last->next = (FixedDeletion*) exp_tail;
/*++tries;*/
DEBUG_WAIT();
- was_tail = erts_smp_atomic_cmpxchg_relb(&tb->fixdel,
+ was_tail = erts_atomic_cmpxchg_relb(&tb->fixdel,
(erts_aint_t) fixdel,
exp_tail);
}while (was_tail != exp_tail);
@@ -593,22 +564,23 @@ static void restore_fixdel(DbTableHash* tb, FixedDeletion* fixdel)
** Table interface routines ie what's called by the bif's
*/
-void db_unfix_table_hash(DbTableHash *tb)
+SWord db_unfix_table_hash(DbTableHash *tb)
{
FixedDeletion* fixdel;
+ SWord work = 0;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&tb->common.rwlock)
- || (erts_smp_lc_rwmtx_is_rlocked(&tb->common.rwlock)
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock)
+ || (erts_lc_rwmtx_is_rlocked(&tb->common.rwlock)
&& !tb->common.is_thread_safe));
restart:
- fixdel = (FixedDeletion*) erts_smp_atomic_xchg_acqb(&tb->fixdel,
- (erts_aint_t) NULL);
+ fixdel = (FixedDeletion*) erts_atomic_xchg_mb(&tb->fixdel,
+ (erts_aint_t) NULL);
while (fixdel != NULL) {
FixedDeletion *fx = fixdel;
int ix = fx->slot;
HashDbTerm **bp;
HashDbTerm *b;
- erts_smp_rwmtx_t* lck = WLOCK_HASH(tb,ix);
+ erts_rwmtx_t* lck = WLOCK_HASH(tb,ix);
if (IS_FIXED(tb)) { /* interrupted by fixer */
WUNLOCK_HASH(lck);
@@ -616,7 +588,7 @@ restart:
if (!IS_FIXED(tb)) {
goto restart; /* unfixed again! */
}
- return;
+ return work;
}
if (ix < NACTIVE(tb)) {
bp = &BUCKET(tb, ix);
@@ -626,6 +598,7 @@ restart:
if (b->hvalue == INVALID_HASH) {
*bp = b->next;
free_term(tb, b);
+ work++;
b = *bp;
} else {
bp = &b->next;
@@ -641,66 +614,54 @@ restart:
(void *) fx,
sizeof(FixedDeletion));
ERTS_ETS_MISC_MEM_ADD(-sizeof(FixedDeletion));
+ work++;
}
/* ToDo: Maybe try grow/shrink the table as well */
-}
-/* Only used by tests
-*/
-Uint db_kept_items_hash(DbTableHash *tb)
-{
- Uint kept_items = 0;
- Uint ix = 0;
- erts_smp_rwmtx_t* lck = RLOCK_HASH(tb,ix);
- HashDbTerm* b;
- do {
- for (b = BUCKET(tb, ix); b != NULL; b = b->next) {
- if (b->hvalue == INVALID_HASH) {
- ++kept_items;
- }
- }
- ix = next_slot(tb, ix, &lck);
- }while (ix);
- return kept_items;
+ return work;
}
int db_create_hash(Process *p, DbTable *tbl)
{
DbTableHash *tb = &tbl->hash;
- erts_smp_atomic_init_nob(&tb->szm, SEGSZ_MASK);
- erts_smp_atomic_init_nob(&tb->nactive, SEGSZ);
- erts_smp_atomic_init_nob(&tb->fixdel, (erts_aint_t)NULL);
- erts_smp_atomic_init_nob(&tb->segtab, (erts_aint_t)NULL);
- SET_SEGTAB(tb, alloc_ext_seg(tb,0,NULL)->segtab);
+ erts_atomic_init_nob(&tb->szm, FIRST_SEGSZ_MASK);
+ erts_atomic_init_nob(&tb->nactive, FIRST_SEGSZ);
+ erts_atomic_init_nob(&tb->fixdel, (erts_aint_t)NULL);
+ erts_atomic_init_nob(&tb->segtab, (erts_aint_t)NULL);
+ SET_SEGTAB(tb, tb->first_segtab);
tb->nsegs = NSEG_1;
- tb->nslots = SEGSZ;
+ tb->nslots = FIRST_SEGSZ;
+ tb->first_segtab[0] = (struct segment*) erts_db_alloc(ERTS_ALC_T_DB_SEG,
+ (DbTable *) tb,
+ SIZEOF_SEGMENT(FIRST_SEGSZ));
+ sys_memset(tb->first_segtab[0], 0, SIZEOF_SEGMENT(FIRST_SEGSZ));
- erts_smp_atomic_init_nob(&tb->is_resizing, 0);
-#ifdef ERTS_SMP
+ erts_atomic_init_nob(&tb->is_resizing, 0);
if (tb->common.type & DB_FINE_LOCKED) {
- erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
+ erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
int i;
if (tb->common.type & DB_FREQ_READ)
- rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ;
+ if (erts_ets_rwmtx_spin_count >= 0)
+ rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count;
tb->locks = (DbTableHashFineLocks*) erts_db_alloc_fnf(ERTS_ALC_T_DB_SEG, /* Other type maybe? */
(DbTable *) tb,
sizeof(DbTableHashFineLocks));
for (i=0; i<DB_HASH_LOCK_CNT; ++i) {
- erts_smp_rwmtx_init_opt_x(&tb->locks->lck_vec[i].lck, &rwmtx_opt,
- "db_hash_slot", make_small(i));
+ erts_rwmtx_init_opt(&tb->locks->lck_vec[i].lck, &rwmtx_opt,
+ "db_hash_slot", tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
}
- /* This important property is needed to guarantee that the buckets
+ /* This important property is needed to guarantee the two buckets
* involved in a grow/shrink operation are protected by the same lock:
*/
- ASSERT(erts_smp_atomic_read_nob(&tb->nactive) % DB_HASH_LOCK_CNT == 0);
+ ASSERT(erts_atomic_read_nob(&tb->nactive) % DB_HASH_LOCK_CNT == 0);
}
else { /* coarse locking */
tb->locks = NULL;
}
ERTS_THR_MEMORY_BARRIER;
-#endif /* ERST_SMP */
return DB_ERROR_NONE;
}
@@ -708,7 +669,7 @@ static int db_first_hash(Process *p, DbTable *tbl, Eterm *ret)
{
DbTableHash *tb = &tbl->hash;
Uint ix = 0;
- erts_smp_rwmtx_t* lck = RLOCK_HASH(tb,ix);
+ erts_rwmtx_t* lck = RLOCK_HASH(tb,ix);
HashDbTerm* list;
for (;;) {
@@ -741,7 +702,7 @@ static int db_next_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
HashValue hval;
Uint ix;
HashDbTerm* b;
- erts_smp_rwmtx_t* lck;
+ erts_rwmtx_t* lck;
hval = MAKE_HASH(key);
lck = RLOCK_HASH(tb,hval);
@@ -788,7 +749,7 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail)
HashDbTerm** bp;
HashDbTerm* b;
HashDbTerm* q;
- erts_smp_rwmtx_t* lck;
+ erts_rwmtx_t* lck;
int nitems;
int ret = DB_ERROR_NONE;
@@ -814,7 +775,7 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail)
if (tb->common.status & DB_SET) {
HashDbTerm* bnext = b->next;
if (b->hvalue == INVALID_HASH) {
- erts_smp_atomic_inc_nob(&tb->common.nitems);
+ erts_atomic_inc_nob(&tb->common.nitems);
}
else if (key_clash_fail) {
ret = DB_ERROR_BADKEY;
@@ -842,7 +803,7 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail)
do {
if (db_eq(&tb->common,obj,&q->dbterm)) {
if (q->hvalue == INVALID_HASH) {
- erts_smp_atomic_inc_nob(&tb->common.nitems);
+ erts_atomic_inc_nob(&tb->common.nitems);
q->hvalue = hval;
if (q != b) { /* must move to preserve key insertion order */
*qp = q->next;
@@ -863,15 +824,14 @@ Lnew:
q->hvalue = hval;
q->next = b;
*bp = q;
- nitems = erts_smp_atomic_inc_read_nob(&tb->common.nitems);
+ nitems = erts_atomic_inc_read_nob(&tb->common.nitems);
WUNLOCK_HASH(lck);
{
int nactive = NACTIVE(tb);
- if (nitems > nactive * (CHAIN_LEN+1) && !IS_FIXED(tb)) {
- grow(tb, nactive);
+ if (nitems > GROW_LIMIT(nactive) && !IS_FIXED(tb)) {
+ grow(tb, nitems);
}
}
- CHECK_TABLES();
return DB_ERROR_NONE;
Ldone:
@@ -879,112 +839,62 @@ Ldone:
return ret;
}
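
Aside: the grow trigger above changed in two ways: the threshold is now the GROW_LIMIT(nactive) macro, and grow() receives nitems rather than nactive, so a single call can grow toward the current item count. A hedged standalone sketch of the heuristic (the constant and the macro body are assumptions):

    /* Standalone sketch: grow once the average chain would exceed an
     * assumed target length, and grow toward one bucket per item. */
    #include <stdio.h>

    #define CHAIN_LEN 6                          /* assumed target chain length */
    #define GROW_LIMIT(nactive) ((nactive) * CHAIN_LEN)

    int main(void) {
        int nactive = 64;
        for (int nitems = 1; nitems <= 5000; nitems++) {
            if (nitems > GROW_LIMIT(nactive)) {
                printf("at %d items: grow from %d buckets toward %d\n",
                       nitems, nactive, nitems);
                nactive *= 2;                    /* model: double the buckets */
            }
        }
        return 0;
    }
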
-int db_get_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+static Eterm
+get_term_list(Process *p, DbTableHash *tb, Eterm key, HashValue hval,
+ HashDbTerm *b1, HashDbTerm **bend)
{
- DbTableHash *tb = &tbl->hash;
- HashValue hval;
- int ix;
- HashDbTerm* b1;
- erts_smp_rwmtx_t* lck;
-
- hval = MAKE_HASH(key);
- lck = RLOCK_HASH(tb,hval);
- ix = hash_to_ix(tb, hval);
- b1 = BUCKET(tb, ix);
+ HashDbTerm* b2 = b1->next;
+ Eterm copy;
+ Uint sz = b1->dbterm.size + 2;
- while(b1 != 0) {
- if (has_live_key(tb,b1,key,hval)) {
- HashDbTerm* b2 = b1->next;
- Eterm copy;
+ if (tb->common.status & (DB_BAG | DB_DUPLICATE_BAG)) {
+ while (b2 && has_key(tb, b2, key, hval)) {
+ if (b2->hvalue != INVALID_HASH)
+ sz += b2->dbterm.size + 2;
- if (tb->common.status & (DB_BAG | DB_DUPLICATE_BAG)) {
- while(b2 != NULL && has_key(tb,b2,key,hval))
- b2 = b2->next;
- }
- copy = build_term_list(p, b1, b2, tb);
- CHECK_TABLES();
- *ret = copy;
- goto done;
- }
- b1 = b1->next;
+ b2 = b2->next;
+ }
}
- *ret = NIL;
-done:
- RUNLOCK_HASH(lck);
- return DB_ERROR_NONE;
+ copy = build_term_list(p, b1, b2, sz, tb);
+ if (bend) {
+ *bend = b2;
+ }
+ return copy;
}
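
Aside: note how get_term_list() sizes the heap up front: each returned element costs its term size plus two words for the cons cell, which is exactly what the sz accumulation above computes before build_term_list() allocates. A standalone model of that arithmetic (sizes invented):

    /* Standalone sketch of list heap sizing: term words + 2 words per
     * [H|T] cell, mirroring the "dbterm.size + 2" accumulation. */
    #include <stdio.h>

    struct term { unsigned size; };

    static unsigned list_heap_need(const struct term *v, int n) {
        unsigned need = 0;
        for (int i = 0; i < n; i++)
            need += v[i].size + 2;   /* term itself + one cons cell */
        return need;
    }

    int main(void) {
        struct term v[3] = { {4}, {7}, {2} };
        printf("%u\n", list_heap_need(v, 3));   /* 19 */
        return 0;
    }
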
-int db_get_element_array(DbTable *tbl,
- Eterm key,
- int ndex,
- Eterm *ret,
- int *num_ret)
+int db_get_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
{
DbTableHash *tb = &tbl->hash;
HashValue hval;
int ix;
- HashDbTerm* b1;
- int num = 0;
- int retval;
- erts_smp_rwmtx_t* lck;
-
- ASSERT(!IS_FIXED(tbl)); /* no support for fixed tables here */
+ HashDbTerm* b;
+ erts_rwmtx_t* lck;
hval = MAKE_HASH(key);
- lck = RLOCK_HASH(tb, hval);
+ lck = RLOCK_HASH(tb,hval);
ix = hash_to_ix(tb, hval);
- b1 = BUCKET(tb, ix);
-
- while(b1 != 0) {
- if (has_live_key(tb,b1,key,hval)) {
- if (tb->common.status & (DB_BAG | DB_DUPLICATE_BAG)) {
- HashDbTerm* b;
- HashDbTerm* b2 = b1->next;
-
- while(b2 != NULL && has_live_key(tb,b2,key,hval)) {
- if (ndex > arityval(b2->dbterm.tpl[0])) {
- retval = DB_ERROR_BADITEM;
- goto done;
- }
- b2 = b2->next;
- }
+ b = BUCKET(tb, ix);
- b = b1;
- while(b != b2) {
- if (num < *num_ret) {
- ret[num++] = b->dbterm.tpl[ndex];
- } else {
- retval = DB_ERROR_NONE;
- goto done;
- }
- b = b->next;
- }
- *num_ret = num;
- }
- else {
- ASSERT(*num_ret > 0);
- ret[0] = b1->dbterm.tpl[ndex];
- *num_ret = 1;
- }
- retval = DB_ERROR_NONE;
+ while(b != 0) {
+ if (has_live_key(tb, b, key, hval)) {
+ *ret = get_term_list(p, tb, key, hval, b, NULL);
goto done;
}
- b1 = b1->next;
+ b = b->next;
}
- retval = DB_ERROR_BADKEY;
+ *ret = NIL;
done:
RUNLOCK_HASH(lck);
- return retval;
+ return DB_ERROR_NONE;
}
-
static int db_member_hash(DbTable *tbl, Eterm key, Eterm *ret)
{
DbTableHash *tb = &tbl->hash;
HashValue hval;
int ix;
HashDbTerm* b1;
- erts_smp_rwmtx_t* lck;
+ erts_rwmtx_t* lck;
hval = MAKE_HASH(key);
ix = hash_to_ix(tb, hval);
@@ -1013,7 +923,7 @@ static int db_get_element_hash(Process *p, DbTable *tbl,
HashValue hval;
int ix;
HashDbTerm* b1;
- erts_smp_rwmtx_t* lck;
+ erts_rwmtx_t* lck;
int retval;
hval = MAKE_HASH(key);
@@ -1048,7 +958,6 @@ static int db_get_element_hash(Process *p, DbTable *tbl,
Eterm copy = db_copy_element_from_ets(&tb->common, p,
&b->dbterm, ndex, &hp, 2);
elem_list = CONS(hp, copy, elem_list);
- hp += 2;
}
b = b->next;
}
@@ -1070,54 +979,6 @@ done:
}
/*
- * Very internal interface, removes elements of arity two from
- * BAG. Used for the PID meta table
- */
-int db_erase_bag_exact2(DbTable *tbl, Eterm key, Eterm value)
-{
- DbTableHash *tb = &tbl->hash;
- HashValue hval;
- int ix;
- HashDbTerm** bp;
- HashDbTerm* b;
- erts_smp_rwmtx_t* lck;
- int found = 0;
-
- hval = MAKE_HASH(key);
- lck = WLOCK_HASH(tb,hval);
- ix = hash_to_ix(tb, hval);
- bp = &BUCKET(tb, ix);
- b = *bp;
-
- ASSERT(!IS_FIXED(tb));
- ASSERT((tb->common.status & DB_BAG));
- ASSERT(!tb->common.compress);
-
- while(b != 0) {
- if (has_live_key(tb,b,key,hval)) {
- found = 1;
- if ((arityval(b->dbterm.tpl[0]) == 2) &&
- EQ(value, b->dbterm.tpl[2])) {
- *bp = b->next;
- free_term(tb, b);
- erts_smp_atomic_dec_nob(&tb->common.nitems);
- b = *bp;
- break;
- }
- } else if (found) {
- break;
- }
- bp = &b->next;
- b = b->next;
- }
- WUNLOCK_HASH(lck);
- if (found) {
- try_shrink(tb);
- }
- return DB_ERROR_NONE;
-}
-
-/*
** NB, this is for the db_erase/2 bif.
*/
int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret)
@@ -1127,7 +988,7 @@ int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret)
int ix;
HashDbTerm** bp;
HashDbTerm* b;
- erts_smp_rwmtx_t* lck;
+ erts_rwmtx_t* lck;
int nitems_diff = 0;
hval = MAKE_HASH(key);
@@ -1139,9 +1000,9 @@ int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret)
while(b != 0) {
if (has_live_key(tb,b,key,hval)) {
--nitems_diff;
- if (nitems_diff == -1 && IS_FIXED(tb)) {
+ if (nitems_diff == -1 && IS_FIXED(tb)
+ && add_fixed_deletion(tb, ix, 0)) {
/* Pseudo remove (no need to keep several of same key) */
- add_fixed_deletion(tb, ix);
b->hvalue = INVALID_HASH;
} else {
*bp = b->next;
@@ -1159,7 +1020,7 @@ int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret)
}
WUNLOCK_HASH(lck);
if (nitems_diff) {
- erts_smp_atomic_add_nob(&tb->common.nitems, nitems_diff);
+ erts_atomic_add_nob(&tb->common.nitems, nitems_diff);
try_shrink(tb);
}
*ret = am_true;
@@ -1176,7 +1037,7 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret)
int ix;
HashDbTerm** bp;
HashDbTerm* b;
- erts_smp_rwmtx_t* lck;
+ erts_rwmtx_t* lck;
int nitems_diff = 0;
int nkeys = 0;
Eterm key;
@@ -1193,9 +1054,8 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret)
++nkeys;
if (db_eq(&tb->common,object, &b->dbterm)) {
--nitems_diff;
- if (nkeys==1 && IS_FIXED(tb)) { /* Pseudo remove */
- add_fixed_deletion(tb,ix);
- b->hvalue = INVALID_HASH;
+ if (nkeys==1 && IS_FIXED(tb) && add_fixed_deletion(tb,ix,0)) {
+ b->hvalue = INVALID_HASH; /* Pseudo remove */
bp = &b->next;
b = b->next;
} else {
@@ -1218,7 +1078,7 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret)
}
WUNLOCK_HASH(lck);
if (nitems_diff) {
- erts_smp_atomic_add_nob(&tb->common.nitems, nitems_diff);
+ erts_atomic_add_nob(&tb->common.nitems, nitems_diff);
try_shrink(tb);
}
*ret = am_true;
@@ -1229,7 +1089,7 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret)
static int db_slot_hash(Process *p, DbTable *tbl, Eterm slot_term, Eterm *ret)
{
DbTableHash *tb = &tbl->hash;
- erts_smp_rwmtx_t* lck;
+ erts_rwmtx_t* lck;
Sint slot;
int retval;
int nactive;
@@ -1240,7 +1100,7 @@ static int db_slot_hash(Process *p, DbTable *tbl, Eterm slot_term, Eterm *ret)
lck = RLOCK_HASH(tb, slot);
nactive = NACTIVE(tb);
if (slot < nactive) {
- *ret = build_term_list(p, BUCKET(tb, slot), 0, tb);
+ *ret = build_term_list(p, BUCKET(tb, slot), NULL, 0, tb);
retval = DB_ERROR_NONE;
}
else if (slot == nactive) {
@@ -1265,810 +1125,1123 @@ static BIF_RETTYPE bif_trap1(Export *bif,
{
BIF_TRAP1(bif, p, p1);
}
-
+
+
/*
- * Continue collecting select matches, this may happen either due to a trap
- * or when the user calls ets:select/1
+ * Match traversal callbacks
*/
-static int db_select_continue_hash(Process *p,
- DbTable *tbl,
- Eterm continuation,
- Eterm *ret)
-{
- DbTableHash *tb = &tbl->hash;
- Sint slot_ix;
- Sint save_slot_ix;
- Sint chunk_size;
- int all_objects;
- Binary *mp;
- int num_left = 1000;
- HashDbTerm *current = 0;
- Eterm match_list;
- Eterm *hp;
- Eterm match_res;
- Sint got;
- Eterm *tptr;
- erts_smp_rwmtx_t* lck;
-
-#define RET_TO_BIF(Term, State) do { *ret = (Term); return State; } while(0);
- /* Decode continuation. We know it's a tuple but not the arity or anything else */
+/* Called when no match is possible.
+ * context_ptr: Pointer to context
+ * ret: Pointer to the traversal function's returned term.
+ *
+ * Both the direct return value and 'ret' are used as the traversal function return values.
+ */
+typedef int (*mtraversal_on_nothing_can_match_t)(void* context_ptr, Eterm* ret);
+
+/* Called for each match result.
+ * context_ptr: Pointer to context
+ * slot_ix: Current slot index
+ * current_ptr_ptr: Triple pointer to either the bucket or the 'next' pointer in the previous element;
+ * can be (carefully) used to adjust iteration when deleting or replacing elements.
+ * match_res: The result of running the match program against the current term.
+ *
+ * Should return 1 for successful match, 0 otherwise.
+ */
+typedef int (*mtraversal_on_match_res_t)(void* context_ptr, Sint slot_ix, HashDbTerm*** current_ptr_ptr,
+ Eterm match_res);
+
+/* Called when either we've matched enough elements in this cycle or EOT was reached.
+ * context_ptr: Pointer to context
+ * slot_ix: Current slot index
+ * got: How many elements have been matched so far
+ * iterations_left: Number of intended iterations (counting down from an initial maximum) left in this traversal cycle
+ * mpp: Double pointer to the compiled match program
+ * ret: Pointer to the traversal function's returned term.
+ *
+ * Both the direct return value and 'ret' are used as the traversal function return values.
+ * If *mpp is set to NULL, it won't be deallocated (useful for trapping).
+ */
+typedef int (*mtraversal_on_loop_ended_t)(void* context_ptr, Sint slot_ix, Sint got,
+ Sint iterations_left, Binary** mpp, Eterm* ret);
+
+/* Called when it's time to trap
+ * context_ptr: Pointer to context
+ * slot_ix: Current slot index
+ * got: How many elements have been matched so far
+ * mpp: Double pointer to the compiled match program
+ * ret: Pointer to the traversal function's returned term.
+ *
+ * Both the direct return value and 'ret' are used as the traversal function return values.
+ * If *mpp is set to NULL, it won't be deallocated (useful for trapping).
+ */
+typedef int (*mtraversal_on_trap_t)(void* context_ptr, Sint slot_ix, Sint got, Binary** mpp, Eterm* ret);
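
Aside: these four callbacks turn one traversal loop into a whole family of select operations; match_traverse() below owns iteration and locking, while a per-operation context struct carries the rest. A minimal standalone model of the pattern (all names invented), including the saved-pointer trick that lets a callback unlink the current element without the driver double-advancing:

    /* Standalone sketch of the callback protocol: a generic driver
     * walks a chain and defers per-element policy to function
     * pointers plus a caller-supplied context. */
    #include <stdio.h>

    struct node { int value; struct node *next; };

    typedef int (*on_match_t)(void *ctx, struct node ***pos);
    typedef int (*on_done_t)(void *ctx, int got);

    static int traverse(struct node **head, on_match_t on_match,
                        on_done_t on_done, void *ctx) {
        struct node **pos = head;
        int got = 0;
        while (*pos) {
            struct node *saved = *pos;       /* like saved_current above */
            if (on_match(ctx, &pos))
                ++got;
            if (*pos == saved)               /* callback did not unlink */
                pos = &(*pos)->next;
        }
        return on_done(ctx, got);
    }

    static int count_even(void *ctx, struct node ***pos) {
        (void)ctx;
        return (**pos)->value % 2 == 0;      /* stand-in for a match program */
    }

    static int report(void *ctx, int got) {
        (void)ctx;
        printf("matched %d\n", got);
        return 0;
    }

    int main(void) {
        struct node c = {3, 0}, b = {2, &c}, a = {1, &b};
        struct node *head = &a;
        return traverse(&head, count_even, report, NULL);
    }
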
- tptr = tuple_val(continuation);
+/*
+ * Begin hash table match traversal
+ */
+static int match_traverse(Process* p, DbTableHash* tb,
+ Eterm pattern,
+ extra_match_validator_t extra_match_validator, /* Optional */
+ Sint chunk_size, /* If 0, no chunking */
+ Sint iterations_left, /* Nr. of iterations left */
+ Eterm** hpp, /* Heap */
+ int lock_for_write, /* Set to 1 if we're going to delete or
+ modify existing terms */
+ mtraversal_on_nothing_can_match_t on_nothing_can_match,
+ mtraversal_on_match_res_t on_match_res,
+ mtraversal_on_loop_ended_t on_loop_ended,
+ mtraversal_on_trap_t on_trap,
+ void* context_ptr, /* State for callbacks above */
+ Eterm* ret)
+{
+ Sint slot_ix; /* Slot index */
+ HashDbTerm** current_ptr; /* Refers to either the bucket pointer or
+ * the 'next' pointer in the previous term
+ */
+ HashDbTerm* saved_current; /* Helper to avoid double skip on match */
+ struct mp_info mpi;
+ unsigned current_list_pos = 0; /* Prefound buckets list index */
+ Eterm match_res;
+ Sint got = 0; /* Matched terms counter */
+ erts_rwmtx_t* lck; /* Slot lock */
+ int ret_value;
+ erts_rwmtx_t* (*lock_hash_function)(DbTableHash*, HashValue)
+ = (lock_for_write ? WLOCK_HASH : RLOCK_HASH);
+ void (*unlock_hash_function)(erts_rwmtx_t*)
+ = (lock_for_write ? WUNLOCK_HASH : RUNLOCK_HASH);
+ Sint (*next_slot_function)(DbTableHash*, Uint, erts_rwmtx_t**)
+ = (lock_for_write ? next_slot_w : next_slot);
+
+ if ((ret_value = analyze_pattern(tb, pattern, extra_match_validator, &mpi))
+ != DB_ERROR_NONE)
+ {
+ *ret = NIL;
+ goto done;
+ }
- if (arityval(*tptr) != 6)
- RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
-
- if (!is_small(tptr[2]) || !is_small(tptr[3]) || !is_binary(tptr[4]) ||
- !(is_list(tptr[5]) || tptr[5] == NIL) || !is_small(tptr[6]))
- RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
- if ((chunk_size = signed_val(tptr[3])) < 0)
- RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
- if (!(thing_subtag(*binary_val(tptr[4])) == REFC_BINARY_SUBTAG))
- RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
- mp = ((ProcBin *) binary_val(tptr[4]))->val;
- if (!IsMatchProgBinary(mp))
- RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
- all_objects = mp->flags & BIN_FLAG_ALL_OBJECTS;
- match_list = tptr[5];
- if ((got = signed_val(tptr[6])) < 0)
- RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
+ if (!mpi.something_can_match) {
+ /* Can't possibly match anything */
+ ret_value = on_nothing_can_match(context_ptr, ret);
+ goto done;
+ }
- slot_ix = signed_val(tptr[2]);
- if (slot_ix < 0 /* EOT */
- || (chunk_size && got >= chunk_size)) {
- goto done; /* Already got all or enough in the match_list */
+ if (mpi.all_objects) {
+ mpi.mp->intern.flags |= BIN_FLAG_ALL_OBJECTS;
}
- lck = RLOCK_HASH(tb,slot_ix);
- if (slot_ix >= NACTIVE(tb)) {
- RUNLOCK_HASH(lck);
- RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
+ /*
+ * Look for initial slot / bucket
+ */
+ if (!mpi.key_given) {
+ /* Run this code if pattern is variable or GETKEY(pattern) is a variable */
+ slot_ix = 0;
+ lck = lock_hash_function(tb,slot_ix);
+ for (;;) {
+ ASSERT(slot_ix < NACTIVE(tb));
+ if (*(current_ptr = &BUCKET(tb,slot_ix)) != NULL) {
+ break;
+ }
+ slot_ix = next_slot_function(tb,slot_ix,&lck);
+ if (slot_ix == 0) {
+ ret_value = on_loop_ended(context_ptr, slot_ix, got, iterations_left, &mpi.mp, ret);
+ goto done;
+ }
+ }
+ } else {
+ /* We have at least one */
+ slot_ix = mpi.lists[current_list_pos].ix;
+ lck = lock_hash_function(tb, slot_ix);
+ current_ptr = mpi.lists[current_list_pos].bucket;
+ ASSERT(*current_ptr == BUCKET(tb,slot_ix));
+ ++current_list_pos;
}
- while ((current = BUCKET(tb,slot_ix)) == NULL) {
- slot_ix = next_slot(tb, slot_ix, &lck);
- if (slot_ix == 0) {
- slot_ix = -1; /* EOT */
- goto done;
- }
- }
+ /*
+ * Execute traversal cycle
+ */
for(;;) {
- if (current->hvalue != INVALID_HASH &&
- (match_res = db_match_dbterm(&tb->common, p, mp, all_objects,
- &current->dbterm, &hp, 2),
- is_value(match_res))) {
+ if (*current_ptr != NULL) {
+ if ((*current_ptr)->hvalue != INVALID_HASH) {
+ match_res = db_match_dbterm(&tb->common, p, mpi.mp, 0,
+ &(*current_ptr)->dbterm, hpp, 2);
+ saved_current = *current_ptr;
+ if (on_match_res(context_ptr, slot_ix, &current_ptr, match_res)) {
+ ++got;
+ }
+ --iterations_left;
+ if (*current_ptr != saved_current) {
+ /* Don't advance to next, the callback did it already */
+ continue;
+ }
+ }
+ current_ptr = &((*current_ptr)->next);
+ }
+ else if (mpi.key_given) { /* Key is bound */
+ unlock_hash_function(lck);
+ if (current_list_pos == mpi.num_lists) {
+ ret_value = on_loop_ended(context_ptr, -1, got, iterations_left, &mpi.mp, ret);
+ goto done;
+ } else {
+ slot_ix = mpi.lists[current_list_pos].ix;
+ lck = lock_hash_function(tb, slot_ix);
+ current_ptr = mpi.lists[current_list_pos].bucket;
+ ASSERT(mpi.lists[current_list_pos].bucket == &BUCKET(tb,slot_ix));
+ ++current_list_pos;
+ }
+ }
+ else { /* Key is variable */
+ if ((slot_ix = next_slot_function(tb,slot_ix,&lck)) == 0) {
+ slot_ix = -1;
+ break;
+ }
+ if (chunk_size && got >= chunk_size) {
+ unlock_hash_function(lck);
+ break;
+ }
+ if (iterations_left <= 0 || MBUF(p)) {
+ /*
+ * We have either reached our limit, or just created some heap fragments.
+ * Since many heap fragments will make the GC slower, trap and GC now.
+ */
+ unlock_hash_function(lck);
+ ret_value = on_trap(context_ptr, slot_ix, got, &mpi.mp, ret);
+ goto done;
+ }
+ current_ptr = &BUCKET(tb,slot_ix);
+ }
+ }
- match_list = CONS(hp, match_res, match_list);
- ++got;
- }
+ ret_value = on_loop_ended(context_ptr, slot_ix, got, iterations_left, &mpi.mp, ret);
- --num_left;
- save_slot_ix = slot_ix;
- if ((current = next(tb, (Uint*)&slot_ix, &lck, current)) == NULL) {
- slot_ix = -1; /* EOT */
- break;
- }
- if (slot_ix != save_slot_ix) {
- if (chunk_size && got >= chunk_size) {
- RUNLOCK_HASH(lck);
- break;
- }
- if (num_left <= 0 || MBUF(p)) {
- /*
- * We have either reached our limit, or just created some heap fragments.
- * Since many heap fragments will make the GC slower, trap and GC now.
- */
- RUNLOCK_HASH(lck);
- goto trap;
- }
- }
- }
done:
- BUMP_REDS(p, 1000 - num_left);
- if (chunk_size) {
- Eterm continuation;
- Eterm rest = NIL;
- Sint rest_size = 0;
-
- if (got > chunk_size) { /* Cannot write destructively here,
- the list may have
- been in user space */
- rest = NIL;
- hp = HAlloc(p, (got - chunk_size) * 2);
- while (got-- > chunk_size) {
- rest = CONS(hp, CAR(list_val(match_list)), rest);
- hp += 2;
- match_list = CDR(list_val(match_list));
- ++rest_size;
- }
- }
- if (rest != NIL || slot_ix >= 0) {
- hp = HAlloc(p,3+7);
- continuation = TUPLE6(hp, tptr[1], make_small(slot_ix),
- tptr[3], tptr[4], rest,
- make_small(rest_size));
- hp += 7;
- RET_TO_BIF(TUPLE2(hp, match_list, continuation),DB_ERROR_NONE);
- } else {
- if (match_list != NIL) {
- hp = HAlloc(p, 3);
- RET_TO_BIF(TUPLE2(hp, match_list, am_EOT),DB_ERROR_NONE);
- } else {
- RET_TO_BIF(am_EOT, DB_ERROR_NONE);
- }
- }
+ /* We should only jump directly to this label if
+ * we've already called on_nothing_can_match / on_loop_ended / on_trap
+ */
+ if (mpi.mp != NULL) {
+ erts_bin_free(mpi.mp);
}
- RET_TO_BIF(match_list,DB_ERROR_NONE);
-
-trap:
- BUMP_ALL_REDS(p);
-
- hp = HAlloc(p,7);
- continuation = TUPLE6(hp, tptr[1], make_small(slot_ix), tptr[3],
- tptr[4], match_list, make_small(got));
- RET_TO_BIF(bif_trap1(&ets_select_continue_exp, p,
- continuation),
- DB_ERROR_NONE);
-
-#undef RET_TO_BIF
-
-}
+ if (mpi.lists != mpi.dlists) {
+ erts_free(ERTS_ALC_T_DB_SEL_LIST,
+ (void *) mpi.lists);
+ }
+ return ret_value;
-static int db_select_hash(Process *p, DbTable *tbl,
- Eterm pattern, int reverse,
- Eterm *ret)
-{
- return db_select_chunk_hash(p, tbl, pattern, 0, reverse, ret);
}
-static int db_select_chunk_hash(Process *p, DbTable *tbl,
- Eterm pattern, Sint chunk_size,
- int reverse, /* not used */
- Eterm *ret)
+/*
+ * Continue hash table match traversal
+ */
+static int match_traverse_continue(Process* p, DbTableHash* tb,
+ Sint chunk_size, /* If 0, no chunking */
+ Sint iterations_left, /* Nr. of iterations left */
+ Eterm** hpp, /* Heap */
+ Sint slot_ix, /* Slot index to resume traversal from */
+ Sint got, /* Matched terms counter */
+ Binary** mpp, /* Existing match program */
+ int lock_for_write, /* Set to 1 if we're going to delete or
+ modify existing terms */
+ mtraversal_on_match_res_t on_match_res,
+ mtraversal_on_loop_ended_t on_loop_ended,
+ mtraversal_on_trap_t on_trap,
+ void* context_ptr, /* For callbacks */
+ Eterm* ret)
{
- DbTableHash *tb = &tbl->hash;
- struct mp_info mpi;
- Sint slot_ix;
- HashDbTerm *current = 0;
- unsigned current_list_pos = 0;
- Eterm match_list;
+ int all_objects = (*mpp)->intern.flags & BIN_FLAG_ALL_OBJECTS;
+ HashDbTerm** current_ptr; /* Refers to either the bucket pointer or
+ * the 'next' pointer in the previous term
+ */
+ HashDbTerm* saved_current; /* Helper to avoid double skip on match */
Eterm match_res;
- Eterm *hp;
- int num_left = 1000;
- Uint got = 0;
- Eterm continuation;
- int errcode;
- Eterm mpb;
- erts_smp_rwmtx_t* lck;
-
-
-#define RET_TO_BIF(Term,RetVal) do { \
- if (mpi.mp != NULL) { \
- erts_bin_free(mpi.mp); \
- } \
- if (mpi.lists != mpi.dlists) { \
- erts_free(ERTS_ALC_T_DB_SEL_LIST, \
- (void *) mpi.lists); \
- } \
- *ret = (Term); \
- return RetVal; \
- } while(0)
-
-
- if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) {
- RET_TO_BIF(NIL,errcode);
- }
-
- if (!mpi.something_can_match) {
- if (chunk_size) {
- RET_TO_BIF(am_EOT, DB_ERROR_NONE); /* We're done */
- }
- RET_TO_BIF(NIL, DB_ERROR_NONE);
- /* can't possibly match anything */
+ erts_rwmtx_t* lck;
+ int ret_value;
+ erts_rwmtx_t* (*lock_hash_function)(DbTableHash*, HashValue)
+ = (lock_for_write ? WLOCK_HASH : RLOCK_HASH);
+ void (*unlock_hash_function)(erts_rwmtx_t*)
+ = (lock_for_write ? WUNLOCK_HASH : RUNLOCK_HASH);
+ Sint (*next_slot_function)(DbTableHash* tb, Uint ix, erts_rwmtx_t** lck_ptr)
+ = (lock_for_write ? next_slot_w : next_slot);
+
+ if (got < 0) {
+ *ret = NIL;
+ return DB_ERROR_BADPARAM;
+ }
+
+ if (slot_ix < 0 /* EOT */
+ || (chunk_size && got >= chunk_size))
+ {
+ /* Already got all or enough in the match_list */
+ ret_value = on_loop_ended(context_ptr, slot_ix, got, iterations_left, mpp, ret);
+ goto done;
}
- if (!mpi.key_given) {
- /* Run this code if pattern is variable or GETKEY(pattern) */
- /* is a variable */
- slot_ix = 0;
- lck = RLOCK_HASH(tb,slot_ix);
- for (;;) {
- ASSERT(slot_ix < NACTIVE(tb));
- if ((current = BUCKET(tb,slot_ix)) != NULL) {
- break;
- }
- slot_ix = next_slot(tb,slot_ix,&lck);
- if (slot_ix == 0) {
- if (chunk_size) {
- RET_TO_BIF(am_EOT, DB_ERROR_NONE); /* We're done */
- }
- RET_TO_BIF(NIL,DB_ERROR_NONE);
- }
- }
- } else {
- /* We have at least one */
- slot_ix = mpi.lists[current_list_pos].ix;
- lck = RLOCK_HASH(tb, slot_ix);
- current = *(mpi.lists[current_list_pos].bucket);
- ASSERT(current == BUCKET(tb,slot_ix));
- ++current_list_pos;
+ lck = lock_hash_function(tb, slot_ix);
+ if (slot_ix >= NACTIVE(tb)) { /* Is this possible? */
+ unlock_hash_function(lck);
+ *ret = NIL;
+ ret_value = DB_ERROR_BADPARAM;
+ goto done;
}
- match_list = NIL;
-
+ /*
+ * Resume traversal cycle from where we left
+ */
+ current_ptr = &BUCKET(tb,slot_ix);
for(;;) {
- if (current != NULL) {
- if (current->hvalue != INVALID_HASH) {
- match_res = db_match_dbterm(&tb->common, p, mpi.mp, 0,
- &current->dbterm, &hp, 2);
- if (is_value(match_res)) {
- match_list = CONS(hp, match_res, match_list);
- ++got;
- }
- }
- current = current->next;
- }
- else if (mpi.key_given) { /* Key is bound */
- RUNLOCK_HASH(lck);
- if (current_list_pos == mpi.num_lists) {
- slot_ix = -1; /* EOT */
- goto done;
- } else {
- slot_ix = mpi.lists[current_list_pos].ix;
- lck = RLOCK_HASH(tb, slot_ix);
- current = *(mpi.lists[current_list_pos].bucket);
- ASSERT(mpi.lists[current_list_pos].bucket == &BUCKET(tb,slot_ix));
- ++current_list_pos;
- }
- }
- else { /* Key is variable */
- --num_left;
-
- if ((slot_ix=next_slot(tb,slot_ix,&lck)) == 0) {
- slot_ix = -1;
- break;
- }
- if (chunk_size && got >= chunk_size) {
- RUNLOCK_HASH(lck);
- break;
- }
- if (num_left <= 0 || MBUF(p)) {
- /*
- * We have either reached our limit, or just created some heap fragments.
- * Since many heap fragments will make the GC slower, trap and GC now.
- */
- RUNLOCK_HASH(lck);
- goto trap;
- }
- current = BUCKET(tb,slot_ix);
+ if (*current_ptr != NULL) {
+ if ((*current_ptr)->hvalue != INVALID_HASH) {
+ match_res = db_match_dbterm(&tb->common, p, *mpp, all_objects,
+ &(*current_ptr)->dbterm, hpp, 2);
+ saved_current = *current_ptr;
+ if (on_match_res(context_ptr, slot_ix, &current_ptr, match_res)) {
+ ++got;
+ }
+ --iterations_left;
+ if (*current_ptr != saved_current) {
+ /* Don't advance to next, the callback did it already */
+ continue;
+ }
+ }
+ current_ptr = &((*current_ptr)->next);
+ }
+ else {
+ if ((slot_ix=next_slot_function(tb,slot_ix,&lck)) == 0) {
+ slot_ix = -1;
+ break;
+ }
+ if (chunk_size && got >= chunk_size) {
+ unlock_hash_function(lck);
+ break;
+ }
+ if (iterations_left <= 0 || MBUF(p)) {
+ /*
+ * We have either reached our limit, or just created some heap fragments.
+ * Since many heap fragments will make the GC slower, trap and GC now.
+ */
+ unlock_hash_function(lck);
+ ret_value = on_trap(context_ptr, slot_ix, got, mpp, ret);
+ goto done;
+ }
+ current_ptr = &BUCKET(tb,slot_ix);
}
}
-done:
- BUMP_REDS(p, 1000 - num_left);
- if (chunk_size) {
- Eterm continuation;
- Eterm rest = NIL;
- Sint rest_size = 0;
-
- if (mpi.all_objects)
- (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
- if (got > chunk_size) { /* Split list in return value and 'rest' */
- Eterm tmp = match_list;
- rest = match_list;
- while (got-- > chunk_size + 1) {
- tmp = CDR(list_val(tmp));
- ++rest_size;
- }
- ++rest_size;
- match_list = CDR(list_val(tmp));
- CDR(list_val(tmp)) = NIL; /* Destructive, the list has never
- been in 'user space' */
- }
- if (rest != NIL || slot_ix >= 0) { /* Need more calls */
- hp = HAlloc(p,3+7+PROC_BIN_SIZE);
- mpb =db_make_mp_binary(p,(mpi.mp),&hp);
- if (mpi.all_objects)
- (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
- continuation = TUPLE6(hp, tb->common.id,make_small(slot_ix),
- make_small(chunk_size),
- mpb, rest,
- make_small(rest_size));
- mpi.mp = NULL; /*otherwise the return macro will destroy it */
- hp += 7;
- RET_TO_BIF(TUPLE2(hp, match_list, continuation),DB_ERROR_NONE);
- } else { /* All data is exhausted */
- if (match_list != NIL) { /* No more data to search but still a
- result to return to the caller */
- hp = HAlloc(p, 3);
- RET_TO_BIF(TUPLE2(hp, match_list, am_EOT),DB_ERROR_NONE);
- } else { /* Reached the end of the ttable with no data to return */
- RET_TO_BIF(am_EOT, DB_ERROR_NONE);
- }
- }
- }
- RET_TO_BIF(match_list,DB_ERROR_NONE);
-trap:
- BUMP_ALL_REDS(p);
- if (mpi.all_objects)
- (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
- hp = HAlloc(p,7+PROC_BIN_SIZE);
- mpb =db_make_mp_binary(p,(mpi.mp),&hp);
- continuation = TUPLE6(hp, tb->common.id, make_small(slot_ix),
- make_small(chunk_size),
- mpb, match_list,
- make_small(got));
- mpi.mp = NULL; /*otherwise the return macro will destroy it */
- RET_TO_BIF(bif_trap1(&ets_select_continue_exp, p,
- continuation),
- DB_ERROR_NONE);
-#undef RET_TO_BIF
+ ret_value = on_loop_ended(context_ptr, slot_ix, got, iterations_left, mpp, ret);
+
+done:
+ /* We should only jump directly to this label if
+ * we've already called on_loop_ended / on_trap
+ */
+ return ret_value;
}
-static int db_select_count_hash(Process *p,
- DbTable *tbl,
- Eterm pattern,
- Eterm *ret)
+
+/*
+ * Common traversal trapping/continuation code;
+ * used by select_count, select_delete and select_replace,
+ * as well as their continuation-handling counterparts.
+ */
+
+static ERTS_INLINE int on_mtraversal_simple_trap(Export* trap_function,
+ Process* p,
+ DbTableHash* tb,
+ Eterm tid,
+ Eterm* prev_continuation_tptr,
+ Sint slot_ix,
+ Sint got,
+ Binary** mpp,
+ Eterm* ret)
{
- DbTableHash *tb = &tbl->hash;
- struct mp_info mpi;
- Uint slot_ix = 0;
- HashDbTerm* current = NULL;
- unsigned current_list_pos = 0;
- Eterm *hp;
- int num_left = 1000;
- Uint got = 0;
- Eterm continuation;
- int errcode;
+ Eterm* hp;
Eterm egot;
Eterm mpb;
- erts_smp_rwmtx_t* lck;
-
-#define RET_TO_BIF(Term,RetVal) do { \
- if (mpi.mp != NULL) { \
- erts_bin_free(mpi.mp); \
- } \
- if (mpi.lists != mpi.dlists) { \
- erts_free(ERTS_ALC_T_DB_SEL_LIST, \
- (void *) mpi.lists); \
- } \
- *ret = (Term); \
- return RetVal; \
- } while(0)
-
-
- if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) {
- RET_TO_BIF(NIL,errcode);
- }
-
- if (!mpi.something_can_match) {
- RET_TO_BIF(make_small(0), DB_ERROR_NONE);
- /* can't possibly match anything */
- }
-
- if (!mpi.key_given) {
- /* Run this code if pattern is variable or GETKEY(pattern) */
- /* is a variable */
- slot_ix = 0;
- lck = RLOCK_HASH(tb,slot_ix);
- current = BUCKET(tb,slot_ix);
- } else {
- /* We have at least one */
- slot_ix = mpi.lists[current_list_pos].ix;
- lck = RLOCK_HASH(tb, slot_ix);
- current = *(mpi.lists[current_list_pos].bucket);
- ASSERT(current == BUCKET(tb,slot_ix));
- ++current_list_pos;
- }
+ Eterm continuation;
+ int is_first_trap = (prev_continuation_tptr == NULL);
+ size_t base_halloc_sz = (is_first_trap ? ERTS_MAGIC_REF_THING_SIZE : 0);
- for(;;) {
- if (current != NULL) {
- if (current->hvalue != INVALID_HASH) {
- if (db_match_dbterm(&tb->common, p, mpi.mp, 0,
- &current->dbterm, NULL,0) == am_true) {
- ++got;
- }
- --num_left;
- }
- current = current->next;
- }
- else { /* next bucket */
- if (mpi.key_given) { /* Key is bound */
- RUNLOCK_HASH(lck);
- if (current_list_pos == mpi.num_lists) {
- goto done;
- } else {
- slot_ix = mpi.lists[current_list_pos].ix;
- lck = RLOCK_HASH(tb, slot_ix);
- current = *(mpi.lists[current_list_pos].bucket);
- ASSERT(mpi.lists[current_list_pos].bucket == &BUCKET(tb,slot_ix));
- ++current_list_pos;
- }
- }
- else {
- if ((slot_ix=next_slot(tb,slot_ix,&lck)) == 0) {
- goto done;
- }
- if (num_left <= 0) {
- RUNLOCK_HASH(lck);
- goto trap;
- }
- current = BUCKET(tb,slot_ix);
- }
- }
- }
-done:
- BUMP_REDS(p, 1000 - num_left);
- RET_TO_BIF(erts_make_integer(got,p),DB_ERROR_NONE);
-trap:
BUMP_ALL_REDS(p);
if (IS_USMALL(0, got)) {
- hp = HAlloc(p, PROC_BIN_SIZE + 5);
+ hp = HAlloc(p, base_halloc_sz + 5);
egot = make_small(got);
}
else {
- hp = HAlloc(p, BIG_UINT_HEAP_SIZE + PROC_BIN_SIZE + 5);
+ hp = HAlloc(p, base_halloc_sz + BIG_UINT_HEAP_SIZE + 5);
egot = uint_to_big(got, hp);
hp += BIG_UINT_HEAP_SIZE;
}
- mpb = db_make_mp_binary(p,mpi.mp,&hp);
- continuation = TUPLE4(hp, tb->common.id, make_small(slot_ix),
- mpb,
- egot);
- mpi.mp = NULL; /*otherwise the return macro will destroy it */
- RET_TO_BIF(bif_trap1(&ets_select_count_continue_exp, p,
- continuation),
- DB_ERROR_NONE);
-#undef RET_TO_BIF
+ if (is_first_trap) {
+ mpb = erts_db_make_match_prog_ref(p, *mpp, &hp);
+ *mpp = NULL; /* otherwise the caller will destroy it */
+ }
+ else {
+ mpb = prev_continuation_tptr[3];
+ }
+
+ continuation = TUPLE4(
+ hp,
+ tid,
+ make_small(slot_ix),
+ mpb,
+ egot);
+ *ret = bif_trap1(trap_function, p, continuation);
+ return DB_ERROR_NONE;
}
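
Aside: this helper is the trap half of a budgeted computation: burn all remaining reductions, pack the traversal state into a continuation tuple, and re-enter through the trap export later. A standalone model of the trap-and-resume shape (names and the work predicate are invented):

    /* Standalone sketch: do at most `budget` units of work per call,
     * and if unfinished, leave a continuation to resume from. */
    #include <stdio.h>

    struct cont { int next_ix; int got; };

    static int run(int n, int budget, struct cont *c) {
        int ix = c->next_ix;
        while (ix < n && budget-- > 0) {
            if (ix % 3 == 0)          /* stand-in for a match */
                c->got++;
            ++ix;
        }
        c->next_ix = ix;
        return ix < n;                /* 1 = trapped, call again */
    }

    int main(void) {
        struct cont c = {0, 0};
        while (run(100, 16, &c))      /* each call is one "timeslice" */
            printf("trapped at %d, got %d\n", c.next_ix, c.got);
        printf("done, got %d\n", c.got);
        return 0;
    }
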
-static int db_select_delete_hash(Process *p,
- DbTable *tbl,
- Eterm pattern,
- Eterm *ret)
+static ERTS_INLINE int unpack_simple_mtraversal_continuation(Eterm continuation,
+ Eterm** tptr_ptr,
+ Eterm* tid_ptr,
+ Sint* slot_ix_p,
+ Binary** mpp,
+ Sint* got_p)
{
- DbTableHash *tb = &tbl->hash;
- struct mp_info mpi;
- Uint slot_ix = 0;
- HashDbTerm **current = NULL;
- unsigned current_list_pos = 0;
- Eterm *hp;
- int num_left = 1000;
- Uint got = 0;
- Eterm continuation;
- int errcode;
- Uint last_pseudo_delete = (Uint)-1;
- Eterm mpb;
- Eterm egot;
-#ifdef ERTS_SMP
- erts_aint_t fixated_by_me = tb->common.is_thread_safe ? 0 : 1; /* ToDo: something nicer */
-#else
- erts_aint_t fixated_by_me = 0;
-#endif
- erts_smp_rwmtx_t* lck;
-
-#define RET_TO_BIF(Term,RetVal) do { \
- if (mpi.mp != NULL) { \
- erts_bin_free(mpi.mp); \
- } \
- if (mpi.lists != mpi.dlists) { \
- erts_free(ERTS_ALC_T_DB_SEL_LIST, \
- (void *) mpi.lists); \
- } \
- *ret = (Term); \
- return RetVal; \
- } while(0)
-
+ Eterm* tptr;
+ ASSERT(is_tuple(continuation));
+ tptr = tuple_val(continuation);
+ if (arityval(*tptr) != 4)
+ return 1;
- if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) {
- RET_TO_BIF(NIL,errcode);
+ if (! is_small(tptr[2]) || !(is_big(tptr[4]) || is_small(tptr[4]))) {
+ return 1;
}
- if (!mpi.something_can_match) {
- RET_TO_BIF(make_small(0), DB_ERROR_NONE);
- /* can't possibly match anything */
+ *tptr_ptr = tptr;
+ *tid_ptr = tptr[1];
+ *slot_ix_p = unsigned_val(tptr[2]);
+ *mpp = erts_db_get_match_prog_binary_unchecked(tptr[3]);
+ if (is_big(tptr[4])) {
+ *got_p = big_to_uint32(tptr[4]);
}
+ else {
+ *got_p = unsigned_val(tptr[4]);
+ }
+ return 0;
+}
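
Aside: this unpacker is the mirror image of on_mtraversal_simple_trap(): the continuation is a 4-tuple {Tid, SlotIx, MatchProgRef, Got}, validated field by field before the traversal resumes. A standalone model of the pack/validate/unpack round trip (field types are simplified assumptions):

    /* Standalone sketch of a 4-element continuation record and its
     * validating unpacker, mirroring the checks above. */
    #include <stdio.h>

    struct continuation {
        int arity;        /* must be 4, like arityval(*tptr) */
        long tid;         /* table identifier */
        long slot_ix;     /* slot to resume from */
        void *match_prog; /* compiled match program */
        long got;         /* matches so far */
    };

    static int unpack(const struct continuation *c,
                      long *slot_ix, void **mp, long *got) {
        if (c->arity != 4 || c->slot_ix < 0 || c->got < 0)
            return 1;     /* badparam */
        *slot_ix = c->slot_ix;
        *mp = c->match_prog;
        *got = c->got;
        return 0;
    }

    int main(void) {
        struct continuation c = {4, 1, 17, (void *)0x1, 42};
        long slot, got; void *mp;
        printf("unpack -> %d\n", unpack(&c, &slot, &mp, &got)); /* 0 */
        return 0;
    }
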
- if (!mpi.key_given) {
- /* Run this code if pattern is variable or GETKEY(pattern) */
- /* is a variable */
- lck = WLOCK_HASH(tb,slot_ix);
- current = &BUCKET(tb,slot_ix);
- } else {
- /* We have at least one */
- slot_ix = mpi.lists[current_list_pos].ix;
- lck = WLOCK_HASH(tb, slot_ix);
- current = mpi.lists[current_list_pos++].bucket;
- ASSERT(*current == BUCKET(tb,slot_ix));
+
+/*
+ *
+ * select / select_chunk match traversal
+ *
+ */
+
+#define MAX_SELECT_CHUNK_ITERATIONS 1000
+
+typedef struct {
+ Process* p;
+ DbTableHash* tb;
+ Eterm tid;
+ Eterm* hp;
+ Sint chunk_size;
+ Eterm match_list;
+ Eterm* prev_continuation_tptr;
+} mtraversal_select_chunk_context_t;
+
+static int mtraversal_select_chunk_on_nothing_can_match(void* context_ptr, Eterm* ret) {
+ mtraversal_select_chunk_context_t* sc_context_ptr = (mtraversal_select_chunk_context_t*) context_ptr;
+ *ret = (sc_context_ptr->chunk_size > 0 ? am_EOT : NIL);
+ return DB_ERROR_NONE;
+}
+
+static int mtraversal_select_chunk_on_match_res(void* context_ptr, Sint slot_ix,
+ HashDbTerm*** current_ptr_ptr,
+ Eterm match_res)
+{
+ mtraversal_select_chunk_context_t* sc_context_ptr = (mtraversal_select_chunk_context_t*) context_ptr;
+ if (is_value(match_res)) {
+ sc_context_ptr->match_list = CONS(sc_context_ptr->hp, match_res, sc_context_ptr->match_list);
+ return 1;
}
+ return 0;
+}
+static int mtraversal_select_chunk_on_loop_ended(void* context_ptr, Sint slot_ix, Sint got,
+ Sint iterations_left, Binary** mpp, Eterm* ret)
+{
+ mtraversal_select_chunk_context_t* sc_context_ptr = (mtraversal_select_chunk_context_t*) context_ptr;
+ Eterm mpb;
- for(;;) {
- if ((*current) == NULL) {
- if (mpi.key_given) { /* Key is bound */
- WUNLOCK_HASH(lck);
- if (current_list_pos == mpi.num_lists) {
- goto done;
- } else {
- slot_ix = mpi.lists[current_list_pos].ix;
- lck = WLOCK_HASH(tb, slot_ix);
- current = mpi.lists[current_list_pos].bucket;
- ASSERT(mpi.lists[current_list_pos].bucket == &BUCKET(tb,slot_ix));
- ++current_list_pos;
- }
- } else {
- if ((slot_ix=next_slot_w(tb,slot_ix,&lck)) == 0) {
- goto done;
- }
- if (num_left <= 0) {
- WUNLOCK_HASH(lck);
- goto trap;
- }
- current = &BUCKET(tb,slot_ix);
- }
- }
- else if ((*current)->hvalue == INVALID_HASH) {
- current = &((*current)->next);
- }
- else {
- int did_erase = 0;
- if (db_match_dbterm(&tb->common, p, mpi.mp, 0,
- &(*current)->dbterm, NULL, 0) == am_true) {
- if (NFIXED(tb) > fixated_by_me) { /* fixated by others? */
- if (slot_ix != last_pseudo_delete) {
- add_fixed_deletion(tb, slot_ix);
- last_pseudo_delete = slot_ix;
- }
- (*current)->hvalue = INVALID_HASH;
- } else {
- HashDbTerm *del = *current;
- *current = (*current)->next;
- free_term(tb, del);
- did_erase = 1;
- }
- erts_smp_atomic_dec_nob(&tb->common.nitems);
- ++got;
- }
- --num_left;
- if (!did_erase) {
- current = &((*current)->next);
- }
- }
+ if (iterations_left == MAX_SELECT_CHUNK_ITERATIONS) {
+ /* We didn't get to iterate a single time, which means EOT */
+ ASSERT(sc_context_ptr->match_list == NIL);
+ *ret = (sc_context_ptr->chunk_size > 0 ? am_EOT : NIL);
+ return DB_ERROR_NONE;
}
-done:
- BUMP_REDS(p, 1000 - num_left);
- if (got) {
- try_shrink(tb);
+ else {
+ ASSERT(iterations_left < MAX_SELECT_CHUNK_ITERATIONS);
+ BUMP_REDS(sc_context_ptr->p, MAX_SELECT_CHUNK_ITERATIONS - iterations_left);
+ if (sc_context_ptr->chunk_size) {
+ Eterm continuation;
+ Eterm rest = NIL;
+ Sint rest_size = 0;
+
+ if (got > sc_context_ptr->chunk_size) { /* Split list in return value and 'rest' */
+ Eterm tmp = sc_context_ptr->match_list;
+ rest = sc_context_ptr->match_list;
+ while (got-- > sc_context_ptr->chunk_size + 1) {
+ tmp = CDR(list_val(tmp));
+ ++rest_size;
+ }
+ ++rest_size;
+ sc_context_ptr->match_list = CDR(list_val(tmp));
+ CDR(list_val(tmp)) = NIL; /* Destructive, the list has never
+ been in 'user space' */
+ }
+ if (rest != NIL || slot_ix >= 0) { /* Need more calls */
+ sc_context_ptr->hp = HAlloc(sc_context_ptr->p, 3 + 7 + ERTS_MAGIC_REF_THING_SIZE);
+ mpb = erts_db_make_match_prog_ref(sc_context_ptr->p, *mpp, &sc_context_ptr->hp);
+ continuation = TUPLE6(
+ sc_context_ptr->hp,
+ sc_context_ptr->tid,
+ make_small(slot_ix),
+ make_small(sc_context_ptr->chunk_size),
+ mpb, rest,
+ make_small(rest_size));
+ *mpp = NULL; /* Otherwise the caller will destroy it */
+ sc_context_ptr->hp += 7;
+ *ret = TUPLE2(sc_context_ptr->hp, sc_context_ptr->match_list, continuation);
+ return DB_ERROR_NONE;
+ } else { /* All data is exhausted */
+ if (sc_context_ptr->match_list != NIL) { /* No more data to search but still a
+ result to return to the caller */
+ sc_context_ptr->hp = HAlloc(sc_context_ptr->p, 3);
+ *ret = TUPLE2(sc_context_ptr->hp, sc_context_ptr->match_list, am_EOT);
+ return DB_ERROR_NONE;
+ } else { /* Reached the end of the table with no data to return */
+ *ret = am_EOT;
+ return DB_ERROR_NONE;
+ }
+ }
+ }
+ *ret = sc_context_ptr->match_list;
+ return DB_ERROR_NONE;
}
- RET_TO_BIF(erts_make_integer(got,p),DB_ERROR_NONE);
-trap:
- BUMP_ALL_REDS(p);
- if (IS_USMALL(0, got)) {
- hp = HAlloc(p, PROC_BIN_SIZE + 5);
- egot = make_small(got);
+}
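
Aside: the chunk_size handling above returns at most chunk_size results and parks the overflow in the continuation; since the result list was just built and never reached user space, it may be cut destructively. A simplified standalone model of that split (the real code walks a reversed cons list):

    /* Standalone sketch of a destructive chunk split on a chain that
     * no one else can observe yet. */
    #include <stdio.h>

    struct cell { int v; struct cell *next; };

    static struct cell *split(struct cell *list, int keep, struct cell **rest) {
        struct cell *p = list;
        while (--keep > 0 && p)
            p = p->next;
        if (p) { *rest = p->next; p->next = NULL; }  /* destructive cut */
        else     *rest = NULL;
        return list;
    }

    int main(void) {
        struct cell c3 = {3, NULL}, c2 = {2, &c3}, c1 = {1, &c2};
        struct cell *rest, *chunk = split(&c1, 2, &rest);
        for (struct cell *p = chunk; p; p = p->next)
            printf("%d ", p->v);                     /* 1 2 */
        printf("| rest: %d\n", rest ? rest->v : -1); /* 3 */
        return 0;
    }
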
+
+static int mtraversal_select_chunk_on_trap(void* context_ptr, Sint slot_ix, Sint got,
+ Binary** mpp, Eterm* ret)
+{
+ mtraversal_select_chunk_context_t* sc_context_ptr = (mtraversal_select_chunk_context_t*) context_ptr;
+ Eterm mpb;
+ Eterm continuation;
+ Eterm* hp;
+
+ BUMP_ALL_REDS(sc_context_ptr->p);
+
+ if (sc_context_ptr->prev_continuation_tptr == NULL) {
+ /* First time we're trapping */
+ hp = HAlloc(sc_context_ptr->p, 7 + ERTS_MAGIC_REF_THING_SIZE);
+ mpb = erts_db_make_match_prog_ref(sc_context_ptr->p, *mpp, &hp);
+ continuation = TUPLE6(
+ hp,
+ sc_context_ptr->tid,
+ make_small(slot_ix),
+ make_small(sc_context_ptr->chunk_size),
+ mpb,
+ sc_context_ptr->match_list,
+ make_small(got));
+ *mpp = NULL; /* otherwise the caller will destroy it */
}
else {
- hp = HAlloc(p, BIG_UINT_HEAP_SIZE + PROC_BIN_SIZE + 5);
- egot = uint_to_big(got, hp);
- hp += BIG_UINT_HEAP_SIZE;
- }
- mpb = db_make_mp_binary(p,mpi.mp,&hp);
- continuation = TUPLE4(hp, tb->common.id, make_small(slot_ix),
- mpb,
- egot);
- mpi.mp = NULL; /*otherwise the return macro will destroy it */
- RET_TO_BIF(bif_trap1(&ets_select_delete_continue_exp, p,
- continuation),
- DB_ERROR_NONE);
+ /* Not the first time we're trapping; reuse continuation terms */
+ hp = HAlloc(sc_context_ptr->p, 7);
+ continuation = TUPLE6(
+ hp,
+ sc_context_ptr->prev_continuation_tptr[1],
+ make_small(slot_ix),
+ sc_context_ptr->prev_continuation_tptr[3],
+ sc_context_ptr->prev_continuation_tptr[4],
+ sc_context_ptr->match_list,
+ make_small(got));
+ }
+ *ret = bif_trap1(&ets_select_continue_exp, sc_context_ptr->p, continuation);
+ return DB_ERROR_NONE;
+}
-#undef RET_TO_BIF
+static int db_select_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, int reverse, Eterm *ret) {
+ return db_select_chunk_hash(p, tbl, tid, pattern, 0, reverse, ret);
+}
+static int db_select_chunk_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, Sint chunk_size,
+ int reverse, Eterm *ret)
+{
+ mtraversal_select_chunk_context_t sc_context;
+ sc_context.p = p;
+ sc_context.tb = &tbl->hash;
+ sc_context.tid = tid;
+ sc_context.hp = NULL;
+ sc_context.chunk_size = chunk_size;
+ sc_context.match_list = NIL;
+ sc_context.prev_continuation_tptr = NULL;
+
+ return match_traverse(
+ sc_context.p, sc_context.tb,
+ pattern, NULL,
+ sc_context.chunk_size,
+ MAX_SELECT_CHUNK_ITERATIONS,
+ &sc_context.hp, 0,
+ mtraversal_select_chunk_on_nothing_can_match,
+ mtraversal_select_chunk_on_match_res,
+ mtraversal_select_chunk_on_loop_ended,
+ mtraversal_select_chunk_on_trap,
+ &sc_context, ret);
}
+
/*
-** This is called when select_delete traps
-*/
-static int db_select_delete_continue_hash(Process *p,
- DbTable *tbl,
- Eterm continuation,
- Eterm *ret)
+ *
+ * select_continue match traversal
+ *
+ */
+
+static int mtraversal_select_chunk_continue_on_loop_ended(void* context_ptr, Sint slot_ix, Sint got,
+ Sint iterations_left, Binary** mpp, Eterm* ret)
{
- DbTableHash *tb = &tbl->hash;
- Uint slot_ix;
- Uint last_pseudo_delete = (Uint)-1;
- HashDbTerm **current = NULL;
- Eterm *hp;
- int num_left = 1000;
- Uint got;
- Eterm *tptr;
- Binary *mp;
- Eterm egot;
- int fixated_by_me = ONLY_WRITER(p,tb) ? 0 : 1; /* ToDo: something nicer */
- erts_smp_rwmtx_t* lck;
+ mtraversal_select_chunk_context_t* sc_context_ptr = (mtraversal_select_chunk_context_t*) context_ptr;
+ Eterm continuation;
+ Eterm rest = NIL;
+ Eterm* hp;
+
+ ASSERT(iterations_left <= MAX_SELECT_CHUNK_ITERATIONS);
+ BUMP_REDS(sc_context_ptr->p, MAX_SELECT_CHUNK_ITERATIONS - iterations_left);
+ if (sc_context_ptr->chunk_size) {
+ Sint rest_size = 0;
+ if (got > sc_context_ptr->chunk_size) {
+ /* Cannot write destructively here,
+ the list may have
+ been in user space */
+ hp = HAlloc(sc_context_ptr->p, (got - sc_context_ptr->chunk_size) * 2);
+ while (got-- > sc_context_ptr->chunk_size) {
+ rest = CONS(hp, CAR(list_val(sc_context_ptr->match_list)), rest);
+ hp += 2;
+ sc_context_ptr->match_list = CDR(list_val(sc_context_ptr->match_list));
+ ++rest_size;
+ }
+ }
+ if (rest != NIL || slot_ix >= 0) {
+ hp = HAlloc(sc_context_ptr->p, 3 + 7);
+ continuation = TUPLE6(
+ hp,
+ sc_context_ptr->prev_continuation_tptr[1],
+ make_small(slot_ix),
+ sc_context_ptr->prev_continuation_tptr[3],
+ sc_context_ptr->prev_continuation_tptr[4],
+ rest,
+ make_small(rest_size));
+ hp += 7;
+ *ret = TUPLE2(hp, sc_context_ptr->match_list, continuation);
+ return DB_ERROR_NONE;
+ } else {
+ if (sc_context_ptr->match_list != NIL) {
+ hp = HAlloc(sc_context_ptr->p, 3);
+ *ret = TUPLE2(hp, sc_context_ptr->match_list, am_EOT);
+ return DB_ERROR_NONE;
+ } else {
+ *ret = am_EOT;
+ return DB_ERROR_NONE;
+ }
+ }
+ }
+ *ret = sc_context_ptr->match_list;
+ return DB_ERROR_NONE;
+}
-#define RET_TO_BIF(Term,RetVal) do { \
- *ret = (Term); \
- return RetVal; \
- } while(0)
+/*
+ * This is called when select traps
+ */
+static int db_select_continue_hash(Process* p, DbTable* tbl, Eterm continuation, Eterm* ret) {
+ mtraversal_select_chunk_context_t sc_context = {0};
+ Eterm* tptr;
+ Eterm tid;
+ Binary* mp;
+ Sint got;
+ Sint slot_ix;
+ Sint chunk_size;
+ Eterm match_list;
+ Sint iterations_left = MAX_SELECT_CHUNK_ITERATIONS;
-
+ /* Decode continuation. We know it's a tuple but not the arity or anything else */
+ ASSERT(is_tuple(continuation));
tptr = tuple_val(continuation);
- slot_ix = unsigned_val(tptr[2]);
- mp = ((ProcBin *) binary_val(tptr[3]))->val;
- if (is_big(tptr[4])) {
- got = big_to_uint32(tptr[4]);
- } else {
- got = unsigned_val(tptr[4]);
- }
-
- lck = WLOCK_HASH(tb,slot_ix);
- if (slot_ix >= NACTIVE(tb)) {
- WUNLOCK_HASH(lck);
- goto done;
- }
- current = &BUCKET(tb,slot_ix);
- for(;;) {
- if ((*current) == NULL) {
- if ((slot_ix=next_slot_w(tb,slot_ix,&lck)) == 0) {
- goto done;
- }
- if (num_left <= 0) {
- WUNLOCK_HASH(lck);
- goto trap;
- }
- current = &BUCKET(tb,slot_ix);
- }
- else if ((*current)->hvalue == INVALID_HASH) {
- current = &((*current)->next);
- }
- else {
- int did_erase = 0;
- if (db_match_dbterm(&tb->common, p, mp, 0,
- &(*current)->dbterm, NULL, 0) == am_true) {
- if (NFIXED(tb) > fixated_by_me) { /* fixated by others? */
- if (slot_ix != last_pseudo_delete) {
- add_fixed_deletion(tb, slot_ix);
- last_pseudo_delete = slot_ix;
- }
- (*current)->hvalue = INVALID_HASH;
- } else {
- HashDbTerm *del = *current;
- *current = (*current)->next;
- free_term(tb, del);
- did_erase = 1;
- }
- erts_smp_atomic_dec_nob(&tb->common.nitems);
- ++got;
- }
-
- --num_left;
- if (!did_erase) {
- current = &((*current)->next);
- }
- }
+ if (arityval(*tptr) != 6)
+ goto badparam;
+
+ if (!is_small(tptr[2]) || !is_small(tptr[3]) ||
+ !(is_list(tptr[5]) || tptr[5] == NIL) || !is_small(tptr[6]))
+ goto badparam;
+ if ((chunk_size = signed_val(tptr[3])) < 0)
+ goto badparam;
+
+ mp = erts_db_get_match_prog_binary(tptr[4]);
+ if (mp == NULL)
+ goto badparam;
+
+ if ((got = signed_val(tptr[6])) < 0)
+ goto badparam;
+
+ tid = tptr[1];
+ slot_ix = signed_val(tptr[2]);
+ match_list = tptr[5];
+
+ /* Proceed */
+ sc_context.p = p;
+ sc_context.tb = &tbl->hash;
+ sc_context.tid = tid;
+ sc_context.hp = NULL;
+ sc_context.chunk_size = chunk_size;
+ sc_context.match_list = match_list;
+ sc_context.prev_continuation_tptr = tptr;
+
+ return match_traverse_continue(
+ sc_context.p, sc_context.tb, sc_context.chunk_size,
+ iterations_left, &sc_context.hp, slot_ix, got, &mp, 0,
+ mtraversal_select_chunk_on_match_res, /* Reuse callback */
+ mtraversal_select_chunk_continue_on_loop_ended,
+ mtraversal_select_chunk_on_trap, /* Reuse callback */
+ &sc_context, ret);
+
+badparam:
+ *ret = NIL;
+ return DB_ERROR_BADPARAM;
+}
+
+#undef MAX_SELECT_CHUNK_ITERATIONS
+
+
+/*
+ *
+ * select_count match traversal
+ *
+ */
+
+#define MAX_SELECT_COUNT_ITERATIONS 1000
+
+typedef struct {
+ Process* p;
+ DbTableHash* tb;
+ Eterm tid;
+ Eterm* hp;
+ Eterm* prev_continuation_tptr;
+} mtraversal_select_count_context_t;
+
+static int mtraversal_select_count_on_nothing_can_match(void* context_ptr, Eterm* ret) {
+ *ret = make_small(0);
+ return DB_ERROR_NONE;
+}
+
+static int mtraversal_select_count_on_match_res(void* context_ptr, Sint slot_ix,
+ HashDbTerm*** current_ptr_ptr,
+ Eterm match_res)
+{
+ return (match_res == am_true);
+}
+
+static int mtraversal_select_count_on_loop_ended(void* context_ptr, Sint slot_ix, Sint got,
+ Sint iterations_left, Binary** mpp, Eterm* ret)
+{
+ mtraversal_select_count_context_t* scnt_context_ptr = (mtraversal_select_count_context_t*) context_ptr;
+ ASSERT(iterations_left <= MAX_SELECT_COUNT_ITERATIONS);
+ BUMP_REDS(scnt_context_ptr->p, MAX_SELECT_COUNT_ITERATIONS - iterations_left);
+ *ret = erts_make_integer(got, scnt_context_ptr->p);
+ return DB_ERROR_NONE;
+}
+
+static int mtraversal_select_count_on_trap(void* context_ptr, Sint slot_ix, Sint got,
+ Binary** mpp, Eterm* ret)
+{
+ mtraversal_select_count_context_t* scnt_context_ptr = (mtraversal_select_count_context_t*) context_ptr;
+ return on_mtraversal_simple_trap(
+ &ets_select_count_continue_exp,
+ scnt_context_ptr->p,
+ scnt_context_ptr->tb,
+ scnt_context_ptr->tid,
+ scnt_context_ptr->prev_continuation_tptr,
+ slot_ix, got, mpp, ret);
+}
+
+static int db_select_count_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, Eterm *ret) {
+ mtraversal_select_count_context_t scnt_context = {0};
+ Sint iterations_left = MAX_SELECT_COUNT_ITERATIONS;
+ Sint chunk_size = 0;
+
+ scnt_context.p = p;
+ scnt_context.tb = &tbl->hash;
+ scnt_context.tid = tid;
+ scnt_context.hp = NULL;
+ scnt_context.prev_continuation_tptr = NULL;
+
+ return match_traverse(
+ scnt_context.p, scnt_context.tb,
+ pattern, NULL,
+ chunk_size, iterations_left, NULL, 0,
+ mtraversal_select_count_on_nothing_can_match,
+ mtraversal_select_count_on_match_res,
+ mtraversal_select_count_on_loop_ended,
+ mtraversal_select_count_on_trap,
+ &scnt_context, ret);
+}
+
+/*
+ * This is called when select_count traps
+ */
+static int db_select_count_continue_hash(Process* p, DbTable* tbl, Eterm continuation, Eterm* ret) {
+ mtraversal_select_count_context_t scnt_context = {0};
+ Eterm* tptr;
+ Eterm tid;
+ Binary* mp;
+ Sint got;
+ Sint slot_ix;
+ Sint chunk_size = 0;
+ *ret = NIL;
+
+ if (unpack_simple_mtraversal_continuation(continuation, &tptr, &tid, &slot_ix, &mp, &got)) {
+ *ret = NIL;
+ return DB_ERROR_BADPARAM;
+ }
+
+ scnt_context.p = p;
+ scnt_context.tb = &tbl->hash;
+ scnt_context.tid = tid;
+ scnt_context.hp = NULL;
+ scnt_context.prev_continuation_tptr = tptr;
+
+ return match_traverse_continue(
+ scnt_context.p, scnt_context.tb, chunk_size,
+ MAX_SELECT_COUNT_ITERATIONS,
+ NULL, slot_ix, got, &mp, 0,
+ mtraversal_select_count_on_match_res, /* Reuse callback */
+ mtraversal_select_count_on_loop_ended, /* Reuse callback */
+ mtraversal_select_count_on_trap, /* Reuse callback */
+ &scnt_context, ret);
+}
+
+#undef MAX_SELECT_COUNT_ITERATIONS
+
+
+/*
+ *
+ * select_delete match traversal
+ *
+ */
+
+#define MAX_SELECT_DELETE_ITERATIONS 1000
+
+typedef struct {
+ Process* p;
+ DbTableHash* tb;
+ Eterm tid;
+ Eterm* hp;
+ Eterm* prev_continuation_tptr;
+ erts_aint_t fixated_by_me;
+ Uint last_pseudo_delete;
+} mtraversal_select_delete_context_t;
+
+static int mtraversal_select_delete_on_nothing_can_match(void* context_ptr, Eterm* ret) {
+ *ret = make_small(0);
+ return DB_ERROR_NONE;
+}
+
+static int mtraversal_select_delete_on_match_res(void* context_ptr, Sint slot_ix,
+ HashDbTerm*** current_ptr_ptr,
+ Eterm match_res)
+{
+ HashDbTerm** current_ptr = *current_ptr_ptr;
+ mtraversal_select_delete_context_t* sd_context_ptr = (mtraversal_select_delete_context_t*) context_ptr;
+ HashDbTerm* del;
+ if (match_res != am_true)
+ return 0;
+
+ if (NFIXED(sd_context_ptr->tb) > sd_context_ptr->fixated_by_me) { /* fixated by others? */
+ if (slot_ix != sd_context_ptr->last_pseudo_delete) {
+ if (!add_fixed_deletion(sd_context_ptr->tb, slot_ix, sd_context_ptr->fixated_by_me))
+ goto do_erase;
+ sd_context_ptr->last_pseudo_delete = slot_ix;
+ }
+ (*current_ptr)->hvalue = INVALID_HASH;
}
-done:
- BUMP_REDS(p, 1000 - num_left);
- if (got) {
- try_shrink(tb);
+ else {
+ do_erase:
+ del = *current_ptr;
+ *current_ptr = (*current_ptr)->next; /* replace pointer to term using next */
+ free_term(sd_context_ptr->tb, del);
}
- RET_TO_BIF(erts_make_integer(got,p),DB_ERROR_NONE);
-trap:
- BUMP_ALL_REDS(p);
- if (IS_USMALL(0, got)) {
- hp = HAlloc(p, 5);
- egot = make_small(got);
+ erts_atomic_dec_nob(&sd_context_ptr->tb->common.nitems);
+
+ return 1;
+}
+
+static int mtraversal_select_delete_on_loop_ended(void* context_ptr, Sint slot_ix, Sint got,
+ Sint iterations_left, Binary** mpp, Eterm* ret)
+{
+ mtraversal_select_delete_context_t* sd_context_ptr = (mtraversal_select_delete_context_t*) context_ptr;
+ ASSERT(iterations_left <= MAX_SELECT_DELETE_ITERATIONS);
+ BUMP_REDS(sd_context_ptr->p, MAX_SELECT_DELETE_ITERATIONS - iterations_left);
+ if (got) {
+ try_shrink(sd_context_ptr->tb);
}
- else {
- hp = HAlloc(p, BIG_UINT_HEAP_SIZE + 5);
- egot = uint_to_big(got, hp);
- hp += BIG_UINT_HEAP_SIZE;
+ *ret = erts_make_integer(got, sd_context_ptr->p);
+ return DB_ERROR_NONE;
+}
+
+static int mtraversal_select_delete_on_trap(void* context_ptr, Sint slot_ix, Sint got,
+ Binary** mpp, Eterm* ret)
+{
+ mtraversal_select_delete_context_t* sd_context_ptr = (mtraversal_select_delete_context_t*) context_ptr;
+ return on_mtraversal_simple_trap(
+ &ets_select_delete_continue_exp,
+ sd_context_ptr->p,
+ sd_context_ptr->tb,
+ sd_context_ptr->tid,
+ sd_context_ptr->prev_continuation_tptr,
+ slot_ix, got, mpp, ret);
+}
+
+static int db_select_delete_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, Eterm *ret) {
+ mtraversal_select_delete_context_t sd_context = {0};
+ Sint chunk_size = 0;
+
+ sd_context.p = p;
+ sd_context.tb = &tbl->hash;
+ sd_context.tid = tid;
+ sd_context.hp = NULL;
+ sd_context.prev_continuation_tptr = NULL;
+ sd_context.fixated_by_me = sd_context.tb->common.is_thread_safe ? 0 : 1; /* TODO: something nicer */
+ sd_context.last_pseudo_delete = (Uint) -1;
+
+ return match_traverse(
+ sd_context.p, sd_context.tb,
+ pattern, NULL,
+ chunk_size,
+ MAX_SELECT_DELETE_ITERATIONS, NULL, 1,
+ mtraversal_select_delete_on_nothing_can_match,
+ mtraversal_select_delete_on_match_res,
+ mtraversal_select_delete_on_loop_ended,
+ mtraversal_select_delete_on_trap,
+ &sd_context, ret);
+}
+
+/*
+ * This is called when select_delete traps
+ */
+static int db_select_delete_continue_hash(Process* p, DbTable* tbl, Eterm continuation, Eterm* ret) {
+ mtraversal_select_delete_context_t sd_context = {0};
+ Eterm* tptr;
+ Eterm tid;
+ Binary* mp;
+ Sint got;
+ Sint slot_ix;
+ Sint chunk_size = 0;
+
+ if (unpack_simple_mtraversal_continuation(continuation, &tptr, &tid, &slot_ix, &mp, &got)) {
+ *ret = NIL;
+ return DB_ERROR_BADPARAM;
+ }
+
+ sd_context.p = p;
+ sd_context.tb = &tbl->hash;
+ sd_context.tid = tid;
+ sd_context.hp = NULL;
+ sd_context.prev_continuation_tptr = tptr;
+ sd_context.fixated_by_me = ONLY_WRITER(p, sd_context.tb) ? 0 : 1; /* TODO: something nicer */
+ sd_context.last_pseudo_delete = (Uint) -1;
+
+ return match_traverse_continue(
+ sd_context.p, sd_context.tb, chunk_size,
+ MAX_SELECT_DELETE_ITERATIONS,
+ NULL, slot_ix, got, &mp, 1,
+ mtraversal_select_delete_on_match_res, /* Reuse callback */
+ mtraversal_select_delete_on_loop_ended, /* Reuse callback */
+ mtraversal_select_delete_on_trap, /* Reuse callback */
+ &sd_context, ret);
+}
+
+#undef MAX_SELECT_DELETE_ITERATIONS
+
+
+/*
+ *
+ * select_replace match traversal
+ *
+ */
+
+#define MAX_SELECT_REPLACE_ITERATIONS 1000
+
+typedef struct {
+ Process* p;
+ DbTableHash* tb;
+ Eterm tid;
+ Eterm* hp;
+ Eterm* prev_continuation_tptr;
+} mtraversal_select_replace_context_t;
+
+static int mtraversal_select_replace_on_nothing_can_match(void* context_ptr, Eterm* ret) {
+ *ret = make_small(0);
+ return DB_ERROR_NONE;
+}
+
+static int mtraversal_select_replace_on_match_res(void* context_ptr, Sint slot_ix,
+ HashDbTerm*** current_ptr_ptr,
+ Eterm match_res)
+{
+ mtraversal_select_replace_context_t* sr_context_ptr = (mtraversal_select_replace_context_t*) context_ptr;
+ DbTableHash* tb = sr_context_ptr->tb;
+ HashDbTerm* new;
+ HashDbTerm* next;
+ HashValue hval;
+
+ if (is_value(match_res)) {
+#ifdef DEBUG
+ Eterm key = db_getkey(tb->common.keypos, match_res);
+ ASSERT(is_value(key));
+ ASSERT(eq(key, GETKEY(tb, (**current_ptr_ptr)->dbterm.tpl)));
+#endif
+ next = (**current_ptr_ptr)->next;
+ hval = (**current_ptr_ptr)->hvalue;
+ new = new_dbterm(tb, match_res);
+ new->next = next;
+ new->hvalue = hval;
+ free_term(tb, **current_ptr_ptr);
+ **current_ptr_ptr = new; /* replace 'next' pointer in previous object */
+ *current_ptr_ptr = &((**current_ptr_ptr)->next); /* advance to next object */
+ return 1;
}
- continuation = TUPLE4(hp, tb->common.id, make_small(slot_ix),
- tptr[3],
- egot);
- RET_TO_BIF(bif_trap1(&ets_select_delete_continue_exp, p,
- continuation),
- DB_ERROR_NONE);
+ return 0;
+}
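
Aside: select_replace must preserve both the bucket position and the key of the replaced object (the DEBUG block above asserts the key is unchanged), so it splices a freshly built term into the exact link the old one occupied. A standalone model of that splice (names invented):

    /* Standalone sketch of in-place node replacement that preserves
     * the node's position in the chain. */
    #include <stdio.h>
    #include <stdlib.h>

    struct node { int value; struct node *next; };

    static void replace_at(struct node **link, int new_value) {
        struct node *old = *link;
        struct node *fresh = malloc(sizeof *fresh);
        fresh->value = new_value;
        fresh->next = old->next;   /* keep the position in the chain */
        *link = fresh;             /* previous link now points at it */
        free(old);
    }

    int main(void) {
        struct node *b = malloc(sizeof *b); b->value = 2; b->next = NULL;
        struct node *a = malloc(sizeof *a); a->value = 1; a->next = b;
        replace_at(&a->next, 99);  /* replace the second node in place */
        printf("%d %d\n", a->value, a->next->value);   /* 1 99 */
        return 0;
    }
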
-#undef RET_TO_BIF
+static int mtraversal_select_replace_on_loop_ended(void* context_ptr, Sint slot_ix, Sint got,
+ Sint iterations_left, Binary** mpp, Eterm* ret)
+{
+ mtraversal_select_replace_context_t* sr_context_ptr = (mtraversal_select_replace_context_t*) context_ptr;
+ ASSERT(iterations_left <= MAX_SELECT_REPLACE_ITERATIONS);
+ /* the more objects we've replaced, the more reductions we've consumed */
+ BUMP_REDS(sr_context_ptr->p,
+ MIN(MAX_SELECT_REPLACE_ITERATIONS * 2,
+ (MAX_SELECT_REPLACE_ITERATIONS - iterations_left) + (int)got));
+ *ret = erts_make_integer(got, sr_context_ptr->p);
+ return DB_ERROR_NONE;
+}
+static int mtraversal_select_replace_on_trap(void* context_ptr, Sint slot_ix, Sint got,
+ Binary** mpp, Eterm* ret)
+{
+ mtraversal_select_replace_context_t* sr_context_ptr = (mtraversal_select_replace_context_t*) context_ptr;
+ return on_mtraversal_simple_trap(
+ &ets_select_replace_continue_exp,
+ sr_context_ptr->p,
+ sr_context_ptr->tb,
+ sr_context_ptr->tid,
+ sr_context_ptr->prev_continuation_tptr,
+ slot_ix, got, mpp, ret);
}
-
+
+static int db_select_replace_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, Eterm *ret)
+{
+ mtraversal_select_replace_context_t sr_context = {0};
+ Sint chunk_size = 0;
+
+ /* The bag table type is unsupported here for now: supporting it
+ * raised both semantic-consistency and performance issues.
+ */
+ ASSERT(!(tbl->hash.common.status & DB_BAG));
+
+ sr_context.p = p;
+ sr_context.tb = &tbl->hash;
+ sr_context.tid = tid;
+ sr_context.hp = NULL;
+ sr_context.prev_continuation_tptr = NULL;
+
+ return match_traverse(
+ sr_context.p, sr_context.tb,
+ pattern, db_match_keeps_key,
+ chunk_size,
+ MAX_SELECT_REPLACE_ITERATIONS, NULL, 1,
+ mtraversal_select_replace_on_nothing_can_match,
+ mtraversal_select_replace_on_match_res,
+ mtraversal_select_replace_on_loop_ended,
+ mtraversal_select_replace_on_trap,
+ &sr_context, ret);
+}
+
/*
-** This is called when select_count traps
-*/
-static int db_select_count_continue_hash(Process *p,
- DbTable *tbl,
- Eterm continuation,
- Eterm *ret)
+ * This is called when select_replace traps
+ */
+static int db_select_replace_continue_hash(Process* p, DbTable* tbl, Eterm continuation, Eterm* ret)
{
- DbTableHash *tb = &tbl->hash;
- Uint slot_ix;
- HashDbTerm* current;
- Eterm *hp;
- int num_left = 1000;
- Uint got;
- Eterm *tptr;
- Binary *mp;
- Eterm egot;
- erts_smp_rwmtx_t* lck;
+ mtraversal_select_replace_context_t sr_context = {0};
+ Eterm* tptr;
+ Eterm tid;
+ Binary* mp;
+ Sint got;
+ Sint slot_ix;
+ Sint chunk_size = 0;
+ *ret = NIL;
-#define RET_TO_BIF(Term,RetVal) do { \
- *ret = (Term); \
- return RetVal; \
- } while(0)
+ if (unpack_simple_mtraversal_continuation(continuation, &tptr, &tid, &slot_ix, &mp, &got)) {
+ *ret = NIL;
+ return DB_ERROR_BADPARAM;
+ }
+
+ /* Proceed */
+ sr_context.p = p;
+ sr_context.tb = &tbl->hash;
+ sr_context.tid = tid;
+ sr_context.hp = NULL;
+ sr_context.prev_continuation_tptr = tptr;
+
+ return match_traverse_continue(
+ sr_context.p, sr_context.tb, chunk_size,
+ MAX_SELECT_REPLACE_ITERATIONS,
+ NULL, slot_ix, got, &mp, 1,
+ mtraversal_select_replace_on_match_res, /* Reuse callback */
+ mtraversal_select_replace_on_loop_ended, /* Reuse callback */
+ mtraversal_select_replace_on_trap, /* Reuse callback */
+ &sr_context, ret);
+}
-
- tptr = tuple_val(continuation);
- slot_ix = unsigned_val(tptr[2]);
- mp = ((ProcBin *) binary_val(tptr[3]))->val;
- if (is_big(tptr[4])) {
- got = big_to_uint32(tptr[4]);
- } else {
- got = unsigned_val(tptr[4]);
- }
-
- lck = RLOCK_HASH(tb, slot_ix);
- if (slot_ix >= NACTIVE(tb)) { /* Is this posible? */
- RUNLOCK_HASH(lck);
- goto done;
- }
- current = BUCKET(tb,slot_ix);
-
- for(;;) {
- if (current != NULL) {
- if (current->hvalue == INVALID_HASH) {
- current = current->next;
- continue;
- }
- if (db_match_dbterm(&tb->common, p, mp, 0, &current->dbterm,
- NULL, 0) == am_true) {
- ++got;
- }
- --num_left;
- current = current->next;
- }
- else { /* next bucket */
- if ((slot_ix = next_slot(tb,slot_ix,&lck)) == 0) {
- goto done;
- }
- if (num_left <= 0) {
- RUNLOCK_HASH(lck);
- goto trap;
- }
- current = BUCKET(tb,slot_ix);
- }
- }
-done:
- BUMP_REDS(p, 1000 - num_left);
- RET_TO_BIF(erts_make_integer(got,p),DB_ERROR_NONE);
-trap:
- BUMP_ALL_REDS(p);
- if (IS_USMALL(0, got)) {
- hp = HAlloc(p, 5);
- egot = make_small(got);
+static int db_take_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTableHash *tb = &tbl->hash;
+ HashDbTerm **bp, *b;
+ HashValue hval = MAKE_HASH(key);
+ erts_rwmtx_t *lck = WLOCK_HASH(tb, hval);
+ int ix = hash_to_ix(tb, hval);
+ int nitems_diff = 0;
+
+ *ret = NIL;
+ for (bp = &BUCKET(tb, ix), b = *bp; b; bp = &b->next, b = b->next) {
+ if (has_live_key(tb, b, key, hval)) {
+ HashDbTerm *bend;
+
+ *ret = get_term_list(p, tb, key, hval, b, &bend);
+ while (b != bend) {
+ --nitems_diff;
+ if (nitems_diff == -1 && IS_FIXED(tb)
+ && add_fixed_deletion(tb, ix, 0)) {
+                /* Pseudo remove (no need to keep several of the same key) */
+ bp = &b->next;
+ b->hvalue = INVALID_HASH;
+ b = b->next;
+ } else {
+ *bp = b->next;
+ free_term(tb, b);
+ b = *bp;
+ }
+ }
+ break;
+ }
}
- else {
- hp = HAlloc(p, BIG_UINT_HEAP_SIZE + 5);
- egot = uint_to_big(got, hp);
- hp += BIG_UINT_HEAP_SIZE;
+ WUNLOCK_HASH(lck);
+ if (nitems_diff) {
+ erts_atomic_add_nob(&tb->common.nitems, nitems_diff);
+ try_shrink(tb);
}
- continuation = TUPLE4(hp, tb->common.id, make_small(slot_ix),
- tptr[3],
- egot);
- RET_TO_BIF(bif_trap1(&ets_select_count_continue_exp, p,
- continuation),
- DB_ERROR_NONE);
+ return DB_ERROR_NONE;
+}
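
db_take_hash shows the pseudo-deletion rule used throughout this file: while the table is fixed, a matched node is only marked dead so concurrent iterators still see a valid chain. A reduced model with toy types in place of HashDbTerm and INVALID_HASH:

    #include <stdlib.h>

    struct node_sim { struct node_sim *next; unsigned hval; /* payload omitted */ };
    #define INVALID_SIM 0xffffffffu

    /* If 'fixed' is nonzero some iterator may still point into this chain,
     * so only mark the node dead; db_unfix_table_hash unlinks it later.
     * Otherwise unlink and free immediately. */
    static void remove_node_sim(struct node_sim **bp, int fixed)
    {
        struct node_sim *b = *bp;
        if (fixed) {
            b->hval = INVALID_SIM;   /* pseudo remove: chain stays intact */
        } else {
            *bp = b->next;           /* real remove */
            free(b);
        }
    }
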
-#undef RET_TO_BIF
-}
-
/*
** Other interface routines (not directly coupled to one bif)
*/
@@ -2084,30 +2257,54 @@ int db_mark_all_deleted_hash(DbTable *tbl)
HashDbTerm* list;
int i;
- ERTS_SMP_LC_ASSERT(IS_TAB_WLOCKED(tb));
+ ERTS_LC_ASSERT(IS_TAB_WLOCKED(tb));
for (i = 0; i < NACTIVE(tb); i++) {
if ((list = BUCKET(tb,i)) != NULL) {
- add_fixed_deletion(tb, i);
+ add_fixed_deletion(tb, i, 0);
do {
list->hvalue = INVALID_HASH;
list = list->next;
}while(list != NULL);
}
}
- erts_smp_atomic_set_nob(&tb->common.nitems, 0);
+ erts_atomic_set_nob(&tb->common.nitems, 0);
return DB_ERROR_NONE;
}
/* Display hash table contents (for dump) */
-static void db_print_hash(int to, void *to_arg, int show, DbTable *tbl)
+static void db_print_hash(fmtfn_t to, void *to_arg, int show, DbTable *tbl)
{
DbTableHash *tb = &tbl->hash;
+ DbHashStats stats;
int i;
erts_print(to, to_arg, "Buckets: %d\n", NACTIVE(tb));
-
+
+ i = tbl->common.is_thread_safe;
+    /* If crash dumping, we set the table to thread safe in order
+       to avoid taking any locks */
+ if (ERTS_IS_CRASH_DUMPING)
+ tbl->common.is_thread_safe = 1;
+
+ db_calc_stats_hash(&tbl->hash, &stats);
+
+ tbl->common.is_thread_safe = i;
+
+ erts_print(to, to_arg, "Chain Length Avg: %f\n", stats.avg_chain_len);
+ erts_print(to, to_arg, "Chain Length Max: %d\n", stats.max_chain_len);
+ erts_print(to, to_arg, "Chain Length Min: %d\n", stats.min_chain_len);
+ erts_print(to, to_arg, "Chain Length Std Dev: %f\n",
+ stats.std_dev_chain_len);
+ erts_print(to, to_arg, "Chain Length Expected Std Dev: %f\n",
+ stats.std_dev_expected);
+
+ if (IS_FIXED(tb))
+ erts_print(to, to_arg, "Fixed: %d\n", stats.kept_items);
+ else
+ erts_print(to, to_arg, "Fixed: false\n");
+
if (show) {
for (i = 0; i < NACTIVE(tb); i++) {
HashDbTerm* list = BUCKET(tb,i);
@@ -2119,11 +2316,11 @@ static void db_print_hash(int to, void *to_arg, int show, DbTable *tbl)
erts_print(to, to_arg, "*");
if (tb->common.compress) {
Eterm key = GETKEY(tb, list->dbterm.tpl);
- erts_print(to, to_arg, "key=%R", key, list->dbterm.tpl);
+ erts_print(to, to_arg, "key=%T", key);
}
else {
- Eterm obj = make_tuple_rel(list->dbterm.tpl,list->dbterm.tpl);
- erts_print(to, to_arg, "%R", obj, list->dbterm.tpl);
+ Eterm obj = make_tuple(list->dbterm.tpl);
+ erts_print(to, to_arg, "%T", obj);
}
if (list->next != 0)
erts_print(to, to_arg, ",");
@@ -2137,19 +2334,17 @@ static void db_print_hash(int to, void *to_arg, int show, DbTable *tbl)
/* release all memory occupied by a single table */
static int db_free_table_hash(DbTable *tbl)
{
- while (!db_free_table_continue_hash(tbl))
+ while (db_free_table_continue_hash(tbl, ERTS_SWORD_MAX) < 0)
;
return 0;
}
-static int db_free_table_continue_hash(DbTable *tbl)
+static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds)
{
DbTableHash *tb = &tbl->hash;
- int done;
- FixedDeletion* fixdel = (FixedDeletion*) erts_smp_atomic_read_acqb(&tb->fixdel);
- ERTS_SMP_LC_ASSERT(IS_TAB_WLOCKED(tb));
+ FixedDeletion* fixdel = (FixedDeletion*) erts_atomic_read_acqb(&tb->fixdel);
+ ERTS_LC_ASSERT(IS_TAB_WLOCKED(tb) || (tb->common.status & DB_DELETE));
- done = 0;
while (fixdel != NULL) {
FixedDeletion *fx = fixdel;
@@ -2159,25 +2354,23 @@ static int db_free_table_continue_hash(DbTable *tbl)
(void *) fx,
sizeof(FixedDeletion));
ERTS_ETS_MISC_MEM_ADD(-sizeof(FixedDeletion));
- if (++done >= 2*DELETE_RECORD_LIMIT) {
- erts_smp_atomic_set_relb(&tb->fixdel, (erts_aint_t)fixdel);
- return 0; /* Not done */
+ if (--reds < 0) {
+ erts_atomic_set_relb(&tb->fixdel, (erts_aint_t)fixdel);
+ return reds; /* Not done */
}
}
- erts_smp_atomic_set_relb(&tb->fixdel, (erts_aint_t)NULL);
+ erts_atomic_set_relb(&tb->fixdel, (erts_aint_t)NULL);
- done /= 2;
while(tb->nslots != 0) {
- free_seg(tb, 1);
+ reds -= EXT_SEGSZ/64 + free_seg(tb, 1);
/*
* If we have done enough work, get out here.
*/
- if (++done >= (DELETE_RECORD_LIMIT / CHAIN_LEN / SEGSZ)) {
- return 0; /* Not done */
+ if (reds < 0) {
+ return reds; /* Not done */
}
}
-#ifdef ERTS_SMP
if (tb->locks != NULL) {
int i;
for (i=0; i<DB_HASH_LOCK_CNT; ++i) {
@@ -2187,9 +2380,8 @@ static int db_free_table_continue_hash(DbTable *tbl)
(void*)tb->locks, sizeof(DbTableHashFineLocks));
tb->locks = NULL;
}
-#endif
- ASSERT(erts_smp_atomic_read_nob(&tb->common.memory_size) == sizeof(DbTable));
- return 1; /* Done */
+ ASSERT(erts_atomic_read_nob(&tb->common.memory_size) == sizeof(DbTable));
+ return reds; /* Done */
}
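
The new signature makes the reduction budget explicit: the function returns the unspent budget, and a negative result means "yield and call again". A compilable model of the convention, with simplified names:

    /* One unit of budget per freed item; negative return = not done yet. */
    static long free_items_sim(int *items_left, long reds)
    {
        while (*items_left > 0) {
            --*items_left;           /* stands in for freeing one record */
            if (--reds < 0)
                return reds;         /* budget spent: trap */
        }
        return reds;                 /* >= 0: finished */
    }

    /* Caller side, as in db_free_table_hash above (a real caller would
     * yield to the scheduler between calls instead of spinning): */
    static void free_all_sim(int items)
    {
        while (free_items_sim(&items, 1000) < 0)
            ;
    }
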
@@ -2202,7 +2394,8 @@ static int db_free_table_continue_hash(DbTable *tbl)
** slots should be searched. Also compiles the match program
*/
static int analyze_pattern(DbTableHash *tb, Eterm pattern,
- struct mp_info *mpi)
+ extra_match_validator_t extra_validator, /* Optional callback */
+ struct mp_info *mpi)
{
Eterm *ptpl;
Eterm lst, tpl, ttpl;
@@ -2240,7 +2433,10 @@ static int analyze_pattern(DbTableHash *tb, Eterm pattern,
i = 0;
for(lst = pattern; is_list(lst); lst = CDR(list_val(lst))) {
- Eterm body;
+ Eterm match;
+ Eterm guard;
+ Eterm body;
+
ttpl = CAR(list_val(lst));
if (!is_tuple(ttpl)) {
if (buff != sbuff) {
@@ -2255,9 +2451,17 @@ static int analyze_pattern(DbTableHash *tb, Eterm pattern,
}
return DB_ERROR_BADPARAM;
}
- matches[i] = tpl = ptpl[1];
- guards[i] = ptpl[2];
+ matches[i] = match = tpl = ptpl[1];
+ guards[i] = guard = ptpl[2];
bodies[i] = body = ptpl[3];
+
+ if(extra_validator != NULL && !extra_validator(tb->common.keypos, match, guard, body)) {
+ if (buff != sbuff) {
+ erts_free(ERTS_ALC_T_DB_TMP, buff);
+ }
+ return DB_ERROR_BADPARAM;
+ }
+
if (!is_list(body) || CDR(list_val(body)) != NIL ||
CAR(list_val(body)) != am_DollarUnderscore) {
mpi->all_objects = 0;
@@ -2275,7 +2479,7 @@ static int analyze_pattern(DbTableHash *tb, Eterm pattern,
if (!db_has_variable(key)) { /* Bound key */
int ix, search_slot;
HashDbTerm** bp;
- erts_smp_rwmtx_t* lck;
+ erts_rwmtx_t* lck;
hval = MAKE_HASH(key);
lck = RLOCK_HASH(tb,hval);
ix = hash_to_ix(tb, hval);
@@ -2332,69 +2536,58 @@ static int analyze_pattern(DbTableHash *tb, Eterm pattern,
return DB_ERROR_NONE;
}
-static struct ext_segment* alloc_ext_seg(DbTableHash* tb, unsigned seg_ix,
- struct segment** old_segtab)
+static struct ext_segtab* alloc_ext_segtab(DbTableHash* tb, unsigned seg_ix)
{
- int nsegs;
- struct ext_segment* eseg;
+ struct segment** old_segtab = SEGTAB(tb);
+ int nsegs = 0;
+ struct ext_segtab* est;
+ ASSERT(seg_ix >= NSEG_1);
switch (seg_ix) {
- case 0: nsegs = NSEG_1; break;
- case 1: nsegs = NSEG_2; break;
- default: nsegs = seg_ix + NSEG_INC; break;
- }
- eseg = (struct ext_segment*) erts_db_alloc_fnf(ERTS_ALC_T_DB_SEG,
- (DbTable *) tb,
- SIZEOF_EXTSEG(nsegs));
- ASSERT(eseg != NULL);
- sys_memset(&eseg->s, 0, sizeof(struct segment));
- IF_DEBUG(eseg->s.is_ext_segment = 1);
- eseg->prev_segtab = old_segtab;
- eseg->nsegs = nsegs;
- if (old_segtab) {
- ASSERT(nsegs > tb->nsegs);
- sys_memcpy(eseg->segtab, old_segtab, tb->nsegs*sizeof(struct segment*));
- }
+ case NSEG_1: nsegs = NSEG_2; break;
+ default: nsegs = seg_ix + NSEG_INC; break;
+ }
+ ASSERT(nsegs > tb->nsegs);
+ est = (struct ext_segtab*) erts_db_alloc(ERTS_ALC_T_DB_SEG,
+ (DbTable *) tb,
+ SIZEOF_EXT_SEGTAB(nsegs));
+ est->nsegs = nsegs;
+ est->prev_segtab = old_segtab;
+ est->prev_nsegs = tb->nsegs;
+ sys_memcpy(est->segtab, old_segtab, tb->nsegs*sizeof(struct segment*));
#ifdef DEBUG
- sys_memset(&eseg->segtab[seg_ix], 0, (nsegs-seg_ix)*sizeof(struct segment*));
+ sys_memset(&est->segtab[seg_ix], 0, (nsegs-seg_ix)*sizeof(struct segment*));
#endif
- eseg->segtab[seg_ix] = &eseg->s;
- return eseg;
+ return est;
}
/* Extend table with one new segment
*/
-static int alloc_seg(DbTableHash *tb)
+static void alloc_seg(DbTableHash *tb)
{
- int seg_ix = tb->nslots >> SEGSZ_EXP;
-
- if (seg_ix+1 == tb->nsegs) { /* New segtab needed (extended segment) */
- struct segment** segtab = SEGTAB(tb);
- struct ext_segment* seg = alloc_ext_seg(tb, seg_ix, segtab);
- if (seg == NULL) return 0;
- segtab[seg_ix] = &seg->s;
- /* We don't use the new segtab until next call (see "shrink race") */
- }
- else { /* Just a new plain segment */
- struct segment** segtab;
- if (seg_ix == tb->nsegs) { /* Time to start use segtab from last call */
- struct ext_segment* eseg;
- eseg = (struct ext_segment*) SEGTAB(tb)[seg_ix-1];
- MY_ASSERT(eseg!=NULL && eseg->s.is_ext_segment);
- SET_SEGTAB(tb, eseg->segtab);
- tb->nsegs = eseg->nsegs;
- }
- ASSERT(seg_ix < tb->nsegs);
- segtab = SEGTAB(tb);
- ASSERT(segtab[seg_ix] == NULL);
- segtab[seg_ix] = (struct segment*) erts_db_alloc_fnf(ERTS_ALC_T_DB_SEG,
- (DbTable *) tb,
- sizeof(struct segment));
- if (segtab[seg_ix] == NULL) return 0;
- sys_memset(segtab[seg_ix], 0, sizeof(struct segment));
- }
- tb->nslots += SEGSZ;
- return 1;
+ int seg_ix = SLOT_IX_TO_SEG_IX(tb->nslots);
+ struct segment** segtab;
+
+ ASSERT(seg_ix > 0);
+ if (seg_ix == tb->nsegs) { /* New segtab needed */
+ struct ext_segtab* est = alloc_ext_segtab(tb, seg_ix);
+ SET_SEGTAB(tb, est->segtab);
+ tb->nsegs = est->nsegs;
+ }
+ ASSERT(seg_ix < tb->nsegs);
+ segtab = SEGTAB(tb);
+ segtab[seg_ix] = (struct segment*) erts_db_alloc(ERTS_ALC_T_DB_SEG,
+ (DbTable *) tb,
+ SIZEOF_SEGMENT(EXT_SEGSZ));
+ sys_memset(segtab[seg_ix], 0, SIZEOF_SEGMENT(EXT_SEGSZ));
+ tb->nslots += EXT_SEGSZ;
+}
+
+static void dealloc_ext_segtab(void* lop_data)
+{
+ struct ext_segtab* est = (struct ext_segtab*) lop_data;
+
+ erts_free(ERTS_ALC_T_DB_SEG, est);
}
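
alloc_ext_segtab keeps the old pointer array reachable (prev_segtab/prev_nsegs) so free_seg can later shrink back to it. A simplified model of the growth step, with malloc in place of erts_db_alloc and the bookkeeping reduced to one saved pointer:

    #include <stdlib.h>
    #include <string.h>

    #define NSEG_INC_SIM 128

    struct segtab_sim {
        int    nsegs;        /* capacity of segtab */
        void **segtab;       /* current segment-pointer array */
        void **prev_segtab;  /* kept so a later shrink can restore it */
        int    prev_nsegs;
    };

    /* Grow the pointer array so index seg_ix becomes valid; returns 0 on
     * allocation failure. */
    static int grow_segtab_sim(struct segtab_sim *st, int seg_ix)
    {
        int    nsegs  = seg_ix + NSEG_INC_SIM;
        void **newtab = malloc(nsegs * sizeof(void *));

        if (newtab == NULL)
            return 0;
        memcpy(newtab, st->segtab, st->nsegs * sizeof(void *));
        memset(newtab + st->nsegs, 0, (nsegs - st->nsegs) * sizeof(void *));
        st->prev_segtab = st->segtab;   /* old table stays alive for readers */
        st->prev_nsegs  = st->nsegs;
        st->segtab      = newtab;
        st->nsegs       = nsegs;
        return 1;
    }
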
/* Shrink table by freeing the top segment
@@ -2402,20 +2595,20 @@ static int alloc_seg(DbTableHash *tb)
*/
static int free_seg(DbTableHash *tb, int free_records)
{
- int seg_ix = (tb->nslots >> SEGSZ_EXP) - 1;
- int bytes;
- struct segment** segtab = SEGTAB(tb);
- struct ext_segment* top = (struct ext_segment*) segtab[seg_ix];
+ const int seg_ix = SLOT_IX_TO_SEG_IX(tb->nslots) - 1;
+ struct segment** const segtab = SEGTAB(tb);
+ struct segment* const segp = segtab[seg_ix];
+ Uint seg_sz;
int nrecords = 0;
- ASSERT(top != NULL);
+ ASSERT(segp != NULL);
#ifndef DEBUG
if (free_records)
#endif
{
- int i;
- for (i=0; i<SEGSZ; ++i) {
- HashDbTerm* p = top->s.buckets[i];
+ int i = (seg_ix == 0) ? FIRST_SEGSZ : EXT_SEGSZ;
+ while (i--) {
+ HashDbTerm* p = segp->buckets[i];
while(p != 0) {
HashDbTerm* nxt = p->next;
ASSERT(free_records); /* segment not empty as assumed? */
@@ -2425,55 +2618,44 @@ static int free_seg(DbTableHash *tb, int free_records)
}
}
}
-
- /* The "shrink race":
- * We must avoid deallocating an extended segment while its segtab may
- * still be used by other threads.
- * The trick is to stop use a segtab one call earlier. That is, stop use
- * a segtab when the segment above it is deallocated. When the segtab is
- * later deallocated, it has not been used for a very long time.
- * It is even theoretically safe as we have by then rehashed the entire
- * segment, seizing *all* locks, so there cannot exist any retarded threads
- * still hanging in BUCKET macro with an old segtab pointer.
- * For this to work, we must of course allocate a new segtab one call
- * earlier in alloc_seg() as well. And this is also the reason why
- * the minimum size of the first segtab is 2 and not 1 (NSEG_1).
- */
- if (seg_ix == tb->nsegs-1 || seg_ix==0) { /* Dealloc extended segment */
- MY_ASSERT(top->s.is_ext_segment);
- ASSERT(segtab != top->segtab || seg_ix==0);
- bytes = SIZEOF_EXTSEG(top->nsegs);
- }
- else { /* Dealloc plain segment */
- struct ext_segment* newtop = (struct ext_segment*) segtab[seg_ix-1];
- MY_ASSERT(!top->s.is_ext_segment);
-
- if (segtab == newtop->segtab) { /* New top segment is extended */
- MY_ASSERT(newtop->s.is_ext_segment);
- if (newtop->prev_segtab != NULL) {
- /* Time to use a smaller segtab */
- SET_SEGTAB(tb, newtop->prev_segtab);
- tb->nsegs = seg_ix;
- ASSERT(tb->nsegs == EXTSEG(SEGTAB(tb))->nsegs);
- }
- else {
- ASSERT(NSEG_1 > 2 && seg_ix==1);
- }
- }
- bytes = sizeof(struct segment);
+ if (seg_ix >= NSEG_1) {
+ struct ext_segtab* est = ErtsContainerStruct_(segtab,struct ext_segtab,segtab);
+
+ if (seg_ix == est->prev_nsegs) { /* Dealloc extended segtab */
+ ASSERT(est->prev_segtab != NULL);
+ SET_SEGTAB(tb, est->prev_segtab);
+ tb->nsegs = est->prev_nsegs;
+
+ if (!tb->common.is_thread_safe) {
+ /*
+ * Table is doing a graceful shrink operation and we must avoid
+ * deallocating this segtab while it may still be read by other
+ * threads. Schedule deallocation with thread progress to make
+ * sure no lingering threads are still hanging in BUCKET macro
+ * with an old segtab pointer.
+ */
+ Uint sz = SIZEOF_EXT_SEGTAB(est->nsegs);
+ ASSERT(sz == ERTS_ALC_DBG_BLK_SZ(est));
+ ERTS_DB_ALC_MEM_UPDATE_(tb, sz, 0);
+ erts_schedule_thr_prgr_later_cleanup_op(dealloc_ext_segtab,
+ est,
+ &est->lop,
+ sz);
+ }
+ else
+ erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable*)tb, est,
+ SIZEOF_EXT_SEGTAB(est->nsegs));
+ }
}
+ seg_sz = (seg_ix == 0) ? FIRST_SEGSZ : EXT_SEGSZ;
+ erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable *)tb, segp, SIZEOF_SEGMENT(seg_sz));
- erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable *)tb,
- (void*)top, bytes);
#ifdef DEBUG
- if (seg_ix > 0) {
- if (seg_ix < tb->nsegs) SEGTAB(tb)[seg_ix] = NULL;
- } else {
- SET_SEGTAB(tb, NULL);
- }
+ if (seg_ix < tb->nsegs)
+ SEGTAB(tb)[seg_ix] = NULL;
#endif
- tb->nslots -= SEGSZ;
+ tb->nslots -= seg_sz;
ASSERT(tb->nslots >= 0);
return nrecords;
}
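
The "graceful shrink" branch above defers freeing the old segtab until every scheduler has made thread progress, because lockless readers may still dereference it. A minimal single-threaded model of that epoch idea (the real erts_schedule_thr_prgr_later_cleanup_op also handles the locking and wakeups omitted here):

    #include <stdlib.h>

    struct deferred_sim { struct deferred_sim *next; void *block; unsigned long epoch; };

    static struct deferred_sim *dq_head;
    static unsigned long current_epoch;   /* advanced as schedulers progress */

    /* Queue a block instead of freeing it immediately. */
    static void defer_free_sim(void *block)
    {
        struct deferred_sim *d = malloc(sizeof *d);
        if (d == NULL) { free(block); return; }  /* degraded fallback */
        d->block = block;
        d->epoch = current_epoch;
        d->next  = dq_head;
        dq_head  = d;
    }

    /* Free everything queued before the oldest epoch any reader can still
     * observe; called once thread progress has been made. */
    static void reap_sim(unsigned long oldest_live_epoch)
    {
        struct deferred_sim **dp = &dq_head;
        while (*dp != NULL) {
            if ((*dp)->epoch < oldest_live_epoch) {
                struct deferred_sim *d = *dp;
                *dp = d->next;
                free(d->block);
                free(d);
            } else {
                dp = &(*dp)->next;
            }
        }
    }
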
@@ -2483,23 +2665,23 @@ static int free_seg(DbTableHash *tb, int free_records)
** Copy terms from ptr1 until ptr2
** works for ptr1 == ptr2 == 0 => []
** or ptr2 == 0
+** sz is either the precalculated heap size or 0 if not known
*/
static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2,
- DbTableHash* tb)
+ Uint sz, DbTableHash* tb)
{
- int sz = 0;
HashDbTerm* ptr;
Eterm list = NIL;
Eterm copy;
Eterm *hp, *hend;
- ptr = ptr1;
- while(ptr != ptr2) {
-
- if (ptr->hvalue != INVALID_HASH)
- sz += ptr->dbterm.size + 2;
-
- ptr = ptr->next;
+ if (!sz) {
+ ptr = ptr1;
+ while(ptr != ptr2) {
+ if (ptr->hvalue != INVALID_HASH)
+ sz += ptr->dbterm.size + 2;
+ ptr = ptr->next;
+ }
}
hp = HAlloc(p, sz);
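
The sz parameter makes the measuring pass optional. The convention in isolation, with toy types (callers that already know the total pass it in; others pass 0):

    struct cell_sim { struct cell_sim *next; unsigned size; int live; };

    /* Pass sz == 0 to have the callee measure the heap need itself, or a
     * precalculated total to skip the extra pass over the chain. */
    static unsigned heap_need_sim(const struct cell_sim *from,
                                  const struct cell_sim *to, unsigned sz)
    {
        const struct cell_sim *p;
        if (sz == 0) {
            for (p = from; p != to; p = p->next)
                if (p->live)
                    sz += p->size + 2;   /* +2 words for the cons cell */
        }
        return sz;
    }
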
@@ -2523,103 +2705,106 @@ static ERTS_INLINE int
begin_resizing(DbTableHash* tb)
{
if (DB_USING_FINE_LOCKING(tb))
- return !erts_smp_atomic_xchg_acqb(&tb->is_resizing, 1);
- else {
- if (erts_smp_atomic_read_nob(&tb->is_resizing))
- return 0;
- erts_smp_atomic_set_nob(&tb->is_resizing, 1);
- return 1;
- }
+ return !erts_atomic_xchg_acqb(&tb->is_resizing, 1);
+ else
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock));
+ return 1;
}
static ERTS_INLINE void
done_resizing(DbTableHash* tb)
{
if (DB_USING_FINE_LOCKING(tb))
- erts_smp_atomic_set_relb(&tb->is_resizing, 0);
- else
- erts_smp_atomic_set_nob(&tb->is_resizing, 0);
+ erts_atomic_set_relb(&tb->is_resizing, 0);
}
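
begin_resizing is a try-lock built from a single atomic exchange: exactly one caller observes the old value 0 and wins the right to resize. The same shape in portable C11, standing in for the erts_atomic wrappers:

    #include <stdatomic.h>

    static atomic_int is_resizing_sim;

    /* Returns 1 when this caller won the flag, 0 when a resize is
     * already in progress elsewhere. */
    static int begin_resizing_sim(void)
    {
        return !atomic_exchange_explicit(&is_resizing_sim, 1,
                                         memory_order_acquire);
    }

    static void done_resizing_sim(void)
    {
        atomic_store_explicit(&is_resizing_sim, 0, memory_order_release);
    }

    /* Usage: if (!begin_resizing_sim()) return;  ...resize...  done_resizing_sim(); */
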
-/* Grow table with one new bucket.
+/* Grow table with one or more new buckets.
** Allocate new segment if needed.
*/
-static void grow(DbTableHash* tb, int nactive)
+static void grow(DbTableHash* tb, int nitems)
{
HashDbTerm** pnext;
HashDbTerm** to_pnext;
HashDbTerm* p;
- erts_smp_rwmtx_t* lck;
- int from_ix;
+ erts_rwmtx_t* lck;
+ int nactive;
+ int from_ix, to_ix;
int szm;
+ int loop_limit = 5;
- if (!begin_resizing(tb))
- return; /* already in progress */
- if (NACTIVE(tb) != nactive) {
- goto abort; /* already done (race) */
- }
-
- /* Ensure that the slot nactive exists */
- if (nactive == tb->nslots) {
- /* Time to get a new segment */
- ASSERT((nactive & SEGSZ_MASK) == 0);
- if (!alloc_seg(tb)) goto abort;
- }
- ASSERT(nactive < tb->nslots);
+ do {
+ if (!begin_resizing(tb))
+ return; /* already in progress */
+ nactive = NACTIVE(tb);
+ if (nitems <= GROW_LIMIT(nactive)) {
+ goto abort; /* already done (race) */
+ }
- szm = erts_smp_atomic_read_nob(&tb->szm);
- if (nactive <= szm) {
- from_ix = nactive & (szm >> 1);
- } else {
- ASSERT(nactive == szm+1);
- from_ix = 0;
- szm = (szm<<1) | 1;
- }
+ /* Ensure that the slot nactive exists */
+ if (nactive == tb->nslots) {
+ /* Time to get a new segment */
+ ASSERT(((nactive-FIRST_SEGSZ) & EXT_SEGSZ_MASK) == 0);
+ alloc_seg(tb);
+ }
+ ASSERT(nactive < tb->nslots);
+
+ szm = erts_atomic_read_nob(&tb->szm);
+ if (nactive <= szm) {
+ from_ix = nactive & (szm >> 1);
+ } else {
+ ASSERT(nactive == szm+1);
+ from_ix = 0;
+ szm = (szm<<1) | 1;
+ }
+ to_ix = nactive;
+
+ lck = WLOCK_HASH(tb, from_ix);
+ ERTS_ASSERT(lck == GET_LOCK_MAYBE(tb,to_ix));
+ /* Now a final double check (with the from_ix lock held)
+ * that we did not get raced by a table fixer.
+ */
+ if (IS_FIXED(tb)) {
+ WUNLOCK_HASH(lck);
+ goto abort;
+ }
+ erts_atomic_set_nob(&tb->nactive, ++nactive);
+ if (from_ix == 0) {
+ if (DB_USING_FINE_LOCKING(tb))
+ erts_atomic_set_relb(&tb->szm, szm);
+ else
+ erts_atomic_set_nob(&tb->szm, szm);
+ }
+ done_resizing(tb);
+
+ /* Finally, let's split the bucket. We try to do it in a smart way
+ to keep link order and avoid unnecessary updates of next-pointers */
+ pnext = &BUCKET(tb, from_ix);
+ p = *pnext;
+ to_pnext = &BUCKET(tb, to_ix);
+ while (p != NULL) {
+ if (p->hvalue == INVALID_HASH) { /* rare but possible with fine locking */
+ *pnext = p->next;
+ free_term(tb, p);
+ p = *pnext;
+ }
+ else {
+ int ix = p->hvalue & szm;
+ if (ix != from_ix) {
+ ASSERT(ix == (from_ix ^ ((szm+1)>>1)));
+ *to_pnext = p;
+ /* Swap "from" and "to": */
+ from_ix = ix;
+ to_pnext = pnext;
+ }
+ pnext = &p->next;
+ p = *pnext;
+ }
+ }
+ *to_pnext = NULL;
+ WUNLOCK_HASH(lck);
- lck = WLOCK_HASH(tb, from_ix);
- /* Now a final double check (with the from_ix lock held)
- * that we did not get raced by a table fixer.
- */
- if (IS_FIXED(tb)) {
- WUNLOCK_HASH(lck);
- goto abort;
- }
- erts_smp_atomic_inc_nob(&tb->nactive);
- if (from_ix == 0) {
- if (DB_USING_FINE_LOCKING(tb))
- erts_smp_atomic_set_relb(&tb->szm, szm);
- else
- erts_smp_atomic_set_nob(&tb->szm, szm);
- }
- done_resizing(tb);
+ }while (--loop_limit && nitems > GROW_LIMIT(nactive));
- /* Finally, let's split the bucket. We try to do it in a smart way
- to keep link order and avoid unnecessary updates of next-pointers */
- pnext = &BUCKET(tb, from_ix);
- p = *pnext;
- to_pnext = &BUCKET(tb, nactive);
- while (p != NULL) {
- if (p->hvalue == INVALID_HASH) { /* rare but possible with fine locking */
- *pnext = p->next;
- free_term(tb, p);
- p = *pnext;
- }
- else {
- int ix = p->hvalue & szm;
- if (ix != from_ix) {
- ASSERT(ix == (from_ix ^ ((szm+1)>>1)));
- *to_pnext = p;
- /* Swap "from" and "to": */
- from_ix = ix;
- to_pnext = pnext;
- }
- pnext = &p->next;
- p = *pnext;
- }
- }
- *to_pnext = NULL;
-
- WUNLOCK_HASH(lck);
return;
abort:
@@ -2630,60 +2815,78 @@ abort:
/* Shrink table by joining top bucket.
** Remove top segment if it gets empty.
*/
-static void shrink(DbTableHash* tb, int nactive)
-{
- if (!begin_resizing(tb))
- return; /* already in progress */
- if (NACTIVE(tb) == nactive) {
- erts_smp_rwmtx_t* lck;
- int src_ix = nactive - 1;
- int low_szm = erts_smp_atomic_read_nob(&tb->szm) >> 1;
- int dst_ix = src_ix & low_szm;
-
- ASSERT(dst_ix < src_ix);
- ASSERT(nactive > SEGSZ);
- lck = WLOCK_HASH(tb, dst_ix);
- /* Double check for racing table fixers */
- if (!IS_FIXED(tb)) {
- HashDbTerm** src_bp = &BUCKET(tb, src_ix);
- HashDbTerm** dst_bp = &BUCKET(tb, dst_ix);
- HashDbTerm** bp = src_bp;
-
- /* Q: Why join lists by appending "dst" at the end of "src"?
- A: Must step through "src" anyway to purge pseudo deleted. */
- while(*bp != NULL) {
- if ((*bp)->hvalue == INVALID_HASH) {
- HashDbTerm* deleted = *bp;
- *bp = deleted->next;
- free_term(tb, deleted);
- } else {
- bp = &(*bp)->next;
- }
- }
- *bp = *dst_bp;
- *dst_bp = *src_bp;
- *src_bp = NULL;
-
- erts_smp_atomic_set_nob(&tb->nactive, src_ix);
- if (dst_ix == 0) {
- erts_smp_atomic_set_relb(&tb->szm, low_szm);
- }
- WUNLOCK_HASH(lck);
-
- if (tb->nslots - src_ix >= SEGSZ) {
- free_seg(tb, 0);
- }
- }
- else {
- WUNLOCK_HASH(lck);
- }
+static void shrink(DbTableHash* tb, int nitems)
+{
+ HashDbTerm** src_bp;
+ HashDbTerm** dst_bp;
+ HashDbTerm** bp;
+ erts_rwmtx_t* lck;
+ int src_ix, dst_ix, low_szm;
+ int nactive;
+ int loop_limit = 5;
- }
- /*else already done */
+ do {
+ if (!begin_resizing(tb))
+ return; /* already in progress */
+ nactive = NACTIVE(tb);
+ if (!(nactive > FIRST_SEGSZ && nitems < SHRINK_LIMIT(nactive))) {
+ goto abort; /* already done (race) */
+ }
+ src_ix = nactive - 1;
+ low_szm = erts_atomic_read_nob(&tb->szm) >> 1;
+ dst_ix = src_ix & low_szm;
+
+ ASSERT(dst_ix < src_ix);
+ ASSERT(nactive > FIRST_SEGSZ);
+ lck = WLOCK_HASH(tb, dst_ix);
+ ERTS_ASSERT(lck == GET_LOCK_MAYBE(tb,src_ix));
+ /* Double check for racing table fixers */
+ if (IS_FIXED(tb)) {
+ WUNLOCK_HASH(lck);
+ goto abort;
+ }
+
+ src_bp = &BUCKET(tb, src_ix);
+ dst_bp = &BUCKET(tb, dst_ix);
+ bp = src_bp;
+
+ /*
+ * We join lists by appending "dst" at the end of "src"
+ * as we must step through "src" anyway to purge pseudo deleted.
+ */
+ while(*bp != NULL) {
+ if ((*bp)->hvalue == INVALID_HASH) {
+ HashDbTerm* deleted = *bp;
+ *bp = deleted->next;
+ free_term(tb, deleted);
+ } else {
+ bp = &(*bp)->next;
+ }
+ }
+ *bp = *dst_bp;
+ *dst_bp = *src_bp;
+ *src_bp = NULL;
+
+ nactive = src_ix;
+ erts_atomic_set_nob(&tb->nactive, nactive);
+ if (dst_ix == 0) {
+ erts_atomic_set_relb(&tb->szm, low_szm);
+ }
+ WUNLOCK_HASH(lck);
+
+ if (tb->nslots - src_ix >= EXT_SEGSZ) {
+ free_seg(tb, 0);
+ }
+ done_resizing(tb);
+
+ } while (--loop_limit
+ && nactive > FIRST_SEGSZ && nitems < SHRINK_LIMIT(nactive));
+ return;
+
+abort:
done_resizing(tb);
}
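
Both grow() and shrink() lean on the same linear-hashing arithmetic: with size mask szm = 2^k - 1, bucket nactive is split from bucket nactive & (szm >> 1), and an entry moves iff its hash value selects the new index under the (possibly doubled) mask. The index math in isolation, simplified from grow():

    #include <assert.h>

    /* Compute which bucket is split (from_ix), where entries may move
     * (to_ix == nactive), and the mask used to classify entries. */
    static void split_indices_sim(int nactive, int szm,
                                  int *from_ix, int *to_ix, int *split_mask)
    {
        if (nactive <= szm) {
            *from_ix = nactive & (szm >> 1);
        } else {
            assert(nactive == szm + 1);
            *from_ix = 0;
            szm = (szm << 1) | 1;      /* double the addressable range */
        }
        *to_ix      = nactive;
        *split_mask = szm;   /* entry moves iff (hval & szm) != from_ix */
    }
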
-
/* Search a list of tuples for a matching key */
static HashDbTerm* search_list(DbTableHash* tb, Eterm key,
@@ -2702,12 +2905,12 @@ static HashDbTerm* search_list(DbTableHash* tb, Eterm key,
/* It returns the next live object in a table, NULL if no more */
/* In-bucket: RLOCKED */
/* Out-bucket: RLOCKED unless NULL */
-static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_smp_rwmtx_t** lck_ptr,
+static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_rwmtx_t** lck_ptr,
HashDbTerm *list)
{
int i;
- ERTS_SMP_LC_ASSERT(IS_HASH_RLOCKED(tb,*iptr));
+ ERTS_LC_ASSERT(IS_HASH_RLOCKED(tb,*iptr));
for (list = list->next; list != NULL; list = list->next) {
if (list->hvalue != INVALID_HASH)
@@ -2730,63 +2933,136 @@ static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_smp_rwmtx_t** lck_ptr,
return NULL;
}
-static int db_lookup_dbterm_hash(DbTable *tbl, Eterm key, DbUpdateHandle* handle)
+static int
+db_lookup_dbterm_hash(Process *p, DbTable *tbl, Eterm key, Eterm obj,
+ DbUpdateHandle* handle)
{
DbTableHash *tb = &tbl->hash;
- HashDbTerm* b;
- HashDbTerm** prevp;
- int ix;
HashValue hval;
- erts_smp_rwmtx_t* lck;
+ HashDbTerm **bp, *b;
+ erts_rwmtx_t* lck;
+ int flags = 0;
+
+ ASSERT(tb->common.status & DB_SET);
hval = MAKE_HASH(key);
- lck = WLOCK_HASH(tb,hval);
- ix = hash_to_ix(tb, hval);
- prevp = &BUCKET(tb, ix);
- b = *prevp;
+ lck = WLOCK_HASH(tb, hval);
+ bp = &BUCKET(tb, hash_to_ix(tb, hval));
+ b = *bp;
- while (b != 0) {
- if (has_live_key(tb,b,key,hval)) {
- handle->tb = tbl;
- handle->bp = (void**) prevp;
- handle->dbterm = &b->dbterm;
- handle->mustResize = 0;
- handle->new_size = b->dbterm.size;
- #if HALFWORD_HEAP
- handle->abs_vec = NULL;
- #endif
- handle->lck = lck;
- /* KEEP hval WLOCKED, db_finalize_dbterm_hash will WUNLOCK */
- return 1;
- }
- prevp = &b->next;
- b = *prevp;
+ for (;;) {
+ if (b == NULL) {
+ break;
+ }
+ if (has_key(tb, b, key, hval)) {
+ if (b->hvalue != INVALID_HASH) {
+ goto Ldone;
+ }
+ break;
+ }
+ bp = &b->next;
+ b = *bp;
}
- WUNLOCK_HASH(lck);
- return 0;
+
+ if (obj == THE_NON_VALUE) {
+ WUNLOCK_HASH(lck);
+ return 0;
+ }
+
+ {
+ Eterm *objp = tuple_val(obj);
+ int arity = arityval(*objp);
+ Eterm *htop, *hend;
+
+ ASSERT(arity >= tb->common.keypos);
+ htop = HAlloc(p, arity + 1);
+ hend = htop + arity + 1;
+ sys_memcpy(htop, objp, sizeof(Eterm) * (arity + 1));
+ htop[tb->common.keypos] = key;
+ obj = make_tuple(htop);
+
+ if (b == NULL) {
+ HashDbTerm *q = new_dbterm(tb, obj);
+
+ q->hvalue = hval;
+ q->next = NULL;
+ *bp = b = q;
+ flags |= DB_INC_TRY_GROW;
+ } else {
+ HashDbTerm *q, *next = b->next;
+
+ ASSERT(b->hvalue == INVALID_HASH);
+ q = replace_dbterm(tb, b, obj);
+ q->next = next;
+ q->hvalue = hval;
+ *bp = b = q;
+ erts_atomic_inc_nob(&tb->common.nitems);
+ }
+
+ HRelease(p, hend, htop);
+ flags |= DB_NEW_OBJECT;
+ }
+
+Ldone:
+ handle->tb = tbl;
+ handle->bp = (void **)bp;
+ handle->dbterm = &b->dbterm;
+ handle->flags = flags;
+ handle->new_size = b->dbterm.size;
+ handle->lck = lck;
+ return 1;
}
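
The reworked lookup can now insert a caller-supplied default object when the key is absent (the DB_NEW_OBJECT path). Stripped to its chain logic with toy types, a sketch of the lookup-or-insert shape:

    #include <stdlib.h>

    struct term_sim { struct term_sim *next; long key; long value; };

    /* Return the node for 'key', inserting one that carries *dflt when
     * the key is absent and a default is supplied; NULL on a plain miss
     * or allocation failure. */
    static struct term_sim *lookup_or_insert_sim(struct term_sim **bucket,
                                                 long key, const long *dflt)
    {
        struct term_sim **bp = bucket, *b;

        for (b = *bp; b != NULL; bp = &b->next, b = b->next)
            if (b->key == key)
                return b;            /* found: caller updates in place */
        if (dflt == NULL)
            return NULL;             /* lookup-only miss */
        b = malloc(sizeof *b);
        if (b == NULL)
            return NULL;
        b->key   = key;
        b->value = *dflt;            /* the freshly inserted default */
        b->next  = NULL;
        *bp      = b;                /* corresponds to DB_INC_TRY_GROW */
        return b;
    }
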
/* Must be called after call to db_lookup_dbterm
*/
-static void db_finalize_dbterm_hash(DbUpdateHandle* handle)
+static void
+db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle)
{
DbTable* tbl = handle->tb;
- HashDbTerm* oldp = (HashDbTerm*) *(handle->bp);
- erts_smp_rwmtx_t* lck = (erts_smp_rwmtx_t*) handle->lck;
-
- ERTS_SMP_LC_ASSERT(IS_HASH_WLOCKED(&tbl->hash,lck)); /* locked by db_lookup_dbterm_hash */
+ DbTableHash *tb = &tbl->hash;
+ HashDbTerm **bp = (HashDbTerm **) handle->bp;
+ HashDbTerm *b = *bp;
+ erts_rwmtx_t* lck = (erts_rwmtx_t*) handle->lck;
+ HashDbTerm* free_me = NULL;
+
+ ERTS_LC_ASSERT(IS_HASH_WLOCKED(tb, lck)); /* locked by db_lookup_dbterm_hash */
+
+ ASSERT((&b->dbterm == handle->dbterm) == !(tb->common.compress && handle->flags & DB_MUST_RESIZE));
+
+ if (handle->flags & DB_NEW_OBJECT && cret != DB_ERROR_NONE) {
+ if (IS_FIXED(tb) && add_fixed_deletion(tb, hash_to_ix(tb, b->hvalue),
+ 0)) {
+ b->hvalue = INVALID_HASH;
+ } else {
+ *bp = b->next;
+ free_me = b;
+ }
- ASSERT((&oldp->dbterm == handle->dbterm) == !(tbl->common.compress && handle->mustResize));
+ WUNLOCK_HASH(lck);
+ erts_atomic_dec_nob(&tb->common.nitems);
+ try_shrink(tb);
+ } else {
+ if (handle->flags & DB_MUST_RESIZE) {
+ db_finalize_resize(handle, offsetof(HashDbTerm,dbterm));
+ free_me = b;
+ }
+ if (handle->flags & DB_INC_TRY_GROW) {
+ int nactive;
+ int nitems = erts_atomic_inc_read_nob(&tb->common.nitems);
+ WUNLOCK_HASH(lck);
+ nactive = NACTIVE(tb);
+
+ if (nitems > GROW_LIMIT(nactive) && !IS_FIXED(tb)) {
+ grow(tb, nitems);
+ }
+ } else {
+ WUNLOCK_HASH(lck);
+ }
+ }
- if (handle->mustResize) {
- db_finalize_resize(handle, offsetof(HashDbTerm,dbterm));
- WUNLOCK_HASH(lck);
+ if (free_me)
+ free_term(tb, free_me);
- free_term(&tbl->hash, oldp);
- }
- else {
- WUNLOCK_HASH(lck);
- }
#ifdef DEBUG
handle->dbterm = 0;
#endif
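
If the update fails (cret != DB_ERROR_NONE) and the object only exists because this very operation inserted it, the finalize step must undo the insert so the failed call leaves no trace. That rollback in the same toy model (table fixation ignored):

    #include <stdlib.h>

    struct term_sim2 { struct term_sim2 *next; long key; long value; };

    /* cret != 0 models a failed update; was_new models DB_NEW_OBJECT. */
    static void finalize_sim(struct term_sim2 **bp, int cret, int was_new)
    {
        if (was_new && cret != 0) {
            struct term_sim2 *b = *bp;
            *bp = b->next;           /* unlink the speculative insert */
            free(b);
        }
        /* on success nothing is undone; the real code may also grow the
         * table here when the insert pushed nitems past GROW_LIMIT */
    }
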
@@ -2800,7 +3076,7 @@ static int db_delete_all_objects_hash(Process* p, DbTable* tbl)
} else {
db_free_table_hash(tbl);
db_create_hash(p, tbl);
- erts_smp_atomic_set_nob(&tbl->hash.common.nitems, 0);
+ erts_atomic_set_nob(&tbl->hash.common.nitems, 0);
}
return 0;
}
@@ -2830,9 +3106,10 @@ void db_foreach_offheap_hash(DbTable *tbl,
void db_calc_stats_hash(DbTableHash* tb, DbHashStats* stats)
{
HashDbTerm* b;
- erts_smp_rwmtx_t* lck;
+ erts_rwmtx_t* lck;
int sum = 0;
int sq_sum = 0;
+ int kept_items = 0;
int ix;
int len;
@@ -2844,6 +3121,8 @@ void db_calc_stats_hash(DbTableHash* tb, DbHashStats* stats)
len = 0;
for (b = BUCKET(tb,ix); b!=NULL; b=b->next) {
len++;
+ if (b->hvalue == INVALID_HASH)
+ ++kept_items;
}
sum += len;
sq_sum += len*len;
@@ -2855,26 +3134,33 @@ void db_calc_stats_hash(DbTableHash* tb, DbHashStats* stats)
stats->std_dev_chain_len = sqrt((sq_sum - stats->avg_chain_len*sum) / NACTIVE(tb));
/* Expected standard deviation from a good uniform hash function,
   i.e. binomial distribution (not taking the linear hashing into account) */
- stats->std_dev_expected = sqrt(stats->avg_chain_len * (1 - 1.0/NACTIVE(tb)));
+ stats->std_dev_expected = sqrt(stats->avg_chain_len * (1 - 1.0/NACTIVE(tb)));
+ stats->kept_items = kept_items;
}
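
The expected figure follows from each chain length being Binomial(m, 1/n) for m items spread over n buckets: sigma = sqrt(m * (1/n) * (1 - 1/n)) = sqrt(avg_chain_len * (1 - 1/n)), which is exactly the expression computed above. As a standalone helper:

    #include <math.h>

    static double expected_chain_std_dev(double nitems, double nactive)
    {
        double avg_chain_len = nitems / nactive;
        return sqrt(avg_chain_len * (1.0 - 1.0 / nactive));
    }
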
-#ifdef HARDDEBUG
-void db_check_table_hash(DbTable *tbl)
+/* For testing only */
+Eterm erts_ets_hash_sizeof_ext_segtab(void)
{
- DbTableHash *tb = &tbl->hash;
- HashDbTerm* list;
- int j;
-
- for (j = 0; j < tb->nactive; j++) {
- if ((list = BUCKET(tb,j)) != 0) {
- while (list != 0) {
- if (!is_tuple(make_tuple(list->dbterm.tpl))) {
- erl_exit(1, "Bad term in slot %d of ets table", j);
- }
- list = list->next;
- }
- }
- }
+ return make_small(((SIZEOF_EXT_SEGTAB(0)-1) / sizeof(UWord)) + 1);
}
-#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_enable_db_hash_lock_count(DbTableHash *tb, int enable) {
+ int i;
+
+ if(tb->locks == NULL) {
+ return;
+ }
+
+ for(i = 0; i < DB_HASH_LOCK_CNT; i++) {
+ erts_lcnt_ref_t *ref = &tb->locks->lck_vec[i].lck.lcnt;
+
+ if(enable) {
+ erts_lcnt_install_new_lock_info(ref, "db_hash_slot", tb->common.the_name,
+ ERTS_LOCK_TYPE_RWMUTEX | ERTS_LOCK_FLAGS_CATEGORY_DB);
+ } else {
+ erts_lcnt_uninstall(ref);
+ }
+ }
+}
+#endif /* ERTS_ENABLE_LOCK_COUNT */
diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h
index 908cec11d4..7d27609825 100644
--- a/erts/emulator/beam/erl_db_hash.h
+++ b/erts/emulator/beam/erl_db_hash.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -41,31 +42,29 @@ typedef struct hash_db_term {
typedef struct db_table_hash_fine_locks {
union {
- erts_smp_rwmtx_t lck;
- byte _cache_line_alignment[64];
+ erts_rwmtx_t lck;
+ byte _cache_line_alignment[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_rwmtx_t))];
}lck_vec[DB_HASH_LOCK_CNT];
} DbTableHashFineLocks;
typedef struct db_table_hash {
DbTableCommon common;
- erts_smp_atomic_t segtab; /* The segment table (struct segment**) */
- erts_smp_atomic_t szm; /* current size mask. */
-
+ /* SMP: szm and nactive are write-protected by is_resizing or table write lock */
+ erts_atomic_t szm; /* current size mask. */
+ erts_atomic_t nactive; /* Number of "active" slots */
+
+ erts_atomic_t segtab; /* The segment table (struct segment**) */
+ struct segment* first_segtab[1];
+
/* SMP: nslots and nsegs are protected by is_resizing or table write lock */
int nslots; /* Total number of slots */
int nsegs; /* Size of segment table */
/* List of slots where elements have been deleted while table was fixed */
- erts_smp_atomic_t fixdel; /* (FixedDeletion*) */
- erts_smp_atomic_t nactive; /* Number of "active" slots */
- erts_smp_atomic_t is_resizing; /* grow/shrink in progress */
-#ifdef ERTS_SMP
+ erts_atomic_t fixdel; /* (FixedDeletion*) */
+ erts_atomic_t is_resizing; /* grow/shrink in progress */
DbTableHashFineLocks* locks;
-#endif
-#ifdef VALGRIND
- struct ext_segment* top_ptr_to_segment_with_active_segtab;
-#endif
} DbTableHash;
@@ -74,7 +73,7 @@ typedef struct db_table_hash {
** table types. The process is always an [in out] parameter.
*/
void db_initialize_hash(void);
-void db_unfix_table_hash(DbTableHash *tb /* [in out] */);
+SWord db_unfix_table_hash(DbTableHash *tb);
Uint db_kept_items_hash(DbTableHash *tb);
/* Interface for meta pid table */
@@ -87,14 +86,6 @@ int db_get_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret);
int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret);
-int db_get_element_array(DbTable *tbl,
- Eterm key,
- int ndex,
- Eterm *ret,
- int *num_ret);
-
-int db_erase_bag_exact2(DbTable *tbl, Eterm key, Eterm value);
-
/* not yet in method table */
int db_mark_all_deleted_hash(DbTable *tbl);
@@ -104,8 +95,14 @@ typedef struct {
float std_dev_expected;
int max_chain_len;
int min_chain_len;
+ int kept_items;
}DbHashStats;
void db_calc_stats_hash(DbTableHash* tb, DbHashStats*);
+Eterm erts_ets_hash_sizeof_ext_segtab(void);
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_enable_db_hash_lock_count(DbTableHash *tb, int enable);
+#endif
#endif /* _DB_HASH_H */
diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c
index a62a83a928..5a276b9d88 100644
--- a/erts/emulator/beam/erl_db_tree.c
+++ b/erts/emulator/beam/erl_db_tree.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -49,7 +50,7 @@
#include "erl_db_tree.h"
#define GETKEY_WITH_POS(Keypos, Tplp) (*((Tplp) + Keypos))
-#define NITEMS(tb) ((int)erts_smp_atomic_read_nob(&(tb)->common.nitems))
+#define NITEMS(tb) ((int)erts_atomic_read_nob(&(tb)->common.nitems))
/*
** A stack of this size is enough for an AVL tree with more than
@@ -75,8 +76,14 @@
((Dtt->pos) ? \
(Dtt)->array[(Dtt)->pos - 1] : NULL)
-#define EMPTY_NODE(Dtt) (TOP_NODE(Dtt) == NULL)
+#define TOPN_NODE(Dtt, Pos) \
+ (((Pos) < Dtt->pos) ? \
+ (Dtt)->array[(Dtt)->pos - ((Pos) + 1)] : NULL)
+#define REPLACE_TOP_NODE(Dtt, Node) \
+ if ((Dtt)->pos) (Dtt)->array[(Dtt)->pos - 1] = (Node)
+
+#define EMPTY_NODE(Dtt) (TOP_NODE(Dtt) == NULL)
/* Obtain table static stack if available. NULL if not.
@@ -84,7 +91,7 @@
*/
static DbTreeStack* get_static_stack(DbTableTree* tb)
{
- if (!erts_smp_atomic_xchg_acqb(&tb->is_stack_busy, 1)) {
+ if (!erts_atomic_xchg_acqb(&tb->is_stack_busy, 1)) {
return &tb->static_stack;
}
return NULL;
@@ -96,7 +103,7 @@ static DbTreeStack* get_static_stack(DbTableTree* tb)
static DbTreeStack* get_any_stack(DbTableTree* tb)
{
DbTreeStack* stack;
- if (!erts_smp_atomic_xchg_acqb(&tb->is_stack_busy, 1)) {
+ if (!erts_atomic_xchg_acqb(&tb->is_stack_busy, 1)) {
return &tb->static_stack;
}
stack = erts_db_alloc(ERTS_ALC_T_DB_STK, (DbTable *) tb,
@@ -110,8 +117,8 @@ static DbTreeStack* get_any_stack(DbTableTree* tb)
static void release_stack(DbTableTree* tb, DbTreeStack* stack)
{
if (stack == &tb->static_stack) {
- ASSERT(erts_smp_atomic_read_nob(&tb->is_stack_busy) == 1);
- erts_smp_atomic_set_relb(&tb->is_stack_busy, 0);
+ ASSERT(erts_atomic_read_nob(&tb->is_stack_busy) == 1);
+ erts_atomic_set_relb(&tb->is_stack_busy, 0);
}
else {
erts_db_free(ERTS_ALC_T_DB_STK, (DbTable *) tb,
@@ -179,7 +186,6 @@ static ERTS_INLINE TreeDbTerm* replace_dbterm(DbTableTree *tb, TreeDbTerm* old,
static TreeDbTerm *traverse_until(TreeDbTerm *t, int *current, int to);
static void check_slot_pos(DbTableTree *tb);
static void check_saved_stack(DbTableTree *tb);
-static int check_table_tree(DbTableTree* tb, TreeDbTerm *t);
#define TREE_DEBUG
#endif
@@ -225,9 +231,9 @@ struct mp_info {
Eterm most; /* The highest matching key (possibly
* partially bound expression) */
- TreeDbTerm *save_term; /* If the key is completely bound, this
- * will be the Tree node we're searching
- * for, otherwise it will be useless */
+ TreeDbTerm **save_term; /* If the key is completely bound, this
+ * will be the Tree node we're searching
+ * for, otherwise it will be useless */
Binary *mp; /* The compiled match program */
};
@@ -277,12 +283,30 @@ struct select_delete_context {
};
/*
+ * Used by doit_select_replace
+ */
+struct select_replace_context {
+ Process *p;
+ DbTableTree *tb;
+ Binary *mp;
+ Eterm end_condition;
+ Eterm *lastobj;
+ Sint32 max;
+ int keypos;
+ int all_objects;
+ Sint replaced;
+};
+
+/* Used by select_replace on analyze_pattern */
+typedef int (*extra_match_validator_t)(int keypos, Eterm match, Eterm guard, Eterm body);
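
The validator is deliberately optional: analyze_pattern applies it per {Match, Guard, Body} triple only when non-NULL, and any rejection becomes badarg. The callback plumbing in isolation (EtermSim is a stand-in type; the real select_replace validator, db_match_keeps_key, checks that the body preserves the key at keypos):

    typedef long EtermSim;   /* stand-in for Eterm */

    typedef int (*validator_sim_t)(int keypos, EtermSim match,
                                   EtermSim guard, EtermSim body);

    /* Returns 0 (reject => DB_ERROR_BADPARAM) as soon as any triple
     * fails the optional extra validation, mirroring analyze_pattern. */
    static int validate_all_sim(validator_sim_t v, int keypos, int n,
                                const EtermSim *m, const EtermSim *g,
                                const EtermSim *b)
    {
        int i;
        for (i = 0; i < n; i++)
            if (v != NULL && !v(keypos, m[i], g[i], b[i]))
                return 0;
        return 1;
    }
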
+
+/*
** Forward declarations
*/
-static TreeDbTerm *linkout_tree(DbTableTree *tb, Eterm key, Eterm* key_base);
+static TreeDbTerm *linkout_tree(DbTableTree *tb, Eterm key);
static TreeDbTerm *linkout_object_tree(DbTableTree *tb,
Eterm object);
-static int do_free_tree_cont(DbTableTree *tb, int num_left);
+static SWord do_free_tree_continue(DbTableTree *tb, SWord reds);
static void free_term(DbTableTree *tb, TreeDbTerm* p);
static int balance_left(TreeDbTerm **this);
static int balance_right(TreeDbTerm **this);
@@ -290,15 +314,16 @@ static int delsub(TreeDbTerm **this);
static TreeDbTerm *slot_search(Process *p, DbTableTree *tb, Sint slot);
static TreeDbTerm *find_node(DbTableTree *tb, Eterm key);
static TreeDbTerm **find_node2(DbTableTree *tb, Eterm key);
-static TreeDbTerm *find_next(DbTableTree *tb, DbTreeStack*, Eterm key, Eterm* kbase);
-static TreeDbTerm *find_prev(DbTableTree *tb, DbTreeStack*, Eterm key, Eterm* kbase);
+static TreeDbTerm **find_ptr(DbTableTree *tb, DbTreeStack*, TreeDbTerm *this);
+static TreeDbTerm *find_next(DbTableTree *tb, DbTreeStack*, Eterm key);
+static TreeDbTerm *find_prev(DbTableTree *tb, DbTreeStack*, Eterm key);
static TreeDbTerm *find_next_from_pb_key(DbTableTree *tb, DbTreeStack*,
Eterm key);
static TreeDbTerm *find_prev_from_pb_key(DbTableTree *tb, DbTreeStack*,
Eterm key);
static void traverse_backwards(DbTableTree *tb,
DbTreeStack*,
- Eterm lastkey, Eterm* lk_base,
+ Eterm lastkey,
int (*doit)(DbTableTree *tb,
TreeDbTerm *,
void *,
@@ -306,19 +331,28 @@ static void traverse_backwards(DbTableTree *tb,
void *context);
static void traverse_forward(DbTableTree *tb,
DbTreeStack*,
- Eterm lastkey, Eterm* lk_base,
+ Eterm lastkey,
int (*doit)(DbTableTree *tb,
TreeDbTerm *,
void *,
int),
- void *context);
-static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm **ret,
+ void *context);
+static void traverse_update_backwards(DbTableTree *tb,
+ DbTreeStack*,
+ Eterm lastkey,
+ int (*doit)(DbTableTree *tb,
+ TreeDbTerm **, // out
+ void *,
+ int),
+ void *context);
+static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm ***ret,
Eterm *partly_bound_key);
-static Sint cmp_partly_bound(Eterm partly_bound_key, Eterm bound_key, Eterm* bk_base);
-static Sint do_cmp_partly_bound(Eterm a, Eterm b, Eterm* b_base, int *done);
+static Sint cmp_partly_bound(Eterm partly_bound_key, Eterm bound_key);
+static Sint do_cmp_partly_bound(Eterm a, Eterm b, int *done);
static int analyze_pattern(DbTableTree *tb, Eterm pattern,
- struct mp_info *mpi);
+ extra_match_validator_t extra_validator, /* Optional callback */
+ struct mp_info *mpi);
static int doit_select(DbTableTree *tb,
TreeDbTerm *this,
void *ptr,
@@ -335,6 +369,10 @@ static int doit_select_delete(DbTableTree *tb,
TreeDbTerm *this,
void *ptr,
int forward);
+static int doit_select_replace(DbTableTree *tb,
+ TreeDbTerm **this_ptr,
+ void *ptr,
+ int forward);
static int partly_bound_can_match_lesser(Eterm partly_bound_1,
Eterm partly_bound_2);
@@ -368,26 +406,31 @@ static int db_erase_tree(DbTable *tbl, Eterm key, Eterm *ret);
static int db_erase_object_tree(DbTable *tbl, Eterm object,Eterm *ret);
static int db_slot_tree(Process *p, DbTable *tbl,
Eterm slot_term, Eterm *ret);
-static int db_select_tree(Process *p, DbTable *tbl,
+static int db_select_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, int reversed, Eterm *ret);
-static int db_select_count_tree(Process *p, DbTable *tbl,
+static int db_select_count_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Eterm *ret);
-static int db_select_chunk_tree(Process *p, DbTable *tbl,
+static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Sint chunk_size,
int reversed, Eterm *ret);
static int db_select_continue_tree(Process *p, DbTable *tbl,
Eterm continuation, Eterm *ret);
static int db_select_count_continue_tree(Process *p, DbTable *tbl,
Eterm continuation, Eterm *ret);
-static int db_select_delete_tree(Process *p, DbTable *tbl,
+static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Eterm *ret);
static int db_select_delete_continue_tree(Process *p, DbTable *tbl,
Eterm continuation, Eterm *ret);
-static void db_print_tree(int to, void *to_arg,
+static int db_select_replace_tree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret);
+static int db_select_replace_continue_tree(Process *p, DbTable *tbl,
+ Eterm continuation, Eterm *ret);
+static int db_take_tree(Process *, DbTable *, Eterm, Eterm *);
+static void db_print_tree(fmtfn_t to, void *to_arg,
int show, DbTable *tbl);
static int db_free_table_tree(DbTable *tbl);
-static int db_free_table_continue_tree(DbTable *tbl);
+static SWord db_free_table_continue_tree(DbTable *tbl, SWord);
static void db_foreach_offheap_tree(DbTable *,
void (*)(ErlOffHeap *, void *),
@@ -398,8 +441,11 @@ static int db_delete_all_objects_tree(Process* p, DbTable* tbl);
#ifdef HARDDEBUG
static void db_check_table_tree(DbTable *tbl);
#endif
-static int db_lookup_dbterm_tree(DbTable *, Eterm key, DbUpdateHandle*);
-static void db_finalize_dbterm_tree(DbUpdateHandle*);
+static int
+db_lookup_dbterm_tree(Process *, DbTable *, Eterm key, Eterm obj,
+ DbUpdateHandle*);
+static void
+db_finalize_dbterm_tree(int cret, DbUpdateHandle *);
/*
** Static variables
@@ -431,16 +477,14 @@ DbTableMethod db_tree =
db_select_delete_continue_tree,
db_select_count_tree,
db_select_count_continue_tree,
+ db_select_replace_tree,
+ db_select_replace_continue_tree,
+ db_take_tree,
db_delete_all_objects_tree,
db_free_table_tree,
db_free_table_continue_tree,
db_print_tree,
db_foreach_offheap_tree,
-#ifdef HARDDEBUG
- db_check_table_tree,
-#else
- NULL,
-#endif
db_lookup_dbterm_tree,
db_finalize_dbterm_tree
@@ -470,7 +514,7 @@ int db_create_tree(Process *p, DbTable *tbl)
sizeof(TreeDbTerm *) * STACK_NEED);
tb->static_stack.pos = 0;
tb->static_stack.slot = 0;
- erts_smp_atomic_init_nob(&tb->is_stack_busy, 0);
+ erts_atomic_init_nob(&tb->is_stack_busy, 0);
tb->deletion = 0;
return DB_ERROR_NONE;
}
@@ -511,7 +555,7 @@ static int db_next_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
if (is_atom(key) && key == am_EOT)
return DB_ERROR_BADKEY;
stack = get_any_stack(tb);
- this = find_next(tb, stack, key, NULL);
+ this = find_next(tb, stack, key);
release_stack(tb,stack);
if (this == NULL) {
*ret = am_EOT;
@@ -557,7 +601,7 @@ static int db_prev_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
if (is_atom(key) && key == am_EOT)
return DB_ERROR_BADKEY;
stack = get_any_stack(tb);
- this = find_prev(tb, stack, key, NULL);
+ this = find_prev(tb, stack, key);
release_stack(tb,stack);
if (this == NULL) {
*ret = am_EOT;
@@ -567,19 +611,13 @@ static int db_prev_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
return DB_ERROR_NONE;
}
-static ERTS_INLINE Sint cmp_key(DbTableTree* tb, Eterm key, Eterm* key_base,
- TreeDbTerm* obj)
-{
- return cmp_rel(key, key_base,
- GETKEY(tb,obj->dbterm.tpl), obj->dbterm.tpl);
+static ERTS_INLINE Sint cmp_key(DbTableTree* tb, Eterm key, TreeDbTerm* obj) {
+ return CMP(key, GETKEY(tb,obj->dbterm.tpl));
}
-static ERTS_INLINE int cmp_key_eq(DbTableTree* tb, Eterm key, Eterm* key_base,
- TreeDbTerm* obj)
-{
+static ERTS_INLINE int cmp_key_eq(DbTableTree* tb, Eterm key, TreeDbTerm* obj) {
Eterm obj_key = GETKEY(tb,obj->dbterm.tpl);
- return is_same(key, key_base, obj_key, obj->dbterm.tpl)
- || cmp_rel(key, key_base, obj_key, obj->dbterm.tpl) == 0;
+ return is_same(key, obj_key) || CMP(key, obj_key) == 0;
}
static int db_put_tree(DbTable *tbl, Eterm obj, int key_clash_fail)
@@ -605,15 +643,15 @@ static int db_put_tree(DbTable *tbl, Eterm obj, int key_clash_fail)
for (;;)
if (!*this) { /* Found our place */
state = 1;
- if (erts_smp_atomic_inc_read_nob(&tb->common.nitems) >= TREE_MAX_ELEMENTS) {
- erts_smp_atomic_dec_nob(&tb->common.nitems);
+ if (erts_atomic_inc_read_nob(&tb->common.nitems) >= TREE_MAX_ELEMENTS) {
+ erts_atomic_dec_nob(&tb->common.nitems);
return DB_ERROR_SYSRES;
}
*this = new_dbterm(tb, obj);
(*this)->balance = 0;
(*this)->left = (*this)->right = NULL;
break;
- } else if ((c = cmp_key(tb, key, NULL, *this)) < 0) {
+ } else if ((c = cmp_key(tb, key, *this)) < 0) {
/* go lefts */
dstack[dpos++] = DIR_LEFT;
tstack[tpos++] = this;
@@ -768,7 +806,7 @@ static int db_erase_tree(DbTable *tbl, Eterm key, Eterm *ret)
*ret = am_true;
- if ((res = linkout_tree(tb, key, NULL)) != NULL) {
+ if ((res = linkout_tree(tb, key)) != NULL) {
free_term(tb, res);
}
return DB_ERROR_NONE;
@@ -935,17 +973,15 @@ static int db_select_continue_tree(Process *p,
if (arityval(*tptr) != 8)
RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
- if (!is_small(tptr[4]) || !is_binary(tptr[5]) ||
+ if (!is_small(tptr[4]) ||
!(is_list(tptr[6]) || tptr[6] == NIL) || !is_small(tptr[7]) ||
!is_small(tptr[8]))
RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
lastkey = tptr[2];
end_condition = tptr[3];
- if (!(thing_subtag(*binary_val(tptr[5])) == REFC_BINARY_SUBTAG))
- RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
- mp = ((ProcBin *) binary_val(tptr[5]))->val;
- if (!IsMatchProgBinary(mp))
+ mp = erts_db_get_match_prog_binary(tptr[5]);
+ if (!mp)
RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
chunk_size = signed_val(tptr[4]);
@@ -956,7 +992,7 @@ static int db_select_continue_tree(Process *p,
sc.lastobj = NULL;
sc.max = 1000;
sc.keypos = tb->common.keypos;
- sc.all_objects = mp->flags & BIN_FLAG_ALL_OBJECTS;
+ sc.all_objects = mp->intern.flags & BIN_FLAG_ALL_OBJECTS;
sc.chunk_size = chunk_size;
reverse = unsigned_val(tptr[7]);
sc.got = signed_val(tptr[8]);
@@ -964,15 +1000,15 @@ static int db_select_continue_tree(Process *p,
stack = get_any_stack(tb);
if (chunk_size) {
if (reverse) {
- traverse_backwards(tb, stack, lastkey, NULL, &doit_select_chunk, &sc);
+ traverse_backwards(tb, stack, lastkey, &doit_select_chunk, &sc);
} else {
- traverse_forward(tb, stack, lastkey, NULL, &doit_select_chunk, &sc);
+ traverse_forward(tb, stack, lastkey, &doit_select_chunk, &sc);
}
} else {
if (reverse) {
- traverse_forward(tb, stack, lastkey, NULL, &doit_select, &sc);
+ traverse_forward(tb, stack, lastkey, &doit_select, &sc);
} else {
- traverse_backwards(tb, stack, lastkey, NULL, &doit_select, &sc);
+ traverse_backwards(tb, stack, lastkey, &doit_select, &sc);
}
}
release_stack(tb,stack);
@@ -997,9 +1033,9 @@ static int db_select_continue_tree(Process *p,
}
key = GETKEY(tb, sc.lastobj);
- sz = size_object_rel(key,sc.lastobj);
+ sz = size_object(key);
hp = HAlloc(p, 9 + sz);
- key = copy_struct_rel(key, sz, &hp, &MSO(p), sc.lastobj, NULL);
+ key = copy_struct(key, sz, &hp, &MSO(p));
continuation = TUPLE8
(hp,
tptr[1],
@@ -1020,8 +1056,8 @@ static int db_select_continue_tree(Process *p,
key = GETKEY(tb, sc.lastobj);
if (chunk_size) {
if (end_condition != NIL &&
- ((!reverse && cmp_partly_bound(end_condition,key,sc.lastobj) < 0) ||
- (reverse && cmp_partly_bound(end_condition,key,sc.lastobj) > 0))) {
+ ((!reverse && cmp_partly_bound(end_condition,key) < 0) ||
+ (reverse && cmp_partly_bound(end_condition,key) > 0))) {
/* done anyway */
if (!sc.got) {
RET_TO_BIF(am_EOT, DB_ERROR_NONE);
@@ -1033,16 +1069,16 @@ static int db_select_continue_tree(Process *p,
}
} else {
if (end_condition != NIL &&
- ((!reverse && cmp_partly_bound(end_condition,key,sc.lastobj) > 0) ||
- (reverse && cmp_partly_bound(end_condition,key,sc.lastobj) < 0))) {
+ ((!reverse && cmp_partly_bound(end_condition,key) > 0) ||
+ (reverse && cmp_partly_bound(end_condition,key) < 0))) {
/* done anyway */
RET_TO_BIF(sc.accum,DB_ERROR_NONE);
}
}
/* Not done yet, let's trap. */
- sz = size_object_rel(key,sc.lastobj);
+ sz = size_object(key);
hp = HAlloc(p, 9 + sz);
- key = copy_struct_rel(key, sz, &hp, &MSO(p), sc.lastobj, NULL);
+ key = copy_struct(key, sz, &hp, &MSO(p));
continuation = TUPLE8
(hp,
tptr[1],
@@ -1060,7 +1096,7 @@ static int db_select_continue_tree(Process *p,
}
-static int db_select_tree(Process *p, DbTable *tbl,
+static int db_select_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, int reverse, Eterm *ret)
{
/* Strategy: Traverse backwards to build resulting list from tail to head */
@@ -1069,7 +1105,6 @@ static int db_select_tree(Process *p, DbTable *tbl,
struct select_context sc;
struct mp_info mpi;
Eterm lastkey = THE_NON_VALUE;
- Eterm* lk_base = NULL;
Eterm key;
Eterm continuation;
unsigned sz;
@@ -1098,7 +1133,7 @@ static int db_select_tree(Process *p, DbTable *tbl,
sc.got = 0;
sc.chunk_size = 0;
- if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) {
+ if ((errcode = analyze_pattern(tb, pattern, NULL, &mpi)) != DB_ERROR_NONE) {
RET_TO_BIF(NIL,errcode);
}
@@ -1111,8 +1146,8 @@ static int db_select_tree(Process *p, DbTable *tbl,
sc.all_objects = mpi.all_objects;
if (!mpi.got_partial && mpi.some_limitation &&
- CMP(mpi.least,mpi.most) == 0) {
- doit_select(tb,mpi.save_term,&sc,0 /* direction doesn't matter */);
+ CMP_EQ(mpi.least,mpi.most)) {
+ doit_select(tb,*(mpi.save_term),&sc,0 /* direction doesn't matter */);
RET_TO_BIF(sc.accum,DB_ERROR_NONE);
}
@@ -1121,20 +1156,18 @@ static int db_select_tree(Process *p, DbTable *tbl,
if (mpi.some_limitation) {
if ((this = find_prev_from_pb_key(tb, stack, mpi.least)) != NULL) {
lastkey = GETKEY(tb, this->dbterm.tpl);
- lk_base = this->dbterm.tpl;
}
sc.end_condition = mpi.most;
}
- traverse_forward(tb, stack, lastkey, lk_base, &doit_select, &sc);
+ traverse_forward(tb, stack, lastkey, &doit_select, &sc);
} else {
if (mpi.some_limitation) {
if ((this = find_next_from_pb_key(tb, stack, mpi.most)) != NULL) {
lastkey = GETKEY(tb, this->dbterm.tpl);
- lk_base = this->dbterm.tpl;
}
sc.end_condition = mpi.least;
}
- traverse_backwards(tb, stack, lastkey, lk_base, &doit_select, &sc);
+ traverse_backwards(tb, stack, lastkey, &doit_select, &sc);
}
release_stack(tb,stack);
#ifdef HARDDEBUG
@@ -1147,16 +1180,16 @@ static int db_select_tree(Process *p, DbTable *tbl,
}
key = GETKEY(tb, sc.lastobj);
- sz = size_object_rel(key, sc.lastobj);
- hp = HAlloc(p, 9 + sz + PROC_BIN_SIZE);
- key = copy_struct_rel(key, sz, &hp, &MSO(p), sc.lastobj, NULL);
+ sz = size_object(key);
+ hp = HAlloc(p, 9 + sz + ERTS_MAGIC_REF_THING_SIZE);
+ key = copy_struct(key, sz, &hp, &MSO(p));
if (mpi.all_objects)
- (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
- mpb=db_make_mp_binary(p,mpi.mp,&hp);
+ (mpi.mp)->intern.flags |= BIN_FLAG_ALL_OBJECTS;
+ mpb= erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE8
(hp,
- tb->common.id,
+ tid,
key,
sc.end_condition, /* From the match program, needn't be copied */
make_small(0), /* Chunk size of zero means not chunked to the
@@ -1207,14 +1240,12 @@ static int db_select_count_continue_tree(Process *p,
tptr = tuple_val(continuation);
if (arityval(*tptr) != 5)
- erl_exit(1,"Internal error in ets:select_count/1");
+ erts_exit(ERTS_ERROR_EXIT,"Internal error in ets:select_count/1");
lastkey = tptr[2];
end_condition = tptr[3];
- if (!(thing_subtag(*binary_val(tptr[4])) == REFC_BINARY_SUBTAG))
- RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
- mp = ((ProcBin *) binary_val(tptr[4]))->val;
- if (!IsMatchProgBinary(mp))
+ mp = erts_db_get_match_prog_binary(tptr[4]);
+ if (!mp)
RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
sc.p = p;
@@ -1230,7 +1261,7 @@ static int db_select_count_continue_tree(Process *p,
}
stack = get_any_stack(tb);
- traverse_backwards(tb, stack, lastkey, NULL, &doit_select_count, &sc);
+ traverse_backwards(tb, stack, lastkey, &doit_select_count, &sc);
release_stack(tb,stack);
BUMP_REDS(p, 1000 - sc.max);
@@ -1240,12 +1271,12 @@ static int db_select_count_continue_tree(Process *p,
}
key = GETKEY(tb, sc.lastobj);
if (end_condition != NIL &&
- (cmp_partly_bound(end_condition,key,sc.lastobj) > 0)) {
+ (cmp_partly_bound(end_condition,key) > 0)) {
/* done anyway */
RET_TO_BIF(make_small(sc.got),DB_ERROR_NONE);
}
/* Not done yet, let's trap. */
- sz = size_object_rel(key, sc.lastobj);
+ sz = size_object(key);
if (IS_USMALL(0, sc.got)) {
hp = HAlloc(p, sz + 6);
egot = make_small(sc.got);
@@ -1255,7 +1286,7 @@ static int db_select_count_continue_tree(Process *p,
egot = uint_to_big(sc.got, hp);
hp += BIG_UINT_HEAP_SIZE;
}
- key = copy_struct_rel(key, sz, &hp, &MSO(p), sc.lastobj, NULL);
+ key = copy_struct(key, sz, &hp, &MSO(p));
continuation = TUPLE5
(hp,
tptr[1],
@@ -1270,7 +1301,7 @@ static int db_select_count_continue_tree(Process *p,
}
-static int db_select_count_tree(Process *p, DbTable *tbl,
+static int db_select_count_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
@@ -1278,7 +1309,6 @@ static int db_select_count_tree(Process *p, DbTable *tbl,
struct select_count_context sc;
struct mp_info mpi;
Eterm lastkey = THE_NON_VALUE;
- Eterm* lk_base = NULL;
Eterm key;
Eterm continuation;
unsigned sz;
@@ -1306,7 +1336,7 @@ static int db_select_count_tree(Process *p, DbTable *tbl,
sc.keypos = tb->common.keypos;
sc.got = 0;
- if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) {
+ if ((errcode = analyze_pattern(tb, pattern, NULL, &mpi)) != DB_ERROR_NONE) {
RET_TO_BIF(NIL,errcode);
}
@@ -1319,8 +1349,8 @@ static int db_select_count_tree(Process *p, DbTable *tbl,
sc.all_objects = mpi.all_objects;
if (!mpi.got_partial && mpi.some_limitation &&
- CMP(mpi.least,mpi.most) == 0) {
- doit_select_count(tb,mpi.save_term,&sc,0 /* dummy */);
+ CMP_EQ(mpi.least,mpi.most)) {
+ doit_select_count(tb,*(mpi.save_term),&sc,0 /* dummy */);
RET_TO_BIF(erts_make_integer(sc.got,p),DB_ERROR_NONE);
}
@@ -1328,12 +1358,11 @@ static int db_select_count_tree(Process *p, DbTable *tbl,
if (mpi.some_limitation) {
if ((this = find_next_from_pb_key(tb, stack, mpi.most)) != NULL) {
lastkey = GETKEY(tb, this->dbterm.tpl);
- lk_base = this->dbterm.tpl;
}
sc.end_condition = mpi.least;
}
- traverse_backwards(tb, stack, lastkey, lk_base, &doit_select_count, &sc);
+ traverse_backwards(tb, stack, lastkey, &doit_select_count, &sc);
release_stack(tb,stack);
BUMP_REDS(p, 1000 - sc.max);
if (sc.max > 0) {
@@ -1341,24 +1370,24 @@ static int db_select_count_tree(Process *p, DbTable *tbl,
}
key = GETKEY(tb, sc.lastobj);
- sz = size_object_rel(key, sc.lastobj);
+ sz = size_object(key);
if (IS_USMALL(0, sc.got)) {
- hp = HAlloc(p, sz + PROC_BIN_SIZE + 6);
+ hp = HAlloc(p, sz + ERTS_MAGIC_REF_THING_SIZE + 6);
egot = make_small(sc.got);
}
else {
- hp = HAlloc(p, BIG_UINT_HEAP_SIZE + sz + PROC_BIN_SIZE + 6);
+ hp = HAlloc(p, BIG_UINT_HEAP_SIZE + sz + ERTS_MAGIC_REF_THING_SIZE + 6);
egot = uint_to_big(sc.got, hp);
hp += BIG_UINT_HEAP_SIZE;
}
- key = copy_struct_rel(key, sz, &hp, &MSO(p), sc.lastobj, NULL);
+ key = copy_struct(key, sz, &hp, &MSO(p));
if (mpi.all_objects)
- (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
- mpb = db_make_mp_binary(p,mpi.mp,&hp);
+ (mpi.mp)->intern.flags |= BIN_FLAG_ALL_OBJECTS;
+ mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE5
(hp,
- tb->common.id,
+ tid,
key,
sc.end_condition, /* From the match program, needn't be copied */
mpb,
@@ -1372,7 +1401,7 @@ static int db_select_count_tree(Process *p, DbTable *tbl,
}
-static int db_select_chunk_tree(Process *p, DbTable *tbl,
+static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Sint chunk_size,
int reverse,
Eterm *ret)
@@ -1382,7 +1411,6 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl,
struct select_context sc;
struct mp_info mpi;
Eterm lastkey = THE_NON_VALUE;
- Eterm* lk_base = NULL;
Eterm key;
Eterm continuation;
unsigned sz;
@@ -1411,7 +1439,7 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl,
sc.got = 0;
sc.chunk_size = chunk_size;
- if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) {
+ if ((errcode = analyze_pattern(tb, pattern, NULL, &mpi)) != DB_ERROR_NONE) {
RET_TO_BIF(NIL,errcode);
}
@@ -1424,8 +1452,8 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl,
sc.all_objects = mpi.all_objects;
if (!mpi.got_partial && mpi.some_limitation &&
- CMP(mpi.least,mpi.most) == 0) {
- doit_select(tb,mpi.save_term,&sc, 0 /* direction doesn't matter */);
+ CMP_EQ(mpi.least,mpi.most)) {
+ doit_select(tb,*(mpi.save_term),&sc, 0 /* direction doesn't matter */);
if (sc.accum != NIL) {
hp=HAlloc(p, 3);
RET_TO_BIF(TUPLE2(hp,sc.accum,am_EOT),DB_ERROR_NONE);
@@ -1439,20 +1467,18 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl,
if (mpi.some_limitation) {
if ((this = find_next_from_pb_key(tb, stack, mpi.most)) != NULL) {
lastkey = GETKEY(tb, this->dbterm.tpl);
- lk_base = this->dbterm.tpl;
}
sc.end_condition = mpi.least;
}
- traverse_backwards(tb, stack, lastkey, lk_base, &doit_select_chunk, &sc);
+ traverse_backwards(tb, stack, lastkey, &doit_select_chunk, &sc);
} else {
if (mpi.some_limitation) {
if ((this = find_prev_from_pb_key(tb, stack, mpi.least)) != NULL) {
lastkey = GETKEY(tb, this->dbterm.tpl);
- lk_base = this->dbterm.tpl;
}
sc.end_condition = mpi.most;
}
- traverse_forward(tb, stack, lastkey, lk_base, &doit_select_chunk, &sc);
+ traverse_forward(tb, stack, lastkey, &doit_select_chunk, &sc);
}
release_stack(tb,stack);
@@ -1477,16 +1503,16 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl,
}
key = GETKEY(tb, sc.lastobj);
- sz = size_object_rel(key, sc.lastobj);
- hp = HAlloc(p, 9 + sz + PROC_BIN_SIZE);
- key = copy_struct_rel(key, sz, &hp, &MSO(p), sc.lastobj, NULL);
+ sz = size_object(key);
+ hp = HAlloc(p, 9 + sz + ERTS_MAGIC_REF_THING_SIZE);
+ key = copy_struct(key, sz, &hp, &MSO(p));
if (mpi.all_objects)
- (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
- mpb = db_make_mp_binary(p,mpi.mp,&hp);
+ (mpi.mp)->intern.flags |= BIN_FLAG_ALL_OBJECTS;
+ mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE8
(hp,
- tb->common.id,
+ tid,
key,
sc.end_condition, /* From the match program,
needn't be copied */
@@ -1502,16 +1528,16 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl,
}
key = GETKEY(tb, sc.lastobj);
- sz = size_object_rel(key, sc.lastobj);
- hp = HAlloc(p, 9 + sz + PROC_BIN_SIZE);
- key = copy_struct_rel(key, sz, &hp, &MSO(p), sc.lastobj, NULL);
+ sz = size_object(key);
+ hp = HAlloc(p, 9 + sz + ERTS_MAGIC_REF_THING_SIZE);
+ key = copy_struct(key, sz, &hp, &MSO(p));
if (mpi.all_objects)
- (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
- mpb = db_make_mp_binary(p,mpi.mp,&hp);
+ (mpi.mp)->intern.flags |= BIN_FLAG_ALL_OBJECTS;
+ mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE8
(hp,
- tb->common.id,
+ tid,
key,
sc.end_condition, /* From the match program, needn't be copied */
make_small(chunk_size),
@@ -1566,7 +1592,7 @@ static int db_select_delete_continue_tree(Process *p,
sc.erase_lastterm = 0; /* Before first RET_TO_BIF */
sc.lastterm = NULL;
- mp = ((ProcBin *) binary_val(tptr[4]))->val;
+ mp = erts_db_get_match_prog_binary_unchecked(tptr[4]);
sc.p = p;
sc.tb = tb;
if (is_big(tptr[5])) {
@@ -1579,8 +1605,8 @@ static int db_select_delete_continue_tree(Process *p,
sc.max = 1000;
sc.keypos = tb->common.keypos;
- ASSERT(!erts_smp_atomic_read_nob(&tb->is_stack_busy));
- traverse_backwards(tb, &tb->static_stack, lastkey, NULL, &doit_select_delete, &sc);
+ ASSERT(!erts_atomic_read_nob(&tb->is_stack_busy));
+ traverse_backwards(tb, &tb->static_stack, lastkey, &doit_select_delete, &sc);
BUMP_REDS(p, 1000 - sc.max);
@@ -1589,11 +1615,11 @@ static int db_select_delete_continue_tree(Process *p,
}
key = GETKEY(tb, (sc.lastterm)->dbterm.tpl);
if (end_condition != NIL &&
- cmp_partly_bound(end_condition,key,sc.lastterm->dbterm.tpl) > 0) { /* done anyway */
+ cmp_partly_bound(end_condition,key) > 0) { /* done anyway */
RET_TO_BIF(erts_make_integer(sc.accum,p),DB_ERROR_NONE);
}
/* Not done yet, let's trap. */
- sz = size_object_rel(key, sc.lastterm->dbterm.tpl);
+ sz = size_object(key);
if (IS_USMALL(0, sc.accum)) {
hp = HAlloc(p, sz + 6);
eaccsum = make_small(sc.accum);
@@ -1603,7 +1629,7 @@ static int db_select_delete_continue_tree(Process *p,
eaccsum = uint_to_big(sc.accum, hp);
hp += BIG_UINT_HEAP_SIZE;
}
- key = copy_struct_rel(key, sz, &hp, &MSO(p), sc.lastterm->dbterm.tpl, NULL);
+ key = copy_struct(key, sz, &hp, &MSO(p));
continuation = TUPLE5
(hp,
tptr[1],
@@ -1617,14 +1643,13 @@ static int db_select_delete_continue_tree(Process *p,
#undef RET_TO_BIF
}
-static int db_select_delete_tree(Process *p, DbTable *tbl,
+static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
struct select_delete_context sc;
struct mp_info mpi;
Eterm lastkey = THE_NON_VALUE;
- Eterm* lk_base = NULL;
Eterm key;
Eterm continuation;
unsigned sz;
@@ -1656,7 +1681,7 @@ static int db_select_delete_tree(Process *p, DbTable *tbl,
sc.keypos = tb->common.keypos;
sc.tb = tb;
- if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) {
+ if ((errcode = analyze_pattern(tb, pattern, NULL, &mpi)) != DB_ERROR_NONE) {
RET_TO_BIF(0,errcode);
}
@@ -1668,8 +1693,8 @@ static int db_select_delete_tree(Process *p, DbTable *tbl,
sc.mp = mpi.mp;
if (!mpi.got_partial && mpi.some_limitation &&
- CMP(mpi.least,mpi.most) == 0) {
- doit_select_delete(tb,mpi.save_term,&sc, 0 /* direction doesn't
+ CMP_EQ(mpi.least,mpi.most)) {
+ doit_select_delete(tb,*(mpi.save_term),&sc, 0 /* direction doesn't
matter */);
RET_TO_BIF(erts_make_integer(sc.accum,p),DB_ERROR_NONE);
}
@@ -1677,12 +1702,11 @@ static int db_select_delete_tree(Process *p, DbTable *tbl,
if (mpi.some_limitation) {
if ((this = find_next_from_pb_key(tb, &tb->static_stack, mpi.most)) != NULL) {
lastkey = GETKEY(tb, this->dbterm.tpl);
- lk_base = this->dbterm.tpl;
}
sc.end_condition = mpi.least;
}
- traverse_backwards(tb, &tb->static_stack, lastkey, lk_base, &doit_select_delete, &sc);
+ traverse_backwards(tb, &tb->static_stack, lastkey, &doit_select_delete, &sc);
BUMP_REDS(p, 1000 - sc.max);
if (sc.max > 0) {
@@ -1690,22 +1714,22 @@ static int db_select_delete_tree(Process *p, DbTable *tbl,
}
key = GETKEY(tb, (sc.lastterm)->dbterm.tpl);
- sz = size_object_rel(key, sc.lastterm->dbterm.tpl);
+ sz = size_object(key);
if (IS_USMALL(0, sc.accum)) {
- hp = HAlloc(p, sz + PROC_BIN_SIZE + 6);
+ hp = HAlloc(p, sz + ERTS_MAGIC_REF_THING_SIZE + 6);
eaccsum = make_small(sc.accum);
}
else {
- hp = HAlloc(p, BIG_UINT_HEAP_SIZE + sz + PROC_BIN_SIZE + 6);
+ hp = HAlloc(p, BIG_UINT_HEAP_SIZE + sz + ERTS_MAGIC_REF_THING_SIZE + 6);
eaccsum = uint_to_big(sc.accum, hp);
hp += BIG_UINT_HEAP_SIZE;
}
- key = copy_struct_rel(key, sz, &hp, &MSO(p), sc.lastterm->dbterm.tpl, NULL);
- mpb = db_make_mp_binary(p,mpi.mp,&hp);
+ key = copy_struct(key, sz, &hp, &MSO(p));
+ mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE5
(hp,
- tb->common.id,
+ tid,
key,
sc.end_condition, /* From the match program, needn't be copied */
mpb,
@@ -1722,13 +1746,237 @@ static int db_select_delete_tree(Process *p, DbTable *tbl,
}
+static int db_select_replace_continue_tree(Process *p,
+ DbTable *tbl,
+ Eterm continuation,
+ Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ DbTreeStack* stack;
+ struct select_replace_context sc;
+ unsigned sz;
+ Eterm *hp;
+ Eterm lastkey;
+ Eterm end_condition;
+ Binary *mp;
+ Eterm key;
+ Eterm *tptr;
+ Eterm ereplaced;
+ Sint prev_replaced;
+
+
+#define RET_TO_BIF(Term, State) do { *ret = (Term); return State; } while(0)
+
+    /* Decode continuation. We know it is a tuple and can trust its
+       contents, as this is only called by ourselves. */
+
+ /* continuation:
+ {Table, Lastkey, EndCondition, MatchProgBin, HowManyReplaced}*/
+
+ tptr = tuple_val(continuation);
+
+ if (arityval(*tptr) != 5)
+ erts_exit(ERTS_ERROR_EXIT,"Internal error in ets:select_replace/1");
+
+ lastkey = tptr[2];
+ end_condition = tptr[3];
+ mp = erts_db_get_match_prog_binary_unchecked(tptr[4]);
+
+ sc.p = p;
+ sc.mp = mp;
+ sc.end_condition = NIL;
+ sc.lastobj = NULL;
+ sc.max = 1000;
+ sc.keypos = tb->common.keypos;
+ if (is_big(tptr[5])) {
+ sc.replaced = big_to_uint32(tptr[5]);
+ } else {
+ sc.replaced = unsigned_val(tptr[5]);
+ }
+ prev_replaced = sc.replaced;
+
+ stack = get_any_stack(tb);
+ traverse_update_backwards(tb, stack, lastkey, &doit_select_replace, &sc);
+ release_stack(tb,stack);
+
+    /* the more objects we've replaced, the more reductions we've consumed */
+ BUMP_REDS(p, MIN(2000, (1000 - sc.max) + (sc.replaced - prev_replaced)));
+
+ if (sc.max > 0) {
+ RET_TO_BIF(erts_make_integer(sc.replaced,p), DB_ERROR_NONE);
+ }
+ key = GETKEY(tb, sc.lastobj);
+ if (end_condition != NIL &&
+ (cmp_partly_bound(end_condition,key) > 0)) {
+ /* done anyway */
+ RET_TO_BIF(make_small(sc.replaced),DB_ERROR_NONE);
+ }
+ /* Not done yet, let's trap. */
+ sz = size_object(key);
+ if (IS_USMALL(0, sc.replaced)) {
+ hp = HAlloc(p, sz + 6);
+ ereplaced = make_small(sc.replaced);
+ }
+ else {
+ hp = HAlloc(p, BIG_UINT_HEAP_SIZE + sz + 6);
+ ereplaced = uint_to_big(sc.replaced, hp);
+ hp += BIG_UINT_HEAP_SIZE;
+ }
+ key = copy_struct(key, sz, &hp, &MSO(p));
+ continuation = TUPLE5
+ (hp,
+ tptr[1],
+ key,
+ tptr[3],
+ tptr[4],
+ ereplaced);
+ RET_TO_BIF(bif_trap1(&ets_select_replace_continue_exp, p, continuation),
+ DB_ERROR_NONE);
+
+#undef RET_TO_BIF
+}
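+
Like the other select variants, db_select_replace_continue_tree packs its resume state into the {Table, LastKey, EndCondition, MatchProgBin, HowManyReplaced} tuple, does a bounded slice of work, and traps back to ets_select_replace_continue_exp via bif_trap1 until the traversal completes. A minimal sketch of that yield-and-resume shape, in plain C with hypothetical names rather than ERTS types:

    #include <stddef.h>

    /* Hypothetical resume state, mirroring the continuation tuple. */
    struct continuation {
        size_t next_index;    /* where to pick up again (cf. LastKey) */
        unsigned long count;  /* result accumulated so far */
    };

    /* Do at most `budget` units of work; return 1 when finished, or 0 to
     * signal a trap: the caller re-invokes later with the same state. */
    static int select_step(const int *items, size_t nitems,
                           struct continuation *cont, int budget)
    {
        size_t i = cont->next_index;

        while (i < nitems && budget-- > 0) {
            if (items[i] > 0)  /* stand-in for running the match program */
                cont->count++;
            i++;
        }
        cont->next_index = i;
        return i == nitems;
    }

The emulator's equivalent of the caller's retry loop is the scheduler re-dispatching the continuation BIF, which keeps one long ets call from hogging a scheduler thread.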
+
+static int db_select_replace_tree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ DbTreeStack* stack;
+ struct select_replace_context sc;
+ struct mp_info mpi;
+ Eterm lastkey = THE_NON_VALUE;
+ Eterm key;
+ Eterm continuation;
+ unsigned sz;
+ Eterm *hp;
+ TreeDbTerm *this;
+ int errcode;
+ Eterm ereplaced;
+ Eterm mpb;
+
+
+#define RET_TO_BIF(Term,RetVal) do { \
+ if (mpi.mp != NULL) { \
+ erts_bin_free(mpi.mp); \
+ } \
+ *ret = (Term); \
+ return RetVal; \
+ } while(0)
+
+ mpi.mp = NULL;
+
+ sc.lastobj = NULL;
+ sc.p = p;
+ sc.tb = tb;
+ sc.max = 1000;
+ sc.end_condition = NIL;
+ sc.keypos = tb->common.keypos;
+ sc.replaced = 0;
+
+ if ((errcode = analyze_pattern(tb, pattern, db_match_keeps_key, &mpi)) != DB_ERROR_NONE) {
+ RET_TO_BIF(NIL,errcode);
+ }
+
+ if (!mpi.something_can_match) {
+ RET_TO_BIF(make_small(0),DB_ERROR_NONE);
+ /* can't possibly match anything */
+ }
+
+ sc.mp = mpi.mp;
+ sc.all_objects = mpi.all_objects;
+
+ stack = get_static_stack(tb);
+ if (!mpi.got_partial && mpi.some_limitation &&
+ CMP_EQ(mpi.least,mpi.most)) {
+ TreeDbTerm* term = *(mpi.save_term);
+ doit_select_replace(tb,mpi.save_term,&sc,0 /* dummy */);
+ if (stack != NULL) {
+ if (TOP_NODE(stack) == term)
+                /* throw away potentially invalid reference */
+ REPLACE_TOP_NODE(stack, *(mpi.save_term));
+ release_stack(tb, stack);
+ }
+ RET_TO_BIF(erts_make_integer(sc.replaced,p),DB_ERROR_NONE);
+ }
+
+ if (stack == NULL)
+ stack = get_any_stack(tb);
+
+ if (mpi.some_limitation) {
+ if ((this = find_next_from_pb_key(tb, stack, mpi.most)) != NULL) {
+ lastkey = GETKEY(tb, this->dbterm.tpl);
+ }
+ sc.end_condition = mpi.least;
+ }
+
+ traverse_update_backwards(tb, stack, lastkey, &doit_select_replace, &sc);
+ release_stack(tb,stack);
+    /* the more objects we've replaced, the more reductions we've consumed */
+ BUMP_REDS(p, MIN(2000, (1000 - sc.max) + sc.replaced));
+ if (sc.max > 0) {
+ RET_TO_BIF(erts_make_integer(sc.replaced,p),DB_ERROR_NONE);
+ }
+
+ key = GETKEY(tb, sc.lastobj);
+ sz = size_object(key);
+ if (IS_USMALL(0, sc.replaced)) {
+ hp = HAlloc(p, sz + ERTS_MAGIC_REF_THING_SIZE + 6);
+ ereplaced = make_small(sc.replaced);
+ }
+ else {
+ hp = HAlloc(p, BIG_UINT_HEAP_SIZE + sz + ERTS_MAGIC_REF_THING_SIZE + 6);
+ ereplaced = uint_to_big(sc.replaced, hp);
+ hp += BIG_UINT_HEAP_SIZE;
+ }
+ key = copy_struct(key, sz, &hp, &MSO(p));
+ if (mpi.all_objects)
+ (mpi.mp)->intern.flags |= BIN_FLAG_ALL_OBJECTS;
+ mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
+
+ continuation = TUPLE5
+ (hp,
+ tid,
+ key,
+ sc.end_condition, /* From the match program, needn't be copied */
+ mpb,
+ ereplaced);
+
+ /* Don't free mpi.mp, so don't use macro */
+ *ret = bif_trap1(&ets_select_replace_continue_exp, p, continuation);
+ return DB_ERROR_NONE;
+
+#undef RET_TO_BIF
+
+}
+
+static int db_take_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ TreeDbTerm *this;
+
+ *ret = NIL;
+ this = linkout_tree(tb, key);
+ if (this) {
+ Eterm copy, *hp, *hend;
+
+ hp = HAlloc(p, this->dbterm.size + 2);
+ hend = hp + this->dbterm.size + 2;
+ copy = db_copy_object_from_ets(&tb->common,
+ &this->dbterm, &hp, &MSO(p));
+ *ret = CONS(hp, copy, NIL);
+ hp += 2;
+ HRelease(p, hend, hp);
+ free_term(tb, this);
+ }
+ return DB_ERROR_NONE;
+}
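+
db_take_tree allocates the worst case up front (the object's size plus two words for the cons cell) and then hands the unused heap back with HRelease. The same allocate-an-upper-bound-then-shrink idea in portable C, with malloc/realloc standing in for HAlloc/HRelease (a sketch, not the ERTS allocator):

    #include <stdlib.h>

    /* Copy up to max_len bytes of src, then give back the unused tail. */
    static char *build_shrunk_copy(const char *src, size_t max_len,
                                   size_t *out_len)
    {
        char *buf = malloc(max_len ? max_len : 1);    /* upper bound */
        size_t used = 0;
        char *shrunk;

        if (buf == NULL)
            return NULL;
        while (used < max_len && src[used] != '\0') { /* build the result */
            buf[used] = src[used];
            used++;
        }
        *out_len = used;
        shrunk = realloc(buf, used ? used : 1);       /* "HRelease" the rest */
        return shrunk ? shrunk : buf;
    }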
+
/*
** Other interface routines (not directly coupled to one bif)
*/
/* Display tree contents (for dump) */
-static void db_print_tree(int to, void *to_arg,
+static void db_print_tree(fmtfn_t to, void *to_arg,
int show,
DbTable *tbl)
{
@@ -1749,38 +1997,37 @@ static void db_print_tree(int to, void *to_arg,
/* release all memory occupied by a single table */
static int db_free_table_tree(DbTable *tbl)
{
- while (!db_free_table_continue_tree(tbl))
+ while (db_free_table_continue_tree(tbl, ERTS_SWORD_MAX) < 0)
;
return 1;
}
-static int db_free_table_continue_tree(DbTable *tbl)
+static SWord db_free_table_continue_tree(DbTable *tbl, SWord reds)
{
DbTableTree *tb = &tbl->tree;
- int result;
if (!tb->deletion) {
tb->static_stack.pos = 0;
tb->deletion = 1;
PUSH_NODE(&tb->static_stack, tb->root);
}
- result = do_free_tree_cont(tb, DELETE_RECORD_LIMIT);
- if (result) { /* Completely done. */
+ reds = do_free_tree_continue(tb, reds);
+ if (reds >= 0) { /* Completely done. */
erts_db_free(ERTS_ALC_T_DB_STK,
(DbTable *) tb,
(void *) tb->static_stack.array,
sizeof(TreeDbTerm *) * STACK_NEED);
- ASSERT(erts_smp_atomic_read_nob(&tb->common.memory_size)
+ ASSERT(erts_atomic_read_nob(&tb->common.memory_size)
== sizeof(DbTable));
}
- return result;
+ return reds;
}
static int db_delete_all_objects_tree(Process* p, DbTable* tbl)
{
db_free_table_tree(tbl);
db_create_tree(p, tbl);
- erts_smp_atomic_set_nob(&tbl->tree.common.nitems, 0);
+ erts_atomic_set_nob(&tbl->tree.common.nitems, 0);
return 0;
}
@@ -1817,9 +2064,7 @@ do_db_tree_foreach_offheap(TreeDbTerm *tdbt,
do_db_tree_foreach_offheap(tdbt->right, func, arg);
}
-static TreeDbTerm *linkout_tree(DbTableTree *tb,
- Eterm key, Eterm* key_base)
-{
+static TreeDbTerm *linkout_tree(DbTableTree *tb, Eterm key) {
TreeDbTerm **tstack[STACK_NEED];
int tpos = 0;
int dstack[STACK_NEED+1];
@@ -1841,7 +2086,7 @@ static TreeDbTerm *linkout_tree(DbTableTree *tb,
for (;;) {
if (!*this) { /* Failure */
return NULL;
- } else if ((c = cmp_key(tb, key, key_base, *this)) < 0) {
+ } else if ((c = cmp_key(tb, key, *this)) < 0) {
dstack[dpos++] = DIR_LEFT;
tstack[tpos++] = this;
this = &((*this)->left);
@@ -1862,7 +2107,7 @@ static TreeDbTerm *linkout_tree(DbTableTree *tb,
tstack[tpos++] = this;
state = delsub(this);
}
- erts_smp_atomic_dec_nob(&tb->common.nitems);
+ erts_atomic_dec_nob(&tb->common.nitems);
break;
}
}
@@ -1905,7 +2150,7 @@ static TreeDbTerm *linkout_object_tree(DbTableTree *tb,
for (;;) {
if (!*this) { /* Failure */
return NULL;
- } else if ((c = cmp_key(tb,key,NULL,*this)) < 0) {
+ } else if ((c = cmp_key(tb,key,*this)) < 0) {
dstack[dpos++] = DIR_LEFT;
tstack[tpos++] = this;
this = &((*this)->left);
@@ -1929,7 +2174,7 @@ static TreeDbTerm *linkout_object_tree(DbTableTree *tb,
tstack[tpos++] = this;
state = delsub(this);
}
- erts_smp_atomic_dec_nob(&tb->common.nitems);
+ erts_atomic_dec_nob(&tb->common.nitems);
break;
}
}
@@ -1948,8 +2193,9 @@ static TreeDbTerm *linkout_object_tree(DbTableTree *tb,
** For the select functions, analyzes the pattern and determines which
** part of the tree should be searched. Also compiles the match program
*/
-static int analyze_pattern(DbTableTree *tb, Eterm pattern,
- struct mp_info *mpi)
+static int analyze_pattern(DbTableTree *tb, Eterm pattern,
+ extra_match_validator_t extra_validator, /* Optional callback */
+ struct mp_info *mpi)
{
Eterm lst, tpl, ttpl;
Eterm *matches,*guards, *bodies;
@@ -1987,7 +2233,10 @@ static int analyze_pattern(DbTableTree *tb, Eterm pattern,
i = 0;
for(lst = pattern; is_list(lst); lst = CDR(list_val(lst))) {
- Eterm body;
+ Eterm match;
+ Eterm guard;
+ Eterm body;
+
ttpl = CAR(list_val(lst));
if (!is_tuple(ttpl)) {
if (buff != sbuff) {
@@ -2002,9 +2251,17 @@ static int analyze_pattern(DbTableTree *tb, Eterm pattern,
}
return DB_ERROR_BADPARAM;
}
- matches[i] = tpl = ptpl[1];
- guards[i] = ptpl[2];
+ matches[i] = match = tpl = ptpl[1];
+ guards[i] = guard = ptpl[2];
bodies[i] = body = ptpl[3];
+
+ if(extra_validator != NULL && !extra_validator(tb->common.keypos, match, guard, body)) {
+ if (buff != sbuff) {
+ erts_free(ERTS_ALC_T_DB_TMP, buff);
+ }
+ return DB_ERROR_BADPARAM;
+ }
+
if (!is_list(body) || CDR(list_val(body)) != NIL ||
CAR(list_val(body)) != am_DollarUnderscore) {
mpi->all_objects = 0;
@@ -2012,7 +2269,7 @@ static int analyze_pattern(DbTableTree *tb, Eterm pattern,
++i;
partly_bound = NIL;
- res = key_given(tb, tpl, &mpi->save_term, &partly_bound);
+ res = key_given(tb, tpl, &(mpi->save_term), &partly_bound);
if ( res >= 0 ) { /* Can match something */
key = 0;
mpi->something_can_match = 1;
@@ -2058,7 +2315,7 @@ static int analyze_pattern(DbTableTree *tb, Eterm pattern,
return DB_ERROR_NONE;
}
-static int do_free_tree_cont(DbTableTree *tb, int num_left)
+static SWord do_free_tree_continue(DbTableTree *tb, SWord reds)
{
TreeDbTerm *root;
TreeDbTerm *p;
@@ -2077,15 +2334,14 @@ static int do_free_tree_cont(DbTableTree *tb, int num_left)
root = p;
} else {
free_term(tb, root);
- if (--num_left > 0) {
- break;
- } else {
- return 0; /* Done enough for now */
- }
+ if (--reds < 0) {
+ return reds; /* Done enough for now */
+ }
+ break;
}
}
}
- return 1;
+ return reds;
}
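
The sign convention introduced here is worth spelling out: do_free_tree_continue spends one reduction per freed node and returns the remaining budget, so a non-negative result means the whole tree is gone, while a negative result means the caller should yield and call again. A self-contained sketch of a budgeted, non-recursive teardown with the same convention (generic C, not the TreeDbTerm layout; the real code walks via the static stack, the rotation trick below avoids needing one):

    #include <stdlib.h>

    struct node { struct node *left, *right; };

    /* Free the tree at *root, spending one unit of reds per node.
     * Returns the leftover budget (>= 0) when done, or a negative
     * value meaning "out of budget, call again with the saved *root". */
    static long free_tree_continue(struct node **root, long reds)
    {
        struct node *p = *root;

        while (p != NULL) {
            if (p->left != NULL) {      /* rotate the left child up */
                struct node *l = p->left;
                p->left = l->right;
                l->right = p;
                p = l;
            } else {
                struct node *r = p->right;
                free(p);
                p = r;
                if (--reds < 0) {
                    *root = p;          /* save progress before yielding */
                    return reds;
                }
            }
        }
        *root = NULL;
        return reds;
    }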
/*
@@ -2291,15 +2547,13 @@ done:
* Find next and previous in sort order
*/
-static TreeDbTerm *find_next(DbTableTree *tb, DbTreeStack* stack,
- Eterm key, Eterm* key_base)
-{
+static TreeDbTerm *find_next(DbTableTree *tb, DbTreeStack* stack, Eterm key) {
TreeDbTerm *this;
TreeDbTerm *tmp;
Sint c;
if(( this = TOP_NODE(stack)) != NULL) {
- if (!cmp_key_eq(tb,key,key_base,this)) {
+ if (!cmp_key_eq(tb,key,this)) {
/* Start from the beginning */
stack->pos = stack->slot = 0;
}
@@ -2309,7 +2563,7 @@ static TreeDbTerm *find_next(DbTableTree *tb, DbTreeStack* stack,
return NULL;
for (;;) {
PUSH_NODE(stack, this);
- if (( c = cmp_key(tb,key,key_base,this) ) > 0) {
+ if (( c = cmp_key(tb,key,this) ) > 0) {
            if (this->right == NULL) /* We are at the previous
                                         and the element does
                                         not exist */
@@ -2349,15 +2603,13 @@ static TreeDbTerm *find_next(DbTableTree *tb, DbTreeStack* stack,
return this;
}
-static TreeDbTerm *find_prev(DbTableTree *tb, DbTreeStack* stack,
- Eterm key, Eterm* key_base)
-{
+static TreeDbTerm *find_prev(DbTableTree *tb, DbTreeStack* stack, Eterm key) {
TreeDbTerm *this;
TreeDbTerm *tmp;
Sint c;
if(( this = TOP_NODE(stack)) != NULL) {
- if (!cmp_key_eq(tb,key,key_base,this)) {
+ if (!cmp_key_eq(tb,key,this)) {
/* Start from the beginning */
stack->pos = stack->slot = 0;
}
@@ -2367,7 +2619,7 @@ static TreeDbTerm *find_prev(DbTableTree *tb, DbTreeStack* stack,
return NULL;
for (;;) {
PUSH_NODE(stack, this);
- if (( c = cmp_key(tb,key,key_base,this) ) < 0) {
+ if (( c = cmp_key(tb,key,this) ) < 0) {
if (this->left == NULL) /* We are at the next
and the element does
not exist */
@@ -2420,8 +2672,7 @@ static TreeDbTerm *find_next_from_pb_key(DbTableTree *tb, DbTreeStack* stack,
return NULL;
for (;;) {
PUSH_NODE(stack, this);
- if (( c = cmp_partly_bound(key,GETKEY(tb, this->dbterm.tpl),
- this->dbterm.tpl) ) >= 0) {
+ if (( c = cmp_partly_bound(key,GETKEY(tb, this->dbterm.tpl))) >= 0) {
if (this->right == NULL) {
do {
tmp = POP_NODE(stack);
@@ -2454,8 +2705,7 @@ static TreeDbTerm *find_prev_from_pb_key(DbTableTree *tb, DbTreeStack* stack,
return NULL;
for (;;) {
PUSH_NODE(stack, this);
- if (( c = cmp_partly_bound(key,GETKEY(tb, this->dbterm.tpl),
- this->dbterm.tpl) ) <= 0) {
+ if (( c = cmp_partly_bound(key,GETKEY(tb, this->dbterm.tpl))) <= 0) {
if (this->left == NULL) {
do {
tmp = POP_NODE(stack);
@@ -2486,10 +2736,10 @@ static TreeDbTerm *find_node(DbTableTree *tb, Eterm key)
DbTreeStack* stack = get_static_stack(tb);
if(!stack || EMPTY_NODE(stack)
- || !cmp_key_eq(tb, key, NULL, (this=TOP_NODE(stack)))) {
+ || !cmp_key_eq(tb, key, (this=TOP_NODE(stack)))) {
this = tb->root;
- while (this != NULL && (res = cmp_key(tb,key,NULL,this)) != 0) {
+ while (this != NULL && (res = cmp_key(tb,key,this)) != 0) {
if (res < 0)
this = this->left;
else
@@ -2511,7 +2761,7 @@ static TreeDbTerm **find_node2(DbTableTree *tb, Eterm key)
Sint res;
this = &tb->root;
- while ((*this) != NULL && (res = cmp_key(tb, key, NULL, *this)) != 0) {
+ while ((*this) != NULL && (res = cmp_key(tb, key, *this)) != 0) {
if (res < 0)
this = &((*this)->left);
else
@@ -2522,33 +2772,115 @@ static TreeDbTerm **find_node2(DbTableTree *tb, Eterm key)
return this;
}
-static int db_lookup_dbterm_tree(DbTable *tbl, Eterm key, DbUpdateHandle* handle)
+/*
+ * Find the node and return the address of the pointer to it (NULL if not found).
+ * Tries to reuse the existing stack for performance.
+ */
+
+static TreeDbTerm **find_ptr(DbTableTree *tb, DbTreeStack *stack, TreeDbTerm *this) {
+ Eterm key = GETKEY(tb, this->dbterm.tpl);
+ TreeDbTerm *tmp;
+ TreeDbTerm *parent;
+ Sint c;
+
+ if(( tmp = TOP_NODE(stack)) != NULL) {
+ if (!cmp_key_eq(tb,key,tmp)) {
+ /* Start from the beginning */
+ stack->pos = stack->slot = 0;
+ }
+ }
+ if (EMPTY_NODE(stack)) { /* Have to rebuild the stack */
+ if (( tmp = tb->root ) == NULL)
+ return NULL;
+ for (;;) {
+ PUSH_NODE(stack, tmp);
+ if (( c = cmp_key(tb,key,tmp) ) < 0) {
+ if (tmp->left == NULL) /* We are at the next
+ and the element does
+ not exist */
+ break;
+ else
+ tmp = tmp->left;
+ } else if (c > 0) {
+ if (tmp->right == NULL) /* Done */
+ return NULL;
+ else
+ tmp = tmp->right;
+ } else
+ break;
+ }
+ }
+
+ if (TOP_NODE(stack) != this)
+ return NULL;
+
+ parent = TOPN_NODE(stack, 1);
+ if (parent == NULL)
+ return ((this != tb->root) ? NULL : &(tb->root));
+ if (parent->left == this)
+ return &(parent->left);
+ if (parent->right == this)
+ return &(parent->right);
+ return NULL;
+}
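+
find_ptr exists because TreeDbTerm nodes carry no parent pointers: to splice a replacement in, the caller needs the address of whichever pointer (the parent's left or right link, or the root) currently names the node. The core idea, reduced to a plain binary search tree with unique keys (an assumption the keyed ETS tree also satisfies):

    struct bst { int key; struct bst *left, *right; };

    /* Return the address of the link that points at target, possibly
     * rootp itself, or NULL if target is not reachable from *rootp. */
    static struct bst **find_link(struct bst **rootp, struct bst *target)
    {
        struct bst **link = rootp;

        while (*link != NULL) {
            if (*link == target)
                return link;
            link = (target->key < (*link)->key) ? &(*link)->left
                                                : &(*link)->right;
        }
        return NULL;
    }

With the link in hand, *link = replacement swaps the node without any rebalancing, which is exactly what doit_select_replace needs, since the replacement keeps the same key.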
+
+static int
+db_lookup_dbterm_tree(Process *p, DbTable *tbl, Eterm key, Eterm obj,
+ DbUpdateHandle* handle)
{
DbTableTree *tb = &tbl->tree;
TreeDbTerm **pp = find_node2(tb, key);
-
- if (pp == NULL) return 0;
+ int flags = 0;
+
+ if (pp == NULL) {
+ if (obj == THE_NON_VALUE) {
+ return 0;
+ } else {
+ Eterm *objp = tuple_val(obj);
+ int arity = arityval(*objp);
+ Eterm *htop, *hend;
+
+ ASSERT(arity >= tb->common.keypos);
+ htop = HAlloc(p, arity + 1);
+ hend = htop + arity + 1;
+ sys_memcpy(htop, objp, sizeof(Eterm) * (arity + 1));
+ htop[tb->common.keypos] = key;
+ obj = make_tuple(htop);
+
+ if (db_put_tree(tbl, obj, 1) != DB_ERROR_NONE) {
+ return 0;
+ }
+
+ pp = find_node2(tb, key);
+ ASSERT(pp != NULL);
+ HRelease(p, hend, htop);
+ flags |= DB_NEW_OBJECT;
+ }
+ }
handle->tb = tbl;
handle->dbterm = &(*pp)->dbterm;
- handle->mustResize = 0;
+ handle->flags = flags;
handle->bp = (void**) pp;
handle->new_size = (*pp)->dbterm.size;
-#if HALFWORD_HEAP
- handle->abs_vec = NULL;
-#endif
return 1;
}
-static void db_finalize_dbterm_tree(DbUpdateHandle* handle)
+static void
+db_finalize_dbterm_tree(int cret, DbUpdateHandle *handle)
{
- if (handle->mustResize) {
- TreeDbTerm* oldp = (TreeDbTerm*) *handle->bp;
+ DbTable *tbl = handle->tb;
+ DbTableTree *tb = &tbl->tree;
+ TreeDbTerm *bp = (TreeDbTerm *) *handle->bp;
+ if (handle->flags & DB_NEW_OBJECT && cret != DB_ERROR_NONE) {
+ Eterm ret;
+ db_erase_tree(tbl, GETKEY(tb, bp->dbterm.tpl), &ret);
+ } else if (handle->flags & DB_MUST_RESIZE) {
db_finalize_resize(handle, offsetof(TreeDbTerm,dbterm));
- reset_static_stack(&handle->tb->tree);
+ reset_static_stack(tb);
- free_term(&handle->tb->tree, oldp);
+ free_term(tb, bp);
}
#ifdef DEBUG
handle->dbterm = 0;
@@ -2561,7 +2893,7 @@ static void db_finalize_dbterm_tree(DbUpdateHandle* handle)
*/
static void traverse_backwards(DbTableTree *tb,
DbTreeStack* stack,
- Eterm lastkey, Eterm* lk_base,
+ Eterm lastkey,
int (*doit)(DbTableTree *,
TreeDbTerm *,
void *,
@@ -2580,16 +2912,15 @@ static void traverse_backwards(DbTableTree *tb,
this = this->right;
}
this = TOP_NODE(stack);
- next = find_prev(tb, stack, GETKEY(tb, this->dbterm.tpl),
- this->dbterm.tpl);
+ next = find_prev(tb, stack, GETKEY(tb, this->dbterm.tpl));
if (!((*doit)(tb, this, context, 0)))
return;
} else {
- next = find_prev(tb, stack, lastkey, lk_base);
+ next = find_prev(tb, stack, lastkey);
}
while ((this = next) != NULL) {
- next = find_prev(tb, stack, GETKEY(tb, this->dbterm.tpl), this->dbterm.tpl);
+ next = find_prev(tb, stack, GETKEY(tb, this->dbterm.tpl));
if (!((*doit)(tb, this, context, 0)))
return;
}
@@ -2600,7 +2931,7 @@ static void traverse_backwards(DbTableTree *tb,
*/
static void traverse_forward(DbTableTree *tb,
DbTreeStack* stack,
- Eterm lastkey, Eterm* lk_base,
+ Eterm lastkey,
int (*doit)(DbTableTree *,
TreeDbTerm *,
void *,
@@ -2619,28 +2950,75 @@ static void traverse_forward(DbTableTree *tb,
this = this->left;
}
this = TOP_NODE(stack);
- next = find_next(tb, stack, GETKEY(tb, this->dbterm.tpl), this->dbterm.tpl);
+ next = find_next(tb, stack, GETKEY(tb, this->dbterm.tpl));
if (!((*doit)(tb, this, context, 1)))
return;
} else {
- next = find_next(tb, stack, lastkey, lk_base);
+ next = find_next(tb, stack, lastkey);
}
while ((this = next) != NULL) {
- next = find_next(tb, stack, GETKEY(tb, this->dbterm.tpl), this->dbterm.tpl);
+ next = find_next(tb, stack, GETKEY(tb, this->dbterm.tpl));
if (!((*doit)(tb, this, context, 1)))
return;
}
}
/*
+ * Traverse the tree with an update callback function, used by db_select_replace
+ */
+static void traverse_update_backwards(DbTableTree *tb,
+ DbTreeStack* stack,
+ Eterm lastkey,
+ int (*doit)(DbTableTree*,
+ TreeDbTerm**,
+ void*,
+ int),
+ void* context)
+{
+ int res;
+ TreeDbTerm *this, *next, **this_ptr;
+
+ if (lastkey == THE_NON_VALUE) {
+ stack->pos = stack->slot = 0;
+ if (( this = tb->root ) == NULL) {
+ return;
+ }
+ while (this != NULL) {
+ PUSH_NODE(stack, this);
+ this = this->right;
+ }
+ this = TOP_NODE(stack);
+ this_ptr = find_ptr(tb, stack, this);
+ ASSERT(this_ptr != NULL);
+ res = (*doit)(tb, this_ptr, context, 0);
+ REPLACE_TOP_NODE(stack, *this_ptr);
+ next = find_prev(tb, stack, GETKEY(tb, (*this_ptr)->dbterm.tpl));
+ if (!res)
+ return;
+ } else {
+ next = find_prev(tb, stack, lastkey);
+ }
+
+ while ((this = next) != NULL) {
+ this_ptr = find_ptr(tb, stack, this);
+ ASSERT(this_ptr != NULL);
+ res = (*doit)(tb, this_ptr, context, 0);
+ REPLACE_TOP_NODE(stack, *this_ptr);
+ next = find_prev(tb, stack, GETKEY(tb, (*this_ptr)->dbterm.tpl));
+ if (!res)
+ return;
+ }
+}
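+
Unlike traverse_backwards, the callback here receives a TreeDbTerm ** so it may replace the node behind the link; REPLACE_TOP_NODE then refreshes the stack before the predecessor is computed from the possibly-new node. A sketch of such a link-owning visitor (hypothetical types; in ETS the replacement must also keep the key, and hence the AVL balance, valid):

    #include <stdlib.h>

    struct node { int key; int payload; struct node *left, *right; };

    /* Replace the node in place through the link; return 0 to stop the
     * traversal, 1 to continue (mirroring the doit_* return protocol). */
    static int bump_payload(struct node **link, void *ctx)
    {
        struct node *old = *link;
        struct node *fresh = malloc(sizeof *fresh);

        (void)ctx;
        if (fresh == NULL)
            return 0;
        *fresh = *old;        /* same key, same left/right children */
        fresh->payload += 1;  /* only the stored object changes */
        *link = fresh;        /* the parent's pointer now names fresh */
        free(old);
        return 1;
    }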
+
+/*
 * Returns 0 if not given, 1 if given, and -1 on no possible match.
 * If a key is given, *ret is set to point to the object concerned.
*/
-static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm **ret,
+static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm ***ret,
Eterm *partly_bound)
{
- TreeDbTerm *this;
+ TreeDbTerm **this;
Eterm key;
ASSERT(ret != NULL);
@@ -2650,13 +3028,13 @@ static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm **ret,
if (is_non_value(key))
return -1; /* can't possibly match anything */
if (!db_has_variable(key)) { /* Bound key */
- if (( this = find_node(tb, key) ) == NULL) {
+ if (( this = find_node2(tb, key) ) == NULL) {
return -1;
}
*ret = this;
return 1;
} else if (partly_bound != NULL && key != am_Underscore &&
- db_is_variable(key) < 0)
+ db_is_variable(key) < 0 && !db_has_map(key))
*partly_bound = key;
return 0;
@@ -2664,7 +3042,7 @@ static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm **ret,
-static Sint do_cmp_partly_bound(Eterm a, Eterm b, Eterm* b_base, int *done)
+static Sint do_cmp_partly_bound(Eterm a, Eterm b, int *done)
{
Eterm* aa;
Eterm* bb;
@@ -2678,44 +3056,44 @@ static Sint do_cmp_partly_bound(Eterm a, Eterm b, Eterm* b_base, int *done)
*done = 1;
return 0;
}
- if (is_same(a,NULL,b,b_base))
+ if (is_same(a,b))
return 0;
switch (a & _TAG_PRIMARY_MASK) {
case TAG_PRIMARY_LIST:
if (!is_list(b)) {
- return cmp_rel(a,NULL,b,b_base);
+ return CMP(a,b);
}
aa = list_val(a);
- bb = list_val_rel(b,b_base);
+ bb = list_val(b);
while (1) {
- if ((j = do_cmp_partly_bound(*aa++, *bb++, b_base, done)) != 0 || *done)
+ if ((j = do_cmp_partly_bound(*aa++, *bb++, done)) != 0 || *done)
return j;
- if (is_same(*aa, NULL, *bb, b_base))
+ if (is_same(*aa, *bb))
return 0;
if (is_not_list(*aa) || is_not_list(*bb))
- return do_cmp_partly_bound(*aa, *bb, b_base, done);
+ return do_cmp_partly_bound(*aa, *bb, done);
aa = list_val(*aa);
- bb = list_val_rel(*bb,b_base);
+ bb = list_val(*bb);
}
case TAG_PRIMARY_BOXED:
if ((b & _TAG_PRIMARY_MASK) != TAG_PRIMARY_BOXED) {
- return cmp_rel(a,NULL,b,b_base);
+ return CMP(a,b);
}
a_hdr = ((*boxed_val(a)) & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE;
- b_hdr = ((*boxed_val_rel(b,b_base)) & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE;
+ b_hdr = ((*boxed_val(b)) & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE;
if (a_hdr != b_hdr) {
- return cmp_rel(a, NULL, b, b_base);
+ return CMP(a,b);
}
if (a_hdr == (_TAG_HEADER_ARITYVAL >> _TAG_PRIMARY_SIZE)) {
aa = tuple_val(a);
- bb = tuple_val_rel(b, b_base);
+ bb = tuple_val(b);
/* compare the arities */
i = arityval(*aa); /* get the arity*/
if (i < arityval(*bb)) return(-1);
if (i > arityval(*bb)) return(1);
while (i--) {
- if ((j = do_cmp_partly_bound(*++aa, *++bb, b_base, done)) != 0
+ if ((j = do_cmp_partly_bound(*++aa, *++bb, done)) != 0
|| *done)
return j;
}
@@ -2723,14 +3101,13 @@ static Sint do_cmp_partly_bound(Eterm a, Eterm b, Eterm* b_base, int *done)
}
/* Drop through */
default:
- return cmp_rel(a, NULL, b, b_base);
+ return CMP(a,b);
}
}
-static Sint cmp_partly_bound(Eterm partly_bound_key, Eterm bound_key, Eterm* bk_base)
-{
+static Sint cmp_partly_bound(Eterm partly_bound_key, Eterm bound_key) {
int done = 0;
- Sint ret = do_cmp_partly_bound(partly_bound_key, bound_key, bk_base, &done);
+ Sint ret = do_cmp_partly_bound(partly_bound_key, bound_key, &done);
#ifdef HARDDEBUG
erts_fprintf(stderr,"\ncmp_partly_bound: %T", partly_bound_key);
if (ret < 0)
@@ -2739,7 +3116,7 @@ static Sint cmp_partly_bound(Eterm partly_bound_key, Eterm bound_key, Eterm* bk_
erts_fprintf(stderr," > ");
else
erts_fprintf(stderr," == ");
- erts_fprintf(stderr,"%R\n", bound_key, bk_base);
+ erts_fprintf(stderr,"%T\n", bound_key);
#endif
return ret;
}
@@ -2956,12 +3333,10 @@ static int doit_select(DbTableTree *tb, TreeDbTerm *this, void *ptr,
if (sc->end_condition != NIL &&
((forward &&
cmp_partly_bound(sc->end_condition,
- GETKEY_WITH_POS(sc->keypos, this->dbterm.tpl),
- this->dbterm.tpl) < 0) ||
+ GETKEY_WITH_POS(sc->keypos, this->dbterm.tpl)) < 0) ||
(!forward &&
cmp_partly_bound(sc->end_condition,
- GETKEY_WITH_POS(sc->keypos, this->dbterm.tpl),
- this->dbterm.tpl) > 0))) {
+ GETKEY_WITH_POS(sc->keypos, this->dbterm.tpl)) > 0))) {
return 0;
}
ret = db_match_dbterm(&tb->common,sc->p,sc->mp,sc->all_objects,
@@ -2993,8 +3368,7 @@ static int doit_select_count(DbTableTree *tb, TreeDbTerm *this, void *ptr,
/* Always backwards traversing */
if (sc->end_condition != NIL &&
(cmp_partly_bound(sc->end_condition,
- GETKEY_WITH_POS(sc->keypos, this->dbterm.tpl),
- this->dbterm.tpl) > 0)) {
+ GETKEY_WITH_POS(sc->keypos, this->dbterm.tpl)) > 0)) {
return 0;
}
ret = db_match_dbterm(&tb->common, sc->p, sc->mp, 0,
@@ -3020,12 +3394,10 @@ static int doit_select_chunk(DbTableTree *tb, TreeDbTerm *this, void *ptr,
if (sc->end_condition != NIL &&
((forward &&
cmp_partly_bound(sc->end_condition,
- GETKEY_WITH_POS(sc->keypos, this->dbterm.tpl),
- this->dbterm.tpl) < 0) ||
+ GETKEY_WITH_POS(sc->keypos, this->dbterm.tpl)) < 0) ||
(!forward &&
cmp_partly_bound(sc->end_condition,
- GETKEY_WITH_POS(sc->keypos, this->dbterm.tpl),
- this->dbterm.tpl) > 0))) {
+ GETKEY_WITH_POS(sc->keypos, this->dbterm.tpl)) > 0))) {
return 0;
}
@@ -3063,14 +3435,13 @@ static int doit_select_delete(DbTableTree *tb, TreeDbTerm *this, void *ptr,
if (sc->end_condition != NIL &&
cmp_partly_bound(sc->end_condition,
- GETKEY_WITH_POS(sc->keypos, this->dbterm.tpl),
- this->dbterm.tpl) > 0)
+ GETKEY_WITH_POS(sc->keypos, this->dbterm.tpl)) > 0)
return 0;
ret = db_match_dbterm(&tb->common, sc->p, sc->mp, 0,
&this->dbterm, NULL, 0);
if (ret == am_true) {
key = GETKEY(sc->tb, this->dbterm.tpl);
- linkout_tree(sc->tb, key, this->dbterm.tpl);
+ linkout_tree(sc->tb, key);
sc->erase_lastterm = 1;
++sc->accum;
}
@@ -3080,6 +3451,46 @@ static int doit_select_delete(DbTableTree *tb, TreeDbTerm *this, void *ptr,
return 1;
}
+static int doit_select_replace(DbTableTree *tb, TreeDbTerm **this, void *ptr,
+ int forward)
+{
+ struct select_replace_context *sc = (struct select_replace_context *) ptr;
+ Eterm ret;
+
+ sc->lastobj = (*this)->dbterm.tpl;
+
+ /* Always backwards traversing */
+ if (sc->end_condition != NIL &&
+ (cmp_partly_bound(sc->end_condition,
+ GETKEY_WITH_POS(sc->keypos, (*this)->dbterm.tpl)) > 0)) {
+ return 0;
+ }
+ ret = db_match_dbterm(&tb->common, sc->p, sc->mp, 0,
+ &(*this)->dbterm, NULL, 0);
+
+ if (is_value(ret)) {
+ TreeDbTerm* new;
+ TreeDbTerm* old = *this;
+#ifdef DEBUG
+ Eterm key = db_getkey(tb->common.keypos, ret);
+ ASSERT(is_value(key));
+ ASSERT(cmp_key(tb, key, old) == 0);
+#endif
+ new = new_dbterm(tb, ret);
+ new->left = old->left;
+ new->right = old->right;
+ new->balance = old->balance;
+ sc->lastobj = new->dbterm.tpl;
+ *this = new;
+ free_term(tb, old);
+ ++(sc->replaced);
+ }
+ if (--(sc->max) <= 0) {
+ return 0;
+ }
+ return 1;
+}
+
#ifdef TREE_DEBUG
static void do_dump_tree2(DbTableTree* tb, int to, void *to_arg, int show,
TreeDbTerm *t, int offset)
@@ -3096,11 +3507,10 @@ static void do_dump_tree2(DbTableTree* tb, int to, void *to_arg, int show,
}
else {
prefix = "";
- term = make_tuple_rel(t->dbterm.tpl,t->dbterm.tpl);
+ term = make_tuple(t->dbterm.tpl);
}
- erts_print(to, to_arg, "%*s%s%R (addr = %p, bal = %d)\n",
- offset, "", prefix, term, t->dbterm.tpl,
- t, t->balance);
+ erts_print(to, to_arg, "%*s%s%T (addr = %p, bal = %d)\n",
+ offset, "", prefix, term, t, t->balance);
}
do_dump_tree2(tb, to, to_arg, show, t->left, offset + 4);
}
@@ -3109,6 +3519,9 @@ static void do_dump_tree2(DbTableTree* tb, int to, void *to_arg, int show,
#ifdef HARDDEBUG
+/*
+ * Not called, but kept as it might come into use
+ */
void db_check_table_tree(DbTable *tbl)
{
DbTableTree *tb = &tbl->tree;
diff --git a/erts/emulator/beam/erl_db_tree.h b/erts/emulator/beam/erl_db_tree.h
index 7bc235e135..dc1b93d410 100644
--- a/erts/emulator/beam/erl_db_tree.h
+++ b/erts/emulator/beam/erl_db_tree.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -40,7 +41,7 @@ typedef struct db_table_tree {
/* Tree-specific fields */
TreeDbTerm *root; /* The tree root */
Uint deletion; /* Being deleted */
- erts_smp_atomic_t is_stack_busy;
+ erts_atomic_t is_stack_busy;
DbTreeStack static_stack;
} DbTableTree;
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index 3927615e04..e017b9552b 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2014. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -122,6 +123,9 @@ do { \
#define TermWords(t) (((t) / (sizeof(UWord)/sizeof(Eterm))) + !!((t) % (sizeof(UWord)/sizeof(Eterm))))
+#define add_dmc_err(EINFO, STR, VAR, TERM, SEV) \
+ vadd_dmc_err(EINFO, SEV, VAR, STR, TERM)
+
static ERTS_INLINE Process *
get_proc(Process *cp, Uint32 cp_locks, Eterm id, Uint32 id_locks)
@@ -134,21 +138,22 @@ get_proc(Process *cp, Uint32 cp_locks, Eterm id, Uint32 id_locks)
static Eterm
-set_tracee_flags(Process *tracee_p, Eterm tracer, Uint d_flags, Uint e_flags) {
+set_tracee_flags(Process *tracee_p, ErtsTracer tracer,
+ Uint d_flags, Uint e_flags) {
Eterm ret;
Uint flags;
- if (tracer == NIL) {
+ if (ERTS_TRACER_IS_NIL(tracer)) {
flags = ERTS_TRACE_FLAGS(tracee_p) & ~TRACEE_FLAGS;
} else {
flags = ((ERTS_TRACE_FLAGS(tracee_p) & ~d_flags) | e_flags);
- if (! flags) tracer = NIL;
+ if (! flags) tracer = erts_tracer_nil;
}
- ret = ((ERTS_TRACER_PROC(tracee_p) != tracer
+ ret = ((!ERTS_TRACER_COMPARE(ERTS_TRACER(tracee_p),tracer)
|| ERTS_TRACE_FLAGS(tracee_p) != flags)
? am_true
: am_false);
- ERTS_TRACER_PROC(tracee_p) = tracer;
+ erts_tracer_replace(&tracee_p->common, tracer);
ERTS_TRACE_FLAGS(tracee_p) = flags;
return ret;
}
@@ -162,46 +167,18 @@ set_tracee_flags(Process *tracee_p, Eterm tracer, Uint d_flags, Uint e_flags) {
** returns fail_term on failure. Fails if tracer pid or port is invalid.
*/
static Eterm
-set_match_trace(Process *tracee_p, Eterm fail_term, Eterm tracer,
+set_match_trace(Process *tracee_p, Eterm fail_term, ErtsTracer tracer,
Uint d_flags, Uint e_flags) {
- Eterm ret = fail_term;
- Process *tracer_p;
-
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCKS_ALL ==
- erts_proc_lc_my_proc_locks(tracee_p));
-
- if (is_internal_pid(tracer)
- && (tracer_p =
- erts_pid2proc(tracee_p, ERTS_PROC_LOCKS_ALL,
- tracer, ERTS_PROC_LOCKS_ALL))) {
- if (tracee_p != tracer_p) {
- ret = set_tracee_flags(tracee_p, tracer, d_flags, e_flags);
- ERTS_TRACE_FLAGS(tracer_p) |= (ERTS_TRACE_FLAGS(tracee_p)
- ? F_TRACER
- : 0);
- erts_smp_proc_unlock(tracer_p, ERTS_PROC_LOCKS_ALL);
- }
- } else if (is_internal_port(tracer)) {
- Port *tracer_port =
- erts_id2port_sflgs(tracer,
- tracee_p,
- ERTS_PROC_LOCKS_ALL,
- ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
- if (tracer_port) {
- ret = set_tracee_flags(tracee_p, tracer, d_flags, e_flags);
- erts_port_release(tracer_port);
- }
- } else {
- ASSERT(is_nil(tracer));
- ret = set_tracee_flags(tracee_p, tracer, d_flags, e_flags);
- }
- return ret;
-}
-
-/* Type checking... */
+ ERTS_LC_ASSERT(
+ ERTS_PROC_LOCKS_ALL == erts_proc_lc_my_proc_locks(tracee_p)
+ || erts_thr_progress_is_blocking());
-#define BOXED_IS_TUPLE(Boxed) is_arity_value(*boxed_val((Boxed)))
+ if (ERTS_TRACER_IS_NIL(tracer)
+ || erts_is_tracer_enabled(tracer, &tracee_p->common))
+ return set_tracee_flags(tracee_p, tracer, d_flags, e_flags);
+ return fail_term;
+}
/*
**
@@ -218,7 +195,9 @@ typedef enum {
matchTuple,
matchPushT,
matchPushL,
+ matchPushM,
matchPop,
+ matchSwap,
matchBind,
matchCmp,
matchEqBin,
@@ -227,21 +206,21 @@ typedef enum {
matchEqRef,
matchEq,
matchList,
+ matchMap,
+ matchKey,
matchSkip,
matchPushC,
matchConsA, /* Car is below Cdr */
matchConsB, /* Cdr is below Car (unusual) */
matchMkTuple,
+ matchMkFlatMap,
+ matchMkHashMap,
matchCall0,
matchCall1,
matchCall2,
matchCall3,
matchPushV,
-#if HALFWORD_HEAP
- matchPushVGuard, /* First guard-only variable reference */
-#endif
- matchPushVResult, /* First variable reference in result, or (if HALFWORD)
- in guard if also referenced in result */
+ matchPushVResult, /* First variable reference in result */
matchPushExpr, /* Push the whole expression we're matching ('$_') */
matchPushArrayAsList, /* Only when parameter is an Array and
not an erlang term (DCOMP_TRACE) */
@@ -308,9 +287,6 @@ DMC_DECLARE_STACK_TYPE(unsigned);
typedef struct DMCVariable {
int is_bound;
int is_in_body;
-#if HALFWORD_HEAP
- int first_guard_label; /* to maybe change from PushVGuard to PushVResult */
-#endif
} DMCVariable;
typedef struct DMCHeap {
@@ -372,7 +348,6 @@ typedef struct MatchVariable {
Eterm term;
#ifdef DEBUG
Process* proc;
- Eterm* base;
#endif
} MatchVariable;
@@ -386,11 +361,7 @@ typedef struct {
} ErtsMatchPseudoProcess;
-#ifdef ERTS_SMP
-static erts_smp_tsd_key_t match_pseudo_process_key;
-#else
-static ErtsMatchPseudoProcess *match_pseudo_process;
-#endif
+static erts_tsd_key_t match_pseudo_process_key;
static ERTS_INLINE void
cleanup_match_pseudo_process(ErtsMatchPseudoProcess *mpsp, int keep_heap)
@@ -413,7 +384,7 @@ cleanup_match_pseudo_process(ErtsMatchPseudoProcess *mpsp, int keep_heap)
else {
int i;
for (i = 0; i < ERTS_DEFAULT_MS_HEAP_SIZE; i++) {
-#if defined(ARCH_64) && !HALFWORD_HEAP
+#if defined(ARCH_64)
mpsp->default_heap[i] = (Eterm) 0xdeadbeefdeadbeef;
#else
mpsp->default_heap[i] = (Eterm) 0xdeadbeef;
@@ -439,22 +410,27 @@ static ERTS_INLINE ErtsMatchPseudoProcess *
get_match_pseudo_process(Process *c_p, Uint heap_size)
{
ErtsMatchPseudoProcess *mpsp;
-#ifdef ERTS_SMP
- mpsp = (ErtsMatchPseudoProcess *) c_p->scheduler_data->match_pseudo_process;
- if (mpsp)
+ ErtsSchedulerData *esdp;
+
+ esdp = c_p ? c_p->scheduler_data : erts_get_scheduler_data();
+
+ mpsp = esdp ? esdp->match_pseudo_process :
+ (ErtsMatchPseudoProcess*) erts_tsd_get(match_pseudo_process_key);
+
+ if (mpsp) {
+ ASSERT(mpsp == erts_tsd_get(match_pseudo_process_key));
+ ASSERT(mpsp->process.scheduler_data == esdp);
cleanup_match_pseudo_process(mpsp, 0);
+ }
else {
- ASSERT(erts_smp_tsd_get(match_pseudo_process_key) == NULL);
+ ASSERT(erts_tsd_get(match_pseudo_process_key) == NULL);
mpsp = create_match_pseudo_process();
- c_p->scheduler_data->match_pseudo_process = (void *) mpsp;
- erts_smp_tsd_set(match_pseudo_process_key, (void *) mpsp);
+ if (esdp) {
+ esdp->match_pseudo_process = (void *) mpsp;
+ }
+ mpsp->process.scheduler_data = esdp;
+ erts_tsd_set(match_pseudo_process_key, (void *) mpsp);
}
- ASSERT(mpsp == erts_smp_tsd_get(match_pseudo_process_key));
- mpsp->process.scheduler_data = c_p->scheduler_data;
-#else
- mpsp = match_pseudo_process;
- cleanup_match_pseudo_process(mpsp, 0);
-#endif
if (heap_size > ERTS_DEFAULT_MS_HEAP_SIZE*sizeof(Eterm)) {
mpsp->u.heap = (Eterm*) erts_alloc(ERTS_ALC_T_DB_MS_RUN_HEAP, heap_size);
}
@@ -464,31 +440,25 @@ get_match_pseudo_process(Process *c_p, Uint heap_size)
return mpsp;
}
-#ifdef ERTS_SMP
static void
destroy_match_pseudo_process(void)
{
ErtsMatchPseudoProcess *mpsp;
- mpsp = (ErtsMatchPseudoProcess *)erts_smp_tsd_get(match_pseudo_process_key);
+ mpsp = (ErtsMatchPseudoProcess *)erts_tsd_get(match_pseudo_process_key);
if (mpsp) {
cleanup_match_pseudo_process(mpsp, 0);
erts_free(ERTS_ALC_T_DB_MS_PSDO_PROC, (void *) mpsp);
- erts_smp_tsd_set(match_pseudo_process_key, (void *) NULL);
+ erts_tsd_set(match_pseudo_process_key, (void *) NULL);
}
}
-#endif
static
void
match_pseudo_process_init(void)
{
-#ifdef ERTS_SMP
- erts_smp_tsd_key_create(&match_pseudo_process_key,
+ erts_tsd_key_create(&match_pseudo_process_key,
"erts_match_pseudo_process_key");
- erts_smp_install_exit_handler(destroy_match_pseudo_process);
-#else
- match_pseudo_process = create_match_pseudo_process();
-#endif
+ erts_thr_install_exit_handler(destroy_match_pseudo_process);
}
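
With the SMP-only branches gone, the pseudo process is now always kept per scheduler and mirrored in thread-specific data, with an exit handler reclaiming it when a thread dies. The underlying pattern is ordinary thread-specific storage with a destructor; a sketch with POSIX threads (pthread_key_create is the portable analogue assumed here, not what ERTS calls internally):

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_key_t scratch_key;

    static void scratch_destroy(void *p)
    {
        free(p);              /* runs automatically at thread exit */
    }

    static void scratch_init(void)
    {
        pthread_key_create(&scratch_key, scratch_destroy);
    }

    /* Lazily create one reusable per-thread scratch buffer
     * (the first caller's size wins in this simplified sketch). */
    static void *get_scratch(size_t size)
    {
        void *p = pthread_getspecific(scratch_key);

        if (p == NULL) {
            p = malloc(size);
            pthread_setspecific(scratch_key, p);
        }
        return p;
    }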
void
@@ -499,7 +469,7 @@ erts_match_set_release_result(Process* c_p)
/* The trace control word. */
-static erts_smp_atomic32_t trace_control_word;
+static erts_atomic32_t trace_control_word;
/* This needs to be here, before the bif table... */
@@ -856,6 +826,13 @@ static int match_compact(ErlHeapFragment *expr, DMCErrInfo *err_info);
static Uint my_size_object(Eterm t);
static Eterm my_copy_struct(Eterm t, Eterm **hp, ErlOffHeap* off_heap);
+/* Guard subroutines */
+static void
+dmc_rearrange_constants(DMCContext *context, DMC_STACK_TYPE(UWord) *text,
+ int textpos, Eterm *p, Uint nelems);
+static DMCRet
+dmc_array(DMCContext *context, DMCHeap *heap, DMC_STACK_TYPE(UWord) *text,
+ Eterm *p, Uint nelems, int *constant);
/* Guard compilation */
static void do_emit_constant(DMCContext *context, DMC_STACK_TYPE(UWord) *text,
Eterm t);
@@ -869,6 +846,9 @@ static DMCRet dmc_tuple(DMCContext *context,
DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant);
+static DMCRet
+dmc_map(DMCContext *context, DMCHeap *heap, DMC_STACK_TYPE(UWord) *text,
+ Eterm t, int *constant);
static DMCRet dmc_variable(DMCContext *context,
DMCHeap *heap,
DMC_STACK_TYPE(UWord) *text,
@@ -888,12 +868,14 @@ static DMCRet compile_guard_expr(DMCContext *context,
DMCHeap *heap,
DMC_STACK_TYPE(UWord) *text,
Eterm t);
-/* match expression subroutine */
+/* match expression subroutines */
static DMCRet dmc_one_term(DMCContext *context,
DMCHeap *heap,
DMC_STACK_TYPE(Eterm) *stack,
DMC_STACK_TYPE(UWord) *text,
Eterm c);
+static Eterm
+dmc_private_copy(DMCContext *context, Eterm c);
#ifdef DMC_DEBUG
@@ -906,11 +888,7 @@ void db_match_dis(Binary *prog);
#define TRACE /* Nothing */
#define FENCE_PATTERN_SIZE 0
#endif
-static void add_dmc_err(DMCErrInfo *err_info,
- char *str,
- int variable,
- Eterm term,
- DMCErrorSeverity severity);
+static void vadd_dmc_err(DMCErrInfo*, DMCErrorSeverity, int var, const char *str, ...);
static Eterm dpm_array_to_list(Process *psp, Eterm *arr, int arity);
@@ -930,7 +908,7 @@ static void db_free_tmp_uncompressed(DbTerm* obj);
*/
BIF_RETTYPE db_get_trace_control_word(Process *p)
{
- Uint32 tcw = (Uint32) erts_smp_atomic32_read_acqb(&trace_control_word);
+ Uint32 tcw = (Uint32) erts_atomic32_read_acqb(&trace_control_word);
BIF_RET(erts_make_integer((Uint) tcw, p));
}
@@ -948,7 +926,7 @@ BIF_RETTYPE db_set_trace_control_word(Process *p, Eterm new)
if (val != ((Uint32)val))
BIF_ERROR(p, BADARG);
- old_tcw = (Uint32) erts_smp_atomic32_xchg_relb(&trace_control_word,
+ old_tcw = (Uint32) erts_atomic32_xchg_relb(&trace_control_word,
(erts_aint32_t) val);
BIF_RET(erts_make_integer((Uint) old_tcw, p));
}
@@ -1006,12 +984,20 @@ Eterm erts_match_set_get_source(Binary *mpsp)
}
/* This one is for the tracing */
-Binary *erts_match_set_compile(Process *p, Eterm matchexpr) {
+Binary *erts_match_set_compile(Process *p, Eterm matchexpr, Eterm MFA) {
Binary *bin;
Uint sz;
Eterm *hp;
+ Uint flags;
+
+ switch (MFA) {
+ case am_receive: flags = DCOMP_TRACE; break;
+ case am_send: flags = DCOMP_TRACE | DCOMP_ALLOW_TRACE_OPS; break;
+ default:
+ flags = DCOMP_TRACE | DCOMP_CALL_TRACE | DCOMP_ALLOW_TRACE_OPS;
+ }
- bin = db_match_set_compile(p, matchexpr, DCOMP_TRACE);
+ bin = db_match_set_compile(p, matchexpr, flags);
if (bin != NULL) {
MatchProg *prog = Binary2MatchProg(bin);
sz = size_object(matchexpr);
@@ -1118,9 +1104,186 @@ error:
return NULL;
}
-/* This is used when tracing */
-Eterm erts_match_set_lint(Process *p, Eterm matchexpr) {
- return db_match_set_lint(p, matchexpr, DCOMP_TRACE);
+/*
+ * Compare a matching term 'a' with a constructing term 'b' for equality.
+ *
+ * Returns true if 'b' is guaranteed to always construct
+ * the same term as 'a' has matched.
+ */
+static int db_match_eq_body(Eterm a, Eterm b, int const_mode)
+{
+ DECLARE_ESTACK(s);
+ Uint arity;
+ Eterm *ap, *bp;
+ const Eterm CONST_MODE_OFF = THE_NON_VALUE;
+
+ while (1) {
+ switch(b & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_LIST:
+ if (!is_list(a))
+ return 0;
+ ESTACK_PUSH2(s, CDR(list_val(a)), CDR(list_val(b)));
+ a = CAR(list_val(a));
+ b = CAR(list_val(b));
+ continue; /* loop without pop */
+
+ case TAG_PRIMARY_BOXED:
+ if (is_tuple(b)) {
+ bp = tuple_val(b);
+ if (!const_mode) {
+ if (bp[0] == make_arityval(1) && is_tuple(bp[1])) {
+ b = bp[1]; /* double-tuple syntax */
+ }
+ else if (bp[0] == make_arityval(2) && bp[1] == am_const) {
+ ESTACK_PUSH(s, CONST_MODE_OFF);
+ const_mode = 1; /* {const, term()} syntax */
+ b = bp[2];
+ continue; /* loop without pop */
+ }
+ else
+ return 0; /* function call or invalid tuple syntax */
+ }
+ if (!is_tuple(a))
+ return 0;
+
+ ap = tuple_val(a);
+ bp = tuple_val(b);
+ if (ap[0] != bp[0])
+ return 0;
+ arity = arityval(ap[0]);
+ if (arity > 0) {
+ a = *(++ap);
+ b = *(++bp);
+ while(--arity) {
+ ESTACK_PUSH2(s, *(++ap), *(++bp));
+ }
+ continue; /* loop without pop */
+ }
+ }
+ else if (is_map(b)) {
+ /* We don't know what other pairs the matched map may contain */
+ return 0;
+ }
+ else if (!eq(a,b)) /* other boxed */
+ return 0;
+ break;
+
+ case TAG_PRIMARY_IMMED1:
+ if (a != b || a == am_Underscore || a == am_DollarDollar
+ || a == am_DollarUnderscore
+ || (const_mode && db_is_variable(a) >= 0)) {
+
+ return 0;
+ }
+ break;
+ default:
+            erts_exit(ERTS_ABORT_EXIT, "db_match_eq_body: "
+ "Bad object on ESTACK: 0x%bex\n", b);
+ }
+
+pop_next:
+ if (ESTACK_ISEMPTY(s))
+ break; /* done */
+
+ b = ESTACK_POP(s);
+ if (b == CONST_MODE_OFF) {
+ ASSERT(const_mode);
+ const_mode = 0;
+ goto pop_next;
+ }
+ a = ESTACK_POP(s);
+ }
+
+ DESTROY_ESTACK(s);
+ return 1;
+}
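+
To make the contract concrete (an illustrative reading of the code above, not text from the commit): if the head matched {'$1', foo}, the body construction {{'$1', foo}}, the double-tuple syntax handled in the TAG_PRIMARY_BOXED branch, always rebuilds the matched term, so the function answers true; {{'$1', '$2'}} does not, because at the second position foo and '$2' are different immediates; and any map in the body answers false outright, since the matched map may have contained pairs the construction does not mention.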
+
+/* This is used by select_replace */
+int db_match_keeps_key(int keypos, Eterm match, Eterm guard, Eterm body)
+{
+ Eterm match_key;
+ Eterm* body_list;
+ Eterm single_body_term;
+ Eterm* single_body_term_tpl;
+ Eterm single_body_subterm;
+ Eterm single_body_subterm_key;
+ Eterm* single_body_subterm_key_tpl;
+ int const_mode;
+
+ if (!is_list(body)) {
+ return 0;
+ }
+
+ body_list = list_val(body);
+ if (CDR(body_list) != NIL) {
+ return 0;
+ }
+
+ single_body_term = CAR(body_list);
+ if (single_body_term == am_DollarUnderscore) {
+ /* same tuple is returned */
+ return 1;
+ }
+
+ if (!is_tuple(single_body_term)) {
+ return 0;
+ }
+
+ match_key = db_getkey(keypos, match);
+ if (!is_value(match_key)) {
+        /* can't get key out of match */
+ return 0;
+ }
+
+ single_body_term_tpl = tuple_val(single_body_term);
+ if (single_body_term_tpl[0] == make_arityval(2) &&
+ single_body_term_tpl[1] == am_const) {
+ /* {const, {"ets-tuple constant"}} */
+ single_body_subterm = single_body_term_tpl[2];
+ const_mode = 1;
+ }
+ else if (*single_body_term_tpl == make_arityval(1)) {
+ /* {{"ets-tuple construction"}} */
+ single_body_subterm = single_body_term_tpl[1];
+ const_mode = 0;
+ }
+ else {
+ /* not a tuple construction */
+ return 0;
+ }
+
+ single_body_subterm_key = db_getkey(keypos, single_body_subterm);
+ if (!is_value(single_body_subterm_key)) {
+        /* can't get key out of single body subterm */
+ return 0;
+ }
+
+ if (db_match_eq_body(match_key, single_body_subterm_key, const_mode)) {
+ /* tuple with same key is returned */
+ return 1;
+ }
+
+ if (const_mode) {
+ /* constant key did not match */
+ return 0;
+ }
+
+ if (!is_tuple(single_body_subterm_key)) {
+ /* can't possibly be an element instruction */
+ return 0;
+ }
+
+ single_body_subterm_key_tpl = tuple_val(single_body_subterm_key);
+ if (single_body_subterm_key_tpl[0] == make_arityval(3) &&
+ single_body_subterm_key_tpl[1] == am_element &&
+ single_body_subterm_key_tpl[3] == am_DollarUnderscore &&
+ single_body_subterm_key_tpl[2] == make_small(keypos))
+ {
+ /* {element, KeyPos, '$_'} */
+ return 1;
+ }
+
+ return 0;
}
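
As an illustration (assuming keypos = 1; these examples are not from the source): with head {'$1', '$2'}, the bodies ['$_'], [{{'$1', new_val}}] and [{{{element, 1, '$_'}, new_val}}] all keep the key and pass this check, while [{{'$2', '$1'}}] moves '$2' into the key position and is rejected, which is what makes ets:select_replace/2 fail with badarg before any replacement happens.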
Eterm db_match_set_lint(Process *p, Eterm matchexpr, Uint flags)
@@ -1141,8 +1304,8 @@ Eterm db_match_set_lint(Process *p, Eterm matchexpr, Uint flags)
int i;
if (!is_list(matchexpr)) {
- add_dmc_err(err_info, "Match programs are not in a list.",
- -1, 0UL, dmcError);
+ add_dmc_err(err_info, "Match programs are not in a list.",
+ -1, 0UL, dmcError);
goto done;
}
num_heads = 0;
@@ -1150,9 +1313,8 @@ Eterm db_match_set_lint(Process *p, Eterm matchexpr, Uint flags)
++num_heads;
if (l != NIL) { /* proper list... */
- add_dmc_err(err_info, "Match programs are not in a proper "
- "list.",
- -1, 0UL, dmcError);
+ add_dmc_err(err_info, "Match programs are not in a proper list.",
+ -1, 0UL, dmcError);
goto done;
}
@@ -1219,30 +1381,37 @@ done:
return ret;
}
-Eterm erts_match_set_run(Process *p, Binary *mpsp,
- Eterm *args, int num_args,
- enum erts_pam_run_flags in_flags,
- Uint32 *return_flags)
+/* Returns
+ * am_false if no match or
+ * if {message,false} has been called,
+ * am_true if {message,_} has NOT been called or
+ * if {message,true} has been called,
+ * Msg if {message,Msg} has been called.
+ *
+ * If return value is_not_immed
+ * then erts_match_set_release_result_trace() must be called to release it.
+ */
+Eterm erts_match_set_run_trace(Process *c_p,
+ Process *self,
+ Binary *mpsp,
+ Eterm *args, int num_args,
+ enum erts_pam_run_flags in_flags,
+ Uint32 *return_flags)
{
Eterm ret;
- ret = db_prog_match(p, mpsp, NIL, NULL, args, num_args,
+ ret = db_prog_match(c_p, self, mpsp, NIL, args, num_args,
in_flags, return_flags);
-#if defined(HARDDEBUG)
- if (is_non_value(ret)) {
- erts_fprintf(stderr, "Failed\n");
- } else {
- erts_fprintf(stderr, "Returning : %T\n", ret);
+
+ ASSERT(!(is_non_value(ret) && *return_flags));
+
+ if (is_non_value(ret) || ret == am_false) {
+ erts_match_set_release_result(c_p);
+ return am_false;
}
-#endif
+ if (is_immed(ret))
+ erts_match_set_release_result(c_p);
return ret;
- /* Returns
- * THE_NON_VALUE if no match
- * am_false if {message,false} has been called,
- * am_true if {message,_} has not been called or
- * if {message,true} has been called,
- * Msg if {message,Msg} has been called.
- */
}
static Eterm erts_match_set_run_ets(Process *p, Binary *mpsp,
@@ -1251,7 +1420,8 @@ static Eterm erts_match_set_run_ets(Process *p, Binary *mpsp,
{
Eterm ret;
- ret = db_prog_match(p, mpsp, args, NULL, NULL, num_args,
+ ret = db_prog_match(p, p,
+ mpsp, args, NULL, num_args,
ERTS_PAM_COPY_RESULT,
return_flags);
#if defined(HARDDEBUG)
@@ -1281,7 +1451,7 @@ void db_initialize_util(void){
sizeof(DMCGuardBif),
(int (*)(const void *, const void *)) &cmp_guard_bif);
match_pseudo_process_init();
- erts_smp_atomic32_init_nob(&trace_control_word, 0);
+ erts_atomic32_init_nob(&trace_control_word, 0);
}
@@ -1364,7 +1534,112 @@ restart:
for (;;) {
switch (t & _TAG_PRIMARY_MASK) {
case TAG_PRIMARY_BOXED:
- if (!BOXED_IS_TUPLE(t)) {
+ if (is_flatmap(t)) {
+ num_iters = flatmap_get_size(flatmap_val(t));
+ if (!structure_checked) {
+ DMC_PUSH(text, matchMap);
+ DMC_PUSH(text, num_iters);
+ }
+ structure_checked = 0;
+ for (i = 0; i < num_iters; ++i) {
+ Eterm key = flatmap_get_keys(flatmap_val(t))[i];
+ if (db_is_variable(key) >= 0) {
+ if (context.err_info) {
+ add_dmc_err(context.err_info,
+ "Variable found in map key.",
+ -1, 0UL, dmcError);
+ }
+ goto error;
+ } else if (key == am_Underscore) {
+ if (context.err_info) {
+ add_dmc_err(context.err_info,
+ "Underscore found in map key.",
+ -1, 0UL, dmcError);
+ }
+ goto error;
+ }
+ DMC_PUSH(text, matchKey);
+ DMC_PUSH(text, dmc_private_copy(&context, key));
+ {
+ int old_stack = ++(context.stack_used);
+ Eterm value = flatmap_get_values(flatmap_val(t))[i];
+ res = dmc_one_term(&context, &heap, &stack, &text,
+ value);
+ ASSERT(res != retFail);
+ if (res == retRestart) {
+ goto restart;
+ }
+ if (old_stack != context.stack_used) {
+ ASSERT(old_stack + 1 == context.stack_used);
+ DMC_PUSH(text, matchSwap);
+ }
+ if (context.stack_used > context.stack_need) {
+ context.stack_need = context.stack_used;
+ }
+ DMC_PUSH(text, matchPop);
+ --(context.stack_used);
+ }
+ }
+ break;
+ }
+ if (is_hashmap(t)) {
+ DECLARE_WSTACK(wstack);
+ Eterm *kv;
+ num_iters = hashmap_size(t);
+ if (!structure_checked) {
+ DMC_PUSH(text, matchMap);
+ DMC_PUSH(text, num_iters);
+ }
+ structure_checked = 0;
+
+ hashmap_iterator_init(&wstack, t, 0);
+
+ while ((kv=hashmap_iterator_next(&wstack)) != NULL) {
+ Eterm key = CAR(kv);
+ Eterm value = CDR(kv);
+ if (db_is_variable(key) >= 0) {
+ if (context.err_info) {
+ add_dmc_err(context.err_info,
+ "Variable found in map key.",
+ -1, 0UL, dmcError);
+ }
+ DESTROY_WSTACK(wstack);
+ goto error;
+ } else if (key == am_Underscore) {
+ if (context.err_info) {
+ add_dmc_err(context.err_info,
+ "Underscore found in map key.",
+ -1, 0UL, dmcError);
+ }
+ DESTROY_WSTACK(wstack);
+ goto error;
+ }
+ DMC_PUSH(text, matchKey);
+ DMC_PUSH(text, dmc_private_copy(&context, key));
+ {
+ int old_stack = ++(context.stack_used);
+ res = dmc_one_term(&context, &heap, &stack, &text,
+ value);
+ ASSERT(res != retFail);
+ if (res == retRestart) {
+ DESTROY_WSTACK(wstack);
+ goto restart;
+ }
+ if (old_stack != context.stack_used) {
+ ASSERT(old_stack + 1 == context.stack_used);
+ DMC_PUSH(text, matchSwap);
+ }
+ if (context.stack_used > context.stack_need) {
+ context.stack_need = context.stack_used;
+ }
+ DMC_PUSH(text, matchPop);
+ --(context.stack_used);
+ }
+ }
+ DESTROY_WSTACK(wstack);
+ break;
+ }
+ if (!is_tuple(t)) {
goto simple_term;
}
num_iters = arityval(*tuple_val(t));
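/* Editorial sketch (simplified; operand encoding and the variable slot
 * are assumptions): for the map pattern #{k := '$1'} the two map
 * branches above emit roughly this instruction stream, where key_copy
 * is the private copy made by dmc_private_copy(): */
UWord text_example[] = {
    matchMap,  1,        /* *ep must be a map with at least 1 entry */
    matchKey,  key_copy, /* look up the key, descend into its value */
    matchBind, 1,        /* bind '$1' to the value                  */
    matchPop             /* pop back to the enclosing map           */
};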
@@ -1589,17 +1864,18 @@ error: /* Here is where we land when compilation failed. */
/*
** Free a match program (in a binary)
*/
-void erts_db_match_prog_destructor(Binary *bprog)
+int erts_db_match_prog_destructor(Binary *bprog)
{
MatchProg *prog;
if (bprog == NULL)
- return;
+ return 1;
prog = Binary2MatchProg(bprog);
if (prog->term_save != NULL) {
free_message_buffer(prog->term_save);
}
if (prog->saved_program_buf != NULL)
free_message_buffer(prog->saved_program_buf);
+ return 1;
}
void
@@ -1636,96 +1912,31 @@ static Eterm dpm_array_to_list(Process *psp, Eterm *arr, int arity)
return ret;
}
-
-#if HALFWORD_HEAP
-struct heap_checkpoint_t
-{
- Process *p;
- Eterm* htop;
- ErlHeapFragment* mbuf;
- unsigned used_size;
- ErlOffHeap off_heap;
-};
-
-static void heap_checkpoint_init(Process* p, struct heap_checkpoint_t* hcp)
-{
- hcp->p = p;
- hcp->htop = HEAP_TOP(p);
- hcp->mbuf = MBUF(p);
- hcp->used_size = hcp->mbuf ? hcp->mbuf->used_size : 0;
- hcp->off_heap = MSO(p);
-}
-
-static void heap_checkpoint_revert(struct heap_checkpoint_t* hcp)
-{
- struct erl_off_heap_header* oh = MSO(hcp->p).first;
-
- if (oh != hcp->off_heap.first) {
- ASSERT(oh != NULL);
- if (hcp->off_heap.first) {
- while (oh->next != hcp->off_heap.first) {
- oh = oh->next;
- }
- oh->next = NULL;
- }
- erts_cleanup_offheap(&MSO(hcp->p));
- MSO(hcp->p) = hcp->off_heap;
- }
- if (MBUF(hcp->p) != hcp->mbuf) {
- ErlHeapFragment* hf = MBUF(hcp->p);
- ASSERT(hf != NULL);
- if (hcp->mbuf) {
- while (hf->next != hcp->mbuf) {
- hf = hf->next;
- }
- hf->next = NULL;
- }
- free_message_buffer(MBUF(hcp->p));
- MBUF(hcp->p) = hcp->mbuf;
- }
- if (hcp->mbuf != NULL && hcp->mbuf->used_size != hcp->used_size) {
- hcp->mbuf->used_size = hcp->used_size;
- }
- HEAP_TOP(hcp->p) = hcp->htop;
-}
-#endif /* HALFWORD_HEAP */
-
-static ERTS_INLINE Eterm copy_object_rel(Process* p, Eterm term, Eterm* base)
-{
- if (!is_immed(term)) {
- Uint sz = size_object_rel(term, base);
- Eterm* top = HAllocX(p, sz, HEAP_XTRA);
- return copy_struct_rel(term, sz, &top, &MSO(p), base, NULL);
- }
- return term;
-}
-
-
/*
** Execution of the match program, this is PAM.
** May return THE_NON_VALUE, which is a bailout.
** The parameter 'arity' is only used if 'term' is actually an array,
** i.e. 'DCOMP_TRACE' was specified
*/
-Eterm db_prog_match(Process *c_p, Binary *bprog,
- Eterm term, Eterm* base,
+Eterm db_prog_match(Process *c_p,
+ Process *self,
+ Binary *bprog,
+ Eterm term,
Eterm *termp,
int arity,
enum erts_pam_run_flags in_flags,
Uint32 *return_flags)
{
MatchProg *prog = Binary2MatchProg(bprog);
- Eterm *ep;
- Eterm *tp;
+ const Eterm *ep, *tp, **sp;
Eterm t;
- Eterm **sp;
Eterm *esp;
MatchVariable* variables;
- BeamInstr *cp;
- UWord *pc = prog->text;
+ ErtsCodeMFA *cp;
+ const UWord *pc = prog->text;
Eterm *ehp;
Eterm ret;
- Uint n = 0; /* To avoid warning. */
+ Uint n;
int i;
unsigned do_catch;
ErtsMatchPseudoProcess *mpsp;
@@ -1737,17 +1948,16 @@ Eterm db_prog_match(Process *c_p, Binary *bprog,
Eterm (*bif)(Process*, ...);
Eterm bif_args[3];
int fail_label;
- int atomic_trace;
-#if HALFWORD_HEAP
- struct heap_checkpoint_t c_p_checkpoint = {};
-#endif
#ifdef DMC_DEBUG
Uint *heap_fence;
Uint *stack_fence;
Uint save_op;
#endif /* DMC_DEBUG */
- ASSERT(base==NULL || HALFWORD_HEAP);
+ ERTS_UNDEF(n,0);
+ ERTS_UNDEF(current_scheduled,NULL);
+
+ ASSERT(c_p || !(in_flags & ERTS_PAM_COPY_RESULT));
mpsp = get_match_pseudo_process(c_p, prog->heap_size);
psp = &mpsp->process;
@@ -1755,33 +1965,11 @@ Eterm db_prog_match(Process *c_p, Binary *bprog,
/* We need to lure the scheduler into believing in the pseudo process,
because of floating point exceptions. Do *after* mpsp is set!!! */
- esdp = ERTS_GET_SCHEDULER_DATA_FROM_PROC(c_p);
- ASSERT(esdp != NULL);
- current_scheduled = esdp->current_process;
+ esdp = erts_get_scheduler_data();
+ if (esdp)
+ current_scheduled = esdp->current_process;
/* SMP: psp->scheduler_data is set by get_match_pseudo_process */
- atomic_trace = 0;
-#define BEGIN_ATOMIC_TRACE(p) \
- do { \
- if (! atomic_trace) { \
- erts_refc_inc(&bprog->refc, 2); \
- erts_smp_proc_unlock((p), ERTS_PROC_LOCK_MAIN); \
- erts_smp_thr_progress_block(); \
- atomic_trace = !0; \
- } \
- } while (0)
-#define END_ATOMIC_TRACE(p) \
- do { \
- if (atomic_trace) { \
- erts_smp_thr_progress_unblock(); \
- erts_smp_proc_lock((p), ERTS_PROC_LOCK_MAIN); \
- if (erts_refc_dectest(&bprog->refc, 0) == 0) {\
- erts_bin_free(bprog); \
- } \
- atomic_trace = 0; \
- } \
- } while (0)
-
#ifdef DMC_DEBUG
save_op = 0;
heap_fence = (Eterm*)((char*) mpsp->u.heap + prog->stack_offset) - 1;
@@ -1799,29 +1987,24 @@ Eterm db_prog_match(Process *c_p, Binary *bprog,
do_catch != 0 */
*return_flags = 0U;
-
variables = mpsp->u.variables;
-#if HALFWORD_HEAP
- c_p_checkpoint.p = NULL;
-#endif
restart:
ep = &term;
esp = (Eterm*)((char*)mpsp->u.heap + prog->stack_offset);
- sp = (Eterm **) esp;
+ sp = (const Eterm **)esp;
ret = am_true;
do_catch = 0;
fail_label = -1;
build_proc = psp;
- esdp->current_process = psp;
- ASSERT_HALFWORD(!c_p_checkpoint.p);
+ if (esdp)
+ esdp->current_process = psp;
#ifdef DEBUG
ASSERT(variables == mpsp->u.variables);
for (i=0; i<prog->num_bindings; i++) {
variables[i].term = THE_NON_VALUE;
variables[i].proc = NULL;
- variables[i].base = base;
}
#endif
@@ -1829,11 +2012,11 @@ restart:
#ifdef DMC_DEBUG
if (*heap_fence != FENCE_PATTERN) {
- erl_exit(1, "Heap fence overwritten in db_prog_match after op "
+ erts_exit(ERTS_ERROR_EXIT, "Heap fence overwritten in db_prog_match after op "
"0x%08x, overwritten with 0x%08x.", save_op, *heap_fence);
}
if (*stack_fence != FENCE_PATTERN) {
- erl_exit(1, "Stack fence overwritten in db_prog_match after op "
+ erts_exit(ERTS_ERROR_EXIT, "Stack fence overwritten in db_prog_match after op "
"0x%08x, overwritten with 0x%08x.", save_op,
*stack_fence);
}
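/* Editorial aside: the fence checks above are the classic guard-word
 * pattern. A minimal standalone illustration (not ERTS code): */
#include <stdint.h>
#include <stdio.h>

#define FENCE_PATTERN 0xDEADBEEFu

static void check_fences(const uint32_t *buf, size_t n)
{
    /* buf[0] and buf[n-1] were filled with FENCE_PATTERN before use;
     * any change means something wrote outside buf[1..n-2]. */
    if (buf[0] != FENCE_PATTERN || buf[n-1] != FENCE_PATTERN)
        fprintf(stderr, "fence overwritten\n");
}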
@@ -1857,9 +2040,9 @@ restart:
variables[n].term = dpm_array_to_list(psp, termp, arity);
break;
case matchTuple: /* *ep is a tuple of arity n */
- if (!is_tuple_rel(*ep,base))
+ if (!is_tuple(*ep))
FAIL();
- ep = tuple_val_rel(*ep,base);
+ ep = tuple_val(*ep);
n = *pc++;
if (arityval(*ep) != n)
FAIL();
@@ -1867,9 +2050,9 @@ restart:
break;
case matchPushT: /* *ep is a tuple of arity n,
push ptr to first element */
- if (!is_tuple_rel(*ep,base))
+ if (!is_tuple(*ep))
FAIL();
- tp = tuple_val_rel(*ep,base);
+ tp = tuple_val(*ep);
n = *pc++;
if (arityval(*tp) != n)
FAIL();
@@ -1879,46 +2062,94 @@ restart:
case matchList:
if (!is_list(*ep))
FAIL();
- ep = list_val_rel(*ep,base);
+ ep = list_val(*ep);
break;
case matchPushL:
if (!is_list(*ep))
FAIL();
- *sp++ = list_val_rel(*ep,base);
+ *sp++ = list_val(*ep);
++ep;
break;
+ case matchMap:
+ if (!is_map(*ep)) {
+ FAIL();
+ }
+ n = *pc++;
+ if (is_flatmap(*ep)) {
+ if (flatmap_get_size(flatmap_val(*ep)) < n) {
+ FAIL();
+ }
+ } else {
+ ASSERT(is_hashmap(*ep));
+ if (hashmap_size(*ep) < n) {
+ FAIL();
+ }
+ }
+ ep = flatmap_val(*ep);
+ break;
+ case matchPushM:
+ if (!is_map(*ep)) {
+ FAIL();
+ }
+ n = *pc++;
+ if (is_flatmap(*ep)) {
+ if (flatmap_get_size(flatmap_val(*ep)) < n) {
+ FAIL();
+ }
+ } else {
+ ASSERT(is_hashmap(*ep));
+ if (hashmap_size(*ep) < n) {
+ FAIL();
+ }
+ }
+ *sp++ = flatmap_val(*ep++);
+ break;
+ case matchKey:
+ t = (Eterm) *pc++;
+ tp = erts_maps_get(t, make_boxed(ep));
+ if (!tp) {
+ FAIL();
+ }
+ *sp++ = ep;
+ ep = tp;
+ break;
case matchPop:
ep = *(--sp);
break;
+ case matchSwap:
+ tp = sp[-1];
+ sp[-1] = sp[-2];
+ sp[-2] = tp;
+ break;
case matchBind:
n = *pc++;
variables[n].term = *ep++;
break;
case matchCmp:
n = *pc++;
- if (!eq_rel(variables[n].term, base, *ep, base))
+ if (!EQ(variables[n].term, *ep))
FAIL();
++ep;
break;
case matchEqBin:
t = (Eterm) *pc++;
- if (!eq_rel(t,NULL,*ep,base))
+ if (!EQ(t,*ep))
FAIL();
++ep;
break;
case matchEqFloat:
- if (!is_float_rel(*ep,base))
+ if (!is_float(*ep))
FAIL();
- if (memcmp(float_val_rel(*ep,base) + 1, pc, sizeof(double)))
+ if (memcmp(float_val(*ep) + 1, pc, sizeof(double)))
FAIL();
pc += TermWords(2);
++ep;
break;
case matchEqRef: {
Eterm* epc = (Eterm*)pc;
- if (!is_ref_rel(*ep,base))
+ if (!is_ref(*ep))
FAIL();
- if (!eq_rel(make_internal_ref_rel(epc, epc), epc, *ep, base)) {
+ if (!EQ(make_internal_ref(epc), *ep)) {
FAIL();
}
i = thing_arityval(*epc);
@@ -1927,9 +2158,9 @@ restart:
break;
}
case matchEqBig:
- if (!is_big_rel(*ep,base))
+ if (!is_big(*ep))
FAIL();
- tp = big_val_rel(*ep,base);
+ tp = big_val(*ep);
{
Eterm *epc = (Eterm *) pc;
if (*tp != *epc)
@@ -1987,6 +2218,39 @@ restart:
}
*esp++ = t;
break;
+ case matchMkFlatMap:
+ n = *pc++;
+ ehp = HAllocX(build_proc, MAP_HEADER_FLATMAP_SZ + n, HEAP_XTRA);
+ t = *--esp;
+ {
+ flatmap_t *m = (flatmap_t *)ehp;
+ m->thing_word = MAP_HEADER_FLATMAP;
+ m->size = n;
+ m->keys = t;
+ }
+ t = make_flatmap(ehp);
+ ehp += MAP_HEADER_FLATMAP_SZ;
+ while (n--) {
+ *ehp++ = *--esp;
+ }
+ *esp++ = t;
+ break;
+ case matchMkHashMap:
+ n = *pc++;
+ esp -= 2*n;
+ ehp = HAllocX(build_proc, 2*n, HEAP_XTRA);
+ {
+ ErtsHeapFactory factory;
+ Uint ix;
+ for (ix = 0; ix < 2*n; ix++){
+ ehp[ix] = esp[ix];
+ }
+ erts_factory_proc_init(&factory, build_proc);
+ t = erts_hashmap_from_array(&factory, ehp, n, 0);
+ erts_factory_close(&factory);
+ }
+ *esp++ = t;
+ break;
case matchCall0:
bif = (Eterm (*)(Process*, ...)) *pc++;
t = (*bif)(build_proc, bif_args);
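/* Editorial sketch -- a hypothetical helper mirroring the
 * matchMkHashMap case above: build a hashmap term from n key/value
 * pairs laid out flat as k0,v0,k1,v1,... via the heap-factory API. */
static Eterm build_hashmap_from_pairs(Process *proc, Eterm *kvs, Uint n)
{
    ErtsHeapFactory factory;
    Eterm map;
    erts_factory_proc_init(&factory, proc);
    map = erts_hashmap_from_array(&factory, kvs, n, 0);
    erts_factory_close(&factory);
    return map;
}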
@@ -2038,70 +2302,43 @@ restart:
esp -= 2;
esp[-1] = t;
break;
-
- #if HALFWORD_HEAP
- case matchPushVGuard:
- if (!base) goto case_matchPushV;
- /* Build NULL-based copy on pseudo heap for easy disposal */
- n = *pc++;
- ASSERT(is_value(variables[n].term));
- ASSERT(!variables[n].proc);
- variables[n].term = copy_object_rel(psp, variables[n].term, base);
- *esp++ = variables[n].term;
- #ifdef DEBUG
- variables[n].proc = psp;
- variables[n].base = NULL;
- #endif
- break;
- #endif
case matchPushVResult:
if (!(in_flags & ERTS_PAM_COPY_RESULT)) goto case_matchPushV;
-
- /* Build (NULL-based) copy on callers heap */
- #if HALFWORD_HEAP
- if (!do_catch && !c_p_checkpoint.p) {
- heap_checkpoint_init(c_p, &c_p_checkpoint);
- }
- #endif
+ /* Build copy on callers heap */
n = *pc++;
ASSERT(is_value(variables[n].term));
ASSERT(!variables[n].proc);
- variables[n].term = copy_object_rel(c_p, variables[n].term, base);
+ variables[n].term = copy_object_x(variables[n].term, c_p, HEAP_XTRA);
*esp++ = variables[n].term;
#ifdef DEBUG
variables[n].proc = c_p;
- variables[n].base = NULL;
#endif
break;
case matchPushV:
case_matchPushV:
n = *pc++;
ASSERT(is_value(variables[n].term));
- ASSERT(!variables[n].base);
*esp++ = variables[n].term;
break;
case matchPushExpr:
if (in_flags & ERTS_PAM_COPY_RESULT) {
Uint sz;
Eterm* top;
- sz = size_object_rel(term, base);
+ sz = size_object(term);
top = HAllocX(build_proc, sz, HEAP_XTRA);
if (in_flags & ERTS_PAM_CONTIGUOUS_TUPLE) {
- ASSERT(is_tuple_rel(term,base));
- *esp++ = copy_shallow_rel(tuple_val_rel(term,base), sz,
- &top, &MSO(build_proc), base);
+ ASSERT(is_tuple(term));
+ *esp++ = copy_shallow(tuple_val(term), sz, &top, &MSO(build_proc));
}
else {
- *esp++ = copy_struct_rel(term, sz, &top, &MSO(build_proc),
- base, NULL);
+ *esp++ = copy_struct(term, sz, &top, &MSO(build_proc));
}
}
else {
- *esp = term;
+ *esp++ = term;
}
break;
case matchPushArrayAsList:
- ASSERT_HALFWORD(base == NULL);
n = arity; /* Only happens when 'term' is an array */
tp = termp;
ehp = HAllocX(build_proc, n*2, HEAP_XTRA);
@@ -2117,7 +2354,6 @@ restart:
break;
case matchPushArrayAsListU:
/* This instruction is NOT efficient. */
- ASSERT_HALFWORD(base == NULL);
*esp++ = dpm_array_to_list(build_proc, termp, arity);
break;
case matchTrue:
@@ -2193,7 +2429,7 @@ restart:
pc += n;
break;
case matchSelf:
- *esp++ = c_p->common.id;
+ *esp++ = self->common.id;
break;
case matchWaste:
--esp;
@@ -2203,6 +2439,7 @@ restart:
break;
case matchProcessDump: {
erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
+ ASSERT(c_p == self);
print_process_info(ERTS_PRINT_DSBUF, (void *) dsbufp, c_p);
*esp++ = new_binary(build_proc, (byte *)dsbufp->str,
dsbufp->str_len);
@@ -2221,18 +2458,16 @@ restart:
*return_flags |= MATCH_SET_EXCEPTION_TRACE;
*esp++ = am_true;
break;
- case matchIsSeqTrace:
- if (SEQ_TRACE_TOKEN(c_p) != NIL
-#ifdef USE_VM_PROBES
- && SEQ_TRACE_TOKEN(c_p) != am_have_dt_utag
-#endif
- )
+ case matchIsSeqTrace:
+ ASSERT(c_p == self);
+ if (have_seqtrace(SEQ_TRACE_TOKEN(c_p)))
*esp++ = am_true;
else
*esp++ = am_false;
break;
case matchSetSeqToken:
- t = erts_seq_trace(c_p, esp[-1], esp[-2], 0);
+ ASSERT(c_p == self);
+ t = erts_seq_trace(c_p, esp[-1], esp[-2], 0);
if (is_non_value(t)) {
esp[-2] = FAIL_TERM;
} else {
@@ -2240,7 +2475,8 @@ restart:
}
--esp;
break;
- case matchSetSeqTokenFake:
+ case matchSetSeqTokenFake:
+ ASSERT(c_p == self);
t = seq_trace_fake(c_p, esp[-1]);
if (is_non_value(t)) {
esp[-2] = FAIL_TERM;
@@ -2249,12 +2485,9 @@ restart:
}
--esp;
break;
- case matchGetSeqToken:
- if (SEQ_TRACE_TOKEN(c_p) == NIL
-#ifdef USE_VM_PROBES
- || SEQ_TRACE_TOKEN(c_p) == am_have_dt_utag
-#endif
- )
+ case matchGetSeqToken:
+ ASSERT(c_p == self);
+ if (have_no_seqtrace(SEQ_TRACE_TOKEN(c_p)))
*esp++ = NIL;
else {
Eterm sender = SEQ_TRACE_TOKEN_SENDER(c_p);
@@ -2277,80 +2510,95 @@ restart:
ASSERT(is_immed(ehp[5]));
}
break;
- case matchEnableTrace:
+ case matchEnableTrace:
+ ASSERT(c_p == self);
if ( (n = erts_trace_flag2bit(esp[-1]))) {
- BEGIN_ATOMIC_TRACE(c_p);
- set_tracee_flags(c_p, ERTS_TRACER_PROC(c_p), 0, n);
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ set_tracee_flags(c_p, ERTS_TRACER(c_p), 0, n);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
esp[-1] = am_true;
} else {
esp[-1] = FAIL_TERM;
}
break;
- case matchEnableTrace2:
+ case matchEnableTrace2:
+ ASSERT(c_p == self);
n = erts_trace_flag2bit((--esp)[-1]);
esp[-1] = FAIL_TERM;
if (n) {
- BEGIN_ATOMIC_TRACE(c_p);
- if ( (tmpp = get_proc(c_p, 0, esp[0], 0))) {
+ if ( (tmpp = get_proc(c_p, ERTS_PROC_LOCK_MAIN, esp[0], ERTS_PROC_LOCKS_ALL))) {
/* Always take over the tracer of the current process */
- set_tracee_flags(tmpp, ERTS_TRACER_PROC(c_p), 0, n);
- esp[-1] = am_true;
+ set_tracee_flags(tmpp, ERTS_TRACER(c_p), 0, n);
+ if (tmpp == c_p)
+ erts_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL_MINOR);
+ else
+ erts_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL);
+ esp[-1] = am_true;
}
}
break;
- case matchDisableTrace:
+ case matchDisableTrace:
+ ASSERT(c_p == self);
if ( (n = erts_trace_flag2bit(esp[-1]))) {
- BEGIN_ATOMIC_TRACE(c_p);
- set_tracee_flags(c_p, ERTS_TRACER_PROC(c_p), n, 0);
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ set_tracee_flags(c_p, ERTS_TRACER(c_p), n, 0);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
esp[-1] = am_true;
} else {
esp[-1] = FAIL_TERM;
}
break;
- case matchDisableTrace2:
+ case matchDisableTrace2:
+ ASSERT(c_p == self);
n = erts_trace_flag2bit((--esp)[-1]);
esp[-1] = FAIL_TERM;
if (n) {
- BEGIN_ATOMIC_TRACE(c_p);
- if ( (tmpp = get_proc(c_p, 0, esp[0], 0))) {
+ if ( (tmpp = get_proc(c_p, ERTS_PROC_LOCK_MAIN, esp[0], ERTS_PROC_LOCKS_ALL))) {
/* Always take over the tracer of the current process */
- set_tracee_flags(tmpp, ERTS_TRACER_PROC(c_p), n, 0);
- esp[-1] = am_true;
+ set_tracee_flags(tmpp, ERTS_TRACER(c_p), n, 0);
+ if (tmpp == c_p)
+ erts_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL_MINOR);
+ else
+ erts_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL);
+ esp[-1] = am_true;
}
}
break;
- case matchCaller:
+ case matchCaller:
+ ASSERT(c_p == self);
if (!(c_p->cp) || !(cp = find_function_from_pc(c_p->cp))) {
*esp++ = am_undefined;
} else {
ehp = HAllocX(build_proc, 4, HEAP_XTRA);
*esp++ = make_tuple(ehp);
ehp[0] = make_arityval(3);
- ehp[1] = cp[0];
- ehp[2] = cp[1];
- ehp[3] = make_small((Uint) cp[2]);
+ ehp[1] = cp->module;
+ ehp[2] = cp->function;
+ ehp[3] = make_small((Uint) cp->arity);
}
break;
- case matchSilent:
+ case matchSilent:
+ ASSERT(c_p == self);
--esp;
if (in_flags & ERTS_PAM_IGNORE_TRACE_SILENT)
break;
if (*esp == am_true) {
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
ERTS_TRACE_FLAGS(c_p) |= F_TRACE_SILENT;
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
}
else if (*esp == am_false) {
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
ERTS_TRACE_FLAGS(c_p) &= ~F_TRACE_SILENT;
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
}
break;
- case matchTrace2:
+ case matchTrace2:
+ ASSERT(c_p == self);
{
/* disable enable */
Uint d_flags = 0, e_flags = 0; /* process trace flags */
- Eterm tracer = ERTS_TRACER_PROC(c_p);
+ ErtsTracer tracer = erts_tracer_nil;
/* XXX Atomicity note: Not fully atomic. Default tracer
* is sampled from current process but applied to
* tracee and tracer later after releasing main
@@ -2362,29 +2610,35 @@ restart:
* {trace,[],[{{tracer,Tracer}}]} is much, much older.
*/
int cputs = 0;
+ erts_tracer_update(&tracer, ERTS_TRACER(c_p));
if (! erts_trace_flags(esp[-1], &d_flags, &tracer, &cputs) ||
! erts_trace_flags(esp[-2], &e_flags, &tracer, &cputs) ||
cputs ) {
(--esp)[-1] = FAIL_TERM;
+ ERTS_TRACER_CLEAR(&tracer);
break;
}
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
(--esp)[-1] = set_match_trace(c_p, FAIL_TERM, tracer,
d_flags, e_flags);
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ ERTS_TRACER_CLEAR(&tracer);
}
break;
- case matchTrace3:
+ case matchTrace3:
+ ASSERT(c_p == self);
{
/* disable enable */
Uint d_flags = 0, e_flags = 0; /* process trace flags */
- Eterm tracer = ERTS_TRACER_PROC(c_p);
+ ErtsTracer tracer = erts_tracer_nil;
/* XXX Atomicity note. Not fully atomic. See above.
* Above it could possibly be solved, but not here.
*/
int cputs = 0;
Eterm tracee = (--esp)[0];
+
+ erts_tracer_update(&tracer, ERTS_TRACER(c_p));
if (! erts_trace_flags(esp[-1], &d_flags, &tracer, &cputs) ||
! erts_trace_flags(esp[-2], &e_flags, &tracer, &cputs) ||
@@ -2392,42 +2646,38 @@ restart:
! (tmpp = get_proc(c_p, ERTS_PROC_LOCK_MAIN,
tracee, ERTS_PROC_LOCKS_ALL))) {
(--esp)[-1] = FAIL_TERM;
+ ERTS_TRACER_CLEAR(&tracer);
break;
}
if (tmpp == c_p) {
(--esp)[-1] = set_match_trace(c_p, FAIL_TERM, tracer,
d_flags, e_flags);
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
} else {
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
(--esp)[-1] = set_match_trace(tmpp, FAIL_TERM, tracer,
d_flags, e_flags);
- erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL);
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
+ ERTS_TRACER_CLEAR(&tracer);
}
break;
case matchCatch: /* Match success, now build result */
do_catch = 1;
if (in_flags & ERTS_PAM_COPY_RESULT) {
build_proc = c_p;
- esdp->current_process = c_p;
+ if (esdp)
+ esdp->current_process = c_p;
}
break;
case matchHalt:
goto success;
default:
- erl_exit(1, "Internal error: unexpected opcode in match program.");
+ erts_exit(ERTS_ERROR_EXIT, "Internal error: unexpected opcode in match program.");
}
}
fail:
-#if HALFWORD_HEAP
- if (c_p_checkpoint.p) {
- /* Dispose garbage built by guards on caller heap */
- heap_checkpoint_revert(&c_p_checkpoint);
- c_p_checkpoint.p = NULL;
- }
-#endif
*return_flags = 0U;
if (fail_label >= 0) { /* We failed during a "TryMeElse",
let's restart, with the next match
@@ -2441,36 +2691,25 @@ success:
#ifdef DMC_DEBUG
if (*heap_fence != FENCE_PATTERN) {
- erl_exit(1, "Heap fence overwritten in db_prog_match after op "
+ erts_exit(ERTS_ERROR_EXIT, "Heap fence overwritten in db_prog_match after op "
"0x%08x, overwritten with 0x%08x.", save_op, *heap_fence);
}
if (*stack_fence != FENCE_PATTERN) {
- erl_exit(1, "Stack fence overwritten in db_prog_match after op "
+ erts_exit(ERTS_ERROR_EXIT, "Stack fence overwritten in db_prog_match after op "
"0x%08x, overwritten with 0x%08x.", save_op,
*stack_fence);
}
#endif
- esdp->current_process = current_scheduled;
-
- END_ATOMIC_TRACE(c_p);
+ if (esdp)
+ esdp->current_process = current_scheduled;
return ret;
#undef FAIL
#undef FAIL_TERM
-#undef BEGIN_ATOMIC_TRACE
-#undef END_ATOMIC_TRACE
}
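/* Editorial note on the new (c_p, self) pair above: c_p is the process
 * whose heap receives copied results -- the ASSERT at the top allows it
 * to be NULL when no result is copied -- while self is the process the
 * program runs on behalf of (matchSelf reads self->common.id, and the
 * trace opcodes assert c_p == self). For ETS the two coincide, as in
 * erts_match_set_run_ets above:
 *
 *     ret = db_prog_match(p, p, mpsp, args, NULL, num_args,
 *                         ERTS_PAM_COPY_RESULT, return_flags);
 */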
-/*
- * Convert a match program to a "magic" binary to return up to erlang
- */
-Eterm db_make_mp_binary(Process *p, Binary *mp, Eterm **hpp)
-{
- return erts_mk_magic_binary_term(hpp, &MSO(p), mp);
-}
-
DMCErrInfo *db_new_dmc_err_info(void)
{
DMCErrInfo *ret = erts_alloc(ERTS_ALC_T_DB_DMC_ERR_INFO,
@@ -2590,21 +2829,14 @@ Wterm db_do_read_element(DbUpdateHandle* handle, Sint position)
{
Eterm elem = handle->dbterm->tpl[position];
if (!is_header(elem)) {
-#if HALFWORD_HEAP
- if (!is_immed(elem)
- && !handle->tb->common.compress
- && !(handle->abs_vec && handle->abs_vec[position])) {
- return rterm2wterm(elem, handle->dbterm->tpl);
- }
-#endif
return elem;
}
ASSERT(((DbTableCommon*)handle->tb)->compress);
- ASSERT(!handle->mustResize);
+ ASSERT(!(handle->flags & DB_MUST_RESIZE));
handle->dbterm = db_alloc_tmp_uncompressed(&handle->tb->common,
handle->dbterm);
- handle->mustResize = 1;
+ handle->flags |= DB_MUST_RESIZE;
return handle->dbterm->tpl[position];
}
@@ -2624,9 +2856,6 @@ void db_do_update_element(DbUpdateHandle* handle,
Eterm* oldp;
Uint newval_sz;
Uint oldval_sz;
-#if HALFWORD_HEAP
- Eterm* old_base;
-#endif
if (is_both_immed(newval,oldval)) {
handle->dbterm->tpl[position] = newval;
@@ -2637,21 +2866,14 @@ void db_do_update_element(DbUpdateHandle* handle,
#endif
return;
}
- if (!handle->mustResize) {
+ if (!(handle->flags & DB_MUST_RESIZE)) {
if (handle->tb->common.compress) {
handle->dbterm = db_alloc_tmp_uncompressed(&handle->tb->common,
handle->dbterm);
- handle->mustResize = 1;
+ handle->flags |= DB_MUST_RESIZE;
oldval = handle->dbterm->tpl[position];
- #if HALFWORD_HEAP
- old_base = NULL;
- #endif
}
else {
- #if HALFWORD_HEAP
- ASSERT(!handle->abs_vec);
- old_base = handle->dbterm->tpl;
- #endif
if (is_boxed(newval)) {
newp = boxed_val(newval);
switch (*newp & _TAG_HEADER_MASK) {
@@ -2661,7 +2883,7 @@ void db_do_update_element(DbUpdateHandle* handle,
case _TAG_HEADER_HEAP_BIN:
newval_sz = header_arity(*newp) + 1;
if (is_boxed(oldval)) {
- oldp = boxed_val_rel(oldval,old_base);
+ oldp = boxed_val(oldval);
switch (*oldp & _TAG_HEADER_MASK) {
case _TAG_HEADER_POS_BIG:
case _TAG_HEADER_NEG_BIG:
@@ -2681,40 +2903,20 @@ void db_do_update_element(DbUpdateHandle* handle,
}
}
}
-#if HALFWORD_HEAP
- else {
- old_base = (handle->tb->common.compress
- || (handle->abs_vec && handle->abs_vec[position])) ?
- NULL : handle->dbterm->tpl;
- }
-#endif
/* A simple memcpy is not possible, or the dbterm is already non-contiguous; */
/* we need to realloc... */
newval_sz = is_immed(newval) ? 0 : size_object(newval);
new_size_set:
- oldval_sz = is_immed(oldval) ? 0 : size_object_rel(oldval,old_base);
+ oldval_sz = is_immed(oldval) ? 0 : size_object(oldval);
both_size_set:
handle->new_size = handle->new_size - oldval_sz + newval_sz;
/* write new value in old dbterm, finalize will make a flat copy */
handle->dbterm->tpl[position] = newval;
- handle->mustResize = 1;
-
-#if HALFWORD_HEAP
- if (old_base && newval_sz > 0) {
- ASSERT(!handle->tb->common.compress);
- if (!handle->abs_vec) {
- int i = header_arity(handle->dbterm->tpl[0]);
- handle->abs_vec = erts_alloc(ERTS_ALC_T_TMP, (i+1)*sizeof(char));
- sys_memset(handle->abs_vec, 0, i+1);
- /* abs_vec[0] not used */
- }
- handle->abs_vec[position] = 1;
- }
-#endif
+ handle->flags |= DB_MUST_RESIZE;
}
static ERTS_INLINE byte* db_realloc_term(DbTableCommon* tb, void* old,
@@ -2813,7 +3015,7 @@ static void* copy_to_comp(DbTableCommon* tb, Eterm obj, DbTerm* dest,
tpl[arity + 1] = alloc_size;
tmp_offheap.first = NULL;
- tpl[tb->keypos] = copy_struct_rel(key, size_object(key), &top.ep, &tmp_offheap, NULL, tpl);
+ tpl[tb->keypos] = copy_struct(key, size_object(key), &top.ep, &tmp_offheap);
dest->first_oh = tmp_offheap.first;
for (i=1; i<=arity; i++) {
if (i != tb->keypos) {
@@ -2832,7 +3034,7 @@ static void* copy_to_comp(DbTableCommon* tb, Eterm obj, DbTerm* dest,
Eterm* dbg_top = erts_alloc(ERTS_ALC_T_DB_TERM, dest->size * sizeof(Eterm));
dest->debug_clone = dbg_top;
tmp_offheap.first = dest->first_oh;
- copy_struct_rel(obj, dest->size, &dbg_top, &tmp_offheap, NULL, dbg_top);
+ copy_struct(obj, dest->size, &dbg_top, &tmp_offheap);
dest->first_oh = tmp_offheap.first;
ASSERT(dbg_top == dest->debug_clone + dest->size);
}
@@ -2879,7 +3081,7 @@ void* db_store_term(DbTableCommon *tb, DbTerm* old, Uint offset, Eterm obj)
newp->size = size;
top = newp->tpl;
tmp_offheap.first = NULL;
- copy_struct_rel(obj, size, &top, &tmp_offheap, NULL, top);
+ copy_struct(obj, size, &top, &tmp_offheap);
newp->first_oh = tmp_offheap.first;
#ifdef DEBUG_CLONE
newp->debug_clone = NULL;
@@ -2962,29 +3164,8 @@ void db_finalize_resize(DbUpdateHandle* handle, Uint offset)
tmp_offheap.first = NULL;
- #if HALFWORD_HEAP
- if (handle->abs_vec) {
- int i, arity = header_arity(handle->dbterm->tpl[0]);
-
- top[0] = tpl[0];
- top += arity + 1;
- for (i=1; i<=arity; i++) {
- Eterm* src_base = handle->abs_vec[i] ? NULL : tpl;
-
- newDbTerm->tpl[i] = copy_struct_rel(tpl[i],
- size_object_rel(tpl[i],src_base),
- &top, &tmp_offheap, src_base,
- newDbTerm->tpl);
- }
- newDbTerm->first_oh = tmp_offheap.first;
- ASSERT((byte*)top <= (newp + alloc_sz));
- erts_free(ERTS_ALC_T_TMP, handle->abs_vec);
- }
- else
- #endif /* HALFWORD_HEAP */
{
- copy_struct_rel(make_tuple_rel(tpl,tpl), handle->new_size, &top,
- &tmp_offheap, tpl, top);
+ copy_struct(make_tuple(tpl), handle->new_size, &top, &tmp_offheap);
newDbTerm->first_oh = tmp_offheap.first;
ASSERT((byte*)top == (newp + alloc_sz));
}
@@ -2996,27 +3177,34 @@ Eterm db_copy_from_comp(DbTableCommon* tb, DbTerm* bp, Eterm** hpp,
{
Eterm* hp = *hpp;
int i, arity = arityval(bp->tpl[0]);
+ ErtsHeapFactory factory;
hp[0] = bp->tpl[0];
*hpp += arity + 1;
- hp[tb->keypos] = copy_struct_rel(bp->tpl[tb->keypos],
- size_object_rel(bp->tpl[tb->keypos], bp->tpl),
- hpp, off_heap, bp->tpl, NULL);
+ hp[tb->keypos] = copy_struct(bp->tpl[tb->keypos],
+ size_object(bp->tpl[tb->keypos]),
+ hpp, off_heap);
+
+ erts_factory_static_init(&factory, *hpp, bp->size - (arity+1), off_heap);
+
for (i=arity; i>0; i--) {
if (i != tb->keypos) {
if (is_immed(bp->tpl[i])) {
hp[i] = bp->tpl[i];
}
else {
- hp[i] = erts_decode_ext_ets(hpp, off_heap,
+ hp[i] = erts_decode_ext_ets(&factory,
elem2ext(bp->tpl, i));
}
}
}
+ *hpp = factory.hp;
+ erts_factory_close(&factory);
+
ASSERT((*hpp - hp) <= bp->size);
#ifdef DEBUG_CLONE
- ASSERT(eq_rel(make_tuple(hp),NULL,make_tuple(bp->debug_clone),bp->debug_clone));
+ ASSERT(EQ(make_tuple(hp),make_tuple(bp->debug_clone)));
#endif
return make_tuple(hp);
}
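/* Editorial sketch (variable names hypothetical): the factory-based
 * decode pattern introduced above, replacing the old Eterm** cursor. */
ErtsHeapFactory factory;
Eterm term;
erts_factory_static_init(&factory, hp_start, n_words, off_heap);
term = erts_decode_ext_ets(&factory, ext_bytes);
hp_start = factory.hp;          /* heap cursor after the decode */
erts_factory_close(&factory);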
@@ -3032,21 +3220,22 @@ Eterm db_copy_element_from_ets(DbTableCommon* tb, Process* p,
if (tb->compress && pos != tb->keypos) {
byte* ext = elem2ext(obj->tpl, pos);
Sint sz = erts_decode_ext_size_ets(ext, db_alloced_size_comp(obj)) + extra;
- Eterm* hp = HAlloc(p, sz);
- Eterm* endp = hp + sz;
- Eterm copy = erts_decode_ext_ets(&hp, &MSO(p), ext);
- *hpp = hp;
- hp += extra;
- HRelease(p, endp, hp);
+ Eterm copy;
+ ErtsHeapFactory factory;
+
+ erts_factory_proc_prealloc_init(&factory, p, sz);
+ copy = erts_decode_ext_ets(&factory, ext);
+ *hpp = erts_produce_heap(&factory, extra, 0);
+ erts_factory_close(&factory);
#ifdef DEBUG_CLONE
- ASSERT(eq_rel(copy, NULL, obj->debug_clone[pos], obj->debug_clone));
+ ASSERT(EQ(copy, obj->debug_clone[pos]));
#endif
return copy;
}
else {
- Uint sz = size_object_rel(obj->tpl[pos], obj->tpl);
+ Uint sz = size_object(obj->tpl[pos]);
*hpp = HAlloc(p, sz + extra);
- return copy_struct_rel(obj->tpl[pos], sz, hpp, &MSO(p), obj->tpl, NULL);
+ return copy_struct(obj->tpl[pos], sz, hpp, &MSO(p));
}
}
@@ -3069,9 +3258,7 @@ void db_cleanup_offheap_comp(DbTerm* obj)
}
switch (thing_subtag(u.hdr->thing_word)) {
case REFC_BINARY_SUBTAG:
- if (erts_refc_dectest(&u.pb->val->refc, 0) == 0) {
- erts_bin_free(u.pb->val);
- }
+ erts_bin_release(u.pb->val);
break;
case FUN_SUBTAG:
ASSERT(u.pb != &tmp);
@@ -3079,6 +3266,10 @@ void db_cleanup_offheap_comp(DbTerm* obj)
erts_erase_fun_entry(u.fun->fe);
}
break;
+ case REF_SUBTAG:
+ ASSERT(is_magic_ref_thing(u.hdr));
+ erts_bin_release((Binary *)u.mref->mb);
+ break;
default:
ASSERT(is_external_header(u.hdr->thing_word));
ASSERT(u.pb != &tmp);
@@ -3151,49 +3342,89 @@ int db_is_variable(Eterm obj)
return N;
}
+/* check if node is (or contains) a map
+ * return 1 if node is or contains a map
+ * return 0 otherwise
+ */
+
+int db_has_map(Eterm node) {
+ DECLARE_ESTACK(s);
+
+ ESTACK_PUSH(s,node);
+ while (!ESTACK_ISEMPTY(s)) {
+ node = ESTACK_POP(s);
+ if (is_list(node)) {
+ while (is_list(node)) {
+ ESTACK_PUSH(s,CAR(list_val(node)));
+ node = CDR(list_val(node));
+ }
+ ESTACK_PUSH(s,node); /* Non wellformed list or [] */
+ } else if (is_tuple(node)) {
+ Eterm *tuple = tuple_val(node);
+ int arity = arityval(*tuple);
+ while(arity--) {
+ ESTACK_PUSH(s,*(++tuple));
+ }
+ } else if (is_map(node)) {
+ DESTROY_ESTACK(s);
+ return 1;
+ }
+ }
+ DESTROY_ESTACK(s);
+ return 0;
+}
/* check if obj is (or contains) a variable */
/* return 1 if obj contains a variable or underscore */
/* return 0 if obj is fully ground */
-int db_has_variable(Eterm obj)
-{
- switch(obj & _TAG_PRIMARY_MASK) {
- case TAG_PRIMARY_LIST: {
- while (is_list(obj)) {
- if (db_has_variable(CAR(list_val(obj))))
+int db_has_variable(Eterm node) {
+ DECLARE_ESTACK(s);
+
+ ESTACK_PUSH(s,node);
+ while (!ESTACK_ISEMPTY(s)) {
+ node = ESTACK_POP(s);
+ switch(node & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_LIST:
+ while (is_list(node)) {
+ ESTACK_PUSH(s,CAR(list_val(node)));
+ node = CDR(list_val(node));
+ }
+ ESTACK_PUSH(s,node); /* Non wellformed list or [] */
+ break;
+ case TAG_PRIMARY_BOXED:
+ if (is_tuple(node)) {
+ Eterm *tuple = tuple_val(node);
+ int arity = arityval(*tuple);
+ while(arity--) {
+ ESTACK_PUSH(s,*(++tuple));
+ }
+ } else if (is_flatmap(node)) {
+ Eterm *values = flatmap_get_values(flatmap_val(node));
+ Uint size = flatmap_get_size(flatmap_val(node));
+ ESTACK_PUSH(s, ((flatmap_t *) flatmap_val(node))->keys);
+ while (size--) {
+ ESTACK_PUSH(s, *(values++));
+ }
+ } else if (is_map(node)) { /* other map-nodes or map-heads */
+ Eterm *ptr = hashmap_val(node);
+ int i = hashmap_bitcount(MAP_HEADER_VAL(*ptr));
+ ptr += MAP_HEADER_ARITY(*ptr);
+ while(i--) { ESTACK_PUSH(s, *++ptr); }
+ }
+ break;
+ case TAG_PRIMARY_IMMED1:
+ if (node == am_Underscore || db_is_variable(node) >= 0) {
+ DESTROY_ESTACK(s);
return 1;
- obj = CDR(list_val(obj));
- }
- return(db_has_variable(obj)); /* Non wellformed list or [] */
- }
- case TAG_PRIMARY_BOXED:
- if (!BOXED_IS_TUPLE(obj)) {
- return 0;
- } else {
- Eterm *tuple = tuple_val(obj);
- int arity = arityval(*tuple++);
- while(arity--) {
- if (db_has_variable(*tuple))
- return 1;
- tuple++;
}
- return(0);
+ break;
}
- case TAG_PRIMARY_IMMED1:
- if (obj == am_Underscore || db_is_variable(obj) >= 0)
- return 1;
}
+ DESTROY_ESTACK(s);
return 0;
}
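/* Editorial aside: db_has_map and db_has_variable above were rewritten
 * from recursion to an explicit stack (ESTACK/DESTROY_ESTACK) so that
 * arbitrarily deep terms cannot overflow the C stack. A standalone
 * version of the same pattern on a plain binary tree (not ERTS code): */
#include <stdlib.h>

struct node { int flag; struct node *left, *right; };

static int tree_has_flag(const struct node *root)
{
    const struct node **stack;
    size_t top = 0, cap = 16;
    int found = 0;

    stack = malloc(cap * sizeof *stack);
    if (root)
        stack[top++] = root;
    while (top > 0) {
        const struct node *n = stack[--top];
        if (n->flag) { found = 1; break; }
        if (top + 2 > cap) {            /* grow the heap-allocated stack
                                           instead of the C stack */
            cap *= 2;
            stack = realloc(stack, cap * sizeof *stack);
        }
        if (n->left)  stack[top++] = n->left;
        if (n->right) stack[top++] = n->right;
    }
    free(stack);
    return found;
}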
-int erts_db_is_compiled_ms(Eterm term)
-{
- return (is_binary(term)
- && (thing_subtag(*binary_val(term)) == REFC_BINARY_SUBTAG)
- && IsMatchProgBinary((((ProcBin *) binary_val(term))->val)));
-}
-
/*
** Local (static) utilities.
*/
@@ -3207,20 +3438,20 @@ int erts_db_is_compiled_ms(Eterm term)
** Utility to add an error
*/
-static void add_dmc_err(DMCErrInfo *err_info,
- char *str,
- int variable,
- Eterm term,
- DMCErrorSeverity severity)
+static void vadd_dmc_err(DMCErrInfo *err_info,
+ DMCErrorSeverity severity,
+ int variable,
+ const char *str,
+ ...)
{
+ DMCError *e;
+ va_list args;
+ va_start(args, str);
+
+
/* Linked in reverse order, to ease the formatting */
- DMCError *e = erts_alloc(ERTS_ALC_T_DB_DMC_ERROR, sizeof(DMCError));
- if (term != 0UL) {
- erts_snprintf(e->error_string, DMC_ERR_STR_LEN, str, term);
- } else {
- strncpy(e->error_string, str, DMC_ERR_STR_LEN);
- e->error_string[DMC_ERR_STR_LEN] ='\0';
- }
+ e = erts_alloc(ERTS_ALC_T_DB_DMC_ERROR, sizeof(DMCError));
+ erts_vsnprintf(e->error_string, DMC_ERR_STR_LEN, str, args);
e->variable = variable;
e->severity = severity;
e->next = err_info->first;
@@ -3230,8 +3461,11 @@ static void add_dmc_err(DMCErrInfo *err_info,
err_info->first = e;
if (severity >= dmcError)
err_info->error_added = 1;
+
+ va_end(args);
}
+
/*
** Handle one term in the match expression (not the guard)
*/
@@ -3243,11 +3477,9 @@ static DMCRet dmc_one_term(DMCContext *context,
{
Sint n;
Eterm *hp;
- ErlHeapFragment *tmp_mb;
Uint sz, sz2, sz3;
Uint i, j;
-
switch (c & _TAG_PRIMARY_MASK) {
case TAG_PRIMARY_IMMED1:
if ((n = db_is_variable(c)) >= 0) { /* variable */
@@ -3334,30 +3566,24 @@ static DMCRet dmc_one_term(DMCContext *context,
DMC_PUSH(*text, n);
DMC_PUSH(*stack, c);
break;
+ case (_TAG_HEADER_MAP >> _TAG_PRIMARY_SIZE):
+ if (is_flatmap(c))
+ n = flatmap_get_size(flatmap_val(c));
+ else
+ n = hashmap_size(c);
+ DMC_PUSH(*text, matchPushM);
+ ++(context->stack_used);
+ DMC_PUSH(*text, n);
+ DMC_PUSH(*stack, c);
+ break;
case (_TAG_HEADER_REF >> _TAG_PRIMARY_SIZE):
{
Eterm* ref_val = internal_ref_val(c);
DMC_PUSH(*text, matchEqRef);
-#if HALFWORD_HEAP
- {
- union {
- UWord u;
- Uint t[2];
- } fiddle;
- ASSERT(thing_arityval(ref_val[0]) == 3);
- fiddle.t[0] = ref_val[0];
- fiddle.t[1] = ref_val[1];
- DMC_PUSH(*text, fiddle.u);
- fiddle.t[0] = ref_val[2];
- fiddle.t[1] = ref_val[3];
- DMC_PUSH(*text, fiddle.u);
- }
-#else
n = thing_arityval(ref_val[0]);
for (i = 0; i <= n; ++i) {
DMC_PUSH(*text, ref_val[i]);
}
-#endif
break;
}
case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
@@ -3366,77 +3592,51 @@ static DMCRet dmc_one_term(DMCContext *context,
Eterm* bval = big_val(c);
n = thing_arityval(bval[0]);
DMC_PUSH(*text, matchEqBig);
-#if HALFWORD_HEAP
- {
- union {
- UWord u;
- Uint t[2];
- } fiddle;
- ASSERT(n >= 1);
- fiddle.t[0] = bval[0];
- fiddle.t[1] = bval[1];
- DMC_PUSH(*text, fiddle.u);
- for (i = 2; i <= n; ++i) {
- fiddle.t[0] = bval[i];
- if (++i <= n) {
- fiddle.t[1] = bval[i];
- } else {
- fiddle.t[1] = (Uint) 0;
- }
- DMC_PUSH(*text, fiddle.u);
- }
- }
-#else
for (i = 0; i <= n; ++i) {
DMC_PUSH(*text, (Uint) bval[i]);
}
-#endif
break;
}
case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
DMC_PUSH(*text,matchEqFloat);
-#if HALFWORD_HEAP
- {
- union {
- UWord u;
- Uint t[2];
- } fiddle;
- fiddle.t[0] = float_val(c)[1];
- fiddle.t[1] = float_val(c)[2];
- DMC_PUSH(*text, fiddle.u);
- }
-#else
DMC_PUSH(*text, (Uint) float_val(c)[1]);
#ifdef ARCH_64
DMC_PUSH(*text, (Uint) 0);
#else
DMC_PUSH(*text, (Uint) float_val(c)[2]);
#endif
-#endif
break;
default: /* BINARY, FUN, VECTOR, or EXTERNAL */
- /*
- ** Make a private copy...
- */
- n = size_object(c);
- tmp_mb = new_message_buffer(n);
- hp = tmp_mb->mem;
DMC_PUSH(*text, matchEqBin);
- DMC_PUSH(*text, copy_struct(c, n, &hp, &(tmp_mb->off_heap)));
- tmp_mb->next = context->save;
- context->save = tmp_mb;
+ DMC_PUSH(*text, dmc_private_copy(context, c));
break;
}
break;
}
default:
- erl_exit(1, "db_match_compile: "
+ erts_exit(ERTS_ERROR_EXIT, "db_match_compile: "
"Bad object on heap: 0x%bex\n", c);
}
return retOk;
}
/*
+** Make a private copy of a term in a context.
+*/
+
+static Eterm
+dmc_private_copy(DMCContext *context, Eterm c)
+{
+ Uint n = size_object(c);
+ ErlHeapFragment *tmp_mb = new_message_buffer(n);
+ Eterm *hp = tmp_mb->mem;
+ Eterm copy = copy_struct(c, n, &hp, &(tmp_mb->off_heap));
+ tmp_mb->next = context->save;
+ context->save = tmp_mb;
+ return copy;
+}
+
+/*
** Match guard compilation
*/
@@ -3464,24 +3664,21 @@ static void do_emit_constant(DMCContext *context, DMC_STACK_TYPE(UWord) *text,
context->stack_need = context->stack_used;
}
-#define RETURN_ERROR_X(String, X, Y, ContextP, ConstantF) \
-do { \
-if ((ContextP)->err_info != NULL) { \
- (ConstantF) = 0; \
- add_dmc_err((ContextP)->err_info, String, X, Y, dmcError); \
- return retOk; \
-} else \
- return retFail; \
-} while(0)
+#define RETURN_ERROR_X(VAR, ContextP, ConstantF, String, ARG) \
+ (((ContextP)->err_info != NULL) \
+ ? ((ConstantF) = 0, \
+ vadd_dmc_err((ContextP)->err_info, dmcError, VAR, String, ARG), \
+ retOk) \
+ : retFail)
#define RETURN_ERROR(String, ContextP, ConstantF) \
- RETURN_ERROR_X(String, -1, 0UL, ContextP, ConstantF)
+ return RETURN_ERROR_X(-1, ContextP, ConstantF, String, 0)
#define RETURN_VAR_ERROR(String, N, ContextP, ConstantF) \
- RETURN_ERROR_X(String, N, 0UL, ContextP, ConstantF)
+ return RETURN_ERROR_X(N, ContextP, ConstantF, String, 0)
#define RETURN_TERM_ERROR(String, T, ContextP, ConstantF) \
- RETURN_ERROR_X(String, -1, T, ContextP, ConstantF)
+ return RETURN_ERROR_X(-1, ContextP, ConstantF, String, T)
#define WARNING(String, ContextP) \
add_dmc_err((ContextP)->err_info, String, -1, 0UL, dmcWarning)
@@ -3527,57 +3724,78 @@ static DMCRet dmc_list(DMCContext *context,
return retOk;
}
-static DMCRet dmc_tuple(DMCContext *context,
- DMCHeap *heap,
- DMC_STACK_TYPE(UWord) *text,
- Eterm t,
- int *constant)
+static void
+dmc_rearrange_constants(DMCContext *context, DMC_STACK_TYPE(UWord) *text,
+ int textpos, Eterm *p, Uint nelems)
{
DMC_STACK_TYPE(UWord) instr_save;
+ Uint i;
+
+ DMC_INIT_STACK(instr_save);
+ while (DMC_STACK_NUM(*text) > textpos) {
+ DMC_PUSH(instr_save, DMC_POP(*text));
+ }
+ for (i = nelems; i--;) {
+ do_emit_constant(context, text, p[i]);
+ }
+ while(!DMC_EMPTY(instr_save)) {
+ DMC_PUSH(*text, DMC_POP(instr_save));
+ }
+ DMC_FREE(instr_save);
+}
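/* Editorial sketch (standalone, not ERTS code) of the save/splice/
 * restore trick above: to insert deferred items *below* material
 * already emitted on a stack, pop that material aside, push the
 * deferred items, then replay the saved material on top. */
static void splice_below(int *stack, int *top, int pos,
                         const int *deferred, int ndeferred)
{
    int save[64];       /* sketch assumes at most 64 saved entries */
    int nsave = 0, i;
    while (*top > pos)
        save[nsave++] = stack[--(*top)];
    for (i = 0; i < ndeferred; i++)
        stack[(*top)++] = deferred[i];
    while (nsave > 0)
        stack[(*top)++] = save[--nsave];
}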
+
+static DMCRet
+dmc_array(DMCContext *context, DMCHeap *heap, DMC_STACK_TYPE(UWord) *text,
+ Eterm *p, Uint nelems, int *constant)
+{
int all_constant = 1;
int textpos = DMC_STACK_NUM(*text);
- Eterm *p = tuple_val(t);
- Uint nelems = arityval(*p);
Uint i;
- int c;
- DMCRet ret;
/*
- ** We remember where we started to layout code,
+ ** We remember where we started to layout code,
** assume all is constant and back up and restart if not so.
- ** The tuple should be laid out with the last element first,
- ** so we can memcpy the tuple to the eheap.
+ ** The array should be laid out with the last element first,
+ ** so we can memcpy it to the eheap.
*/
- for (i = nelems; i > 0; --i) {
- if ((ret = dmc_expr(context, heap, text, p[i], &c)) != retOk)
- return ret;
- if (!c && all_constant) {
- all_constant = 0;
- if (i < nelems) {
- Uint j;
+ for (i = nelems; i--;) {
+ DMCRet ret;
+ int c;
- /*
- * Oops, we need to relayout the constants.
- * Save the already laid out instructions.
- */
- DMC_INIT_STACK(instr_save);
- while (DMC_STACK_NUM(*text) > textpos)
- DMC_PUSH(instr_save, DMC_POP(*text));
- for (j = nelems; j > i; --j)
- do_emit_constant(context, text, p[j]);
- while(!DMC_EMPTY(instr_save))
- DMC_PUSH(*text, DMC_POP(instr_save));
- DMC_FREE(instr_save);
- }
- } else if (c && !all_constant) {
- /* push a constant */
- do_emit_constant(context, text, p[i]);
- }
+ ret = dmc_expr(context, heap, text, p[i], &c);
+ if (ret != retOk) {
+ return ret;
+ }
+ if (!c && all_constant) {
+ all_constant = 0;
+ if (i < nelems - 1) {
+ dmc_rearrange_constants(context, text, textpos,
+ p + i + 1, nelems - i - 1);
+ }
+ } else if (c && !all_constant) {
+ do_emit_constant(context, text, p[i]);
+ }
+ }
+ *constant = all_constant;
+ return retOk;
+}
+
+static DMCRet
+dmc_tuple(DMCContext *context, DMCHeap *heap, DMC_STACK_TYPE(UWord) *text,
+ Eterm t, int *constant)
+{
+ int all_constant;
+ Eterm *p = tuple_val(t);
+ Uint nelems = arityval(*p);
+ DMCRet ret;
+
+ ret = dmc_array(context, heap, text, p + 1, nelems, &all_constant);
+ if (ret != retOk) {
+ return ret;
}
-
if (all_constant) {
- *constant = 1;
- return retOk;
+ *constant = 1;
+ return retOk;
}
DMC_PUSH(*text, matchMkTuple);
DMC_PUSH(*text, nelems);
@@ -3586,6 +3804,93 @@ static DMCRet dmc_tuple(DMCContext *context,
return retOk;
}
+static DMCRet
+dmc_map(DMCContext *context, DMCHeap *heap, DMC_STACK_TYPE(UWord) *text,
+ Eterm t, int *constant)
+{
+ int nelems;
+ int constant_values;
+ DMCRet ret;
+ if (is_flatmap(t)) {
+ flatmap_t *m = (flatmap_t *)flatmap_val(t);
+ Eterm *values = flatmap_get_values(m);
+
+ nelems = flatmap_get_size(m);
+ ret = dmc_array(context, heap, text, values, nelems, &constant_values);
+
+ if (ret != retOk) {
+ return ret;
+ }
+ if (constant_values) {
+ *constant = 1;
+ return retOk;
+ }
+ DMC_PUSH(*text, matchPushC);
+ DMC_PUSH(*text, dmc_private_copy(context, m->keys));
+ if (++context->stack_used > context->stack_need) {
+ context->stack_need = context->stack_used;
+ }
+ DMC_PUSH(*text, matchMkFlatMap);
+ DMC_PUSH(*text, nelems);
+ context->stack_used -= nelems;
+ *constant = 0;
+ return retOk;
+ } else {
+ DECLARE_WSTACK(wstack);
+ Eterm *kv;
+ int c;
+
+ ASSERT(is_hashmap(t));
+
+ hashmap_iterator_init(&wstack, t, 1);
+ constant_values = 1;
+ nelems = hashmap_size(t);
+
+ while ((kv=hashmap_iterator_prev(&wstack)) != NULL) {
+ if ((ret = dmc_expr(context, heap, text, CDR(kv), &c)) != retOk) {
+ DESTROY_WSTACK(wstack);
+ return ret;
+ }
+ if (!c)
+ constant_values = 0;
+ }
+
+ if (constant_values) {
+ *constant = 1;
+ DESTROY_WSTACK(wstack);
+ return retOk;
+ }
+
+ *constant = 0;
+
+ hashmap_iterator_init(&wstack, t, 1);
+
+ while ((kv=hashmap_iterator_prev(&wstack)) != NULL) {
+ /* push key */
+ if ((ret = dmc_expr(context, heap, text, CAR(kv), &c)) != retOk) {
+ DESTROY_WSTACK(wstack);
+ return ret;
+ }
+ if (c) {
+ do_emit_constant(context, text, CAR(kv));
+ }
+ /* push value */
+ if ((ret = dmc_expr(context, heap, text, CDR(kv), &c)) != retOk) {
+ DESTROY_WSTACK(wstack);
+ return ret;
+ }
+ if (c) {
+ do_emit_constant(context, text, CDR(kv));
+ }
+ }
+ DMC_PUSH(*text, matchMkHashMap);
+ DMC_PUSH(*text, nelems);
+ context->stack_used -= nelems;
+ DESTROY_WSTACK(wstack);
+ return retOk;
+ }
+}
+
static DMCRet dmc_whole_expression(DMCContext *context,
DMCHeap *heap,
DMC_STACK_TYPE(UWord) *text,
@@ -3620,24 +3925,8 @@ static void dmc_add_pushv_variant(DMCContext *context, DMCHeap *heap,
MatchOps instr = matchPushV;
ASSERT(n < heap->vars_used && v->is_bound);
- if (context->is_guard) {
- #if HALFWORD_HEAP
- if (!v->first_guard_label) {
- v->first_guard_label = DMC_STACK_NUM(*text);
- ASSERT(v->first_guard_label);
- instr = matchPushVGuard; /* may be changed to PushVResult below */
- }
- #endif
- }
- else { /* body */
- #if HALFWORD_HEAP
- if (v->first_guard_label) {
- /* Avoid double-copy, copy to result heap at first encounter in guard */
- DMC_POKE(*text, v->first_guard_label, matchPushVResult);
- v->is_in_body = 1;
- }
- #endif
- if (!v->is_in_body) {
+ if (!context->is_guard) {
+ if(!v->is_in_body) {
instr = matchPushVResult;
v->is_in_body = 1;
}
@@ -3655,7 +3944,7 @@ static DMCRet dmc_variable(DMCContext *context,
Uint n = db_is_variable(t);
if (n >= heap->vars_used || !heap->vars[n].is_bound) {
- RETURN_VAR_ERROR("Variable $%d is unbound.", n, context, *constant);
+ RETURN_VAR_ERROR("Variable $%%d is unbound.", n, context, *constant);
}
dmc_add_pushv_variant(context, heap, text, n);
@@ -3987,7 +4276,30 @@ static DMCRet dmc_exception_trace(DMCContext *context,
return retOk;
}
-
+static int check_trace(const char* op,
+ DMCContext *context,
+ int *constant,
+ int need_cflags,
+ int allow_in_guard,
+ DMCRet* retp)
+{
+ if (!(context->cflags & DCOMP_TRACE)) {
+ *retp = RETURN_ERROR_X(-1, context, *constant, "Special form '%s' "
+ "used in wrong dialect.", op);
+ return 0;
+ }
+ if ((context->cflags & need_cflags) != need_cflags) {
+ *retp = RETURN_ERROR_X(-1, context, *constant, "Special form '%s' "
+ "not allow for this trace event.", op);
+ return 0;
+ }
+ if (context->is_guard && !allow_in_guard) {
+ *retp = RETURN_ERROR_X(-1, context, *constant, "Special form '%s' "
+ "called in guard context.", op);
+ return 0;
+ }
+ return 1;
+}
static DMCRet dmc_is_seq_trace(DMCContext *context,
DMCHeap *heap,
@@ -3997,12 +4309,11 @@ static DMCRet dmc_is_seq_trace(DMCContext *context,
{
Eterm *p = tuple_val(t);
Uint a = arityval(*p);
+ DMCRet ret;
- if (!(context->cflags & DCOMP_TRACE)) {
- RETURN_ERROR("Special form 'is_seq_trace' used in wrong dialect.",
- context,
- *constant);
- }
+ if (!check_trace("is_seq_trace", context, constant, DCOMP_ALLOW_TRACE_OPS, 1, &ret))
+ return ret;
+
if (a != 1) {
RETURN_TERM_ERROR("Special form 'is_seq_trace' called with "
"arguments in %T.", t, context, *constant);
@@ -4026,16 +4337,8 @@ static DMCRet dmc_set_seq_token(DMCContext *context,
DMCRet ret;
int c;
-
- if (!(context->cflags & DCOMP_TRACE)) {
- RETURN_ERROR("Special form 'set_seq_token' used in wrong dialect.",
- context,
- *constant);
- }
- if (context->is_guard) {
- RETURN_ERROR("Special form 'set_seq_token' called in "
- "guard context.", context, *constant);
- }
+ if (!check_trace("set_seq_trace", context, constant, DCOMP_ALLOW_TRACE_OPS, 0, &ret))
+ return ret;
if (a != 3) {
RETURN_TERM_ERROR("Special form 'set_seq_token' called with wrong "
@@ -4072,16 +4375,11 @@ static DMCRet dmc_get_seq_token(DMCContext *context,
{
Eterm *p = tuple_val(t);
Uint a = arityval(*p);
+ DMCRet ret;
+
+ if (!check_trace("get_seq_token", context, constant, DCOMP_ALLOW_TRACE_OPS, 0, &ret))
+ return ret;
- if (!(context->cflags & DCOMP_TRACE)) {
- RETURN_ERROR("Special form 'get_seq_token' used in wrong dialect.",
- context,
- *constant);
- }
- if (context->is_guard) {
- RETURN_ERROR("Special form 'get_seq_token' called in "
- "guard context.", context, *constant);
- }
if (a != 1) {
RETURN_TERM_ERROR("Special form 'get_seq_token' called with "
"arguments in %T.", t, context,
@@ -4145,16 +4443,10 @@ static DMCRet dmc_process_dump(DMCContext *context,
{
Eterm *p = tuple_val(t);
Uint a = arityval(*p);
-
- if (!(context->cflags & DCOMP_TRACE)) {
- RETURN_ERROR("Special form 'process_dump' used in wrong dialect.",
- context,
- *constant);
- }
- if (context->is_guard) {
- RETURN_ERROR("Special form 'process_dump' called in "
- "guard context.", context, *constant);
- }
+ DMCRet ret;
+
+ if (!check_trace("process_dump", context, constant, DCOMP_ALLOW_TRACE_OPS, 0, &ret))
+ return ret;
if (a != 1) {
RETURN_TERM_ERROR("Special form 'process_dump' called with "
@@ -4178,17 +4470,8 @@ static DMCRet dmc_enable_trace(DMCContext *context,
DMCRet ret;
int c;
-
- if (!(context->cflags & DCOMP_TRACE)) {
- RETURN_ERROR("Special form 'enable_trace' used in wrong dialect.",
- context,
- *constant);
- }
- if (context->is_guard) {
- RETURN_ERROR("Special form 'enable_trace' called in guard context.",
- context,
- *constant);
- }
+ if (!check_trace("enable_trace", context, constant, DCOMP_ALLOW_TRACE_OPS, 0, &ret))
+ return ret;
switch (a) {
case 2:
@@ -4237,18 +4520,9 @@ static DMCRet dmc_disable_trace(DMCContext *context,
Uint a = arityval(*p);
DMCRet ret;
int c;
-
- if (!(context->cflags & DCOMP_TRACE)) {
- RETURN_ERROR("Special form 'disable_trace' used in wrong dialect.",
- context,
- *constant);
- }
- if (context->is_guard) {
- RETURN_ERROR("Special form 'disable_trace' called in guard context.",
- context,
- *constant);
- }
+ if (!check_trace("disable_trace", context, constant, DCOMP_ALLOW_TRACE_OPS, 0, &ret))
+ return ret;
switch (a) {
case 2:
@@ -4298,17 +4572,8 @@ static DMCRet dmc_trace(DMCContext *context,
DMCRet ret;
int c;
-
- if (!(context->cflags & DCOMP_TRACE)) {
- RETURN_ERROR("Special form 'trace' used in wrong dialect.",
- context,
- *constant);
- }
- if (context->is_guard) {
- RETURN_ERROR("Special form 'trace' called in guard context.",
- context,
- *constant);
- }
+ if (!check_trace("trace", context, constant, DCOMP_ALLOW_TRACE_OPS, 0, &ret))
+ return ret;
switch (a) {
case 3:
@@ -4369,16 +4634,11 @@ static DMCRet dmc_caller(DMCContext *context,
{
Eterm *p = tuple_val(t);
Uint a = arityval(*p);
+ DMCRet ret;
- if (!(context->cflags & DCOMP_TRACE)) {
- RETURN_ERROR("Special form 'caller' used in wrong dialect.",
- context,
- *constant);
- }
- if (context->is_guard) {
- RETURN_ERROR("Special form 'caller' called in "
- "guard context.", context, *constant);
- }
+ if (!check_trace("caller", context, constant,
+ (DCOMP_CALL_TRACE|DCOMP_ALLOW_TRACE_OPS), 0, &ret))
+ return ret;
if (a != 1) {
RETURN_TERM_ERROR("Special form 'caller' called with "
@@ -4404,15 +4664,8 @@ static DMCRet dmc_silent(DMCContext *context,
DMCRet ret;
int c;
- if (!(context->cflags & DCOMP_TRACE)) {
- RETURN_ERROR("Special form 'silent' used in wrong dialect.",
- context,
- *constant);
- }
- if (context->is_guard) {
- RETURN_ERROR("Special form 'silent' called in "
- "guard context.", context, *constant);
- }
+ if (!check_trace("silent", context, constant, DCOMP_ALLOW_TRACE_OPS, 0, &ret))
+ return ret;
if (a != 2) {
RETURN_TERM_ERROR("Special form 'silent' called with wrong "
@@ -4553,7 +4806,7 @@ static DMCRet dmc_fun(DMCContext *context,
DMC_PUSH(*text, matchCall3);
break;
default:
- erl_exit(1,"ets:match() internal error, "
+ erts_exit(ERTS_ERROR_EXIT,"ets:match() internal error, "
"guard with more than 3 arguments.");
}
DMC_PUSH(*text, (UWord) b->biff);
@@ -4580,7 +4833,10 @@ static DMCRet dmc_expr(DMCContext *context,
return ret;
break;
case TAG_PRIMARY_BOXED:
- if (!BOXED_IS_TUPLE(t)) {
+ if (is_map(t)) {
+ return dmc_map(context, heap, text, t, constant);
+ }
+ if (!is_tuple(t)) {
goto simple_term;
}
p = tuple_val(t);
@@ -4830,7 +5086,7 @@ static Uint my_size_object(Eterm t)
tmp == am_const) {
sum += size_object(tuple_val(t)[2]);
} else {
- erl_exit(1,"Internal error, sizing unrecognized object in "
+ erts_exit(ERTS_ERROR_EXIT,"Internal error, sizing unrecognized object in "
"(d)ets:match compilation.");
}
break;
@@ -4855,7 +5111,7 @@ static Eterm my_copy_struct(Eterm t, Eterm **hp, ErlOffHeap* off_heap)
*hp += 2;
break;
case TAG_PRIMARY_BOXED:
- if (BOXED_IS_TUPLE(t)) {
+ if (is_tuple(t)) {
if (arityval(*tuple_val(t)) == 1 &&
is_tuple(a = tuple_val(t)[1])) {
Uint i,n;
@@ -4875,7 +5131,7 @@ static Eterm my_copy_struct(Eterm t, Eterm **hp, ErlOffHeap* off_heap)
sz = size_object(b);
ret = copy_struct(b,sz,hp,off_heap);
} else {
- erl_exit(1, "Trying to constant-copy non constant expression "
+ erts_exit(ERTS_ERROR_EXIT, "Trying to constant-copy non constant expression "
"0x%bex in (d)ets:match compilation.", t);
}
} else {
@@ -4949,13 +5205,16 @@ static Eterm match_spec_test(Process *p, Eterm against, Eterm spec, int trace)
return THE_NON_VALUE;
}
if (trace) {
- lint_res = db_match_set_lint(p, spec, DCOMP_TRACE | DCOMP_FAKE_DESTRUCTIVE);
- mps = db_match_set_compile(p, spec, DCOMP_TRACE | DCOMP_FAKE_DESTRUCTIVE);
+ const Uint cflags = (DCOMP_TRACE | DCOMP_FAKE_DESTRUCTIVE |
+ DCOMP_CALL_TRACE | DCOMP_ALLOW_TRACE_OPS);
+ lint_res = db_match_set_lint(p, spec, cflags);
+ mps = db_match_set_compile(p, spec, cflags);
} else {
- lint_res = db_match_set_lint(p, spec, DCOMP_TABLE | DCOMP_FAKE_DESTRUCTIVE);
- mps = db_match_set_compile(p, spec, DCOMP_TABLE | DCOMP_FAKE_DESTRUCTIVE);
+ const Uint cflags = (DCOMP_TABLE | DCOMP_FAKE_DESTRUCTIVE);
+ lint_res = db_match_set_lint(p, spec, cflags);
+ mps = db_match_set_compile(p, spec, cflags);
}
-
+
if (mps == NULL) {
hp = HAlloc(p,3);
ret = TUPLE2(hp, am_error, lint_res);
@@ -4986,7 +5245,8 @@ static Eterm match_spec_test(Process *p, Eterm against, Eterm spec, int trace)
}
save_cp = p->cp;
p->cp = NULL;
- res = erts_match_set_run(p, mps, arr, n,
+ res = erts_match_set_run_trace(p, p,
+ mps, arr, n,
ERTS_PAM_COPY_RESULT|ERTS_PAM_IGNORE_TRACE_SILENT,
&ret_flags);
p->cp = save_cp;
@@ -5062,18 +5322,21 @@ void db_free_tmp_uncompressed(DbTerm* obj)
Eterm db_match_dbterm(DbTableCommon* tb, Process* c_p, Binary* bprog,
int all, DbTerm* obj, Eterm** hpp, Uint extra)
{
+ enum erts_pam_run_flags flags;
Uint32 dummy;
- Eterm* base;
Eterm res;
if (tb->compress) {
obj = db_alloc_tmp_uncompressed(tb, obj);
- base = NULL;
}
- else base = HALFWORD_HEAP ? obj->tpl : NULL;
- res = db_prog_match(c_p, bprog, make_tuple_rel(obj->tpl,base), base, NULL, 0,
- ERTS_PAM_COPY_RESULT|ERTS_PAM_CONTIGUOUS_TUPLE, &dummy);
+ flags = (hpp ?
+ ERTS_PAM_COPY_RESULT | ERTS_PAM_CONTIGUOUS_TUPLE :
+ ERTS_PAM_TMP_RESULT | ERTS_PAM_CONTIGUOUS_TUPLE);
+
+ res = db_prog_match(c_p, c_p,
+ bprog, make_tuple(obj->tpl), NULL, 0,
+ flags, &dummy);
if (is_value(res) && hpp!=NULL) {
*hpp = HAlloc(c_p, extra);
@@ -5126,6 +5389,18 @@ void db_match_dis(Binary *bp)
++t;
erts_printf("Tuple\t%beu\n", n);
break;
+ case matchMap:
+ ++t;
+ n = *t;
+ ++t;
+ erts_printf("Map\t%beu\n", n);
+ break;
+ case matchKey:
+ ++t;
+ p = (Eterm) *t;
+ ++t;
+ erts_printf("Key\t%p (%T)\n", t, p);
+ break;
case matchPushT:
++t;
n = *t;
@@ -5136,10 +5411,20 @@ void db_match_dis(Binary *bp)
++t;
erts_printf("PushL\n");
break;
+ case matchPushM:
+ ++t;
+ n = *t;
+ ++t;
+ erts_printf("PushM\t%beu\n", n);
+ break;
case matchPop:
++t;
erts_printf("Pop\n");
break;
+ case matchSwap:
+ ++t;
+ erts_printf("Swap\n");
+ break;
case matchBind:
++t;
n = *t;
@@ -5161,24 +5446,35 @@ void db_match_dis(Binary *bp)
case matchEqRef:
++t;
{
- RefThing *rt = (RefThing *) t;
+ Uint32 *num;
int ri;
- n = thing_arityval(rt->header);
- erts_printf("EqRef\t(%d) {", (int) n);
+
+ if (is_ordinary_ref_thing(t)) {
+ ErtsORefThing *rt = (ErtsORefThing *) t;
+ num = rt->num;
+ t += TermWords(ERTS_REF_THING_SIZE);
+ }
+ else {
+ ErtsMRefThing *mrt = (ErtsMRefThing *) t;
+ ASSERT(is_magic_ref_thing(t));
+ num = mrt->mb->refn;
+ t += TermWords(ERTS_MAGIC_REF_THING_SIZE);
+ }
+
+ erts_printf("EqRef\t(%d) {", (int) ERTS_REF_NUMBERS);
first = 1;
- for (ri = 0; ri < n; ++ri) {
+ for (ri = 0; ri < ERTS_REF_NUMBERS; ++ri) {
if (first)
first = 0;
else
erts_printf(", ");
-#if defined(ARCH_64) && !HALFWORD_HEAP
- erts_printf("0x%016bex", rt->data.ui[ri]);
+#if defined(ARCH_64)
+ erts_printf("0x%016bex", num[ri]);
#else
- erts_printf("0x%08bex", rt->data.ui[ri]);
+ erts_printf("0x%08bex", num[ri]);
#endif
}
}
- t += TermWords(REF_THING_SIZE);
erts_printf("}\n");
break;
case matchEqBig:
@@ -5195,7 +5491,7 @@ void db_match_dis(Binary *bp)
first = 0;
else
erts_printf(", ");
-#if defined(ARCH_64) && !HALFWORD_HEAP
+#if defined(ARCH_64)
erts_printf("0x%016bex", *et);
#else
erts_printf("0x%08bex", *et);
@@ -5252,6 +5548,18 @@ void db_match_dis(Binary *bp)
++t;
erts_printf("MkTuple\t%beu\n", n);
break;
+ case matchMkFlatMap:
+ ++t;
+ n = *t;
+ ++t;
+ erts_printf("MkFlatMap\t%beu\n", n);
+ break;
+ case matchMkHashMap:
+ ++t;
+ n = *t;
+ ++t;
+ erts_printf("MkHashMap\t%beu\n", n);
+ break;
case matchOr:
++t;
n = *t;
@@ -5306,13 +5614,6 @@ void db_match_dis(Binary *bp)
++t;
erts_printf("PushV\t%beu\n", n);
break;
- #if HALFWORD_HEAP
- case matchPushVGuard:
- n = (Uint) *++t;
- ++t;
- erts_printf("PushVGuard\t%beu\n", n);
- break;
- #endif
case matchPushVResult:
n = (Uint) *++t;
++t;
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index 328b19dfc9..6b126f35d6 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -22,6 +23,7 @@
#include "global.h"
#include "erl_message.h"
+#include "erl_bif_unique.h"
/*#define HARDDEBUG 1*/
@@ -73,8 +75,9 @@ typedef struct db_term {
*/
} DbTerm;
-union db_table;
-typedef union db_table DbTable;
+#define DB_MUST_RESIZE 1
+#define DB_NEW_OBJECT 2
+#define DB_INC_TRY_GROW 4
/* Info about a database entry while it's being updated
* (by update_counter or update_element)
@@ -84,11 +87,8 @@ typedef struct {
DbTerm* dbterm;
void** bp; /* {Hash|Tree}DbTerm** */
Uint new_size;
- int mustResize;
+ int flags;
void* lck;
-#if HALFWORD_HEAP
- unsigned char* abs_vec; /* [i] true if dbterm->tpl[i] is absolute Eterm */
-#endif
} DbUpdateHandle;
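The boolean mustResize field becomes a small bitmask, so one handle can record several pending follow-up actions. A hedged sketch of how the flags compose (the conditions are illustrative):

    handle.flags = 0;
    if (term_grew)           /* hypothetical condition */
        handle.flags |= DB_MUST_RESIZE;
    if (inserted_new_object) /* hypothetical condition */
        handle.flags |= DB_NEW_OBJECT | DB_INC_TRY_GROW;
    if (handle.flags & DB_MUST_RESIZE) {
        /* reallocate the stored term before releasing the lock */
    }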
@@ -135,44 +135,58 @@ typedef struct db_table_method
Eterm slot,
Eterm* ret);
int (*db_select_chunk)(Process* p,
- DbTable* tb, /* [in out] */
+ DbTable* tb, /* [in out] */
+ Eterm tid,
Eterm pattern,
Sint chunk_size,
int reverse,
Eterm* ret);
int (*db_select)(Process* p,
- DbTable* tb, /* [in out] */
+ DbTable* tb, /* [in out] */
+ Eterm tid,
Eterm pattern,
int reverse,
Eterm* ret);
int (*db_select_delete)(Process* p,
- DbTable* tb, /* [in out] */
+ DbTable* tb, /* [in out] */
+ Eterm tid,
Eterm pattern,
Eterm* ret);
int (*db_select_continue)(Process* p,
- DbTable* tb, /* [in out] */
+ DbTable* tb, /* [in out] */
Eterm continuation,
Eterm* ret);
int (*db_select_delete_continue)(Process* p,
- DbTable* tb, /* [in out] */
+ DbTable* tb, /* [in out] */
Eterm continuation,
Eterm* ret);
int (*db_select_count)(Process* p,
- DbTable* tb, /* [in out] */
+ DbTable* tb, /* [in out] */
+ Eterm tid,
Eterm pattern,
Eterm* ret);
int (*db_select_count_continue)(Process* p,
DbTable* tb, /* [in out] */
Eterm continuation,
Eterm* ret);
+ int (*db_select_replace)(Process* p,
+ DbTable* tb, /* [in out] */
+ Eterm tid,
+ Eterm pattern,
+ Eterm* ret);
+ int (*db_select_replace_continue)(Process* p,
+ DbTable* tb, /* [in out] */
+ Eterm continuation,
+ Eterm* ret);
+ int (*db_take)(Process *, DbTable *, Eterm, Eterm *);
int (*db_delete_all_objects)(Process* p,
DbTable* db /* [in out] */ );
int (*db_free_table)(DbTable* db /* [in out] */ );
- int (*db_free_table_continue)(DbTable* db); /* [in out] */
+ SWord (*db_free_table_continue)(DbTable* db, SWord reds);
- void (*db_print)(int to,
+ void (*db_print)(fmtfn_t to,
void* to_arg,
int show,
DbTable* tb /* [in out] */ );
@@ -180,26 +194,43 @@ typedef struct db_table_method
void (*db_foreach_offheap)(DbTable* db, /* [in out] */
void (*func)(ErlOffHeap *, void *),
void *arg);
- void (*db_check_table)(DbTable* tb);
- /* Lookup a dbterm for updating. Return false if not found.
- */
- int (*db_lookup_dbterm)(DbTable*, Eterm key,
- DbUpdateHandle* handle); /* [out] */
+ /* Lookup a dbterm for updating. Return false if not found. */
+ int (*db_lookup_dbterm)(Process *, DbTable *, Eterm key, Eterm obj,
+ DbUpdateHandle* handle);
- /* Must be called for each db_lookup_dbterm that returned true,
- ** even if dbterm was not updated.
- */
- void (*db_finalize_dbterm)(DbUpdateHandle* handle);
+ /* Must be called for each db_lookup_dbterm that returned true, even if
+ ** dbterm was not updated. If the handle was of a new object and cret is
+ ** not DB_ERROR_NONE, the object is removed from the table. */
+ void (*db_finalize_dbterm)(int cret, DbUpdateHandle* handle);
} DbTableMethod;
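The lookup/finalize contract sketched below follows the comments in the hunk above: db_lookup_dbterm() now also receives the object so it can insert on a miss, and db_finalize_dbterm() takes the caller's result code so a failed update on a new object is rolled back. do_update() is a hypothetical stand-in for the update_counter/update_element step:

    DbUpdateHandle handle;
    if (tb->common.meth->db_lookup_dbterm(p, tb, key, obj, &handle)) {
        int cret = do_update(&handle);                 /* hypothetical */
        /* must always follow a successful lookup; a bad cret on a
         * DB_NEW_OBJECT handle removes the fresh object again */
        tb->common.meth->db_finalize_dbterm(cret, &handle);
    }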
typedef struct db_fixation {
- Eterm pid;
+ /* Node in fixed_tabs list */
+ struct {
+ struct db_fixation *next, *prev;
+ Binary* btid;
+ } tabs;
+
+ /* Node in fixing_procs tree */
+ struct {
+ struct db_fixation *left, *right, *parent;
+ int is_red;
+ Process* p;
+ } procs;
+
+ /* Number of fixations on table from procs.p
+ * Protected by table write lock or read lock + fixlock
+ */
Uint counter;
- struct db_fixation *next;
} DbFixation;
+typedef struct {
+ DbTable *next;
+ DbTable *prev;
+} DbTableList;
+
/*
* This structure contains data for all different types of database
* tables. Note that these fields must match the same fields
@@ -209,53 +240,56 @@ typedef struct db_fixation {
*/
typedef struct db_table_common {
- erts_refc_t ref; /* fixation counter */
-#ifdef ERTS_SMP
- erts_smp_rwmtx_t rwlock; /* rw lock on table */
- erts_smp_mtx_t fixlock; /* Protects fixations,megasec,sec,microsec */
+ erts_refc_t refc; /* reference count of table struct */
+ erts_refc_t fix_count;/* fixation counter */
+ DbTableList all;
+ DbTableList owned;
+ erts_rwmtx_t rwlock; /* rw lock on table */
+ erts_mtx_t fixlock; /* Protects fixing_procs and time */
int is_thread_safe; /* No fine locking inside table needed */
Uint32 type; /* table type, *read only* after creation */
-#endif
Eterm owner; /* Pid of the creator */
Eterm heir; /* Pid of the heir */
UWord heir_data; /* To send in ETS-TRANSFER (is_immed or (DbTerm*)) */
Uint64 heir_started_interval; /* To further identify the heir */
Eterm the_name; /* an atom */
- Eterm id; /* atom | integer */
+ Binary *btid;
DbTableMethod* meth; /* table methods */
- erts_smp_atomic_t nitems; /* Total number of items in table */
- erts_smp_atomic_t memory_size;/* Total memory size. NOTE: in bytes! */
- Uint megasec,sec,microsec; /* Last fixation time */
- DbFixation* fixations; /* List of processes who have done safe_fixtable,
+ erts_atomic_t nitems; /* Total number of items in table */
+ erts_atomic_t memory_size;/* Total memory size. NOTE: in bytes! */
+ struct { /* Last fixation time */
+ ErtsMonotonicTime monotonic;
+ ErtsMonotonicTime offset;
+ } time;
+ DbFixation* fixing_procs; /* Tree of processes who have done safe_fixtable,
"local" fixations not included. */
/* All 32-bit fields */
Uint32 status; /* bit masks defined below */
- int slot; /* slot index in meta_main_tab */
int keypos; /* defaults to 1 */
int compress;
} DbTableCommon;
/* These are status bit patterns */
-#define DB_NORMAL (1 << 0)
-#define DB_PRIVATE (1 << 1)
-#define DB_PROTECTED (1 << 2)
-#define DB_PUBLIC (1 << 3)
-#define DB_BAG (1 << 4)
-#define DB_SET (1 << 5)
-/*#define DB_LHASH (1 << 6)*/
-#define DB_FINE_LOCKED (1 << 7) /* fine grained locking enabled */
-#define DB_DUPLICATE_BAG (1 << 8)
-#define DB_ORDERED_SET (1 << 9)
-#define DB_DELETE (1 << 10) /* table is being deleted */
-#define DB_FREQ_READ (1 << 11)
-
-#define ERTS_ETS_TABLE_TYPES (DB_BAG|DB_SET|DB_DUPLICATE_BAG|DB_ORDERED_SET|DB_FINE_LOCKED|DB_FREQ_READ)
+#define DB_PRIVATE (1 << 0)
+#define DB_PROTECTED (1 << 1)
+#define DB_PUBLIC (1 << 2)
+#define DB_DELETE (1 << 3) /* table is being deleted */
+#define DB_SET (1 << 4)
+#define DB_BAG (1 << 5)
+#define DB_DUPLICATE_BAG (1 << 6)
+#define DB_ORDERED_SET (1 << 7)
+#define DB_FINE_LOCKED (1 << 8) /* write_concurrency */
+#define DB_FREQ_READ (1 << 9) /* read_concurrency */
+#define DB_NAMED_TABLE (1 << 10)
+
+#define ERTS_ETS_TABLE_TYPES (DB_BAG|DB_SET|DB_DUPLICATE_BAG|DB_ORDERED_SET\
+ |DB_FINE_LOCKED|DB_FREQ_READ|DB_NAMED_TABLE)
#define IS_HASH_TABLE(Status) (!!((Status) & \
(DB_BAG | DB_SET | DB_DUPLICATE_BAG)))
#define IS_TREE_TABLE(Status) (!!((Status) & \
DB_ORDERED_SET))
-#define NFIXED(T) (erts_refc_read(&(T)->common.ref,0))
+#define NFIXED(T) (erts_refc_read(&(T)->common.fix_count,0))
#define IS_FIXED(T) (NFIXED(T) != 0)
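Since the id field is gone, whether a table is named is now a status bit like the type bits. A minimal sketch, assuming tb points at a table with the common header above:

    if (tb->common.status & DB_NAMED_TABLE) {
        /* created with the named_table option; the name lives in the_name */
    }
    ASSERT(IS_HASH_TABLE(tb->common.status) || IS_TREE_TABLE(tb->common.status));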
/*
@@ -283,10 +317,10 @@ ERTS_GLB_INLINE Eterm db_copy_key(Process* p, DbTable* tb, DbTerm* obj)
Eterm key = GETKEY(tb, obj->tpl);
if IS_CONST(key) return key;
else {
- Uint size = size_object_rel(key, obj->tpl);
+ Uint size = size_object(key);
Eterm* hp = HAlloc(p, size);
- Eterm res = copy_struct_rel(key, size, &hp, &MSO(p), obj->tpl, NULL);
- ASSERT(eq_rel(res,NULL,key,obj->tpl));
+ Eterm res = copy_struct(key, size, &hp, &MSO(p));
+ ASSERT(EQ(res,key));
return res;
}
}
@@ -298,14 +332,14 @@ ERTS_GLB_INLINE Eterm db_copy_object_from_ets(DbTableCommon* tb, DbTerm* bp,
return db_copy_from_comp(tb, bp, hpp, off_heap);
}
else {
- return copy_shallow_rel(bp->tpl, bp->size, hpp, off_heap, bp->tpl);
+ return copy_shallow(bp->tpl, bp->size, hpp, off_heap);
}
}
ERTS_GLB_INLINE int db_eq(DbTableCommon* tb, Eterm a, DbTerm* b)
{
if (!tb->compress) {
- return eq_rel(a, NULL, make_tuple_rel(b->tpl,b->tpl), b->tpl);
+ return EQ(a, make_tuple(b->tpl));
}
else {
return db_eq_comp(tb, a, b);
@@ -339,6 +373,7 @@ void* db_store_term(DbTableCommon *tb, DbTerm* old, Uint offset, Eterm obj);
void* db_store_term_comp(DbTableCommon *tb, DbTerm* old, Uint offset, Eterm obj);
Eterm db_copy_element_from_ets(DbTableCommon* tb, Process* p, DbTerm* obj,
Uint pos, Eterm** hpp, Uint extra);
+int db_has_map(Eterm obj);
int db_has_variable(Eterm obj);
int db_is_variable(Eterm obj);
void db_do_update_element(DbUpdateHandle* handle,
@@ -349,7 +384,8 @@ Eterm db_add_counter(Eterm** hpp, Wterm counter, Eterm incr);
Eterm db_match_set_lint(Process *p, Eterm matchexpr, Uint flags);
Binary *db_match_set_compile(Process *p, Eterm matchexpr,
Uint flags);
-void erts_db_match_prog_destructor(Binary *);
+int db_match_keeps_key(int keypos, Eterm match, Eterm guard, Eterm body);
+int erts_db_match_prog_destructor(Binary *);
typedef struct match_prog {
ErlHeapFragment *term_save; /* Only if needed, a list of message
@@ -420,6 +456,11 @@ typedef struct dmc_err_info {
#define DCOMP_FAKE_DESTRUCTIVE ((Uint) 8) /* When this is active, no setting of
trace control words or seq_trace tokens will be done. */
+/* Allow lock seizing operations on the tracee and 3rd party processes */
+#define DCOMP_ALLOW_TRACE_OPS ((Uint) 0x10)
+
+/* This is call trace */
+#define DCOMP_CALL_TRACE ((Uint) 0x20)
Binary *db_match_compile(Eterm *matchexpr, Eterm *guards,
Eterm *body, int num_matches,
@@ -430,7 +471,8 @@ Binary *db_match_compile(Eterm *matchexpr, Eterm *guards,
Eterm db_match_dbterm(DbTableCommon* tb, Process* c_p, Binary* bprog,
int all, DbTerm* obj, Eterm** hpp, Uint extra);
-Eterm db_prog_match(Process *p, Binary *prog, Eterm term, Eterm* base,
+Eterm db_prog_match(Process *p, Process *self,
+ Binary *prog, Eterm term,
Eterm *termp, int arity,
enum erts_pam_run_flags in_flags,
Uint32 *return_flags /* Zeroed on enter */);
@@ -444,29 +486,54 @@ Eterm db_format_dmc_err_info(Process *p, DMCErrInfo *ei);
void db_free_dmc_err_info(DMCErrInfo *ei);
/* Completely free's an error info structure, including all recorded
errors */
-Eterm db_make_mp_binary(Process *p, Binary *mp, Eterm **hpp);
-/* Convert a match program to a erlang "magic" binary to be returned to userspace,
- increments the reference counter. */
-int erts_db_is_compiled_ms(Eterm term);
+
+ERTS_GLB_INLINE Eterm erts_db_make_match_prog_ref(Process *p, Binary *mp, Eterm **hpp);
+ERTS_GLB_INLINE Binary *erts_db_get_match_prog_binary(Eterm term);
+ERTS_GLB_INLINE Binary *erts_db_get_match_prog_binary_unchecked(Eterm term);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+/*
+ * Convert a match program to a "magic" ref to return up to erlang
+ */
+ERTS_GLB_INLINE Eterm erts_db_make_match_prog_ref(Process *p, Binary *mp, Eterm **hpp)
+{
+ return erts_mk_magic_ref(hpp, &MSO(p), mp);
+}
+
+ERTS_GLB_INLINE Binary *
+erts_db_get_match_prog_binary_unchecked(Eterm term)
+{
+ Binary *bp = erts_magic_ref2bin(term);
+ ASSERT(bp->intern.flags & BIN_FLAG_MAGIC);
+ ASSERT((ERTS_MAGIC_BIN_DESTRUCTOR(bp) == erts_db_match_prog_destructor));
+ return bp;
+}
+
+ERTS_GLB_INLINE Binary *
+erts_db_get_match_prog_binary(Eterm term)
+{
+ Binary *bp;
+ if (!is_internal_magic_ref(term))
+ return NULL;
+ bp = erts_magic_ref2bin(term);
+ ASSERT(bp->intern.flags & BIN_FLAG_MAGIC);
+ if (ERTS_MAGIC_BIN_DESTRUCTOR(bp) != erts_db_match_prog_destructor)
+ return NULL;
+ return bp;
+}
+
+#endif
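A hedged round-trip sketch for the inline helpers above: a compiled match program is wrapped in a magic ref for Erlang and unwrapped on the way back. The heap size constant is an assumption borrowed from the ref-printing hunk earlier in this diff:

    Eterm *hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
    Eterm mp_ref = erts_db_make_match_prog_ref(p, mp, &hp);
    /* ... hand mp_ref to Erlang code and receive it back as 'term' ... */
    Binary *bp = erts_db_get_match_prog_binary(term);
    if (bp != NULL) {
        MatchProg *prog = Binary2MatchProg(bp);
        (void) prog;
    }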
/*
** Convenience when compiling into Binary structures
*/
#define IsMatchProgBinary(BP) \
- (((BP)->flags & BIN_FLAG_MAGIC) \
+ (((BP)->intern.flags & BIN_FLAG_MAGIC) \
&& ERTS_MAGIC_BIN_DESTRUCTOR((BP)) == erts_db_match_prog_destructor)
#define Binary2MatchProg(BP) \
(ASSERT(IsMatchProgBinary((BP))), \
((MatchProg *) ERTS_MAGIC_BIN_DATA((BP))))
-/*
-** Debugging
-*/
-#ifdef HARDDEBUG
-void db_check_tables(void); /* in db.c */
-#define CHECK_TABLES() db_check_tables()
-#else
-#define CHECK_TABLES()
-#endif
#endif /* _DB_UTIL_H */
diff --git a/erts/emulator/beam/erl_debug.c b/erts/emulator/beam/erl_debug.c
index 50bdc79506..bf8244564a 100644
--- a/erts/emulator/beam/erl_debug.c
+++ b/erts/emulator/beam/erl_debug.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -59,10 +60,10 @@ static const char dashes[PTR_SIZE+3] = {
void pps(Process*, Eterm*);
void ptd(Process*, Eterm);
-void paranoid_display(int, void*, Process*, Eterm);
+void paranoid_display(fmtfn_t, void*, Process*, Eterm);
static int dcount;
-static int pdisplay1(int to, void *to_arg, Process* p, Eterm obj);
+static int pdisplay1(fmtfn_t to, void *to_arg, Process* p, Eterm obj);
void ptd(Process* p, Eterm x)
{
@@ -76,14 +77,14 @@ void ptd(Process* p, Eterm x)
*/
void
-paranoid_display(int to, void *to_arg, Process* p, Eterm obj)
+paranoid_display(fmtfn_t to, void *to_arg, Process* p, Eterm obj)
{
dcount = 100000;
pdisplay1(to, to_arg, p, obj);
}
static int
-pdisplay1(int to, void *to_arg, Process* p, Eterm obj)
+pdisplay1(fmtfn_t to, void *to_arg, Process* p, Eterm obj)
{
int i, k;
Eterm* nobj;
@@ -129,7 +130,7 @@ pdisplay1(int to, void *to_arg, Process* p, Eterm obj)
Uint32 *ref_num;
erts_print(to, to_arg, "#Ref<%lu", ref_channel_no(obj));
ref_num = ref_numbers(obj);
- for (i = ref_no_of_numbers(obj)-1; i >= 0; i--)
+ for (i = ref_no_numbers(obj)-1; i >= 0; i--)
erts_print(to, to_arg, ",%lu", ref_num[i]);
erts_print(to, to_arg, ">");
break;
@@ -188,6 +189,9 @@ pdisplay1(int to, void *to_arg, Process* p, Eterm obj)
case BINARY_DEF:
erts_print(to, to_arg, "#Bin");
break;
+ case MATCHSTATE_DEF:
+ erts_print(to, to_arg, "#Matchstate");
+ break;
default:
erts_print(to, to_arg, "unknown object %x", obj);
}
@@ -197,7 +201,7 @@ pdisplay1(int to, void *to_arg, Process* p, Eterm obj)
void
pps(Process* p, Eterm* stop)
{
- int to = ERTS_PRINT_STDOUT;
+ fmtfn_t to = ERTS_PRINT_STDOUT;
void *to_arg = NULL;
Eterm* sp = STACK_START(p) - 1;
@@ -251,14 +255,14 @@ void erts_check_stack(Process *p)
Eterm *stack_end = p->htop;
if (p->stop > stack_start)
- erl_exit(1,
+ erts_exit(ERTS_ERROR_EXIT,
"<%lu.%lu.%lu>: Stack underflow\n",
internal_pid_channel_no(p->common.id),
internal_pid_number(p->common.id),
internal_pid_serial(p->common.id));
if (p->stop < stack_end)
- erl_exit(1,
+ erts_exit(ERTS_ERROR_EXIT,
"<%lu.%lu.%lu>: Stack overflow\n",
internal_pid_channel_no(p->common.id),
internal_pid_number(p->common.id),
@@ -283,7 +287,7 @@ void erts_check_stack(Process *p)
if (in_mbuf)
continue;
- erl_exit(1,
+ erts_exit(ERTS_ERROR_EXIT,
"<%lu.%lu.%lu>: Wild stack pointer\n",
internal_pid_channel_no(p->common.id),
internal_pid_number(p->common.id),
@@ -308,6 +312,8 @@ void erts_check_for_holes(Process* p)
p->last_htop = HEAP_TOP(p);
for (hf = MBUF(p); hf != 0; hf = hf->next) {
+ if (hf == p->heap_hfrag)
+ continue;
if (hf == p->last_mbuf) {
break;
}
@@ -368,7 +374,7 @@ void erts_check_memory(Process *p, Eterm *start, Eterm *end)
#ifdef DEBUG
if (hval == DEBUG_BAD_WORD) {
print_untagged_memory(start, end);
- erl_exit(1, "Uninitialized HAlloc'ed memory found @ 0x%0*lx!\n",
+ erts_exit(ERTS_ERROR_EXIT, "Uninitialized HAlloc'ed memory found @ 0x%0*lx!\n",
PTR_SIZE,(unsigned long)(pos - 1));
}
#endif
@@ -381,7 +387,7 @@ void erts_check_memory(Process *p, Eterm *start, Eterm *end)
if (verify_eterm(p,hval))
continue;
- erl_exit(1, "Wild pointer found @ 0x%0*lx!\n",
+ erts_exit(ERTS_ERROR_EXIT, "Wild pointer found @ 0x%0*lx!\n",
PTR_SIZE,(unsigned long)(pos - 1));
}
}
@@ -391,14 +397,14 @@ void verify_process(Process *p)
#define VERIFY_AREA(name,ptr,sz) { \
int n = (sz); \
while (n--) if(!verify_eterm(p,*(ptr+n))) \
- erl_exit(1,"Wild pointer found in " name " of %T!\n",p->common.id); }
+ erts_exit(ERTS_ERROR_EXIT,"Wild pointer found in " name " of %T!\n",p->common.id); }
#define VERIFY_ETERM(name,eterm) { \
if(!verify_eterm(p,eterm)) \
- erl_exit(1,"Wild pointer found in " name " of %T!\n",p->common.id); }
+ erts_exit(ERTS_ERROR_EXIT,"Wild pointer found in " name " of %T!\n",p->common.id); }
- ErlMessage* mp = p->msg.first;
+ ErtsMessage* mp = p->msg.first;
VERBOSE(DEBUG_MEMORY,("Verify process: %T...\n",p->common.id));
@@ -412,7 +418,7 @@ void verify_process(Process *p)
erts_check_heap(p);
if (p->dictionary)
- VERIFY_AREA("dictionary",p->dictionary->data, p->dictionary->used);
+ VERIFY_AREA("dictionary", ERTS_PD_START(p->dictionary), ERTS_PD_SIZE(p->dictionary));
VERIFY_ETERM("seq trace token",p->seq_trace_token);
VERIFY_ETERM("group leader",p->group_leader);
VERIFY_ETERM("fvalue",p->fvalue);
@@ -527,7 +533,7 @@ static void print_process_memory(Process *p)
PTR_SIZE, "PCB", dashes, dashes, dashes, dashes);
if (p->msg.first != NULL) {
- ErlMessage* mp;
+ ErtsMessage* mp;
erts_printf(" Message Queue:\n");
mp = p->msg.first;
while (mp != NULL) {
@@ -538,8 +544,8 @@ static void print_process_memory(Process *p)
}
if (p->dictionary != NULL) {
- int n = p->dictionary->used;
- Eterm *ptr = p->dictionary->data;
+ int n = ERTS_PD_SIZE(p->dictionary);
+ Eterm *ptr = ERTS_PD_START(p->dictionary);
erts_printf(" Dictionary: ");
while (n--) erts_printf("0x%0*lx ",PTR_SIZE,(unsigned long)ptr++);
erts_printf("\n");
@@ -627,29 +633,4 @@ void print_memory_info(Process *p)
}
erts_printf("+-----------------%s-%s-%s-%s-+\n",dashes,dashes,dashes,dashes);
}
-#if !HEAP_ON_C_STACK && defined(DEBUG)
-Eterm *erts_debug_allocate_tmp_heap(int size, Process *p)
-{
- ErtsSchedulerData *sd = ((p == NULL) ? erts_get_scheduler_data() : ERTS_PROC_GET_SCHDATA(p));
- int offset = sd->num_tmp_heap_used;
-
- ASSERT(offset+size <= TMP_HEAP_SIZE);
- return (sd->tmp_heap)+offset;
-}
-void erts_debug_use_tmp_heap(int size, Process *p)
-{
- ErtsSchedulerData *sd = ((p == NULL) ? erts_get_scheduler_data() : ERTS_PROC_GET_SCHDATA(p));
-
- sd->num_tmp_heap_used += size;
- ASSERT(sd->num_tmp_heap_used <= TMP_HEAP_SIZE);
-}
-void erts_debug_unuse_tmp_heap(int size, Process *p)
-{
- ErtsSchedulerData *sd = ((p == NULL) ? erts_get_scheduler_data() : ERTS_PROC_GET_SCHDATA(p));
-
- sd->num_tmp_heap_used -= size;
- ASSERT(sd->num_tmp_heap_used >= 0);
-}
-#endif
#endif
-
diff --git a/erts/emulator/beam/erl_debug.h b/erts/emulator/beam/erl_debug.h
index af51212281..029320691d 100644
--- a/erts/emulator/beam/erl_debug.h
+++ b/erts/emulator/beam/erl_debug.h
@@ -3,16 +3,17 @@
*
* Copyright Ericsson AB 2004-2012. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -47,6 +48,7 @@
#define DEBUG_THREADS 0x0010 /* Thread-related stuff */
#define DEBUG_PROCESSES 0x0020 /* Process creation and removal */
#define DEBUG_MEMORY 0x0040 /* Display results of memory checks */
+#define DEBUG_SHCOPY 0x0080 /* Sharing-preserving copying of terms */
extern Uint32 verbose;
@@ -91,10 +93,4 @@ extern void print_tagged_memory(Eterm *start, Eterm *end);
extern void print_untagged_memory(Eterm *start, Eterm *end);
extern void print_memory(Process *p);
extern void print_memory_info(Process *p);
-#if defined(DEBUG) && !HEAP_ON_C_STACK
-extern Eterm *erts_debug_allocate_tmp_heap(int, Process *);
-extern void erts_debug_use_tmp_heap(int, Process *);
-extern void erts_debug_unuse_tmp_heap(int, Process *);
-#endif
-
#endif /* _ERL_DEBUG_H_ */
diff --git a/erts/emulator/beam/erl_dirty_bif.tab b/erts/emulator/beam/erl_dirty_bif.tab
new file mode 100644
index 0000000000..10c76d2579
--- /dev/null
+++ b/erts/emulator/beam/erl_dirty_bif.tab
@@ -0,0 +1,87 @@
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2016. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# %CopyrightEnd%
+#
+
+#
+# Static declaration of BIFs that should execute on dirty schedulers.
+#
+# <dirty-bif-decl> ::= <type> <bif>
+# <bif> ::= <module> ":" <name> "/" <arity>
+# <type> ::= dirty-cpu | dirty-io | dirty-cpu-test | dirty-io-test
+#
+# When dirty scheduler support is available, a BIF declared with the
+# 'dirty-cpu' type will unconditionally execute on a dirty CPU scheduler,
+# and a BIF declared with the type 'dirty-io' will unconditionally execute
+# on a dirty IO scheduler. When dirty scheduler support is not available
+# all BIFs will of course execute on normal schedulers.
+#
+# When the emulator has been configured with the debug option
+# '--enable-dirty-schedulers-test', BIFs with the types 'dirty-cpu-test',
+# and 'dirty-io-test' will unconditionally execute on dirty schedulers.
+# When this debug option has not been enabled, these BIFs will be executed
+# on normal schedulers.
+#
+# BIFs marked as 'ubif' in ./bif.tab will be ignored, i.e., will always
+# execute on normal schedulers.
+#
+
+# --- Dirty BIFs ---
+
+dirty-cpu erts_debug:dirty_cpu/2
+dirty-io erts_debug:dirty_io/2
+
+# lcnt_control/1 doesn't need to be dirty.
+dirty-cpu erts_debug:lcnt_control/2
+dirty-cpu erts_debug:lcnt_collect/0
+dirty-cpu erts_debug:lcnt_clear/0
+
+# --- TEST of Dirty BIF functionality ---
+# Functions below will execute on dirty schedulers when emulator has
+# been configured for testing dirty schedulers. This is used for test
+# and debug purposes only. We really do *not* want to execute these
+# on dirty schedulers on a real system.
+
+dirty-cpu-test erlang:'++'/2
+dirty-cpu-test erlang:append/2
+dirty-cpu-test erlang:'--'/2
+dirty-cpu-test erlang:subtract/2
+dirty-cpu-test erlang:iolist_size/1
+dirty-cpu-test erlang:make_tuple/2
+dirty-cpu-test erlang:make_tuple/3
+dirty-cpu-test erlang:append_element/2
+dirty-cpu-test erlang:insert_element/3
+dirty-cpu-test erlang:delete_element/2
+dirty-cpu-test erlang:atom_to_list/1
+dirty-cpu-test erlang:list_to_atom/1
+dirty-cpu-test erlang:list_to_existing_atom/1
+dirty-cpu-test erlang:integer_to_list/1
+dirty-cpu-test erlang:string_to_integer/1
+dirty-cpu-test erlang:list_to_integer/1
+dirty-cpu-test erlang:list_to_integer/2
+dirty-cpu-test erlang:float_to_list/1
+dirty-cpu-test erlang:float_to_list/2
+dirty-cpu-test erlang:float_to_binary/1
+dirty-cpu-test erlang:float_to_binary/2
+dirty-cpu-test erlang:string_to_float/1
+dirty-cpu-test erlang:list_to_float/1
+dirty-cpu-test erlang:binary_to_float/1
+dirty-cpu-test erlang:tuple_to_list/1
+dirty-cpu-test erlang:list_to_tuple/1
+dirty-cpu-test erlang:display/1
+dirty-cpu-test erlang:display_string/1
diff --git a/erts/emulator/beam/erl_driver.h b/erts/emulator/beam/erl_driver.h
index 3ecb379326..d5379a40d5 100644
--- a/erts/emulator/beam/erl_driver.h
+++ b/erts/emulator/beam/erl_driver.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2014. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -36,56 +37,9 @@
# endif
#endif
-#ifdef SIZEOF_CHAR
-# define SIZEOF_CHAR_SAVED__ SIZEOF_CHAR
-# undef SIZEOF_CHAR
-#endif
-#ifdef SIZEOF_SHORT
-# define SIZEOF_SHORT_SAVED__ SIZEOF_SHORT
-# undef SIZEOF_SHORT
-#endif
-#ifdef SIZEOF_INT
-# define SIZEOF_INT_SAVED__ SIZEOF_INT
-# undef SIZEOF_INT
-#endif
-#ifdef SIZEOF_LONG
-# define SIZEOF_LONG_SAVED__ SIZEOF_LONG
-# undef SIZEOF_LONG
-#endif
-#ifdef SIZEOF_LONG_LONG
-# define SIZEOF_LONG_LONG_SAVED__ SIZEOF_LONG_LONG
-# undef SIZEOF_LONG_LONG
-#endif
-#ifdef HALFWORD_HEAP_EMULATOR
-# define HALFWORD_HEAP_EMULATOR_SAVED__ HALFWORD_HEAP_EMULATOR
-# undef HALFWORD_HEAP_EMULATOR
-#endif
-#include "erl_int_sizes_config.h"
-#if defined(SIZEOF_CHAR_SAVED__) && SIZEOF_CHAR_SAVED__ != SIZEOF_CHAR
-# error SIZEOF_CHAR mismatch
-#endif
-#if defined(SIZEOF_SHORT_SAVED__) && SIZEOF_SHORT_SAVED__ != SIZEOF_SHORT
-# error SIZEOF_SHORT mismatch
-#endif
-#if defined(SIZEOF_INT_SAVED__) && SIZEOF_INT_SAVED__ != SIZEOF_INT
-# error SIZEOF_INT mismatch
-#endif
-#if defined(SIZEOF_LONG_SAVED__) && SIZEOF_LONG_SAVED__ != SIZEOF_LONG
-# error SIZEOF_LONG mismatch
-#endif
-#if defined(SIZEOF_LONG_LONG_SAVED__) && SIZEOF_LONG_LONG_SAVED__ != SIZEOF_LONG_LONG
-# error SIZEOF_LONG_LONG mismatch
-#endif
-
-/* This is OK to override by the NIF/driver implementor */
-#if defined(HALFWORD_HEAP_EMULATOR_SAVED__) && !defined(HALFWORD_HEAP_EMULATOR)
-#define HALFWORD_HEAP_EMULATOR HALFWORD_HEAP_EMULATOR_SAVED__
-#endif
-
#include "erl_drv_nif.h"
#include <stdlib.h>
-#include <sys/types.h> /* ssize_t */
#if defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_)
#ifndef STATIC_ERLANG_DRIVER
@@ -93,24 +47,6 @@
#define ERL_DRIVER_TYPES_ONLY
#define WIN32_DYNAMIC_ERL_DRIVER
#endif
-/*
- * This structure can be cast to a WSABUF structure.
- */
-typedef struct _SysIOVec {
- unsigned long iov_len;
- char* iov_base;
-} SysIOVec;
-#else /* Unix */
-# ifdef HAVE_SYS_UIO_H
-# include <sys/types.h>
-# include <sys/uio.h>
-typedef struct iovec SysIOVec;
-# else
-typedef struct {
- char* iov_base;
- size_t iov_len;
-} SysIOVec;
-# endif
#endif
#ifndef EXTERN
@@ -121,11 +57,10 @@ typedef struct {
# endif
#endif
-/* Values for mode arg to driver_select() */
-#define ERL_DRV_READ (1 << 0)
-#define ERL_DRV_WRITE (1 << 1)
-#define ERL_DRV_USE (1 << 2)
-#define ERL_DRV_USE_NO_CALLBACK (ERL_DRV_USE | (1 << 3))
+#define ERL_DRV_READ ((int)ERL_NIF_SELECT_READ)
+#define ERL_DRV_WRITE ((int)ERL_NIF_SELECT_WRITE)
+#define ERL_DRV_USE ((int)ERL_NIF_SELECT_STOP)
+#define ERL_DRV_USE_NO_CALLBACK (ERL_DRV_USE | (ERL_DRV_USE << 1))
/* Old deprecated */
#define DO_READ ERL_DRV_READ
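Because the driver select modes are now defined in terms of the NIF select flags, the same bit values describe both APIs. A one-line sketch (port and event assumed in scope):

    /* equals ERL_NIF_SELECT_READ | ERL_NIF_SELECT_STOP bit-for-bit */
    driver_select(port, event, ERL_DRV_READ | ERL_DRV_USE, 1);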
@@ -133,7 +68,7 @@ typedef struct {
#define ERL_DRV_EXTENDED_MARKER (0xfeeeeeed)
#define ERL_DRV_EXTENDED_MAJOR_VERSION 3
-#define ERL_DRV_EXTENDED_MINOR_VERSION 0
+#define ERL_DRV_EXTENDED_MINOR_VERSION 3
/*
* The emulator will refuse to load a driver with a major version
@@ -171,34 +106,19 @@ typedef struct {
#define ERL_DRV_FLAG_USE_PORT_LOCKING (1 << 0)
#define ERL_DRV_FLAG_SOFT_BUSY (1 << 1)
#define ERL_DRV_FLAG_NO_BUSY_MSGQ (1 << 2)
+#define ERL_DRV_FLAG_USE_INIT_ACK (1 << 3)
/*
* Integer types
*/
-#if defined(__WIN32__) && (SIZEOF_VOID_P == 8)
-typedef unsigned __int64 ErlDrvTermData;
-typedef unsigned __int64 ErlDrvUInt;
-typedef signed __int64 ErlDrvSInt;
-#else
-typedef unsigned long ErlDrvTermData;
-typedef unsigned long ErlDrvUInt;
-typedef signed long ErlDrvSInt;
-#endif
-#if defined(__WIN32__)
-typedef unsigned __int64 ErlDrvUInt64;
-typedef __int64 ErlDrvSInt64;
-#elif SIZEOF_LONG == 8
-typedef unsigned long ErlDrvUInt64;
-typedef long ErlDrvSInt64;
-#elif SIZEOF_LONG_LONG == 8
-typedef unsigned long long ErlDrvUInt64;
-typedef long long ErlDrvSInt64;
-#else
-#error No 64-bit integer type
-#endif
+typedef ErlNapiUInt64 ErlDrvUInt64;
+typedef ErlNapiSInt64 ErlDrvSInt64;
+typedef ErlNapiUInt ErlDrvUInt;
+typedef ErlNapiSInt ErlDrvSInt;
+typedef ErlNapiUInt ErlDrvTermData;
-#if defined(__WIN32__)
+#if defined(__WIN32__) || defined(_WIN32)
typedef ErlDrvUInt ErlDrvSizeT;
typedef ErlDrvSInt ErlDrvSSizeT;
#else
@@ -228,27 +148,23 @@ typedef struct _erl_drv_event* ErlDrvEvent; /* An event to be selected on. */
typedef struct _erl_drv_port* ErlDrvPort; /* A port descriptor. */
typedef struct _erl_drv_port* ErlDrvThreadData; /* Thread data. */
-#if !defined(__WIN32__) && !defined(_WIN32) && !defined(_WIN32_) && !defined(USE_SELECT)
-struct erl_drv_event_data {
- short events;
- short revents;
-};
-#endif
-typedef struct erl_drv_event_data *ErlDrvEventData; /* Event data */
-
-/*
- * A driver monitor
- */
-typedef struct {
- unsigned char data[sizeof(void *)*4];
-} ErlDrvMonitor;
-
typedef struct {
unsigned long megasecs;
unsigned long secs;
unsigned long microsecs;
} ErlDrvNowData;
+typedef ErlDrvSInt64 ErlDrvTime;
+
+#define ERL_DRV_TIME_ERROR ((ErlDrvSInt64) ERTS_NAPI_TIME_ERROR__)
+
+typedef enum {
+ ERL_DRV_SEC = ERTS_NAPI_SEC__,
+ ERL_DRV_MSEC = ERTS_NAPI_MSEC__,
+ ERL_DRV_USEC = ERTS_NAPI_USEC__,
+ ERL_DRV_NSEC = ERTS_NAPI_NSEC__
+} ErlDrvTimeUnit;
+
/*
* Error codes that can be return from driver.
*/
@@ -346,10 +262,7 @@ typedef struct erl_drv_entry {
unsigned int *flags); /* Works mostly like 'control',
a synchronous
call into the driver. */
- void (*event)(ErlDrvData drv_data, ErlDrvEvent event,
- ErlDrvEventData event_data);
- /* Called when an event selected by
- driver_event() has occurred */
+ void (*unused_event_callback)(void);
int extended_marker; /* ERL_DRV_EXTENDED_MARKER */
int major_version; /* ERL_DRV_EXTENDED_MAJOR_VERSION */
int minor_version; /* ERL_DRV_EXTENDED_MINOR_VERSION */
@@ -361,6 +274,9 @@ typedef struct erl_drv_entry {
/* Called on behalf of driver_select when
it is safe to release 'event'. A typical
unix driver would call close(event) */
+ void (*emergency_close)(ErlDrvData drv_data);
+ /* Called when the port is closed abruptly,
+ specifically when erl_crash_dump is called. */
/* When adding entries here, don't forget to pad in obsolete/driver.h */
} ErlDrvEntry;
@@ -382,22 +298,23 @@ typedef struct erl_drv_entry {
#ifdef STATIC_ERLANG_DRIVER
# define ERLANG_DRIVER_NAME(NAME) NAME ## _driver_init
+# define ERL_DRIVER_EXPORT
#else
# define ERLANG_DRIVER_NAME(NAME) driver_init
+# if defined(__GNUC__) && __GNUC__ >= 4
+# define ERL_DRIVER_EXPORT __attribute__ ((visibility("default")))
+# elif defined (__SUNPRO_C) && (__SUNPRO_C >= 0x550)
+# define ERL_DRIVER_EXPORT __global
+# else
+# define ERL_DRIVER_EXPORT
+# endif
#endif
-/* For windows dynamic drivers */
#ifndef ERL_DRIVER_TYPES_ONLY
-#if defined(__WIN32__)
-# define DRIVER_INIT(DRIVER_NAME) \
- __declspec(dllexport) ErlDrvEntry* ERLANG_DRIVER_NAME(DRIVER_NAME)(void); \
- __declspec(dllexport) ErlDrvEntry* ERLANG_DRIVER_NAME(DRIVER_NAME)(void)
-#else
-# define DRIVER_INIT(DRIVER_NAME) \
- ErlDrvEntry* ERLANG_DRIVER_NAME(DRIVER_NAME)(void); \
- ErlDrvEntry* ERLANG_DRIVER_NAME(DRIVER_NAME)(void)
-#endif
+#define DRIVER_INIT(DRIVER_NAME) \
+ ERL_DRIVER_EXPORT ErlDrvEntry* ERLANG_DRIVER_NAME(DRIVER_NAME)(void); \
+ ERL_DRIVER_EXPORT ErlDrvEntry* ERLANG_DRIVER_NAME(DRIVER_NAME)(void)
#define ERL_DRV_BUSY_MSGQ_DISABLED (~((ErlDrvSizeT) 0))
#define ERL_DRV_BUSY_MSGQ_READ_ONLY ((ErlDrvSizeT) 0)
@@ -412,8 +329,6 @@ EXTERN void erl_drv_busy_msgq_limits(ErlDrvPort port,
ErlDrvSizeT *high);
EXTERN int driver_select(ErlDrvPort port, ErlDrvEvent event, int mode, int on);
-EXTERN int driver_event(ErlDrvPort port, ErlDrvEvent event,
- ErlDrvEventData event_data);
EXTERN int driver_output(ErlDrvPort port, char *buf, ErlDrvSizeT len);
EXTERN int driver_output2(ErlDrvPort port, char *hbuf, ErlDrvSizeT hlen,
@@ -681,8 +596,16 @@ EXTERN long driver_async(ErlDrvPort ix,
EXTERN int driver_lock_driver(ErlDrvPort ix);
/* Get the current 'now' timestamp (analogue to erlang:now()) */
-EXTERN int driver_get_now(ErlDrvNowData *now);
+EXTERN int driver_get_now(ErlDrvNowData *now) ERL_DRV_DEPRECATED_FUNC;
+/* Erlang Monotonic Time */
+EXTERN ErlDrvTime erl_drv_monotonic_time(ErlDrvTimeUnit time_unit);
+/* Time offset between Erlang Monotonic Time and Erlang System Time */
+EXTERN ErlDrvTime erl_drv_time_offset(ErlDrvTimeUnit time_unit);
+/* Time unit conversion */
+EXTERN ErlDrvTime erl_drv_convert_time_unit(ErlDrvTime val,
+ ErlDrvTimeUnit from,
+ ErlDrvTimeUnit to);
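A hedged usage sketch of the time API declared above, replacing driver_get_now() style timestamps with monotonic time plus explicit unit conversion:

    ErlDrvTime t0 = erl_drv_monotonic_time(ERL_DRV_USEC);
    /* ... work ... */
    ErlDrvTime us = erl_drv_monotonic_time(ERL_DRV_USEC) - t0;
    ErlDrvTime ms = erl_drv_convert_time_unit(us, ERL_DRV_USEC, ERL_DRV_MSEC);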
/* These were removed from the ANSI version, now they're back. */
@@ -692,18 +615,14 @@ EXTERN int driver_dl_close(void *);
EXTERN char *driver_dl_error(void);
/* environment */
-EXTERN int erl_drv_putenv(char *key, char *value);
-EXTERN int erl_drv_getenv(char *key, char *value, size_t *value_size);
-
-#ifdef __OSE__
-typedef ErlDrvUInt ErlDrvOseEventId;
-EXTERN union SIGNAL *erl_drv_ose_get_signal(ErlDrvEvent ev);
-EXTERN ErlDrvEvent erl_drv_ose_event_alloc(SIGSELECT sig, ErlDrvOseEventId handle,
- ErlDrvOseEventId (*resolve_signal)(union SIGNAL *sig), void *extra);
-EXTERN void erl_drv_ose_event_free(ErlDrvEvent ev);
-EXTERN void erl_drv_ose_event_fetch(ErlDrvEvent ev, SIGSELECT *sig,
- ErlDrvOseEventId *handle, void **extra);
-#endif
+EXTERN int erl_drv_putenv(const char *key, char *value);
+EXTERN int erl_drv_getenv(const char *key, char *value, size_t *value_size);
+
+/* spawn start init ack */
+EXTERN void erl_drv_init_ack(ErlDrvPort ix, ErlDrvData res);
+
+/* set the pid seen in port_info */
+EXTERN void erl_drv_set_os_pid(ErlDrvPort ix, ErlDrvSInt pid);
#endif /* !ERL_DRIVER_TYPES_ONLY */
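A minimal sketch of the two new calls, assuming a driver whose entry sets ERL_DRV_FLAG_USE_INIT_ACK; the helper name and call site are illustrative:

    static void spawn_started(ErlDrvPort port, ErlDrvData state, ErlDrvSInt os_pid)
    {
        erl_drv_set_os_pid(port, os_pid); /* reported by erlang:port_info/2 */
        erl_drv_init_ack(port, state);    /* completes the pending open_port/2 */
    }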
diff --git a/erts/emulator/beam/erl_drv_nif.h b/erts/emulator/beam/erl_drv_nif.h
index 3f829ea7ea..31b4817fb1 100644
--- a/erts/emulator/beam/erl_drv_nif.h
+++ b/erts/emulator/beam/erl_drv_nif.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -35,21 +36,133 @@ typedef struct {
int scheduler_threads;
int nif_major_version;
int nif_minor_version;
+ int dirty_scheduler_support;
} ErlDrvSysInfo;
typedef struct {
int suggested_stack_size;
} ErlDrvThreadOpts;
-#if defined(ERL_DRV_DIRTY_SCHEDULER_SUPPORT) || defined(ERL_NIF_DIRTY_SCHEDULER_SUPPORT)
+
typedef enum {
- ERL_DRV_DIRTY_JOB_CPU_BOUND = 1,
- ERL_DRV_DIRTY_JOB_IO_BOUND = 2
-} ErlDrvDirtyJobFlags;
+ ERL_DIRTY_JOB_CPU_BOUND = 1,
+ ERL_DIRTY_JOB_IO_BOUND = 2
+} ErlDirtyJobFlags;
+
+/* Values for enif_select AND mode arg for driver_select() */
+enum ErlNifSelectFlags {
+ ERL_NIF_SELECT_READ = (1 << 0),
+ ERL_NIF_SELECT_WRITE = (1 << 1),
+ ERL_NIF_SELECT_STOP = (1 << 2)
+};
+
+/*
+ * A driver monitor
+ */
+typedef struct {
+ unsigned char data[sizeof(void *)*4];
+} ErlDrvMonitor;
+
+
+#ifdef SIZEOF_CHAR
+# define SIZEOF_CHAR_SAVED__ SIZEOF_CHAR
+# undef SIZEOF_CHAR
+#endif
+#ifdef SIZEOF_SHORT
+# define SIZEOF_SHORT_SAVED__ SIZEOF_SHORT
+# undef SIZEOF_SHORT
+#endif
+#ifdef SIZEOF_INT
+# define SIZEOF_INT_SAVED__ SIZEOF_INT
+# undef SIZEOF_INT
+#endif
+#ifdef SIZEOF_LONG
+# define SIZEOF_LONG_SAVED__ SIZEOF_LONG
+# undef SIZEOF_LONG
+#endif
+#ifdef SIZEOF_LONG_LONG
+# define SIZEOF_LONG_LONG_SAVED__ SIZEOF_LONG_LONG
+# undef SIZEOF_LONG_LONG
+#endif
+#include "erl_int_sizes_config.h"
+#if defined(SIZEOF_CHAR_SAVED__) && SIZEOF_CHAR_SAVED__ != SIZEOF_CHAR
+# error SIZEOF_CHAR mismatch
+#endif
+#if defined(SIZEOF_SHORT_SAVED__) && SIZEOF_SHORT_SAVED__ != SIZEOF_SHORT
+# error SIZEOF_SHORT mismatch
+#endif
+#if defined(SIZEOF_INT_SAVED__) && SIZEOF_INT_SAVED__ != SIZEOF_INT
+# error SIZEOF_INT mismatch
+#endif
+#if defined(SIZEOF_LONG_SAVED__) && SIZEOF_LONG_SAVED__ != SIZEOF_LONG
+# error SIZEOF_LONG mismatch
+#endif
+#if defined(SIZEOF_LONG_LONG_SAVED__) && SIZEOF_LONG_LONG_SAVED__ != SIZEOF_LONG_LONG
+# error SIZEOF_LONG_LONG mismatch
#endif
-#endif /* __ERL_DRV_NIF_H__ */
+#if !defined(__GNUC__) && (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
+typedef unsigned __int64 ErlNapiUInt64;
+typedef signed __int64 ErlNapiSInt64;
+#define ERL_NAPI_SINT64_MAX__ 9223372036854775807i64
+#define ERL_NAPI_SINT64_MIN__ (-ERL_NAPI_SINT64_MAX__ - 1i64)
+#elif SIZEOF_LONG == 8
+typedef unsigned long ErlNapiUInt64;
+typedef signed long ErlNapiSInt64;
+#define ERL_NAPI_SINT64_MAX__ 9223372036854775807L
+#define ERL_NAPI_SINT64_MIN__ (-ERL_NAPI_SINT64_MAX__ - 1L)
+#elif SIZEOF_LONG_LONG == 8
+typedef unsigned long long ErlNapiUInt64;
+typedef signed long long ErlNapiSInt64;
+#define ERL_NAPI_SINT64_MAX__ 9223372036854775807LL
+#define ERL_NAPI_SINT64_MIN__ (-ERL_NAPI_SINT64_MAX__ - 1LL)
+#else
+# error No 64-bit integer type
+#endif
+#if SIZEOF_VOID_P == 8
+typedef ErlNapiUInt64 ErlNapiUInt;
+typedef ErlNapiSInt64 ErlNapiSInt;
+#elif SIZEOF_VOID_P == 4
+# if SIZEOF_LONG == SIZEOF_VOID_P
+typedef unsigned long ErlNapiUInt;
+typedef signed long ErlNapiSInt;
+# elif SIZEOF_INT == SIZEOF_VOID_P
+typedef unsigned int ErlNapiUInt;
+typedef signed int ErlNapiSInt;
+# else
+# error No 32-bit integer type
+# endif
+#else
+# error Unsupported architecture
+#endif
+#define ERTS_NAPI_TIME_ERROR__ ERL_NAPI_SINT64_MIN__
+#define ERTS_NAPI_SEC__ 0
+#define ERTS_NAPI_MSEC__ 1
+#define ERTS_NAPI_USEC__ 2
+#define ERTS_NAPI_NSEC__ 3
+#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
+/*
+ * This structure can be cast to a WSABUF structure.
+ */
+typedef struct _SysIOVec {
+ unsigned long iov_len;
+ char* iov_base;
+} SysIOVec;
+#else /* Unix */
+# include <sys/types.h>
+# ifdef HAVE_SYS_UIO_H
+# include <sys/uio.h>
+typedef struct iovec SysIOVec;
+# else
+typedef struct {
+ char* iov_base;
+ size_t iov_len;
+} SysIOVec;
+# endif
+#endif
+
+#endif /* __ERL_DRV_NIF_H__ */
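Drivers can probe the new dirty_scheduler_support field through driver_system_info() from erl_driver.h; a hedged sketch:

    ErlDrvSysInfo si;
    driver_system_info(&si, sizeof(si));
    if (si.dirty_scheduler_support) {
        /* ERL_DIRTY_JOB_CPU_BOUND / ERL_DIRTY_JOB_IO_BOUND jobs are available */
    }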
diff --git a/erts/emulator/beam/erl_drv_thread.c b/erts/emulator/beam/erl_drv_thread.c
index 147249f751..71d4534ef9 100644
--- a/erts/emulator/beam/erl_drv_thread.c
+++ b/erts/emulator/beam/erl_drv_thread.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2007-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2007-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -42,17 +43,19 @@ fatal_error(int err, char *func)
else
estr = "Unknown error";
}
- erl_exit(ERTS_ABORT_EXIT, "Fatal error in %s: %s [%d]\n", func, estr, err);
+ erts_exit(ERTS_ABORT_EXIT, "Fatal error in %s: %s [%d]\n", func, estr, err);
}
#define ERL_DRV_TSD_KEYS_INC 10
#define ERL_DRV_TSD_EXTRA 10
#define ERL_DRV_INVALID_TSD_KEY INT_MAX
-#ifdef USE_THREADS
struct ErlDrvMutex_ {
ethr_mutex mtx;
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_ref_t lcnt;
+#endif
char *name;
};
@@ -63,6 +66,9 @@ struct ErlDrvCond_ {
struct ErlDrvRWLock_ {
ethr_rwmutex rwmtx;
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_ref_t lcnt;
+#endif
char *name;
};
@@ -78,10 +84,6 @@ struct ErlDrvTid_ {
static ethr_tsd_key tid_key;
-#else /* USE_THREADS */
-static Uint tsd_len;
-static void **tsd;
-#endif
static ErlDrvTSDKey next_tsd_key;
static ErlDrvTSDKey max_used_tsd_key;
@@ -90,7 +92,6 @@ static char **used_tsd_keys;
static erts_mtx_t tsd_mtx;
static char *no_name;
-#ifdef USE_THREADS
static void
thread_exit_handler(void)
@@ -115,21 +116,15 @@ erl_drv_thread_wrapper(void *vdtid)
return (*dtid->func)(dtid->arg);
}
-#endif
void erl_drv_thr_init(void)
{
int i;
-#ifdef USE_THREADS
int res = ethr_tsd_key_create(&tid_key,"erts_tid_key");
if (res == 0)
res = ethr_install_exit_handler(thread_exit_handler);
if (res != 0)
fatal_error(res, "erl_drv_thr_init()");
-#else
- tsd_len = 0;
- tsd = NULL;
-#endif
no_name = "unknown";
next_tsd_key = 0;
@@ -139,19 +134,19 @@ void erl_drv_thr_init(void)
sizeof(char *)*ERL_DRV_TSD_KEYS_INC);
for (i = 0; i < ERL_DRV_TSD_KEYS_INC; i++)
used_tsd_keys[i] = NULL;
- erts_mtx_init(&tsd_mtx, "drv_tsd");
+ erts_mtx_init(&tsd_mtx, "drv_tsd", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO);
}
/*
* These functions implement the driver thread interface in erl_driver.h.
* NOTE: Only use this interface from drivers. From within the emulator use
- * either the erl_threads.h, the erl_smp.h or the ethread.h interface.
+ * either the erl_threads.h or the ethread.h interface.
*/
ErlDrvMutex *
erl_drv_mutex_create(char *name)
{
-#ifdef USE_THREADS
ErlDrvMutex *dmtx = erts_alloc_fnf(ERTS_ALC_T_DRV_MTX,
(sizeof(ErlDrvMutex)
+ (name ? sys_strlen(name) + 1 : 0)));
@@ -160,79 +155,83 @@ erl_drv_mutex_create(char *name)
opt.posix_compliant = 1;
if (ethr_mutex_init_opt(&dmtx->mtx, &opt) != 0) {
erts_free(ERTS_ALC_T_DRV_MTX, (void *) dmtx);
- dmtx = NULL;
+ return NULL;
}
- else if (!name)
- dmtx->name = no_name;
- else {
+ if (name) {
dmtx->name = ((char *) dmtx) + sizeof(ErlDrvMutex);
sys_strcpy(dmtx->name, name);
+ } else {
+ dmtx->name = no_name;
}
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_init_ref_x(&dmtx->lcnt, dmtx->name, NIL,
+ ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
+#endif
}
return dmtx;
-#else
- return (ErlDrvMutex *) NULL;
-#endif
}
void
erl_drv_mutex_destroy(ErlDrvMutex *dmtx)
{
-#ifdef USE_THREADS
- int res = dmtx ? ethr_mutex_destroy(&dmtx->mtx) : EINVAL;
+ int res;
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_uninstall(&dmtx->lcnt);
+#endif
+ res = dmtx ? ethr_mutex_destroy(&dmtx->mtx) : EINVAL;
if (res != 0)
fatal_error(res, "erl_drv_mutex_destroy()");
erts_free(ERTS_ALC_T_DRV_MTX, (void *) dmtx);
-#endif
}
char *
erl_drv_mutex_name(ErlDrvMutex *dmtx)
{
-#ifdef USE_THREADS
return dmtx ? dmtx->name : NULL;
-#else
- return NULL;
-#endif
}
int
erl_drv_mutex_trylock(ErlDrvMutex *dmtx)
{
-#ifdef USE_THREADS
+ int res;
if (!dmtx)
fatal_error(EINVAL, "erl_drv_mutex_trylock()");
- return ethr_mutex_trylock(&dmtx->mtx);
-#else
- return 0;
+ res = ethr_mutex_trylock(&dmtx->mtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_trylock(&dmtx->lcnt, res);
#endif
+ return res;
}
void
erl_drv_mutex_lock(ErlDrvMutex *dmtx)
{
-#ifdef USE_THREADS
if (!dmtx)
fatal_error(EINVAL, "erl_drv_mutex_lock()");
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock(&dmtx->lcnt);
+#endif
ethr_mutex_lock(&dmtx->mtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_post(&dmtx->lcnt);
#endif
}
void
erl_drv_mutex_unlock(ErlDrvMutex *dmtx)
{
-#ifdef USE_THREADS
if (!dmtx)
fatal_error(EINVAL, "erl_drv_mutex_unlock()");
- ethr_mutex_unlock(&dmtx->mtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_unlock(&dmtx->lcnt);
#endif
+ ethr_mutex_unlock(&dmtx->mtx);
}
ErlDrvCond *
erl_drv_cond_create(char *name)
{
-#ifdef USE_THREADS
ErlDrvCond *dcnd = erts_alloc_fnf(ERTS_ALC_T_DRV_CND,
(sizeof(ErlDrvCond)
+ (name ? sys_strlen(name) + 1 : 0)));
@@ -251,176 +250,179 @@ erl_drv_cond_create(char *name)
}
}
return dcnd;
-#else
- return (ErlDrvCond *) NULL;
-#endif
}
void
erl_drv_cond_destroy(ErlDrvCond *dcnd)
{
-#ifdef USE_THREADS
int res = dcnd ? ethr_cond_destroy(&dcnd->cnd) : EINVAL;
if (res != 0)
fatal_error(res, "erl_drv_cond_destroy()");
erts_free(ERTS_ALC_T_DRV_CND, (void *) dcnd);
-#endif
}
char *
erl_drv_cond_name(ErlDrvCond *dcnd)
{
-#ifdef USE_THREADS
return dcnd ? dcnd->name : NULL;
-#else
- return NULL;
-#endif
}
void
erl_drv_cond_signal(ErlDrvCond *dcnd)
{
-#ifdef USE_THREADS
if (!dcnd)
fatal_error(EINVAL, "erl_drv_cond_signal()");
ethr_cond_signal(&dcnd->cnd);
-#endif
}
void
erl_drv_cond_broadcast(ErlDrvCond *dcnd)
{
-#ifdef USE_THREADS
if (!dcnd)
fatal_error(EINVAL, "erl_drv_cond_broadcast()");
ethr_cond_broadcast(&dcnd->cnd);
-#endif
}
void
erl_drv_cond_wait(ErlDrvCond *dcnd, ErlDrvMutex *dmtx)
{
-#ifdef USE_THREADS
if (!dcnd || !dmtx) {
fatal_error(EINVAL, "erl_drv_cond_wait()");
}
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_unlock(&dmtx->lcnt);
+#endif
while (1) {
int res = ethr_cond_wait(&dcnd->cnd, &dmtx->mtx);
- if (res == 0)
+ if (res == 0) {
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock(&dmtx->lcnt);
+ erts_lcnt_lock_post(&dmtx->lcnt);
+#endif
break;
+ }
}
-#endif
}
ErlDrvRWLock *
erl_drv_rwlock_create(char *name)
{
-#ifdef USE_THREADS
ErlDrvRWLock *drwlck = erts_alloc_fnf(ERTS_ALC_T_DRV_RWLCK,
(sizeof(ErlDrvRWLock)
+ (name ? sys_strlen(name) + 1 : 0)));
if (drwlck) {
if (ethr_rwmutex_init(&drwlck->rwmtx) != 0) {
erts_free(ERTS_ALC_T_DRV_RWLCK, (void *) drwlck);
- drwlck = NULL;
+ return NULL;
}
- else if (!name)
- drwlck->name = no_name;
- else {
+ if (name) {
drwlck->name = ((char *) drwlck) + sizeof(ErlDrvRWLock);
sys_strcpy(drwlck->name, name);
+ } else {
+ drwlck->name = no_name;
}
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_init_ref_x(&drwlck->lcnt, drwlck->name, NIL,
+ ERTS_LOCK_TYPE_RWMUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
+#endif
}
return drwlck;
-#else
- return (ErlDrvRWLock *) NULL;
-#endif
}
void
erl_drv_rwlock_destroy(ErlDrvRWLock *drwlck)
{
-#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_destroy(&drwlck->rwmtx) : EINVAL;
+ int res;
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_uninstall(&drwlck->lcnt);
+#endif
+ res = drwlck ? ethr_rwmutex_destroy(&drwlck->rwmtx) : EINVAL;
if (res != 0)
fatal_error(res, "erl_drv_rwlock_destroy()");
erts_free(ERTS_ALC_T_DRV_RWLCK, (void *) drwlck);
-#endif
}
char *
erl_drv_rwlock_name(ErlDrvRWLock *drwlck)
{
-#ifdef USE_THREADS
return drwlck ? drwlck->name : NULL;
-#else
- return NULL;
-#endif
}
int
erl_drv_rwlock_tryrlock(ErlDrvRWLock *drwlck)
{
-#ifdef USE_THREADS
+ int res;
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_tryrlock()");
- return ethr_rwmutex_tryrlock(&drwlck->rwmtx);
-#else
- return 0;
+ res = ethr_rwmutex_tryrlock(&drwlck->rwmtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_trylock_opt(&drwlck->lcnt, res, ERTS_LOCK_OPTIONS_READ);
#endif
+ return res;
}
void
erl_drv_rwlock_rlock(ErlDrvRWLock *drwlck)
{
-#ifdef USE_THREADS
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_rlock()");
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_opt(&drwlck->lcnt, ERTS_LOCK_OPTIONS_READ);
+#endif
ethr_rwmutex_rlock(&drwlck->rwmtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_post(&drwlck->lcnt);
#endif
}
void
erl_drv_rwlock_runlock(ErlDrvRWLock *drwlck)
{
-#ifdef USE_THREADS
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_runlock()");
- ethr_rwmutex_runlock(&drwlck->rwmtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_unlock_opt(&drwlck->lcnt, ERTS_LOCK_OPTIONS_READ);
#endif
+ ethr_rwmutex_runlock(&drwlck->rwmtx);
}
int
erl_drv_rwlock_tryrwlock(ErlDrvRWLock *drwlck)
{
-#ifdef USE_THREADS
+ int res;
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_tryrwlock()");
- return ethr_rwmutex_tryrwlock(&drwlck->rwmtx);
-#else
- return 0;
+ res = ethr_rwmutex_tryrwlock(&drwlck->rwmtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_trylock_opt(&drwlck->lcnt, res, ERTS_LOCK_OPTIONS_RDWR);
#endif
+ return res;
}
void
erl_drv_rwlock_rwlock(ErlDrvRWLock *drwlck)
{
-#ifdef USE_THREADS
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_rwlock()");
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_opt(&drwlck->lcnt, ERTS_LOCK_OPTIONS_RDWR);
+#endif
ethr_rwmutex_rwlock(&drwlck->rwmtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_post(&drwlck->lcnt);
#endif
}
void
erl_drv_rwlock_rwunlock(ErlDrvRWLock *drwlck)
{
-#ifdef USE_THREADS
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_rwunlock()");
- ethr_rwmutex_rwunlock(&drwlck->rwmtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_unlock_opt(&drwlck->lcnt, ERTS_LOCK_OPTIONS_RDWR);
#endif
+ ethr_rwmutex_rwunlock(&drwlck->rwmtx);
}
int
@@ -514,20 +516,13 @@ erl_drv_tsd_key_destroy(ErlDrvTSDKey key)
}
-#ifdef USE_THREADS
#define ERL_DRV_TSD__ (dtid->tsd)
#define ERL_DRV_TSD_LEN__ (dtid->tsd_len)
-#else
-#define ERL_DRV_TSD__ (tsd)
-#define ERL_DRV_TSD_LEN__ (tsd_len)
-#endif
void
erl_drv_tsd_set(ErlDrvTSDKey key, void *data)
{
-#ifdef USE_THREADS
struct ErlDrvTid_ *dtid = (struct ErlDrvTid_ *) erl_drv_thread_self();
-#endif
if (key < 0 || max_used_tsd_key < key || !used_tsd_keys[key])
fatal_error(EINVAL, "erl_drv_tsd_set()");
@@ -555,15 +550,11 @@ erl_drv_tsd_set(ErlDrvTSDKey key, void *data)
void *
erl_drv_tsd_get(ErlDrvTSDKey key)
{
-#ifdef USE_THREADS
struct ErlDrvTid_ *dtid = ethr_tsd_get(tid_key);
-#endif
if (key < 0 || max_used_tsd_key < key || !used_tsd_keys[key])
fatal_error(EINVAL, "erl_drv_tsd_get()");
-#ifdef USE_THREADS
if (!dtid)
return NULL;
-#endif
if (ERL_DRV_TSD_LEN__ <= key)
return NULL;
return ERL_DRV_TSD__[key];
@@ -598,20 +589,18 @@ erl_drv_thread_create(char *name,
void* arg,
ErlDrvThreadOpts *opts)
{
-#ifdef USE_THREADS
int res;
struct ErlDrvTid_ *dtid;
- ethr_thr_opts ethr_opts;
+ ethr_thr_opts ethr_opts = ETHR_THR_OPTS_DEFAULT_INITER;
ethr_thr_opts *use_opts;
- ethr_thr_opts def_ethr_opts = ETHR_THR_OPTS_DEFAULT_INITER;
- if (!opts)
+ if (!opts && !name)
use_opts = NULL;
else {
- sys_memcpy((void *) &ethr_opts,
- (void *) &def_ethr_opts,
- sizeof(ethr_thr_opts));
- ethr_opts.suggested_stack_size = opts->suggested_stack_size;
+ if(opts)
+ ethr_opts.suggested_stack_size = opts->suggested_stack_size;
+
+ ethr_opts.name = name;
use_opts = &ethr_opts;
}
@@ -641,27 +630,19 @@ erl_drv_thread_create(char *name,
*tid = (ErlDrvTid) dtid;
return 0;
-#else
- return ENOTSUP;
-#endif
}
char *
erl_drv_thread_name(ErlDrvTid tid)
{
-#ifdef USE_THREADS
struct ErlDrvTid_ *dtid = (struct ErlDrvTid_ *) tid;
return dtid ? dtid->name : NULL;
-#else
- return NULL;
-#endif
}
ErlDrvTid
erl_drv_thread_self(void)
{
-#ifdef USE_THREADS
struct ErlDrvTid_ *dtid = ethr_tsd_get(tid_key);
if (!dtid) {
int res;
@@ -680,15 +661,11 @@ erl_drv_thread_self(void)
fatal_error(res, "erl_drv_thread_self()");
}
return (ErlDrvTid) dtid;
-#else
- return (ErlDrvTid) NULL;
-#endif
}
int
erl_drv_equal_tids(ErlDrvTid tid1, ErlDrvTid tid2)
{
-#ifdef USE_THREADS
int res;
struct ErlDrvTid_ *dtid1 = (struct ErlDrvTid_ *) tid1;
struct ErlDrvTid_ *dtid2 = (struct ErlDrvTid_ *) tid2;
@@ -702,28 +679,22 @@ erl_drv_equal_tids(ErlDrvTid tid1, ErlDrvTid tid2)
: !ethr_equal_tids(dtid1->tid, dtid2->tid));
return res;
-#else
- return 1;
-#endif
}
void
erl_drv_thread_exit(void *res)
{
-#ifdef USE_THREADS
struct ErlDrvTid_ *dtid = ethr_tsd_get(tid_key);
if (dtid && dtid->drv_thr) {
ethr_thr_exit(res);
fatal_error(0, "erl_drv_thread_exit()");
}
-#endif
fatal_error(EACCES, "erl_drv_thread_exit()");
}
int
erl_drv_thread_join(ErlDrvTid tid, void **respp)
{
-#ifdef USE_THREADS
int res;
struct ErlDrvTid_ *dtid = (struct ErlDrvTid_ *) tid;
@@ -736,12 +707,9 @@ erl_drv_thread_join(ErlDrvTid tid, void **respp)
if (res == 0)
erts_free(ERTS_ALC_T_DRV_TID, dtid);
return res;
-#else
- return ENOTSUP;
-#endif
}
-#if defined(__DARWIN__) && defined(USE_THREADS) && defined(ERTS_SMP)
+#if defined(__DARWIN__)
extern int erts_darwin_main_thread_pipe[2];
extern int erts_darwin_main_thread_result_pipe[2];
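With the !USE_THREADS fallbacks removed throughout this file, the driver thread API always reaches ethread (and the lock counter when ERTS_ENABLE_LOCK_COUNT is set). A minimal usage sketch; the mutex name is illustrative:

    ErlDrvMutex *mtx = erl_drv_mutex_create("my_drv_state");
    erl_drv_mutex_lock(mtx);
    /* ... critical section ... */
    erl_drv_mutex_unlock(mtx);
    erl_drv_mutex_destroy(mtx);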
diff --git a/erts/emulator/beam/erl_fun.c b/erts/emulator/beam/erl_fun.c
index 88947b5536..9c866250bb 100644
--- a/erts/emulator/beam/erl_fun.c
+++ b/erts/emulator/beam/erl_fun.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -29,14 +30,16 @@
static Hash erts_fun_table;
-#include "erl_smp.h"
+#ifdef HIPE
+# include "hipe_mode_switch.h"
+#endif
-static erts_smp_rwmtx_t erts_fun_table_lock;
+static erts_rwmtx_t erts_fun_table_lock;
-#define erts_fun_read_lock() erts_smp_rwmtx_rlock(&erts_fun_table_lock)
-#define erts_fun_read_unlock() erts_smp_rwmtx_runlock(&erts_fun_table_lock)
-#define erts_fun_write_lock() erts_smp_rwmtx_rwlock(&erts_fun_table_lock)
-#define erts_fun_write_unlock() erts_smp_rwmtx_rwunlock(&erts_fun_table_lock)
+#define erts_fun_read_lock() erts_rwmtx_rlock(&erts_fun_table_lock)
+#define erts_fun_read_unlock() erts_rwmtx_runlock(&erts_fun_table_lock)
+#define erts_fun_write_lock() erts_rwmtx_rwlock(&erts_fun_table_lock)
+#define erts_fun_write_unlock() erts_rwmtx_rwunlock(&erts_fun_table_lock)
static HashValue fun_hash(ErlFunEntry* obj);
static int fun_cmp(ErlFunEntry* obj1, ErlFunEntry* obj2);
@@ -48,29 +51,33 @@ static void fun_free(ErlFunEntry* obj);
* to unloaded_fun[]. The -1 in unloaded_fun[0] will be interpreted
* as an illegal arity when attempting to call a fun.
*/
-static BeamInstr unloaded_fun_code[3] = {NIL, -1, 0};
-static BeamInstr* unloaded_fun = unloaded_fun_code + 2;
+static BeamInstr unloaded_fun_code[4] = {NIL, NIL, -1, 0};
+static BeamInstr* unloaded_fun = unloaded_fun_code + 3;
void
erts_init_fun_table(void)
{
HashFunctions f;
- erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
- rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
- rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+ erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED;
- erts_smp_rwmtx_init_opt(&erts_fun_table_lock, &rwmtx_opt, "fun_tab");
+ erts_rwmtx_init_opt(&erts_fun_table_lock, &rwmtx_opt, "fun_tab", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
f.hash = (H_FUN) fun_hash;
f.cmp = (HCMP_FUN) fun_cmp;
f.alloc = (HALLOC_FUN) fun_alloc;
f.free = (HFREE_FUN) fun_free;
+ f.meta_alloc = (HMALLOC_FUN) erts_alloc;
+ f.meta_free = (HMFREE_FUN) erts_free;
+ f.meta_print = (HMPRINT_FUN) erts_print;
hash_init(ERTS_ALC_T_FUN_TABLE, &erts_fun_table, "fun_table", 16, f);
}
void
-erts_fun_info(int to, void *to_arg)
+erts_fun_info(fmtfn_t to, void *to_arg)
{
int lock = !ERTS_IS_CRASH_DUMPING;
if (lock)
@@ -175,16 +182,14 @@ void
erts_erase_fun_entry(ErlFunEntry* fe)
{
erts_fun_write_lock();
-#ifdef ERTS_SMP
/*
* We have to check refc again since someone might have looked up
* the fun entry and incremented refc after last check.
*/
if (erts_refc_dectest(&fe->refc, -1) <= 0)
-#endif
{
if (fe->address != unloaded_fun)
- erl_exit(1,
+ erts_exit(ERTS_ERROR_EXIT,
"Internal error: "
"Invalid reference count found on #Fun<%T.%d.%d>: "
" About to erase fun still referred by code.\n",
@@ -195,14 +200,13 @@ erts_erase_fun_entry(ErlFunEntry* fe)
}
void
-erts_cleanup_funs_on_purge(BeamInstr* start, BeamInstr* end)
+erts_fun_purge_prepare(BeamInstr* start, BeamInstr* end)
{
int limit;
HashBucket** bucket;
- ErlFunEntry* to_delete = NULL;
int i;
- erts_fun_write_lock();
+ erts_fun_read_lock();
limit = erts_fun_table.size;
bucket = erts_fun_table.bucket;
for (i = 0; i < limit; i++) {
@@ -213,26 +217,69 @@ erts_cleanup_funs_on_purge(BeamInstr* start, BeamInstr* end)
BeamInstr* addr = fe->address;
if (start <= addr && addr < end) {
+ fe->pend_purge_address = addr;
+ ERTS_THR_WRITE_MEMORY_BARRIER;
fe->address = unloaded_fun;
- if (erts_refc_dectest(&fe->refc, 0) == 0) {
- fe->address = (void *) to_delete;
- to_delete = fe;
- }
+#ifdef HIPE
+ fe->pend_purge_native_address = fe->native_address;
+ hipe_set_closure_stub(fe);
+#endif
+ erts_purge_state_add_fun(fe);
}
b = b->next;
}
}
+ erts_fun_read_unlock();
+}
+
+void
+erts_fun_purge_abort_prepare(ErlFunEntry **funs, Uint no)
+{
+ Uint ix;
- while (to_delete != NULL) {
- ErlFunEntry* next = (ErlFunEntry *) to_delete->address;
- erts_erase_fun_entry_unlocked(to_delete);
- to_delete = next;
+ for (ix = 0; ix < no; ix++) {
+ ErlFunEntry *fe = funs[ix];
+ if (fe->address == unloaded_fun) {
+ fe->address = fe->pend_purge_address;
+#ifdef HIPE
+ fe->native_address = fe->pend_purge_native_address;
+#endif
+ }
}
- erts_fun_write_unlock();
}
void
-erts_dump_fun_entries(int to, void *to_arg)
+erts_fun_purge_abort_finalize(ErlFunEntry **funs, Uint no)
+{
+ Uint ix;
+
+ for (ix = 0; ix < no; ix++) {
+ funs[ix]->pend_purge_address = NULL;
+#ifdef HIPE
+ funs[ix]->pend_purge_native_address = NULL;
+#endif
+ }
+}
+
+void
+erts_fun_purge_complete(ErlFunEntry **funs, Uint no)
+{
+ Uint ix;
+
+ for (ix = 0; ix < no; ix++) {
+ ErlFunEntry *fe = funs[ix];
+ fe->pend_purge_address = NULL;
+#ifdef HIPE
+ fe->pend_purge_native_address = NULL;
+#endif
+ if (erts_refc_dectest(&fe->refc, 0) == 0)
+ erts_erase_fun_entry(fe);
+ }
+ ERTS_THR_WRITE_MEMORY_BARRIER;
+}
+
+void
+erts_dump_fun_entries(fmtfn_t to, void *to_arg)
{
int limit;
HashBucket** bucket;
@@ -290,8 +337,10 @@ fun_alloc(ErlFunEntry* template)
obj->module = template->module;
erts_refc_init(&obj->refc, -1);
obj->address = unloaded_fun;
+ obj->pend_purge_address = NULL;
#ifdef HIPE
obj->native_address = NULL;
+ obj->pend_purge_native_address = NULL;
#endif
return obj;
}
diff --git a/erts/emulator/beam/erl_fun.h b/erts/emulator/beam/erl_fun.h
index b673ef6b3c..fb2901d866 100644
--- a/erts/emulator/beam/erl_fun.h
+++ b/erts/emulator/beam/erl_fun.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2012. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -20,7 +21,7 @@
#ifndef __ERLFUNTABLE_H__
#define __ERLFUNTABLE_H__
-#include "erl_smp.h"
+#include "erl_threads.h"
/*
* Fun entry.
@@ -43,6 +44,10 @@ typedef struct erl_fun_entry {
Eterm module; /* Tagged atom for module. */
erts_refc_t refc; /* Reference count: One for code + one for each
fun object in each process. */
+ BeamInstr *pend_purge_address; /* address stored during a pending purge */
+#ifdef HIPE
+ UWord* pend_purge_native_address;
+#endif
} ErlFunEntry;
/*
@@ -55,9 +60,6 @@ typedef struct erl_fun_thing {
Eterm thing_word; /* Subtag FUN_SUBTAG. */
ErlFunEntry* fe; /* Pointer to fun entry. */
struct erl_off_heap_header* next;
-#ifdef HIPE
- UWord* native_address; /* Native code for the fun. */
-#endif
Uint arity; /* The arity of the fun. */
Uint num_free; /* Number of free variables (in env). */
/* -- The following may be compound Erlang terms ---------------------- */
@@ -69,7 +71,7 @@ typedef struct erl_fun_thing {
#define ERL_FUN_SIZE ((sizeof(ErlFunThing)/sizeof(Eterm))-1)
void erts_init_fun_table(void);
-void erts_fun_info(int, void *);
+void erts_fun_info(fmtfn_t, void *);
int erts_fun_table_sz(void);
ErlFunEntry* erts_put_fun_entry(Eterm mod, int uniq, int index);
@@ -80,7 +82,10 @@ ErlFunEntry* erts_put_fun_entry2(Eterm mod, int old_uniq, int old_index,
void erts_erase_fun_entry(ErlFunEntry* fe);
void erts_cleanup_funs(ErlFunThing* funp);
-void erts_cleanup_funs_on_purge(BeamInstr* start, BeamInstr* end);
-void erts_dump_fun_entries(int, void *);
+void erts_fun_purge_prepare(BeamInstr* start, BeamInstr* end);
+void erts_fun_purge_abort_prepare(ErlFunEntry **funs, Uint no);
+void erts_fun_purge_abort_finalize(ErlFunEntry **funs, Uint no);
+void erts_fun_purge_complete(ErlFunEntry **funs, Uint no);
+void erts_dump_fun_entries(fmtfn_t, void *);
#endif
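
The four erts_fun_purge_* functions declared above replace the one-shot erts_cleanup_funs_on_purge() with a staged protocol driven from beam_bif_load.c. A rough sketch of the intended call order, not compilable emulator code; purge_state_funs() is a made-up stand-in for whatever accessor exposes the entries collected via erts_purge_state_add_fun():

    /* Hypothetical driver of the staged fun purge. */
    static void purge_funs(BeamInstr *start, BeamInstr *end, int aborted)
    {
        Uint no;
        ErlFunEntry **funs;

        /* Save each affected entry's address, then redirect it to
         * unloaded_fun so calls through the entry hit the stub. */
        erts_fun_purge_prepare(start, end);
        funs = purge_state_funs(&no); /* hypothetical accessor */

        if (aborted) {
            /* Restore the saved addresses... */
            erts_fun_purge_abort_prepare(funs, no);
            /* ...wait for thread progress so no thread still holds the
             * unloaded_fun address... */
            erts_fun_purge_abort_finalize(funs, no); /* clear pending state */
        } else {
            /* Clear pending state and drop the code references. */
            erts_fun_purge_complete(funs, no);
        }
    }
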
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index aa15d2cc57..97a1ca915f 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2014. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -20,6 +21,8 @@
# include "config.h"
#endif
+#define ERL_WANT_GC_INTERNALS__
+
#include "sys.h"
#include "erl_vm.h"
#include "global.h"
@@ -32,17 +35,35 @@
#include "error.h"
#include "big.h"
#include "erl_gc.h"
-#if HIPE
+#ifdef HIPE
#include "hipe_stack.h"
#include "hipe_mode_switch.h"
#endif
#include "dtrace-wrapper.h"
+#include "erl_bif_unique.h"
+#include "dist.h"
+#include "erl_nfunc_sched.h"
#define ERTS_INACT_WR_PB_LEAVE_MUCH_LIMIT 1
#define ERTS_INACT_WR_PB_LEAVE_MUCH_PERCENTAGE 20
#define ERTS_INACT_WR_PB_LEAVE_LIMIT 10
#define ERTS_INACT_WR_PB_LEAVE_PERCENTAGE 10
+#if defined(DEBUG) || 0
+#define ERTS_GC_DEBUG
+#else
+#undef ERTS_GC_DEBUG
+#endif
+#ifdef ERTS_GC_DEBUG
+# define ERTS_GC_ASSERT ASSERT
+#else
+# define ERTS_GC_ASSERT(B) ((void) 1)
+#endif
+
+#if defined(DEBUG) && 0
+# define HARDDEBUG 1
+#endif
+
/*
* Returns number of elements in an array.
*/
@@ -55,7 +76,7 @@
erts_fprintf(stderr, "stop=%p\n", (p)->stop); \
erts_fprintf(stderr, "htop=%p\n", (p)->htop); \
erts_fprintf(stderr, "heap=%p\n", (p)->heap); \
- erl_exit(ERTS_ABORT_EXIT, "%s, line %d: %T: Overrun stack and heap\n", \
+ erts_exit(ERTS_ABORT_EXIT, "%s, line %d: %T: Overrun stack and heap\n", \
__FILE__,__LINE__,(P)->common.id); \
}
@@ -63,10 +84,10 @@
#define ErtsGcQuickSanityCheck(P) \
do { \
ASSERT((P)->heap < (P)->hend); \
- ASSERT((P)->heap_sz == (P)->hend - (P)->heap); \
+    ASSERT((P)->abandoned_heap || (P)->heap_sz == (P)->hend - (P)->heap); \
ASSERT((P)->heap <= (P)->htop && (P)->htop <= (P)->hend); \
ASSERT((P)->heap <= (P)->stop && (P)->stop <= (P)->hend); \
- ASSERT((P)->heap <= (P)->high_water && (P)->high_water <= (P)->hend);\
+    ASSERT((P)->abandoned_heap || ((P)->heap <= (P)->high_water && (P)->high_water <= (P)->hend)); \
OverRunCheck((P)); \
} while (0)
#else
@@ -94,19 +115,35 @@ typedef struct {
static Uint setup_rootset(Process*, Eterm*, int, Rootset*);
static void cleanup_rootset(Rootset *rootset);
-static Uint combined_message_size(Process* p);
-static void remove_message_buffers(Process* p);
-static int major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl);
-static int minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl);
-static void do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj);
-static Eterm* sweep_rootset(Rootset *rootset, Eterm* htop, char* src, Uint src_size);
-static Eterm* sweep_one_area(Eterm* n_hp, Eterm* n_htop, char* src, Uint src_size);
-static Eterm* sweep_one_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop,
- char* src, Uint src_size);
-static Eterm* collect_heap_frags(Process* p, Eterm* heap,
- Eterm* htop, Eterm* objv, int nobj);
-static Uint adjust_after_fullsweep(Process *p, Uint size_before,
- int need, Eterm *objv, int nobj);
+static Eterm *full_sweep_heaps(Process *p,
+ int hibernate,
+ Eterm *n_heap, Eterm* n_htop,
+ char *oh, Uint oh_size,
+ Eterm *objv, int nobj);
+static int garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
+ int need, Eterm* objv, int nobj, int fcalls,
+ Uint max_young_gen_usage);
+static int major_collection(Process* p, ErlHeapFragment *live_hf_end,
+ int need, Eterm* objv, int nobj,
+ Uint ygen_usage, Uint *recl);
+static int minor_collection(Process* p, ErlHeapFragment *live_hf_end,
+ int need, Eterm* objv, int nobj,
+ Uint ygen_usage, Uint *recl);
+static void do_minor(Process *p, ErlHeapFragment *live_hf_end,
+ char *mature, Uint mature_size,
+ Uint new_sz, Eterm* objv, int nobj);
+static Eterm *sweep_new_heap(Eterm *n_hp, Eterm *n_htop,
+ char* old_heap, Uint old_heap_size);
+static Eterm *sweep_heaps(Eterm *n_hp, Eterm *n_htop,
+ char* old_heap, Uint old_heap_size);
+static Eterm* sweep_literal_area(Eterm* n_hp, Eterm* n_htop,
+ char* old_heap, Uint old_heap_size,
+ char* src, Uint src_size);
+static Eterm* sweep_literals_to_old_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop,
+ char* src, Uint src_size);
+static Eterm* collect_live_heap_frags(Process* p, ErlHeapFragment *live_hf_end,
+ Eterm* heap, Eterm* htop, Eterm* objv, int nobj);
+static int adjust_after_fullsweep(Process *p, int need, Eterm *objv, int nobj);
static void shrink_new_heap(Process *p, Uint new_sz, Eterm *objv, int nobj);
static void grow_new_heap(Process *p, Uint new_sz, Eterm* objv, int nobj);
static void sweep_off_heap(Process *p, int fullsweep);
@@ -116,16 +153,18 @@ static void offset_rootset(Process *p, Sint offs, char* area, Uint area_size,
Eterm* objv, int nobj);
static void offset_off_heap(Process* p, Sint offs, char* area, Uint area_size);
static void offset_mqueue(Process *p, Sint offs, char* area, Uint area_size);
-
+static void move_msgq_to_heap(Process *p);
+static int reached_max_heap_size(Process *p, Uint total_heap_size,
+ Uint extra_heap_size, Uint extra_old_heap_size);
static void init_gc_info(ErtsGCInfo *gcip);
+static Uint64 next_vheap_size(Process* p, Uint64 vheap, Uint64 vheap_sz);
#ifdef HARDDEBUG
-static void disallow_heap_frag_ref_in_heap(Process* p);
+static void disallow_heap_frag_ref_in_heap(Process *p, Eterm *heap, Eterm *htop);
static void disallow_heap_frag_ref_in_old_heap(Process* p);
-static void disallow_heap_frag_ref(Process* p, Eterm* n_htop, Eterm* objv, int nobj);
#endif
-#if defined(ARCH_64) && !HALFWORD_HEAP
+#if defined(ARCH_64)
# define MAX_HEAP_SIZES 154
#else
# define MAX_HEAP_SIZES 59
@@ -139,31 +178,34 @@ Uint erts_test_long_gc_sleep; /* Only used for testing... */
typedef struct {
Process *proc;
Eterm ref;
- Eterm ref_heap[REF_THING_SIZE];
+ Eterm ref_heap[ERTS_REF_THING_SIZE];
Uint req_sched;
- erts_smp_atomic32_t refc;
+ erts_atomic32_t refc;
} ErtsGCInfoReq;
-#if !HALFWORD_HEAP
-ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(gcireq,
- ErtsGCInfoReq,
- 5,
- ERTS_ALC_T_GC_INFO_REQ)
-#else
-static ERTS_INLINE ErtsGCInfoReq *
-gcireq_alloc(void)
-{
- return erts_alloc(ERTS_ALC_T_GC_INFO_REQ,
- sizeof(ErtsGCInfoReq));
-}
+static struct {
+ erts_mtx_t mtx;
+ ErtsGCInfo info;
+} dirty_gc;
-static ERTS_INLINE void
-gcireq_free(ErtsGCInfoReq *ptr)
+static ERTS_INLINE int
+gc_cost(Uint gc_moved_live_words, Uint resize_moved_words)
{
- erts_free(ERTS_ALC_T_GC_INFO_REQ, ptr);
+ Sint reds;
+
+ reds = gc_moved_live_words/10;
+ reds += resize_moved_words/100;
+ if (reds < 1)
+ return 1;
+ if (reds > INT_MAX)
+ return INT_MAX;
+ return (int) reds;
}
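
The cost model in gc_cost() above charges one reduction per 10 live words copied by the collector and one per 100 words moved while resizing, clamped to [1, INT_MAX]. For example, a collection that copies 10,000 live words and then shrinks the heap, moving another 2,000 words, is charged 10000/10 + 2000/100 = 1,020 reductions.
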
-#endif
+ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(gcireq,
+ ErtsGCInfoReq,
+ 5,
+ ERTS_ALC_T_GC_INFO_REQ)
/*
* Initialize GC global data.
*/
@@ -173,15 +215,15 @@ erts_init_gc(void)
int i = 0, ix;
Sint max_heap_size = 0;
- ASSERT(offsetof(ProcBin,thing_word) == offsetof(struct erl_off_heap_header,thing_word));
- ASSERT(offsetof(ProcBin,thing_word) == offsetof(ErlFunThing,thing_word));
- ASSERT(offsetof(ProcBin,thing_word) == offsetof(ExternalThing,header));
- ASSERT(offsetof(ProcBin,size) == offsetof(struct erl_off_heap_header,size));
- ASSERT(offsetof(ProcBin,size) == offsetof(ErlSubBin,size));
- ASSERT(offsetof(ProcBin,size) == offsetof(ErlHeapBin,size));
- ASSERT(offsetof(ProcBin,next) == offsetof(struct erl_off_heap_header,next));
- ASSERT(offsetof(ProcBin,next) == offsetof(ErlFunThing,next));
- ASSERT(offsetof(ProcBin,next) == offsetof(ExternalThing,next));
+ ERTS_CT_ASSERT(offsetof(ProcBin,thing_word) == offsetof(struct erl_off_heap_header,thing_word));
+ ERTS_CT_ASSERT(offsetof(ProcBin,thing_word) == offsetof(ErlFunThing,thing_word));
+ ERTS_CT_ASSERT(offsetof(ProcBin,thing_word) == offsetof(ExternalThing,header));
+ ERTS_CT_ASSERT(offsetof(ProcBin,size) == offsetof(struct erl_off_heap_header,size));
+ ERTS_CT_ASSERT(offsetof(ProcBin,size) == offsetof(ErlSubBin,size));
+ ERTS_CT_ASSERT(offsetof(ProcBin,size) == offsetof(ErlHeapBin,size));
+ ERTS_CT_ASSERT(offsetof(ProcBin,next) == offsetof(struct erl_off_heap_header,next));
+ ERTS_CT_ASSERT(offsetof(ProcBin,next) == offsetof(ErlFunThing,next));
+ ERTS_CT_ASSERT(offsetof(ProcBin,next) == offsetof(ExternalThing,next));
erts_test_long_gc_sleep = 0;
@@ -205,7 +247,7 @@ erts_init_gc(void)
}
- /* for 32 bit we want max_heap_size to be MAX(32bit) / 4 [words] (and halfword)
+ /* for 32 bit we want max_heap_size to be MAX(32bit) / 4 [words]
* for 64 bit we want max_heap_size to be MAX(52bit) / 8 [words]
*/
@@ -229,10 +271,11 @@ erts_init_gc(void)
init_gc_info(&esdp->gc_info);
}
-#if !HALFWORD_HEAP
- init_gcireq_alloc();
-#endif
+ erts_mtx_init(&dirty_gc.mtx, "dirty_gc_info", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+ init_gc_info(&dirty_gc.info);
+ init_gcireq_alloc();
}
/*
@@ -265,7 +308,7 @@ erts_next_heap_size(Uint size, Uint offset)
low = mid + 1;
}
}
- erl_exit(1, "no next heap size found: %beu, offset %beu\n", size, offset);
+ erts_exit(ERTS_ERROR_EXIT, "no next heap size found: %beu, offset %beu\n", size, offset);
}
return 0;
}
@@ -294,7 +337,7 @@ erts_heap_sizes(Process* p)
for (i = num_heap_sizes-1; i >= 0; i--) {
n += 2;
- if (!MY_IS_SSMALL(heap_sizes[i])) {
+ if (!IS_SSMALL(heap_sizes[i])) {
big += BIG_UINT_HEAP_SIZE;
}
}
@@ -309,7 +352,7 @@ erts_heap_sizes(Process* p)
Eterm num;
Sint sz = heap_sizes[i];
- if (MY_IS_SSMALL(sz)) {
+ if (IS_SSMALL(sz)) {
num = make_small(sz);
} else {
num = uint_to_big(sz, bigp);
@@ -348,32 +391,54 @@ erts_offset_off_heap(ErlOffHeap *ohp, Sint offs, Eterm* low, Eterm* high)
#undef ptr_within
Eterm
-erts_gc_after_bif_call(Process* p, Eterm result, Eterm* regs, Uint arity)
+erts_gc_after_bif_call_lhf(Process* p, ErlHeapFragment *live_hf_end,
+ Eterm result, Eterm* regs, Uint arity)
{
int cost;
+ if (p->flags & F_HIBERNATE_SCHED) {
+ /*
+ * We just hibernated. We do *not* want to mess
+ * up the hibernation by an ordinary GC...
+ */
+ return result;
+ }
+
+ if (!p->mbuf) {
+        /* Must have GC'd in BIF call... invalidate live_hf_end */
+ live_hf_end = ERTS_INVALID_HFRAG_PTR;
+ }
+
if (is_non_value(result)) {
if (p->freason == TRAP) {
- #if HIPE
+#ifdef HIPE
if (regs == NULL) {
- regs = ERTS_PROC_GET_SCHDATA(p)->x_reg_array;
+ regs = erts_proc_sched_data(p)->x_reg_array;
}
- #endif
- cost = erts_garbage_collect(p, 0, regs, p->arity);
+#endif
+ cost = garbage_collect(p, live_hf_end, 0, regs, p->arity, p->fcalls, 0);
} else {
- cost = erts_garbage_collect(p, 0, regs, arity);
+ cost = garbage_collect(p, live_hf_end, 0, regs, arity, p->fcalls, 0);
}
} else {
Eterm val[1];
val[0] = result;
- cost = erts_garbage_collect(p, 0, val, 1);
+ cost = garbage_collect(p, live_hf_end, 0, val, 1, p->fcalls, 0);
result = val[0];
}
BUMP_REDS(p, cost);
+
return result;
}
+Eterm
+erts_gc_after_bif_call(Process* p, Eterm result, Eterm* regs, Uint arity)
+{
+ return erts_gc_after_bif_call_lhf(p, ERTS_INVALID_HFRAG_PTR,
+ result, regs, arity);
+}
+
static ERTS_INLINE void reset_active_writer(Process *p)
{
struct erl_off_heap_header* ptr;
@@ -387,6 +452,186 @@ static ERTS_INLINE void reset_active_writer(Process *p)
}
}
+#define ERTS_DELAY_GC_EXTRA_FREE 40
+#define ERTS_ABANDON_HEAP_COST 10
+
+static int
+delay_garbage_collection(Process *p, ErlHeapFragment *live_hf_end, int need, int fcalls)
+{
+ ErlHeapFragment *hfrag;
+ Eterm *orig_heap, *orig_hend, *orig_htop, *orig_stop;
+ Eterm *stop, *hend;
+ Uint hsz, ssz;
+ int reds_left;
+
+ ERTS_HOLE_CHECK(p);
+
+ if ((p->flags & F_DISABLE_GC)
+ && p->live_hf_end == ERTS_INVALID_HFRAG_PTR) {
+ /*
+ * A BIF yielded with disabled GC. Remember
+ * heap fragments created by the BIF until we
+ * do next GC.
+ */
+ p->live_hf_end = live_hf_end;
+ }
+
+ if (need == 0) {
+ if (p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)) {
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(p)));
+ goto force_reschedule;
+ }
+ return 1;
+ }
+ /*
+ * Satisfy need in a heap fragment...
+ */
+ ASSERT(need > 0);
+
+ orig_heap = p->heap;
+ orig_hend = p->hend;
+ orig_htop = p->htop;
+ orig_stop = p->stop;
+
+ ssz = orig_hend - orig_stop;
+ hsz = ssz + need + ERTS_DELAY_GC_EXTRA_FREE;
+
+ hfrag = new_message_buffer(hsz);
+ p->heap = p->htop = &hfrag->mem[0];
+ p->hend = hend = &hfrag->mem[hsz];
+ p->stop = stop = hend - ssz;
+ sys_memcpy((void *) stop, (void *) orig_stop, ssz * sizeof(Eterm));
+
+ if (p->abandoned_heap) {
+ /*
+ * Active heap already in a fragment; adjust it and
+ * save it into mbuf list...
+ */
+ ErlHeapFragment *hfrag = ((ErlHeapFragment *)
+ (((char *) orig_heap)
+ - offsetof(ErlHeapFragment, mem)));
+ Uint used = orig_htop - orig_heap;
+ hfrag->used_size = used;
+ p->mbuf_sz += used;
+ ASSERT(hfrag->used_size <= hfrag->alloc_size);
+ ASSERT(!hfrag->off_heap.first && !hfrag->off_heap.overhead);
+ hfrag->next = p->mbuf;
+ p->mbuf = hfrag;
+ }
+ else {
+ /* Do not leave a hole in the abandoned heap... */
+ if (orig_htop < orig_hend) {
+ *orig_htop = make_pos_bignum_header(orig_hend-orig_htop-1);
+ if (orig_htop + 1 < orig_hend) {
+ orig_hend[-1] = (Uint) (orig_htop - orig_heap);
+ p->flags |= F_ABANDONED_HEAP_USE;
+ }
+ }
+ p->abandoned_heap = orig_heap;
+ }
+
+#ifdef CHECK_FOR_HOLES
+ p->last_htop = p->htop;
+ p->heap_hfrag = hfrag;
+#endif
+
+force_reschedule:
+
+ /* Make sure that we do a proper GC as soon as possible... */
+ p->flags |= F_FORCE_GC;
+ reds_left = ERTS_REDS_LEFT(p, fcalls);
+ ASSERT(CONTEXT_REDS - reds_left >= erts_proc_sched_data(p)->virtual_reds);
+
+ if (reds_left > ERTS_ABANDON_HEAP_COST) {
+ int vreds = reds_left - ERTS_ABANDON_HEAP_COST;
+ erts_proc_sched_data((p))->virtual_reds += vreds;
+ }
+
+ ERTS_CHK_MBUF_SZ(p);
+
+ ASSERT(CONTEXT_REDS >= erts_proc_sched_data(p)->virtual_reds);
+ return reds_left;
+}
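
To make the numbers above concrete: with need = 100, a 50-word stack (ssz = hend - stop) and ERTS_DELAY_GC_EXTRA_FREE = 40, the process gets a fresh 190-word heap fragment; the 50 stack words are copied to its end, allocation restarts at its base, and the previous heap is either chained into the mbuf list (if it already was a fragment) or parked as abandoned_heap, with a bignum header plugging the unused gap so later sweeps can skip it.
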
+
+static ERTS_FORCE_INLINE Uint
+young_gen_usage(Process *p)
+{
+ Uint hsz;
+ Eterm *aheap;
+
+ ERTS_CHK_MBUF_SZ(p);
+
+ hsz = p->mbuf_sz;
+
+ if (p->flags & F_ON_HEAP_MSGQ) {
+ ErtsMessage *mp;
+ for (mp = p->msg.first; mp; mp = mp->next) {
+	    /*
+	     * We leave not-yet-decoded distribution messages as they
+	     * are in the queue, since their maximum size cannot be
+	     * determined until they are actually decoded. However, we
+	     * use their estimated size when calculating the need,
+	     * thereby making it more likely that they will fit on the
+	     * heap when actually decoded.
+	     */
+ if (mp->data.attached)
+ hsz += erts_msg_attached_data_size(mp);
+ }
+ }
+
+ hsz += p->htop - p->heap;
+ aheap = p->abandoned_heap;
+ if (aheap) {
+ /* used in orig heap */
+ if (p->flags & F_ABANDONED_HEAP_USE)
+ hsz += aheap[p->heap_sz-1];
+ else
+ hsz += p->heap_sz;
+ }
+ return hsz;
+}
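
A worked example of the accounting above: a process with 300 words of heap fragments (mbuf_sz), 1,500 words used on its current heap (htop - heap) and a fully used 2,000-word abandoned heap reports a young-generation usage of 300 + 1500 + 2000 = 3,800 words, plus the estimated sizes of any still-attached messages when on-heap message queuing is in effect.
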
+
+#define ERTS_GET_ORIG_HEAP(Proc, Heap, HTop) \
+ do { \
+ Eterm *aheap__ = (Proc)->abandoned_heap; \
+ if (!aheap__) { \
+ (Heap) = (Proc)->heap; \
+ (HTop) = (Proc)->htop; \
+ } \
+ else { \
+ (Heap) = aheap__; \
+ if ((Proc)->flags & F_ABANDONED_HEAP_USE) \
+ (HTop) = aheap__ + aheap__[(Proc)->heap_sz-1]; \
+ else \
+ (HTop) = aheap__ + (Proc)->heap_sz; \
+ } \
+ } while (0)
+
+
+static ERTS_INLINE void
+check_for_possibly_long_gc(Process *p, Uint ygen_usage)
+{
+ int major;
+ Uint sz;
+
+ major = (p->flags & F_NEED_FULLSWEEP) || GEN_GCS(p) >= MAX_GEN_GCS(p);
+
+ sz = ygen_usage;
+ sz += p->hend - p->stop;
+ if (p->flags & F_ON_HEAP_MSGQ)
+ sz += p->msg.len;
+ if (major)
+ sz += p->old_htop - p->old_heap;
+
+ if (sz >= ERTS_POTENTIALLY_LONG_GC_HSIZE) {
+ ASSERT(!(p->flags & (F_DISABLE_GC|F_DELAY_GC)));
+ p->flags |= major ? F_DIRTY_MAJOR_GC : F_DIRTY_MINOR_GC;
+ erts_schedule_dirty_sys_execution(p);
+ }
+}
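
The estimate above is deliberately coarse: for a major pass on a process with 200,000 words of young-generation usage, a 5,000-word stack and a 300,000-word old heap, sz comes out at 505,000 words; if that reaches ERTS_POTENTIALLY_LONG_GC_HSIZE (value not shown in this diff), the process is flagged F_DIRTY_MAJOR_GC or F_DIRTY_MINOR_GC and handed to a dirty scheduler instead of blocking a normal one.
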
+
+
/*
* Garbage collect a process.
*
@@ -395,39 +640,58 @@ static ERTS_INLINE void reset_active_writer(Process *p)
* objv: Array of terms to add to rootset; that is to preserve.
* nobj: Number of objects in objv.
*/
-int
-erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
+static int
+garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
+ int need, Eterm* objv, int nobj, int fcalls,
+ Uint max_young_gen_usage)
{
Uint reclaimed_now = 0;
- int done = 0;
- Uint ms1, s1, us1;
- ErtsSchedulerData *esdp;
+ Uint ygen_usage;
+ Eterm gc_trace_end_tag;
+ int reds;
+ ErtsMonotonicTime start_time;
+ ErtsSchedulerData *esdp = erts_proc_sched_data(p);
+ erts_aint32_t state;
+ ERTS_MSACC_PUSH_STATE_M();
#ifdef USE_VM_PROBES
DTRACE_CHARBUF(pidbuf, DTRACE_TERM_BUF_SIZE);
#endif
- if (p->flags & F_DISABLE_GC) {
- ASSERT(need == 0);
- return 1;
- }
+ ERTS_UNDEF(start_time, 0);
+ ERTS_CHK_MBUF_SZ(p);
- esdp = erts_get_scheduler_data();
+ ASSERT(CONTEXT_REDS - ERTS_REDS_LEFT(p, fcalls) >= esdp->virtual_reds);
- if (IS_TRACED_FL(p, F_TRACE_GC)) {
- trace_gc(p, am_gc_start);
+ state = erts_atomic32_read_nob(&p->state);
+
+ if ((p->flags & (F_DISABLE_GC|F_DELAY_GC)) || state & ERTS_PSFLG_EXITING) {
+ delay_gc_before_start:
+ return delay_garbage_collection(p, live_hf_end, need, fcalls);
}
- erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
- if (erts_system_monitor_long_gc != 0) {
- get_now(&ms1, &s1, &us1);
+ ygen_usage = max_young_gen_usage ? max_young_gen_usage : young_gen_usage(p);
+
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ check_for_possibly_long_gc(p, ygen_usage);
+ if (p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC))
+ goto delay_gc_before_start;
}
+ if (p->abandoned_heap)
+ live_hf_end = ERTS_INVALID_HFRAG_PTR;
+ else if (p->live_hf_end != ERTS_INVALID_HFRAG_PTR)
+ live_hf_end = p->live_hf_end;
+
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_GC);
+
+ erts_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
+ if (erts_system_monitor_long_gc != 0)
+ start_time = erts_get_monotonic_time(esdp);
+
ERTS_CHK_OFFHEAP(p);
ErtsGcQuickSanityCheck(p);
- if (GEN_GCS(p) >= MAX_GEN_GCS(p)) {
- FLAGS(p) |= F_NEED_FULLSWEEP;
- }
+
#ifdef USE_VM_PROBES
*pidbuf = '\0';
if (DTRACE_ENABLED(gc_major_start)
@@ -440,17 +704,46 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
/*
* Test which type of GC to do.
*/
- while (!done) {
- if ((FLAGS(p) & F_NEED_FULLSWEEP) != 0) {
- DTRACE2(gc_major_start, pidbuf, need);
- done = major_collection(p, need, objv, nobj, &reclaimed_now);
- DTRACE2(gc_major_end, pidbuf, reclaimed_now);
- } else {
- DTRACE2(gc_minor_start, pidbuf, need);
- done = minor_collection(p, need, objv, nobj, &reclaimed_now);
- DTRACE2(gc_minor_end, pidbuf, reclaimed_now);
- }
+
+ if (GEN_GCS(p) < MAX_GEN_GCS(p) && !(FLAGS(p) & F_NEED_FULLSWEEP)) {
+ if (IS_TRACED_FL(p, F_TRACE_GC)) {
+ trace_gc(p, am_gc_minor_start, need, THE_NON_VALUE);
+ }
+ DTRACE2(gc_minor_start, pidbuf, need);
+ reds = minor_collection(p, live_hf_end, need, objv, nobj,
+ ygen_usage, &reclaimed_now);
+ DTRACE2(gc_minor_end, pidbuf, reclaimed_now);
+ if (reds == -1) {
+ if (IS_TRACED_FL(p, F_TRACE_GC)) {
+ trace_gc(p, am_gc_minor_end, reclaimed_now, THE_NON_VALUE);
+ }
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ p->flags |= F_NEED_FULLSWEEP;
+ check_for_possibly_long_gc(p, ygen_usage);
+ if (p->flags & F_DIRTY_MAJOR_GC)
+ goto delay_gc_after_start;
+ }
+ goto do_major_collection;
+ }
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+ p->flags &= ~F_DIRTY_MINOR_GC;
+ gc_trace_end_tag = am_gc_minor_end;
+ } else {
+do_major_collection:
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_GC_FULL);
+ if (IS_TRACED_FL(p, F_TRACE_GC)) {
+ trace_gc(p, am_gc_major_start, need, THE_NON_VALUE);
+ }
+ DTRACE2(gc_major_start, pidbuf, need);
+ reds = major_collection(p, live_hf_end, need, objv, nobj,
+ ygen_usage, &reclaimed_now);
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+ p->flags &= ~(F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC);
+ DTRACE2(gc_major_end, pidbuf, reclaimed_now);
+ gc_trace_end_tag = am_gc_major_end;
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_GC);
}
+
reset_active_writer(p);
/*
@@ -461,23 +754,46 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
ErtsGcQuickSanityCheck(p);
- erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);
+    /* Max heap size has been reached and the process was configured
+       to be killed, so we kill it and put it in a delayed garbage
+       collecting state. There should be no gc_end trace or
+       long_gc/large_gc triggers when this happens, as the process was
+       killed before a GC could be done. */
+ if (reds == -2) {
+ ErtsProcLocks locks = ERTS_PROC_LOCKS_ALL;
+ int res;
+
+ erts_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_send_exit_signal(p, p->common.id, p, &locks,
+ am_kill, NIL, NULL, 0);
+ erts_proc_unlock(p, locks & ERTS_PROC_LOCKS_ALL_MINOR);
+
+ delay_gc_after_start:
+ /* erts_send_exit_signal looks for ERTS_PSFLG_GC, so
+ we have to remove it after the signal is sent */
+ erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);
+
+ /* We have to make sure that we have space for need on the heap */
+ res = delay_garbage_collection(p, live_hf_end, need, fcalls);
+ ERTS_MSACC_POP_STATE_M();
+ return res;
+ }
+
+ erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);
if (IS_TRACED_FL(p, F_TRACE_GC)) {
- trace_gc(p, am_gc_end);
+ trace_gc(p, gc_trace_end_tag, reclaimed_now, THE_NON_VALUE);
}
if (erts_system_monitor_long_gc != 0) {
- Uint ms2, s2, us2;
- Sint t;
+ ErtsMonotonicTime end_time;
+ Uint gc_time;
if (erts_test_long_gc_sleep)
while (0 != erts_milli_sleep(erts_test_long_gc_sleep));
- get_now(&ms2, &s2, &us2);
- t = ms2 - ms1;
- t = t*1000000 + s2 - s1;
- t = t*1000 + ((Sint) (us2 - us1))/1000;
- if (t > 0 && (Uint)t > erts_system_monitor_long_gc) {
- monitor_long_gc(p, t);
+ end_time = erts_get_monotonic_time(esdp);
+ gc_time = (Uint) ERTS_MONOTONIC_TO_MSEC(end_time - start_time);
+ if (gc_time && gc_time > erts_system_monitor_long_gc) {
+ monitor_long_gc(p, gc_time);
}
}
if (erts_system_monitor_large_heap != 0) {
@@ -487,10 +803,22 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
monitor_large_heap(p);
}
- esdp->gc_info.garbage_cols++;
- esdp->gc_info.reclaimed += reclaimed_now;
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ erts_mtx_lock(&dirty_gc.mtx);
+ dirty_gc.info.garbage_cols++;
+ dirty_gc.info.reclaimed += reclaimed_now;
+ erts_mtx_unlock(&dirty_gc.mtx);
+ }
+ else
+ {
+ esdp->gc_info.garbage_cols++;
+ esdp->gc_info.reclaimed += reclaimed_now;
+ }
- FLAGS(p) &= ~F_FORCE_GC;
+ FLAGS(p) &= ~(F_FORCE_GC|F_HIBERNATED);
+ p->live_hf_end = ERTS_INVALID_HFRAG_PTR;
+
+ ERTS_MSACC_POP_STATE_M();
#ifdef CHECK_FOR_HOLES
/*
@@ -512,92 +840,96 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
p->last_old_htop = p->old_htop;
#endif
- /* FIXME: This function should really return an Sint, i.e., a possibly
- 64 bit wide signed integer, but that requires updating all the code
- that calls it. For now, we just return INT_MAX if the result is too
- large for an int. */
- {
- Sint result = (HEAP_TOP(p) - HEAP_START(p)) / 10;
- if (result >= INT_MAX) return INT_MAX;
- else return (int) result;
- }
+ ASSERT(!p->mbuf);
+ ASSERT(!ERTS_IS_GC_DESIRED(p));
+ ASSERT(need <= HEAP_LIMIT(p) - HEAP_TOP(p));
+
+ return reds;
+}
+
+int
+erts_garbage_collect_nobump(Process* p, int need, Eterm* objv, int nobj, int fcalls)
+{
+ int reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, fcalls, 0);
+ int reds_left = ERTS_REDS_LEFT(p, fcalls);
+ if (reds > reds_left)
+ reds = reds_left;
+ ASSERT(CONTEXT_REDS - (reds_left - reds) >= erts_proc_sched_data(p)->virtual_reds);
+ return reds;
}
+void
+erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
+{
+ int reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, p->fcalls, 0);
+ BUMP_REDS(p, reds);
+ ASSERT(CONTEXT_REDS - ERTS_BIF_REDS_LEFT(p)
+ >= erts_proc_sched_data(p)->virtual_reds);
+}
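
The two entry points above split reduction bookkeeping: erts_garbage_collect() bumps the returned cost itself, while erts_garbage_collect_nobump() caps the cost at the reductions the caller has left (derived from fcalls) and returns it, so callers that manage their own reduction budget, such as BIFs that may yield, can decide when to BUMP_REDS.
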
+
+
/*
 * Place all living data on the new heap; deallocate any old heap.
* Meant to be used by hibernate/3.
*/
-void
-erts_garbage_collect_hibernate(Process* p)
+static int
+garbage_collect_hibernate(Process* p, int check_long_gc)
{
Uint heap_size;
Eterm* heap;
Eterm* htop;
- Rootset rootset;
- char* src;
- Uint src_size;
Uint actual_size;
char* area;
Uint area_size;
Sint offs;
+ int reds;
if (p->flags & F_DISABLE_GC)
ERTS_INTERNAL_ERROR("GC disabled");
+ if (ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(p)))
+ p->flags &= ~(F_DIRTY_GC_HIBERNATE|F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC);
+ else if (check_long_gc) {
+ Uint flags = p->flags;
+ p->flags |= F_NEED_FULLSWEEP;
+ check_for_possibly_long_gc(p, (p->htop - p->heap) + p->mbuf_sz);
+ if (p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)) {
+ p->flags = flags|F_DIRTY_GC_HIBERNATE;
+ return 1;
+ }
+ p->flags = flags;
+ }
/*
* Preliminaries.
*/
- erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
+ erts_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
ErtsGcQuickSanityCheck(p);
- ASSERT(p->mbuf_sz == 0);
- ASSERT(p->mbuf == 0);
ASSERT(p->stop == p->hend); /* Stack must be empty. */
/*
* Do it.
*/
+ heap_size = p->heap_sz + (p->old_htop - p->old_heap) + p->mbuf_sz;
- heap_size = p->heap_sz + (p->old_htop - p->old_heap);
heap = (Eterm*) ERTS_HEAP_ALLOC(ERTS_ALC_T_TMP_HEAP,
sizeof(Eterm)*heap_size);
htop = heap;
- (void) setup_rootset(p, p->arg_reg, p->arity, &rootset);
-#if HIPE
- hipe_empty_nstack(p);
-#endif
-
- src = (char *) p->heap;
- src_size = (char *) p->htop - src;
- htop = sweep_rootset(&rootset, htop, src, src_size);
- htop = sweep_one_area(heap, htop, src, src_size);
+ htop = full_sweep_heaps(p,
+ 1,
+ heap,
+ htop,
+ (char *) p->old_heap,
+ (char *) p->old_htop - (char *) p->old_heap,
+ p->arg_reg,
+ p->arity);
- if (p->old_heap) {
- src = (char *) p->old_heap;
- src_size = (char *) p->old_htop - src;
- htop = sweep_rootset(&rootset, htop, src, src_size);
- htop = sweep_one_area(heap, htop, src, src_size);
- }
+#ifdef HARDDEBUG
+ disallow_heap_frag_ref_in_heap(p, heap, htop);
+#endif
- cleanup_rootset(&rootset);
-
- if (MSO(p).first) {
- sweep_off_heap(p, 1);
- }
-
- /*
- * Update all pointers.
- */
- ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
- (void*)HEAP_START(p),
- HEAP_SIZE(p) * sizeof(Eterm));
- if (p->old_heap) {
- ERTS_HEAP_FREE(ERTS_ALC_T_OLD_HEAP,
- (void*)p->old_heap,
- (p->old_hend - p->old_heap) * sizeof(Eterm));
- p->old_heap = p->old_htop = p->old_hend = 0;
- }
+ erts_deallocate_young_generation(p);
p->heap = heap;
p->high_water = htop;
@@ -612,6 +944,7 @@ erts_garbage_collect_hibernate(Process* p)
}
FLAGS(p) &= ~F_FORCE_GC;
+ p->live_hf_end = ERTS_INVALID_HFRAG_PTR;
/*
* Move the heap to its final destination.
@@ -658,16 +991,80 @@ erts_garbage_collect_hibernate(Process* p)
ErtsGcQuickSanityCheck(p);
- erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);
-}
+ p->flags |= F_HIBERNATED;
+
+ erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);
+ reds = gc_cost(actual_size, actual_size);
+ return reds;
+}
void
+erts_garbage_collect_hibernate(Process* p)
+{
+ int reds = garbage_collect_hibernate(p, 1);
+ BUMP_REDS(p, reds);
+}
+
+/*
+ * HiPE native code stack scanning procedures:
+ * - fullsweep_nstack()
+ * - gensweep_nstack()
+ * - offset_nstack()
+ * - sweep_literals_nstack()
+ */
+#if defined(HIPE)
+
+#define GENSWEEP_NSTACK(p,old_htop,n_htop) \
+ do { \
+ Eterm *tmp_old_htop = old_htop; \
+ Eterm *tmp_n_htop = n_htop; \
+ gensweep_nstack((p), &tmp_old_htop, &tmp_n_htop); \
+ old_htop = tmp_old_htop; \
+ n_htop = tmp_n_htop; \
+ } while(0)
+
+/*
+ * offset_nstack() can ignore the descriptor-based traversal the other
+ * nstack procedures use and simply call offset_heap_ptr() instead.
+ * This relies on two facts:
+ * 1. The only live non-Erlang terms on an nstack are return addresses,
+ * and they will be skipped thanks to the low/high range check.
+ * 2. Dead values, even if mistaken for pointers into the low/high area,
+ * can be offset safely since they won't be dereferenced.
+ *
+ * XXX: WARNING: If HiPE starts storing other non-Erlang values on the
+ * nstack, such as floats, then this will have to be changed.
+ */
+static ERTS_INLINE void offset_nstack(Process* p, Sint offs,
+ char* area, Uint area_size)
+{
+ if (p->hipe.nstack) {
+ ASSERT(p->hipe.nsp && p->hipe.nstend);
+ offset_heap_ptr(hipe_nstack_start(p), hipe_nstack_used(p),
+ offs, area, area_size);
+ }
+ else {
+ ASSERT(!p->hipe.nsp && !p->hipe.nstend);
+ }
+}
+
+#else /* !HIPE */
+
+#define fullsweep_nstack(p,n_htop) (n_htop)
+#define GENSWEEP_NSTACK(p,old_htop,n_htop) do{}while(0)
+#define offset_nstack(p,offs,area,area_size) do{}while(0)
+#define sweep_literals_nstack(p,old_htop,area,area_size) (old_htop)
+
+#endif /* HIPE */
+
+int
erts_garbage_collect_literals(Process* p, Eterm* literals,
- Uint lit_size,
- struct erl_off_heap_header* oh)
+ Uint byte_lit_size,
+ struct erl_off_heap_header* oh,
+ int fcalls)
{
- Uint byte_lit_size = sizeof(Eterm)*lit_size;
+ Uint lit_size = byte_lit_size / sizeof(Eterm);
Uint old_heap_size;
Eterm* temp_lit;
Sint offs;
@@ -677,20 +1074,51 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
Uint area_size;
Eterm* old_htop;
Uint n;
- struct erl_off_heap_header** prev;
+ Uint ygen_usage = 0;
+ struct erl_off_heap_header** prev = NULL;
+ Sint64 reds;
+ int hibernated = !!(p->flags & F_HIBERNATED);
+
+ if (p->flags & (F_DISABLE_GC|F_DELAY_GC))
+ ERTS_INTERNAL_ERROR("GC disabled");
+
+ /*
+ * First an ordinary major collection...
+ */
+
+ p->flags |= F_NEED_FULLSWEEP;
+
+ if (ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(p)))
+ p->flags &= ~F_DIRTY_CLA;
+ else {
+ Uint size = byte_lit_size/sizeof(Uint);
+ ygen_usage = young_gen_usage(p);
+ if (hibernated)
+ size = size*2 + 3*ygen_usage;
+ else
+ size = size + 2*ygen_usage;
+ check_for_possibly_long_gc(p, size);
+ if (p->flags & F_DIRTY_MAJOR_GC) {
+ p->flags |= F_DIRTY_CLA;
+ return 10;
+ }
+ }
+
+ reds = (Sint64) garbage_collect(p, ERTS_INVALID_HFRAG_PTR, 0,
+ p->arg_reg, p->arity, fcalls,
+ ygen_usage);
+
+ ASSERT(!(p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)));
- if (p->flags & F_DISABLE_GC)
- return;
/*
* Set GC state.
*/
- erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
+ erts_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
/*
- * We assume that the caller has already done a major collection
- * (which has discarded the old heap), so that we don't have to cope
- * with pointer to literals on the old heap. We will now allocate
- * an old heap to contain the literals.
+     * Just did a major collection (which discarded the old heap), so
+     * we don't have to cope with pointers to literals on the old
+     * heap. We will now allocate an old heap to contain the literals.
*/
ASSERT(p->old_heap == 0); /* Must NOT have an old heap yet. */
@@ -724,7 +1152,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
area_size = byte_lit_size;
n = setup_rootset(p, p->arg_reg, p->arity, &rootset);
roots = rootset.roots;
- old_htop = p->old_htop;
+ old_htop = sweep_literals_nstack(p, p->old_htop, area, area_size);
while (n--) {
Eterm* g_ptr = roots->v;
Uint g_sz = roots->sz;
@@ -743,8 +1171,8 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
*g_ptr++ = val;
- } else if (in_area(ptr, area, area_size)) {
- MOVE_BOXED(ptr,val,old_htop,g_ptr++);
+ } else if (ErtsInArea(ptr, area, area_size)) {
+ move_boxed(&ptr,val,&old_htop,g_ptr++);
} else {
g_ptr++;
}
@@ -754,8 +1182,8 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
val = *ptr;
if (IS_MOVED_CONS(val)) { /* Moved */
*g_ptr++ = ptr[1];
- } else if (in_area(ptr, area, area_size)) {
- MOVE_CONS(ptr,val,old_htop,g_ptr++);
+ } else if (ErtsInArea(ptr, area, area_size)) {
+ move_cons(&ptr,val,&old_htop,g_ptr++);
} else {
g_ptr++;
}
@@ -774,8 +1202,10 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
* Now we'll have to go through all heaps updating all other references.
*/
- old_htop = sweep_one_heap(p->heap, p->htop, old_htop, area, area_size);
- old_htop = sweep_one_area(p->old_heap, old_htop, area, area_size);
+ old_htop = sweep_literals_to_old_heap(p->heap, p->htop, old_htop, area, area_size);
+ old_htop = sweep_literal_area(p->old_heap, old_htop,
+ (char *) p->old_heap, sizeof(Eterm)*old_heap_size,
+ area, area_size);
ASSERT(p->old_htop <= old_htop && old_htop <= p->old_hend);
p->old_htop = old_htop;
@@ -786,10 +1216,10 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
*/
if (oh) {
- prev = &MSO(p).first;
- while (*prev) {
- prev = &(*prev)->next;
- }
+ prev = &MSO(p).first;
+ while (*prev) {
+ prev = &(*prev)->next;
+ }
}
/*
@@ -811,13 +1241,17 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
* link it into the MSO list for the process.
*/
- erts_refc_inc(&bptr->refc, 1);
+ erts_refc_inc(&bptr->intern.refc, 1);
*prev = ptr;
prev = &ptr->next;
}
oh = oh->next;
}
+ if (prev) {
+ *prev = NULL;
+ }
+
/*
* We no longer need this temporary area.
*/
@@ -826,19 +1260,62 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
/*
* Restore status.
*/
- erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);
+ erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);
+
+ reds += (Sint64) gc_cost((p->htop - p->heap) + byte_lit_size/sizeof(Uint), 0);
+
+ if (hibernated) {
+ /* Restore the process into hibernated state... */
+ reds += garbage_collect_hibernate(p, 0);
+ }
+
+ if (reds > INT_MAX)
+ return INT_MAX;
+ return (int) reds;
}
static int
-minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
+minor_collection(Process* p, ErlHeapFragment *live_hf_end,
+ int need, Eterm* objv, int nobj,
+ Uint ygen_usage, Uint *recl)
{
- Uint mature = HIGH_WATER(p) - HEAP_START(p);
+ Eterm *mature = p->abandoned_heap ? p->abandoned_heap : p->heap;
+ Uint mature_size = p->high_water - mature;
+ Uint size_before = ygen_usage;
+
+ /*
+ * Check if we have gone past the max heap size limit
+ */
+
+ if (MAX_HEAP_SIZE_GET(p)) {
+ Uint heap_size = size_before,
+        /* Note that we also count the unallocated area
+           between the stack and the heap */
+ stack_size = HEAP_END(p) - HEAP_TOP(p),
+ extra_heap_size,
+ extra_old_heap_size = 0;
+
+ /* Add potential old heap size */
+ if (OLD_HEAP(p) == NULL && mature_size != 0) {
+ extra_old_heap_size = erts_next_heap_size(size_before, 1);
+ heap_size += extra_old_heap_size;
+ } else if (OLD_HEAP(p))
+ heap_size += OLD_HEND(p) - OLD_HEAP(p);
+
+ /* Add potential new young heap size */
+ extra_heap_size = next_heap_size(p, stack_size + size_before, 0);
+ heap_size += extra_heap_size;
+
+ if (heap_size > MAX_HEAP_SIZE_GET(p))
+ if (reached_max_heap_size(p, heap_size, extra_heap_size, extra_old_heap_size))
+ return -2;
+ }
/*
* Allocate an old heap if we don't have one and if we'll need one.
*/
- if (OLD_HEAP(p) == NULL && mature != 0) {
+ if (OLD_HEAP(p) == NULL && mature_size != 0) {
Eterm* n_old;
/* Note: We choose a larger heap size than strictly needed,
@@ -846,7 +1323,7 @@ minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
* This improved Estone by more than 1200 estones on my computer
* (Ultra Sparc 10).
*/
- Uint new_sz = erts_next_heap_size(HEAP_TOP(p) - HEAP_START(p), 1);
+ Uint new_sz = erts_next_heap_size(size_before, 1);
/* Create new, empty old_heap */
n_old = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_OLD_HEAP,
@@ -862,35 +1339,34 @@ minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
*/
if (OLD_HEAP(p) &&
- ((mature <= OLD_HEND(p) - OLD_HTOP(p)) &&
- ((BIN_VHEAP_MATURE(p) < ( BIN_OLD_VHEAP_SZ(p) - BIN_OLD_VHEAP(p)))) &&
- ((BIN_OLD_VHEAP_SZ(p) > BIN_OLD_VHEAP(p))) ) ) {
- ErlMessage *msgp;
- Uint size_after;
- Uint need_after;
- Uint stack_size = STACK_SZ_ON_HEAP(p);
- Uint fragments = MBUF_SIZE(p) + combined_message_size(p);
- Uint size_before = fragments + (HEAP_TOP(p) - HEAP_START(p));
- Uint new_sz = next_heap_size(p, HEAP_SIZE(p) + fragments, 0);
-
- do_minor(p, new_sz, objv, nobj);
+ ((mature_size <= OLD_HEND(p) - OLD_HTOP(p)) &&
+ ((BIN_OLD_VHEAP_SZ(p) > BIN_OLD_VHEAP(p))) ) ) {
+ Eterm *prev_old_htop;
+ Uint stack_size, size_after, adjust_size, need_after, new_sz, new_mature;
+
+ stack_size = p->hend - p->stop;
+ new_sz = stack_size + size_before;
+ new_sz = next_heap_size(p, new_sz, 0);
+
+ prev_old_htop = p->old_htop;
+ do_minor(p, live_hf_end, (char *) mature, mature_size*sizeof(Eterm),
+ new_sz, objv, nobj);
+
+ if (p->flags & F_ON_HEAP_MSGQ)
+ move_msgq_to_heap(p);
+
+ new_mature = p->old_htop - prev_old_htop;
+
+ size_after = new_mature;
+ size_after += HEAP_TOP(p) - HEAP_START(p) + p->mbuf_sz;
+ *recl += (size_before - size_after);
- /*
- * Copy newly received message onto the end of the new heap.
- */
- ErtsGcQuickSanityCheck(p);
- for (msgp = p->msg.first; msgp; msgp = msgp->next) {
- if (msgp->data.attached) {
- erts_move_msg_attached_data_to_heap(&p->htop, &p->off_heap, msgp);
- ErtsGcQuickSanityCheck(p);
- }
- }
ErtsGcQuickSanityCheck(p);
GEN_GCS(p)++;
- size_after = HEAP_TOP(p) - HEAP_START(p);
- need_after = size_after + need + stack_size;
- *recl += (size_before - size_after);
+ need_after = ((HEAP_TOP(p) - HEAP_START(p))
+ + need
+ + stack_size);
/*
* Excessively large heaps should be shrunk, but
@@ -901,6 +1377,8 @@ minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
* the heap size is substantial, we don't want to shrink.
*/
+ adjust_size = 0;
+
if ((HEAP_SIZE(p) > 3000) && (4 * need_after < HEAP_SIZE(p)) &&
((HEAP_SIZE(p) > 8000) ||
(HEAP_SIZE(p) > (OLD_HEND(p) - OLD_HEAP(p))))) {
@@ -922,80 +1400,41 @@ minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
: next_heap_size(p, wanted, 0);
if (wanted < HEAP_SIZE(p)) {
shrink_new_heap(p, wanted, objv, nobj);
+ adjust_size = p->htop - p->heap;
}
- ASSERT(HEAP_SIZE(p) == next_heap_size(p, HEAP_SIZE(p), 0));
- return 1; /* We are done. */
}
+ else if (need_after > HEAP_SIZE(p)) {
+ grow_new_heap(p, next_heap_size(p, need_after, 0), objv, nobj);
+ adjust_size = p->htop - p->heap;
+ }
+    /* else: The heap size turned out to be just right. We are done. */
- if (HEAP_SIZE(p) >= need_after) {
- /*
- * The heap size turned out to be just right. We are done.
- */
- ASSERT(HEAP_SIZE(p) == next_heap_size(p, HEAP_SIZE(p), 0));
- return 1;
- }
+ ASSERT(HEAP_SIZE(p) == next_heap_size(p, HEAP_SIZE(p), 0));
+
+    /* The heap usage during GC should be larger than what we end up
+       with after a GC, even if we grow the heap. If this assertion does
+       not hold, we have to check the size in grow_new_heap and
+       potentially kill the process from there */
+ ASSERT(!MAX_HEAP_SIZE_GET(p) ||
+ !(MAX_HEAP_SIZE_FLAGS_GET(p) & MAX_HEAP_SIZE_KILL) ||
+ MAX_HEAP_SIZE_GET(p) > (young_gen_usage(p) +
+ (OLD_HEND(p) - OLD_HEAP(p)) +
+ (HEAP_END(p) - HEAP_TOP(p))));
+
+ return gc_cost(size_after, adjust_size);
}
/*
- * Still not enough room after minor collection. Must force a major collection.
+ * Not enough room for a minor collection. Must force a major collection.
*/
- FLAGS(p) |= F_NEED_FULLSWEEP;
- return 0;
-}
-
-/*
- * HiPE native code stack scanning procedures:
- * - fullsweep_nstack()
- * - gensweep_nstack()
- * - offset_nstack()
- */
-#if defined(HIPE)
-
-#define GENSWEEP_NSTACK(p,old_htop,n_htop) \
- do { \
- Eterm *tmp_old_htop = old_htop; \
- Eterm *tmp_n_htop = n_htop; \
- gensweep_nstack((p), &tmp_old_htop, &tmp_n_htop); \
- old_htop = tmp_old_htop; \
- n_htop = tmp_n_htop; \
- } while(0)
-
-/*
- * offset_nstack() can ignore the descriptor-based traversal the other
- * nstack procedures use and simply call offset_heap_ptr() instead.
- * This relies on two facts:
- * 1. The only live non-Erlang terms on an nstack are return addresses,
- * and they will be skipped thanks to the low/high range check.
- * 2. Dead values, even if mistaken for pointers into the low/high area,
- * can be offset safely since they won't be dereferenced.
- *
- * XXX: WARNING: If HiPE starts storing other non-Erlang values on the
- * nstack, such as floats, then this will have to be changed.
- */
-static ERTS_INLINE void offset_nstack(Process* p, Sint offs,
- char* area, Uint area_size)
-{
- if (p->hipe.nstack) {
- ASSERT(p->hipe.nsp && p->hipe.nstend);
- offset_heap_ptr(hipe_nstack_start(p), hipe_nstack_used(p),
- offs, area, area_size);
- }
- else {
- ASSERT(!p->hipe.nsp && !p->hipe.nstend);
- }
+ return -1;
}
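
With these changes minor_collection() encodes its outcome in the return value: a non-negative result is the reduction cost from gc_cost(); -1 means the minor pass cannot satisfy the request, so garbage_collect() falls through to a major collection; -2 means the configured maximum heap size was exceeded with the kill option set, in which case the caller sends the process an exit signal instead of completing the GC.
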
-#else /* !HIPE */
-
-#define fullsweep_nstack(p,n_htop) (n_htop)
-#define GENSWEEP_NSTACK(p,old_htop,n_htop) do{}while(0)
-#define offset_nstack(p,offs,area,area_size) do{}while(0)
-
-#endif /* HIPE */
-
static void
-do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj)
+do_minor(Process *p, ErlHeapFragment *live_hf_end,
+ char *mature, Uint mature_size,
+ Uint new_sz, Eterm* objv, int nobj)
{
Rootset rootset; /* Rootset for GC (stack, dictionary, etc). */
Roots* roots;
@@ -1004,17 +1443,24 @@ do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj)
Eterm* ptr;
Eterm val;
Eterm gval;
- char* heap = (char *) HEAP_START(p);
- Uint heap_size = (char *) HEAP_TOP(p) - heap;
- Uint mature_size = (char *) HIGH_WATER(p) - heap;
Eterm* old_htop = OLD_HTOP(p);
Eterm* n_heap;
+ char* oh = (char *) OLD_HEAP(p);
+ Uint oh_size = (char *) OLD_HTOP(p) - oh;
+
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] MINOR GC: %p %p %p %p\n", p->common.id,
+ HEAP_START(p), HEAP_END(p), OLD_HEAP(p), OLD_HEND(p)));
n_htop = n_heap = (Eterm*) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP,
sizeof(Eterm)*new_sz);
- if (MBUF(p) != NULL) {
- n_htop = collect_heap_frags(p, n_heap, n_htop, objv, nobj);
+ if (live_hf_end != ERTS_INVALID_HFRAG_PTR) {
+ /*
+ * Move heap frags that we know are completely live
+ * directly into the new young heap generation.
+ */
+ n_htop = collect_live_heap_frags(p, live_hf_end, n_heap, n_htop,
+ objv, nobj);
}
n = setup_rootset(p, objv, nobj, &rootset);
@@ -1037,10 +1483,10 @@ do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj)
if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
*g_ptr++ = val;
- } else if (in_area(ptr, heap, mature_size)) {
- MOVE_BOXED(ptr,val,old_htop,g_ptr++);
- } else if (in_area(ptr, heap, heap_size)) {
- MOVE_BOXED(ptr,val,n_htop,g_ptr++);
+ } else if (ErtsInArea(ptr, mature, mature_size)) {
+ move_boxed(&ptr,val,&old_htop,g_ptr++);
+ } else if (ErtsInYoungGen(gval, ptr, oh, oh_size)) {
+ move_boxed(&ptr,val,&n_htop,g_ptr++);
} else {
g_ptr++;
}
@@ -1052,10 +1498,10 @@ do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj)
val = *ptr;
if (IS_MOVED_CONS(val)) { /* Moved */
*g_ptr++ = ptr[1];
- } else if (in_area(ptr, heap, mature_size)) {
- MOVE_CONS(ptr,val,old_htop,g_ptr++);
- } else if (in_area(ptr, heap, heap_size)) {
- MOVE_CONS(ptr,val,n_htop,g_ptr++);
+ } else if (ErtsInArea(ptr, mature, mature_size)) {
+ move_cons(&ptr,val,&old_htop,g_ptr++);
+ } else if (ErtsInYoungGen(gval, ptr, oh, oh_size)) {
+ move_cons(&ptr,val,&n_htop,g_ptr++);
} else {
g_ptr++;
}
@@ -1079,7 +1525,7 @@ do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj)
*/
if (mature_size == 0) {
- n_htop = sweep_one_area(n_heap, n_htop, heap, heap_size);
+ n_htop = sweep_new_heap(n_heap, n_htop, oh, oh_size);
} else {
Eterm* n_hp = n_heap;
Eterm* ptr;
@@ -1096,10 +1542,10 @@ do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj)
if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
*n_hp++ = val;
- } else if (in_area(ptr, heap, mature_size)) {
- MOVE_BOXED(ptr,val,old_htop,n_hp++);
- } else if (in_area(ptr, heap, heap_size)) {
- MOVE_BOXED(ptr,val,n_htop,n_hp++);
+ } else if (ErtsInArea(ptr, mature, mature_size)) {
+ move_boxed(&ptr,val,&old_htop,n_hp++);
+ } else if (ErtsInYoungGen(gval, ptr, oh, oh_size)) {
+ move_boxed(&ptr,val,&n_htop,n_hp++);
} else {
n_hp++;
}
@@ -1110,10 +1556,10 @@ do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj)
val = *ptr;
if (IS_MOVED_CONS(val)) {
*n_hp++ = ptr[1];
- } else if (in_area(ptr, heap, mature_size)) {
- MOVE_CONS(ptr,val,old_htop,n_hp++);
- } else if (in_area(ptr, heap, heap_size)) {
- MOVE_CONS(ptr,val,n_htop,n_hp++);
+ } else if (ErtsInArea(ptr, mature, mature_size)) {
+ move_cons(&ptr,val,&old_htop,n_hp++);
+ } else if (ErtsInYoungGen(gval, ptr, oh, oh_size)) {
+ move_cons(&ptr,val,&n_htop,n_hp++);
} else {
n_hp++;
}
@@ -1132,11 +1578,11 @@ do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj)
if (IS_MOVED_BOXED(val)) {
*origptr = val;
mb->base = binary_bytes(val);
- } else if (in_area(ptr, heap, mature_size)) {
- MOVE_BOXED(ptr,val,old_htop,origptr);
+ } else if (ErtsInArea(ptr, mature, mature_size)) {
+ move_boxed(&ptr,val,&old_htop,origptr);
mb->base = binary_bytes(mb->orig);
- } else if (in_area(ptr, heap, heap_size)) {
- MOVE_BOXED(ptr,val,n_htop,origptr);
+ } else if (ErtsInYoungGen(*origptr, ptr, oh, oh_size)) {
+ move_boxed(&ptr,val,&n_htop,origptr);
mb->base = binary_bytes(mb->orig);
}
}
@@ -1156,9 +1602,8 @@ do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj)
* may point to the old (soon to be deleted) new_heap.
*/
- if (OLD_HTOP(p) < old_htop) {
- old_htop = sweep_one_area(OLD_HTOP(p), old_htop, heap, heap_size);
- }
+ if (OLD_HTOP(p) < old_htop)
+ old_htop = sweep_new_heap(OLD_HTOP(p), old_htop, oh, oh_size);
OLD_HTOP(p) = old_htop;
HIGH_WATER(p) = n_htop;
@@ -1189,18 +1634,16 @@ do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj)
}
#endif
- ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
- (void*)HEAP_START(p),
- HEAP_SIZE(p) * sizeof(Eterm));
+#ifdef HARDDEBUG
+ disallow_heap_frag_ref_in_heap(p, n_heap, n_htop);
+#endif
+
+ erts_deallocate_young_generation(p);
+
HEAP_START(p) = n_heap;
HEAP_TOP(p) = n_htop;
HEAP_SIZE(p) = new_sz;
HEAP_END(p) = n_heap + new_sz;
-
-#ifdef HARDDEBUG
- disallow_heap_frag_ref_in_heap(p);
-#endif
- remove_message_buffers(p);
}
/*
@@ -1208,38 +1651,31 @@ do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj)
*/
static int
-major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
+major_collection(Process* p, ErlHeapFragment *live_hf_end,
+ int need, Eterm* objv, int nobj,
+ Uint ygen_usage, Uint *recl)
{
- Rootset rootset;
- Roots* roots;
- Uint size_before;
+ Uint size_before, size_after, stack_size;
Eterm* n_heap;
Eterm* n_htop;
- char* src = (char *) HEAP_START(p);
- Uint src_size = (char *) HEAP_TOP(p) - src;
char* oh = (char *) OLD_HEAP(p);
Uint oh_size = (char *) OLD_HTOP(p) - oh;
- Uint n;
- Uint new_sz;
- Uint fragments = MBUF_SIZE(p) + combined_message_size(p);
- ErlMessage *msgp;
+ Uint new_sz, stk_sz;
+ int adjusted;
- size_before = fragments + (HEAP_TOP(p) - HEAP_START(p));
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] MAJOR GC: %p %p %p %p\n", p->common.id,
+ HEAP_START(p), HEAP_END(p), OLD_HEAP(p), OLD_HEND(p)));
/*
* Do a fullsweep GC. First figure out the size of the heap
* to receive all live data.
*/
- new_sz = HEAP_SIZE(p) + fragments + (OLD_HTOP(p) - OLD_HEAP(p));
- /*
- * We used to do
- *
- * new_sz += STACK_SZ_ON_HEAP(p);
- *
- * here for no obvious reason. (The stack size is already counted once
- * in HEAP_SIZE(p).)
- */
+ size_before = ygen_usage;
+ size_before += p->old_htop - p->old_heap;
+ stack_size = p->hend - p->stop;
+
+ new_sz = stack_size + size_before;
new_sz = next_heap_size(p, new_sz, 0);
/*
@@ -1249,17 +1685,93 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
if (new_sz == HEAP_SIZE(p) && FLAGS(p) & F_HEAP_GROW) {
new_sz = next_heap_size(p, HEAP_SIZE(p), 1);
}
+
+
+ if (MAX_HEAP_SIZE_GET(p)) {
+ Uint heap_size = size_before;
+
+ /* Add unused space in old heap */
+ heap_size += OLD_HEND(p) - OLD_HTOP(p);
+
+ /* Add stack + unused space in young heap */
+ heap_size += HEAP_END(p) - HEAP_TOP(p);
+
+ /* Add size of new young heap */
+ heap_size += new_sz;
+
+ if (MAX_HEAP_SIZE_GET(p) < heap_size)
+ if (reached_max_heap_size(p, heap_size, new_sz, 0))
+ return -2;
+ }
+
FLAGS(p) &= ~(F_HEAP_GROW|F_NEED_FULLSWEEP);
n_htop = n_heap = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP,
sizeof(Eterm)*new_sz);
- /*
- * Get rid of heap fragments.
- */
+ if (live_hf_end != ERTS_INVALID_HFRAG_PTR) {
+ /*
+ * Move heap frags that we know are completely live
+ * directly into the heap.
+ */
+ n_htop = collect_live_heap_frags(p, live_hf_end, n_heap, n_htop,
+ objv, nobj);
+ }
- if (MBUF(p) != NULL) {
- n_htop = collect_heap_frags(p, n_heap, n_htop, objv, nobj);
+ n_htop = full_sweep_heaps(p, 0, n_heap, n_htop, oh, oh_size, objv, nobj);
+
+ /* Move the stack to the end of the heap */
+ stk_sz = HEAP_END(p) - p->stop;
+ sys_memcpy(n_heap + new_sz - stk_sz, p->stop, stk_sz * sizeof(Eterm));
+ p->stop = n_heap + new_sz - stk_sz;
+
+#ifdef USE_VM_PROBES
+ if (HEAP_SIZE(p) != new_sz && DTRACE_ENABLED(process_heap_grow)) {
+ DTRACE_CHARBUF(pidbuf, DTRACE_TERM_BUF_SIZE);
+
+ dtrace_proc_str(p, pidbuf);
+ DTRACE3(process_heap_grow, pidbuf, HEAP_SIZE(p), new_sz);
}
+#endif
+
+#ifdef HARDDEBUG
+ disallow_heap_frag_ref_in_heap(p, n_heap, n_htop);
+#endif
+
+ erts_deallocate_young_generation(p);
+
+ HEAP_START(p) = n_heap;
+ HEAP_TOP(p) = n_htop;
+ HEAP_SIZE(p) = new_sz;
+ HEAP_END(p) = n_heap + new_sz;
+ GEN_GCS(p) = 0;
+
+ HIGH_WATER(p) = HEAP_TOP(p);
+
+ if (p->flags & F_ON_HEAP_MSGQ)
+ move_msgq_to_heap(p);
+
+ ErtsGcQuickSanityCheck(p);
+
+ size_after = HEAP_TOP(p) - HEAP_START(p) + p->mbuf_sz;
+ *recl += size_before - size_after;
+
+ adjusted = adjust_after_fullsweep(p, need, objv, nobj);
+
+ ErtsGcQuickSanityCheck(p);
+
+ return gc_cost(size_after, adjusted ? size_after : 0);
+}
+
+static Eterm *
+full_sweep_heaps(Process *p,
+ int hibernate,
+ Eterm *n_heap, Eterm* n_htop,
+ char *oh, Uint oh_size,
+ Eterm *objv, int nobj)
+{
+ Rootset rootset;
+ Roots *roots;
+ Uint n;
/*
* Copy all top-level terms directly referenced by the rootset to
@@ -1267,7 +1779,14 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
*/
n = setup_rootset(p, objv, nobj, &rootset);
- n_htop = fullsweep_nstack(p, n_htop);
+
+#ifdef HIPE
+ if (hibernate)
+ hipe_empty_nstack(p);
+ else
+ n_htop = fullsweep_nstack(p, n_htop);
+#endif
+
roots = rootset.roots;
while (n--) {
Eterm* g_ptr = roots->v;
@@ -1278,7 +1797,7 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
Eterm* ptr;
Eterm val;
Eterm gval = *g_ptr;
-
+
switch (primary_tag(gval)) {
case TAG_PRIMARY_BOXED: {
@@ -1287,8 +1806,8 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
*g_ptr++ = val;
- } else if (in_area(ptr, src, src_size) || in_area(ptr, oh, oh_size)) {
- MOVE_BOXED(ptr,val,n_htop,g_ptr++);
+ } else if (!erts_is_literal(gval, ptr)) {
+ move_boxed(&ptr,val,&n_htop,g_ptr++);
} else {
g_ptr++;
}
@@ -1300,8 +1819,8 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
val = *ptr;
if (IS_MOVED_CONS(val)) {
*g_ptr++ = ptr[1];
- } else if (in_area(ptr, src, src_size) || in_area(ptr, oh, oh_size)) {
- MOVE_CONS(ptr,val,n_htop,g_ptr++);
+ } else if (!erts_is_literal(gval, ptr)) {
+ move_cons(&ptr,val,&n_htop,g_ptr++);
} else {
g_ptr++;
}
@@ -1325,74 +1844,7 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
* until all is copied.
*/
- if (oh_size == 0) {
- n_htop = sweep_one_area(n_heap, n_htop, src, src_size);
- } else {
- Eterm* n_hp = n_heap;
-
- while (n_hp != n_htop) {
- Eterm* ptr;
- Eterm val;
- Eterm gval = *n_hp;
-
- switch (primary_tag(gval)) {
- case TAG_PRIMARY_BOXED: {
- ptr = boxed_val(gval);
- val = *ptr;
- if (IS_MOVED_BOXED(val)) {
- ASSERT(is_boxed(val));
- *n_hp++ = val;
- } else if (in_area(ptr, src, src_size) || in_area(ptr, oh, oh_size)) {
- MOVE_BOXED(ptr,val,n_htop,n_hp++);
- } else {
- n_hp++;
- }
- break;
- }
- case TAG_PRIMARY_LIST: {
- ptr = list_val(gval);
- val = *ptr;
- if (IS_MOVED_CONS(val)) {
- *n_hp++ = ptr[1];
- } else if (in_area(ptr, src, src_size) || in_area(ptr, oh, oh_size)) {
- MOVE_CONS(ptr,val,n_htop,n_hp++);
- } else {
- n_hp++;
- }
- break;
- }
- case TAG_PRIMARY_HEADER: {
- if (!header_is_thing(gval))
- n_hp++;
- else {
- if (header_is_bin_matchstate(gval)) {
- ErlBinMatchState *ms = (ErlBinMatchState*) n_hp;
- ErlBinMatchBuffer *mb = &(ms->mb);
- Eterm* origptr;
- origptr = &(mb->orig);
- ptr = boxed_val(*origptr);
- val = *ptr;
- if (IS_MOVED_BOXED(val)) {
- *origptr = val;
- mb->base = binary_bytes(*origptr);
- } else if (in_area(ptr, src, src_size) ||
- in_area(ptr, oh, oh_size)) {
- MOVE_BOXED(ptr,val,n_htop,origptr);
- mb->base = binary_bytes(*origptr);
- ptr = boxed_val(*origptr);
- val = *ptr;
- }
- }
- n_hp += (thing_arityval(gval)+1);
- }
- break;
- }
- default:
- n_hp++;
- break;
- }
- }
- }
+ n_htop = sweep_heaps(n_heap, n_htop, oh, oh_size);
if (MSO(p).first) {
sweep_off_heap(p, 1);
@@ -1405,73 +1857,26 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
OLD_HEAP(p) = OLD_HTOP(p) = OLD_HEND(p) = NULL;
}
- /* Move the stack to the end of the heap */
- n = HEAP_END(p) - p->stop;
- sys_memcpy(n_heap + new_sz - n, p->stop, n * sizeof(Eterm));
- p->stop = n_heap + new_sz - n;
-
-#ifdef USE_VM_PROBES
- if (HEAP_SIZE(p) != new_sz && DTRACE_ENABLED(process_heap_grow)) {
- DTRACE_CHARBUF(pidbuf, DTRACE_TERM_BUF_SIZE);
-
- dtrace_proc_str(p, pidbuf);
- DTRACE3(process_heap_grow, pidbuf, HEAP_SIZE(p), new_sz);
- }
-#endif
-
- ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
- (void *) HEAP_START(p),
- (HEAP_END(p) - HEAP_START(p)) * sizeof(Eterm));
- HEAP_START(p) = n_heap;
- HEAP_TOP(p) = n_htop;
- HEAP_SIZE(p) = new_sz;
- HEAP_END(p) = n_heap + new_sz;
- GEN_GCS(p) = 0;
-
- HIGH_WATER(p) = HEAP_TOP(p);
-
- ErtsGcQuickSanityCheck(p);
-
- /*
- * Copy newly received message onto the end of the new heap.
- */
- for (msgp = p->msg.first; msgp; msgp = msgp->next) {
- if (msgp->data.attached) {
- erts_move_msg_attached_data_to_heap(&p->htop, &p->off_heap, msgp);
- ErtsGcQuickSanityCheck(p);
- }
- }
-
- *recl += adjust_after_fullsweep(p, size_before, need, objv, nobj);
-
-#ifdef HARDDEBUG
- disallow_heap_frag_ref_in_heap(p);
-#endif
- remove_message_buffers(p);
-
- ErtsGcQuickSanityCheck(p);
- return 1; /* We are done. */
+ return n_htop;
}
-static Uint
-adjust_after_fullsweep(Process *p, Uint size_before, int need, Eterm *objv, int nobj)
+static int
+adjust_after_fullsweep(Process *p, int need, Eterm *objv, int nobj)
{
- Uint wanted, sz, size_after, need_after;
+ int adjusted = 0;
+ Uint wanted, sz, need_after;
Uint stack_size = STACK_SZ_ON_HEAP(p);
- Uint reclaimed_now;
-
- size_after = (HEAP_TOP(p) - HEAP_START(p));
- reclaimed_now = (size_before - size_after);
/*
* Resize the heap if needed.
*/
- need_after = size_after + need + stack_size;
+ need_after = (HEAP_TOP(p) - HEAP_START(p)) + need + stack_size;
if (HEAP_SIZE(p) < need_after) {
/* Too small - grow to match requested need */
sz = next_heap_size(p, need_after, 0);
grow_new_heap(p, sz, objv, nobj);
+ adjusted = 1;
} else if (3 * HEAP_SIZE(p) < 4 * need_after){
/* Need more than 75% of current, postpone to next GC.*/
FLAGS(p) |= F_HEAP_GROW;
@@ -1488,42 +1893,62 @@ adjust_after_fullsweep(Process *p, Uint size_before, int need, Eterm *objv, int
if (sz < HEAP_SIZE(p)) {
shrink_new_heap(p, sz, objv, nobj);
+ adjusted = 1;
}
}
-
- return reclaimed_now;
+ return adjusted;
}
-/*
- * Return the size of all message buffers that are NOT linked in the
- * mbuf list.
- */
-static Uint
-combined_message_size(Process* p)
+void
+erts_deallocate_young_generation(Process *c_p)
{
- Uint sz = 0;
- ErlMessage *msgp;
+ Eterm *orig_heap;
- for (msgp = p->msg.first; msgp; msgp = msgp->next) {
- if (msgp->data.attached) {
- sz += erts_msg_attached_data_size(msgp);
- }
+ if (!c_p->abandoned_heap) {
+ orig_heap = c_p->heap;
+ ASSERT(!(c_p->flags & F_ABANDONED_HEAP_USE));
}
- return sz;
-}
+ else {
+ ErlHeapFragment *hfrag;
-/*
- * Remove all message buffers.
- */
-static void
-remove_message_buffers(Process* p)
-{
- if (MBUF(p) != NULL) {
- free_message_buffer(MBUF(p));
- MBUF(p) = NULL;
+ orig_heap = c_p->abandoned_heap;
+ c_p->abandoned_heap = NULL;
+ c_p->flags &= ~F_ABANDONED_HEAP_USE;
+
+ /*
+ * Temporary heap located in heap fragment
+ * only referred to by 'c_p->heap'. Add it to
+ * 'c_p->mbuf' list and deallocate it as any
+ * other heap fragment...
+ */
+ hfrag = ((ErlHeapFragment *)
+ (((char *) c_p->heap)
+ - offsetof(ErlHeapFragment, mem)));
+
+ ASSERT(!hfrag->off_heap.first);
+ ASSERT(!hfrag->off_heap.overhead);
+ ASSERT(!hfrag->next);
+ ASSERT(c_p->htop - c_p->heap <= hfrag->alloc_size);
+
+ hfrag->next = c_p->mbuf;
+ c_p->mbuf = hfrag;
+ }
+
+ if (c_p->mbuf) {
+ free_message_buffer(c_p->mbuf);
+ c_p->mbuf = NULL;
}
- MBUF_SIZE(p) = 0;
+ if (c_p->msg_frag) {
+ erts_cleanup_messages(c_p->msg_frag);
+ c_p->msg_frag = NULL;
+ }
+ c_p->mbuf_sz = 0;
+
+ ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
+ orig_heap,
+ c_p->heap_sz * sizeof(Eterm));
}
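The abandoned-heap branch above recovers the enclosing ErlHeapFragment from a bare heap pointer with offsetof arithmetic: the classic container_of idiom. A minimal sketch with hypothetical types:

#include <stddef.h>

/* Hypothetical stand-in for ErlHeapFragment: 'mem' is the payload. */
struct frag {
    struct frag *next;
    unsigned long alloc_size;
    unsigned long mem[1];
};

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* Given heap == f->mem for some frag f, recover f itself. */
static struct frag *frag_of_heap(unsigned long *heap)
{
    return container_of(heap, struct frag, mem);
}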
+
#ifdef HARDDEBUG
/*
@@ -1533,79 +1958,17 @@ remove_message_buffers(Process* p)
* For performance reasons, we use _unchecked_list_val(), _unchecked_boxed_val(),
* and so on to avoid a function call.
*/
-
-static void
-disallow_heap_frag_ref(Process* p, Eterm* n_htop, Eterm* objv, int nobj)
-{
- ErlHeapFragment* mbuf;
- ErlHeapFragment* qb;
- Eterm gval;
- Eterm* ptr;
- Eterm val;
-
- ASSERT(p->htop != NULL);
- mbuf = MBUF(p);
-
- while (nobj--) {
- gval = *objv;
-
- switch (primary_tag(gval)) {
-
- case TAG_PRIMARY_BOXED: {
- ptr = _unchecked_boxed_val(gval);
- val = *ptr;
- if (IS_MOVED_BOXED(val)) {
- ASSERT(is_boxed(val));
- objv++;
- } else {
- for (qb = mbuf; qb != NULL; qb = qb->next) {
- if (in_area(ptr, qb->mem, qb->alloc_size*sizeof(Eterm))) {
- abort();
- }
- }
- objv++;
- }
- break;
- }
-
- case TAG_PRIMARY_LIST: {
- ptr = _unchecked_list_val(gval);
- val = *ptr;
- if (IS_MOVED_CONS(val)) {
- objv++;
- } else {
- for (qb = mbuf; qb != NULL; qb = qb->next) {
- if (in_area(ptr, qb->mem, qb->alloc_size*sizeof(Eterm))) {
- abort();
- }
- }
- objv++;
- }
- break;
- }
-
- default: {
- objv++;
- break;
- }
- }
- }
-}
static void
-disallow_heap_frag_ref_in_heap(Process* p)
+disallow_heap_frag_ref_in_heap(Process *p, Eterm *heap, Eterm *htop)
{
Eterm* hp;
- Eterm* htop;
- Eterm* heap;
Uint heap_size;
if (p->mbuf == 0) {
return;
}
- htop = p->htop;
- heap = p->heap;
heap_size = (htop - heap)*sizeof(Eterm);
hp = heap;
@@ -1618,9 +1981,9 @@ disallow_heap_frag_ref_in_heap(Process* p)
switch (primary_tag(val)) {
case TAG_PRIMARY_BOXED:
ptr = _unchecked_boxed_val(val);
- if (!in_area(ptr, heap, heap_size)) {
+ if (!ErtsInArea(ptr, heap, heap_size)) {
for (qb = MBUF(p); qb != NULL; qb = qb->next) {
- if (in_area(ptr, qb->mem, qb->alloc_size*sizeof(Eterm))) {
+ if (ErtsInArea(ptr, qb->mem, qb->alloc_size*sizeof(Eterm))) {
abort();
}
}
@@ -1628,9 +1991,9 @@ disallow_heap_frag_ref_in_heap(Process* p)
break;
case TAG_PRIMARY_LIST:
ptr = _unchecked_list_val(val);
- if (!in_area(ptr, heap, heap_size)) {
+ if (!ErtsInArea(ptr, heap, heap_size)) {
for (qb = MBUF(p); qb != NULL; qb = qb->next) {
- if (in_area(ptr, qb->mem, qb->alloc_size*sizeof(Eterm))) {
+ if (ErtsInArea(ptr, qb->mem, qb->alloc_size*sizeof(Eterm))) {
abort();
}
}
@@ -1672,26 +2035,26 @@ disallow_heap_frag_ref_in_old_heap(Process* p)
val = *hp++;
switch (primary_tag(val)) {
case TAG_PRIMARY_BOXED:
- ptr = (Eterm *) EXPAND_POINTER(val);
- if (!in_area(ptr, old_heap, old_heap_size)) {
- if (in_area(ptr, new_heap, new_heap_size)) {
+ ptr = (Eterm *) val;
+ if (!ErtsInArea(ptr, old_heap, old_heap_size)) {
+ if (ErtsInArea(ptr, new_heap, new_heap_size)) {
abort();
}
for (qb = MBUF(p); qb != NULL; qb = qb->next) {
- if (in_area(ptr, qb->mem, qb->alloc_size*sizeof(Eterm))) {
+ if (ErtsInArea(ptr, qb->mem, qb->alloc_size*sizeof(Eterm))) {
abort();
}
}
}
break;
case TAG_PRIMARY_LIST:
- ptr = (Eterm *) EXPAND_POINTER(val);
- if (!in_area(ptr, old_heap, old_heap_size)) {
- if (in_area(ptr, new_heap, new_heap_size)) {
+ ptr = (Eterm *) val;
+ if (!ErtsInArea(ptr, old_heap, old_heap_size)) {
+ if (ErtsInArea(ptr, new_heap, new_heap_size)) {
abort();
}
for (qb = MBUF(p); qb != NULL; qb = qb->next) {
- if (in_area(ptr, qb->mem, qb->alloc_size*sizeof(Eterm))) {
+ if (ErtsInArea(ptr, qb->mem, qb->alloc_size*sizeof(Eterm))) {
abort();
}
}
@@ -1700,7 +2063,7 @@ disallow_heap_frag_ref_in_old_heap(Process* p)
case TAG_PRIMARY_HEADER:
if (header_is_thing(val)) {
hp += _unchecked_thing_arityval(val);
- if (!in_area(hp, old_heap, old_heap_size+1)) {
+ if (!ErtsInArea(hp, old_heap, old_heap_size+1)) {
abort();
}
}
@@ -1710,66 +2073,30 @@ disallow_heap_frag_ref_in_old_heap(Process* p)
}
#endif
-static Eterm*
-sweep_rootset(Rootset* rootset, Eterm* htop, char* src, Uint src_size)
+typedef enum {
+ ErtsSweepNewHeap,
+ ErtsSweepHeaps,
+ ErtsSweepLiteralArea
+} ErtsSweepType;
+
+static ERTS_FORCE_INLINE Eterm *
+sweep(Eterm *n_hp, Eterm *n_htop,
+ ErtsSweepType type,
+ char *oh, Uint ohsz,
+ char *src, Uint src_size)
{
- Roots* roots = rootset->roots;
- Uint n = rootset->num_roots;
Eterm* ptr;
- Eterm gval;
Eterm val;
+ Eterm gval;
- while (n--) {
- Eterm* g_ptr = roots->v;
- Uint g_sz = roots->sz;
-
- roots++;
- while (g_sz--) {
- gval = *g_ptr;
-
- switch (primary_tag(gval)) {
- case TAG_PRIMARY_BOXED: {
- ptr = boxed_val(gval);
- val = *ptr;
- if (IS_MOVED_BOXED(val)) {
- ASSERT(is_boxed(val));
- *g_ptr++ = val;
- } else if (in_area(ptr, src, src_size)) {
- MOVE_BOXED(ptr,val,htop,g_ptr++);
- } else {
- g_ptr++;
- }
- break;
- }
- case TAG_PRIMARY_LIST: {
- ptr = list_val(gval);
- val = *ptr;
- if (IS_MOVED_CONS(val)) {
- *g_ptr++ = ptr[1];
- } else if (in_area(ptr, src, src_size)) {
- MOVE_CONS(ptr,val,htop,g_ptr++);
- } else {
- g_ptr++;
- }
- break;
- }
-
- default:
- g_ptr++;
- break;
- }
- }
- }
- return htop;
-}
-
+#undef ERTS_IS_IN_SWEEP_AREA
-static Eterm*
-sweep_one_area(Eterm* n_hp, Eterm* n_htop, char* src, Uint src_size)
-{
- Eterm* ptr;
- Eterm val;
- Eterm gval;
+#define ERTS_IS_IN_SWEEP_AREA(TPtr, Ptr) \
+ (type == ErtsSweepHeaps \
+ ? !erts_is_literal((TPtr), (Ptr)) \
+ : (type == ErtsSweepNewHeap \
+ ? ErtsInYoungGen((TPtr), (Ptr), oh, ohsz) \
+ : ErtsInArea((Ptr), src, src_size)))
while (n_hp != n_htop) {
ASSERT(n_hp < n_htop);
@@ -1781,8 +2108,8 @@ sweep_one_area(Eterm* n_hp, Eterm* n_htop, char* src, Uint src_size)
if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
*n_hp++ = val;
- } else if (in_area(ptr, src, src_size)) {
- MOVE_BOXED(ptr,val,n_htop,n_hp++);
+ } else if (ERTS_IS_IN_SWEEP_AREA(gval, ptr)) {
+ move_boxed(&ptr,val,&n_htop,n_hp++);
} else {
n_hp++;
}
@@ -1793,8 +2120,8 @@ sweep_one_area(Eterm* n_hp, Eterm* n_htop, char* src, Uint src_size)
val = *ptr;
if (IS_MOVED_CONS(val)) {
*n_hp++ = ptr[1];
- } else if (in_area(ptr, src, src_size)) {
- MOVE_CONS(ptr,val,n_htop,n_hp++);
+ } else if (ERTS_IS_IN_SWEEP_AREA(gval, ptr)) {
+ move_cons(&ptr,val,&n_htop,n_hp++);
} else {
n_hp++;
}
@@ -1807,15 +2134,15 @@ sweep_one_area(Eterm* n_hp, Eterm* n_htop, char* src, Uint src_size)
if (header_is_bin_matchstate(gval)) {
ErlBinMatchState *ms = (ErlBinMatchState*) n_hp;
ErlBinMatchBuffer *mb = &(ms->mb);
- Eterm* origptr;
+ Eterm* origptr;
origptr = &(mb->orig);
ptr = boxed_val(*origptr);
val = *ptr;
if (IS_MOVED_BOXED(val)) {
*origptr = val;
mb->base = binary_bytes(*origptr);
- } else if (in_area(ptr, src, src_size)) {
- MOVE_BOXED(ptr,val,n_htop,origptr);
+ } else if (ERTS_IS_IN_SWEEP_AREA(*origptr, ptr)) {
+ move_boxed(&ptr,val,&n_htop,origptr);
mb->base = binary_bytes(*origptr);
}
}
@@ -1829,10 +2156,41 @@ sweep_one_area(Eterm* n_hp, Eterm* n_htop, char* src, Uint src_size)
}
}
return n_htop;
+#undef ERTS_IS_IN_SWEEP_AREA
+}
+
+static Eterm *
+sweep_new_heap(Eterm *n_hp, Eterm *n_htop, char* old_heap, Uint old_heap_size)
+{
+ return sweep(n_hp, n_htop,
+ ErtsSweepNewHeap,
+ old_heap, old_heap_size,
+ NULL, 0);
+}
+
+static Eterm *
+sweep_heaps(Eterm *n_hp, Eterm *n_htop, char* old_heap, Uint old_heap_size)
+{
+ return sweep(n_hp, n_htop,
+ ErtsSweepHeaps,
+ old_heap, old_heap_size,
+ NULL, 0);
+}
+
+static Eterm *
+sweep_literal_area(Eterm *n_hp, Eterm *n_htop,
+ char* old_heap, Uint old_heap_size,
+ char* src, Uint src_size)
+{
+ return sweep(n_hp, n_htop,
+ ErtsSweepLiteralArea,
+ old_heap, old_heap_size,
+ src, src_size);
}
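Folding sweep_new_heap(), sweep_heaps() and sweep_literal_area() into one ERTS_FORCE_INLINE worker keyed on a compile-time enum is a standard specialization trick: each wrapper passes a distinct constant, so the per-element ERTS_IS_IN_SWEEP_AREA dispatch folds away and the compiler emits one tight loop per sweep kind. A toy sketch of the pattern (the GCC/Clang attribute below is an assumption; ERTS_FORCE_INLINE abstracts the compiler-specific spelling):

/* Toy version: one inlined worker, constant-folded per wrapper,
 * so no caller pays for the runtime type branch. */
typedef enum { SWEEP_KIND_A, SWEEP_KIND_B } sweep_kind_t;

static inline __attribute__((always_inline))
long sweep_worker(sweep_kind_t kind, long x)
{
    /* 'kind' is a compile-time constant at every call site below,
     * so this branch disappears after inlining. */
    return kind == SWEEP_KIND_A ? x + 1 : x * 2;
}

static long sweep_a(long x) { return sweep_worker(SWEEP_KIND_A, x); }
static long sweep_b(long x) { return sweep_worker(SWEEP_KIND_B, x); }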
static Eterm*
-sweep_one_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop, char* src, Uint src_size)
+sweep_literals_to_old_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop,
+ char* src, Uint src_size)
{
while (heap_ptr < heap_end) {
Eterm* ptr;
@@ -1846,8 +2204,8 @@ sweep_one_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop, char* src, Uint sr
if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
*heap_ptr++ = val;
- } else if (in_area(ptr, src, src_size)) {
- MOVE_BOXED(ptr,val,htop,heap_ptr++);
+ } else if (ErtsInArea(ptr, src, src_size)) {
+ move_boxed(&ptr,val,&htop,heap_ptr++);
} else {
heap_ptr++;
}
@@ -1858,8 +2216,8 @@ sweep_one_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop, char* src, Uint sr
val = *ptr;
if (IS_MOVED_CONS(val)) {
*heap_ptr++ = ptr[1];
- } else if (in_area(ptr, src, src_size)) {
- MOVE_CONS(ptr,val,htop,heap_ptr++);
+ } else if (ErtsInArea(ptr, src, src_size)) {
+ move_cons(&ptr,val,&htop,heap_ptr++);
} else {
heap_ptr++;
}
@@ -1869,6 +2227,21 @@ sweep_one_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop, char* src, Uint sr
if (!header_is_thing(gval)) {
heap_ptr++;
} else {
+ if (header_is_bin_matchstate(gval)) {
+ ErlBinMatchState *ms = (ErlBinMatchState*) heap_ptr;
+ ErlBinMatchBuffer *mb = &(ms->mb);
+ Eterm* origptr;
+ origptr = &(mb->orig);
+ ptr = boxed_val(*origptr);
+ val = *ptr;
+ if (IS_MOVED_BOXED(val)) {
+ *origptr = val;
+ mb->base = binary_bytes(*origptr);
+ } else if (ErtsInArea(ptr, src, src_size)) {
+ move_boxed(&ptr,val,&htop,origptr);
+ mb->base = binary_bytes(*origptr);
+ }
+ }
heap_ptr += (thing_arityval(gval)+1);
}
break;
@@ -1898,11 +2271,11 @@ move_one_area(Eterm* n_htop, char* src, Uint src_size)
ASSERT(val != ERTS_HOLE_MARKER);
if (is_header(val)) {
ASSERT(ptr + header_arity(val) < end);
- MOVE_BOXED(ptr, val, n_htop, &dummy_ref);
+ move_boxed(&ptr, val, &n_htop, &dummy_ref);
}
else { /* must be a cons cell */
ASSERT(ptr+1 < end);
- MOVE_CONS(ptr, val, n_htop, &dummy_ref);
+ move_cons(&ptr, val, &n_htop, &dummy_ref);
ptr += 2;
}
}
@@ -1915,32 +2288,22 @@ move_one_area(Eterm* n_htop, char* src, Uint src_size)
*/
static Eterm*
-collect_heap_frags(Process* p, Eterm* n_hstart, Eterm* n_htop,
- Eterm* objv, int nobj)
+collect_live_heap_frags(Process* p, ErlHeapFragment *live_hf_end,
+ Eterm* n_hstart, Eterm* n_htop,
+ Eterm* objv, int nobj)
{
ErlHeapFragment* qb;
char* frag_begin;
Uint frag_size;
/*
- * We don't allow references to a heap fragments from the stack, heap,
- * or process dictionary.
- */
-#ifdef HARDDEBUG
- disallow_heap_frag_ref(p, n_htop, p->stop, STACK_START(p) - p->stop);
- if (p->dictionary != NULL) {
- disallow_heap_frag_ref(p, n_htop, p->dictionary->data, p->dictionary->used);
- }
- disallow_heap_frag_ref_in_heap(p);
-#endif
-
- /*
* Move the heap fragments to the new heap. Note that no GC is done on
* the heap fragments. Any garbage will thus be moved as well and survive
* until next GC.
*/
qb = MBUF(p);
- while (qb != NULL) {
+ while (qb != live_hf_end) {
+ ASSERT(!qb->off_heap.first); /* process fragments use the MSO(p) list */
frag_size = qb->used_size * sizeof(Eterm);
if (frag_size != 0) {
frag_begin = (char *) qb->mem;
@@ -1951,12 +2314,178 @@ collect_heap_frags(Process* p, Eterm* n_hstart, Eterm* n_htop,
return n_htop;
}
+static ERTS_INLINE void
+copy_one_frag(Eterm** hpp, ErlOffHeap* off_heap,
+ ErlHeapFragment *bp, Eterm *refs, int nrefs)
+{
+ Uint sz;
+ int i;
+ Sint offs;
+ struct erl_off_heap_header* oh;
+ Eterm *fhp, *hp;
+
+ OH_OVERHEAD(off_heap, bp->off_heap.overhead);
+ sz = bp->used_size;
+
+ fhp = bp->mem;
+ hp = *hpp;
+ offs = hp - fhp;
+
+ oh = NULL;
+ while (sz--) {
+ Uint cpy_sz;
+ Eterm val = *fhp++;
+
+ switch (primary_tag(val)) {
+ case TAG_PRIMARY_IMMED1:
+ *hp++ = val;
+ break;
+ case TAG_PRIMARY_LIST:
+ if (erts_is_literal(val,list_val(val))) {
+ *hp++ = val;
+ } else {
+ *hp++ = offset_ptr(val, offs);
+ }
+ break;
+ case TAG_PRIMARY_BOXED:
+ if (erts_is_literal(val,boxed_val(val))) {
+ *hp++ = val;
+ } else {
+ *hp++ = offset_ptr(val, offs);
+ }
+ break;
+ case TAG_PRIMARY_HEADER:
+ *hp++ = val;
+ switch (val & _HEADER_SUBTAG_MASK) {
+ case ARITYVAL_SUBTAG:
+ break;
+ case REF_SUBTAG:
+ if (is_ordinary_ref_thing(fhp - 1))
+ goto the_default;
+ case REFC_BINARY_SUBTAG:
+ case FUN_SUBTAG:
+ case EXTERNAL_PID_SUBTAG:
+ case EXTERNAL_PORT_SUBTAG:
+ case EXTERNAL_REF_SUBTAG:
+ oh = (struct erl_off_heap_header*) (hp-1);
+ cpy_sz = thing_arityval(val);
+ goto cpy_words;
+ default:
+ the_default:
+ cpy_sz = header_arity(val);
+
+ cpy_words:
+ ASSERT(sz >= cpy_sz);
+ sz -= cpy_sz;
+ while (cpy_sz >= 8) {
+ cpy_sz -= 8;
+ *hp++ = *fhp++;
+ *hp++ = *fhp++;
+ *hp++ = *fhp++;
+ *hp++ = *fhp++;
+ *hp++ = *fhp++;
+ *hp++ = *fhp++;
+ *hp++ = *fhp++;
+ *hp++ = *fhp++;
+ }
+ switch (cpy_sz) {
+ case 7: *hp++ = *fhp++;
+ case 6: *hp++ = *fhp++;
+ case 5: *hp++ = *fhp++;
+ case 4: *hp++ = *fhp++;
+ case 3: *hp++ = *fhp++;
+ case 2: *hp++ = *fhp++;
+ case 1: *hp++ = *fhp++;
+ default: break;
+ }
+ if (oh) {
+ /* Add to offheap list */
+ oh->next = off_heap->first;
+ off_heap->first = oh;
+ ASSERT(*hpp <= (Eterm*)oh);
+ ASSERT(hp > (Eterm*)oh);
+ oh = NULL;
+ }
+ break;
+ }
+ break;
+ }
+ }
+
+ ASSERT(bp->used_size == hp - *hpp);
+ *hpp = hp;
+
+ for (i = 0; i < nrefs; i++) {
+ if (is_not_immed(refs[i]))
+ refs[i] = offset_ptr(refs[i], offs);
+ }
+ bp->off_heap.first = NULL;
+}
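The word-copy loop inside copy_one_frag() is manually unrolled eight-wide, with a fall-through switch mopping up the remainder. The same shape in isolation:

#include <stddef.h>

/* Copy n words: unrolled blocks of eight, then a fall-through
 * switch for the 0..7 leftover words (the missing breaks are the
 * point, as in copy_one_frag() above). */
static void copy_words(unsigned long *dst, const unsigned long *src,
                       size_t n)
{
    while (n >= 8) {
        n -= 8;
        *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++;
        *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++;
    }
    switch (n) {
    case 7: *dst++ = *src++; /* fall through */
    case 6: *dst++ = *src++; /* fall through */
    case 5: *dst++ = *src++; /* fall through */
    case 4: *dst++ = *src++; /* fall through */
    case 3: *dst++ = *src++; /* fall through */
    case 2: *dst++ = *src++; /* fall through */
    case 1: *dst++ = *src++; /* fall through */
    default: break;
    }
}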
+
+static void
+move_msgq_to_heap(Process *p)
+{
+
+ ErtsMessage **mpp = &p->msg.first;
+ Uint64 pre_oh = MSO(p).overhead;
+
+ while (*mpp) {
+ ErtsMessage *mp = *mpp;
+
+ if (mp->data.attached) {
+ ErlHeapFragment *bp;
+
+ /*
+ * We leave not-yet-decoded distribution messages
+ * in the queue as they are, since it is not
+ * possible to determine a maximum size until
+ * they are actually decoded...
+ */
+ if (is_value(ERL_MESSAGE_TERM(mp))) {
+
+ bp = erts_message_to_heap_frag(mp);
+
+ if (bp->next)
+ erts_move_multi_frags(&p->htop, &p->off_heap, bp,
+ mp->m, ERL_MESSAGE_REF_ARRAY_SZ, 0);
+ else
+ copy_one_frag(&p->htop, &p->off_heap, bp,
+ mp->m, ERL_MESSAGE_REF_ARRAY_SZ);
+
+ if (mp->data.attached != ERTS_MSG_COMBINED_HFRAG) {
+ mp->data.heap_frag = NULL;
+ free_message_buffer(bp);
+ }
+ else {
+ ErtsMessage *new_mp = erts_alloc_message(0, NULL);
+ sys_memcpy((void *) new_mp->m, (void *) mp->m,
+ sizeof(Eterm)*ERL_MESSAGE_REF_ARRAY_SZ);
+ erts_msgq_replace_msg_ref(&p->msg, new_mp, mpp);
+ mp->next = NULL;
+ erts_cleanup_messages(mp);
+ mp = new_mp;
+ }
+ }
+ }
+
+ mpp = &(*mpp)->next;
+ }
+
+ if (pre_oh != MSO(p).overhead) {
+ /* Got new binaries; update vheap size... */
+ BIN_VHEAP_SZ(p) = next_vheap_size(p, MSO(p).overhead, BIN_VHEAP_SZ(p));
+ }
+}
+
static Uint
setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset)
{
- Uint avail;
+ /*
+ * NOTE!
+ * Remember to update offset_rootset() when changing
+ * this function.
+ */
Roots* roots;
- ErlMessage* mp;
Uint n;
n = 0;
@@ -1968,8 +2497,8 @@ setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset)
++n;
if (p->dictionary != NULL) {
- roots[n].v = p->dictionary->data;
- roots[n].sz = p->dictionary->used;
+ roots[n].v = ERTS_PD_START(p->dictionary);
+ roots[n].sz = ERTS_PD_SIZE(p->dictionary);
++n;
}
if (nobj > 0) {
@@ -1979,7 +2508,7 @@ setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset)
}
ASSERT((is_nil(p->seq_trace_token) ||
- is_tuple(follow_moved(p->seq_trace_token)) ||
+ is_tuple(follow_moved(p->seq_trace_token, (Eterm) 0)) ||
is_atom(p->seq_trace_token)));
if (is_not_immed(p->seq_trace_token)) {
roots[n].v = &p->seq_trace_token;
@@ -1993,11 +2522,9 @@ setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset)
n++;
}
#endif
- ASSERT(is_nil(ERTS_TRACER_PROC(p)) ||
- is_internal_pid(ERTS_TRACER_PROC(p)) ||
- is_internal_port(ERTS_TRACER_PROC(p)));
+ ASSERT(IS_TRACER_VALID(ERTS_TRACER(p)));
- ASSERT(is_pid(follow_moved(p->group_leader)));
+ ASSERT(is_pid(follow_moved(p->group_leader, (Eterm) 0)));
if (is_not_immed(p->group_leader)) {
roots[n].v = &p->group_leader;
roots[n].sz = 1;
@@ -2018,33 +2545,56 @@ setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset)
roots[n].sz = 1;
n++;
}
+
+ /*
+ * If a NIF or BIF has saved arguments, they need to be added to the rootset.
+ */
+ if (erts_setup_nif_export_rootset(p, &roots[n].v, &roots[n].sz))
+ n++;
+
ASSERT(n <= rootset->size);
- mp = p->msg.first;
- avail = rootset->size - n;
- while (mp != NULL) {
- if (avail == 0) {
- Uint new_size = 2*rootset->size;
- if (roots == rootset->def) {
- roots = erts_alloc(ERTS_ALC_T_ROOTSET,
- new_size*sizeof(Roots));
- sys_memcpy(roots, rootset->def, sizeof(rootset->def));
- } else {
- roots = erts_realloc(ERTS_ALC_T_ROOTSET,
- (void *) roots,
- new_size*sizeof(Roots));
- }
+ switch (p->flags & (F_OFF_HEAP_MSGQ|F_OFF_HEAP_MSGQ_CHNG)) {
+ case F_OFF_HEAP_MSGQ|F_OFF_HEAP_MSGQ_CHNG:
+ (void) erts_move_messages_off_heap(p);
+ case F_OFF_HEAP_MSGQ:
+ break;
+ case F_OFF_HEAP_MSGQ_CHNG:
+ case 0: {
+ /*
+ * We do not have the off-heap message queue enabled, so we
+ * need to add the message queue to the rootset...
+ */
+ ErtsMessage *mp;
+
+ /* Ensure large enough rootset... */
+ if (n + p->msg.len > rootset->size) {
+ Uint new_size = n + p->msg.len;
+ ERTS_GC_ASSERT(roots == rootset->def);
+ roots = erts_alloc(ERTS_ALC_T_ROOTSET,
+ new_size*sizeof(Roots));
+ sys_memcpy(roots, rootset->def, n*sizeof(Roots));
rootset->size = new_size;
- avail = new_size - n;
}
- if (mp->data.attached == NULL) {
- roots[n].v = mp->m;
- roots[n].sz = 2;
- n++;
- avail--;
+
+ for (mp = p->msg.first; mp; mp = mp->next) {
+
+ if (!mp->data.attached) {
+ /*
+ * The message may refer to data on the heap;
+ * add it to the rootset...
+ */
+ roots[n].v = mp->m;
+ roots[n].sz = ERL_MESSAGE_REF_ARRAY_SZ;
+ n++;
+ }
}
- mp = mp->next;
+ break;
+ }
}
+
+ ASSERT(rootset->size >= n);
+
rootset->roots = roots;
rootset->num_roots = n;
return n;
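Note the allocation change in setup_rootset(): the old loop doubled the rootset while walking the message queue, while the new code knows the worst case up front (n + p->msg.len) and grows exactly once, straight from the stack-allocated default array. A sketch of that "grow once to exact size" move with hypothetical types (error handling elided):

#include <stdlib.h>
#include <string.h>

struct root { void *v; size_t sz; };   /* hypothetical Roots stand-in */

/* Ensure capacity for 'used + extra' roots: a single exact allocation
 * replaces repeated doubling; roots added so far are copied over. */
static struct root *ensure_roots(struct root *def, size_t used,
                                 size_t extra, size_t *cap)
{
    struct root *roots;
    if (used + extra <= *cap)
        return def;
    *cap = used + extra;
    roots = malloc(*cap * sizeof(*roots));
    memcpy(roots, def, used * sizeof(*roots));
    return roots;
}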
@@ -2255,6 +2805,16 @@ link_live_proc_bin(struct shrink_cand_data *shrink,
*prevppp = &pbp->next;
}
+#ifdef ERTS_MAGIC_REF_THING_HEADER
+/*
+ * ERTS_MAGIC_REF_THING_HEADER is only defined when there
+ * is a size difference between magic and ordinary references...
+ */
+# define ERTS_USED_MAGIC_REF_THING_HEADER__ ERTS_MAGIC_REF_THING_HEADER
+#else
+# define ERTS_USED_MAGIC_REF_THING_HEADER__ ERTS_REF_THING_HEADER
+#endif
+
static void
sweep_off_heap(Process *p, int fullsweep)
@@ -2279,16 +2839,17 @@ sweep_off_heap(Process *p, int fullsweep)
prev = &MSO(p).first;
ptr = MSO(p).first;
- /* Firts part of the list will reside on the (old) new-heap.
+ /* First part of the list will reside on the (old) new-heap.
* Keep if moved, otherwise deref.
*/
while (ptr) {
if (IS_MOVED_BOXED(ptr->thing_word)) {
- ASSERT(!in_area(ptr, oheap, oheap_sz));
+ ASSERT(!ErtsInArea(ptr, oheap, oheap_sz));
*prev = ptr = (struct erl_off_heap_header*) boxed_val(ptr->thing_word);
ASSERT(!IS_MOVED_BOXED(ptr->thing_word));
- if (ptr->thing_word == HEADER_PROC_BIN) {
- int to_new_heap = !in_area(ptr, oheap, oheap_sz);
+ switch (ptr->thing_word) {
+ case HEADER_PROC_BIN: {
+ int to_new_heap = !ErtsInArea(ptr, oheap, oheap_sz);
ASSERT(to_new_heap == !seen_mature || (!to_new_heap && (seen_mature=1)));
if (to_new_heap) {
bin_vheap += ptr->size / sizeof(Eterm);
@@ -2296,21 +2857,34 @@ sweep_off_heap(Process *p, int fullsweep)
BIN_OLD_VHEAP(p) += ptr->size / sizeof(Eterm); /* for binary gc (words)*/
}
link_live_proc_bin(&shrink, &prev, &ptr, to_new_heap);
- }
- else {
+ break;
+ }
+ case ERTS_USED_MAGIC_REF_THING_HEADER__: {
+ Uint size;
+ int to_new_heap = !ErtsInArea(ptr, oheap, oheap_sz);
+ ASSERT(is_magic_ref_thing(ptr));
+ ASSERT(to_new_heap == !seen_mature || (!to_new_heap && (seen_mature=1)));
+ size = (Uint) ((ErtsMRefThing *) ptr)->mb->orig_size;
+ if (to_new_heap)
+ bin_vheap += size / sizeof(Eterm);
+ else
+ BIN_OLD_VHEAP(p) += size / sizeof(Eterm); /* for binary gc (words)*/
+ /* fall through... */
+ }
+ default:
prev = &ptr->next;
ptr = ptr->next;
}
}
- else if (!in_area(ptr, oheap, oheap_sz)) {
+ else if (ErtsInArea(ptr, oheap, oheap_sz))
+ break; /* and let old-heap loop continue */
+ else {
/* garbage */
switch (thing_subtag(ptr->thing_word)) {
case REFC_BINARY_SUBTAG:
{
Binary* bptr = ((ProcBin*)ptr)->val;
- if (erts_refc_dectest(&bptr->refc, 0) == 0) {
- erts_bin_free(bptr);
- }
+ erts_bin_release(bptr);
break;
}
case FUN_SUBTAG:
@@ -2321,30 +2895,46 @@ sweep_off_heap(Process *p, int fullsweep)
}
break;
}
+ case REF_SUBTAG:
+ {
+ ErtsMagicBinary *bptr;
+ ASSERT(is_magic_ref_thing(ptr));
+ bptr = ((ErtsMRefThing *) ptr)->mb;
+ erts_bin_release((Binary *) bptr);
+ break;
+ }
default:
ASSERT(is_external_header(ptr->thing_word));
erts_deref_node_entry(((ExternalThing*)ptr)->node);
}
*prev = ptr = ptr->next;
}
- else break; /* and let old-heap loop continue */
}
/* The rest of the list resides on old-heap, and we just did a
* generational collection - keep objects in list.
*/
while (ptr) {
- ASSERT(in_area(ptr, oheap, oheap_sz));
+ ASSERT(ErtsInArea(ptr, oheap, oheap_sz));
ASSERT(!IS_MOVED_BOXED(ptr->thing_word));
- if (ptr->thing_word == HEADER_PROC_BIN) {
+ switch (ptr->thing_word) {
+ case HEADER_PROC_BIN:
BIN_OLD_VHEAP(p) += ptr->size / sizeof(Eterm); /* for binary gc (words)*/
link_live_proc_bin(&shrink, &prev, &ptr, 0);
- }
- else {
- ASSERT(is_fun_header(ptr->thing_word) ||
- is_external_header(ptr->thing_word));
- prev = &ptr->next;
- ptr = ptr->next;
+ break;
+ case ERTS_USED_MAGIC_REF_THING_HEADER__:
+ ASSERT(is_magic_ref_thing(ptr));
+ BIN_OLD_VHEAP(p) +=
+ (((Uint) ((ErtsMRefThing *) ptr)->mb->orig_size)
+ / sizeof(Eterm)); /* for binary gc (words)*/
+ /* fall through... */
+ default:
+ ASSERT(is_fun_header(ptr->thing_word) ||
+ is_external_header(ptr->thing_word)
+ || is_magic_ref_thing(ptr));
+ prev = &ptr->next;
+ ptr = ptr->next;
+ break;
}
}
@@ -2353,7 +2943,6 @@ sweep_off_heap(Process *p, int fullsweep)
}
BIN_VHEAP_SZ(p) = next_vheap_size(p, bin_vheap, BIN_VHEAP_SZ(p));
MSO(p).overhead = bin_vheap;
- BIN_VHEAP_MATURE(p) = bin_vheap;
/*
* If we got any shrink candidates, check them out.
@@ -2386,7 +2975,6 @@ sweep_off_heap(Process *p, int fullsweep)
}
pb->val = erts_bin_realloc(pb->val, new_size);
- pb->val->orig_size = new_size;
pb->bytes = (byte *) pb->val->orig_bytes;
}
}
@@ -2425,7 +3013,7 @@ offset_heap(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size)
switch (primary_tag(val)) {
case TAG_PRIMARY_LIST:
case TAG_PRIMARY_BOXED:
- if (in_area(ptr_val(val), area, area_size)) {
+ if (ErtsInArea(ptr_val(val), area, area_size)) {
*hp = offset_ptr(val, offs);
}
hp++;
@@ -2439,6 +3027,9 @@ offset_heap(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size)
}
tari = thing_arityval(val);
switch (thing_subtag(val)) {
+ case REF_SUBTAG:
+ if (is_ordinary_ref_thing(hp))
+ break;
case REFC_BINARY_SUBTAG:
case FUN_SUBTAG:
case EXTERNAL_PID_SUBTAG:
@@ -2447,7 +3038,7 @@ offset_heap(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size)
{
struct erl_off_heap_header* oh = (struct erl_off_heap_header*) hp;
- if (in_area(oh->next, area, area_size)) {
+ if (ErtsInArea(oh->next, area, area_size)) {
Eterm** uptr = (Eterm **) (void *) &oh->next;
*uptr += offs; /* Patch the mso chain */
}
@@ -2457,7 +3048,7 @@ offset_heap(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size)
{
ErlBinMatchState *ms = (ErlBinMatchState*) hp;
ErlBinMatchBuffer *mb = &(ms->mb);
- if (in_area(ptr_val(mb->orig), area, area_size)) {
+ if (ErtsInArea(ptr_val(mb->orig), area, area_size)) {
mb->orig = offset_ptr(mb->orig, offs);
mb->base = binary_bytes(mb->orig);
}
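All of these offset_* routines perform the same elementary relocation: after the heap block is reallocated, every tagged pointer into the old area is shifted by the same word offset while its low tag bits are preserved. A standalone model (not the real offset_ptr()):

#include <stdint.h>
#include <stddef.h>

#define TAG_MASK ((uintptr_t)0x3)   /* two primary-tag bits, as in ERTS */

/* Move a tagged pointer by word_offs words, keeping its tag. */
static uintptr_t offset_tagged(uintptr_t val, ptrdiff_t word_offs)
{
    uintptr_t tag = val & TAG_MASK;
    uintptr_t *p = (uintptr_t *)(val & ~TAG_MASK);
    return (uintptr_t)(p + word_offs) | tag;
}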
@@ -2487,7 +3078,7 @@ offset_heap_ptr(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size)
switch (primary_tag(val)) {
case TAG_PRIMARY_LIST:
case TAG_PRIMARY_BOXED:
- if (in_area(ptr_val(val), area, area_size)) {
+ if (ErtsInArea(ptr_val(val), area, area_size)) {
*hp = offset_ptr(val, offs);
}
hp++;
@@ -2502,7 +3093,7 @@ offset_heap_ptr(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size)
static void
offset_off_heap(Process* p, Sint offs, char* area, Uint area_size)
{
- if (MSO(p).first && in_area((Eterm *)MSO(p).first, area, area_size)) {
+ if (MSO(p).first && ErtsInArea((Eterm *)MSO(p).first, area, area_size)) {
Eterm** uptr = (Eterm**) (void *) &MSO(p).first;
*uptr += offs;
}
@@ -2514,35 +3105,39 @@ offset_off_heap(Process* p, Sint offs, char* area, Uint area_size)
static void
offset_mqueue(Process *p, Sint offs, char* area, Uint area_size)
{
- ErlMessage* mp = p->msg.first;
-
- while (mp != NULL) {
- Eterm mesg = ERL_MESSAGE_TERM(mp);
- if (is_value(mesg)) {
- switch (primary_tag(mesg)) {
- case TAG_PRIMARY_LIST:
- case TAG_PRIMARY_BOXED:
- if (in_area(ptr_val(mesg), area, area_size)) {
- ERL_MESSAGE_TERM(mp) = offset_ptr(mesg, offs);
+ ErtsMessage* mp = p->msg.first;
+
+ if ((p->flags & (F_OFF_HEAP_MSGQ|F_OFF_HEAP_MSGQ_CHNG)) != F_OFF_HEAP_MSGQ) {
+
+ while (mp != NULL) {
+ Eterm mesg = ERL_MESSAGE_TERM(mp);
+ if (is_value(mesg)) {
+ switch (primary_tag(mesg)) {
+ case TAG_PRIMARY_LIST:
+ case TAG_PRIMARY_BOXED:
+ if (ErtsInArea(ptr_val(mesg), area, area_size)) {
+ ERL_MESSAGE_TERM(mp) = offset_ptr(mesg, offs);
+ }
+ break;
}
- break;
}
- }
- mesg = ERL_MESSAGE_TOKEN(mp);
- if (is_boxed(mesg) && in_area(ptr_val(mesg), area, area_size)) {
- ERL_MESSAGE_TOKEN(mp) = offset_ptr(mesg, offs);
- }
+ mesg = ERL_MESSAGE_TOKEN(mp);
+ if (is_boxed(mesg) && ErtsInArea(ptr_val(mesg), area, area_size)) {
+ ERL_MESSAGE_TOKEN(mp) = offset_ptr(mesg, offs);
+ }
#ifdef USE_VM_PROBES
- mesg = ERL_MESSAGE_DT_UTAG(mp);
- if (is_boxed(mesg) && in_area(ptr_val(mesg), area, area_size)) {
- ERL_MESSAGE_DT_UTAG(mp) = offset_ptr(mesg, offs);
- }
+ mesg = ERL_MESSAGE_DT_UTAG(mp);
+ if (is_boxed(mesg) && ErtsInArea(ptr_val(mesg), area, area_size)) {
+ ERL_MESSAGE_DT_UTAG(mp) = offset_ptr(mesg, offs);
+ }
#endif
- ASSERT((is_nil(ERL_MESSAGE_TOKEN(mp)) ||
- is_tuple(ERL_MESSAGE_TOKEN(mp)) ||
- is_atom(ERL_MESSAGE_TOKEN(mp))));
- mp = mp->next;
+ ASSERT((is_nil(ERL_MESSAGE_TOKEN(mp)) ||
+ is_tuple(ERL_MESSAGE_TOKEN(mp)) ||
+ is_atom(ERL_MESSAGE_TOKEN(mp))));
+ mp = mp->next;
+ }
+
}
}
@@ -2550,9 +3145,11 @@ static void ERTS_INLINE
offset_one_rootset(Process *p, Sint offs, char* area, Uint area_size,
Eterm* objv, int nobj)
{
+ Eterm *v;
+ Uint sz;
if (p->dictionary) {
- offset_heap(p->dictionary->data,
- p->dictionary->used,
+ offset_heap(ERTS_PD_START(p->dictionary),
+ ERTS_PD_SIZE(p->dictionary),
offs, area, area_size);
}
@@ -2570,6 +3167,8 @@ offset_one_rootset(Process *p, Sint offs, char* area, Uint area_size,
offset_heap_ptr(objv, nobj, offs, area, area_size);
}
offset_off_heap(p, offs, area, area_size);
+ if (erts_setup_nif_export_rootset(p, &v, &sz))
+ offset_heap_ptr(v, sz, offs, area, area_size);
}
static void
@@ -2601,12 +3200,22 @@ reply_gc_info(void *vgcirp)
Eterm **hpp;
Uint sz, *szp;
ErlOffHeap *ohp = NULL;
- ErlHeapFragment *bp = NULL;
+ ErtsMessage *mp = NULL;
ASSERT(esdp);
reclaimed = esdp->gc_info.reclaimed;
garbage_cols = esdp->gc_info.garbage_cols;
+ /*
+ * Add the dirty schedulers' info when replying
+ * from the requesting scheduler
+ */
+ if (gcirp->req_sched == esdp->no) {
+ erts_mtx_lock(&dirty_gc.mtx);
+ reclaimed += dirty_gc.info.reclaimed;
+ garbage_cols += dirty_gc.info.garbage_cols;
+ erts_mtx_unlock(&dirty_gc.mtx);
+ }
sz = 0;
hpp = NULL;
@@ -2616,7 +3225,7 @@ reply_gc_info(void *vgcirp)
if (hpp)
ref_copy = STORE_NC(hpp, ohp, gcirp->ref);
else
- *szp += REF_THING_SIZE;
+ *szp += ERTS_REF_THING_SIZE;
msg = erts_bld_tuple(hpp, szp, 3,
make_small(esdp->no),
@@ -2627,33 +3236,63 @@ reply_gc_info(void *vgcirp)
if (hpp)
break;
- hp = erts_alloc_message_heap(sz, &bp, &ohp, rp, &rp_locks);
+ mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
+
szp = NULL;
hpp = &hp;
}
- erts_queue_message(rp, &rp_locks, bp, msg, NIL
-#ifdef USE_VM_PROBES
- , NIL
-#endif
- );
+ erts_queue_message(rp, rp_locks, mp, msg, am_system);
if (gcirp->req_sched == esdp->no)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
- erts_smp_proc_dec_refc(rp);
+ erts_proc_dec_refc(rp);
- if (erts_smp_atomic32_dec_read_nob(&gcirp->refc) == 0)
+ if (erts_atomic32_dec_read_nob(&gcirp->refc) == 0)
gcireq_free(vgcirp);
}
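reply_gc_info() uses the ERTS two-pass build idiom: the same erts_bld_* calls run once with only a size pointer (hpp == NULL) to count heap words, then once with only a heap pointer to write the term, so the size estimate and the construction cannot drift apart. A skeleton of the idiom with a dummy builder (error handling elided):

#include <stdlib.h>

/* Dummy builder: sizing pass when szp != NULL, build pass otherwise. */
static long *bld_pair(long **hpp, size_t *szp, long a, long b)
{
    if (szp) {
        *szp += 2;          /* pass 1: just count the words */
        return NULL;
    }
    (*hpp)[0] = a;          /* pass 2: actually write them */
    (*hpp)[1] = b;
    *hpp += 2;
    return *hpp - 2;
}

static long *build_reply(long a, long b, long **heap_out)
{
    size_t sz = 0;
    long *hp;
    bld_pair(NULL, &sz, a, b);               /* sizing pass */
    *heap_out = hp = malloc(sz * sizeof(long));
    return bld_pair(&hp, NULL, a, b);        /* building pass */
}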
+void erts_sub_binary_to_heap_binary(Eterm **pp, Eterm **hpp, Eterm *orig) {
+ Eterm *ptr = *pp;
+ Eterm *htop = *hpp;
+ Eterm gval;
+ ErlSubBin *sb = (ErlSubBin *)ptr;
+ ErlHeapBin *hb = (ErlHeapBin *)htop;
+ Eterm *real_bin;
+ byte *bs;
+
+ real_bin = binary_val(follow_moved(sb->orig, (Eterm)0));
+
+ if (*real_bin == HEADER_PROC_BIN) {
+ bs = ((ProcBin *) real_bin)->bytes + sb->offs;
+ } else {
+ bs = (byte *)(&(((ErlHeapBin *) real_bin)->data)) + sb->offs;
+ }
+
+ hb->thing_word = header_heap_bin(sb->size);
+ hb->size = sb->size;
+ sys_memcpy((byte *)hb->data, bs, sb->size);
+
+ gval = make_boxed(htop);
+ *orig = gval;
+ *ptr = gval;
+
+ ptr += ERL_SUB_BIN_SIZE;
+ htop += heap_bin_size(sb->size);
+
+ *hpp = htop;
+ *pp = ptr;
+}
+
+
Eterm
erts_gc_info_request(Process *c_p)
{
- ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p);
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
Eterm ref;
ErtsGCInfoReq *gcirp;
Eterm *hp;
@@ -2665,66 +3304,267 @@ erts_gc_info_request(Process *c_p)
gcirp->proc = c_p;
gcirp->ref = STORE_NC(&hp, NULL, ref);
gcirp->req_sched = esdp->no;
- erts_smp_atomic32_init_nob(&gcirp->refc,
+ erts_atomic32_init_nob(&gcirp->refc,
(erts_aint32_t) erts_no_schedulers);
- erts_smp_proc_add_refc(c_p, (Sint32) erts_no_schedulers);
+ erts_proc_add_refc(c_p, (Sint) erts_no_schedulers);
-#ifdef ERTS_SMP
if (erts_no_schedulers > 1)
erts_schedule_multi_misc_aux_work(1,
erts_no_schedulers,
reply_gc_info,
(void *) gcirp);
-#endif
reply_gc_info((void *) gcirp);
return ref;
}
-#if defined(DEBUG) || defined(ERTS_OFFHEAP_DEBUG)
+Eterm
+erts_process_gc_info(Process *p, Uint *sizep, Eterm **hpp,
+ Uint extra_heap_block,
+ Uint extra_old_heap_block_size)
+{
+ ERTS_DECL_AM(bin_vheap_size);
+ ERTS_DECL_AM(bin_vheap_block_size);
+ ERTS_DECL_AM(bin_old_vheap_size);
+ ERTS_DECL_AM(bin_old_vheap_block_size);
+ Eterm tags[] = {
+ /* If you increase the number of elements here, make sure to update
+ any call sites, as they may have stack allocations that depend
+ on this element count. */
+ am_old_heap_block_size,
+ am_heap_block_size,
+ am_mbuf_size,
+ am_recent_size,
+ am_stack_size,
+ am_old_heap_size,
+ am_heap_size,
+ AM_bin_vheap_size,
+ AM_bin_vheap_block_size,
+ AM_bin_old_vheap_size,
+ AM_bin_old_vheap_block_size
+ };
+ UWord values[] = {
+ OLD_HEAP(p) ? OLD_HEND(p) - OLD_HEAP(p) + extra_old_heap_block_size
+ : extra_old_heap_block_size,
+ HEAP_SIZE(p) + extra_heap_block,
+ MBUF_SIZE(p),
+ HIGH_WATER(p) - HEAP_START(p),
+ STACK_START(p) - p->stop,
+ OLD_HEAP(p) ? OLD_HTOP(p) - OLD_HEAP(p) : 0,
+ HEAP_TOP(p) - HEAP_START(p),
+ MSO(p).overhead,
+ BIN_VHEAP_SZ(p),
+ BIN_OLD_VHEAP(p),
+ BIN_OLD_VHEAP_SZ(p)
+ };
+
+ Eterm res = THE_NON_VALUE;
+ ErtsMessage *mp;
+
+ ERTS_CT_ASSERT(sizeof(values)/sizeof(*values) == sizeof(tags)/sizeof(*tags));
+ ERTS_CT_ASSERT(sizeof(values)/sizeof(*values) == ERTS_PROCESS_GC_INFO_MAX_TERMS);
+
+ if (p->abandoned_heap) {
+ Eterm *htop, *heap;
+ ERTS_GET_ORIG_HEAP(p, heap, htop);
+ values[3] = HIGH_WATER(p) - heap;
+ values[6] = htop - heap;
+ }
+
+ if (p->flags & F_ON_HEAP_MSGQ) {
+ /* On-heap messages in the internal queue are counted
+ as being part of the heap, so we have to add them to the
+ am_mbuf_size value. process_info(total_heap_size) should
+ be the same as adding old_heap_block_size + heap_block_size
+ + mbuf_size.
+ */
+ for (mp = p->msg.first; mp; mp = mp->next)
+ if (mp->data.attached)
+ values[2] += erts_msg_attached_data_size(mp);
+ }
+
+ res = erts_bld_atom_uword_2tup_list(hpp,
+ sizep,
+ sizeof(values)/sizeof(*values),
+ tags,
+ values);
+
+ return res;
+}
static int
-within2(Eterm *ptr, Process *p, Eterm *real_htop)
+reached_max_heap_size(Process *p, Uint total_heap_size,
+ Uint extra_heap_size, Uint extra_old_heap_size)
+{
+ Uint max_heap_flags = MAX_HEAP_SIZE_FLAGS_GET(p);
+ if (IS_TRACED_FL(p, F_TRACE_GC) ||
+ max_heap_flags & MAX_HEAP_SIZE_LOG) {
+ Eterm msg;
+ Uint size = 0;
+ Eterm *o_hp , *hp;
+ erts_process_gc_info(p, &size, NULL, extra_heap_size,
+ extra_old_heap_size);
+ o_hp = hp = erts_alloc(ERTS_ALC_T_TMP, size * sizeof(Eterm));
+ msg = erts_process_gc_info(p, NULL, &hp, extra_heap_size,
+ extra_old_heap_size);
+
+ if (max_heap_flags & MAX_HEAP_SIZE_LOG) {
+ int alive = erts_is_alive;
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ Eterm *o_hp, *hp, args = NIL;
+
+ /* Build the format message */
+ erts_dsprintf(dsbufp, " Process: ~p ");
+ if (alive)
+ erts_dsprintf(dsbufp, "on node ~p");
+ erts_dsprintf(dsbufp, "~n Context: maximum heap size reached~n");
+ erts_dsprintf(dsbufp, " Max Heap Size: ~p~n");
+ erts_dsprintf(dsbufp, " Total Heap Size: ~p~n");
+ erts_dsprintf(dsbufp, " Kill: ~p~n");
+ erts_dsprintf(dsbufp, " Error Logger: ~p~n");
+ erts_dsprintf(dsbufp, " GC Info: ~p~n");
+
+ /* Build the args in reverse order */
+ o_hp = hp = erts_alloc(ERTS_ALC_T_TMP, 2*(alive ? 7 : 6) * sizeof(Eterm));
+ args = CONS(hp, msg, args); hp += 2;
+ args = CONS(hp, am_true, args); hp += 2;
+ args = CONS(hp, (max_heap_flags & MAX_HEAP_SIZE_KILL ? am_true : am_false), args); hp += 2;
+ args = CONS(hp, make_small(total_heap_size), args); hp += 2;
+ args = CONS(hp, make_small(MAX_HEAP_SIZE_GET(p)), args); hp += 2;
+ if (alive) {
+ args = CONS(hp, erts_this_node->sysname, args); hp += 2;
+ }
+ args = CONS(hp, p->common.id, args); hp += 2;
+
+ erts_send_error_term_to_logger(p->group_leader, dsbufp, args);
+ erts_free(ERTS_ALC_T_TMP, o_hp);
+ }
+
+ if (IS_TRACED_FL(p, F_TRACE_GC))
+ trace_gc(p, am_gc_max_heap_size, 0, msg);
+
+ erts_free(ERTS_ALC_T_TMP, o_hp);
+ }
+ /* returns true if we should kill the process */
+ return max_heap_flags & MAX_HEAP_SIZE_KILL;
+}
+
+Eterm
+erts_max_heap_size_map(Sint max_heap_size, Uint max_heap_flags,
+ Eterm **hpp, Uint *sz)
+{
+ if (!hpp) {
+ *sz += (2*3 + 1 + MAP_HEADER_FLATMAP_SZ);
+ return THE_NON_VALUE;
+ } else {
+ Eterm *hp = *hpp;
+ Eterm keys = TUPLE3(hp, am_error_logger, am_kill, am_size);
+ flatmap_t *mp;
+ hp += 4;
+ mp = (flatmap_t*) hp;
+ mp->thing_word = MAP_HEADER_FLATMAP;
+ mp->size = 3;
+ mp->keys = keys;
+ hp += MAP_HEADER_FLATMAP_SZ;
+ *hp++ = max_heap_flags & MAX_HEAP_SIZE_LOG ? am_true : am_false;
+ *hp++ = max_heap_flags & MAX_HEAP_SIZE_KILL ? am_true : am_false;
+ *hp++ = make_small(max_heap_size);
+ *hpp = hp;
+ return make_flatmap(mp);
+ }
+}
+
+int
+erts_max_heap_size(Eterm arg, Uint *max_heap_size, Uint *max_heap_flags)
+{
+ Sint sz;
+ *max_heap_flags = H_MAX_FLAGS;
+ if (is_small(arg)) {
+ sz = signed_val(arg);
+ *max_heap_flags = H_MAX_FLAGS;
+ } else if (is_map(arg)) {
+ const Eterm *size = erts_maps_get(am_size, arg);
+ const Eterm *kill = erts_maps_get(am_kill, arg);
+ const Eterm *log = erts_maps_get(am_error_logger, arg);
+ if (size && is_small(*size)) {
+ sz = signed_val(*size);
+ } else {
+ /* size is mandatory */
+ return 0;
+ }
+ if (kill) {
+ if (*kill == am_true)
+ *max_heap_flags |= MAX_HEAP_SIZE_KILL;
+ else if (*kill == am_false)
+ *max_heap_flags &= ~MAX_HEAP_SIZE_KILL;
+ else
+ return 0;
+ }
+ if (log) {
+ if (*log == am_true)
+ *max_heap_flags |= MAX_HEAP_SIZE_LOG;
+ else if (*log == am_false)
+ *max_heap_flags &= ~MAX_HEAP_SIZE_LOG;
+ else
+ return 0;
+ }
+ } else
+ return 0;
+ if (sz < 0)
+ return 0;
+ *max_heap_size = sz;
+ return 1;
+}
+
+#if defined(DEBUG) || defined(ERTS_OFFHEAP_DEBUG)
+
+int
+erts_dbg_within_proc(Eterm *ptr, Process *p, Eterm *real_htop)
{
- ErlHeapFragment* bp = MBUF(p);
- ErlMessage* mp = p->msg.first;
- Eterm *htop = real_htop ? real_htop : HEAP_TOP(p);
+ ErlHeapFragment* bp;
+ ErtsMessage* mp;
+ Eterm *htop, *heap;
+
+ if (p->abandoned_heap) {
+ ERTS_GET_ORIG_HEAP(p, heap, htop);
+ if (heap <= ptr && ptr < htop)
+ return 1;
+ }
+
+ heap = p->heap;
+ htop = real_htop ? real_htop : HEAP_TOP(p);
if (OLD_HEAP(p) && (OLD_HEAP(p) <= ptr && ptr < OLD_HEND(p))) {
return 1;
}
- if (HEAP_START(p) <= ptr && ptr < htop) {
+ if (heap <= ptr && ptr < htop) {
return 1;
}
- while (bp != NULL) {
- if (bp->mem <= ptr && ptr < bp->mem + bp->used_size) {
- return 1;
- }
- bp = bp->next;
- }
+
+ mp = p->msg_frag;
+ bp = p->mbuf;
+
+ if (bp)
+ goto search_heap_frags;
+
while (mp) {
- if (mp->data.attached) {
- ErlHeapFragment *hfp;
- if (is_value(ERL_MESSAGE_TERM(mp)))
- hfp = mp->data.heap_frag;
- else if (is_not_nil(ERL_MESSAGE_TOKEN(mp)))
- hfp = erts_dist_ext_trailer(mp->data.dist_ext);
- else
- hfp = NULL;
- if (hfp && hfp->mem <= ptr && ptr < hfp->mem + hfp->used_size)
+
+ bp = erts_message_to_heap_frag(mp);
+ mp = mp->next;
+
+ search_heap_frags:
+
+ while (bp) {
+ if (bp->mem <= ptr && ptr < bp->mem + bp->used_size) {
return 1;
+ }
+ bp = bp->next;
}
- mp = mp->next;
}
- return 0;
-}
-int
-within(Eterm *ptr, Process *p)
-{
- return within2(ptr, p, NULL);
+ return 0;
}
#endif
@@ -2734,16 +3574,16 @@ within(Eterm *ptr, Process *p)
#define ERTS_CHK_OFFHEAP_ASSERT(EXP) \
do { \
if (!(EXP)) \
- erl_exit(ERTS_ABORT_EXIT, \
+ erts_exit(ERTS_ABORT_EXIT, \
"%s:%d: Assertion failed: %s\n", \
__FILE__, __LINE__, #EXP); \
} while (0)
+
#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_LIST
# define ERTS_OFFHEAP_VISITED_BIT ((Eterm) 1 << 31)
#endif
-
void
erts_check_off_heap2(Process *p, Eterm *htop)
{
@@ -2757,7 +3597,7 @@ erts_check_off_heap2(Process *p, Eterm *htop)
erts_aint_t refc;
switch (thing_subtag(u.hdr->thing_word)) {
case REFC_BINARY_SUBTAG:
- refc = erts_refc_read(&u.pb->val->refc, 1);
+ refc = erts_refc_read(&u.pb->val->intern.refc, 1);
break;
case FUN_SUBTAG:
refc = erts_refc_read(&u.fun->fe->refc, 1);
@@ -2767,12 +3607,16 @@ erts_check_off_heap2(Process *p, Eterm *htop)
case EXTERNAL_REF_SUBTAG:
refc = erts_refc_read(&u.ext->node->refc, 1);
break;
+ case REF_SUBTAG:
+ ASSERT(is_magic_ref_thing(u.hdr));
+ refc = erts_refc_read(&u.mref->mb->intern.refc, 1);
+ break;
default:
ASSERT(!"erts_check_off_heap2: Invalid thing_word");
}
ERTS_CHK_OFFHEAP_ASSERT(refc >= 1);
#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_LIST
- ERTS_CHK_OFFHEAP_ASSERT(!(u.hdr->thing_word & ERTS_EXTERNAL_VISITED_BIT));
+ ERTS_CHK_OFFHEAP_ASSERT(!(u.hdr->thing_word & ERTS_OFFHEAP_VISITED_BIT));
u.hdr->thing_word |= ERTS_OFFHEAP_VISITED_BIT;
#endif
if (old) {
@@ -2781,11 +3625,11 @@ erts_check_off_heap2(Process *p, Eterm *htop)
else if (oheap <= u.ep && u.ep < ohtop)
old = 1;
else {
- ERTS_CHK_OFFHEAP_ASSERT(within2(u.ep, p, htop));
+ ERTS_CHK_OFFHEAP_ASSERT(erts_dbg_within_proc(u.ep, p, htop));
}
}
-#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_EXTERNAL_LIST
+#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_LIST
for (u.hdr = MSO(p).first; u.hdr; u.hdr = u.hdr->next)
u.hdr->thing_word &= ~ERTS_OFFHEAP_VISITED_BIT;
#endif
diff --git a/erts/emulator/beam/erl_gc.h b/erts/emulator/beam/erl_gc.h
index 5203dda263..6a529b8443 100644
--- a/erts/emulator/beam/erl_gc.h
+++ b/erts/emulator/beam/erl_gc.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2007-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2007-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -20,63 +21,87 @@
#ifndef __ERL_GC_H__
#define __ERL_GC_H__
-#include "erl_map.h"
+#if defined(ERL_WANT_GC_INTERNALS__) || defined(ERTS_DO_INCL_GLB_INLINE_FUNC_DEF)
/* GC declarations shared by beam/erl_gc.c and hipe/hipe_gc.c */
-#if defined(DEBUG) && !ERTS_GLB_INLINE_INCL_FUNC_DEF
-# define HARDDEBUG 1
-#endif
+#define ERTS_POTENTIALLY_LONG_GC_HSIZE (128*1024) /* Words */
+
+#include "erl_map.h"
+#include "erl_fun.h"
+#include "erl_bits.h"
#define IS_MOVED_BOXED(x) (!is_header((x)))
#define IS_MOVED_CONS(x) (is_non_value((x)))
+void erts_sub_binary_to_heap_binary(Eterm **pp, Eterm **hpp, Eterm *orig);
-#define MOVE_CONS(PTR,CAR,HTOP,ORIG) \
-do { \
- Eterm gval; \
- \
- HTOP[0] = CAR; /* copy car */ \
- HTOP[1] = PTR[1]; /* copy cdr */ \
- gval = make_list(HTOP); /* new location */ \
- *ORIG = gval; /* redirect original reference */ \
- PTR[0] = THE_NON_VALUE; /* store forwarding indicator */ \
- PTR[1] = gval; /* store forwarding address */ \
- HTOP += 2; /* update tospace htop */ \
-} while(0)
-
-#define MOVE_BOXED(PTR,HDR,HTOP,ORIG) \
-do { \
- Eterm gval; \
- Sint nelts; \
- \
- ASSERT(is_header(HDR)); \
- nelts = header_arity(HDR); \
- switch ((HDR) & _HEADER_SUBTAG_MASK) { \
- case SUB_BINARY_SUBTAG: nelts++; break; \
- case MAP_SUBTAG: nelts+=map_get_size(PTR) + 1; break; \
- case FUN_SUBTAG: nelts+=((ErlFunThing*)(PTR))->num_free+1; break; \
- } \
- gval = make_boxed(HTOP); \
- *ORIG = gval; \
- *HTOP++ = HDR; \
- *PTR++ = gval; \
- while (nelts--) *HTOP++ = *PTR++; \
- \
-} while(0)
-
-#define in_area(ptr,start,nbytes) \
- ((UWord)((char*)(ptr) - (char*)(start)) < (nbytes))
+ERTS_GLB_INLINE void move_cons(Eterm **pp, Eterm car, Eterm **hpp, Eterm *orig);
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE void move_cons(Eterm **pp, Eterm car, Eterm **hpp, Eterm *orig)
+{
+ Eterm *ptr = *pp;
+ Eterm *htop = *hpp;
+ Eterm gval;
-extern Uint erts_test_long_gc_sleep;
+ htop[0] = car; /* copy car */
+ htop[1] = ptr[1]; /* copy cdr */
+ gval = make_list(htop); /* new location */
+ *orig = gval; /* redirect original reference */
+ ptr[0] = THE_NON_VALUE; /* store forwarding indicator */
+ ptr[1] = gval; /* store forwarding address */
+ *hpp += 2; /* update tospace htop */
+}
+#endif
-#if defined(DEBUG) || defined(ERTS_OFFHEAP_DEBUG)
-int within(Eterm *ptr, Process *p);
+ERTS_GLB_INLINE void move_boxed(Eterm **pp, Eterm hdr, Eterm **hpp, Eterm *orig);
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE void move_boxed(Eterm **pp, Eterm hdr, Eterm **hpp, Eterm *orig)
+{
+ Eterm gval;
+ Sint nelts;
+ Eterm *ptr = *pp;
+ Eterm *htop = *hpp;
+
+ ASSERT(is_header(hdr));
+ nelts = header_arity(hdr);
+ switch ((hdr) & _HEADER_SUBTAG_MASK) {
+ case SUB_BINARY_SUBTAG:
+ {
+ ErlSubBin *sb = (ErlSubBin *)ptr;
+ /* convert sub-binary to heap-binary if applicable */
+ if (sb->bitsize == 0 && sb->bitoffs == 0 &&
+ sb->is_writable == 0 && sb->size <= sizeof(Eterm) * 3) {
+ erts_sub_binary_to_heap_binary(pp, hpp, orig);
+ return;
+ }
+ }
+ nelts++;
+ break;
+ case MAP_SUBTAG:
+ if (is_flatmap_header(hdr)) nelts+=flatmap_get_size(ptr) + 1;
+ else nelts += hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ break;
+ case FUN_SUBTAG: nelts+=((ErlFunThing*)(ptr))->num_free+1; break;
+ }
+ gval = make_boxed(htop);
+ *orig = gval;
+ *htop++ = hdr;
+ *ptr++ = gval;
+ while (nelts--) *htop++ = *ptr++;
+
+ *hpp = htop;
+ *pp = ptr;
+}
#endif
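The `sb->size <= sizeof(Eterm) * 3` bound in the SUB_BINARY case is what makes the in-flight conversion safe: the collector has only budgeted ERL_SUB_BIN_SIZE words of to-space for the term, and a heap binary carrying at most three words of data never needs more than that, so the move can only shrink the object. A sanity-check sketch (the word counts below are assumptions about the ERTS layouts, not taken from this diff):

#include <assert.h>
#include <stddef.h>

/* Words for a heap binary of 'bytes' data bytes: header + size + data. */
static size_t heap_bin_words(size_t bytes, size_t word)
{
    return 2 + (bytes + word - 1) / word;
}

int main(void)
{
    size_t word = sizeof(void *);
    size_t sub_bin_words = 7;   /* assumption: one word per ErlSubBin field */

    /* <= 3 words of payload => at most 5 words, within the budget */
    assert(heap_bin_words(3 * word, word) <= sub_bin_words);
    return 0;
}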
-ERTS_GLB_INLINE Eterm follow_moved(Eterm term);
+#define ErtsInYoungGen(TPtr, Ptr, OldHeap, OldHeapSz) \
+ (!erts_is_literal((TPtr), (Ptr)) \
+ & !ErtsInArea((Ptr), (OldHeap), (OldHeapSz)))
+
+ERTS_GLB_INLINE Eterm follow_moved(Eterm term, Eterm xptr_tag);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-ERTS_GLB_INLINE Eterm follow_moved(Eterm term)
+ERTS_GLB_INLINE Eterm follow_moved(Eterm term, Eterm xptr_tag)
{
Eterm* ptr;
switch (primary_tag(term)) {
@@ -84,17 +109,79 @@ ERTS_GLB_INLINE Eterm follow_moved(Eterm term)
break;
case TAG_PRIMARY_BOXED:
ptr = boxed_val(term);
- if (IS_MOVED_BOXED(*ptr)) term = *ptr;
+ if (IS_MOVED_BOXED(*ptr)) term = (*ptr) | xptr_tag;
break;
case TAG_PRIMARY_LIST:
ptr = list_val(term);
- if (IS_MOVED_CONS(ptr[0])) term = ptr[1];
+ if (IS_MOVED_CONS(ptr[0])) term = (ptr[1]) | xptr_tag;
break;
default:
ASSERT(!"strange tag in follow_moved");
}
return term;
}
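follow_moved() is the read side of the forwarding scheme that move_cons()/move_boxed() write: a moved cell's first word becomes a marker and its second word the new address, so later visitors resolve through the forwarding word instead of copying twice. A standalone two-word-cell model (simplified; the real code distinguishes cons and boxed markers):

#include <stdint.h>

#define MOVED ((uintptr_t)0)        /* stands in for THE_NON_VALUE */

typedef struct { uintptr_t w[2]; } cell;

/* Copy 'c' into to-space once; on later visits, follow the
 * forwarding address left behind by the first move. */
static cell *move_or_follow(cell *c, cell **htop)
{
    cell *to;
    if (c->w[0] == MOVED)                 /* already moved: follow */
        return (cell *)c->w[1];
    to = (*htop)++;                       /* allocate in to-space */
    *to = *c;                             /* copy the payload */
    c->w[0] = MOVED;                      /* forwarding indicator */
    c->w[1] = (uintptr_t)to;              /* forwarding address */
    return to;
}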
+
+#endif
+
+#endif /* ERL_WANT_GC_INTERNALS__ || ERTS_DO_INCL_GLB_INLINE_FUNC_DEF */
+
+/*
+ * Global exported
+ */
+
+#define ERTS_IS_GC_DESIRED_INTERNAL(Proc, HTop, STop) \
+ ((((STop) - (HTop) < (Proc)->mbuf_sz)) \
+ | ((Proc)->off_heap.overhead > (Proc)->bin_vheap_sz) \
+ | !!((Proc)->flags & F_FORCE_GC))
+
+#define ERTS_IS_GC_DESIRED(Proc) \
+ ERTS_IS_GC_DESIRED_INTERNAL((Proc), (Proc)->htop, (Proc)->stop)
+
+#define ERTS_FORCE_GC_INTERNAL(Proc, FCalls) \
+ do { \
+ (Proc)->flags |= F_FORCE_GC; \
+ ERTS_VBUMP_ALL_REDS_INTERNAL((Proc), (FCalls)); \
+ } while (0)
+
+#define ERTS_FORCE_GC(Proc) \
+ ERTS_FORCE_GC_INTERNAL((Proc), (Proc)->fcalls)
+
+extern Uint erts_test_long_gc_sleep;
+
+typedef struct {
+ Uint64 reclaimed;
+ Uint64 garbage_cols;
+} ErtsGCInfo;
+
+#define ERTS_PROCESS_GC_INFO_MAX_TERMS (11) /* number of elements in erts_process_gc_info */
+#define ERTS_PROCESS_GC_INFO_MAX_SIZE \
+ (ERTS_PROCESS_GC_INFO_MAX_TERMS * (2/*cons*/ + 3/*2-tuple*/ + BIG_UINT_HEAP_SIZE))
+Eterm erts_process_gc_info(struct process*, Uint *, Eterm **, Uint, Uint);
+
+void erts_gc_info(ErtsGCInfo *gcip);
+void erts_init_gc(void);
+int erts_garbage_collect_nobump(struct process*, int, Eterm*, int, int);
+void erts_garbage_collect(struct process*, int, Eterm*, int);
+void erts_garbage_collect_hibernate(struct process* p);
+Eterm erts_gc_after_bif_call_lhf(struct process* p, ErlHeapFragment *live_hf_end,
+ Eterm result, Eterm* regs, Uint arity);
+Eterm erts_gc_after_bif_call(struct process* p, Eterm result, Eterm* regs, Uint arity);
+int erts_garbage_collect_literals(struct process* p, Eterm* literals,
+ Uint lit_size,
+ struct erl_off_heap_header* oh,
+ int fcalls);
+Uint erts_next_heap_size(Uint, Uint);
+Eterm erts_heap_sizes(struct process* p);
+
+void erts_offset_off_heap(struct erl_off_heap*, Sint, Eterm*, Eterm*);
+void erts_offset_heap_ptr(Eterm*, Uint, Sint, Eterm*, Eterm*);
+void erts_offset_heap(Eterm*, Uint, Sint, Eterm*, Eterm*);
+void erts_free_heap_frags(struct process* p);
+Eterm erts_max_heap_size_map(Sint, Uint, Eterm **, Uint *);
+int erts_max_heap_size(Eterm, Uint *, Uint *);
+void erts_deallocate_young_generation(Process *c_p);
+#if defined(DEBUG) || defined(ERTS_OFFHEAP_DEBUG)
+int erts_dbg_within_proc(Eterm *ptr, Process *p, Eterm* real_htop);
#endif
#endif /* __ERL_GC_H__ */
diff --git a/erts/emulator/beam/erl_goodfit_alloc.c b/erts/emulator/beam/erl_goodfit_alloc.c
index e9d8249ee1..50aa41b4d2 100644
--- a/erts/emulator/beam/erl_goodfit_alloc.c
+++ b/erts/emulator/beam/erl_goodfit_alloc.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -167,7 +168,7 @@ static Block_t * get_free_block (Allctr_t *, Uint,
static void link_free_block (Allctr_t *, Block_t *);
static void unlink_free_block (Allctr_t *, Block_t *);
static void update_last_aux_mbc (Allctr_t *, Carrier_t *);
-static Eterm info_options (Allctr_t *, char *, int *,
+static Eterm info_options (Allctr_t *, char *, fmtfn_t *,
void *, Uint **, Uint *);
static void init_atoms (void);
@@ -550,7 +551,7 @@ add_2tup(Uint **hpp, Uint *szp, Eterm *lp, Eterm el1, Eterm el2)
static Eterm
info_options(Allctr_t *allctr,
char *prefix,
- int *print_to_p,
+ fmtfn_t *print_to_p,
void *print_to_arg,
Uint **hpp,
Uint *szp)
@@ -570,8 +571,8 @@ info_options(Allctr_t *allctr,
if (hpp || szp) {
if (!atoms_initialized)
- erl_exit(1, "%s:%d: Internal error: Atoms not initialized",
- __FILE__, __LINE__);;
+ erts_exit(ERTS_ERROR_EXIT, "%s:%d: Internal error: Atoms not initialized",
+ __FILE__, __LINE__);
res = NIL;
add_2tup(hpp, szp, &res, am.as, am.gf);
diff --git a/erts/emulator/beam/erl_goodfit_alloc.h b/erts/emulator/beam/erl_goodfit_alloc.h
index 385de0da23..76dd558234 100644
--- a/erts/emulator/beam/erl_goodfit_alloc.h
+++ b/erts/emulator/beam/erl_goodfit_alloc.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
diff --git a/erts/emulator/beam/erl_hl_timer.c b/erts/emulator/beam/erl_hl_timer.c
new file mode 100644
index 0000000000..bda2c9b94d
--- /dev/null
+++ b/erts/emulator/beam/erl_hl_timer.c
@@ -0,0 +1,3516 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2015-2017. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: High level timers implementing BIF timers
+ * as well as process and port timers.
+ *
+ * Author: Rickard Green
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+/* #define ERTS_MAGIC_REF_BIF_TIMERS */
+
+#include "sys.h"
+#include "global.h"
+#include "bif.h"
+#include "erl_bif_unique.h"
+#define ERTS_WANT_TIMER_WHEEL_API
+#include "erl_time.h"
+#include "erl_hl_timer.h"
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+#include "erl_binary.h"
+#endif
+
+#define ERTS_TMR_CHECK_CANCEL_ON_CREATE 0
+
+#if 0
+# define ERTS_HLT_HARD_DEBUG
+#endif
+#if 0
+# define ERTS_HLT_DEBUG
+#endif
+
+#if defined(ERTS_HLT_HARD_DEBUG) || defined(DEBUG)
+# if defined(ERTS_HLT_HARD_DEBUG)
+# undef ERTS_RBT_HARD_DEBUG
+# define ERTS_RBT_HARD_DEBUG 1
+# endif
+# ifndef ERTS_HLT_DEBUG
+# define ERTS_HLT_DEBUG 1
+# endif
+#endif
+
+#undef ERTS_HLT_ASSERT
+#if defined(ERTS_HLT_DEBUG)
+# define ERTS_HLT_ASSERT(E) ERTS_ASSERT(E)
+# undef ERTS_RBT_DEBUG
+# define ERTS_RBT_DEBUG
+#else
+# define ERTS_HLT_ASSERT(E) ((void) 1)
+#endif
+
+#if defined(ERTS_HLT_HARD_DEBUG) && defined(__GNUC__)
+#warning "* * * * * * * * * * * * * * * * * *"
+#warning "* ERTS_HLT_HARD_DEBUG IS ENABLED! *"
+#warning "* * * * * * * * * * * * * * * * * *"
+#endif
+
+#ifdef ERTS_HLT_HARD_DEBUG
+# define ERTS_HLT_HDBG_CHK_SRV(SRV) hdbg_chk_srv((SRV))
+static void hdbg_chk_srv(ErtsHLTimerService *srv);
+#else
+# define ERTS_HLT_HDBG_CHK_SRV(SRV) ((void) 1)
+#endif
+
+#if ERTS_REF_NUMBERS != 3
+#error "ERTS_REF_NUMBERS changed. Update me..."
+#endif
+
+typedef enum {
+ ERTS_TMR_BIF,
+ ERTS_TMR_PROC,
+ ERTS_TMR_PORT,
+ ERTS_TMR_CALLBACK
+} ErtsTmrType;
+
+#define ERTS_BIF_TIMER_SHORT_TIME 5000
+
+/* Bits 0 to 10 contain the scheduler id (see mask below) */
+#define ERTS_TMR_ROFLG_HLT (((Uint32) 1) << 11)
+#define ERTS_TMR_ROFLG_BIF_TMR (((Uint32) 1) << 12)
+#define ERTS_TMR_ROFLG_PRE_ALC (((Uint32) 1) << 13)
+#define ERTS_TMR_ROFLG_REG_NAME (((Uint32) 1) << 14)
+#define ERTS_TMR_ROFLG_PROC (((Uint32) 1) << 15)
+#define ERTS_TMR_ROFLG_PORT (((Uint32) 1) << 16)
+#define ERTS_TMR_ROFLG_CALLBACK (((Uint32) 1) << 17)
+
+#define ERTS_TMR_ROFLG_SID_MASK \
+ (ERTS_TMR_ROFLG_HLT - (Uint32) 1)
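+
+/*
+ * Example (illustrative): a BIF timer owned by the high level timer
+ * service and created on scheduler 3 carries
+ * roflgs == ((Uint32) 3) | ERTS_TMR_ROFLG_HLT | ERTS_TMR_ROFLG_BIF_TMR;
+ * the originating scheduler is later recovered as
+ * roflgs & ERTS_TMR_ROFLG_SID_MASK.
+ */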
+
+#define ERTS_TMR_STATE_ACTIVE ((erts_aint32_t) 0)
+#define ERTS_TMR_STATE_CANCELED ((erts_aint32_t) 1)
+#define ERTS_TMR_STATE_TIMED_OUT ((erts_aint32_t) 2)
+
+typedef struct ErtsHLTimer_ ErtsHLTimer;
+
+#define ERTS_HLT_PFLG_RED (((UWord) 1) << 0)
+#define ERTS_HLT_PFLG_SAME_TIME (((UWord) 1) << 1)
+
+#define ERTS_HLT_PFLGS_MASK \
+ (ERTS_HLT_PFLG_RED|ERTS_HLT_PFLG_SAME_TIME)
+
+#define ERTS_HLT_PFIELD_NOT_IN_TABLE (~((UWord) 0))
+
+typedef struct ErtsBifTimer_ ErtsBifTimer;
+
+typedef struct {
+ ErtsBifTimer *next;
+ ErtsBifTimer *prev;
+} ErtsBifTimerList;
+
+typedef struct {
+ UWord parent; /* parent pointer and flags... */
+ union {
+ struct {
+ ErtsHLTimer *right;
+ ErtsHLTimer *left;
+ } t;
+ struct {
+ ErtsHLTimer *prev;
+ ErtsHLTimer *next;
+ } l;
+ } u;
+ ErtsHLTimer *same_time;
+} ErtsHLTimerTimeTree;
+
+typedef struct {
+ UWord parent; /* parent pointer and flags... */
+ ErtsBifTimer *right;
+ ErtsBifTimer *left;
+} ErtsBifTimerTree;
+
+typedef struct {
+ Uint32 roflgs;
+ erts_atomic32_t refc;
+ union {
+ void *arg;
+ erts_atomic_t next;
+ } u;
+ union {
+ Process *proc;
+ Port *port;
+ Eterm name;
+ void (*callback)(void *);
+ } receiver;
+} ErtsTmrHead;
+
+struct ErtsHLTimer_ {
+ ErtsTmrHead head; /* NEED to be first! */
+ ErtsMonotonicTime timeout;
+ union {
+ ErtsThrPrgrLaterOp cleanup;
+ ErtsHLTimerTimeTree tree;
+ } time;
+
+#ifdef ERTS_HLT_HARD_DEBUG
+ int pending_timeout;
+#endif
+};
+
+typedef struct {
+ ErtsTmrHead head; /* NEED to be first! */
+ union {
+ ErtsTWheelTimer tw_tmr;
+ ErtsThrPrgrLaterOp cleanup;
+ } u;
+} ErtsTWTimer;
+
+struct ErtsBifTimer_ {
+ union {
+ ErtsTmrHead head;
+ ErtsHLTimer hlt;
+ ErtsTWTimer twt;
+ } type;
+ struct {
+ erts_atomic32_t state;
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsMagicBinary *mbin;
+ ErtsHLTimerList proc_list;
+#else
+ Uint32 refn[ERTS_REF_NUMBERS];
+ ErtsBifTimerTree proc_tree;
+ ErtsBifTimerTree tree;
+#endif
+ Eterm message;
+ ErlHeapFragment *bp;
+ } btm;
+};
+
+typedef union {
+ ErtsTmrHead head;
+ ErtsHLTimer hlt;
+ ErtsTWTimer twt;
+ ErtsBifTimer btm;
+} ErtsTimer;
+
+typedef ErtsTimer *(*ErtsCreateTimerFunc)(ErtsSchedulerData *esdp,
+ ErtsMonotonicTime timeout_pos,
+ int short_time, ErtsTmrType type,
+ void *rcvrp, Eterm rcvr,
+ Eterm msg,
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsMagicBinary *mbin,
+#else
+ Uint32 *refn,
+#endif
+ void (*callback)(void *), void *arg);
+
+#ifdef SMALL_MEMORY
+#define BIF_TIMER_PREALC_SZ 10
+#define PTIMER_PREALC_SZ 10
+#else
+#define BIF_TIMER_PREALC_SZ 100
+#define PTIMER_PREALC_SZ 100
+#endif
+
+ERTS_SCHED_PREF_PALLOC_IMPL(bif_timer_pre,
+ ErtsBifTimer,
+ BIF_TIMER_PREALC_SZ)
+
+ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(tw_timer,
+ ErtsTWTimer,
+ PTIMER_PREALC_SZ,
+ ERTS_ALC_T_LL_PTIMER)
+
+#ifdef ERTS_HLT_DEBUG
+#define ERTS_TMR_TIMEOUT_YIELD_LIMIT 5
+#else
+#define ERTS_TMR_TIMEOUT_YIELD_LIMIT 100
+#endif
+#define ERTS_TMR_CANCELED_TIMER_LIMIT 100
+#define ERTS_TMR_CANCELED_TIMER_SMALL_LIMIT 5
+
+#define ERTS_TMR_TIMEOUT_YIELD_STATE_T same_time_list_yield_state_t
+#define ERTS_TMR_YIELDING_TIMEOUT_STATE_INITER {NULL, {0}}
+typedef struct {
+ int dummy;
+} ERTS_TMR_TIMEOUT_YIELD_STATE_T;
+
+typedef struct {
+ ErtsTmrHead marker;
+ erts_atomic_t last;
+} ErtsHLTCncldTmrQTail;
+
+
+typedef struct {
+ /*
+ * This structure needs to be cache line aligned for best
+ * performance.
+ */
+ union {
+ /*
+ * Modified by threads returning canceled
+ * timers to this timer service.
+ */
+ ErtsHLTCncldTmrQTail data;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(
+ sizeof(ErtsHLTCncldTmrQTail))];
+ } tail;
+ /*
+ * Everything below this point is *only* accessed by the
+ * thread managing this timer service.
+ */
+ struct {
+ ErtsTimer *first;
+ ErtsTimer *unref_end;
+ struct {
+ ErtsThrPrgrVal thr_progress;
+ int thr_progress_reached;
+ ErtsTimer *unref_end;
+ } next;
+ int used_marker;
+ } head;
+} ErtsHLTCncldTmrQ;
+
+
+typedef struct {
+ ErtsHLTimer *root;
+ ERTS_TMR_TIMEOUT_YIELD_STATE_T state;
+} ErtsYieldingTimeoutState;
+
+struct ErtsHLTimerService_ {
+ ErtsHLTCncldTmrQ canceled_queue;
+ ErtsHLTimer *time_tree;
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsBifTimer *btm_tree;
+#endif
+ ErtsHLTimer *next_timeout;
+ ErtsYieldingTimeoutState yield;
+ ErtsTWheelTimer service_timer;
+};
+
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+
+static ERTS_INLINE int
+refn_is_lt(Uint32 *x, Uint32 *y)
+{
+ /* !0 if x < y */
+ if (x[2] < y[2])
+ return 1;
+ if (x[2] != y[2])
+ return 0;
+ if (x[1] < y[1])
+ return 1;
+ if (x[1] != y[1])
+ return 0;
+ return x[0] < y[0];
+}
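+
+/*
+ * Worked example (illustrative): for x = {7, 0, 1} and y = {0, 0, 2}
+ * the most significant word (index 2) decides, so refn_is_lt(x, y)
+ * returns !0 because x[2] == 1 < 2 == y[2], regardless of x[0] and y[0].
+ */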
+
+static ERTS_INLINE int
+refn_is_eq(Uint32 *x, Uint32 *y)
+{
+ return (x[0] == y[0]) & (x[1] == y[1]) & (x[2] == y[2]);
+}
+
+#endif
+
+#define ERTS_RBT_PREFIX time
+#define ERTS_RBT_T ErtsHLTimer
+#define ERTS_RBT_KEY_T ErtsMonotonicTime
+#define ERTS_RBT_FLAGS_T UWord
+#define ERTS_RBT_INIT_EMPTY_TNODE(T) \
+ do { \
+ (T)->time.tree.parent = (UWord) NULL; \
+ (T)->time.tree.u.t.right = NULL; \
+ (T)->time.tree.u.t.left = NULL; \
+ } while (0)
+#define ERTS_RBT_IS_RED(T) \
+ ((int) ((T)->time.tree.parent & ERTS_HLT_PFLG_RED))
+#define ERTS_RBT_SET_RED(T) \
+ ((T)->time.tree.parent |= ERTS_HLT_PFLG_RED)
+#define ERTS_RBT_IS_BLACK(T) \
+ (!ERTS_RBT_IS_RED((T)))
+#define ERTS_RBT_SET_BLACK(T) \
+ ((T)->time.tree.parent &= ~ERTS_HLT_PFLG_RED)
+#define ERTS_RBT_GET_FLAGS(T) \
+ ((T)->time.tree.parent & ERTS_HLT_PFLGS_MASK)
+#define ERTS_RBT_SET_FLAGS(T, F) \
+ do { \
+ ERTS_HLT_ASSERT((((UWord) (F)) & ~ERTS_HLT_PFLGS_MASK) == 0); \
+ (T)->time.tree.parent &= ~ERTS_HLT_PFLGS_MASK; \
+ (T)->time.tree.parent |= (F); \
+ } while (0)
+#define ERTS_RBT_GET_PARENT(T) \
+ ((ErtsHLTimer *) ((T)->time.tree.parent & ~ERTS_HLT_PFLGS_MASK))
+#define ERTS_RBT_SET_PARENT(T, P) \
+ do { \
+ ERTS_HLT_ASSERT((((UWord) (P)) & ERTS_HLT_PFLGS_MASK) == 0); \
+ (T)->time.tree.parent &= ERTS_HLT_PFLGS_MASK; \
+ (T)->time.tree.parent |= (UWord) (P); \
+ } while (0)
+#define ERTS_RBT_GET_RIGHT(T) ((T)->time.tree.u.t.right)
+#define ERTS_RBT_SET_RIGHT(T, R) ((T)->time.tree.u.t.right = (R))
+#define ERTS_RBT_GET_LEFT(T) ((T)->time.tree.u.t.left)
+#define ERTS_RBT_SET_LEFT(T, L) ((T)->time.tree.u.t.left = (L))
+#define ERTS_RBT_GET_KEY(T) ((T)->timeout)
+#define ERTS_RBT_IS_LT(KX, KY) ((KX) < (KY))
+#define ERTS_RBT_IS_EQ(KX, KY) ((KX) == (KY))
+#define ERTS_RBT_WANT_DELETE
+#define ERTS_RBT_WANT_SMALLEST
+#define ERTS_RBT_WANT_LOOKUP_INSERT
+#define ERTS_RBT_WANT_REPLACE
+#define ERTS_RBT_WANT_FOREACH
+#ifdef ERTS_HLT_HARD_DEBUG
+# define ERTS_RBT_WANT_LOOKUP
+#endif
+#define ERTS_RBT_UNDEF
+
+#include "erl_rbtree.h"
+
+/* Use a circular list for timers that expire at the same time */
+
+static ERTS_INLINE void
+same_time_list_insert(ErtsHLTimer **root, ErtsHLTimer *tmr)
+{
+ ErtsHLTimer *first = *root;
+ if (!first) {
+ ERTS_HLT_ASSERT((((UWord) root) & ERTS_HLT_PFLG_SAME_TIME) == 0);
+ tmr->time.tree.parent = ((UWord) root) | ERTS_HLT_PFLG_SAME_TIME;
+ tmr->time.tree.u.l.next = tmr;
+ tmr->time.tree.u.l.prev = tmr;
+ *root = tmr;
+ }
+ else {
+ tmr->time.tree.parent = ERTS_HLT_PFLG_SAME_TIME;
+ tmr->time.tree.u.l.next = first;
+ tmr->time.tree.u.l.prev = first->time.tree.u.l.prev;
+ first->time.tree.u.l.prev = tmr;
+ tmr->time.tree.u.l.prev->time.tree.u.l.next = tmr;
+ }
+}
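+
+/*
+ * Illustrative shape: inserting t1, t2, t3 into an empty *root yields
+ * the circle t1 -> t2 -> t3 -> t1 with *root == t1; only the root
+ * timer's parent word points back at root (tagged with
+ * ERTS_HLT_PFLG_SAME_TIME), the others carry the tag alone.
+ */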
+
+static ERTS_INLINE void
+same_time_list_delete(ErtsHLTimer *tmr)
+{
+ ErtsHLTimer **root, *next;
+
+ root = (ErtsHLTimer **) (tmr->time.tree.parent & ~ERTS_HLT_PFLG_SAME_TIME);
+ next = tmr->time.tree.u.l.next;
+
+ ERTS_HLT_ASSERT((tmr->time.tree.parent
+ == (((UWord) root) | ERTS_HLT_PFLG_SAME_TIME))
+ || (tmr->time.tree.parent
+ == ERTS_HLT_PFLG_SAME_TIME));
+
+ if (next == tmr) {
+ ERTS_HLT_ASSERT(root && *root == tmr);
+ ERTS_HLT_ASSERT(tmr->time.tree.u.l.prev == tmr);
+ *root = NULL;
+ }
+ else {
+ if (root) {
+ ERTS_HLT_ASSERT(*root == tmr);
+ *root = next;
+ next->time.tree.parent = ((UWord) root) | ERTS_HLT_PFLG_SAME_TIME;
+ }
+ tmr->time.tree.u.l.next->time.tree.u.l.prev = tmr->time.tree.u.l.prev;
+ tmr->time.tree.u.l.prev->time.tree.u.l.next = next;
+ }
+}
+
+static ERTS_INLINE void
+same_time_list_new_root(ErtsHLTimer **root)
+{
+ ErtsHLTimer *tmr = *root;
+ if (tmr) {
+ ERTS_HLT_ASSERT(root);
+ tmr->time.tree.parent = ((UWord) root) | ERTS_HLT_PFLG_SAME_TIME;
+ }
+}
+
+static ERTS_INLINE int
+same_time_list_foreach_destroy_yielding(ErtsHLTimer **root,
+ void (*op)(ErtsHLTimer *, void *),
+ void *arg,
+ ERTS_TMR_TIMEOUT_YIELD_STATE_T *ys,
+ Sint ylimit)
+{
+ Sint ycnt = ylimit;
+ ErtsHLTimer *end, *tmr = *root;
+ if (!tmr)
+ return 0;
+
+ ERTS_HLT_ASSERT(tmr->time.tree.parent
+ == (((UWord) root) | ERTS_HLT_PFLG_SAME_TIME));
+
+ end = tmr->time.tree.u.l.prev;
+ end->time.tree.u.l.next = NULL;
+
+ while (1) {
+ ErtsHLTimer *op_tmr = tmr;
+
+ ERTS_HLT_ASSERT((tmr->time.tree.parent
+ == (((UWord) root) | ERTS_HLT_PFLG_SAME_TIME))
+ || (tmr->time.tree.parent
+ == ERTS_HLT_PFLG_SAME_TIME));
+
+ tmr = tmr->time.tree.u.l.next;
+ (*op)(op_tmr, arg);
+ if (!tmr) {
+ *root = NULL;
+ return 0;
+ }
+ if (--ycnt <= 0) {
+ /* Make a new circular list of the timers left to process... */
+ *root = tmr;
+ end->time.tree.u.l.next = tmr;
+ tmr->time.tree.u.l.prev = end;
+ tmr->time.tree.parent = ((UWord) root) | ERTS_HLT_PFLG_SAME_TIME;
+ return 1;
+ }
+ }
+}
+
+static ERTS_INLINE void
+same_time_list_foreach(ErtsHLTimer *root,
+ void (*op)(ErtsHLTimer *, void *),
+ void *arg)
+{
+ if (root) {
+ ErtsHLTimer *tmr = root;
+ do {
+ (*op)(tmr, arg);
+ tmr = tmr->time.tree.u.l.next;
+ } while (root != tmr);
+ }
+}
+
+#ifdef ERTS_HLT_HARD_DEBUG
+
+static ERTS_INLINE ErtsHLTimer *
+same_time_list_lookup(ErtsHLTimer *root, ErtsHLTimer *x)
+{
+ if (root) {
+ ErtsHLTimer *tmr = root;
+ do {
+ if (tmr == x)
+ return tmr;
+ tmr = tmr->time.tree.u.l.next;
+ } while (root != tmr);
+ }
+ return NULL;
+}
+
+#endif /* ERTS_HLT_HARD_DEBUG */
+
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+#define ERTS_BTM_HLT2REFN(T) ((T)->btm.mbin->refn)
+#else
+#define ERTS_BTM_HLT2REFN(T) ((T)->btm.refn)
+#endif
+
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+
+#define ERTS_RBT_PREFIX btm
+#define ERTS_RBT_T ErtsBifTimer
+#define ERTS_RBT_KEY_T Uint32 *
+#define ERTS_RBT_FLAGS_T UWord
+#define ERTS_RBT_INIT_EMPTY_TNODE(T) \
+ do { \
+ (T)->btm.tree.parent = (UWord) NULL; \
+ (T)->btm.tree.right = NULL; \
+ (T)->btm.tree.left = NULL; \
+ } while (0)
+#define ERTS_RBT_IS_RED(T) \
+ ((int) ((T)->btm.tree.parent & ERTS_HLT_PFLG_RED))
+#define ERTS_RBT_SET_RED(T) \
+ ((T)->btm.tree.parent |= ERTS_HLT_PFLG_RED)
+#define ERTS_RBT_IS_BLACK(T) \
+ (!ERTS_RBT_IS_RED((T)))
+#define ERTS_RBT_SET_BLACK(T) \
+ ((T)->btm.tree.parent &= ~ERTS_HLT_PFLG_RED)
+#define ERTS_RBT_GET_FLAGS(T) \
+ ((T)->btm.tree.parent & ERTS_HLT_PFLGS_MASK)
+#define ERTS_RBT_SET_FLAGS(T, F) \
+ do { \
+ ERTS_HLT_ASSERT((((UWord) (F)) & ~ERTS_HLT_PFLGS_MASK) == 0); \
+ (T)->btm.tree.parent &= ~ERTS_HLT_PFLGS_MASK; \
+ (T)->btm.tree.parent |= (F); \
+ } while (0)
+#define ERTS_RBT_GET_PARENT(T) \
+ ((ErtsBifTimer *) ((T)->btm.tree.parent & ~ERTS_HLT_PFLGS_MASK))
+#define ERTS_RBT_SET_PARENT(T, P) \
+ do { \
+ ERTS_HLT_ASSERT((((UWord) (P)) & ERTS_HLT_PFLGS_MASK) == 0); \
+ (T)->btm.tree.parent &= ERTS_HLT_PFLGS_MASK; \
+ (T)->btm.tree.parent |= (UWord) (P); \
+ } while (0)
+#define ERTS_RBT_GET_RIGHT(T) ((T)->btm.tree.right)
+#define ERTS_RBT_SET_RIGHT(T, R) ((T)->btm.tree.right = (R))
+#define ERTS_RBT_GET_LEFT(T) ((T)->btm.tree.left)
+#define ERTS_RBT_SET_LEFT(T, L) ((T)->btm.tree.left = (L))
+#define ERTS_RBT_GET_KEY(T) ERTS_BTM_HLT2REFN((T))
+#define ERTS_RBT_IS_LT(KX, KY) refn_is_lt((KX), (KY))
+#define ERTS_RBT_IS_EQ(KX, KY) refn_is_eq((KX), (KY))
+#define ERTS_RBT_WANT_DELETE
+#define ERTS_RBT_WANT_INSERT
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+#define ERTS_RBT_WANT_LOOKUP
+#endif
+#define ERTS_RBT_WANT_FOREACH
+#define ERTS_RBT_UNDEF
+
+#include "erl_rbtree.h"
+
+#endif /* !ERTS_MAGIC_REF_BIF_TIMERS */
+
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+
+static ERTS_INLINE void
+proc_btm_list_insert(ErtsBifTimer **list, ErtsBifTimer *x)
+{
+ ErtsBifTimer *y = *list;
+ if (!y) {
+ x->btm.proc_list.next = x;
+ x->btm.proc_list.prev = x;
+ *list = x;
+ }
+ else {
+ ERTS_HLT_ASSERT(y->btm.proc_list.prev->btm.proc_list.next == y);
+ x->btm.proc_list.next = y;
+ x->btm.proc_list.prev = y->btm.proc_list.prev;
+ y->btm.proc_list.prev->btm.proc_list.next = x;
+ y->btm.proc_list.prev = x;
+ }
+}
+
+static ERTS_INLINE void
+proc_btm_list_delete(ErtsBifTimer **list, ErtsBifTimer *x)
+{
+ ErtsBifTimer *y = *list;
+ if (y == x && x->btm.proc_list.next == x) {
+ ERTS_HLT_ASSERT(x->btm.proc_list.prev == x);
+ *list = NULL;
+ }
+ else {
+ if (y == x)
+ *list = x->btm.proc_list.next;
+ ERTS_HLT_ASSERT(x->btm.proc_list.prev->btm.proc_list.next == x);
+ ERTS_HLT_ASSERT(x->btm.proc_list.next->btm.proc_list.prev == x);
+ x->btm.proc_list.prev->btm.proc_list.next = x->btm.proc_list.next;
+ x->btm.proc_list.next->btm.proc_list.prev = x->btm.proc_list.prev;
+ }
+ x->btm.proc_list.next = NULL;
+}
+
+static ERTS_INLINE int
+proc_btm_list_foreach_destroy_yielding(ErtsBifTimer **list,
+ void (*destroy)(ErtsBifTimer *, void *),
+ void *arg,
+ int limit)
+{
+ int i;
+ ErtsBifTimer *first, *last;
+
+ first = *list;
+ if (!first)
+ return 0;
+
+ last = first->btm.proc_list.prev;
+ for (i = 0; i < limit; i++) {
+ ErtsBifTimer *x = last;
+ last = last->btm.proc_list.prev;
+ (*destroy)(x, arg);
+ x->btm.proc_list.next = NULL;
+ if (x == first) {
+ *list = NULL;
+ return 0;
+ }
+ }
+
+ last->btm.proc_list.next = first;
+ first->btm.proc_list.prev = last;
+ return 1;
+}
+
+#else /* !ERTS_MAGIC_REF_BIF_TIMERS */
+
+#define ERTS_RBT_PREFIX proc_btm
+#define ERTS_RBT_T ErtsBifTimer
+#define ERTS_RBT_KEY_T Uint32 *
+#define ERTS_RBT_FLAGS_T UWord
+#define ERTS_RBT_INIT_EMPTY_TNODE(T) \
+ do { \
+ (T)->btm.proc_tree.parent = (UWord) NULL; \
+ (T)->btm.proc_tree.right = NULL; \
+ (T)->btm.proc_tree.left = NULL; \
+ } while (0)
+#define ERTS_RBT_IS_RED(T) \
+ ((int) ((T)->btm.proc_tree.parent & ERTS_HLT_PFLG_RED))
+#define ERTS_RBT_SET_RED(T) \
+ ((T)->btm.proc_tree.parent |= ERTS_HLT_PFLG_RED)
+#define ERTS_RBT_IS_BLACK(T) \
+ (!ERTS_RBT_IS_RED((T)))
+#define ERTS_RBT_SET_BLACK(T) \
+ ((T)->btm.proc_tree.parent &= ~ERTS_HLT_PFLG_RED)
+#define ERTS_RBT_GET_FLAGS(T) \
+ ((T)->btm.proc_tree.parent & ERTS_HLT_PFLGS_MASK)
+#define ERTS_RBT_SET_FLAGS(T, F) \
+ do { \
+ ERTS_HLT_ASSERT((((UWord) (F)) & ~ERTS_HLT_PFLGS_MASK) == 0); \
+ (T)->btm.proc_tree.parent &= ~ERTS_HLT_PFLGS_MASK; \
+ (T)->btm.proc_tree.parent |= (F); \
+ } while (0)
+#define ERTS_RBT_GET_PARENT(T) \
+ ((ErtsBifTimer *) ((T)->btm.proc_tree.parent & ~ERTS_HLT_PFLGS_MASK))
+#define ERTS_RBT_SET_PARENT(T, P) \
+ do { \
+ ERTS_HLT_ASSERT((((UWord) (P)) & ERTS_HLT_PFLGS_MASK) == 0); \
+ (T)->btm.proc_tree.parent &= ERTS_HLT_PFLGS_MASK; \
+ (T)->btm.proc_tree.parent |= (UWord) (P); \
+ } while (0)
+#define ERTS_RBT_GET_RIGHT(T) ((T)->btm.proc_tree.right)
+#define ERTS_RBT_SET_RIGHT(T, R) ((T)->btm.proc_tree.right = (R))
+#define ERTS_RBT_GET_LEFT(T) ((T)->btm.proc_tree.left)
+#define ERTS_RBT_SET_LEFT(T, L) ((T)->btm.proc_tree.left = (L))
+#define ERTS_RBT_GET_KEY(T) ERTS_BTM_HLT2REFN((T))
+#define ERTS_RBT_IS_LT(KX, KY) refn_is_lt((KX), (KY))
+#define ERTS_RBT_IS_EQ(KX, KY) refn_is_eq((KX), (KY))
+#define ERTS_RBT_WANT_DELETE
+#define ERTS_RBT_WANT_INSERT
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+#define ERTS_RBT_WANT_LOOKUP
+#endif
+#define ERTS_RBT_WANT_FOREACH_DESTROY_YIELDING
+#define ERTS_RBT_UNDEF
+
+#include "erl_rbtree.h"
+
+#endif /* !ERTS_MAGIC_REF_BIF_TIMERS */
+
+static void init_canceled_queue(ErtsHLTCncldTmrQ *cq);
+
+void
+erts_hl_timer_init(void)
+{
+ init_tw_timer_alloc();
+ init_bif_timer_pre_alloc();
+}
+
+ErtsHLTimerService *
+erts_create_timer_service(void)
+{
+ ErtsYieldingTimeoutState init_yield = ERTS_TMR_YIELDING_TIMEOUT_STATE_INITER;
+ ErtsHLTimerService *srv;
+
+ srv = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_TIMER_SERVICE,
+ sizeof(ErtsHLTimerService));
+ srv->time_tree = NULL;
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+ srv->btm_tree = NULL;
+#endif
+ srv->next_timeout = NULL;
+ srv->yield = init_yield;
+ erts_twheel_init_timer(&srv->service_timer);
+
+ init_canceled_queue(&srv->canceled_queue);
+
+ return srv;
+}
+
+size_t
+erts_timer_type_size(ErtsAlcType_t type)
+{
+ switch (type) {
+ case ERTS_ALC_T_LL_PTIMER: return sizeof(ErtsTWTimer);
+ case ERTS_ALC_T_HL_PTIMER: return sizeof(ErtsHLTimer);
+ case ERTS_ALC_T_BIF_TIMER: return sizeof(ErtsBifTimer);
+ default: ERTS_INTERNAL_ERROR("Unknown type");
+ }
+ return 0;
+}
+
+static ERTS_INLINE ErtsMonotonicTime
+get_timeout_pos(ErtsMonotonicTime now, ErtsMonotonicTime msec)
+{
+ ErtsMonotonicTime timeout_pos;
+ if (msec <= 0)
+ return ERTS_MONOTONIC_TO_CLKTCKS(now);
+ timeout_pos = ERTS_MONOTONIC_TO_CLKTCKS(now-1);
+ timeout_pos += ERTS_MSEC_TO_CLKTCKS(msec) + 1;
+ return timeout_pos;
+}
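+
+/*
+ * Worked example (illustrative): converting from now-1 and adding one
+ * clock tick rounds upwards, so with msec = 1 the timeout position is
+ * ERTS_MONOTONIC_TO_CLKTCKS(now - 1) + ERTS_MSEC_TO_CLKTCKS(1) + 1,
+ * ensuring at least one full millisecond passes before the timer
+ * may fire.
+ */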
+
+static ERTS_INLINE Sint64
+get_time_left(ErtsSchedulerData *esdp, ErtsMonotonicTime timeout_pos)
+{
+ ErtsMonotonicTime now = erts_get_monotonic_time(esdp);
+
+ now = ERTS_MONOTONIC_TO_CLKTCKS(now-1)+1;
+ if (timeout_pos <= now)
+ return (Sint64) 0;
+ return (Sint64) ERTS_CLKTCKS_TO_MSEC(timeout_pos - now);
+}
+
+static ERTS_INLINE int
+proc_timeout_common(Process *proc, void *tmr)
+{
+ if (tmr == (void *) erts_atomic_cmpxchg_mb(&proc->common.timer,
+ ERTS_PTMR_TIMEDOUT,
+ (erts_aint_t) tmr)) {
+ erts_aint32_t state;
+ erts_proc_lock(proc, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ state = erts_atomic32_read_acqb(&proc->state);
+ erts_proc_unlock(proc, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ if (!(state & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_EXITING)))
+ erts_schedule_process(proc, state, 0);
+ return 1;
+ }
+ return 0;
+}
+
+static ERTS_INLINE int
+port_timeout_common(Port *port, void *tmr)
+{
+ if (tmr == (void *) erts_atomic_cmpxchg_mb(&port->common.timer,
+ ERTS_PTMR_TIMEDOUT,
+ (erts_aint_t) tmr)) {
+ erts_port_task_schedule(port->common.id,
+ &port->timeout_task,
+ ERTS_PORT_TASK_TIMEOUT);
+ return 1;
+ }
+ return 0;
+}
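+
+/*
+ * Note (illustrative): the cmpxchg in the two functions above makes
+ * timeout and cancellation race-safe: whichever side first swaps
+ * common.timer away from the timer pointer wins and returns 1, letting
+ * its caller release the reference that common.timer held; the loser
+ * sees a mismatch and returns 0.
+ */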
+
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+
+static erts_atomic_t *
+mbin_to_btmref__(ErtsMagicBinary *mbin)
+{
+ return erts_binary_to_magic_indirection((Binary *) mbin);
+}
+
+static ERTS_INLINE void
+magic_binary_init(ErtsMagicBinary *mbin, ErtsBifTimer *tmr)
+{
+ erts_atomic_t *aptr = mbin_to_btmref__(mbin);
+ erts_atomic_init_nob(aptr, (erts_aint_t) tmr);
+}
+
+static ERTS_INLINE ErtsBifTimer *
+magic_binary_to_btm(ErtsMagicBinary *mbin)
+{
+ erts_atomic_t *aptr = mbin_to_btmref__(mbin);
+ ErtsBifTimer *tmr = (ErtsBifTimer *) erts_atomic_read_nob(aptr);
+ ERTS_HLT_ASSERT(!tmr || tmr->btm.mbin == mbin);
+ return tmr;
+}
+
+#endif /* ERTS_MAGIC_REF_BIF_TIMERS */
+
+static ERTS_INLINE erts_aint_t
+init_btm_specifics(ErtsSchedulerData *esdp,
+ ErtsBifTimer *tmr, Eterm msg,
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsMagicBinary *mbin
+#else
+ Uint32 *refn
+#endif
+ )
+{
+ Uint hsz = is_immed(msg) ? ((Uint) 0) : size_object(msg);
+ int refc;
+ if (!hsz) {
+ tmr->btm.message = msg;
+ tmr->btm.bp = NULL;
+ }
+ else {
+ ErlHeapFragment *bp = new_message_buffer(hsz);
+ Eterm *hp = bp->mem;
+ tmr->btm.message = copy_struct(msg, hsz, &hp, &bp->off_heap);
+ tmr->btm.bp = bp;
+ }
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ refc = 1;
+ tmr->btm.mbin = mbin;
+ erts_refc_inc(&mbin->refc, 1);
+ magic_binary_init(mbin, tmr);
+ tmr->btm.proc_list.next = NULL;
+#else
+ refc = 0;
+ tmr->btm.refn[0] = refn[0];
+ tmr->btm.refn[1] = refn[1];
+ tmr->btm.refn[2] = refn[2];
+
+ tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+
+ btm_rbt_insert(&esdp->timer_service->btm_tree, tmr);
+#endif
+
+ erts_atomic32_init_nob(&tmr->btm.state, ERTS_TMR_STATE_ACTIVE);
+ return refc; /* refc from magic binary... */
+}
+
+static void tw_bif_timer_timeout(void *vbtmp);
+
+static ERTS_INLINE void
+timer_destroy(ErtsTimer *tmr, int twt, int btm)
+{
+ if (!btm) {
+ if (twt)
+ tw_timer_free(&tmr->twt);
+ else
+ erts_free(ERTS_ALC_T_HL_PTIMER, tmr);
+ }
+ else {
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ Binary *bp = (Binary *) tmr->btm.btm.mbin;
+ if (erts_refc_dectest(&bp->refc, 0) == 0)
+ erts_bin_free(bp);
+#endif
+ if (tmr->head.roflgs & ERTS_TMR_ROFLG_PRE_ALC)
+ bif_timer_pre_free(&tmr->btm);
+ else
+ erts_free(ERTS_ALC_T_BIF_TIMER, &tmr->btm);
+ }
+}
+
+static ERTS_INLINE void
+timer_pre_dec_refc(ErtsTimer *tmr)
+{
+#ifdef ERTS_HLT_DEBUG
+ erts_aint_t refc;
+ refc = erts_atomic32_dec_read_nob(&tmr->head.refc);
+ ERTS_HLT_ASSERT(refc > 0);
+#else
+ erts_atomic32_dec_nob(&tmr->head.refc);
+#endif
+}
+
+/*
+ * Basic timer wheel timer stuff
+ */
+
+static void
+scheduled_tw_timer_destroy(void *vtmr)
+{
+ ErtsTimer * tmr = (ErtsTimer *) vtmr;
+ int btm = !!(tmr->head.roflgs & ERTS_TMR_ROFLG_BIF_TMR);
+ timer_destroy((ErtsTimer *) vtmr, 1, btm);
+}
+
+static void
+schedule_tw_timer_destroy(ErtsTWTimer *tmr)
+{
+ Uint size;
+ /*
+ * The reference to the process/port can be
+ * dropped immediately...
+ */
+ if (tmr->head.roflgs & ERTS_TMR_ROFLG_PROC)
+ erts_proc_dec_refc(tmr->head.receiver.proc);
+ else if (tmr->head.roflgs & ERTS_TMR_ROFLG_PORT)
+ erts_port_dec_refc(tmr->head.receiver.port);
+
+ if (!(tmr->head.roflgs & ERTS_TMR_ROFLG_BIF_TMR))
+ size = sizeof(ErtsHLTimer);
+ else {
+ /* Message buffer already dropped... */
+ size = sizeof(ErtsBifTimer);
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ size += sizeof(ErtsMagicIndirectionWord);
+#endif
+ }
+
+ erts_schedule_thr_prgr_later_cleanup_op(
+ scheduled_tw_timer_destroy,
+ (void *) tmr,
+ &tmr->u.cleanup,
+ size);
+}
+
+static ERTS_INLINE void
+tw_timer_dec_refc(ErtsTWTimer *tmr)
+{
+ if (erts_atomic32_dec_read_relb(&tmr->head.refc) == 0) {
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+ schedule_tw_timer_destroy(tmr);
+ }
+}
+
+static void
+tw_proc_timeout(void *vtwtp)
+{
+ ErtsTWTimer *twtp = (ErtsTWTimer *) vtwtp;
+ Process *proc = twtp->head.receiver.proc;
+ if (proc_timeout_common(proc, vtwtp))
+ tw_timer_dec_refc(twtp);
+ tw_timer_dec_refc(twtp);
+}
+
+static void
+tw_port_timeout(void *vtwtp)
+{
+ ErtsTWTimer *twtp = (ErtsTWTimer *) vtwtp;
+ Port *port = twtp->head.receiver.port;
+ if (port_timeout_common(port, vtwtp))
+ tw_timer_dec_refc(twtp);
+ tw_timer_dec_refc(twtp);
+}
+
+static void
+cancel_tw_timer(ErtsSchedulerData *esdp, ErtsTWTimer *tmr)
+{
+ ERTS_HLT_ASSERT((tmr->head.roflgs & ERTS_TMR_ROFLG_SID_MASK)
+ == (Uint32) esdp->no);
+ erts_twheel_cancel_timer(esdp->timer_wheel, &tmr->u.tw_tmr);
+ tw_timer_dec_refc(tmr);
+}
+
+static void
+tw_callback_timeout(void *vtwtp)
+{
+ ErtsTWTimer *twtp = (ErtsTWTimer *) vtwtp;
+ void (*callback)(void *) = twtp->head.receiver.callback;
+ void *arg = twtp->head.u.arg;
+ tw_timer_dec_refc(twtp);
+ (*callback)(arg);
+}
+
+static ErtsTimer *
+create_tw_timer(ErtsSchedulerData *esdp,
+ ErtsMonotonicTime timeout_pos,
+ int short_time, ErtsTmrType type,
+ void *rcvrp, Eterm rcvr,
+ Eterm msg,
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsMagicBinary *mbin,
+#else
+ Uint32 *refn,
+#endif
+ void (*callback)(void *), void *arg)
+{
+ ErtsTWTimer *tmr;
+ void (*timeout_func)(void *);
+ erts_aint32_t refc;
+
+ if (type != ERTS_TMR_BIF) {
+ tmr = tw_timer_alloc();
+ tmr->head.roflgs = 0;
+ }
+ else {
+ if (short_time) {
+ tmr = (ErtsTWTimer *) bif_timer_pre_alloc();
+ if (!tmr)
+ goto alloc_bif_timer;
+ tmr->head.roflgs = (ERTS_TMR_ROFLG_BIF_TMR
+ | ERTS_TMR_ROFLG_PRE_ALC);
+ }
+ else {
+ alloc_bif_timer:
+ tmr = (ErtsTWTimer *) erts_alloc(ERTS_ALC_T_BIF_TIMER,
+ sizeof(ErtsBifTimer));
+ tmr->head.roflgs = ERTS_TMR_ROFLG_BIF_TMR;
+ }
+ }
+
+ erts_twheel_init_timer(&tmr->u.tw_tmr);
+ tmr->head.roflgs |= (Uint32) esdp->no;
+ ERTS_HLT_ASSERT((((Uint32) esdp->no)
+ & ~ERTS_TMR_ROFLG_SID_MASK) == 0);
+
+ switch (type) {
+
+ case ERTS_TMR_PROC:
+ tmr->head.receiver.proc = (Process *) rcvrp;
+ tmr->head.roflgs |= ERTS_TMR_ROFLG_PROC;
+ timeout_func = tw_proc_timeout;
+ erts_proc_inc_refc((Process *) rcvrp);
+ refc = 2;
+ break;
+
+ case ERTS_TMR_PORT:
+ tmr->head.receiver.port = (Port *) rcvrp;
+ tmr->head.roflgs |= ERTS_TMR_ROFLG_PORT;
+ timeout_func = tw_port_timeout;
+ erts_port_inc_refc((Port *) rcvrp);
+ refc = 2;
+ break;
+
+ case ERTS_TMR_CALLBACK:
+ tmr->head.u.arg = arg;
+ tmr->head.receiver.callback = callback;
+
+ tmr->head.roflgs |= ERTS_TMR_ROFLG_CALLBACK;
+ timeout_func = tw_callback_timeout;
+ refc = 1;
+ break;
+
+ case ERTS_TMR_BIF:
+
+ timeout_func = tw_bif_timer_timeout;
+ if (is_internal_pid(rcvr)) {
+ tmr->head.roflgs |= ERTS_TMR_ROFLG_PROC;
+ tmr->head.receiver.proc = (Process *) rcvrp;
+ refc = 2;
+ }
+ else {
+ ERTS_HLT_ASSERT(is_atom(rcvr));
+ tmr->head.roflgs |= ERTS_TMR_ROFLG_REG_NAME;
+ tmr->head.receiver.name = (Eterm) rcvr;
+ refc = 1;
+ }
+
+ refc += init_btm_specifics(esdp,
+ (ErtsBifTimer *) tmr,
+ msg,
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ mbin
+#else
+ refn
+#endif
+ );
+ break;
+
+ default:
+ ERTS_INTERNAL_ERROR("Unsupported timer type");
+ return NULL;
+ }
+
+ erts_atomic32_init_nob(&tmr->head.refc, refc);
+
+ erts_twheel_set_timer(esdp->timer_wheel,
+ &tmr->u.tw_tmr,
+ timeout_func,
+ tmr,
+ timeout_pos);
+
+ return (ErtsTimer *) tmr;
+}
+
+/*
+ * Basic high level timer stuff
+ */
+
+static void
+scheduled_hl_timer_destroy(void *vtmr)
+{
+ ErtsTimer * tmr = (ErtsTimer *) vtmr;
+ int btm = !!(tmr->head.roflgs & ERTS_TMR_ROFLG_BIF_TMR);
+ timer_destroy((ErtsTimer *) vtmr, 0, btm);
+}
+
+static void
+schedule_hl_timer_destroy(ErtsHLTimer *tmr, Uint32 roflgs)
+{
+ UWord size;
+
+ /*
+ * The reference to the process/port can be
+ * dropped immediately...
+ */
+
+ ERTS_HLT_ASSERT(erts_atomic32_read_nob(&tmr->head.refc) == 0);
+
+ if (roflgs & ERTS_TMR_ROFLG_REG_NAME) {
+ ERTS_HLT_ASSERT(is_atom(tmr->head.receiver.name));
+ }
+ else if (roflgs & ERTS_TMR_ROFLG_PROC) {
+ ERTS_HLT_ASSERT(tmr->head.receiver.proc);
+ erts_proc_dec_refc(tmr->head.receiver.proc);
+ }
+ else if (roflgs & ERTS_TMR_ROFLG_PORT) {
+ ERTS_HLT_ASSERT(tmr->head.receiver.port);
+ erts_port_dec_refc(tmr->head.receiver.port);
+ }
+
+ if (!(roflgs & ERTS_TMR_ROFLG_BIF_TMR))
+ size = sizeof(ErtsHLTimer);
+ else {
+ /* Message buffer already dropped... */
+ size = sizeof(ErtsBifTimer);
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ size += sizeof(ErtsMagicIndirectionWord);
+#endif
+ }
+
+ erts_schedule_thr_prgr_later_cleanup_op(
+ scheduled_hl_timer_destroy, tmr,
+ &tmr->time.cleanup, size);
+}
+
+static ERTS_INLINE void
+hl_timer_dec_refc(ErtsHLTimer *tmr, Uint32 roflgs)
+{
+ if (erts_atomic32_dec_read_relb(&tmr->head.refc) == 0) {
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+ schedule_hl_timer_destroy(tmr, roflgs);
+ }
+}
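+
+/*
+ * Note (illustrative): destruction is deferred with
+ * erts_schedule_thr_prgr_later_cleanup_op() until all schedulers have
+ * made thread progress, so any thread that raced on the reference
+ * count can no longer hold a pointer into the timer when it is freed.
+ */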
+
+static void hlt_service_timeout(void *vesdp);
+static void handle_canceled_queue(ErtsSchedulerData *esdp,
+ ErtsHLTCncldTmrQ *cq,
+ int use_limit,
+ int ops_limit,
+ int *need_thr_progress,
+ ErtsThrPrgrVal *thr_prgr_p,
+ int *need_more_work);
+
+static ERTS_INLINE void
+check_canceled_queue(ErtsSchedulerData *esdp, ErtsHLTimerService *srv)
+{
+#if ERTS_TMR_CHECK_CANCEL_ON_CREATE
+ ErtsHLTCncldTmrQ *cq = &srv->canceled_queue;
+ if (cq->head.first != cq->head.unref_end)
+ handle_canceled_queue(esdp, cq, 1,
+ ERTS_TMR_CANCELED_TIMER_SMALL_LIMIT,
+ NULL, NULL, NULL);
+#endif
+}
+
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+
+static int
+bif_timer_ref_destructor(Binary *unused)
+{
+ return 1;
+}
+
+static ERTS_INLINE void
+btm_clear_magic_binary(ErtsBifTimer *tmr)
+{
+ erts_atomic_t *aptr = mbin_to_btmref__(tmr->btm.mbin);
+ Uint32 roflgs = tmr->type.head.roflgs;
+#ifdef ERTS_HLT_DEBUG
+ erts_aint_t tval = erts_atomic_xchg_nob(aptr,
+ (erts_aint_t) NULL);
+ ERTS_HLT_ASSERT(tval == (erts_aint_t) tmr);
+#else
+ erts_atomic_set_nob(aptr, (erts_aint_t) NULL);
+#endif
+ if (roflgs & ERTS_TMR_ROFLG_HLT)
+ hl_timer_dec_refc(&tmr->type.hlt, roflgs);
+ else
+ tw_timer_dec_refc(&tmr->type.twt);
+}
+
+#endif /* ERTS_MAGIC_REF_BIF_TIMERS */
+
+static ERTS_INLINE void
+bif_timer_timeout(ErtsHLTimerService *srv,
+ ErtsBifTimer *tmr,
+ Uint32 roflgs)
+{
+ erts_aint32_t state;
+
+ ERTS_HLT_ASSERT(tmr->type.head.roflgs == roflgs);
+ ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_BIF_TMR);
+
+ state = erts_atomic32_cmpxchg_acqb(&tmr->btm.state,
+ ERTS_TMR_STATE_TIMED_OUT,
+ ERTS_TMR_STATE_ACTIVE);
+
+ ERTS_HLT_ASSERT(state == ERTS_TMR_STATE_CANCELED
+ || state == ERTS_TMR_STATE_ACTIVE);
+
+ if (state == ERTS_TMR_STATE_ACTIVE) {
+ Process *proc;
+
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ btm_clear_magic_binary(tmr);
+#endif
+
+ if (roflgs & ERTS_TMR_ROFLG_REG_NAME) {
+ Eterm term;
+ term = tmr->type.head.receiver.name;
+ ERTS_HLT_ASSERT(is_atom(term));
+ term = erts_whereis_name_to_id(NULL, term);
+ proc = erts_proc_lookup(term);
+ }
+ else {
+ ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_PROC);
+ proc = tmr->type.head.receiver.proc;
+ ERTS_HLT_ASSERT(proc);
+ }
+ if (proc) {
+ int dec_refc = 0;
+ ErtsMessage *mp = erts_alloc_message(0, NULL);
+ mp->data.heap_frag = tmr->btm.bp;
+ tmr->btm.bp = NULL;
+ erts_queue_message(proc, 0, mp, tmr->btm.message,
+ am_clock_service);
+ erts_proc_lock(proc, ERTS_PROC_LOCK_BTM);
+ /* If the process is exiting, do not disturb the cleanup... */
+ if (!ERTS_PROC_IS_EXITING(proc)) {
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ if (tmr->btm.proc_list.next) {
+ proc_btm_list_delete(&proc->bif_timers, tmr);
+ dec_refc = 1;
+ }
+#else
+ if (tmr->btm.proc_tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ proc_btm_rbt_delete(&proc->bif_timers, tmr);
+ tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ dec_refc = 1;
+ }
+#endif
+ }
+ erts_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
+ if (dec_refc)
+ timer_pre_dec_refc((ErtsTimer *) tmr);
+ }
+ if (tmr->btm.bp)
+ free_message_buffer(tmr->btm.bp);
+ }
+
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+ if (tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ btm_rbt_delete(&srv->btm_tree, tmr);
+ tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ }
+#endif
+
+}
+
+static void
+tw_bif_timer_timeout(void *vbtmp)
+{
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsHLTimerService *srv = NULL;
+#else
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ErtsHLTimerService *srv = esdp->timer_service;
+#endif
+ ErtsBifTimer *btmp = (ErtsBifTimer *) vbtmp;
+ bif_timer_timeout(srv, btmp, btmp->type.head.roflgs);
+ tw_timer_dec_refc(&btmp->type.twt);
+}
+
+static ErtsTimer *
+create_hl_timer(ErtsSchedulerData *esdp,
+ ErtsMonotonicTime timeout_pos,
+ int short_time, ErtsTmrType type,
+ void *rcvrp, Eterm rcvr,
+ Eterm msg,
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsMagicBinary *mbin,
+#else
+ Uint32 *refn,
+#endif
+ void (*callback)(void *), void *arg)
+{
+ ErtsHLTimerService *srv = esdp->timer_service;
+ ErtsHLTimer *tmr, *st_tmr;
+ erts_aint32_t refc;
+ Uint32 roflgs;
+
+ ERTS_HLT_HDBG_CHK_SRV(srv);
+
+ check_canceled_queue(esdp, srv);
+
+ ERTS_HLT_ASSERT((esdp->no & ~ERTS_TMR_ROFLG_SID_MASK) == 0);
+
+ roflgs = ((Uint32) esdp->no) | ERTS_TMR_ROFLG_HLT;
+
+ if (type != ERTS_TMR_BIF) {
+
+ tmr = erts_alloc(ERTS_ALC_T_HL_PTIMER,
+ sizeof(ErtsHLTimer));
+ tmr->timeout = timeout_pos;
+
+ switch (type) {
+
+ case ERTS_TMR_PROC:
+ ERTS_HLT_ASSERT(is_internal_pid(rcvr));
+
+ erts_proc_inc_refc((Process *) rcvrp);
+ tmr->head.receiver.proc = (Process *) rcvrp;
+ roflgs |= ERTS_TMR_ROFLG_PROC;
+ refc = 2;
+ break;
+
+ case ERTS_TMR_PORT:
+ ERTS_HLT_ASSERT(is_internal_port(rcvr));
+ erts_port_inc_refc((Port *) rcvrp);
+ tmr->head.receiver.port = (Port *) rcvrp;
+ roflgs |= ERTS_TMR_ROFLG_PORT;
+ refc = 2;
+ break;
+
+ case ERTS_TMR_CALLBACK:
+ roflgs |= ERTS_TMR_ROFLG_CALLBACK;
+ tmr->head.receiver.callback = callback;
+ tmr->head.u.arg = arg;
+ refc = 1;
+ break;
+
+ default:
+ ERTS_INTERNAL_ERROR("Unsupported timer type");
+ return NULL;
+ }
+
+ }
+ else { /* ERTS_TMR_BIF */
+
+ if (short_time) {
+ tmr = (ErtsHLTimer *) bif_timer_pre_alloc();
+ if (!tmr)
+ goto alloc_bif_timer;
+ roflgs |= ERTS_TMR_ROFLG_PRE_ALC;
+ }
+ else {
+ alloc_bif_timer:
+ tmr = (ErtsHLTimer *) erts_alloc(ERTS_ALC_T_BIF_TIMER,
+ sizeof(ErtsBifTimer));
+ }
+
+ tmr->timeout = timeout_pos;
+
+ roflgs |= ERTS_TMR_ROFLG_BIF_TMR;
+ if (is_internal_pid(rcvr)) {
+ roflgs |= ERTS_TMR_ROFLG_PROC;
+ tmr->head.receiver.proc = (Process *) rcvrp;
+ refc = 2;
+ }
+ else {
+ ERTS_HLT_ASSERT(is_atom(rcvr));
+ roflgs |= ERTS_TMR_ROFLG_REG_NAME;
+ tmr->head.receiver.name = rcvr;
+ refc = 1;
+ }
+
+ refc += init_btm_specifics(esdp,
+ (ErtsBifTimer *) tmr,
+ msg,
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ mbin
+#else
+ refn
+#endif
+ );
+ }
+
+ tmr->head.roflgs = roflgs;
+ erts_atomic32_init_nob(&tmr->head.refc, refc);
+
+ if (!srv->next_timeout
+ || tmr->timeout < srv->next_timeout->timeout) {
+ if (srv->next_timeout)
+ erts_twheel_cancel_timer(esdp->timer_wheel,
+ &srv->service_timer);
+ erts_twheel_set_timer(esdp->timer_wheel,
+ &srv->service_timer,
+ hlt_service_timeout,
+ (void *) esdp,
+ tmr->timeout);
+ srv->next_timeout = tmr;
+ }
+
+ st_tmr = time_rbt_lookup_insert(&srv->time_tree, tmr);
+ tmr->time.tree.same_time = st_tmr;
+ if (st_tmr)
+ same_time_list_insert(&st_tmr->time.tree.same_time, tmr);
+
+#ifdef ERTS_HLT_HARD_DEBUG
+ tmr->pending_timeout = 0;
+#endif
+
+ ERTS_HLT_HDBG_CHK_SRV(srv);
+
+ return (ErtsTimer *) tmr;
+}
+
+static ERTS_INLINE void
+hlt_proc_timeout(ErtsHLTimer *tmr)
+{
+ if (proc_timeout_common(tmr->head.receiver.proc, (void *) tmr))
+ hl_timer_dec_refc(tmr, tmr->head.roflgs);
+}
+
+static ERTS_INLINE void
+hlt_port_timeout(ErtsHLTimer *tmr)
+{
+ if (port_timeout_common(tmr->head.receiver.port, (void *) tmr))
+ hl_timer_dec_refc(tmr, tmr->head.roflgs);
+}
+
+static void hlt_timeout(ErtsHLTimer *tmr, void *vsrv)
+{
+ ErtsHLTimerService *srv = (ErtsHLTimerService *) vsrv;
+ Uint32 roflgs;
+
+ ERTS_HLT_HDBG_CHK_SRV(srv);
+
+ roflgs = tmr->head.roflgs;
+ ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_HLT);
+
+ if (roflgs & ERTS_TMR_ROFLG_BIF_TMR)
+ bif_timer_timeout(srv, (ErtsBifTimer *) tmr, roflgs);
+ else if (roflgs & ERTS_TMR_ROFLG_PROC)
+ hlt_proc_timeout(tmr);
+ else if (roflgs & ERTS_TMR_ROFLG_PORT)
+ hlt_port_timeout(tmr);
+ else {
+ ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_CALLBACK);
+ (*tmr->head.receiver.callback)(tmr->head.u.arg);
+ }
+
+ tmr->time.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+
+ ERTS_HLT_HDBG_CHK_SRV(srv);
+
+ hl_timer_dec_refc(tmr, roflgs);
+}
+
+#ifdef ERTS_HLT_HARD_DEBUG
+static void
+set_pending_timeout(ErtsHLTimer *tmr, void *unused)
+{
+ tmr->pending_timeout = -1;
+}
+#endif
+
+static void
+hlt_service_timeout(void *vesdp)
+{
+ ErtsSchedulerData *esdp = (ErtsSchedulerData *) vesdp;
+ ErtsHLTimerService *srv = esdp->timer_service;
+ ErtsHLTimer *tmr = srv->next_timeout;
+ int yield;
+
+ ERTS_HLT_HDBG_CHK_SRV(srv);
+
+ ERTS_HLT_ASSERT(esdp == erts_get_scheduler_data());
+
+ ERTS_HLT_ASSERT(!srv->yield.root || srv->yield.root == tmr);
+ ERTS_HLT_ASSERT(tmr);
+ ERTS_HLT_ASSERT(tmr->timeout <= erts_get_monotonic_time(esdp));
+
+ if (!srv->yield.root) {
+ ERTS_HLT_ASSERT(tmr->time.tree.parent
+ != ERTS_HLT_PFIELD_NOT_IN_TABLE);
+ time_rbt_delete(&srv->time_tree, tmr);
+ tmr->time.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+#ifdef ERTS_HLT_HARD_DEBUG
+ tmr->pending_timeout = 1;
+ if (tmr->time.tree.same_time)
+ same_time_list_foreach(tmr->time.tree.same_time, set_pending_timeout, NULL);
+#endif
+ }
+
+ if (!tmr->time.tree.same_time && !srv->yield.root)
+ yield = 0;
+ else {
+ yield = same_time_list_foreach_destroy_yielding(
+ &tmr->time.tree.same_time, hlt_timeout, (void *) srv,
+ &srv->yield.state, ERTS_TMR_TIMEOUT_YIELD_LIMIT);
+ }
+
+ if (yield)
+ srv->yield.root = tmr;
+ else {
+ srv->yield.root = NULL;
+ hlt_timeout(tmr, (void *) srv);
+
+ tmr = time_rbt_smallest(srv->time_tree);
+ srv->next_timeout = tmr;
+ }
+
+ ERTS_HLT_HDBG_CHK_SRV(srv);
+
+ if (tmr)
+ erts_twheel_set_timer(esdp->timer_wheel,
+ &srv->service_timer,
+ hlt_service_timeout,
+ vesdp,
+ tmr->timeout);
+}
+
+static void
+hlt_delete_timer(ErtsSchedulerData *esdp, ErtsHLTimer *tmr)
+{
+ ErtsHLTimerService *srv = esdp->timer_service;
+
+ ERTS_HLT_HDBG_CHK_SRV(srv);
+
+ if (tmr->time.tree.parent == ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ /* Already removed... */
+ ERTS_HLT_HDBG_CHK_SRV(srv);
+ return;
+ }
+
+ if (tmr->time.tree.parent & ERTS_HLT_PFLG_SAME_TIME) {
+ same_time_list_delete(tmr);
+ }
+ else if (tmr->time.tree.same_time) {
+ ErtsHLTimer *st_container;
+
+ ERTS_HLT_ASSERT((tmr->time.tree.parent & ERTS_HLT_PFLG_SAME_TIME) == 0);
+ st_container = tmr->time.tree.same_time->time.tree.u.l.prev;
+
+ ERTS_HLT_ASSERT(st_container);
+ ERTS_HLT_ASSERT(st_container->time.tree.parent
+ & ERTS_HLT_PFLG_SAME_TIME);
+ ERTS_HLT_ASSERT(tmr->timeout == st_container->timeout);
+
+ same_time_list_delete(st_container);
+ st_container->time.tree.same_time = tmr->time.tree.same_time;
+ same_time_list_new_root(&st_container->time.tree.same_time);
+
+ time_rbt_replace(&srv->time_tree, tmr, st_container);
+ ERTS_HLT_ASSERT((st_container->time.tree.parent
+ & ERTS_HLT_PFLG_SAME_TIME) == 0);
+
+ if (srv->next_timeout == tmr)
+ srv->next_timeout = st_container;
+ }
+ else {
+ ERTS_HLT_ASSERT((tmr->time.tree.parent & ERTS_HLT_PFLG_SAME_TIME) == 0);
+ time_rbt_delete(&srv->time_tree, tmr);
+ if (tmr == srv->next_timeout) {
+ ErtsHLTimer *smlst;
+ erts_twheel_cancel_timer(esdp->timer_wheel,
+ &srv->service_timer);
+ smlst = time_rbt_smallest(srv->time_tree);
+ srv->next_timeout = smlst;
+ if (smlst) {
+ ERTS_HLT_ASSERT(smlst->timeout > tmr->timeout);
+ erts_twheel_set_timer(esdp->timer_wheel,
+ &srv->service_timer,
+ hlt_service_timeout,
+ (void *) esdp,
+ smlst->timeout);
+ }
+ }
+ }
+ tmr->time.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+
+ hl_timer_dec_refc(tmr, tmr->head.roflgs);
+
+ ERTS_HLT_HDBG_CHK_SRV(srv);
+}
+
+/*
+ * Pass canceled timers back to the originating scheduler
+ */
+
+static ERTS_INLINE void
+cleanup_sched_local_canceled_timer(ErtsSchedulerData *esdp,
+ ErtsTimer *tmr)
+{
+ Uint32 roflgs = tmr->head.roflgs;
+ ERTS_HLT_ASSERT(esdp == erts_get_scheduler_data());
+ ERTS_HLT_ASSERT((tmr->head.roflgs & ERTS_TMR_ROFLG_SID_MASK)
+ == (Uint32) esdp->no);
+
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+ if (roflgs & ERTS_TMR_ROFLG_BIF_TMR) {
+ ErtsBifTimer *btm = (ErtsBifTimer *) tmr;
+ if (btm->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ btm_rbt_delete(&esdp->timer_service->btm_tree, btm);
+ btm->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ }
+ }
+#endif
+
+ if (roflgs & ERTS_TMR_ROFLG_HLT) {
+ hlt_delete_timer(esdp, &tmr->hlt);
+ hl_timer_dec_refc(&tmr->hlt, roflgs);
+ }
+ else {
+ cancel_tw_timer(esdp, &tmr->twt);
+ tw_timer_dec_refc(&tmr->twt);
+ }
+}
+
+
+static void
+init_canceled_queue(ErtsHLTCncldTmrQ *cq)
+{
+ erts_atomic_init_nob(&cq->tail.data.marker.u.next, ERTS_AINT_NULL);
+ erts_atomic_init_nob(&cq->tail.data.last,
+ (erts_aint_t) &cq->tail.data.marker);
+ cq->head.first = (ErtsTimer *) &cq->tail.data.marker;
+ cq->head.unref_end = (ErtsTimer *) &cq->tail.data.marker;
+ cq->head.next.thr_progress = erts_thr_progress_current();
+ cq->head.next.thr_progress_reached = 1;
+ cq->head.next.unref_end = (ErtsTimer *) &cq->tail.data.marker;
+ cq->head.used_marker = 1;
+}
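+
+/*
+ * Note (illustrative): the statically embedded marker node initially
+ * serves as both head and tail, so cq_enqueue() never has to deal with
+ * an empty list and cq_dequeue() can tell when it has consumed
+ * everything that is currently safe to dereference.
+ */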
+
+static ERTS_INLINE int
+cq_enqueue(ErtsHLTCncldTmrQ *cq, ErtsTimer *tmr, int cinit)
+{
+ erts_aint_t itmp;
+ ErtsTimer *enq, *this = tmr;
+
+ erts_atomic_init_nob(&this->head.u.next, ERTS_AINT_NULL);
+ /* Enqueue at end of list... */
+
+ enq = (ErtsTimer *) erts_atomic_read_nob(&cq->tail.data.last);
+ itmp = erts_atomic_cmpxchg_relb(&enq->head.u.next,
+ (erts_aint_t) this,
+ ERTS_AINT_NULL);
+ if (itmp == ERTS_AINT_NULL) {
+ /* We are required to move the last pointer */
+#ifdef DEBUG
+ ASSERT(ERTS_AINT_NULL == erts_atomic_read_nob(&this->head.u.next));
+ ASSERT(((erts_aint_t) enq)
+ == erts_atomic_xchg_relb(&cq->tail.data.last,
+ (erts_aint_t) this));
+#else
+ erts_atomic_set_relb(&cq->tail.data.last, (erts_aint_t) this);
+#endif
+ return 1;
+ }
+ else {
+ /*
+ * We *need* to insert the element somewhere in between the
+ * last element we read earlier and the actual last element.
+ */
+ int i = cinit;
+
+ while (1) {
+ erts_aint_t itmp2;
+ erts_atomic_set_nob(&this->head.u.next, itmp);
+ itmp2 = erts_atomic_cmpxchg_relb(&enq->head.u.next,
+ (erts_aint_t) this,
+ itmp);
+ if (itmp == itmp2)
+ return 0; /* inserted this */
+ if ((i & 1) == 0)
+ itmp = itmp2;
+ else {
+ enq = (ErtsTimer *) itmp2;
+ itmp = erts_atomic_read_acqb(&enq->head.u.next);
+ ASSERT(itmp != ERTS_AINT_NULL);
+ }
+ i++;
+ }
+ }
+}
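+
+/*
+ * Note (illustrative): in the contended path above, the loop alternates
+ * (driven by i, seeded with cinit from the scheduler id distance)
+ * between retrying against the link value it lost to and advancing to
+ * the next element, spreading concurrent enqueuers over the tail of
+ * the list instead of having them all fight over one link.
+ */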
+
+static ERTS_INLINE erts_aint_t
+check_insert_marker(ErtsHLTCncldTmrQ *cq, erts_aint_t ilast)
+{
+ if (!cq->head.used_marker
+ && cq->head.unref_end == (ErtsTimer *) ilast) {
+ erts_aint_t itmp;
+ ErtsTimer *last = (ErtsTimer *) ilast;
+
+ erts_atomic_init_nob(&cq->tail.data.marker.u.next, ERTS_AINT_NULL);
+ itmp = erts_atomic_cmpxchg_relb(&last->head.u.next,
+ (erts_aint_t) &cq->tail.data.marker,
+ ERTS_AINT_NULL);
+ if (itmp == ERTS_AINT_NULL) {
+ ilast = (erts_aint_t) &cq->tail.data.marker;
+ cq->head.used_marker = !0;
+ erts_atomic_set_relb(&cq->tail.data.last, ilast);
+ }
+ }
+ return ilast;
+}
+
+static ERTS_INLINE ErtsTimer *
+cq_dequeue(ErtsHLTCncldTmrQ *cq)
+{
+ ErtsTimer *tmr;
+
+ if (cq->head.first == cq->head.unref_end)
+ return NULL;
+
+ tmr = cq->head.first;
+ if (tmr == (ErtsTimer *) &cq->tail.data.marker) {
+ ASSERT(cq->head.used_marker);
+ cq->head.used_marker = 0;
+ tmr = (ErtsTimer *) erts_atomic_read_nob(&tmr->head.u.next);
+ if (tmr == cq->head.unref_end) {
+ cq->head.first = tmr;
+ return NULL;
+ }
+ }
+
+ cq->head.first = (ErtsTimer *) erts_atomic_read_nob(&tmr->head.u.next);
+
+ ASSERT(cq->head.first);
+
+ return tmr;
+}
+
+static int
+cq_check_incoming(ErtsSchedulerData *esdp, ErtsHLTCncldTmrQ *cq)
+{
+ erts_aint_t ilast = erts_atomic_read_nob(&cq->tail.data.last);
+ if (((ErtsTimer *) ilast) == (ErtsTimer *) &cq->tail.data.marker
+ && cq->head.first == (ErtsTimer *) &cq->tail.data.marker) {
+ /* Nothing more to do... */
+ return 0;
+ }
+
+ if (cq->head.next.thr_progress_reached
+ || erts_thr_progress_has_reached(cq->head.next.thr_progress)) {
+ cq->head.next.thr_progress_reached = 1;
+ /* Move unreferenced end pointer forward... */
+
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+
+ cq->head.unref_end = cq->head.next.unref_end;
+
+ ilast = check_insert_marker(cq, ilast);
+
+ if (cq->head.unref_end != (ErtsTimer *) ilast) {
+ cq->head.next.unref_end = (ErtsTimer *) ilast;
+ cq->head.next.thr_progress = erts_thr_progress_later(esdp);
+ cq->head.next.thr_progress_reached = 0;
+ }
+ }
+ return 1;
+}
+
+static ERTS_INLINE void
+store_earliest_thr_prgr(ErtsThrPrgrVal *prev_val, ErtsHLTCncldTmrQ *cq)
+{
+ if (!cq->head.next.thr_progress_reached
+ && (*prev_val == ERTS_THR_PRGR_INVALID
+ || erts_thr_progress_cmp(cq->head.next.thr_progress,
+ *prev_val) < 0)) {
+ *prev_val = cq->head.next.thr_progress;
+ }
+}
+
+static void
+handle_canceled_queue(ErtsSchedulerData *esdp,
+ ErtsHLTCncldTmrQ *cq,
+ int use_limit,
+ int ops_limit,
+ int *need_thr_progress,
+ ErtsThrPrgrVal *thr_prgr_p,
+ int *need_more_work)
+{
+ int need_thr_prgr = 0;
+ int need_mr_wrk = 0;
+ int have_checked_incoming = 0;
+ int ops = 0;
+
+ ERTS_HLT_ASSERT(cq == &esdp->timer_service->canceled_queue);
+
+ while (1) {
+ ErtsTimer *tmr = cq_dequeue(cq);
+
+ if (tmr)
+ cleanup_sched_local_canceled_timer(esdp, tmr);
+ else {
+ if (have_checked_incoming)
+ break;
+ need_thr_prgr = cq_check_incoming(esdp, cq);
+ if (need_thr_progress) {
+ *need_thr_progress |= need_thr_prgr;
+ if (need_thr_prgr)
+ store_earliest_thr_prgr(thr_prgr_p, cq);
+ }
+ have_checked_incoming = 1;
+ continue;
+ }
+
+ if (use_limit && ++ops >= ops_limit) {
+ if (cq->head.first != cq->head.unref_end) {
+ need_mr_wrk = 1;
+ if (need_more_work)
+ *need_more_work |= 1;
+ }
+ break;
+ }
+ }
+
+ if (need_thr_progress && !(need_thr_prgr | need_mr_wrk)) {
+ need_thr_prgr = cq_check_incoming(esdp, cq);
+ *need_thr_progress |= need_thr_prgr;
+ if (need_thr_prgr)
+ store_earliest_thr_prgr(thr_prgr_p, cq);
+ }
+}
+
+void
+erts_handle_canceled_timers(void *vesdp,
+ int *need_thr_progress,
+ ErtsThrPrgrVal *thr_prgr_p,
+ int *need_more_work)
+{
+ ErtsSchedulerData *esdp = (ErtsSchedulerData *) vesdp;
+ ERTS_HLT_ASSERT(esdp == erts_get_scheduler_data());
+
+ handle_canceled_queue(esdp, &esdp->timer_service->canceled_queue,
+ 1, ERTS_TMR_CANCELED_TIMER_LIMIT,
+ need_thr_progress, thr_prgr_p,
+ need_more_work);
+}
+
+
+static void
+queue_canceled_timer(ErtsSchedulerData *esdp, int rsched_id, ErtsTimer *tmr)
+{
+ ErtsHLTCncldTmrQ *cq;
+ cq = &ERTS_SCHEDULER_IX(rsched_id-1)->timer_service->canceled_queue;
+ if (cq_enqueue(cq, tmr, rsched_id - (int) esdp->no))
+ erts_notify_canceled_timer(esdp, rsched_id);
+}
+
+static void
+continue_cancel_ptimer(ErtsSchedulerData *esdp, ErtsTimer *tmr)
+{
+ Uint32 sid = (tmr->head.roflgs & ERTS_TMR_ROFLG_SID_MASK);
+
+ if (esdp->no != sid)
+ queue_canceled_timer(esdp, sid, tmr);
+ else
+ cleanup_sched_local_canceled_timer(esdp, tmr);
+}
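+
+/*
+ * Illustrative flow: a timer created on scheduler S1 but canceled on S2
+ * is passed back via queue_canceled_timer() into S1's canceled_queue,
+ * and S1 is notified; S1 later drains the queue (see
+ * erts_handle_canceled_timers() above) and cleans the timer up locally,
+ * keeping all tree operations single threaded per timer service.
+ */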
+
+/*
+ * BIF timer specific
+ */
+
+
+Uint erts_bif_timer_memory_size(void)
+{
+ return (Uint) 0;
+}
+
+static BIF_RETTYPE
+setup_bif_timer(Process *c_p, int twheel, ErtsMonotonicTime timeout_pos,
+ int short_time, Eterm rcvr, Eterm msg, int wrap)
+{
+ BIF_RETTYPE ret;
+ Eterm ref, tmo_msg, *hp;
+ ErtsBifTimer *tmr;
+ ErtsSchedulerData *esdp;
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ Binary *mbin;
+#endif
+ Eterm tmp_hp[4];
+ ErtsCreateTimerFunc create_timer;
+
+ if (is_not_internal_pid(rcvr) && is_not_atom(rcvr))
+ goto badarg;
+
+ esdp = erts_proc_sched_data(c_p);
+
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ mbin = erts_create_magic_indirection(bif_timer_ref_destructor);
+ hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE);
+ ref = erts_mk_magic_ref(&hp, &c_p->off_heap, mbin);
+ ASSERT(erts_get_ref_numbers_thr_id(((ErtsMagicBinary *)mbin)->refn)
+ == (Uint32) esdp->no);
+#else
+ hp = HAlloc(c_p, ERTS_REF_THING_SIZE);
+ ref = erts_sched_make_ref_in_buffer(esdp, hp);
+ ASSERT(erts_get_ref_numbers_thr_id(internal_ordinary_ref_numbers(ref))
+ == (Uint32) esdp->no);
+#endif
+
+ tmo_msg = wrap ? TUPLE3(tmp_hp, am_timeout, ref, msg) : msg;
+
+ create_timer = twheel ? create_tw_timer : create_hl_timer;
+ tmr = (ErtsBifTimer *) create_timer(esdp, timeout_pos,
+ short_time, ERTS_TMR_BIF,
+ NULL, rcvr, tmo_msg,
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ (ErtsMagicBinary *) mbin,
+#else
+ internal_ordinary_ref_numbers(ref),
+#endif
+ NULL, NULL);
+
+ if (is_internal_pid(rcvr)) {
+ Process *proc = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
+ rcvr, ERTS_PROC_LOCK_BTM,
+ ERTS_P2P_FLG_INC_REFC);
+ if (!proc) {
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ btm_clear_magic_binary(tmr);
+#else
+ if (tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ btm_rbt_delete(&esdp->timer_service->btm_tree, tmr);
+ tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ }
+#endif
+ if (tmr->btm.bp)
+ free_message_buffer(tmr->btm.bp);
+ if (twheel)
+ cancel_tw_timer(esdp, &tmr->type.twt);
+ else
+ hlt_delete_timer(esdp, &tmr->type.hlt);
+ timer_destroy((ErtsTimer *) tmr, twheel, 1);
+ }
+ else {
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ proc_btm_list_insert(&proc->bif_timers, tmr);
+#else
+ proc_btm_rbt_insert(&proc->bif_timers, tmr);
+#endif
+ erts_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
+ tmr->type.head.receiver.proc = proc;
+ }
+ }
+
+ ERTS_BIF_PREP_RET(ret, ref);
+ return ret;
+
+badarg:
+
+ ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
+ return ret;
+}
+
+static int
+cancel_bif_timer(ErtsBifTimer *tmr)
+{
+ erts_aint_t state;
+ Uint32 roflgs;
+ int res;
+
+ state = erts_atomic32_cmpxchg_acqb(&tmr->btm.state,
+ ERTS_TMR_STATE_CANCELED,
+ ERTS_TMR_STATE_ACTIVE);
+ if (state != ERTS_TMR_STATE_ACTIVE)
+ return 0;
+
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ btm_clear_magic_binary(tmr);
+#endif
+
+ if (tmr->btm.bp)
+ free_message_buffer(tmr->btm.bp);
+
+ res = -1;
+
+ roflgs = tmr->type.head.roflgs;
+ if (roflgs & ERTS_TMR_ROFLG_PROC) {
+ Process *proc;
+
+ proc = tmr->type.head.receiver.proc;
+ ERTS_HLT_ASSERT(!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_REG_NAME));
+
+ erts_proc_lock(proc, ERTS_PROC_LOCK_BTM);
+ /*
+ * If the process is exiting, let it clean up
+ * the btm tree by itself (it may be in
+ * the middle of tree destruction).
+ */
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ if (!ERTS_PROC_IS_EXITING(proc) && tmr->btm.proc_list.next) {
+ proc_btm_list_delete(&proc->bif_timers, tmr);
+ res = 1;
+ }
+#else
+ if (!ERTS_PROC_IS_EXITING(proc)
+ && tmr->btm.proc_tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ proc_btm_rbt_delete(&proc->bif_timers, tmr);
+ tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ res = 1;
+ }
+#endif
+ erts_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
+ }
+
+ return res;
+}
+
+static ERTS_INLINE Sint64
+access_btm(ErtsBifTimer *tmr, Uint32 sid, ErtsSchedulerData *esdp, int cancel)
+{
+ int cncl_res;
+ Sint64 time_left;
+ ErtsMonotonicTime timeout;
+ int is_hlt;
+
+ if (!tmr)
+ return -1;
+
+ is_hlt = !!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_HLT);
+ timeout = (is_hlt
+ ? tmr->type.hlt.timeout
+ : erts_tweel_read_timeout(&tmr->type.twt.u.tw_tmr));
+
+ if (!cancel) {
+ erts_aint32_t state = erts_atomic32_read_acqb(&tmr->btm.state);
+ if (state == ERTS_TMR_STATE_ACTIVE)
+ return get_time_left(esdp, timeout);
+ return -1;
+ }
+
+ cncl_res = cancel_bif_timer(tmr);
+ if (!cncl_res)
+ return -1;
+
+ time_left = get_time_left(esdp, timeout);
+
+ if (sid != (Uint32) esdp->no) {
+ if (cncl_res > 0)
+ queue_canceled_timer(esdp, sid, (ErtsTimer *) tmr);
+ }
+ else {
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+ if (tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ btm_rbt_delete(&esdp->timer_service->btm_tree, tmr);
+ tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ }
+#endif
+ if (is_hlt) {
+ if (cncl_res > 0)
+ hl_timer_dec_refc(&tmr->type.hlt, tmr->type.hlt.head.roflgs);
+ hlt_delete_timer(esdp, &tmr->type.hlt);
+ }
+ else {
+ if (cncl_res > 0)
+ tw_timer_dec_refc(&tmr->type.twt);
+ cancel_tw_timer(esdp, &tmr->type.twt);
+ }
+ }
+
+ return time_left;
+}
+
+static ERTS_INLINE Eterm
+return_info(Process *c_p, Sint64 time_left)
+{
+ Uint hsz;
+ Eterm *hp;
+
+ if (time_left < 0)
+ return am_false;
+
+ if (time_left <= (Sint64) MAX_SMALL)
+ return make_small((Sint) time_left);
+
+ hsz = ERTS_SINT64_HEAP_SIZE(time_left);
+ hp = HAlloc(c_p, hsz);
+ return erts_sint64_to_big(time_left, &hp);
+}
+
+static ERTS_INLINE Eterm
+send_async_info(Process *proc, ErtsProcLocks initial_locks,
+ Eterm tref, int cancel, Sint64 time_left)
+{
+ ErtsProcLocks locks = initial_locks;
+ ErtsMessage *mp;
+ Eterm tag, res, msg, ref;
+ Uint hsz;
+ Eterm *hp;
+ ErlOffHeap *ohp;
+
+ hsz = 4;
+ hsz += NC_HEAP_SIZE(tref);
+
+ if (time_left > (Sint64) MAX_SMALL)
+ hsz += ERTS_SINT64_HEAP_SIZE(time_left);
+
+ mp = erts_alloc_message_heap(proc, &locks, hsz, &hp, &ohp);
+
+ if (cancel)
+ tag = am_cancel_timer;
+ else
+ tag = am_read_timer;
+
+ ref = STORE_NC(&hp, ohp, tref);
+
+ if (time_left < 0)
+ res = am_false;
+ else if (time_left <= (Sint64) MAX_SMALL)
+ res = make_small((Sint) time_left);
+ else
+ res = erts_sint64_to_big(time_left, &hp);
+
+ msg = TUPLE3(hp, tag, ref, res);
+
+ erts_queue_message(proc, locks, mp, msg, am_clock_service);
+
+ locks &= ~initial_locks;
+ if (locks)
+ erts_proc_unlock(proc, locks);
+
+ return am_ok;
+}
+
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+
+static BIF_RETTYPE
+access_bif_timer(Process *c_p, Eterm tref, int cancel, int async, int info)
+{
+ BIF_RETTYPE ret;
+ Eterm res;
+ Sint64 time_left;
+
+ if (!is_internal_magic_ref(tref)) {
+ if (is_not_ref(tref)) {
+ ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
+ return ret;
+ }
+ time_left = -1;
+ }
+ else {
+ ErtsMagicBinary *mbin;
+ mbin = (ErtsMagicBinary *) erts_magic_ref2bin(tref);
+ if (mbin->destructor != bif_timer_ref_destructor)
+ time_left = -1;
+ else {
+ ErtsBifTimer *tmr;
+ Uint32 sid;
+ tmr = magic_binary_to_btm(mbin);
+ sid = erts_get_ref_numbers_thr_id(internal_magic_ref_numbers(tref));
+ ASSERT(1 <= sid && sid <= erts_no_schedulers);
+ time_left = access_btm(tmr, sid, erts_proc_sched_data(c_p), cancel);
+ }
+ }
+
+ if (!info)
+ res = am_ok;
+ else if (!async)
+ res = return_info(c_p, time_left);
+ else
+ res = send_async_info(c_p, ERTS_PROC_LOCK_MAIN,
+ tref, cancel, time_left);
+
+ ERTS_BIF_PREP_RET(ret, res);
+
+ return ret;
+}
+
+#else /* !ERTS_MAGIC_REF_BIF_TIMERS */
+
+static ERTS_INLINE Eterm
+send_sync_info(Process *proc, ErtsProcLocks initial_locks,
+ Uint32 *refn, int cancel, Sint64 time_left)
+{
+ ErtsProcLocks locks = initial_locks;
+ ErtsMessage *mp;
+ Eterm res, msg, ref;
+ Uint hsz;
+ Eterm *hp;
+ ErlOffHeap *ohp;
+
+ hsz = 3 + ERTS_REF_THING_SIZE;
+
+ if (time_left > (Sint64) MAX_SMALL)
+ hsz += ERTS_SINT64_HEAP_SIZE(time_left);
+
+ mp = erts_alloc_message_heap(proc, &locks, hsz, &hp, &ohp);
+
+ write_ref_thing(hp, refn[0], refn[1], refn[2]);
+ ref = make_internal_ref(hp);
+ hp += ERTS_REF_THING_SIZE;
+
+ if (time_left < 0)
+ res = am_false;
+ else if (time_left <= (Sint64) MAX_SMALL)
+ res = make_small((Sint) time_left);
+ else
+ res = erts_sint64_to_big(time_left, &hp);
+
+ msg = TUPLE2(hp, ref, res);
+
+ erts_queue_message(proc, locks, mp, msg, am_clock_service);
+
+ locks &= ~initial_locks;
+ if (locks)
+ erts_proc_unlock(proc, locks);
+
+ return am_ok;
+}
+
+static ERTS_INLINE Eterm
+access_sched_local_btm(Process *c_p, Eterm pid,
+ Eterm tref, Uint32 *trefn,
+ Uint32 *rrefn,
+ int async, int cancel,
+ int return_res,
+ int info)
+{
+ ErtsSchedulerData *esdp;
+ ErtsHLTimerService *srv;
+ ErtsBifTimer *tmr;
+ Sint64 time_left;
+ Process *proc;
+ ErtsProcLocks proc_locks;
+
+ time_left = -1;
+
+ if (!c_p)
+ esdp = erts_get_scheduler_data();
+ else {
+ esdp = erts_proc_sched_data(c_p);
+ ERTS_HLT_ASSERT(esdp == erts_get_scheduler_data());
+ }
+
+ ERTS_HLT_ASSERT(erts_get_ref_numbers_thr_id(trefn)
+ == (Uint32) esdp->no);
+
+ srv = esdp->timer_service;
+
+ tmr = btm_rbt_lookup(srv->btm_tree, trefn);
+
+ time_left = access_btm(tmr, (Uint32) esdp->no, esdp, cancel);
+
+ if (!info)
+ return am_ok;
+
+ if (c_p) {
+ proc = c_p;
+ proc_locks = ERTS_PROC_LOCK_MAIN;
+ }
+ else {
+ proc = erts_proc_lookup(pid);
+ proc_locks = 0;
+ }
+
+ if (!async) {
+ if (c_p)
+ return return_info(c_p, time_left);
+
+ if (proc)
+ return send_sync_info(proc, proc_locks,
+ rrefn, cancel, time_left);
+ }
+ else if (proc) {
+ Eterm ref;
+ Eterm heap[ERTS_REF_THING_SIZE];
+ if (is_value(tref))
+ ref = tref;
+ else {
+ write_ref_thing(&heap[0], trefn[0], trefn[1], trefn[2]);
+ ref = make_internal_ref(&heap[0]);
+ }
+ return send_async_info(proc, proc_locks,
+ ref, cancel, time_left);
+ }
+
+ return am_ok;
+}
+
+#define ERTS_BTM_REQ_FLG_ASYNC (((Uint32) 1) << 0)
+#define ERTS_BTM_REQ_FLG_CANCEL (((Uint32) 1) << 1)
+#define ERTS_BTM_REQ_FLG_INFO (((Uint32) 1) << 2)
+
+typedef struct {
+ Eterm pid;
+ Uint32 trefn[ERTS_REF_NUMBERS];
+ Uint32 rrefn[ERTS_REF_NUMBERS];
+ Uint32 flags;
+} ErtsBifTimerRequest;
+
+static void
+bif_timer_access_request(void *vreq)
+{
+ ErtsBifTimerRequest *req = (ErtsBifTimerRequest *) vreq;
+ int async = (int) (req->flags & ERTS_BTM_REQ_FLG_ASYNC);
+ int cancel = (int) (req->flags & ERTS_BTM_REQ_FLG_CANCEL);
+ int info = (int) (req->flags & ERTS_BTM_REQ_FLG_INFO);
+ (void) access_sched_local_btm(NULL, req->pid, THE_NON_VALUE,
+ req->trefn, req->rrefn, async,
+ cancel, 0, info);
+ erts_free(ERTS_ALC_T_TIMER_REQUEST, vreq);
+}
+
+static int
+try_access_sched_remote_btm(ErtsSchedulerData *esdp,
+ Process *c_p, Uint32 sid,
+ Eterm tref, Uint32 *trefn,
+ int async, int cancel,
+ int info, Eterm *resp)
+{
+ ErtsBifTimer *tmr;
+ Sint64 time_left;
+
+ ERTS_HLT_ASSERT(c_p);
+
+ /*
+ * Check if the timer is aimed at the
+ * current process...
+ */
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_BTM);
+ tmr = proc_btm_rbt_lookup(c_p->bif_timers, trefn);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_BTM);
+ if (!tmr)
+ return 0;
+
+ time_left = access_btm(tmr, sid, esdp, cancel);
+
+ if (!info)
+ *resp = am_ok;
+ else if (!async)
+ *resp = return_info(c_p, time_left);
+ else
+ *resp = send_async_info(c_p, ERTS_PROC_LOCK_MAIN,
+ tref, cancel, time_left);
+
+ return 1;
+}
+
+static Eterm
+no_timer_result(Process *c_p, Eterm tref, int cancel, int async, int info)
+{
+ ErtsMessage *mp;
+ Uint hsz;
+ Eterm *hp, msg, ref, tag;
+ ErlOffHeap *ohp;
+ ErtsProcLocks locks;
+
+ if (!async)
+ return am_false;
+ if (!info)
+ return am_ok;
+
+ hsz = 4;
+ hsz += NC_HEAP_SIZE(tref);
+ locks = ERTS_PROC_LOCK_MAIN;
+ mp = erts_alloc_message_heap(c_p, &locks, hsz, &hp, &ohp);
+ ref = STORE_NC(&hp, ohp, tref);
+ tag = cancel ? am_cancel_timer : am_read_timer;
+ msg = TUPLE3(hp, tag, ref, am_false);
+ erts_queue_message(c_p, locks, mp, msg, am_clock_service);
+ locks &= ~ERTS_PROC_LOCK_MAIN;
+ if (locks)
+ erts_proc_unlock(c_p, locks);
+ return am_ok;
+}
+
+static BIF_RETTYPE
+access_bif_timer(Process *c_p, Eterm tref, int cancel, int async, int info)
+{
+ BIF_RETTYPE ret;
+ ErtsSchedulerData *esdp;
+ Uint32 sid;
+ Uint32 *trefn;
+ Eterm res;
+
+ if (is_not_internal_ref(tref)) {
+ if (is_not_ref(tref))
+ goto badarg;
+ else
+ goto no_timer;
+ }
+
+ esdp = erts_proc_sched_data(c_p);
+
+ trefn = internal_ref_numbers(tref);
+ sid = erts_get_ref_numbers_thr_id(trefn);
+ if (sid < 1 || erts_no_schedulers < sid)
+ goto no_timer;
+
+ if (sid == (Uint32) esdp->no) {
+ res = access_sched_local_btm(c_p, c_p->common.id,
+ tref, trefn, NULL,
+ async, cancel, !async,
+ info);
+ ERTS_BIF_PREP_RET(ret, res);
+ }
+ else if (try_access_sched_remote_btm(esdp, c_p,
+ sid, tref, trefn,
+ async, cancel,
+ info, &res)) {
+ ERTS_BIF_PREP_RET(ret, res);
+ }
+ else {
+ /*
+ * Schedule access for execution on
+ * remote scheduler...
+ */
+ ErtsBifTimerRequest *req = erts_alloc(ERTS_ALC_T_TIMER_REQUEST,
+ sizeof(ErtsBifTimerRequest));
+
+ req->flags = 0;
+ if (cancel)
+ req->flags |= ERTS_BTM_REQ_FLG_CANCEL;
+ if (async)
+ req->flags |= ERTS_BTM_REQ_FLG_ASYNC;
+ if (info)
+ req->flags |= ERTS_BTM_REQ_FLG_INFO;
+
+ req->pid = c_p->common.id;
+
+ req->trefn[0] = trefn[0];
+ req->trefn[1] = trefn[1];
+ req->trefn[2] = trefn[2];
+
+ if (async)
+ ERTS_BIF_PREP_RET(ret, am_ok);
+ else {
+ Eterm *hp, rref;
+ Uint32 *rrefn;
+
+ hp = HAlloc(c_p, ERTS_REF_THING_SIZE);
+ rref = erts_sched_make_ref_in_buffer(esdp, hp);
+ rrefn = internal_ref_numbers(rref);
+
+ req->rrefn[0] = rrefn[0];
+ req->rrefn[1] = rrefn[1];
+ req->rrefn[2] = rrefn[2];
+
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+
+ if (ERTS_PROC_PENDING_EXIT(c_p))
+ ERTS_VBUMP_ALL_REDS(c_p);
+ else {
+ /*
+ * The caller needs to wait for a message containing
+ * the ref that we just created. No such message
+ * can exist in the caller's message queue at this time.
+ * We therefore move the save pointer of the
+ * caller's message queue to the end of the queue.
+ *
+ * NOTE: It is of vital importance that the caller
+ *       immediately do a receive unconditionally
+ *       waiting for the message with the reference;
+ *       otherwise, the next receive will *not* work
+ *       as expected!
+ */
+ ERTS_MSGQ_MV_INQ2PRIVQ(c_p);
+ c_p->msg.save = c_p->msg.last;
+ }
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+
+ ERTS_BIF_PREP_TRAP1(ret, erts_await_result, c_p, rref);
+ }
+
+ erts_schedule_misc_aux_work(sid,
+ bif_timer_access_request,
+ (void *) req);
+ }
+
+ return ret;
+
+badarg:
+ ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
+ return ret;
+
+no_timer:
+ return no_timer_result(c_p, tref, cancel, async, info);
+}
+
+#endif /* !ERTS_MAGIC_REF_BIF_TIMERS */
+
+static ERTS_INLINE int
+bool_arg(Eterm val, int *argp)
+{
+ switch (val) {
+ case am_true: *argp = 1; return 1;
+ case am_false: *argp = 0; return 1;
+ default: return 0;
+ }
+}
+
+static ERTS_INLINE int
+parse_bif_timer_options(Eterm option_list, int *async,
+ int *info, int *abs)
+{
+ Eterm list = option_list;
+
+ if (async)
+ *async = 0;
+ if (info)
+ *info = 1;
+ if (abs)
+ *abs = 0;
+
+ while (is_list(list)) {
+ Eterm *consp, *tp, opt;
+
+ consp = list_val(list);
+ opt = CAR(consp);
+ if (is_not_tuple(opt))
+ return 0;
+
+ tp = tuple_val(opt);
+ if (arityval(tp[0]) != 2)
+ return 0;
+
+ switch (tp[1]) {
+ case am_async:
+ if (!async || !bool_arg(tp[2], async))
+ return 0;
+ break;
+ case am_info:
+ if (!info || !bool_arg(tp[2], info))
+ return 0;
+ break;
+ case am_abs:
+ if (!abs || !bool_arg(tp[2], abs))
+ return 0;
+ break;
+ default:
+ return 0;
+ }
+
+ list = CDR(consp);
+ }
+
+ if (is_not_nil(list))
+ return 0;
+ return 1;
+}
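+
+/*
+ * Examples of option lists accepted above (as seen from Erlang):
+ *
+ *   []                            -> async = false, info = true, abs = false
+ *   [{async, true}]               -> async = true
+ *   [{abs, true}, {info, false}]  -> abs = true, info = false
+ *
+ * A non-tuple element, an unknown key, a non-boolean value, or an
+ * improper list makes parsing fail and the calling BIF returns
+ * badarg.
+ */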
+
+static void
+exit_cancel_bif_timer(ErtsBifTimer *tmr, void *vesdp)
+{
+ ErtsSchedulerData *esdp = (ErtsSchedulerData *) vesdp;
+ Uint32 sid, roflgs;
+ erts_aint_t state;
+ int is_hlt;
+
+ state = erts_atomic32_cmpxchg_acqb(&tmr->btm.state,
+ ERTS_TMR_STATE_CANCELED,
+ ERTS_TMR_STATE_ACTIVE);
+
+ roflgs = tmr->type.head.roflgs;
+ sid = roflgs & ERTS_TMR_ROFLG_SID_MASK;
+ is_hlt = !!(roflgs & ERTS_TMR_ROFLG_HLT);
+
+ ERTS_HLT_ASSERT(sid == erts_get_ref_numbers_thr_id(ERTS_BTM_HLT2REFN(tmr)));
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ERTS_HLT_ASSERT(tmr->btm.proc_list.next);
+#else
+ ERTS_HLT_ASSERT(tmr->btm.proc_tree.parent
+ != ERTS_HLT_PFIELD_NOT_IN_TABLE);
+ tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+#endif
+
+ if (state == ERTS_TMR_STATE_ACTIVE) {
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ btm_clear_magic_binary(tmr);
+#endif
+ if (tmr->btm.bp)
+ free_message_buffer(tmr->btm.bp);
+
+ if (sid != (Uint32) esdp->no) {
+ queue_canceled_timer(esdp, sid, (ErtsTimer *) tmr);
+ return;
+ }
+
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+ if (tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ btm_rbt_delete(&esdp->timer_service->btm_tree, tmr);
+ tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ }
+#endif
+ if (is_hlt)
+ hlt_delete_timer(esdp, &tmr->type.hlt);
+ else
+ cancel_tw_timer(esdp, &tmr->type.twt);
+ }
+ if (is_hlt)
+ hl_timer_dec_refc(&tmr->type.hlt, roflgs);
+ else
+ tw_timer_dec_refc(&tmr->type.twt);
+}
+
+#ifdef ERTS_HLT_DEBUG
+# define ERTS_BTM_MAX_DESTROY_LIMIT 2
+#else
+# define ERTS_BTM_MAX_DESTROY_LIMIT 50
+#endif
+
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+typedef struct {
+ ErtsBifTimers *bif_timers;
+ union {
+ proc_btm_rbt_yield_state_t proc_btm_yield_state;
+ } u;
+} ErtsBifTimerYieldState;
+#endif
+
+int erts_cancel_bif_timers(Process *p, ErtsBifTimers **btm, void **vyspp)
+{
+ ErtsSchedulerData *esdp = erts_proc_sched_data(p);
+
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+
+ return proc_btm_list_foreach_destroy_yielding(btm,
+ exit_cancel_bif_timer,
+ (void *) esdp,
+ ERTS_BTM_MAX_DESTROY_LIMIT);
+
+#else /* !ERTS_MAGIC_REF_BIF_TIMERS */
+
+ ErtsBifTimerYieldState ys = {*btm, {ERTS_RBT_YIELD_STAT_INITER}};
+ ErtsBifTimerYieldState *ysp;
+ int res;
+
+ ysp = (ErtsBifTimerYieldState *) *vyspp;
+ if (!ysp)
+ ysp = &ys;
+
+ res = proc_btm_rbt_foreach_destroy_yielding(&ysp->bif_timers,
+ exit_cancel_bif_timer,
+ (void *) esdp,
+ &ysp->u.proc_btm_yield_state,
+ ERTS_BTM_MAX_DESTROY_LIMIT);
+
+ if (res == 0) {
+ if (ysp != &ys)
+ erts_free(ERTS_ALC_T_BTM_YIELD_STATE, ysp);
+ *vyspp = NULL;
+ }
+ else {
+
+ if (ysp == &ys) {
+ ysp = erts_alloc(ERTS_ALC_T_BTM_YIELD_STATE,
+ sizeof(ErtsBifTimerYieldState));
+ sys_memcpy((void *) ysp, (void *) &ys,
+ sizeof(ErtsBifTimerYieldState));
+ }
+
+ *vyspp = (void *) ysp;
+ }
+
+ return res;
+
+#endif /* !ERTS_MAGIC_REF_BIF_TIMERS */
+}
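+
+/*
+ * Usage sketch: the process-exit path is expected to drive this
+ * function with the same vyspp until it returns 0, yielding in
+ * between calls:
+ *
+ *   void *yield_state = NULL;
+ *   while (erts_cancel_bif_timers(p, &p->bif_timers, &yield_state))
+ *       ;  -- non-zero means the destroy limit was hit; yield, retry
+ */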
+
+static ERTS_INLINE int
+parse_timeout_pos(ErtsSchedulerData *esdp, Eterm arg,
+ ErtsMonotonicTime *conv_arg, int abs,
+ ErtsMonotonicTime *tposp, int *stimep,
+ ErtsMonotonicTime *msp)
+{
+ ErtsMonotonicTime t, now;
+
+ if (!term_to_Sint64(arg, &t)) {
+ ERTS_HLT_ASSERT(!is_small(arg));
+ if (!is_big(arg))
+ return -1;
+
+ if (abs || !big_sign(arg))
+ return 1;
+
+ return -1;
+ }
+
+ if (conv_arg)
+ *conv_arg = t;
+
+ now = erts_get_monotonic_time(esdp);
+
+ if (abs) {
+ t += -1*ERTS_MONOTONIC_OFFSET_MSEC; /* external to internal */
+ if (t < ERTS_MONOTONIC_TO_MSEC(ERTS_MONOTONIC_BEGIN))
+ return 1;
+ if (t > ERTS_MONOTONIC_TO_MSEC(ERTS_MONOTONIC_END))
+ return 1;
+ if (msp)
+ *msp = t - ERTS_MONOTONIC_TO_MSEC(now);
+
+ *stimep = (t - ERTS_MONOTONIC_TO_MSEC(esdp->last_monotonic_time)
+ < ERTS_BIF_TIMER_SHORT_TIME);
+ *tposp = ERTS_MSEC_TO_CLKTCKS(t);
+ }
+ else {
+ ErtsMonotonicTime ticks;
+
+ if (t < 0)
+ return -1;
+
+ if (msp)
+ *msp = t;
+
+ ticks = ERTS_MSEC_TO_CLKTCKS(t);
+
+ if (ERTS_CLKTCK_RESOLUTION > 1000 && ticks < 0)
+ return 1;
+
+ ERTS_HLT_ASSERT(ticks >= 0);
+
+ ticks += ERTS_MONOTONIC_TO_CLKTCKS(now-1);
+ ticks += 1;
+
+ if (ticks < ERTS_MONOTONIC_TO_CLKTCKS(ERTS_MONOTONIC_BEGIN))
+ return 1;
+ if (ticks > ERTS_MONOTONIC_TO_CLKTCKS(ERTS_MONOTONIC_END))
+ return 1;
+
+ *stimep = (t < ERTS_BIF_TIMER_SHORT_TIME);
+ *tposp = ticks;
+ }
+
+ return 0;
+}
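+
+/*
+ * Worked example (relative case, assuming a clock tick resolution
+ * of 1000 ticks per second): an argument of 1500 ms gives
+ * ticks = 1500, which is then anchored just past "now" via
+ * ticks += ERTS_MONOTONIC_TO_CLKTCKS(now - 1) + 1, rounding the
+ * timeout position up so a timer can never fire early. The return
+ * value is 0 on success; the timer BIFs below treat any non-zero
+ * result as badarg.
+ */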
+
+/*
+ *
+ * The BIF timer BIFs...
+ */
+
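+/*
+ * Erlang-level view of the BIFs below (documented API, shown here
+ * for orientation):
+ *
+ *   TRef = erlang:send_after(Time, Dest, Msg),   %% delivers Msg
+ *   TRef = erlang:start_timer(Time, Dest, Msg),  %% delivers {timeout, TRef, Msg}
+ *   Left = erlang:read_timer(TRef),              %% ms left | false
+ *   Left = erlang:cancel_timer(TRef).            %% ms left | false
+ */
+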
+BIF_RETTYPE send_after_3(BIF_ALIST_3)
+{
+ ErtsMonotonicTime timeout_pos, tmo;
+ int short_time, tres;
+
+ tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1,
+ NULL, 0, &timeout_pos, &short_time, &tmo);
+ if (tres != 0)
+ BIF_ERROR(BIF_P, BADARG);
+
+ return setup_bif_timer(BIF_P, tmo < ERTS_TIMER_WHEEL_MSEC,
+ timeout_pos, short_time, BIF_ARG_2,
+ BIF_ARG_3, 0);
+}
+
+BIF_RETTYPE send_after_4(BIF_ALIST_4)
+{
+ ErtsMonotonicTime timeout_pos, tmo;
+ int short_time, abs, tres;
+
+ if (!parse_bif_timer_options(BIF_ARG_4, NULL, NULL, &abs))
+ BIF_ERROR(BIF_P, BADARG);
+
+ tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1, NULL,
+ abs, &timeout_pos, &short_time, &tmo);
+ if (tres != 0)
+ BIF_ERROR(BIF_P, BADARG);
+
+ return setup_bif_timer(BIF_P, tmo < ERTS_TIMER_WHEEL_MSEC,
+ timeout_pos, short_time, BIF_ARG_2,
+ BIF_ARG_3, 0);
+}
+
+BIF_RETTYPE start_timer_3(BIF_ALIST_3)
+{
+ ErtsMonotonicTime timeout_pos, tmo;
+ int short_time, tres;
+
+ tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1, NULL,
+ 0, &timeout_pos, &short_time, &tmo);
+ if (tres != 0)
+ BIF_ERROR(BIF_P, BADARG);
+
+ return setup_bif_timer(BIF_P, tmo < ERTS_TIMER_WHEEL_MSEC,
+ timeout_pos, short_time, BIF_ARG_2,
+ BIF_ARG_3, !0);
+}
+
+BIF_RETTYPE start_timer_4(BIF_ALIST_4)
+{
+ ErtsMonotonicTime timeout_pos, tmo;
+ int short_time, abs, tres;
+
+ if (!parse_bif_timer_options(BIF_ARG_4, NULL, NULL, &abs))
+ BIF_ERROR(BIF_P, BADARG);
+
+ tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1, NULL,
+ abs, &timeout_pos, &short_time, &tmo);
+ if (tres != 0)
+ BIF_ERROR(BIF_P, BADARG);
+
+ return setup_bif_timer(BIF_P, tmo < ERTS_TIMER_WHEEL_MSEC,
+ timeout_pos, short_time, BIF_ARG_2,
+ BIF_ARG_3, !0);
+}
+
+BIF_RETTYPE cancel_timer_1(BIF_ALIST_1)
+{
+ return access_bif_timer(BIF_P, BIF_ARG_1, 1, 0, 1);
+}
+
+BIF_RETTYPE cancel_timer_2(BIF_ALIST_2)
+{
+ BIF_RETTYPE ret;
+ int async, info;
+
+ if (parse_bif_timer_options(BIF_ARG_2, &async, &info, NULL))
+ return access_bif_timer(BIF_P, BIF_ARG_1, 1, async, info);
+
+ ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
+ return ret;
+}
+
+BIF_RETTYPE read_timer_1(BIF_ALIST_1)
+{
+ return access_bif_timer(BIF_P, BIF_ARG_1, 0, 0, 1);
+}
+
+BIF_RETTYPE read_timer_2(BIF_ALIST_2)
+{
+ BIF_RETTYPE ret;
+ int async;
+
+ if (parse_bif_timer_options(BIF_ARG_2, &async, NULL, NULL))
+ return access_bif_timer(BIF_P, BIF_ARG_1, 0, async, 1);
+
+ ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
+ return ret;
+}
+
+static void
+start_callback_timer(ErtsSchedulerData *esdp,
+ int twt,
+ ErtsMonotonicTime timeout_pos,
+ void (*callback)(void *),
+ void *arg)
+
+{
+ ErtsCreateTimerFunc create_timer = (twt
+ ? create_tw_timer
+ : create_hl_timer);
+ (void) create_timer(esdp, timeout_pos, 0,
+ ERTS_TMR_CALLBACK, NULL,
+ NIL, THE_NON_VALUE, NULL,
+ callback, arg);
+}
+
+typedef struct {
+ int twt;
+ ErtsMonotonicTime timeout_pos;
+ void (*callback)(void *);
+ void *arg;
+} ErtsStartCallbackTimerRequest;
+
+static void
+scheduled_start_callback_timer(void *vsctr)
+{
+ ErtsStartCallbackTimerRequest *sctr
+ = (ErtsStartCallbackTimerRequest *) vsctr;
+
+ start_callback_timer(erts_get_scheduler_data(),
+ sctr->twt,
+ sctr->timeout_pos,
+ sctr->callback,
+ sctr->arg);
+
+ erts_free(ERTS_ALC_T_TIMER_REQUEST, vsctr);
+}
+
+void
+erts_start_timer_callback(ErtsMonotonicTime tmo,
+ void (*callback)(void *),
+ void *arg)
+{
+ ErtsSchedulerData *esdp;
+ ErtsMonotonicTime timeout_pos;
+ int twt;
+
+ esdp = erts_get_scheduler_data();
+ timeout_pos = get_timeout_pos(erts_get_monotonic_time(esdp),
+ tmo);
+ twt = tmo < ERTS_TIMER_WHEEL_MSEC;
+
+ if (esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp))
+ start_callback_timer(esdp,
+ twt,
+ timeout_pos,
+ callback,
+ arg);
+ else {
+ ErtsStartCallbackTimerRequest *sctr;
+ sctr = erts_alloc(ERTS_ALC_T_TIMER_REQUEST,
+ sizeof(ErtsStartCallbackTimerRequest));
+ sctr->twt = twt;
+ sctr->timeout_pos = timeout_pos;
+ sctr->callback = callback;
+ sctr->arg = arg;
+ erts_schedule_misc_aux_work(1,
+ scheduled_start_callback_timer,
+ (void *) sctr);
+ }
+}
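+
+/*
+ * Minimal usage sketch (my_timeout is a hypothetical callback; it
+ * runs on a scheduler thread when the timeout fires):
+ *
+ *   static void my_timeout(void *arg)
+ *   {
+ *       erts_fprintf(stderr, "timed out: %s\n", (char *) arg);
+ *   }
+ *
+ *   erts_start_timer_callback(500, my_timeout, "demo");
+ *
+ * Called from a normal scheduler, the timer is created locally;
+ * otherwise the request is forwarded to scheduler 1 as misc aux
+ * work (see above).
+ */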
+
+/*
+ * Process and Port timer functionality.
+ *
+ * NOTE! These are only allowed to be called by a
+ * scheduler thread that is currently
+ * executing the process or port.
+ */
+
+static ERTS_INLINE void
+set_proc_timer_common(Process *c_p, ErtsSchedulerData *esdp, Sint64 tmo,
+ ErtsMonotonicTime timeout_pos, int short_time)
+{
+ void *tmr;
+ check_canceled_queue(esdp, esdp->timer_service);
+
+ if (tmo == 0)
+ c_p->flags |= F_TIMO;
+ else {
+ ErtsCreateTimerFunc create_timer;
+
+ c_p->flags |= F_INSLPQUEUE;
+ c_p->flags &= ~F_TIMO;
+
+ create_timer = (tmo < ERTS_TIMER_WHEEL_MSEC
+ ? create_tw_timer
+ : create_hl_timer);
+ tmr = (void *) create_timer(esdp, timeout_pos, short_time,
+ ERTS_TMR_PROC, (void *) c_p,
+ c_p->common.id, THE_NON_VALUE,
+ NULL, NULL, NULL);
+ erts_atomic_set_relb(&c_p->common.timer, (erts_aint_t) tmr);
+ }
+}
+
+int
+erts_set_proc_timer_term(Process *c_p, Eterm etmo)
+{
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
+ ErtsMonotonicTime tmo, timeout_pos;
+ int short_time, tres;
+
+ ERTS_HLT_ASSERT(erts_atomic_read_nob(&c_p->common.timer)
+ == ERTS_PTMR_NONE);
+
+ tres = parse_timeout_pos(esdp, etmo, &tmo, 0,
+ &timeout_pos, &short_time, NULL);
+ if (tres != 0)
+ return tres;
+
+ if ((tmo >> 32) != 0)
+ return 1;
+
+ set_proc_timer_common(c_p, esdp, tmo, timeout_pos, short_time);
+ return 0;
+}
+
+void
+erts_set_proc_timer_uword(Process *c_p, UWord tmo)
+{
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
+
+ ERTS_HLT_ASSERT(erts_atomic_read_nob(&c_p->common.timer)
+ == ERTS_PTMR_NONE);
+
+#ifndef ARCH_32
+ ERTS_HLT_ASSERT((tmo >> 32) == (UWord) 0);
+#endif
+
+ if (tmo == 0)
+ c_p->flags |= F_TIMO;
+ else {
+ ErtsMonotonicTime timeout_pos;
+ timeout_pos = get_timeout_pos(erts_get_monotonic_time(esdp),
+ (ErtsMonotonicTime) tmo);
+ set_proc_timer_common(c_p, esdp, (ErtsMonotonicTime) tmo,
+ timeout_pos,
+ tmo < ERTS_BIF_TIMER_SHORT_TIME);
+ }
+}
+
+void
+erts_cancel_proc_timer(Process *c_p)
+{
+ erts_aint_t tval;
+ tval = erts_atomic_xchg_acqb(&c_p->common.timer,
+ ERTS_PTMR_NONE);
+ c_p->flags &= ~(F_INSLPQUEUE|F_TIMO);
+ if (tval == ERTS_PTMR_NONE)
+ return;
+ if (tval == ERTS_PTMR_TIMEDOUT) {
+ erts_atomic_set_nob(&c_p->common.timer, ERTS_PTMR_NONE);
+ return;
+ }
+ continue_cancel_ptimer(erts_proc_sched_data(c_p),
+ (ErtsTimer *) tval);
+}
+
+void
+erts_set_port_timer(Port *c_prt, Sint64 tmo)
+{
+ void *tmr;
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ErtsMonotonicTime timeout_pos;
+ ErtsCreateTimerFunc create_timer;
+
+ if (erts_atomic_read_nob(&c_prt->common.timer) != ERTS_PTMR_NONE)
+ erts_cancel_port_timer(c_prt);
+
+ check_canceled_queue(esdp, esdp->timer_service);
+
+ timeout_pos = get_timeout_pos(erts_get_monotonic_time(esdp), tmo);
+
+ create_timer = (tmo < ERTS_TIMER_WHEEL_MSEC
+ ? create_tw_timer
+ : create_hl_timer);
+ tmr = (void *) create_timer(esdp, timeout_pos, 0, ERTS_TMR_PORT,
+ (void *) c_prt, c_prt->common.id,
+ THE_NON_VALUE, NULL, NULL, NULL);
+ erts_atomic_set_relb(&c_prt->common.timer, (erts_aint_t) tmr);
+}
+
+void
+erts_cancel_port_timer(Port *c_prt)
+{
+ erts_aint_t tval;
+ tval = erts_atomic_xchg_acqb(&c_prt->common.timer,
+ ERTS_PTMR_NONE);
+ if (tval == ERTS_PTMR_NONE)
+ return;
+ if (tval == ERTS_PTMR_TIMEDOUT) {
+ while (!erts_port_task_is_scheduled(&c_prt->timeout_task))
+ erts_thr_yield();
+ erts_port_task_abort(&c_prt->timeout_task);
+ erts_atomic_set_nob(&c_prt->common.timer, ERTS_PTMR_NONE);
+ return;
+ }
+ continue_cancel_ptimer(erts_get_scheduler_data(),
+ (ErtsTimer *) tval);
+}
+
+Sint64
+erts_read_port_timer(Port *c_prt)
+{
+ ErtsTimer *tmr;
+ erts_aint_t itmr;
+ ErtsMonotonicTime timeout_pos;
+
+ itmr = erts_atomic_read_acqb(&c_prt->common.timer);
+ if (itmr == ERTS_PTMR_NONE)
+ return (Sint64) -1;
+ if (itmr == ERTS_PTMR_TIMEDOUT)
+ return (Sint64) 0;
+ tmr = (ErtsTimer *) itmr;
+ if (tmr->head.roflgs & ERTS_TMR_ROFLG_HLT)
+ timeout_pos = tmr->hlt.timeout;
+ else
+ timeout_pos = erts_tweel_read_timeout(&tmr->twt.u.tw_tmr);
+ return get_time_left(NULL, timeout_pos);
+}
+
+/*
+ * Debug stuff...
+ */
+
+typedef struct {
+ fmtfn_t to;
+ void *to_arg;
+ ErtsMonotonicTime now;
+} ErtsBTMPrint;
+
+static void
+btm_print(ErtsBifTimer *tmr, void *vbtmp, ErtsMonotonicTime tpos, int is_hlt)
+{
+ ErtsBTMPrint *btmp = (ErtsBTMPrint *) vbtmp;
+ ErtsMonotonicTime left;
+ Eterm receiver;
+
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ if (!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_BIF_TMR))
+ return;
+#endif
+
+ if (is_hlt) {
+ ERTS_HLT_ASSERT(tmr->type.head.roflgs & ERTS_TMR_ROFLG_HLT);
+ if (tmr->type.hlt.timeout <= btmp->now)
+ left = 0;
+ else
+ left = ERTS_CLKTCKS_TO_MSEC(tmr->type.hlt.timeout - btmp->now);
+ }
+ else {
+ ERTS_HLT_ASSERT(!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_HLT));
+ if (tpos <= btmp->now)
+ left = 0;
+ else
+ left = ERTS_CLKTCKS_TO_MSEC(tpos - btmp->now);
+ }
+
+ receiver = ((tmr->type.head.roflgs & ERTS_TMR_ROFLG_REG_NAME)
+ ? tmr->type.head.receiver.name
+ : tmr->type.head.receiver.proc->common.id);
+
+ erts_print(btmp->to, btmp->to_arg,
+ "=timer:%T\n"
+ "Message: %T\n"
+ "Time left: %b64d\n",
+ receiver,
+ tmr->btm.message,
+ (Sint64) left);
+}
+
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+
+static void
+hlt_btm_print(ErtsHLTimer *tmr, void *vbtmp)
+{
+ btm_print((ErtsBifTimer *) tmr, vbtmp, 0, 1);
+}
+
+static void
+twt_btm_print(void *vbtmp, ErtsMonotonicTime tpos, void *vtwtp)
+{
+ btm_print((ErtsBifTimer *) vtwtp, vbtmp, tpos, 0);
+}
+
+#else
+
+static void
+btm_tree_print(ErtsBifTimer *tmr, void *vbtmp)
+{
+ int is_hlt = !!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_HLT);
+ ErtsMonotonicTime tpos;
+ if (is_hlt)
+ tpos = 0;
+ else
+ tpos = erts_tweel_read_timeout(&tmr->type.twt.u.tw_tmr);
+ btm_print(tmr, vbtmp, tpos, is_hlt);
+}
+
+#endif
+
+void
+erts_print_bif_timer_info(fmtfn_t to, void *to_arg)
+{
+ ErtsBTMPrint btmp;
+ int six;
+
+ if (!ERTS_IS_CRASH_DUMPING)
+ ERTS_INTERNAL_ERROR("Not crash dumping");
+
+ btmp.to = to;
+ btmp.to_arg = to_arg;
+ btmp.now = erts_get_monotonic_time(NULL);
+ btmp.now = ERTS_MONOTONIC_TO_CLKTCKS(btmp.now);
+
+ for (six = 0; six < erts_no_schedulers; six++) {
+ ErtsHLTimerService *srv =
+ erts_aligned_scheduler_data[six].esd.timer_service;
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsTimerWheel *twheel =
+ erts_aligned_scheduler_data[six].esd.timer_wheel;
+ erts_twheel_debug_foreach(twheel, tw_bif_timer_timeout,
+ twt_btm_print, (void *) &btmp);
+ time_rbt_foreach(srv->time_tree, hlt_btm_print, (void *) &btmp);
+#else
+ btm_rbt_foreach(srv->btm_tree, btm_tree_print, (void *) &btmp);
+#endif
+ }
+}
+
+typedef struct {
+ void (*func)(Eterm,
+ Eterm,
+ ErlHeapFragment *,
+ void *);
+ void *arg;
+} ErtsBTMForeachDebug;
+
+static void
+debug_btm_foreach(ErtsBifTimer *tmr, void *vbtmfd)
+{
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ if (!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_BIF_TMR))
+ return;
+#endif
+ if (erts_atomic32_read_nob(&tmr->btm.state) == ERTS_TMR_STATE_ACTIVE) {
+ ErtsBTMForeachDebug *btmfd = (ErtsBTMForeachDebug *) vbtmfd;
+ Eterm id = ((tmr->type.head.roflgs & ERTS_TMR_ROFLG_REG_NAME)
+ ? tmr->type.head.receiver.name
+ : tmr->type.head.receiver.proc->common.id);
+ (*btmfd->func)(id, tmr->btm.message, tmr->btm.bp, btmfd->arg);
+ }
+}
+
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+
+static void
+hlt_debug_btm_foreach(ErtsHLTimer *tmr, void *vbtmfd)
+{
+ debug_btm_foreach((ErtsBifTimer *) tmr, vbtmfd);
+}
+
+static void
+twt_debug_btm_foreach(void *vbtmfd, ErtsMonotonicTime tpos, void *vtwtp)
+{
+ debug_btm_foreach((ErtsBifTimer *) vtwtp, vbtmfd);
+}
+
+#endif
+
+void
+erts_debug_bif_timer_foreach(void (*func)(Eterm,
+ Eterm,
+ ErlHeapFragment *,
+ void *),
+ void *arg)
+{
+ ErtsBTMForeachDebug btmfd;
+ int six;
+
+ btmfd.func = func;
+ btmfd.arg = arg;
+
+ if (!erts_thr_progress_is_blocking())
+ ERTS_INTERNAL_ERROR("Not blocking thread progress");
+
+ for (six = 0; six < erts_no_schedulers; six++) {
+ ErtsHLTimerService *srv =
+ erts_aligned_scheduler_data[six].esd.timer_service;
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsTimerWheel *twheel =
+ erts_aligned_scheduler_data[six].esd.timer_wheel;
+ erts_twheel_debug_foreach(twheel, tw_bif_timer_timeout,
+ twt_debug_btm_foreach,
+ (void *) &btmfd);
+ time_rbt_foreach(srv->time_tree,
+ hlt_debug_btm_foreach,
+ (void *) &btmfd);
+#else
+ btm_rbt_foreach(srv->btm_tree,
+ debug_btm_foreach,
+ (void *) &btmfd);
+#endif
+ }
+}
+
+typedef struct {
+ void (*tclbk)(void *);
+ void (*func)(void *,
+ ErtsMonotonicTime,
+ void *);
+ void *arg;
+} ErtsDebugForeachCallbackTimer;
+
+static void
+debug_callback_timer_foreach_list(ErtsHLTimer *tmr, void *vdfct)
+{
+ ErtsDebugForeachCallbackTimer *dfct
+ = (ErtsDebugForeachCallbackTimer *) vdfct;
+
+ if ((tmr->head.roflgs & ERTS_TMR_ROFLG_CALLBACK)
+ && (tmr->head.receiver.callback == dfct->tclbk))
+ (*dfct->func)(dfct->arg,
+ tmr->timeout,
+ tmr->head.u.arg);
+}
+
+static void
+debug_callback_timer_foreach(ErtsHLTimer *tmr, void *vdfct)
+{
+ ErtsDebugForeachCallbackTimer *dfct
+ = (ErtsDebugForeachCallbackTimer *) vdfct;
+
+ if (tmr->time.tree.same_time)
+ same_time_list_foreach(tmr->time.tree.same_time,
+ debug_callback_timer_foreach_list,
+ vdfct);
+
+ if ((tmr->head.roflgs & ERTS_TMR_ROFLG_CALLBACK)
+ && (tmr->head.receiver.callback == dfct->tclbk))
+ (*dfct->func)(dfct->arg,
+ tmr->timeout,
+ tmr->head.u.arg);
+}
+
+static void
+debug_tw_callback_timer(void *vdfct,
+ ErtsMonotonicTime timeout_pos,
+ void *vtwtp)
+{
+ ErtsTWTimer *twtp = (ErtsTWTimer *) vtwtp;
+ ErtsDebugForeachCallbackTimer *dfct
+ = (ErtsDebugForeachCallbackTimer *) vdfct;
+
+ if (twtp->head.receiver.callback == dfct->tclbk)
+ (*dfct->func)(dfct->arg,
+ timeout_pos,
+ twtp->head.u.arg);
+}
+
+void
+erts_debug_callback_timer_foreach(void (*tclbk)(void *),
+ void (*func)(void *,
+ ErtsMonotonicTime,
+ void *),
+ void *arg)
+{
+ int six;
+ ErtsDebugForeachCallbackTimer dfct;
+
+ dfct.tclbk = tclbk;
+ dfct.func = func;
+ dfct.arg = arg;
+
+ if (!erts_thr_progress_is_blocking())
+ ERTS_INTERNAL_ERROR("Not blocking thread progress");
+
+ for (six = 0; six < erts_no_schedulers; six++) {
+ ErtsHLTimerService *srv =
+ erts_aligned_scheduler_data[six].esd.timer_service;
+ ErtsTimerWheel *twheel =
+ erts_aligned_scheduler_data[six].esd.timer_wheel;
+
+ erts_twheel_debug_foreach(twheel,
+ tw_callback_timeout,
+ debug_tw_callback_timer,
+ (void *) &dfct);
+
+ if (srv->yield.root)
+ debug_callback_timer_foreach(srv->yield.root,
+ (void *) &dfct);
+
+ time_rbt_foreach(srv->time_tree,
+ debug_callback_timer_foreach,
+ (void *) &dfct);
+ }
+}
+
+#ifdef ERTS_HLT_HARD_DEBUG
+
+typedef struct {
+ ErtsHLTimerService *srv;
+ int found_root;
+ ErtsHLTimer **rootpp;
+} ErtsHdbgHLT;
+
+static void
+st_hdbg_func(ErtsHLTimer *tmr, void *vhdbg)
+{
+ ErtsHdbgHLT *hdbg = (ErtsHdbgHLT *) vhdbg;
+ ErtsHLTimer **rootpp;
+ ERTS_HLT_ASSERT(tmr->time.tree.parent & ERTS_HLT_PFLG_SAME_TIME);
+ if (tmr->time.tree.parent == ERTS_HLT_PFLG_SAME_TIME) {
+ ERTS_HLT_ASSERT(tmr != *hdbg->rootpp);
+ }
+ else {
+ rootpp = (ErtsHLTimer **) (tmr->time.tree.parent
+ & ~ERTS_HLT_PFLG_SAME_TIME);
+ ERTS_HLT_ASSERT(rootpp == hdbg->rootpp);
+ ERTS_HLT_ASSERT(tmr == *rootpp);
+ ERTS_HLT_ASSERT(!hdbg->found_root);
+ hdbg->found_root = 1;
+ }
+ ERTS_HLT_ASSERT(tmr->time.tree.u.l.next->time.tree.u.l.prev == tmr);
+ ERTS_HLT_ASSERT(tmr->time.tree.u.l.prev->time.tree.u.l.next == tmr);
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+ ERTS_HLT_ASSERT(btm_rbt_lookup(hdbg->srv->btm_tree, ERTS_BTM_HLT2REFN(tmr)) == tmr);
+#endif
+}
+
+static void
+tt_hdbg_func(ErtsHLTimer *tmr, void *vhdbg)
+{
+ ErtsHdbgHLT *hdbg = (ErtsHdbgHLT *) vhdbg;
+ ErtsHLTimer *prnt;
+ ERTS_HLT_ASSERT((tmr->time.tree.parent & ERTS_HLT_PFLG_SAME_TIME) == 0);
+ prnt = (ErtsHLTimer *) (tmr->time.tree.parent & ~ERTS_HLT_PFLGS_MASK);
+ if (prnt) {
+ ERTS_HLT_ASSERT(prnt->time.tree.u.t.left == tmr
+ || prnt->time.tree.u.t.right == tmr);
+ }
+ else {
+ ERTS_HLT_ASSERT(!hdbg->found_root);
+ hdbg->found_root = 1;
+ ERTS_HLT_ASSERT(tmr == *hdbg->rootpp);
+ }
+ if (tmr->time.tree.u.t.left) {
+ prnt = (ErtsHLTimer *) (tmr->time.tree.u.t.left->time.tree.parent
+ & ~ERTS_HLT_PFLGS_MASK);
+ ERTS_HLT_ASSERT(tmr == prnt);
+ }
+ if (tmr->time.tree.u.t.right) {
+ prnt = (ErtsHLTimer *) (tmr->time.tree.u.t.right->time.tree.parent
+ & ~ERTS_HLT_PFLGS_MASK);
+ ERTS_HLT_ASSERT(tmr == prnt);
+ }
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+ if (tmr->head.roflgs & ERTS_TMR_ROFLG_BIF_TMR)
+ ERTS_HLT_ASSERT(btm_rbt_lookup(hdbg->srv->btm_tree, ERTS_BTM_HLT2REFN(tmr)) == tmr);
+#endif
+ if (tmr->time.tree.same_time) {
+ ErtsHdbgHLT st_hdbg;
+ st_hdbg.srv = hdbg->srv;
+ st_hdbg.found_root = 0;
+ st_hdbg.rootpp = &tmr->time.tree.same_time;
+ same_time_list_foreach(tmr->time.tree.same_time, st_hdbg_func, (void *) &st_hdbg);
+ ERTS_HLT_ASSERT(st_hdbg.found_root);
+ }
+}
+
+static void
+bt_hdbg_func(ErtsHLTimer *tmr, void *vhdbg)
+{
+ ErtsHdbgHLT *hdbg = (ErtsHdbgHLT *) vhdbg;
+ ErtsHLTimer *prnt;
+ ERTS_HLT_ASSERT((tmr->btm.tree.parent & ERTS_HLT_PFLG_SAME_TIME) == 0);
+ prnt = (ErtsHLTimer *) (tmr->btm.tree.parent & ~ERTS_HLT_PFLGS_MASK);
+ if (prnt) {
+ ERTS_HLT_ASSERT(prnt->btm.tree.left == tmr
+ || prnt->btm.tree.right == tmr);
+ }
+ else {
+ ERTS_HLT_ASSERT(!hdbg->found_root);
+ hdbg->found_root = 1;
+ ERTS_HLT_ASSERT(tmr == *hdbg->rootpp);
+ }
+ if (tmr->btm.tree.left) {
+ prnt = (ErtsHLTimer *) (tmr->btm.tree.left->btm.tree.parent
+ & ~ERTS_HLT_PFLGS_MASK);
+ ERTS_HLT_ASSERT(tmr == prnt);
+ }
+ if (tmr->btm.tree.right) {
+ prnt = (ErtsHLTimer *) (tmr->btm.tree.right->btm.tree.parent
+ & ~ERTS_HLT_PFLGS_MASK);
+ ERTS_HLT_ASSERT(tmr == prnt);
+ }
+ if (tmr->pending_timeout) {
+ if (tmr->pending_timeout > 0) /* container > 0 */
+ ERTS_HLT_ASSERT(tmr->time.tree.parent == ERTS_HLT_PFIELD_NOT_IN_TABLE);
+ else {
+ ERTS_HLT_ASSERT(tmr->time.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE);
+ ERTS_HLT_ASSERT(tmr->time.tree.parent & ERTS_HLT_PFLG_SAME_TIME);
+ }
+ }
+ else {
+ ErtsHLTimer *ttmr = time_rbt_lookup(hdbg->srv->time_tree, tmr->timeout);
+ ERTS_HLT_ASSERT(ttmr);
+ if (ttmr != tmr) {
+ ERTS_HLT_ASSERT(ttmr->time.tree.same_time);
+ ERTS_HLT_ASSERT(tmr == same_time_list_lookup(ttmr->time.tree.same_time, tmr));
+ }
+ }
+}
+
+static void
+hdbg_chk_srv(ErtsHLTimerService *srv)
+{
+ if (srv->time_tree) {
+ ErtsHdbgHLT hdbg;
+ hdbg.srv = srv;
+ hdbg.found_root = 0;
+ hdbg.rootpp = &srv->time_tree;
+ time_rbt_foreach(srv->time_tree, tt_hdbg_func, (void *) &hdbg);
+ ERTS_HLT_ASSERT(hdbg.found_root);
+ }
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+ if (srv->btm_tree) {
+ ErtsHdbgHLT hdbg;
+ hdbg.srv = srv;
+ hdbg.found_root = 0;
+ hdbg.rootpp = &srv->btm_tree;
+ btm_rbt_foreach(srv->btm_tree, bt_hdbg_func, (void *) &hdbg);
+ ERTS_HLT_ASSERT(hdbg.found_root);
+ }
+#endif
+}
+
+#endif /* ERTS_HLT_HARD_DEBUG */
diff --git a/erts/emulator/beam/erl_hl_timer.h b/erts/emulator/beam/erl_hl_timer.h
new file mode 100644
index 0000000000..e6f5e8b67d
--- /dev/null
+++ b/erts/emulator/beam/erl_hl_timer.h
@@ -0,0 +1,86 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2015-2017. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERL_HL_TIMER_H__
+#define ERL_HL_TIMER_H__
+
+typedef struct ErtsBifTimer_ ErtsBifTimers;
+typedef struct ErtsHLTimerService_ ErtsHLTimerService;
+
+#include "sys.h"
+#include "erl_process.h"
+#define ERL_PORT_GET_PORT_TYPE_ONLY__
+#include "erl_port.h"
+#undef ERL_PORT_GET_PORT_TYPE_ONLY__
+#include "erl_message.h"
+#include "erl_alloc_types.h"
+
+#define ERTS_PTMR_NONE ((erts_aint_t) NULL)
+#define ERTS_PTMR_TIMEDOUT (ERTS_PTMR_NONE + ((erts_aint_t) 1))
+
+#define ERTS_PTMR_INIT(P) \
+ erts_atomic_init_nob(&(P)->common.timer, ERTS_PTMR_NONE)
+#define ERTS_PTMR_IS_SET(P) \
+ (ERTS_PTMR_NONE != erts_atomic_read_nob(&(P)->common.timer))
+#define ERTS_PTMR_IS_TIMED_OUT(P) \
+ (ERTS_PTMR_TIMEDOUT == erts_atomic_read_nob(&(P)->common.timer))
+
+#define ERTS_PTMR_CLEAR(P) \
+ do { \
+ ASSERT(ERTS_PTMR_IS_TIMED_OUT((P))); \
+ erts_atomic_set_nob(&(P)->common.timer, \
+ ERTS_PTMR_NONE); \
+ } while (0)
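+
+/*
+ * Usage sketch: each process/port carries a single timer slot in
+ * its common part, manipulated through the macros above, e.g.:
+ *
+ *   ERTS_PTMR_INIT(p);               at creation: no timer set
+ *   if (ERTS_PTMR_IS_TIMED_OUT(p))
+ *       ERTS_PTMR_CLEAR(p);          acknowledge a fired timer
+ */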
+
+size_t erts_timer_type_size(ErtsAlcType_t type);
+int erts_set_proc_timer_term(Process *, Eterm);
+void erts_set_proc_timer_uword(Process *, UWord);
+void erts_cancel_proc_timer(Process *);
+void erts_set_port_timer(Port *, Sint64);
+void erts_cancel_port_timer(Port *);
+Sint64 erts_read_port_timer(Port *);
+int erts_cancel_bif_timers(Process *, ErtsBifTimers **, void **);
+int erts_detach_accessor_bif_timers(Process *, ErtsBifTimers *, void **);
+ErtsHLTimerService *erts_create_timer_service(void);
+void erts_hl_timer_init(void);
+void erts_start_timer_callback(ErtsMonotonicTime,
+ void (*)(void *),
+ void *);
+void
+erts_handle_canceled_timers(void *vesdp,
+ int *need_thr_progress,
+ ErtsThrPrgrVal *thr_prgr_p,
+ int *need_more_work);
+
+Uint erts_bif_timer_memory_size(void);
+void erts_print_bif_timer_info(fmtfn_t to, void *to_arg);
+
+void erts_debug_bif_timer_foreach(void (*func)(Eterm,
+ Eterm,
+ ErlHeapFragment *,
+ void *),
+ void *arg);
+void
+erts_debug_callback_timer_foreach(void (*tclbk)(void *),
+ void (*func)(void *,
+ ErtsMonotonicTime,
+ void *),
+ void *arg);
+#endif /* ERL_HL_TIMER_H__ */
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index d54658f1ea..6cef9bd0e3 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -35,7 +36,7 @@
#include "dist.h"
#include "erl_mseg.h"
#include "erl_threads.h"
-#include "erl_bif_timer.h"
+#include "erl_hl_timer.h"
#include "erl_instrument.h"
#include "erl_printf_term.h"
#include "erl_misc_utils.h"
@@ -45,6 +46,10 @@
#include "erl_thr_queue.h"
#include "erl_async.h"
#include "erl_ptab.h"
+#include "erl_bif_unique.h"
+#define ERTS_WANT_TIMER_WHEEL_API
+#include "erl_time.h"
+#include "erl_check_io.h"
#ifdef HIPE
#include "hipe_mode_switch.h" /* for hipe_mode_switch_init() */
@@ -57,27 +62,25 @@
#define ERTS_DEFAULT_NO_ASYNC_THREADS 10
+#define ERTS_DEFAULT_SCHED_STACK_SIZE 128
+#define ERTS_DEFAULT_DCPU_SCHED_STACK_SIZE 40
+#define ERTS_DEFAULT_DIO_SCHED_STACK_SIZE 40
+
/*
* The variables below (prefixed with etp_) are for erts/etc/unix/etp-commands
* only. Do not remove even though they aren't used elsewhere in the emulator!
*/
-#ifdef ERTS_SMP
const int etp_smp_compiled = 1;
-#else
-const int etp_smp_compiled = 0;
-#endif
-#ifdef USE_THREADS
const int etp_thread_compiled = 1;
-#else
-const int etp_thread_compiled = 0;
-#endif
const char etp_erts_version[] = ERLANG_VERSION;
const char etp_otp_release[] = ERLANG_OTP_RELEASE;
const char etp_compile_date[] = ERLANG_COMPILE_DATE;
const char etp_arch[] = ERLANG_ARCHITECTURE;
#ifdef ERTS_ENABLE_KERNEL_POLL
+const int erts_use_kernel_poll = 1;
const int etp_kernel_poll_support = 1;
#else
+const int erts_use_kernel_poll = 0;
const int etp_kernel_poll_support = 0;
#endif
#if defined(ARCH_64)
@@ -87,11 +90,6 @@ const int etp_arch_bits = 32;
#else
# error "Not 64-bit, nor 32-bit arch"
#endif
-#if HALFWORD_HEAP
-const int etp_halfword = 1;
-#else
-const int etp_halfword = 0;
-#endif
#ifdef HIPE
const int etp_hipe = 1;
#else
@@ -112,11 +110,22 @@ const int etp_lock_check = 1;
#else
const int etp_lock_check = 0;
#endif
-#ifdef WORDS_BIGENDIAN
-const int etp_big_endian = 1;
+const int etp_endianness = ERTS_ENDIANNESS;
+const Eterm etp_ref_header = ERTS_REF_THING_HEADER;
+#ifdef ERTS_MAGIC_REF_THING_HEADER
+const Eterm etp_magic_ref_header = ERTS_MAGIC_REF_THING_HEADER;
+#else
+const Eterm etp_magic_ref_header = ERTS_REF_THING_HEADER;
+#endif
+const Eterm etp_the_non_value = THE_NON_VALUE;
+#ifdef ERTS_HOLE_MARKER
+const Eterm etp_hole_marker = ERTS_HOLE_MARKER;
#else
-const int etp_big_endian = 0;
+const Eterm etp_hole_marker = 0;
#endif
+
+static int modified_sched_thread_suggested_stack_size = 0;
+
/*
* Note about VxWorks: All variables must be initialized by executable code,
* not by an initializer. Otherwise a new instance of the emulator will
@@ -134,21 +143,18 @@ static void erl_init(int ncpu,
int legacy_proc_tab,
int port_tab_sz,
int port_tab_sz_ignore_files,
- int legacy_port_tab);
+ int legacy_port_tab,
+ int time_correction,
+ ErtsTimeWarpMode time_warp_mode,
+ int node_tab_delete_delay,
+ ErtsDbSpinCount db_spin_count);
static erts_atomic_t exiting;
-#ifdef ERTS_SMP
-erts_smp_atomic32_t erts_writing_erl_crash_dump;
+erts_atomic32_t erts_writing_erl_crash_dump;
erts_tsd_key_t erts_is_crash_dumping_key;
-#else
-volatile int erts_writing_erl_crash_dump = 0;
-#endif
int erts_initialized = 0;
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
-static erts_tid_t main_thread;
-#endif
int erts_use_sender_punish;
@@ -159,16 +165,15 @@ int erts_use_sender_punish;
Uint display_items; /* no of items to display in traces etc */
int H_MIN_SIZE; /* The minimum heap grain */
int BIN_VH_MIN_SIZE; /* The minimum binary virtual*/
+int H_MAX_SIZE; /* The maximum heap size */
+int H_MAX_FLAGS; /* The maximum heap flags */
Uint32 erts_debug_flags; /* Debug flags. */
-#ifdef ERTS_OPCODE_COUNTER_SUPPORT
-int count_instructions;
-#endif
int erts_backtrace_depth; /* How many functions to show in a backtrace
* in error codes.
*/
-erts_smp_atomic32_t erts_max_gen_gcs;
+erts_atomic32_t erts_max_gen_gcs;
Eterm erts_error_logger_warnings; /* What to map warning logs to, am_error,
am_info or am_warning, am_error is
@@ -178,22 +183,18 @@ int erts_compat_rel;
static int no_schedulers;
static int no_schedulers_online;
-#ifdef ERTS_DIRTY_SCHEDULERS
static int no_dirty_cpu_schedulers;
static int no_dirty_cpu_schedulers_online;
static int no_dirty_io_schedulers;
-#endif
#ifdef DEBUG
Uint32 verbose; /* See erl_debug.h for information about verbose */
#endif
-int erts_disable_tolerant_timeofday; /* Time correction can be disabled it is
- * not and/or it is too slow.
- */
-
int erts_atom_table_size = ATOM_LIMIT; /* Maximum number of atoms */
+int erts_pd_initial_size = 8; /* must be power of 2 */
+
int erts_modified_timing_level;
int erts_no_crash_dump = 0; /* Use -d to suppress crash dump. */
@@ -205,16 +206,16 @@ int erts_no_line_info = 0; /* -L: Don't load line information */
*/
ErtsModifiedTimings erts_modified_timings[] = {
- /* 0 */ {make_small(0), CONTEXT_REDS, INPUT_REDUCTIONS},
- /* 1 */ {make_small(0), 2*CONTEXT_REDS, 2*INPUT_REDUCTIONS},
- /* 2 */ {make_small(0), CONTEXT_REDS/2, INPUT_REDUCTIONS/2},
- /* 3 */ {make_small(0), 3*CONTEXT_REDS, 3*INPUT_REDUCTIONS},
- /* 4 */ {make_small(0), CONTEXT_REDS/3, 3*INPUT_REDUCTIONS},
- /* 5 */ {make_small(0), 4*CONTEXT_REDS, INPUT_REDUCTIONS/2},
- /* 6 */ {make_small(1), CONTEXT_REDS/4, 2*INPUT_REDUCTIONS},
- /* 7 */ {make_small(1), 5*CONTEXT_REDS, INPUT_REDUCTIONS/3},
- /* 8 */ {make_small(10), CONTEXT_REDS/5, 3*INPUT_REDUCTIONS},
- /* 9 */ {make_small(10), 6*CONTEXT_REDS, INPUT_REDUCTIONS/4}
+ /* 0 */ {make_small(0), CONTEXT_REDS},
+ /* 1 */ {make_small(0), (3*CONTEXT_REDS)/4},
+ /* 2 */ {make_small(0), CONTEXT_REDS/2},
+ /* 3 */ {make_small(0), (7*CONTEXT_REDS)/8},
+ /* 4 */ {make_small(0), CONTEXT_REDS/3},
+ /* 5 */ {make_small(0), (10*CONTEXT_REDS)/11},
+ /* 6 */ {make_small(1), CONTEXT_REDS/4},
+ /* 7 */ {make_small(1), (5*CONTEXT_REDS)/7},
+ /* 8 */ {make_small(10), CONTEXT_REDS/5},
+ /* 9 */ {make_small(10), (6*CONTEXT_REDS)/7}
};
#define ERTS_MODIFIED_TIMING_LEVELS \
@@ -264,11 +265,24 @@ this_rel_num(void)
i++;
this_rel = atoi(&this_rel_str[i]);
if (this_rel < 1)
- erl_exit(-1, "Unexpected ERLANG_OTP_RELEASE format\n");
+ erts_exit(1, "Unexpected ERLANG_OTP_RELEASE format\n");
}
return this_rel;
}
+static ERTS_INLINE void
+set_default_time_adj(int *time_correction_p, ErtsTimeWarpMode *time_warp_mode_p)
+{
+ *time_correction_p = 1;
+ *time_warp_mode_p = ERTS_NO_TIME_WARP_MODE;
+ if (!erts_check_time_adj_support(*time_correction_p,
+ *time_warp_mode_p)) {
+ *time_correction_p = 0;
+ ASSERT(erts_check_time_adj_support(*time_correction_p,
+ *time_warp_mode_p));
+ }
+}
+
/*
* Common error printout function, all error messages
* that don't go to the error logger go through here.
@@ -281,41 +295,31 @@ void erl_error(char *fmt, va_list args)
static int early_init(int *argc, char **argv);
-void
-erts_short_init(void)
-{
- int ncpu = early_init(NULL, NULL);
- erl_init(ncpu,
- ERTS_DEFAULT_MAX_PROCESSES,
- 0,
- ERTS_DEFAULT_MAX_PORTS,
- 0,
- 0);
- erts_initialized = 1;
-}
-
static void
erl_init(int ncpu,
int proc_tab_sz,
int legacy_proc_tab,
int port_tab_sz,
int port_tab_sz_ignore_files,
- int legacy_port_tab)
+ int legacy_port_tab,
+ int time_correction,
+ ErtsTimeWarpMode time_warp_mode,
+ int node_tab_delete_delay,
+ ErtsDbSpinCount db_spin_count)
{
- init_benchmarking();
-
+ erts_bif_unique_init();
erts_init_monitors();
- erts_init_time();
+ erts_init_time(time_correction, time_warp_mode);
erts_init_sys_common_misc();
erts_init_process(ncpu, proc_tab_sz, legacy_proc_tab);
erts_init_scheduling(no_schedulers,
- no_schedulers_online
-#ifdef ERTS_DIRTY_SCHEDULERS
- , no_dirty_cpu_schedulers,
+ no_schedulers_online,
+ erts_no_poll_threads,
+ no_dirty_cpu_schedulers,
no_dirty_cpu_schedulers_online,
no_dirty_io_schedulers
-#endif
);
+ erts_late_init_time_sup();
erts_init_cpu_topology(); /* Must be after init_scheduling */
erts_init_gc(); /* Must be after init_scheduling */
erts_alloc_late_init();
@@ -324,7 +328,6 @@ erl_init(int ncpu,
BIN_VH_MIN_SIZE = erts_next_heap_size(BIN_VH_MIN_SIZE, 0);
erts_init_trace();
- erts_init_binary();
erts_init_bits();
erts_code_ix_init();
erts_init_fun_table();
@@ -337,10 +340,10 @@ erl_init(int ncpu,
erts_ddll_init();
init_emulator();
erts_ptab_init(); /* Must be after init_emulator() */
+ erts_init_binary(); /* Must be after init_emulator() */
erts_bp_init();
- init_db(); /* Must be after init_emulator */
- erts_bif_timer_init();
- erts_init_node_tables();
+ init_db(db_spin_count); /* Must be after init_emulator */
+ erts_init_node_tables(node_tab_delete_delay);
init_dist();
erl_drv_thr_init();
erts_init_async();
@@ -352,25 +355,30 @@ erl_init(int ncpu,
erts_init_bif_re();
erts_init_unicode(); /* after RE to get access to PCRE unicode */
erts_init_external();
+ erts_init_map();
+ erts_beam_bif_load_init();
erts_delay_trap = erts_export_put(am_erlang, am_delay_trap, 2);
erts_late_init_process();
#if HAVE_ERTS_MSEG
erts_mseg_late_init(); /* Must be after timer (erts_init_time()) and thread
initializations */
#endif
+ erl_sys_late_init();
#ifdef HIPE
hipe_mode_switch_init(); /* Must be after init_load/beam_catches/init */
#endif
packet_parser_init();
erl_nif_init();
+ erts_msacc_init();
}
-static void
+static Eterm
erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char** argv)
{
int i;
Eterm start_mod;
Eterm args;
+ Eterm res;
Eterm* hp;
Process parent;
ErlSpawnOpts so;
@@ -379,7 +387,7 @@ erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char**
start_mod = erts_atom_put((byte *) modname, sys_strlen(modname), ERTS_ATOM_ENC_LATIN1, 1);
if (erts_find_function(start_mod, am_start, 2,
erts_active_code_ix()) == NULL) {
- erl_exit(5, "No function %s:start/2\n", modname);
+ erts_exit(ERTS_ERROR_EXIT, "No function %s:start/2\n", modname);
}
/*
@@ -387,7 +395,7 @@ erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char**
*/
erts_init_empty_process(&parent);
- erts_smp_proc_lock(&parent, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(&parent, ERTS_PROC_LOCK_MAIN);
hp = HAlloc(&parent, argc*2 + 4);
args = NIL;
for (i = argc-1; i >= 0; i--) {
@@ -400,12 +408,38 @@ erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char**
hp += 2;
args = CONS(hp, env, args);
- so.flags = 0;
- (void) erl_create_process(&parent, start_mod, am_start, args, &so);
- erts_smp_proc_unlock(&parent, ERTS_PROC_LOCK_MAIN);
+ so.flags = erts_default_spo_flags|SPO_SYSTEM_PROC;
+ res = erl_create_process(&parent, start_mod, am_start, args, &so);
+ erts_proc_unlock(&parent, ERTS_PROC_LOCK_MAIN);
erts_cleanup_empty_process(&parent);
+ return res;
}
+static Eterm
+erl_system_process_otp(Eterm parent_pid, char* modname, int off_heap_msgq)
+{
+ Eterm start_mod;
+ Process* parent;
+ ErlSpawnOpts so;
+ Eterm res;
+
+ start_mod = erts_atom_put((byte *) modname, sys_strlen(modname), ERTS_ATOM_ENC_LATIN1, 1);
+ if (erts_find_function(start_mod, am_start, 0,
+ erts_active_code_ix()) == NULL) {
+ erts_exit(ERTS_ERROR_EXIT, "No function %s:start/0\n", modname);
+ }
+
+ parent = erts_pid2proc(NULL, 0, parent_pid, ERTS_PROC_LOCK_MAIN);
+
+ so.flags = erts_default_spo_flags|SPO_SYSTEM_PROC;
+ if (off_heap_msgq)
+ so.flags |= SPO_OFF_HEAP_MSGQ;
+ res = erl_create_process(parent, start_mod, am_start, NIL, &so);
+ erts_proc_unlock(parent, ERTS_PROC_LOCK_MAIN);
+ return res;
+}
+
+
Eterm
erts_preloaded(Process* p)
{
@@ -475,12 +509,12 @@ load_preloaded(void)
length = preload_p[i].size;
module_name = erts_atom_put((byte *) name, sys_strlen(name), ERTS_ATOM_ENC_LATIN1, 1);
if ((code = sys_preload_begin(&preload_p[i])) == 0)
- erl_exit(1, "Failed to find preloaded code for module %s\n",
+ erts_exit(ERTS_ERROR_EXIT, "Failed to find preloaded code for module %s\n",
name);
res = erts_preload_module(NULL, 0, NIL, &module_name, code, length);
sys_preload_end(&preload_p[i]);
if (res != NIL)
- erl_exit(1,"Failed loading preloaded module %s (%T)\n",
+ erts_exit(ERTS_ERROR_EXIT,"Failed loading preloaded module %s (%T)\n",
name, res);
i++;
}
@@ -509,9 +543,9 @@ void erts_usage(void)
/* erts_fprintf(stderr, "-b func set the boot function (default boot)\n"); */
- erts_fprintf(stderr, "-c disable continuous date/time correction with\n");
- erts_fprintf(stderr, " respect to uptime\n");
-
+ erts_fprintf(stderr, "-c bool enable or disable time correction\n");
+ erts_fprintf(stderr, "-C mode set time warp mode; valid modes are:\n");
+ erts_fprintf(stderr, " no_time_warp|single_time_warp|multi_time_warp\n");
erts_fprintf(stderr, "-d don't write a crash dump for internally detected errors\n");
erts_fprintf(stderr, " (halt(String) will still produce a crash dump)\n");
erts_fprintf(stderr, "-fn[u|a|l] Control how filenames are interpreted\n");
@@ -519,10 +553,28 @@ void erts_usage(void)
H_DEFAULT_SIZE);
erts_fprintf(stderr, "-hmbs size set minimum binary virtual heap size in words (default %d)\n",
VH_DEFAULT_SIZE);
+ erts_fprintf(stderr, "-hmax size set maximum heap size in words (default %d)\n",
+ H_DEFAULT_MAX_SIZE);
+ erts_fprintf(stderr, "-hmaxk bool enable or disable kill at max heap size (default true)\n");
+ erts_fprintf(stderr, "-hmaxel bool enable or disable error_logger report at max heap size (default true)\n");
+ erts_fprintf(stderr, "-hpds size initial process dictionary size (default %d)\n",
+ erts_pd_initial_size);
+ erts_fprintf(stderr, "-hmqd val set default message queue data flag for processes,\n");
+ erts_fprintf(stderr, " valid values are: off_heap | on_heap\n");
+
+ erts_fprintf(stderr, "-IOp number set number of pollsets to be used to poll for I/O,\n");
+ erts_fprintf(stderr, " This value has to be equal to or smaller than the\n");
+ erts_fprintf(stderr, " number of poll threads. If the current platform\n");
+ erts_fprintf(stderr, " does not support concurrent update of pollsets\n");
+ erts_fprintf(stderr, " this value is ignored.\n");
+ erts_fprintf(stderr, "-IOt number set number of threads to be used to poll for I/O\n");
+ erts_fprintf(stderr, "-IOPp number set number of pollsets as a percentage of the\n");
+ erts_fprintf(stderr, " number of poll threads.\n");
+ erts_fprintf(stderr, "-IOPt number set number of threads to be used to poll for I/O\n");
+ erts_fprintf(stderr, " as a percentage of the number of schedulers.\n");
/* erts_fprintf(stderr, "-i module set the boot module (default init)\n"); */
- erts_fprintf(stderr, "-K boolean enable or disable kernel poll\n");
erts_fprintf(stderr, "-n[s|a|d] Control behavior of signals to ports\n");
erts_fprintf(stderr, " Note that this flag is deprecated!\n");
erts_fprintf(stderr, "-M<X> <Y> memory allocator switches,\n");
@@ -548,6 +600,8 @@ void erts_usage(void)
erts_fprintf(stderr, " see the erl(1) documentation for more info.\n");
erts_fprintf(stderr, "-sct cput set cpu topology,\n");
erts_fprintf(stderr, " see the erl(1) documentation for more info.\n");
+ erts_fprintf(stderr, "-secio bool enable/disable eager check I/O scheduling,\n");
+ erts_fprintf(stderr, " see the erl(1) documentation for more info.\n");
#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT
erts_fprintf(stderr, "-sub bool enable/disable scheduler utilization balancing,\n");
#else
@@ -561,9 +615,20 @@ void erts_usage(void)
erts_fprintf(stderr, "-swt val set scheduler wakeup threshold, valid values are:\n");
erts_fprintf(stderr, " very_low|low|medium|high|very_high.\n");
erts_fprintf(stderr, "-sss size suggested stack size in kilo words for scheduler threads,\n");
- erts_fprintf(stderr, " valid range is [%d-%d]\n",
+ erts_fprintf(stderr, " valid range is [%d-%d] (default %d)\n",
+ ERTS_SCHED_THREAD_MIN_STACK_SIZE,
+ ERTS_SCHED_THREAD_MAX_STACK_SIZE,
+ ERTS_DEFAULT_SCHED_STACK_SIZE);
+ erts_fprintf(stderr, "-sssdcpu size suggested stack size in kilo words for dirty CPU scheduler\n");
+ erts_fprintf(stderr, " threads, valid range is [%d-%d] (default %d)\n",
ERTS_SCHED_THREAD_MIN_STACK_SIZE,
- ERTS_SCHED_THREAD_MAX_STACK_SIZE);
+ ERTS_SCHED_THREAD_MAX_STACK_SIZE,
+ ERTS_DEFAULT_DCPU_SCHED_STACK_SIZE);
+ erts_fprintf(stderr, "-sssdio size suggested stack size in kilo words for dirty IO scheduler\n");
+ erts_fprintf(stderr, " threads, valid range is [%d-%d] (default %d)\n",
+ ERTS_SCHED_THREAD_MIN_STACK_SIZE,
+ ERTS_SCHED_THREAD_MAX_STACK_SIZE,
+ ERTS_DEFAULT_DIO_SCHED_STACK_SIZE);
erts_fprintf(stderr, "-spp Bool set port parallelism scheduling hint\n");
erts_fprintf(stderr, "-S n1:n2 set number of schedulers (n1), and number of\n");
erts_fprintf(stderr, " schedulers online (n2), maximum for both\n");
@@ -572,7 +637,6 @@ void erts_usage(void)
erts_fprintf(stderr, "-SP p1:p2 specify schedulers (p1) and schedulers online (p2)\n");
erts_fprintf(stderr, " as percentages of logical processors configured and logical\n");
erts_fprintf(stderr, " processors available, respectively\n");
-#ifdef ERTS_DIRTY_SCHEDULERS
erts_fprintf(stderr, "-SDcpu n1:n2 set number of dirty CPU schedulers (n1), and number of\n");
erts_fprintf(stderr, " dirty CPU schedulers online (n2), valid range for both\n");
erts_fprintf(stderr, " numbers is [1-%d], and n2 must be less than or equal to n1\n",
@@ -582,7 +646,6 @@ void erts_usage(void)
erts_fprintf(stderr, " and logical processors available, respectively\n");
erts_fprintf(stderr, "-SDio n set number of dirty I/O schedulers, valid range is [0-%d]\n",
ERTS_MAX_NO_OF_DIRTY_IO_SCHEDULERS);
-#endif
erts_fprintf(stderr, "-t size set the maximum number of atoms the emulator can handle\n");
erts_fprintf(stderr, " valid range is [%d-%d]\n",
MIN_ATOM_TABLE_SIZE, MAX_ATOM_TABLE_SIZE);
@@ -592,18 +655,24 @@ void erts_usage(void)
erts_fprintf(stderr, "-v turn on chatty mode (GCs will be reported etc)\n");
- erts_fprintf(stderr, "-W<i|w> set error logger warnings mapping,\n");
+ erts_fprintf(stderr, "-W<i|w|e> set error logger warnings mapping,\n");
erts_fprintf(stderr, " see error_logger documentation for details\n");
erts_fprintf(stderr, "-zdbbl size set the distribution buffer busy limit in kilobytes\n");
erts_fprintf(stderr, " valid range is [1-%d]\n", INT_MAX/1024);
+ erts_fprintf(stderr, "-zdntgc time set delayed node table gc in seconds\n");
+ erts_fprintf(stderr, " valid values are infinity or integers in the range [0-%d]\n",
+ ERTS_NODE_TAB_DELAY_GC_MAX);
+#if 0
+ erts_fprintf(stderr, "-zebwt val set ets busy wait threshold, valid values are:\n");
+ erts_fprintf(stderr, " none|very_short|short|medium|long|very_long|extremely_long\n");
+#endif
erts_fprintf(stderr, "\n");
erts_fprintf(stderr, "Note that if the emulator is started with erlexec (typically\n");
erts_fprintf(stderr, "from the erl script), these flags should be specified with +.\n");
erts_fprintf(stderr, "\n\n");
- erl_exit(-1, "");
+ erts_exit(1, "");
}
-#ifdef USE_THREADS
/*
* allocators for thread lib
*/
@@ -645,7 +714,6 @@ static void ethr_ll_free(void *ptr)
erts_free(ERTS_ALC_T_ETHR_LL, ptr);
}
-#endif
static int
early_init(int *argc, char **argv) /*
@@ -663,13 +731,11 @@ early_init(int *argc, char **argv) /*
int schdlrs_percentage = 100;
int schdlrs_onln_percentage = 100;
int max_main_threads;
-#ifdef ERTS_DIRTY_SCHEDULERS
int dirty_cpu_scheds;
int dirty_cpu_scheds_online;
int dirty_cpu_scheds_pctg = 100;
int dirty_cpu_scheds_onln_pctg = 100;
int dirty_io_scheds;
-#endif
int max_reader_groups;
int reader_groups;
char envbuf[21]; /* enough for any 64-bit integer */
@@ -679,13 +745,16 @@ early_init(int *argc, char **argv) /*
erts_sched_compact_load = 1;
erts_printf_eterm_func = erts_printf_term;
- erts_disable_tolerant_timeofday = 0;
display_items = 200;
erts_backtrace_depth = DEFAULT_BACKTRACE_SIZE;
erts_async_max_threads = ERTS_DEFAULT_NO_ASYNC_THREADS;
erts_async_thread_suggested_stack_size = ERTS_ASYNC_THREAD_MIN_STACK_SIZE;
H_MIN_SIZE = H_DEFAULT_SIZE;
BIN_VH_MIN_SIZE = VH_DEFAULT_SIZE;
+ H_MAX_SIZE = H_DEFAULT_MAX_SIZE;
+ H_MAX_FLAGS = MAX_HEAP_SIZE_KILL|MAX_HEAP_SIZE_LOG;
+
+ erts_term_init();
erts_initialized = 0;
@@ -695,11 +764,6 @@ early_init(int *argc, char **argv) /*
&ncpu,
&ncpuonln,
&ncpuavail);
-#ifndef ERTS_SMP
- ncpu = 1;
- ncpuonln = 1;
- ncpuavail = 1;
-#endif
ignore_break = 0;
replace_intr = 0;
@@ -711,27 +775,15 @@ early_init(int *argc, char **argv) /*
erts_sys_pre_init();
erts_atomic_init_nob(&exiting, 0);
-#ifdef ERTS_SMP
erts_thr_progress_pre_init();
-#endif
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init();
-#endif
-#ifdef ERTS_SMP
- erts_smp_atomic32_init_nob(&erts_writing_erl_crash_dump, 0L);
+ erts_atomic32_init_nob(&erts_writing_erl_crash_dump, 0L);
erts_tsd_key_create(&erts_is_crash_dumping_key,"erts_is_crash_dumping_key");
-#else
- erts_writing_erl_crash_dump = 0;
-#endif
- erts_smp_atomic32_init_nob(&erts_max_gen_gcs,
+ erts_atomic32_init_nob(&erts_max_gen_gcs,
(erts_aint32_t) ((Uint16) -1));
erts_pre_init_process();
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
- main_thread = erts_thr_self();
-#endif
/*
* We need to know the number of schedulers to use before we
@@ -745,11 +797,9 @@ early_init(int *argc, char **argv) /*
schdlrs = no_schedulers;
schdlrs_onln = no_schedulers_online;
-#ifdef ERTS_DIRTY_SCHEDULERS
dirty_cpu_scheds = no_schedulers;
dirty_cpu_scheds_online = no_schedulers_online;
dirty_io_scheds = 10;
-#endif
envbufsz = sizeof(envbuf);
@@ -802,6 +852,7 @@ early_init(int *argc, char **argv) /*
}
break;
}
+
case 'S' :
if (argv[i][2] == 'P') {
int ptot, ponln;
@@ -842,7 +893,6 @@ early_init(int *argc, char **argv) /*
("using %d:%d scheduler percentages\n",
schdlrs_percentage, schdlrs_onln_percentage));
}
-#ifdef ERTS_DIRTY_SCHEDULERS
else if (argv[i][2] == 'D') {
char *arg;
char *type = argv[i]+3;
@@ -954,7 +1004,6 @@ early_init(int *argc, char **argv) /*
break;
}
}
-#endif
else {
int tot, onln;
char *arg = get_arg(argv[i]+2, argv[i+1], &i);
@@ -1013,7 +1062,6 @@ early_init(int *argc, char **argv) /*
i++;
}
-#ifdef ERTS_SMP
/* apply any scheduler percentages */
if (schdlrs_percentage != 100 || schdlrs_onln_percentage != 100) {
schdlrs = schdlrs * schdlrs_percentage / 100;
@@ -1037,12 +1085,6 @@ early_init(int *argc, char **argv) /*
erts_usage();
}
}
-#else
- /* Silence gcc warnings */
- (void)schdlrs_percentage;
- (void)schdlrs_onln_percentage;
-#endif
-#ifdef ERTS_DIRTY_SCHEDULERS
 /* apply any dirty scheduler percentages */
if (dirty_cpu_scheds_pctg != 100 || dirty_cpu_scheds_onln_pctg != 100) {
dirty_cpu_scheds = dirty_cpu_scheds * dirty_cpu_scheds_pctg / 100;
@@ -1050,33 +1092,31 @@ early_init(int *argc, char **argv) /*
}
if (dirty_cpu_scheds > schdlrs)
dirty_cpu_scheds = schdlrs;
+ if (dirty_cpu_scheds < 1)
+ dirty_cpu_scheds = 1;
if (dirty_cpu_scheds_online > schdlrs_onln)
dirty_cpu_scheds_online = schdlrs_onln;
-#endif
+ if (dirty_cpu_scheds_online < 1)
+ dirty_cpu_scheds_online = 1;
}
-#ifndef USE_THREADS
- erts_async_max_threads = 0;
-#endif
-#ifdef ERTS_SMP
no_schedulers = schdlrs;
no_schedulers_online = schdlrs_onln;
erts_no_schedulers = (Uint) no_schedulers;
-#endif
-#ifdef ERTS_DIRTY_SCHEDULERS
erts_no_dirty_cpu_schedulers = no_dirty_cpu_schedulers = dirty_cpu_scheds;
no_dirty_cpu_schedulers_online = dirty_cpu_scheds_online;
erts_no_dirty_io_schedulers = no_dirty_io_schedulers = dirty_io_scheds;
-#endif
erts_early_init_scheduling(no_schedulers);
alloc_opts.ncpu = ncpu;
erts_alloc_init(argc, argv, &alloc_opts); /* Handles (and removes)
-M flags. */
/* Require allocators */
-#ifdef ERTS_SMP
+
+ erts_init_check_io(argc, argv);
+
/*
* Thread progress management:
*
@@ -1084,22 +1124,18 @@ early_init(int *argc, char **argv) /*
* ** Scheduler threads (see erl_process.c)
* ** Aux thread (see erl_process.c)
* ** Sys message dispatcher thread (see erl_trace.c)
+ * ** IO Poll threads (see erl_check_io.c)
*
* * Unmanaged threads that need to register:
* ** Async threads (see erl_async.c)
* ** Dirty scheduler threads
*/
erts_thr_progress_init(no_schedulers,
- no_schedulers+2,
-#ifndef ERTS_DIRTY_SCHEDULERS
- erts_async_max_threads
-#else
+ no_schedulers+2+erts_no_poll_threads,
erts_async_max_threads +
erts_no_dirty_cpu_schedulers +
erts_no_dirty_io_schedulers
-#endif
);
-#endif
erts_thr_q_init();
erts_init_utils();
erts_early_init_cpu_topology(no_schedulers,
@@ -1107,7 +1143,6 @@ early_init(int *argc, char **argv) /*
max_reader_groups,
&reader_groups);
-#ifdef USE_THREADS
{
erts_thr_late_init_data_t elid = ERTS_THR_LATE_INIT_DATA_DEF_INITER;
elid.mem.std.alloc = ethr_std_alloc;
@@ -1124,7 +1159,7 @@ early_init(int *argc, char **argv) /*
erts_thr_late_init(&elid);
}
-#endif
+ erts_msacc_early_init();
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_late_init();
@@ -1150,26 +1185,6 @@ early_init(int *argc, char **argv) /*
return ncpu;
}
-#ifndef ERTS_SMP
-static void set_main_stack_size(void)
-{
- if (erts_sched_thread_suggested_stack_size > 0) {
-# if HAVE_DECL_GETRLIMIT && HAVE_DECL_SETRLIMIT && HAVE_DECL_RLIMIT_STACK
- struct rlimit rl;
- int bytes = erts_sched_thread_suggested_stack_size * sizeof(Uint) * 1024;
- if (getrlimit(RLIMIT_STACK, &rl) != 0 ||
- (rl.rlim_cur = bytes, setrlimit(RLIMIT_STACK, &rl) != 0)) {
- erts_fprintf(stderr, "failed to set stack size for scheduler "
- "thread to %d bytes\n", bytes);
- erts_usage();
- }
-# else
- erts_fprintf(stderr, "no OS support for dynamic stack size limit\n");
- erts_usage();
-# endif
- }
-}
-#endif
void
erl_start(int argc, char **argv)
@@ -1185,7 +1200,14 @@ erl_start(int argc, char **argv)
int port_tab_sz_ignore_files = 0;
int legacy_proc_tab = 0;
int legacy_port_tab = 0;
+ int time_correction;
+ ErtsTimeWarpMode time_warp_mode;
+ int node_tab_delete_delay = ERTS_NODE_TAB_DELAY_GC_DEFAULT;
+ ErtsDbSpinCount db_spin_count = ERTS_DB_SPNCNT_NORMAL;
+ Eterm otp_ring0_pid;
+ set_default_time_adj(&time_correction,
+ &time_warp_mode);
envbufsz = sizeof(envbuf);
if (erts_sys_getenv_raw(ERL_MAX_ETS_TABLES_ENV, envbuf, &envbufsz) == 0)
@@ -1196,7 +1218,7 @@ erl_start(int argc, char **argv)
envbufsz = sizeof(envbuf);
if (erts_sys_getenv_raw("ERL_FULLSWEEP_AFTER", envbuf, &envbufsz) == 0) {
Uint16 max_gen_gcs = atoi(envbuf);
- erts_smp_atomic32_set_nob(&erts_max_gen_gcs,
+ erts_atomic32_set_nob(&erts_max_gen_gcs,
(erts_aint32_t) max_gen_gcs);
}
@@ -1206,18 +1228,19 @@ erl_start(int argc, char **argv)
port_tab_sz_ignore_files = 1;
}
-#if (defined(__APPLE__) && defined(__MACH__)) || defined(__DARWIN__)
/*
- * The default stack size on MacOS X is too small for pcre.
+ * A default stack size suitable for pcre, which might use quite
+ * a lot of stack.
*/
- erts_sched_thread_suggested_stack_size = 256;
-#endif
+ erts_sched_thread_suggested_stack_size = ERTS_DEFAULT_SCHED_STACK_SIZE;
+ erts_dcpu_sched_thread_suggested_stack_size = ERTS_DEFAULT_DCPU_SCHED_STACK_SIZE;
+ erts_dio_sched_thread_suggested_stack_size = ERTS_DEFAULT_DIO_SCHED_STACK_SIZE;
#ifdef DEBUG
verbose = DEBUG_DEFAULT;
#endif
- erts_error_logger_warnings = am_error;
+ erts_error_logger_warnings = am_warning;
while (i < argc) {
if (argv[i][0] != '-') {
@@ -1352,6 +1375,7 @@ erl_start(int argc, char **argv)
case 't': verbose |= DEBUG_THREADS; break;
case 'p': verbose |= DEBUG_PROCESSES; break;
case 'm': verbose |= DEBUG_MESSAGES; break;
+ case 'c': verbose |= DEBUG_SHCOPY; break;
default : erts_fprintf(stderr,"Unknown verbose option: %c\n",*ch);
}
}
@@ -1364,6 +1388,7 @@ erl_start(int argc, char **argv)
if (verbose & DEBUG_THREADS) erts_printf("THREADS ");
if (verbose & DEBUG_PROCESSES) erts_printf("PROCESSES ");
if (verbose & DEBUG_MESSAGES) erts_printf("MESSAGES ");
+ if (verbose & DEBUG_SHCOPY) erts_printf("SHCOPY ");
erts_printf("\n");
#else
erts_fprintf(stderr, "warning: -v (only in debug compiled code)\n");
@@ -1377,12 +1402,8 @@ erl_start(int argc, char **argv)
#ifdef DEBUG
strcat(tmp, ",DEBUG");
#endif
-#ifdef ERTS_SMP
strcat(tmp, ",SMP");
-#endif
-#ifdef USE_THREADS
strcat(tmp, ",ASYNC_THREADS");
-#endif
#ifdef HIPE
strcat(tmp, ",HIPE");
#endif
@@ -1392,7 +1413,7 @@ erl_start(int argc, char **argv)
}
erts_fprintf(stderr, "(" EMULATOR ") emulator version "
ERLANG_VERSION "\n");
- erl_exit(0, "");
+ erts_exit(0, "");
}
break;
@@ -1404,8 +1425,13 @@ erl_start(int argc, char **argv)
char *sub_param = argv[i]+2;
/* set default heap size
*
- * h|ms - min_heap_size
- * h|mbs - min_bin_vheap_size
+ * h|ms - min_heap_size
+ * h|mbs - min_bin_vheap_size
+ * h|pds - erts_pd_initial_size
+ * h|mqd - message_queue_data
+ * h|max - max_heap_size
+ * h|maxk - max_heap_kill
+ * h|maxel - max_heap_error_logger
*
*/
if (has_prefix("mbs", sub_param)) {
@@ -1423,6 +1449,64 @@ erl_start(int argc, char **argv)
erts_usage();
}
VERBOSE(DEBUG_SYSTEM, ("using minimum heap size %d\n", H_MIN_SIZE));
+ } else if (has_prefix("pds", sub_param)) {
+ arg = get_arg(sub_param+3, argv[i+1], &i);
+ if (!erts_pd_set_initial_size(atoi(arg))) {
+ erts_fprintf(stderr, "bad initial process dictionary size %s\n", arg);
+ erts_usage();
+ }
+ VERBOSE(DEBUG_SYSTEM, ("using initial process dictionary size %d\n",
+ erts_pd_initial_size));
+ } else if (has_prefix("mqd", sub_param)) {
+ arg = get_arg(sub_param+3, argv[i+1], &i);
+ if (sys_strcmp(arg, "on_heap") == 0) {
+ erts_default_spo_flags &= ~SPO_OFF_HEAP_MSGQ;
+ erts_default_spo_flags |= SPO_ON_HEAP_MSGQ;
+ }
+ else if (sys_strcmp(arg, "off_heap") == 0) {
+ erts_default_spo_flags &= ~SPO_ON_HEAP_MSGQ;
+ erts_default_spo_flags |= SPO_OFF_HEAP_MSGQ;
+ }
+ else {
+ erts_fprintf(stderr,
+ "Invalid message_queue_data flag: %s\n", arg);
+ erts_usage();
+ }
+ } else if (has_prefix("maxk", sub_param)) {
+ arg = get_arg(sub_param+4, argv[i+1], &i);
+ if (strcmp(arg,"true") == 0) {
+ H_MAX_FLAGS |= MAX_HEAP_SIZE_KILL;
+ } else if (strcmp(arg,"false") == 0) {
+ H_MAX_FLAGS &= ~MAX_HEAP_SIZE_KILL;
+ } else {
+ erts_fprintf(stderr, "bad max heap kill %s\n", arg);
+ erts_usage();
+ }
+ VERBOSE(DEBUG_SYSTEM, ("using max heap kill %d\n", H_MAX_FLAGS));
+ } else if (has_prefix("maxel", sub_param)) {
+ arg = get_arg(sub_param+5, argv[i+1], &i);
+ if (strcmp(arg,"true") == 0) {
+ H_MAX_FLAGS |= MAX_HEAP_SIZE_LOG;
+ } else if (strcmp(arg,"false") == 0) {
+ H_MAX_FLAGS &= ~MAX_HEAP_SIZE_LOG;
+ } else {
+ erts_fprintf(stderr, "bad max heap error logger %s\n", arg);
+ erts_usage();
+ }
+ VERBOSE(DEBUG_SYSTEM, ("using max heap log %d\n", H_MAX_FLAGS));
+ } else if (has_prefix("max", sub_param)) {
+ arg = get_arg(sub_param+3, argv[i+1], &i);
+ if ((H_MAX_SIZE = atoi(arg)) < 0) {
+ erts_fprintf(stderr, "bad max heap size %s\n", arg);
+ erts_usage();
+ }
+ if (H_MAX_SIZE < H_MIN_SIZE && H_MAX_SIZE) {
+ erts_fprintf(stderr, "max heap size (%s) is not allowed to be "
+ "smaller than min heap size (%d)\n",
+ arg, H_MIN_SIZE);
+ erts_usage();
+ }
+ VERBOSE(DEBUG_SYSTEM, ("using max heap size %d\n", H_MAX_SIZE));
} else {
/* backward compatibility */
arg = get_arg(argv[i]+2, argv[i+1], &i);
@@ -1495,16 +1579,6 @@ erl_start(int argc, char **argv)
have_break_handler = 0;
break;
- case 'K':
- /* If kernel poll support is present,
- erl_sys_args() will remove the K parameter
- and value */
- get_arg(argv[i]+2, argv[i+1], &i);
- erts_fprintf(stderr,
- "kernel-poll not supported; \"K\" parameter ignored\n",
- arg);
- break;
-
case 'n':
arg = get_arg(argv[i]+2, argv[i+1], &i);
switch (arg[0]) {
@@ -1674,6 +1748,9 @@ erl_start(int argc, char **argv)
erts_usage();
}
}
+ else if (has_prefix("ecio", sub_param)) {
+ /* ignore argument, eager check io no longer used */
+ }
else if (has_prefix("pp", sub_param)) {
arg = get_arg(sub_param+2, argv[i+1], &i);
if (sys_strcmp(arg, "true") == 0)
@@ -1749,10 +1826,45 @@ erl_start(int argc, char **argv)
VERBOSE(DEBUG_SYSTEM,
("scheduler wakeup threshold: %s\n", arg));
}
+ else if (has_prefix("ssdcpu", sub_param)) {
+ /* suggested stack size (Kilo Words) for dirty CPU scheduler threads */
+ arg = get_arg(sub_param+6, argv[i+1], &i);
+ erts_dcpu_sched_thread_suggested_stack_size = atoi(arg);
+
+ if ((erts_dcpu_sched_thread_suggested_stack_size
+ < ERTS_SCHED_THREAD_MIN_STACK_SIZE)
+ || (erts_dcpu_sched_thread_suggested_stack_size >
+ ERTS_SCHED_THREAD_MAX_STACK_SIZE)) {
+ erts_fprintf(stderr, "bad stack size for dirty CPU scheduler threads %s\n",
+ arg);
+ erts_usage();
+ }
+ VERBOSE(DEBUG_SYSTEM,
+ ("suggested dirty CPU scheduler thread stack size %d kilo words\n",
+ erts_dcpu_sched_thread_suggested_stack_size));
+ }
+ else if (has_prefix("ssdio", sub_param)) {
+ /* suggested stack size (Kilo Words) for dirty IO scheduler threads */
+ arg = get_arg(sub_param+5, argv[i+1], &i);
+ erts_dio_sched_thread_suggested_stack_size = atoi(arg);
+
+ if ((erts_dio_sched_thread_suggested_stack_size
+ < ERTS_SCHED_THREAD_MIN_STACK_SIZE)
+ || (erts_dio_sched_thread_suggested_stack_size >
+ ERTS_SCHED_THREAD_MAX_STACK_SIZE)) {
+ erts_fprintf(stderr, "bad stack size for dirty IO scheduler threads %s\n",
+ arg);
+ erts_usage();
+ }
+ VERBOSE(DEBUG_SYSTEM,
+ ("suggested dirty IO scheduler thread stack size %d kilo words\n",
+ erts_dio_sched_thread_suggested_stack_size));
+ }
else if (has_prefix("ss", sub_param)) {
/* suggested stack size (Kilo Words) for scheduler threads */
arg = get_arg(sub_param+2, argv[i+1], &i);
erts_sched_thread_suggested_stack_size = atoi(arg);
+ modified_sched_thread_suggested_stack_size = 1;
if ((erts_sched_thread_suggested_stack_size
< ERTS_SCHED_THREAD_MIN_STACK_SIZE)
@@ -1778,9 +1890,7 @@ erl_start(int argc, char **argv)
arg);
erts_usage();
}
-#ifdef ERTS_SMP
erts_runq_supervision_interval = val;
-#endif
}
else {
erts_fprintf(stderr, "bad scheduling option %s\n", argv[i]);
@@ -1878,15 +1988,51 @@ erl_start(int argc, char **argv)
}
break;
}
+ case 'C':
+ arg = get_arg(argv[i]+2, argv[i+1], &i);
+ if (sys_strcmp(arg, "no_time_warp") == 0)
+ time_warp_mode = ERTS_NO_TIME_WARP_MODE;
+ else if (sys_strcmp(arg, "single_time_warp") == 0)
+ time_warp_mode = ERTS_SINGLE_TIME_WARP_MODE;
+ else if (sys_strcmp(arg, "multi_time_warp") == 0)
+ time_warp_mode = ERTS_MULTI_TIME_WARP_MODE;
+ else {
+ erts_fprintf(stderr,
+ "Invalid time warp mode: %s\n", arg);
+ erts_usage();
+ }
+ break;
case 'c':
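+ /* "-c" alone or with the value false disables time correction;
+ * "-c true" (or "-ctrue") enables it. The gotos below let the
+ * value be given either joined or as a separate argument. */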
- if (argv[i][2] == 0) { /* -c: documented option */
- erts_disable_tolerant_timeofday = 1;
+ if (sys_strcmp(argv[i]+2, "false") == 0)
+ goto time_correction_false;
+ else if (sys_strcmp(argv[i]+2, "true") == 0)
+ goto time_correction_true;
+ else if (argv[i][2] == '\0') {
+ if (i + 1 >= argc)
+ goto time_correction_false;
+ else {
+ if (sys_strcmp(argv[i+1], "false") == 0) {
+ (void) get_arg(argv[i]+2, argv[i+1], &i);
+ goto time_correction_false;
+ }
+ else if (sys_strcmp(argv[i+1], "true") == 0) {
+ (void) get_arg(argv[i]+2, argv[i+1], &i);
+ time_correction_true:
+ time_correction = 1;
+ break;
+ }
+ else {
+ time_correction_false:
+ time_correction = 0;
+ break;
+ }
+ }
}
-#ifdef ERTS_OPCODE_COUNTER_SUPPORT
- else if (argv[i][2] == 'i') { /* -ci: undcoumented option*/
- count_instructions = 1;
+ else {
+ arg = get_arg(argv[i]+2, argv[i+1], &i);
+ erts_fprintf(stderr, "Invalid time correnction value: %s\n", arg);
+ erts_usage();
}
-#endif
break;
case 'W':
arg = get_arg(argv[i]+2, argv[i+1], &i);
@@ -1894,11 +2040,12 @@ erl_start(int argc, char **argv)
case 'i':
erts_error_logger_warnings = am_info;
break;
+ case 'e':
+ erts_error_logger_warnings = am_error;
+ break;
case 'w':
erts_error_logger_warnings = am_warning;
break;
- case 'e': /* The default */
- erts_error_logger_warnings = am_error;
default:
erts_fprintf(stderr, "unrecognized warning_map option %s\n", arg);
erts_usage();
@@ -1907,9 +2054,9 @@ erl_start(int argc, char **argv)
case 'z': {
char *sub_param = argv[i]+2;
- int new_limit;
if (has_prefix("dbbl", sub_param)) {
+ int new_limit;
arg = get_arg(sub_param+4, argv[i+1], &i);
new_limit = atoi(arg);
if (new_limit < 1 || INT_MAX/1024 < new_limit) {
@@ -1918,7 +2065,48 @@ erl_start(int argc, char **argv)
} else {
erts_dist_buf_busy_limit = new_limit*1024;
}
- } else {
+ }
+ else if (has_prefix("dntgc", sub_param)) {
+ long secs;
+
+ arg = get_arg(sub_param+5, argv[i+1], &i);
+ if (sys_strcmp(arg, "infinity") == 0)
+ secs = ERTS_NODE_TAB_DELAY_GC_INFINITY;
+ else {
+ char *endptr;
+ errno = 0;
+ secs = strtol(arg, &endptr, 10);
+ if (errno != 0 || *arg == '\0' || *endptr != '\0'
+ || secs < 0 || ERTS_NODE_TAB_DELAY_GC_MAX < secs) {
+ erts_fprintf(stderr, "Invalid delayed node table gc: %s\n", arg);
+ erts_usage();
+ }
+ }
+ node_tab_delete_delay = (int) secs;
+ }
+ else if (has_prefix("ebwt", sub_param)) {
+ arg = get_arg(sub_param+4, argv[i+1], &i);
+ if (sys_strcmp(arg, "none") == 0)
+ db_spin_count = ERTS_DB_SPNCNT_NONE;
+ else if (sys_strcmp(arg, "very_short") == 0)
+ db_spin_count = ERTS_DB_SPNCNT_VERY_LOW;
+ else if (sys_strcmp(arg, "short") == 0)
+ db_spin_count = ERTS_DB_SPNCNT_LOW;
+ else if (sys_strcmp(arg, "medium") == 0)
+ db_spin_count = ERTS_DB_SPNCNT_NORMAL;
+ else if (sys_strcmp(arg, "long") == 0)
+ db_spin_count = ERTS_DB_SPNCNT_HIGH;
+ else if (sys_strcmp(arg, "very_long") == 0)
+ db_spin_count = ERTS_DB_SPNCNT_VERY_HIGH;
+ else if (sys_strcmp(arg, "extremely_long") == 0)
+ db_spin_count = ERTS_DB_SPNCNT_EXTREMELY_HIGH;
+ else {
+ erts_fprintf(stderr,
+ "Invalid ets busy wait threshold: %s\n", arg);
+ erts_usage();
+ }
+ }
+ else {
erts_fprintf(stderr, "bad -z option %s\n", argv[i]);
erts_usage();
}
@@ -1932,6 +2120,30 @@ erl_start(int argc, char **argv)
i++;
}
+ if (!erts_check_time_adj_support(time_correction, time_warp_mode)) {
+ char *time_correction_str = time_correction ? "Enabled" : "Disabled";
+ char *time_warp_str = "undefined";
+ switch (time_warp_mode) {
+ case ERTS_NO_TIME_WARP_MODE:
+ time_warp_str = "no";
+ break;
+ case ERTS_SINGLE_TIME_WARP_MODE:
+ time_warp_str = "single";
+ break;
+ case ERTS_MULTI_TIME_WARP_MODE:
+ time_warp_str = "multi";
+ break;
+ default:
+ time_warp_str = "undefined";
+ break;
+ }
+ erts_fprintf(stderr, "%s time correction with %s time warp mode "
+ "is not supported on this platform\n",
+ time_correction_str,
+ time_warp_str);
+ erts_usage();
+ }
+
 /* Output format on Windows for sprintf defaults to three exponent digits.
 * We use two to mimic normal sprintf behaviour.
*/
@@ -1960,12 +2172,23 @@ erl_start(int argc, char **argv)
boot_argc = argc - i; /* Number of arguments to init */
boot_argv = &argv[i];
+ if (erts_sched_thread_suggested_stack_size < ERTS_SCHED_THREAD_MIN_STACK_SIZE)
+ erts_sched_thread_suggested_stack_size = ERTS_SCHED_THREAD_MIN_STACK_SIZE;
+ if (erts_dcpu_sched_thread_suggested_stack_size < ERTS_SCHED_THREAD_MIN_STACK_SIZE)
+ erts_dcpu_sched_thread_suggested_stack_size = ERTS_SCHED_THREAD_MIN_STACK_SIZE;
+ if (erts_dio_sched_thread_suggested_stack_size < ERTS_SCHED_THREAD_MIN_STACK_SIZE)
+ erts_dio_sched_thread_suggested_stack_size = ERTS_SCHED_THREAD_MIN_STACK_SIZE;
+
erl_init(ncpu,
proc_tab_sz,
legacy_proc_tab,
port_tab_sz,
port_tab_sz_ignore_files,
- legacy_port_tab);
+ legacy_port_tab,
+ time_correction,
+ time_warp_mode,
+ node_tab_delete_delay,
+ db_spin_count);
load_preloaded();
erts_end_staging_code_ix();
@@ -1973,26 +2196,53 @@ erl_start(int argc, char **argv)
erts_initialized = 1;
- erl_first_process_otp("otp_ring0", NULL, 0, boot_argc, boot_argv);
+ otp_ring0_pid = erl_first_process_otp("otp_ring0", NULL, 0,
+ boot_argc, boot_argv);
+
+ {
+ /*
+ * The erts_code_purger and the erts_literal_area_collector
+ * system processes are *always* alive. If they terminate
+ * they bring the whole VM down.
+ */
+ Eterm pid;
+
+ pid = erl_system_process_otp(otp_ring0_pid, "erts_code_purger", !0);
+ erts_code_purger
+ = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc,
+ internal_pid_index(pid));
+ ASSERT(erts_code_purger && erts_code_purger->common.id == pid);
+ erts_proc_inc_refc(erts_code_purger);
+
+ pid = erl_system_process_otp(otp_ring0_pid, "erts_literal_area_collector", !0);
+ erts_literal_area_collector
+ = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc,
+ internal_pid_index(pid));
+ ASSERT(erts_literal_area_collector
+ && erts_literal_area_collector->common.id == pid);
+ erts_proc_inc_refc(erts_literal_area_collector);
+
+ pid = erl_system_process_otp(otp_ring0_pid, "erts_dirty_process_code_checker", !0);
+ erts_dirty_process_code_checker
+ = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc,
+ internal_pid_index(pid));
+ ASSERT(erts_dirty_process_code_checker
+ && erts_dirty_process_code_checker->common.id == pid);
+ erts_proc_inc_refc(erts_dirty_process_code_checker);
+
+ }
-#ifdef ERTS_SMP
erts_start_schedulers();
- /* Let system specific code decide what to do with the main thread... */
- erts_sys_main_thread(); /* May or may not return! */
-#else
- erts_thr_set_main_status(1, 1);
-#if ERTS_USE_ASYNC_READY_Q
- erts_get_scheduler_data()->aux_work_data.async_ready.queue
- = erts_get_async_ready_queue(1);
-#endif
- set_main_stack_size();
- process_main();
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_post_startup();
#endif
+
+ /* Let system specific code decide what to do with the main thread... */
+ erts_sys_main_thread(); /* May or may not return! */
}
-#ifdef USE_THREADS
__decl_noreturn void erts_thr_fatal_error(int err, char *what)
{
@@ -2006,7 +2256,6 @@ __decl_noreturn void erts_thr_fatal_error(int err, char *what)
abort();
}
-#endif
static void
system_cleanup(int flush_async)
@@ -2019,7 +2268,6 @@ system_cleanup(int flush_async)
* Another thread is currently exiting the system;
* wait for it to do its job.
*/
-#ifdef ERTS_SMP
if (erts_thr_progress_is_managed_thread()) {
/*
* The exiting thread might be waiting for
@@ -2028,7 +2276,6 @@ system_cleanup(int flush_async)
erts_thr_progress_active(NULL, 0);
erts_thr_progress_prepare_wait(NULL);
}
-#endif
/* Wait forever... */
while (1)
erts_milli_sleep(10000000);
@@ -2043,72 +2290,73 @@ system_cleanup(int flush_async)
if (!flush_async
|| !erts_initialized
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
- || !erts_equal_tids(main_thread, erts_thr_self())
-#endif
)
return;
-#ifdef ERTS_SMP
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_check_exact(NULL, 0);
#endif
-#endif
erts_exit_flush_async();
}
+static int erts_exit_code;
+
static __decl_noreturn void __noreturn
-erl_exit_vv(int n, int flush_async, char *fmt, va_list args1, va_list args2)
+erts_exit_vv(int n, int flush_async, char *fmt, va_list args1, va_list args2)
{
- unsigned int an;
-
system_cleanup(flush_async);
- save_statistics();
+ if (erts_mtrace_enabled)
+ erts_mtrace_exit((Uint32) n);
- an = abs(n);
+ if (fmt != NULL && *fmt != '\0')
+ erl_error(fmt, args2); /* Print error message. */
- if (erts_mtrace_enabled)
- erts_mtrace_exit((Uint32) an);
+ erts_exit_code = n;
- /* Produce an Erlang core dump if error */
- if (((n > 0 && erts_no_crash_dump == 0) || n == ERTS_DUMP_EXIT)
+ /* Produce an Erlang crash dump if error */
+ if (((n == ERTS_ERROR_EXIT && erts_no_crash_dump == 0) || n == ERTS_DUMP_EXIT)
&& erts_initialized) {
erl_crash_dump_v((char*) NULL, 0, fmt, args1);
}
- if (fmt != NULL && *fmt != '\0')
- erl_error(fmt, args2); /* Print error message. */
+ erts_exit_epilogue();
+}
+
+__decl_noreturn void __noreturn erts_exit_epilogue(void)
+{
+ int n = erts_exit_code;
+
sys_tty_reset(n);
if (n == ERTS_INTR_EXIT)
exit(0);
else if (n == ERTS_DUMP_EXIT)
ERTS_EXIT_AFTER_DUMP(1);
- else if (n > 0 || n == ERTS_ABORT_EXIT)
+ else if (n == ERTS_ERROR_EXIT || n == ERTS_ABORT_EXIT)
abort();
- exit(an);
+ exit(n);
}
/* Exit without flushing async threads */
-__decl_noreturn void __noreturn erl_exit(int n, char *fmt, ...)
+__decl_noreturn void __noreturn erts_exit(int n, char *fmt, ...)
{
va_list args1, args2;
va_start(args1, fmt);
va_start(args2, fmt);
- erl_exit_vv(n, 0, fmt, args1, args2);
+ erts_exit_vv(n, 0, fmt, args1, args2);
va_end(args2);
va_end(args1);
}
/* Exit after flushing async threads */
-__decl_noreturn void __noreturn erl_exit_flush_async(int n, char *fmt, ...)
+__decl_noreturn void __noreturn erts_flush_async_exit(int n, char *fmt, ...)
{
va_list args1, args2;
va_start(args1, fmt);
va_start(args2, fmt);
- erl_exit_vv(n, 1, fmt, args1, args2);
+ erts_exit_vv(n, 1, fmt, args1, args2);
va_end(args2);
va_end(args1);
}
diff --git a/erts/emulator/beam/erl_instrument.c b/erts/emulator/beam/erl_instrument.c
index df7c443387..634509f880 100644
--- a/erts/emulator/beam/erl_instrument.c
+++ b/erts/emulator/beam/erl_instrument.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -538,7 +539,7 @@ map_stat_free(ErtsAlcType_t n, void *extra, void *ptr)
}
-static void dump_memory_map_to_stream(FILE *fp)
+static void dump_memory_map_to_stream(fmtfn_t to, void* to_arg)
{
ErtsAlcType_t n;
MapStatBlock_t *bp;
@@ -550,7 +551,7 @@ static void dump_memory_map_to_stream(FILE *fp)
/* Write header */
- fprintf(fp,
+ erts_cbprintf(to, to_arg,
"{instr_hdr,\n"
" %lu,\n"
" %lu,\n"
@@ -573,7 +574,7 @@ static void dump_memory_map_to_stream(FILE *fp)
else
astr = ERTS_ALC_A2AD(ERTS_ALC_A_SYSTEM);
- fprintf(fp,
+ erts_cbprintf(to, to_arg,
"%s{%s,%s,%s}%s",
(n == ERTS_ALC_N_MIN) ? "" : " ",
ERTS_ALC_N2TD(n),
@@ -582,12 +583,12 @@ static void dump_memory_map_to_stream(FILE *fp)
(n == ERTS_ALC_N_MAX) ? "" : ",\n");
}
- fprintf(fp, "}}.\n");
+ erts_cbprintf(to, to_arg, "}}.\n");
/* Write memory data */
for (bp = mem_anchor; bp; bp = bp->next) {
if (is_internal_pid(bp->pid))
- fprintf(fp,
+ erts_cbprintf(to, to_arg,
"{%lu, %lu, %lu, {%lu,%lu,%lu}}.\n",
(UWord) bp->type_no,
(UWord) bp->mem,
@@ -596,7 +597,7 @@ static void dump_memory_map_to_stream(FILE *fp)
(UWord) pid_number(bp->pid),
(UWord) pid_serial(bp->pid));
else
- fprintf(fp,
+ erts_cbprintf(to, to_arg,
"{%lu, %lu, %lu, undefined}.\n",
(UWord) bp->type_no,
(UWord) bp->mem,
@@ -607,40 +608,29 @@ static void dump_memory_map_to_stream(FILE *fp)
erts_mtx_unlock(&instr_mutex);
}
-int erts_instr_dump_memory_map_to_fd(int fd)
+int erts_instr_dump_memory_map_to(fmtfn_t to, void* to_arg)
{
- char buf[BUFSIZ];
- FILE *f;
-
if (!erts_instr_memory_map)
return 0;
- f = fdopen(fd, "w");
- if (f == NULL)
- return 0;
-
- /* Avoid allocating memory; we may have run out of it at this point. */
- setbuf(f, buf);
-
- dump_memory_map_to_stream(f);
- fflush(f);
+ dump_memory_map_to_stream(to, to_arg);
return 1;
}
int erts_instr_dump_memory_map(const char *name)
{
- FILE *f;
+ int fd;
if (!erts_instr_memory_map)
return 0;
- f = fopen(name, "w");
- if (f == NULL)
+ fd = open(name, O_WRONLY | O_CREAT | O_TRUNC, 0640);
+ if (fd < 0)
return 0;
- dump_memory_map_to_stream(f);
+ dump_memory_map_to_stream(erts_write_fd, (void*)&fd);
- fclose(f);
+ close(fd);
return 1;
}
@@ -997,19 +987,19 @@ erts_instr_get_stat(Process *proc, Eterm what, int begin_max_period)
}
static void
-dump_stat_to_stream(FILE *fp, int begin_max_period)
+dump_stat_to_stream(fmtfn_t to, void* to_arg, int begin_max_period)
{
ErtsAlcType_t i, a_max, a_min;
erts_mtx_lock(&instr_mutex);
- fprintf(fp,
+ erts_cbprintf(to, to_arg,
"{instr_vsn,%lu}.\n",
(unsigned long) ERTS_INSTR_VSN);
update_max_ever_values(&stats->tot, 0, 0);
- fprintf(fp,
+ erts_cbprintf(to, to_arg,
"{total,[{total,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}]}.\n",
(UWord) stats->tot.size,
(UWord) stats->tot.max_size,
@@ -1037,7 +1027,7 @@ dump_stat_to_stream(FILE *fp, int begin_max_period)
for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
if (erts_allctrs_info[i].enabled) {
- fprintf(fp,
+ erts_cbprintf(to, to_arg,
"%s{%s,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}%s",
i == a_min ? "{allocators,\n [" : " ",
ERTS_ALC_A2AD(i),
@@ -1054,7 +1044,7 @@ dump_stat_to_stream(FILE *fp, int begin_max_period)
update_max_ever_values(stats->c, ERTS_ALC_C_MIN, ERTS_ALC_C_MAX);
for (i = ERTS_ALC_C_MIN; i <= ERTS_ALC_C_MAX; i++) {
- fprintf(fp,
+ erts_cbprintf(to, to_arg,
"%s{%s,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}%s",
i == ERTS_ALC_C_MIN ? "{classes,\n [" : " ",
ERTS_ALC_C2CD(i),
@@ -1070,7 +1060,7 @@ dump_stat_to_stream(FILE *fp, int begin_max_period)
update_max_ever_values(stats->n, ERTS_ALC_N_MIN, ERTS_ALC_N_MAX);
for (i = ERTS_ALC_N_MIN; i <= ERTS_ALC_N_MAX; i++) {
- fprintf(fp,
+ erts_cbprintf(to, to_arg,
"%s{%s,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}%s",
i == ERTS_ALC_N_MIN ? "{types,\n [" : " ",
ERTS_ALC_N2TD(i),
@@ -1094,40 +1084,29 @@ dump_stat_to_stream(FILE *fp, int begin_max_period)
}
-int erts_instr_dump_stat_to_fd(int fd, int begin_max_period)
+int erts_instr_dump_stat_to(fmtfn_t to, void* to_arg, int begin_max_period)
{
- char buf[BUFSIZ];
- FILE *fp;
-
if (!erts_instr_stat)
return 0;
- fp = fdopen(fd, "w");
- if (fp == NULL)
- return 0;
-
- /* Avoid allocating memory; we may have run out of it at this point. */
- setbuf(fp, buf);
-
- dump_stat_to_stream(fp, begin_max_period);
- fflush(fp);
+ dump_stat_to_stream(to, to_arg, begin_max_period);
return 1;
}
int erts_instr_dump_stat(const char *name, int begin_max_period)
{
- FILE *file;
+ int fd;
if (!erts_instr_stat)
return 0;
- file = fopen(name, "w");
- if (file == NULL)
+ fd = open(name, O_WRONLY | O_CREAT | O_TRUNC, 0640);
+ if (fd < 0)
return 0;
- dump_stat_to_stream(file, begin_max_period);
+ dump_stat_to_stream(erts_write_fd, (void*)&fd, begin_max_period);
- fclose(file);
+ close(fd);
return 1;
}
@@ -1221,12 +1200,13 @@ erts_instr_init(int stat, int map_stat)
stats = erts_alloc(ERTS_ALC_T_INSTR_INFO, sizeof(struct stats_));
- erts_mtx_init(&instr_mutex, "instr");
+ erts_mtx_init(&instr_mutex, "instr", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
mem_anchor = NULL;
/* Install instrumentation functions */
- ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs));
+ ERTS_CT_ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs));
sys_memcpy((void *)real_allctrs,(void *)erts_allctrs,sizeof(erts_allctrs));
@@ -1244,7 +1224,8 @@ erts_instr_init(int stat, int map_stat)
if (map_stat) {
- erts_mtx_init(&instr_x_mutex, "instr_x");
+ erts_mtx_init(&instr_x_mutex, "instr_x", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
erts_instr_memory_map = 1;
erts_instr_stat = 1;
diff --git a/erts/emulator/beam/erl_instrument.h b/erts/emulator/beam/erl_instrument.h
index 37b9b67139..351172b2fa 100644
--- a/erts/emulator/beam/erl_instrument.h
+++ b/erts/emulator/beam/erl_instrument.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -28,10 +29,10 @@ extern int erts_instr_memory_map;
extern int erts_instr_stat;
Uint erts_instr_init(int stat, int map_stat);
-int erts_instr_dump_memory_map_to_fd(int fd);
+int erts_instr_dump_memory_map_to(fmtfn_t to, void* to_arg);
int erts_instr_dump_memory_map(const char *name);
Eterm erts_instr_get_memory_map(Process *process);
-int erts_instr_dump_stat_to_fd(int fd, int begin_max_period);
+int erts_instr_dump_stat_to(fmtfn_t to, void* to_arg, int begin_max_period);
int erts_instr_dump_stat(const char *name, int begin_max_period);
Eterm erts_instr_get_stat(Process *proc, Eterm what, int begin_max_period);
Eterm erts_instr_get_type_info(Process *proc);
diff --git a/erts/emulator/beam/erl_io_queue.c b/erts/emulator/beam/erl_io_queue.c
new file mode 100644
index 0000000000..40d69ea6b0
--- /dev/null
+++ b/erts/emulator/beam/erl_io_queue.c
@@ -0,0 +1,1231 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2017. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "global.h"
+
+#define ERL_WANT_HIPE_BIF_WRAPPER__
+#include "bif.h"
+#undef ERL_WANT_HIPE_BIF_WRAPPER__
+
+#include "erl_bits.h"
+#include "erl_io_queue.h"
+
+#define IOL2V_SMALL_BIN_LIMIT (ERL_ONHEAP_BIN_LIMIT * 4)
+
+static void free_binary(ErtsIOQBinary *b, int driver);
+static ErtsIOQBinary *alloc_binary(Uint size, char *source, void **iov_base, int driver);
+
+void erts_ioq_init(ErtsIOQueue *q, ErtsAlcType_t alct, int driver)
+{
+
+ ERTS_CT_ASSERT(offsetof(ErlNifIOVec,flags) == sizeof(ErtsIOVecCommon));
+ ERTS_CT_ASSERT(sizeof(ErlIOVec) == sizeof(ErtsIOVecCommon));
+ ERTS_CT_ASSERT(sizeof(size_t) == sizeof(ErlDrvSizeT));
+ ERTS_CT_ASSERT(sizeof(size_t) == sizeof(Uint));
+
+ q->alct = alct;
+ q->driver = driver;
+ q->size = 0;
+ q->v_head = q->v_tail = q->v_start = q->v_small;
+ q->v_end = q->v_small + ERTS_SMALL_IO_QUEUE;
+ q->b_head = q->b_tail = q->b_start = q->b_small;
+ q->b_end = q->b_small + ERTS_SMALL_IO_QUEUE;
+}
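+
+/* A minimal usage sketch (illustrative only; the allocator type 'alct'
+ * and the surrounding error handling are placeholders, not taken from
+ * this patch):
+ *
+ * ErtsIOQueue q;
+ * ErtsIOVec *ev = ...; // vector to send
+ * erts_ioq_init(&q, alct, 1); // driver-style queue
+ * erts_ioq_enqv(&q, ev, 0); // queue the whole vector
+ * ... // writev() some of it
+ * erts_ioq_deq(&q, bytes_written); // drop what was written
+ * erts_ioq_clear(&q); // release what remains
+ */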
+
+void erts_ioq_clear(ErtsIOQueue *q)
+{
+ ErtsIOQBinary** binp = q->b_head;
+ int driver = q->driver;
+
+ if (q->v_start != q->v_small)
+ erts_free(q->alct, (void *) q->v_start);
+
+ while(binp < q->b_tail) {
+ if (*binp != NULL)
+ free_binary(*binp, driver);
+ binp++;
+ }
+ if (q->b_start != q->b_small)
+ erts_free(q->alct, (void *) q->b_start);
+ q->v_start = q->v_end = q->v_head = q->v_tail = NULL;
+ q->b_start = q->b_end = q->b_head = q->b_tail = NULL;
+ q->size = 0;
+}
+
+static void free_binary(ErtsIOQBinary *b, int driver)
+{
+ if (driver)
+ driver_free_binary(&b->driver);
+ else if (erts_refc_dectest(&b->nif.intern.refc, 0) == 0)
+ erts_bin_free(&b->nif);
+}
+
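+/* Allocate a fresh binary of 'size' bytes, copy 'source' into it and
+ * let *iov_base point at the copied data; returns NULL if allocation
+ * fails. Used when an iovec element has no binary backing it yet. */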
+static ErtsIOQBinary *alloc_binary(Uint size, char *source, void **iov_base, int driver)
+{
+ if (driver) {
+ ErlDrvBinary *bin = driver_alloc_binary(size);
+ if (!bin) return NULL;
+ sys_memcpy(bin->orig_bytes, source, size);
+ *iov_base = bin->orig_bytes;
+ return (ErtsIOQBinary *)bin;
+ } else {
+ /* This clause can be triggered if enif_ioq_enq_binary is used */
+ Binary *bin = erts_bin_nrml_alloc(size);
+ if (!bin) return NULL;
+ erts_refc_init(&bin->intern.refc, 1);
+ sys_memcpy(bin->orig_bytes, source, size);
+ *iov_base = bin->orig_bytes;
+ return (ErtsIOQBinary *)bin;
+ }
+}
+
+Uint erts_ioq_size(ErtsIOQueue *q)
+{
+ return q->size;
+}
+
+/* Expand the queue to hold n more elements in the tail (tail != 0)
+ * or in the head (tail == 0). Returns 0 on success, -1 if the
+ * needed allocation fails. */
+static int expandq(ErtsIOQueue* q, int n, int tail)
+{
+ int h_sz; /* room before header */
+ int t_sz; /* room after tail */
+ int q_sz; /* occupied */
+ int nvsz;
+ SysIOVec* niov;
+ ErtsIOQBinary** nbinv;
+
+ h_sz = q->v_head - q->v_start;
+ t_sz = q->v_end - q->v_tail;
+ q_sz = q->v_tail - q->v_head;
+
+ if (tail && (n <= t_sz)) /* do we need to expand tail? */
+ return 0;
+ else if (!tail && (n <= h_sz)) /* do we need to expand head? */
+ return 0;
+ else if (n > (h_sz + t_sz)) { /* need to allocate */
+ /* we may get a little extra, but that is ok */
+ nvsz = (q->v_end - q->v_start) + n;
+
+ niov = erts_alloc_fnf(q->alct, nvsz * sizeof(SysIOVec));
+ if (!niov)
+ return -1;
+ nbinv = erts_alloc_fnf(q->alct, nvsz * sizeof(ErtsIOQBinary**));
+ if (!nbinv) {
+ erts_free(q->alct, (void *) niov);
+ return -1;
+ }
+ if (tail) {
+ sys_memcpy(niov, q->v_head, q_sz*sizeof(SysIOVec));
+ if (q->v_start != q->v_small)
+ erts_free(q->alct, (void *) q->v_start);
+ q->v_start = niov;
+ q->v_end = niov + nvsz;
+ q->v_head = q->v_start;
+ q->v_tail = q->v_head + q_sz;
+
+ sys_memcpy(nbinv, q->b_head, q_sz*sizeof(ErtsIOQBinary*));
+ if (q->b_start != q->b_small)
+ erts_free(q->alct, (void *) q->b_start);
+ q->b_start = nbinv;
+ q->b_end = nbinv + nvsz;
+ q->b_head = q->b_start;
+ q->b_tail = q->b_head + q_sz;
+ }
+ else {
+ sys_memcpy(niov+nvsz-q_sz, q->v_head, q_sz*sizeof(SysIOVec));
+ if (q->v_start != q->v_small)
+ erts_free(q->alct, (void *) q->v_start);
+ q->v_start = niov;
+ q->v_end = niov + nvsz;
+ q->v_tail = q->v_end;
+ q->v_head = q->v_tail - q_sz;
+
+ sys_memcpy(nbinv+nvsz-q_sz, q->b_head, q_sz*sizeof(ErtsIOQBinary*));
+ if (q->b_start != q->b_small)
+ erts_free(q->alct, (void *) q->b_start);
+ q->b_start = nbinv;
+ q->b_end = nbinv + nvsz;
+ q->b_tail = q->b_end;
+ q->b_head = q->b_tail - q_sz;
+ }
+ }
+ else if (tail) { /* move to beginning to make room in tail */
+ sys_memmove(q->v_start, q->v_head, q_sz*sizeof(SysIOVec));
+ q->v_head = q->v_start;
+ q->v_tail = q->v_head + q_sz;
+ sys_memmove(q->b_start, q->b_head, q_sz*sizeof(ErtsIOQBinary*));
+ q->b_head = q->b_start;
+ q->b_tail = q->b_head + q_sz;
+ }
+ else { /* move to end to make room */
+ sys_memmove(q->v_end-q_sz, q->v_head, q_sz*sizeof(SysIOVec));
+ q->v_tail = q->v_end;
+ q->v_head = q->v_tail-q_sz;
+ sys_memmove(q->b_end-q_sz, q->b_head, q_sz*sizeof(ErtsIOQBinary*));
+ q->b_tail = q->b_end;
+ q->b_head = q->b_tail-q_sz;
+ }
+
+ return 0;
+}
+
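+/* Advance past 'skipbytes' bytes in 'vec', stripping any leading
+ * elements that are consumed completely. Returns the number of iovec
+ * entries left; the out parameters receive the adjusted iov/binv
+ * positions and the (pre-adjustment) length of the entry where
+ * skipping stopped. Returns -1 if the vector holds no more than
+ * 'skipbytes' bytes. */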
+static
+int skip(ErtsIOVec* vec, Uint skipbytes,
+ SysIOVec **iovp, ErtsIOQBinary ***binvp,
+ Uint *lenp)
+{
+ int n;
+ Uint len;
+ SysIOVec* iov;
+ ErtsIOQBinary** binv;
+
+ if (vec->common.size <= skipbytes)
+ return -1;
+
+ iov = vec->common.iov;
+ binv = vec->common.binv;
+ n = vec->common.vsize;
+ /* we use a do-while loop to strip iov_len==0 entries from the beginning */
+ do {
+ len = iov->iov_len;
+ if (len <= skipbytes) {
+ skipbytes -= len;
+ iov++;
+ binv++;
+ n--;
+ }
+ else {
+ iov->iov_base = ((char *)(iov->iov_base)) + skipbytes;
+ iov->iov_len -= skipbytes;
+ skipbytes = 0;
+ }
+ } while(skipbytes > 0);
+
+ *binvp = binv;
+ *iovp = iov;
+ *lenp = len;
+
+ return n;
+}
+
+/* Put elements from vec at q tail */
+int erts_ioq_enqv(ErtsIOQueue *q, ErtsIOVec *eiov, Uint skipbytes)
+{
+ int n;
+ Uint len;
+ Uint size = eiov->common.size - skipbytes;
+ SysIOVec *iov;
+ ErtsIOQBinary** binv;
+ ErtsIOQBinary* b;
+
+ if (q == NULL)
+ return -1;
+
+ ASSERT(eiov->common.size >= skipbytes);
+ if (eiov->common.size <= skipbytes)
+ return 0;
+
+ n = skip(eiov, skipbytes, &iov, &binv, &len);
+
+ if (n < 0)
+ return n;
+
+ if (q->v_tail + n >= q->v_end)
+ if (expandq(q, n, 1))
+ return -1;
+
+ /* Queue and reference all binaries (remove zero length items) */
+ while(n--) {
+ if ((len = iov->iov_len) > 0) {
+ if ((b = *binv) == NULL) { /* special case: create a binary */
+ b = alloc_binary(len, iov->iov_base, (void**)&q->v_tail->iov_base,
+ q->driver);
+ if (!b) return -1;
+ *q->b_tail++ = b;
+ q->v_tail->iov_len = len;
+ q->v_tail++;
+ }
+ else {
+ if (q->driver)
+ driver_binary_inc_refc(&b->driver);
+ else
+ erts_refc_inc(&b->nif.intern.refc, 1);
+ *q->b_tail++ = b;
+ *q->v_tail++ = *iov;
+ }
+ }
+ iov++;
+ binv++;
+ }
+ q->size += size; /* update total size in queue */
+ return 0;
+}
+
+/* Put elements from vec at q head */
+int erts_ioq_pushqv(ErtsIOQueue *q, ErtsIOVec* vec, Uint skipbytes)
+{
+ int n;
+ Uint len;
+ Uint size = vec->common.size - skipbytes;
+ SysIOVec* iov;
+ ErtsIOQBinary** binv;
+ ErtsIOQBinary* b;
+
+ if (q == NULL)
+ return -1;
+
+ ASSERT(vec->common.size >= skipbytes);
+ if (vec->common.size <= skipbytes)
+ return 0;
+
+ n = skip(vec, skipbytes, &iov, &binv, &len);
+
+ if (n < 0)
+ return n;
+
+ if (q->v_head - n < q->v_start)
+ if (expandq(q, n, 0))
+ return -1;
+
+ /* Queue and reference all binaries (remove zero length items) */
+ iov += (n-1); /* move to end */
+ binv += (n-1); /* move to end */
+ while(n--) {
+ if ((len = iov->iov_len) > 0) {
+ if ((b = *binv) == NULL) { /* special case: create a binary */
+ if (q->driver) {
+ ErlDrvBinary *bin = driver_alloc_binary(len);
+ if (!bin) return -1;
+ sys_memcpy(bin->orig_bytes, iov->iov_base, len);
+ b = (ErtsIOQBinary *)bin;
+ q->v_head->iov_base = bin->orig_bytes;
+ }
+ *--q->b_head = b;
+ q->v_head--;
+ q->v_head->iov_len = len;
+ }
+ else {
+ if (q->driver)
+ driver_binary_inc_refc(&b->driver);
+ else
+ erts_refc_inc(&b->nif.intern.refc, 1);
+ *--q->b_head = b;
+ *--q->v_head = *iov;
+ }
+ }
+ iov--;
+ binv--;
+ }
+ q->size += size; /* update total size in queue */
+ return 0;
+}
+
+
+/*
+** Remove size bytes from the queue head.
+** Returns 0 on success, or -1 if the queue holds fewer than size bytes.
+*/
+int erts_ioq_deq(ErtsIOQueue *q, Uint size)
+{
+ Uint len;
+
+ if ((q == NULL) || (q->size < size))
+ return -1;
+ q->size -= size;
+ while (size > 0) {
+ ASSERT(q->v_head != q->v_tail);
+
+ len = q->v_head->iov_len;
+ if (len <= size) {
+ size -= len;
+ free_binary(*q->b_head, q->driver);
+ *q->b_head++ = NULL;
+ q->v_head++;
+ }
+ else {
+ q->v_head->iov_base = ((char *)(q->v_head->iov_base)) + size;
+ q->v_head->iov_len -= size;
+ size = 0;
+ }
+ }
+
+ /* restart pointers (optimised for enq) */
+ if (q->v_head == q->v_tail) {
+ q->v_head = q->v_tail = q->v_start;
+ q->b_head = q->b_tail = q->b_start;
+ }
+ return 0;
+}
+
+
+Uint erts_ioq_peekqv(ErtsIOQueue *q, ErtsIOVec *ev) {
+ ASSERT(ev);
+
+ if (! q) {
+ return (Uint) -1;
+ } else {
+ if ((ev->common.vsize = q->v_tail - q->v_head) == 0) {
+ ev->common.size = 0;
+ ev->common.iov = NULL;
+ ev->common.binv = NULL;
+ } else {
+ ev->common.size = q->size;
+ ev->common.iov = q->v_head;
+ ev->common.binv = q->b_head;
+ }
+ return q->size;
+ }
+}
+
+SysIOVec* erts_ioq_peekq(ErtsIOQueue *q, int* vlenp) /* length of io-vector */
+{
+
+ if (q == NULL) {
+ *vlenp = -1;
+ return NULL;
+ }
+ if ((*vlenp = (q->v_tail - q->v_head)) == 0)
+ return NULL;
+ return q->v_head;
+}
+
+/* Fills a possibly deep list of chars and binaries into the io vector iov.
+** Bytes and small binaries (below bin_limit) are copied into the
+** characters binary cbin; larger binaries are referenced directly.
+** Returns the vector length on success,
+** -1 on overflow
+** -2 on type error
+*/
+
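+/* Append a (bin, ptr, len) chunk to the io vector, splitting it into
+ * several entries when it exceeds MAX_SYSIOVEC_IOVLEN so that each
+ * SysIOVec element stays within what writev() can address. */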
+static ERTS_INLINE void
+io_list_to_vec_set_vec(SysIOVec **iov, ErtsIOQBinary ***binv,
+ ErtsIOQBinary *bin, byte *ptr, Uint len,
+ int *vlen)
+{
+ while (len > MAX_SYSIOVEC_IOVLEN) {
+ (*iov)->iov_base = ptr;
+ (*iov)->iov_len = MAX_SYSIOVEC_IOVLEN;
+ ptr += MAX_SYSIOVEC_IOVLEN;
+ len -= MAX_SYSIOVEC_IOVLEN;
+ (*iov)++;
+ (*vlen)++;
+ *(*binv)++ = bin;
+ }
+ (*iov)->iov_base = ptr;
+ (*iov)->iov_len = len;
+ *(*binv)++ = bin;
+ (*iov)++;
+ (*vlen)++;
+}
+
+int
+erts_ioq_iolist_to_vec(Eterm obj, /* io-list */
+ SysIOVec* iov, /* io vector */
+ ErtsIOQBinary** binv, /* binary reference vector */
+ ErtsIOQBinary* cbin, /* binary to store characters */
+ Uint bin_limit, /* small binaries limit */
+ int driver)
+{
+ DECLARE_ESTACK(s);
+ Eterm* objp;
+ byte *buf = NULL;
+ Uint len = 0;
+ Uint csize = 0;
+ int vlen = 0;
+ byte* cptr;
+
+ if (cbin) {
+ if (driver) {
+ buf = (byte*)cbin->driver.orig_bytes;
+ len = cbin->driver.orig_size;
+ } else {
+ buf = (byte*)cbin->nif.orig_bytes;
+ len = cbin->nif.orig_size;
+ }
+ }
+ cptr = buf;
+
+ goto L_jump_start; /* avoid push */
+
+ while (!ESTACK_ISEMPTY(s)) {
+ obj = ESTACK_POP(s);
+ L_jump_start:
+ if (is_list(obj)) {
+ L_iter_list:
+ objp = list_val(obj);
+ obj = CAR(objp);
+ if (is_byte(obj)) {
+ if (len == 0)
+ goto L_overflow;
+ *buf++ = unsigned_val(obj);
+ csize++;
+ len--;
+ } else if (is_binary(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ goto handle_binary;
+ } else if (is_list(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ goto L_iter_list; /* on head */
+ } else if (!is_nil(obj)) {
+ goto L_type_error;
+ }
+ obj = CDR(objp);
+ if (is_list(obj))
+ goto L_iter_list; /* on tail */
+ else if (is_binary(obj)) {
+ goto handle_binary;
+ } else if (!is_nil(obj)) {
+ goto L_type_error;
+ }
+ } else if (is_binary(obj)) {
+ Eterm real_bin;
+ Uint offset;
+ Eterm* bptr;
+ Uint size;
+ int bitoffs;
+ int bitsize;
+
+ handle_binary:
+ size = binary_size(obj);
+ ERTS_GET_REAL_BIN(obj, real_bin, offset, bitoffs, bitsize);
+ ASSERT(bitsize == 0);
+ bptr = binary_val(real_bin);
+ if (*bptr == HEADER_PROC_BIN) {
+ ProcBin* pb = (ProcBin *) bptr;
+ if (bitoffs != 0) {
+ if (len < size) {
+ goto L_overflow;
+ }
+ erts_copy_bits(pb->bytes+offset, bitoffs, 1,
+ (byte *) buf, 0, 1, size*8);
+ csize += size;
+ buf += size;
+ len -= size;
+ } else if (bin_limit && size < bin_limit) {
+ if (len < size) {
+ goto L_overflow;
+ }
+ sys_memcpy(buf, pb->bytes+offset, size);
+ csize += size;
+ buf += size;
+ len -= size;
+ } else {
+ ErtsIOQBinary *qbin;
+ if (csize != 0) {
+ io_list_to_vec_set_vec(&iov, &binv, cbin,
+ cptr, csize, &vlen);
+ cptr = buf;
+ csize = 0;
+ }
+ if (pb->flags) {
+ erts_emasculate_writable_binary(pb);
+ }
+ if (driver)
+ qbin = (ErtsIOQBinary*)Binary2ErlDrvBinary(pb->val);
+ else
+ qbin = (ErtsIOQBinary*)pb->val;
+
+ io_list_to_vec_set_vec(
+ &iov, &binv, qbin,
+ pb->bytes+offset, size, &vlen);
+ }
+ } else {
+ ErlHeapBin* hb = (ErlHeapBin *) bptr;
+ if (len < size) {
+ goto L_overflow;
+ }
+ copy_binary_to_buffer(buf, 0,
+ ((byte *) hb->data)+offset, bitoffs,
+ 8*size);
+ csize += size;
+ buf += size;
+ len -= size;
+ }
+ } else if (!is_nil(obj)) {
+ goto L_type_error;
+ }
+ }
+
+ if (csize != 0) {
+ io_list_to_vec_set_vec(&iov, &binv, cbin, cptr, csize, &vlen);
+ }
+
+ DESTROY_ESTACK(s);
+ return vlen;
+
+ L_type_error:
+ DESTROY_ESTACK(s);
+ return -2;
+
+ L_overflow:
+ DESTROY_ESTACK(s);
+ return -1;
+}
+
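+/* Count how one binary contributes to the vector/byte tallies used by
+ * erts_ioq_iolist_vec_len(): refc binaries at or above 'blimit' become
+ * their own iovec entries (split per MAX_SYSIOVEC_IOVLEN), everything
+ * else is accounted to the common copy buffer. Returns 1 on a
+ * bitstring, 2 on size overflow, 0 otherwise. */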
+static ERTS_INLINE int
+io_list_vec_count(Eterm obj, Uint *v_size,
+ Uint *c_size, Uint *b_size, Uint *in_clist,
+ Uint *p_v_size, Uint *p_c_size, Uint *p_in_clist,
+ Uint blimit)
+{
+ Uint size = binary_size(obj);
+ Eterm real;
+ ERTS_DECLARE_DUMMY(Uint offset);
+ int bitoffs;
+ int bitsize;
+ ERTS_GET_REAL_BIN(obj, real, offset, bitoffs, bitsize);
+ if (bitsize != 0) return 1;
+ if (thing_subtag(*binary_val(real)) == REFC_BINARY_SUBTAG &&
+ bitoffs == 0) {
+ *b_size += size;
+ if (*b_size < size) return 2;
+ *in_clist = 0;
+ ++*v_size;
+ /* If iov_len is smaller than Uint, the binary may have to be split */
+ /* into multiple (at most 2GB) elements in the io vector. */
+ *v_size += size / MAX_SYSIOVEC_IOVLEN;
+ if (size >= blimit) {
+ *p_in_clist = 0;
+ ++*p_v_size;
+ } else {
+ *p_c_size += size;
+ if (!*p_in_clist) {
+ *p_in_clist = 1;
+ ++*p_v_size;
+ }
+ }
+ } else {
+ *c_size += size;
+ if (*c_size < size) return 2;
+ if (!*in_clist) {
+ *in_clist = 1;
+ ++*v_size;
+ }
+ *p_c_size += size;
+ if (!*p_in_clist) {
+ *p_in_clist = 1;
+ ++*p_v_size;
+ }
+ }
+ return 0;
+}
+
+#define IO_LIST_VEC_COUNT(obj) \
+ do { \
+ switch (io_list_vec_count(obj, &v_size, &c_size, \
+ &b_size, &in_clist, \
+ &p_v_size, &p_c_size, &p_in_clist, \
+ blimit)) { \
+ case 1: goto L_type_error; \
+ case 2: goto L_overflow_error; \
+ default: break; \
+ } \
+ } while(0)
+
+/*
+ * Returns 0 if successful and a non-zero value otherwise.
+ *
+ * Return values through pointers:
+ * *vsize - SysIOVec size needed for a writev
+ * *csize - Number of bytes not in binary (in the common binary)
+ * *pvsize - SysIOVec size needed if packing small binaries
+ * *pcsize - Number of bytes in the common binary if packing
+ * *total_size - Total size of iolist in bytes
+ */
+int
+erts_ioq_iolist_vec_len(Eterm obj, int* vsize, Uint* csize,
+ Uint* pvsize, Uint* pcsize,
+ size_t* total_size, Uint blimit)
+{
+ DECLARE_ESTACK(s);
+ Eterm* objp;
+ Uint v_size = 0;
+ Uint c_size = 0;
+ Uint b_size = 0;
+ Uint in_clist = 0;
+ Uint p_v_size = 0;
+ Uint p_c_size = 0;
+ Uint p_in_clist = 0;
+ size_t total;
+
+ goto L_jump_start; /* avoid a push */
+
+ while (!ESTACK_ISEMPTY(s)) {
+ obj = ESTACK_POP(s);
+ L_jump_start:
+ if (is_list(obj)) {
+ L_iter_list:
+ objp = list_val(obj);
+ obj = CAR(objp);
+
+ if (is_byte(obj)) {
+ c_size++;
+ if (c_size == 0) {
+ goto L_overflow_error;
+ }
+ if (!in_clist) {
+ in_clist = 1;
+ v_size++;
+ }
+ p_c_size++;
+ if (!p_in_clist) {
+ p_in_clist = 1;
+ p_v_size++;
+ }
+ }
+ else if (is_binary(obj)) {
+ IO_LIST_VEC_COUNT(obj);
+ }
+ else if (is_list(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ goto L_iter_list; /* on head */
+ }
+ else if (!is_nil(obj)) {
+ goto L_type_error;
+ }
+
+ obj = CDR(objp);
+ if (is_list(obj))
+ goto L_iter_list; /* on tail */
+ else if (is_binary(obj)) { /* binary tail is OK */
+ IO_LIST_VEC_COUNT(obj);
+ }
+ else if (!is_nil(obj)) {
+ goto L_type_error;
+ }
+ }
+ else if (is_binary(obj)) {
+ IO_LIST_VEC_COUNT(obj);
+ }
+ else if (!is_nil(obj)) {
+ goto L_type_error;
+ }
+ }
+
+ total = c_size + b_size;
+ if (total < c_size) {
+ goto L_overflow_error;
+ }
+ *total_size = total;
+
+ DESTROY_ESTACK(s);
+ *vsize = v_size;
+ *csize = c_size;
+ *pvsize = p_v_size;
+ *pcsize = p_c_size;
+ return 0;
+
+ L_type_error:
+ L_overflow_error:
+ DESTROY_ESTACK(s);
+ return 1;
+}
+
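+/* State for the yielding iolist_to_iovec/1 implementation below. When
+ * the reduction budget runs out, the state is copied into a magic
+ * binary (see iol2v_yield()) and the BIF traps, resuming later with
+ * the same state. */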
+typedef struct {
+ Eterm result_head;
+ Eterm result_tail;
+ Eterm input_list;
+
+ UWord acc_size;
+ Binary *acc;
+
+ /* We yield after copying this many bytes into the accumulator (minus
+ * a few spent on consing etc). Large binaries only count to the
+ * extent that their split (if any) resulted in a copy op. */
+ UWord bytereds_available;
+ UWord bytereds_spent;
+
+ Process *process;
+ ErtsEStack estack;
+
+ Eterm magic_reference;
+} iol2v_state_t;
+
+static int iol2v_state_destructor(Binary *data) {
+ iol2v_state_t *state = ERTS_MAGIC_BIN_UNALIGNED_DATA(data);
+
+ DESTROY_SAVED_ESTACK(&state->estack);
+
+ if (state->acc != NULL) {
+ erts_bin_free(state->acc);
+ }
+
+ return 1;
+}
+
+static void iol2v_init(iol2v_state_t *state, Process *process, Eterm input) {
+ state->process = process;
+
+ state->result_head = NIL;
+ state->result_tail = NIL;
+ state->input_list = input;
+
+ state->magic_reference = NIL;
+ state->acc_size = 0;
+ state->acc = NULL;
+
+ CLEAR_SAVED_ESTACK(&state->estack);
+}
+
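+/* Build a sub-binary term of 'size' bytes starting 'offset' bytes into
+ * bin_term, which must be a byte-aligned binary. */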
+static Eterm iol2v_make_sub_bin(iol2v_state_t *state, Eterm bin_term,
+ UWord offset, UWord size) {
+ Uint byte_offset, bit_offset, bit_size;
+ ErlSubBin *sb;
+ Eterm orig_pb_term;
+
+ sb = (ErlSubBin*)HAlloc(state->process, ERL_SUB_BIN_SIZE);
+
+ ERTS_GET_REAL_BIN(bin_term, orig_pb_term,
+ byte_offset, bit_offset, bit_size);
+
+ (void)bit_offset;
+ (void)bit_size;
+
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->bitsize = 0;
+ sb->bitoffs = 0;
+ sb->orig = orig_pb_term;
+ sb->is_writable = 0;
+
+ sb->offs = byte_offset + offset;
+ sb->size = size;
+
+ return make_binary(sb);
+}
+
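+/* Shrink the accumulator binary to its used size, wrap it in a ProcBin
+ * on the process heap and reset the accumulator, returning the new
+ * binary term. */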
+static Eterm iol2v_promote_acc(iol2v_state_t *state) {
+ ProcBin *pb;
+
+ state->acc = erts_bin_realloc(state->acc, state->acc_size);
+
+ pb = (ProcBin*)HAlloc(state->process, PROC_BIN_SIZE);
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = state->acc_size;
+ pb->val = state->acc;
+ pb->bytes = (byte*)(state->acc)->orig_bytes;
+ pb->flags = 0;
+ pb->next = MSO(state->process).first;
+ OH_OVERHEAD(&(MSO(state->process)), pb->size / sizeof(Eterm));
+ MSO(state->process).first = (struct erl_off_heap_header*)pb;
+
+ state->acc_size = 0;
+ state->acc = NULL;
+
+ return make_binary(pb);
+}
+
+/* Destructively enqueues a term to the result list, saving us the hassle of
+ * having to reverse it later. This is safe since GC is disabled and we never
+ * leak the unfinished term to the outside. */
+static void iol2v_enqueue_result(iol2v_state_t *state, Eterm term) {
+ Eterm prev_tail;
+ Eterm *hp;
+
+ prev_tail = state->result_tail;
+
+ hp = HAlloc(state->process, 2);
+ state->result_tail = CONS(hp, term, NIL);
+
+ if(prev_tail != NIL) {
+ Eterm *prev_cell = list_val(prev_tail);
+ CDR(prev_cell) = state->result_tail;
+ } else {
+ state->result_head = state->result_tail;
+ }
+
+ state->bytereds_spent += 1;
+}
+
+#ifndef DEBUG
+ #define ACC_REALLOCATION_LIMIT (IOL2V_SMALL_BIN_LIMIT * 32)
+#else
+ #define ACC_REALLOCATION_LIMIT (IOL2V_SMALL_BIN_LIMIT * 4)
+#endif
+
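+/* Make room for 'extra' more bytes in the accumulator, growing it
+ * geometrically. Once the allocation reaches ACC_REALLOCATION_LIMIT,
+ * the current accumulator is flushed to the result list instead of
+ * being reallocated again (see the comment inside). */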
+static void iol2v_expand_acc(iol2v_state_t *state, UWord extra) {
+ UWord required_bytes, acc_alloc_size;
+
+ ERTS_CT_ASSERT(ERTS_UWORD_MAX > ACC_REALLOCATION_LIMIT / 2);
+ ASSERT(extra >= 1);
+
+ acc_alloc_size = state->acc != NULL ? (state->acc)->orig_size : 0;
+ required_bytes = state->acc_size + extra;
+
+ if (state->acc == NULL) {
+ UWord new_size = MAX(required_bytes, IOL2V_SMALL_BIN_LIMIT);
+
+ state->acc = erts_bin_nrml_alloc(new_size);
+ } else if (required_bytes > acc_alloc_size) {
+ Binary *prev_acc;
+ UWord new_size;
+
+ if (acc_alloc_size >= ACC_REALLOCATION_LIMIT) {
+ /* We skip reallocating once we hit a certain point; it often
+ * results in extra copying and we're very likely to overallocate
+ * on anything other than absurdly long byte/heapbin sequences. */
+ iol2v_enqueue_result(state, iol2v_promote_acc(state));
+ iol2v_expand_acc(state, extra);
+ return;
+ }
+
+ new_size = MAX(required_bytes, acc_alloc_size * 2);
+ prev_acc = state->acc;
+
+ state->acc = erts_bin_realloc(prev_acc, new_size);
+
+ if (prev_acc != state->acc) {
+ state->bytereds_spent += state->acc_size;
+ }
+ }
+
+ state->bytereds_spent += extra;
+}
+
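+/* Copy a leading run of small-integer list elements into the
+ * accumulator, bounded by the remaining reduction budget. Returns 0
+ * (badarg) if any element in the run is not a byte, otherwise 1 with
+ * *seq_end set to the first cell that was not consumed. */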
+static int iol2v_append_byte_seq(iol2v_state_t *state, Eterm seq_start, Eterm *seq_end) {
+ Eterm lookahead, iterator;
+ Uint observed_bits;
+ SWord seq_length;
+ char *acc_data;
+
+ lookahead = seq_start;
+ seq_length = 0;
+
+ ASSERT(state->bytereds_available > state->bytereds_spent);
+
+ while (is_list(lookahead)) {
+ Eterm *cell = list_val(lookahead);
+
+ if (!is_small(CAR(cell))) {
+ break;
+ }
+
+ if (seq_length * 2 >= (state->bytereds_available - state->bytereds_spent)) {
+ break;
+ }
+
+ lookahead = CDR(cell);
+ seq_length += 1;
+ }
+
+ ASSERT(seq_length >= 1);
+
+ iol2v_expand_acc(state, seq_length);
+
+ /* Bump a few extra reductions to account for list traversal. */
+ state->bytereds_spent += seq_length;
+
+ acc_data = &(state->acc)->orig_bytes[state->acc_size];
+ state->acc_size += seq_length;
+
+ iterator = seq_start;
+ observed_bits = 0;
+
+ while (iterator != lookahead) {
+ Eterm *cell;
+ Uint byte;
+
+ cell = list_val(iterator);
+ iterator = CDR(cell);
+
+ byte = unsigned_val(CAR(cell));
+ observed_bits |= byte;
+
+ ASSERT(acc_data < &(state->acc)->orig_bytes[state->acc_size]);
+ *(acc_data++) = byte;
+ }
+
+ if (observed_bits > UCHAR_MAX) {
+ return 0;
+ }
+
+ ASSERT(acc_data == &(state->acc)->orig_bytes[state->acc_size]);
+ *seq_end = iterator;
+
+ return 1;
+}
+
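+/* Append one binary to the result. Small binaries are copied into the
+ * accumulator; large ones are either emitted as-is or split so that
+ * the pending accumulator still reaches IOL2V_SMALL_BIN_LIMIT before
+ * being flushed. Returns 0 on bitstrings (badarg), otherwise 1. */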
+static int iol2v_append_binary(iol2v_state_t *state, Eterm bin_term) {
+ int is_acc_small, is_bin_small;
+ UWord combined_size;
+ UWord binary_size;
+
+ Uint byte_offset, bit_offset, bit_size;
+ byte *binary_data;
+
+ Eterm *parent_header;
+ Eterm parent_binary;
+
+ ASSERT(state->bytereds_available > state->bytereds_spent);
+
+ ERTS_GET_REAL_BIN(bin_term, parent_binary, byte_offset, bit_offset, bit_size);
+ parent_header = binary_val(parent_binary);
+ binary_size = binary_size(bin_term);
+
+ if (bit_offset != 0 || bit_size != 0) {
+ return 0;
+ } else if (binary_size == 0) {
+ state->bytereds_spent += 1;
+ return 1;
+ }
+
+ is_acc_small = state->acc_size < IOL2V_SMALL_BIN_LIMIT;
+ is_bin_small = binary_size < IOL2V_SMALL_BIN_LIMIT;
+ combined_size = binary_size + state->acc_size;
+
+ if (thing_subtag(*parent_header) == REFC_BINARY_SUBTAG) {
+ ProcBin *pb = (ProcBin*)parent_header;
+
+ if (pb->flags) {
+ erts_emasculate_writable_binary(pb);
+ }
+
+ binary_data = &((byte*)pb->bytes)[byte_offset];
+ } else {
+ ErlHeapBin *hb = (ErlHeapBin*)parent_header;
+
+ ASSERT(thing_subtag(*parent_header) == HEAP_BINARY_SUBTAG);
+ ASSERT(is_bin_small);
+
+ binary_data = &((byte*)&hb->data)[byte_offset];
+ }
+
+ if (!is_bin_small && (state->acc_size == 0 || !is_acc_small)) {
+ /* Avoid combining if we encounter an acceptably large binary while the
+ * accumulator is either empty or large enough to be returned on its
+ * own. */
+ if (state->acc_size != 0) {
+ iol2v_enqueue_result(state, iol2v_promote_acc(state));
+ }
+
+ iol2v_enqueue_result(state, bin_term);
+ } else if (is_bin_small || combined_size < (IOL2V_SMALL_BIN_LIMIT * 2)) {
+ /* If the candidate is small or we can't split the combination in two,
+ * then just copy it into the accumulator. */
+ iol2v_expand_acc(state, binary_size);
+
+ sys_memcpy(&(state->acc)->orig_bytes[state->acc_size],
+ binary_data, binary_size);
+
+ state->acc_size += binary_size;
+ } else {
+ /* Otherwise, append enough data for the accumulator to be valid, and
+ * then return the rest as a sub-binary. */
+ UWord spill = IOL2V_SMALL_BIN_LIMIT - state->acc_size;
+ Eterm binary_tail;
+
+ iol2v_expand_acc(state, spill);
+
+ sys_memcpy(&(state->acc)->orig_bytes[state->acc_size],
+ binary_data, spill);
+
+ state->acc_size += spill;
+
+ binary_tail = iol2v_make_sub_bin(state, bin_term, spill,
+ binary_size - spill);
+
+ iol2v_enqueue_result(state, iol2v_promote_acc(state));
+ iol2v_enqueue_result(state, binary_tail);
+ }
+
+ return 1;
+}
+
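+/* Yields back to the scheduler. On the first yield the state is copied
+ * into a magic binary so that it survives the trap; later yields reuse
+ * that same magic reference. */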
+static BIF_RETTYPE iol2v_yield(iol2v_state_t *state) {
+ if (is_nil(state->magic_reference)) {
+ iol2v_state_t *boxed_state;
+ Binary *magic_binary;
+ Eterm *hp;
+
+ magic_binary = erts_create_magic_binary_x(sizeof(*state),
+ &iol2v_state_destructor, ERTS_ALC_T_BINARY, 1);
+
+ boxed_state = ERTS_MAGIC_BIN_UNALIGNED_DATA(magic_binary);
+ sys_memcpy(boxed_state, state, sizeof(*state));
+
+ hp = HAlloc(boxed_state->process, ERTS_MAGIC_REF_THING_SIZE);
+ boxed_state->magic_reference =
+ erts_mk_magic_ref(&hp, &MSO(boxed_state->process), magic_binary);
+
+ state = boxed_state;
+ }
+
+ ERTS_BIF_YIELD1(bif_export[BIF_iolist_to_iovec_1],
+ state->process, state->magic_reference);
+}
+
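+/* Drives the conversion. The reduction budget is measured in bytes,
+ * where one reduction is worth IOL2V_SMALL_BIN_LIMIT bytes of work
+ * (see the BUMP_REDS call at the end). */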
+static BIF_RETTYPE iol2v_continue(iol2v_state_t *state) {
+ Eterm iterator;
+
+ DECLARE_ESTACK(s);
+ ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
+
+ state->bytereds_available =
+ ERTS_BIF_REDS_LEFT(state->process) * IOL2V_SMALL_BIN_LIMIT;
+ state->bytereds_spent = 0;
+
+ if (state->estack.start) {
+ ESTACK_RESTORE(s, &state->estack);
+ }
+
+ iterator = state->input_list;
+
+ for(;;) {
+ if (state->bytereds_spent >= state->bytereds_available) {
+ ESTACK_SAVE(s, &state->estack);
+ state->input_list = iterator;
+
+ return iol2v_yield(state);
+ }
+
+ while (is_list(iterator)) {
+ Eterm *cell;
+ Eterm head;
+
+ cell = list_val(iterator);
+ head = CAR(cell);
+
+ if (is_binary(head)) {
+ if (!iol2v_append_binary(state, head)) {
+ goto l_badarg;
+ }
+
+ iterator = CDR(cell);
+ } else if (is_small(head)) {
+ Eterm seq_end;
+
+ if (!iol2v_append_byte_seq(state, iterator, &seq_end)) {
+ goto l_badarg;
+ }
+
+ iterator = seq_end;
+ } else if (is_list(head) || is_nil(head)) {
+ Eterm tail = CDR(cell);
+
+ if (!is_nil(tail)) {
+ ESTACK_PUSH(s, tail);
+ }
+
+ state->bytereds_spent += 1;
+ iterator = head;
+ } else {
+ goto l_badarg;
+ }
+
+ if (state->bytereds_spent >= state->bytereds_available) {
+ ESTACK_SAVE(s, &state->estack);
+ state->input_list = iterator;
+
+ return iol2v_yield(state);
+ }
+ }
+
+ if (is_binary(iterator)) {
+ if (!iol2v_append_binary(state, iterator)) {
+ goto l_badarg;
+ }
+ } else if (!is_nil(iterator)) {
+ goto l_badarg;
+ }
+
+ if(ESTACK_ISEMPTY(s)) {
+ break;
+ }
+
+ iterator = ESTACK_POP(s);
+ }
+
+ if (state->acc_size != 0) {
+ iol2v_enqueue_result(state, iol2v_promote_acc(state));
+ }
+
+ BUMP_REDS(state->process, state->bytereds_spent / IOL2V_SMALL_BIN_LIMIT);
+
+ CLEAR_SAVED_ESTACK(&state->estack);
+ DESTROY_ESTACK(s);
+
+ BIF_RET(state->result_head);
+
+l_badarg:
+ CLEAR_SAVED_ESTACK(&state->estack);
+ DESTROY_ESTACK(s);
+
+ if (state->acc != NULL) {
+ erts_bin_free(state->acc);
+ state->acc = NULL;
+ }
+
+ BIF_ERROR(state->process, BADARG);
+}
+
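+/* Illustrative shell session; since small binaries are combined into the
+ * accumulator, the exact segmentation of the result is an implementation
+ * detail rather than a guarantee:
+ *
+ *   1> erlang:iolist_to_iovec([<<"abc">>, [$d, $e], <<"f">>]).
+ *   [<<"abcdef">>]
+ */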
+HIPE_WRAPPER_BIF_DISABLE_GC(iolist_to_iovec, 1)
+
+BIF_RETTYPE iolist_to_iovec_1(BIF_ALIST_1) {
+ BIF_RETTYPE result;
+
+ if (is_nil(BIF_ARG_1)) {
+ BIF_RET(NIL);
+ } else if (is_binary(BIF_ARG_1)) {
+ if (binary_size(BIF_ARG_1) != 0) {
+ Eterm *hp = HAlloc(BIF_P, 2);
+
+ BIF_RET(CONS(hp, BIF_ARG_1, NIL));
+ } else {
+ BIF_RET(NIL);
+ }
+ } else if (is_internal_magic_ref(BIF_ARG_1)) {
+ iol2v_state_t *state;
+ Binary *magic;
+
+ magic = erts_magic_ref2bin(BIF_ARG_1);
+
+ if (ERTS_MAGIC_BIN_DESTRUCTOR(magic) != &iol2v_state_destructor) {
+ ASSERT(!(BIF_P->flags & F_DISABLE_GC));
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ ASSERT(BIF_P->flags & F_DISABLE_GC);
+
+ state = ERTS_MAGIC_BIN_UNALIGNED_DATA(magic);
+ result = iol2v_continue(state);
+ } else if (!is_list(BIF_ARG_1)) {
+ ASSERT(!(BIF_P->flags & F_DISABLE_GC));
+ BIF_ERROR(BIF_P, BADARG);
+ } else {
+ iol2v_state_t state;
+
+ iol2v_init(&state, BIF_P, BIF_ARG_1);
+
+ erts_set_gc_state(BIF_P, 0);
+
+ result = iol2v_continue(&state);
+ }
+
+ if (result != THE_NON_VALUE || BIF_P->freason != TRAP) {
+ erts_set_gc_state(BIF_P, 1);
+ }
+
+ BIF_RET(result);
+}
diff --git a/erts/emulator/beam/erl_io_queue.h b/erts/emulator/beam/erl_io_queue.h
new file mode 100644
index 0000000000..7d0fe6751c
--- /dev/null
+++ b/erts/emulator/beam/erl_io_queue.h
@@ -0,0 +1,201 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2017. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: A queue used for storing binary data that should be
+ * passed to writev or similar functions. Used by both
+ *              the NIF and driver APIs.
+ *
+ * Author: Lukas Larsson
+ */
+
+#ifndef ERL_IO_QUEUE_H__TYPES__
+#define ERL_IO_QUEUE_H__TYPES__
+
+#define ERTS_BINARY_TYPES_ONLY__
+#include "erl_binary.h"
+#undef ERTS_BINARY_TYPES_ONLY__
+#include "erl_nif.h"
+
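+/* Largest number of bytes a single iovec entry may describe. The DEBUG
+ * variant pins the limit to 2^31 regardless of the platform's iov_len
+ * width, presumably so that the splitting paths are exercised on 64-bit
+ * builds as well. */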
+#ifdef DEBUG
+#define MAX_SYSIOVEC_IOVLEN (1ull << (32 - 1))
+#else
+#define MAX_SYSIOVEC_IOVLEN (1ull << (sizeof(((SysIOVec*)0)->iov_len) * 8 - 1))
+#endif
+
+#define ERTS_SMALL_IO_QUEUE 5
+
+typedef union {
+ ErlDrvBinary driver;
+ Binary nif;
+} ErtsIOQBinary;
+
+typedef struct {
+ int vsize; /* length of vectors */
+ Uint size; /* total size in bytes */
+ SysIOVec* iov;
+ ErtsIOQBinary** binv;
+} ErtsIOVecCommon;
+
+typedef union {
+ ErtsIOVecCommon common;
+ ErlIOVec driver;
+ ErlNifIOVec nif;
+} ErtsIOVec;
+
+/* head/tail represent the data in the queue
+ * start/end represent the edges of the allocated queue
+ * small is used when the number of iovec elements is < ERTS_SMALL_IO_QUEUE
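+ *
+ * Presumed invariant given the layout below: v_start <= v_head <= v_tail
+ * <= v_end, with the b_* pointers mirroring v_* so that each iovec slot
+ * is backed by the binary at the same index.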
+ */
+typedef struct erts_io_queue {
+ ErtsAlcType_t alct;
+ int driver;
+ Uint size; /* total size in bytes */
+
+ SysIOVec* v_start;
+ SysIOVec* v_end;
+ SysIOVec* v_head;
+ SysIOVec* v_tail;
+ SysIOVec v_small[ERTS_SMALL_IO_QUEUE];
+
+ ErtsIOQBinary **b_start;
+ ErtsIOQBinary **b_end;
+ ErtsIOQBinary **b_head;
+ ErtsIOQBinary **b_tail;
+ ErtsIOQBinary *b_small[ERTS_SMALL_IO_QUEUE];
+
+} ErtsIOQueue;
+
+#endif /* ERL_IO_QUEUE_H__TYPES__ */
+
+#if !defined(ERL_IO_QUEUE_H) && !defined(ERTS_IO_QUEUE_TYPES_ONLY__)
+#define ERL_IO_QUEUE_H
+
+#include "erl_binary.h"
+#include "erl_bits.h"
+
+void erts_ioq_init(ErtsIOQueue *q, ErtsAlcType_t alct, int driver);
+void erts_ioq_clear(ErtsIOQueue *q);
+Uint erts_ioq_size(ErtsIOQueue *q);
+int erts_ioq_enqv(ErtsIOQueue *q, ErtsIOVec *vec, Uint skip);
+int erts_ioq_pushqv(ErtsIOQueue *q, ErtsIOVec *vec, Uint skip);
+int erts_ioq_deq(ErtsIOQueue *q, Uint size);
+Uint erts_ioq_peekqv(ErtsIOQueue *q, ErtsIOVec *ev);
+SysIOVec *erts_ioq_peekq(ErtsIOQueue *q, int *vlenp);
+Uint erts_ioq_sizeq(ErtsIOQueue *q);
+
+int erts_ioq_iolist_vec_len(Eterm obj, int* vsize, Uint* csize,
+ Uint* pvsize, Uint* pcsize,
+ size_t* total_size, Uint blimit);
+int erts_ioq_iolist_to_vec(Eterm obj, SysIOVec* iov,
+ ErtsIOQBinary** binv, ErtsIOQBinary* cbin,
+ Uint bin_limit, int driver_binary);
+
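+/* Minimal usage sketch (hypothetical: the allocator type is a placeholder
+ * and error handling is omitted):
+ *
+ *   ErtsIOQueue q;
+ *   erts_ioq_init(&q, ERTS_ALC_T_UNDEF, 1);    -- driver-style queue
+ *   erts_ioq_enqv(&q, vec, 0);                 -- enqueue vec, skip 0 bytes
+ *   while (erts_ioq_size(&q) > 0) {
+ *       int vlen;
+ *       SysIOVec *iov = erts_ioq_peekq(&q, &vlen);
+ *       -- hand (iov, vlen) to writev() or similar, then:
+ *       erts_ioq_deq(&q, bytes_written);
+ *   }
+ *   erts_ioq_clear(&q);
+ */
+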
+ERTS_GLB_INLINE
+int erts_ioq_iodata_vec_len(Eterm obj, int* vsize, Uint* csize,
+ Uint* pvsize, Uint* pcsize,
+ size_t* total_size, Uint blimit);
+ERTS_GLB_INLINE
+int erts_ioq_iodata_to_vec(Eterm obj, SysIOVec* iov,
+ ErtsIOQBinary** binv, ErtsIOQBinary* cbin,
+ Uint bin_limit, int driver_binary);
+
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE
+int erts_ioq_iodata_vec_len(Eterm obj, int* vsize, Uint* csize,
+ Uint* pvsize, Uint* pcsize,
+ size_t* total_size, Uint blimit) {
+ if (is_binary(obj)) {
+ /* We optimize for when we get a procbin without a bit-offset
+ * that fits in one iov slot
+ */
+ Eterm real_bin;
+ byte bitoffs;
+ byte bitsize;
+ ERTS_DECLARE_DUMMY(Uint offset);
+ Uint size = binary_size(obj);
+ ERTS_GET_REAL_BIN(obj, real_bin, offset, bitoffs, bitsize);
+ if (size < MAX_SYSIOVEC_IOVLEN && bitoffs == 0 && bitsize == 0) {
+ *vsize = 1;
+ *pvsize = 1;
+ if (thing_subtag(*binary_val(real_bin)) == REFC_BINARY_SUBTAG) {
+ *csize = 0;
+ *pcsize = 0;
+ } else {
+ *csize = size;
+ *pcsize = size;
+ }
+ *total_size = size;
+ return 0;
+ }
+ }
+
+ return erts_ioq_iolist_vec_len(obj, vsize, csize,
+ pvsize, pcsize, total_size, blimit);
+}
+
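+/* Fills (iov)/(binv) from iodata, taking the same single-binary fast path
+ * as the length function above; returns 1 (the number of slots used) on
+ * that path and otherwise defers to the generic iolist walker. */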
+ERTS_GLB_INLINE
+int erts_ioq_iodata_to_vec(Eterm obj,
+ SysIOVec *iov,
+ ErtsIOQBinary **binv,
+ ErtsIOQBinary *cbin,
+ Uint bin_limit,
+ int driver)
+{
+ if (is_binary(obj)) {
+ Eterm real_bin;
+ byte bitoffs;
+ byte bitsize;
+ Uint offset;
+ Uint size = binary_size(obj);
+ ERTS_GET_REAL_BIN(obj, real_bin, offset, bitoffs, bitsize);
+ if (size < MAX_SYSIOVEC_IOVLEN && bitoffs == 0 && bitsize == 0) {
+ Eterm *bptr = binary_val(real_bin);
+ if (thing_subtag(*bptr) == REFC_BINARY_SUBTAG) {
+ ProcBin *pb = (ProcBin *)bptr;
+ if (pb->flags)
+ erts_emasculate_writable_binary(pb);
+ iov[0].iov_base = pb->bytes+offset;
+ iov[0].iov_len = size;
+ if (driver)
+ binv[0] = (ErtsIOQBinary*)Binary2ErlDrvBinary(pb->val);
+ else
+ binv[0] = (ErtsIOQBinary*)pb->val;
+ return 1;
+ } else {
+ ErlHeapBin* hb = (ErlHeapBin *)bptr;
+ byte *buf = driver ? (byte*)cbin->driver.orig_bytes :
+ (byte*)cbin->nif.orig_bytes;
+ copy_binary_to_buffer(buf, 0, ((byte *) hb->data)+offset, 0, 8*size);
+ iov[0].iov_base = buf;
+ iov[0].iov_len = size;
+ binv[0] = cbin;
+ return 1;
+ }
+ }
+ }
+ return erts_ioq_iolist_to_vec(obj, iov, binv, cbin, bin_limit, driver);
+}
+
+#endif
+
+#endif /* ERL_IO_QUEUE_H */
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 7e3a90779d..4cdef0200f 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -74,12 +75,9 @@ static erts_lc_lock_order_t erts_lock_order[] = {
* if only one lock use
* the lock name)"
*/
-#ifdef ERTS_SMP
{ "driver_lock", "driver_name" },
{ "port_lock", "port_id" },
-#endif
{ "port_data_lock", "address" },
-#ifdef ERTS_SMP
{ "bif_timers", NULL },
{ "reg_tab", NULL },
{ "proc_main", "pid" },
@@ -88,19 +86,20 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "hipe_mfait_lock", NULL },
#endif
{ "nodes_monitors", NULL },
+ { "resource_monitors", "address" },
{ "driver_list", NULL },
{ "proc_link", "pid" },
{ "proc_msgq", "pid" },
+ { "proc_btm", "pid" },
{ "dist_entry", "address" },
{ "dist_entry_links", "address" },
{ "code_write_permission", NULL },
- { "proc_status", "pid" },
- { "ports_snapshot", NULL },
+ { "purge_state", NULL },
{ "meta_name_tab", "address" },
- { "meta_main_tab_slot", "address" },
{ "db_tab", "address" },
+ { "proc_status", "pid" },
+ { "proc_trace", "pid" },
{ "db_tab_fix", "address" },
- { "meta_main_tab_main", NULL },
{ "db_hash_slot", "address" },
{ "node_table", NULL },
{ "dist_table", NULL },
@@ -109,23 +108,19 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "export_tab", NULL },
{ "fun_tab", NULL },
{ "environ", NULL },
-#endif
+ { "release_literal_areas", NULL },
{ "efile_drv", "address" },
-#if defined(ENABLE_CHILD_WAITER_THREAD) || defined(ERTS_SMP)
- { "child_status", NULL },
-#endif
{ "drv_ev_state_grow", NULL, },
{ "drv_ev_state", "address" },
{ "safe_hash", "address" },
- { "pollset_rm_list", NULL },
{ "removed_fd_pre_alloc_lock", "address" },
{ "state_prealloc", NULL },
{ "schdlr_sspnd", NULL },
{ "migration_info_update", NULL },
{ "run_queue", "address" },
-#ifdef ERTS_DIRTY_SCHEDULERS
{ "dirty_run_queue_sleep_list", "address" },
-#endif
+ { "dirty_gc_info", NULL },
+ { "dirty_break_point_index", NULL },
{ "process_table", NULL },
{ "cpu_info", NULL },
{ "pollset", "address" },
@@ -138,42 +133,37 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "mmap_init_atoms", NULL },
{ "drv_tsd", NULL },
{ "async_enq_mtx", NULL },
-#ifdef ERTS_SMP
- { "sys_msg_q", NULL },
+ { "msacc_list_mutex", NULL },
+ { "msacc_unmanaged_mutex", NULL },
{ "atom_tab", NULL },
- { "make_ref", NULL },
{ "misc_op_list_pre_alloc_lock", "address" },
{ "message_pre_alloc_lock", "address" },
{ "ptimer_pre_alloc_lock", "address", },
{ "btm_pre_alloc_lock", NULL, },
{ "dist_entry_out_queue", "address" },
{ "port_sched_lock", "port_id" },
+ { "sys_msg_q", NULL },
+ { "tracer_mtx", NULL },
{ "port_table", NULL },
-#endif
+ { "magic_ref_table", "address" },
{ "mtrace_op", NULL },
{ "instr_x", NULL },
{ "instr", NULL },
+ { "pollsets_lock", NULL },
{ "alcu_allocator", "index" },
{ "mseg", NULL },
-#if HALFWORD_HEAP
- { "pmmap", NULL },
-#endif
-#ifdef ERTS_SMP
{ "port_task_pre_alloc_lock", "address" },
{ "proclist_pre_alloc_lock", "address" },
{ "xports_list_pre_alloc_lock", "address" },
{ "inet_buffer_stack_lock", NULL },
- { "gc_info", NULL },
- { "io_wake", NULL },
- { "timer_wheel", NULL },
{ "system_block", NULL },
- { "timeofday", NULL },
+ { "get_time", NULL },
+ { "get_corrected_time", NULL },
+ { "runtime", NULL },
{ "breakpoints", NULL },
- { "pollsets_lock", NULL },
{ "pix_lock", "address" },
{ "run_queues_lists", NULL },
{ "sched_stat", NULL },
-#endif
{ "async_init_mtx", NULL },
#ifdef __WIN32__
#ifdef DEBUG
@@ -184,11 +174,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "efile_drv dtrace mutex", NULL },
#endif
{ "mtrace_buf", NULL },
-#ifdef __WIN32__
-#ifdef ERTS_SMP
- { "sys_gethrtime", NULL },
-#endif
-#endif
+ { "os_monotonic_time", NULL },
{ "erts_alloc_hard_debug", NULL },
{ "hard_dbg_mseg", NULL },
{ "erts_mmap", NULL }
@@ -197,42 +183,20 @@ static erts_lc_lock_order_t erts_lock_order[] = {
#define ERTS_LOCK_ORDER_SIZE \
(sizeof(erts_lock_order)/sizeof(erts_lc_lock_order_t))
-#define LOCK_IS_TYPE_ORDER_VIOLATION(LCK_FLG, LCKD_FLG) \
- (((LCKD_FLG) & (ERTS_LC_FLG_LT_SPINLOCK|ERTS_LC_FLG_LT_RWSPINLOCK)) \
- && ((LCK_FLG) \
- & ERTS_LC_FLG_LT_ALL \
- & ~(ERTS_LC_FLG_LT_SPINLOCK|ERTS_LC_FLG_LT_RWSPINLOCK)))
+#define LOCK_IS_TYPE_ORDER_VIOLATION(LCK_FLG, LCKD_FLG) \
+ (((LCKD_FLG) & ERTS_LOCK_FLAGS_MASK_TYPE) == ERTS_LOCK_FLAGS_TYPE_SPINLOCK \
+ && \
+ ((LCK_FLG) & ERTS_LOCK_FLAGS_MASK_TYPE) != ERTS_LOCK_FLAGS_TYPE_SPINLOCK)
static __decl_noreturn void __noreturn lc_abort(void);
-static char *
-lock_type(Uint16 flags)
+static const char *rw_op_str(erts_lock_options_t options)
{
- switch (flags & ERTS_LC_FLG_LT_ALL) {
- case ERTS_LC_FLG_LT_SPINLOCK: return "[spinlock]";
- case ERTS_LC_FLG_LT_RWSPINLOCK: return "[rw(spin)lock]";
- case ERTS_LC_FLG_LT_MUTEX: return "[mutex]";
- case ERTS_LC_FLG_LT_RWMUTEX: return "[rwmutex]";
- case ERTS_LC_FLG_LT_PROCLOCK: return "[proclock]";
- default: return "";
+ if(options == ERTS_LOCK_OPTIONS_WRITE) {
+ ERTS_INTERNAL_ERROR("Only write flag present");
}
-}
-static char *
-rw_op_str(Uint16 flags)
-{
- switch (flags & ERTS_LC_FLG_LO_READ_WRITE) {
- case ERTS_LC_FLG_LO_READ_WRITE:
- return " (rw)";
- case ERTS_LC_FLG_LO_READ:
- return " (r)";
- case ERTS_LC_FLG_LO_WRITE:
- erts_fprintf(stderr, "\nInternal error\n");
- lc_abort();
- default:
- break;
- }
- return "";
+ return erts_lock_options_get_short_desc(options);
}
typedef struct erts_lc_locked_lock_t_ erts_lc_locked_lock_t;
@@ -243,7 +207,8 @@ struct erts_lc_locked_lock_t_ {
Sint16 id;
char *file;
unsigned int line;
- Uint16 flags;
+ erts_lock_flags_t flags;
+ erts_lock_options_t taken_options;
};
typedef struct {
@@ -270,9 +235,9 @@ union erts_lc_free_block_t_ {
static ethr_tsd_key locks_key;
-static erts_lc_locked_locks_t *erts_locked_locks;
+static erts_lc_locked_locks_t *erts_locked_locks = NULL;
-static erts_lc_free_block_t *free_blocks;
+static erts_lc_free_block_t *free_blocks = NULL;
#ifdef ERTS_LC_STATIC_ALLOC
#define ERTS_LC_FB_CHUNK_SIZE 10000
@@ -311,8 +276,7 @@ static ERTS_INLINE void lc_free(void *p)
static void *lc_core_alloc(void)
{
lc_unlock();
- erts_fprintf(stderr, "Lock checker out of memory!\n");
- lc_abort();
+ ERTS_INTERNAL_ERROR("Lock checker out of memory!\n");
}
#else
@@ -325,8 +289,7 @@ static void *lc_core_alloc(void)
fbs = (erts_lc_free_block_t *) malloc(sizeof(erts_lc_free_block_t)
* ERTS_LC_FB_CHUNK_SIZE);
if (!fbs) {
- erts_fprintf(stderr, "Lock checker failed to allocate memory!\n");
- lc_abort();
+ ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");
}
for (i = 1; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) {
#ifdef DEBUG
@@ -366,11 +329,11 @@ create_locked_locks(char *thread_name)
{
erts_lc_locked_locks_t *l_lcks = malloc(sizeof(erts_lc_locked_locks_t));
if (!l_lcks)
- lc_abort();
+ ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");
l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
if (!l_lcks->thread_name)
- lc_abort();
+ ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");
l_lcks->emu_thread = 0;
l_lcks->tid = erts_thr_self();
@@ -432,7 +395,7 @@ make_my_locked_locks(void)
}
static ERTS_INLINE erts_lc_locked_lock_t *
-new_locked_lock(erts_lc_lock_t *lck, Uint16 op_flags,
+new_locked_lock(erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line)
{
erts_lc_locked_lock_t *l_lck = (erts_lc_locked_lock_t *) lc_alloc();
@@ -442,12 +405,13 @@ new_locked_lock(erts_lc_lock_t *lck, Uint16 op_flags,
l_lck->extra = lck->extra;
l_lck->file = file;
l_lck->line = line;
- l_lck->flags = lck->flags | op_flags;
+ l_lck->flags = lck->flags;
+ l_lck->taken_options = options;
return l_lck;
}
static void
-raw_print_lock(char *prefix, Sint16 id, Wterm extra, Uint16 flags,
+raw_print_lock(char *prefix, Sint16 id, Wterm extra, erts_lock_flags_t flags,
char* file, unsigned int line, char *suffix)
{
char *lname = (0 <= id && id < ERTS_LOCK_ORDER_SIZE
@@ -459,16 +423,16 @@ raw_print_lock(char *prefix, Sint16 id, Wterm extra, Uint16 flags,
erts_fprintf(stderr,"%p",_unchecked_boxed_val(extra));
else
erts_fprintf(stderr,"%T",extra);
- erts_fprintf(stderr,"%s",lock_type(flags));
+ erts_fprintf(stderr,"[%s]",erts_lock_flags_get_type_name(flags));
if (file)
erts_fprintf(stderr,"(%s:%d)",file,line);
- erts_fprintf(stderr,"'%s%s",rw_op_str(flags),suffix);
+ erts_fprintf(stderr,"'(%s)%s",rw_op_str(flags),suffix);
}
static void
-print_lock2(char *prefix, Sint16 id, Wterm extra, Uint16 flags, char *suffix)
+print_lock2(char *prefix, Sint16 id, Wterm extra, erts_lock_flags_t flags, char *suffix)
{
raw_print_lock(prefix, id, extra, flags, NULL, 0, suffix);
}
@@ -523,9 +487,9 @@ uninitialized_lock(void)
static void
lock_twice(char *prefix, erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
- Uint16 op_flags)
+ erts_lock_options_t options)
{
- erts_fprintf(stderr, "%s%s", prefix, rw_op_str(op_flags));
+ erts_fprintf(stderr, "%s (%s)", prefix, rw_op_str(options));
print_lock(" ", lck, " lock which is already locked by thread!\n");
print_curr_locks(l_lcks);
lc_abort();
@@ -533,9 +497,9 @@ lock_twice(char *prefix, erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
static void
unlock_op_mismatch(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
- Uint16 op_flags)
+ erts_lock_options_t options)
{
- erts_fprintf(stderr, "Unlocking%s ", rw_op_str(op_flags));
+ erts_fprintf(stderr, "Unlocking (%s) ", rw_op_str(options));
print_lock("", lck, " lock which mismatch previous lock operation!\n");
print_curr_locks(l_lcks);
lc_abort();
@@ -691,7 +655,7 @@ erts_lc_set_thread_name(char *thread_name)
free((void *) l_lcks->thread_name);
l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
if (!l_lcks->thread_name)
- lc_abort();
+ ERTS_INTERNAL_ERROR("strdup failed");
}
l_lcks->emu_thread = 1;
}
@@ -746,84 +710,128 @@ erts_lc_get_lock_order_id(char *name)
return (Sint16) -1;
}
+static int compare_locked_by_id(erts_lc_locked_lock_t *locked_lock, erts_lc_lock_t *comparand)
+{
+ if(locked_lock->id < comparand->id) {
+ return -1;
+ } else if(locked_lock->id > comparand->id) {
+ return 1;
+ }
-static int
-find_lock(erts_lc_locked_lock_t **l_lcks, erts_lc_lock_t *lck)
+ return 0;
+}
+
+static int compare_locked_by_id_extra(erts_lc_locked_lock_t *locked_lock, erts_lc_lock_t *comparand)
{
- erts_lc_locked_lock_t *l_lck = *l_lcks;
+ int order = compare_locked_by_id(locked_lock, comparand);
+
+ if(order) {
+ return order;
+ } else if(locked_lock->extra < comparand->extra) {
+ return -1;
+ } else if(locked_lock->extra > comparand->extra) {
+ return 1;
+ }
- if (l_lck) {
- if (l_lck->id == lck->id && l_lck->extra == lck->extra) {
- if ((l_lck->flags & lck->flags) == lck->flags)
- return 1;
- return 0;
- }
- else if (l_lck->id < lck->id
- || (l_lck->id == lck->id
- && l_lck->extra < lck->extra)) {
- for (l_lck = l_lck->next; l_lck; l_lck = l_lck->next) {
- if (l_lck->id > lck->id
- || (l_lck->id == lck->id
- && l_lck->extra >= lck->extra)) {
- *l_lcks = l_lck;
- if (l_lck->id == lck->id
- && l_lck->extra == lck->extra
- && ((l_lck->flags & lck->flags) == lck->flags))
- return 1;
- return 0;
- }
- }
- }
- else {
- for (l_lck = l_lck->prev; l_lck; l_lck = l_lck->prev) {
- if (l_lck->id < lck->id
- || (l_lck->id == lck->id
- && l_lck->extra <= lck->extra)) {
- *l_lcks = l_lck;
- if (l_lck->id == lck->id
- && l_lck->extra == lck->extra
- && ((l_lck->flags & lck->flags) == lck->flags))
- return 1;
- return 0;
- }
- }
- }
+ return 0;
+}
+
+typedef int (*locked_compare_func)(erts_lc_locked_lock_t *, erts_lc_lock_t *);
+
+/* Searches through a list of taken locks, bailing as soon as it passes the
+ * position where the search template would sort, i.e. when an entry's order
+ * relative to the template flips sign. (*closest_neighbor) is set to the
+ * exact match if one exists, or to the entry closest to it in the sort
+ * order. */
+static int search_locked_list(locked_compare_func compare,
+ erts_lc_locked_lock_t *locked_locks,
+ erts_lc_lock_t *search_template,
+ erts_lc_locked_lock_t **closest_neighbor)
+{
+ erts_lc_locked_lock_t *iterator = locked_locks;
+
+ (*closest_neighbor) = iterator;
+
+ if(iterator) {
+ int relative_order = compare(iterator, search_template);
+
+ if(relative_order < 0) {
+ while((iterator = iterator->next) != NULL) {
+ relative_order = compare(iterator, search_template);
+
+ if(relative_order >= 0) {
+ (*closest_neighbor) = iterator;
+ break;
+ }
+ }
+ } else if(relative_order > 0) {
+ while((iterator = iterator->prev) != NULL) {
+ relative_order = compare(iterator, search_template);
+
+ if(relative_order <= 0) {
+ (*closest_neighbor) = iterator;
+ break;
+ }
+ }
+ }
+
+ return relative_order == 0;
}
+
return 0;
}
+/* Searches for a lock in the given list that matches search_template, and sets
+ * (*locked_locks) to the closest lock in the sort order. */
static int
-find_id(erts_lc_locked_lock_t **l_lcks, Sint16 id)
-{
- erts_lc_locked_lock_t *l_lck = *l_lcks;
-
- if (l_lck) {
- if (l_lck->id == id)
- return 1;
- else if (l_lck->id < id) {
- for (l_lck = l_lck->next; l_lck; l_lck = l_lck->next) {
- if (l_lck->id >= id) {
- *l_lcks = l_lck;
- if (l_lck->id == id)
- return 1;
- return 0;
- }
- }
- }
- else {
- for (l_lck = l_lck->prev; l_lck; l_lck = l_lck->prev) {
- if (l_lck->id <= id) {
- *l_lcks = l_lck;
- if (l_lck->id == id)
- return 1;
- return 0;
- }
- }
- }
+find_lock(erts_lc_locked_lock_t **locked_locks, erts_lc_lock_t *search_template)
+{
+ erts_lc_locked_lock_t *closest_neighbor;
+ int found_lock;
+
+ found_lock = search_locked_list(compare_locked_by_id_extra,
+ (*locked_locks),
+ search_template,
+ &closest_neighbor);
+
+ (*locked_locks) = closest_neighbor;
+
+ if(found_lock) {
+ erts_lock_options_t relevant_options;
+ erts_lock_flags_t relevant_flags;
+
+ /* We only care about the options and flags that are set in the
+ * template. */
+ relevant_options = (closest_neighbor->taken_options & search_template->taken_options);
+ relevant_flags = (closest_neighbor->flags & search_template->flags);
+
+ return search_template->taken_options == relevant_options &&
+ search_template->flags == relevant_flags;
}
+
return 0;
}
+/* Searches for a lock in the given list by id, and sets (*locked_locks) to the
+ * closest lock in the sort order. */
+static int
+find_id(erts_lc_locked_lock_t **locked_locks, Sint16 id)
+{
+ erts_lc_locked_lock_t *closest_neighbor;
+ erts_lc_lock_t search_template;
+ int found_lock;
+
+ search_template.id = id;
+
+ found_lock = search_locked_list(compare_locked_by_id,
+ (*locked_locks),
+ &search_template,
+ &closest_neighbor);
+
+ (*locked_locks) = closest_neighbor;
+
+ return found_lock;
+}
+
void
erts_lc_have_locks(int *resv, erts_lc_lock_t *locks, int len)
{
@@ -919,17 +927,17 @@ erts_lc_check_exact(erts_lc_lock_t *have, int have_len)
}
void
-erts_lc_check_no_locked_of_type(Uint16 flags)
+erts_lc_check_no_locked_of_type(erts_lock_flags_t type)
{
erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
if (l_lcks) {
erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
for (l_lck = l_lcks->locked.first; l_lck; l_lck = l_lck->next) {
- if (l_lck->flags & flags) {
+ if ((l_lck->flags & ERTS_LOCK_FLAGS_MASK_TYPE) == type) {
erts_fprintf(stderr,
"Locked lock of type %s found which isn't "
"allowed here!\n",
- lock_type(l_lck->flags));
+ erts_lock_flags_get_type_name(l_lck->flags));
print_curr_locks(l_lcks);
lc_abort();
}
@@ -938,7 +946,7 @@ erts_lc_check_no_locked_of_type(Uint16 flags)
}
int
-erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
{
#ifdef ERTS_LC_DO_NOT_FORCE_BUSY_TRYLOCK_ON_LOCK_ORDER_VIOLATION
return 0;
@@ -987,7 +995,7 @@ erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags)
if (tl_lck->id < lck->id
|| (tl_lck->id == lck->id && tl_lck->extra <= lck->extra)) {
if (tl_lck->id == lck->id && tl_lck->extra == lck->extra)
- lock_twice("Trylocking", l_lcks, lck, op_flags);
+ lock_twice("Trylocking", l_lcks, lck, options);
break;
}
}
@@ -1009,7 +1017,7 @@ erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags)
#endif
}
-void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags,
+void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line)
{
erts_lc_locked_locks_t *l_lcks;
@@ -1022,7 +1030,7 @@ void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags,
return;
l_lcks = make_my_locked_locks();
- l_lck = locked ? new_locked_lock(lck, op_flags, file, line) : NULL;
+ l_lck = locked ? new_locked_lock(lck, options, file, line) : NULL;
if (!l_lcks->locked.last) {
ASSERT(!l_lcks->locked.first);
@@ -1040,7 +1048,7 @@ void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags,
if (tl_lck->id < lck->id
|| (tl_lck->id == lck->id && tl_lck->extra <= lck->extra)) {
if (tl_lck->id == lck->id && tl_lck->extra == lck->extra)
- lock_twice("Trylocking", l_lcks, lck, op_flags);
+ lock_twice("Trylocking", l_lcks, lck, options);
if (locked) {
l_lck->next = tl_lck->next;
l_lck->prev = tl_lck;
@@ -1063,14 +1071,14 @@ void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags,
}
-void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags,
+void erts_lc_require_lock_flg(erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line)
{
erts_lc_locked_locks_t *l_lcks = make_my_locked_locks();
erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
if (!find_lock(&l_lck, lck))
required_not_locked(l_lcks, lck);
- l_lck = new_locked_lock(lck, op_flags, file, line);
+ l_lck = new_locked_lock(lck, options, file, line);
if (!l_lcks->required.last) {
ASSERT(!l_lcks->required.first);
l_lck->next = l_lck->prev = NULL;
@@ -1110,7 +1118,7 @@ void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags,
}
}
-void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
{
erts_lc_locked_locks_t *l_lcks = make_my_locked_locks();
erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
@@ -1138,7 +1146,7 @@ void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
lc_free((void *) l_lck);
}
-void erts_lc_lock_flg_x(erts_lc_lock_t *lck, Uint16 op_flags,
+void erts_lc_lock_flg_x(erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line)
{
erts_lc_locked_locks_t *l_lcks;
@@ -1151,7 +1159,7 @@ void erts_lc_lock_flg_x(erts_lc_lock_t *lck, Uint16 op_flags,
return;
l_lcks = make_my_locked_locks();
- l_lck = new_locked_lock(lck, op_flags, file, line);
+ l_lck = new_locked_lock(lck, options, file, line);
if (!l_lcks->locked.last) {
ASSERT(!l_lcks->locked.first);
@@ -1167,12 +1175,12 @@ void erts_lc_lock_flg_x(erts_lc_lock_t *lck, Uint16 op_flags,
l_lcks->locked.last = l_lck;
}
else if (l_lcks->locked.last->id == lck->id && l_lcks->locked.last->extra == lck->extra)
- lock_twice("Locking", l_lcks, lck, op_flags);
+ lock_twice("Locking", l_lcks, lck, options);
else
lock_order_violation(l_lcks, lck);
}
-void erts_lc_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+void erts_lc_unlock_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
{
erts_lc_locked_locks_t *l_lcks;
erts_lc_locked_lock_t *l_lck;
@@ -1193,8 +1201,8 @@ void erts_lc_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
for (l_lck = l_lcks ? l_lcks->locked.last : NULL; l_lck; l_lck = l_lck->prev) {
if (l_lck->id == lck->id && l_lck->extra == lck->extra) {
- if ((l_lck->flags & ERTS_LC_FLG_LO_ALL) != op_flags)
- unlock_op_mismatch(l_lcks, lck, op_flags);
+ if ((l_lck->taken_options & ERTS_LOCK_OPTIONS_RDWR) != options)
+ unlock_op_mismatch(l_lcks, lck, options);
if (l_lck->prev)
l_lck->prev->next = l_lck->next;
else
@@ -1211,7 +1219,7 @@ void erts_lc_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
unlock_of_not_locked(l_lcks, lck);
}
-void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
{
erts_lc_locked_locks_t *l_lcks;
erts_lc_locked_lock_t *l_lck;
@@ -1275,23 +1283,25 @@ void erts_lc_unrequire_lock(erts_lc_lock_t *lck)
}
void
-erts_lc_init_lock(erts_lc_lock_t *lck, char *name, Uint16 flags)
+erts_lc_init_lock(erts_lc_lock_t *lck, char *name, erts_lock_flags_t flags)
{
lck->id = erts_lc_get_lock_order_id(name);
lck->extra = (UWord) &lck->extra;
ASSERT(is_not_immed(lck->extra));
lck->flags = flags;
+ lck->taken_options = 0;
lck->inited = ERTS_LC_INITITALIZED;
}
void
-erts_lc_init_lock_x(erts_lc_lock_t *lck, char *name, Uint16 flags, Eterm extra)
+erts_lc_init_lock_x(erts_lc_lock_t *lck, char *name, erts_lock_flags_t flags, Eterm extra)
{
lck->id = erts_lc_get_lock_order_id(name);
lck->extra = extra;
ASSERT(is_immed(lck->extra));
lck->flags = flags;
+ lck->taken_options = 0;
lck->inited = ERTS_LC_INITITALIZED;
}
@@ -1305,6 +1315,7 @@ erts_lc_destroy_lock(erts_lc_lock_t *lck)
lck->id = -1;
lck->extra = THE_NON_VALUE;
lck->flags = 0;
+ lck->taken_options = 0;
}
void
@@ -1330,7 +1341,7 @@ erts_lc_init(void)
#endif /* #ifdef ERTS_LC_STATIC_ALLOC */
if (ethr_spinlock_init(&free_blocks_lock) != 0)
- lc_abort();
+ ERTS_INTERNAL_ERROR("spinlock_init failed");
erts_tsd_key_create(&locks_key,"erts_lock_check_key");
}
diff --git a/erts/emulator/beam/erl_lock_check.h b/erts/emulator/beam/erl_lock_check.h
index 3f7f417e61..5c2c38e8f2 100644
--- a/erts/emulator/beam/erl_lock_check.h
+++ b/erts/emulator/beam/erl_lock_check.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2012. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -35,6 +36,8 @@
#ifdef ERTS_ENABLE_LOCK_CHECK
+#include "erl_lock_flags.h"
+
#ifndef ERTS_ENABLE_LOCK_POSITION
/* Enable in order for _x variants of mtx functions to be used. */
#define ERTS_ENABLE_LOCK_POSITION 1
@@ -43,36 +46,14 @@
typedef struct {
int inited;
Sint16 id;
- Uint16 flags;
+ erts_lock_flags_t flags;
+ erts_lock_options_t taken_options;
UWord extra;
} erts_lc_lock_t;
#define ERTS_LC_INITITALIZED 0x7f7f7f7f
-
-#define ERTS_LC_FLG_LT_SPINLOCK (((Uint16) 1) << 0)
-#define ERTS_LC_FLG_LT_RWSPINLOCK (((Uint16) 1) << 1)
-#define ERTS_LC_FLG_LT_MUTEX (((Uint16) 1) << 2)
-#define ERTS_LC_FLG_LT_RWMUTEX (((Uint16) 1) << 3)
-#define ERTS_LC_FLG_LT_PROCLOCK (((Uint16) 1) << 4)
-
-#define ERTS_LC_FLG_LO_READ (((Uint16) 1) << 5)
-#define ERTS_LC_FLG_LO_WRITE (((Uint16) 1) << 6)
-
-#define ERTS_LC_FLG_LO_READ_WRITE (ERTS_LC_FLG_LO_READ \
- | ERTS_LC_FLG_LO_WRITE)
-
-#define ERTS_LC_FLG_LT_ALL (ERTS_LC_FLG_LT_SPINLOCK \
- | ERTS_LC_FLG_LT_RWSPINLOCK \
- | ERTS_LC_FLG_LT_MUTEX \
- | ERTS_LC_FLG_LT_RWMUTEX \
- | ERTS_LC_FLG_LT_PROCLOCK)
-
-#define ERTS_LC_FLG_LO_ALL (ERTS_LC_FLG_LO_READ \
- | ERTS_LC_FLG_LO_WRITE)
-
-
-#define ERTS_LC_LOCK_INIT(ID, X, F) {ERTS_LC_INITITALIZED, (ID), (F), (X)}
+#define ERTS_LC_LOCK_INIT(ID, X, F) {ERTS_LC_INITITALIZED, (ID), (F), 0, (X)}
void erts_lc_init(void);
void erts_lc_late_init(void);
@@ -82,31 +63,31 @@ void erts_lc_check(erts_lc_lock_t *have, int have_len,
void erts_lc_check_exact(erts_lc_lock_t *have, int have_len);
void erts_lc_have_locks(int *resv, erts_lc_lock_t *lcks, int len);
void erts_lc_have_lock_ids(int *resv, int *ids, int len);
-void erts_lc_check_no_locked_of_type(Uint16 flags);
-int erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags);
-void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags,
+void erts_lc_check_no_locked_of_type(erts_lock_flags_t flags);
+int erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, erts_lock_options_t options);
+void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line);
-void erts_lc_lock_flg_x(erts_lc_lock_t *lck, Uint16 op_flags,
+void erts_lc_lock_flg_x(erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line);
-void erts_lc_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
-void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
+void erts_lc_unlock_flg(erts_lc_lock_t *lck, erts_lock_options_t options);
+void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, erts_lock_options_t options);
int erts_lc_trylock_force_busy(erts_lc_lock_t *lck);
void erts_lc_trylock_x(int locked, erts_lc_lock_t *lck,
char* file, unsigned int line);
void erts_lc_lock_x(erts_lc_lock_t *lck, char* file, unsigned int line);
void erts_lc_unlock(erts_lc_lock_t *lck);
void erts_lc_might_unlock(erts_lc_lock_t *lck);
-void erts_lc_init_lock(erts_lc_lock_t *lck, char *name, Uint16 flags);
-void erts_lc_init_lock_x(erts_lc_lock_t *lck, char *name, Uint16 flags, Eterm extra);
+void erts_lc_init_lock(erts_lc_lock_t *lck, char *name, erts_lock_flags_t flags);
+void erts_lc_init_lock_x(erts_lc_lock_t *lck, char *name, erts_lock_flags_t flags, Eterm extra);
void erts_lc_destroy_lock(erts_lc_lock_t *lck);
void erts_lc_fail(char *fmt, ...);
int erts_lc_assert_failed(char *file, int line, char *assertion);
void erts_lc_set_thread_name(char *thread_name);
void erts_lc_pll(void);
-void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags,
+void erts_lc_require_lock_flg(erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line);
-void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
+void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, erts_lock_options_t options);
void erts_lc_require_lock(erts_lc_lock_t *lck, char *file, unsigned int line);
void erts_lc_unrequire_lock(erts_lc_lock_t *lck);
@@ -115,13 +96,7 @@ int erts_lc_is_emu_thr(void);
#define ERTS_LC_ASSERT(A) \
((void) (((A) || ERTS_SOMEONE_IS_CRASH_DUMPING) ? 1 : erts_lc_assert_failed(__FILE__, __LINE__, #A)))
-#ifdef ERTS_SMP
-#define ERTS_SMP_LC_ASSERT(A) ERTS_LC_ASSERT(A)
-#else
-#define ERTS_SMP_LC_ASSERT(A) ((void) 1)
-#endif
#else /* #ifdef ERTS_ENABLE_LOCK_CHECK */
-#define ERTS_SMP_LC_ASSERT(A) ((void) 1)
#define ERTS_LC_ASSERT(A) ((void) 1)
#endif /* #ifdef ERTS_ENABLE_LOCK_CHECK */
diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c
index 6f44bf097b..1ae6076b12 100644
--- a/erts/emulator/beam/erl_lock_count.c
+++ b/erts/emulator/beam/erl_lock_count.c
@@ -1,676 +1,659 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2008-2012. All Rights Reserved.
+ * Copyright Ericsson AB 2008-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * %CopyrightEnd%
- */
-
-/*
- * Description: Statistics for locks.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
- *     Author: Björn-Egil Dahlberg
- * Date: 2008-07-03
+ * %CopyrightEnd%
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
-/* Needed for VxWorks va_arg */
+#ifdef ERTS_ENABLE_LOCK_COUNT
+
#include "sys.h"
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#include "global.h"
#include "erl_lock_count.h"
-#include "ethread.h"
-#include "erl_term.h"
-#include "atom.h"
-#include <stdio.h>
+#include "erl_thr_progress.h"
+
+#include "erl_node_tables.h"
+#include "erl_alloc_util.h"
+#include "erl_check_io.h"
+#include "erl_poll.h"
+#include "erl_db.h"
-/* globals, dont access these without locks or blocks */
+#define LCNT_MAX_CARRIER_ENTRIES 255
-ethr_mutex lcnt_data_lock;
-erts_lcnt_data_t *erts_lcnt_data;
-Uint16 erts_lcnt_rt_options;
-erts_lcnt_time_t timer_start;
-const char *str_undefined = "undefined";
+/* - Locals that are shared with the header implementation - */
+
+#ifdef DEBUG
+int lcnt_initialization_completed__;
+#endif
-static ethr_tsd_key lcnt_thr_data_key;
-static int lcnt_n_thr;
-static erts_lcnt_thread_data_t *lcnt_thread_data[4096];
+erts_lock_flags_t lcnt_category_mask__;
+ethr_tsd_key lcnt_thr_data_key__;
+
+const int lcnt_log2_tab64__[64] = {
+ 63, 0, 58, 1, 59, 47, 53, 2,
+ 60, 39, 48, 27, 54, 33, 42, 3,
+ 61, 51, 37, 40, 49, 18, 28, 20,
+ 55, 30, 34, 11, 43, 14, 22, 4,
+ 62, 57, 46, 52, 38, 26, 32, 41,
+ 50, 36, 17, 19, 29, 10, 13, 21,
+ 56, 45, 25, 31, 35, 16, 9, 12,
+ 44, 24, 15, 8, 23, 7, 6, 5};
+
+/* - Local variables - */
+
+typedef struct lcnt_static_lock_ref_ {
+ erts_lcnt_ref_t *reference;
+
+ erts_lock_flags_t flags;
+ const char *name;
+ Eterm id;
+
+ struct lcnt_static_lock_ref_ *next;
+} lcnt_static_lock_ref_t;
+
+static ethr_atomic_t lcnt_static_lock_registry;
+
+static erts_lcnt_lock_info_list_t lcnt_current_lock_list;
+static erts_lcnt_lock_info_list_t lcnt_deleted_lock_list;
+
+static erts_lcnt_time_t lcnt_timer_start;
+
+static int lcnt_preserve_info;
/* local functions */
-static ERTS_INLINE void lcnt_lock(void) {
- ethr_mutex_lock(&lcnt_data_lock);
-}
+static void lcnt_clear_stats(erts_lcnt_lock_info_t *info) {
+ size_t i;
-static ERTS_INLINE void lcnt_unlock(void) {
- ethr_mutex_unlock(&lcnt_data_lock);
-}
+ for(i = 0; i < ERTS_LCNT_MAX_LOCK_LOCATIONS; i++) {
+ erts_lcnt_lock_stats_t *stats = &info->location_stats[i];
+ sys_memzero(&stats->wait_time_histogram, sizeof(stats->wait_time_histogram));
-static char* lcnt_lock_type(Uint16 flag) {
- switch(flag & ERTS_LCNT_LT_ALL) {
- case ERTS_LCNT_LT_SPINLOCK: return "spinlock";
- case ERTS_LCNT_LT_RWSPINLOCK: return "rw_spinlock";
- case ERTS_LCNT_LT_MUTEX: return "mutex";
- case ERTS_LCNT_LT_RWMUTEX: return "rw_mutex";
- case ERTS_LCNT_LT_PROCLOCK: return "proclock";
- default: return "";
- }
-}
+ stats->total_time_waited.s = 0;
+ stats->total_time_waited.ns = 0;
-static void lcnt_clear_stats(erts_lcnt_lock_stats_t *stats) {
- ethr_atomic_set(&stats->tries, 0);
- ethr_atomic_set(&stats->colls, 0);
- stats->timer.s = 0;
- stats->timer.ns = 0;
- stats->timer_n = 0;
- stats->file = (char *)str_undefined;
- stats->line = 0;
-}
-
-static void lcnt_time(erts_lcnt_time_t *time) {
-#ifdef HAVE_GETHRTIME
- SysHrTime hr_time;
- hr_time = sys_gethrtime();
- time->s = (unsigned long)(hr_time / 1000000000LL);
- time->ns = (unsigned long)(hr_time - 1000000000LL*time->s);
-#else
- SysTimeval tv;
- sys_gettimeofday(&tv);
- time->s = tv.tv_sec;
- time->ns = tv.tv_usec*1000LL;
-#endif
-}
+ stats->times_waited = 0;
+
+ stats->file = NULL;
+ stats->line = 0;
-static void lcnt_time_diff(erts_lcnt_time_t *d, erts_lcnt_time_t *t1, erts_lcnt_time_t *t0) {
- long ds;
- long dns;
-
- ds = t1->s - t0->s;
- dns = t1->ns - t0->ns;
-
- /* the difference should not be able to get bigger than 1 sec in ns*/
-
- if (dns < 0) {
- ds -= 1;
- dns += 1000000000LL;
+ ethr_atomic_set(&stats->attempts, 0);
+ ethr_atomic_set(&stats->collisions, 0);
}
- d->s = ds;
- d->ns = dns;
+ info->location_count = 1;
}
-/* difference d must be positive */
-
-static void lcnt_time_add(erts_lcnt_time_t *t, erts_lcnt_time_t *d) {
- unsigned long ngns = 0;
-
- t->s += d->s;
- t->ns += d->ns;
+static lcnt_thread_data_t__ *lcnt_thread_data_alloc(void) {
+ lcnt_thread_data_t__ *eltd =
+ (lcnt_thread_data_t__*)malloc(sizeof(lcnt_thread_data_t__));
- ngns = t->ns / 1000000000LL;
- t->ns = t->ns % 1000000000LL;
-
- t->s += ngns;
-}
+ if(!eltd) {
+ ERTS_INTERNAL_ERROR("Failed to allocate lcnt thread data.");
+ }
-static erts_lcnt_thread_data_t *lcnt_thread_data_alloc(void) {
- erts_lcnt_thread_data_t *eltd;
-
- eltd = (erts_lcnt_thread_data_t*)malloc(sizeof(erts_lcnt_thread_data_t));
eltd->timer_set = 0;
eltd->lock_in_conflict = 0;
- eltd->id = lcnt_n_thr++;
- /* set thread data to array */
- lcnt_thread_data[eltd->id] = eltd;
-
return eltd;
-}
-
-static erts_lcnt_thread_data_t *lcnt_get_thread_data(void) {
- return (erts_lcnt_thread_data_t *)ethr_tsd_get(lcnt_thr_data_key);
}
+/* - List operations -
+ *
+ * Info entries are kept in a doubly linked list where each entry is locked
+ * with its neighbors rather than a global lock. Deletion is rather quick, but
+ * insertion is still serial since the head becomes a de facto global lock.
+ *
+ * We rely on ad-hoc spinlocks to avoid "recursing" into this module. */
+
+#define LCNT_SPINLOCK_YIELD_ITERATIONS 50
+
+#define LCNT_SPINLOCK_HELPER_INIT \
+ Uint failed_spin_count = 0;
-/* debug */
+#define LCNT_SPINLOCK_HELPER_YIELD \
+ do { \
+ failed_spin_count++; \
+ if(!(failed_spin_count % LCNT_SPINLOCK_YIELD_ITERATIONS)) { \
+ erts_thr_yield(); \
+ } else { \
+ ERTS_SPIN_BODY; \
+ } \
+ } while(0)
-#if 0
-static char* lock_opt(Uint16 flag) {
- if ((flag & ERTS_LCNT_LO_WRITE) && (flag & ERTS_LCNT_LO_READ)) return "rw";
- if (flag & ERTS_LCNT_LO_READ ) return "r ";
- if (flag & ERTS_LCNT_LO_WRITE) return " w";
- return "--";
+static void lcnt_unlock_list_entry(erts_lcnt_lock_info_t *info) {
+ ethr_atomic32_set_relb(&info->lock, 0);
}
-static void print_lock_x(erts_lcnt_lock_t *lock, Uint16 flag, char *action, char *extra) {
- erts_aint_t colls, tries, w_state, r_state;
- erts_lcnt_lock_stats_t *stats = NULL;
-
- char *type;
- int i;
-
- type = lcnt_lock_type(lock->flag);
- r_state = ethr_atomic_read(&lock->r_state);
- w_state = ethr_atomic_read(&lock->w_state);
-
-
- if (lock->flag & flag) {
- erts_printf("%20s [%30s] [r/w state %4ld/%4ld] id %T %s\r\n",
- action,
- lock->name,
- r_state,
- w_state,
- lock->id,
- extra);
- }
+static int lcnt_try_lock_list_entry(erts_lcnt_lock_info_t *info) {
+ return ethr_atomic32_cmpxchg_acqb(&info->lock, 1, 0) == 0;
}
-static void print_lock(erts_lcnt_lock_t *lock, char *action) {
- if (strcmp(lock->name, "proc_main") == 0) {
- print_lock_x(lock, ERTS_LCNT_LT_ALL, action, "");
+static void lcnt_lock_list_entry(erts_lcnt_lock_info_t *info) {
+ LCNT_SPINLOCK_HELPER_INIT;
+
+ while(!lcnt_try_lock_list_entry(info)) {
+ LCNT_SPINLOCK_HELPER_YIELD;
}
}
-#endif
+static void lcnt_lock_list_entry_with_neighbors(erts_lcnt_lock_info_t *info) {
+ LCNT_SPINLOCK_HELPER_INIT;
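+
+    /* Entries are always taken in the same info -> next -> prev order and
+     * every held entry is released when an attempt fails, so two threads
+     * locking overlapping neighborhoods cannot deadlock. */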
-static erts_lcnt_lock_stats_t *lcnt_get_lock_stats(erts_lcnt_lock_t *lock, char *file, unsigned int line) {
- unsigned int i;
- erts_lcnt_lock_stats_t *stats = NULL;
-
- for (i = 0; i < lock->n_stats; i++) {
- if ((lock->stats[i].file == file) && (lock->stats[i].line == line)) {
- return &(lock->stats[i]);
- }
- }
- if (lock->n_stats < ERTS_LCNT_MAX_LOCK_LOCATIONS) {
- stats = &lock->stats[lock->n_stats];
- lock->n_stats++;
+ for(;;) {
+ if(!lcnt_try_lock_list_entry(info))
+ goto retry_after_entry_failed;
+ if(!lcnt_try_lock_list_entry(info->next))
+ goto retry_after_next_failed;
+ if(!lcnt_try_lock_list_entry(info->prev))
+ goto retry_after_prev_failed;
+
+ return;
- stats->file = file;
- stats->line = line;
- return stats;
+ retry_after_prev_failed:
+ lcnt_unlock_list_entry(info->next);
+ retry_after_next_failed:
+ lcnt_unlock_list_entry(info);
+ retry_after_entry_failed:
+ LCNT_SPINLOCK_HELPER_YIELD;
}
- return &lock->stats[0];
+}
+static void lcnt_unlock_list_entry_with_neighbors(erts_lcnt_lock_info_t *info) {
+ lcnt_unlock_list_entry(info->prev);
+ lcnt_unlock_list_entry(info->next);
+ lcnt_unlock_list_entry(info);
}
-static void lcnt_update_stats(erts_lcnt_lock_stats_t *stats, int lock_in_conflict, erts_lcnt_time_t *time_wait) {
-
- ethr_atomic_inc(&stats->tries);
+static void lcnt_insert_list_entry(erts_lcnt_lock_info_list_t *list, erts_lcnt_lock_info_t *info) {
+ erts_lcnt_lock_info_t *next, *prev;
- if (lock_in_conflict)
- ethr_atomic_inc(&stats->colls);
+ prev = &list->head;
- if (time_wait) {
- lcnt_time_add(&(stats->timer), time_wait);
- stats->timer_n++;
- }
+ lcnt_lock_list_entry(prev);
+
+ next = prev->next;
+
+ lcnt_lock_list_entry(next);
+
+ info->next = next;
+ info->prev = prev;
+
+ prev->next = info;
+ next->prev = info;
+
+ lcnt_unlock_list_entry(next);
+ lcnt_unlock_list_entry(prev);
}
-/*
- * interface
- */
+static void lcnt_insert_list_carrier(erts_lcnt_lock_info_list_t *list,
+ erts_lcnt_lock_info_carrier_t *carrier) {
+ erts_lcnt_lock_info_t *next, *prev;
+ size_t i;
-void erts_lcnt_init() {
- erts_lcnt_thread_data_t *eltd = NULL;
-
- /* init lock */
- if (ethr_mutex_init(&lcnt_data_lock) != 0) abort();
+ for(i = 0; i < carrier->entry_count; i++) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[i];
- /* init tsd */
- lcnt_n_thr = 0;
+ info->prev = &carrier->entries[i - 1];
+ info->next = &carrier->entries[i + 1];
+ }
- ethr_tsd_key_create(&lcnt_thr_data_key,"lcnt_data");
+ prev = &list->head;
- lcnt_lock();
+ lcnt_lock_list_entry(prev);
- erts_lcnt_rt_options = ERTS_LCNT_OPT_PROCLOCK | ERTS_LCNT_OPT_LOCATION;
-
- eltd = lcnt_thread_data_alloc();
+ next = prev->next;
- ethr_tsd_set(lcnt_thr_data_key, eltd);
-
- /* init lcnt structure */
- erts_lcnt_data = (erts_lcnt_data_t*)malloc(sizeof(erts_lcnt_data_t));
- erts_lcnt_data->current_locks = erts_lcnt_list_init();
- erts_lcnt_data->deleted_locks = erts_lcnt_list_init();
+ lcnt_lock_list_entry(next);
- lcnt_unlock();
+ next->prev = &carrier->entries[carrier->entry_count - 1];
+ carrier->entries[carrier->entry_count - 1].next = next;
- /* set start timer and zero statistics */
- erts_lcnt_clear_counters();
+ prev->next = &carrier->entries[0];
+ carrier->entries[0].prev = prev;
+
+ lcnt_unlock_list_entry(next);
+ lcnt_unlock_list_entry(prev);
}
-void erts_lcnt_late_init() {
- erts_thr_install_exit_handler(erts_lcnt_thread_exit_handler);
+static void lcnt_init_list(erts_lcnt_lock_info_list_t *list) {
+ /* Ensure that ref_count operations explode when touching the sentinels in
+ * DEBUG mode. */
+ ethr_atomic_init(&(list->head.ref_count), -1);
+ ethr_atomic_init(&(list->tail.ref_count), -1);
+
+ ethr_atomic32_init(&(list->head.lock), 0);
+ (list->head).next = &list->tail;
+ (list->head).prev = &list->tail;
+
+ ethr_atomic32_init(&(list->tail.lock), 0);
+ (list->tail).next = &list->head;
+ (list->tail).prev = &list->head;
}
-/* list operations */
+/* - Carrier operations - */
+
+int lcnt_thr_progress_unmanaged_delay__(void) {
+ return erts_thr_progress_unmanaged_delay();
+}
-/* BEGIN ASSUMPTION: lcnt_data_lock taken */
+void lcnt_thr_progress_unmanaged_continue__(int handle) {
+ return erts_thr_progress_unmanaged_continue(handle);
+}
-erts_lcnt_lock_list_t *erts_lcnt_list_init(void) {
- erts_lcnt_lock_list_t *list;
-
- list = (erts_lcnt_lock_list_t*)malloc(sizeof(erts_lcnt_lock_list_t));
- list->head = NULL;
- list->tail = NULL;
- list->n = 0;
- return list;
+void lcnt_deallocate_carrier__(erts_lcnt_lock_info_carrier_t *carrier) {
+ ASSERT(ethr_atomic_read(&carrier->ref_count) == 0);
+ erts_free(ERTS_ALC_T_LCNT_CARRIER, (void*)carrier);
}
-/* only do this on the list with the deleted locks! */
-void erts_lcnt_list_clear(erts_lcnt_lock_list_t *list) {
- erts_lcnt_lock_t *lock = NULL,
- *next = NULL;
+static void lcnt_thr_prg_cleanup_carrier(void *data) {
+ erts_lcnt_lock_info_carrier_t *carrier = data;
+ size_t entry_count, i;
+
+ /* carrier->entry_count will be replaced with garbage if it's deallocated
+ * on the final iteration, so we'll tuck it away to get a clean exit. */
+ entry_count = carrier->entry_count;
+
+ for(i = 0; i < entry_count; i++) {
+ ASSERT(ethr_atomic_read(&carrier->ref_count) >= (entry_count - i));
- lock = list->head;
-
- while(lock != NULL) {
- next = lock->next;
- free(lock);
- lock = next;
+ erts_lcnt_release_lock_info(&carrier->entries[i]);
}
+}
+
+static void lcnt_schedule_carrier_cleanup(void *data) {
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+
+ /* We can't issue cleanup jobs on anything other than normal schedulers, so
+ * we move to the first scheduler if required. */
+
+ if(!esdp || esdp->type != ERTS_SCHED_NORMAL) {
+ erts_schedule_misc_aux_work(1, &lcnt_schedule_carrier_cleanup, data);
+ } else {
+ erts_lcnt_lock_info_carrier_t *carrier = data;
+ size_t carrier_size;
- list->head = NULL;
- list->tail = NULL;
- list->n = 0;
+ carrier_size = sizeof(erts_lcnt_lock_info_carrier_t) +
+ sizeof(erts_lcnt_lock_info_t) * carrier->entry_count;
+
+ erts_schedule_thr_prgr_later_cleanup_op(&lcnt_thr_prg_cleanup_carrier,
+ data, (ErtsThrPrgrLaterOp*)&carrier->release_entries, carrier_size);
+ }
+}
+
+static void lcnt_info_deallocate(erts_lcnt_lock_info_t *info) {
+ lcnt_release_carrier__(info->carrier);
}
-void erts_lcnt_list_insert(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock) {
- erts_lcnt_lock_t *tail = NULL;
+static void lcnt_info_dispose(erts_lcnt_lock_info_t *info) {
+ ASSERT(ethr_atomic_read(&info->ref_count) == 0);
+
+ if(lcnt_preserve_info) {
+ ethr_atomic_set(&info->ref_count, 1);
+
+ /* Move straight to deallocation the next time around. */
+ info->dispose = &lcnt_info_deallocate;
- tail = list->tail;
- if (tail) {
- tail->next = lock;
- lock->prev = tail;
+ lcnt_insert_list_entry(&lcnt_deleted_lock_list, info);
} else {
- list->head = lock;
- lock->prev = NULL;
- ASSERT(!lock->next);
+ lcnt_info_deallocate(info);
}
- lock->next = NULL;
- list->tail = lock;
-
- list->n++;
}
-void erts_lcnt_list_delete(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock) {
+static void lcnt_lock_info_init_helper(erts_lcnt_lock_info_t *info) {
+ ethr_atomic_init(&info->ref_count, 1);
+ ethr_atomic32_init(&info->lock, 0);
- if (lock->next) lock->next->prev = lock->prev;
- if (lock->prev) lock->prev->next = lock->next;
- if (list->head == lock) list->head = lock->next;
- if (list->tail == lock) list->tail = lock->prev;
-
- lock->prev = NULL;
- lock->next = NULL;
- list->n--;
+ ethr_atomic_init(&info->r_state, 0);
+ ethr_atomic_init(&info->w_state, 0);
+
+ info->dispose = &lcnt_info_dispose;
+
+ lcnt_clear_stats(info);
}
-/* END ASSUMPTION: lcnt_data_lock taken */
+erts_lcnt_lock_info_carrier_t *erts_lcnt_create_lock_info_carrier(int entry_count) {
+ erts_lcnt_lock_info_carrier_t *result;
+ size_t carrier_size, i;
+
+ ASSERT(entry_count > 0 && entry_count <= LCNT_MAX_CARRIER_ENTRIES);
+ ASSERT(lcnt_initialization_completed__);
+
+ carrier_size = sizeof(erts_lcnt_lock_info_carrier_t) +
+ sizeof(erts_lcnt_lock_info_t) * entry_count;
+
+ result = (erts_lcnt_lock_info_carrier_t*)erts_alloc(ERTS_ALC_T_LCNT_CARRIER, carrier_size);
+ result->entry_count = entry_count;
-/* lock operations */
+ ethr_atomic_init(&result->ref_count, entry_count);
-/* interface to erl_threads.h */
-/* only lock on init and destroy, all others should use atomics */
-void erts_lcnt_init_lock(erts_lcnt_lock_t *lock, char *name, Uint16 flag ) {
- erts_lcnt_init_lock_x(lock, name, flag, am_undefined);
+ for(i = 0; i < entry_count; i++) {
+ erts_lcnt_lock_info_t *info = &result->entries[i];
+
+ lcnt_lock_info_init_helper(info);
+
+ info->carrier = result;
+ }
+
+ return result;
}
-void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eterm id) {
+
+void erts_lcnt_install(erts_lcnt_ref_t *ref, erts_lcnt_lock_info_carrier_t *carrier) {
+ ethr_sint_t swapped_carrier;
+
+#ifdef DEBUG
int i;
- if (!name) {
- lock->flag = 0;
- return;
+
+ /* Verify that all locks share the same categories/static property; all
+ * other flags are fair game. */
+ for(i = 1; i < carrier->entry_count; i++) {
+ const erts_lock_flags_t SIGNIFICANT_DIFF_MASK =
+ ERTS_LOCK_FLAGS_MASK_CATEGORY | ERTS_LOCK_FLAGS_PROPERTY_STATIC;
+
+ erts_lcnt_lock_info_t *previous, *current;
+
+ previous = &carrier->entries[i - 1];
+ current = &carrier->entries[i];
+
+ ASSERT(!((previous->flags ^ current->flags) & SIGNIFICANT_DIFF_MASK));
}
- lcnt_lock();
-
- lock->next = NULL;
- lock->prev = NULL;
- lock->flag = flag;
- lock->name = name;
- lock->id = id;
+#endif
+
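+    /* Only the first install wins the cmpxchg; a losing carrier never
+     * enters the lock list and is deallocated immediately below. */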
+ swapped_carrier = ethr_atomic_cmpxchg_mb(ref, (ethr_sint_t)carrier, (ethr_sint_t)NULL);
- ethr_atomic_init(&lock->r_state, 0);
- ethr_atomic_init(&lock->w_state, 0);
-
+ if(swapped_carrier != (ethr_sint_t)NULL) {
#ifdef DEBUG
- ethr_atomic_init(&lock->flowstate, 0);
+ ASSERT(ethr_atomic_read(&carrier->ref_count) == carrier->entry_count);
+ ethr_atomic_set(&carrier->ref_count, 0);
#endif
-
- lock->n_stats = 1;
- for (i = 0; i < ERTS_LCNT_MAX_LOCK_LOCATIONS; i++) {
- lcnt_clear_stats(&lock->stats[i]);
+ lcnt_deallocate_carrier__(carrier);
+ } else {
+ lcnt_insert_list_carrier(&lcnt_current_lock_list, carrier);
}
+}
+
+void erts_lcnt_uninstall(erts_lcnt_ref_t *ref) {
+ ethr_sint_t previous_carrier, swapped_carrier;
- erts_lcnt_list_insert(erts_lcnt_data->current_locks, lock);
-
- lcnt_unlock();
+ previous_carrier = ethr_atomic_read(ref);
+ swapped_carrier = ethr_atomic_cmpxchg_mb(ref, (ethr_sint_t)NULL, previous_carrier);
+
+ if(previous_carrier && previous_carrier == swapped_carrier) {
+ lcnt_schedule_carrier_cleanup((void*)previous_carrier);
+ }
}
-void erts_lcnt_destroy_lock(erts_lcnt_lock_t *lock) {
- erts_lcnt_lock_t *deleted_lock;
+/* - Static lock registry -
+ *
+ * Since static locks can be trusted to never disappear, we can track them
+ * pretty cheaply and won't need to bother writing an "erts_lcnt_update_xx"
+ * variant. */
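+
+/* A registration sketch (hypothetical lock; NIL stands in for "no id"):
+ *
+ *     static erts_lcnt_ref_t my_ref;
+ *
+ *     erts_lcnt_init_ref_x(&my_ref, "my_static_lock", NIL,
+ *                          ERTS_LOCK_TYPE_MUTEX |
+ *                          ERTS_LOCK_FLAGS_PROPERTY_STATIC |
+ *                          ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+ *
+ * lcnt_update_static_locks() can then install or uninstall counters on the
+ * reference whenever the category mask changes. */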
- if (!ERTS_LCNT_LOCK_TYPE(lock)) return;
+static void lcnt_init_static_lock_registry(void) {
+ ethr_atomic_init(&lcnt_static_lock_registry, (ethr_sint_t)NULL);
+}
- lcnt_lock();
+static void lcnt_update_static_locks(void) {
+ lcnt_static_lock_ref_t *iterator =
+ (lcnt_static_lock_ref_t*)ethr_atomic_read(&lcnt_static_lock_registry);
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_COPYSAVE) {
- /* copy structure and insert the copy */
+ while(iterator != NULL) {
+ if(!erts_lcnt_check_enabled(iterator->flags)) {
+ erts_lcnt_uninstall(iterator->reference);
+ } else if(!erts_lcnt_check_ref_installed(iterator->reference)) {
+ erts_lcnt_lock_info_carrier_t *carrier = erts_lcnt_create_lock_info_carrier(1);
- deleted_lock = (erts_lcnt_lock_t*)malloc(sizeof(erts_lcnt_lock_t));
- memcpy(deleted_lock, lock, sizeof(erts_lcnt_lock_t));
+ erts_lcnt_init_lock_info_idx(carrier, 0, iterator->name, iterator->id, iterator->flags);
- deleted_lock->next = NULL;
- deleted_lock->prev = NULL;
+ erts_lcnt_install(iterator->reference, carrier);
+ }
- erts_lcnt_list_insert(erts_lcnt_data->deleted_locks, deleted_lock);
+ iterator = iterator->next;
}
- /* delete original */
- erts_lcnt_list_delete(erts_lcnt_data->current_locks, lock);
- lock->flag = 0;
-
- lcnt_unlock();
}
-/* lock */
+void lcnt_register_static_lock__(erts_lcnt_ref_t *reference, const char *name, Eterm id,
+ erts_lock_flags_t flags) {
+ lcnt_static_lock_ref_t *lock = malloc(sizeof(lcnt_static_lock_ref_t));
+ int retry_insertion;
-void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
- erts_aint_t r_state = 0, w_state = 0;
- erts_lcnt_thread_data_t *eltd;
-
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (!ERTS_LCNT_LOCK_TYPE(lock)) return;
+ ASSERT(flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC);
- eltd = lcnt_get_thread_data();
+ lock->reference = reference;
+ lock->flags = flags;
+ lock->name = name;
+ lock->id = id;
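+
+    /* Treiber-style push: retry the CAS until this entry has atomically
+     * become the new head of the registry. */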
- ASSERT(eltd);
-
- w_state = ethr_atomic_read(&lock->w_state);
-
- if (option & ERTS_LCNT_LO_WRITE) {
- r_state = ethr_atomic_read(&lock->r_state);
- ethr_atomic_inc( &lock->w_state);
- }
- if (option & ERTS_LCNT_LO_READ) {
- ethr_atomic_inc( &lock->r_state);
- }
-
- /* we cannot acquire w_lock if either w or r are taken */
- /* we cannot acquire r_lock if w_lock is taken */
-
- if ((w_state > 0) || (r_state > 0)) {
- eltd->lock_in_conflict = 1;
- if (eltd->timer_set == 0)
- lcnt_time(&eltd->timer);
- eltd->timer_set++;
- } else {
- eltd->lock_in_conflict = 0;
- }
-}
+ do {
+ ethr_sint_t swapped_head;
-void erts_lcnt_lock(erts_lcnt_lock_t *lock) {
- erts_aint_t w_state;
- erts_lcnt_thread_data_t *eltd;
-
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (!ERTS_LCNT_LOCK_TYPE(lock)) return;
+ lock->next = (lcnt_static_lock_ref_t*)ethr_atomic_read(&lcnt_static_lock_registry);
- w_state = ethr_atomic_read(&lock->w_state);
- ethr_atomic_inc( &lock->w_state);
+ swapped_head = ethr_atomic_cmpxchg_acqb(
+ &lcnt_static_lock_registry,
+ (ethr_sint_t)lock,
+ (ethr_sint_t)lock->next);
- eltd = lcnt_get_thread_data();
+ retry_insertion = (swapped_head != (ethr_sint_t)lock->next);
+ } while(retry_insertion);
+}
- ASSERT(eltd);
+/* - Initialization - */
- if (w_state > 0) {
- eltd->lock_in_conflict = 1;
- /* only set the timer if nobody else has it
- * This should only happen when proc_locks aquires several locks
- * 'atomicly'. All other locks will block the thread if w_state > 0
- * i.e. locked.
- */
- if (eltd->timer_set == 0)
- lcnt_time(&eltd->timer);
- eltd->timer_set++;
+void erts_lcnt_pre_thr_init() {
+ /* Ensure that the dependency hack mentioned in the header doesn't
+ * explode at runtime. */
+ ERTS_CT_ASSERT(sizeof(LcntThrPrgrLaterOp) >= sizeof(ErtsThrPrgrLaterOp));
+ ERTS_CT_ASSERT(ERTS_THR_PRGR_DHANDLE_MANAGED ==
+ (ErtsThrPrgrDelayHandle)LCNT_THR_PRGR_DHANDLE_MANAGED);
- } else {
- eltd->lock_in_conflict = 0;
- }
+ lcnt_init_list(&lcnt_current_lock_list);
+ lcnt_init_list(&lcnt_deleted_lock_list);
+
+ lcnt_init_static_lock_registry();
}
-/* if a lock wasn't really a lock operation, bad bad process locks */
+void erts_lcnt_post_thr_init() {
+    /* ASSUMPTION: this is safe since it runs prior to the creation of other
+     * threads (directly after ethread init). */
-void erts_lcnt_lock_unaquire(erts_lcnt_lock_t *lock) {
- /* should check if this thread was "waiting" */
-
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (!ERTS_LCNT_LOCK_TYPE(lock)) return;
+ ethr_tsd_key_create(&lcnt_thr_data_key__, "lcnt_data");
- ethr_atomic_dec( &lock->w_state);
+ erts_lcnt_thread_setup();
}
-/* erts_lcnt_lock_post
- * used when we get a lock (i.e. directly after a lock operation)
- * if the timer was set then we had to wait for the lock
- * lock_post will calculate the wait time.
- */
-void erts_lcnt_lock_post(erts_lcnt_lock_t *lock) {
- erts_lcnt_lock_post_x(lock, (char*)str_undefined, 0);
-}
+void erts_lcnt_late_init() {
+ /* Set start timer and zero all statistics */
+ erts_lcnt_clear_counters();
+ erts_thr_install_exit_handler(erts_lcnt_thread_exit_handler);
-void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line) {
- erts_lcnt_thread_data_t *eltd;
- erts_lcnt_time_t timer;
- erts_lcnt_time_t time_wait;
- erts_lcnt_lock_stats_t *stats;
#ifdef DEBUG
- erts_aint_t flowstate;
+ /* It's safe to use erts_alloc and thread progress past this point. */
+ lcnt_initialization_completed__ = 1;
#endif
+}
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (!ERTS_LCNT_LOCK_TYPE(lock)) return;
-
-#ifdef DEBUG
- if (!(lock->flag & (ERTS_LCNT_LT_RWMUTEX | ERTS_LCNT_LT_RWSPINLOCK))) {
- flowstate = ethr_atomic_read(&lock->flowstate);
- ASSERT(flowstate == 0);
- ethr_atomic_inc( &lock->flowstate);
- }
-#endif
-
- eltd = lcnt_get_thread_data();
-
- ASSERT(eltd);
+void erts_lcnt_post_startup(void) {
+ /* Default to capturing everything to match the behavior of the old lock
+ * counter build. */
+ erts_lcnt_set_category_mask(ERTS_LOCK_FLAGS_MASK_CATEGORY);
+}
- /* if lock was in conflict, time it */
-
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_LOCATION) {
- stats = lcnt_get_lock_stats(lock, file, line);
- } else {
- stats = &lock->stats[0];
- }
+void erts_lcnt_thread_setup() {
+ lcnt_thread_data_t__ *eltd = lcnt_thread_data_alloc();
- if (eltd->timer_set) {
- lcnt_time(&timer);
-
- lcnt_time_diff(&time_wait, &timer, &(eltd->timer));
- lcnt_update_stats(stats, eltd->lock_in_conflict, &time_wait);
-
- eltd->timer_set--;
- ASSERT(eltd->timer_set >= 0);
- } else {
- lcnt_update_stats(stats, eltd->lock_in_conflict, NULL);
- }
+ ASSERT(eltd);
+ ethr_tsd_set(lcnt_thr_data_key__, eltd);
}
-/* unlock */
+void erts_lcnt_thread_exit_handler() {
+ lcnt_thread_data_t__ *eltd = lcnt_get_thread_data__();
-void erts_lcnt_unlock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (!ERTS_LCNT_LOCK_TYPE(lock)) return;
- if (option & ERTS_LCNT_LO_WRITE) ethr_atomic_dec(&lock->w_state);
- if (option & ERTS_LCNT_LO_READ ) ethr_atomic_dec(&lock->r_state);
+ if (eltd) {
+ free(eltd);
+ }
}
-void erts_lcnt_unlock(erts_lcnt_lock_t *lock) {
-#ifdef DEBUG
- erts_aint_t w_state;
- erts_aint_t flowstate;
-#endif
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (!ERTS_LCNT_LOCK_TYPE(lock)) return;
+/* - BIF interface - */
+
+void erts_lcnt_retain_lock_info(erts_lcnt_lock_info_t *info) {
#ifdef DEBUG
- /* flowstate */
- flowstate = ethr_atomic_read(&lock->flowstate);
- ASSERT(flowstate == 1);
- ethr_atomic_dec( &lock->flowstate);
-
- /* write state */
- w_state = ethr_atomic_read(&lock->w_state);
- ASSERT(w_state > 0)
+ ASSERT(ethr_atomic_inc_read_acqb(&info->ref_count) >= 2);
+#else
+ ethr_atomic_inc_acqb(&info->ref_count);
#endif
- ethr_atomic_dec(&lock->w_state);
}
-/* trylock */
+void erts_lcnt_release_lock_info(erts_lcnt_lock_info_t *info) {
+ ethr_sint_t count;
-void erts_lcnt_trylock_opt(erts_lcnt_lock_t *lock, int res, Uint16 option) {
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (!ERTS_LCNT_LOCK_TYPE(lock)) return;
- /* Determine lock_state via res instead of state */
- if (res != EBUSY) {
- if (option & ERTS_LCNT_LO_WRITE) ethr_atomic_inc(&lock->w_state);
- if (option & ERTS_LCNT_LO_READ ) ethr_atomic_inc(&lock->r_state);
- lcnt_update_stats(&(lock->stats[0]), 0, NULL);
- } else {
- ethr_atomic_inc(&lock->stats[0].tries);
- ethr_atomic_inc(&lock->stats[0].colls);
- }
-}
+ /* We need to acquire the lock before decrementing ref_count to avoid
+ * racing with list iteration; there's a short window between reading the
+ * reference to info and increasing its ref_count. */
+ lcnt_lock_list_entry_with_neighbors(info);
-
-void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res) {
- /* Determine lock_state via res instead of state */
-#ifdef DEBUG
- erts_aint_t flowstate;
-#endif
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (!ERTS_LCNT_LOCK_TYPE(lock)) return;
- if (res != EBUSY) {
-
-#ifdef DEBUG
- flowstate = ethr_atomic_read(&lock->flowstate);
- ASSERT(flowstate == 0);
- ethr_atomic_inc( &lock->flowstate);
-#endif
- ethr_atomic_inc(&lock->w_state);
-
- lcnt_update_stats(&(lock->stats[0]), 0, NULL);
+ count = ethr_atomic_dec_read(&info->ref_count);
+ ASSERT(count >= 0);
+
+ if(count > 0) {
+ lcnt_unlock_list_entry_with_neighbors(info);
} else {
- ethr_atomic_inc(&lock->stats[0].tries);
- ethr_atomic_inc(&lock->stats[0].colls);
+ (info->next)->prev = info->prev;
+ (info->prev)->next = info->next;
+
+ lcnt_unlock_list_entry_with_neighbors(info);
+
+ info->dispose(info);
}
}
-/* thread operations */
+erts_lock_flags_t erts_lcnt_get_category_mask() {
+ return lcnt_category_mask__;
+}
-void erts_lcnt_thread_setup(void) {
- erts_lcnt_thread_data_t *eltd;
+void erts_lcnt_set_category_mask(erts_lock_flags_t mask) {
+ erts_lock_flags_t changed_categories;
- lcnt_lock();
- /* lock for thread id global update */
- eltd = lcnt_thread_data_alloc();
- lcnt_unlock();
- ASSERT(eltd);
- ethr_tsd_set(lcnt_thr_data_key, eltd);
-}
+ ASSERT(!(mask & ~ERTS_LOCK_FLAGS_MASK_CATEGORY));
+ ASSERT(lcnt_initialization_completed__);
-void erts_lcnt_thread_exit_handler() {
- erts_lcnt_thread_data_t *eltd;
+ changed_categories = (lcnt_category_mask__ ^ mask);
+ lcnt_category_mask__ = mask;
- eltd = ethr_tsd_get(lcnt_thr_data_key);
+ if(changed_categories) {
+ lcnt_update_static_locks();
+ }
- if (eltd) {
- free(eltd);
+ if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION) {
+ erts_lcnt_update_distribution_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
}
-}
-/* bindings for bifs */
+ if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR) {
+ erts_lcnt_update_allocator_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
+ }
-Uint16 erts_lcnt_set_rt_opt(Uint16 opt) {
- Uint16 prev;
- prev = (erts_lcnt_rt_options & opt);
- erts_lcnt_rt_options |= opt;
- return prev;
+ if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_PROCESS) {
+ erts_lcnt_update_process_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
+ }
+
+ if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_IO) {
+ erts_lcnt_update_cio_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
+ erts_lcnt_update_driver_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
+ erts_lcnt_update_port_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
+ }
+
+ if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_DB) {
+ erts_lcnt_update_db_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_DB);
+ }
}
-Uint16 erts_lcnt_clear_rt_opt(Uint16 opt) {
- Uint16 prev;
- prev = (erts_lcnt_rt_options & opt);
- erts_lcnt_rt_options &= ~opt;
- return prev;
+void erts_lcnt_set_preserve_info(int enable) {
+ lcnt_preserve_info = enable;
+}
+
+int erts_lcnt_get_preserve_info() {
+ return lcnt_preserve_info;
}
void erts_lcnt_clear_counters(void) {
- erts_lcnt_lock_t *lock;
- erts_lcnt_lock_list_t *list;
- erts_lcnt_lock_stats_t *stats;
- int i;
+ erts_lcnt_lock_info_t *iterator;
- lcnt_lock();
+ lcnt_time__(&lcnt_timer_start);
- list = erts_lcnt_data->current_locks;
-
- for (lock = list->head; lock != NULL; lock = lock->next) {
- for( i = 0; i < ERTS_LCNT_MAX_LOCK_LOCATIONS; i++) {
- stats = &lock->stats[i];
- lcnt_clear_stats(stats);
- }
- lock->n_stats = 1;
+ iterator = NULL;
+ while(erts_lcnt_iterate_list(&lcnt_current_lock_list, &iterator)) {
+ lcnt_clear_stats(iterator);
}
- /* empty deleted locks in lock list */
- erts_lcnt_list_clear(erts_lcnt_data->deleted_locks);
-
- lcnt_time(&timer_start);
-
- lcnt_unlock();
+ iterator = NULL;
+ while(erts_lcnt_iterate_list(&lcnt_deleted_lock_list, &iterator)) {
+ erts_lcnt_release_lock_info(iterator);
+ }
}
-erts_lcnt_data_t *erts_lcnt_get_data(void) {
+erts_lcnt_data_t erts_lcnt_get_data(void) {
erts_lcnt_time_t timer_stop;
-
- lcnt_lock();
-
- lcnt_time(&timer_stop);
- lcnt_time_diff(&(erts_lcnt_data->duration), &timer_stop, &timer_start);
-
- lcnt_unlock();
-
- return erts_lcnt_data;
+ erts_lcnt_data_t result;
+
+ lcnt_time__(&timer_stop);
+
+ result.timer_start = lcnt_timer_start;
+
+ result.current_locks = &lcnt_current_lock_list;
+ result.deleted_locks = &lcnt_deleted_lock_list;
+
+ lcnt_time_diff__(&result.duration, &timer_stop, &result.timer_start);
+
+ return result;
}
-char *erts_lcnt_lock_type(Uint16 type) {
- return lcnt_lock_type(type);
+int erts_lcnt_iterate_list(erts_lcnt_lock_info_list_t *list, erts_lcnt_lock_info_t **iterator) {
+ erts_lcnt_lock_info_t *current, *next;
+
+ current = *iterator ? *iterator : &list->head;
+
+ ASSERT(current != &list->tail);
+
+ lcnt_lock_list_entry(current);
+
+ next = current->next;
+
+ if(next != &list->tail) {
+ erts_lcnt_retain_lock_info(next);
+ }
+
+ lcnt_unlock_list_entry(current);
+
+ if(current != &list->head) {
+ erts_lcnt_release_lock_info(current);
+ }
+
+ *iterator = next;
+
+ return next != &list->tail;
}
-#endif /* ifdef ERTS_ENABLE_LOCK_COUNT */
+#endif /* #ifdef ERTS_ENABLE_LOCK_COUNT */
diff --git a/erts/emulator/beam/erl_lock_count.h b/erts/emulator/beam/erl_lock_count.h
index 75f7cd028b..89d95a73cf 100644
--- a/erts/emulator/beam/erl_lock_count.h
+++ b/erts/emulator/beam/erl_lock_count.h
@@ -1,214 +1,928 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2008-2012. All Rights Reserved.
+ * Copyright Ericsson AB 2008-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
-/*
- * Description: Statistics for locks.
- *
- * Author: Björn-Egil Dahlberg
- * Date: 2008-07-03
- * Abstract:
- * Locks statistics internal representation.
- *
- * Conceptual representation,
- * - set name
- * | - id (the unique lock)
- * | | - lock type
- * | | - statistics
- * | | | - location (file and line number)
- * | | | - tries
- * | | | - collisions (including trylock busy)
- * | | | - timer (time spent in waiting for lock)
- * | | | - n_timer (collisions excluding trylock busy)
- *
- * Each instance of a lock is the unique lock, i.e. set and id in that set.
- * For each lock there is a set of statistics with where and what impact
- * the lock aqusition had.
- *
- * Runtime options
- * - suspend, used when internal lock-counting can't be applied. For instance
- * when allocating a term for the outside and halloc needs to be used.
- * Default: off.
- * - location, reserved and not used.
- * - proclock, disable proclock counting. Used when performance might be an
- * issue. Accessible from erts_debug:lock_counters({process_locks, bool()}).
- * Default: off.
- * - copysave, enable saving of destroyed locks (and thereby its statistics).
- * If memory constraints is an issue this need to be disabled.
- * Accessible from erts_debug:lock_counters({copy_save, bool()}).
- * Default: off.
+/**
+ * @description Statistics for locks.
+ * @file erl_lock_count.h
+ *
+ * @author Björn-Egil Dahlberg
+ * @author John Högberg
*
+ * Conceptual representation:
+ *
+ * - set name
+ * | - id (the unique lock)
+ * | | - lock type
+ * | | - statistics
+ * | | | - location (file and line number)
+ * | | | - attempts
+ * | | | - collisions (including trylock busy)
+ * | | | - timer (time spent in waiting for lock)
+ * | | | - n_timer (collisions excluding trylock busy)
+ * | | | - histogram
+ * | | | | - # 0 = log2(lock wait_time ns)
+ * | | | | - ...
+ * | | | | - # n = log2(lock wait_time ns)
+ *
+ * Each instance of a lock is the unique lock, i.e. set and id in that set.
+ * For each lock there is a set of statistics with where and what impact
+ * the lock acquisition had.
*/
-#include "sys.h"
-
#ifndef ERTS_LOCK_COUNT_H__
#define ERTS_LOCK_COUNT_H__
#ifdef ERTS_ENABLE_LOCK_COUNT
#ifndef ERTS_ENABLE_LOCK_POSITION
-/* Enable in order for _x variants of mtx functions to be used. */
+/** @brief Controls whether _x variants of mtx functions are used. */
#define ERTS_ENABLE_LOCK_POSITION 1
#endif
+#include "sys.h"
#include "ethread.h"
+#include "erl_term.h"
+#include "erl_lock_flags.h"
-#define ERTS_LCNT_MAX_LOCK_LOCATIONS (10)
-
-#define ERTS_LCNT_LT_SPINLOCK (((Uint16) 1) << 0)
-#define ERTS_LCNT_LT_RWSPINLOCK (((Uint16) 1) << 1)
-#define ERTS_LCNT_LT_MUTEX (((Uint16) 1) << 2)
-#define ERTS_LCNT_LT_RWMUTEX (((Uint16) 1) << 3)
-#define ERTS_LCNT_LT_PROCLOCK (((Uint16) 1) << 4)
-#define ERTS_LCNT_LT_ALLOC (((Uint16) 1) << 5)
-
-#define ERTS_LCNT_LO_READ (((Uint16) 1) << 6)
-#define ERTS_LCNT_LO_WRITE (((Uint16) 1) << 7)
-
-#define ERTS_LCNT_LO_READ_WRITE ( ERTS_LCNT_LO_READ \
- | ERTS_LCNT_LO_WRITE )
+#define ERTS_LCNT_MAX_LOCK_LOCATIONS (5)
-#define ERTS_LCNT_LT_ALL ( ERTS_LCNT_LT_SPINLOCK \
- | ERTS_LCNT_LT_RWSPINLOCK \
- | ERTS_LCNT_LT_MUTEX \
- | ERTS_LCNT_LT_RWMUTEX \
- | ERTS_LCNT_LT_PROCLOCK )
-/* runtime options */
-
-#define ERTS_LCNT_OPT_SUSPEND (((Uint16) 1) << 0)
-#define ERTS_LCNT_OPT_LOCATION (((Uint16) 1) << 1)
-#define ERTS_LCNT_OPT_PROCLOCK (((Uint16) 1) << 2)
-#define ERTS_LCNT_OPT_COPYSAVE (((Uint16) 1) << 3)
-#define ERTS_LCNT_OPT_PORTLOCK (((Uint16) 1) << 4)
+#define ERTS_LCNT_HISTOGRAM_MAX_NS (((unsigned long)1LL << 28) - 1)
+#if 0 || defined(ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT)
+#define ERTS_LCNT_HISTOGRAM_SLOT_SIZE (30)
+#define ERTS_LCNT_HISTOGRAM_RSHIFT (0)
+#else
+#define ERTS_LCNT_HISTOGRAM_SLOT_SIZE (20)
+#define ERTS_LCNT_HISTOGRAM_RSHIFT (10)
+#endif
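+
+/* With the fallback parameters above (20 slots, right-shift of 10), a wait
+ * of 2^14 ns is filed under slot lcnt_log2__(2^14 >> 10) = 4, while anything
+ * longer than ERTS_LCNT_HISTOGRAM_MAX_NS lands in the last slot. */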
typedef struct {
unsigned long s;
unsigned long ns;
} erts_lcnt_time_t;
-
-extern erts_lcnt_time_t timer_start;
-typedef struct erts_lcnt_lock_stats_s {
- /* "tries" and "colls" needs to be atomic since
- * trylock busy does not aquire a lock and there
- * is no post action to rectify the situation
- */
+typedef struct {
+    /** @brief log2 histogram of nanosecond occurrences */
+ Uint32 ns[ERTS_LCNT_HISTOGRAM_SLOT_SIZE];
+} erts_lcnt_hist_t;
+
+typedef struct {
+ /** @brief In which file the lock was taken. May be NULL. */
+ const char *file;
+ /** @brief Line number in \c file */
+ unsigned int line;
- char *file; /* which file the lock was taken */
- unsigned int line; /* line number in file */
-
- ethr_atomic_t tries; /* n tries to get lock */
- ethr_atomic_t colls; /* n collisions of tries to get lock */
-
- unsigned long timer_n; /* #times waited for lock */
- erts_lcnt_time_t timer; /* total wait time for lock */
+ /* "attempts" and "collisions" need to be atomic since try_lock busy does
+ * not acquire a lock and there is no post action to rectify the
+ * situation. */
+
+ ethr_atomic_t attempts;
+ ethr_atomic_t collisions;
+
+ erts_lcnt_time_t total_time_waited;
+ Uint64 times_waited;
+
+ erts_lcnt_hist_t wait_time_histogram;
} erts_lcnt_lock_stats_t;
-/* rw locks uses both states, other locks only uses w_state */
-typedef struct erts_lcnt_lock_s {
- char *name; /* lock name */
- Uint16 flag; /* lock type */
- Eterm id; /* id if possible */
+typedef struct lcnt_lock_info_t_ {
+ erts_lock_flags_t flags;
+ const char *name;
+ /** @brief Id if possible, must be an immediate */
+ Eterm id;
-#ifdef DEBUG
- ethr_atomic_t flowstate;
-#endif
+ /* The first entry is reserved as a fallback for when location information
+ * is missing, and when the lock is used in more than (MAX_LOCK_LOCATIONS
+ * - 1) different places. */
+ erts_lcnt_lock_stats_t location_stats[ERTS_LCNT_MAX_LOCK_LOCATIONS];
+ unsigned int location_count;
- /* lock states */
- ethr_atomic_t w_state; /* 0 not taken, otherwise n threads waiting */
- ethr_atomic_t r_state; /* 0 not taken, > 0 -> writes will wait */
+ /* -- Everything below is internal to this module ---------------------- */
- /* statistics */
- unsigned int n_stats;
- erts_lcnt_lock_stats_t stats[ERTS_LCNT_MAX_LOCK_LOCATIONS]; /* first entry is "undefined"*/
-
- /* chains for list handling */
- /* data is hold by lcnt_lock */
- struct erts_lcnt_lock_s *prev;
- struct erts_lcnt_lock_s *next;
-} erts_lcnt_lock_t;
+    /* Lock states; rw locks use both states, other locks use only w_state */
-typedef struct {
- erts_lcnt_lock_t *head;
- erts_lcnt_lock_t *tail;
- unsigned long n;
-} erts_lcnt_lock_list_t;
-
-typedef struct {
- erts_lcnt_time_t duration; /* time since last clear */
- erts_lcnt_lock_list_t *current_locks;
- erts_lcnt_lock_list_t *deleted_locks;
-} erts_lcnt_data_t;
+ /** @brief Write state. 0 = not taken, otherwise n threads waiting */
+ ethr_atomic_t w_state;
+ /** @brief Read state. 0 = not taken, > 0 -> writes will wait */
+ ethr_atomic_t r_state;
+
+ struct lcnt_lock_info_t_ *prev;
+ struct lcnt_lock_info_t_ *next;
+
+ /** @brief Used in place of erts_refc_t to avoid a circular dependency. */
+ ethr_atomic_t ref_count;
+ ethr_atomic32_t lock;
+
+ /** @brief Deletion hook called once \c ref_count reaches 0; may defer
+ * deletion by modifying \c ref_count. */
+ void (*dispose)(struct lcnt_lock_info_t_ *);
+
+ struct lcnt_lock_info_carrier_ *carrier;
+} erts_lcnt_lock_info_t;
+
+typedef struct lcnt_lock_info_list_ {
+ erts_lcnt_lock_info_t head;
+ erts_lcnt_lock_info_t tail;
+} erts_lcnt_lock_info_list_t;
typedef struct {
- int id;
+ erts_lcnt_time_t timer_start; /**< Time of last clear */
+ erts_lcnt_time_t duration; /**< Time since last clear */
- erts_lcnt_time_t timer; /* timer */
- int timer_set; /* bool */
- int lock_in_conflict; /* bool */
-} erts_lcnt_thread_data_t;
+ erts_lcnt_lock_info_list_t *current_locks;
+ erts_lcnt_lock_info_list_t *deleted_locks;
+} erts_lcnt_data_t;
-/* globals */
+typedef struct lcnt_lock_info_carrier_ erts_lcnt_lock_info_carrier_t;
-extern Uint16 erts_lcnt_rt_options;
+typedef ethr_atomic_t erts_lcnt_ref_t;
-/* function declerations */
+/* -- Globals -------------------------------------------------------------- */
-void erts_lcnt_init(void);
+/** @brief Checks whether counting is enabled for any of the given
+ * categories. */
+#define erts_lcnt_check_enabled(flags) \
+    (lcnt_category_mask__ & (flags))
+
+/* -- Lock operations ------------------------------------------------------
+ *
+ * All of these will nop if there's nothing "installed" on the given reference,
+ * in order to transparently support enable/disable at runtime. */
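+
+/* A minimal wrapper sketch (hypothetical mutex type and do_lock/do_unlock
+ * operations; only the erts_lcnt_* calls below are part of this API):
+ *
+ *     void my_mutex_lock(my_mutex_t *mtx) {
+ *         erts_lcnt_lock(&mtx->lcnt);       // record the attempt
+ *         do_lock(mtx);                     // potentially blocks
+ *         erts_lcnt_lock_post(&mtx->lcnt);  // record time spent waiting
+ *     }
+ *
+ *     void my_mutex_unlock(my_mutex_t *mtx) {
+ *         do_unlock(mtx);
+ *         erts_lcnt_unlock(&mtx->lcnt);
+ *     }
+ */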
+
+/** @brief Records that a lock is being acquired. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock(erts_lcnt_ref_t *ref);
+
+/** @copydoc erts_lcnt_lock
+ * @param option Notes whether the lock is a read or write lock. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_opt(erts_lcnt_ref_t *ref, erts_lock_options_t option);
+
+/** @brief Records that a lock has been acquired. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_post(erts_lcnt_ref_t *ref);
+
+/** @copydoc erts_lcnt_lock_post
+ * @param file The name of the file where the lock was acquired.
+ * @param line The line at which the lock was acquired. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_post_x(erts_lcnt_ref_t *ref, char *file, unsigned int line);
+
+/** @brief Records that a lock has been released. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_unlock(erts_lcnt_ref_t *ref);
+
+/** @copydoc erts_lcnt_unlock
+ * @param option Whether the lock is a read or write lock. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_unlock_opt(erts_lcnt_ref_t *ref, erts_lock_options_t option);
+
+/** @brief Rectifies the case where a lock wasn't actually a lock operation.
+ *
+ * Only used for process locks at the moment. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_unacquire(erts_lcnt_ref_t *ref);
+
+/** @brief Records the result of a trylock, placing the queried lock status in
+ * \c result. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_trylock(erts_lcnt_ref_t *ref, int result);
+
+/** @copydoc erts_lcnt_trylock
+ * @param option Whether the lock is a read or write lock. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_trylock_opt(erts_lcnt_ref_t *ref, int result, erts_lock_options_t option);
+
+/* Indexed variants of the standard lock operations, for use when a single
+ * reference contains many counters (e.g. process locks).
+ *
+ * erts_lcnt_open_ref must be used to safely extract the installed carrier,
+ * which must be released with erts_lcnt_close_ref on success.
+ *
+ * Refer to \c erts_lcnt_lock for example usage. */
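+
+/* An indexed update sketch, inside a hypothetical locking function
+ * (LOCK_IDX is assumed to be a valid entry index; this mirrors what
+ * erts_lcnt_lock does for index 0):
+ *
+ *     erts_lcnt_lock_info_carrier_t *carrier;
+ *     int handle;
+ *
+ *     if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ *         erts_lcnt_lock_idx(carrier, LOCK_IDX);
+ *         erts_lcnt_close_ref(handle, carrier);
+ *     }
+ */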
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index);
+ERTS_GLB_INLINE
+void erts_lcnt_lock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, erts_lock_options_t option);
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_post_idx(erts_lcnt_lock_info_carrier_t *carrier, int index);
+ERTS_GLB_INLINE
+void erts_lcnt_lock_post_x_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, char *file, unsigned int line);
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_unacquire_idx(erts_lcnt_lock_info_carrier_t *carrier, int index);
+
+ERTS_GLB_INLINE
+void erts_lcnt_unlock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index);
+ERTS_GLB_INLINE
+void erts_lcnt_unlock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, erts_lock_options_t option);
+
+ERTS_GLB_INLINE
+void erts_lcnt_trylock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, int result);
+ERTS_GLB_INLINE
+void erts_lcnt_trylock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, int result, erts_lock_options_t option);
+
+/* -- Reference operations ------------------------------------------------- */
+
+/** @brief Registers a lock counter reference; this must be called prior to
+ * using any other functions in this module. */
+ERTS_GLB_INLINE
+void erts_lcnt_init_ref(erts_lcnt_ref_t *ref);
+
+/** @brief As \c erts_lcnt_init_ref, but also enables lock counting right
+ * away if appropriate to reduce noise.
+ * @param id An immediate erlang term with whatever extra data you want to
+ * identify this lock with. */
+ERTS_GLB_INLINE
+void erts_lcnt_init_ref_x(erts_lcnt_ref_t *ref, const char *name,
+ Eterm id, erts_lock_flags_t flags);
+
+/** @brief Checks whether counting is enabled on the given reference. */
+ERTS_GLB_FORCE_INLINE
+int erts_lcnt_check_ref_installed(erts_lcnt_ref_t *ref);
+
+/** @brief Convenience macro to re/enable counting on an already initialized
+ * reference. Don't forget to specify the lock type in \c flags! */
+#define erts_lcnt_install_new_lock_info(ref, name, id, flags) \
+    do { \
+        if(!erts_lcnt_check_ref_installed(ref)) { \
+            erts_lcnt_lock_info_carrier_t *__carrier; \
+            __carrier = erts_lcnt_create_lock_info_carrier(1); \
+            erts_lcnt_init_lock_info_idx(__carrier, 0, name, id, flags); \
+            erts_lcnt_install(ref, __carrier); \
+        } \
+    } while(0)
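+
+/* Typical use (hypothetical name and flags):
+ *
+ *     erts_lcnt_install_new_lock_info(&my_ref, "my_lock", NIL,
+ *         ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+ */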
+
+erts_lcnt_lock_info_carrier_t *erts_lcnt_create_lock_info_carrier(int count);
+
+/** @brief Initializes the lock info at the given index.
+ * @param id An immediate erlang term with whatever extra data you want to
+ * identify this lock with.
+ * @param flags The flags the lock itself was initialized with. Keep in mind
+ * that all locks in a carrier must share the same category/static property. */
+ERTS_GLB_INLINE
+void erts_lcnt_init_lock_info_idx(erts_lcnt_lock_info_carrier_t *carrier, int index,
+ const char *name, Eterm id, erts_lock_flags_t flags);
+
+/** @brief Atomically installs the given lock counters. Nops (and releases the
+ * provided carrier) if something was already installed. */
+void erts_lcnt_install(erts_lcnt_ref_t *ref, erts_lcnt_lock_info_carrier_t *carrier);
+
+/** @brief Atomically removes the currently installed lock counters. Nops if
+ * nothing was installed. */
+void erts_lcnt_uninstall(erts_lcnt_ref_t *ref);
+
+ERTS_GLB_FORCE_INLINE
+int erts_lcnt_open_ref(erts_lcnt_ref_t *ref, int *handle, erts_lcnt_lock_info_carrier_t **result);
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_close_ref(int handle, erts_lcnt_lock_info_carrier_t *carrier);
+
+/* -- Module initialization ------------------------------------------------ */
+
+void erts_lcnt_pre_thr_init(void);
+void erts_lcnt_post_thr_init(void);
void erts_lcnt_late_init(void);
-/* thread operations */
+/** @brief Called after everything in the system has been initialized, including
+ * the schedulers. This is mainly a backwards compatibility shim for matching
+ * the old lcnt behavior where all lock counting was enabled by default. */
+void erts_lcnt_post_startup(void);
+
void erts_lcnt_thread_setup(void);
void erts_lcnt_thread_exit_handler(void);
-/* list operations (local) */
-erts_lcnt_lock_list_t *erts_lcnt_list_init(void);
+/* -- BIF interface -------------------------------------------------------- */
+
+/** @brief Safely iterates through all entries in the given list.
+ *
+ * The referenced item will be valid until the next call to
+ * \c erts_lcnt_iterate_list after which point it may be destroyed; call
+ * erts_lcnt_retain_lock_info if you wish to hang on to it beyond that point.
+ *
+ * Iteration can be cancelled by calling erts_lcnt_release_lock_info on the
+ * iterator and breaking out of the loop.
+ *
+ * @param iterator The iteration variable; set the pointee to NULL to start
+ * iteration.
+ * @return 1 while the iterator is valid, 0 at the end of the list. */
+int erts_lcnt_iterate_list(erts_lcnt_lock_info_list_t *list, erts_lcnt_lock_info_t **iterator);
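+
+/* An iteration sketch; `list` is assumed to come from erts_lcnt_get_data,
+ * and inspect() is a hypothetical consumer:
+ *
+ *     erts_lcnt_lock_info_t *iterator = NULL;
+ *
+ *     while(erts_lcnt_iterate_list(list, &iterator)) {
+ *         inspect(iterator);
+ *     }
+ */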
+
+/** @brief Clears the counter state of all locks, and releases all locks
+ * preserved through erts_lcnt_set_preserve_info (if any). */
+void erts_lcnt_clear_counters(void);
+
+/** @brief Retrieves the global lock counter state.
+ *
+ * Note that the lists may be modified while you're mucking around with them.
+ * Always use \c erts_lcnt_iterate_list to enumerate them. */
+erts_lcnt_data_t erts_lcnt_get_data(void);
+
+void erts_lcnt_retain_lock_info(erts_lcnt_lock_info_t *info);
+void erts_lcnt_release_lock_info(erts_lcnt_lock_info_t *info);
+
+/** @brief Sets whether to preserve the info of destroyed/uninstalled locks.
+ *
+ * This option makes no distinction whether the lock was destroyed or if lock
+ * counting was simply disabled, so erts_lcnt_set_category_mask must not be
+ * used while this option is active. */
+void erts_lcnt_set_preserve_info(int enable);
+
+int erts_lcnt_get_preserve_info(void);
+
+/** @brief Updates the category mask, enabling or disabling counting on the
+ * affected locks as necessary.
+ *
+ * This is not guaranteed to find all existing locks; only those that are
+ * flagged as static locks and those reachable through other means can be
+ * altered. */
+void erts_lcnt_set_category_mask(erts_lock_flags_t mask);
+
+erts_lock_flags_t erts_lcnt_get_category_mask(void);
+
+/* -- Inline implementation ------------------------------------------------ */
+
+/* The following is a hack to get the things we need from erl_thr_progress.h,
+ * which we can't #include without dependency hell breaking loose.
+ *
+ * The size of LcntThrPrgrLaterOp and value of the constant are verified at
+ * compile-time in erts_lcnt_pre_thr_init. */
+
+int lcnt_thr_progress_unmanaged_delay__(void);
+void lcnt_thr_progress_unmanaged_continue__(int handle);
+typedef struct { Uint64 _[4]; } LcntThrPrgrLaterOp;
+#define LCNT_THR_PRGR_DHANDLE_MANAGED -1
+
+struct lcnt_lock_info_carrier_ {
+ ethr_atomic_t ref_count;
+
+ LcntThrPrgrLaterOp release_entries;
+
+ unsigned char entry_count;
+ erts_lcnt_lock_info_t entries[];
+};
+
+typedef struct {
+    erts_lcnt_time_t timer; /* start of the current wait */
+ int timer_set; /* bool */
+ int lock_in_conflict; /* bool */
+} lcnt_thread_data_t__;
+
+extern const int lcnt_log2_tab64__[];
+
+extern ethr_tsd_key lcnt_thr_data_key__;
+extern erts_lock_flags_t lcnt_category_mask__;
+
+#ifdef DEBUG
+extern int lcnt_initialization_completed__;
+#endif
+
+void lcnt_register_static_lock__(erts_lcnt_ref_t *reference, const char *name, Eterm id,
+ erts_lock_flags_t flags);
+
+void lcnt_deallocate_carrier__(erts_lcnt_lock_info_carrier_t *carrier);
+
+ERTS_GLB_INLINE
+int lcnt_log2__(Uint64 v);
+
+ERTS_GLB_INLINE
+void lcnt_update_wait_histogram__(erts_lcnt_hist_t *hist, erts_lcnt_time_t *time_waited);
+
+ERTS_GLB_INLINE
+void lcnt_update_stats__(erts_lcnt_lock_stats_t *stats, int lock_in_conflict, erts_lcnt_time_t *time_waited);
+
+ERTS_GLB_INLINE
+erts_lcnt_lock_stats_t *lcnt_get_lock_stats__(erts_lcnt_lock_info_t *info, char *file, unsigned int line);
+
+ERTS_GLB_INLINE
+void lcnt_dec_lock_state__(ethr_atomic_t *l_state);
+
+ERTS_GLB_INLINE
+void lcnt_time__(erts_lcnt_time_t *time);
+
+ERTS_GLB_INLINE
+void lcnt_time_add__(erts_lcnt_time_t *t, erts_lcnt_time_t *d);
+
+ERTS_GLB_INLINE
+void lcnt_time_diff__(erts_lcnt_time_t *d, erts_lcnt_time_t *t1, erts_lcnt_time_t *t0);
+
+ERTS_GLB_INLINE
+void lcnt_retain_carrier__(erts_lcnt_lock_info_carrier_t *carrier);
+
+ERTS_GLB_INLINE
+void lcnt_release_carrier__(erts_lcnt_lock_info_carrier_t *carrier);
+
+ERTS_GLB_INLINE
+lcnt_thread_data_t__ *lcnt_get_thread_data__(void);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE
+void lcnt_time__(erts_lcnt_time_t *time) {
+    /* erts_sys_hrtime() is the highest-resolution timer we could find;
+     * it may or may not be monotonic. */
+ ErtsMonotonicTime mtime = erts_sys_hrtime();
+ time->s = (unsigned long) (mtime / 1000000000LL);
+ time->ns = (unsigned long) (mtime - 1000000000LL*time->s);
+}
+
+/* The added difference d must be non-negative. */
+
+ERTS_GLB_INLINE
+void lcnt_time_add__(erts_lcnt_time_t *t, erts_lcnt_time_t *d) {
+ t->s += d->s;
+ t->ns += d->ns;
+
+ t->s += t->ns / 1000000000LL;
+ t->ns = t->ns % 1000000000LL;
+}
+
+ERTS_GLB_INLINE
+void lcnt_time_diff__(erts_lcnt_time_t *d, erts_lcnt_time_t *t1, erts_lcnt_time_t *t0) {
+ long ds;
+ long dns;
+
+ ds = t1->s - t0->s;
+ dns = t1->ns - t0->ns;
+
+    /* The nanosecond part of the difference can never exceed one second. */
+
+ if (dns < 0) {
+ ds -= 1;
+ dns += 1000000000LL;
+ }
+
+ ASSERT(ds >= 0);
+
+ d->s = ds;
+ d->ns = dns;
+}
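+
+/* e.g. t1 = {2s, 100ns} minus t0 = {1s, 900000000ns}
+ * yields d = {0s, 100000100ns}. */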
+
+ERTS_GLB_INLINE
+int lcnt_log2__(Uint64 v) {
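+    /* Branchless floor(log2): the shifts below smear the highest set bit
+     * downward so v becomes 2^(k+1) - 1, (v - (v >> 1)) then isolates bit k,
+     * and the de Bruijn-style multiply maps that power of two to a unique
+     * table index. */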
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ v |= v >> 32;
+
+ return lcnt_log2_tab64__[((Uint64)((v - (v >> 1))*0x07EDD5E59A4E28C2)) >> 58];
+}
+
+ERTS_GLB_INLINE
+void lcnt_update_wait_histogram__(erts_lcnt_hist_t *hist, erts_lcnt_time_t *time_waited) {
+ int idx;
+
+ if(time_waited->s > 0 || time_waited->ns > ERTS_LCNT_HISTOGRAM_MAX_NS) {
+ idx = ERTS_LCNT_HISTOGRAM_SLOT_SIZE - 1;
+ } else {
+ unsigned long r = time_waited->ns >> ERTS_LCNT_HISTOGRAM_RSHIFT;
+
+ idx = r ? lcnt_log2__(r) : 0;
+ }
+
+ hist->ns[idx]++;
+}
+
+ERTS_GLB_INLINE
+void lcnt_update_stats__(erts_lcnt_lock_stats_t *stats, int lock_in_conflict, erts_lcnt_time_t *time_waited) {
+ ethr_atomic_inc(&stats->attempts);
+
+ if(lock_in_conflict) {
+ ethr_atomic_inc(&stats->collisions);
+ }
+
+ if(time_waited) {
+ stats->times_waited++;
+
+ lcnt_time_add__(&stats->total_time_waited, time_waited);
+ lcnt_update_wait_histogram__(&stats->wait_time_histogram, time_waited);
+ }
+}
+
+/* If we were installed while the lock was held, r/w_state will be 0 and we
+ * can't tell which unlock or unacquire operation was the last. To get around
+ * this we assume that all excess operations go *towards* zero rather than down
+ * to zero, eventually becoming consistent with the actual state once the lock
+ * is fully released.
+ *
+ * Conflicts might not be counted until the recorded state is fully consistent
+ * with the actual state, but there should be no other ill effects. */
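+
+/* Example: if counters are installed while a mutex is held, the next unlock
+ * sees w_state == 0, briefly takes it to -1, and restores it to 0; from that
+ * point on, lock/unlock pairs balance normally. */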
+
+ERTS_GLB_INLINE
+void lcnt_dec_lock_state__(ethr_atomic_t *l_state) {
+ ethr_sint_t state = ethr_atomic_dec_read_acqb(l_state);
+
+ /* We can not assume that state is >= -1 here; unlock and unacquire might
+ * bring it below -1 and race to increment it back. */
+
+ if(state < 0) {
+ ethr_atomic_inc_acqb(l_state);
+ }
+}
+
+ERTS_GLB_INLINE
+erts_lcnt_lock_stats_t *lcnt_get_lock_stats__(erts_lcnt_lock_info_t *info, char *file, unsigned int line) {
+ unsigned int i;
+
+ ASSERT(info->location_count >= 1 && info->location_count <= ERTS_LCNT_MAX_LOCK_LOCATIONS);
-void erts_lcnt_list_clear( erts_lcnt_lock_list_t *list);
-void erts_lcnt_list_insert(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock);
-void erts_lcnt_list_delete(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock);
+ for(i = 0; i < info->location_count; i++) {
+ erts_lcnt_lock_stats_t *stats = &info->location_stats[i];
-/* lock operations (global) */
-void erts_lcnt_init_lock(erts_lcnt_lock_t *lock, char *name, Uint16 flag);
-void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eterm id);
-void erts_lcnt_destroy_lock(erts_lcnt_lock_t *lock);
+ if(stats->file == file && stats->line == line) {
+ return stats;
+ }
+ }
-void erts_lcnt_lock(erts_lcnt_lock_t *lock);
-void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option);
-void erts_lcnt_lock_post(erts_lcnt_lock_t *lock);
-void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line);
-void erts_lcnt_lock_unaquire(erts_lcnt_lock_t *lock);
+ if(info->location_count < ERTS_LCNT_MAX_LOCK_LOCATIONS) {
+ erts_lcnt_lock_stats_t *stats = &info->location_stats[info->location_count];
-void erts_lcnt_unlock(erts_lcnt_lock_t *lock);
-void erts_lcnt_unlock_opt(erts_lcnt_lock_t *lock, Uint16 option);
+ stats->file = file;
+ stats->line = line;
-void erts_lcnt_trylock_opt(erts_lcnt_lock_t *lock, int res, Uint16 option);
-void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res);
+ info->location_count++;
-/* bif interface */
-Uint16 erts_lcnt_set_rt_opt(Uint16 opt);
-Uint16 erts_lcnt_clear_rt_opt(Uint16 opt);
-void erts_lcnt_clear_counters(void);
-char *erts_lcnt_lock_type(Uint16 type);
-erts_lcnt_data_t *erts_lcnt_get_data(void);
+ return stats;
+ }
-#define ERTS_LCNT_LOCK_TYPE(lockp) ((lockp)->flag & ERTS_LCNT_LT_ALL)
+ return &info->location_stats[0];
+}
+
+ERTS_GLB_INLINE
+lcnt_thread_data_t__ *lcnt_get_thread_data__(void) {
+ lcnt_thread_data_t__ *eltd = (lcnt_thread_data_t__ *)ethr_tsd_get(lcnt_thr_data_key__);
+
+ ASSERT(eltd);
+
+ return eltd;
+}
+
+ERTS_GLB_FORCE_INLINE
+int erts_lcnt_open_ref(erts_lcnt_ref_t *ref, int *handle, erts_lcnt_lock_info_carrier_t **result) {
+ if(ERTS_LIKELY(!erts_lcnt_check_ref_installed(ref))) {
+ return 0;
+ }
+
+ ASSERT(lcnt_initialization_completed__);
+
+ (*handle) = lcnt_thr_progress_unmanaged_delay__();
+ (*result) = (erts_lcnt_lock_info_carrier_t*)ethr_atomic_read(ref);
+
+ if(*result) {
+ if(*handle != LCNT_THR_PRGR_DHANDLE_MANAGED) {
+ lcnt_retain_carrier__(*result);
+ lcnt_thr_progress_unmanaged_continue__(*handle);
+ }
+
+ return 1;
+ } else if(*handle != LCNT_THR_PRGR_DHANDLE_MANAGED) {
+ lcnt_thr_progress_unmanaged_continue__(*handle);
+ }
+
+ return 0;
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_close_ref(int handle, erts_lcnt_lock_info_carrier_t *carrier) {
+ if(handle != LCNT_THR_PRGR_DHANDLE_MANAGED) {
+ lcnt_release_carrier__(carrier);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_init_ref(erts_lcnt_ref_t *ref) {
+ ethr_atomic_init(ref, (ethr_sint_t)NULL);
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_init_ref_x(erts_lcnt_ref_t *ref, const char *name,
+ Eterm id, erts_lock_flags_t flags) {
+ erts_lcnt_init_ref(ref);
+
+ if(flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC) {
+ lcnt_register_static_lock__(ref, name, id, flags);
+ }
+
+ if(erts_lcnt_check_enabled(flags)) {
+ erts_lcnt_install_new_lock_info(ref, name, id, flags);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+int erts_lcnt_check_ref_installed(erts_lcnt_ref_t *ref) {
+ return (!!*ethr_atomic_addr(ref));
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock(erts_lcnt_ref_t *ref) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_lock_idx(carrier, 0);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_opt(erts_lcnt_ref_t *ref, erts_lock_options_t option) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_lock_opt_idx(carrier, 0, option);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_post(erts_lcnt_ref_t *ref) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_lock_post_idx(carrier, 0);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_post_x(erts_lcnt_ref_t *ref, char *file, unsigned int line) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_lock_post_x_idx(carrier, 0, file, line);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_unacquire(erts_lcnt_ref_t *ref) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_lock_unacquire_idx(carrier, 0);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_unlock(erts_lcnt_ref_t *ref) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_unlock_idx(carrier, 0);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_unlock_opt(erts_lcnt_ref_t *ref, erts_lock_options_t option) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_unlock_opt_idx(carrier, 0, option);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_trylock(erts_lcnt_ref_t *ref, int result) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_trylock_idx(carrier, 0, result);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_trylock_opt(erts_lcnt_ref_t *ref, int result, erts_lock_options_t option) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_trylock_opt_idx(carrier, 0, result, option);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index) {
+ erts_lcnt_lock_opt_idx(carrier, index, ERTS_LOCK_OPTIONS_WRITE);
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, erts_lock_options_t option) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[index];
+
+ lcnt_thread_data_t__ *eltd = lcnt_get_thread_data__();
+
+ ASSERT(index < carrier->entry_count);
+
+ ASSERT((option & ERTS_LOCK_OPTIONS_READ) || (option & ERTS_LOCK_OPTIONS_WRITE));
+
+ if(option & ERTS_LOCK_OPTIONS_WRITE) {
+ ethr_sint_t w_state, r_state;
+
+ w_state = ethr_atomic_inc_read(&info->w_state) - 1;
+ r_state = ethr_atomic_read(&info->r_state);
+
+ /* We cannot acquire w_lock if either w or r are taken */
+ eltd->lock_in_conflict = (w_state > 0) || (r_state > 0);
+ } else {
+ ethr_sint_t w_state = ethr_atomic_read(&info->w_state);
+
+ /* We cannot acquire r_lock if w_lock is taken */
+ eltd->lock_in_conflict = (w_state > 0);
+ }
+
+ if(option & ERTS_LOCK_OPTIONS_READ) {
+ ASSERT(info->flags & ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE);
+ ethr_atomic_inc(&info->r_state);
+ }
+
+ if(eltd->lock_in_conflict) {
+ /* Only set the timer if nobody else has it. This should only happen
+ * when proc_locks acquires several locks "atomically." All other locks
+ * will block the thread when locked (w_state > 0) */
+ if(eltd->timer_set == 0) {
+ lcnt_time__(&eltd->timer);
+ }
+
+ eltd->timer_set++;
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_post_idx(erts_lcnt_lock_info_carrier_t *carrier, int index) {
+ erts_lcnt_lock_post_x_idx(carrier, index, NULL, 0);
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_post_x_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, char *file, unsigned int line) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[index];
+
+ lcnt_thread_data_t__ *eltd = lcnt_get_thread_data__();
+ erts_lcnt_lock_stats_t *stats;
+
+ ASSERT(index < carrier->entry_count);
+
+ /* If the lock was in conflict, update the time spent waiting. */
+ stats = lcnt_get_lock_stats__(info, file, line);
+ if(eltd->timer_set) {
+ erts_lcnt_time_t time_wait;
+ erts_lcnt_time_t timer;
+
+ lcnt_time__(&timer);
+
+ lcnt_time_diff__(&time_wait, &timer, &eltd->timer);
+ lcnt_update_stats__(stats, eltd->lock_in_conflict, &time_wait);
+
+ eltd->timer_set--;
+
+ ASSERT(eltd->timer_set >= 0);
+ } else {
+ lcnt_update_stats__(stats, eltd->lock_in_conflict, NULL);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_unlock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index) {
+ ASSERT(index < carrier->entry_count);
+
+ erts_lcnt_unlock_opt_idx(carrier, index, ERTS_LOCK_OPTIONS_WRITE);
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_unlock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, erts_lock_options_t option) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[index];
+
+ ASSERT(index < carrier->entry_count);
+
+ ASSERT((option & ERTS_LOCK_OPTIONS_READ) || (option & ERTS_LOCK_OPTIONS_WRITE));
+
+ if(option & ERTS_LOCK_OPTIONS_WRITE) {
+ lcnt_dec_lock_state__(&info->w_state);
+ }
+
+ if(option & ERTS_LOCK_OPTIONS_READ) {
+ ASSERT(info->flags & ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE);
+ lcnt_dec_lock_state__(&info->r_state);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_unacquire_idx(erts_lcnt_lock_info_carrier_t *carrier, int index) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[index];
+
+ ASSERT(index < carrier->entry_count);
+
+ lcnt_dec_lock_state__(&info->w_state);
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_trylock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, int result) {
+ ASSERT(index < carrier->entry_count);
+
+ erts_lcnt_trylock_opt_idx(carrier, index, result, ERTS_LOCK_OPTIONS_WRITE);
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_trylock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, int result, erts_lock_options_t option) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[index];
+
+ ASSERT(index < carrier->entry_count);
+
+ ASSERT((option & ERTS_LOCK_OPTIONS_READ) || (option & ERTS_LOCK_OPTIONS_WRITE));
+
+ if(result != EBUSY) {
+ if(option & ERTS_LOCK_OPTIONS_WRITE) {
+ ethr_atomic_inc(&info->w_state);
+ }
+
+ if(option & ERTS_LOCK_OPTIONS_READ) {
+ ASSERT(info->flags & ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE);
+ ethr_atomic_inc(&info->r_state);
+ }
+
+ lcnt_update_stats__(&info->location_stats[0], 0, NULL);
+ } else {
+ ethr_atomic_inc(&info->location_stats[0].attempts);
+ ethr_atomic_inc(&info->location_stats[0].collisions);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_init_lock_info_idx(erts_lcnt_lock_info_carrier_t *carrier, int index,
+ const char *name, Eterm id, erts_lock_flags_t flags) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[index];
+
+ ASSERT(is_immed(id));
+
+ ASSERT(flags & ERTS_LOCK_FLAGS_MASK_TYPE);
+ ASSERT(flags & ERTS_LOCK_FLAGS_MASK_CATEGORY);
+
+ info->flags = flags;
+ info->name = name;
+ info->id = id;
+}
+
+ERTS_GLB_INLINE
+void lcnt_retain_carrier__(erts_lcnt_lock_info_carrier_t *carrier) {
+#ifdef DEBUG
+ ASSERT(ethr_atomic_inc_read_acqb(&carrier->ref_count) >= 2);
+#else
+ ethr_atomic_inc_acqb(&carrier->ref_count);
+#endif
+}
+
+ERTS_GLB_INLINE
+void lcnt_release_carrier__(erts_lcnt_lock_info_carrier_t *carrier) {
+ ethr_sint_t count = ethr_atomic_dec_read_relb(&carrier->ref_count);
+
+ ASSERT(count >= 0);
+
+ if(count == 0) {
+ lcnt_deallocate_carrier__(carrier);
+ }
+}
+
+#endif
#endif /* ifdef ERTS_ENABLE_LOCK_COUNT */
#endif /* ifndef ERTS_LOCK_COUNT_H__ */
diff --git a/erts/emulator/beam/erl_lock_flags.c b/erts/emulator/beam/erl_lock_flags.c
new file mode 100644
index 0000000000..e0a0e95c09
--- /dev/null
+++ b/erts/emulator/beam/erl_lock_flags.c
@@ -0,0 +1,59 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2017. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "erl_lock_flags.h"
+
+const char *erts_lock_flags_get_type_name(erts_lock_flags_t flags) {
+ switch(flags & ERTS_LOCK_FLAGS_MASK_TYPE) {
+ case ERTS_LOCK_FLAGS_TYPE_PROCLOCK:
+ return "proclock";
+ case ERTS_LOCK_FLAGS_TYPE_MUTEX:
+ if(flags & ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE) {
+ return "rw_mutex";
+ }
+
+ return "mutex";
+ case ERTS_LOCK_FLAGS_TYPE_SPINLOCK:
+ if(flags & ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE) {
+ return "rw_spinlock";
+ }
+
+ return "spinlock";
+ default:
+ return "garbage";
+ }
+}
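+
+/* e.g. ERTS_LOCK_TYPE_RWMUTEX yields "rw_mutex" and ERTS_LOCK_TYPE_SPINLOCK
+ * yields "spinlock". */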
+
+const char *erts_lock_options_get_short_desc(erts_lock_options_t options) {
+ switch(options) {
+ case ERTS_LOCK_OPTIONS_RDWR:
+ return "rw";
+ case ERTS_LOCK_OPTIONS_READ:
+ return "r";
+ case ERTS_LOCK_OPTIONS_WRITE:
+ return "w";
+ default:
+ return "none";
+ }
+}
diff --git a/erts/emulator/beam/erl_lock_flags.h b/erts/emulator/beam/erl_lock_flags.h
new file mode 100644
index 0000000000..d711f69456
--- /dev/null
+++ b/erts/emulator/beam/erl_lock_flags.h
@@ -0,0 +1,78 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2017. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERTS_LOCK_FLAGS_H__
+#define ERTS_LOCK_FLAGS_H__
+
+#define ERTS_LOCK_OPTIONS_READ (1 << 1)
+#define ERTS_LOCK_OPTIONS_WRITE (1 << 2)
+
+#define ERTS_LOCK_OPTIONS_RDWR (ERTS_LOCK_OPTIONS_READ | ERTS_LOCK_OPTIONS_WRITE)
+
+/* Property/category are bitfields to simplify their use in masks. */
+#define ERTS_LOCK_FLAGS_MASK_CATEGORY (0xFFC0)
+#define ERTS_LOCK_FLAGS_MASK_PROPERTY (0x0030)
+
+/* Type is a plain number. */
+#define ERTS_LOCK_FLAGS_MASK_TYPE (0x000F)
+
+#define ERTS_LOCK_FLAGS_TYPE_SPINLOCK (1)
+#define ERTS_LOCK_FLAGS_TYPE_MUTEX (2)
+#define ERTS_LOCK_FLAGS_TYPE_PROCLOCK (3)
+
+/* "Static" guarantees that the lock will never be destroyed once created. */
+#define ERTS_LOCK_FLAGS_PROPERTY_STATIC (1 << 4)
+#define ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE (1 << 5)
+
+#define ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR (1 << 6)
+#define ERTS_LOCK_FLAGS_CATEGORY_PROCESS (1 << 7)
+#define ERTS_LOCK_FLAGS_CATEGORY_IO (1 << 8)
+#define ERTS_LOCK_FLAGS_CATEGORY_DB (1 << 9)
+#define ERTS_LOCK_FLAGS_CATEGORY_DEBUG (1 << 10)
+#define ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER (1 << 11)
+#define ERTS_LOCK_FLAGS_CATEGORY_GENERIC (1 << 12)
+#define ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION (1 << 13)
+
+#define ERTS_LOCK_TYPE_SPINLOCK \
+ (ERTS_LOCK_FLAGS_TYPE_SPINLOCK)
+#define ERTS_LOCK_TYPE_RWSPINLOCK \
+ (ERTS_LOCK_TYPE_SPINLOCK | \
+ ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE)
+#define ERTS_LOCK_TYPE_MUTEX \
+ (ERTS_LOCK_FLAGS_TYPE_MUTEX)
+#define ERTS_LOCK_TYPE_RWMUTEX \
+ (ERTS_LOCK_TYPE_MUTEX | \
+ ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE)
+#define ERTS_LOCK_TYPE_PROCLOCK \
+ (ERTS_LOCK_FLAGS_CATEGORY_PROCESS | \
+ ERTS_LOCK_FLAGS_TYPE_PROCLOCK)
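+
+/* Example composition (hypothetical): a static rw-spinlock in the IO
+ * category would carry
+ *
+ *     ERTS_LOCK_TYPE_RWSPINLOCK |
+ *     ERTS_LOCK_FLAGS_PROPERTY_STATIC |
+ *     ERTS_LOCK_FLAGS_CATEGORY_IO
+ */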
+
+/* -- -- */
+
+typedef unsigned short erts_lock_flags_t;
+typedef unsigned short erts_lock_options_t;
+
+/** @brief Gets the type name of the lock, honoring the RW flag if supplied. */
+const char *erts_lock_flags_get_type_name(erts_lock_flags_t flags);
+
+/** @brief Gets a short-form description of the given lock options. (rw/r/w) */
+const char *erts_lock_options_get_short_desc(erts_lock_options_t options);
+
+#endif /* ERTS_LOCK_FLAGS_H__ */
diff --git a/erts/emulator/beam/erl_map.c b/erts/emulator/beam/erl_map.c
index fdd2d0c0f6..f0c54e05f7 100644
--- a/erts/emulator/beam/erl_map.c
+++ b/erts/emulator/beam/erl_map.c
@@ -1,21 +1,25 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2014. All Rights Reserved.
+ * Copyright Ericsson AB 2014-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*
+ * hashmaps are an adaptation of Rich Hickey's Persistent HashMaps,
+ * which were in turn an adaptation of Phil Bagwell's Hash Array Mapped Tries
+ *
* Author: Björn-Egil Dahlberg
*/
@@ -28,7 +32,10 @@
#include "global.h"
#include "erl_process.h"
#include "error.h"
+#define ERL_WANT_HIPE_BIF_WRAPPER__
#include "bif.h"
+#undef ERL_WANT_HIPE_BIF_WRAPPER__
+#include "erl_binary.h"
#include "erl_map.h"
@@ -47,6 +54,7 @@
* - maps:new/0
* - maps:put/3
* - maps:remove/2
+ * - maps:take/2
* - maps:to_list/1
* - maps:update/3
* - maps:values/1
@@ -58,41 +66,91 @@
* - maps:size/1
* - maps:without/2
*
+ * DEBUG: for sharing calculation
+ * - erts_internal:map_to_tuple_keys/1
*/
+#ifndef DECL_AM
+#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
+#endif
+
+/* for hashmap_from_list/1 */
+typedef struct {
+ Uint32 hx;
+ Uint32 skip;
+ Uint i;
+ Eterm val;
+} hxnode_t;
+
+
+static Eterm flatmap_merge(Process *p, Eterm nodeA, Eterm nodeB);
+static BIF_RETTYPE map_merge_mixed(Process *p, Eterm flat, Eterm tree, int swap_args);
+struct HashmapMergeContext_;
+static BIF_RETTYPE hashmap_merge(Process *p, Eterm nodeA, Eterm nodeB, int swap_args,
+ struct HashmapMergeContext_*);
+static Export hashmap_merge_trap_export;
+static BIF_RETTYPE maps_merge_trap_1(BIF_ALIST_1);
+static Uint hashmap_subtree_size(Eterm node);
+static Eterm hashmap_to_list(Process *p, Eterm map, Sint n);
+static Eterm hashmap_keys(Process *p, Eterm map);
+static Eterm hashmap_values(Process *p, Eterm map);
+static Eterm hashmap_delete(Process *p, Uint32 hx, Eterm key, Eterm node, Eterm *value);
+static Eterm flatmap_from_validated_list(Process *p, Eterm list, Uint size);
+static Eterm hashmap_from_validated_list(Process *p, Eterm list, Uint size);
+static Eterm hashmap_from_unsorted_array(ErtsHeapFactory*, hxnode_t *hxns, Uint n, int reject_dupkeys);
+static Eterm hashmap_from_sorted_unique_array(ErtsHeapFactory*, hxnode_t *hxns, Uint n, int is_root);
+static Eterm hashmap_from_chunked_array(ErtsHeapFactory*, hxnode_t *hxns, Uint n, Uint size, int is_root);
+static Eterm hashmap_info(Process *p, Eterm node);
+static Eterm hashmap_bld_tuple_uint(Uint **hpp, Uint *szp, Uint n, Uint nums[]);
+static int hxnodecmp(hxnode_t* a, hxnode_t* b);
+static int hxnodecmpkey(hxnode_t* a, hxnode_t* b);
+
+
+void erts_init_map(void) {
+ erts_init_trap_export(&hashmap_merge_trap_export,
+ am_maps, am_merge_trap, 1,
+ &maps_merge_trap_1);
+ return;
+}
+
+
/* erlang:map_size/1
* the corresponding instruction is implemented in:
* beam/erl_bif_guard.c
*/
BIF_RETTYPE map_size_1(BIF_ALIST_1) {
- if (is_map(BIF_ARG_1)) {
- Eterm *hp;
- Uint hsz = 0;
- map_t *mp = (map_t*)map_val(BIF_ARG_1);
- Uint n = map_get_size(mp);
-
- erts_bld_uint(NULL, &hsz, n);
+ if (is_flatmap(BIF_ARG_1)) {
+ flatmap_t *mp = (flatmap_t*)flatmap_val(BIF_ARG_1);
+ BIF_RET(make_small(flatmap_get_size(mp)));
+ } else if (is_hashmap(BIF_ARG_1)) {
+ Eterm *head, *hp, res;
+ Uint size, hsz=0;
+
+ head = hashmap_val(BIF_ARG_1);
+ size = head[1];
+ (void) erts_bld_uint(NULL, &hsz, size);
hp = HAlloc(BIF_P, hsz);
- BIF_RET(erts_bld_uint(&hp, NULL, n));
+ res = erts_bld_uint(&hp, NULL, size);
+ BIF_RET(res);
}
- BIF_ERROR(BIF_P, BADARG);
+ BIF_P->fvalue = BIF_ARG_1;
+ BIF_ERROR(BIF_P, BADMAP);
}
-/* maps:to_list/1
- */
+/* maps:to_list/1 */
BIF_RETTYPE maps_to_list_1(BIF_ALIST_1) {
- if (is_map(BIF_ARG_1)) {
+ if (is_flatmap(BIF_ARG_1)) {
Uint n;
Eterm* hp;
Eterm *ks,*vs, res, tup;
- map_t *mp = (map_t*)map_val(BIF_ARG_1);
+ flatmap_t *mp = (flatmap_t*)flatmap_val(BIF_ARG_1);
- ks = map_get_keys(mp);
- vs = map_get_values(mp);
- n = map_get_size(mp);
+ ks = flatmap_get_keys(mp);
+ vs = flatmap_get_values(mp);
+ n = flatmap_get_size(mp);
hp = HAlloc(BIF_P, (2 + 3) * n);
res = NIL;
@@ -102,108 +160,142 @@ BIF_RETTYPE maps_to_list_1(BIF_ALIST_1) {
}
BIF_RET(res);
+ } else if (is_hashmap(BIF_ARG_1)) {
+ return hashmap_to_list(BIF_P, BIF_ARG_1, -1);
}
+ BIF_P->fvalue = BIF_ARG_1;
+ BIF_ERROR(BIF_P, BADMAP);
+}
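
The HAlloc size (2 + 3) * n used by maps_to_list_1 reflects the per-pair heap cost: a 2-tuple occupies 3 words (arity header plus two element slots) and each cons cell occupies 2 words. A quick sanity check of that arithmetic:

    #include <assert.h>

    enum { CONS_WORDS = 2, TUPLE2_WORDS = 1 + 2 }; /* header + 2 slots */

    static unsigned long to_list_heap_words(unsigned long n_pairs) {
        return n_pairs * (CONS_WORDS + TUPLE2_WORDS);
    }

    int main(void) {
        assert(to_list_heap_words(3) == 15); /* three {K,V} pairs cons'ed up */
        return 0;
    }
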
+
+/* erts_internal:maps_to_list/2
+ *
+ * This function should be removed once iterators are in place.
+ * Never document it.
+ * Never encourage its usage.
+ *
+ * A negative value in ARG 2 means the entire map.
+ */
+
+BIF_RETTYPE erts_internal_maps_to_list_2(BIF_ALIST_2) {
+ Sint m;
+ if (term_to_Sint(BIF_ARG_2, &m)) {
+ if (is_flatmap(BIF_ARG_1)) {
+ Uint n;
+ Eterm* hp;
+ Eterm *ks,*vs, res, tup;
+ flatmap_t *mp = (flatmap_t*)flatmap_val(BIF_ARG_1);
+
+ ks = flatmap_get_keys(mp);
+ vs = flatmap_get_values(mp);
+ n = flatmap_get_size(mp);
+
+ if (m >= 0) {
+ n = m < n ? m : n;
+ }
+
+ hp = HAlloc(BIF_P, (2 + 3) * n);
+ res = NIL;
+
+ while(n--) {
+ tup = TUPLE2(hp, ks[n], vs[n]); hp += 3;
+ res = CONS(hp, tup, res); hp += 2;
+ }
+
+ BIF_RET(res);
+ } else if (is_hashmap(BIF_ARG_1)) {
+ return hashmap_to_list(BIF_P, BIF_ARG_1, m);
+ }
+ BIF_P->fvalue = BIF_ARG_1;
+ BIF_ERROR(BIF_P, BADMAP);
+ }
BIF_ERROR(BIF_P, BADARG);
}
+
/* maps:find/2
* return value if key *matches* a key in the map
*/
-int erts_maps_find(Eterm key, Eterm map, Eterm *value) {
-
- Eterm *ks,*vs;
- map_t *mp;
- Uint n,i;
+const Eterm *
+erts_maps_get(Eterm key, Eterm map)
+{
+ Uint32 hx;
+ if (is_flatmap(map)) {
+ Eterm *ks, *vs;
+ flatmap_t *mp;
+ Uint n, i;
- mp = (map_t*)map_val(map);
- n = map_get_size(mp);
- ks = map_get_keys(mp);
- vs = map_get_values(mp);
+ mp = (flatmap_t *)flatmap_val(map);
+ n = flatmap_get_size(mp);
- for( i = 0; i < n; i++) {
- if (EQ(ks[i], key)) {
- *value = vs[i];
- return 1;
+ if (n == 0) {
+ return NULL;
}
+
+ ks = (Eterm *)tuple_val(mp->keys) + 1;
+ vs = flatmap_get_values(mp);
+
+ if (is_immed(key)) {
+ for (i = 0; i < n; i++) {
+ if (ks[i] == key) {
+ return &vs[i];
+ }
+ }
+ } else {
+ for (i = 0; i < n; i++) {
+ if (EQ(ks[i], key)) {
+ return &vs[i];
+ }
+ }
+ }
+ return NULL;
}
- return 0;
+ ASSERT(is_hashmap(map));
+ hx = hashmap_make_hash(key);
+
+ return erts_hashmap_get(hx, key, map);
}
BIF_RETTYPE maps_find_2(BIF_ALIST_2) {
if (is_map(BIF_ARG_2)) {
- Eterm *hp, value,res;
+ Eterm *hp, res;
+ const Eterm *value;
- if (erts_maps_find(BIF_ARG_1, BIF_ARG_2, &value)) {
+ value = erts_maps_get(BIF_ARG_1, BIF_ARG_2);
+ if (value) {
hp = HAlloc(BIF_P, 3);
res = make_tuple(hp);
*hp++ = make_arityval(2);
*hp++ = am_ok;
- *hp++ = value;
+ *hp++ = *value;
BIF_RET(res);
}
-
BIF_RET(am_error);
}
- BIF_ERROR(BIF_P, BADARG);
+ BIF_P->fvalue = BIF_ARG_2;
+ BIF_ERROR(BIF_P, BADMAP);
}
+
/* maps:get/2
* return value if key *matches* a key in the map
- * exception bad_key if none matches
+ * exception badkey if none matches
*/
-
-int erts_maps_get(Eterm key, Eterm map, Eterm *value) {
- Eterm *ks,*vs;
- map_t *mp;
- Uint n,i;
-
- mp = (map_t*)map_val(map);
- n = map_get_size(mp);
-
- if (n == 0)
- return 0;
-
- ks = map_get_keys(mp);
- vs = map_get_values(mp);
-
- if (is_immed(key)) {
- for( i = 0; i < n; i++) {
- if (ks[i] == key) {
- *value = vs[i];
- return 1;
- }
- }
- }
-
- for( i = 0; i < n; i++) {
- if (EQ(ks[i], key)) {
- *value = vs[i];
- return 1;
- }
- }
- return 0;
-}
-
BIF_RETTYPE maps_get_2(BIF_ALIST_2) {
if (is_map(BIF_ARG_2)) {
- Eterm *hp;
- Eterm value, error;
- char *s_error;
+ const Eterm *value;
- if (erts_maps_get(BIF_ARG_1, BIF_ARG_2, &value)) {
- BIF_RET(value);
+ value = erts_maps_get(BIF_ARG_1, BIF_ARG_2);
+ if (value) {
+ BIF_RET(*value);
}
- s_error = "bad_key";
- error = am_atom_put(s_error, sys_strlen(s_error));
-
- hp = HAlloc(BIF_P, 3);
- BIF_P->fvalue = TUPLE2(hp, error, BIF_ARG_1);
- BIF_ERROR(BIF_P, EXC_ERROR_2);
+ BIF_P->fvalue = BIF_ARG_1;
+ BIF_ERROR(BIF_P, BADKEY);
}
- BIF_ERROR(BIF_P, BADARG);
+ BIF_P->fvalue = BIF_ARG_2;
+ BIF_ERROR(BIF_P, BADMAP);
}
/* maps:from_list/1
@@ -211,13 +303,8 @@ BIF_RETTYPE maps_get_2(BIF_ALIST_2) {
*/
BIF_RETTYPE maps_from_list_1(BIF_ALIST_1) {
- Eterm *kv, item = BIF_ARG_1;
- Eterm *hp, *thp,*vs, *ks, keys, res;
- map_t *mp;
- Uint size = 0, unused_size = 0;
- Sint c = 0;
- Sint idx = 0;
-
+ Eterm item = BIF_ARG_1, res, *kv;
+ Uint size = 0;
if (is_list(item) || is_nil(item)) {
/* Calculate size and check validity */
@@ -238,486 +325,1426 @@ BIF_RETTYPE maps_from_list_1(BIF_ALIST_1) {
if (is_not_nil(item))
goto error;
- hp = HAlloc(BIF_P, 3 + 1 + (2 * size));
- thp = hp;
+ if (size > MAP_SMALL_MAP_LIMIT) {
+ BIF_RET(hashmap_from_validated_list(BIF_P, BIF_ARG_1, size));
+ } else {
+ BIF_RET(flatmap_from_validated_list(BIF_P, BIF_ARG_1, size));
+ }
+ }
+
+error:
+
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+static Eterm flatmap_from_validated_list(Process *p, Eterm list, Uint size) {
+ Eterm *kv, item = list;
+ Eterm *hp, *thp,*vs, *ks, keys, res;
+ flatmap_t *mp;
+ Uint unused_size = 0;
+ Sint c = 0;
+ Sint idx = 0;
+
+
+ hp = HAlloc(p, 3 + 1 + (2 * size));
+ thp = hp;
+ keys = make_tuple(hp);
+ *hp++ = make_arityval(size);
+ ks = hp;
+ hp += size;
+ mp = (flatmap_t*)hp;
+ res = make_flatmap(mp);
+ hp += MAP_HEADER_FLATMAP_SZ;
+ vs = hp;
+
+ mp->thing_word = MAP_HEADER_FLATMAP;
+ mp->size = size; /* set later, might shrink */
+ mp->keys = keys;
+
+ if (size == 0)
+ return res;
+
+ /* first entry */
+ kv = tuple_val(CAR(list_val(item)));
+ ks[0] = kv[1];
+ vs[0] = kv[2];
+ size = 1;
+ item = CDR(list_val(item));
+
+ /* insert sort key/value pairs */
+ while(is_list(item)) {
+
+ kv = tuple_val(CAR(list_val(item)));
+
+ /* compare ks backwards.
+ * idx represents the word index to be written (hole position).
+ * We cannot copy the elements while searching since we might
+ * have an equal key, so we search for just the index first. =(
+ *
+ * It is perhaps faster to move the values in the first pass.
+ * Check for uniqueness during the insert phase and then have a
+ * second phase compacting the map if duplicates are found
+ * during insert, or do some other sort; shell sort, perhaps.
+ */
+
+ idx = size;
+
+ while(idx > 0 && (c = CMP_TERM(kv[1],ks[idx-1])) < 0) { idx--; }
+
+ if (c == 0) {
+ /* last compare was equal,
+ * i.e. we have to release memory
+ * and overwrite that key/value
+ */
+ ks[idx-1] = kv[1];
+ vs[idx-1] = kv[2];
+ unused_size++;
+ } else {
+ Uint i = size;
+ while(i > idx) {
+ ks[i] = ks[i-1];
+ vs[i] = vs[i-1];
+ i--;
+ }
+ ks[idx] = kv[1];
+ vs[idx] = kv[2];
+ size++;
+ }
+ item = CDR(list_val(item));
+ }
+
+ if (unused_size) {
+ /* the key tuple is embedded in the heap
+ * write a bignum to clear it.
+ */
+ /* release values as normal since they are on the top of the heap */
+
+ ks[size] = make_pos_bignum_header(unused_size - 1);
+ HRelease(p, vs + size + unused_size, vs + size);
+ }
+
+ *thp = make_arityval(size);
+ mp->size = size;
+ return res;
+}
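
flatmap_from_validated_list above is an insertion sort into the embedded key tuple in which a later duplicate key overwrites the slot found for the earlier one instead of growing the map. The same idea on plain arrays, as a self-contained sketch with int keys standing in for Eterm and < standing in for CMP_TERM:

    #include <stddef.h>

    /* Insert (key, val) into ks/vs, which hold `size` entries sorted by key.
     * A duplicate key overwrites its value in place.
     * Returns the new entry count: size or size + 1. */
    static size_t sorted_insert(int *ks, int *vs, size_t size, int key, int val)
    {
        size_t idx = size, i;

        while (idx > 0 && key < ks[idx - 1]) /* search the hole backwards */
            idx--;

        if (idx > 0 && ks[idx - 1] == key) { /* equal key: overwrite */
            vs[idx - 1] = val;
            return size;
        }
        for (i = size; i > idx; i--) {       /* shift the tail up one slot */
            ks[i] = ks[i - 1];
            vs[i] = vs[i - 1];
        }
        ks[idx] = key;
        vs[idx] = val;
        return size + 1;
    }
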
+
+#define swizzle32(D,S) \
+ do { \
+ (D) = ((S) & 0x0000000f) << 28 | ((S) & 0x000000f0) << 20 \
+ | ((S) & 0x00000f00) << 12 | ((S) & 0x0000f000) << 4 \
+ | ((S) & 0x000f0000) >> 4 | ((S) & 0x00f00000) >> 12 \
+ | ((S) & 0x0f000000) >> 20 | ((S) & 0xf0000000) >> 28; \
+ } while(0)
+
+#define maskval(V,L) (((V) >> ((7 - (L))*4)) & 0xf)
+#define cdepth(V1,V2) (hashmap_clz((V1) ^ (V2)) >> 2)
+
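
swizzle32 reverses the nibble order of a 32-bit hash, so that after swizzling the most significant nibble is the root slot and a plain numeric sort orders entries by their root-first slot path; maskval then reads the slot nibble at a given depth of a swizzled value. A minimal check of both (macro bodies copied verbatim from above; cdepth is left out because hashmap_clz is not shown in this hunk):

    #include <assert.h>
    #include <stdint.h>

    #define swizzle32(D,S)                                            \
        do {                                                          \
            (D) = ((S) & 0x0000000f) << 28 | ((S) & 0x000000f0) << 20 \
                | ((S) & 0x00000f00) << 12 | ((S) & 0x0000f000) << 4  \
                | ((S) & 0x000f0000) >> 4  | ((S) & 0x00f00000) >> 12 \
                | ((S) & 0x0f000000) >> 20 | ((S) & 0xf0000000) >> 28;\
        } while(0)

    #define maskval(V,L) (((V) >> ((7 - (L))*4)) & 0xf)

    int main(void) {
        uint32_t sw, hx = 0x12345678;
        swizzle32(sw, hx);
        assert(sw == 0x87654321);             /* nibble order reversed */
        assert(maskval(sw, 0) == (hx & 0xf)); /* depth 0 = root slot */
        assert(maskval(sw, 7) == 0x1);        /* deepest nibble */
        return 0;
    }
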
+static Eterm hashmap_from_validated_list(Process *p, Eterm list, Uint size) {
+ Eterm item = list;
+ Eterm *hp;
+ Eterm *kv, res;
+ Uint32 sw, hx;
+ Uint ix = 0;
+ hxnode_t *hxns;
+ ErtsHeapFactory factory;
+ DeclareTmpHeap(tmp,2,p);
+ ASSERT(size > 0);
+
+ hp = HAlloc(p, (2 * size));
+
+ /* create tmp hx values and leaf ptrs */
+ hxns = (hxnode_t *)erts_alloc(ERTS_ALC_T_TMP, size * sizeof(hxnode_t));
+
+ UseTmpHeap(2,p);
+ while(is_list(item)) {
+ res = CAR(list_val(item));
+ kv = tuple_val(res);
+ hx = hashmap_restore_hash(tmp,0,kv[1]);
+ swizzle32(sw,hx);
+ hxns[ix].hx = sw;
+ hxns[ix].val = CONS(hp, kv[1], kv[2]); hp += 2;
+ hxns[ix].skip = 1; /* will be reassigned in from_array */
+ hxns[ix].i = ix;
+ ix++;
+ item = CDR(list_val(item));
+ }
+ UnUseTmpHeap(2,p);
+
+ erts_factory_proc_init(&factory, p);
+ res = hashmap_from_unsorted_array(&factory, hxns, size, 0);
+ erts_factory_close(&factory);
+
+ erts_free(ERTS_ALC_T_TMP, (void *) hxns);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(p);
+
+ if (hashmap_size(res) <= MAP_SMALL_MAP_LIMIT) {
+ DECLARE_WSTACK(wstack);
+ Eterm *kv, *ks, *vs;
+ flatmap_t *mp;
+ Eterm keys;
+ Uint n = hashmap_size(res);
+
+ /* build flat structure */
+ hp = HAlloc(p, 3 + 1 + (2 * n));
keys = make_tuple(hp);
- *hp++ = make_arityval(size);
+ *hp++ = make_arityval(n);
ks = hp;
- hp += size;
- mp = (map_t*)hp;
- res = make_map(mp);
- hp += MAP_HEADER_SIZE;
+ hp += n;
+ mp = (flatmap_t*)hp;
+ hp += MAP_HEADER_FLATMAP_SZ;
vs = hp;
- mp->thing_word = MAP_HEADER;
- mp->size = size; /* set later, might shrink*/
+ mp->thing_word = MAP_HEADER_FLATMAP;
+ mp->size = n;
mp->keys = keys;
- if (size == 0)
- BIF_RET(res);
+ hashmap_iterator_init(&wstack, res, 0);
- item = BIF_ARG_1;
+ while ((kv=hashmap_iterator_next(&wstack)) != NULL) {
+ *ks++ = CAR(kv);
+ *vs++ = CDR(kv);
+ }
- /* first entry */
- kv = tuple_val(CAR(list_val(item)));
- ks[0] = kv[1];
- vs[0] = kv[2];
- size = 1;
- item = CDR(list_val(item));
+ /* it cannot have duplicate keys */
+ erts_validate_and_sort_flatmap(mp);
- /* insert sort key/value pairs */
- while(is_list(item)) {
+ DESTROY_WSTACK(wstack);
+ return make_flatmap(mp);
+ }
- kv = tuple_val(CAR(list_val(item)));
-
- /* compare ks backwards
- * idx represent word index to be written (hole position).
- * We cannot copy the elements when searching since we might
- * have an equal key. So we search for just the index first =(
- *
- * It is perhaps faster to move the values in the first pass.
- * Check for uniqueness during insert phase and then have a
- * second phace compacting the map if duplicates are found
- * during insert. .. or do someother sort .. shell-sort perhaps.
- */
+ return res;
+}
- idx = size;
+Eterm erts_hashmap_from_array(ErtsHeapFactory* factory, Eterm *leafs, Uint n,
+ int reject_dupkeys) {
+ Uint32 sw, hx;
+ Uint ix;
+ hxnode_t *hxns;
+ Eterm res;
+
+ /* create tmp hx values and leaf ptrs */
+ hxns = (hxnode_t *)erts_alloc(ERTS_ALC_T_TMP, n * sizeof(hxnode_t));
+
+ for (ix = 0; ix < n; ix++) {
+ hx = hashmap_make_hash(*leafs);
+ swizzle32(sw,hx);
+ hxns[ix].hx = sw;
+ hxns[ix].val = make_list(leafs);
+ hxns[ix].skip = 1;
+ hxns[ix].i = ix;
+ leafs += 2;
+ }
- while(idx > 0 && (c = CMP_TERM(kv[1],ks[idx-1])) < 0) { idx--; }
+ res = hashmap_from_unsorted_array(factory, hxns, n, reject_dupkeys);
- if (c == 0) {
- /* last compare was equal,
- * i.e. we have to release memory
- * and overwrite that key/value
- */
- ks[idx-1] = kv[1];
- vs[idx-1] = kv[2];
- unused_size++;
- } else {
- Uint i = size;
- while(i > idx) {
- ks[i] = ks[i-1];
- vs[i] = vs[i-1];
- i--;
+ erts_free(ERTS_ALC_T_TMP, (void *) hxns);
+
+ return res;
+}
+
+Eterm erts_map_from_ks_and_vs(ErtsHeapFactory *factory, Eterm *ks0, Eterm *vs0, Uint n)
+{
+ if (n < MAP_SMALL_MAP_LIMIT) {
+ Eterm *ks, *vs, *hp;
+ flatmap_t *mp;
+ Eterm keys;
+
+ hp = erts_produce_heap(factory, 3 + 1 + (2 * n), 0);
+ keys = make_tuple(hp);
+ *hp++ = make_arityval(n);
+ ks = hp;
+ hp += n;
+ mp = (flatmap_t*)hp;
+ hp += MAP_HEADER_FLATMAP_SZ;
+ vs = hp;
+
+ mp->thing_word = MAP_HEADER_FLATMAP;
+ mp->size = n;
+ mp->keys = keys;
+
+ sys_memcpy(ks, ks0, n * sizeof(Eterm));
+ sys_memcpy(vs, vs0, n * sizeof(Eterm));
+
+ erts_validate_and_sort_flatmap(mp);
+
+ return make_flatmap(mp);
+ } else {
+ return erts_hashmap_from_ks_and_vs(factory, ks0, vs0, n);
+ }
+ return THE_NON_VALUE; /* not reached; both branches return above */
+}
+
+
+Eterm erts_hashmap_from_ks_and_vs_extra(ErtsHeapFactory *factory,
+ Eterm *ks, Eterm *vs, Uint n,
+ Eterm key, Eterm value) {
+ Uint32 sw, hx;
+ Uint i,sz;
+ hxnode_t *hxns;
+ Eterm *hp, res;
+
+ sz = (key == THE_NON_VALUE) ? n : (n + 1);
+ ASSERT(sz > MAP_SMALL_MAP_LIMIT);
+ hp = erts_produce_heap(factory, 2 * sz, 0);
+
+ /* create tmp hx values and leaf ptrs */
+ hxns = (hxnode_t *)erts_alloc(ERTS_ALC_T_TMP, sz * sizeof(hxnode_t));
+
+ for(i = 0; i < n; i++) {
+ hx = hashmap_make_hash(ks[i]);
+ swizzle32(sw,hx);
+ hxns[i].hx = sw;
+ hxns[i].val = CONS(hp, ks[i], vs[i]); hp += 2;
+ hxns[i].skip = 1; /* will be reassigned in from_array */
+ hxns[i].i = i;
+ }
+
+ if (key != THE_NON_VALUE) {
+ hx = hashmap_make_hash(key);
+ swizzle32(sw,hx);
+ hxns[i].hx = sw;
+ hxns[i].val = CONS(hp, key, value); hp += 2;
+ hxns[i].skip = 1;
+ hxns[i].i = i;
+ }
+
+ res = hashmap_from_unsorted_array(factory, hxns, sz, 0);
+
+ erts_free(ERTS_ALC_T_TMP, (void *) hxns);
+
+ return res;
+}
+
+static Eterm hashmap_from_unsorted_array(ErtsHeapFactory* factory,
+ hxnode_t *hxns, Uint n,
+ int reject_dupkeys) {
+ Uint jx = 0, ix = 0, lx, cx;
+ Eterm res;
+
+ if (n == 0) {
+ Eterm *hp;
+ hp = erts_produce_heap(factory, 2, 0);
+ hp[0] = MAP_HEADER_HAMT_HEAD_BITMAP(0);
+ hp[1] = 0;
+
+ return make_hashmap(hp);
+ }
+
+ /* sort and compact array (remove non-unique entries) */
+ qsort(hxns, n, sizeof(hxnode_t), (int (*)(const void *, const void *)) hxnodecmp);
+
+ ix = 0, cx = 0;
+ while(ix < n - 1) {
+ if (hxns[ix].hx == hxns[ix+1].hx) {
+
+ /* find region of equal hash values */
+ jx = ix + 1;
+ while(jx < n && hxns[ix].hx == hxns[jx].hx) { jx++; }
+ /* find all correct keys in the region
+ * (the last duplicate in the input list wins; the region is now
+ * hash sorted, so we check the highest original index instead) */
+
+ /* resort with keys instead of hash value within region */
+
+ qsort(&hxns[ix], jx - ix, sizeof(hxnode_t),
+ (int (*)(const void *, const void *)) hxnodecmpkey);
+
+ while(ix < jx) {
+ lx = ix;
+ while(++ix < jx && EQ(CAR(list_val(hxns[ix].val)),
+ CAR(list_val(hxns[lx].val)))) {
+ if (reject_dupkeys)
+ return THE_NON_VALUE;
+
+ if (hxns[ix].i > hxns[lx].i) {
+ lx = ix;
+ }
}
- ks[idx] = kv[1];
- vs[idx] = kv[2];
- size++;
+ hxns[cx].hx = hxns[lx].hx;
+ hxns[cx].val = hxns[lx].val;
+ cx++;
}
- item = CDR(list_val(item));
+ ix = jx;
+ continue;
}
+ if (ix > cx) {
+ hxns[cx].hx = hxns[ix].hx;
+ hxns[cx].val = hxns[ix].val;
+ }
+ cx++;
+ ix++;
+ }
- if (unused_size) {
- /* the key tuple is embedded in the heap
- * write a bignum to clear it.
- */
- /* release values as normal since they are on the top of the heap */
+ if (ix < n) {
+ hxns[cx].hx = hxns[ix].hx;
+ hxns[cx].val = hxns[ix].val;
+ cx++;
+ }
- ks[size] = make_pos_bignum_header(unused_size - 1);
- HRelease(BIF_P, vs + size + unused_size, vs + size);
- }
+ if (cx > 1) {
+ /* recursive decompose array */
+ res = hashmap_from_sorted_unique_array(factory, hxns, cx, 0);
+ } else {
+ Eterm *hp;
- *thp = make_arityval(size);
- mp->size = size;
- BIF_RET(res);
+ /* we only have one item, either because n was 1 or
+ * because we had multiple entries with the same key.
+ *
+ * the hash value has been swizzled, so we need to drag it down to
+ * get the correct slot. */
+
+ hp = erts_produce_heap(factory, HAMT_HEAD_BITMAP_SZ(1), 0);
+ hp[0] = MAP_HEADER_HAMT_HEAD_BITMAP(1 << ((hxns[0].hx >> 0x1c) & 0xf));
+ hp[1] = 1;
+ hp[2] = hxns[0].val;
+ res = make_hashmap(hp);
}
-error:
+ return res;
+}
- BIF_ERROR(BIF_P, BADARG);
+static Eterm hashmap_from_sorted_unique_array(ErtsHeapFactory* factory,
+ hxnode_t *hxns, Uint n, int lvl) {
+ Eterm res = NIL;
+ Uint i,ix,jx,elems;
+ Uint32 sw, hx;
+ Eterm val;
+ hxnode_t *tmp;
+ DeclareTmpHeapNoproc(th,2);
+ UseTmpHeapNoproc(2);
+ ASSERT(lvl < 32);
+ ix = 0;
+ elems = 1;
+ while (ix < n - 1) {
+ if (hxns[ix].hx == hxns[ix+1].hx) {
+ jx = ix + 1;
+ while (jx < n && hxns[ix].hx == hxns[jx].hx) { jx++; }
+ tmp = (hxnode_t *)erts_alloc(ERTS_ALC_T_TMP, ((jx - ix)) * sizeof(hxnode_t));
+
+ for(i = 0; i < jx - ix; i++) {
+ val = hxns[i + ix].val;
+ hx = hashmap_restore_hash(th, lvl + 8, CAR(list_val(val)));
+ swizzle32(sw,hx);
+ tmp[i].hx = sw;
+ tmp[i].val = val;
+ tmp[i].i = i;
+ tmp[i].skip = 1;
+ }
+
+ qsort(tmp, jx - ix, sizeof(hxnode_t), (int (*)(const void *, const void *)) hxnodecmp);
+
+ hxns[ix].skip = jx - ix;
+ hxns[ix].val = hashmap_from_sorted_unique_array(factory, tmp, jx - ix, lvl + 8);
+ erts_free(ERTS_ALC_T_TMP, (void *) tmp);
+ ix = jx;
+ if (ix < n) { elems++; }
+ continue;
+ }
+ hxns[ix].skip = 1;
+ elems++;
+ ix++;
+ }
+
+ res = hashmap_from_chunked_array(factory, hxns, elems, n, !lvl);
+
+ ERTS_FACTORY_HOLE_CHECK(factory);
+
+ UnUseTmpHeapNoproc(2);
+ return res;
}
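
The recursion with lvl + 8 above handles full 32-bit hash collisions: hashmap_restore_hash derives eight fresh hash digits per level group by hashing the key together with a level-dependent salt (the two-word tmp heap holds a [key | salt] cons for that purpose). A conceptual stand-in for that salted rehash, with a toy mixer in place of the real term hashing:

    #include <stdint.h>

    /* Toy model: produce the 32 hash bits used for levels
     * [8*group, 8*group + 8) by mixing the key with a group salt.
     * Keys whose first 32 hash bits collide can still diverge here. */
    static uint32_t restore_hash(uint32_t key, unsigned group)
    {
        uint32_t h = key ^ (0x9e3779b9u * (group + 1)); /* salt by group */
        h ^= h >> 16;  h *= 0x45d9f3bu;  h ^= h >> 16;  /* mix */
        return h;
    }
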
-/* maps:is_key/2
- */
+#define HALLOC_EXTRA 200
+static Eterm hashmap_from_chunked_array(ErtsHeapFactory *factory, hxnode_t *hxns, Uint n,
+ Uint size, int is_root) {
+ Uint ix, d, dn, dc, slot, elems;
+ Uint32 v, vp, vn, hdr;
+ Uint bp, sz;
+ DECLARE_ESTACK(stack);
+ Eterm res = NIL, *hp = NULL, *nhp;
-BIF_RETTYPE maps_is_key_2(BIF_ALIST_2) {
- if (is_map(BIF_ARG_2)) {
- Eterm *ks, key;
- map_t *mp;
- Uint n,i;
- mp = (map_t*)map_val(BIF_ARG_2);
- key = BIF_ARG_1;
- n = map_get_size(mp);
- ks = map_get_keys(mp);
+ /* if we get here with only one element then
+ * we have eight levels of collisions
+ */
- if (n == 0)
- BIF_RET(am_false);
+ if (n == 1) {
+ res = hxns[0].val;
+ v = hxns[0].hx;
+ for (d = 7; d > 0; d--) {
+ slot = maskval(v,d);
+ hp = erts_produce_heap(factory, HAMT_NODE_BITMAP_SZ(1), HALLOC_EXTRA);
+ hp[0] = MAP_HEADER_HAMT_NODE_BITMAP(1 << slot);
+ hp[1] = res;
+ res = make_hashmap(hp);
+ }
- if (is_immed(key)) {
- for( i = 0; i < n; i++) {
- if (ks[i] == key) {
- BIF_RET(am_true);
- }
+ slot = maskval(v,0);
+ hp = erts_produce_heap(factory, (is_root ? 3 : 2), 0);
+
+ if (is_root) {
+ hp[0] = MAP_HEADER_HAMT_HEAD_BITMAP(1 << slot);
+ hp[1] = size;
+ hp[2] = res;
+ } else {
+ hp[0] = MAP_HEADER_HAMT_NODE_BITMAP(1 << slot);
+ hp[1] = res;
+ }
+ return make_hashmap(hp);
+ }
+
+ /* push initial nodes on the stack,
+ * this is the starting depth */
+
+ ix = 0;
+ d = 0;
+ vp = hxns[ix].hx;
+ v = hxns[ix + hxns[ix].skip].hx;
+
+ ASSERT(vp > v);
+ slot = maskval(vp,d);
+
+ while(slot == maskval(v,d)) {
+ ESTACK_PUSH(stack, 1 << slot);
+ d++;
+ slot = maskval(vp,d);
+ }
+
+ res = hxns[ix].val;
+
+ if (hxns[ix].skip > 1) {
+ dc = 7;
+ /* build collision nodes */
+ while (dc > d) {
+ hp = erts_produce_heap(factory, HAMT_NODE_BITMAP_SZ(1), HALLOC_EXTRA);
+ hp[0] = MAP_HEADER_HAMT_NODE_BITMAP(1 << maskval(vp,dc));
+ hp[1] = res;
+ res = make_hashmap(hp);
+ dc--;
+ }
+ }
+
+ ESTACK_PUSH2(stack,res,1 << slot);
+
+ /* all of the other nodes .. */
+ elems = n - 2; /* remove first and last elements */
+ while(elems--) {
+ hdr = ESTACK_POP(stack);
+ ix = ix + hxns[ix].skip;
+
+ /* determine if node or subtree should be built by looking
+ * at the next value. */
+
+ vn = hxns[ix + hxns[ix].skip].hx;
+ dn = cdepth(v,vn);
+ ASSERT(v > vn);
+
+ res = hxns[ix].val;
+
+ if (hxns[ix].skip > 1) {
+ int wat = (d > dn) ? d : dn;
+ dc = 7;
+ /* build collision nodes */
+ while (dc > wat) {
+ hp = erts_produce_heap(factory, HAMT_NODE_BITMAP_SZ(1), HALLOC_EXTRA);
+ hp[0] = MAP_HEADER_HAMT_NODE_BITMAP(1 << maskval(v,dc));
+ hp[1] = res;
+ res = make_hashmap(hp);
+ dc--;
}
}
- for( i = 0; i < n; i++) {
- if (EQ(ks[i], key)) {
- BIF_RET(am_true);
+ /* next depth is higher (implies collision) */
+ if (d < dn) {
+ /* hdr is the popped one initially */
+ while(d < dn) {
+ slot = maskval(v, d);
+ bp = 1 << slot;
+ ESTACK_PUSH(stack, hdr | bp);
+ d++;
+ hdr = 0; /* clear hdr for all other collisions */
}
+
+ slot = maskval(v, d);
+ bp = 1 << slot;
+ /* no more collisions */
+ ESTACK_PUSH2(stack,res,bp);
+ } else if (d == dn) {
+ /* no collisions at all */
+ slot = maskval(v, d);
+ bp = 1 << slot;
+ ESTACK_PUSH2(stack,res,hdr | bp);
+ } else {
+ /* dn < d, we have a drop and we are done;
+ * build nodes and subtree */
+ while (dn != d) {
+ slot = maskval(v, d);
+ bp = 1 << slot;
+ /* OR bitposition before sz calculation to handle
+ * redundant collisions */
+ hdr |= bp;
+ sz = hashmap_bitcount(hdr);
+ hp = erts_produce_heap(factory, HAMT_NODE_BITMAP_SZ(sz), HALLOC_EXTRA);
+ nhp = hp;
+ *hp++ = MAP_HEADER_HAMT_NODE_BITMAP(hdr);
+ *hp++ = res; sz--;
+ while (sz--) { *hp++ = ESTACK_POP(stack); }
+ ASSERT((hp - nhp) < 18);
+ res = make_hashmap(nhp);
+
+ /* pop the next hdr now; it is pushed back below if it
+ * turns out we do not need it here */
+
+ hdr = ESTACK_POP(stack);
+ d--;
+ }
+ ESTACK_PUSH2(stack,res,hdr);
}
- BIF_RET(am_false);
+
+ vp = v;
+ v = vn;
+ d = dn;
+ ERTS_FACTORY_HOLE_CHECK(factory);
}
- BIF_ERROR(BIF_P, BADARG);
+
+ /* v and vp are reused from above */
+ dn = cdepth(vp,v);
+ ix = ix + hxns[ix].skip;
+ res = hxns[ix].val;
+
+ if (hxns[ix].skip > 1) {
+ dc = 7;
+ /* build collision nodes */
+ while (dc > dn) {
+ hp = erts_produce_heap(factory, HAMT_NODE_BITMAP_SZ(1), HALLOC_EXTRA);
+ hp[0] = MAP_HEADER_HAMT_NODE_BITMAP(1 << maskval(v,dc));
+ hp[1] = res;
+ res = make_hashmap(hp);
+ dc--;
+ }
+ }
+
+ hdr = ESTACK_POP(stack);
+ /* pop remaining subtree if any */
+ while (dn) {
+ slot = maskval(v, dn);
+ bp = 1 << slot;
+ /* OR bitposition before sz calculation to handle
+ * redundant collisions */
+ hdr |= bp;
+ sz = hashmap_bitcount(hdr);
+ hp = erts_produce_heap(factory, HAMT_NODE_BITMAP_SZ(sz), HALLOC_EXTRA);
+ nhp = hp;
+ *hp++ = MAP_HEADER_HAMT_NODE_BITMAP(hdr);
+ *hp++ = res; sz--;
+
+ while (sz--) { *hp++ = ESTACK_POP(stack); }
+ res = make_hashmap(nhp);
+ hdr = ESTACK_POP(stack);
+ dn--;
+ }
+
+ /* and finally the root .. */
+
+ slot = maskval(v, dn);
+ bp = 1 << slot;
+ hdr |= bp;
+ sz = hashmap_bitcount(hdr);
+ hp = erts_produce_heap(factory, sz + /* hdr + item */ (is_root ? 2 : 1), 0);
+ nhp = hp;
+
+ if (is_root) {
+ *hp++ = (hdr == 0xffff) ? MAP_HEADER_HAMT_HEAD_ARRAY : MAP_HEADER_HAMT_HEAD_BITMAP(hdr);
+ *hp++ = size;
+ } else {
+ *hp++ = MAP_HEADER_HAMT_NODE_BITMAP(hdr);
+ }
+
+ *hp++ = res; sz--;
+ while (sz--) { *hp++ = ESTACK_POP(stack); }
+
+ res = make_hashmap(nhp);
+
+ ASSERT(ESTACK_COUNT(stack) == 0);
+ DESTROY_ESTACK(stack);
+ ERTS_FACTORY_HOLE_CHECK(factory);
+ return res;
+}
+#undef HALLOC_EXTRA
+
+static int hxnodecmpkey(hxnode_t *a, hxnode_t *b) {
+ Sint c = CMP_TERM(CAR(list_val(a->val)), CAR(list_val(b->val)));
+#if ERTS_SIZEOF_ETERM <= SIZEOF_INT
+ return c;
+#else
+ return c > 0 ? 1 : (c < 0 ? -1 : 0);
+#endif
}
-/* maps:keys/1
- */
+static int hxnodecmp(hxnode_t *a, hxnode_t *b) {
+ if (a->hx < b->hx)
+ return 1;
+ else if (a->hx == b->hx)
+ return 0;
+ else
+ return -1;
+}
+
+/* maps:is_key/2 */
+
+BIF_RETTYPE maps_is_key_2(BIF_ALIST_2) {
+ if (is_map(BIF_ARG_2)) {
+ BIF_RET(erts_maps_get(BIF_ARG_1, BIF_ARG_2) ? am_true : am_false);
+ }
+ BIF_P->fvalue = BIF_ARG_2;
+ BIF_ERROR(BIF_P, BADMAP);
+}
+
+/* maps:keys/1 */
BIF_RETTYPE maps_keys_1(BIF_ALIST_1) {
- if (is_map(BIF_ARG_1)) {
+ if (is_flatmap(BIF_ARG_1)) {
Eterm *hp, *ks, res = NIL;
- map_t *mp;
+ flatmap_t *mp;
Uint n;
- mp = (map_t*)map_val(BIF_ARG_1);
- n = map_get_size(mp);
+ mp = (flatmap_t*)flatmap_val(BIF_ARG_1);
+ n = flatmap_get_size(mp);
if (n == 0)
BIF_RET(res);
hp = HAlloc(BIF_P, (2 * n));
- ks = map_get_keys(mp);
+ ks = flatmap_get_keys(mp);
while(n--) {
res = CONS(hp, ks[n], res); hp += 2;
}
BIF_RET(res);
+ } else if (is_hashmap(BIF_ARG_1)) {
+ BIF_RET(hashmap_keys(BIF_P, BIF_ARG_1));
}
- BIF_ERROR(BIF_P, BADARG);
+ BIF_P->fvalue = BIF_ARG_1;
+ BIF_ERROR(BIF_P, BADMAP);
}
-/* maps:merge/2
- */
+
+/* maps:merge/2 */
+
+HIPE_WRAPPER_BIF_DISABLE_GC(maps_merge, 2)
BIF_RETTYPE maps_merge_2(BIF_ALIST_2) {
- if (is_map(BIF_ARG_1) && is_map(BIF_ARG_2)) {
- Eterm *hp,*thp;
- Eterm tup;
- Eterm *ks,*vs,*ks1,*vs1,*ks2,*vs2;
- map_t *mp1,*mp2,*mp_new;
- Uint n1,n2,i1,i2,need,unused_size=0;
- int c = 0;
-
- mp1 = (map_t*)map_val(BIF_ARG_1);
- mp2 = (map_t*)map_val(BIF_ARG_2);
- n1 = map_get_size(mp1);
- n2 = map_get_size(mp2);
-
- need = MAP_HEADER_SIZE + 1 + 2*(n1 + n2);
-
- hp = HAlloc(BIF_P, need);
- thp = hp;
- tup = make_tuple(thp);
- ks = hp + 1; hp += 1 + n1 + n2;
- mp_new = (map_t*)hp; hp += MAP_HEADER_SIZE;
- vs = hp; hp += n1 + n2;
-
- mp_new->thing_word = MAP_HEADER;
- mp_new->size = 0;
- mp_new->keys = tup;
-
- i1 = 0; i2 = 0;
- ks1 = map_get_keys(mp1);
- vs1 = map_get_values(mp1);
- ks2 = map_get_keys(mp2);
- vs2 = map_get_values(mp2);
-
- while(i1 < n1 && i2 < n2) {
- c = CMP_TERM(ks1[i1],ks2[i2]);
- if ( c == 0) {
- /* use righthand side arguments map value,
- * but advance both maps */
- *ks++ = ks2[i2];
- *vs++ = vs2[i2];
- i1++, i2++, unused_size++;
- } else if ( c < 0) {
- *ks++ = ks1[i1];
- *vs++ = vs1[i1];
- i1++;
- } else {
- *ks++ = ks2[i2];
- *vs++ = vs2[i2];
- i2++;
- }
+ if (is_flatmap(BIF_ARG_1)) {
+ if (is_flatmap(BIF_ARG_2)) {
+ BIF_RET(flatmap_merge(BIF_P, BIF_ARG_1, BIF_ARG_2));
+ } else if (is_hashmap(BIF_ARG_2)) {
+ /* Will always become a tree */
+ return map_merge_mixed(BIF_P, BIF_ARG_1, BIF_ARG_2, 0);
}
+ BIF_P->fvalue = BIF_ARG_2;
+ } else if (is_hashmap(BIF_ARG_1)) {
+ if (is_hashmap(BIF_ARG_2)) {
+ return hashmap_merge(BIF_P, BIF_ARG_1, BIF_ARG_2, 0, NULL);
+ } else if (is_flatmap(BIF_ARG_2)) {
+ /* Will always become a tree */
+ return map_merge_mixed(BIF_P, BIF_ARG_2, BIF_ARG_1, 1);
+ }
+ BIF_P->fvalue = BIF_ARG_2;
+ } else {
+ BIF_P->fvalue = BIF_ARG_1;
+ }
+ BIF_ERROR(BIF_P, BADMAP);
+}
+
+static Eterm flatmap_merge(Process *p, Eterm nodeA, Eterm nodeB) {
+ Eterm *hp,*thp;
+ Eterm tup;
+ Eterm *ks,*vs,*ks1,*vs1,*ks2,*vs2;
+ flatmap_t *mp1,*mp2,*mp_new;
+ Uint n,n1,n2,i1,i2,need,unused_size=0;
+ Sint c = 0;
+
+ mp1 = (flatmap_t*)flatmap_val(nodeA);
+ mp2 = (flatmap_t*)flatmap_val(nodeB);
+ n1 = flatmap_get_size(mp1);
+ n2 = flatmap_get_size(mp2);
- /* copy remaining */
- while (i1 < n1) {
+ need = MAP_HEADER_FLATMAP_SZ + 1 + 2 * (n1 + n2);
+
+ hp = HAlloc(p, need);
+ thp = hp;
+ tup = make_tuple(thp);
+ ks = hp + 1; hp += 1 + n1 + n2;
+ mp_new = (flatmap_t*)hp; hp += MAP_HEADER_FLATMAP_SZ;
+ vs = hp; hp += n1 + n2;
+
+ mp_new->thing_word = MAP_HEADER_FLATMAP;
+ mp_new->size = 0;
+ mp_new->keys = tup;
+
+ i1 = 0; i2 = 0;
+ ks1 = flatmap_get_keys(mp1);
+ vs1 = flatmap_get_values(mp1);
+ ks2 = flatmap_get_keys(mp2);
+ vs2 = flatmap_get_values(mp2);
+
+ while(i1 < n1 && i2 < n2) {
+ c = CMP_TERM(ks1[i1],ks2[i2]);
+ if (c == 0) {
+ /* use the right-hand side argument's map value,
+ * but advance both maps */
+ *ks++ = ks2[i2];
+ *vs++ = vs2[i2];
+ i1++, i2++, unused_size++;
+ } else if (c < 0) {
*ks++ = ks1[i1];
*vs++ = vs1[i1];
i1++;
- }
-
- while (i2 < n2) {
+ } else {
*ks++ = ks2[i2];
*vs++ = vs2[i2];
i2++;
}
+ }
- if (unused_size) {
- /* the key tuple is embedded in the heap, write a bignum to clear it.
- *
- * release values as normal since they are on the top of the heap
- * size = n1 + n1 - unused_size
- */
+ /* copy remaining */
+ while (i1 < n1) {
+ *ks++ = ks1[i1];
+ *vs++ = vs1[i1];
+ i1++;
+ }
+
+ while (i2 < n2) {
+ *ks++ = ks2[i2];
+ *vs++ = vs2[i2];
+ i2++;
+ }
+
+ if (unused_size) {
+ /* the key tuple is embedded in the heap, write a bignum to clear it.
+ *
+ * release values as normal since they are on the top of the heap
+ * size = n1 + n2 - unused_size
+ */
+
+ *ks = make_pos_bignum_header(unused_size - 1);
+ HRelease(p, vs + unused_size, vs);
+ }
+
+ n = n1 + n2 - unused_size;
+ *thp = make_arityval(n);
+ mp_new->size = n;
+
+ /* Reshape map to a hashmap if the map exceeds the limit */
- *ks = make_pos_bignum_header(unused_size - 1);
- HRelease(BIF_P, vs + unused_size, vs);
+ if (n > MAP_SMALL_MAP_LIMIT) {
+ Uint32 hx,sw;
+ Uint i;
+ Eterm res;
+ hxnode_t *hxns;
+ ErtsHeapFactory factory;
+
+ ks = flatmap_get_keys(mp_new);
+ vs = flatmap_get_values(mp_new);
+
+ hp = HAlloc(p, 2 * n);
+
+ hxns = (hxnode_t *)erts_alloc(ERTS_ALC_T_TMP,n * sizeof(hxnode_t));
+
+ for (i = 0; i < n; i++) {
+ hx = hashmap_make_hash(ks[i]);
+ swizzle32(sw,hx);
+ hxns[i].hx = sw;
+ hxns[i].val = CONS(hp, ks[i], vs[i]); hp += 2;
+ hxns[i].skip = 1;
+ hxns[i].i = i;
}
- mp_new->size = n1 + n2 - unused_size;
- *thp = make_arityval(n1 + n2 - unused_size);
+ erts_factory_proc_init(&factory, p);
+ res = hashmap_from_unsorted_array(&factory, hxns, n, 0);
+ erts_factory_close(&factory);
- BIF_RET(make_map(mp_new));
+ erts_free(ERTS_ALC_T_TMP, (void *) hxns);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(p);
+
+ return res;
}
- BIF_ERROR(BIF_P, BADARG);
+
+ return make_flatmap(mp_new);
}
-/* maps:new/2
- */
-BIF_RETTYPE maps_new_0(BIF_ALIST_0) {
- Eterm* hp;
- Eterm tup;
- map_t *mp;
+static Eterm map_merge_mixed(Process *p, Eterm flat, Eterm tree, int swap_args) {
+ Eterm *ks, *vs, *hp, res;
+ flatmap_t *mp;
+ Uint n, i;
+ hxnode_t *hxns;
+ Uint32 sw, hx;
+ ErtsHeapFactory factory;
- hp = HAlloc(BIF_P, (MAP_HEADER_SIZE + 1));
- tup = make_tuple(hp);
- *hp++ = make_arityval(0);
+ /* convert flat to tree */
- mp = (map_t*)hp;
- mp->thing_word = MAP_HEADER;
- mp->size = 0;
- mp->keys = tup;
+ ASSERT(is_flatmap(flat));
+ ASSERT(is_hashmap(tree));
+
+ mp = (flatmap_t*)flatmap_val(flat);
+ n = flatmap_get_size(mp);
+
+ ks = flatmap_get_keys(mp);
+ vs = flatmap_get_values(mp);
+
+ hp = HAlloc(p, 2 * n);
+
+ hxns = (hxnode_t *)erts_alloc(ERTS_ALC_T_TMP, n * sizeof(hxnode_t));
- BIF_RET(make_map(mp));
+ for (i = 0; i < n; i++) {
+ hx = hashmap_make_hash(ks[i]);
+ swizzle32(sw,hx);
+ hxns[i].hx = sw;
+ hxns[i].val = CONS(hp, ks[i], vs[i]); hp += 2;
+ hxns[i].skip = 1;
+ hxns[i].i = i;
+ }
+
+ erts_factory_proc_init(&factory, p);
+ res = hashmap_from_unsorted_array(&factory, hxns, n, 0);
+ erts_factory_close(&factory);
+
+ erts_free(ERTS_ALC_T_TMP, (void *) hxns);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(p);
+
+ return hashmap_merge(p, res, tree, swap_args, NULL);
}
-/* maps:put/3
- */
+#define PSTACK_TYPE struct HashmapMergePStackType
+struct HashmapMergePStackType {
+ Eterm nodeA, nodeB;
+ Eterm *srcA, *srcB;
+ Uint32 abm, bbm, rbm; /* node bitmaps */
+ int mix; /* &1: the node contains entries unique to A
+ * &2: the node contains entries unique to B */
+ int ix;
+ Eterm array[16]; /* temp node construction area */
+};
+
+typedef struct HashmapMergeContext_ {
+ Uint size; /* total key-value counter */
+ unsigned int lvl;
+ Eterm trap_bin;
+ ErtsPStack pstack;
+#ifdef DEBUG
+ Eterm dbg_map_A, dbg_map_B;
+#endif
+} HashmapMergeContext;
-Eterm erts_maps_put(Process *p, Eterm key, Eterm value, Eterm map) {
- Sint n,i;
- Sint c = 0;
- Eterm* hp, *shp;
- Eterm *ks,*vs, res, tup;
- map_t *mp = (map_t*)map_val(map);
+static int hashmap_merge_ctx_destructor(Binary* ctx_bin)
+{
+ HashmapMergeContext* ctx = (HashmapMergeContext*) ERTS_MAGIC_BIN_DATA(ctx_bin);
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(ctx_bin) == hashmap_merge_ctx_destructor);
- n = map_get_size(mp);
+ PSTACK_DESTROY_SAVED(&ctx->pstack);
+ return 1;
+}
- if (n == 0) {
- hp = HAlloc(p, MAP_HEADER_SIZE + 1 + 2);
- tup = make_tuple(hp);
- *hp++ = make_arityval(1);
- *hp++ = key;
- res = make_map(hp);
- *hp++ = MAP_HEADER;
- *hp++ = 1;
- *hp++ = tup;
- *hp++ = value;
+BIF_RETTYPE maps_merge_trap_1(BIF_ALIST_1) {
+ Binary* ctx_bin = erts_magic_ref2bin(BIF_ARG_1);
- return res;
- }
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(ctx_bin) == hashmap_merge_ctx_destructor);
- ks = map_get_keys(mp);
- vs = map_get_values(mp);
+ return hashmap_merge(BIF_P, NIL, NIL, 0,
+ (HashmapMergeContext*) ERTS_MAGIC_BIN_DATA(ctx_bin));
+}
- /* only allocate for values,
- * assume key-tuple will be intact
+#define HALLOC_EXTRA 200
+#define MAP_MERGE_LOOP_FACTOR 8
+
+static BIF_RETTYPE hashmap_merge(Process *p, Eterm map_A, Eterm map_B,
+ int swap_args, HashmapMergeContext* ctx) {
+#define PSTACK_TYPE struct HashmapMergePStackType
+ PSTACK_DECLARE(s, 4);
+ HashmapMergeContext local_ctx;
+ struct HashmapMergePStackType* sp;
+ Uint32 hx;
+ Eterm res = THE_NON_VALUE;
+ Eterm hdrA, hdrB;
+ Eterm *hp, *nhp;
+ Eterm trap_ret;
+ Sint initial_reds = (Sint) (ERTS_BIF_REDS_LEFT(p) * MAP_MERGE_LOOP_FACTOR);
+ Sint reds = initial_reds;
+ DeclareTmpHeap(th,2,p);
+ UseTmpHeap(2,p);
+
+ /*
+ * Strategy: Do depth-first traversal of both trees (at the same time)
+ * and merge each pair of nodes.
*/
- hp = HAlloc(p, MAP_HEADER_SIZE + n);
- shp = hp; /* save hp, used if optimistic update fails */
- res = make_map(hp);
- *hp++ = MAP_HEADER;
- *hp++ = n;
- *hp++ = mp->keys;
-
- if (is_immed(key)) {
- for( i = 0; i < n; i ++) {
- if (ks[i] == key) {
- *hp++ = value;
- vs++;
- c = 1;
- } else {
- *hp++ = *vs++;
+ PSTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
+
+ if (ctx == NULL) { /* first call */
+ hashmap_head_t* a = (hashmap_head_t*) hashmap_val(map_A);
+ hashmap_head_t* b = (hashmap_head_t*) hashmap_val(map_B);
+
+ sp = PSTACK_PUSH(s);
+ sp->srcA = swap_args ? &map_B : &map_A;
+ sp->srcB = swap_args ? &map_A : &map_B;
+ sp->mix = 0;
+ local_ctx.size = a->size + b->size;
+ local_ctx.lvl = 0;
+ #ifdef DEBUG
+ local_ctx.dbg_map_A = map_A;
+ local_ctx.dbg_map_B = map_B;
+ local_ctx.trap_bin = THE_NON_VALUE;
+ #endif
+ ctx = &local_ctx;
+ }
+ else {
+ PSTACK_RESTORE(s, &ctx->pstack);
+ sp = PSTACK_TOP(s);
+ goto resume_from_trap;
+ }
+
+recurse:
+
+ sp->nodeA = *sp->srcA;
+ sp->nodeB = *sp->srcB;
+
+ if (sp->nodeA == sp->nodeB) {
+ res = sp->nodeA;
+ ctx->size -= is_list(sp->nodeB) ? 1 : hashmap_subtree_size(sp->nodeB);
+ }
+ else {
+ if (is_list(sp->nodeA)) { /* A is LEAF */
+ Eterm keyA = CAR(list_val(sp->nodeA));
+
+ if (is_list(sp->nodeB)) { /* LEAF + LEAF */
+ Eterm keyB = CAR(list_val(sp->nodeB));
+
+ if (EQ(keyA, keyB)) {
+ --ctx->size;
+ res = sp->nodeB;
+ sp->mix = 2; /* We assume values differ.
+ + Don't spend time comparing big values.
+ - Might waste some heap space for internal
+ nodes that could otherwise be reused. */
+ goto merge_nodes;
+ }
+ }
+ hx = hashmap_restore_hash(th, ctx->lvl, keyA);
+ sp->abm = 1 << hashmap_index(hx);
+ /* keep srcA pointing at the leaf */
+ }
+ else { /* A is NODE */
+ sp->srcA = boxed_val(sp->nodeA);
+ hdrA = *sp->srcA++;
+ ASSERT(is_header(hdrA));
+ switch (hdrA & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY: {
+ sp->srcA++;
+ sp->abm = 0xffff;
+ break;
+ }
+ case HAMT_SUBTAG_HEAD_BITMAP: sp->srcA++;
+ case HAMT_SUBTAG_NODE_BITMAP: {
+ sp->abm = MAP_HEADER_VAL(hdrA);
+ break;
+ }
+ default:
+ erts_exit(ERTS_ABORT_EXIT, "bad header %ld\r\n", hdrA);
+ }
+ }
+
+ if (is_list(sp->nodeB)) { /* B is LEAF */
+ Eterm keyB = CAR(list_val(sp->nodeB));
+
+ hx = hashmap_restore_hash(th, ctx->lvl, keyB);
+ sp->bbm = 1 << hashmap_index(hx);
+ /* keep srcB pointing at the leaf */
+ }
+ else { /* B is NODE */
+ sp->srcB = boxed_val(sp->nodeB);
+ hdrB = *sp->srcB++;
+ ASSERT(is_header(hdrB));
+ switch (hdrB & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY: {
+ sp->srcB++;
+ sp->bbm = 0xffff;
+ break;
+ }
+ case HAMT_SUBTAG_HEAD_BITMAP: sp->srcB++;
+ case HAMT_SUBTAG_NODE_BITMAP: {
+ sp->bbm = MAP_HEADER_VAL(hdrB);
+ break;
+ }
+ default:
+ erts_exit(ERTS_ABORT_EXIT, "bad header %ld\r\n", hdrB);
+ }
+ }
+ }
+
+merge_nodes:
+
+ for (;;) {
+ if (is_value(res)) { /* We have a complete (sub-)tree or leaf */
+ int child_mix;
+ if (ctx->lvl == 0)
+ break;
+
+ /* Pop from stack and continue build parent node */
+ ctx->lvl--;
+ child_mix = sp->mix;
+ sp = PSTACK_POP(s);
+ sp->array[sp->ix++] = res;
+ sp->mix |= child_mix;
+ res = THE_NON_VALUE;
+ if (sp->rbm) {
+ sp->srcA++;
+ sp->srcB++;
}
+ } else { /* Start build a node */
+ sp->ix = 0;
+ sp->rbm = sp->abm | sp->bbm;
+ ASSERT(!(sp->rbm == 0 && ctx->lvl > 0));
}
- } else {
- for( i = 0; i < n; i ++) {
- if (EQ(ks[i], key)) {
- *hp++ = value;
- vs++;
- c = 1;
+
+ if (--reds <= 0) {
+ goto trap;
+ }
+resume_from_trap:
+
+ while (sp->rbm) {
+ Uint32 next = sp->rbm & (sp->rbm-1);
+ Uint32 bit = sp->rbm ^ next;
+ sp->rbm = next;
+ if (sp->abm & bit) {
+ if (sp->bbm & bit) {
+ /* Bit clash. Push and resolve by recursive merge */
+ Eterm* srcA = sp->srcA;
+ Eterm* srcB = sp->srcB;
+ ctx->lvl++;
+ sp = PSTACK_PUSH(s);
+ sp->srcA = srcA;
+ sp->srcB = srcB;
+ sp->mix = 0;
+ goto recurse;
+ } else {
+ sp->array[sp->ix++] = *sp->srcA++;
+ sp->mix |= 1;
+ }
} else {
- *hp++ = *vs++;
+ ASSERT(sp->bbm & bit);
+ sp->array[sp->ix++] = *sp->srcB++;
+ sp->mix |= 2;
}
}
+
+ switch (sp->mix) {
+ case 0: /* Nodes A and B contain the *EXACT* same sub-trees
+ => fall through and reuse nodeA */
+
+ case 1: /* Only unique A stuff => reuse nodeA */
+ res = sp->nodeA;
+ break;
+
+ case 2: /* Only unique B stuff => reuse nodeB */
+ res = sp->nodeB;
+ break;
+
+ case 3: /* We have a mix => must build new node */
+ ASSERT(sp->ix == hashmap_bitcount(sp->abm | sp->bbm));
+ if (ctx->lvl == 0) {
+ nhp = HAllocX(p, HAMT_HEAD_BITMAP_SZ(sp->ix), HALLOC_EXTRA);
+ hp = nhp;
+ *hp++ = (sp->ix == 16 ? MAP_HEADER_HAMT_HEAD_ARRAY
+ : MAP_HEADER_HAMT_HEAD_BITMAP(sp->abm | sp->bbm));
+ *hp++ = ctx->size;
+ } else {
+ nhp = HAllocX(p, HAMT_NODE_BITMAP_SZ(sp->ix), HALLOC_EXTRA);
+ hp = nhp;
+ *hp++ = MAP_HEADER_HAMT_NODE_BITMAP(sp->abm | sp->bbm);
+ }
+ sys_memcpy(hp, sp->array, sp->ix * sizeof(Eterm));
+ res = make_boxed(nhp);
+ break;
+ default:
+ erts_exit(ERTS_ABORT_EXIT, "strange mix %d\r\n", sp->mix);
+ }
}
- if (c)
- return res;
+ /* Done */
- /* need to make a new tuple,
- * use old hp since it needs to be recreated anyway.
- */
- tup = make_tuple(shp);
- *shp++ = make_arityval(n+1);
+#ifdef DEBUG
+ {
+ Eterm *head = hashmap_val(res);
+ Uint size = head[1];
+ Uint real_size = hashmap_subtree_size(res);
+ ASSERT(size == real_size);
+ }
+#endif
- hp = HAlloc(p, 3 + n + 1);
- res = make_map(hp);
- *hp++ = MAP_HEADER;
- *hp++ = n + 1;
- *hp++ = tup;
+ if (ctx != &local_ctx) {
+ ASSERT(ctx->trap_bin != THE_NON_VALUE);
+ ASSERT(p->flags & F_DISABLE_GC);
+ erts_set_gc_state(p, 1);
+ }
+ else {
+ ASSERT(ctx->trap_bin == THE_NON_VALUE);
+ ASSERT(!(p->flags & F_DISABLE_GC));
+ }
+ PSTACK_DESTROY(s);
+ UnUseTmpHeap(2,p);
+ BUMP_REDS(p, (initial_reds - reds) / MAP_MERGE_LOOP_FACTOR);
+ return res;
- ks = map_get_keys(mp);
- vs = map_get_values(mp);
+trap: /* Yield */
- ASSERT(n >= 0);
+ if (ctx == &local_ctx) {
+ Binary* ctx_b = erts_create_magic_binary(sizeof(HashmapMergeContext),
+ hashmap_merge_ctx_destructor);
+ ctx = ERTS_MAGIC_BIN_DATA(ctx_b);
+ sys_memcpy(ctx, &local_ctx, sizeof(HashmapMergeContext));
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+ ASSERT(ctx->trap_bin == THE_NON_VALUE);
+ ctx->trap_bin = erts_mk_magic_ref(&hp, &MSO(p), ctx_b);
- /* copy map in order */
- while (n && ((c = CMP_TERM(*ks, key)) < 0)) {
- *shp++ = *ks++;
- *hp++ = *vs++;
- n--;
+ erts_set_gc_state(p, 0);
}
+ else {
+ ASSERT(ctx->trap_bin != THE_NON_VALUE);
+ ASSERT(p->flags & F_DISABLE_GC);
+ }
+
+ PSTACK_SAVE(s, &ctx->pstack);
- *shp++ = key;
- *hp++ = value;
+ BUMP_ALL_REDS(p);
+ ERTS_BIF_PREP_TRAP1(trap_ret, &hashmap_merge_trap_export,
+ p, ctx->trap_bin);
+ UnUseTmpHeap(2,p);
+ return trap_ret;
+}
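
hashmap_merge is written as a resumable loop: it converts the process's remaining reductions into a local budget (MAP_MERGE_LOOP_FACTOR units per reduction), and when the budget is exhausted it saves the PSTACK into a magic binary and traps, so that maps_merge_trap_1 can re-enter with the saved context. A toy model of the same yield pattern, with the process and magic-binary machinery replaced by a plain context struct (names here are illustrative, not ERTS APIs):

    #include <stdio.h>

    typedef struct {
        unsigned long next;  /* saved loop position */
        unsigned long limit;
        unsigned long acc;
    } WorkCtx;

    /* Run until finished or until `budget` steps are spent.
     * Returns 1 when done, 0 when the caller must call again ("trap"). */
    static int work_resumable(WorkCtx *ctx, long budget)
    {
        while (ctx->next < ctx->limit) {
            if (--budget < 0)
                return 0;              /* out of reductions: yield */
            ctx->acc += ctx->next++;   /* one unit of real work */
        }
        return 1;
    }

    int main(void) {
        WorkCtx ctx = { 0, 1000000, 0 };
        int traps = 0;
        while (!work_resumable(&ctx, 4000)) /* a 4000-step time slice */
            traps++;                        /* a scheduler would requeue here */
        printf("sum=%lu after %d traps\n", ctx.acc, traps);
        return 0;
    }
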
- ASSERT(n >= 0);
+static Uint hashmap_subtree_size(Eterm node) {
+ DECLARE_WSTACK(stack);
+ Uint size = 0;
- while(n--) {
- *shp++ = *ks++;
- *hp++ = *vs++;
+ hashmap_iterator_init(&stack, node, 0);
+ while (hashmap_iterator_next(&stack)) {
+ size++;
}
- /* we have one word remaining
- * this will work out fine once we get the size word
- * in the header.
- */
- *shp = make_pos_bignum_header(0);
- return res;
+ DESTROY_WSTACK(stack);
+ return size;
}
-BIF_RETTYPE maps_put_3(BIF_ALIST_3) {
- if (is_map(BIF_ARG_3)) {
- BIF_RET(erts_maps_put(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3));
+
+static int hash_cmp(Uint32 ha, Uint32 hb)
+{
+ int i;
+ for (i=0; i<8; i++) {
+ int cmp = (int)(ha & 0xF) - (int)(hb & 0xF);
+ if (cmp)
+ return cmp;
+ ha >>= 4;
+ hb >>= 4;
}
- BIF_ERROR(BIF_P, BADARG);
+ return 0;
}
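
hash_cmp orders two 32-bit hashes by comparing 4-bit digits from the least significant nibble upward, i.e. in the same order in which the tree consumes slot indices level by level, so the resulting order matches tree position rather than plain numeric order. A quick self-contained check (hash_cmp copied verbatim from above):

    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t Uint32;

    static int hash_cmp(Uint32 ha, Uint32 hb)
    {
        int i;
        for (i = 0; i < 8; i++) {
            int cmp = (int)(ha & 0xF) - (int)(hb & 0xF);
            if (cmp)
                return cmp;
            ha >>= 4;
            hb >>= 4;
        }
        return 0;
    }

    int main(void) {
        /* the low nibble decides first: 0xF1 sorts before 0x02
         * even though it is numerically larger */
        assert(hash_cmp(0xF1, 0x02) < 0);
        assert(hash_cmp(0xAB, 0xAB) == 0);
        return 0;
    }
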
-/* maps:remove/3
- */
+int hashmap_key_hash_cmp(Eterm* ap, Eterm* bp)
+{
+ unsigned int lvl = 0;
+ DeclareTmpHeapNoproc(th,2);
+ UseTmpHeapNoproc(2);
+
+ if (ap && bp) {
+ ASSERT(CMP_TERM(CAR(ap), CAR(bp)) != 0);
+ for (;;) {
+ Uint32 ha = hashmap_restore_hash(th, lvl, CAR(ap));
+ Uint32 hb = hashmap_restore_hash(th, lvl, CAR(bp));
+ int cmp = hash_cmp(ha, hb);
+ if (cmp) {
+ UnUseTmpHeapNoproc(2);
+ return cmp;
+ }
+ lvl += 8;
+ }
+ }
+ UnUseTmpHeapNoproc(2);
+ return ap ? -1 : 1;
+}
-int erts_maps_remove(Process *p, Eterm key, Eterm map, Eterm *res) {
- Sint n;
- Uint need;
- Eterm *hp_start;
- Eterm *thp, *mhp;
- Eterm *ks, *vs, tup;
- map_t *mp = (map_t*)map_val(map);
+/* maps:new/0 */
- n = map_get_size(mp);
+BIF_RETTYPE maps_new_0(BIF_ALIST_0) {
+ Eterm* hp;
+ Eterm tup;
+ flatmap_t *mp;
- if (n == 0) {
- *res = map;
- return 1;
- }
+ hp = HAlloc(BIF_P, (MAP_HEADER_FLATMAP_SZ + 1));
+ tup = make_tuple(hp);
+ *hp++ = make_arityval(0);
- ks = map_get_keys(mp);
- vs = map_get_values(mp);
+ mp = (flatmap_t*)hp;
+ mp->thing_word = MAP_HEADER_FLATMAP;
+ mp->size = 0;
+ mp->keys = tup;
- /* Assume key exists.
- * Release allocated if it didn't.
- * Allocate key tuple first.
- */
+ BIF_RET(make_flatmap(mp));
+}
- need = n + 1 - 1 + 3 + n - 1; /* tuple - 1 + map - 1 */
- hp_start = HAlloc(p, need);
- thp = hp_start;
- mhp = thp + n; /* offset with tuple heap size */
+/* maps:put/3 */
- tup = make_tuple(thp);
- *thp++ = make_arityval(n - 1);
-
- *res = make_map(mhp);
- *mhp++ = MAP_HEADER;
- *mhp++ = n - 1;
- *mhp++ = tup;
-
- if (is_immed(key)) {
- while (1) {
- if (*ks == key) {
- goto found_key;
- } else if (--n) {
- *mhp++ = *vs++;
- *thp++ = *ks++;
- } else
- break;
- }
- } else {
- while(1) {
- if (EQ(*ks, key)) {
- goto found_key;
- } else if (--n) {
- *mhp++ = *vs++;
- *thp++ = *ks++;
- } else
- break;
- }
+BIF_RETTYPE maps_put_3(BIF_ALIST_3) {
+ if (is_map(BIF_ARG_3)) {
+ BIF_RET(erts_maps_put(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3));
}
+ BIF_P->fvalue = BIF_ARG_3;
+ BIF_ERROR(BIF_P, BADMAP);
+}
- /* Not found, remove allocated memory
- * and return previous map.
- */
- HRelease(p, hp_start + need, hp_start);
+/* maps:take/2 */
- *res = map;
- return 1;
-
-found_key:
- /* Copy rest of keys and values */
- if (--n) {
- sys_memcpy(mhp, vs+1, n*sizeof(Eterm));
- sys_memcpy(thp, ks+1, n*sizeof(Eterm));
+BIF_RETTYPE maps_take_2(BIF_ALIST_2) {
+ if (is_map(BIF_ARG_2)) {
+ Eterm res, map, val;
+ if (erts_maps_take(BIF_P, BIF_ARG_1, BIF_ARG_2, &map, &val)) {
+ Eterm *hp = HAlloc(BIF_P, 3);
+ res = make_tuple(hp);
+ *hp++ = make_arityval(2);
+ *hp++ = val;
+ *hp++ = map;
+ BIF_RET(res);
+ }
+ BIF_RET(am_error);
}
- return 1;
+ BIF_P->fvalue = BIF_ARG_2;
+ BIF_ERROR(BIF_P, BADMAP);
}
+/* maps:remove/2 */
+
BIF_RETTYPE maps_remove_2(BIF_ALIST_2) {
if (is_map(BIF_ARG_2)) {
- Eterm res;
- if (erts_maps_remove(BIF_P, BIF_ARG_1, BIF_ARG_2, &res)) {
- BIF_RET(res);
- }
+ Eterm res;
+ (void) erts_maps_take(BIF_P, BIF_ARG_1, BIF_ARG_2, &res, NULL);
+ BIF_RET(res);
}
- BIF_ERROR(BIF_P, BADARG);
+ BIF_P->fvalue = BIF_ARG_2;
+ BIF_ERROR(BIF_P, BADMAP);
}
-/* maps:update/3
+/* erts_maps_take
+ * Returns 1 if the key was found (and removed), otherwise 0.
+ * If the key is not found, *res (the output map) is set to map (the input map).
*/
+int erts_maps_take(Process *p, Eterm key, Eterm map,
+ Eterm *res, Eterm *value) {
+ Uint32 hx;
+ Eterm ret;
+ if (is_flatmap(map)) {
+ Sint n;
+ Uint need;
+ Eterm *hp_start;
+ Eterm *thp, *mhp;
+ Eterm *ks, *vs, tup;
+ flatmap_t *mp = (flatmap_t*)flatmap_val(map);
+
+ n = flatmap_get_size(mp);
+
+ if (n == 0) {
+ *res = map;
+ return 0;
+ }
+
+ ks = flatmap_get_keys(mp);
+ vs = flatmap_get_values(mp);
+
+ /* Assume the key exists and allocate the key tuple first;
+ * release the allocation if it turns out the key is absent.
+ */
+
+ need = n + 1 - 1 + 3 + n - 1; /* tuple - 1 + map - 1 */
+ hp_start = HAlloc(p, need);
+ thp = hp_start;
+ mhp = thp + n; /* offset with tuple heap size */
+
+ tup = make_tuple(thp);
+ *thp++ = make_arityval(n - 1);
+
+ *res = make_flatmap(mhp);
+ *mhp++ = MAP_HEADER_FLATMAP;
+ *mhp++ = n - 1;
+ *mhp++ = tup;
+
+ if (is_immed(key)) {
+ while (1) {
+ if (*ks == key) {
+ if (value) *value = *vs;
+ goto found_key;
+ } else if (--n) {
+ *mhp++ = *vs++;
+ *thp++ = *ks++;
+ } else
+ break;
+ }
+ } else {
+ while(1) {
+ if (EQ(*ks, key)) {
+ if (value) *value = *vs;
+ goto found_key;
+ } else if (--n) {
+ *mhp++ = *vs++;
+ *thp++ = *ks++;
+ } else
+ break;
+ }
+ }
+
+ /* Not found, remove allocated memory
+ * and return previous map.
+ */
+ HRelease(p, hp_start + need, hp_start);
+
+ *res = map;
+ return 0;
+
+found_key:
+ /* Copy rest of keys and values */
+ if (--n) {
+ sys_memcpy(mhp, vs+1, n*sizeof(Eterm));
+ sys_memcpy(thp, ks+1, n*sizeof(Eterm));
+ }
+ return 1;
+ }
+ ASSERT(is_hashmap(map));
+ hx = hashmap_make_hash(key);
+ ret = hashmap_delete(p, hx, key, map, value);
+ if (is_value(ret)) {
+ *res = ret;
+ return 1;
+ }
+ *res = map;
+ return 0;
+}
int erts_maps_update(Process *p, Eterm key, Eterm value, Eterm map, Eterm *res) {
+ Uint32 hx;
+ if (is_flatmap(map)) {
Sint n,i;
Eterm* hp,*shp;
Eterm *ks,*vs;
- map_t *mp = (map_t*)map_val(map);
+ flatmap_t *mp = (flatmap_t*)flatmap_val(map);
- if ((n = map_get_size(mp)) == 0) {
+ if ((n = flatmap_get_size(mp)) == 0) {
return 0;
}
- ks = map_get_keys(mp);
- vs = map_get_values(mp);
+ ks = flatmap_get_keys(mp);
+ vs = flatmap_get_values(mp);
/* only allocate for values,
* assume key-tuple will be intact
*/
- hp = HAlloc(p, MAP_HEADER_SIZE + n);
+ hp = HAlloc(p, MAP_HEADER_FLATMAP_SZ + n);
shp = hp;
- *hp++ = MAP_HEADER;
+ *hp++ = MAP_HEADER_FLATMAP;
*hp++ = n;
*hp++ = mp->keys;
@@ -739,7 +1766,7 @@ int erts_maps_update(Process *p, Eterm key, Eterm value, Eterm map, Eterm *res)
}
}
- HRelease(p, shp + MAP_HEADER_SIZE + n, shp);
+ HRelease(p, shp + MAP_HEADER_FLATMAP_SZ + n, shp);
return 0;
found_key:
@@ -747,56 +1774,956 @@ found_key:
vs++;
if (++i < n)
sys_memcpy(hp, vs, (n - i)*sizeof(Eterm));
- *res = make_map(shp);
+ *res = make_flatmap(shp);
return 1;
+ }
+
+ ASSERT(is_hashmap(map));
+ hx = hashmap_make_hash(key);
+ *res = erts_hashmap_insert(p, hx, key, value, map, 1);
+ if (is_value(*res))
+ return 1;
+
+ return 0;
+}
+
+Eterm erts_maps_put(Process *p, Eterm key, Eterm value, Eterm map) {
+ Uint32 hx;
+ Eterm res;
+ if (is_flatmap(map)) {
+ Sint n,i;
+ Sint c = 0;
+ Eterm* hp, *shp;
+ Eterm *ks, *vs, tup;
+ flatmap_t *mp = (flatmap_t*)flatmap_val(map);
+
+ n = flatmap_get_size(mp);
+
+ if (n == 0) {
+ hp = HAlloc(p, MAP_HEADER_FLATMAP_SZ + 1 + 2);
+ tup = make_tuple(hp);
+ *hp++ = make_arityval(1);
+ *hp++ = key;
+ res = make_flatmap(hp);
+ *hp++ = MAP_HEADER_FLATMAP;
+ *hp++ = 1;
+ *hp++ = tup;
+ *hp++ = value;
+
+ return res;
+ }
+
+ ks = flatmap_get_keys(mp);
+ vs = flatmap_get_values(mp);
+
+ /* only allocate for values,
+ * assume key-tuple will be intact
+ */
+
+ hp = HAlloc(p, MAP_HEADER_FLATMAP_SZ + n);
+ shp = hp; /* save hp, used if optimistic update fails */
+ res = make_flatmap(hp);
+ *hp++ = MAP_HEADER_FLATMAP;
+ *hp++ = n;
+ *hp++ = mp->keys;
+
+ if (is_immed(key)) {
+ for( i = 0; i < n; i ++) {
+ if (ks[i] == key) {
+ *hp++ = value;
+ vs++;
+ c = 1;
+ } else {
+ *hp++ = *vs++;
+ }
+ }
+ } else {
+ for( i = 0; i < n; i ++) {
+ if (EQ(ks[i], key)) {
+ *hp++ = value;
+ vs++;
+ c = 1;
+ } else {
+ *hp++ = *vs++;
+ }
+ }
+ }
+
+ if (c)
+ return res;
+
+ /* the map will grow */
+
+ if (n >= MAP_SMALL_MAP_LIMIT) {
+ ErtsHeapFactory factory;
+ HRelease(p, shp + MAP_HEADER_FLATMAP_SZ + n, shp);
+ ks = flatmap_get_keys(mp);
+ vs = flatmap_get_values(mp);
+
+ erts_factory_proc_init(&factory, p);
+ res = erts_hashmap_from_ks_and_vs_extra(&factory,ks,vs,n,key,value);
+ erts_factory_close(&factory);
+
+ return res;
+ }
+
+ /* still a small map. need to make a new tuple,
+ * use old hp since it needs to be recreated anyway. */
+
+ tup = make_tuple(shp);
+ *shp++ = make_arityval(n+1);
+
+ hp = HAlloc(p, 3 + n + 1);
+ res = make_flatmap(hp);
+ *hp++ = MAP_HEADER_FLATMAP;
+ *hp++ = n + 1;
+ *hp++ = tup;
+
+ ks = flatmap_get_keys(mp);
+ vs = flatmap_get_values(mp);
+
+ ASSERT(n >= 0);
+
+ /* copy map in order */
+ while (n && ((c = CMP_TERM(*ks, key)) < 0)) {
+ *shp++ = *ks++;
+ *hp++ = *vs++;
+ n--;
+ }
+
+ *shp++ = key;
+ *hp++ = value;
+
+ ASSERT(n >= 0);
+
+ while(n--) {
+ *shp++ = *ks++;
+ *hp++ = *vs++;
+ }
+ /* we have one word remaining
+ * this will work out fine once we get the size word
+ * in the header.
+ */
+ *shp = make_pos_bignum_header(0);
+ return res;
+ }
+ ASSERT(is_hashmap(map));
+
+ hx = hashmap_make_hash(key);
+ res = erts_hashmap_insert(p, hx, key, value, map, 0);
+ ASSERT(is_hashmap(res));
+
+ return res;
}
+/* maps:update/3 */
+
BIF_RETTYPE maps_update_3(BIF_ALIST_3) {
- if (is_map(BIF_ARG_3)) {
+ if (is_not_map(BIF_ARG_3)) {
+ BIF_P->fvalue = BIF_ARG_3;
+ BIF_ERROR(BIF_P, BADMAP);
+ } else {
Eterm res;
if (erts_maps_update(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, &res)) {
BIF_RET(res);
}
+ BIF_P->fvalue = BIF_ARG_1;
+ BIF_ERROR(BIF_P, BADKEY);
}
- BIF_ERROR(BIF_P, BADARG);
}
-/* maps:values/1
- */
+/* maps:values/1 */
BIF_RETTYPE maps_values_1(BIF_ALIST_1) {
- if (is_map(BIF_ARG_1)) {
+ if (is_flatmap(BIF_ARG_1)) {
Eterm *hp, *vs, res = NIL;
- map_t *mp;
+ flatmap_t *mp;
Uint n;
- mp = (map_t*)map_val(BIF_ARG_1);
- n = map_get_size(mp);
+ mp = (flatmap_t*)flatmap_val(BIF_ARG_1);
+ n = flatmap_get_size(mp);
if (n == 0)
BIF_RET(res);
hp = HAlloc(BIF_P, (2 * n));
- vs = map_get_values(mp);
+ vs = flatmap_get_values(mp);
while(n--) {
res = CONS(hp, vs[n], res); hp += 2;
}
BIF_RET(res);
+ } else if (is_hashmap(BIF_ARG_1)) {
+ BIF_RET(hashmap_values(BIF_P, BIF_ARG_1));
}
- BIF_ERROR(BIF_P, BADARG);
+ BIF_P->fvalue = BIF_ARG_1;
+ BIF_ERROR(BIF_P, BADMAP);
}
-int erts_validate_and_sort_map(map_t* mp)
+static Eterm hashmap_to_list(Process *p, Eterm node, Sint m) {
+ DECLARE_WSTACK(stack);
+ Eterm *hp, *kv;
+ Eterm tup, res = NIL;
+ Uint n = hashmap_size(node);
+
+ if (m >= 0) {
+ n = m < n ? m : n;
+ }
+
+ hp = HAlloc(p, n * (2 + 3));
+ hashmap_iterator_init(&stack, node, 0);
+ while (n--) {
+ kv = hashmap_iterator_next(&stack);
+ ASSERT(kv != NULL);
+ tup = TUPLE2(hp, CAR(kv), CDR(kv));
+ hp += 3;
+ res = CONS(hp, tup, res);
+ hp += 2;
+ }
+ DESTROY_WSTACK(stack);
+ return res;
+}
+
+void hashmap_iterator_init(ErtsWStack* s, Eterm node, int reverse) {
+ Eterm hdr = *hashmap_val(node);
+ Uint sz;
+
+ switch(hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ sz = 16;
+ break;
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ case HAMT_SUBTAG_NODE_BITMAP:
+ sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ break;
+ default:
+ erts_exit(ERTS_ABORT_EXIT, "bad header");
+ }
+
+ WSTACK_PUSH3((*s), (UWord)THE_NON_VALUE, /* end marker */
+ (UWord)(!reverse ? 0 : sz+1),
+ (UWord)node);
+}
+
+Eterm* hashmap_iterator_next(ErtsWStack* s) {
+ Eterm node, *ptr, hdr;
+ Uint32 sz;
+ Uint idx;
+
+ for (;;) {
+ ASSERT(!WSTACK_ISEMPTY((*s)));
+ node = (Eterm) WSTACK_POP((*s));
+ if (is_non_value(node)) {
+ return NULL;
+ }
+ idx = (Uint) WSTACK_POP((*s));
+ for (;;) {
+ ASSERT(is_boxed(node));
+ ptr = boxed_val(node);
+ hdr = *ptr;
+ ASSERT(is_header(hdr));
+ switch(hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ ptr++;
+ sz = 16;
+ break;
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ ptr++;
+ case HAMT_SUBTAG_NODE_BITMAP:
+ sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ ASSERT(sz < 17);
+ break;
+ default:
+ erts_exit(ERTS_ABORT_EXIT, "bad header");
+ }
+
+ idx++;
+
+ if (idx <= sz) {
+ WSTACK_PUSH2((*s), (UWord)idx, (UWord)node);
+
+ if (is_list(ptr[idx])) {
+ return list_val(ptr[idx]);
+ }
+ ASSERT(is_boxed(ptr[idx]));
+ node = ptr[idx];
+ idx = 0;
+ }
+ else
+ break; /* and pop parent node */
+ }
+ }
+}
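
hashmap_iterator_next walks the tree without recursion: the WSTACK holds (index, node) pairs, a node is pushed back with its incremented index before descending into a child, and popping the THE_NON_VALUE end marker terminates the walk. The same pattern on a generic n-ary tree, as a stripped-down sketch:

    #include <assert.h>
    #include <stddef.h>

    typedef struct Node {
        int is_leaf;
        int value;            /* valid when is_leaf */
        size_t nchildren;     /* valid when !is_leaf */
        struct Node **children;
    } Node;

    typedef struct { const Node *node; size_t idx; } Frame;

    /* Returns the next leaf, or NULL when the walk is done.
     * `sp` is the caller-owned frame count of `stack`. */
    static const Node *iter_next(Frame *stack, size_t *sp)
    {
        while (*sp > 0) {
            Frame *top = &stack[*sp - 1];
            if (top->node->is_leaf) {
                (*sp)--;                        /* consume the leaf */
                return top->node;
            }
            if (top->idx == top->node->nchildren) {
                (*sp)--;                        /* subtree done: pop parent */
                continue;
            }
            /* advance the parent's index, then descend */
            stack[*sp].node = top->node->children[top->idx++];
            stack[*sp].idx = 0;
            (*sp)++;
        }
        return NULL;
    }

    int main(void) {
        Node a = { 1, 1, 0, NULL }, b = { 1, 2, 0, NULL };
        Node *kids[] = { &a, &b };
        Node root = { 0, 0, 2, kids };
        Frame stack[8] = { { &root, 0 } };
        size_t sp = 1;
        assert(iter_next(stack, &sp)->value == 1);
        assert(iter_next(stack, &sp)->value == 2);
        assert(iter_next(stack, &sp) == NULL);
        return 0;
    }
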
+
+Eterm* hashmap_iterator_prev(ErtsWStack* s) {
+ Eterm node, *ptr, hdr;
+ Uint32 sz;
+ Uint idx;
+
+ for (;;) {
+ ASSERT(!WSTACK_ISEMPTY((*s)));
+ node = (Eterm) WSTACK_POP((*s));
+ if (is_non_value(node)) {
+ return NULL;
+ }
+ idx = (Uint) WSTACK_POP((*s));
+ for (;;) {
+ ASSERT(is_boxed(node));
+ ptr = boxed_val(node);
+ hdr = *ptr;
+ ASSERT(is_header(hdr));
+ switch(hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ ptr++;
+ sz = 16;
+ break;
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ ptr++;
+ case HAMT_SUBTAG_NODE_BITMAP:
+ sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ ASSERT(sz < 17);
+ break;
+ default:
+ erts_exit(ERTS_ERROR_EXIT, "bad header");
+ }
+
+ if (idx > sz)
+ idx = sz;
+ else
+ idx--;
+
+ if (idx >= 1) {
+ WSTACK_PUSH2((*s), (UWord)idx, (UWord)node);
+
+ if (is_list(ptr[idx])) {
+ return list_val(ptr[idx]);
+ }
+ ASSERT(is_boxed(ptr[idx]));
+ node = ptr[idx];
+ idx = 17;
+ }
+ else
+ break; /* and pop parent node */
+ }
+ }
+}
+
+const Eterm *
+erts_hashmap_get(Uint32 hx, Eterm key, Eterm node)
{
- Eterm *ks = map_get_keys(mp);
- Eterm *vs = map_get_values(mp);
- Uint sz = map_get_size(mp);
+ Eterm *ptr, hdr, *res;
+ Uint ix, lvl = 0;
+ Uint32 hval,bp;
+ DeclareTmpHeapNoproc(th,2);
+ UseTmpHeapNoproc(2);
+
+ ASSERT(is_boxed(node));
+ ptr = boxed_val(node);
+ hdr = *ptr;
+ ASSERT(is_header(hdr));
+ ASSERT(is_hashmap_header_head(hdr));
+ ptr++;
+
+ for (;;) {
+ hval = MAP_HEADER_VAL(hdr);
+ ix = hashmap_index(hx);
+ if (hval != 0xffff) {
+ bp = 1 << ix;
+ if (!(bp & hval)) {
+ /* not occupied */
+ res = NULL;
+ break;
+ }
+ ix = hashmap_bitcount(hval & (bp - 1));
+ }
+ node = ptr[ix+1];
+
+ if (is_list(node)) { /* LEAF NODE [K|V] */
+ ptr = list_val(node);
+ res = EQ(CAR(ptr), key) ? &(CDR(ptr)) : NULL;
+ break;
+ }
+
+ hx = hashmap_shift_hash(th,hx,lvl,key);
+
+ ASSERT(is_boxed(node));
+ ptr = boxed_val(node);
+ hdr = *ptr;
+ ASSERT(is_header(hdr));
+ ASSERT(!is_hashmap_header_head(hdr));
+ }
+
+ UnUseTmpHeapNoproc(2);
+ return res;
+}
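
erts_hashmap_get relies on the standard HAMT indexing trick visible above: a 16-bit bitmap records which of a node's 16 slots are occupied, and an occupied slot's position in the compact child array is the popcount of the bitmap bits below it. That one step, as a self-contained sketch using GCC/Clang's __builtin_popcount in place of hashmap_bitcount:

    #include <assert.h>
    #include <stdint.h>

    /* Array index of slot `ix` in a compact node whose occupancy is
     * described by `bitmap`, or -1 if the slot is empty. */
    static int bitmap_index(uint16_t bitmap, unsigned ix)
    {
        uint32_t bit = 1u << ix;
        if (!(bitmap & bit))
            return -1;                              /* not occupied */
        return __builtin_popcount(bitmap & (bit - 1));
    }

    int main(void) {
        /* slots 1, 4 and 9 occupied -> children stored at 0, 1, 2 */
        uint16_t bm = (1 << 1) | (1 << 4) | (1 << 9);
        assert(bitmap_index(bm, 4) == 1);
        assert(bitmap_index(bm, 9) == 2);
        assert(bitmap_index(bm, 0) == -1);
        return 0;
    }
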
+
+Eterm erts_hashmap_insert(Process *p, Uint32 hx, Eterm key, Eterm value,
+ Eterm map, int is_update) {
+ Uint size, upsz;
+ Eterm *hp, res = THE_NON_VALUE;
+ DECLARE_ESTACK(stack);
+ if (erts_hashmap_insert_down(hx, key, map, &size, &upsz, &stack, is_update)) {
+ hp = HAlloc(p, size);
+ res = erts_hashmap_insert_up(hp, key, value, &upsz, &stack);
+ }
+
+ DESTROY_ESTACK(stack);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(p);
+ ERTS_HOLE_CHECK(p);
+
+ return res;
+}
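
erts_hashmap_insert splits the update into two passes: erts_hashmap_insert_down walks to the insertion point, pushing the path onto an ESTACK and computing the exact number of heap words the rebuilt spine will need, and erts_hashmap_insert_up then fills a single HAlloc'ed block, copying only the nodes on that path while sharing every untouched subtree. The path-copying half of that idea, shown as a recursive persistent-tree insert (purely illustrative; the real code uses the explicit stack and one up-front allocation instead of malloc per node):

    #include <stdlib.h>

    typedef struct Tree {
        const struct Tree *left, *right;
        int key, val;
    } Tree;

    static const Tree *node(const Tree *l, const Tree *r, int key, int val)
    {
        Tree *n = malloc(sizeof *n); /* sketch: no out-of-memory handling */
        n->left = l; n->right = r; n->key = key; n->val = val;
        return n;
    }

    /* Persistent insert: copies only the root-to-leaf path; the old
     * version of the tree remains valid and shares all other nodes. */
    static const Tree *insert(const Tree *t, int key, int val)
    {
        if (!t)
            return node(NULL, NULL, key, val);
        if (key == t->key)
            return node(t->left, t->right, key, val);    /* replace value */
        if (key < t->key)
            return node(insert(t->left, key, val), t->right, t->key, t->val);
        return node(t->left, insert(t->right, key, val), t->key, t->val);
    }
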
+
+
+int erts_hashmap_insert_down(Uint32 hx, Eterm key, Eterm node, Uint *sz,
+ Uint *update_size, ErtsEStack *sp, int is_update) {
+ Eterm *ptr;
+ Eterm hdr, ckey;
+ Uint32 ix, cix, bp, hval, chx;
+ Uint slot, lvl = 0, clvl;
+ Uint size = 0, n = 0;
+ DeclareTmpHeapNoproc(th,2);
+
+ *update_size = 1;
+
+ UseTmpHeapNoproc(2);
+ for (;;) {
+ switch(primary_tag(node)) {
+ case TAG_PRIMARY_LIST: /* LEAF NODE [K|V] */
+ ptr = list_val(node);
+ ckey = CAR(ptr);
+ if (EQ(ckey, key)) {
+ *update_size = 0;
+ goto unroll;
+ }
+ if (is_update) {
+ UnUseTmpHeapNoproc(2);
+ return 0;
+ }
+ goto insert_subnodes;
+ case TAG_PRIMARY_BOXED:
+ ptr = boxed_val(node);
+ hdr = *ptr;
+ ASSERT(is_header(hdr));
+
+ switch(hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ ix = hashmap_index(hx);
+ hx = hashmap_shift_hash(th,hx,lvl,key);
+ size += HAMT_HEAD_ARRAY_SZ;
+ ESTACK_PUSH2(*sp, ix, node);
+ node = ptr[ix+2];
+ break;
+ case HAMT_SUBTAG_NODE_BITMAP:
+ hval = MAP_HEADER_VAL(hdr);
+ ix = hashmap_index(hx);
+ bp = 1 << ix;
+ if (hval == 0xffff) {
+ slot = ix;
+ n = 16;
+ } else {
+ slot = hashmap_bitcount(hval & (bp - 1));
+ n = hashmap_bitcount(hval);
+ }
+
+ ESTACK_PUSH4(*sp, n, bp, slot, node);
+
+ if (!(bp & hval)) { /* not occupied */
+ if (is_update) {
+ UnUseTmpHeapNoproc(2);
+ return 0;
+ }
+ size += HAMT_NODE_BITMAP_SZ(n+1);
+ goto unroll;
+ }
+
+ hx = hashmap_shift_hash(th,hx,lvl,key);
+ node = ptr[slot+1];
+ ASSERT(HAMT_NODE_BITMAP_SZ(n) <= 17);
+ size += HAMT_NODE_BITMAP_SZ(n);
+ break;
+
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ hval = MAP_HEADER_VAL(hdr);
+ ix = hashmap_index(hx);
+ bp = 1 << ix;
+ slot = hashmap_bitcount(hval & (bp - 1));
+ n = hashmap_bitcount(hval);
+
+ ESTACK_PUSH4(*sp, n, bp, slot, node);
+
+ /* occupied */
+ if (bp & hval) {
+ hx = hashmap_shift_hash(th,hx,lvl,key);
+ node = ptr[slot+2];
+ ASSERT(HAMT_HEAD_BITMAP_SZ(n) <= 18);
+ size += HAMT_HEAD_BITMAP_SZ(n);
+ break;
+ }
+ /* not occupied */
+ if (is_update) {
+ UnUseTmpHeapNoproc(2);
+ return 0;
+ }
+ size += HAMT_HEAD_BITMAP_SZ(n+1);
+ goto unroll;
+ default:
+ erts_exit(ERTS_ERROR_EXIT, "bad header tag %ld\r\n", hdr & _HEADER_MAP_SUBTAG_MASK);
+ break;
+ }
+ break;
+ default:
+ erts_exit(ERTS_ERROR_EXIT, "bad primary tag %p\r\n", node);
+ break;
+ }
+ }
+insert_subnodes:
+ clvl = lvl;
+ chx = hashmap_restore_hash(th,clvl,ckey);
+ size += HAMT_NODE_BITMAP_SZ(2);
+ ix = hashmap_index(hx);
+ cix = hashmap_index(chx);
+
+ while (cix == ix) {
+ ESTACK_PUSH4(*sp, 0, 1 << ix, 0, MAP_HEADER_HAMT_NODE_BITMAP(0));
+ size += HAMT_NODE_BITMAP_SZ(1);
+ hx = hashmap_shift_hash(th,hx,lvl,key);
+ chx = hashmap_shift_hash(th,chx,clvl,ckey);
+ ix = hashmap_index(hx);
+ cix = hashmap_index(chx);
+ }
+ ESTACK_PUSH3(*sp, cix, ix, node);
+
+unroll:
+ *sz = size + /* res cons */ 2;
+ UnUseTmpHeapNoproc(2);
+ return 1;
+}
+
+Eterm erts_hashmap_insert_up(Eterm *hp, Eterm key, Eterm value,
+ Uint *update_size, ErtsEStack *sp) {
+ Eterm node, *ptr, hdr;
+ Eterm res;
+ Eterm *nhp = NULL;
+ Uint32 ix, cix, bp, hval;
+ Uint slot, n;
+ /* Needed for halfword */
+ DeclareTmpHeapNoproc(fake,1);
+ UseTmpHeapNoproc(1);
+
+ res = CONS(hp, key, value); hp += 2;
+
+ do {
+ node = ESTACK_POP(*sp);
+ switch(primary_tag(node)) {
+ case TAG_PRIMARY_LIST:
+ ix = (Uint32) ESTACK_POP(*sp);
+ cix = (Uint32) ESTACK_POP(*sp);
+
+ nhp = hp;
+ *hp++ = MAP_HEADER_HAMT_NODE_BITMAP((1 << ix) | (1 << cix));
+ if (ix < cix) {
+ *hp++ = res;
+ *hp++ = node;
+ } else {
+ *hp++ = node;
+ *hp++ = res;
+ }
+ res = make_hashmap(nhp);
+ break;
+ case TAG_PRIMARY_HEADER:
+ /* a bare subnode header was pushed; fake a boxed term around it */
+ *fake = node;
+ node = make_boxed(fake);
+ case TAG_PRIMARY_BOXED:
+ ptr = boxed_val(node);
+ hdr = *ptr;
+ ASSERT(is_header(hdr));
+
+ switch(hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ slot = (Uint) ESTACK_POP(*sp);
+ nhp = hp;
+ n = HAMT_HEAD_ARRAY_SZ - 2;
+ *hp++ = MAP_HEADER_HAMT_HEAD_ARRAY; ptr++;
+ *hp++ = (*ptr++) + *update_size;
+ while(n--) { *hp++ = *ptr++; }
+ nhp[slot+2] = res;
+ res = make_hashmap(nhp);
+ break;
+ case HAMT_SUBTAG_NODE_BITMAP:
+ slot = (Uint) ESTACK_POP(*sp);
+ bp = (Uint32) ESTACK_POP(*sp);
+ n = (Uint32) ESTACK_POP(*sp);
+ hval = MAP_HEADER_VAL(hdr);
+ nhp = hp;
+ *hp++ = MAP_HEADER_HAMT_NODE_BITMAP(hval | bp); ptr++;
+
+ n -= slot;
+ while(slot--) { *hp++ = *ptr++; }
+ *hp++ = res;
+ if (hval & bp) { ptr++; n--; }
+ while(n--) { *hp++ = *ptr++; }
+
+ res = make_hashmap(nhp);
+ break;
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ slot = (Uint) ESTACK_POP(*sp);
+ bp = (Uint32) ESTACK_POP(*sp);
+ n = (Uint32) ESTACK_POP(*sp);
+ hval = MAP_HEADER_VAL(hdr);
+ nhp = hp;
+ *hp++ = MAP_HEADER_HAMT_HEAD_BITMAP(hval | bp); ptr++;
+ *hp++ = (*ptr++) + *update_size;
+
+ n -= slot;
+ while(slot--) { *hp++ = *ptr++; }
+ *hp++ = res;
+ if (hval & bp) { ptr++; n--; }
+ while(n--) { *hp++ = *ptr++; }
+
+ if ((hval | bp) == 0xffff) {
+ *nhp = MAP_HEADER_HAMT_HEAD_ARRAY;
+ }
+ res = make_hashmap(nhp);
+ break;
+ default:
+ erts_exit(ERTS_ERROR_EXIT, "bad header tag %x\r\n", hdr & _HEADER_MAP_SUBTAG_MASK);
+ break;
+ }
+ break;
+ default:
+ erts_exit(ERTS_ERROR_EXIT, "bad primary tag %x\r\n", primary_tag(node));
+ break;
+ }
+
+ } while(!ESTACK_ISEMPTY(*sp));
+
+ UnUseTmpHeapNoproc(1);
+ return res;
+}
+
+static Eterm hashmap_keys(Process* p, Eterm node) {
+ DECLARE_WSTACK(stack);
+ hashmap_head_t* root;
+ Eterm *hp, *kv;
+ Eterm res = NIL;
+
+ root = (hashmap_head_t*) boxed_val(node);
+ hp = HAlloc(p, root->size * 2);
+ hashmap_iterator_init(&stack, node, 0);
+ while ((kv=hashmap_iterator_next(&stack)) != NULL) {
+ res = CONS(hp, CAR(kv), res);
+ hp += 2;
+ }
+ DESTROY_WSTACK(stack);
+ return res;
+}
+
+static Eterm hashmap_values(Process* p, Eterm node) {
+ DECLARE_WSTACK(stack);
+ hashmap_head_t* root;
+ Eterm *hp, *kv;
+ Eterm res = NIL;
+
+ root = (hashmap_head_t*) boxed_val(node);
+ hp = HAlloc(p, root->size * 2);
+ hashmap_iterator_init(&stack, node, 0);
+ while ((kv=hashmap_iterator_next(&stack)) != NULL) {
+ res = CONS(hp, CDR(kv), res);
+ hp += 2;
+ }
+ DESTROY_WSTACK(stack);
+ return res;
+}
+
+static Eterm hashmap_delete(Process *p, Uint32 hx, Eterm key,
+ Eterm map, Eterm *value) {
+ Eterm *hp = NULL, *nhp = NULL, *hp_end = NULL;
+ Eterm *ptr;
+ Eterm hdr, res = map, node = map;
+ Uint32 ix, bp, hval;
+ Uint slot, lvl = 0;
+ Uint size = 0, n = 0;
+ DECLARE_ESTACK(stack);
+ DeclareTmpHeapNoproc(th,2);
+ UseTmpHeapNoproc(2);
+
+ for (;;) {
+ switch(primary_tag(node)) {
+ case TAG_PRIMARY_LIST:
+ if (EQ(CAR(list_val(node)), key)) {
+ if (value) {
+ *value = CDR(list_val(node));
+ }
+ goto unroll;
+ }
+ res = THE_NON_VALUE;
+ goto not_found;
+ case TAG_PRIMARY_BOXED:
+ ptr = boxed_val(node);
+ hdr = *ptr;
+ ASSERT(is_header(hdr));
+
+ switch(hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ ix = hashmap_index(hx);
+ hx = hashmap_shift_hash(th,hx,lvl,key);
+ size += HAMT_HEAD_ARRAY_SZ;
+ ESTACK_PUSH2(stack, ix, node);
+ node = ptr[ix+2];
+ break;
+ case HAMT_SUBTAG_NODE_BITMAP:
+ hval = MAP_HEADER_VAL(hdr);
+ ix = hashmap_index(hx);
+ bp = 1 << ix;
+ if (hval == 0xffff) {
+ slot = ix;
+ n = 16;
+ } else if (bp & hval) {
+ slot = hashmap_bitcount(hval & (bp - 1));
+ n = hashmap_bitcount(hval);
+ } else {
+ /* not occupied */
+ res = THE_NON_VALUE;
+ goto not_found;
+ }
+
+ ESTACK_PUSH4(stack, n, bp, slot, node);
+
+ hx = hashmap_shift_hash(th,hx,lvl,key);
+ node = ptr[slot+1];
+ ASSERT(HAMT_NODE_BITMAP_SZ(n) <= 17);
+ size += HAMT_NODE_BITMAP_SZ(n);
+ break;
+
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ hval = MAP_HEADER_VAL(hdr);
+ ix = hashmap_index(hx);
+ bp = 1 << ix;
+ slot = hashmap_bitcount(hval & (bp - 1));
+ n = hashmap_bitcount(hval);
+
+ ESTACK_PUSH4(stack, n, bp, slot, node);
+
+ /* occupied */
+ if (bp & hval) {
+ hx = hashmap_shift_hash(th,hx,lvl,key);
+ node = ptr[slot+2];
+ ASSERT(HAMT_HEAD_BITMAP_SZ(n) <= 18);
+ size += HAMT_HEAD_BITMAP_SZ(n);
+ break;
+ }
+ /* not occupied */
+ res = THE_NON_VALUE;
+ goto not_found;
+ default:
+ erts_exit(ERTS_ERROR_EXIT, "bad header tag %ld\r\n", hdr & _HEADER_MAP_SUBTAG_MASK);
+ break;
+ }
+ break;
+ default:
+ erts_exit(ERTS_ERROR_EXIT, "bad primary tag %p\r\n", node);
+ break;
+ }
+ }
+
+unroll:
+ /* the size is bounded and at least one less than the previous size */
+ size -= 1;
+ n = hashmap_size(map) - 1;
+
+ if (n <= MAP_SMALL_MAP_LIMIT) {
+ DECLARE_WSTACK(wstack);
+ Eterm *kv, *ks, *vs;
+ flatmap_t *mp;
+ Eterm keys;
+
+ DESTROY_ESTACK(stack);
+
+ /* build flat structure */
+ hp = HAlloc(p, 3 + 1 + (2 * n));
+ keys = make_tuple(hp);
+ *hp++ = make_arityval(n);
+ ks = hp;
+ hp += n;
+ mp = (flatmap_t*)hp;
+ hp += MAP_HEADER_FLATMAP_SZ;
+ vs = hp;
+
+ mp->thing_word = MAP_HEADER_FLATMAP;
+ mp->size = n;
+ mp->keys = keys;
+
+ hashmap_iterator_init(&wstack, map, 0);
+
+ while ((kv=hashmap_iterator_next(&wstack)) != NULL) {
+ if (EQ(CAR(kv),key))
+ continue;
+ *ks++ = CAR(kv);
+ *vs++ = CDR(kv);
+ }
+
+ /* the iteration cannot produce duplicate keys, so sorting is safe */
+ erts_validate_and_sort_flatmap(mp);
+
+ DESTROY_WSTACK(wstack);
+ UnUseTmpHeapNoproc(2);
+ return make_flatmap(mp);
+ }
+
+ hp = HAlloc(p, size);
+ hp_end = hp + size;
+ res = THE_NON_VALUE;
+
+ do {
+ node = ESTACK_POP(stack);
+
+ /* all nodes popped here are boxed things, never leafs */
+ ptr = boxed_val(node);
+ hdr = *ptr;
+ ASSERT(is_header(hdr));
+
+ switch(hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ ix = (Uint) ESTACK_POP(stack);
+ nhp = hp;
+ if (res == THE_NON_VALUE) {
+ n = 16;
+ n -= ix;
+ *hp++ = MAP_HEADER_HAMT_HEAD_BITMAP(0xffff ^ (1 << ix)); ptr++;
+ *hp++ = (*ptr++) - 1;
+ while(ix--) { *hp++ = *ptr++; }
+ ptr++; n--;
+ while(n--) { *hp++ = *ptr++; }
+ res = make_hashmap(nhp);
+ } else {
+ n = 16;
+ *hp++ = MAP_HEADER_HAMT_HEAD_ARRAY; ptr++;
+ *hp++ = (*ptr++) - 1;
+ while(n--) { *hp++ = *ptr++; }
+ nhp[ix+2] = res;
+ res = make_hashmap(nhp);
+ }
+ break;
+ case HAMT_SUBTAG_NODE_BITMAP:
+ slot = (Uint) ESTACK_POP(stack);
+ bp = (Uint32) ESTACK_POP(stack);
+ n = (Uint32) ESTACK_POP(stack);
+ nhp = hp;
+
+ /* bitmap change matrix
+  * res |  none    leaf    bitmap
+  * ----------------------------
+  * n=1 |  remove  remove  keep
+  * n=2 |  other   keep    keep
+  * n>2 |  shrink  keep    keep
+  *
+  * other: (remember, n is 2)
+  *   shrink if the other bitmap value is a bitmap node
+  *   remove if the other bitmap value is a leaf
+  *
+  * remove:
+  *   this bitmap node is removed and res is moved up in the tree
+  *   (it may be none); this is a special case of shrink
+  *
+  * keep:
+  *   the current path index is still used further down in the tree,
+  *   so keep it and copy the node as usual with the updated res
+  *
+  * shrink:
+  *   the current path index is no longer used further down in the
+  *   tree, so remove it (shrink the node)
+  */
+ if (res == THE_NON_VALUE) {
+ if (n == 1) {
+ break;
+ } else if (n == 2) {
+ if (slot == 0) {
+ ix = 2; /* offset by one for the header word */
+ } else {
+ ix = 1; /* offset by one for the header word */
+ }
+ if (primary_tag(ptr[ix]) == TAG_PRIMARY_LIST) {
+ res = ptr[ix];
+ } else {
+ hval = MAP_HEADER_VAL(hdr);
+ *hp++ = MAP_HEADER_HAMT_NODE_BITMAP(hval ^ bp);
+ *hp++ = ptr[ix];
+ res = make_hashmap(nhp);
+ }
+ } else {
+ /* n > 2 */
+ hval = MAP_HEADER_VAL(hdr);
+ *hp++ = MAP_HEADER_HAMT_NODE_BITMAP(hval ^ bp); ptr++;
+ n -= slot;
+ while(slot--) { *hp++ = *ptr++; }
+ ptr++; n--;
+ while(n--) { *hp++ = *ptr++; }
+ res = make_hashmap(nhp);
+ }
+ } else if (primary_tag(res) == TAG_PRIMARY_LIST && n == 1) {
+ break;
+ } else {
+ /* res is bitmap or leaf && n > 1, keep */
+ n -= slot;
+ *hp++ = *ptr++;
+ while(slot--) { *hp++ = *ptr++; }
+ *hp++ = res;
+ ptr++; n--;
+ while(n--) { *hp++ = *ptr++; }
+ res = make_hashmap(nhp);
+ }
+ break;
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ slot = (Uint) ESTACK_POP(stack);
+ bp = (Uint32) ESTACK_POP(stack);
+ n = (Uint32) ESTACK_POP(stack);
+ nhp = hp;
+
+ if (res != THE_NON_VALUE) {
+ *hp++ = *ptr++;
+ *hp++ = (*ptr++) - 1;
+ n -= slot;
+ while(slot--) { *hp++ = *ptr++; }
+ *hp++ = res;
+ ptr++; n--;
+ while(n--) { *hp++ = *ptr++; }
+ } else {
+ hval = MAP_HEADER_VAL(hdr);
+ *hp++ = MAP_HEADER_HAMT_HEAD_BITMAP(hval ^ bp); ptr++;
+ *hp++ = (*ptr++) - 1;
+ n -= slot;
+ while(slot--) { *hp++ = *ptr++; }
+ ptr++; n--;
+ while(n--) { *hp++ = *ptr++; }
+ }
+ res = make_hashmap(nhp);
+ break;
+ default:
+ erts_exit(ERTS_ERROR_EXIT, "bad header tag %x\r\n", hdr & _HEADER_MAP_SUBTAG_MASK);
+ break;
+ }
+ } while(!ESTACK_ISEMPTY(stack));
+ HRelease(p, hp_end, hp);
+not_found:
+ DESTROY_ESTACK(stack);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(p);
+ ERTS_HOLE_CHECK(p);
+ UnUseTmpHeapNoproc(2);
+ return res;
+}
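+
+/* Deletion mirrors insertion: the path is recorded on an ESTACK on
+ * the way down and the spine is rebuilt on the way up with the
+ * removed slot compressed away. When the resulting size drops to
+ * MAP_SMALL_MAP_LIMIT the whole tree is collapsed back into a sorted
+ * flatmap instead.
+ */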
+
+
+int erts_validate_and_sort_flatmap(flatmap_t* mp)
+{
+ Eterm *ks = flatmap_get_keys(mp);
+ Eterm *vs = flatmap_get_values(mp);
+ Uint sz = flatmap_get_size(mp);
Uint ix,jx;
Eterm tmp;
- int c;
+ Sint c;
/* sort */
@@ -819,3 +2746,345 @@ int erts_validate_and_sort_map(map_t* mp)
}
return 1;
}
+
+#if 0 /* Can't get myself to remove this beautiful piece of code
+ for probabilistic overestimation of the number of nodes in a hashmap */
+
+/* Really rough estimate of sqrt(x)
+ * Guaranteed not to be less than sqrt(x)
+ */
+static int int_sqrt_ceiling(Uint x)
+{
+ int n;
+
+ if (x <= 2)
+ return x;
+
+ n = erts_fit_in_bits_uint(x-1);
+ if (n & 1) {
+ /* Calc: sqrt(2^n) = 2^(n/2) * sqrt(2) ~= 2^(n/2) * 3 / 2 */
+ return (1 << (n/2 - 1)) * 3;
+ }
+ else {
+ /* Calc: sqrt(2^n) = 2^(n/2) */
+ return 1 << (n / 2);
+ }
+}
+
+/* May not be enough if hashing is broken (not uniform)
+ * or if hell freezes over.
+ */
+Uint hashmap_overestimated_node_count(Uint k)
+{
+ /* k is the number of key-value pairs.
+ N(k) is the expected number of nodes in the HAMT.
+
+ Observation:
+ For uniformly distributed hash values, the average of N varies between
+ 0.3*k and 0.4*k (with a beautiful sine curve)
+ and the standard deviation of N is about sqrt(k)/3.
+
+ Assuming a normal probability distribution, we overestimate the number
+ of nodes by 15 std.devs above the average, which gives a probability of
+ overrun of less than 1.0e-49 (the same magnitude as a git SHA1 collision).
+ */
+ return 2*k/5 + 1 + (15/3)*int_sqrt_ceiling(k);
+}
+#endif
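+
+/* Worked example of the disabled estimate above: for k = 10000 keys,
+ * 2*k/5 + 1 = 4001 and int_sqrt_ceiling(10000) = 1 << 7 = 128 (since
+ * 2^13 < 10000 <= 2^14, n = 14 is even), giving 4001 + 5*128 = 4641
+ * nodes, comfortably above the expected 0.3*k..0.4*k range.
+ */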
+
+BIF_RETTYPE erts_debug_map_info_1(BIF_ALIST_1) {
+ if (is_hashmap(BIF_ARG_1)) {
+ BIF_RET(hashmap_info(BIF_P,BIF_ARG_1));
+ } else if (is_flatmap(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ } else {
+ BIF_P->fvalue = BIF_ARG_1;
+ BIF_ERROR(BIF_P, BADMAP);
+ }
+}
+
+/*
+ * erts_internal:map_to_tuple_keys/1
+ *
+ * Used in erts_debug:size/1
+ */
+
+BIF_RETTYPE erts_internal_map_to_tuple_keys_1(BIF_ALIST_1) {
+ if (is_flatmap(BIF_ARG_1)) {
+ flatmap_t *mp = (flatmap_t*)flatmap_val(BIF_ARG_1);
+ BIF_RET(mp->keys);
+ } else if (is_hashmap(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ } else {
+ BIF_P->fvalue = BIF_ARG_1;
+ BIF_ERROR(BIF_P, BADMAP);
+ }
+}
+
+/*
+ * erts_internal:term_type/1
+ *
+ * Used in erts_debug:size/1
+ */
+
+BIF_RETTYPE erts_internal_term_type_1(BIF_ALIST_1) {
+ Eterm obj = BIF_ARG_1;
+ switch (primary_tag(obj)) {
+ case TAG_PRIMARY_LIST:
+ BIF_RET(ERTS_MAKE_AM("list"));
+ case TAG_PRIMARY_BOXED: {
+ Eterm hdr = *boxed_val(obj);
+ ASSERT(is_header(hdr));
+ switch (hdr & _TAG_HEADER_MASK) {
+ case ARITYVAL_SUBTAG:
+ BIF_RET(ERTS_MAKE_AM("tuple"));
+ case EXPORT_SUBTAG:
+ BIF_RET(ERTS_MAKE_AM("export"));
+ case FUN_SUBTAG:
+ BIF_RET(ERTS_MAKE_AM("fun"));
+ case MAP_SUBTAG:
+ switch (MAP_HEADER_TYPE(hdr)) {
+ case MAP_HEADER_TAG_FLATMAP_HEAD :
+ BIF_RET(ERTS_MAKE_AM("flatmap"));
+ case MAP_HEADER_TAG_HAMT_HEAD_BITMAP :
+ case MAP_HEADER_TAG_HAMT_HEAD_ARRAY :
+ BIF_RET(ERTS_MAKE_AM("hashmap"));
+ case MAP_HEADER_TAG_HAMT_NODE_BITMAP :
+ BIF_RET(ERTS_MAKE_AM("hashmap_node"));
+ default:
+ erts_exit(ERTS_ABORT_EXIT, "term_type: bad map header type %d\n", MAP_HEADER_TYPE(hdr));
+ }
+ case REFC_BINARY_SUBTAG:
+ BIF_RET(ERTS_MAKE_AM("refc_binary"));
+ case HEAP_BINARY_SUBTAG:
+ BIF_RET(ERTS_MAKE_AM("heap_binary"));
+ case SUB_BINARY_SUBTAG:
+ BIF_RET(ERTS_MAKE_AM("sub_binary"));
+ case BIN_MATCHSTATE_SUBTAG:
+ BIF_RET(ERTS_MAKE_AM("matchstate"));
+ case POS_BIG_SUBTAG:
+ case NEG_BIG_SUBTAG:
+ BIF_RET(ERTS_MAKE_AM("bignum"));
+ case REF_SUBTAG:
+ BIF_RET(ERTS_MAKE_AM("reference"));
+ case EXTERNAL_REF_SUBTAG:
+ BIF_RET(ERTS_MAKE_AM("external_reference"));
+ case EXTERNAL_PID_SUBTAG:
+ BIF_RET(ERTS_MAKE_AM("external_pid"));
+ case EXTERNAL_PORT_SUBTAG:
+ BIF_RET(ERTS_MAKE_AM("external_port"));
+ case FLOAT_SUBTAG:
+ BIF_RET(ERTS_MAKE_AM("hfloat"));
+ default:
+ erts_exit(ERTS_ABORT_EXIT, "term_type: Invalid tag (0x%X)\n", hdr);
+ }
+ }
+ case TAG_PRIMARY_IMMED1:
+ switch (obj & _TAG_IMMED1_MASK) {
+ case _TAG_IMMED1_SMALL:
+ BIF_RET(ERTS_MAKE_AM("fixnum"));
+ case _TAG_IMMED1_PID:
+ BIF_RET(ERTS_MAKE_AM("pid"));
+ case _TAG_IMMED1_PORT:
+ BIF_RET(ERTS_MAKE_AM("port"));
+ case _TAG_IMMED1_IMMED2:
+ switch (obj & _TAG_IMMED2_MASK) {
+ case _TAG_IMMED2_ATOM:
+ BIF_RET(ERTS_MAKE_AM("atom"));
+ case _TAG_IMMED2_CATCH:
+ BIF_RET(ERTS_MAKE_AM("catch"));
+ case _TAG_IMMED2_NIL:
+ BIF_RET(ERTS_MAKE_AM("nil"));
+ default:
+ erts_exit(ERTS_ABORT_EXIT, "term_type: Invalid tag (0x%X)\n", obj);
+ }
+ default:
+ erts_exit(ERTS_ABORT_EXIT, "term_type: Invalid tag (0x%X)\n", obj);
+ }
+ default:
+ erts_exit(ERTS_ABORT_EXIT, "term_type: Invalid tag (0x%X)\n", obj);
+ }
+}
+
+/*
+ * erts_internal:map_hashmap_children/1
+ *
+ * Used in erts_debug:size/1
+ */
+
+BIF_RETTYPE erts_internal_map_hashmap_children_1(BIF_ALIST_1) {
+ if (is_map(BIF_ARG_1)) {
+ Eterm node = BIF_ARG_1;
+ Eterm *ptr, hdr, *hp, res = NIL;
+ Uint sz = 0;
+ ptr = boxed_val(node);
+ hdr = *ptr;
+ ASSERT(is_header(hdr));
+
+ switch(hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_FLATMAP:
+ BIF_ERROR(BIF_P, BADARG);
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ ptr++;
+ case HAMT_SUBTAG_NODE_BITMAP:
+ ptr++;
+ sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ break;
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ sz = 16;
+ ptr += 2;
+ break;
+ default:
+ erts_exit(ERTS_ERROR_EXIT, "bad header\r\n");
+ break;
+ }
+ ASSERT(sz < 17);
+ hp = HAlloc(BIF_P, 2*sz);
+ while(sz--) { res = CONS(hp, *ptr++, res); hp += 2; }
+ BIF_RET(res);
+ }
+ BIF_P->fvalue = BIF_ARG_1;
+ BIF_ERROR(BIF_P, BADMAP);
+}
+
+
+static Eterm hashmap_info(Process *p, Eterm node) {
+ Eterm *hp;
+ Eterm res = NIL, info = NIL;
+ Eterm *ptr, tup, hdr;
+ Uint sz;
+ DECL_AM(depth);
+ DECL_AM(leafs);
+ DECL_AM(bitmaps);
+ DECL_AM(arrays);
+ Uint nleaf=0, nbitmap=0, narray=0;
+ Uint bitmap_usage[16], leaf_usage[16];
+ Uint lvl = 0, clvl;
+ DECLARE_ESTACK(stack);
+
+ for (sz = 0; sz < 16; sz++) {
+ bitmap_usage[sz] = 0;
+ leaf_usage[sz] = 0;
+ }
+
+ ptr = boxed_val(node);
+ ESTACK_PUSH(stack, 0);
+ ESTACK_PUSH(stack, node);
+ do {
+ node = ESTACK_POP(stack);
+ clvl = ESTACK_POP(stack);
+ if (lvl < clvl)
+ lvl = clvl;
+ switch(primary_tag(node)) {
+ case TAG_PRIMARY_LIST:
+ nleaf++;
+ leaf_usage[clvl] += 1;
+ break;
+ case TAG_PRIMARY_BOXED:
+ ptr = boxed_val(node);
+ hdr = *ptr;
+ ASSERT(is_header(hdr));
+ switch(hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_NODE_BITMAP:
+ nbitmap++;
+ sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ ASSERT(sz < 17);
+ bitmap_usage[sz-1] += 1;
+ while(sz--) {
+ ESTACK_PUSH(stack, clvl + 1);
+ ESTACK_PUSH(stack, ptr[sz+1]);
+ }
+ break;
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ nbitmap++;
+ sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ bitmap_usage[sz-1] += 1;
+ while(sz--) {
+ ESTACK_PUSH(stack, clvl + 1);
+ ESTACK_PUSH(stack, ptr[sz+2]);
+ }
+ break;
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ narray++;
+ sz = 16;
+ while(sz--) {
+ ESTACK_PUSH(stack, clvl + 1);
+ ESTACK_PUSH(stack, ptr[sz+2]);
+ }
+ break;
+ default:
+ erts_exit(ERTS_ERROR_EXIT, "bad header\r\n");
+ break;
+ }
+ }
+ } while(!ESTACK_ISEMPTY(stack));
+
+
+ /* size */
+ sz = 0;
+ hashmap_bld_tuple_uint(NULL,&sz,16,leaf_usage);
+ hashmap_bld_tuple_uint(NULL,&sz,16,bitmap_usage);
+
+ /* alloc */
+ hp = HAlloc(p, 2+3 + 3*(2+4) + sz);
+
+ info = hashmap_bld_tuple_uint(&hp,NULL,16,leaf_usage);
+ tup = TUPLE3(hp, AM_leafs, make_small(nleaf),info); hp += 4;
+ res = CONS(hp, tup, res); hp += 2;
+
+ info = hashmap_bld_tuple_uint(&hp,NULL,16,bitmap_usage);
+ tup = TUPLE3(hp, AM_bitmaps, make_small(nbitmap), info); hp += 4;
+ res = CONS(hp, tup, res); hp += 2;
+
+ tup = TUPLE3(hp, AM_arrays, make_small(narray),NIL); hp += 4;
+ res = CONS(hp, tup, res); hp += 2;
+
+ tup = TUPLE2(hp, AM_depth, make_small(lvl)); hp += 3;
+ res = CONS(hp, tup, res); hp += 2;
+
+ DESTROY_ESTACK(stack);
+ ERTS_HOLE_CHECK(p);
+ return res;
+}
+
+static Eterm hashmap_bld_tuple_uint(Uint **hpp, Uint *szp, Uint n, Uint nums[]) {
+ Eterm res = THE_NON_VALUE;
+ Eterm *ts = (Eterm *)erts_alloc(ERTS_ALC_T_TMP, n * sizeof(Eterm));
+ Uint i;
+
+ for (i = 0; i < n; i++) {
+ ts[i] = erts_bld_uint(hpp, szp, nums[i]);
+ }
+ res = erts_bld_tuplev(hpp, szp, n, ts);
+ erts_free(ERTS_ALC_T_TMP, (void *) ts);
+ return res;
+}
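+
+/* hashmap_bld_tuple_uint() follows the usual two-pass erts_bld_*
+ * protocol: hashmap_info() above first calls it with szp to count the
+ * words needed, allocates, and then calls it again with hpp to build
+ * the tuple on the heap.
+ */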
+
+
+/* implementation of builtin emulations */
+
+#if !ERTS_AT_LEAST_GCC_VSN__(3, 4, 0)
+/* Count leading zeros emulation */
+Uint32 hashmap_clz(Uint32 x) {
+ Uint32 y;
+ int n = 32;
+ y = x >>16; if (y != 0) {n = n -16; x = y;}
+ y = x >> 8; if (y != 0) {n = n - 8; x = y;}
+ y = x >> 4; if (y != 0) {n = n - 4; x = y;}
+ y = x >> 2; if (y != 0) {n = n - 2; x = y;}
+ y = x >> 1; if (y != 0) return n - 2;
+ return n - x;
+}
+
+const Uint32 SK5 = 0x55555555, SK3 = 0x33333333;
+const Uint32 SKF0 = 0xF0F0F0F, SKFF = 0xFF00FF;
+
+/* CTPOP emulation */
+Uint32 hashmap_bitcount(Uint32 x) {
+ x -= ((x >> 1 ) & SK5);
+ x = (x & SK3 ) + ((x >> 2 ) & SK3 );
+ x = (x & SKF0) + ((x >> 4 ) & SKF0);
+ x += x >> 8;
+ return (x + (x >> 16)) & 0x3F;
+}
+#endif
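+
+/* Sanity examples for the emulations above: hashmap_clz(1) = 31,
+ * hashmap_clz(0x80000000) = 0 and hashmap_bitcount(0xffff) = 16,
+ * matching __builtin_clz()/__builtin_popcount() where the intrinsics
+ * are used instead (see erl_map.h).
+ */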
diff --git a/erts/emulator/beam/erl_map.h b/erts/emulator/beam/erl_map.h
index cfacb2ec28..c3ccf80b85 100644
--- a/erts/emulator/beam/erl_map.h
+++ b/erts/emulator/beam/erl_map.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2014. All Rights Reserved.
+ * Copyright Ericsson AB 2014-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -22,13 +23,23 @@
#define __ERL_MAP_H__
#include "sys.h"
+
+/* intrinsic wrappers */
+#if ERTS_AT_LEAST_GCC_VSN__(3, 4, 0)
+#define hashmap_clz(x) ((Uint32) __builtin_clz((unsigned int)(x)))
+#define hashmap_bitcount(x) ((Uint32) __builtin_popcount((unsigned int) (x)))
+#else
+Uint32 hashmap_clz(Uint32 x);
+Uint32 hashmap_bitcount(Uint32 x);
+#endif
+
/* MAP */
-typedef struct map_s {
+typedef struct flatmap_s {
Eterm thing_word;
Uint size;
Eterm keys; /* tuple */
-} map_t;
+} flatmap_t;
/* map node
*
* -----------
@@ -42,31 +53,147 @@ typedef struct map_s {
* -----------
*/
+/* the head-node is a bitmap or array with an untagged size */
+
+
+#define hashmap_size(x) (((hashmap_head_t*) hashmap_val(x))->size)
+#define hashmap_make_hash(Key) make_internal_hash(Key, 0)
+
+#define hashmap_restore_hash(Heap,Lvl,Key) \
+ (((Lvl) < 8) ? hashmap_make_hash(Key) >> (4*(Lvl)) : hashmap_make_hash(CONS(Heap, make_small((Lvl)>>3), (Key))) >> (4*((Lvl) & 7)))
+#define hashmap_shift_hash(Heap,Hx,Lvl,Key) \
+ (((++(Lvl)) & 7) ? (Hx) >> 4 : hashmap_make_hash(CONS(Heap, make_small((Lvl)>>3), Key)))
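+
+/* The hash is consumed four bits (one level) at a time. While lvl < 8
+ * the nibbles come straight from the 32-bit hash; once all eight
+ * nibbles are used, hashmap_shift_hash() rehashes the cons cell
+ * [lvl>>3 | Key] built on the caller's temporary heap, which is why
+ * the *_hash helpers take a Heap argument.
+ */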
/* erl_term.h stuff */
-#define make_map(x) make_boxed((Eterm*)(x))
-#define make_map_rel(x, BASE) make_boxed_rel((Eterm*)(x),(BASE))
-#define is_map(x) (is_boxed((x)) && is_map_header(*boxed_val((x))))
-#define is_map_rel(RTERM,BASE) is_map(rterm2wterm(RTERM,BASE))
-#define is_not_map(x) (!is_map((x)))
-#define is_map_header(x) (((x) & (_TAG_HEADER_MASK)) == _TAG_HEADER_MAP)
-#define header_is_map(x) ((((x) & (_HEADER_SUBTAG_MASK)) == MAP_SUBTAG))
-#define map_val(x) (_unchecked_boxed_val((x)))
-#define map_val_rel(RTERM, BASE) map_val(rterm2wterm(RTERM, BASE))
-
-#define map_get_values(x) (((Eterm *)(x)) + 3)
-#define map_get_keys(x) (((Eterm *)tuple_val(((map_t *)(x))->keys)) + 1)
-#define map_get_size(x) (((map_t*)(x))->size)
-
-#define MAP_HEADER _make_header(1,_TAG_HEADER_MAP)
-#define MAP_HEADER_SIZE (sizeof(map_t) / sizeof(Eterm))
-
-Eterm erts_maps_put(Process *p, Eterm key, Eterm value, Eterm map);
-int erts_maps_update(Process *p, Eterm key, Eterm value, Eterm map, Eterm *res);
-int erts_maps_find(Eterm key, Eterm map, Eterm *value);
-int erts_maps_get(Eterm key, Eterm map, Eterm *value);
-int erts_maps_remove(Process *p, Eterm key, Eterm map, Eterm *res);
-int erts_validate_and_sort_map(map_t* map);
+#define flatmap_get_values(x) (((Eterm *)(x)) + sizeof(flatmap_t)/sizeof(Eterm))
+#define flatmap_get_keys(x) (((Eterm *)tuple_val(((flatmap_t *)(x))->keys)) + 1)
+#define flatmap_get_size(x) (((flatmap_t*)(x))->size)
+
+#ifdef DEBUG
+#define MAP_SMALL_MAP_LIMIT (3)
+#else
+#define MAP_SMALL_MAP_LIMIT (32)
#endif
+struct ErtsWStack_;
+struct ErtsEStack_;
+
+Eterm erts_maps_put(Process *p, Eterm key, Eterm value, Eterm map);
+int erts_maps_update(Process *p, Eterm key, Eterm value, Eterm map, Eterm *res);
+int erts_maps_remove(Process *p, Eterm key, Eterm map, Eterm *res);
+int erts_maps_take(Process *p, Eterm key, Eterm map, Eterm *res, Eterm *value);
+
+Eterm erts_hashmap_insert(Process *p, Uint32 hx, Eterm key, Eterm value,
+ Eterm node, int is_update);
+int erts_hashmap_insert_down(Uint32 hx, Eterm key, Eterm node, Uint *sz,
+ Uint *upsz, struct ErtsEStack_ *sp, int is_update);
+Eterm erts_hashmap_insert_up(Eterm *hp, Eterm key, Eterm value,
+ Uint *upsz, struct ErtsEStack_ *sp);
+
+int erts_validate_and_sort_flatmap(flatmap_t* map);
+void hashmap_iterator_init(struct ErtsWStack_* s, Eterm node, int reverse);
+Eterm* hashmap_iterator_next(struct ErtsWStack_* s);
+Eterm* hashmap_iterator_prev(struct ErtsWStack_* s);
+int hashmap_key_hash_cmp(Eterm* ap, Eterm* bp);
+Eterm erts_hashmap_from_array(ErtsHeapFactory*, Eterm *leafs, Uint n, int reject_dupkeys);
+
+#define erts_hashmap_from_ks_and_vs(F, KS, VS, N) \
+ erts_hashmap_from_ks_and_vs_extra((F), (KS), (VS), (N), THE_NON_VALUE, THE_NON_VALUE);
+
+Eterm erts_map_from_ks_and_vs(ErtsHeapFactory *factory, Eterm *ks, Eterm *vs, Uint n);
+Eterm erts_hashmap_from_ks_and_vs_extra(ErtsHeapFactory *factory,
+ Eterm *ks, Eterm *vs, Uint n,
+ Eterm k, Eterm v);
+
+const Eterm *erts_maps_get(Eterm key, Eterm map);
+
+const Eterm *erts_hashmap_get(Uint32 hx, Eterm key, Eterm map);
+
+/* hamt nodes v2.0
+ *
+ * node :: leaf | array | bitmap
+ * head
+ */
+typedef struct hashmap_head_s {
+ Eterm thing_word;
+ Uint size;
+ Eterm items[1];
+} hashmap_head_t;
+
+/* thing_word tagscheme
+ * Need two bits for map subtags
+ *
+ * Original HEADER representation:
+ *
+ * aaaaaaaaaaaaaaaa aaaaaaaaaatttt00 arity:26, tag:4
+ *
+ * For maps we have:
+ *
+ * vvvvvvvvvvvvvvvv aaaaaaaamm111100 val:16, arity:8, mtype:2
+ *
+ * unsure about trailing zeros
+ *
+ * map-tag:
+ * 00 - flat map tag (non-hamt) -> val:16 = #items
+ * 01 - map-node bitmap tag -> val:16 = bitmap
+ * 10 - map-head (array-node) -> val:16 = 0xffff
+ * 11 - map-head (bitmap-node) -> val:16 = bitmap
+ */
+
+/* erl_map.h stuff */
+
+#define is_hashmap_header_head(x) ((MAP_HEADER_TYPE(x) & (0x2)))
+
+#define MAKE_MAP_HEADER(Type,Arity,Val) \
+ (_make_header(((((Uint16)(Val)) << MAP_HEADER_ARITY_SZ) | (Arity)) << MAP_HEADER_TAG_SZ | (Type) , _TAG_HEADER_MAP))
+
+#define MAP_HEADER_FLATMAP \
+ MAKE_MAP_HEADER(MAP_HEADER_TAG_FLATMAP_HEAD,0x1,0x0)
+
+#define MAP_HEADER_HAMT_HEAD_ARRAY \
+ MAKE_MAP_HEADER(MAP_HEADER_TAG_HAMT_HEAD_ARRAY,0x1,0xffff)
+
+#define MAP_HEADER_HAMT_HEAD_BITMAP(Bmp) \
+ MAKE_MAP_HEADER(MAP_HEADER_TAG_HAMT_HEAD_BITMAP,0x1,Bmp)
+
+#define MAP_HEADER_HAMT_NODE_BITMAP(Bmp) \
+ MAKE_MAP_HEADER(MAP_HEADER_TAG_HAMT_NODE_BITMAP,0x0,Bmp)
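+
+/* Example encoding under the scheme above: a bitmap node with bitmap
+ * 0x0014 gets the header MAP_HEADER_HAMT_NODE_BITMAP(0x0014), i.e.
+ * val:16 = 0x0014, arity:8 = 0x0 and map-tag 01 packed in front of
+ * the boxed-header tag bits.
+ */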
+
+#define MAP_HEADER_FLATMAP_SZ (sizeof(flatmap_t) / sizeof(Eterm))
+
+#define HAMT_NODE_ARRAY_SZ (17)
+#define HAMT_HEAD_ARRAY_SZ (18)
+#define HAMT_NODE_BITMAP_SZ(n) (1 + n)
+#define HAMT_HEAD_BITMAP_SZ(n) (2 + n)
+
+/* 2 bits maps tag + 4 bits subtag + 2 ignore bits */
+#define _HEADER_MAP_SUBTAG_MASK (0xfc)
+/* 1 bit map tag + 1 ignore bit + 4 bits subtag + 2 ignore bits */
+#define _HEADER_MAP_HASHMAP_HEAD_MASK (0xbc)
+
+#define HAMT_SUBTAG_NODE_BITMAP ((MAP_HEADER_TAG_HAMT_NODE_BITMAP << _HEADER_ARITY_OFFS) | MAP_SUBTAG)
+#define HAMT_SUBTAG_HEAD_ARRAY ((MAP_HEADER_TAG_HAMT_HEAD_ARRAY << _HEADER_ARITY_OFFS) | MAP_SUBTAG)
+#define HAMT_SUBTAG_HEAD_BITMAP ((MAP_HEADER_TAG_HAMT_HEAD_BITMAP << _HEADER_ARITY_OFFS) | MAP_SUBTAG)
+#define HAMT_SUBTAG_HEAD_FLATMAP ((MAP_HEADER_TAG_FLATMAP_HEAD << _HEADER_ARITY_OFFS) | MAP_SUBTAG)
+
+#define hashmap_index(hash) (((Uint32)hash) & 0xf)
+
+/* hashmap heap size:
+   [one cons cell + one list term in parent node] per key
+   [one header + one boxed term in parent node] per inner node
+   [one header + one size word] for the root node
+   The observed average number of nodes per key is about 0.35.
+*/
+#define HASHMAP_WORDS_PER_KEY 3
+#define HASHMAP_WORDS_PER_NODE 2
+#ifdef DEBUG
+# define HASHMAP_ESTIMATED_TOT_NODE_SIZE(KEYS) \
+ (HASHMAP_WORDS_PER_NODE * (KEYS) * 3/10) /* slightly underestimated */
+#else
+# define HASHMAP_ESTIMATED_TOT_NODE_SIZE(KEYS) \
+ (HASHMAP_WORDS_PER_NODE * (KEYS) * 4/10) /* slightly overestimated */
+#endif
+#define HASHMAP_ESTIMATED_HEAP_SIZE(KEYS) \
+ ((KEYS)*HASHMAP_WORDS_PER_KEY + HASHMAP_ESTIMATED_TOT_NODE_SIZE(KEYS))
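+
+/* Example: for 100 keys the non-debug estimate is
+ * 100*HASHMAP_WORDS_PER_KEY + HASHMAP_WORDS_PER_NODE*100*4/10
+ * = 300 + 80 = 380 heap words.
+ */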
+#endif
diff --git a/erts/emulator/beam/erl_math.c b/erts/emulator/beam/erl_math.c
index 16d4fdc09c..1f270eb55f 100644
--- a/erts/emulator/beam/erl_math.c
+++ b/erts/emulator/beam/erl_math.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -207,6 +208,24 @@ BIF_RETTYPE math_log_1(BIF_ALIST_1)
return math_call_1(BIF_P, log, BIF_ARG_1);
}
+#ifdef HAVE_LOG2
+static double
+log2_wrapper(double x)
+{
+ return log2(x);
+}
+#else
+static double
+log2_wrapper(double x)
+{
+ return log(x) / 0.6931471805599453; /* log(2.0); */
+}
+#endif
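+
+/* The fallback computes log2 as log(x)/ln(2); for example
+ * log2_wrapper(8.0) = 2.0794.../0.6931... = 3.0 up to floating point
+ * rounding.
+ */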
+
+BIF_RETTYPE math_log2_1(BIF_ALIST_1)
+{
+ return math_call_1(BIF_P, log2_wrapper, BIF_ARG_1);
+}
BIF_RETTYPE math_log10_1(BIF_ALIST_1)
{
@@ -228,6 +247,17 @@ BIF_RETTYPE math_pow_2(BIF_ALIST_2)
return math_call_2(BIF_P, pow, BIF_ARG_1, BIF_ARG_2);
}
+BIF_RETTYPE math_ceil_1(BIF_ALIST_1)
+{
+ return math_call_1(BIF_P, ceil, BIF_ARG_1);
+}
+BIF_RETTYPE math_floor_1(BIF_ALIST_1)
+{
+ return math_call_1(BIF_P, floor, BIF_ARG_1);
+}
-
+BIF_RETTYPE math_fmod_2(BIF_ALIST_2)
+{
+ return math_call_2(BIF_P, fmod, BIF_ARG_1, BIF_ARG_2);
+}
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index 0eb8117980..abf194cf94 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2012. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -31,9 +32,10 @@
#include "erl_process.h"
#include "erl_binary.h"
#include "dtrace-wrapper.h"
+#include "beam_bp.h"
-ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(message,
- ErlMessage,
+ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(message_ref,
+ ErtsMessageRef,
ERL_MESSAGE_BUF_SZ,
ERTS_ALC_T_MSG_REF)
@@ -43,27 +45,20 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(message,
#undef HARD_DEBUG
#endif
-
-
-
-#ifdef DEBUG
-static ERTS_INLINE int in_heapfrag(const Eterm* ptr, const ErlHeapFragment *bp)
+void
+init_message(void)
{
- return ((unsigned)(ptr - bp->mem) < bp->used_size);
+ init_message_ref_alloc();
}
-#endif
-
-void
-init_message(void)
+void *erts_alloc_message_ref(void)
{
- init_message_alloc();
+ return (void *) message_ref_alloc();
}
-void
-free_message(ErlMessage* mp)
+void erts_free_message_ref(void *mp)
{
- message_free(mp);
+ message_ref_free((ErtsMessageRef *) mp);
}
/* Allocate message buffer (size in words) */
@@ -73,7 +68,8 @@ new_message_buffer(Uint size)
ErlHeapFragment* bp;
bp = (ErlHeapFragment*) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP_FRAG,
ERTS_HEAP_FRAG_SIZE(size));
- ERTS_INIT_HEAP_FRAG(bp, size);
+ ERTS_INIT_HEAP_FRAG(bp, size, size);
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] new message buffer %p\n", erts_get_current_pid(), bp->mem));
return bp;
}
@@ -93,9 +89,6 @@ erts_resize_message_buffer(ErlHeapFragment *bp, Uint size,
#endif
ErlHeapFragment* nbp;
- /* ToDo: Make use of 'used_size' to avoid realloc
- when shrinking just a few words */
-
#ifdef DEBUG
{
Uint off_sz = size < bp->used_size ? size : bp->used_size;
@@ -110,8 +103,10 @@ erts_resize_message_buffer(ErlHeapFragment *bp, Uint size,
}
#endif
- if (size == bp->used_size)
+ if (size >= (bp->used_size - bp->used_size / 16)) {
+ bp->used_size = size;
return bp;
+ }
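+    /* The test above avoids a realloc when shrinking by less than
+     * 1/16 of used_size: the fragment keeps its allocation and only
+     * used_size is adjusted. */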
#ifdef HARD_DEBUG
dbg_brefs = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(Eterm *)*brefs_size);
@@ -172,15 +167,17 @@ erts_cleanup_offheap(ErlOffHeap *offheap)
for (u.hdr = offheap->first; u.hdr; u.hdr = u.hdr->next) {
switch (thing_subtag(u.hdr->thing_word)) {
case REFC_BINARY_SUBTAG:
- if (erts_refc_dectest(&u.pb->val->refc, 0) == 0) {
- erts_bin_free(u.pb->val);
- }
+ erts_bin_release(u.pb->val);
break;
case FUN_SUBTAG:
if (erts_refc_dectest(&u.fun->fe->refc, 0) == 0) {
erts_erase_fun_entry(u.fun->fe);
}
break;
+ case REF_SUBTAG:
+ ASSERT(is_magic_ref_thing(u.hdr));
+ erts_bin_release((Binary *)u.mref->mb);
+ break;
default:
ASSERT(is_external_header(u.hdr->thing_word));
erts_deref_node_entry(u.ext->node);
@@ -198,205 +195,152 @@ free_message_buffer(ErlHeapFragment* bp)
erts_cleanup_offheap(&bp->off_heap);
ERTS_HEAP_FREE(ERTS_ALC_T_HEAP_FRAG, (void *) bp,
- ERTS_HEAP_FRAG_SIZE(bp->size));
+ ERTS_HEAP_FRAG_SIZE(bp->alloc_size));
bp = next_bp;
}while (bp != NULL);
}
-static ERTS_INLINE void
-link_mbuf_to_proc(Process *proc, ErlHeapFragment *bp)
+void
+erts_cleanup_messages(ErtsMessage *msgp)
{
- if (bp) {
- /* Link the message buffer */
- bp->next = MBUF(proc);
- MBUF(proc) = bp;
- MBUF_SIZE(proc) += bp->used_size;
- FLAGS(proc) |= F_FORCE_GC;
-
- /* Move any off_heap's into the process */
- if (bp->off_heap.first != NULL) {
- struct erl_off_heap_header** next_p = &bp->off_heap.first;
- while (*next_p != NULL) {
- next_p = &((*next_p)->next);
+ ErtsMessage *mp = msgp;
+ while (mp) {
+ ErtsMessage *fmp;
+ ErlHeapFragment *bp;
+ if (is_non_value(ERL_MESSAGE_TERM(mp))) {
+ if (is_not_immed(ERL_MESSAGE_TOKEN(mp))) {
+ bp = (ErlHeapFragment *) mp->data.dist_ext->ext_endp;
+ erts_cleanup_offheap(&bp->off_heap);
}
- *next_p = MSO(proc).first;
- MSO(proc).first = bp->off_heap.first;
- bp->off_heap.first = NULL;
- OH_OVERHEAD(&(MSO(proc)), bp->off_heap.overhead);
+ if (mp->data.dist_ext)
+ erts_free_dist_ext_copy(mp->data.dist_ext);
}
+ else {
+ if (mp->data.attached != ERTS_MSG_COMBINED_HFRAG)
+ bp = mp->data.heap_frag;
+ else {
+ bp = mp->hfrag.next;
+ erts_cleanup_offheap(&mp->hfrag.off_heap);
+ }
+ if (bp)
+ free_message_buffer(bp);
+ }
+ fmp = mp;
+ mp = mp->next;
+ erts_free_message(fmp);
}
}
-Eterm
-erts_msg_distext2heap(Process *pp,
- ErtsProcLocks *plcksp,
- ErlHeapFragment **bpp,
- Eterm *tokenp,
- ErtsDistExternal *dist_extp)
+ErtsMessage *
+erts_realloc_shrink_message(ErtsMessage *mp, Uint sz, Eterm *brefs, Uint brefs_size)
{
- Eterm msg;
- Uint tok_sz = 0;
- Eterm *hp = NULL;
- Eterm *hp_end = NULL;
- ErlOffHeap *ohp;
- Sint sz;
-
- *bpp = NULL;
- sz = erts_decode_dist_ext_size(dist_extp);
- if (sz < 0)
- goto decode_error;
- if (is_not_nil(*tokenp)) {
- ErlHeapFragment *heap_frag = erts_dist_ext_trailer(dist_extp);
- tok_sz = heap_frag->used_size;
- sz += tok_sz;
- }
- if (pp)
- hp = erts_alloc_message_heap(sz, bpp, &ohp, pp, plcksp);
- else {
- *bpp = new_message_buffer(sz);
- hp = (*bpp)->mem;
- ohp = &(*bpp)->off_heap;
- }
- hp_end = hp + sz;
- msg = erts_decode_dist_ext(&hp, ohp, dist_extp);
- if (is_non_value(msg))
- goto decode_error;
- if (is_not_nil(*tokenp)) {
- ErlHeapFragment *heap_frag = erts_dist_ext_trailer(dist_extp);
- *tokenp = copy_struct(*tokenp, tok_sz, &hp, ohp);
- erts_cleanup_offheap(&heap_frag->off_heap);
- }
- erts_free_dist_ext_copy(dist_extp);
- if (hp_end != hp) {
- if (!(*bpp)) {
- HRelease(pp, hp_end, hp);
- }
- else {
- Uint final_size = hp - &(*bpp)->mem[0];
- Eterm brefs[2] = {msg, *tokenp};
- ASSERT(sz - (hp_end - hp) == final_size);
- *bpp = erts_resize_message_buffer(*bpp, final_size, &brefs[0], 2);
- msg = brefs[0];
- *tokenp = brefs[1];
- }
+ ErtsMessage *nmp = erts_realloc(ERTS_ALC_T_MSG, mp,
+ sizeof(ErtsMessage) + (sz - 1)*sizeof(Eterm));
+ if (nmp != mp) {
+ Eterm *sp = &mp->hfrag.mem[0];
+ Eterm *ep = sp + sz;
+ Sint offs = &nmp->hfrag.mem[0] - sp;
+ erts_offset_off_heap(&nmp->hfrag.off_heap, offs, sp, ep);
+ erts_offset_heap(&nmp->hfrag.mem[0], sz, offs, sp, ep);
+ if (brefs && brefs_size)
+ erts_offset_heap_ptr(brefs, brefs_size, offs, sp, ep);
}
- return msg;
- decode_error:
- if (is_not_nil(*tokenp)) {
- ErlHeapFragment *heap_frag = erts_dist_ext_trailer(dist_extp);
- erts_cleanup_offheap(&heap_frag->off_heap);
- }
- erts_free_dist_ext_copy(dist_extp);
- if (*bpp) {
- free_message_buffer(*bpp);
- *bpp = NULL;
- }
- else if (hp) {
- HRelease(pp, hp_end, hp);
- }
- return THE_NON_VALUE;
- }
+ nmp->hfrag.used_size = sz;
+ nmp->hfrag.alloc_size = sz;
+
+ return nmp;
+}
void
erts_queue_dist_message(Process *rcvr,
- ErtsProcLocks *rcvr_locks,
+ ErtsProcLocks rcvr_locks,
ErtsDistExternal *dist_ext,
- Eterm token)
+ Eterm token,
+ Eterm from)
{
- ErlMessage* mp;
+ ErtsMessage* mp;
#ifdef USE_VM_PROBES
Sint tok_label = 0;
Sint tok_lastcnt = 0;
Sint tok_serial = 0;
#endif
-#ifdef ERTS_SMP
erts_aint_t state;
-#endif
- ERTS_SMP_LC_ASSERT(*rcvr_locks == erts_proc_lc_my_proc_locks(rcvr));
+ ERTS_LC_ASSERT(rcvr_locks == erts_proc_lc_my_proc_locks(rcvr));
+
+ mp = erts_alloc_message(0, NULL);
+ mp->data.dist_ext = dist_ext;
- mp = message_alloc();
+ ERL_MESSAGE_TERM(mp) = THE_NON_VALUE;
+#ifdef USE_VM_PROBES
+ ERL_MESSAGE_DT_UTAG(mp) = NIL;
+ if (token == am_have_dt_utag)
+ ERL_MESSAGE_TOKEN(mp) = NIL;
+ else
+#endif
+ ERL_MESSAGE_TOKEN(mp) = token;
-#ifdef ERTS_SMP
- if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ)) {
- if (erts_smp_proc_trylock(rcvr, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
+ if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ)) {
+ if (erts_proc_trylock(rcvr, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ;
- if (*rcvr_locks & ERTS_PROC_LOCK_STATUS) {
- erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_STATUS);
- need_locks |= ERTS_PROC_LOCK_STATUS;
+ ErtsProcLocks unlocks =
+ rcvr_locks & ERTS_PROC_LOCKS_HIGHER_THAN(ERTS_PROC_LOCK_MSGQ);
+ if (unlocks) {
+ erts_proc_unlock(rcvr, unlocks);
+ need_locks |= unlocks;
}
- erts_smp_proc_lock(rcvr, need_locks);
+ erts_proc_lock(rcvr, need_locks);
}
}
- state = erts_smp_atomic32_read_acqb(&rcvr->state);
+ state = erts_atomic32_read_acqb(&rcvr->state);
if (state & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) {
- if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ))
- erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
+ if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ))
+ erts_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
/* Drop message if receiver is exiting or has a pending exit ... */
- if (is_not_nil(token)) {
- ErlHeapFragment *heap_frag;
- heap_frag = erts_dist_ext_trailer(mp->data.dist_ext);
- erts_cleanup_offheap(&heap_frag->off_heap);
- }
- erts_free_dist_ext_copy(dist_ext);
- message_free(mp);
+ erts_cleanup_messages(mp);
}
else
-#endif
if (IS_TRACED_FL(rcvr, F_TRACE_RECEIVE)) {
+ if (from == am_Empty)
+ from = dist_ext->dep->sysname;
+
/* Ahh... need to decode it in order to trace it... */
- ErlHeapFragment *mbuf;
- Eterm msg;
- if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ))
- erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
- message_free(mp);
- msg = erts_msg_distext2heap(rcvr, rcvr_locks, &mbuf, &token, dist_ext);
- if (is_value(msg))
-#ifdef USE_VM_PROBES
- if (DTRACE_ENABLED(message_queued)) {
- DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
-
- dtrace_proc_str(rcvr, receiver_name);
- if (token != NIL && token != am_have_dt_utag) {
- tok_label = signed_val(SEQ_TRACE_T_LABEL(token));
- tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token));
- tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token));
- }
- DTRACE6(message_queued,
- receiver_name, size_object(msg), rcvr->msg.len,
- tok_label, tok_lastcnt, tok_serial);
- }
-#endif
- erts_queue_message(rcvr, rcvr_locks, mbuf, msg, token
+ if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ))
+ erts_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
+ if (!erts_decode_dist_message(rcvr, rcvr_locks, mp, 0))
+ erts_free_message(mp);
+ else {
+ Eterm msg = ERL_MESSAGE_TERM(mp);
+ token = ERL_MESSAGE_TOKEN(mp);
#ifdef USE_VM_PROBES
- , NIL
+ if (DTRACE_ENABLED(message_queued)) {
+ DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
+
+ dtrace_proc_str(rcvr, receiver_name);
+ if (have_seqtrace(token)) {
+ tok_label = signed_val(SEQ_TRACE_T_LABEL(token));
+ tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token));
+ tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token));
+ }
+ DTRACE6(message_queued,
+ receiver_name, size_object(msg), rcvr->msg.len,
+ tok_label, tok_lastcnt, tok_serial);
+ }
#endif
- );
+ erts_queue_message(rcvr, rcvr_locks, mp, msg, from);
+ }
}
else {
/* Enqueue message on external format */
- ERL_MESSAGE_TERM(mp) = THE_NON_VALUE;
-#ifdef USE_VM_PROBES
- ERL_MESSAGE_DT_UTAG(mp) = NIL;
- if (token == am_have_dt_utag) {
- ERL_MESSAGE_TOKEN(mp) = NIL;
- } else {
-#endif
- ERL_MESSAGE_TOKEN(mp) = token;
-#ifdef USE_VM_PROBES
- }
-#endif
- mp->next = NULL;
-
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(message_queued)) {
DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
dtrace_proc_str(rcvr, receiver_name);
- if (token != NIL && token != am_have_dt_utag) {
+ if (have_seqtrace(token)) {
tok_label = signed_val(SEQ_TRACE_T_LABEL(token));
tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token));
tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token));
@@ -409,96 +353,78 @@ erts_queue_dist_message(Process *rcvr,
tok_label, tok_lastcnt, tok_serial);
}
#endif
- mp->data.dist_ext = dist_ext;
- LINK_MESSAGE(rcvr, mp);
- if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ))
- erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
+ LINK_MESSAGE(rcvr, mp, &mp->next, 1);
+
+ if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ))
+ erts_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
- erts_proc_notify_new_message(rcvr);
+ erts_proc_notify_new_message(rcvr, rcvr_locks);
}
}
-/* Add a message last in message queue */
+/* Add messages last in message queue */
static Sint
-queue_message(Process *c_p,
- Process* receiver,
- ErtsProcLocks *receiver_locks,
- erts_aint32_t *receiver_state,
- ErlHeapFragment* bp,
- Eterm message,
- Eterm seq_trace_token
-#ifdef USE_VM_PROBES
- , Eterm dt_utag
-#endif
- )
+queue_messages(Process* receiver,
+ erts_aint32_t *receiver_state,
+ ErtsProcLocks receiver_locks,
+ ErtsMessage* first,
+ ErtsMessage** last,
+ Uint len,
+ Eterm from)
{
+ ErtsTracingEvent* te;
Sint res;
- ErlMessage* mp;
int locked_msgq = 0;
- erts_aint_t state;
+ erts_aint32_t state;
-#ifndef ERTS_SMP
- ASSERT(bp != NULL || receiver->mbuf == NULL);
-#endif
-
- ERTS_SMP_LC_ASSERT(*receiver_locks == erts_proc_lc_my_proc_locks(receiver));
-
- mp = message_alloc();
+ ASSERT(is_value(ERL_MESSAGE_TERM(first)));
+ ASSERT(ERL_MESSAGE_TOKEN(first) == am_undefined ||
+ ERL_MESSAGE_TOKEN(first) == NIL ||
+ is_tuple(ERL_MESSAGE_TOKEN(first)));
- if (receiver_state)
- state = *receiver_state;
- else
- state = erts_smp_atomic32_read_acqb(&receiver->state);
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(receiver) < ERTS_PROC_LOCK_MSGQ ||
+ receiver_locks == erts_proc_lc_my_proc_locks(receiver));
+#endif
-#ifdef ERTS_SMP
+ if (!(receiver_locks & ERTS_PROC_LOCK_MSGQ)) {
+ if (erts_proc_trylock(receiver, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
+ ErtsProcLocks need_locks;
- if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))
- goto exiting;
+ if (receiver_state)
+ state = *receiver_state;
+ else
+ state = erts_atomic32_read_nob(&receiver->state);
+ if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))
+ goto exiting;
- if (!(*receiver_locks & ERTS_PROC_LOCK_MSGQ)) {
- if (erts_smp_proc_trylock(receiver, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
- ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ;
- if (*receiver_locks & ERTS_PROC_LOCK_STATUS) {
- erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_STATUS);
- need_locks |= ERTS_PROC_LOCK_STATUS;
+ need_locks = receiver_locks & ERTS_PROC_LOCKS_HIGHER_THAN(ERTS_PROC_LOCK_MSGQ);
+ if (need_locks) {
+ erts_proc_unlock(receiver, need_locks);
}
- erts_smp_proc_lock(receiver, need_locks);
+ need_locks |= ERTS_PROC_LOCK_MSGQ;
+ erts_proc_lock(receiver, need_locks);
}
locked_msgq = 1;
- state = erts_smp_atomic32_read_nob(&receiver->state);
- if (receiver_state)
- *receiver_state = state;
}
-#endif
+
+ state = erts_atomic32_read_nob(&receiver->state);
if (state & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) {
-#ifdef ERTS_SMP
exiting:
-#endif
/* Drop message if receiver is exiting or has a pending exit... */
if (locked_msgq)
- erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
- if (bp)
- free_message_buffer(bp);
- message_free(mp);
+ erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
+ erts_cleanup_messages(first);
return 0;
}
- ERL_MESSAGE_TERM(mp) = message;
- ERL_MESSAGE_TOKEN(mp) = seq_trace_token;
-#ifdef USE_VM_PROBES
- ERL_MESSAGE_DT_UTAG(mp) = dt_utag;
-#endif
- mp->next = NULL;
- mp->data.heap_frag = bp;
-
-#ifndef ERTS_SMP
res = receiver->msg.len;
-#else
- res = receiver->msg_inq.len;
- if (*receiver_locks & ERTS_PROC_LOCK_MAIN) {
+ if (receiver_locks & ERTS_PROC_LOCK_MAIN) {
/*
* We move 'in queue' to 'private queue' and place
* message at the end of 'private queue' in order
@@ -507,313 +433,115 @@ queue_message(Process *c_p,
* we don't need to include the 'in queue' in
* the root set when garbage collecting.
*/
- res += receiver->msg.len;
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(receiver);
- LINK_MESSAGE_PRIVQ(receiver, mp);
+ res += receiver->msg_inq.len;
+ ERTS_MSGQ_MV_INQ2PRIVQ(receiver);
+ LINK_MESSAGE_PRIVQ(receiver, first, last, len);
}
else
-#endif
{
- LINK_MESSAGE(receiver, mp);
+ LINK_MESSAGE(receiver, first, last, len);
}
+ if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)
+ && (te = &erts_receive_tracing[erts_active_bp_ix()],
+ te->on)) {
+
+ ErtsMessage *msg = first;
+
#ifdef USE_VM_PROBES
- if (DTRACE_ENABLED(message_queued)) {
- DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
- Sint tok_label = 0;
- Sint tok_lastcnt = 0;
- Sint tok_serial = 0;
-
- dtrace_proc_str(receiver, receiver_name);
- if (seq_trace_token != NIL && is_tuple(seq_trace_token)) {
- tok_label = signed_val(SEQ_TRACE_T_LABEL(seq_trace_token));
- tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(seq_trace_token));
- tok_serial = signed_val(SEQ_TRACE_T_SERIAL(seq_trace_token));
+ if (DTRACE_ENABLED(message_queued)) {
+ DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
+ Sint tok_label = 0;
+ Sint tok_lastcnt = 0;
+ Sint tok_serial = 0;
+ Eterm seq_trace_token = ERL_MESSAGE_TOKEN(msg);
+
+ dtrace_proc_str(receiver, receiver_name);
+ if (seq_trace_token != NIL && is_tuple(seq_trace_token)) {
+ tok_label = signed_val(SEQ_TRACE_T_LABEL(seq_trace_token));
+ tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(seq_trace_token));
+ tok_serial = signed_val(SEQ_TRACE_T_SERIAL(seq_trace_token));
+ }
+ DTRACE6(message_queued,
+ receiver_name, size_object(ERL_MESSAGE_TERM(msg)),
+ receiver->msg.len,
+ tok_label, tok_lastcnt, tok_serial);
}
- DTRACE6(message_queued,
- receiver_name, size_object(message), receiver->msg.len,
- tok_label, tok_lastcnt, tok_serial);
- }
#endif
+ while (msg) {
+ trace_receive(receiver, from, ERL_MESSAGE_TERM(msg), te);
+ msg = msg->next;
+ }
- if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE))
- trace_receive(receiver, message);
-
- if (locked_msgq)
- erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
-
- erts_proc_notify_new_message(receiver);
+ }
+ if (locked_msgq) {
+ erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
+ }
-#ifndef ERTS_SMP
- ERTS_HOLE_CHECK(receiver);
-#endif
+ erts_proc_notify_new_message(receiver, receiver_locks);
return res;
}
-void
-erts_queue_message(Process* receiver,
- ErtsProcLocks *receiver_locks,
- ErlHeapFragment* bp,
- Eterm message,
- Eterm seq_trace_token
-#ifdef USE_VM_PROBES
- , Eterm dt_utag
-#endif
- )
+static Sint
+queue_message(Process* receiver,
+ erts_aint32_t *receiver_state,
+ ErtsProcLocks receiver_locks,
+ ErtsMessage* mp, Eterm msg, Eterm from)
{
- queue_message(NULL,
- receiver,
- receiver_locks,
- NULL,
- bp,
- message,
- seq_trace_token
-#ifdef USE_VM_PROBES
- , dt_utag
-#endif
- );
+ ERL_MESSAGE_TERM(mp) = msg;
+ return queue_messages(receiver, receiver_state, receiver_locks,
+ mp, &mp->next, 1, from);
}
-void
-erts_link_mbuf_to_proc(struct process *proc, ErlHeapFragment *bp)
+Sint
+erts_queue_message(Process* receiver, ErtsProcLocks receiver_locks,
+ ErtsMessage* mp, Eterm msg, Eterm from)
{
- Eterm* htop = HEAP_TOP(proc);
+ return queue_message(receiver, NULL, receiver_locks, mp, msg, from);
+}
- link_mbuf_to_proc(proc, bp);
- if (htop < HEAP_LIMIT(proc)) {
- *htop = make_pos_bignum_header(HEAP_LIMIT(proc)-htop-1);
- HEAP_TOP(proc) = HEAP_LIMIT(proc);
- }
+
+Sint
+erts_queue_messages(Process* receiver, ErtsProcLocks receiver_locks,
+ ErtsMessage* first, ErtsMessage** last, Uint len,
+ Eterm from)
+{
+ return queue_messages(receiver, NULL, receiver_locks,
+ first, last, len, from);
}
-/*
- * Moves content of message buffer attached to a message into a heap.
- * The message buffer is deallocated.
- */
void
-erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
+erts_link_mbuf_to_proc(Process *proc, ErlHeapFragment *first_bp)
{
- struct erl_off_heap_header* oh;
- Eterm term, token, *fhp, *hp;
- Sint offs;
- Uint sz;
- ErlHeapFragment *bp;
-#ifdef USE_VM_PROBES
- Eterm utag;
-#endif
-
-#ifdef HARD_DEBUG
- struct erl_off_heap_header* dbg_oh_start = off_heap->first;
- Eterm dbg_term, dbg_token;
- ErlHeapFragment *dbg_bp;
- Uint *dbg_hp, *dbg_thp_start;
- Uint dbg_term_sz, dbg_token_sz;
-#ifdef USE_VM_PROBES
- Eterm dbg_utag;
- Uint dbg_utag_sz;
-#endif
-#endif
-
- bp = msg->data.heap_frag;
- term = ERL_MESSAGE_TERM(msg);
- token = ERL_MESSAGE_TOKEN(msg);
-#ifdef USE_VM_PROBES
- utag = ERL_MESSAGE_DT_UTAG(msg);
-#endif
- if (!bp) {
-#ifdef USE_VM_PROBES
- ASSERT(is_immed(term) && is_immed(token) && is_immed(utag));
-#else
- ASSERT(is_immed(term) && is_immed(token));
-#endif
- return;
- }
-
-#ifdef HARD_DEBUG
- dbg_term_sz = size_object(term);
- dbg_token_sz = size_object(token);
- dbg_bp = new_message_buffer(dbg_term_sz + dbg_token_sz);
-#ifdef USE_VM_PROBES
- dbg_utag_sz = size_object(utag);
- dbg_bp = new_message_buffer(dbg_term_sz + dbg_token_sz + dbg_utag_sz );
-#endif
- /*ASSERT(dbg_term_sz + dbg_token_sz == erts_msg_used_frag_sz(msg));
- Copied size may be smaller due to removed SubBins's or garbage.
- Copied size may be larger due to duplicated shared terms.
- */
- dbg_hp = dbg_bp->mem;
- dbg_term = copy_struct(term, dbg_term_sz, &dbg_hp, &dbg_bp->off_heap);
- dbg_token = copy_struct(token, dbg_token_sz, &dbg_hp, &dbg_bp->off_heap);
-#ifdef USE_VM_PROBES
- dbg_utag = copy_struct(utag, dbg_utag_sz, &dbg_hp, &dbg_bp->off_heap);
-#endif
- dbg_thp_start = *hpp;
-#endif
-
- if (bp->next != NULL) {
- move_multi_frags(hpp, off_heap, bp, msg->m,
-#ifdef USE_VM_PROBES
- 3
-#else
- 2
-#endif
- );
- goto copy_done;
- }
-
- OH_OVERHEAD(off_heap, bp->off_heap.overhead);
- sz = bp->used_size;
-
- ASSERT(is_immed(term) || in_heapfrag(ptr_val(term),bp));
- ASSERT(is_immed(token) || in_heapfrag(ptr_val(token),bp));
-
- fhp = bp->mem;
- hp = *hpp;
- offs = hp - fhp;
-
- oh = NULL;
- while (sz--) {
- Uint cpy_sz;
- Eterm val = *fhp++;
-
- switch (primary_tag(val)) {
- case TAG_PRIMARY_IMMED1:
- *hp++ = val;
- break;
- case TAG_PRIMARY_LIST:
- case TAG_PRIMARY_BOXED:
- ASSERT(in_heapfrag(ptr_val(val), bp));
- *hp++ = offset_ptr(val, offs);
- break;
- case TAG_PRIMARY_HEADER:
- *hp++ = val;
- switch (val & _HEADER_SUBTAG_MASK) {
- case ARITYVAL_SUBTAG:
- break;
- case REFC_BINARY_SUBTAG:
- case FUN_SUBTAG:
- case EXTERNAL_PID_SUBTAG:
- case EXTERNAL_PORT_SUBTAG:
- case EXTERNAL_REF_SUBTAG:
- oh = (struct erl_off_heap_header*) (hp-1);
- cpy_sz = thing_arityval(val);
- goto cpy_words;
- default:
- cpy_sz = header_arity(val);
-
- cpy_words:
- ASSERT(sz >= cpy_sz);
- sz -= cpy_sz;
- while (cpy_sz >= 8) {
- cpy_sz -= 8;
- *hp++ = *fhp++;
- *hp++ = *fhp++;
- *hp++ = *fhp++;
- *hp++ = *fhp++;
- *hp++ = *fhp++;
- *hp++ = *fhp++;
- *hp++ = *fhp++;
- *hp++ = *fhp++;
- }
- switch (cpy_sz) {
- case 7: *hp++ = *fhp++;
- case 6: *hp++ = *fhp++;
- case 5: *hp++ = *fhp++;
- case 4: *hp++ = *fhp++;
- case 3: *hp++ = *fhp++;
- case 2: *hp++ = *fhp++;
- case 1: *hp++ = *fhp++;
- default: break;
- }
- if (oh) {
- /* Add to offheap list */
- oh->next = off_heap->first;
- off_heap->first = oh;
- ASSERT(*hpp <= (Eterm*)oh);
- ASSERT(hp > (Eterm*)oh);
- oh = NULL;
+ if (first_bp) {
+ ErlHeapFragment *bp = first_bp;
+
+ while (1) {
+ /* Move any off_heap's into the process */
+ if (bp->off_heap.first != NULL) {
+ struct erl_off_heap_header** next_p = &bp->off_heap.first;
+ while (*next_p != NULL) {
+ next_p = &((*next_p)->next);
}
- break;
+ *next_p = MSO(proc).first;
+ MSO(proc).first = bp->off_heap.first;
+ bp->off_heap.first = NULL;
+ OH_OVERHEAD(&(MSO(proc)), bp->off_heap.overhead);
}
- break;
+ MBUF_SIZE(proc) += bp->used_size;
+ if (!bp->next)
+ break;
+ bp = bp->next;
}
- }
- ASSERT(bp->used_size == hp - *hpp);
- *hpp = hp;
-
- if (is_not_immed(token)) {
- ASSERT(in_heapfrag(ptr_val(token), bp));
- ERL_MESSAGE_TOKEN(msg) = offset_ptr(token, offs);
-#ifdef HARD_DEBUG
- ASSERT(dbg_thp_start <= ptr_val(ERL_MESSAGE_TOKEN(msg)));
- ASSERT(hp > ptr_val(ERL_MESSAGE_TOKEN(msg)));
-#endif
- }
-
- if (is_not_immed(term)) {
- ASSERT(in_heapfrag(ptr_val(term),bp));
- ERL_MESSAGE_TERM(msg) = offset_ptr(term, offs);
-#ifdef HARD_DEBUG
- ASSERT(dbg_thp_start <= ptr_val(ERL_MESSAGE_TERM(msg)));
- ASSERT(hp > ptr_val(ERL_MESSAGE_TERM(msg)));
-#endif
- }
-#ifdef USE_VM_PROBES
- if (is_not_immed(utag)) {
- ASSERT(in_heapfrag(ptr_val(utag), bp));
- ERL_MESSAGE_DT_UTAG(msg) = offset_ptr(utag, offs);
-#ifdef HARD_DEBUG
- ASSERT(dbg_thp_start <= ptr_val(ERL_MESSAGE_DT_UTAG(msg)));
- ASSERT(hp > ptr_val(ERL_MESSAGE_DT_UTAG(msg)));
-#endif
- }
-#endif
-
-copy_done:
-
-#ifdef HARD_DEBUG
- {
- int i, j;
- ErlHeapFragment* frag;
- {
- struct erl_off_heap_header* dbg_oh = off_heap->first;
- i = j = 0;
- while (dbg_oh != dbg_oh_start) {
- dbg_oh = dbg_oh->next;
- i++;
- }
- for (frag=bp; frag; frag=frag->next) {
- dbg_oh = frag->off_heap.first;
- while (dbg_oh) {
- dbg_oh = dbg_oh->next;
- j++;
- }
- }
- ASSERT(i == j);
- }
+ /* Link the message buffer */
+ bp->next = MBUF(proc);
+ MBUF(proc) = first_bp;
}
-#endif
-
-
- bp->off_heap.first = NULL;
- free_message_buffer(bp);
- msg->data.heap_frag = NULL;
-
-#ifdef HARD_DEBUG
- ASSERT(eq(ERL_MESSAGE_TERM(msg), dbg_term));
- ASSERT(eq(ERL_MESSAGE_TOKEN(msg), dbg_token));
-#ifdef USE_VM_PROBES
- ASSERT(eq(ERL_MESSAGE_DT_UTAG(msg), dbg_utag));
-#endif
- free_message_buffer(dbg_bp);
-#endif
-
}
-
Uint
-erts_msg_attached_data_size_aux(ErlMessage *msg)
+erts_msg_attached_data_size_aux(ErtsMessage *msg)
{
Sint sz;
ASSERT(is_non_value(ERL_MESSAGE_TERM(msg)));
@@ -822,14 +550,11 @@ erts_msg_attached_data_size_aux(ErlMessage *msg)
sz = erts_decode_dist_ext_size(msg->data.dist_ext);
if (sz < 0) {
- /* Bad external; remove it */
- if (is_not_nil(ERL_MESSAGE_TOKEN(msg))) {
- ErlHeapFragment *heap_frag;
- heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
- erts_cleanup_offheap(&heap_frag->off_heap);
- }
- erts_free_dist_ext_copy(msg->data.dist_ext);
- msg->data.dist_ext = NULL;
+ /* Bad external.
+ * We leave the message intact in this case, since it is not worth
+ * the trouble to make all callers remove it from the queue. It
+ * will be detected again and removed from the message queue later
+ * anyway.
+ */
return 0;
}
@@ -842,29 +567,77 @@ erts_msg_attached_data_size_aux(ErlMessage *msg)
return sz;
}
-void
-erts_move_msg_attached_data_to_heap(Eterm **hpp, ErlOffHeap *ohp, ErlMessage *msg)
+ErtsMessage *
+erts_try_alloc_message_on_heap(Process *pp,
+ erts_aint32_t *psp,
+ ErtsProcLocks *plp,
+ Uint sz,
+ Eterm **hpp,
+ ErlOffHeap **ohpp,
+ int *on_heap_p)
{
- if (is_value(ERL_MESSAGE_TERM(msg)))
- erts_move_msg_mbuf_to_heap(hpp, ohp, msg);
- else if (msg->data.dist_ext) {
- ASSERT(msg->data.dist_ext->heap_size >= 0);
- if (is_not_nil(ERL_MESSAGE_TOKEN(msg))) {
- ErlHeapFragment *heap_frag;
- heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
- ERL_MESSAGE_TOKEN(msg) = copy_struct(ERL_MESSAGE_TOKEN(msg),
- heap_frag->used_size,
- hpp,
- ohp);
- erts_cleanup_offheap(&heap_frag->off_heap);
+ int locked_main = 0;
+ ErtsMessage *mp;
+
+ ASSERT(!(*psp & ERTS_PSFLG_OFF_HEAP_MSGQ));
+
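+ /* Allocation strategy: if the caller already holds the main lock,
+ * try to allocate the message directly on the process heap; if not,
+ * try-lock the main lock and retry the on-heap path; if that also
+ * fails, fall back to allocating the message in a heap fragment.
+ */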
+ if ((*psp) & ERTS_PSFLGS_VOLATILE_HEAP)
+ goto in_message_fragment;
+ else if (
+ *plp & ERTS_PROC_LOCK_MAIN
+ ) {
+ try_on_heap:
+ if (((*psp) & ERTS_PSFLGS_VOLATILE_HEAP)
+ || (pp->flags & F_DISABLE_GC)
+ || HEAP_LIMIT(pp) - HEAP_TOP(pp) <= sz) {
+ /*
+ * The heap is either potentially in an inconsistent
+ * state, or not large enough.
+ */
+ if (locked_main) {
+ *plp &= ~ERTS_PROC_LOCK_MAIN;
+ erts_proc_unlock(pp, ERTS_PROC_LOCK_MAIN);
+ }
+ goto in_message_fragment;
}
- ERL_MESSAGE_TERM(msg) = erts_decode_dist_ext(hpp,
- ohp,
- msg->data.dist_ext);
- erts_free_dist_ext_copy(msg->data.dist_ext);
- msg->data.dist_ext = NULL;
+
+ *hpp = HEAP_TOP(pp);
+ HEAP_TOP(pp) = *hpp + sz;
+ *ohpp = &MSO(pp);
+ mp = erts_alloc_message(0, NULL);
+ mp->data.attached = NULL;
+ *on_heap_p = !0;
}
- /* else: bad external detected when calculating size */
+ else if (pp && erts_proc_trylock(pp, ERTS_PROC_LOCK_MAIN) == 0) {
+ locked_main = 1;
+ *psp = erts_atomic32_read_nob(&pp->state);
+ *plp |= ERTS_PROC_LOCK_MAIN;
+ goto try_on_heap;
+ }
+ else {
+ in_message_fragment:
+ if (!((*psp) & ERTS_PSFLG_ON_HEAP_MSGQ)) {
+ mp = erts_alloc_message(sz, hpp);
+ *ohpp = sz == 0 ? NULL : &mp->hfrag.off_heap;
+ }
+ else {
+ mp = erts_alloc_message(0, NULL);
+ if (!sz) {
+ *hpp = NULL;
+ *ohpp = NULL;
+ }
+ else {
+ ErlHeapFragment *bp;
+ bp = new_message_buffer(sz);
+ *hpp = &bp->mem[0];
+ mp->data.heap_frag = bp;
+ *ohpp = &bp->off_heap;
+ }
+ }
+ *on_heap_p = 0;
+ }
+
+ return mp;
}
/*
@@ -879,7 +652,8 @@ erts_send_message(Process* sender,
unsigned flags)
{
Uint msize;
- ErlHeapFragment* bp = NULL;
+ ErtsMessage* mp;
+ ErlOffHeap *ohp;
Eterm token = NIL;
Sint res = 0;
#ifdef USE_VM_PROBES
@@ -888,80 +662,94 @@ erts_send_message(Process* sender,
Sint tok_label = 0;
Sint tok_lastcnt = 0;
Sint tok_serial = 0;
+ Eterm utag = NIL;
+#endif
+ erts_aint32_t receiver_state;
+#ifdef SHCOPY_SEND
+ erts_shcopy_t info;
+#else
+ erts_literal_area_t litarea;
+ INITIALIZE_LITERAL_PURGE_AREA(litarea);
#endif
- BM_STOP_TIMER(system);
- BM_MESSAGE(message,sender,receiver);
- BM_START_TIMER(send);
- #ifdef USE_VM_PROBES
+#ifdef USE_VM_PROBES
*sender_name = *receiver_name = '\0';
- if (DTRACE_ENABLED(message_send)) {
+ if (DTRACE_ENABLED(message_send)) {
erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)),
"%T", sender->common.id);
erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)),
"%T", receiver->common.id);
}
#endif
+
+ receiver_state = erts_atomic32_read_nob(&receiver->state);
+
if (SEQ_TRACE_TOKEN(sender) != NIL && !(flags & ERTS_SND_FLG_NO_SEQ_TRACE)) {
Eterm* hp;
Eterm stoken = SEQ_TRACE_TOKEN(sender);
Uint seq_trace_size = 0;
#ifdef USE_VM_PROBES
Uint dt_utag_size = 0;
- Eterm utag = NIL;
-#endif
-
- BM_SWAP_TIMER(send,size);
- msize = size_object(message);
- BM_SWAP_TIMER(size,send);
-
-#ifdef USE_VM_PROBES
- if (stoken != am_have_dt_utag) {
#endif
+ /* SHCOPY corrupts the heap between copy_shared_calculate() and
+ * copy_shared_perform() (it inserts move markers, like the GC).
+ * Make sure we don't use the heap between those calls.
+ */
+ if (have_seqtrace(stoken)) {
seq_trace_update_send(sender);
- seq_trace_output(stoken, message, SEQ_TRACE_SEND,
+ seq_trace_output(stoken, message, SEQ_TRACE_SEND,
receiver->common.id, sender);
seq_trace_size = 6; /* TUPLE5 */
-#ifdef USE_VM_PROBES
- }
- if (DT_UTAG_FLAGS(sender) & DT_UTAG_SPREADING) {
- dt_utag_size = size_object(DT_UTAG(sender));
- } else if (stoken == am_have_dt_utag ) {
- stoken = NIL;
}
-#endif
-
- bp = new_message_buffer(msize + seq_trace_size
#ifdef USE_VM_PROBES
- + dt_utag_size
+ if (DT_UTAG_FLAGS(sender) & DT_UTAG_SPREADING) {
+ dt_utag_size = size_object(DT_UTAG(sender));
+ } else if (stoken == am_have_dt_utag) {
+ stoken = NIL;
+ }
#endif
- );
- hp = bp->mem;
-
- BM_SWAP_TIMER(send,copy);
- token = copy_struct(stoken,
- seq_trace_size,
- &hp,
- &bp->off_heap);
- message = copy_struct(message, msize, &hp, &bp->off_heap);
+#ifdef SHCOPY_SEND
+ INITIALIZE_SHCOPY(info);
+ msize = copy_shared_calculate(message, &info);
+#else
+ msize = size_object_litopt(message, &litarea);
+#endif
+ mp = erts_alloc_message_heap_state(receiver,
+ &receiver_state,
+ receiver_locks,
+ (msize
#ifdef USE_VM_PROBES
- if (DT_UTAG_FLAGS(sender) & DT_UTAG_SPREADING) {
- utag = copy_struct(DT_UTAG(sender), dt_utag_size, &hp, &bp->off_heap);
-#ifdef DTRACE_TAG_HARDDEBUG
- erts_fprintf(stderr,
- "Dtrace -> (%T) Spreading tag (%T) with "
- "message %T!\r\n",sender->common.id, utag, message);
+ + dt_utag_size
#endif
- }
+ + seq_trace_size),
+ &hp,
+ &ohp);
+
+#ifdef SHCOPY_SEND
+ if (is_not_immed(message))
+ message = copy_shared_perform(message, msize, &info, &hp, ohp);
+ DESTROY_SHCOPY(info);
+#else
+ if (is_not_immed(message))
+ message = copy_struct_litopt(message, msize, &hp, ohp, &litarea);
#endif
- BM_MESSAGE_COPIED(msize);
- BM_SWAP_TIMER(copy,send);
+ if (is_immed(stoken))
+ token = stoken;
+ else
+ token = copy_struct(stoken, seq_trace_size, &hp, ohp);
#ifdef USE_VM_PROBES
+ if (DT_UTAG_FLAGS(sender) & DT_UTAG_SPREADING) {
+ if (is_immed(DT_UTAG(sender)))
+ utag = DT_UTAG(sender);
+ else
+ utag = copy_struct(DT_UTAG(sender), dt_utag_size, &hp, ohp);
+ }
if (DTRACE_ENABLED(message_send)) {
- if (stoken != NIL && stoken != am_have_dt_utag) {
+ if (have_seqtrace(stoken)) {
tok_label = signed_val(SEQ_TRACE_T_LABEL(stoken));
tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(stoken));
tok_serial = signed_val(SEQ_TRACE_T_SERIAL(stoken));
@@ -970,140 +758,55 @@ erts_send_message(Process* sender,
msize, tok_label, tok_lastcnt, tok_serial);
}
#endif
- res = queue_message(NULL,
- receiver,
- receiver_locks,
- NULL,
- bp,
- message,
- token
-#ifdef USE_VM_PROBES
- , utag
-#endif
- );
- BM_SWAP_TIMER(send,system);
- } else if (sender == receiver) {
- /* Drop message if receiver has a pending exit ... */
-#ifdef ERTS_SMP
- ErtsProcLocks need_locks = (~(*receiver_locks)
- & (ERTS_PROC_LOCK_MSGQ
- | ERTS_PROC_LOCK_STATUS));
- if (need_locks) {
- *receiver_locks |= need_locks;
- if (erts_smp_proc_trylock(receiver, need_locks) == EBUSY) {
- if (need_locks == ERTS_PROC_LOCK_MSGQ) {
- erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_STATUS);
- need_locks = ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS;
- }
- erts_smp_proc_lock(receiver, need_locks);
- }
+ } else {
+ Eterm *hp;
+
+ if (receiver == sender && !(receiver_state & ERTS_PSFLG_OFF_HEAP_MSGQ)) {
+ mp = erts_alloc_message(0, NULL);
+ msize = 0;
}
- if (!ERTS_PROC_PENDING_EXIT(receiver))
+ else {
+#ifdef SHCOPY_SEND
+ INITIALIZE_SHCOPY(info);
+ msize = copy_shared_calculate(message, &info);
+#else
+ msize = size_object_litopt(message, &litarea);
#endif
- {
- ErlMessage* mp = message_alloc();
-
- DTRACE6(message_send, sender_name, receiver_name,
- size_object(message), tok_label, tok_lastcnt, tok_serial);
- mp->data.attached = NULL;
- ERL_MESSAGE_TERM(mp) = message;
- ERL_MESSAGE_TOKEN(mp) = NIL;
-#ifdef USE_VM_PROBES
- ERL_MESSAGE_DT_UTAG(mp) = NIL;
+ mp = erts_alloc_message_heap_state(receiver,
+ &receiver_state,
+ receiver_locks,
+ msize,
+ &hp,
+ &ohp);
+#ifdef SHCOPY_SEND
+ if (is_not_immed(message))
+ message = copy_shared_perform(message, msize, &info, &hp, ohp);
+ DESTROY_SHCOPY(info);
+#else
+ if (is_not_immed(message))
+ message = copy_struct_litopt(message, msize, &hp, ohp, &litarea);
#endif
- mp->next = NULL;
- /*
- * We move 'in queue' to 'private queue' and place
- * message at the end of 'private queue' in order
- * to ensure that the 'in queue' doesn't contain
- * references into the heap. By ensuring this,
- * we don't need to include the 'in queue' in
- * the root set when garbage collecting.
- */
-
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(receiver);
- LINK_MESSAGE_PRIVQ(receiver, mp);
-
- res = receiver->msg.len;
-
- if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) {
- trace_receive(receiver, message);
- }
}
- BM_SWAP_TIMER(send,system);
- } else {
-#ifdef ERTS_SMP
- ErlOffHeap *ohp;
- Eterm *hp;
- erts_aint32_t state;
-
- BM_SWAP_TIMER(send,size);
- msize = size_object(message);
- BM_SWAP_TIMER(size,send);
- hp = erts_alloc_message_heap_state(msize,
- &bp,
- &ohp,
- receiver,
- receiver_locks,
- &state);
- BM_SWAP_TIMER(send,copy);
- message = copy_struct(message, msize, &hp, ohp);
- BM_MESSAGE_COPIED(msz);
- BM_SWAP_TIMER(copy,send);
+#ifdef USE_VM_PROBES
DTRACE6(message_send, sender_name, receiver_name,
msize, tok_label, tok_lastcnt, tok_serial);
- res = queue_message(sender,
- receiver,
- receiver_locks,
- &state,
- bp,
- message,
- token
-#ifdef USE_VM_PROBES
- , NIL
#endif
- );
- BM_SWAP_TIMER(send,system);
-#else
- ErlMessage* mp = message_alloc();
- Eterm *hp;
- BM_SWAP_TIMER(send,size);
- msize = size_object(message);
- BM_SWAP_TIMER(size,send);
-
- if (receiver->stop - receiver->htop <= msize) {
- BM_SWAP_TIMER(send,system);
- erts_garbage_collect(receiver, msize, receiver->arg_reg, receiver->arity);
- BM_SWAP_TIMER(system,send);
- }
- hp = receiver->htop;
- receiver->htop = hp + msize;
- BM_SWAP_TIMER(send,copy);
- message = copy_struct(message, msize, &hp, &receiver->off_heap);
- BM_MESSAGE_COPIED(msize);
- BM_SWAP_TIMER(copy,send);
- DTRACE6(message_send, sender_name, receiver_name,
- (uint32_t)msize, tok_label, tok_lastcnt, tok_serial);
- ERL_MESSAGE_TERM(mp) = message;
- ERL_MESSAGE_TOKEN(mp) = NIL;
+ }
+
+ ERL_MESSAGE_TOKEN(mp) = token;
#ifdef USE_VM_PROBES
- ERL_MESSAGE_DT_UTAG(mp) = NIL;
+ ERL_MESSAGE_DT_UTAG(mp) = utag;
#endif
- mp->next = NULL;
- mp->data.attached = NULL;
- LINK_MESSAGE(receiver, mp);
- res = receiver->msg.len;
- erts_proc_notify_new_message(receiver);
+ res = queue_message(receiver,
+ &receiver_state,
+ *receiver_locks,
+ mp, message,
+ sender->common.id);
- if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) {
- trace_receive(receiver, message);
- }
- BM_SWAP_TIMER(send,system);
-#endif /* #ifndef ERTS_SMP */
- }
- return res;
+ return res;
}
+
/*
* This function delivers an EXIT message to a process
* which is trapping EXITs.
@@ -1121,53 +824,1012 @@ erts_deliver_exit_message(Eterm from, Process *to, ErtsProcLocks *to_locksp,
Uint sz_from;
Eterm* hp;
Eterm temptoken;
- ErlHeapFragment* bp = NULL;
-
- if (token != NIL
-#ifdef USE_VM_PROBES
- && token != am_have_dt_utag
+ ErtsMessage* mp;
+ ErlOffHeap *ohp;
+#ifdef SHCOPY_SEND
+ erts_shcopy_t info;
#endif
- ) {
+ if (have_seqtrace(token)) {
ASSERT(is_tuple(token));
- sz_reason = size_object(reason);
sz_token = size_object(token);
sz_from = size_object(from);
- bp = new_message_buffer(sz_reason + sz_from + sz_token + 4);
- hp = bp->mem;
- mess = copy_struct(reason, sz_reason, &hp, &bp->off_heap);
- from_copy = copy_struct(from, sz_from, &hp, &bp->off_heap);
+#ifdef SHCOPY_SEND
+ INITIALIZE_SHCOPY(info);
+ sz_reason = copy_shared_calculate(reason, &info);
+#else
+ sz_reason = size_object(reason);
+#endif
+ mp = erts_alloc_message_heap(to, to_locksp,
+ sz_reason + sz_from + sz_token + 4,
+ &hp, &ohp);
+#ifdef SHCOPY_SEND
+ mess = copy_shared_perform(reason, sz_reason, &info, &hp, ohp);
+ DESTROY_SHCOPY(info);
+#else
+ mess = copy_struct(reason, sz_reason, &hp, ohp);
+#endif
+ from_copy = copy_struct(from, sz_from, &hp, ohp);
save = TUPLE3(hp, am_EXIT, from_copy, mess);
hp += 4;
/* the trace token must in this case be updated by the caller */
seq_trace_output(token, save, SEQ_TRACE_SEND, to->common.id, NULL);
- temptoken = copy_struct(token, sz_token, &hp, &bp->off_heap);
- erts_queue_message(to, to_locksp, bp, save, temptoken
-#ifdef USE_VM_PROBES
- , NIL
-#endif
- );
+ temptoken = copy_struct(token, sz_token, &hp, ohp);
+ ERL_MESSAGE_TOKEN(mp) = temptoken;
+ erts_queue_message(to, *to_locksp, mp, save, am_system);
} else {
- ErlOffHeap *ohp;
- sz_reason = size_object(reason);
sz_from = IS_CONST(from) ? 0 : size_object(from);
+#ifdef SHCOPY_SEND
+ INITIALIZE_SHCOPY(info);
+ sz_reason = copy_shared_calculate(reason, &info);
+#else
+ sz_reason = size_object(reason);
+#endif
+ mp = erts_alloc_message_heap(to, to_locksp,
+ sz_reason+sz_from+4, &hp, &ohp);
- hp = erts_alloc_message_heap(sz_reason+sz_from+4,
- &bp,
- &ohp,
- to,
- to_locksp);
-
+#ifdef SHCOPY_SEND
+ mess = copy_shared_perform(reason, sz_reason, &info, &hp, ohp);
+ DESTROY_SHCOPY(info);
+#else
mess = copy_struct(reason, sz_reason, &hp, ohp);
+#endif
from_copy = (IS_CONST(from)
? from
: copy_struct(from, sz_from, &hp, ohp));
save = TUPLE3(hp, am_EXIT, from_copy, mess);
- erts_queue_message(to, to_locksp, bp, save, NIL
+ erts_queue_message(to, *to_locksp, mp, save, am_system);
+ }
+}
+
+void erts_save_message_in_proc(Process *p, ErtsMessage *msgp)
+{
+ ErlHeapFragment *hfp;
+
+ if (msgp->data.attached == ERTS_MSG_COMBINED_HFRAG)
+ hfp = &msgp->hfrag;
+ else if (msgp->data.attached) {
+ hfp = msgp->data.heap_frag;
+ }
+ else {
+ erts_free_message(msgp);
+ return; /* Nothing to save */
+ }
+
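+ /* Move each fragment's off-heap list and overhead into the
+ * process, then link the whole message onto p->msg_frag so its
+ * heap data survives until the next garbage collection.
+ */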
+ while (1) {
+ struct erl_off_heap_header *ohhp = hfp->off_heap.first;
+ if (ohhp) {
+ for ( ; ohhp->next; ohhp = ohhp->next)
+ ;
+ ohhp->next = p->off_heap.first;
+ p->off_heap.first = hfp->off_heap.first;
+ hfp->off_heap.first = NULL;
+ }
+ p->off_heap.overhead += hfp->off_heap.overhead;
+ hfp->off_heap.overhead = 0;
+ p->mbuf_sz += hfp->used_size;
+
+ if (!hfp->next)
+ break;
+ hfp = hfp->next;
+ }
+
+ msgp->next = p->msg_frag;
+ p->msg_frag = msgp;
+}
+
+Sint
+erts_move_messages_off_heap(Process *c_p)
+{
+ int reds = 1;
+ /*
+ * Move all messages off heap. This *only* occurs when the
+ * process had off heap messages disabled and has just
+ * enabled them...
+ */
+ ErtsMessage *mp;
+
+ reds += c_p->msg.len / 10;
+
+ ASSERT(erts_atomic32_read_nob(&c_p->state)
+ & ERTS_PSFLG_OFF_HEAP_MSGQ);
+ ASSERT(c_p->flags & F_OFF_HEAP_MSGQ_CHNG);
+
+ for (mp = c_p->msg.first; mp; mp = mp->next) {
+ Uint msg_sz, token_sz;
+#ifdef USE_VM_PROBES
+ Uint utag_sz;
+#endif
+ Eterm *hp;
+ ErlHeapFragment *hfrag;
+
+ if (mp->data.attached)
+ continue;
+
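+ /* Skip messages that contain no heap data, i.e. where the
+ * message term, the seq-trace token (and the dtrace user tag)
+ * are all immediates.
+ */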
+ if (is_immed(ERL_MESSAGE_TERM(mp))
+#ifdef USE_VM_PROBES
+ && is_immed(ERL_MESSAGE_DT_UTAG(mp))
+#endif
+ && is_immed(ERL_MESSAGE_TOKEN(mp)))
+ continue;
+
+ /*
+ * The message refers into the heap. Copy the message
+ * from the heap into a heap fragment and attach
+ * it to the message...
+ */
+ msg_sz = size_object(ERL_MESSAGE_TERM(mp));
+#ifdef USE_VM_PROBES
+ utag_sz = size_object(ERL_MESSAGE_DT_UTAG(mp));
+#endif
+ token_sz = size_object(ERL_MESSAGE_TOKEN(mp));
+
+ hfrag = new_message_buffer(msg_sz
#ifdef USE_VM_PROBES
- , NIL
+ + utag_sz
#endif
- );
+ + token_sz);
+ hp = hfrag->mem;
+ if (is_not_immed(ERL_MESSAGE_TERM(mp)))
+ ERL_MESSAGE_TERM(mp) = copy_struct(ERL_MESSAGE_TERM(mp),
+ msg_sz, &hp,
+ &hfrag->off_heap);
+ if (is_not_immed(ERL_MESSAGE_TOKEN(mp)))
+ ERL_MESSAGE_TOKEN(mp) = copy_struct(ERL_MESSAGE_TOKEN(mp),
+ token_sz, &hp,
+ &hfrag->off_heap);
+#ifdef USE_VM_PROBES
+ if (is_not_immed(ERL_MESSAGE_DT_UTAG(mp)))
+ ERL_MESSAGE_DT_UTAG(mp) = copy_struct(ERL_MESSAGE_DT_UTAG(mp),
+ utag_sz, &hp,
+ &hfrag->off_heap);
+#endif
+ mp->data.heap_frag = hfrag;
+ reds += 1;
+ }
+
+ return reds;
+}
+
+Sint
+erts_complete_off_heap_message_queue_change(Process *c_p)
+{
+ int reds = 1;
+
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
+ ASSERT(c_p->flags & F_OFF_HEAP_MSGQ_CHNG);
+ ASSERT(erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_OFF_HEAP_MSGQ);
+
+ /*
+ * This job was first initiated when the process changed to off heap
+ * message queue management. Since then ERTS_PSFLG_OFF_HEAP_MSGQ
+ * has been set. However, the management state might have been changed
+ * again (multiple times) since then. Check the user's last
+ * requested state (the flags F_OFF_HEAP_MSGQ and F_ON_HEAP_MSGQ),
+ * and make the state consistent with that.
+ */
+
+ if (!(c_p->flags & F_OFF_HEAP_MSGQ))
+ erts_atomic32_read_band_nob(&c_p->state,
+ ~ERTS_PSFLG_OFF_HEAP_MSGQ);
+ else {
+ reds += 2;
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
+ ERTS_MSGQ_MV_INQ2PRIVQ(c_p);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
+ reds += erts_move_messages_off_heap(c_p);
+ }
+ c_p->flags &= ~F_OFF_HEAP_MSGQ_CHNG;
+ return reds;
+}
+
+typedef struct {
+ Eterm pid;
+ ErtsThrPrgrLaterOp lop;
+} ErtsChangeOffHeapMessageQueue;
+
+static void
+change_off_heap_msgq(void *vcohmq)
+{
+ ErtsChangeOffHeapMessageQueue *cohmq;
+ /*
+ * Now we have waited for thread progress, which ensures that
+ * all messages to the process are enqueued off heap. Schedule
+ * completion of this change as a system task on the process
+ * itself, in order to avoid lock contention on its main lock.
+ * The change is completed in
+ * erts_complete_off_heap_message_queue_change() (above) when
+ * the system task has been selected for execution.
+ */
+ cohmq = (ErtsChangeOffHeapMessageQueue *) vcohmq;
+ erts_schedule_complete_off_heap_message_queue_change(cohmq->pid);
+ erts_free(ERTS_ALC_T_MSGQ_CHNG, vcohmq);
+}
+
+Eterm
+erts_change_message_queue_management(Process *c_p, Eterm new_state)
+{
+ Eterm res;
+
+#ifdef DEBUG
+ if (c_p->flags & F_OFF_HEAP_MSGQ) {
+ ASSERT(erts_atomic32_read_nob(&c_p->state)
+ & ERTS_PSFLG_OFF_HEAP_MSGQ);
+ }
+ else {
+ if (c_p->flags & F_OFF_HEAP_MSGQ_CHNG) {
+ ASSERT(erts_atomic32_read_nob(&c_p->state)
+ & ERTS_PSFLG_OFF_HEAP_MSGQ);
+ }
+ else {
+ ASSERT(!(erts_atomic32_read_nob(&c_p->state)
+ & ERTS_PSFLG_OFF_HEAP_MSGQ));
+ }
+ }
+#endif
+
+ switch (c_p->flags & (F_OFF_HEAP_MSGQ|F_ON_HEAP_MSGQ)) {
+
+ case F_OFF_HEAP_MSGQ:
+ res = am_off_heap;
+
+ switch (new_state) {
+ case am_off_heap:
+ break;
+ case am_on_heap:
+ c_p->flags |= F_ON_HEAP_MSGQ;
+ c_p->flags &= ~F_OFF_HEAP_MSGQ;
+ erts_atomic32_read_bor_nob(&c_p->state,
+ ERTS_PSFLG_ON_HEAP_MSGQ);
+ /*
+ * We are not allowed to clear ERTS_PSFLG_OFF_HEAP_MSGQ
+ * if an off heap change is ongoing. It will be adjusted
+ * when the change completes...
+ */
+ if (!(c_p->flags & F_OFF_HEAP_MSGQ_CHNG)) {
+ /* Safe to clear ERTS_PSFLG_OFF_HEAP_MSGQ... */
+ erts_atomic32_read_band_nob(&c_p->state,
+ ~ERTS_PSFLG_OFF_HEAP_MSGQ);
+ }
+ break;
+ default:
+ res = THE_NON_VALUE; /* badarg */
+ break;
+ }
+ break;
+
+ case F_ON_HEAP_MSGQ:
+ res = am_on_heap;
+
+ switch (new_state) {
+ case am_on_heap:
+ break;
+ case am_off_heap:
+ c_p->flags &= ~F_ON_HEAP_MSGQ;
+ erts_atomic32_read_band_nob(&c_p->state,
+ ~ERTS_PSFLG_ON_HEAP_MSGQ);
+ goto change_to_off_heap;
+ default:
+ res = THE_NON_VALUE; /* badarg */
+ break;
+ }
+ break;
+
+ default:
+ res = am_error;
+ ERTS_INTERNAL_ERROR("Inconsistent message queue management state");
+ break;
+ }
+
+ return res;
+
+change_to_off_heap:
+
+ c_p->flags |= F_OFF_HEAP_MSGQ;
+
+ /*
+ * We do not have to schedule a change if
+ * we have an ongoing off heap change...
+ */
+ if (!(c_p->flags & F_OFF_HEAP_MSGQ_CHNG)) {
+ ErtsChangeOffHeapMessageQueue *cohmq;
+ /*
+ * Need to set ERTS_PSFLG_OFF_HEAP_MSGQ and wait for
+ * thread progress before completing the change, in
+ * order to ensure that all senders observe that
+ * messages should be passed off heap. When the
+ * change has completed, GC does not need to inspect
+ * the message queue at all.
+ */
+ erts_atomic32_read_bor_nob(&c_p->state,
+ ERTS_PSFLG_OFF_HEAP_MSGQ);
+ c_p->flags |= F_OFF_HEAP_MSGQ_CHNG;
+ cohmq = erts_alloc(ERTS_ALC_T_MSGQ_CHNG,
+ sizeof(ErtsChangeOffHeapMessageQueue));
+ cohmq->pid = c_p->common.id;
+ erts_schedule_thr_prgr_later_op(change_off_heap_msgq,
+ (void *) cohmq,
+ &cohmq->lop);
+ }
+
+ return res;
+}
+
+int
+erts_decode_dist_message(Process *proc, ErtsProcLocks proc_locks,
+ ErtsMessage *msgp, int force_off_heap)
+{
+ ErtsHeapFactory factory;
+ Eterm msg;
+ ErlHeapFragment *bp;
+ Sint need;
+ int decode_in_heap_frag;
+
+ decode_in_heap_frag = (force_off_heap
+ || !(proc_locks & ERTS_PROC_LOCK_MAIN)
+ || (proc->flags & F_OFF_HEAP_MSGQ));
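+
+ /* Decode into a heap fragment (rather than onto the process heap)
+ * when forced by the caller, when we do not hold the main lock, or
+ * when the process keeps its message queue off heap.
+ */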
+
+ if (msgp->data.dist_ext->heap_size >= 0)
+ need = msgp->data.dist_ext->heap_size;
+ else {
+ need = erts_decode_dist_ext_size(msgp->data.dist_ext);
+ if (need < 0) {
+ /* bad msg; remove it... */
+ if (is_not_immed(ERL_MESSAGE_TOKEN(msgp))) {
+ bp = erts_dist_ext_trailer(msgp->data.dist_ext);
+ erts_cleanup_offheap(&bp->off_heap);
+ }
+ erts_free_dist_ext_copy(msgp->data.dist_ext);
+ msgp->data.dist_ext = NULL;
+ return 0;
+ }
+
+ msgp->data.dist_ext->heap_size = need;
+ }
+
+ if (is_not_immed(ERL_MESSAGE_TOKEN(msgp))) {
+ bp = erts_dist_ext_trailer(msgp->data.dist_ext);
+ need += bp->used_size;
+ }
+
+ if (decode_in_heap_frag)
+ erts_factory_heap_frag_init(&factory, new_message_buffer(need));
+ else
+ erts_factory_proc_prealloc_init(&factory, proc, need);
+
+ ASSERT(msgp->data.dist_ext->heap_size >= 0);
+ if (is_not_immed(ERL_MESSAGE_TOKEN(msgp))) {
+ ErlHeapFragment *heap_frag;
+ heap_frag = erts_dist_ext_trailer(msgp->data.dist_ext);
+ ERL_MESSAGE_TOKEN(msgp) = copy_struct(ERL_MESSAGE_TOKEN(msgp),
+ heap_frag->used_size,
+ &factory.hp,
+ factory.off_heap);
+ erts_cleanup_offheap(&heap_frag->off_heap);
+ }
+
+ msg = erts_decode_dist_ext(&factory, msgp->data.dist_ext);
+ ERL_MESSAGE_TERM(msgp) = msg;
+ erts_free_dist_ext_copy(msgp->data.dist_ext);
+ msgp->data.attached = NULL;
+
+ if (is_non_value(msg)) {
+ erts_factory_undo(&factory);
+ return 0;
}
+
+ erts_factory_trim_and_close(&factory, msgp->m,
+ ERL_MESSAGE_REF_ARRAY_SZ);
+
+ ASSERT(!msgp->data.heap_frag);
+
+ if (decode_in_heap_frag)
+ msgp->data.heap_frag = factory.heap_frags;
+
+ return 1;
}
+/*
+ * ERTS_INSPECT_MSGQ_KEEP_OH_MSGS == 0 will move off heap messages
+ * into the heap of the inspected process if off_heap_message_queue
+ * is false when process_info(_, messages) is called. That is, the
+ * following GC will have more data in the rootset compared to the
+ * scenario when process_info(_, messages) had not been called.
+ *
+ * ERTS_INSPECT_MSGQ_KEEP_OH_MSGS != 0 will keep off heap messages
+ * off heap when process_info(_, messages) is called regardless of
+ * the off_heap_message_queue setting of the process. That is, it
+ * will change the following execution of the process as little as
+ * possible.
+ */
+#define ERTS_INSPECT_MSGQ_KEEP_OH_MSGS 1
+
+Uint
+erts_prep_msgq_for_inspection(Process *c_p, Process *rp,
+ ErtsProcLocks rp_locks, ErtsMessageInfo *mip)
+{
+ Uint tot_heap_size;
+ ErtsMessage* mp;
+ Sint i;
+ int self_on_heap;
+
+ /*
+ * Prepare the message queue for inspection
+ * by process_info().
+ *
+ * - Decode all messages in external format
+ * - Remove all corrupt dist messages from the queue
+ * - Save a pointer to, and the heap size needed by, each
+ * message in the mip array.
+ * - Return the total heap size needed for all messages
+ * that need to be copied.
+ *
+ * If ERTS_INSPECT_MSGQ_KEEP_OH_MSGS == 0:
+ * - In case off heap messages are disabled and
+ * we are inspecting our own queue, move all
+ * off heap data into the heap.
+ */
+
+ self_on_heap = c_p == rp && !(c_p->flags & F_OFF_HEAP_MSGQ);
+
+ tot_heap_size = 0;
+ i = 0;
+ mp = rp->msg.first;
+ while (mp) {
+ Eterm msg = ERL_MESSAGE_TERM(mp);
+
+ mip[i].size = 0;
+
+ if (is_non_value(msg)) {
+ /* Dist message on external format; decode it... */
+ if (mp->data.attached)
+ erts_decode_dist_message(rp, rp_locks, mp,
+ ERTS_INSPECT_MSGQ_KEEP_OH_MSGS);
+
+ msg = ERL_MESSAGE_TERM(mp);
+
+ if (is_non_value(msg)) {
+ ErtsMessage **mpp;
+ ErtsMessage *bad_mp = mp;
+ /*
+ * Bad distribution message; remove
+ * it from the queue...
+ */
+ ASSERT(!mp->data.attached);
+
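+ /* Locate the predecessor's next pointer: either the queue
+ * head or the next field of the previously inspected message.
+ */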
+ mpp = i == 0 ? &rp->msg.first : &mip[i-1].msgp->next;
+
+ ASSERT(*mpp == bad_mp);
+
+ erts_msgq_update_internal_pointers(&rp->msg, mpp, &bad_mp->next);
+
+ mp = mp->next;
+ *mpp = mp;
+ rp->msg.len--;
+ bad_mp->next = NULL;
+ erts_cleanup_messages(bad_mp);
+ continue;
+ }
+ }
+
+ ASSERT(is_value(msg));
+
+#if ERTS_INSPECT_MSGQ_KEEP_OH_MSGS
+ if (is_not_immed(msg) && (!self_on_heap || mp->data.attached)) {
+ Uint sz = size_object(msg);
+ mip[i].size = sz;
+ tot_heap_size += sz;
+ }
+#else
+ if (self_on_heap) {
+ if (mp->data.attached) {
+ ErtsMessage *tmp = NULL;
+ if (mp->data.attached != ERTS_MSG_COMBINED_HFRAG) {
+ erts_link_mbuf_to_proc(rp, mp->data.heap_frag);
+ mp->data.attached = NULL;
+ }
+ else {
+ /*
+ * Need to replace the message reference since
+ * we will get references to the message data
+ * from the heap...
+ */
+ ErtsMessage **mpp;
+ tmp = erts_alloc_message(0, NULL);
+ sys_memcpy((void *) tmp->m, (void *) mp->m,
+ sizeof(Eterm)*ERL_MESSAGE_REF_ARRAY_SZ);
+ mpp = i == 0 ? &rp->msg.first : &mip[i-1].msgp->next;
+ erts_msgq_replace_msg_ref(&rp->msg, tmp, mpp);
+ erts_save_message_in_proc(rp, mp);
+ mp = tmp;
+ }
+ }
+ }
+ else if (is_not_immed(msg)) {
+ Uint sz = size_object(msg);
+ mip[i].size = sz;
+ tot_heap_size += sz;
+ }
+
+#endif
+
+ mip[i].msgp = mp;
+ i++;
+ mp = mp->next;
+ }
+
+ return tot_heap_size;
+}
+
+void erts_factory_proc_init(ErtsHeapFactory* factory,
+ Process* p)
+{
+ erts_factory_proc_prealloc_init(factory, p, HEAP_LIMIT(p) - HEAP_TOP(p));
+}
+
+void erts_factory_proc_prealloc_init(ErtsHeapFactory* factory,
+ Process* p,
+ Sint size)
+{
+ ErlHeapFragment *bp = p->mbuf;
+ factory->mode = FACTORY_HALLOC;
+ factory->p = p;
+ factory->hp_start = HAlloc(p, size);
+ factory->hp = factory->hp_start;
+ factory->hp_end = factory->hp_start + size;
+ factory->off_heap = &p->off_heap;
+ factory->message = NULL;
+ factory->off_heap_saved.first = p->off_heap.first;
+ factory->off_heap_saved.overhead = p->off_heap.overhead;
+ factory->heap_frags_saved = bp;
+ factory->heap_frags_saved_used = bp ? bp->used_size : 0;
+ factory->heap_frags = NULL; /* not used */
+ factory->alloc_type = 0; /* not used */
+}
+
+void erts_factory_heap_frag_init(ErtsHeapFactory* factory,
+ ErlHeapFragment* bp)
+{
+ factory->mode = FACTORY_HEAP_FRAGS;
+ factory->p = NULL;
+ factory->hp_start = bp->mem;
+ factory->hp = bp->mem;
+ factory->hp_end = bp->mem + bp->alloc_size;
+ factory->off_heap = &bp->off_heap;
+ factory->message = NULL;
+ factory->heap_frags = bp;
+ factory->heap_frags_saved = NULL;
+ factory->heap_frags_saved_used = 0;
+ factory->alloc_type = ERTS_ALC_T_HEAP_FRAG;
+ ASSERT(!bp->next);
+ factory->off_heap_saved.first = factory->off_heap->first;
+ factory->off_heap_saved.overhead = factory->off_heap->overhead;
+
+ ASSERT(factory->hp >= factory->hp_start && factory->hp <= factory->hp_end);
+}
+
+
+ErtsMessage *
+erts_factory_message_create(ErtsHeapFactory* factory,
+ Process *proc,
+ ErtsProcLocks *proc_locksp,
+ Uint sz)
+{
+ Eterm *hp;
+ ErlOffHeap *ohp;
+ ErtsMessage *msgp;
+ int on_heap;
+ erts_aint32_t state;
+
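+ /* With an off-heap message queue the term area is always placed
+ * in the message's combined heap fragment; otherwise we first try
+ * to allocate directly on the receiver's heap.
+ */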
+ state = proc ? erts_atomic32_read_nob(&proc->state) : 0;
+
+ if (state & ERTS_PSFLG_OFF_HEAP_MSGQ) {
+ msgp = erts_alloc_message(sz, &hp);
+ ohp = sz == 0 ? NULL : &msgp->hfrag.off_heap;
+ on_heap = 0;
+ }
+ else {
+ msgp = erts_try_alloc_message_on_heap(proc, &state,
+ proc_locksp,
+ sz, &hp, &ohp,
+ &on_heap);
+ }
+
+ if (on_heap) {
+ ERTS_ASSERT(*proc_locksp & ERTS_PROC_LOCK_MAIN);
+ ASSERT(ohp == &proc->off_heap);
+ factory->mode = FACTORY_HALLOC;
+ factory->p = proc;
+ factory->heap_frags_saved = proc->mbuf;
+ factory->heap_frags_saved_used = proc->mbuf ? proc->mbuf->used_size : 0;
+ }
+ else {
+ factory->mode = FACTORY_MESSAGE;
+ factory->p = NULL;
+ factory->heap_frags_saved = NULL;
+ factory->heap_frags_saved_used = 0;
+
+ if (msgp->data.attached == ERTS_MSG_COMBINED_HFRAG) {
+ ASSERT(!msgp->hfrag.next);
+ factory->heap_frags = NULL;
+ }
+ else {
+ ASSERT(!msgp->data.heap_frag
+ || !msgp->data.heap_frag->next);
+ factory->heap_frags = msgp->data.heap_frag;
+ }
+ }
+ factory->hp_start = hp;
+ factory->hp = hp;
+ factory->hp_end = hp + sz;
+ factory->message = msgp;
+ factory->off_heap = ohp;
+ factory->alloc_type = ERTS_ALC_T_HEAP_FRAG;
+ if (ohp) {
+ factory->off_heap_saved.first = ohp->first;
+ factory->off_heap_saved.overhead = ohp->overhead;
+ }
+ else {
+ factory->off_heap_saved.first = NULL;
+ factory->off_heap_saved.overhead = 0;
+ }
+
+ ASSERT(factory->hp >= factory->hp_start && factory->hp <= factory->hp_end);
+
+ return msgp;
+}
+
+void erts_factory_selfcontained_message_init(ErtsHeapFactory* factory,
+ ErtsMessage *msgp,
+ Eterm *hp)
+{
+ ErlHeapFragment* bp;
+ if (msgp->data.attached == ERTS_MSG_COMBINED_HFRAG) {
+ bp = &msgp->hfrag;
+ factory->heap_frags = NULL;
+ }
+ else {
+ bp = msgp->data.heap_frag;
+ factory->heap_frags = bp;
+ }
+ factory->mode = FACTORY_MESSAGE;
+ factory->p = NULL;
+ factory->hp_start = bp->mem;
+ factory->hp = hp;
+ factory->hp_end = bp->mem + bp->alloc_size;
+ factory->message = msgp;
+ factory->off_heap = &bp->off_heap;
+ factory->heap_frags_saved = NULL;
+ factory->heap_frags_saved_used = 0;
+ factory->alloc_type = ERTS_ALC_T_HEAP_FRAG;
+ ASSERT(!bp->next);
+ factory->off_heap_saved.first = factory->off_heap->first;
+ factory->off_heap_saved.overhead = factory->off_heap->overhead;
+
+ ASSERT(factory->hp >= factory->hp_start && factory->hp <= factory->hp_end);
+}
+
+/* One statically sized heap that must suffice.
+ No extra heap fragments will be allocated.
+*/
+void erts_factory_static_init(ErtsHeapFactory* factory,
+ Eterm* hp,
+ Uint size,
+ ErlOffHeap* off_heap)
+{
+ factory->mode = FACTORY_STATIC;
+ factory->hp_start = hp;
+ factory->hp = hp;
+ factory->hp_end = hp + size;
+ factory->off_heap = off_heap;
+ factory->off_heap_saved.first = factory->off_heap->first;
+ factory->off_heap_saved.overhead = factory->off_heap->overhead;
+}
+
+/* A temporary heap with the default buffer allocated/freed by the client.
+ * factory_close is the same as factory_undo.
+ */
+void erts_factory_tmp_init(ErtsHeapFactory* factory, Eterm* hp, Uint size,
+ Uint32 atype)
+{
+ factory->mode = FACTORY_TMP;
+ factory->hp_start = hp;
+ factory->hp = hp;
+ factory->hp_end = hp + size;
+ factory->heap_frags = NULL;
+ factory->off_heap_saved.first = NULL;
+ factory->off_heap_saved.overhead = 0;
+ factory->off_heap = &factory->off_heap_saved;
+ factory->alloc_type = atype;
+}
+
+/* When we know the term is an immediate and need no heap.
+*/
+void erts_factory_dummy_init(ErtsHeapFactory* factory)
+{
+ factory->mode = FACTORY_CLOSED;
+}
+
+static void reserve_heap(ErtsHeapFactory*, Uint need, Uint xtra);
+
+Eterm* erts_produce_heap(ErtsHeapFactory* factory, Uint need, Uint xtra)
+{
+ Eterm* res;
+
+ ASSERT((unsigned int)factory->mode > (unsigned int)FACTORY_CLOSED);
+ if (factory->hp + need > factory->hp_end) {
+ reserve_heap(factory, need, xtra);
+ }
+ res = factory->hp;
+ factory->hp += need;
+ return res;
+}
+
+Eterm* erts_reserve_heap(ErtsHeapFactory* factory, Uint need)
+{
+ ASSERT((unsigned int)factory->mode > (unsigned int)FACTORY_CLOSED);
+ if (factory->hp + need > factory->hp_end) {
+ reserve_heap(factory, need, 200);
+ }
+ return factory->hp;
+}
+
+static void reserve_heap(ErtsHeapFactory* factory, Uint need, Uint xtra)
+{
+ ErlHeapFragment* bp;
+
+ switch (factory->mode) {
+ case FACTORY_HALLOC:
+ HRelease(factory->p, factory->hp_end, factory->hp);
+ factory->hp = HAllocX(factory->p, need, xtra);
+ factory->hp_end = factory->hp + need;
+ return;
+
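+ /* For message, heap-fragment, and tmp factories, overflow is
+ * handled by allocating a new heap fragment and chaining it in
+ * front of factory->heap_frags; the current fragment's used_size
+ * is recorded first.
+ */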
+ case FACTORY_MESSAGE:
+ if (!factory->heap_frags) {
+ ASSERT(factory->message->data.attached == ERTS_MSG_COMBINED_HFRAG);
+ bp = &factory->message->hfrag;
+ }
+ else {
+ /* Fall through */
+ case FACTORY_HEAP_FRAGS:
+ case FACTORY_TMP:
+ bp = factory->heap_frags;
+ }
+
+ if (bp) {
+ ASSERT(factory->hp > bp->mem);
+ ASSERT(factory->hp <= factory->hp_end);
+ ASSERT(factory->hp_end == bp->mem + bp->alloc_size);
+
+ bp->used_size = factory->hp - bp->mem;
+ }
+ bp = (ErlHeapFragment*) ERTS_HEAP_ALLOC(factory->alloc_type,
+ ERTS_HEAP_FRAG_SIZE(need+xtra));
+ bp->next = factory->heap_frags;
+ factory->heap_frags = bp;
+ bp->alloc_size = need + xtra;
+ bp->used_size = need;
+ bp->off_heap.first = NULL;
+ bp->off_heap.overhead = 0;
+
+ factory->hp = bp->mem;
+ factory->hp_end = bp->mem + bp->alloc_size;
+ return;
+
+ case FACTORY_STATIC:
+ case FACTORY_CLOSED:
+ default:
+ ASSERT(!"Invalid factory mode");
+ }
+}
+
+void erts_factory_close(ErtsHeapFactory* factory)
+{
+ ErlHeapFragment* bp;
+
+ switch (factory->mode) {
+ case FACTORY_HALLOC:
+ HRelease(factory->p, factory->hp_end, factory->hp);
+ break;
+
+ case FACTORY_MESSAGE:
+ if (!factory->heap_frags) {
+ if (factory->message->data.attached == ERTS_MSG_COMBINED_HFRAG)
+ bp = &factory->message->hfrag;
+ else
+ bp = NULL;
+ }
+ else {
+ if (factory->message->data.attached == ERTS_MSG_COMBINED_HFRAG)
+ factory->message->hfrag.next = factory->heap_frags;
+ else
+ factory->message->data.heap_frag = factory->heap_frags;
+
+ /* Fall through */
+ case FACTORY_HEAP_FRAGS:
+ bp = factory->heap_frags;
+ }
+
+ if (bp) {
+ ASSERT(factory->hp >= bp->mem);
+ ASSERT(factory->hp <= factory->hp_end);
+ ASSERT(factory->hp_end == bp->mem + bp->alloc_size);
+
+ bp->used_size = factory->hp - bp->mem;
+ }
+ break;
+ case FACTORY_TMP:
+ erts_factory_undo(factory);
+ break;
+ case FACTORY_STATIC: break;
+ case FACTORY_CLOSED: break;
+ default:
+ ASSERT(!"Invalid factory mode");
+ }
+ factory->mode = FACTORY_CLOSED;
+}
+
+void erts_factory_trim_and_close(ErtsHeapFactory* factory,
+ Eterm *brefs, Uint brefs_size)
+{
+ ErlHeapFragment *bp;
+
+ switch (factory->mode) {
+ case FACTORY_MESSAGE: {
+ ErtsMessage *mp = factory->message;
+ if (mp->data.attached == ERTS_MSG_COMBINED_HFRAG) {
+ if (!factory->heap_frags) {
+ Uint sz = factory->hp - factory->hp_start;
+ mp = erts_shrink_message(mp, sz, brefs, brefs_size);
+ factory->message = mp;
+ factory->mode = FACTORY_CLOSED;
+ return;
+ }
+ /* else we don't trim multi-fragmented messages for now (off_heap...) */
+ break;
+ }
+ /* Fall through... */
+ }
+ case FACTORY_HEAP_FRAGS:
+ bp = factory->heap_frags;
+ if (!bp)
+ break;
+ if (bp->next == NULL) {
+ Uint used_sz = factory->hp - bp->mem;
+ ASSERT(used_sz <= bp->alloc_size);
+ if (used_sz > 0)
+ bp = erts_resize_message_buffer(bp, used_sz,
+ brefs, brefs_size);
+ else {
+ free_message_buffer(bp);
+ bp = NULL;
+ }
+ factory->heap_frags = bp;
+ if (factory->mode == FACTORY_MESSAGE)
+ factory->message->data.heap_frag = bp;
+ factory->mode = FACTORY_CLOSED;
+ return;
+ }
+ /* else we don't trim multi-fragmented messages for now (off_heap...) */
+ default:
+ break;
+ }
+ erts_factory_close(factory);
+}
+
+void erts_factory_undo(ErtsHeapFactory* factory)
+{
+ ErlHeapFragment* bp;
+ struct erl_off_heap_header *hdr, **hdr_nextp;
+
+ switch (factory->mode) {
+ case FACTORY_HALLOC:
+ case FACTORY_STATIC:
+ /* Cleanup off-heap
+ */
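+ /* Entries added since the factory was initialized sit at the
+ * front of the off-heap list; terminate the list at the saved
+ * first entry and free only that prefix.
+ */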
+ hdr_nextp = NULL;
+ for (hdr = factory->off_heap->first;
+ hdr != factory->off_heap_saved.first;
+ hdr = hdr->next) {
+
+ hdr_nextp = &hdr->next;
+ }
+
+ if (hdr_nextp != NULL) {
+ *hdr_nextp = NULL;
+ erts_cleanup_offheap(factory->off_heap);
+ factory->off_heap->first = factory->off_heap_saved.first;
+ factory->off_heap->overhead = factory->off_heap_saved.overhead;
+ }
+
+ if (factory->mode == FACTORY_HALLOC) {
+ /* Free heap frags
+ */
+ bp = factory->p->mbuf;
+ if (bp != factory->heap_frags_saved) {
+ do {
+ ErlHeapFragment *next_bp = bp->next;
+ ASSERT(bp->off_heap.first == NULL);
+ ERTS_HEAP_FREE(ERTS_ALC_T_HEAP_FRAG, (void *) bp,
+ ERTS_HEAP_FRAG_SIZE(bp->alloc_size));
+ bp = next_bp;
+ } while (bp != factory->heap_frags_saved);
+
+ factory->p->mbuf = bp;
+ }
+
+ /* Rollback heap top
+ */
+
+ if (HEAP_START(factory->p) <= factory->hp_start
+ && factory->hp_start <= HEAP_LIMIT(factory->p)) {
+ HEAP_TOP(factory->p) = factory->hp_start;
+ }
+
+ /* Fix last heap frag */
+ if (factory->heap_frags_saved) {
+ ASSERT(factory->heap_frags_saved == factory->p->mbuf);
+ if (factory->hp_start != factory->heap_frags_saved->mem)
+ factory->heap_frags_saved->used_size = factory->heap_frags_saved_used;
+ else {
+ factory->p->mbuf = factory->p->mbuf->next;
+ ERTS_HEAP_FREE(ERTS_ALC_T_HEAP_FRAG, factory->heap_frags_saved,
+ ERTS_HEAP_FRAG_SIZE(factory->heap_frags_saved->alloc_size));
+ }
+ }
+ }
+ break;
+
+ case FACTORY_MESSAGE:
+ if (factory->message->data.attached == ERTS_MSG_COMBINED_HFRAG)
+ factory->message->hfrag.next = factory->heap_frags;
+ else
+ factory->message->data.heap_frag = factory->heap_frags;
+ erts_cleanup_messages(factory->message);
+ break;
+ case FACTORY_TMP:
+ case FACTORY_HEAP_FRAGS:
+ erts_cleanup_offheap(factory->off_heap);
+ factory->off_heap->first = NULL;
+
+ bp = factory->heap_frags;
+ while (bp != NULL) {
+ ErlHeapFragment* next_bp = bp->next;
+
+ ASSERT(bp->off_heap.first == NULL);
+ ERTS_HEAP_FREE(factory->alloc_type, (void *) bp,
+ ERTS_HEAP_FRAG_SIZE(bp->alloc_size));
+ bp = next_bp;
+ }
+ break;
+
+ case FACTORY_CLOSED: break;
+ default:
+ ASSERT(!"Invalid factory mode");
+ }
+ factory->mode = FACTORY_CLOSED;
+#ifdef DEBUG
+ factory->p = NULL;
+ factory->hp = NULL;
+ factory->heap_frags = NULL;
+#endif
+}
+
+Uint
+erts_mbuf_size(Process *p)
+{
+ Uint sz = 0;
+ ErlHeapFragment* bp;
+ ErtsMessage* mp;
+
+ for (bp = p->mbuf; bp; bp = bp->next)
+ sz += bp->used_size;
+
+ for (mp = p->msg_frag; mp; mp = mp->next)
+ for (bp = erts_message_to_heap_frag(mp); bp; bp = bp->next)
+ sz += bp->used_size;
+
+ return sz;
+}
diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h
index 0f3bb8d281..9c8cf84e43 100644
--- a/erts/emulator/beam/erl_message.h
+++ b/erts/emulator/beam/erl_message.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2012. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -23,6 +24,8 @@
struct proc_bin;
struct external_thing_;
+typedef struct erl_mesg ErtsMessage;
+
/*
* This struct represents data that must be updated by structure copy,
* but is stored outside of any heap.
@@ -31,9 +34,6 @@ struct external_thing_;
struct erl_off_heap_header {
Eterm thing_word;
Uint size;
-#if HALFWORD_HEAP
- void* dummy_ptr_padding__;
-#endif
struct erl_off_heap_header* next;
};
@@ -51,9 +51,58 @@ typedef struct erl_off_heap {
(OHP)->first = NULL; \
(OHP)->overhead = 0; \
} while (0)
+
+typedef struct {
+ enum {
+ FACTORY_CLOSED = 0,
+ FACTORY_HALLOC,
+ FACTORY_MESSAGE,
+ FACTORY_HEAP_FRAGS,
+ FACTORY_STATIC,
+ FACTORY_TMP
+ } mode;
+ Process* p;
+ Eterm* hp_start;
+ Eterm* hp;
+ Eterm* hp_end;
+ ErtsMessage *message;
+ struct erl_heap_fragment* heap_frags;
+ struct erl_heap_fragment* heap_frags_saved;
+ Uint heap_frags_saved_used;
+ ErlOffHeap* off_heap;
+ ErlOffHeap off_heap_saved;
+ Uint32 alloc_type;
+} ErtsHeapFactory;
+
+void erts_factory_proc_init(ErtsHeapFactory*, Process*);
+void erts_factory_proc_prealloc_init(ErtsHeapFactory*, Process*, Sint size);
+void erts_factory_heap_frag_init(ErtsHeapFactory*, struct erl_heap_fragment*);
+ErtsMessage *erts_factory_message_create(ErtsHeapFactory *, Process *,
+ ErtsProcLocks *, Uint sz);
+void erts_factory_selfcontained_message_init(ErtsHeapFactory*, ErtsMessage *, Eterm *);
+void erts_factory_static_init(ErtsHeapFactory*, Eterm* hp, Uint size, ErlOffHeap*);
+void erts_factory_tmp_init(ErtsHeapFactory*, Eterm* hp, Uint size, Uint32 atype);
+void erts_factory_dummy_init(ErtsHeapFactory*);
+
+Eterm* erts_produce_heap(ErtsHeapFactory*, Uint need, Uint xtra);
+Eterm* erts_reserve_heap(ErtsHeapFactory*, Uint need);
+void erts_factory_close(ErtsHeapFactory*);
+void erts_factory_trim_and_close(ErtsHeapFactory*,Eterm *brefs, Uint brefs_size);
+void erts_factory_undo(ErtsHeapFactory*);
+
+#ifdef CHECK_FOR_HOLES
+# define ERTS_FACTORY_HOLE_CHECK(f) do { \
+ /*if ((f)->p) erts_check_for_holes((f)->p);*/ \
+ } while (0)
+#else
+# define ERTS_FACTORY_HOLE_CHECK(f)
+#endif
+
#include "external.h"
#include "erl_process.h"
+#define ERTS_INVALID_HFRAG_PTR ((ErlHeapFragment *) ~((UWord) 7))
+
/*
* This struct represents a heap fragment, which is used when there
* isn't sufficient room in the process heap and we can't do a GC.
@@ -63,38 +112,58 @@ typedef struct erl_heap_fragment ErlHeapFragment;
struct erl_heap_fragment {
ErlHeapFragment* next; /* Next heap fragment */
ErlOffHeap off_heap; /* Offset heap data. */
- unsigned alloc_size; /* Size in (half)words of mem */
- unsigned used_size; /* With terms to be moved to heap by GC */
+ Uint alloc_size; /* Size in (half)words of mem */
+ Uint used_size; /* With terms to be moved to heap by GC */
Eterm mem[1]; /* Data */
};
-typedef struct erl_mesg {
- struct erl_mesg* next; /* Next message */
- union {
- ErtsDistExternal *dist_ext;
- ErlHeapFragment *heap_frag;
- void *attached;
- } data;
+/* m[0] = message, m[1] = seq trace token */
+#define ERL_MESSAGE_REF_ARRAY_SZ 2
+#define ERL_MESSAGE_TERM(mp) ((mp)->m[0])
+#define ERL_MESSAGE_TOKEN(mp) ((mp)->m[1])
+
#ifdef USE_VM_PROBES
- Eterm m[3]; /* m[0] = message, m[1] = seq trace token, m[3] = dynamic trace user tag */
+/* m[2] = dynamic trace user tag */
+#undef ERL_MESSAGE_REF_ARRAY_SZ
+#define ERL_MESSAGE_REF_ARRAY_SZ 3
+#define ERL_MESSAGE_DT_UTAG(mp) ((mp)->m[2])
#else
- Eterm m[2]; /* m[0] = message, m[1] = seq trace token */
#endif
-} ErlMessage;
-#define ERL_MESSAGE_TERM(mp) ((mp)->m[0])
-#define ERL_MESSAGE_TOKEN(mp) ((mp)->m[1])
#ifdef USE_VM_PROBES
-#define ERL_MESSAGE_DT_UTAG(mp) ((mp)->m[2])
+#define have_no_seqtrace(T) ((T) == NIL || (T) == am_have_dt_utag)
+#else
+#define have_no_seqtrace(T) ((T) == NIL)
#endif
+#define have_seqtrace(T) (!have_no_seqtrace(T))
+
+#define ERL_MESSAGE_REF_FIELDS__ \
+ ErtsMessage *next; /* Next message */ \
+ union { \
+ ErtsDistExternal *dist_ext; \
+ ErlHeapFragment *heap_frag; \
+ void *attached; \
+ } data; \
+ Eterm m[ERL_MESSAGE_REF_ARRAY_SZ]
+
+
+typedef struct erl_msg_ref__ {
+ ERL_MESSAGE_REF_FIELDS__;
+} ErtsMessageRef;
+
+struct erl_mesg {
+ ERL_MESSAGE_REF_FIELDS__;
+
+ ErlHeapFragment hfrag;
+};
/* Size of default message buffer (erl_message.c) */
#define ERL_MESSAGE_BUF_SZ 500
typedef struct {
- ErlMessage* first;
- ErlMessage** last; /* point to the last next pointer */
- ErlMessage** save;
+ ErtsMessage* first;
+ ErtsMessage** last; /* point to the last next pointer */
+ ErtsMessage** save;
Sint len; /* queue length */
/*
@@ -102,65 +171,67 @@ typedef struct {
* recv_set/1 instructions.
*/
BeamInstr* mark; /* address to rec_loop/2 instruction */
- ErlMessage** saved_last; /* saved last pointer */
+ ErtsMessage** saved_last; /* saved last pointer */
} ErlMessageQueue;
-#ifdef ERTS_SMP
typedef struct {
- ErlMessage* first;
- ErlMessage** last; /* point to the last next pointer */
+ ErtsMessage* first;
+ ErtsMessage** last; /* point to the last next pointer */
Sint len; /* queue length */
} ErlMessageInQueue;
-#endif
+typedef struct erl_trace_message_queue__ {
+ struct erl_trace_message_queue__ *next; /* point to the next receiver */
+ Eterm receiver;
+ ErtsMessage* first;
+ ErtsMessage** last; /* point to the last next pointer */
+ Sint len; /* queue length */
+} ErlTraceMessageQueue;
+
/* Get "current" message */
#define PEEK_MESSAGE(p) (*(p)->msg.save)
+#ifdef USE_VM_PROBES
+#define LINK_MESSAGE_DTAG(mp, dt) ERL_MESSAGE_DT_UTAG(mp) = dt
+#else
+#define LINK_MESSAGE_DTAG(mp, dt)
+#endif
-/* Add message last in private message queue */
-#define LINK_MESSAGE_PRIVQ(p, mp) do { \
- *(p)->msg.last = (mp); \
- (p)->msg.last = &(mp)->next; \
- (p)->msg.len++; \
-} while(0)
+#define LINK_MESSAGE_IMPL(p, first_msg, last_msg, num_msgs, where) do { \
+ *(p)->where.last = (first_msg); \
+ (p)->where.last = (last_msg); \
+ (p)->where.len += (num_msgs); \
+ } while(0)
-#ifdef ERTS_SMP
-
-/* Move in message queue to end of private message queue */
-#define ERTS_SMP_MSGQ_MV_INQ2PRIVQ(P) \
-do { \
- if ((P)->msg_inq.first) { \
- *(P)->msg.last = (P)->msg_inq.first; \
- (P)->msg.last = (P)->msg_inq.last; \
- (P)->msg.len += (P)->msg_inq.len; \
- (P)->msg_inq.first = NULL; \
- (P)->msg_inq.last = &(P)->msg_inq.first; \
- (P)->msg_inq.len = 0; \
- } \
-} while (0)
-
-/* Add message last in message queue */
-#define LINK_MESSAGE(p, mp) do { \
- *(p)->msg_inq.last = (mp); \
- (p)->msg_inq.last = &(mp)->next; \
- (p)->msg_inq.len++; \
-} while(0)
-
-#else
+/* Add message last in private message queue */
+#define LINK_MESSAGE_PRIVQ(p, first_msg, last_msg, len) \
+ do { \
+ LINK_MESSAGE_IMPL(p, first_msg, last_msg, len, msg); \
+ } while (0)
-#define ERTS_SMP_MSGQ_MV_INQ2PRIVQ(P)
+/* Add a chain of messages (first_msg..last_msg) last in the message in-queue */
+#define LINK_MESSAGE(p, first_msg, last_msg, len) \
+ LINK_MESSAGE_IMPL(p, first_msg, last_msg, len, msg_inq)
+
+#define ERTS_MSGQ_MV_INQ2PRIVQ(p) \
+ do { \
+ if (p->msg_inq.first) { \
+ *p->msg.last = p->msg_inq.first; \
+ p->msg.last = p->msg_inq.last; \
+ p->msg.len += p->msg_inq.len; \
+ p->msg_inq.first = NULL; \
+ p->msg_inq.last = &p->msg_inq.first; \
+ p->msg_inq.len = 0; \
+ } \
+ } while (0)
-/* Add message last in message queue */
-#define LINK_MESSAGE(p, mp) LINK_MESSAGE_PRIVQ((p), (mp))
-
-#endif
/* Unlink current message */
#define UNLINK_MESSAGE(p,msgp) do { \
- ErlMessage* __mp = (msgp)->next; \
+ ErtsMessage* __mp = (msgp)->next; \
*(p)->msg.save = __mp; \
(p)->msg.len--; \
if (__mp == NULL) \
@@ -176,102 +247,207 @@ do { \
#define SAVE_MESSAGE(p) \
(p)->msg.save = &(*(p)->msg.save)->next
-/*
- * ErtsMoveMsgAttachmentIntoProc() moves data attached to a message
- * onto the heap of a process. The attached data is the content of
- * the the message either on the internal format or on the external
- * format, and also possibly a seq trace token on the internal format.
- * If the message content is on the external format, the decode might
- * fail. If the decoding fails, ERL_MESSAGE_TERM(M) will contain
- * THE_NON_VALUE. That is, ERL_MESSAGE_TERM(M) *has* to be checked
- * afterwards and taken care of appropriately.
- *
- * ErtsMoveMsgAttachmentIntoProc() will shallow copy to heap if
- * possible; otherwise, move to heap via garbage collection.
- *
- * ErtsMoveMsgAttachmentIntoProc() is used when receiveing messages
- * in process_main() and in hipe_check_get_msg().
- */
-
-#define ErtsMoveMsgAttachmentIntoProc(M, P, ST, HT, FC, SWPO, SWPI) \
-do { \
- if ((M)->data.attached) { \
- Uint need__ = erts_msg_attached_data_size((M)); \
- if ((ST) - (HT) >= need__) { \
- Uint *htop__ = (HT); \
- erts_move_msg_attached_data_to_heap(&htop__, &MSO((P)), (M));\
- ASSERT(htop__ - (HT) <= need__); \
- (HT) = htop__; \
- } \
- else { \
- { SWPO ; } \
- (FC) -= erts_garbage_collect((P), 0, NULL, 0); \
- { SWPI ; } \
- } \
- ASSERT(!(M)->data.attached); \
- } \
-} while (0)
-
#define ERTS_SND_FLG_NO_SEQ_TRACE (((unsigned) 1) << 0)
#define ERTS_HEAP_FRAG_SIZE(DATA_WORDS) \
(sizeof(ErlHeapFragment) - sizeof(Eterm) + (DATA_WORDS)*sizeof(Eterm))
-#define ERTS_INIT_HEAP_FRAG(HEAP_FRAG_P, DATA_WORDS) \
-do { \
- (HEAP_FRAG_P)->next = NULL; \
- (HEAP_FRAG_P)->alloc_size = (DATA_WORDS); \
- (HEAP_FRAG_P)->used_size = (DATA_WORDS); \
- (HEAP_FRAG_P)->off_heap.first = NULL; \
- (HEAP_FRAG_P)->off_heap.overhead = 0; \
-} while (0)
+#define ERTS_INIT_HEAP_FRAG(HEAP_FRAG_P, USED_WORDS, DATA_WORDS) \
+ do { \
+ (HEAP_FRAG_P)->next = NULL; \
+ (HEAP_FRAG_P)->alloc_size = (DATA_WORDS); \
+ (HEAP_FRAG_P)->used_size = (USED_WORDS); \
+ (HEAP_FRAG_P)->off_heap.first = NULL; \
+ (HEAP_FRAG_P)->off_heap.overhead = 0; \
+ } while (0)
+
+#ifdef USE_VM_PROBES
+#define ERL_MESSAGE_DT_UTAG_INIT(MP) ERL_MESSAGE_DT_UTAG(MP) = NIL
+#else
+#define ERL_MESSAGE_DT_UTAG_INIT(MP) do{ } while (0)
+#endif
+
+#define ERTS_INIT_MESSAGE(MP) \
+ do { \
+ (MP)->next = NULL; \
+ ERL_MESSAGE_TERM(MP) = THE_NON_VALUE; \
+ ERL_MESSAGE_TOKEN(MP) = NIL; \
+ ERL_MESSAGE_DT_UTAG_INIT(MP); \
+ MP->data.attached = NULL; \
+ } while (0)
void init_message(void);
-void free_message(ErlMessage *);
ErlHeapFragment* new_message_buffer(Uint);
ErlHeapFragment* erts_resize_message_buffer(ErlHeapFragment *, Uint,
Eterm *, Uint);
void free_message_buffer(ErlHeapFragment *);
-void erts_queue_dist_message(Process*, ErtsProcLocks*, ErtsDistExternal *, Eterm);
-void erts_queue_message(Process*, ErtsProcLocks*, ErlHeapFragment*, Eterm, Eterm
-#ifdef USE_VM_PROBES
- , Eterm dt_utag
-#endif
-);
+void erts_queue_dist_message(Process*, ErtsProcLocks, ErtsDistExternal *, Eterm, Eterm);
+Sint erts_queue_message(Process*, ErtsProcLocks,ErtsMessage*, Eterm, Eterm);
+Sint erts_queue_messages(Process*, ErtsProcLocks,
+ ErtsMessage*, ErtsMessage**, Uint, Eterm);
void erts_deliver_exit_message(Eterm, Process*, ErtsProcLocks *, Eterm, Eterm);
Sint erts_send_message(Process*, Process*, ErtsProcLocks*, Eterm, unsigned);
void erts_link_mbuf_to_proc(Process *proc, ErlHeapFragment *bp);
-void erts_move_msg_mbuf_to_heap(Eterm**, ErlOffHeap*, ErlMessage *);
+Uint erts_msg_attached_data_size_aux(ErtsMessage *msg);
-Uint erts_msg_attached_data_size_aux(ErlMessage *msg);
-void erts_move_msg_attached_data_to_heap(Eterm **, ErlOffHeap *, ErlMessage *);
+void erts_cleanup_offheap(ErlOffHeap *offheap);
+void erts_save_message_in_proc(Process *p, ErtsMessage *msg);
+Sint erts_move_messages_off_heap(Process *c_p);
+Sint erts_complete_off_heap_message_queue_change(Process *c_p);
+Eterm erts_change_message_queue_management(Process *c_p, Eterm new_state);
-Eterm erts_msg_distext2heap(Process *, ErtsProcLocks *, ErlHeapFragment **,
- Eterm *, ErtsDistExternal *);
+int erts_decode_dist_message(Process *, ErtsProcLocks, ErtsMessage *, int);
-void erts_cleanup_offheap(ErlOffHeap *offheap);
+void erts_cleanup_messages(ErtsMessage *mp);
+
+typedef struct {
+ Uint size;
+ ErtsMessage *msgp;
+} ErtsMessageInfo;
+
+Uint erts_prep_msgq_for_inspection(Process *c_p,
+ Process *rp,
+ ErtsProcLocks rp_locks,
+ ErtsMessageInfo *mip);
+
+void *erts_alloc_message_ref(void);
+void erts_free_message_ref(void *);
+
+#define ERTS_SMALL_FIX_MSG_SZ 10
+#define ERTS_MEDIUM_FIX_MSG_SZ 20
+#define ERTS_LARGE_FIX_MSG_SZ 30
+
+void *erts_alloc_small_message(void);
+void erts_free_small_message(void *mp);
+typedef struct {
+ ErtsMessage m;
+ Eterm data[ERTS_SMALL_FIX_MSG_SZ-1];
+} ErtsSmallFixSzMessage;
+
+typedef struct {
+ ErtsMessage m;
+ Eterm data[ERTS_MEDIUM_FIX_MSG_SZ-1];
+} ErtsMediumFixSzMessage;
-ERTS_GLB_INLINE Uint erts_msg_used_frag_sz(const ErlMessage *msg);
-ERTS_GLB_INLINE Uint erts_msg_attached_data_size(ErlMessage *msg);
+typedef struct {
+ ErtsMessage m;
+ Eterm data[ERTS_LARGE_FIX_MSG_SZ-1];
+} ErtsLargeFixSzMessage;
+
+ErtsMessage *erts_try_alloc_message_on_heap(Process *pp,
+ erts_aint32_t *psp,
+ ErtsProcLocks *plp,
+ Uint sz,
+ Eterm **hpp,
+ ErlOffHeap **ohpp,
+ int *on_heap_p);
+ErtsMessage *erts_realloc_shrink_message(ErtsMessage *mp, Uint sz,
+ Eterm *brefs, Uint brefs_size);
+
+ERTS_GLB_FORCE_INLINE ErtsMessage *erts_alloc_message(Uint sz, Eterm **hpp);
+ERTS_GLB_FORCE_INLINE ErtsMessage *erts_shrink_message(ErtsMessage *mp, Uint sz,
+ Eterm *brefs, Uint brefs_size);
+ERTS_GLB_FORCE_INLINE void erts_free_message(ErtsMessage *mp);
+ERTS_GLB_INLINE Uint erts_used_frag_sz(const ErlHeapFragment*);
+ERTS_GLB_INLINE Uint erts_msg_attached_data_size(ErtsMessage *msg);
+ERTS_GLB_INLINE void erts_msgq_update_internal_pointers(ErlMessageQueue *msgq,
+ ErtsMessage **newpp,
+ ErtsMessage **oldpp);
+ERTS_GLB_INLINE void erts_msgq_replace_msg_ref(ErlMessageQueue *msgq,
+ ErtsMessage *newp,
+ ErtsMessage **oldpp);
+
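+/* Stored in mp->data.attached to mark that the message's heap data
+ * lives in the combined heap fragment mp->hfrag rather than in a
+ * separately allocated fragment.
+ */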
+#define ERTS_MSG_COMBINED_HFRAG ((void *) 0x1)
+
+#define erts_message_to_heap_frag(MP) \
+ (((MP)->data.attached == ERTS_MSG_COMBINED_HFRAG) ? \
+ &(MP)->hfrag : (MP)->data.heap_frag)
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-ERTS_GLB_INLINE Uint erts_msg_used_frag_sz(const ErlMessage *msg)
+
+ERTS_GLB_FORCE_INLINE ErtsMessage *erts_alloc_message(Uint sz, Eterm **hpp)
+{
+ ErtsMessage *mp;
+
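+ /* sz == 0: allocate a bare message reference with no term area;
+ * otherwise allocate the message and a combined heap fragment of
+ * sz words in one block.
+ */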
+ if (sz == 0) {
+ mp = erts_alloc_message_ref();
+ ERTS_INIT_MESSAGE(mp);
+ if (hpp)
+ *hpp = NULL;
+ return mp;
+ }
+
+ mp = erts_alloc(ERTS_ALC_T_MSG,
+ sizeof(ErtsMessage) + (sz - 1)*sizeof(Eterm));
+
+ ERTS_INIT_MESSAGE(mp);
+ mp->data.attached = ERTS_MSG_COMBINED_HFRAG;
+ ERTS_INIT_HEAP_FRAG(&mp->hfrag, sz, sz);
+
+ if (hpp)
+ *hpp = &mp->hfrag.mem[0];
+
+ return mp;
+}
+
+ERTS_GLB_FORCE_INLINE ErtsMessage *
+erts_shrink_message(ErtsMessage *mp, Uint sz, Eterm *brefs, Uint brefs_size)
+{
+ if (sz == 0) {
+ ErtsMessage *nmp;
+ if (!mp->data.attached)
+ return mp;
+ ASSERT(mp->data.attached == ERTS_MSG_COMBINED_HFRAG);
+ nmp = erts_alloc_message_ref();
+#ifdef DEBUG
+ if (brefs && brefs_size) {
+ int i;
+ for (i = 0; i < brefs_size; i++)
+ ASSERT(is_non_value(brefs[i]) || is_immed(brefs[i]));
+ }
+#endif
+ erts_free(ERTS_ALC_T_MSG, mp);
+ return nmp;
+ }
+
+ ASSERT(mp->data.attached == ERTS_MSG_COMBINED_HFRAG);
+ ASSERT(mp->hfrag.used_size >= sz);
+
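+ /* Only realloc when it would reclaim more than 1/16 of the
+ * allocated size; otherwise just record the new used size.
+ */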
+ if (sz >= (mp->hfrag.alloc_size - mp->hfrag.alloc_size / 16)) {
+ mp->hfrag.used_size = sz;
+ return mp;
+ }
+
+ return erts_realloc_shrink_message(mp, sz, brefs, brefs_size);
+}
+
+ERTS_GLB_FORCE_INLINE void erts_free_message(ErtsMessage *mp)
+{
+ if (mp->data.attached != ERTS_MSG_COMBINED_HFRAG)
+ erts_free_message_ref(mp);
+ else
+ erts_free(ERTS_ALC_T_MSG, mp);
+}
+
+ERTS_GLB_INLINE Uint erts_used_frag_sz(const ErlHeapFragment* bp)
{
- const ErlHeapFragment *bp;
Uint sz = 0;
- for (bp = msg->data.heap_frag; bp!=NULL; bp=bp->next) {
+ for ( ; bp!=NULL; bp=bp->next) {
sz += bp->used_size;
}
return sz;
}
-ERTS_GLB_INLINE Uint erts_msg_attached_data_size(ErlMessage *msg)
+ERTS_GLB_INLINE Uint erts_msg_attached_data_size(ErtsMessage *msg)
{
ASSERT(msg->data.attached);
- if (is_value(ERL_MESSAGE_TERM(msg)))
- return erts_msg_used_frag_sz(msg);
+ if (is_value(ERL_MESSAGE_TERM(msg))) {
+ ErlHeapFragment *bp;
+ bp = erts_message_to_heap_frag(msg);
+ return erts_used_frag_sz(bp);
+ }
else if (msg->data.dist_ext->heap_size < 0)
return erts_msg_attached_data_size_aux(msg);
else {
@@ -284,6 +460,40 @@ ERTS_GLB_INLINE Uint erts_msg_attached_data_size(ErlMessage *msg)
return sz;
}
}
+
+ERTS_GLB_INLINE void
+erts_msgq_update_internal_pointers(ErlMessageQueue *msgq,
+ ErtsMessage **newpp,
+ ErtsMessage **oldpp)
+{
+ if (msgq->save == oldpp)
+ msgq->save = newpp;
+ if (msgq->last == oldpp)
+ msgq->last = newpp;
+ if (msgq->saved_last == oldpp)
+ msgq->saved_last = newpp;
+}
+
+ERTS_GLB_INLINE void
+erts_msgq_replace_msg_ref(ErlMessageQueue *msgq, ErtsMessage *newp, ErtsMessage **oldpp)
+{
+ ErtsMessage *oldp = *oldpp;
+ newp->next = oldp->next;
+ erts_msgq_update_internal_pointers(msgq, &newp->next, &oldp->next);
+ *oldpp = newp;
+}
+
+#endif
+
+Uint erts_mbuf_size(Process *p);
+#if defined(DEBUG) || 0
+# define ERTS_CHK_MBUF_SZ(P) \
+ do { \
+ Uint actual_mbuf_sz__ = erts_mbuf_size((P)); \
+ ERTS_ASSERT((P)->mbuf_sz >= actual_mbuf_sz__); \
+ } while (0)
+#else
+# define ERTS_CHK_MBUF_SZ(P) ((void) 1)
#endif
#endif
diff --git a/erts/emulator/beam/erl_monitors.c b/erts/emulator/beam/erl_monitors.c
index 244a2b26db..1c840d89f6 100644
--- a/erts/emulator/beam/erl_monitors.c
+++ b/erts/emulator/beam/erl_monitors.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2004-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2004-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -23,7 +24,7 @@
* key in the monitor case and the pid of the linked process as key in the
 * link case. In lookups, the ordering of the references is somewhat special. Local
* references are strictly smaller than remote references and are sorted
- * by inlined comparision functionality. Remote references are handled by the
+ * by inlined comparison functionality. Remote references are handled by the
* usual cmp function.
* Each Monitor is tagged with different tags depending on which end of the
* monitor it is.
@@ -44,6 +45,7 @@
#include "bif.h"
#include "big.h"
#include "erl_monitors.h"
+#include "erl_bif_unique.h"
#define STACK_NEED 50
#define MAX_MONITORS 0xFFFFFFFFUL
@@ -52,7 +54,7 @@
#define DIR_RIGHT 1
#define DIR_END 2
-static erts_smp_atomic_t tot_link_lh_size;
+static erts_atomic_t tot_link_lh_size;
/* Implements the sort order in monitor trees, which is different from
the ordinary term order.
@@ -78,7 +80,24 @@ static ERTS_INLINE int cmp_mon_ref(Eterm ref1, Eterm ref2)
b2 = boxed_val(ref2);
if (is_ref_thing_header(*b1)) {
if (is_ref_thing_header(*b2)) {
- return memcmp(b1+1,b2+1,ERTS_REF_WORDS*sizeof(Uint));
+ Uint32 *num1, *num2;
+ if (is_ordinary_ref_thing(b1)) {
+ ErtsORefThing *rtp = (ErtsORefThing *) b1;
+ num1 = rtp->num;
+ }
+ else {
+ ErtsMRefThing *mrtp = (ErtsMRefThing *) b1;
+ num1 = mrtp->mb->refn;
+ }
+ if (is_ordinary_ref_thing(b2)) {
+ ErtsORefThing *rtp = (ErtsORefThing *) b2;
+ num2 = rtp->num;
+ }
+ else {
+ ErtsMRefThing *mrtp = (ErtsMRefThing *) b2;
+ num2 = mrtp->mb->refn;
+ }
+ return erts_internal_ref_number_cmp(num1, num2);
}
return -1;
}
@@ -90,13 +109,14 @@ static ERTS_INLINE int cmp_mon_ref(Eterm ref1, Eterm ref2)
#define CP_LINK_VAL(To, Hp, From) \
do { \
- if (IS_CONST(From)) \
+ if (is_immed(From)) \
(To) = (From); \
else { \
Uint i__; \
Uint len__; \
ASSERT((Hp)); \
- ASSERT(is_internal_ref((From)) || is_external((From))); \
+ ASSERT(is_internal_ordinary_ref((From)) \
+ || is_external((From))); \
(To) = make_boxed((Hp)); \
len__ = thing_arityval(*boxed_val((From))) + 1; \
for(i__ = 0; i__ < len__; i__++) \
@@ -108,15 +128,15 @@ do { \
} \
} while (0)
-static ErtsMonitor *create_monitor(Uint type, Eterm ref, Eterm pid, Eterm name)
+static ErtsMonitor *create_monitor(Uint type, Eterm ref, UWord entity, Eterm name)
{
Uint mon_size = ERTS_MONITOR_SIZE;
ErtsMonitor *n;
Eterm *hp;
mon_size += NC_HEAP_SIZE(ref);
- if (!IS_CONST(pid)) {
- mon_size += NC_HEAP_SIZE(pid);
+ if (type != MON_NIF_TARGET && is_not_immed(entity)) {
+ mon_size += NC_HEAP_SIZE(entity);
}
if (mon_size <= ERTS_MONITOR_SH_SIZE) {
@@ -125,7 +145,7 @@ static ErtsMonitor *create_monitor(Uint type, Eterm ref, Eterm pid, Eterm name)
} else {
n = (ErtsMonitor *) erts_alloc(ERTS_ALC_T_MONITOR_LH,
mon_size*sizeof(Uint));
- erts_smp_atomic_add_nob(&tot_link_lh_size, mon_size*sizeof(Uint));
+ erts_atomic_add_nob(&tot_link_lh_size, mon_size*sizeof(Uint));
}
hp = n->heap;
@@ -134,8 +154,11 @@ static ErtsMonitor *create_monitor(Uint type, Eterm ref, Eterm pid, Eterm name)
n->type = (Uint16) type;
n->balance = 0; /* Always the same initial value */
n->name = name; /* atom() or [] */
- CP_LINK_VAL(n->ref, hp, ref); /*XXX Unneccesary check, never immediate*/
- CP_LINK_VAL(n->pid, hp, pid);
+ CP_LINK_VAL(n->ref, hp, ref); /*XXX Unnecessary check, never immediate*/
+ if (type == MON_NIF_TARGET)
+ n->u.resource = (ErtsResource*)entity;
+ else
+ CP_LINK_VAL(n->u.pid, hp, (Eterm)entity);
return n;
}
@@ -146,7 +169,7 @@ static ErtsLink *create_link(Uint type, Eterm pid)
ErtsLink *n;
Eterm *hp;
- if (!IS_CONST(pid)) {
+ if (is_not_immed(pid)) {
lnk_size += NC_HEAP_SIZE(pid);
}
@@ -156,7 +179,7 @@ static ErtsLink *create_link(Uint type, Eterm pid)
} else {
n = (ErtsLink *) erts_alloc(ERTS_ALC_T_NLINK_LH,
lnk_size*sizeof(Uint));
- erts_smp_atomic_add_nob(&tot_link_lh_size, lnk_size*sizeof(Uint));
+ erts_atomic_add_nob(&tot_link_lh_size, lnk_size*sizeof(Uint));
}
hp = n->heap;
@@ -191,13 +214,13 @@ static ErtsSuspendMonitor *create_suspend_monitor(Eterm pid)
void
erts_init_monitors(void)
{
- erts_smp_atomic_init_nob(&tot_link_lh_size, 0);
+ erts_atomic_init_nob(&tot_link_lh_size, 0);
}
Uint
erts_tot_link_lh_size(void)
{
- return (Uint) erts_smp_atomic_read_nob(&tot_link_lh_size);
+ return (Uint) erts_atomic_read_nob(&tot_link_lh_size);
}
void erts_destroy_monitor(ErtsMonitor *mon)
@@ -205,16 +228,16 @@ void erts_destroy_monitor(ErtsMonitor *mon)
Uint mon_size = ERTS_MONITOR_SIZE;
ErlNode *node;
- ASSERT(!IS_CONST(mon->ref));
+ ASSERT(is_not_immed(mon->ref));
mon_size += NC_HEAP_SIZE(mon->ref);
if (is_external(mon->ref)) {
node = external_thing_ptr(mon->ref)->node;
erts_deref_node_entry(node);
}
- if (!IS_CONST(mon->pid)) {
- mon_size += NC_HEAP_SIZE(mon->pid);
- if (is_external(mon->pid)) {
- node = external_thing_ptr(mon->pid)->node;
+ if (mon->type != MON_NIF_TARGET && is_not_immed(mon->u.pid)) {
+ mon_size += NC_HEAP_SIZE(mon->u.pid);
+ if (is_external(mon->u.pid)) {
+ node = external_thing_ptr(mon->u.pid)->node;
erts_deref_node_entry(node);
}
}
@@ -222,7 +245,7 @@ void erts_destroy_monitor(ErtsMonitor *mon)
erts_free(ERTS_ALC_T_MONITOR_SH, (void *) mon);
} else {
erts_free(ERTS_ALC_T_MONITOR_LH, (void *) mon);
- erts_smp_atomic_add_nob(&tot_link_lh_size, -1*mon_size*sizeof(Uint));
+ erts_atomic_add_nob(&tot_link_lh_size, -1*mon_size*sizeof(Uint));
}
}
@@ -233,7 +256,7 @@ void erts_destroy_link(ErtsLink *lnk)
ASSERT(lnk->type == LINK_NODE || ERTS_LINK_ROOT(lnk) == NULL);
- if (!IS_CONST(lnk->pid)) {
+ if (is_not_immed(lnk->pid)) {
lnk_size += NC_HEAP_SIZE(lnk->pid);
if (is_external(lnk->pid)) {
node = external_thing_ptr(lnk->pid)->node;
@@ -244,7 +267,7 @@ void erts_destroy_link(ErtsLink *lnk)
erts_free(ERTS_ALC_T_NLINK_SH, (void *) lnk);
} else {
erts_free(ERTS_ALC_T_NLINK_LH, (void *) lnk);
- erts_smp_atomic_add_nob(&tot_link_lh_size, -1*lnk_size*sizeof(Uint));
+ erts_atomic_add_nob(&tot_link_lh_size, -1*lnk_size*sizeof(Uint));
}
}
@@ -328,7 +351,7 @@ static void insertion_rotation(int dstack[], int dpos,
}
}
-void erts_add_monitor(ErtsMonitor **root, Uint type, Eterm ref, Eterm pid,
+void erts_add_monitor(ErtsMonitor **root, Uint type, Eterm ref, UWord entity,
Eterm name)
{
void *tstack[STACK_NEED];
@@ -338,12 +361,14 @@ void erts_add_monitor(ErtsMonitor **root, Uint type, Eterm ref, Eterm pid,
int state = 0;
ErtsMonitor **this = root;
Sint c;
+
+ ASSERT(is_internal_ordinary_ref(ref) || is_external_ref(ref));
dstack[0] = DIR_END;
for (;;) {
if (!*this) { /* Found our place */
state = 1;
- *this = create_monitor(type,ref,pid,name);
+ *this = create_monitor(type,ref,entity,name);
break;
} else if ((c = CMP_MON_REF(ref,(*this)->ref)) < 0) {
/* go left */
@@ -355,7 +380,7 @@ void erts_add_monitor(ErtsMonitor **root, Uint type, Eterm ref, Eterm pid,
tstack[tpos++] = this;
this = &((*this)->right);
} else { /* Equal key is an error for monitors */
- erl_exit(1,"Insertion of already present monitor!");
+ erts_exit(ERTS_ERROR_EXIT,"Insertion of already present monitor!");
break;
}
}
@@ -913,8 +938,12 @@ static void erts_dump_monitors(ErtsMonitor *root, int indent)
if (root == NULL)
return;
erts_dump_monitors(root->right,indent+2);
- erts_printf("%*s[%b16d:%b16u:%T:%T:%T]\n", indent, "", root->balance,
- root->type, root->ref, root->pid, root->name);
+ erts_printf("%*s[%b16d:%b16u:%T:%T", indent, "", root->balance,
+ root->type, root->ref, root->name);
+ if (root->type == MON_NIF_TARGET)
+ erts_printf(":%p]\n", root->u.resource);
+ else
+ erts_printf(":%T]\n", root->u.pid);
erts_dump_monitors(root->left,indent+2);
}
@@ -956,15 +985,14 @@ Eterm erts_debug_dump_monitors_1(BIF_ALIST_1)
DistEntry *dep;
rp = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, pid, ERTS_PROC_LOCK_LINK);
if (!rp) {
- ERTS_SMP_ASSERT_IS_NOT_EXITING(p);
+ ERTS_ASSERT_IS_NOT_EXITING(p);
if (is_atom(pid) && is_node_name_atom(pid) &&
(dep = erts_find_dist_entry(pid)) != NULL) {
erts_printf("Dumping dist monitors-------------------\n");
- erts_smp_de_links_lock(dep);
+ erts_de_links_lock(dep);
erts_dump_monitors(dep->monitors,0);
- erts_smp_de_links_unlock(dep);
+ erts_de_links_unlock(dep);
erts_printf("Monitors dumped-------------------------\n");
- erts_deref_dist_entry(dep);
BIF_RET(am_true);
} else {
BIF_ERROR(p,BADARG);
@@ -973,7 +1001,7 @@ Eterm erts_debug_dump_monitors_1(BIF_ALIST_1)
erts_printf("Dumping pid monitors--------------------\n");
erts_dump_monitors(ERTS_P_MONITORS(rp),0);
erts_printf("Monitors dumped-------------------------\n");
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
BIF_RET(am_true);
}
}
@@ -1001,15 +1029,14 @@ Eterm erts_debug_dump_links_1(BIF_ALIST_1)
} else {
rp = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, pid, ERTS_PROC_LOCK_LINK);
if (!rp) {
- ERTS_SMP_ASSERT_IS_NOT_EXITING(p);
+ ERTS_ASSERT_IS_NOT_EXITING(p);
if (is_atom(pid) && is_node_name_atom(pid) &&
(dep = erts_find_dist_entry(pid)) != NULL) {
erts_printf("Dumping dist links----------------------\n");
- erts_smp_de_links_lock(dep);
+ erts_de_links_lock(dep);
erts_dump_links(dep->nlinks,0);
- erts_smp_de_links_unlock(dep);
+ erts_de_links_unlock(dep);
erts_printf("Links dumped----------------------------\n");
- erts_deref_dist_entry(dep);
BIF_RET(am_true);
} else {
BIF_ERROR(p,BADARG);
@@ -1019,7 +1046,7 @@ Eterm erts_debug_dump_links_1(BIF_ALIST_1)
erts_printf("Dumping pid links-----------------------\n");
erts_dump_links(ERTS_P_LINKS(rp), 0);
erts_printf("Links dumped----------------------------\n");
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
BIF_RET(am_true);
}
}
@@ -1029,7 +1056,7 @@ void erts_one_link_size(ErtsLink *lnk, void *vpu)
{
Uint *pu = vpu;
*pu += ERTS_LINK_SIZE*sizeof(Uint);
- if(!IS_CONST(lnk->pid))
+ if(is_not_immed(lnk->pid))
*pu += NC_HEAP_SIZE(lnk->pid)*sizeof(Uint);
if (lnk->type != LINK_NODE && ERTS_LINK_ROOT(lnk) != NULL) {
erts_doforall_links(ERTS_LINK_ROOT(lnk),&erts_one_link_size,vpu);
@@ -1039,8 +1066,8 @@ void erts_one_mon_size(ErtsMonitor *mon, void *vpu)
{
Uint *pu = vpu;
*pu += ERTS_MONITOR_SIZE*sizeof(Uint);
- if(!IS_CONST(mon->pid))
- *pu += NC_HEAP_SIZE(mon->pid)*sizeof(Uint);
- if(!IS_CONST(mon->ref))
+ if(mon->type != MON_NIF_TARGET && is_not_immed(mon->u.pid))
+ *pu += NC_HEAP_SIZE(mon->u.pid)*sizeof(Uint);
+ if(is_not_immed(mon->ref))
*pu += NC_HEAP_SIZE(mon->ref)*sizeof(Uint);
}
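
cmp_mon_ref() above now pulls the 32-bit number array out of either an ordinary
or a magic ref thing and defers to erts_internal_ref_number_cmp(), presumably
declared in erl_bif_unique.h, which this hunk newly includes. A standalone
sketch of such a fixed-length, word-by-word comparison (the word order chosen
here is an assumption, not the real function's definition):

    #include <stdint.h>
    #include <stdio.h>

    #define REF_NUMBERS 3 /* internal refs carry a fixed number of words */

    /* Hypothetical stand-in for erts_internal_ref_number_cmp(): compare the
     * ref numbers word by word, most significant word first (assumed). */
    static int ref_number_cmp(const uint32_t *a, const uint32_t *b)
    {
        int i;
        for (i = REF_NUMBERS - 1; i >= 0; i--) {
            if (a[i] != b[i])
                return a[i] < b[i] ? -1 : 1;
        }
        return 0;
    }

    int main(void)
    {
        uint32_t r1[REF_NUMBERS] = {1, 2, 3};
        uint32_t r2[REF_NUMBERS] = {1, 2, 4};
        printf("%d\n", ref_number_cmp(r1, r2)); /* -1 */
        return 0;
    }
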
diff --git a/erts/emulator/beam/erl_monitors.h b/erts/emulator/beam/erl_monitors.h
index fb11dbbd22..1cacecd7e9 100644
--- a/erts/emulator/beam/erl_monitors.h
+++ b/erts/emulator/beam/erl_monitors.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2004-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2004-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -81,7 +82,9 @@
/* Type tags for monitors */
#define MON_ORIGIN 1
-#define MON_TARGET 3
+#define MON_TARGET 2
+#define MON_NIF_TARGET 3
+#define MON_TIME_OFFSET 4
/* Type tags for links */
#define LINK_PID 1 /* ...Or port */
@@ -89,7 +92,7 @@
/* Size of a monitor without heap, in words (fixalloc) */
#define ERTS_MONITOR_SIZE ((sizeof(ErtsMonitor) - sizeof(Uint))/sizeof(Uint))
-#define ERTS_MONITOR_SH_SIZE (ERTS_MONITOR_SIZE + REF_THING_SIZE)
+#define ERTS_MONITOR_SH_SIZE (ERTS_MONITOR_SIZE + ERTS_REF_THING_SIZE)
#define ERTS_LINK_SIZE ((sizeof(ErtsLink) - sizeof(Uint))/sizeof(Uint))
#define ERTS_LINK_SH_SIZE ERTS_LINK_SIZE /* Size of fix-alloced links */
@@ -103,11 +106,15 @@ typedef struct erts_monitor_or_link {
typedef struct erts_monitor {
struct erts_monitor *left, *right;
Sint16 balance;
- Uint16 type; /* MON_ORIGIN | MON_TARGET */
+ Uint16 type; /* MON_ORIGIN | MON_TARGET | MON_NIF_TARGET | MON_TIME_OFFSET */
Eterm ref;
- Eterm pid; /* In case of distributed named monitor, this is the
- nodename atom in MON_ORIGIN process, otherwise a pid or
- , in case of a MON_TARGET, a port */
+ union {
+ Eterm pid; /* In case of distributed named monitor, this is the
+ * nodename atom in MON_ORIGIN process, otherwise a pid or,
+ * in case of a MON_TARGET, a port
+ */
+ struct ErtsResource_* resource; /* MON_NIF_TARGET */
+ }u;
Eterm name; /* When monitoring a named process: atom() else [] */
Uint heap[1]; /* Larger in reality */
} ErtsMonitor;
@@ -142,7 +149,7 @@ Uint erts_tot_link_lh_size(void);
/* Prototypes */
void erts_destroy_monitor(ErtsMonitor *mon);
-void erts_add_monitor(ErtsMonitor **root, Uint type, Eterm ref, Eterm pid,
+void erts_add_monitor(ErtsMonitor **root, Uint type, Eterm ref, UWord entity,
Eterm name);
ErtsMonitor *erts_remove_monitor(ErtsMonitor **root, Eterm ref);
ErtsMonitor *erts_lookup_monitor(ErtsMonitor *root, Eterm ref);
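
The ErtsMonitor struct above replaces the plain pid field with a union
discriminated by the type field: MON_NIF_TARGET entries carry a resource
pointer while all other monitor types carry an Eterm. A self-contained sketch
of that tagged-union pattern (simplified stand-in types, not the real ERTS
definitions):

    #include <stdio.h>

    #define MON_ORIGIN     1
    #define MON_TARGET     2
    #define MON_NIF_TARGET 3

    struct resource; /* opaque, like struct ErtsResource_ */

    typedef struct {
        unsigned short type; /* selects which union member is valid */
        union {
            unsigned long pid;         /* stand-in for an Eterm pid/port */
            struct resource *resource; /* only when type == MON_NIF_TARGET */
        } u;
    } monitor;

    static void print_entity(const monitor *m)
    {
        if (m->type == MON_NIF_TARGET)
            printf("resource at %p\n", (void *) m->u.resource);
        else
            printf("pid %lu\n", m->u.pid);
    }

    int main(void)
    {
        monitor m = { MON_TARGET, { .pid = 42 } };
        print_entity(&m);
        return 0;
    }
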
diff --git a/erts/emulator/beam/erl_msacc.c b/erts/emulator/beam/erl_msacc.c
new file mode 100644
index 0000000000..d659842b7e
--- /dev/null
+++ b/erts/emulator/beam/erl_msacc.c
@@ -0,0 +1,468 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2014-2017. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Microstate accounting.
+ *
+ * We keep track of the different states that the
+ * Erlang VM threads are in, in order to provide
+ * performance/debugging statistics. There is a
+ * small overhead in enabling this, but in the big
+ * scheme of things it should be negligible.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#define ERTS_MSACC_STATE_STRINGS 1
+
+#include "sys.h"
+#include "global.h"
+#include "erl_threads.h"
+#include "erl_bif_unique.h"
+#include "erl_map.h"
+#include "erl_msacc.h"
+#include "erl_bif_table.h"
+
+#if ERTS_ENABLE_MSACC
+
+static Eterm erts_msacc_gather_stats(ErtsMsAcc *msacc, ErtsHeapFactory *factory);
+static void erts_msacc_reset(ErtsMsAcc *msacc);
+static ErtsMsAcc* get_msacc(void);
+
+erts_tsd_key_t ERTS_WRITE_UNLIKELY(erts_msacc_key);
+#ifndef ERTS_MSACC_ALWAYS_ON
+int ERTS_WRITE_UNLIKELY(erts_msacc_enabled);
+#endif
+
+static Eterm *erts_msacc_state_atoms = NULL;
+static erts_rwmtx_t msacc_mutex;
+static ErtsMsAcc *msacc_managed = NULL;
+static ErtsMsAcc *msacc_unmanaged = NULL;
+static Uint msacc_unmanaged_count = 0;
+
+#if ERTS_MSACC_STATE_COUNT < MAP_SMALL_MAP_LIMIT
+#define DEFAULT_MSACC_MSG_SIZE (3 + 1 + ERTS_MSACC_STATE_COUNT * 2 + 3 + ERTS_REF_THING_SIZE)
+#else
+#define DEFAULT_MSACC_MSG_SIZE (3 + ERTS_MSACC_STATE_COUNT * 3 + 3 + ERTS_REF_THING_SIZE)
+#endif
+
+/* we have to split initialization as atoms are not initialized in early init */
+void erts_msacc_early_init(void) {
+#ifndef ERTS_MSACC_ALWAYS_ON
+ erts_msacc_enabled = 0;
+#endif
+ erts_rwmtx_init(&msacc_mutex, "msacc_list_mutex", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
+ erts_tsd_key_create(&erts_msacc_key,"erts_msacc_key");
+}
+
+void erts_msacc_init(void) {
+ int i;
+ erts_msacc_state_atoms = erts_alloc(ERTS_ALC_T_MSACC,
+ sizeof(Eterm)*ERTS_MSACC_STATE_COUNT);
+ for (i = 0; i < ERTS_MSACC_STATE_COUNT; i++) {
+ erts_msacc_state_atoms[i] = am_atom_put(erts_msacc_states[i],
+ strlen(erts_msacc_states[i]));
+ }
+}
+
+void erts_msacc_init_thread(char *type, int id, int managed) {
+ ErtsMsAcc *msacc;
+
+ msacc = erts_alloc(ERTS_ALC_T_MSACC, sizeof(ErtsMsAcc) +
+ sizeof(ErtsMsAccPerfCntr) * ERTS_MSACC_STATE_COUNT);
+
+ msacc->type = strdup(type);
+ msacc->id = make_small(id);
+ msacc->unmanaged = !managed;
+ msacc->tid = erts_thr_self();
+ msacc->perf_counter = 0;
+
+ erts_rwmtx_rwlock(&msacc_mutex);
+ if (!managed) {
+ erts_mtx_init(&msacc->mtx, "msacc_unmanaged_mutex", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
+ msacc->next = msacc_unmanaged;
+ msacc_unmanaged = msacc;
+ msacc_unmanaged_count++;
+ ERTS_MSACC_TSD_SET(msacc);
+ } else {
+ msacc->next = msacc_managed;
+ msacc_managed = msacc;
+ }
+ erts_rwmtx_rwunlock(&msacc_mutex);
+
+ erts_msacc_reset(msacc);
+
+#ifdef ERTS_MSACC_ALWAYS_ON
+ ERTS_MSACC_TSD_SET(msacc);
+ msacc->perf_counter = erts_sys_perf_counter();
+ msacc->state = ERTS_MSACC_STATE_OTHER;
+#endif
+}
+
+#ifdef ERTS_MSACC_EXTENDED_STATES
+
+void erts_msacc_set_bif_state(ErtsMsAcc *__erts_msacc_cache, Eterm mod, void *fn) {
+
+#ifdef ERTS_MSACC_EXTENDED_BIFS
+#define BIF_LIST(Mod,Func,Arity,BifFuncAddr,FuncAddr,Num) \
+ if (fn == &BifFuncAddr) { \
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATIC_STATE_COUNT + Num); \
+ } else
+#include "erl_bif_list.h"
+#undef BIF_LIST
+ { /* The last else in the macro expansion;
+ this happens for internal BIFs, e.g. traps */
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_BIF);
+ }
+#else
+ if (mod == am_ets) {
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_ETS);
+ } else {
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_BIF);
+ }
+#endif
+}
+
+#endif
+
+/*
+ * Creates a structure looking like this
+ * #{ type => scheduler, id => 1, counters => #{ State1 => Counter1 ... StateN => CounterN}}
+ */
+static
+Eterm erts_msacc_gather_stats(ErtsMsAcc *msacc, ErtsHeapFactory *factory) {
+ Uint sz = 0;
+ Eterm *hp, cvs[ERTS_MSACC_STATE_COUNT];
+ Eterm key, state_map;
+ int i;
+ flatmap_t *map;
+
+ hp = erts_produce_heap(factory, 4, 0);
+ key = TUPLE3(hp,am_counters,am_id,am_type);
+
+ for (i = 0; i < ERTS_MSACC_STATE_COUNT; i++) {
+ cvs[i] = erts_bld_sint64(NULL, &sz,(Sint64)msacc->counters[i].pc);
+#ifdef ERTS_MSACC_STATE_COUNTERS
+ erts_bld_uint64(NULL,&sz,msacc->counters[i].sc);
+ sz += 3;
+#endif
+ }
+
+ hp = erts_produce_heap(factory, sz, 0);
+
+ for (i = 0; i < ERTS_MSACC_STATE_COUNT; i++) {
+ cvs[i] = erts_bld_sint64(&hp,NULL,(Sint64)msacc->counters[i].pc);
+#ifdef ERTS_MSACC_STATE_COUNTERS
+ Eterm counter__ = erts_bld_uint64(&hp,NULL,msacc->counters[i].sc);
+ cvs[i] = TUPLE2(hp,cvs[i],counter__);
+ hp += 3;
+#endif
+ }
+
+ state_map = erts_map_from_ks_and_vs(factory, erts_msacc_state_atoms, cvs,
+ ERTS_MSACC_STATE_COUNT);
+
+ hp = erts_produce_heap(factory, MAP_HEADER_FLATMAP_SZ + 3, 0);
+ map = (flatmap_t*)hp;
+ hp += MAP_HEADER_FLATMAP_SZ;
+ map->thing_word = MAP_HEADER_FLATMAP;
+ map->size = 3;
+ map->keys = key;
+ hp[0] = state_map;
+ hp[1] = msacc->id;
+ hp[2] = am_atom_put(msacc->type,strlen(msacc->type));
+
+ return make_flatmap(map);
+}
+
+typedef struct {
+ int action;
+ Process *proc;
+ Eterm ref;
+ Eterm ref_heap[ERTS_REF_THING_SIZE];
+ Uint req_sched;
+ erts_atomic32_t refc;
+} ErtsMSAccReq;
+
+static ErtsMsAcc* get_msacc(void) {
+ ErtsMsAcc *msacc;
+ erts_rwmtx_rlock(&msacc_mutex);
+ msacc = msacc_managed;
+ while (!erts_equal_tids(msacc->tid,erts_thr_self())) {
+ msacc = msacc->next;
+ ASSERT(msacc != NULL);
+ }
+ erts_rwmtx_runlock(&msacc_mutex);
+ return msacc;
+}
+
+static void send_reply(ErtsMsAcc *msacc, ErtsMSAccReq *msaccrp) {
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ Process *rp = msaccrp->proc;
+ ErtsMessage *msgp = NULL;
+ Eterm *hp;
+ Eterm ref_copy = NIL, msg;
+ ErtsProcLocks rp_locks = (esdp && msaccrp->req_sched == esdp->no
+ ? ERTS_PROC_LOCK_MAIN : 0);
+ ErtsHeapFactory factory;
+
+ if (msaccrp->action == ERTS_MSACC_GATHER) {
+
+ msgp = erts_factory_message_create(&factory, rp, &rp_locks, DEFAULT_MSACC_MSG_SIZE);
+
+ if (msacc->unmanaged) erts_mtx_lock(&msacc->mtx);
+
+ hp = erts_produce_heap(&factory, ERTS_REF_THING_SIZE + 3 /* tuple */, 0);
+ ref_copy = STORE_NC(&hp, &msgp->hfrag.off_heap, msaccrp->ref);
+ msg = erts_msacc_gather_stats(msacc, &factory);
+ msg = TUPLE2(hp, ref_copy, msg);
+
+ if (msacc->unmanaged) erts_mtx_unlock(&msacc->mtx);
+
+ erts_factory_close(&factory);
+ } else {
+ ErlOffHeap *ohp = NULL;
+ msgp = erts_alloc_message_heap(rp, &rp_locks, ERTS_REF_THING_SIZE, &hp, &ohp);
+ msg = STORE_NC(&hp, &msgp->hfrag.off_heap, msaccrp->ref);
+ }
+
+ erts_queue_message(rp, rp_locks, msgp, msg, am_system);
+
+ if (esdp && msaccrp->req_sched == esdp->no)
+ rp_locks &= ~ERTS_PROC_LOCK_MAIN;
+
+ if (rp_locks)
+ erts_proc_unlock(rp, rp_locks);
+
+}
+
+static void
+reply_msacc(void *vmsaccrp)
+{
+ ErtsMsAcc *msacc = ERTS_MSACC_TSD_GET();
+ ErtsMSAccReq *msaccrp = (ErtsMSAccReq *) vmsaccrp;
+
+ ASSERT(!msacc || !msacc->unmanaged);
+
+ if (msaccrp->action == ERTS_MSACC_ENABLE && !msacc) {
+ msacc = get_msacc();
+
+ msacc->perf_counter = erts_sys_perf_counter();
+
+ msacc->state = ERTS_MSACC_STATE_OTHER;
+
+ ERTS_MSACC_TSD_SET(msacc);
+
+ } else if (msaccrp->action == ERTS_MSACC_DISABLE && msacc) {
+ ERTS_MSACC_TSD_SET(NULL);
+ } else if (msaccrp->action == ERTS_MSACC_RESET) {
+ msacc = msacc ? msacc : get_msacc();
+ erts_msacc_reset(msacc);
+ } else if (msaccrp->action == ERTS_MSACC_GATHER && !msacc) {
+ msacc = get_msacc();
+ }
+
+ ASSERT(!msacc || !msacc->unmanaged);
+
+ send_reply(msacc, msaccrp);
+
+ erts_proc_dec_refc(msaccrp->proc);
+
+ if (erts_atomic32_dec_read_nob(&msaccrp->refc) == 0)
+ erts_free(ERTS_ALC_T_MSACC, vmsaccrp);
+}
+
+static void erts_msacc_reset(ErtsMsAcc *msacc) {
+ int i;
+ if (msacc->unmanaged) erts_mtx_lock(&msacc->mtx);
+
+ for (i = 0; i < ERTS_MSACC_STATE_COUNT; i++) {
+ msacc->counters[i].pc = 0;
+#ifdef ERTS_MSACC_STATE_COUNTERS
+ msacc->counters[i].sc = 0;
+#endif
+ }
+
+ if (msacc->unmanaged) erts_mtx_unlock(&msacc->mtx);
+}
+
+#endif /* ERTS_ENABLE_MSACC */
+
+
+/*
+ * This function is responsible for enabling, disabling, resetting and
+ * gathering data related to microstate accounting.
+ *
+ * Managed threads and unmanaged threads are handled differently.
+ * - managed threads get a misc_aux job telling them to switch on msacc
+ * - unmanaged have some fields protected by a mutex that has to be taken
+ * before any values can be updated
+ *
+ * For performance reasons there is also a global value erts_msacc_enabled
+ * that controls the state of all threads. Statistics gathering is only on
+ * if erts_msacc_enabled && msacc is true.
+ */
+Eterm
+erts_msacc_request(Process *c_p, int action, Eterm *threads)
+{
+#ifdef ERTS_ENABLE_MSACC
+ ErtsMsAcc *msacc = ERTS_MSACC_TSD_GET();
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
+ Eterm ref;
+ ErtsMSAccReq *msaccrp;
+ Eterm *hp;
+
+
+#ifdef ERTS_MSACC_ALWAYS_ON
+ if (action == ERTS_MSACC_ENABLE || action == ERTS_MSACC_DISABLE)
+ return THE_NON_VALUE;
+#else
+ /* take care of double enable, and double disable here */
+ if (msacc && action == ERTS_MSACC_ENABLE) {
+ return THE_NON_VALUE;
+ } else if (!msacc && action == ERTS_MSACC_DISABLE) {
+ return THE_NON_VALUE;
+ }
+#endif
+
+ ref = erts_make_ref(c_p);
+
+ msaccrp = erts_alloc(ERTS_ALC_T_MSACC, sizeof(ErtsMSAccReq));
+ hp = &msaccrp->ref_heap[0];
+
+ msaccrp->action = action;
+ msaccrp->proc = c_p;
+ msaccrp->ref = STORE_NC(&hp, NULL, ref);
+ msaccrp->req_sched = esdp->no;
+
+ *threads = erts_no_schedulers;
+ *threads += 1; /* aux thread */
+
+ erts_atomic32_init_nob(&msaccrp->refc,(erts_aint32_t)*threads);
+
+ erts_proc_add_refc(c_p, *threads);
+
+ if (erts_no_schedulers > 1)
+ erts_schedule_multi_misc_aux_work(1,
+ erts_no_schedulers,
+ reply_msacc,
+ (void *) msaccrp);
+ /* aux thread */
+ erts_schedule_misc_aux_work(0, reply_msacc, (void *) msaccrp);
+
+ /* Manage unmanaged threads */
+ switch (action) {
+ case ERTS_MSACC_GATHER: {
+ Uint unmanaged_count;
+ ErtsMsAcc *msacc, **unmanaged;
+ int i = 0;
+
+ /* we copy a list of pointers here so that we do not have to hold
+ the msacc_mutex while sending messages */
+ erts_rwmtx_rlock(&msacc_mutex);
+ unmanaged_count = msacc_unmanaged_count;
+ unmanaged = erts_alloc(ERTS_ALC_T_MSACC,
+ sizeof(ErtsMsAcc*)*unmanaged_count);
+
+ for (i = 0, msacc = msacc_unmanaged;
+ i < unmanaged_count;
+ i++, msacc = msacc->next) {
+ unmanaged[i] = msacc;
+ }
+ erts_rwmtx_runlock(&msacc_mutex);
+
+ for (i = 0; i < unmanaged_count; i++) {
+ erts_mtx_lock(&unmanaged[i]->mtx);
+ if (unmanaged[i]->perf_counter) {
+ ErtsSysPerfCounter perf_counter;
+ /* if enabled update stats */
+ perf_counter = erts_sys_perf_counter();
+ unmanaged[i]->counters[unmanaged[i]->state].pc +=
+ perf_counter - unmanaged[i]->perf_counter;
+ unmanaged[i]->perf_counter = perf_counter;
+ }
+ erts_mtx_unlock(&unmanaged[i]->mtx);
+ send_reply(unmanaged[i],msaccrp);
+ }
+ erts_free(ERTS_ALC_T_MSACC,unmanaged);
+ /* We have just sent unmanaged_count messages, so bump no of threads */
+ *threads += unmanaged_count;
+ break;
+ }
+ case ERTS_MSACC_RESET: {
+ ErtsMsAcc *msacc;
+ erts_rwmtx_rlock(&msacc_mutex);
+ for (msacc = msacc_unmanaged; msacc != NULL; msacc = msacc->next)
+ erts_msacc_reset(msacc);
+ erts_rwmtx_runlock(&msacc_mutex);
+ break;
+ }
+ case ERTS_MSACC_ENABLE: {
+ erts_rwmtx_rlock(&msacc_mutex);
+ for (msacc = msacc_unmanaged; msacc != NULL; msacc = msacc->next) {
+ erts_mtx_lock(&msacc->mtx);
+ msacc->perf_counter = erts_sys_perf_counter();
+ /* we assume the unmanaged thread is sleeping */
+ msacc->state = ERTS_MSACC_STATE_SLEEP;
+ erts_mtx_unlock(&msacc->mtx);
+ }
+ erts_rwmtx_runlock(&msacc_mutex);
+ break;
+ }
+ case ERTS_MSACC_DISABLE: {
+ ErtsSysPerfCounter perf_counter;
+ erts_rwmtx_rlock(&msacc_mutex);
+ /* make sure to update stats with latest results */
+ for (msacc = msacc_unmanaged; msacc != NULL; msacc = msacc->next) {
+ erts_mtx_lock(&msacc->mtx);
+ perf_counter = erts_sys_perf_counter();
+ msacc->counters[msacc->state].pc += perf_counter - msacc->perf_counter;
+ msacc->perf_counter = 0;
+ erts_mtx_unlock(&msacc->mtx);
+ }
+ erts_rwmtx_runlock(&msacc_mutex);
+ break;
+ }
+ default: { ASSERT(0); }
+ }
+
+
+ *threads = make_small(*threads);
+
+ reply_msacc((void *) msaccrp);
+
+#ifndef ERTS_MSACC_ALWAYS_ON
+ /* enable/disable the global value */
+ if (action == ERTS_MSACC_ENABLE) {
+ erts_msacc_enabled = 1;
+ } else if (action == ERTS_MSACC_DISABLE) {
+ erts_msacc_enabled = 0;
+ }
+#endif
+
+ return ref;
+#else
+ return THE_NON_VALUE;
+#endif
+}
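
The update step used when gathering or disabling stats above charges the
perf-counter delta since the last state switch to the currently recorded state,
then moves the counter forward. A runnable standalone analogue of that
accounting step, with clock_gettime() standing in for erts_sys_perf_counter()
(state names and sizes are illustrative):

    #define _POSIX_C_SOURCE 199309L
    #include <stdio.h>
    #include <time.h>

    enum { STATE_EMULATOR, STATE_GC, STATE_SLEEP, STATE_COUNT };

    static long long counters[STATE_COUNT];
    static long long perf_counter; /* counter value at the last switch */
    static int state = STATE_EMULATOR;

    static long long now(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    }

    static void set_state(int new_state)
    {
        long long t = now();
        counters[state] += t - perf_counter; /* charge the old state */
        perf_counter = t;
        state = new_state;
    }

    int main(void)
    {
        perf_counter = now();
        set_state(STATE_GC);       /* emulator time ends here */
        set_state(STATE_EMULATOR); /* gc time ends here */
        printf("emulator=%lld gc=%lld\n",
               counters[STATE_EMULATOR], counters[STATE_GC]);
        return 0;
    }
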
diff --git a/erts/emulator/beam/erl_msacc.h b/erts/emulator/beam/erl_msacc.h
new file mode 100644
index 0000000000..2588dec903
--- /dev/null
+++ b/erts/emulator/beam/erl_msacc.h
@@ -0,0 +1,439 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2014-2016. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERL_MSACC_H__
+#define ERL_MSACC_H__
+
+/* Can be enabled/disabled via configure */
+#if defined(ERTS_ENABLE_MSACC) && ERTS_ENABLE_MSACC == 2
+#define ERTS_MSACC_EXTENDED_STATES 1
+#endif
+
+/* Uncomment this to also count the number of
+ transitions to a state. This will add a count
+ to the counter map. */
+/* #define ERTS_MSACC_STATE_COUNTERS 1 */
+
+/* Uncomment this to make msacc always be on;
+ this reduces overhead a little bit when profiling */
+/* #define ERTS_MSACC_ALWAYS_ON 1 */
+
+/* Uncomment this to keep individual stats for all
+ of the bifs when extended states is enabled */
+/* #define ERTS_MSACC_EXTENDED_BIFS 1 */
+
+#define ERTS_MSACC_DISABLE 0
+#define ERTS_MSACC_ENABLE 1
+#define ERTS_MSACC_RESET 2
+#define ERTS_MSACC_GATHER 3
+
+/*
+ * When adding a new state, you have to:
+ * * Add it here
+ * * Increment ERTS_MSACC_STATE_COUNT
+ * * Add its string value to erts_msacc_states
+ * (the strings have to be in alphabetical order!)
+ * * Only add states to the non-extended section after
+ * careful benchmarking to make sure the overhead
+ * when disabled is minimal.
+ */
+
+#ifndef ERTS_MSACC_EXTENDED_STATES
+#define ERTS_MSACC_STATE_AUX 0
+#define ERTS_MSACC_STATE_CHECK_IO 1
+#define ERTS_MSACC_STATE_EMULATOR 2
+#define ERTS_MSACC_STATE_GC 3
+#define ERTS_MSACC_STATE_OTHER 4
+#define ERTS_MSACC_STATE_PORT 5
+#define ERTS_MSACC_STATE_SLEEP 6
+
+#define ERTS_MSACC_STATE_COUNT 7
+
+#if defined(ERTS_MSACC_STATE_STRINGS) && defined(ERTS_ENABLE_MSACC)
+static char *erts_msacc_states[] = {
+ "aux",
+ "check_io",
+ "emulator",
+ "gc",
+ "other",
+ "port",
+ "sleep"
+};
+#endif
+
+#else
+
+#define ERTS_MSACC_STATE_ALLOC 0
+#define ERTS_MSACC_STATE_AUX 1
+#define ERTS_MSACC_STATE_BIF 2
+#define ERTS_MSACC_STATE_BUSY_WAIT 3
+#define ERTS_MSACC_STATE_CHECK_IO 4
+#define ERTS_MSACC_STATE_EMULATOR 5
+#define ERTS_MSACC_STATE_ETS 6
+#define ERTS_MSACC_STATE_GC 7
+#define ERTS_MSACC_STATE_GC_FULL 8
+#define ERTS_MSACC_STATE_NIF 9
+#define ERTS_MSACC_STATE_OTHER 10
+#define ERTS_MSACC_STATE_PORT 11
+#define ERTS_MSACC_STATE_SEND 12
+#define ERTS_MSACC_STATE_SLEEP 13
+#define ERTS_MSACC_STATE_TIMERS 14
+
+#define ERTS_MSACC_STATIC_STATE_COUNT 15
+
+#ifdef ERTS_MSACC_EXTENDED_BIFS
+#define ERTS_MSACC_STATE_COUNT (ERTS_MSACC_STATIC_STATE_COUNT + BIF_SIZE)
+#else
+#define ERTS_MSACC_STATE_COUNT ERTS_MSACC_STATIC_STATE_COUNT
+#endif
+
+#ifdef ERTS_MSACC_STATE_STRINGS
+static char *erts_msacc_states[] = {
+ "alloc",
+ "aux",
+ "bif",
+ "busy_wait",
+ "check_io",
+ "emulator",
+ "ets",
+ "gc",
+ "gc_full",
+ "nif",
+ "other",
+ "port",
+ "send",
+ "sleep",
+ "timers"
+#ifdef ERTS_MSACC_EXTENDED_BIFS
+#define BIF_LIST(Mod,Func,Arity,BifFuncAddr,FuncAddr,Num) \
+ ,"bif_" #Mod "_" #Func "_" #Arity
+#include "erl_bif_list.h"
+#undef BIF_LIST
+#endif
+};
+#endif
+
+#endif
+
+typedef struct erl_msacc_t_ ErtsMsAcc;
+typedef struct erl_msacc_p_cnt_t_ {
+ ErtsSysPerfCounter pc;
+#ifdef ERTS_MSACC_STATE_COUNTERS
+ Uint64 sc;
+#endif
+} ErtsMsAccPerfCntr;
+
+struct erl_msacc_t_ {
+
+ /* protected by msacc_mutex in erl_msacc.c, and should be constant */
+ int unmanaged;
+ erts_mtx_t mtx;
+ ErtsMsAcc *next;
+ erts_tid_t tid;
+ Eterm id;
+ char *type;
+
+ /* the values below are protected by mtx iff unmanaged = 1 */
+ ErtsSysPerfCounter perf_counter;
+ Uint state;
+ ErtsMsAccPerfCntr counters[];
+
+};
+
+#ifdef ERTS_ENABLE_MSACC
+
+extern erts_tsd_key_t erts_msacc_key;
+
+#ifdef ERTS_MSACC_ALWAYS_ON
+#define erts_msacc_enabled 1
+#else
+extern int erts_msacc_enabled;
+#endif
+
+#define ERTS_MSACC_TSD_GET() erts_tsd_get(erts_msacc_key)
+#define ERTS_MSACC_TSD_SET(tsd) erts_tsd_set(erts_msacc_key,tsd)
+
+void erts_msacc_early_init(void);
+void erts_msacc_init(void);
+void erts_msacc_init_thread(char *type, int id, int liberty);
+
+/* The defines below are used to instrument the vm code
+ * with different state changes. There are two variants
+ * of each define. One that has a cached ErtsMsAcc *
+ * that it can use, and one that does not.
+ * The cached values are necessary to have in order to get
+ * low enough overhead when running without msacc enabled.
+ *
+ * The two most common patterns to use the function with are:
+ *
+ * ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_NEWSTATE);
+ * ... call other function in new state ...
+ * ERTS_MSACC_POP_STATE();
+ *
+ * Note that the erts_msacc_push* macros declare new variables, so
+ * to conform with C89 we have to invoke them at the beginning of a function.
+ * We might not want to change state at the beginning though, so we use this:
+ *
+ * ERTS_MSACC_PUSH_STATE();
+ * ... some other code ...
+ * ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_NEWSTATE);
+ * ... call other function in new state ...
+ * ERTS_MSACC_POP_STATE();
+ *
+ * Notice that we used the cached version of set_state, as push_state already
+ * read erts_msacc_enabled into the cache.
+ *
+ * Most macros also have other variants with the suffix _m which means that
+ * they are known to only be called in managed threads, or with the _x suffix
+ * which means that it should only be used in an emulator compiled with
+ * extended states.
+ *
+ * Here is a listing of the entire api:
+ *
+ * void ERTS_MSACC_DECLARE_CACHE()
+ * void ERTS_MSACC_UPDATE_CACHE()
+ * void ERTS_MSACC_IS_ENABLED()
+ * void ERTS_MSACC_IS_ENABLED_CACHED()
+ *
+ * void ERTS_MSACC_PUSH_STATE()
+ * void ERTS_MSACC_SET_STATE(int state)
+ * void ERTS_MSACC_PUSH_AND_SET_STATE(int state)
+ *
+ * void ERTS_MSACC_PUSH_STATE_CACHED()
+ * void ERTS_MSACC_SET_STATE_CACHED(int state)
+ * void ERTS_MSACC_PUSH_AND_SET_STATE_CACHED(int state)
+ * void ERTS_MSACC_POP_STATE()
+ *
+ * void ERTS_MSACC_PUSH_STATE_M()
+ * void ERTS_MSACC_PUSH_STATE_CACHED_M()
+ * void ERTS_MSACC_SET_STATE_CACHED_M(int state)
+ * void ERTS_MSACC_SET_STATE_M(int state)
+ * void ERTS_MSACC_POP_STATE_M()
+ * void ERTS_MSACC_PUSH_AND_SET_STATE_M(int state)
+ *
+ * Most macros are also available with an _x suffix; those are only enabled
+ * when using the extended states. If one you need is missing, just add it
+ * to the end of this file.
+ */
+
+/* cache handling functions */
+#define ERTS_MSACC_IS_ENABLED() ERTS_UNLIKELY(erts_msacc_enabled)
+#define ERTS_MSACC_DECLARE_CACHE() \
+ ErtsMsAcc *ERTS_MSACC_UPDATE_CACHE(); \
+ ERTS_DECLARE_DUMMY(Uint __erts_msacc_state) = ERTS_MSACC_STATE_OTHER;
+#define ERTS_MSACC_IS_ENABLED_CACHED() ERTS_UNLIKELY(__erts_msacc_cache != NULL)
+#define ERTS_MSACC_UPDATE_CACHE() \
+ __erts_msacc_cache = erts_msacc_enabled ? ERTS_MSACC_TSD_GET() : NULL
+
+
+/* The defines below implicitly declare and load a new cache */
+#define ERTS_MSACC_PUSH_STATE() \
+ ERTS_MSACC_DECLARE_CACHE(); \
+ ERTS_MSACC_PUSH_STATE_CACHED()
+#define ERTS_MSACC_SET_STATE(state) \
+ ERTS_MSACC_DECLARE_CACHE(); \
+ ERTS_MSACC_SET_STATE_CACHED(state)
+#define ERTS_MSACC_PUSH_AND_SET_STATE(state) \
+ ERTS_MSACC_PUSH_STATE(); ERTS_MSACC_SET_STATE_CACHED(state)
+
+/* The defines below need an already declared cache to work */
+#define ERTS_MSACC_PUSH_STATE_CACHED() \
+ __erts_msacc_state = ERTS_MSACC_IS_ENABLED_CACHED() ? \
+ erts_msacc_get_state_um__(__erts_msacc_cache) : ERTS_MSACC_STATE_OTHER
+#define ERTS_MSACC_SET_STATE_CACHED(state) \
+ if (ERTS_MSACC_IS_ENABLED_CACHED()) \
+ erts_msacc_set_state_um__(__erts_msacc_cache, state, 1)
+#define ERTS_MSACC_PUSH_AND_SET_STATE_CACHED(state) \
+ ERTS_MSACC_PUSH_STATE_CACHED(); ERTS_MSACC_SET_STATE_CACHED(state)
+#define ERTS_MSACC_POP_STATE() \
+ if (ERTS_MSACC_IS_ENABLED_CACHED()) \
+ erts_msacc_set_state_um__(__erts_msacc_cache, __erts_msacc_state, 0)
+
+/* Only use these defines when we know that we are in a managed thread */
+#define ERTS_MSACC_PUSH_STATE_M() \
+ ERTS_MSACC_DECLARE_CACHE(); \
+ ERTS_MSACC_PUSH_STATE_CACHED_M()
+#define ERTS_MSACC_PUSH_STATE_CACHED_M() \
+ do { \
+ if (ERTS_MSACC_IS_ENABLED_CACHED()) { \
+ ASSERT(!__erts_msacc_cache->unmanaged); \
+ __erts_msacc_state = erts_msacc_get_state_m__(__erts_msacc_cache); \
+ } else { \
+ __erts_msacc_state = ERTS_MSACC_STATE_OTHER; \
+ } \
+ } while(0)
+#define ERTS_MSACC_SET_STATE_M(state) \
+ ERTS_MSACC_DECLARE_CACHE(); \
+ ERTS_MSACC_SET_STATE_CACHED_M(state)
+#define ERTS_MSACC_SET_STATE_CACHED_M(state) \
+ do { \
+ if (ERTS_MSACC_IS_ENABLED_CACHED()) { \
+ ASSERT(!__erts_msacc_cache->unmanaged); \
+ erts_msacc_set_state_m__(__erts_msacc_cache, state, 1); \
+ } \
+ } while(0)
+#define ERTS_MSACC_POP_STATE_M() \
+ do { \
+ if (ERTS_MSACC_IS_ENABLED_CACHED()) { \
+ ASSERT(!__erts_msacc_cache->unmanaged); \
+ erts_msacc_set_state_m__(__erts_msacc_cache, __erts_msacc_state, 0); \
+ } \
+ } while(0)
+#define ERTS_MSACC_PUSH_AND_SET_STATE_M(state) \
+ ERTS_MSACC_PUSH_STATE_M(); ERTS_MSACC_SET_STATE_CACHED_M(state)
+
+ERTS_GLB_INLINE
+void erts_msacc_set_state_um__(ErtsMsAcc *msacc,Uint state,int increment);
+ERTS_GLB_INLINE
+void erts_msacc_set_state_m__(ErtsMsAcc *msacc,Uint state,int increment);
+
+ERTS_GLB_INLINE
+Uint erts_msacc_get_state_um__(ErtsMsAcc *msacc);
+ERTS_GLB_INLINE
+Uint erts_msacc_get_state_m__(ErtsMsAcc *msacc);
+
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE
+Uint erts_msacc_get_state_um__(ErtsMsAcc *msacc) {
+ Uint state;
+ if (msacc->unmanaged)
+ erts_mtx_lock(&msacc->mtx);
+ state = msacc->state;
+ if (msacc->unmanaged)
+ erts_mtx_unlock(&msacc->mtx);
+ return state;
+}
+
+ERTS_GLB_INLINE
+Uint erts_msacc_get_state_m__(ErtsMsAcc *msacc) {
+ return msacc->state;
+}
+
+ERTS_GLB_INLINE
+void erts_msacc_set_state_um__(ErtsMsAcc *msacc, Uint new_state, int increment) {
+ if (ERTS_UNLIKELY(msacc->unmanaged)) {
+ erts_mtx_lock(&msacc->mtx);
+ if (ERTS_LIKELY(!msacc->perf_counter)) {
+ msacc->state = new_state;
+ erts_mtx_unlock(&msacc->mtx);
+ return;
+ }
+ }
+
+ erts_msacc_set_state_m__(msacc,new_state,increment);
+
+ if (ERTS_UNLIKELY(msacc->unmanaged))
+ erts_mtx_unlock(&msacc->mtx);
+}
+
+ERTS_GLB_INLINE
+void erts_msacc_set_state_m__(ErtsMsAcc *msacc, Uint new_state, int increment) {
+ ErtsSysPerfCounter prev_perf_counter;
+ Sint64 diff;
+
+ if (new_state == msacc->state)
+ return;
+
+ prev_perf_counter = msacc->perf_counter;
+ msacc->perf_counter = erts_sys_perf_counter();
+ diff = msacc->perf_counter - prev_perf_counter;
+ ASSERT(diff >= 0);
+ msacc->counters[msacc->state].pc += diff;
+#ifdef ERTS_MSACC_STATE_COUNTERS
+ msacc->counters[new_state].sc += increment;
+#endif
+ msacc->state = new_state;
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#else
+
+#define ERTS_MSACC_IS_ENABLED() 0
+#define erts_msacc_early_init()
+#define erts_msacc_init()
+#define erts_msacc_init_thread(type, id, liberty)
+#define ERTS_MSACC_PUSH_STATE()
+#define ERTS_MSACC_PUSH_STATE_CACHED()
+#define ERTS_MSACC_POP_STATE()
+#define ERTS_MSACC_SET_STATE(state)
+#define ERTS_MSACC_SET_STATE_CACHED(state)
+#define ERTS_MSACC_PUSH_AND_SET_STATE(state)
+#define ERTS_MSACC_PUSH_AND_SET_STATE_CACHED(state)
+#define ERTS_MSACC_UPDATE_CACHE()
+#define ERTS_MSACC_IS_ENABLED_CACHED()
+#define ERTS_MSACC_DECLARE_CACHE()
+#define ERTS_MSACC_PUSH_STATE_M()
+#define ERTS_MSACC_PUSH_STATE_CACHED_M()
+#define ERTS_MSACC_SET_STATE_CACHED_M(state)
+#define ERTS_MSACC_POP_STATE_M()
+#define ERTS_MSACC_PUSH_AND_SET_STATE_M(state)
+#define ERTS_MSACC_SET_BIF_STATE_CACHED_X(Mod,Addr)
+
+#endif /* ERTS_ENABLE_MSACC */
+
+#ifndef ERTS_MSACC_EXTENDED_STATES
+
+#define ERTS_MSACC_PUSH_STATE_X()
+#define ERTS_MSACC_POP_STATE_X()
+#define ERTS_MSACC_SET_STATE_X(state)
+#define ERTS_MSACC_SET_STATE_M_X(state)
+#define ERTS_MSACC_SET_STATE_CACHED_X(state)
+#define ERTS_MSACC_PUSH_AND_SET_STATE_X(state)
+#define ERTS_MSACC_PUSH_AND_SET_STATE_CACHED_X(state)
+#define ERTS_MSACC_UPDATE_CACHE_X()
+#define ERTS_MSACC_IS_ENABLED_CACHED_X() 0
+#define ERTS_MSACC_DECLARE_CACHE_X()
+#define ERTS_MSACC_PUSH_STATE_M_X()
+#define ERTS_MSACC_PUSH_STATE_CACHED_M_X()
+#define ERTS_MSACC_SET_STATE_CACHED_M_X(state)
+#define ERTS_MSACC_POP_STATE_M_X()
+#define ERTS_MSACC_PUSH_AND_SET_STATE_M_X(state)
+#define ERTS_MSACC_PUSH_AND_SET_STATE_CACHED_M_X(state)
+#define ERTS_MSACC_SET_BIF_STATE_CACHED_X(Mod,Addr)
+
+#else
+
+void erts_msacc_set_bif_state(ErtsMsAcc *msacc, Eterm mod, void *addr);
+
+#define ERTS_MSACC_PUSH_STATE_X() ERTS_MSACC_PUSH_STATE()
+#define ERTS_MSACC_POP_STATE_X() ERTS_MSACC_POP_STATE()
+#define ERTS_MSACC_SET_STATE_X(state) ERTS_MSACC_SET_STATE(state)
+#define ERTS_MSACC_SET_STATE_M_X(state) ERTS_MSACC_SET_STATE_M(state)
+#define ERTS_MSACC_SET_STATE_CACHED_X(state) ERTS_MSACC_SET_STATE_CACHED(state)
+#define ERTS_MSACC_PUSH_AND_SET_STATE_X(state) ERTS_MSACC_PUSH_AND_SET_STATE(state)
+#define ERTS_MSACC_PUSH_AND_SET_STATE_CACHED_X(state) ERTS_MSACC_PUSH_AND_SET_STATE_CACHED(state)
+#define ERTS_MSACC_UPDATE_CACHE_X() ERTS_MSACC_UPDATE_CACHE()
+#define ERTS_MSACC_IS_ENABLED_CACHED_X() ERTS_MSACC_IS_ENABLED_CACHED()
+#define ERTS_MSACC_DECLARE_CACHE_X() ERTS_MSACC_DECLARE_CACHE()
+#define ERTS_MSACC_PUSH_STATE_M_X() ERTS_MSACC_PUSH_STATE_M()
+#define ERTS_MSACC_PUSH_STATE_CACHED_M_X() ERTS_MSACC_PUSH_STATE_CACHED_M()
+#define ERTS_MSACC_SET_STATE_CACHED_M_X(state) ERTS_MSACC_SET_STATE_CACHED_M(state)
+#define ERTS_MSACC_POP_STATE_M_X() ERTS_MSACC_POP_STATE_M()
+#define ERTS_MSACC_PUSH_AND_SET_STATE_M_X(state) ERTS_MSACC_PUSH_AND_SET_STATE_M(state)
+#define ERTS_MSACC_SET_BIF_STATE_CACHED_X(Mod,Addr) \
+ if (ERTS_MSACC_IS_ENABLED_CACHED_X()) \
+ erts_msacc_set_bif_state(__erts_msacc_cache, Mod, Addr)
+
+#endif /* !ERTS_MSACC_EXTENDED_STATES */
+
+#endif /* ERL_MSACC_H__ */
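
A usage sketch of the push/set/pop pattern the header above documents, for a
hypothetical managed-thread call site (do_minor_gc is illustrative, not a real
VM function):

    /* The cache is declared at the top of the function (C89 requirement),
     * the state is switched around the expensive work, and the previous
     * state is restored afterwards. */
    static void do_minor_gc(void)
    {
        ERTS_MSACC_PUSH_AND_SET_STATE_M(ERTS_MSACC_STATE_GC);
        /* ... perform the collection ... */
        ERTS_MSACC_POP_STATE_M();
    }
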
diff --git a/erts/emulator/beam/erl_mtrace.c b/erts/emulator/beam/erl_mtrace.c
index c8bb126687..f2a660f085 100644
--- a/erts/emulator/beam/erl_mtrace.c
+++ b/erts/emulator/beam/erl_mtrace.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -571,7 +572,7 @@ void erts_mtrace_pre_init(void)
void erts_mtrace_init(char *receiver, char *nodename)
{
- char hostname[MAXHOSTNAMELEN];
+ char hostname[MAXHOSTNAMELEN + 1];
char pid[21]; /* enough for a 64 bit number */
socket_desc = ERTS_SOCK_INVALID_SOCKET;
@@ -582,8 +583,10 @@ void erts_mtrace_init(char *receiver, char *nodename)
byte ip_addr[4];
Uint16 port;
- erts_mtx_init(&mtrace_buf_mutex, "mtrace_buf");
- erts_mtx_init(&mtrace_op_mutex, "mtrace_op");
+ erts_mtx_init(&mtrace_buf_mutex, "mtrace_buf", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
+ erts_mtx_init(&mtrace_op_mutex, "mtrace_op", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
socket_desc = erts_sock_open();
if (socket_desc == ERTS_SOCK_INVALID_SOCKET) {
@@ -612,9 +615,10 @@ void erts_mtrace_init(char *receiver, char *nodename)
}
tracep = trace_buffer;
endp = trace_buffer + TRACE_BUF_SZ;
- if (erts_sock_gethostname(hostname, MAXHOSTNAMELEN) != 0)
+ /* gethostname requires that the buffer length is the maximum hostname length + 1 */
+ if (erts_sock_gethostname(hostname, MAXHOSTNAMELEN + 1) != 0)
hostname[0] = '\0';
- hostname[MAXHOSTNAMELEN-1] = '\0';
+ hostname[MAXHOSTNAMELEN] = '\0';
sys_get_pid(pid, sizeof(pid));
write_trace_header(nodename ? nodename : "", pid, hostname);
erts_mtrace_update_heap_size();
@@ -627,7 +631,7 @@ erts_mtrace_install_wrapper_functions(void)
if (erts_mtrace_enabled) {
int i;
/* Install trace functions */
- ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs));
+ ERTS_CT_ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs));
sys_memcpy((void *) real_allctrs,
(void *) erts_allctrs,
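
The hunk above fixes an off-by-one: the buffer now holds MAXHOSTNAMELEN + 1
bytes so that even a maximum-length hostname can be NUL-terminated, since
gethostname() does not guarantee termination on truncation. A standalone sketch
of the corrected pattern (the MAXHOSTNAMELEN value is a stand-in; the real
macro is platform-defined):

    #define _POSIX_C_SOURCE 200112L
    #include <stdio.h>
    #include <unistd.h>

    #define MAXHOSTNAMELEN 64 /* stand-in value */

    int main(void)
    {
        char hostname[MAXHOSTNAMELEN + 1]; /* +1 leaves room for the NUL */
        if (gethostname(hostname, MAXHOSTNAMELEN + 1) != 0)
            hostname[0] = '\0';
        hostname[MAXHOSTNAMELEN] = '\0';   /* terminate even a full-length name */
        printf("%s\n", hostname);
        return 0;
    }
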
diff --git a/erts/emulator/beam/erl_mtrace.h b/erts/emulator/beam/erl_mtrace.h
index 204543ddb0..776c70a819 100644
--- a/erts/emulator/beam/erl_mtrace.h
+++ b/erts/emulator/beam/erl_mtrace.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
diff --git a/erts/emulator/beam/erl_nfunc_sched.c b/erts/emulator/beam/erl_nfunc_sched.c
new file mode 100644
index 0000000000..f97e86bf95
--- /dev/null
+++ b/erts/emulator/beam/erl_nfunc_sched.c
@@ -0,0 +1,180 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2016. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#define ERTS_WANT_NFUNC_SCHED_INTERNALS__
+
+#include "global.h"
+#include "erl_process.h"
+#include "bif.h"
+#include "erl_nfunc_sched.h"
+#include "erl_trace.h"
+
+NifExport *
+erts_new_proc_nif_export(Process *c_p, int argc)
+{
+ size_t size;
+ int i;
+ NifExport *nep, *old_nep;
+
+ size = sizeof(NifExport) + (argc-1)*sizeof(Eterm);
+ nep = erts_alloc(ERTS_ALC_T_NIF_TRAP_EXPORT, size);
+
+ for (i = 0; i < ERTS_NUM_CODE_IX; i++)
+ nep->exp.addressv[i] = &nep->exp.beam[0];
+
+ nep->argc = -1; /* unused marker */
+ nep->argv_size = argc;
+ nep->trace = NULL;
+ old_nep = ERTS_PROC_SET_NIF_TRAP_EXPORT(c_p, nep);
+ if (old_nep) {
+ ASSERT(!nep->trace);
+ erts_free(ERTS_ALC_T_NIF_TRAP_EXPORT, old_nep);
+ }
+ return nep;
+}
+
+void
+erts_destroy_nif_export(Process *p)
+{
+ NifExport *nep = ERTS_PROC_SET_NIF_TRAP_EXPORT(p, NULL);
+ if (nep) {
+ if (nep->m)
+ erts_nif_export_cleanup_nif_mod(nep);
+ erts_free(ERTS_ALC_T_NIF_TRAP_EXPORT, nep);
+ }
+}
+
+void
+erts_nif_export_save_trace(Process *c_p, NifExport *nep, int applying,
+ Export* ep, BeamInstr *cp, Uint32 flags,
+ Uint32 flags_meta, BeamInstr* I,
+ ErtsTracer meta_tracer)
+{
+ NifExportTrace *netp;
+ ASSERT(nep && nep->argc >= 0);
+ ASSERT(!nep->trace);
+ netp = erts_alloc(ERTS_ALC_T_NIF_EXP_TRACE,
+ sizeof(NifExportTrace));
+ netp->applying = applying;
+ netp->ep = ep;
+ netp->cp = cp;
+ netp->flags = flags;
+ netp->flags_meta = flags_meta;
+ netp->I = I;
+ netp->meta_tracer = NIL;
+ erts_tracer_update(&netp->meta_tracer, meta_tracer);
+ nep->trace = netp;
+}
+
+void
+erts_nif_export_restore_trace(Process *c_p, Eterm result, NifExport *nep)
+{
+ NifExportTrace *netp = nep->trace;
+ nep->trace = NULL;
+ erts_bif_trace_epilogue(c_p, result, netp->applying, netp->ep,
+ netp->cp, netp->flags, netp->flags_meta,
+ netp->I, netp->meta_tracer);
+ erts_tracer_update(&netp->meta_tracer, NIL);
+ erts_free(ERTS_ALC_T_NIF_EXP_TRACE, netp);
+}
+
+NifExport *
+erts_nif_export_schedule(Process *c_p, Process *dirty_shadow_proc,
+ ErtsCodeMFA *mfa, BeamInstr *pc,
+ BeamInstr instr,
+ void *dfunc, void *ifunc,
+ Eterm mod, Eterm func,
+ int argc, const Eterm *argv)
+{
+ Process *used_proc;
+ ErtsSchedulerData *esdp;
+ Eterm* reg;
+ NifExport* nep;
+ int i;
+
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
+ & ERTS_PROC_LOCK_MAIN);
+
+ if (dirty_shadow_proc) {
+ esdp = erts_get_scheduler_data();
+ ASSERT(esdp && ERTS_SCHEDULER_IS_DIRTY(esdp));
+
+ used_proc = dirty_shadow_proc;
+ }
+ else {
+ esdp = erts_proc_sched_data(c_p);
+ ASSERT(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp));
+
+ used_proc = c_p;
+ ERTS_VBUMP_ALL_REDS(c_p);
+ }
+
+ reg = esdp->x_reg_array;
+
+ if (mfa)
+ nep = erts_get_proc_nif_export(c_p, (int) mfa->arity);
+ else {
+ /* If no mfa, this is not the first schedule... */
+ nep = ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p);
+ ASSERT(nep && nep->argc >= 0);
+ }
+
+ if (nep->argc < 0) {
+ /*
+ * First schedule; save things that might
+ * need to be restored...
+ */
+ for (i = 0; i < (int) mfa->arity; i++)
+ nep->argv[i] = reg[i];
+ nep->pc = pc;
+ nep->cp = c_p->cp;
+ nep->mfa = mfa;
+ nep->current = c_p->current;
+ ASSERT(argc >= 0);
+ nep->argc = (int) mfa->arity;
+ nep->m = NULL;
+
+ ASSERT(!erts_check_nif_export_in_area(c_p,
+ (char *) nep,
+ (sizeof(NifExport)
+ + (sizeof(Eterm)
+ *(nep->argc-1)))));
+ }
+ /* Copy new arguments into register array if necessary... */
+ if (reg != argv) {
+ for (i = 0; i < argc; i++)
+ reg[i] = argv[i];
+ }
+ ASSERT(is_atom(mod) && is_atom(func));
+ nep->exp.info.mfa.module = mod;
+ nep->exp.info.mfa.function = func;
+ nep->exp.info.mfa.arity = (Uint) argc;
+ nep->exp.beam[0] = (BeamInstr) instr; /* call_nif || apply_bif */
+ nep->exp.beam[1] = (BeamInstr) dfunc;
+ nep->func = ifunc;
+ used_proc->arity = argc;
+ used_proc->freason = TRAP;
+ used_proc->i = (BeamInstr*) nep->exp.addressv[0];
+ return nep;
+}
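
erts_nif_export_schedule() above uses argc < 0 as an "unused" marker so that
the x registers are saved only on the first schedule of a trap; subsequent
reschedules reuse the saved state. A simplified standalone sketch of that
first-schedule pattern (stand-in types and values, not the real ERTS structs):

    #include <stdio.h>

    #define MAX_ARGS 4

    typedef struct {
        int argc;            /* < 0 means "not in use" */
        long argv[MAX_ARGS]; /* saved argument registers */
    } nif_export;

    static void schedule(nif_export *nep, const long *reg, int arity)
    {
        if (nep->argc < 0) { /* first schedule: save the registers */
            int i;
            for (i = 0; i < arity; i++)
                nep->argv[i] = reg[i];
            nep->argc = arity;
        }                    /* a reschedule keeps the saved arguments */
    }

    int main(void)
    {
        nif_export nep = { -1, {0} };
        long reg[MAX_ARGS] = {10, 20, 30, 0};
        schedule(&nep, reg, 3);
        reg[0] = 99;            /* registers change before a reschedule */
        schedule(&nep, reg, 3); /* saved argv stays untouched */
        printf("argv[0]=%ld argc=%d\n", nep.argv[0], nep.argc); /* 10 3 */
        return 0;
    }
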
diff --git a/erts/emulator/beam/erl_nfunc_sched.h b/erts/emulator/beam/erl_nfunc_sched.h
new file mode 100644
index 0000000000..b8a4e4ebc3
--- /dev/null
+++ b/erts/emulator/beam/erl_nfunc_sched.h
@@ -0,0 +1,328 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2016. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERL_NFUNC_SCHED_H__
+#define ERL_NFUNC_SCHED_H__
+
+#include "erl_process.h"
+#include "bif.h"
+#include "error.h"
+
+typedef struct {
+ int applying;
+ Export* ep;
+ BeamInstr *cp;
+ Uint32 flags;
+ Uint32 flags_meta;
+ BeamInstr* I;
+ ErtsTracer meta_tracer;
+} NifExportTrace;
+
+/*
+ * NIF exports need a few more items than the Export struct provides,
+ * including the erl_module_nif* and a NIF function pointer, so the
+ * NifExport below adds those. The Export member must be first in the
+ * struct. A number of values are stored for error handling purposes
+ * only.
+ *
+ * 'argc' is >= 0 when NifExport is in use, and < 0 when not.
+ */
+
+typedef struct {
+ Export exp;
+ struct erl_module_nif* m; /* NIF module, or NULL if BIF */
+ void *func; /* Indirect NIF or BIF to execute (may be unused) */
+ ErtsCodeMFA *current;/* Current as set when originally called */
+ NifExportTrace *trace;
+ /* --- The following is only used on error --- */
+ BeamInstr *pc; /* Program counter */
+ BeamInstr *cp; /* Continuation pointer */
+ ErtsCodeMFA *mfa; /* MFA of original call */
+ int argc; /* Number of arguments in original call */
+ int argv_size; /* Allocated size of argv */
+ Eterm argv[1]; /* Saved arguments from the original call */
+} NifExport;
+
+NifExport *erts_new_proc_nif_export(Process *c_p, int argc);
+void erts_nif_export_save_trace(Process *c_p, NifExport *nep, int applying,
+ Export* ep, BeamInstr *cp, Uint32 flags,
+ Uint32 flags_meta, BeamInstr* I,
+ ErtsTracer meta_tracer);
+void erts_nif_export_restore_trace(Process *c_p, Eterm result, NifExport *nep);
+void erts_destroy_nif_export(Process *p);
+NifExport *erts_nif_export_schedule(Process *c_p, Process *dirty_shadow_proc,
+ ErtsCodeMFA *mfa, BeamInstr *pc,
+ BeamInstr instr,
+ void *dfunc, void *ifunc,
+ Eterm mod, Eterm func,
+ int argc, const Eterm *argv);
+void erts_nif_export_cleanup_nif_mod(NifExport *ep); /* erl_nif.c */
+ERTS_GLB_INLINE NifExport *erts_get_proc_nif_export(Process *c_p, int extra);
+ERTS_GLB_INLINE int erts_setup_nif_export_rootset(Process* proc, Eterm** objv,
+ Uint* nobj);
+ERTS_GLB_INLINE int erts_check_nif_export_in_area(Process *p,
+ char *start, Uint size);
+ERTS_GLB_INLINE void erts_nif_export_restore(Process *c_p, NifExport *ep,
+ Eterm result);
+ERTS_GLB_INLINE void erts_nif_export_restore_error(Process* c_p, BeamInstr **pc,
+ Eterm *reg, ErtsCodeMFA **nif_mfa);
+ERTS_GLB_INLINE int erts_nif_export_check_save_trace(Process *c_p, Eterm result,
+ int applying, Export* ep,
+ BeamInstr *cp, Uint32 flags,
+ Uint32 flags_meta, BeamInstr* I,
+ ErtsTracer meta_tracer);
+ERTS_GLB_INLINE Process *erts_proc_shadow2real(Process *c_p);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE NifExport *
+erts_get_proc_nif_export(Process *c_p, int argc)
+{
+ NifExport *nep = ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p);
+ if (!nep || (nep->argc < 0 && nep->argv_size < argc))
+ return erts_new_proc_nif_export(c_p, argc);
+ return nep;
+}
+
+/*
+ * If a process has saved arguments, they need to be part of the GC
+ * rootset. The function below is called from setup_rootset() in
+ * erl_gc.c. Any exception term saved in the NifExport is also made
+ * part of the GC rootset here; it always resides in rootset[0].
+ */
+ERTS_GLB_INLINE int
+erts_setup_nif_export_rootset(Process* proc, Eterm** objv, Uint* nobj)
+{
+ NifExport* ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+
+ if (!ep || ep->argc <= 0)
+ return 0;
+
+ *objv = ep->argv;
+ *nobj = ep->argc;
+ return 1;
+}
+
+/*
+ * Check if nif export points into code area...
+ */
+ERTS_GLB_INLINE int
+erts_check_nif_export_in_area(Process *p, char *start, Uint size)
+{
+ NifExport *nep = ERTS_PROC_GET_NIF_TRAP_EXPORT(p);
+ if (!nep || nep->argc < 0)
+ return 0;
+ if (ErtsInArea(nep->pc, start, size))
+ return 1;
+ if (ErtsInArea(nep->cp, start, size))
+ return 1;
+ if (ErtsInArea(nep->mfa, start, size))
+ return 1;
+ if (ErtsInArea(nep->current, start, size))
+ return 1;
+ return 0;
+}
+
+ERTS_GLB_INLINE void
+erts_nif_export_restore(Process *c_p, NifExport *ep, Eterm result)
+{
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()));
+ ERTS_LC_ASSERT(!(c_p->static_flags
+ & ERTS_STC_FLG_SHADOW_PROC));
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
+ & ERTS_PROC_LOCK_MAIN);
+
+ c_p->current = ep->current;
+ ep->argc = -1; /* Unused nif-export marker... */
+ if (ep->trace)
+ erts_nif_export_restore_trace(c_p, result, ep);
+}
+
+ERTS_GLB_INLINE void
+erts_nif_export_restore_error(Process* c_p, BeamInstr **pc,
+ Eterm *reg, ErtsCodeMFA **nif_mfa)
+{
+ NifExport *nep = (NifExport *) ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p);
+ int ix;
+
+ ASSERT(nep);
+ *pc = nep->pc;
+ c_p->cp = nep->cp;
+ *nif_mfa = nep->mfa;
+ for (ix = 0; ix < nep->argc; ix++)
+ reg[ix] = nep->argv[ix];
+ erts_nif_export_restore(c_p, nep, THE_NON_VALUE);
+}
+
+ERTS_GLB_INLINE int
+erts_nif_export_check_save_trace(Process *c_p, Eterm result,
+ int applying, Export* ep,
+ BeamInstr *cp, Uint32 flags,
+ Uint32 flags_meta, BeamInstr* I,
+ ErtsTracer meta_tracer)
+{
+ if (is_non_value(result) && c_p->freason == TRAP) {
+ NifExport *nep = ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p);
+ if (nep && nep->argc >= 0) {
+ erts_nif_export_save_trace(c_p, nep, applying, ep,
+ cp, flags, flags_meta,
+ I, meta_tracer);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+ERTS_GLB_INLINE Process *
+erts_proc_shadow2real(Process *c_p)
+{
+ if (c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
+ Process *real_c_p = c_p->next;
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()));
+ ASSERT(real_c_p->common.id == c_p->common.id);
+ return real_c_p;
+ }
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()));
+ return c_p;
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#endif /* ERL_NFUNC_SCHED_H__ */
+
+#if defined(ERTS_WANT_NFUNC_SCHED_INTERNALS__) && !defined(ERTS_NFUNC_SCHED_INTERNALS__)
+#define ERTS_NFUNC_SCHED_INTERNALS__
+
+#define ERTS_I_BEAM_OP_TO_NIF_EXPORT(I) \
+ (ASSERT(BeamIsOpCode(*(I), op_apply_bif) || \
+ BeamIsOpCode(*(I), op_call_nif)), \
+ ((NifExport *) (((char *) (I)) - offsetof(NifExport, exp.beam[0]))))
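+
+/*
+ * Editor's note: this is the classic "container_of" idiom. 'I' points
+ * at the beam[0] slot of the embedded Export, so subtracting the
+ * offset of exp.beam[0] recovers the enclosing NifExport:
+ *
+ *   NifExport *nep = ERTS_I_BEAM_OP_TO_NIF_EXPORT(I);
+ *   ASSERT((BeamInstr *) nep->exp.beam == I);
+ */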
+
+
+#include "erl_message.h"
+#include <stddef.h>
+
+ERTS_GLB_INLINE void erts_flush_dirty_shadow_proc(Process *sproc);
+ERTS_GLB_INLINE void erts_cache_dirty_shadow_proc(Process *sproc);
+ERTS_GLB_INLINE Process *erts_make_dirty_shadow_proc(ErtsSchedulerData *esdp,
+ Process *c_p);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void
+erts_flush_dirty_shadow_proc(Process *sproc)
+{
+ Process *c_p = sproc->next;
+
+ ASSERT(sproc->common.id == c_p->common.id);
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
+ & ERTS_PROC_LOCK_MAIN);
+
+ ASSERT(c_p->stop == sproc->stop);
+ ASSERT(c_p->hend == sproc->hend);
+ ASSERT(c_p->heap == sproc->heap);
+ ASSERT(c_p->abandoned_heap == sproc->abandoned_heap);
+ ASSERT(c_p->heap_sz == sproc->heap_sz);
+ ASSERT(c_p->high_water == sproc->high_water);
+ ASSERT(c_p->old_heap == sproc->old_heap);
+ ASSERT(c_p->old_htop == sproc->old_htop);
+ ASSERT(c_p->old_hend == sproc->old_hend);
+
+ ASSERT(c_p->htop <= sproc->htop && sproc->htop <= c_p->stop);
+
+ c_p->htop = sproc->htop;
+
+ if (!c_p->mbuf)
+ c_p->mbuf = sproc->mbuf;
+ else if (sproc->mbuf) {
+ ErlHeapFragment *bp;
+ for (bp = sproc->mbuf; bp->next; bp = bp->next)
+ ASSERT(!bp->off_heap.first);
+ bp->next = c_p->mbuf;
+ c_p->mbuf = sproc->mbuf;
+ }
+
+ c_p->mbuf_sz += sproc->mbuf_sz;
+
+ if (!c_p->off_heap.first)
+ c_p->off_heap.first = sproc->off_heap.first;
+ else if (sproc->off_heap.first) {
+ struct erl_off_heap_header *ohhp;
+ for (ohhp = sproc->off_heap.first; ohhp->next; ohhp = ohhp->next)
+ ;
+ ohhp->next = c_p->off_heap.first;
+ c_p->off_heap.first = sproc->off_heap.first;
+ }
+
+ c_p->off_heap.overhead += sproc->off_heap.overhead;
+}
+
+ERTS_GLB_INLINE void
+erts_cache_dirty_shadow_proc(Process *sproc)
+{
+ Process *c_p = sproc->next;
+ ASSERT(c_p);
+ ASSERT(sproc->common.id == c_p->common.id);
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
+ & ERTS_PROC_LOCK_MAIN);
+
+ sproc->htop = c_p->htop;
+ sproc->stop = c_p->stop;
+ sproc->hend = c_p->hend;
+ sproc->heap = c_p->heap;
+ sproc->abandoned_heap = c_p->abandoned_heap;
+ sproc->heap_sz = c_p->heap_sz;
+ sproc->high_water = c_p->high_water;
+ sproc->old_hend = c_p->old_hend;
+ sproc->old_htop = c_p->old_htop;
+ sproc->old_heap = c_p->old_heap;
+ sproc->mbuf = NULL;
+ sproc->mbuf_sz = 0;
+ ERTS_INIT_OFF_HEAP(&sproc->off_heap);
+}
+
+ERTS_GLB_INLINE Process *
+erts_make_dirty_shadow_proc(ErtsSchedulerData *esdp, Process *c_p)
+{
+ Process *sproc;
+
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp));
+
+ sproc = esdp->dirty_shadow_process;
+ ASSERT(sproc);
+ ASSERT(sproc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
+ ASSERT(erts_atomic32_read_nob(&sproc->state)
+ == (ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_PROXY));
+
+ sproc->next = c_p;
+ sproc->common.id = c_p->common.id;
+
+ erts_cache_dirty_shadow_proc(sproc);
+
+ return sproc;
+}
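+
+/*
+ * Illustrative sketch (not from the original commit) of the intended
+ * lifecycle, as used by the dirty NIF call path in erl_nif.c:
+ *
+ *   sproc = erts_make_dirty_shadow_proc(esdp, c_p);   cache c_p state
+ *   ... the dirty NIF allocates by bumping sproc->htop ...
+ *   erts_flush_dirty_shadow_proc(sproc);   move htop, mbuf and
+ *                                          off_heap back into c_p
+ */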
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+
+#endif /* defined(ERTS_WANT_NFUNC_SCHED_INTERNALS__) && !defined(ERTS_NFUNC_SCHED_INTERNALS__) */
+
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index ff551ea3af..f7f12efe28 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -1,24 +1,42 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2009-2014. All Rights Reserved.
+ * Copyright Ericsson AB 2009-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
/* Erlang Native InterFace
*/
+/*
+ * The environment contains a pointer to the currently executing
+ * process. In the dirty case this pointer does, however, not point
+ * to the actual process structure of the executing process, but
+ * instead to a "shadow process structure". This is in order to be
+ * able to handle heap allocation without the need to acquire the
+ * main lock on the process.
+ *
+ * The dirty process is allowed to allocate on the heap without the
+ * main lock, i.e., to increment htop, but is not allowed to modify
+ * mbuf, offheap, etc. without the main lock. The dirty process moves
+ * the mbuf list and offheap list of the shadow process structure
+ * into the real structure when the dirty NIF call completes.
+ */
+
+
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
@@ -36,6 +54,12 @@
#include "erl_thr_progress.h"
#include "dtrace-wrapper.h"
#include "erl_process.h"
+#include "erl_bif_unique.h"
+#include "erl_utils.h"
+#include "erl_io_queue.h"
+#undef ERTS_WANT_NFUNC_SCHED_INTERNALS__
+#define ERTS_WANT_NFUNC_SCHED_INTERNALS__
+#include "erl_nfunc_sched.h"
#if defined(USE_DYNAMIC_TRACE) && (defined(USE_DTRACE) || defined(USE_SYSTEMTAP))
#define HAVE_USE_DTRACE 1
#endif
@@ -43,7 +67,6 @@
#include <limits.h>
#include <stddef.h> /* offsetof */
-
/* Information about a loaded nif library.
* Each successful call to erlang:load_nif will allocate an instance of
* erl_module_nif. Two calls opening the same library will thus have the same
@@ -52,14 +75,19 @@
struct erl_module_nif {
void* priv_data;
void* handle; /* "dlopen" */
- struct enif_entry_t* entry;
+ struct enif_entry_t entry;
erts_refc_t rt_cnt; /* number of resource types */
erts_refc_t rt_dtor_cnt; /* number of resource types with destructors */
Module* mod; /* Can be NULL if orphan with dtor-resources left */
+
+ ErlNifFunc _funcs_copy_[1]; /* only used for old libs */
};
+typedef ERL_NIF_TERM (*NativeFunPtr)(ErlNifEnv*, int, const ERL_NIF_TERM[]);
+
#ifdef DEBUG
# define READONLY_CHECK
+# define ERTS_DBG_NIF_NOT_SCHED_MARKER ((void *) (UWord) 1)
#endif
#ifdef READONLY_CHECK
# define ADD_READONLY_CHECK(ENV,PTR,SIZE) add_readonly_check(ENV,PTR,SIZE)
@@ -68,6 +96,14 @@ static void add_readonly_check(ErlNifEnv*, unsigned char* ptr, unsigned sz);
# define ADD_READONLY_CHECK(ENV,PTR,SIZE) ((void)0)
#endif
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+# define ASSERT_IN_ENV(ENV, TERM, NR, TYPE) dbg_assert_in_env(ENV, TERM, NR, TYPE, __func__)
+static void dbg_assert_in_env(ErlNifEnv*, Eterm term, int nr, const char* type, const char* func);
+# include "erl_gc.h"
+#else
+# define ASSERT_IN_ENV(ENV, TERM, NR, TYPE)
+#endif
+
#ifdef DEBUG
static int is_offheap(const ErlOffHeap* off_heap);
#endif
@@ -77,9 +113,48 @@ void dtrace_nifenv_str(ErlNifEnv *, char *);
#endif
#define MIN_HEAP_FRAG_SZ 200
-static Eterm* alloc_heap_heavy(ErlNifEnv* env, unsigned need, Eterm* hp);
+static Eterm* alloc_heap_heavy(ErlNifEnv* env, size_t need, Eterm* hp);
+
+static ERTS_INLINE int
+is_scheduler(void)
+{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ if (!esdp)
+ return 0;
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+ return -1;
+ return 1;
+}
-static ERTS_INLINE Eterm* alloc_heap(ErlNifEnv* env, unsigned need)
+static ERTS_INLINE void
+execution_state(ErlNifEnv *env, Process **c_pp, int *schedp)
+{
+ if (schedp)
+ *schedp = is_scheduler();
+ if (c_pp) {
+ if (!env || env->proc->common.id == ERTS_INVALID_PID)
+ *c_pp = NULL;
+ else {
+ Process *c_p = env->proc;
+
+ if (!(c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC)) {
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
+ & ERTS_PROC_LOCK_MAIN);
+ }
+ else {
+ c_p = env->proc->next;
+ ASSERT(is_scheduler() < 0);
+ ASSERT(c_p && env->proc->common.id == c_p->common.id);
+ }
+
+ *c_pp = c_p;
+
+ ASSERT(!(c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC));
+ }
+ }
+}
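+
+/*
+ * Editor's note: execution_state() is typically used like this;
+ * 'scheduler' becomes 1 on a normal scheduler, -1 on a dirty
+ * scheduler and 0 on a non-scheduler thread, while 'c_p' is always
+ * the real process (never the shadow process) or NULL:
+ *
+ *   Process *c_p;
+ *   int scheduler;
+ *   execution_state(env, &c_p, &scheduler);
+ *   if (scheduler > 0)
+ *       ... fast path, main lock is held ...
+ */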
+
+static ERTS_INLINE Eterm* alloc_heap(ErlNifEnv* env, size_t need)
{
Eterm* hp = env->hp;
env->hp += need;
@@ -89,15 +164,18 @@ static ERTS_INLINE Eterm* alloc_heap(ErlNifEnv* env, unsigned need)
return alloc_heap_heavy(env, need, hp);
}
-static Eterm* alloc_heap_heavy(ErlNifEnv* env, unsigned need, Eterm* hp)
-{
+static Eterm* alloc_heap_heavy(ErlNifEnv* env, size_t need, Eterm* hp)
+{
env->hp = hp;
- if (env->heap_frag == NULL) {
+ if (env->heap_frag == NULL) {
ASSERT(HEAP_LIMIT(env->proc) == env->hp_end);
- HEAP_TOP(env->proc) = env->hp;
+ ASSERT(env->hp + need > env->hp_end);
+ HEAP_TOP(env->proc) = env->hp;
}
else {
- env->heap_frag->used_size = hp - env->heap_frag->mem;
+ Uint usz = env->hp - env->heap_frag->mem;
+ env->proc->mbuf_sz += usz - env->heap_frag->used_size;
+ env->heap_frag->used_size = usz;
ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size);
}
hp = erts_heap_alloc(env->proc, need, MIN_HEAP_FRAG_SZ);
@@ -109,7 +187,7 @@ static Eterm* alloc_heap_heavy(ErlNifEnv* env, unsigned need, Eterm* hp)
}
#if SIZEOF_LONG != ERTS_SIZEOF_ETERM
-static ERTS_INLINE void ensure_heap(ErlNifEnv* env, unsigned may_need)
+static ERTS_INLINE void ensure_heap(ErlNifEnv* env, size_t may_need)
{
if (env->hp + may_need > env->hp_end) {
alloc_heap_heavy(env, may_need, env->hp);
@@ -118,7 +196,8 @@ static ERTS_INLINE void ensure_heap(ErlNifEnv* env, unsigned may_need)
}
#endif
-void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif)
+void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif,
+ Process* tracee)
{
env->mod_nif = mod_nif;
env->proc = p;
@@ -127,19 +206,37 @@ void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif)
env->heap_frag = NULL;
env->fpe_was_unmasked = erts_block_fpe();
env->tmp_obj_list = NULL;
-}
+ env->exception_thrown = 0;
+ env->tracee = tracee;
-static void pre_nif_noproc(ErlNifEnv* env, struct erl_module_nif* mod_nif)
-{
- env->mod_nif = mod_nif;
- env->proc = NULL;
- env->hp = NULL;
- env->hp_end = NULL;
- env->heap_frag = NULL;
- env->fpe_was_unmasked = erts_block_fpe();
- env->tmp_obj_list = NULL;
+ ASSERT(p->common.id != ERTS_INVALID_PID);
+
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ env->dbg_disable_assert_in_env = 0;
+#endif
+#if defined(DEBUG) && defined(ERTS_DIRTY_SCHEDULERS)
+ {
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ASSERT(esdp);
+
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ erts_aint32_t state = erts_atomic32_read_nob(&p->state);
+
+ ASSERT(p->scheduler_data == esdp);
+ ASSERT((state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS))
+ && !(state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)));
+ }
+ }
+#endif
}
+static void full_cache_env(ErlNifEnv *env);
+static void cache_env(ErlNifEnv* env);
+static void full_flush_env(ErlNifEnv *env);
+static void flush_env(ErlNifEnv* env);
+
/* Temporary object header, auto-deallocated when NIF returns
* or when independent environment is cleared.
*/
@@ -162,27 +259,155 @@ static ERTS_INLINE void free_tmp_objs(ErlNifEnv* env)
void erts_post_nif(ErlNifEnv* env)
{
erts_unblock_fpe(env->fpe_was_unmasked);
- if (env->heap_frag == NULL) {
- ASSERT(env->hp_end == HEAP_LIMIT(env->proc));
- ASSERT(env->hp >= HEAP_TOP(env->proc));
- ASSERT(env->hp <= HEAP_LIMIT(env->proc));
- HEAP_TOP(env->proc) = env->hp;
+ full_flush_env(env);
+ free_tmp_objs(env);
+ env->exiting = ERTS_PROC_IS_EXITING(env->proc);
+}
+
+
+/*
+ * Initialize a NifExport struct. Create it if needed and store it in the
+ * proc. The direct_fp function is what will be invoked by op_call_nif, and
+ * the indirect_fp function, if not NULL, is what the direct_fp function
+ * will call. If the allocated NifExport isn't large enough to hold all of
+ * argv, allocate a larger one. Save 'current' and the registers the first
+ * time this call is scheduled.
+ */
+
+static ERTS_INLINE ERL_NIF_TERM
+schedule(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp,
+ Eterm mod, Eterm func_name, int argc, const ERL_NIF_TERM argv[])
+{
+ NifExport *ep;
+ Process *c_p, *dirty_shadow_proc;
+
+ execution_state(env, &c_p, NULL);
+ if (c_p == env->proc)
+ dirty_shadow_proc = NULL;
+ else
+ dirty_shadow_proc = env->proc;
+
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p));
+
+ ep = erts_nif_export_schedule(c_p, dirty_shadow_proc,
+ c_p->current,
+ c_p->cp,
+ BeamOpCodeAddr(op_call_nif),
+ direct_fp, indirect_fp,
+ mod, func_name,
+ argc, (const Eterm *) argv);
+ if (!ep->m) {
+ /* First time this call is scheduled... */
+ erts_refc_inc(&env->mod_nif->rt_dtor_cnt, 1);
+ ep->m = env->mod_nif;
}
- else {
- ASSERT(env->hp_end != HEAP_LIMIT(env->proc));
- ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size);
- env->heap_frag->used_size = env->hp - env->heap_frag->mem;
- ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size);
+ return (ERL_NIF_TERM) THE_NON_VALUE;
+}
+
+
+static ERL_NIF_TERM dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+static ERL_NIF_TERM dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+
+int
+erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm *reg)
+{
+ int exiting;
+ ERL_NIF_TERM *argv = (ERL_NIF_TERM *) reg;
+ NifExport *nep = ERTS_I_BEAM_OP_TO_NIF_EXPORT(I);
+ ErtsCodeMFA *codemfa = erts_code_to_codemfa(I);
+ NativeFunPtr dirty_nif = (NativeFunPtr) I[1];
+ ErlNifEnv env;
+ ERL_NIF_TERM result;
+#ifdef DEBUG
+ erts_aint32_t state = erts_atomic32_read_nob(&c_p->state);
+
+ ASSERT(nep == ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p));
+
+ ASSERT(!c_p->scheduler_data);
+ ASSERT((state & ERTS_PSFLG_DIRTY_RUNNING)
+ && !(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)));
+ ASSERT(esdp);
+
+ nep->func = ERTS_DBG_NIF_NOT_SCHED_MARKER;
+#endif
+
+ erts_pre_nif(&env, c_p, nep->m, NULL);
+
+ env.proc = erts_make_dirty_shadow_proc(esdp, c_p);
+
+ env.proc->freason = EXC_NULL;
+ env.proc->fvalue = NIL;
+ env.proc->ftrace = NIL;
+ env.proc->i = c_p->i;
+
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p)));
+
+ erts_atomic32_read_band_mb(&c_p->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_IO_PROC));
+
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ result = (*dirty_nif)(&env, codemfa->arity, argv); /* Call dirty NIF */
+
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ ASSERT(env.proc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
+ ASSERT(env.proc->next == c_p);
+
+ exiting = ERTS_PROC_IS_EXITING(c_p);
+
+ if (!exiting) {
+ if (env.exception_thrown) {
+ schedule_exception:
+ schedule(&env, dirty_nif_exception, NULL,
+ am_erts_internal, am_dirty_nif_exception,
+ 1, &env.proc->fvalue);
+ }
+ else if (is_value(result)) {
+ schedule(&env, dirty_nif_finalizer, NULL,
+ am_erts_internal, am_dirty_nif_finalizer,
+ 1, &result);
+ }
+ else if (env.proc->freason != TRAP) { /* user returned garbage... */
+ ERTS_DECL_AM(badreturn);
+ (void) enif_raise_exception(&env, AM_badreturn);
+ goto schedule_exception;
+ }
+ else {
+ /* Rescheduled by dirty NIF call... */
+ ASSERT(nep->func != ERTS_DBG_NIF_NOT_SCHED_MARKER);
+ }
+ c_p->i = env.proc->i;
+ c_p->arity = env.proc->arity;
}
- free_tmp_objs(env);
+
+#ifdef DEBUG
+ if (nep->func == ERTS_DBG_NIF_NOT_SCHED_MARKER)
+ nep->func = NULL;
+#endif
+
+ erts_unblock_fpe(env.fpe_was_unmasked);
+ full_flush_env(&env);
+ free_tmp_objs(&env);
+
+ return exiting;
}
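+
+/*
+ * Editor's note (behavior sketched from context; the definitions
+ * appear further down in this file): the two finalizers scheduled
+ * above run on a normal scheduler once the dirty work is done and do
+ * roughly
+ *
+ *   dirty_nif_finalizer:  return argv[0];                 the result
+ *   dirty_nif_exception:  return enif_raise_exception(env, argv[0]);
+ */
+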
-static void post_nif_noproc(ErlNifEnv* env)
+
+static void full_flush_env(ErlNifEnv* env)
{
- erts_unblock_fpe(env->fpe_was_unmasked);
- free_tmp_objs(env);
+ flush_env(env);
+ if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)
+ /* Dirty nif call using shadow process struct */
+ erts_flush_dirty_shadow_proc(env->proc);
}
+static void full_cache_env(ErlNifEnv* env)
+{
+ if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)
+ erts_cache_dirty_shadow_proc(env->proc);
+ cache_env(env);
+}
/* Flush out our cached heap pointers to allow an ordinary HAlloc
*/
@@ -195,9 +420,12 @@ static void flush_env(ErlNifEnv* env)
HEAP_TOP(env->proc) = env->hp;
}
else {
+ Uint usz;
ASSERT(env->hp_end != HEAP_LIMIT(env->proc));
ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size);
- env->heap_frag->used_size = env->hp - env->heap_frag->mem;
+ usz = env->hp - env->heap_frag->mem;
+ env->proc->mbuf_sz += usz - env->heap_frag->used_size;
+ env->heap_frag->used_size = usz;
ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size);
}
}
@@ -206,6 +434,7 @@ static void flush_env(ErlNifEnv* env)
*/
static void cache_env(ErlNifEnv* env)
{
+ env->heap_frag = MBUF(env->proc);
if (env->heap_frag == NULL) {
ASSERT(env->hp_end == HEAP_LIMIT(env->proc));
ASSERT(env->hp <= HEAP_TOP(env->proc));
@@ -213,10 +442,6 @@ static void cache_env(ErlNifEnv* env)
env->hp = HEAP_TOP(env->proc);
}
else {
- ASSERT(env->hp_end != HEAP_LIMIT(env->proc));
- ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size);
- env->heap_frag = MBUF(env->proc);
- ASSERT(env->heap_frag != NULL);
env->hp = env->heap_frag->mem + env->heap_frag->used_size;
env->hp_end = env->heap_frag->mem + env->heap_frag->alloc_size;
}
@@ -247,18 +472,20 @@ struct enif_msg_environment_t
Process phony_proc;
};
-ErlNifEnv* enif_alloc_env(void)
+static ERTS_INLINE void
+setup_nif_env(struct enif_msg_environment_t* msg_env,
+ struct erl_module_nif* mod,
+ Process* tracee)
{
- struct enif_msg_environment_t* msg_env =
- erts_alloc_fnf(ERTS_ALC_T_NIF, sizeof(struct enif_msg_environment_t));
Eterm* phony_heap = (Eterm*) msg_env; /* dummy non-NULL ptr */
-
- msg_env->env.hp = phony_heap;
+
+ msg_env->env.hp = phony_heap;
msg_env->env.hp_end = phony_heap;
msg_env->env.heap_frag = NULL;
- msg_env->env.mod_nif = NULL;
+ msg_env->env.mod_nif = mod;
msg_env->env.tmp_obj_list = NULL;
msg_env->env.proc = &msg_env->phony_proc;
+ msg_env->env.exception_thrown = 0;
memset(&msg_env->phony_proc, 0, sizeof(Process));
HEAP_START(&msg_env->phony_proc) = phony_heap;
HEAP_TOP(&msg_env->phony_proc) = phony_heap;
@@ -266,10 +493,22 @@ ErlNifEnv* enif_alloc_env(void)
HEAP_END(&msg_env->phony_proc) = phony_heap;
MBUF(&msg_env->phony_proc) = NULL;
msg_env->phony_proc.common.id = ERTS_INVALID_PID;
+ msg_env->env.tracee = tracee;
+
#ifdef FORCE_HEAP_FRAGS
msg_env->phony_proc.space_verified = 0;
msg_env->phony_proc.space_verified_from = NULL;
#endif
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ msg_env->env.dbg_disable_assert_in_env = 0;
+#endif
+}
+
+ErlNifEnv* enif_alloc_env(void)
+{
+ struct enif_msg_environment_t* msg_env =
+ erts_alloc_fnf(ERTS_ALC_T_NIF, sizeof(struct enif_msg_environment_t));
+ setup_nif_env(msg_env, NULL, NULL);
return &msg_env->env;
}
void enif_free_env(ErlNifEnv* env)
@@ -278,6 +517,20 @@ void enif_free_env(ErlNifEnv* env)
erts_free(ERTS_ALC_T_NIF, env);
}
+static ERTS_INLINE void pre_nif_noproc(struct enif_msg_environment_t* msg_env,
+ struct erl_module_nif* mod,
+ Process* tracee)
+{
+ setup_nif_env(msg_env, mod, tracee);
+ msg_env->env.fpe_was_unmasked = erts_block_fpe();
+}
+
+static ERTS_INLINE void post_nif_noproc(struct enif_msg_environment_t* msg_env)
+{
+ erts_unblock_fpe(msg_env->env.fpe_was_unmasked);
+ enif_clear_env(&msg_env->env);
+}
+
static ERTS_INLINE void clear_offheap(ErlOffHeap* oh)
{
oh->first = NULL;
@@ -304,79 +557,372 @@ void enif_clear_env(ErlNifEnv* env)
ASSERT(!is_offheap(&MSO(p)));
free_tmp_objs(env);
}
+
+#ifdef DEBUG
+static int enif_send_delay = 0;
+#define ERTS_FORCE_ENIF_SEND_DELAY() (enif_send_delay++ % 2 == 0)
+#else
+#ifdef ERTS_PROC_LOCK_OWN_IMPL
+#define ERTS_FORCE_ENIF_SEND_DELAY() 0
+#else
+/*
+ * We always schedule messages if we do not use our own process lock
+ * implementation. Otherwise we could end up doing a trylock on a
+ * lock that is already held by the same thread, and what happens
+ * then is not guaranteed to be the same for all mutex
+ * implementations.
+ */
+#define ERTS_FORCE_ENIF_SEND_DELAY() 1
+#endif
+#endif
+
+int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks)
+{
+ ErlTraceMessageQueue *msgq, **last_msgq;
+ int reds = 0;
+
+ /* Only one thread at a time is allowed to flush trace messages,
+ so we require the main lock to be held when doing the flush */
+ ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);
+
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_TRACE);
+
+ msgq = c_p->trace_msg_q;
+
+ if (!msgq)
+ goto error;
+
+ do {
+ Process* rp;
+ ErtsProcLocks rp_locks;
+ ErtsMessage *first, **last;
+ Uint len;
+
+ first = msgq->first;
+ last = msgq->last;
+ len = msgq->len;
+ msgq->first = NULL;
+ msgq->last = &msgq->first;
+ msgq->len = 0;
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE);
+
+ ASSERT(len != 0);
+
+ rp = erts_proc_lookup(msgq->receiver);
+ if (rp) {
+ rp_locks = 0;
+ if (rp->common.id == c_p->common.id)
+ rp_locks = c_p_locks;
+ erts_queue_messages(rp, rp_locks, first, last, len, c_p->common.id);
+ if (rp->common.id == c_p->common.id)
+ rp_locks &= ~c_p_locks;
+ if (rp_locks)
+ erts_proc_unlock(rp, rp_locks);
+ reds += len;
+ } else {
+ erts_cleanup_messages(first);
+ }
+ reds += 1;
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_TRACE);
+ msgq = msgq->next;
+ } while (msgq);
+
+ last_msgq = &c_p->trace_msg_q;
+
+ while (*last_msgq) {
+ msgq = *last_msgq;
+ if (msgq->len == 0) {
+ *last_msgq = msgq->next;
+ erts_free(ERTS_ALC_T_TRACE_MSG_QUEUE, msgq);
+ } else {
+ last_msgq = &msgq->next;
+ }
+ }
+
+error:
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE);
+
+ return reds;
+}
+
+
int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
ErlNifEnv* msg_env, ERL_NIF_TERM msg)
{
struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)msg_env;
ErtsProcLocks rp_locks = 0;
+ ErtsProcLocks lc_locks = 0;
Process* rp;
Process* c_p;
- ErlHeapFragment* frags;
+ ErtsMessage *mp;
Eterm receiver = to_pid->pid;
- int flush_me = 0;
- int scheduler = erts_get_scheduler_id() != 0;
+ int scheduler;
- if (env != NULL) {
- c_p = env->proc;
- if (receiver == c_p->common.id) {
- rp_locks = ERTS_PROC_LOCK_MAIN;
- flush_me = 1;
- }
+ execution_state(env, &c_p, &scheduler);
+
+
+ if (scheduler > 0) { /* Normal scheduler */
+ rp = erts_proc_lookup(receiver);
+ if (!rp)
+ return 0;
}
else {
-#ifdef ERTS_SMP
- c_p = NULL;
-#else
- erl_exit(ERTS_ABORT_EXIT,"enif_send: env==NULL on non-SMP VM");
-#endif
- }
-
- rp = (scheduler
- ? erts_proc_lookup(receiver)
- : erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
- receiver, rp_locks, ERTS_P2P_FLG_SMP_INC_REFC));
- if (rp == NULL) {
- ASSERT(env == NULL || receiver != c_p->common.id);
- return 0;
+ if (c_p) {
+ ASSERT(scheduler < 0); /* Dirty scheduler */
+ if (ERTS_PROC_IS_EXITING(c_p))
+ return 0;
+
+ if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ }
+ }
+
+ rp = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
+ receiver, rp_locks,
+ ERTS_P2P_FLG_INC_REFC);
+ if (!rp) {
+ if (c_p && (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC))
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ return 0;
+ }
}
- flush_env(msg_env);
- frags = menv->env.heap_frag;
- ASSERT(frags == MBUF(&menv->phony_proc));
- if (frags != NULL) {
- /* Move all offheap's from phony proc to the first fragment.
- Quick and dirty, but erts_move_msg_mbuf_to_heap doesn't care. */
- ASSERT(!is_offheap(&frags->off_heap));
- frags->off_heap = MSO(&menv->phony_proc);
- clear_offheap(&MSO(&menv->phony_proc));
- menv->env.heap_frag = NULL;
- MBUF(&menv->phony_proc) = NULL;
+
+ if (c_p == rp)
+ rp_locks = ERTS_PROC_LOCK_MAIN;
+
+ if (menv) {
+ flush_env(msg_env);
+ mp = erts_alloc_message(0, NULL);
+ mp->data.heap_frag = menv->env.heap_frag;
+ ASSERT(mp->data.heap_frag == MBUF(&menv->phony_proc));
+ if (mp->data.heap_frag != NULL) {
+ /* Move all off-heap terms from the phony proc to the first fragment.
+ Quick and dirty... */
+ ASSERT(!is_offheap(&mp->data.heap_frag->off_heap));
+ mp->data.heap_frag->off_heap = MSO(&menv->phony_proc);
+ clear_offheap(&MSO(&menv->phony_proc));
+ menv->env.heap_frag = NULL;
+ MBUF(&menv->phony_proc) = NULL;
+ }
+ } else {
+ erts_literal_area_t litarea;
+ ErlOffHeap *ohp;
+ Eterm *hp;
+ Uint sz;
+ INITIALIZE_LITERAL_PURGE_AREA(litarea);
+ sz = size_object_litopt(msg, &litarea);
+ if (c_p && !env->tracee) {
+ full_flush_env(env);
+ mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
+ full_cache_env(env);
+ }
+ else {
+ erts_aint_t state = erts_atomic32_read_nob(&rp->state);
+ if (state & ERTS_PSFLG_OFF_HEAP_MSGQ) {
+ mp = erts_alloc_message(sz, &hp);
+ ohp = sz == 0 ? NULL : &mp->hfrag.off_heap;
+ }
+ else {
+ ErlHeapFragment *bp = new_message_buffer(sz);
+ mp = erts_alloc_message(0, NULL);
+ mp->data.heap_frag = bp;
+ hp = bp->mem;
+ ohp = &bp->off_heap;
+ }
+ }
+ msg = copy_struct_litopt(msg, sz, &hp, ohp, &litarea);
}
- ASSERT(!is_offheap(&MSO(&menv->phony_proc)));
- if (flush_me) {
- flush_env(env); /* Needed for ERTS_HOLE_CHECK */
+ ERL_MESSAGE_TERM(mp) = msg;
+
+ if (!env || !env->tracee) {
+
+ if (c_p && IS_TRACED_FL(c_p, F_TRACE_SEND)) {
+ full_flush_env(env);
+ trace_send(c_p, receiver, msg);
+ full_cache_env(env);
+ }
}
- erts_queue_message(rp, &rp_locks, frags, msg, am_undefined
-#ifdef USE_VM_PROBES
- , NIL
+ else {
+ /* This clause is taken when the NIF is called in the context
+ of a traced process. We do not know which locks we have, so
+ we have to do a trylock, and if that fails we enqueue the
+ message in a special trace message output queue of the
+ tracee. */
+ ErlTraceMessageQueue *msgq;
+ Process *t_p = env->tracee;
+
+
+ erts_proc_lock(t_p, ERTS_PROC_LOCK_TRACE);
+
+ msgq = t_p->trace_msg_q;
+
+ while (msgq != NULL) {
+ if (msgq->receiver == receiver) {
+ break;
+ }
+ msgq = msgq->next;
+ }
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ lc_locks = erts_proc_lc_my_proc_locks(rp);
+ rp_locks |= lc_locks;
#endif
- );
+ if (ERTS_FORCE_ENIF_SEND_DELAY() || msgq ||
+ rp_locks & ERTS_PROC_LOCK_MSGQ ||
+ erts_proc_trylock(rp, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
+
+ if (!msgq) {
+ msgq = erts_alloc(ERTS_ALC_T_TRACE_MSG_QUEUE,
+ sizeof(ErlTraceMessageQueue));
+ msgq->receiver = receiver;
+ msgq->first = mp;
+ msgq->last = &mp->next;
+ msgq->len = 1;
+
+ /* Insert in linked list */
+ msgq->next = t_p->trace_msg_q;
+ t_p->trace_msg_q = msgq;
+
+ erts_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
+
+ erts_schedule_flush_trace_messages(t_p, 0);
+ } else {
+ msgq->len++;
+ *msgq->last = mp;
+ msgq->last = &mp->next;
+ erts_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
+ }
+ goto done;
+ } else {
+ erts_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
+ rp_locks &= ~ERTS_PROC_LOCK_TRACE;
+ rp_locks |= ERTS_PROC_LOCK_MSGQ;
+ }
+ }
+
+ erts_queue_message(rp, rp_locks, mp, msg,
+ c_p ? c_p->common.id : am_undefined);
+
+done:
if (c_p == rp)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
- if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
- if (!scheduler)
- erts_smp_proc_dec_refc(rp);
- if (flush_me) {
- cache_env(env);
- }
+ if (rp_locks & ~lc_locks)
+ erts_proc_unlock(rp, rp_locks & ~lc_locks);
+ if (c_p && (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC))
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ if (scheduler <= 0)
+ erts_proc_dec_refc(rp);
+
return 1;
}
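+
+/*
+ * Illustrative usage (not from the original commit): sending from a
+ * NIF, or from a non-scheduler thread with env == NULL:
+ *
+ *   ErlNifEnv* menv = enif_alloc_env();
+ *   ERL_NIF_TERM msg = enif_make_atom(menv, "ping");
+ *   if (!enif_send(env, &pid, menv, msg))
+ *       ... receiver is not alive ...
+ *   enif_free_env(menv);
+ */
+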
+int
+enif_port_command(ErlNifEnv *env, const ErlNifPort* to_port,
+ ErlNifEnv *msg_env, ERL_NIF_TERM msg)
+{
+ int iflags = (erts_port_synchronous_ops
+ ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
+ : ERTS_PORT_SFLGS_INVALID_LOOKUP);
+ int scheduler;
+ Process *c_p;
+ Port *prt;
+ int res;
+
+ if (!env)
+ erts_exit(ERTS_ABORT_EXIT, "enif_port_command: env == NULL");
+
+ execution_state(env, &c_p, &scheduler);
+
+ if (!c_p)
+ c_p = env->proc;
+
+ if (scheduler > 0)
+ prt = erts_port_lookup(to_port->port_id, iflags);
+ else {
+ if (ERTS_PROC_IS_EXITING(c_p))
+ return 0;
+ prt = erts_thr_port_lookup(to_port->port_id, iflags);
+ }
+
+ if (!prt)
+ res = 0;
+ else
+ res = erts_port_output_async(prt, c_p->common.id, msg);
+
+ if (scheduler <= 0)
+ erts_port_dec_refc(prt);
+
+ return res;
+}
+
+/*
+ * env must be the caller's environment in a scheduler or NULL in a
+ * non-scheduler thread.
+ * name must be an atom - anything else will just waste time.
+ */
+static Eterm call_whereis(ErlNifEnv *env, Eterm name)
+{
+ Process *c_p;
+ Eterm res;
+ int scheduler;
+
+ execution_state(env, &c_p, &scheduler);
+ ASSERT((c_p && scheduler) || (!c_p && !scheduler));
+
+ if (scheduler < 0) {
+ /* dirty scheduler */
+ if (ERTS_PROC_IS_EXITING(c_p))
+ return 0;
+
+ if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)
+ c_p = NULL; /* as we don't have main lock */
+ }
+
+
+ if (c_p) {
+ /* main lock may be released below and c_p->htop updated by others */
+ flush_env(env);
+ }
+ res = erts_whereis_name_to_id(c_p, name);
+ if (c_p)
+ cache_env(env);
+
+ return res;
+}
+
+int enif_whereis_pid(ErlNifEnv *env, ERL_NIF_TERM name, ErlNifPid *pid)
+{
+ Eterm res;
+
+ if (is_not_atom(name))
+ return 0;
+
+ res = call_whereis(env, name);
+ /* enif_get_local_ functions check the type */
+ return enif_get_local_pid(env, res, pid);
+}
+
+int enif_whereis_port(ErlNifEnv *env, ERL_NIF_TERM name, ErlNifPort *port)
+{
+ Eterm res;
+
+ if (is_not_atom(name))
+ return 0;
+
+ res = call_whereis(env, name);
+ /* enif_get_local_ functions check the type */
+ return enif_get_local_port(env, res, port);
+}
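+
+/*
+ * Illustrative usage (not from the original commit); "logger" is a
+ * hypothetical registered name:
+ *
+ *   ErlNifPid pid;
+ *   if (enif_whereis_pid(env, enif_make_atom(env, "logger"), &pid))
+ *       enif_send(env, &pid, msg_env, msg);
+ */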
+
ERL_NIF_TERM enif_make_copy(ErlNifEnv* dst_env, ERL_NIF_TERM src_term)
{
Uint sz;
Eterm* hp;
+ /*
+ * No preserved sharing allowed as long as literals are also preserved.
+ * A process independent environment cannot be reached by purge.
+ */
sz = size_object(src_term);
hp = alloc_heap(dst_env, sz);
return copy_struct(src_term, sz, &hp, &MSO(dst_env->proc));
@@ -392,12 +938,28 @@ static int is_offheap(const ErlOffHeap* oh)
ErlNifPid* enif_self(ErlNifEnv* caller_env, ErlNifPid* pid)
{
+ if (caller_env->proc->common.id == ERTS_INVALID_PID)
+ return NULL;
pid->pid = caller_env->proc->common.id;
return pid;
}
+
int enif_get_local_pid(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifPid* pid)
{
- return is_internal_pid(term) ? (pid->pid=term, 1) : 0;
+ if (is_internal_pid(term)) {
+ pid->pid=term;
+ return 1;
+ }
+ return 0;
+}
+
+int enif_get_local_port(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifPort* port)
+{
+ if (is_internal_port(term)) {
+ port->port_id=term;
+ return 1;
+ }
+ return 0;
}
int enif_is_atom(ErlNifEnv* env, ERL_NIF_TERM term)
@@ -447,7 +1009,7 @@ int enif_is_list(ErlNifEnv* env, ERL_NIF_TERM term)
int enif_is_exception(ErlNifEnv* env, ERL_NIF_TERM term)
{
- return term == THE_NON_VALUE;
+ return env->exception_thrown && term == THE_NON_VALUE;
}
int enif_is_number(ErlNifEnv* env, ERL_NIF_TERM term)
@@ -472,6 +1034,16 @@ int enif_inspect_binary(ErlNifEnv* env, Eterm bin_term, ErlNifBinary* bin)
struct enif_tmp_obj_t* tmp;
byte* raw_ptr;
}u;
+
+ if (is_binary(bin_term)) {
+ ProcBin *pb = (ProcBin*) binary_val(bin_term);
+ if (pb->thing_word == HEADER_SUB_BIN) {
+ ErlSubBin* sb = (ErlSubBin*) pb;
+ pb = (ProcBin*) binary_val(sb->orig);
+ }
+ if (pb->thing_word == HEADER_PROC_BIN && pb->flags)
+ erts_emasculate_writable_binary(pb);
+ }
u.tmp = NULL;
bin->data = erts_get_aligned_binary_bytes_extra(bin_term, &u.raw_ptr, allocator,
sizeof(struct enif_tmp_obj_t));
@@ -487,7 +1059,7 @@ int enif_inspect_binary(ErlNifEnv* env, Eterm bin_term, ErlNifBinary* bin)
bin->bin_term = bin_term;
bin->size = binary_size(bin_term);
bin->ref_bin = NULL;
- ADD_READONLY_CHECK(env, bin->data, bin->size);
+ ADD_READONLY_CHECK(env, bin->data, bin->size);
return 1;
}
@@ -539,9 +1111,6 @@ int enif_alloc_binary(size_t size, ErlNifBinary* bin)
if (refbin == NULL) {
return 0; /* The NIF must take action */
}
- refbin->flags = BIN_FLAG_DRV; /* BUGBUG: Flag? */
- erts_refc_init(&refbin->refc, 1);
- refbin->orig_size = (SWord) size;
bin->size = size;
bin->data = (unsigned char*) refbin->orig_bytes;
@@ -561,7 +1130,6 @@ int enif_realloc_binary(ErlNifBinary* bin, size_t size)
if (!newbin) {
return 0;
}
- newbin->orig_size = size;
bin->ref_bin = newbin;
bin->data = (unsigned char*) newbin->orig_bytes;
bin->size = size;
@@ -581,9 +1149,7 @@ void enif_release_binary(ErlNifBinary* bin)
if (bin->ref_bin != NULL) {
Binary* refbin = bin->ref_bin;
ASSERT(bin->bin_term == THE_NON_VALUE);
- if (erts_refc_dectest(&refbin->refc, 0) == 0) {
- erts_bin_free(refbin);
- }
+ erts_bin_release(refbin);
}
#ifdef DEBUG
bin->data = NULL;
@@ -601,6 +1167,68 @@ unsigned char* enif_make_new_binary(ErlNifEnv* env, size_t size,
return binary_bytes(*termp);
}
+int enif_term_to_binary(ErlNifEnv *dst_env, ERL_NIF_TERM term,
+ ErlNifBinary *bin)
+{
+ Sint size;
+ byte *bp;
+ Binary* refbin;
+
+ size = erts_encode_ext_size(term);
+ if (!enif_alloc_binary(size, bin))
+ return 0;
+
+ refbin = bin->ref_bin;
+
+ bp = bin->data;
+
+ erts_encode_ext(term, &bp);
+
+ bin->size = bp - bin->data;
+ refbin->orig_size = bin->size;
+
+ ASSERT(bin->data + bin->size == bp);
+
+ return 1;
+}
+
+size_t enif_binary_to_term(ErlNifEnv *dst_env,
+ const unsigned char* data,
+ size_t data_sz,
+ ERL_NIF_TERM *term,
+ ErlNifBinaryToTerm opts)
+{
+ Sint size;
+ ErtsHeapFactory factory;
+ byte *bp = (byte*) data;
+
+ ERTS_CT_ASSERT(ERL_NIF_BIN2TERM_SAFE == ERTS_DIST_EXT_BTT_SAFE);
+
+ if (opts & ~ERL_NIF_BIN2TERM_SAFE) {
+ return 0;
+ }
+ if ((size = erts_decode_ext_size(bp, data_sz)) < 0)
+ return 0;
+
+ if (size > 0) {
+ flush_env(dst_env);
+ erts_factory_proc_prealloc_init(&factory, dst_env->proc, size);
+ } else {
+ erts_factory_dummy_init(&factory);
+ }
+
+ *term = erts_decode_ext(&factory, &bp, (Uint32)opts);
+
+ if (is_non_value(*term)) {
+ return 0;
+ }
+ erts_factory_close(&factory);
+ cache_env(dst_env);
+
+ ASSERT(bp > data);
+ return bp - data;
+}
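+
+/*
+ * Illustrative round trip (not from the original commit):
+ *
+ *   ErlNifBinary bin;
+ *   ERL_NIF_TERM copy;
+ *   if (enif_term_to_binary(env, term, &bin)) {
+ *       size_t read = enif_binary_to_term(env, bin.data, bin.size,
+ *                                         &copy, ERL_NIF_BIN2TERM_SAFE);
+ *       enif_release_binary(&bin);
+ *       ... read == 0 means the decode failed ...
+ *   }
+ */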
+
int enif_is_identical(Eterm lhs, Eterm rhs)
{
return EQ(lhs,rhs);
@@ -619,6 +1247,22 @@ int enif_compare(Eterm lhs, Eterm rhs)
return result;
}
+ErlNifUInt64 enif_hash(ErlNifHash type, Eterm term, ErlNifUInt64 salt)
+{
+ switch (type) {
+ case ERL_NIF_INTERNAL_HASH:
+ return make_internal_hash(term, (Uint32) salt);
+ case ERL_NIF_PHASH2:
+ /* It appears that make_hash2 doesn't always react to seasoning
+ * as well as it should. Therefore, let's make it ignore the salt
+ * value and declare salted uses of phash2 as unsupported.
+ */
+ return make_hash2(term) & ((1 << 27) - 1);
+ default:
+ return 0;
+ }
+}
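+
+/*
+ * Editor's note: the mask above keeps ERL_NIF_PHASH2 results within
+ * phash2's default range of 2^27. Illustrative usage:
+ *
+ *   ErlNifUInt64 h = enif_hash(ERL_NIF_PHASH2, term, 0);
+ */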
+
int enif_get_tuple(ErlNifEnv* env, Eterm tpl, int* arity, const Eterm** array)
{
Eterm* ptr;
@@ -685,7 +1329,7 @@ Eterm enif_make_binary(ErlNifEnv* env, ErlNifBinary* bin)
OH_OVERHEAD(&(MSO(env->proc)), pb->size / sizeof(Eterm));
bin_term = make_binary(pb);
- if (erts_refc_read(&bptr->refc, 1) == 1) {
+ if (erts_refc_read(&bptr->intern.refc, 1) == 1) {
/* Total ownership transfer */
bin->ref_bin = NULL;
bin->bin_term = bin_term;
@@ -707,7 +1351,7 @@ Eterm enif_make_sub_binary(ErlNifEnv* env, ERL_NIF_TERM bin_term,
Eterm orig;
Uint offset, bit_offset, bit_size;
#ifdef DEBUG
- unsigned src_size;
+ size_t src_size;
ASSERT(is_binary(bin_term));
src_size = binary_size(bin_term);
@@ -730,7 +1374,21 @@ Eterm enif_make_sub_binary(ErlNifEnv* env, ERL_NIF_TERM bin_term,
Eterm enif_make_badarg(ErlNifEnv* env)
{
- BIF_ERROR(env->proc, BADARG);
+ return enif_raise_exception(env, am_badarg);
+}
+
+Eterm enif_raise_exception(ErlNifEnv* env, ERL_NIF_TERM reason)
+{
+ env->exception_thrown = 1;
+ env->proc->fvalue = reason;
+ BIF_ERROR(env->proc, EXC_ERROR);
+}
+
+int enif_has_pending_exception(ErlNifEnv* env, ERL_NIF_TERM* reason)
+{
+ if (env->exception_thrown && reason != NULL)
+ *reason = env->proc->fvalue;
+ return env->exception_thrown;
}
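+
+/*
+ * Illustrative usage (not from the original commit): raising and then
+ * detecting an exception within the same NIF call:
+ *
+ *   ERL_NIF_TERM res =
+ *       enif_raise_exception(env, enif_make_atom(env, "enomem"));
+ *   if (enif_has_pending_exception(env, NULL))
+ *       return res;    res is THE_NON_VALUE at this point
+ */
+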
int enif_get_atom(ErlNifEnv* env, Eterm atom, char* buf, unsigned len,
@@ -875,8 +1533,13 @@ int enif_get_list_cell(ErlNifEnv* env, Eterm term, Eterm* head, Eterm* tail)
int enif_get_list_length(ErlNifEnv* env, Eterm term, unsigned* len)
{
- if (is_not_list(term) && is_not_nil(term)) return 0;
- *len = erts_list_length(term);
+ Sint i;
+ Uint u;
+
+ if ((i = erts_list_length(term)) < 0) return 0;
+ u = (Uint)i;
+ if ((unsigned)u != u) return 0;
+ *len = u;
return 1;
}
@@ -952,8 +1615,12 @@ ERL_NIF_TERM enif_make_uint64(ErlNifEnv* env, ErlNifUInt64 i)
ERL_NIF_TERM enif_make_double(ErlNifEnv* env, double d)
{
- Eterm* hp = alloc_heap(env,FLOAT_SIZE_OBJECT);
+ Eterm* hp;
FloatDef f;
+
+ if (!erts_isfinite(d))
+ return enif_make_badarg(env);
+ hp = alloc_heap(env,FLOAT_SIZE_OBJECT);
f.fd = d;
PUT_DOUBLE(f, hp);
return make_float(hp);
@@ -966,6 +1633,8 @@ ERL_NIF_TERM enif_make_atom(ErlNifEnv* env, const char* name)
ERL_NIF_TERM enif_make_atom_len(ErlNifEnv* env, const char* name, size_t len)
{
+ if (len > MAX_ATOM_CHARACTERS)
+ return enif_make_badarg(env);
return erts_atom_put((byte*)name, len, ERTS_ATOM_ENC_LATIN1, 1);
}
@@ -979,11 +1648,16 @@ int enif_make_existing_atom_len(ErlNifEnv* env, const char* name, size_t len,
ERL_NIF_TERM* atom, ErlNifCharEncoding encoding)
{
ASSERT(encoding == ERL_NIF_LATIN1);
+ if (len > MAX_ATOM_CHARACTERS)
+ return 0;
return erts_atom_get(name, len, atom, ERTS_ATOM_ENC_LATIN1);
}
ERL_NIF_TERM enif_make_tuple(ErlNifEnv* env, unsigned cnt, ...)
{
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ int nr = 0;
+#endif
Eterm* hp = alloc_heap(env,cnt+1);
Eterm ret = make_tuple(hp);
va_list ap;
@@ -991,7 +1665,9 @@ ERL_NIF_TERM enif_make_tuple(ErlNifEnv* env, unsigned cnt, ...)
*hp++ = make_arityval(cnt);
va_start(ap,cnt);
while (cnt--) {
- *hp++ = va_arg(ap,Eterm);
+ Eterm elem = va_arg(ap,Eterm);
+ ASSERT_IN_ENV(env, elem, ++nr, "tuple");
+ *hp++ = elem;
}
va_end(ap);
return ret;
@@ -999,12 +1675,16 @@ ERL_NIF_TERM enif_make_tuple(ErlNifEnv* env, unsigned cnt, ...)
ERL_NIF_TERM enif_make_tuple_from_array(ErlNifEnv* env, const ERL_NIF_TERM arr[], unsigned cnt)
{
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ int nr = 0;
+#endif
Eterm* hp = alloc_heap(env,cnt+1);
Eterm ret = make_tuple(hp);
const Eterm* src = arr;
*hp++ = make_arityval(cnt);
while (cnt--) {
+ ASSERT_IN_ENV(env, *src, ++nr, "tuple");
*hp++ = *src++;
}
return ret;
@@ -1015,6 +1695,8 @@ ERL_NIF_TERM enif_make_list_cell(ErlNifEnv* env, Eterm car, Eterm cdr)
Eterm* hp = alloc_heap(env,2);
Eterm ret = make_list(hp);
+ ASSERT_IN_ENV(env, car, 0, "head of list cell");
+ ASSERT_IN_ENV(env, cdr, 0, "tail of list cell");
CAR(hp) = car;
CDR(hp) = cdr;
return ret;
@@ -1026,6 +1708,9 @@ ERL_NIF_TERM enif_make_list(ErlNifEnv* env, unsigned cnt, ...)
return NIL;
}
else {
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ int nr = 0;
+#endif
Eterm* hp = alloc_heap(env,cnt*2);
Eterm ret = make_list(hp);
Eterm* last = &ret;
@@ -1033,8 +1718,10 @@ ERL_NIF_TERM enif_make_list(ErlNifEnv* env, unsigned cnt, ...)
va_start(ap,cnt);
while (cnt--) {
+ Eterm term = va_arg(ap,Eterm);
*last = make_list(hp);
- *hp = va_arg(ap,Eterm);
+ ASSERT_IN_ENV(env, term, ++nr, "list");
+ *hp = term;
last = ++hp;
++hp;
}
@@ -1046,14 +1733,19 @@ ERL_NIF_TERM enif_make_list(ErlNifEnv* env, unsigned cnt, ...)
ERL_NIF_TERM enif_make_list_from_array(ErlNifEnv* env, const ERL_NIF_TERM arr[], unsigned cnt)
{
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ int nr = 0;
+#endif
Eterm* hp = alloc_heap(env,cnt*2);
Eterm ret = make_list(hp);
Eterm* last = &ret;
const Eterm* src = arr;
while (cnt--) {
+ Eterm term = *src++;
*last = make_list(hp);
- *hp = *src++;
+ ASSERT_IN_ENV(env, term, ++nr, "list");
+ *hp = term;
last = ++hp;
++hp;
}
@@ -1077,7 +1769,7 @@ ERL_NIF_TERM enif_make_string_len(ErlNifEnv* env, const char* string,
ERL_NIF_TERM enif_make_ref(ErlNifEnv* env)
{
- Eterm* hp = alloc_heap(env, REF_THING_SIZE);
+ Eterm* hp = alloc_heap(env, ERTS_REF_THING_SIZE);
return erts_make_ref_in_buffer(hp);
}
@@ -1086,13 +1778,9 @@ void enif_system_info(ErlNifSysInfo *sip, size_t si_size)
driver_system_info(sip, si_size);
}
-int enif_make_reverse_list(ErlNifEnv* env, ERL_NIF_TERM term, ERL_NIF_TERM *list) {
- Eterm *listptr, ret = NIL, *hp;
-
- if (is_nil(term)) {
- *list = term;
- return 1;
- }
+int enif_make_reverse_list(ErlNifEnv* env, ERL_NIF_TERM term, ERL_NIF_TERM *list)
+{
+ Eterm *listptr, ret, *hp;
ret = NIL;
@@ -1109,6 +1797,106 @@ int enif_make_reverse_list(ErlNifEnv* env, ERL_NIF_TERM term, ERL_NIF_TERM *list
return 1;
}
+int enif_is_current_process_alive(ErlNifEnv* env)
+{
+ Process *c_p;
+ int scheduler;
+
+ execution_state(env, &c_p, &scheduler);
+
+ if (!c_p)
+ erts_exit(ERTS_ABORT_EXIT,
+ "enif_is_current_process_alive: "
+ "Invalid environment");
+
+ if (!scheduler)
+ erts_exit(ERTS_ABORT_EXIT, "enif_is_current_process_alive: "
+ "called from non-scheduler thread");
+
+ return !ERTS_PROC_IS_EXITING(c_p);
+}
+
+int enif_is_process_alive(ErlNifEnv* env, ErlNifPid *proc)
+{
+ int scheduler;
+
+ execution_state(env, NULL, &scheduler);
+
+ if (scheduler > 0)
+ return !!erts_proc_lookup(proc->pid);
+ else {
+ Process* rp = erts_pid2proc_opt(NULL, 0, proc->pid, 0,
+ ERTS_P2P_FLG_INC_REFC);
+ if (rp)
+ erts_proc_dec_refc(rp);
+ return !!rp;
+ }
+}
+
+int enif_is_port_alive(ErlNifEnv *env, ErlNifPort *port)
+{
+ int scheduler;
+ Uint32 iflags = (erts_port_synchronous_ops
+ ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
+ : ERTS_PORT_SFLGS_INVALID_LOOKUP);
+
+ execution_state(env, NULL, &scheduler);
+
+ if (scheduler > 0)
+ return !!erts_port_lookup(port->port_id, iflags);
+ else {
+ Port *prt = erts_thr_port_lookup(port->port_id, iflags);
+ if (prt)
+ erts_port_dec_refc(prt);
+ return !!prt;
+ }
+}
+
+ERL_NIF_TERM
+enif_now_time(ErlNifEnv *env)
+{
+ Uint mega, sec, micro;
+ Eterm *hp;
+ get_now(&mega, &sec, &micro);
+ hp = alloc_heap(env, 4);
+ return TUPLE3(hp, make_small(mega), make_small(sec), make_small(micro));
+}
+
+ERL_NIF_TERM
+enif_cpu_time(ErlNifEnv *env)
+{
+#ifdef HAVE_ERTS_NOW_CPU
+ Uint mega, sec, micro;
+ Eterm *hp;
+ erts_get_now_cpu(&mega, &sec, &micro);
+ hp = alloc_heap(env, 4);
+ return TUPLE3(hp, make_small(mega), make_small(sec), make_small(micro));
+#else
+ return enif_make_badarg(env);
+#endif
+}
+
+ERL_NIF_TERM
+enif_make_unique_integer(ErlNifEnv *env, ErlNifUniqueInteger properties)
+{
+ int monotonic = properties & ERL_NIF_UNIQUE_MONOTONIC;
+ int positive = properties & ERL_NIF_UNIQUE_POSITIVE;
+ Eterm *hp;
+ Uint hsz;
+
+ if (monotonic) {
+ Sint64 raw_unique = erts_raw_get_unique_monotonic_integer();
+ hsz = erts_raw_unique_monotonic_integer_heap_size(raw_unique, positive);
+ hp = alloc_heap(env, hsz);
+ return erts_raw_make_unique_monotonic_integer_value(&hp, raw_unique, positive);
+ } else {
+ Uint64 raw_unique[ERTS_UNIQUE_INT_RAW_VALUES];
+ erts_raw_get_unique_integer(raw_unique);
+ hsz = erts_raw_unique_integer_heap_size(raw_unique, positive);
+ hp = alloc_heap(env, hsz);
+ return erts_raw_make_unique_integer(&hp, raw_unique, positive);
+ }
+}
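+
+/*
+ * Illustrative usage (not from the original commit): a positive,
+ * strictly increasing integer, like
+ * erlang:unique_integer([monotonic, positive]):
+ *
+ *   ERL_NIF_TERM i = enif_make_unique_integer(env,
+ *       ERL_NIF_UNIQUE_MONOTONIC | ERL_NIF_UNIQUE_POSITIVE);
+ */
+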
ErlNifMutex* enif_mutex_create(char *name) { return erl_drv_mutex_create(name); }
void enif_mutex_destroy(ErlNifMutex *mtx) { erl_drv_mutex_destroy(mtx); }
@@ -1142,6 +1930,27 @@ ErlNifTid enif_thread_self(void) { return erl_drv_thread_self(); }
int enif_equal_tids(ErlNifTid tid1, ErlNifTid tid2) { return erl_drv_equal_tids(tid1,tid2); }
void enif_thread_exit(void *resp) { erl_drv_thread_exit(resp); }
int enif_thread_join(ErlNifTid tid, void **respp) { return erl_drv_thread_join(tid,respp); }
+int enif_getenv(const char *key, char *value, size_t *value_size) { return erl_drv_getenv(key, value, value_size); }
+
+ErlNifTime enif_monotonic_time(ErlNifTimeUnit time_unit)
+{
+ return (ErlNifTime) erts_napi_monotonic_time((int) time_unit);
+}
+
+ErlNifTime enif_time_offset(ErlNifTimeUnit time_unit)
+{
+ return (ErlNifTime) erts_napi_time_offset((int) time_unit);
+}
+
+ErlNifTime
+enif_convert_time_unit(ErlNifTime val,
+ ErlNifTimeUnit from,
+ ErlNifTimeUnit to)
+{
+ return (ErlNifTime) erts_napi_convert_time_unit((ErtsMonotonicTime) val,
+ (int) from,
+ (int) to);
+}
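+
+/*
+ * Illustrative usage (not from the original commit): timing a piece
+ * of work in microseconds:
+ *
+ *   ErlNifTime t0 = enif_monotonic_time(ERL_NIF_USEC);
+ *   ... work ...
+ *   ErlNifTime us = enif_monotonic_time(ERL_NIF_USEC) - t0;
+ */
+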
int enif_fprintf(void* filep, const char* format, ...)
{
@@ -1153,38 +1962,23 @@ int enif_fprintf(void* filep, const char* format, ...)
return ret;
}
+int enif_snprintf(char *buffer, size_t size, const char* format, ...)
+{
+ int ret;
+ va_list arglist;
+ va_start(arglist, format);
+ ret = erts_vsnprintf(buffer, size, format, arglist);
+ va_end(arglist);
+ return ret;
+}
+
/***********************************************************
** Memory managed (GC'ed) "resource" objects **
***********************************************************/
-
-struct enif_resource_type_t
-{
- struct enif_resource_type_t* next; /* list of all resource types */
- struct enif_resource_type_t* prev;
- struct erl_module_nif* owner; /* that created this type and thus implements the destructor*/
- ErlNifResourceDtor* dtor; /* user destructor function */
- erts_refc_t refc; /* num of resources of this type (HOTSPOT warning)
- +1 for active erl_module_nif */
- Eterm module;
- Eterm name;
-};
-
/* dummy node in circular list */
struct enif_resource_type_t resource_type_list;
-typedef struct enif_resource_t
-{
- struct enif_resource_type_t* type;
-#ifdef DEBUG
- erts_refc_t nif_refc;
-#endif
- char data[1];
-}ErlNifResource;
-
-#define SIZEOF_ErlNifResource(SIZE) (offsetof(ErlNifResource,data) + (SIZE))
-#define DATA_TO_RESOURCE(PTR) ((ErlNifResource*)((char*)(PTR) - offsetof(ErlNifResource,data)))
-
static ErlNifResourceType* find_resource_type(Eterm module, Eterm name)
{
ErlNifResourceType* type;
@@ -1209,11 +2003,11 @@ static void close_lib(struct erl_module_nif* lib)
ASSERT(lib->handle != NULL);
ASSERT(erts_refc_read(&lib->rt_dtor_cnt,0) == 0);
- if (lib->entry != NULL && lib->entry->unload != NULL) {
- ErlNifEnv env;
- pre_nif_noproc(&env, lib);
- lib->entry->unload(&env, lib->priv_data);
- post_nif_noproc(&env);
+ if (lib->entry.unload != NULL) {
+ struct enif_msg_environment_t msg_env;
+ pre_nif_noproc(&msg_env, lib, NULL);
+ lib->entry.unload(&msg_env.env, lib->priv_data);
+ post_nif_noproc(&msg_env);
}
if (!erts_is_static_nif(lib->handle))
erts_sys_ddll_close(lib->handle);
@@ -1246,24 +2040,23 @@ struct opened_resource_type
ErlNifResourceFlags op;
ErlNifResourceType* type;
- ErlNifResourceDtor* new_dtor;
+ ErlNifResourceTypeInit new_callbacks;
};
static struct opened_resource_type* opened_rt_list = NULL;
-ErlNifResourceType*
-enif_open_resource_type(ErlNifEnv* env,
- const char* module_str,
- const char* name_str,
- ErlNifResourceDtor* dtor,
- ErlNifResourceFlags flags,
- ErlNifResourceFlags* tried)
+static
+ErlNifResourceType* open_resource_type(ErlNifEnv* env,
+ const char* name_str,
+ const ErlNifResourceTypeInit* init,
+ ErlNifResourceFlags flags,
+ ErlNifResourceFlags* tried,
+ size_t sizeof_init)
{
ErlNifResourceType* type = NULL;
ErlNifResourceFlags op = flags;
Eterm module_am, name_am;
- ASSERT(erts_smp_thr_progress_is_blocking());
- ASSERT(module_str == NULL); /* for now... */
+ ASSERT(erts_thr_progress_is_blocking());
module_am = make_atom(env->mod_nif->mod->module);
name_am = enif_make_atom(env, name_str);
@@ -1297,7 +2090,9 @@ enif_open_resource_type(ErlNifEnv* env,
sizeof(struct opened_resource_type));
ort->op = op;
ort->type = type;
- ort->new_dtor = dtor;
+ sys_memzero(&ort->new_callbacks, sizeof(ErlNifResourceTypeInit));
+ ASSERT(sizeof_init > 0 && sizeof_init <= sizeof(ErlNifResourceTypeInit));
+ sys_memcpy(&ort->new_callbacks, init, sizeof_init);
ort->next = opened_rt_list;
opened_rt_list = ort;
}
@@ -1307,6 +2102,31 @@ enif_open_resource_type(ErlNifEnv* env,
return type;
}
+ErlNifResourceType*
+enif_open_resource_type(ErlNifEnv* env,
+ const char* module_str,
+ const char* name_str,
+ ErlNifResourceDtor* dtor,
+ ErlNifResourceFlags flags,
+ ErlNifResourceFlags* tried)
+{
+ ErlNifResourceTypeInit init = {dtor, NULL};
+ ASSERT(module_str == NULL); /* for now... */
+ return open_resource_type(env, name_str, &init, flags, tried,
+ sizeof(init));
+}
+
+ErlNifResourceType*
+enif_open_resource_type_x(ErlNifEnv* env,
+ const char* name_str,
+ const ErlNifResourceTypeInit* init,
+ ErlNifResourceFlags flags,
+ ErlNifResourceFlags* tried)
+{
+ return open_resource_type(env, name_str, init, flags, tried,
+ env->mod_nif->entry.sizeof_ErlNifResourceTypeInit);
+}
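+
+/*
+ * Illustrative usage (not from the original commit): opening a
+ * resource type with a 'down' callback so the resource can monitor
+ * processes; my_dtor and my_down are hypothetical callbacks:
+ *
+ *   ErlNifResourceTypeInit init = {my_dtor, NULL, my_down};
+ *   ErlNifResourceType* rt =
+ *       enif_open_resource_type_x(env, "my_type", &init,
+ *                                 ERL_NIF_RT_CREATE, NULL);
+ */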
+
static void commit_opened_resource_types(struct erl_module_nif* lib)
{
while (opened_rt_list) {
@@ -1325,7 +2145,9 @@ static void commit_opened_resource_types(struct erl_module_nif* lib)
}
type->owner = lib;
- type->dtor = ort->new_dtor;
+ type->dtor = ort->new_callbacks.dtor;
+ type->stop = ort->new_callbacks.stop;
+ type->down = ort->new_callbacks.down;
if (type->dtor != NULL) {
erts_refc_inc(&lib->rt_dtor_cnt, 1);
@@ -1351,18 +2173,117 @@ static void rollback_opened_resource_types(void)
}
}
+struct destroy_monitor_ctx
+{
+ ErtsResource* resource;
+ int exiting_procs;
+ int scheduler;
+};
+
+static void destroy_one_monitor(ErtsMonitor* mon, void* context)
+{
+ struct destroy_monitor_ctx* ctx = (struct destroy_monitor_ctx*) context;
+ Process* rp;
+ ErtsMonitor *rmon = NULL;
+ int is_exiting;
+
+ ASSERT(mon->type == MON_ORIGIN);
+ ASSERT(is_internal_pid(mon->u.pid));
+ ASSERT(is_internal_ref(mon->ref));
+
+ if (ctx->scheduler > 0) { /* Normal scheduler */
+ rp = erts_proc_lookup(mon->u.pid);
+ }
+ else {
+ rp = erts_proc_lookup_inc_refc(mon->u.pid);
+ }
+
+ if (!rp) {
+ is_exiting = 1;
+ }
+ if (rp) {
+ erts_proc_lock(rp, ERTS_PROC_LOCK_LINK);
+ if (ERTS_PROC_IS_EXITING(rp)) {
+ is_exiting = 1;
+ } else {
+ rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
+ ASSERT(rmon);
+ is_exiting = 0;
+ }
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ if (ctx->scheduler <= 0)
+ erts_proc_dec_refc(rp);
+ }
+ if (is_exiting) {
+ ctx->resource->monitors->pending_failed_fire++;
+ }
+
+ /* ToDo: Delay destruction after monitor_locks */
+ if (rmon) {
+ ASSERT(rmon->type == MON_NIF_TARGET);
+ ASSERT(rmon->u.resource == ctx->resource);
+ erts_destroy_monitor(rmon);
+ }
+ erts_destroy_monitor(mon);
+}
+
+static void destroy_all_monitors(ErtsMonitor* monitors, ErtsResource* resource)
+{
+ struct destroy_monitor_ctx ctx;
+
+ execution_state(NULL, NULL, &ctx.scheduler);
+
+ ctx.resource = resource;
+ erts_sweep_monitors(monitors, &destroy_one_monitor, &ctx);
+}
+
-static void nif_resource_dtor(Binary* bin)
+# define NIF_RESOURCE_DTOR &nif_resource_dtor
+
+static int nif_resource_dtor(Binary* bin)
{
- ErlNifResource* resource = (ErlNifResource*) ERTS_MAGIC_BIN_DATA(bin);
+ ErtsResource* resource = (ErtsResource*) ERTS_MAGIC_BIN_UNALIGNED_DATA(bin);
ErlNifResourceType* type = resource->type;
- ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == &nif_resource_dtor);
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == NIF_RESOURCE_DTOR);
+
+ if (resource->monitors) {
+ ErtsResourceMonitors* rm = resource->monitors;
+
+ ASSERT(type->down);
+ erts_mtx_lock(&rm->lock);
+ ASSERT(erts_refc_read(&bin->intern.refc, 0) == 0);
+ if (rm->root) {
+ ASSERT(!rm->is_dying);
+ destroy_all_monitors(rm->root, resource);
+ rm->root = NULL;
+ }
+ if (rm->pending_failed_fire) {
+ /*
+ * Resource death struggle prolonged to serve exiting process(es).
+ * Destructor will be called again when last exiting process
+ * tries to fire its MON_NIF_TARGET monitor (and fails).
+ *
+ * This resource is doomed. It has no "real" references and
+ * should get not get called upon to do anything except the
+ * final destructor call.
+ *
+ * We keep refc at 0 and use a separate counter for exiting
+ * processes to avoid resource getting revived by "dec_term".
+ */
+ ASSERT(!rm->is_dying);
+ rm->is_dying = 1;
+ erts_mtx_unlock(&rm->lock);
+ return 0;
+ }
+ erts_mtx_unlock(&rm->lock);
+ erts_mtx_destroy(&rm->lock);
+ }
if (type->dtor != NULL) {
- ErlNifEnv env;
- pre_nif_noproc(&env, type->owner);
- type->dtor(&env,resource->data);
- post_nif_noproc(&env);
+ struct enif_msg_environment_t msg_env;
+ pre_nif_noproc(&msg_env, type->owner, NULL);
+ type->dtor(&msg_env.env, resource->data);
+ post_nif_noproc(&msg_env);
}
if (erts_refc_dectest(&type->refc, 0) == 0) {
ASSERT(type->next == NULL);
@@ -1371,83 +2292,203 @@ static void nif_resource_dtor(Binary* bin)
steal_resource_type(type);
erts_free(ERTS_ALC_T_NIF, type);
}
+ return 1;
}
-void* enif_alloc_resource(ErlNifResourceType* type, size_t size)
+void erts_resource_stop(ErtsResource* resource, ErlNifEvent e,
+ int is_direct_call)
{
- Binary* bin = erts_create_magic_binary(SIZEOF_ErlNifResource(size), &nif_resource_dtor);
- ErlNifResource* resource = ERTS_MAGIC_BIN_DATA(bin);
+ struct enif_msg_environment_t msg_env;
+ ASSERT(resource->type->stop);
+ pre_nif_noproc(&msg_env, resource->type->owner, NULL);
+ resource->type->stop(&msg_env.env, resource->data, e, is_direct_call);
+ post_nif_noproc(&msg_env);
+}
+
+void erts_fire_nif_monitor(ErtsResource* resource, Eterm pid, Eterm ref)
+{
+ ErtsMonitor* rmon;
+ ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
+ struct enif_msg_environment_t msg_env;
+ ErlNifPid nif_pid;
+ ErlNifMonitor nif_monitor;
+ ErtsResourceMonitors* rmp = resource->monitors;
+
+ ASSERT(rmp);
+ ASSERT(resource->type->down);
+
+ erts_mtx_lock(&rmp->lock);
+ rmon = erts_remove_monitor(&rmp->root, ref);
+ if (!rmon) {
+ int free_me = (--rmp->pending_failed_fire == 0) && rmp->is_dying;
+ ASSERT(rmp->pending_failed_fire >= 0);
+ erts_mtx_unlock(&rmp->lock);
+
+ if (free_me) {
+ ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) == 0);
+ erts_bin_free(&bin->binary);
+ }
+ return;
+ }
+ ASSERT(!rmp->is_dying);
+ if (erts_refc_inc_unless(&bin->binary.intern.refc, 0, 0) == 0) {
+ /*
+ * Racing resource destruction.
+ * To avoid a more complex refc-dance with the destructing
+ * thread we avoid calling 'down' and just silently remove the
+ * monitor. This can happen even for non-SMP builds, as
+ * destructor calls may be scheduled.
+ */
+ erts_mtx_unlock(&rmp->lock);
+ }
+ else {
+ erts_mtx_unlock(&rmp->lock);
+
+ ASSERT(rmon->u.pid == pid);
+ erts_ref_to_driver_monitor(ref, &nif_monitor);
+ nif_pid.pid = pid;
+ pre_nif_noproc(&msg_env, resource->type->owner, NULL);
+ resource->type->down(&msg_env.env, resource->data, &nif_pid, &nif_monitor);
+ post_nif_noproc(&msg_env);
+
+ erts_bin_release(&bin->binary);
+ }
+ erts_destroy_monitor(rmon);
+}
+
+void* enif_alloc_resource(ErlNifResourceType* type, size_t data_sz)
+{
+ size_t magic_sz = offsetof(ErtsResource,data);
+ Binary* bin;
+ ErtsResource* resource;
+ size_t monitors_offs;
+
+ if (type->down) {
+ /* Put ErtsResourceMonitors after user data and properly aligned */
+ monitors_offs = ((data_sz + ERTS_ALLOC_ALIGN_BYTES - 1)
+ & ~((size_t)ERTS_ALLOC_ALIGN_BYTES - 1));
+ magic_sz += monitors_offs + sizeof(ErtsResourceMonitors);
+ }
+ else {
+ ERTS_UNDEF(monitors_offs, 0);
+ magic_sz += data_sz;
+ }
+ bin = erts_create_magic_binary_x(magic_sz, NIF_RESOURCE_DTOR,
+ ERTS_ALC_T_BINARY,
+ 1); /* unaligned */
+ resource = ERTS_MAGIC_BIN_UNALIGNED_DATA(bin);
ASSERT(type->owner && type->next && type->prev); /* not allowed in load/upgrade */
resource->type = type;
- erts_refc_inc(&bin->refc, 1);
+ erts_refc_inc(&bin->intern.refc, 1);
#ifdef DEBUG
erts_refc_init(&resource->nif_refc, 1);
#endif
erts_refc_inc(&resource->type->refc, 2);
+ if (type->down) {
+ resource->monitors = (ErtsResourceMonitors*) (resource->data + monitors_offs);
+ erts_mtx_init(&resource->monitors->lock, "resource_monitors", NIL,
+ ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+ resource->monitors->root = NULL;
+ resource->monitors->pending_failed_fire = 0;
+ resource->monitors->is_dying = 0;
+ resource->monitors->user_data_sz = data_sz;
+ }
+ else {
+ resource->monitors = NULL;
+ }
return resource->data;
}
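
/*
 * Illustrative sketch of the resource API above (assumptions: the type
 * "my_rt" and struct "my_handle" are hypothetical, and the type is assumed
 * to have been opened during load()): allocate user data with
 * enif_alloc_resource(), wrap it in a term, and drop the initial reference
 * so the term alone keeps the resource alive.
 */
typedef struct { int fd; } my_handle;
static ErlNifResourceType* my_rt;

static ERL_NIF_TERM make_handle(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    my_handle* h = enif_alloc_resource(my_rt, sizeof(my_handle));
    ERL_NIF_TERM term;

    h->fd = -1;                   /* initialize the user data */
    term = enif_make_resource(env, h);
    enif_release_resource(h);     /* 'term' now holds the only reference */
    return term;
}
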
void enif_release_resource(void* obj)
{
- ErlNifResource* resource = DATA_TO_RESOURCE(obj);
- ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_DATA(resource);
+ ErtsResource* resource = DATA_TO_RESOURCE(obj);
+ ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
- ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == &nif_resource_dtor);
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == NIF_RESOURCE_DTOR);
+ ASSERT(!(resource->monitors && resource->monitors->is_dying));
#ifdef DEBUG
erts_refc_dec(&resource->nif_refc, 0);
#endif
- if (erts_refc_dectest(&bin->binary.refc, 0) == 0) {
- erts_bin_free(&bin->binary);
- }
+ erts_bin_release(&bin->binary);
}
void enif_keep_resource(void* obj)
{
- ErlNifResource* resource = DATA_TO_RESOURCE(obj);
- ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_DATA(resource);
+ ErtsResource* resource = DATA_TO_RESOURCE(obj);
+ ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
- ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == &nif_resource_dtor);
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == NIF_RESOURCE_DTOR);
+ ASSERT(!(resource->monitors && resource->monitors->is_dying));
#ifdef DEBUG
erts_refc_inc(&resource->nif_refc, 1);
#endif
- erts_refc_inc(&bin->binary.refc, 2);
+ erts_refc_inc(&bin->binary.intern.refc, 2);
+}
+
+Eterm erts_bld_resource_ref(Eterm** hpp, ErlOffHeap* oh, ErtsResource* resource)
+{
+ ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
+ ASSERT(!(resource->monitors && resource->monitors->is_dying));
+ return erts_mk_magic_ref(hpp, oh, &bin->binary);
}
ERL_NIF_TERM enif_make_resource(ErlNifEnv* env, void* obj)
{
- ErlNifResource* resource = DATA_TO_RESOURCE(obj);
- ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_DATA(resource);
- Eterm* hp = alloc_heap(env,PROC_BIN_SIZE);
- return erts_mk_magic_binary_term(&hp, &MSO(env->proc), &bin->binary);
+ ErtsResource* resource = DATA_TO_RESOURCE(obj);
+ ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
+ Eterm* hp = alloc_heap(env, ERTS_MAGIC_REF_THING_SIZE);
+ ASSERT(!(resource->monitors && resource->monitors->is_dying));
+ return erts_mk_magic_ref(&hp, &MSO(env->proc), &bin->binary);
}
ERL_NIF_TERM enif_make_resource_binary(ErlNifEnv* env, void* obj,
const void* data, size_t size)
{
- Eterm bin = enif_make_resource(env, obj);
- ProcBin* pb = (ProcBin*) binary_val(bin);
- pb->bytes = (byte*) data;
+ ErtsResource* resource = DATA_TO_RESOURCE(obj);
+ ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
+ ErlOffHeap *ohp = &MSO(env->proc);
+ Eterm* hp = alloc_heap(env,PROC_BIN_SIZE);
+ ProcBin* pb = (ProcBin *) hp;
+
+ pb->thing_word = HEADER_PROC_BIN;
pb->size = size;
- return bin;
+ pb->next = ohp->first;
+ ohp->first = (struct erl_off_heap_header*) pb;
+ pb->val = &bin->binary;
+ pb->bytes = (byte*) data;
+ pb->flags = 0;
+
+ OH_OVERHEAD(ohp, size / sizeof(Eterm));
+ erts_refc_inc(&bin->binary.intern.refc, 1);
+
+ return make_binary(hp);
}
int enif_get_resource(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifResourceType* type,
void** objp)
{
- ProcBin* pb;
Binary* mbin;
- ErlNifResource* resource;
- if (!ERTS_TERM_IS_MAGIC_BINARY(term)) {
- return 0;
+ ErtsResource* resource;
+ if (is_internal_magic_ref(term))
+ mbin = erts_magic_ref2bin(term);
+ else {
+ Eterm *hp;
+ if (!is_binary(term))
+ return 0;
+ hp = binary_val(term);
+ if (thing_subtag(*hp) != REFC_BINARY_SUBTAG)
+ return 0;
+ /*
+ if (((ProcBin *) hp)->size != 0) {
+ return 0; / * Or should we allow "resource binaries" as handles? * /
+ }
+ */
+ mbin = ((ProcBin *) hp)->val;
+ if (!(mbin->intern.flags & BIN_FLAG_MAGIC))
+ return 0;
}
- pb = (ProcBin*) binary_val(term);
- /*if (pb->size != 0) {
- return 0; / * Or should we allow "resource binaries" as handles? * /
- }*/
- mbin = pb->val;
- resource = (ErlNifResource*) ERTS_MAGIC_BIN_DATA(mbin);
- if (ERTS_MAGIC_BIN_DESTRUCTOR(mbin) != &nif_resource_dtor
+ resource = (ErtsResource*) ERTS_MAGIC_BIN_UNALIGNED_DATA(mbin);
+ if (ERTS_MAGIC_BIN_DESTRUCTOR(mbin) != NIF_RESOURCE_DTOR
|| resource->type != type) {
return 0;
}
@@ -1457,9 +2498,14 @@ int enif_get_resource(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifResourceType* typ
size_t enif_sizeof_resource(void* obj)
{
- ErlNifResource* resource = DATA_TO_RESOURCE(obj);
- Binary* bin = &ERTS_MAGIC_BIN_FROM_DATA(resource)->binary;
- return ERTS_MAGIC_BIN_DATA_SIZE(bin) - offsetof(ErlNifResource,data);
+ ErtsResource* resource = DATA_TO_RESOURCE(obj);
+ if (resource->monitors) {
+ return resource->monitors->user_data_sz;
+ }
+ else {
+ Binary* bin = &ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource)->binary;
+ return ERTS_MAGIC_BIN_UNALIGNED_DATA_SIZE(bin) - offsetof(ErtsResource,data);
+ }
}
@@ -1501,183 +2547,282 @@ void* enif_dlsym(void* handle, const char* symbol,
int enif_consume_timeslice(ErlNifEnv* env, int percent)
{
+ Process *proc;
Sint reds;
+ execution_state(env, &proc, NULL);
+
ASSERT(is_proc_bound(env) && percent >= 1 && percent <= 100);
if (percent < 1) percent = 1;
else if (percent > 100) percent = 100;
reds = ((CONTEXT_REDS+99) / 100) * percent;
ASSERT(reds > 0 && reds <= CONTEXT_REDS);
- BUMP_REDS(env->proc, reds);
- return ERTS_BIF_REDS_LEFT(env->proc) == 0;
+ BUMP_REDS(proc, reds);
+ return ERTS_BIF_REDS_LEFT(proc) == 0;
}
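
/*
 * Sketch of the yielding pattern enif_consume_timeslice() is meant for:
 * do a bounded chunk of work, report its approximate cost as a percentage
 * of a full timeslice, and reschedule via enif_schedule_nif() (defined
 * further below) once the slice is spent. The chunk itself and the 1%
 * cost estimate are placeholders.
 */
static ERL_NIF_TERM chunked_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    int done = 0;

    while (!done) {
        /* ... perform one small, bounded chunk of the job here,
         *     setting 'done' when the job is complete ... */
        done = 1;                              /* placeholder for the sketch */

        if (!done && enif_consume_timeslice(env, 1)) {
            /* timeslice exhausted: yield and continue in a fresh call */
            return enif_schedule_nif(env, "chunked_nif", 0,
                                     chunked_nif, argc, argv);
        }
    }
    return enif_make_atom(env, "ok");
}
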
-#ifdef ERTS_DIRTY_SCHEDULERS
+static ERTS_INLINE void
+nif_export_cleanup_nif_mod(NifExport *ep)
+{
+ if (erts_refc_dectest(&ep->m->rt_dtor_cnt, 0) == 0 && ep->m->mod == NULL)
+ close_lib(ep->m);
+ ep->m = NULL;
+}
-/* NIFs exports need one more item than the Export struct provides, the
- * erl_module_nif*, so the DirtyNifExport below adds that. The Export
- * member must be first in the struct.
- */
-typedef struct {
- Export exp;
- struct erl_module_nif* m;
-} DirtyNifExport;
+void
+erts_nif_export_cleanup_nif_mod(NifExport *ep)
+{
+ nif_export_cleanup_nif_mod(ep);
+}
-static void
-alloc_proc_psd(Process* proc, DirtyNifExport **ep)
+static ERTS_INLINE void
+nif_export_restore(Process *c_p, NifExport *ep, Eterm res)
{
- int i;
- if (!*ep) {
- *ep = erts_alloc(ERTS_ALC_T_PSD, sizeof(DirtyNifExport));
- sys_memset((void*) *ep, 0, sizeof(DirtyNifExport));
- for (i=0; i<ERTS_NUM_CODE_IX; i++) {
- (*ep)->exp.addressv[i] = &(*ep)->exp.code[3];
- }
- (*ep)->exp.code[3] = (BeamInstr) em_call_nif;
- }
- (void) ERTS_PROC_SET_DIRTY_SCHED_TRAP_EXPORT(proc, ERTS_PROC_LOCK_MAIN, &(*ep)->exp);
+ erts_nif_export_restore(c_p, ep, res);
+ ASSERT(ep->m);
+ nif_export_cleanup_nif_mod(ep);
}
+
+
+/*
+ * Finalize a dirty NIF call. This function is scheduled to cause the VM to
+ * switch the process off a dirty scheduler thread and back onto a regular
+ * scheduler thread, and then return the result from the dirty NIF. It also
+ * restores the original NIF MFA when necessary based on the value of
+ * ep->func set by execute_dirty_nif via init_nif_sched_data -- non-NULL
+ * means restore, NULL means do not restore.
+ */
static ERL_NIF_TERM
-execute_dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- Eterm* reg = ERTS_PROC_GET_SCHDATA(env->proc)->x_reg_array;
- ERL_NIF_TERM result, dirty_result = (ERL_NIF_TERM) reg[0];
- typedef ERL_NIF_TERM (*FinalizerFP)(ErlNifEnv*, ERL_NIF_TERM);
- FinalizerFP fp;
-#if HAVE_INT64 && SIZEOF_LONG != 8
- ASSERT(sizeof(fp) <= sizeof(ErlNifUInt64));
- enif_get_uint64(env, reg[1], (ErlNifUInt64 *) &fp);
-#else
- ASSERT(sizeof(fp) <= sizeof(unsigned long));
- enif_get_ulong(env, reg[1], (unsigned long *) &fp);
-#endif
- result = (*fp)(env, dirty_result);
- if (erts_refc_dectest(&env->mod_nif->rt_dtor_cnt, 0) == 0
- && env->mod_nif->mod == NULL)
- close_lib(env->mod_nif);
- return result;
+ Process* proc;
+ NifExport* ep;
+
+ execution_state(env, &proc, NULL);
+
+ ASSERT(argc == 1);
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(proc)));
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ nif_export_restore(proc, ep, argv[0]);
+ return argv[0];
}
-#endif /* ERTS_DIRTY_SCHEDULERS */
+/* Finalize a dirty NIF call that raised an exception. Otherwise same as
+ * the dirty_nif_finalizer() function.
+ */
+static ERL_NIF_TERM
+dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ ERL_NIF_TERM ret;
+ Process* proc;
+ NifExport* ep;
+ Eterm exception;
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+ execution_state(env, &proc, NULL);
-ERL_NIF_TERM
-enif_schedule_dirty_nif(ErlNifEnv* env, int flags,
- ERL_NIF_TERM (*fp)(ErlNifEnv*, int, const ERL_NIF_TERM[]),
- int argc, const ERL_NIF_TERM argv[])
-{
-#ifdef USE_THREADS
- erts_aint32_t state, n, a;
- Process* proc = env->proc;
- Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array;
- DirtyNifExport* ep = NULL;
- int i;
+ ASSERT(argc == 1);
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(proc)));
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ exception = argv[0]; /* argv overwritten by restore below... */
+ nif_export_cleanup_nif_mod(ep);
+ ret = enif_raise_exception(env, exception);
- int chkflgs = (flags & (ERL_NIF_DIRTY_JOB_IO_BOUND|ERL_NIF_DIRTY_JOB_CPU_BOUND));
- if (chkflgs != ERL_NIF_DIRTY_JOB_IO_BOUND && chkflgs != ERL_NIF_DIRTY_JOB_CPU_BOUND)
- return enif_make_badarg(env);
+ /* Restore orig info for error and clear nif export in handle_error() */
+ proc->freason |= EXF_RESTORE_NIF;
+ return ret;
+}
- a = erts_smp_atomic32_read_acqb(&proc->state);
- while (1) {
- n = state = a;
- /*
- * clear any current dirty flags and dirty queue indicators,
- * in case the application is shifting a job from one type
- * of dirty scheduler to the other
- */
- n &= ~(ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC
- |ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q);
- if (chkflgs == ERL_NIF_DIRTY_JOB_CPU_BOUND)
- n |= ERTS_PSFLG_DIRTY_CPU_PROC;
- else
- n |= ERTS_PSFLG_DIRTY_IO_PROC;
- a = erts_smp_atomic32_cmpxchg_mb(&proc->state, n, state);
- if (a == state)
- break;
- }
- if (!(ep = (DirtyNifExport*) ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(proc)))
- alloc_proc_psd(proc, &ep);
- ERTS_VBUMP_ALL_REDS(proc);
- ep->exp.code[2] = argc;
- for (i = 0; i < argc; i++) {
- reg[i] = (Eterm) argv[i];
- }
- proc->i = (BeamInstr*) ep->exp.addressv[0];
- ep->exp.code[4] = (BeamInstr) fp;
- ep->m = env->mod_nif;
- proc->freason = TRAP;
+/*
+ * Dirty NIF scheduling wrapper function. Schedule a dirty NIF to execute.
+ * The dirty scheduler thread type (CPU or I/O) is indicated by the
+ * 'flags' parameter.
+ */
+static ERTS_INLINE ERL_NIF_TERM
+schedule_dirty_nif(ErlNifEnv* env, int flags, NativeFunPtr fp,
+ Eterm func_name, int argc, const ERL_NIF_TERM argv[])
+{
+ Process* proc;
- erts_refc_inc(&env->mod_nif->rt_dtor_cnt, 1);
+ ASSERT(is_atom(func_name));
+ ASSERT(fp);
- return THE_NON_VALUE;
-#else
- return (*fp)(env, argc, argv);
-#endif
+ ASSERT(flags==ERL_NIF_DIRTY_JOB_IO_BOUND || flags==ERL_NIF_DIRTY_JOB_CPU_BOUND);
+
+ execution_state(env, &proc, NULL);
+
+ (void) erts_atomic32_read_bset_nob(&proc->state,
+ (ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_IO_PROC),
+ (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND
+ ? ERTS_PSFLG_DIRTY_CPU_PROC
+ : ERTS_PSFLG_DIRTY_IO_PROC));
+
+ return schedule(env, fp, NULL, proc->current->module, func_name, argc, argv);
}
-ERL_NIF_TERM
-enif_schedule_dirty_nif_finalizer(ErlNifEnv* env, ERL_NIF_TERM result,
- ERL_NIF_TERM (*fp)(ErlNifEnv*, ERL_NIF_TERM))
-{
-#ifdef USE_THREADS
- Process* proc = env->proc;
- Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array;
- DirtyNifExport* ep;
-
- erts_smp_atomic32_read_band_mb(&proc->state,
- ~(ERTS_PSFLG_DIRTY_CPU_PROC
- |ERTS_PSFLG_DIRTY_IO_PROC
- |ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q
- |ERTS_PSFLG_DIRTY_IO_PROC_IN_Q));
- if (!(ep = (DirtyNifExport*) ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(proc)))
- alloc_proc_psd(proc, &ep);
- ERTS_VBUMP_ALL_REDS(proc);
- ep->exp.code[2] = 2;
- reg[0] = (Eterm) result;
-#if HAVE_INT64 && SIZEOF_LONG != 8
- ASSERT(sizeof(fp) <= sizeof(ErlNifUInt64));
- reg[1] = (Eterm) enif_make_uint64(env, (ErlNifUInt64) fp);
-#else
- ASSERT(sizeof(fp) <= sizeof(unsigned long));
- reg[1] = (Eterm) enif_make_ulong(env, (unsigned long) fp);
-#endif
- proc->i = (BeamInstr*) ep->exp.addressv[0];
- ep->exp.code[4] = (BeamInstr) execute_dirty_nif_finalizer;
- proc->freason = TRAP;
+static ERTS_INLINE ERL_NIF_TERM
+static_schedule_dirty_nif(ErlNifEnv* env, erts_aint32_t dirty_psflg,
+ int argc, const ERL_NIF_TERM argv[])
+{
+ Process *proc;
+ NifExport *ep;
+ Eterm mod, func;
+ NativeFunPtr fp;
- return THE_NON_VALUE;
-#else
- return (*fp)(env, result);
-#endif
+ execution_state(env, &proc, NULL);
+
+ /*
+ * Called in order to schedule statically determined
+ * dirty NIF calls...
+ *
+     * Note that 'current' does not point into a NifExport
+     * structure; it only points into a structure with a
+     * similar layout (located in the code).
+ */
+
+ ep = ErtsContainerStruct(proc->current, NifExport, exp.info.mfa);
+ mod = proc->current->module;
+ func = proc->current->function;
+ fp = (NativeFunPtr) ep->func;
+
+ ASSERT(is_atom(mod) && is_atom(func));
+ ASSERT(fp);
+
+ (void) erts_atomic32_read_bset_nob(&proc->state,
+ (ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_IO_PROC),
+ dirty_psflg);
+
+ return schedule(env, fp, NULL, mod, func, argc, argv);
}
-/* A simple finalizer that just returns its result argument */
-ERL_NIF_TERM
-enif_dirty_nif_finalizer(ErlNifEnv* env, ERL_NIF_TERM result)
+static ERL_NIF_TERM
+static_schedule_dirty_io_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- return result;
+ return static_schedule_dirty_nif(env, ERTS_PSFLG_DIRTY_IO_PROC, argc, argv);
}
-int
-enif_is_on_dirty_scheduler(ErlNifEnv* env)
+static ERL_NIF_TERM
+static_schedule_dirty_cpu_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- return ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data);
+ return static_schedule_dirty_nif(env, ERTS_PSFLG_DIRTY_CPU_PROC, argc, argv);
}
-int
-enif_have_dirty_schedulers()
+
+/*
+ * NIF execution wrapper used by enif_schedule_nif() for regular NIFs. It
+ * calls the actual NIF, restores the original NIF MFA if necessary, and
+ * then returns the NIF result.
+ */
+static ERL_NIF_TERM
+execute_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
-#ifdef USE_THREADS
- return 1;
-#else
- return 0;
+ Process* proc;
+ NativeFunPtr fp;
+ NifExport* ep;
+ ERL_NIF_TERM result;
+
+ execution_state(env, &proc, NULL);
+
+ ep = ErtsContainerStruct(proc->current, NifExport, exp.info.mfa);
+    ASSERT(ep);
+ ASSERT(!env->exception_thrown);
+
+ fp = (NativeFunPtr) ep->func;
+
+#ifdef DEBUG
+ ep->func = ERTS_DBG_NIF_NOT_SCHED_MARKER;
+#endif
+
+ result = (*fp)(env, argc, argv);
+
+ ASSERT(ep == ERTS_PROC_GET_NIF_TRAP_EXPORT(proc));
+
+ if (is_value(result) || proc->freason != TRAP) {
+ /* Done (not rescheduled)... */
+ ASSERT(ep->func == ERTS_DBG_NIF_NOT_SCHED_MARKER);
+ if (!env->exception_thrown)
+ nif_export_restore(proc, ep, result);
+ else {
+ nif_export_cleanup_nif_mod(ep);
+ /*
+ * Restore orig info for error and clear nif
+ * export in handle_error()
+ */
+ proc->freason |= EXF_RESTORE_NIF;
+ }
+ }
+
+#ifdef DEBUG
+ if (ep->func == ERTS_DBG_NIF_NOT_SCHED_MARKER)
+ ep->func = NULL;
#endif
+
+ return result;
}
-#endif /* ERL_NIF_DIRTY_SCHEDULER_SUPPORT */
+ERL_NIF_TERM
+enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags,
+ ERL_NIF_TERM (*fp)(ErlNifEnv*, int, const ERL_NIF_TERM[]),
+ int argc, const ERL_NIF_TERM argv[])
+{
+ Process* proc;
+ ERL_NIF_TERM fun_name_atom, result;
+ int scheduler;
+
+ if (argc > MAX_ARG)
+ return enif_make_badarg(env);
+ fun_name_atom = enif_make_atom(env, fun_name);
+ if (enif_is_exception(env, fun_name_atom))
+ return fun_name_atom;
+
+ execution_state(env, &proc, &scheduler);
+ if (scheduler <= 0) {
+ if (scheduler == 0)
+ enif_make_badarg(env);
+ erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
+ }
+
+ if (flags == 0)
+ result = schedule(env, execute_nif, fp, proc->current->module,
+ fun_name_atom, argc, argv);
+ else if (!(flags & ~(ERL_NIF_DIRTY_JOB_IO_BOUND|ERL_NIF_DIRTY_JOB_CPU_BOUND))) {
+ result = schedule_dirty_nif(env, flags, fp, fun_name_atom, argc, argv);
+ }
+ else
+ result = enif_make_badarg(env);
+
+ if (scheduler < 0)
+ erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+
+ return result;
+}
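
/*
 * Sketch of handing a long-running call over to a dirty scheduler with
 * enif_schedule_nif() above. "heavy_work" and "heavy_entry" are
 * hypothetical names; the body may run well past a timeslice because it
 * executes on a dirty CPU scheduler thread.
 */
static ERL_NIF_TERM heavy_work(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    /* ... CPU-heavy computation, free to run long on a dirty scheduler ... */
    return enif_make_atom(env, "done");
}

static ERL_NIF_TERM heavy_entry(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    return enif_schedule_nif(env, "heavy_work", ERL_NIF_DIRTY_JOB_CPU_BOUND,
                             heavy_work, argc, argv);
}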
+
+int
+enif_thread_type(void)
+{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+
+ if (!esdp)
+ return ERL_NIF_THR_UNDEFINED;
+
+ switch (esdp->type) {
+ case ERTS_SCHED_NORMAL:
+ return ERL_NIF_THR_NORMAL_SCHEDULER;
+ case ERTS_SCHED_DIRTY_CPU:
+ return ERL_NIF_THR_DIRTY_CPU_SCHEDULER;
+ case ERTS_SCHED_DIRTY_IO:
+ return ERL_NIF_THR_DIRTY_IO_SCHEDULER;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid scheduler type");
+ return -1;
+ }
+}
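
/*
 * Sketch: library code that may block can consult enif_thread_type() first,
 * since blocking a normal scheduler thread stalls the whole emulator. The
 * branch bodies are placeholders.
 */
static void maybe_block(void)
{
    if (enif_thread_type() == ERL_NIF_THR_NORMAL_SCHEDULER) {
        /* never block here; defer to a dirty NIF or an own thread instead */
    }
    else {
        /* dirty scheduler or non-scheduler thread: blocking is tolerable */
    }
}
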
/* Maps */
@@ -1688,29 +2833,33 @@ int enif_is_map(ErlNifEnv* env, ERL_NIF_TERM term)
int enif_get_map_size(ErlNifEnv* env, ERL_NIF_TERM term, size_t *size)
{
- if (is_map(term)) {
- map_t *mp;
- mp = (map_t*)map_val(term);
- *size = map_get_size(mp);
+ if (is_flatmap(term)) {
+ flatmap_t *mp;
+ mp = (flatmap_t*)flatmap_val(term);
+ *size = flatmap_get_size(mp);
return 1;
}
+ else if (is_hashmap(term)) {
+ *size = hashmap_size(term);
+ return 1;
+ }
return 0;
}
ERL_NIF_TERM enif_make_new_map(ErlNifEnv* env)
{
- Eterm* hp = alloc_heap(env,MAP_HEADER_SIZE+1);
+ Eterm* hp = alloc_heap(env,MAP_HEADER_FLATMAP_SZ+1);
Eterm tup;
- map_t *mp;
+ flatmap_t *mp;
tup = make_tuple(hp);
*hp++ = make_arityval(0);
- mp = (map_t*)hp;
- mp->thing_word = MAP_HEADER;
+ mp = (flatmap_t*)hp;
+ mp->thing_word = MAP_HEADER_FLATMAP;
mp->size = 0;
mp->keys = tup;
- return make_map(mp);
+ return make_flatmap(mp);
}
int enif_make_map_put(ErlNifEnv* env,
@@ -1719,9 +2868,13 @@ int enif_make_map_put(ErlNifEnv* env,
Eterm value,
Eterm *map_out)
{
- if (is_not_map(map_in)) {
+ if (!is_map(map_in)) {
return 0;
}
+ ASSERT_IN_ENV(env, map_in, 0, "old map");
+ ASSERT_IN_ENV(env, key, 0, "key");
+ ASSERT_IN_ENV(env, value, 0, "value");
+
flush_env(env);
*map_out = erts_maps_put(env->proc, key, value, map_in);
cache_env(env);
@@ -1733,10 +2886,16 @@ int enif_get_map_value(ErlNifEnv* env,
Eterm key,
Eterm *value)
{
- if (is_not_map(map)) {
+ const Eterm *ret;
+ if (!is_map(map)) {
return 0;
}
- return erts_maps_get(key, map, value);
+ ret = erts_maps_get(key, map);
+ if (ret) {
+ *value = *ret;
+ return 1;
+ }
+ return 0;
}
int enif_make_map_update(ErlNifEnv* env,
@@ -1746,10 +2905,14 @@ int enif_make_map_update(ErlNifEnv* env,
Eterm *map_out)
{
int res;
- if (is_not_map(map_in)) {
+ if (!is_map(map_in)) {
return 0;
}
+ ASSERT_IN_ENV(env, map_in, 0, "old map");
+ ASSERT_IN_ENV(env, key, 0, "key");
+ ASSERT_IN_ENV(env, value, 0, "value");
+
flush_env(env);
res = erts_maps_update(env->proc, key, value, map_in, map_out);
cache_env(env);
@@ -1761,14 +2924,13 @@ int enif_make_map_remove(ErlNifEnv* env,
Eterm key,
Eterm *map_out)
{
- int res;
- if (is_not_map(map_in)) {
+ if (!is_map(map_in)) {
return 0;
}
flush_env(env);
- res = erts_maps_remove(env->proc, key, map_in, map_out);
+ (void) erts_maps_take(env->proc, key, map_in, map_out, NULL);
cache_env(env);
- return res;
+ return 1;
}
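
/*
 * Sketch of the functional map API above: enif_make_map_put() returns a new
 * map through 'map_out' and leaves the input map untouched. The counter
 * layout and the helper name are hypothetical.
 */
static ERL_NIF_TERM bump_counter(ErlNifEnv* env, ERL_NIF_TERM map, ERL_NIF_TERM key)
{
    ERL_NIF_TERM val, new_map;
    int count = 0;

    if (enif_get_map_value(env, map, key, &val))
        enif_get_int(env, val, &count);        /* keep 0 if key is absent */

    if (!enif_make_map_put(env, map, key,
                           enif_make_int(env, count + 1), &new_map))
        return enif_make_badarg(env);          /* 'map' was not a map */

    return new_map;
}
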
int enif_map_iterator_create(ErlNifEnv *env,
@@ -1776,13 +2938,13 @@ int enif_map_iterator_create(ErlNifEnv *env,
ErlNifMapIterator *iter,
ErlNifMapIteratorEntry entry)
{
- if (is_map(map)) {
- map_t *mp = (map_t*)map_val(map);
+ if (is_flatmap(map)) {
+ flatmap_t *mp = (flatmap_t*)flatmap_val(map);
size_t offset;
switch (entry) {
- case ERL_NIF_MAP_ITERATOR_HEAD: offset = 0; break;
- case ERL_NIF_MAP_ITERATOR_TAIL: offset = map_get_size(mp) - 1; break;
+ case ERL_NIF_MAP_ITERATOR_FIRST: offset = 0; break;
+ case ERL_NIF_MAP_ITERATOR_LAST: offset = flatmap_get_size(mp) - 1; break;
default: goto error;
}
@@ -1791,14 +2953,37 @@ int enif_map_iterator_create(ErlNifEnv *env,
*/
iter->map = map;
- iter->ks = ((Eterm *)map_get_keys(mp)) + offset;
- iter->vs = ((Eterm *)map_get_values(mp)) + offset;
- iter->t_limit = map_get_size(mp) + 1;
+ iter->u.flat.ks = ((Eterm *)flatmap_get_keys(mp)) + offset;
+ iter->u.flat.vs = ((Eterm *)flatmap_get_values(mp)) + offset;
+ iter->size = flatmap_get_size(mp);
iter->idx = offset + 1;
return 1;
}
-
+ else if (is_hashmap(map)) {
+ iter->map = map;
+ iter->size = hashmap_size(map);
+ iter->u.hash.wstack = erts_alloc(ERTS_ALC_T_NIF, sizeof(ErtsDynamicWStack));
+ WSTACK_INIT(iter->u.hash.wstack, ERTS_ALC_T_NIF);
+
+ switch (entry) {
+ case ERL_NIF_MAP_ITERATOR_FIRST:
+ iter->idx = 1;
+ hashmap_iterator_init(&iter->u.hash.wstack->ws, map, 0);
+ iter->u.hash.kv = hashmap_iterator_next(&iter->u.hash.wstack->ws);
+ break;
+ case ERL_NIF_MAP_ITERATOR_LAST:
+ iter->idx = hashmap_size(map);
+ hashmap_iterator_init(&iter->u.hash.wstack->ws, map, 1);
+ iter->u.hash.kv = hashmap_iterator_prev(&iter->u.hash.wstack->ws);
+ break;
+ default:
+ goto error;
+ }
+ ASSERT(!!iter->u.hash.kv == (iter->idx >= 1 &&
+ iter->idx <= iter->size));
+ return 1;
+ }
error:
#ifdef DEBUG
iter->map = THE_NON_VALUE;
@@ -1808,48 +2993,97 @@ error:
void enif_map_iterator_destroy(ErlNifEnv *env, ErlNifMapIterator *iter)
{
- /* not used */
+ if (is_hashmap(iter->map)) {
+ WSTACK_DESTROY(iter->u.hash.wstack->ws);
+ erts_free(ERTS_ALC_T_NIF, iter->u.hash.wstack);
+ }
+ else
+ ASSERT(is_flatmap(iter->map));
+
#ifdef DEBUG
iter->map = THE_NON_VALUE;
#endif
-
}
int enif_map_iterator_is_tail(ErlNifEnv *env, ErlNifMapIterator *iter)
{
- ASSERT(iter && is_map(iter->map));
- ASSERT(iter->idx >= 0 && (iter->idx <= map_get_size(map_val(iter->map)) + 1));
- return (iter->t_limit == 1 || iter->idx == iter->t_limit);
+ ASSERT(iter);
+ if (is_flatmap(iter->map)) {
+ ASSERT(iter->idx >= 0);
+ ASSERT(iter->idx <= flatmap_get_size(flatmap_val(iter->map)) + 1);
+ return (iter->size == 0 || iter->idx > iter->size);
+ }
+ else {
+ ASSERT(is_hashmap(iter->map));
+ return iter->idx > iter->size;
+ }
}
int enif_map_iterator_is_head(ErlNifEnv *env, ErlNifMapIterator *iter)
{
- ASSERT(iter && is_map(iter->map));
- ASSERT(iter->idx >= 0 && (iter->idx <= map_get_size(map_val(iter->map)) + 1));
- return (iter->t_limit == 1 || iter->idx == 0);
+ ASSERT(iter);
+ if (is_flatmap(iter->map)) {
+ ASSERT(iter->idx >= 0);
+ ASSERT(iter->idx <= flatmap_get_size(flatmap_val(iter->map)) + 1);
+ return (iter->size == 0 || iter->idx == 0);
+ }
+ else {
+ ASSERT(is_hashmap(iter->map));
+ return iter->idx == 0;
+ }
}
int enif_map_iterator_next(ErlNifEnv *env, ErlNifMapIterator *iter)
{
- ASSERT(iter && is_map(iter->map));
- if (iter->idx < iter->t_limit) {
- iter->idx++;
- iter->ks++;
- iter->vs++;
+ ASSERT(iter);
+ if (is_flatmap(iter->map)) {
+ if (iter->idx <= iter->size) {
+ iter->idx++;
+ iter->u.flat.ks++;
+ iter->u.flat.vs++;
+ }
+ return (iter->idx <= iter->size);
+ }
+ else {
+ ASSERT(is_hashmap(iter->map));
+
+ if (iter->idx <= hashmap_size(iter->map)) {
+ if (iter->idx < 1) {
+ hashmap_iterator_init(&iter->u.hash.wstack->ws, iter->map, 0);
+ }
+ iter->u.hash.kv = hashmap_iterator_next(&iter->u.hash.wstack->ws);
+ iter->idx++;
+ ASSERT(!!iter->u.hash.kv == (iter->idx <= iter->size));
+ }
+ return iter->idx <= iter->size;
}
- return (iter->idx != iter->t_limit);
}
int enif_map_iterator_prev(ErlNifEnv *env, ErlNifMapIterator *iter)
{
- ASSERT(iter && is_map(iter->map));
- if (iter->idx > 0) {
- iter->idx--;
- iter->ks--;
- iter->vs--;
+ ASSERT(iter);
+ if (is_flatmap(iter->map)) {
+ if (iter->idx > 0) {
+ iter->idx--;
+ iter->u.flat.ks--;
+ iter->u.flat.vs--;
+ }
+ return iter->idx > 0;
+ }
+ else {
+ ASSERT(is_hashmap(iter->map));
+
+ if (iter->idx > 0) {
+ if (iter->idx > iter->size) {
+ hashmap_iterator_init(&iter->u.hash.wstack->ws, iter->map, 1);
+ }
+ iter->u.hash.kv = hashmap_iterator_prev(&iter->u.hash.wstack->ws);
+ iter->idx--;
+ ASSERT(!!iter->u.hash.kv == (iter->idx > 0));
+ }
+ return iter->idx > 0;
}
- return (iter->idx > 0);
}
int enif_map_iterator_get_pair(ErlNifEnv *env,
@@ -1857,35 +3091,532 @@ int enif_map_iterator_get_pair(ErlNifEnv *env,
Eterm *key,
Eterm *value)
{
- ASSERT(iter && is_map(iter->map));
- if (iter->idx > 0 && iter->idx < iter->t_limit) {
- ASSERT(iter->ks >= map_get_keys(map_val(iter->map)) &&
- iter->ks < (map_get_keys(map_val(iter->map)) + map_get_size(map_val(iter->map))));
- ASSERT(iter->vs >= map_get_values(map_val(iter->map)) &&
- iter->vs < (map_get_values(map_val(iter->map)) + map_get_size(map_val(iter->map))));
- *key = *(iter->ks);
- *value = *(iter->vs);
- return 1;
+ ASSERT(iter);
+ if (is_flatmap(iter->map)) {
+ if (iter->idx > 0 && iter->idx <= iter->size) {
+ ASSERT(iter->u.flat.ks >= flatmap_get_keys(flatmap_val(iter->map)) &&
+ iter->u.flat.ks < (flatmap_get_keys(flatmap_val(iter->map)) + flatmap_get_size(flatmap_val(iter->map))));
+ ASSERT(iter->u.flat.vs >= flatmap_get_values(flatmap_val(iter->map)) &&
+ iter->u.flat.vs < (flatmap_get_values(flatmap_val(iter->map)) + flatmap_get_size(flatmap_val(iter->map))));
+ *key = *(iter->u.flat.ks);
+ *value = *(iter->u.flat.vs);
+ return 1;
+ }
+ }
+ else {
+ ASSERT(is_hashmap(iter->map));
+ if (iter->idx > 0 && iter->idx <= iter->size) {
+ *key = CAR(iter->u.hash.kv);
+ *value = CDR(iter->u.hash.kv);
+ return 1;
+ }
+ }
+ return 0;
+}
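
/*
 * Sketch of a full traversal with the iterator above; it works for both
 * flatmaps and hashmaps since the representation is hidden behind the
 * ErlNifMapIterator union. Key/value handling is left as a comment.
 */
static void walk_map(ErlNifEnv* env, ERL_NIF_TERM map)
{
    ErlNifMapIterator iter;
    ERL_NIF_TERM key, value;

    if (!enif_map_iterator_create(env, map, &iter, ERL_NIF_MAP_ITERATOR_FIRST))
        return;                                /* not a map */

    while (enif_map_iterator_get_pair(env, &iter, &key, &value)) {
        /* ... use key and value ... */
        enif_map_iterator_next(env, &iter);
    }
    enif_map_iterator_destroy(env, &iter);
}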
+
+int enif_monitor_process(ErlNifEnv* env, void* obj, const ErlNifPid* target_pid,
+ ErlNifMonitor* monitor)
+{
+ int scheduler;
+ ErtsResource* rsrc = DATA_TO_RESOURCE(obj);
+ Process *rp;
+ Eterm tmp[ERTS_REF_THING_SIZE];
+ Eterm ref;
+ int retval;
+
+ ASSERT(ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(rsrc)->magic_binary.destructor
+ == NIF_RESOURCE_DTOR);
+ ASSERT(!(rsrc->monitors && rsrc->monitors->is_dying));
+ ASSERT(!rsrc->monitors == !rsrc->type->down);
+
+
+ if (!rsrc->monitors) {
+ ASSERT(!rsrc->type->down);
+ return -1;
+ }
+ ASSERT(rsrc->type->down);
+
+ execution_state(env, NULL, &scheduler);
+
+ if (scheduler > 0) /* Normal scheduler */
+ rp = erts_proc_lookup_raw(target_pid->pid);
+ else
+ rp = erts_proc_lookup_raw_inc_refc(target_pid->pid);
+
+ if (!rp)
+ return 1;
+
+ ref = erts_make_ref_in_buffer(tmp);
+
+ erts_mtx_lock(&rsrc->monitors->lock);
+ erts_proc_lock(rp, ERTS_PROC_LOCK_LINK);
+ if (ERTS_PSFLG_FREE & erts_atomic32_read_nob(&rp->state)) {
+ retval = 1;
+ }
+ else {
+ erts_add_monitor(&rsrc->monitors->root, MON_ORIGIN, ref, rp->common.id, NIL);
+ erts_add_monitor(&ERTS_P_MONITORS(rp), MON_NIF_TARGET, ref, (UWord)rsrc, NIL);
+ retval = 0;
+ }
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ erts_mtx_unlock(&rsrc->monitors->lock);
+
+ if (scheduler <= 0)
+ erts_proc_dec_refc(rp);
+ if (monitor)
+ erts_ref_to_driver_monitor(ref,monitor);
+
+ return retval;
+}
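
/*
 * Sketch of enif_monitor_process(): it only works on resources whose type
 * was registered with a 'down' callback (otherwise it returns < 0, as
 * above). Reuses the hypothetical "my_rt" type from the earlier sketch;
 * the argument layout is assumed.
 */
static ERL_NIF_TERM watch_pid(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    void* obj;
    ErlNifPid pid;
    ErlNifMonitor mon;

    if (!enif_get_resource(env, argv[0], my_rt, &obj)
        || !enif_get_local_pid(env, argv[1], &pid))
        return enif_make_badarg(env);

    switch (enif_monitor_process(env, obj, &pid, &mon)) {
    case 0:  return enif_make_atom(env, "ok");
    case 1:  return enif_make_atom(env, "noproc");     /* target already dead */
    default: return enif_make_atom(env, "no_down_cb"); /* type lacks 'down' */
    }
}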
+
+int enif_demonitor_process(ErlNifEnv* env, void* obj, const ErlNifMonitor* monitor)
+{
+ int scheduler;
+ ErtsResource* rsrc = DATA_TO_RESOURCE(obj);
+#ifdef DEBUG
+ ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(rsrc);
+#endif
+ Process *rp;
+ ErtsMonitor *mon;
+ ErtsMonitor *rmon = NULL;
+ Eterm ref_heap[ERTS_REF_THING_SIZE];
+ Eterm ref;
+ int is_exiting;
+
+ ASSERT(bin->magic_binary.destructor == NIF_RESOURCE_DTOR);
+ ASSERT(!(rsrc->monitors && rsrc->monitors->is_dying));
+
+ execution_state(env, NULL, &scheduler);
+
+ ref = erts_driver_monitor_to_ref(ref_heap, monitor);
+
+ erts_mtx_lock(&rsrc->monitors->lock);
+ mon = erts_remove_monitor(&rsrc->monitors->root, ref);
+
+ if (mon == NULL) {
+ erts_mtx_unlock(&rsrc->monitors->lock);
+ return 1;
+ }
+
+ ASSERT(mon->type == MON_ORIGIN);
+ ASSERT(is_internal_pid(mon->u.pid));
+
+ if (scheduler > 0) /* Normal scheduler */
+ rp = erts_proc_lookup(mon->u.pid);
+ else
+ rp = erts_proc_lookup_inc_refc(mon->u.pid);
+
+ if (!rp) {
+ is_exiting = 1;
+ }
+ else {
+ erts_proc_lock(rp, ERTS_PROC_LOCK_LINK);
+ if (ERTS_PROC_IS_EXITING(rp)) {
+ is_exiting = 1;
+ } else {
+ rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref);
+ ASSERT(rmon);
+ is_exiting = 0;
+ }
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+
+ if (scheduler <= 0)
+ erts_proc_dec_refc(rp);
+ }
+ if (is_exiting) {
+ rsrc->monitors->pending_failed_fire++;
+ }
+ erts_mtx_unlock(&rsrc->monitors->lock);
+
+ if (rmon) {
+ ASSERT(rmon->type == MON_NIF_TARGET);
+ ASSERT(rmon->u.resource == rsrc);
+ erts_destroy_monitor(rmon);
+ }
+ erts_destroy_monitor(mon);
+
+ return 0;
+}
+
+int enif_compare_monitors(const ErlNifMonitor *monitor1,
+ const ErlNifMonitor *monitor2)
+{
+ return sys_memcmp((void *) monitor1, (void *) monitor2,
+ ERTS_REF_THING_SIZE*sizeof(Eterm));
+}
+
+ErlNifIOQueue *enif_ioq_create(ErlNifIOQueueOpts opts)
+{
+ ErlNifIOQueue *q;
+
+ if (opts != ERL_NIF_IOQ_NORMAL)
+ return NULL;
+
+ q = enif_alloc(sizeof(ErlNifIOQueue));
+ if (!q) return NULL;
+ erts_ioq_init(q, ERTS_ALC_T_NIF, 0);
+
+ return q;
+}
+
+void enif_ioq_destroy(ErlNifIOQueue *q)
+{
+ erts_ioq_clear(q);
+ enif_free(q);
+}
+
+/* If the iovec was preallocated (on the stack or otherwise) it needs to be
+ * marked as such so it can be freed properly. */
+#define ERL_NIF_IOVEC_FLAGS_PREALLOC (1 << 0)
+
+void enif_free_iovec(ErlNifIOVec *iov)
+{
+ int i;
+ /* Decrement the refc of all the binaries */
+ for (i = 0; i < iov->iovcnt; i++) {
+ Binary *bptr = ((Binary**)iov->ref_bins)[i];
+ /* bptr can be null if enq_binary was used */
+ if (bptr && erts_refc_dectest(&bptr->intern.refc, 0) == 0) {
+ erts_bin_free(bptr);
+ }
+ }
+
+ if (!(iov->flags & ERL_NIF_IOVEC_FLAGS_PREALLOC)) {
+ enif_free(iov);
+ }
+}
+
+typedef struct {
+ UWord sublist_length;
+ Eterm sublist_start;
+ Eterm sublist_end;
+
+ UWord offheap_size;
+ UWord onheap_size;
+
+ UWord iovec_len;
+} iovec_slice_t;
+
+static int examine_iovec_term(Eterm list, UWord max_length, iovec_slice_t *result) {
+ Eterm lookahead;
+
+ result->sublist_start = list;
+ result->sublist_length = 0;
+ result->offheap_size = 0;
+ result->onheap_size = 0;
+ result->iovec_len = 0;
+
+ lookahead = result->sublist_start;
+
+ while (is_list(lookahead)) {
+ Eterm *binary_header, binary;
+ Eterm *cell;
+ UWord size;
+
+ cell = list_val(lookahead);
+ binary = CAR(cell);
+
+ if (!is_binary(binary)) {
+ return 0;
+ }
+
+ size = binary_size(binary);
+ binary_header = binary_val(binary);
+
+ /* If we're a sub-binary we'll need to check our underlying binary to
+ * determine whether we're on-heap or not. */
+ if(thing_subtag(*binary_header) == SUB_BINARY_SUBTAG) {
+ ErlSubBin *sb = (ErlSubBin*)binary_header;
+
+ /* Reject bitstrings */
+ if((sb->bitoffs + sb->bitsize) > 0) {
+ return 0;
+ }
+
+ ASSERT(size <= binary_size(sb->orig));
+ binary_header = binary_val(sb->orig);
+ }
+
+ if(thing_subtag(*binary_header) == HEAP_BINARY_SUBTAG) {
+ ASSERT(size <= ERL_ONHEAP_BIN_LIMIT);
+
+ result->iovec_len += 1;
+ result->onheap_size += size;
+ } else {
+ ASSERT(thing_subtag(*binary_header) == REFC_BINARY_SUBTAG);
+
+ result->iovec_len += 1 + size / MAX_SYSIOVEC_IOVLEN;
+ result->offheap_size += size;
+ }
+
+ result->sublist_length += 1;
+ lookahead = CDR(cell);
+
+ if(result->sublist_length >= max_length) {
+ break;
+ }
+ }
+
+ if (!is_nil(lookahead) && !is_list(lookahead)) {
+ return 0;
+ }
+
+ result->sublist_end = lookahead;
+
+ return 1;
+}
+
+static void inspect_raw_binary_data(Eterm binary, ErlNifBinary *result) {
+ Eterm *parent_header;
+ Eterm parent_binary;
+
+ int bit_offset, bit_size;
+ Uint byte_offset;
+
+ ASSERT(is_binary(binary));
+
+ ERTS_GET_REAL_BIN(binary, parent_binary, byte_offset, bit_offset, bit_size);
+
+ parent_header = binary_val(parent_binary);
+
+ result->size = binary_size(binary);
+ result->bin_term = binary;
+
+ if (thing_subtag(*parent_header) == REFC_BINARY_SUBTAG) {
+ ProcBin *pb = (ProcBin*)parent_header;
+
+ ASSERT(pb->val != NULL);
+ ASSERT(byte_offset < pb->size);
+ ASSERT(&pb->bytes[byte_offset] >= (byte*)(pb->val)->orig_bytes);
+
+ result->data = (unsigned char*)&pb->bytes[byte_offset];
+ result->ref_bin = (void*)pb->val;
+ } else {
+ ErlHeapBin *hb = (ErlHeapBin*)parent_header;
+
+ ASSERT(thing_subtag(*parent_header) == HEAP_BINARY_SUBTAG);
+
+ result->data = &((unsigned char*)&hb->data)[byte_offset];
+ result->ref_bin = NULL;
+ }
+}
+
+static int fill_iovec_with_slice(ErlNifEnv *env,
+ iovec_slice_t *slice,
+ ErlNifIOVec *iovec) {
+ UWord onheap_offset, iovec_idx;
+ ErlNifBinary onheap_data;
+ Eterm sublist_iterator;
+
+ /* Set up a common refc binary for all on-heap binaries. */
+ if (slice->onheap_size > 0) {
+ if (!enif_alloc_binary(slice->onheap_size, &onheap_data)) {
+ return 0;
+ }
+ }
+
+ sublist_iterator = slice->sublist_start;
+ onheap_offset = 0;
+ iovec_idx = 0;
+
+ while (sublist_iterator != slice->sublist_end) {
+ ErlNifBinary raw_data;
+ Eterm *cell;
+
+ cell = list_val(sublist_iterator);
+ inspect_raw_binary_data(CAR(cell), &raw_data);
+
+ /* If this isn't a refc binary, copy its contents to the onheap buffer
+ * and reference that instead. */
+ if (raw_data.ref_bin == NULL) {
+ ASSERT(onheap_offset < onheap_data.size);
+ ASSERT(slice->onheap_size > 0);
+
+ sys_memcpy(&onheap_data.data[onheap_offset],
+ raw_data.data, raw_data.size);
+
+ raw_data.data = &onheap_data.data[onheap_offset];
+ raw_data.ref_bin = onheap_data.ref_bin;
+ }
+
+ ASSERT(raw_data.ref_bin != NULL);
+
+ while (raw_data.size > 0) {
+ UWord chunk_len = MIN(raw_data.size, MAX_SYSIOVEC_IOVLEN);
+
+ ASSERT(iovec_idx < iovec->iovcnt);
+
+ iovec->iov[iovec_idx].iov_base = raw_data.data;
+ iovec->iov[iovec_idx].iov_len = chunk_len;
+
+ iovec->ref_bins[iovec_idx] = raw_data.ref_bin;
+
+ raw_data.data += chunk_len;
+ raw_data.size -= chunk_len;
+
+ iovec_idx += 1;
+ }
+
+ sublist_iterator = CDR(cell);
}
+
+ ASSERT(iovec_idx == iovec->iovcnt);
+
+ if (env == NULL) {
+ int i;
+ for (i = 0; i < iovec->iovcnt; i++) {
+ Binary *refc_binary = (Binary*)(iovec->ref_bins[i]);
+ erts_refc_inc(&refc_binary->intern.refc, 1);
+ }
+
+ if (slice->onheap_size > 0) {
+ /* Transfer ownership to the iovec; we've taken references to it in
+ * the above loop. */
+ enif_release_binary(&onheap_data);
+ }
+ } else {
+ if (slice->onheap_size > 0) {
+ /* Attach the binary to our environment and let the GC take care of
+ * it after returning. */
+ enif_make_binary(env, &onheap_data);
+ }
+ }
+
+ return 1;
+}
+
+static int create_iovec_from_slice(ErlNifEnv *env,
+ iovec_slice_t *slice,
+ ErlNifIOVec **result) {
+ ErlNifIOVec *iovec = *result;
+
+ if (iovec && slice->iovec_len < ERL_NIF_IOVEC_SIZE) {
+ iovec->iov = iovec->small_iov;
+ iovec->ref_bins = iovec->small_ref_bin;
+ iovec->flags = ERL_NIF_IOVEC_FLAGS_PREALLOC;
+ } else {
+ UWord iov_offset, binv_offset, alloc_size;
+ char *alloc_base;
+
+ iov_offset = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(ErlNifIOVec));
+ binv_offset = iov_offset;
+ binv_offset += ERTS_ALC_DATA_ALIGN_SIZE(slice->iovec_len * sizeof(SysIOVec));
+ alloc_size = binv_offset;
+ alloc_size += slice->iovec_len * sizeof(Binary*);
+
+ /* If we have an environment we'll attach the allocated data to it. The
+ * GC will take care of releasing it later on. */
+ if (env != NULL) {
+ ErlNifBinary gc_bin;
+
+ if (!enif_alloc_binary(alloc_size, &gc_bin)) {
+ return 0;
+ }
+
+ alloc_base = (char*)gc_bin.data;
+ enif_make_binary(env, &gc_bin);
+ } else {
+ alloc_base = enif_alloc(alloc_size);
+ }
+
+ iovec = (ErlNifIOVec*)alloc_base;
+ iovec->iov = (SysIOVec*)(alloc_base + iov_offset);
+ iovec->ref_bins = (void**)(alloc_base + binv_offset);
+ iovec->flags = 0;
+ }
+
+ iovec->size = slice->offheap_size + slice->onheap_size;
+ iovec->iovcnt = slice->iovec_len;
+
+ if(!fill_iovec_with_slice(env, slice, iovec)) {
+ if (env == NULL && !(iovec->flags & ERL_NIF_IOVEC_FLAGS_PREALLOC)) {
+ enif_free(iovec);
+ }
+
+ return 0;
+ }
+
+ *result = iovec;
+
+ return 1;
+}
+
+int enif_inspect_iovec(ErlNifEnv *env, size_t max_elements,
+ ERL_NIF_TERM list, ERL_NIF_TERM *tail,
+ ErlNifIOVec **iov) {
+ iovec_slice_t slice;
+
+ if(!examine_iovec_term(list, max_elements, &slice)) {
+ return 0;
+ } else if(!create_iovec_from_slice(env, &slice, iov)) {
+ return 0;
+ }
+
+ (*tail) = slice.sublist_end;
+
+ return 1;
+}
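
/*
 * Sketch of enif_inspect_iovec() above: a stack-allocated ErlNifIOVec may be
 * offered so small lists take the preallocated path, and 'tail' receives
 * whatever exceeded max_elements. With a non-NULL env the backing memory is
 * attached to the environment and reclaimed by the GC, so no explicit
 * enif_free_iovec() is needed here. The limit of 64 is arbitrary.
 */
static ERL_NIF_TERM inspect_example(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifIOVec vec, *iovec = &vec;
    ERL_NIF_TERM tail;
    int i;

    if (!enif_inspect_iovec(env, 64, argv[0], &tail, &iovec))
        return enif_make_badarg(env);          /* not a proper binary list */

    for (i = 0; i < iovec->iovcnt; i++) {
        /* e.g. hand iovec->iov[i].iov_base / iov_len to writev() */
    }
    return tail;    /* [] unless the input list had more than 64 elements */
}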
+
+int enif_ioq_enqv(ErlNifIOQueue *q, ErlNifIOVec *iov, size_t skip)
+{
+ if(skip <= iov->size) {
+ return !erts_ioq_enqv(q, (ErtsIOVec*)iov, skip);
+ }
+
return 0;
}
+int enif_ioq_enq_binary(ErlNifIOQueue *q, ErlNifBinary *bin, size_t skip)
+{
+ ErlNifIOVec vec = {1, bin->size, NULL, NULL, ERL_NIF_IOVEC_FLAGS_PREALLOC };
+ Binary *ref_bin = (Binary*)bin->ref_bin;
+ int res;
+ vec.iov = vec.small_iov;
+ vec.ref_bins = vec.small_ref_bin;
+ vec.iov[0].iov_base = bin->data;
+ vec.iov[0].iov_len = bin->size;
+ ((Binary**)(vec.ref_bins))[0] = ref_bin;
+
+ res = enif_ioq_enqv(q, &vec, skip);
+ enif_release_binary(bin);
+ return res;
+}
+
+size_t enif_ioq_size(ErlNifIOQueue *q)
+{
+ return erts_ioq_size(q);
+}
+
+int enif_ioq_deq(ErlNifIOQueue *q, size_t elems, size_t *size)
+{
+ if (erts_ioq_deq(q, elems) == -1)
+ return 0;
+ if (size)
+ *size = erts_ioq_size(q);
+ return 1;
+}
+
+SysIOVec *enif_ioq_peek(ErlNifIOQueue *q, int *iovlen)
+{
+ return erts_ioq_peekq(q, iovlen);
+}
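
/*
 * Sketch tying the queue primitives together: enqueue an inspected iovec,
 * peek at the queued SysIOVec array for a writev()-style call, then dequeue
 * the bytes actually consumed. The queue is assumed to come from
 * enif_ioq_create(ERL_NIF_IOQ_NORMAL); 'bytes_written' is hypothetical.
 */
static void drain_example(ErlNifIOQueue *q, ErlNifIOVec *iovec,
                          size_t bytes_written)
{
    SysIOVec *sysiov;
    int iovlen;

    if (!enif_ioq_enqv(q, iovec, 0))           /* 0 = skip no leading bytes */
        return;

    sysiov = enif_ioq_peek(q, &iovlen);        /* inspect without dequeuing */
    (void)sysiov;
    /* ... write sysiov[0 .. iovlen-1] to the device ... */

    enif_ioq_deq(q, bytes_written, NULL);      /* drop the consumed bytes */
}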
+
/***************************************************************************
** load_nif/2 **
***************************************************************************/
-static BeamInstr** get_func_pp(BeamInstr* mod_code, Eterm f_atom, unsigned arity)
+static ErtsCodeInfo** get_func_pp(BeamCodeHeader* mod_code, Eterm f_atom, unsigned arity)
{
- int n = (int) mod_code[MI_NUM_FUNCTIONS];
+ int n = (int) mod_code->num_functions;
int j;
for (j = 0; j < n; ++j) {
- BeamInstr* code_ptr = (BeamInstr*) mod_code[MI_FUNCTIONS+j];
- ASSERT(code_ptr[0] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- if (f_atom == ((Eterm) code_ptr[3])
- && arity == ((unsigned) code_ptr[4])) {
-
- return (BeamInstr**) &mod_code[MI_FUNCTIONS+j];
+ ErtsCodeInfo* ci = mod_code->functions[j];
+ ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI));
+ if (f_atom == ci->mfa.function
+ && arity == ci->mfa.arity) {
+ return mod_code->functions+j;
}
}
return NULL;
@@ -1935,16 +3666,16 @@ Eterm erts_nif_taints(Process* p)
return list;
}
-void erts_print_nif_taints(int to, void* to_arg)
+void erts_print_nif_taints(fmtfn_t to, void* to_arg)
{
struct tainted_module_t* t;
const char* delim = "";
for (t=first_tainted_module ; t!=NULL; t=t->next) {
const Atom* atom = atom_tab(atom_val(t->module_atom));
- erts_print(to,to_arg,"%s%.*s", delim, atom->len, atom->name);
+ erts_cbprintf(to,to_arg,"%s%.*s", delim, atom->len, atom->name);
delim = ",";
}
- erts_print(to,to_arg,"\n");
+ erts_cbprintf(to,to_arg,"\n");
}
@@ -1977,10 +3708,62 @@ static Eterm load_nif_error(Process* p, const char* atom, const char* format, ..
return ret;
}
+#define AT_LEAST_VERSION(E,MAJ,MIN) \
+ (((E)->major * 0x100 + (E)->minor) >= ((MAJ) * 0x100 + (MIN)))
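
/*
 * The packing turns each (major, minor) pair into one comparable integer.
 * For example, an entry built against 2.11 checked with
 * AT_LEAST_VERSION(e, 2, 7) compares 2*0x100 + 11 = 0x20B against
 * 2*0x100 + 7 = 0x207, so the check passes.
 */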
+
+/*
+ * Allocate erl_module_nif and make a _modern_ copy of the lib entry.
+ */
+static struct erl_module_nif* create_lib(const ErlNifEntry* src)
+{
+ struct erl_module_nif* lib;
+ ErlNifEntry* dst;
+ Uint bytes = offsetof(struct erl_module_nif, _funcs_copy_);
+
+ if (!AT_LEAST_VERSION(src, 2, 7))
+ bytes += src->num_of_funcs * sizeof(ErlNifFunc);
+
+ lib = erts_alloc(ERTS_ALC_T_NIF, bytes);
+ dst = &lib->entry;
+
+ sys_memcpy(dst, src, offsetof(ErlNifEntry, vm_variant));
+
+ if (AT_LEAST_VERSION(src, 2, 1)) {
+ dst->vm_variant = src->vm_variant;
+ } else {
+ dst->vm_variant = "beam.vanilla";
+ }
+ if (AT_LEAST_VERSION(src, 2, 7)) {
+ dst->options = src->options;
+ } else {
+ /*
+ * Make a modern copy of the ErlNifFunc array
+ */
+ struct ErlNifFunc_V1 {
+ const char* name;
+ unsigned arity;
+ ERL_NIF_TERM (*fptr)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+ }*src_funcs = (struct ErlNifFunc_V1*) src->funcs;
+ int i;
+ for (i = 0; i < src->num_of_funcs; ++i) {
+ sys_memcpy(&lib->_funcs_copy_[i], &src_funcs[i], sizeof(*src_funcs));
+ lib->_funcs_copy_[i].flags = 0;
+ }
+ dst->funcs = lib->_funcs_copy_;
+ dst->options = 0;
+ }
+ if (AT_LEAST_VERSION(src, 2, 12)) {
+ dst->sizeof_ErlNifResourceTypeInit = src->sizeof_ErlNifResourceTypeInit;
+ } else {
+ dst->sizeof_ErlNifResourceTypeInit = 0;
+ }
+ return lib;
+}
+
+
BIF_RETTYPE load_nif_2(BIF_ALIST_2)
{
static const char bad_lib[] = "bad_lib";
- static const char reload[] = "reload";
static const char upgrade[] = "upgrade";
char* lib_name = NULL;
void* handle = NULL;
@@ -1988,16 +3771,23 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
ErlNifEntry* entry = NULL;
ErlNifEnv env;
int i, err, encoding;
- Module* mod;
+ Module* module_p;
Eterm mod_atom;
const Atom* mod_atomp;
Eterm f_atom;
- BeamInstr* caller;
+ ErtsCodeMFA* caller;
ErtsSysDdllError errdesc = ERTS_SYS_DDLL_ERROR_INIT;
Eterm ret = am_ok;
int veto;
struct erl_module_nif* lib = NULL;
- int reload_warning = 0;
+ struct erl_module_instance* this_mi;
+ struct erl_module_instance* prev_mi;
+
+ if (BIF_P->flags & F_HIPE_MODE) {
+ ret = load_nif_error(BIF_P, "notsup", "Calling load_nif from HiPE compiled "
+ "modules not supported");
+ BIF_RET(ret);
+ }
encoding = erts_get_native_filename_encoding();
if (encoding == ERL_FILENAME_WIN_WCHAR) {
@@ -2019,34 +3809,48 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
}
/* Block system (is this the right place to do it?) */
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
/* Find calling module */
ASSERT(BIF_P->current != NULL);
- ASSERT(BIF_P->current[0] == am_erlang
- && BIF_P->current[1] == am_load_nif
- && BIF_P->current[2] == 2);
+ ASSERT(BIF_P->current->module == am_erlang
+ && BIF_P->current->function == am_load_nif
+ && BIF_P->current->arity == 2);
caller = find_function_from_pc(BIF_P->cp);
ASSERT(caller != NULL);
- mod_atom = caller[0];
+ mod_atom = caller->module;
ASSERT(is_atom(mod_atom));
- mod=erts_get_module(mod_atom, erts_active_code_ix());
- ASSERT(mod != NULL);
+ module_p = erts_get_module(mod_atom, erts_active_code_ix());
+ ASSERT(module_p != NULL);
mod_atomp = atom_tab(atom_val(mod_atom));
init_func = erts_static_nif_get_nif_init((char*)mod_atomp->name, mod_atomp->len);
if (init_func != NULL)
handle = init_func;
- if (!in_area(caller, mod->curr.code, mod->curr.code_length)) {
- ASSERT(in_area(caller, mod->old.code, mod->old.code_length));
-
+ this_mi = &module_p->curr;
+ prev_mi = &module_p->old;
+ if (in_area(caller, module_p->old.code_hdr, module_p->old.code_length)) {
ret = load_nif_error(BIF_P, "old_code", "Calling load_nif from old "
"module '%T' not allowed", mod_atom);
- }
+ goto error;
+ } else if (module_p->on_load) {
+ ASSERT(module_p->on_load->code_hdr->on_load_function_ptr);
+ if (module_p->curr.code_hdr) {
+ prev_mi = &module_p->curr;
+ } else {
+ prev_mi = &module_p->old;
+ }
+ this_mi = module_p->on_load;
+ }
+
+ if (this_mi->nif != NULL) {
+ ret = load_nif_error(BIF_P,"reload","NIF library already loaded"
+ " (reload disallowed since OTP 20).");
+ }
else if (init_func == NULL &&
- (err=erts_sys_ddll_open(lib_name, &handle, &errdesc)) != ERL_DE_NO_ERROR) {
+ (err=erts_sys_ddll_open(lib_name, &handle, &errdesc)) != ERL_DE_NO_ERROR) {
const char slogan[] = "Failed to load NIF library";
if (strstr(errdesc.str, lib_name) != NULL) {
ret = load_nif_error(BIF_P, "load_failed", "%s: '%s'", slogan, errdesc.str);
@@ -2074,7 +3878,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
ret = load_nif_error(BIF_P, bad_lib, "Library version (%d.%d) not compatible (with %d.%d).",
entry->major, entry->minor, ERL_NIF_MAJOR_VERSION, ERL_NIF_MINOR_VERSION);
}
- else if (entry->minor >= 1
+ else if (AT_LEAST_VERSION(entry, 2, 1)
&& sys_strcmp(entry->vm_variant, ERL_NIF_VM_VARIANT) != 0) {
ret = load_nif_error(BIF_P, bad_lib, "Library (%s) not compiled for "
"this vm variant (%s).",
@@ -2085,23 +3889,45 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
" match calling module '%T'", entry->name, mod_atom);
}
else {
- /*erts_fprintf(stderr, "Found module %T\r\n", mod_atom);*/
-
- for (i=0; i < entry->num_of_funcs && ret==am_ok; i++) {
- BeamInstr** code_pp;
- ErlNifFunc* f = &entry->funcs[i];
+ lib = create_lib(entry);
+ entry = &lib->entry; /* Use a guaranteed modern lib entry from now on */
+
+ lib->handle = handle;
+ erts_refc_init(&lib->rt_cnt, 0);
+ erts_refc_init(&lib->rt_dtor_cnt, 0);
+ ASSERT(opened_rt_list == NULL);
+ lib->mod = module_p;
+
+ for (i=0; i < entry->num_of_funcs && ret==am_ok; i++) {
+ ErtsCodeInfo** ci_pp;
+ ErlNifFunc* f = &entry->funcs[i];
+
if (!erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1)
- || (code_pp = get_func_pp(mod->curr.code, f_atom, f->arity))==NULL) {
+ || (ci_pp = get_func_pp(this_mi->code_hdr, f_atom, f->arity))==NULL) {
ret = load_nif_error(BIF_P,bad_lib,"Function not found %T:%s/%u",
mod_atom, f->name, f->arity);
- }
- else if (code_pp[1] - code_pp[0] < (5+3)) {
+ }
+ else if (f->flags) {
+ /*
+ * If the flags field is non-zero and this emulator was
+ * built with dirty scheduler support, check that the flags
+ * value is legal. But if this emulator was built without
+ * dirty scheduler support, treat a non-zero flags field as
+ * a load error.
+ */
+ if (f->flags != ERL_NIF_DIRTY_JOB_IO_BOUND && f->flags != ERL_NIF_DIRTY_JOB_CPU_BOUND)
+ ret = load_nif_error(BIF_P, bad_lib, "Illegal flags field value %d for NIF %T:%s/%u",
+ f->flags, mod_atom, f->name, f->arity);
+ }
+ else if (erts_codeinfo_to_code(ci_pp[1]) - erts_codeinfo_to_code(ci_pp[0])
+ < BEAM_NIF_MIN_FUNC_SZ)
+ {
ret = load_nif_error(BIF_P,bad_lib,"No explicit call to load_nif"
- " in module (%T:%s/%u to small)",
- mod_atom, entry->funcs[i].name, entry->funcs[i].arity);
+ " in module (%T:%s/%u too small)",
+ mod_atom, f->name, f->arity);
}
/*erts_fprintf(stderr, "Found NIF %T:%s/%u\r\n",
- mod_atom, entry->funcs[i].name, entry->funcs[i].arity);*/
+ mod_atom, f->name, f->arity);*/
}
}
@@ -2109,112 +3935,69 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
goto error;
}
- /* Call load, reload or upgrade:
+ /* Call load or upgrade:
*/
-
- lib = erts_alloc(ERTS_ALC_T_NIF, sizeof(struct erl_module_nif));
- lib->handle = handle;
- lib->entry = entry;
- erts_refc_init(&lib->rt_cnt, 0);
- erts_refc_init(&lib->rt_dtor_cnt, 0);
- ASSERT(opened_rt_list == NULL);
- lib->mod = mod;
env.mod_nif = lib;
- if (mod->curr.nif != NULL) { /*************** Reload ******************/
- /*
- * Repeated load_nif calls from same Erlang module instance ("reload")
- * is deprecated and was only ment as a development feature not to
- * be used in production systems. (See warning below)
- */
- int k;
- lib->priv_data = mod->curr.nif->priv_data;
- ASSERT(mod->curr.nif->entry != NULL);
- if (entry->reload == NULL) {
- ret = load_nif_error(BIF_P,reload,"Reload not supported by this NIF library.");
- goto error;
- }
- /* Check that no NIF is removed */
- for (k=0; k < mod->curr.nif->entry->num_of_funcs; k++) {
- ErlNifFunc* old_func = &mod->curr.nif->entry->funcs[k];
- for (i=0; i < entry->num_of_funcs; i++) {
- if (old_func->arity == entry->funcs[i].arity
- && sys_strcmp(old_func->name, entry->funcs[i].name) == 0) {
- break;
- }
- }
- if (i == entry->num_of_funcs) {
- ret = load_nif_error(BIF_P,reload,"Reloaded library missing "
- "function %T:%s/%u\r\n", mod_atom,
- old_func->name, old_func->arity);
- goto error;
- }
- }
- erts_pre_nif(&env, BIF_P, lib);
- veto = entry->reload(&env, &lib->priv_data, BIF_ARG_2);
- erts_post_nif(&env);
- if (veto) {
- ret = load_nif_error(BIF_P, reload, "Library reload-call unsuccessful.");
- }
- else {
- commit_opened_resource_types(lib);
- mod->curr.nif->entry = NULL; /* to prevent 'unload' callback */
- erts_unload_nif(mod->curr.nif);
- reload_warning = 1;
- }
+ lib->priv_data = NULL;
+ if (prev_mi->nif != NULL) { /**************** Upgrade ***************/
+ void* prev_old_data = prev_mi->nif->priv_data;
+ if (entry->upgrade == NULL) {
+ ret = load_nif_error(BIF_P, upgrade, "Upgrade not supported by this NIF library.");
+ goto error;
+ }
+ erts_pre_nif(&env, BIF_P, lib, NULL);
+ veto = entry->upgrade(&env, &lib->priv_data, &prev_mi->nif->priv_data, BIF_ARG_2);
+ erts_post_nif(&env);
+ if (veto) {
+ prev_mi->nif->priv_data = prev_old_data;
+ ret = load_nif_error(BIF_P, upgrade, "Library upgrade-call unsuccessful (%d).", veto);
+ }
}
- else {
- lib->priv_data = NULL;
- if (mod->old.nif != NULL) { /**************** Upgrade ***************/
- void* prev_old_data = mod->old.nif->priv_data;
- if (entry->upgrade == NULL) {
- ret = load_nif_error(BIF_P, upgrade, "Upgrade not supported by this NIF library.");
- goto error;
- }
- erts_pre_nif(&env, BIF_P, lib);
- veto = entry->upgrade(&env, &lib->priv_data, &mod->old.nif->priv_data, BIF_ARG_2);
- erts_post_nif(&env);
- if (veto) {
- mod->old.nif->priv_data = prev_old_data;
- ret = load_nif_error(BIF_P, upgrade, "Library upgrade-call unsuccessful.");
- }
- else
- commit_opened_resource_types(lib);
- }
- else if (entry->load != NULL) { /********* Initial load ***********/
- erts_pre_nif(&env, BIF_P, lib);
- veto = entry->load(&env, &lib->priv_data, BIF_ARG_2);
- erts_post_nif(&env);
- if (veto) {
- ret = load_nif_error(BIF_P, "load", "Library load-call unsuccessful.");
- }
- else
- commit_opened_resource_types(lib);
- }
+ else if (entry->load != NULL) { /********* Initial load ***********/
+ erts_pre_nif(&env, BIF_P, lib, NULL);
+ veto = entry->load(&env, &lib->priv_data, BIF_ARG_2);
+ erts_post_nif(&env);
+ if (veto) {
+ ret = load_nif_error(BIF_P, "load", "Library load-call unsuccessful (%d).", veto);
+ }
}
if (ret == am_ok) {
+ commit_opened_resource_types(lib);
+
/*
** Everything ok, patch the beam code with op_call_nif
*/
- mod->curr.nif = lib;
+
+ this_mi->nif = lib;
for (i=0; i < entry->num_of_funcs; i++)
{
- BeamInstr* code_ptr;
- erts_atom_get(entry->funcs[i].name, sys_strlen(entry->funcs[i].name), &f_atom, ERTS_ATOM_ENC_LATIN1);
- code_ptr = *get_func_pp(mod->curr.code, f_atom, entry->funcs[i].arity);
-
- if (code_ptr[1] == 0) {
- code_ptr[5+0] = (BeamInstr) BeamOp(op_call_nif);
+ ErlNifFunc* f = &entry->funcs[i];
+ ErtsCodeInfo* ci;
+ BeamInstr *code_ptr;
+
+ erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1);
+ ci = *get_func_pp(this_mi->code_hdr, f_atom, f->arity);
+ code_ptr = erts_codeinfo_to_code(ci);
+
+ if (ci->u.gen_bp == NULL) {
+ code_ptr[0] = BeamOpCodeAddr(op_call_nif);
}
else { /* Function traced, patch the original instruction word */
- GenericBp* g = (GenericBp *) code_ptr[1];
- ASSERT(code_ptr[5+0] ==
- (BeamInstr) BeamOp(op_i_generic_breakpoint));
- g->orig_instr = (BeamInstr) BeamOp(op_call_nif);
- }
- code_ptr[5+1] = (BeamInstr) entry->funcs[i].fptr;
- code_ptr[5+2] = (BeamInstr) lib;
+ GenericBp* g = ci->u.gen_bp;
+ ASSERT(BeamIsOpCode(code_ptr[0], op_i_generic_breakpoint));
+ g->orig_instr = BeamOpCodeAddr(op_call_nif);
+ }
+ if (f->flags) {
+ code_ptr[3] = (BeamInstr) f->fptr;
+ code_ptr[1] = (f->flags == ERL_NIF_DIRTY_JOB_IO_BOUND) ?
+ (BeamInstr) static_schedule_dirty_io_nif :
+ (BeamInstr) static_schedule_dirty_cpu_nif;
+ }
+ else
+ code_ptr[1] = (BeamInstr) f->fptr;
+ code_ptr[2] = (BeamInstr) lib;
}
}
else {
@@ -2230,19 +4013,11 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
erts_sys_ddll_free_error(&errdesc);
}
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_release_code_write_permission();
erts_free(ERTS_ALC_T_TMP, lib_name);
- if (reload_warning) {
- erts_dsprintf_buf_t* dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp,
- "Repeated calls to erlang:load_nif from module '%T'.\n\n"
- "The NIF reload mechanism is deprecated and must not "
- "be used in production systems.\n", mod_atom);
- erts_send_warning_to_logger(BIF_P->group_leader, dsbufp);
- }
BIF_RET(ret);
}
@@ -2252,9 +4027,12 @@ erts_unload_nif(struct erl_module_nif* lib)
{
ErlNifResourceType* rt;
ErlNifResourceType* next;
- ASSERT(erts_smp_thr_progress_is_blocking());
+ ASSERT(erts_thr_progress_is_blocking());
ASSERT(lib != NULL);
ASSERT(lib->mod != NULL);
+
+ erts_tracer_nif_clear();
+
for (rt = resource_type_list.next;
rt != &resource_type_list;
rt = next) {
@@ -2289,12 +4067,93 @@ erts_unload_nif(struct erl_module_nif* lib)
void erl_nif_init()
{
+ ERTS_CT_ASSERT((offsetof(ErtsResource,data) % 8)
+ == ERTS_MAGIC_BIN_BYTES_TO_ALIGN);
+
resource_type_list.next = &resource_type_list;
resource_type_list.prev = &resource_type_list;
resource_type_list.dtor = NULL;
resource_type_list.owner = NULL;
resource_type_list.module = THE_NON_VALUE;
resource_type_list.name = THE_NON_VALUE;
+
+}
+
+int erts_nif_get_funcs(struct erl_module_nif* mod,
+ ErlNifFunc **funcs)
+{
+ *funcs = mod->entry.funcs;
+ return mod->entry.num_of_funcs;
+}
+
+Eterm erts_nif_call_function(Process *p, Process *tracee,
+ struct erl_module_nif* mod,
+ ErlNifFunc *fun, int argc, Eterm *argv)
+{
+ Eterm nif_result;
+#ifdef DEBUG
+ /* Verify that function is part of this module */
+ int i;
+ for (i = 0; i < mod->entry.num_of_funcs; i++)
+ if (fun == &(mod->entry.funcs[i]))
+ break;
+ ASSERT(i < mod->entry.num_of_funcs);
+ if (p)
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN
+ || erts_thr_progress_is_blocking());
+#endif
+ if (p) {
+ /* This is almost a normal nif call like in beam_emu,
+ except that any heap consumed by the nif will be
+ released without checking if anything in it is live.
+ This is because we cannot do a GC here as we don't know
+ the number of live registers that have to be preserved.
+ This means that any heap part of the returned term may
+ not be used outside this function. */
+ struct enif_environment_t env;
+ ErlHeapFragment *orig_hf = MBUF(p);
+ ErlOffHeap orig_oh = MSO(p);
+ Eterm *orig_htop = HEAP_TOP(p);
+ ASSERT(is_internal_pid(p->common.id));
+ MBUF(p) = NULL;
+ clear_offheap(&MSO(p));
+
+ erts_pre_nif(&env, p, mod, tracee);
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ env.dbg_disable_assert_in_env = 1;
+#endif
+ nif_result = (*fun->fptr)(&env, argc, argv);
+ if (env.exception_thrown)
+ nif_result = THE_NON_VALUE;
+ erts_post_nif(&env);
+
+ /* Free any offheap and heap fragments created in nif */
+ if (MSO(p).first) {
+ erts_cleanup_offheap(&MSO(p));
+ clear_offheap(&MSO(p));
+ }
+ if (MBUF(p))
+ free_message_buffer(MBUF(p));
+
+ /* restore original heap fragment list */
+ MBUF(p) = orig_hf;
+ MSO(p) = orig_oh;
+ HEAP_TOP(p) = orig_htop;
+ } else {
+ /* Nif call was done without a process context,
+ so we create a phony one. */
+ struct enif_msg_environment_t msg_env;
+ pre_nif_noproc(&msg_env, mod, tracee);
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ msg_env.env.dbg_disable_assert_in_env = 1;
+#endif
+ nif_result = (*fun->fptr)(&msg_env.env, argc, argv);
+ if (msg_env.env.exception_thrown)
+ nif_result = THE_NON_VALUE;
+ post_nif_noproc(&msg_env);
+ }
+
+ return nif_result;
}
#ifdef USE_VM_PROBES
@@ -2352,6 +4211,55 @@ static unsigned calc_checksum(unsigned char* ptr, unsigned size)
#endif /* READONLY_CHECK */
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+static void dbg_assert_in_env(ErlNifEnv* env, Eterm term,
+ int nr, const char* type, const char* func)
+{
+ Uint saved_used_size;
+ Eterm* real_htop;
+
+ if (is_immed(term)
+ || (is_non_value(term) && env->exception_thrown)
+ || erts_is_literal(term, ptr_val(term)))
+ return;
+
+ if (env->dbg_disable_assert_in_env) {
+ /*
+ * Trace nifs may cheat as built terms are discarded after return.
+ * ToDo: Check if 'term' is part of argv[].
+ */
+ return;
+ }
+
+ if (env->heap_frag) {
+ ASSERT(env->heap_frag == MBUF(env->proc));
+ ASSERT(env->hp >= env->heap_frag->mem);
+ ASSERT(env->hp <= env->heap_frag->mem + env->heap_frag->alloc_size);
+ saved_used_size = env->heap_frag->used_size;
+ env->heap_frag->used_size = env->hp - env->heap_frag->mem;
+ real_htop = NULL;
+ }
+ else {
+ real_htop = env->hp;
+ }
+ if (!erts_dbg_within_proc(ptr_val(term), env->proc, real_htop)) {
+ fprintf(stderr, "\r\nFAILED ASSERTION in %s:\r\n", func);
+ if (nr) {
+ fprintf(stderr, "Term #%d of the %s is not from same ErlNifEnv.",
+ nr, type);
+ }
+ else {
+ fprintf(stderr, "The %s is not from the same ErlNifEnv.", type);
+ }
+ fprintf(stderr, "\r\nABORTING\r\n");
+ abort();
+ }
+ if (env->heap_frag) {
+ env->heap_frag->used_size = saved_used_size;
+ }
+}
+#endif
+
#ifdef HAVE_USE_DTRACE
#define MESSAGE_BUFSIZ 1024
diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h
index 5b93c2398e..d195721054 100644
--- a/erts/emulator/beam/erl_nif.h
+++ b/erts/emulator/beam/erl_nif.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2009-2014. All Rights Reserved.
+ * Copyright Ericsson AB 2009-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -27,7 +28,6 @@
# include "config.h"
#endif
-#include "erl_native_features_config.h"
#include "erl_drv_nif.h"
/* Version history:
@@ -42,9 +42,18 @@
** 2.5: R17 Maps API additions
** 2.6: R17 with maps
** R17 dirty schedulers
+** 2.7: 17.3 add enif_schedule_nif
+** remove enif_schedule_dirty_nif, enif_schedule_dirty_nif_finalizer, enif_dirty_nif_finalizer
+** add ErlNifEntry options
+** add ErlNifFunc flags
+** 2.8: 18.0 add enif_has_pending_exception
+** 2.9: 18.2 enif_getenv
+** 2.10: Time API
+** 2.11: 19.0 enif_snprintf
+** 2.12: 20.0 add enif_select and the enif_ioq I/O queue API
*/
#define ERL_NIF_MAJOR_VERSION 2
-#define ERL_NIF_MINOR_VERSION 6
+#define ERL_NIF_MINOR_VERSION 12
/*
* The emulator will refuse to load a nif-lib with a major version
@@ -60,73 +69,43 @@
#include <stdlib.h>
-#ifdef SIZEOF_CHAR
-# define SIZEOF_CHAR_SAVED__ SIZEOF_CHAR
-# undef SIZEOF_CHAR
-#endif
-#ifdef SIZEOF_SHORT
-# define SIZEOF_SHORT_SAVED__ SIZEOF_SHORT
-# undef SIZEOF_SHORT
-#endif
-#ifdef SIZEOF_INT
-# define SIZEOF_INT_SAVED__ SIZEOF_INT
-# undef SIZEOF_INT
-#endif
-#ifdef SIZEOF_LONG
-# define SIZEOF_LONG_SAVED__ SIZEOF_LONG
-# undef SIZEOF_LONG
-#endif
-#ifdef SIZEOF_LONG_LONG
-# define SIZEOF_LONG_LONG_SAVED__ SIZEOF_LONG_LONG
-# undef SIZEOF_LONG_LONG
-#endif
-#ifdef HALFWORD_HEAP_EMULATOR
-# define HALFWORD_HEAP_EMULATOR_SAVED__ HALFWORD_HEAP_EMULATOR
-# undef HALFWORD_HEAP_EMULATOR
-#endif
-#include "erl_int_sizes_config.h"
-
#ifdef __cplusplus
extern "C" {
#endif
-#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
-typedef unsigned __int64 ErlNifUInt64;
-typedef __int64 ErlNifSInt64;
-#elif SIZEOF_LONG == 8
-typedef unsigned long ErlNifUInt64;
-typedef long ErlNifSInt64;
-#elif SIZEOF_LONG_LONG == 8
-typedef unsigned long long ErlNifUInt64;
-typedef long long ErlNifSInt64;
-#else
-#error No 64-bit integer type
-#endif
+typedef ErlNapiUInt64 ErlNifUInt64;
+typedef ErlNapiSInt64 ErlNifSInt64;
+typedef ErlNapiUInt ErlNifUInt;
+typedef ErlNapiSInt ErlNifSInt;
-#ifdef HALFWORD_HEAP_EMULATOR
-# define ERL_NIF_VM_VARIANT "beam.halfword"
-typedef unsigned int ERL_NIF_TERM;
-#else
# define ERL_NIF_VM_VARIANT "beam.vanilla"
-# if SIZEOF_LONG == SIZEOF_VOID_P
-typedef unsigned long ERL_NIF_TERM;
-# elif SIZEOF_LONG_LONG == SIZEOF_VOID_P
-typedef unsigned long long ERL_NIF_TERM;
-# endif
-#endif
+typedef ErlNifUInt ERL_NIF_TERM;
typedef ERL_NIF_TERM ERL_NIF_UINT;
+typedef ErlNifSInt64 ErlNifTime;
+
+#define ERL_NIF_TIME_ERROR ((ErlNifSInt64) ERTS_NAPI_TIME_ERROR__)
+
+typedef enum {
+ ERL_NIF_SEC = ERTS_NAPI_SEC__,
+ ERL_NIF_MSEC = ERTS_NAPI_MSEC__,
+ ERL_NIF_USEC = ERTS_NAPI_USEC__,
+ ERL_NIF_NSEC = ERTS_NAPI_NSEC__
+} ErlNifTimeUnit;
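As a usage sketch of the new Time API (the unit enum above plus enif_monotonic_time and enif_convert_time_unit, declared in erl_nif_api_funcs.h further down); my_timed_nif and the work being measured are hypothetical:

static ERL_NIF_TERM my_timed_nif(ErlNifEnv* env, int argc,
                                 const ERL_NIF_TERM argv[])
{
    ErlNifTime start = enif_monotonic_time(ERL_NIF_NSEC);
    /* ... the work being measured ... */
    ErlNifTime elapsed = enif_monotonic_time(ERL_NIF_NSEC) - start;
    /* report the elapsed time to Erlang in microseconds */
    return enif_make_int64(env,
        enif_convert_time_unit(elapsed, ERL_NIF_NSEC, ERL_NIF_USEC));
}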
+
struct enif_environment_t;
typedef struct enif_environment_t ErlNifEnv;
-typedef struct
+typedef struct enif_func_t
{
const char* name;
unsigned arity;
ERL_NIF_TERM (*fptr)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+ unsigned flags;
}ErlNifFunc;
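The new flags field is what a module uses to mark individual functions as dirty; a minimal sketch of a function table (fast_op_nif and slow_op_nif are made-up names):

static ErlNifFunc nif_funcs[] = {
    /* {name, arity, fptr, flags}; flags == 0 means an ordinary NIF */
    {"fast_op", 1, fast_op_nif, 0},
    /* run on a dirty CPU scheduler instead of blocking a normal one */
    {"slow_op", 2, slow_op_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND}
};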
+
typedef struct enif_entry_t
{
int major;
@@ -138,9 +117,16 @@ typedef struct enif_entry_t
int (*reload) (ErlNifEnv*, void** priv_data, ERL_NIF_TERM load_info);
int (*upgrade)(ErlNifEnv*, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info);
void (*unload) (ErlNifEnv*, void* priv_data);
+
+ /* Added in 2.1 */
const char* vm_variant;
-}ErlNifEntry;
+ /* Added in 2.7 */
+ unsigned options; /* Unused. Can be set to 0 or 1 (dirty sched config) */
+
+ /* Added in 2.12 */
+ size_t sizeof_ErlNifResourceTypeInit;
+}ErlNifEntry;
typedef struct
@@ -153,8 +139,18 @@ typedef struct
void* ref_bin;
}ErlNifBinary;
-typedef struct enif_resource_type_t ErlNifResourceType;
-typedef void ErlNifResourceDtor(ErlNifEnv*, void*);
+#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
+typedef void* ErlNifEvent; /* FIXME: Use 'HANDLE' somehow without breaking existing source */
+#else
+typedef int ErlNifEvent;
+#endif
+
+/* Return bits from enif_select: */
+#define ERL_NIF_SELECT_STOP_CALLED (1 << 0)
+#define ERL_NIF_SELECT_STOP_SCHEDULED (1 << 1)
+#define ERL_NIF_SELECT_INVALID_EVENT (1 << 2)
+#define ERL_NIF_SELECT_FAILED (1 << 3)
+
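These bits are returned by enif_select() (declared in erl_nif_api_funcs.h below). A hedged sketch of a read subscription; fd, res, pid and ref are assumptions, and ERL_NIF_SELECT_READ / ERL_NIF_SELECT_STOP come from the ErlNifSelectFlags enum that is not visible in this hunk:

/* Ask the VM to send {select, Obj, Ref, ready_input} to pid when fd
 * becomes readable. res must be a resource object whose type was
 * opened with a stop callback. */
int rv = enif_select(env, fd, ERL_NIF_SELECT_READ, res, &pid, ref);
if (rv < 0) {
    /* failed: ERL_NIF_SELECT_INVALID_EVENT or ERL_NIF_SELECT_FAILED
     * is set in rv */
}
/* Tearing down later returns ERL_NIF_SELECT_STOP_CALLED (the stop
 * callback ran synchronously) or ERL_NIF_SELECT_STOP_SCHEDULED (it
 * runs once in-flight select operations are done):
 *     enif_select(env, fd, ERL_NIF_SELECT_STOP, res, NULL, ref);
 */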
typedef enum
{
ERL_NIF_RT_CREATE = 1,
@@ -169,7 +165,25 @@ typedef enum
typedef struct
{
ERL_NIF_TERM pid; /* internal, may change */
-}ErlNifPid;
+} ErlNifPid;
+
+typedef struct
+{
+ ERL_NIF_TERM port_id; /* internal, may change */
+}ErlNifPort;
+
+typedef ErlDrvMonitor ErlNifMonitor;
+
+typedef struct enif_resource_type_t ErlNifResourceType;
+typedef void ErlNifResourceDtor(ErlNifEnv*, void*);
+typedef void ErlNifResourceStop(ErlNifEnv*, void*, ErlNifEvent, int is_direct_call);
+typedef void ErlNifResourceDown(ErlNifEnv*, void*, ErlNifPid*, ErlNifMonitor*);
+
+typedef struct {
+ ErlNifResourceDtor* dtor;
+ ErlNifResourceStop* stop; /* at ERL_NIF_SELECT_STOP event */
+ ErlNifResourceDown* down; /* enif_monitor_process */
+} ErlNifResourceTypeInit;
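A sketch of wiring these callbacks up through enif_open_resource_type_x (declared below); all names are illustrative:

static void sock_dtor(ErlNifEnv* env, void* obj)
{
    /* last reference to the resource is gone; free what it owns */
}
static void sock_stop(ErlNifEnv* env, void* obj, ErlNifEvent ev,
                      int is_direct_call)
{
    /* select on ev has stopped; it is now safe to close it */
}
static void sock_down(ErlNifEnv* env, void* obj, ErlNifPid* pid,
                      ErlNifMonitor* mon)
{
    /* a process watched via enif_monitor_process() terminated */
}

static ErlNifResourceTypeInit sock_rt_init = {sock_dtor, sock_stop, sock_down};
static ErlNifResourceType* sock_rt;

/* typically from the module's load() callback:
 *     sock_rt = enif_open_resource_type_x(env, "socket", &sock_rt_init,
 *                                         ERL_NIF_RT_CREATE, NULL);
 */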
typedef ErlDrvSysInfo ErlNifSysInfo;
@@ -181,33 +195,91 @@ typedef int ErlNifTSDKey;
typedef ErlDrvThreadOpts ErlNifThreadOpts;
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
typedef enum
{
- ERL_NIF_DIRTY_JOB_CPU_BOUND = ERL_DRV_DIRTY_JOB_CPU_BOUND,
- ERL_NIF_DIRTY_JOB_IO_BOUND = ERL_DRV_DIRTY_JOB_IO_BOUND
+ ERL_NIF_DIRTY_JOB_CPU_BOUND = ERL_DIRTY_JOB_CPU_BOUND,
+ ERL_NIF_DIRTY_JOB_IO_BOUND = ERL_DIRTY_JOB_IO_BOUND
}ErlNifDirtyTaskFlags;
-#endif
typedef struct /* All fields all internal and may change */
{
ERL_NIF_TERM map;
- ERL_NIF_UINT t_limit;
+ ERL_NIF_UINT size;
ERL_NIF_UINT idx;
- ERL_NIF_TERM *ks;
- ERL_NIF_TERM *vs;
+ union {
+ struct {
+ ERL_NIF_TERM *ks;
+ ERL_NIF_TERM *vs;
+ }flat;
+ struct {
+ struct ErtsDynamicWStack_* wstack;
+ ERL_NIF_TERM* kv;
+ }hash;
+ }u;
void* __spare__[2]; /* for future additions to be ABI compatible (same struct size) */
} ErlNifMapIterator;
typedef enum {
- ERL_NIF_MAP_ITERATOR_HEAD = 1,
- ERL_NIF_MAP_ITERATOR_TAIL = 2
+ ERL_NIF_MAP_ITERATOR_FIRST = 1,
+ ERL_NIF_MAP_ITERATOR_LAST = 2,
+
+ /* deprecated synonyms (undocumented in 17 and 18-rc) */
+ ERL_NIF_MAP_ITERATOR_HEAD = ERL_NIF_MAP_ITERATOR_FIRST,
+ ERL_NIF_MAP_ITERATOR_TAIL = ERL_NIF_MAP_ITERATOR_LAST
} ErlNifMapIteratorEntry;
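Callers never see the flat/hash split above; iteration looks the same for both map representations. A usage sketch (enif_map_iterator_create/destroy are part of the pre-existing 2.6 map API, not shown in this hunk):

ErlNifMapIterator iter;
ERL_NIF_TERM key, value;

if (enif_map_iterator_create(env, map, &iter, ERL_NIF_MAP_ITERATOR_FIRST)) {
    while (enif_map_iterator_get_pair(env, &iter, &key, &value)) {
        /* ... use key and value ... */
        enif_map_iterator_next(env, &iter);
    }
    enif_map_iterator_destroy(env, &iter);
}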
+typedef enum {
+ ERL_NIF_UNIQUE_POSITIVE = (1 << 0),
+ ERL_NIF_UNIQUE_MONOTONIC = (1 << 1)
+} ErlNifUniqueInteger;
+
+typedef enum {
+ ERL_NIF_BIN2TERM_SAFE = 0x20000000
+} ErlNifBinaryToTerm;
+
+typedef enum {
+ ERL_NIF_INTERNAL_HASH = 1,
+ ERL_NIF_PHASH2 = 2
+} ErlNifHash;
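A sketch of the matching calls (both declared below). ERL_NIF_PHASH2 is meant to agree with erlang:phash2 and, as far as the documentation goes, ignores the salt; ERL_NIF_INTERNAL_HASH is faster but not stable across node restarts. my_salt is an assumption:

/* a salted hash for a NIF-internal hash table */
ErlNifUInt64 h = enif_hash(ERL_NIF_INTERNAL_HASH, term, my_salt);

/* a strictly increasing positive integer term, e.g. for request tags */
ERL_NIF_TERM tag = enif_make_unique_integer(env,
        ERL_NIF_UNIQUE_POSITIVE | ERL_NIF_UNIQUE_MONOTONIC);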
+
+#define ERL_NIF_IOVEC_SIZE 16
+
+typedef struct erl_nif_io_vec {
+    int iovcnt;  /* number of entries in iov */
+ size_t size; /* total size in bytes */
+ SysIOVec *iov;
+
+ /* internals (avert your eyes) */
+ void **ref_bins; /* Binary[] */
+ int flags;
+
+ /* Used when stack allocating the io vec */
+ SysIOVec small_iov[ERL_NIF_IOVEC_SIZE];
+ void *small_ref_bin[ERL_NIF_IOVEC_SIZE];
+} ErlNifIOVec;
+
+typedef struct erts_io_queue ErlNifIOQueue;
+
+typedef enum {
+ ERL_NIF_IOQ_NORMAL = 1
+} ErlNifIOQueueOpts;
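A sketch of the queue lifecycle; fd and nwritten stand in for a real descriptor and write result:

ErlNifIOQueue *q = enif_ioq_create(ERL_NIF_IOQ_NORMAL);
ErlNifBinary bin;
/* ... obtain bin, e.g. via enif_inspect_binary() ... */
enif_ioq_enq_binary(q, &bin, 0);     /* queue takes over bin's data */

int iovlen;
SysIOVec *iov = enif_ioq_peek(q, &iovlen);
/* ... writev(fd, iov, iovlen) -> nwritten ... */
enif_ioq_deq(q, nwritten, NULL);     /* drop the bytes written; NULL
                                      * because the new size is not needed */
enif_ioq_destroy(q);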
+
+/*
+ * Return values from enif_thread_type(). Negative values
+ * reserved for specific types of non-scheduler threads.
+ * Positive values reserved for scheduler thread types.
+ */
+
+#define ERL_NIF_THR_UNDEFINED 0
+#define ERL_NIF_THR_NORMAL_SCHEDULER 1
+#define ERL_NIF_THR_DIRTY_CPU_SCHEDULER 2
+#define ERL_NIF_THR_DIRTY_IO_SCHEDULER 3
+
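For example, cleanup code that may block can branch on where it is running:

if (enif_thread_type() == ERL_NIF_THR_DIRTY_IO_SCHEDULER) {
    /* blocking here is acceptable */
} else {
    /* on a normal scheduler (or an unknown thread): defer the
     * blocking work, e.g. to a dirty NIF or a thread of our own */
}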
#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
# define ERL_NIF_API_FUNC_DECL(RET_TYPE, NAME, ARGS) RET_TYPE (*NAME) ARGS
typedef struct {
# include "erl_nif_api_funcs.h"
+ void* erts_alc_test;
} TWinDynNifCallbacks;
extern TWinDynNifCallbacks WinDynNifCallbacks;
# undef ERL_NIF_API_FUNC_DECL
@@ -227,22 +299,27 @@ extern TWinDynNifCallbacks WinDynNifCallbacks;
#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
# define ERL_NIF_INIT_GLOB TWinDynNifCallbacks WinDynNifCallbacks;
-# ifdef STATIC_ERLANG_NIF
-# define ERL_NIF_INIT_DECL(MODNAME) __declspec(dllexport) ErlNifEntry* MODNAME ## _nif_init(TWinDynNifCallbacks* callbacks)
-# else
-# define ERL_NIF_INIT_DECL(MODNAME) __declspec(dllexport) ErlNifEntry* nif_init(TWinDynNifCallbacks* callbacks)
-# endif
+# define ERL_NIF_INIT_ARGS TWinDynNifCallbacks* callbacks
# define ERL_NIF_INIT_BODY memcpy(&WinDynNifCallbacks,callbacks,sizeof(TWinDynNifCallbacks))
+# define ERL_NIF_INIT_EXPORT __declspec(dllexport)
#else
# define ERL_NIF_INIT_GLOB
+# define ERL_NIF_INIT_ARGS void
# define ERL_NIF_INIT_BODY
-# ifdef STATIC_ERLANG_NIF
-# define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* MODNAME ## _nif_init(void)
+# if defined(__GNUC__) && __GNUC__ >= 4
+# define ERL_NIF_INIT_EXPORT __attribute__ ((visibility("default")))
+# elif defined (__SUNPRO_C) && (__SUNPRO_C >= 0x550)
+# define ERL_NIF_INIT_EXPORT __global
# else
-# define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* nif_init(void)
+# define ERL_NIF_INIT_EXPORT
# endif
#endif
+#ifdef STATIC_ERLANG_NIF
+# define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* MODNAME ## _nif_init(ERL_NIF_INIT_ARGS)
+#else
+# define ERL_NIF_INIT_DECL(MODNAME) ERL_NIF_INIT_EXPORT ErlNifEntry* nif_init(ERL_NIF_INIT_ARGS)
+#endif
#ifdef __cplusplus
}
@@ -268,7 +345,9 @@ ERL_NIF_INIT_DECL(NAME) \
sizeof(FUNCS) / sizeof(*FUNCS), \
FUNCS, \
LOAD, RELOAD, UPGRADE, UNLOAD, \
- ERL_NIF_VM_VARIANT \
+ ERL_NIF_VM_VARIANT, \
+ 1, \
+ sizeof(ErlNifResourceTypeInit) \
}; \
ERL_NIF_INIT_BODY; \
return &entry; \
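For reference, this entry is normally emitted by placing ERL_NIF_INIT at the bottom of a NIF module; a minimal sketch (my_mod, hello_nif and load are hypothetical):

static ERL_NIF_TERM hello_nif(ErlNifEnv* env, int argc,
                              const ERL_NIF_TERM argv[])
{
    return enif_make_atom(env, "hello");
}

static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
{
    return 0;  /* 0 == success */
}

static ErlNifFunc nif_funcs[] = {
    {"hello", 0, hello_nif, 0}
};

ERL_NIF_INIT(my_mod, nif_funcs, load, NULL, NULL, NULL)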
diff --git a/erts/emulator/beam/erl_nif_api_funcs.h b/erts/emulator/beam/erl_nif_api_funcs.h
index d7c554e60b..9e573307d8 100644
--- a/erts/emulator/beam/erl_nif_api_funcs.h
+++ b/erts/emulator/beam/erl_nif_api_funcs.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2009-2014. All Rights Reserved.
+ * Copyright Ericsson AB 2009-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -22,7 +23,7 @@
#endif
/*
-** WARNING: add new ERL_NIF_API_FUNC_DECL entries at the bottom of the list
+** WARNING: Add new ERL_NIF_API_FUNC_DECL entries at the bottom of the list
** to keep compatibility on Windows!!!
**
** And don't forget to increase ERL_NIF_MINOR_VERSION in erl_nif.h
@@ -141,14 +142,6 @@ ERL_NIF_API_FUNC_DECL(int,enif_is_number,(ErlNifEnv*, ERL_NIF_TERM term));
ERL_NIF_API_FUNC_DECL(void*,enif_dlopen,(const char* lib, void (*err_handler)(void*,const char*), void* err_arg));
ERL_NIF_API_FUNC_DECL(void*,enif_dlsym,(void* handle, const char* symbol, void (*err_handler)(void*,const char*), void* err_arg));
ERL_NIF_API_FUNC_DECL(int,enif_consume_timeslice,(ErlNifEnv*, int percent));
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
-ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_schedule_dirty_nif,(ErlNifEnv*,int,ERL_NIF_TERM (*)(ErlNifEnv*,int,const ERL_NIF_TERM[]),int,const ERL_NIF_TERM[]));
-ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_schedule_dirty_nif_finalizer,(ErlNifEnv*,ERL_NIF_TERM,ERL_NIF_TERM (*)(ErlNifEnv*,ERL_NIF_TERM)));
-ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_dirty_nif_finalizer,(ErlNifEnv*,ERL_NIF_TERM));
-ERL_NIF_API_FUNC_DECL(int,enif_is_on_dirty_scheduler,(ErlNifEnv*));
-ERL_NIF_API_FUNC_DECL(int,enif_have_dirty_schedulers,(void));
-#endif
-
ERL_NIF_API_FUNC_DECL(int, enif_is_map, (ErlNifEnv* env, ERL_NIF_TERM term));
ERL_NIF_API_FUNC_DECL(int, enif_get_map_size, (ErlNifEnv* env, ERL_NIF_TERM term, size_t *size));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM, enif_make_new_map, (ErlNifEnv* env));
@@ -163,12 +156,53 @@ ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_is_tail, (ErlNifEnv *env, ErlNifMap
ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_next, (ErlNifEnv *env, ErlNifMapIterator *iter));
ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_prev, (ErlNifEnv *env, ErlNifMapIterator *iter));
ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_get_pair, (ErlNifEnv *env, ErlNifMapIterator *iter, ERL_NIF_TERM *key, ERL_NIF_TERM *value));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_schedule_nif,(ErlNifEnv*,const char*,int,ERL_NIF_TERM (*)(ErlNifEnv*,int,const ERL_NIF_TERM[]),int,const ERL_NIF_TERM[]));
+ERL_NIF_API_FUNC_DECL(int, enif_has_pending_exception, (ErlNifEnv *env, ERL_NIF_TERM* reason));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM, enif_raise_exception, (ErlNifEnv *env, ERL_NIF_TERM reason));
+ERL_NIF_API_FUNC_DECL(int,enif_getenv,(const char* key, char* value, size_t* value_size));
+ERL_NIF_API_FUNC_DECL(ErlNifTime, enif_monotonic_time, (ErlNifTimeUnit));
+ERL_NIF_API_FUNC_DECL(ErlNifTime, enif_time_offset, (ErlNifTimeUnit));
+ERL_NIF_API_FUNC_DECL(ErlNifTime, enif_convert_time_unit, (ErlNifTime, ErlNifTimeUnit, ErlNifTimeUnit));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM, enif_now_time, (ErlNifEnv *env));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM, enif_cpu_time, (ErlNifEnv *env));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM, enif_make_unique_integer, (ErlNifEnv *env, ErlNifUniqueInteger properties));
+ERL_NIF_API_FUNC_DECL(int, enif_is_current_process_alive, (ErlNifEnv *env));
+ERL_NIF_API_FUNC_DECL(int, enif_is_process_alive, (ErlNifEnv *env, ErlNifPid *pid));
+ERL_NIF_API_FUNC_DECL(int, enif_is_port_alive, (ErlNifEnv *env, ErlNifPort *port_id));
+ERL_NIF_API_FUNC_DECL(int, enif_get_local_port, (ErlNifEnv* env, ERL_NIF_TERM, ErlNifPort* port_id));
+ERL_NIF_API_FUNC_DECL(int, enif_term_to_binary, (ErlNifEnv *env, ERL_NIF_TERM term, ErlNifBinary *bin));
+ERL_NIF_API_FUNC_DECL(size_t, enif_binary_to_term, (ErlNifEnv *env, const unsigned char* data, size_t sz, ERL_NIF_TERM *term, unsigned int opts));
+ERL_NIF_API_FUNC_DECL(int, enif_port_command, (ErlNifEnv *env, const ErlNifPort* to_port, ErlNifEnv *msg_env, ERL_NIF_TERM msg));
+ERL_NIF_API_FUNC_DECL(int,enif_thread_type,(void));
+ERL_NIF_API_FUNC_DECL(int,enif_snprintf,(char * buffer, size_t size, const char *format, ...));
+ERL_NIF_API_FUNC_DECL(int,enif_select,(ErlNifEnv* env, ErlNifEvent e, enum ErlNifSelectFlags flags, void* obj, const ErlNifPid* pid, ERL_NIF_TERM ref));
+ERL_NIF_API_FUNC_DECL(ErlNifResourceType*,enif_open_resource_type_x,(ErlNifEnv*, const char* name_str, const ErlNifResourceTypeInit*, ErlNifResourceFlags flags, ErlNifResourceFlags* tried));
+ERL_NIF_API_FUNC_DECL(int, enif_monitor_process,(ErlNifEnv*,void* obj,const ErlNifPid*,ErlNifMonitor *monitor));
+ERL_NIF_API_FUNC_DECL(int, enif_demonitor_process,(ErlNifEnv*,void* obj,const ErlNifMonitor *monitor));
+ERL_NIF_API_FUNC_DECL(int, enif_compare_monitors,(const ErlNifMonitor*,const ErlNifMonitor*));
+ERL_NIF_API_FUNC_DECL(ErlNifUInt64,enif_hash,(ErlNifHash type, ERL_NIF_TERM term, ErlNifUInt64 salt));
+ERL_NIF_API_FUNC_DECL(int, enif_whereis_pid, (ErlNifEnv *env, ERL_NIF_TERM name, ErlNifPid *pid));
+ERL_NIF_API_FUNC_DECL(int, enif_whereis_port, (ErlNifEnv *env, ERL_NIF_TERM name, ErlNifPort *port));
+
+ERL_NIF_API_FUNC_DECL(ErlNifIOQueue *,enif_ioq_create,(ErlNifIOQueueOpts opts));
+ERL_NIF_API_FUNC_DECL(void,enif_ioq_destroy,(ErlNifIOQueue *q));
+
+ERL_NIF_API_FUNC_DECL(int,enif_ioq_enq_binary,(ErlNifIOQueue *q, ErlNifBinary *bin, size_t skip));
+ERL_NIF_API_FUNC_DECL(int,enif_ioq_enqv,(ErlNifIOQueue *q, ErlNifIOVec *iov, size_t skip));
+
+ERL_NIF_API_FUNC_DECL(size_t,enif_ioq_size,(ErlNifIOQueue *q));
+ERL_NIF_API_FUNC_DECL(int,enif_ioq_deq,(ErlNifIOQueue *q, size_t count, size_t *size));
+
+ERL_NIF_API_FUNC_DECL(SysIOVec*,enif_ioq_peek,(ErlNifIOQueue *q, int *iovlen));
+
+ERL_NIF_API_FUNC_DECL(int,enif_inspect_iovec,(ErlNifEnv *env, size_t max_length, ERL_NIF_TERM iovec_term, ERL_NIF_TERM *tail, ErlNifIOVec **iovec));
+ERL_NIF_API_FUNC_DECL(void,enif_free_iovec,(ErlNifIOVec *iov));
/*
-** Add new entries here to keep compatibility on Windows!!!
+** ADD NEW ENTRIES HERE (before this comment) !!!
*/
-#endif
+#endif /* ERL_NIF_API_FUNC_DECL */
/*
** Please keep the ERL_NIF_API_FUNC_MACRO list below in the same order
@@ -282,21 +316,12 @@ ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_get_pair, (ErlNifEnv *env, ErlNifMa
# define enif_make_int64 ERL_NIF_API_FUNC_MACRO(enif_make_int64)
# define enif_make_uint64 ERL_NIF_API_FUNC_MACRO(enif_make_uint64)
#endif
-
# define enif_is_exception ERL_NIF_API_FUNC_MACRO(enif_is_exception)
# define enif_make_reverse_list ERL_NIF_API_FUNC_MACRO(enif_make_reverse_list)
# define enif_is_number ERL_NIF_API_FUNC_MACRO(enif_is_number)
# define enif_dlopen ERL_NIF_API_FUNC_MACRO(enif_dlopen)
# define enif_dlsym ERL_NIF_API_FUNC_MACRO(enif_dlsym)
# define enif_consume_timeslice ERL_NIF_API_FUNC_MACRO(enif_consume_timeslice)
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
-# define enif_schedule_dirty_nif ERL_NIF_API_FUNC_MACRO(enif_schedule_dirty_nif)
-# define enif_schedule_dirty_nif_finalizer ERL_NIF_API_FUNC_MACRO(enif_schedule_dirty_nif_finalizer)
-# define enif_dirty_nif_finalizer ERL_NIF_API_FUNC_MACRO(enif_dirty_nif_finalizer)
-# define enif_is_on_dirty_scheduler ERL_NIF_API_FUNC_MACRO(enif_is_on_dirty_scheduler)
-# define enif_have_dirty_schedulers ERL_NIF_API_FUNC_MACRO(enif_have_dirty_schedulers)
-#endif
-
# define enif_is_map ERL_NIF_API_FUNC_MACRO(enif_is_map)
# define enif_get_map_size ERL_NIF_API_FUNC_MACRO(enif_get_map_size)
# define enif_make_new_map ERL_NIF_API_FUNC_MACRO(enif_make_new_map)
@@ -311,11 +336,54 @@ ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_get_pair, (ErlNifEnv *env, ErlNifMa
# define enif_map_iterator_next ERL_NIF_API_FUNC_MACRO(enif_map_iterator_next)
# define enif_map_iterator_prev ERL_NIF_API_FUNC_MACRO(enif_map_iterator_prev)
# define enif_map_iterator_get_pair ERL_NIF_API_FUNC_MACRO(enif_map_iterator_get_pair)
+# define enif_schedule_nif ERL_NIF_API_FUNC_MACRO(enif_schedule_nif)
+# define enif_has_pending_exception ERL_NIF_API_FUNC_MACRO(enif_has_pending_exception)
+# define enif_raise_exception ERL_NIF_API_FUNC_MACRO(enif_raise_exception)
+# define enif_getenv ERL_NIF_API_FUNC_MACRO(enif_getenv)
+# define enif_monotonic_time ERL_NIF_API_FUNC_MACRO(enif_monotonic_time)
+# define enif_time_offset ERL_NIF_API_FUNC_MACRO(enif_time_offset)
+# define enif_convert_time_unit ERL_NIF_API_FUNC_MACRO(enif_convert_time_unit)
+# define enif_now_time ERL_NIF_API_FUNC_MACRO(enif_now_time)
+# define enif_cpu_time ERL_NIF_API_FUNC_MACRO(enif_cpu_time)
+# define enif_make_unique_integer ERL_NIF_API_FUNC_MACRO(enif_make_unique_integer)
+# define enif_is_current_process_alive ERL_NIF_API_FUNC_MACRO(enif_is_current_process_alive)
+# define enif_is_process_alive ERL_NIF_API_FUNC_MACRO(enif_is_process_alive)
+# define enif_is_port_alive ERL_NIF_API_FUNC_MACRO(enif_is_port_alive)
+# define enif_get_local_port ERL_NIF_API_FUNC_MACRO(enif_get_local_port)
+# define enif_term_to_binary ERL_NIF_API_FUNC_MACRO(enif_term_to_binary)
+# define enif_binary_to_term ERL_NIF_API_FUNC_MACRO(enif_binary_to_term)
+# define enif_port_command ERL_NIF_API_FUNC_MACRO(enif_port_command)
+# define enif_thread_type ERL_NIF_API_FUNC_MACRO(enif_thread_type)
+# define enif_snprintf ERL_NIF_API_FUNC_MACRO(enif_snprintf)
+# define enif_select ERL_NIF_API_FUNC_MACRO(enif_select)
+# define enif_open_resource_type_x ERL_NIF_API_FUNC_MACRO(enif_open_resource_type_x)
+# define enif_monitor_process ERL_NIF_API_FUNC_MACRO(enif_monitor_process)
+# define enif_demonitor_process ERL_NIF_API_FUNC_MACRO(enif_demonitor_process)
+# define enif_compare_monitors ERL_NIF_API_FUNC_MACRO(enif_compare_monitors)
+# define enif_hash ERL_NIF_API_FUNC_MACRO(enif_hash)
+# define enif_whereis_pid ERL_NIF_API_FUNC_MACRO(enif_whereis_pid)
+# define enif_whereis_port ERL_NIF_API_FUNC_MACRO(enif_whereis_port)
+# define enif_ioq_create ERL_NIF_API_FUNC_MACRO(enif_ioq_create)
+# define enif_ioq_destroy ERL_NIF_API_FUNC_MACRO(enif_ioq_destroy)
+# define enif_ioq_enq ERL_NIF_API_FUNC_MACRO(enif_ioq_enq)
+# define enif_ioq_enq_binary ERL_NIF_API_FUNC_MACRO(enif_ioq_enq_binary)
+# define enif_ioq_enqv ERL_NIF_API_FUNC_MACRO(enif_ioq_enqv)
+# define enif_ioq_size ERL_NIF_API_FUNC_MACRO(enif_ioq_size)
+# define enif_ioq_deq ERL_NIF_API_FUNC_MACRO(enif_ioq_deq)
+# define enif_ioq_peek ERL_NIF_API_FUNC_MACRO(enif_ioq_peek)
+# define enif_inspect_iovec ERL_NIF_API_FUNC_MACRO(enif_inspect_iovec)
+# define enif_free_iovec ERL_NIF_API_FUNC_MACRO(enif_free_iovec)
/*
-** Add new entries here
+** ADD NEW ENTRIES HERE (before this comment)
*/
-#endif
+
+/*
+ * Conditional EXPERIMENTAL entries always go last.
+ * They must be moved up and made unconditional to preserve binary
+ * backward compatibility on Windows.
+ */
+#endif /* ERL_NIF_API_FUNC_MACRO */
#if defined(__GNUC__) && !(defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
@@ -535,7 +603,7 @@ static ERL_NIF_INLINE ERL_NIF_TERM enif_make_list9(ErlNifEnv* env,
#ifndef enif_make_pid
-# define enif_make_pid(ENV, PID) ((const ERL_NIF_TERM)((PID)->pid))
+# define enif_make_pid(ENV, PID) ((void)(ENV),(const ERL_NIF_TERM)((PID)->pid))
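The added (void)(ENV) makes the macro evaluate its env argument in a comma expression and discard it, so a caller whose env would otherwise be unused no longer draws unused-variable warnings; for instance:

ErlNifPid pid;
/* env takes part in the expression but generates no code: */
ERL_NIF_TERM term = enif_make_pid(env, &pid);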
#if SIZEOF_LONG == 8
# define enif_get_int64 enif_get_long
diff --git a/erts/emulator/beam/erl_node_container_utils.h b/erts/emulator/beam/erl_node_container_utils.h
index 17f6b32bb1..6ec428e282 100644
--- a/erts/emulator/beam/erl_node_container_utils.h
+++ b/erts/emulator/beam/erl_node_container_utils.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2001-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2001-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -254,19 +255,16 @@ extern ErtsPTab erts_port;
 *                                    Refs                                   *
\*                                                                           */
-#if defined(ARCH_64) && !HALFWORD_HEAP
-
-#define internal_ref_no_of_numbers(x) \
- (internal_ref_data((x))[0])
-#define internal_thing_ref_no_of_numbers(thing) \
- (internal_thing_ref_data(thing)[0])
-#define internal_ref_numbers(x) \
- (&internal_ref_data((x))[1])
-#define internal_thing_ref_numbers(thing) \
- (&internal_thing_ref_data(thing)[1])
-#define external_ref_no_of_numbers(x) \
+#define internal_ref_no_numbers(x) ERTS_REF_NUMBERS
+#define internal_ref_numbers(x) (is_internal_ordinary_ref((x)) \
+ ? internal_ordinary_ref_numbers((x)) \
+ : (ASSERT(is_internal_magic_ref((x))), \
+ internal_magic_ref_numbers((x))))
+#if defined(ARCH_64)
+
+#define external_ref_no_numbers(x) \
(external_ref_data((x))[0])
-#define external_thing_ref_no_of_numbers(thing) \
+#define external_thing_ref_no_numbers(thing) \
(external_thing_ref_data(thing)[0])
#define external_ref_numbers(x) \
(&external_ref_data((x))[1])
@@ -276,12 +274,8 @@ extern ErtsPTab erts_port;
#else
-#define internal_ref_no_of_numbers(x) (internal_ref_data_words((x)))
-#define internal_thing_ref_no_of_numbers(t) (internal_thing_ref_data_words(t))
-#define internal_ref_numbers(x) (internal_ref_data((x)))
-#define internal_thing_ref_numbers(t) (internal_thing_ref_data(t))
-#define external_ref_no_of_numbers(x) (external_ref_data_words((x)))
-#define external_thing_ref_no_of_numbers(t) (external_thing_ref_data_words((t)))
+#define external_ref_no_numbers(x) (external_ref_data_words((x)))
+#define external_thing_ref_no_numbers(t) (external_thing_ref_data_words((t)))
#define external_ref_numbers(x) (external_ref_data((x)))
#define external_thing_ref_numbers(t) (external_thing_ref_data((t)))
@@ -298,15 +292,9 @@ extern ErtsPTab erts_port;
#define internal_ref_channel_no(x) (internal_channel_no((x)))
#define external_ref_channel_no(x) (external_channel_no((x)))
-#define ref_data_words(x) (is_internal_ref((x)) \
- ? internal_ref_data_words((x)) \
- : external_ref_data_words((x)))
-#define ref_data(x) (is_internal_ref((x)) \
- ? internal_ref_data((x)) \
- : external_ref_data((x)))
-#define ref_no_of_numbers(x) (is_internal_ref((x)) \
- ? internal_ref_no_of_numbers((x))\
- : external_ref_no_of_numbers((x)))
+#define ref_no_numbers(x) (is_internal_ref((x)) \
+ ? internal_ref_no_numbers((x))\
+ : external_ref_no_numbers((x)))
#define ref_numbers(x) (is_internal_ref((x)) \
? internal_ref_numbers((x)) \
: external_ref_numbers((x)))
@@ -327,8 +315,6 @@ extern ErtsPTab erts_port;
: external_ref_channel_no((x)))
#define is_ref(x) (is_internal_ref((x)) \
|| is_external_ref((x)))
-#define is_ref_rel(x,Base) (is_internal_ref_rel((x),Base) \
- || is_external_ref_rel((x),Base))
#define is_not_ref(x) (!is_ref(x))
#endif
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
index c6d136f951..0f3dfa797c 100644
--- a/erts/emulator/beam/erl_node_tables.c
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2001-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2001-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -28,31 +29,88 @@
#include "error.h"
#include "erl_thr_progress.h"
#include "dtrace-wrapper.h"
+#include "erl_binary.h"
+#include "erl_bif_unique.h"
Hash erts_dist_table;
Hash erts_node_table;
-erts_smp_rwmtx_t erts_dist_table_rwmtx;
-erts_smp_rwmtx_t erts_node_table_rwmtx;
+erts_rwmtx_t erts_dist_table_rwmtx;
+erts_rwmtx_t erts_node_table_rwmtx;
DistEntry *erts_hidden_dist_entries;
DistEntry *erts_visible_dist_entries;
-DistEntry *erts_not_connected_dist_entries;
+DistEntry *erts_not_connected_dist_entries; /* including erts_this_dist_entry */
Sint erts_no_of_hidden_dist_entries;
Sint erts_no_of_visible_dist_entries;
-Sint erts_no_of_not_connected_dist_entries;
+Sint erts_no_of_not_connected_dist_entries; /* including erts_this_dist_entry */
DistEntry *erts_this_dist_entry;
ErlNode *erts_this_node;
char erts_this_node_sysname_BUFFER[256],
*erts_this_node_sysname = "uninitialized yet";
-static Uint node_entries;
-static Uint dist_entries;
+static Uint node_entries = 0;
+static Uint dist_entries = 0;
static int references_atoms_need_init = 1;
+static ErtsMonotonicTime orig_node_tab_delete_delay;
+static ErtsMonotonicTime node_tab_delete_delay;
+
/* -- The distribution table ---------------------------------------------- */
+#define ErtsBin2DistEntry(B) \
+ ((DistEntry *) ERTS_MAGIC_BIN_DATA((B)))
+#define ErtsDistEntry2Bin(DEP) \
+ ((Binary *) ERTS_MAGIC_BIN_FROM_DATA((DEP)))
+
+static ERTS_INLINE erts_aint_t
+de_refc_read(DistEntry *dep, erts_aint_t min)
+{
+ return erts_refc_read(&ErtsDistEntry2Bin(dep)->intern.refc, min);
+}
+
+static ERTS_INLINE erts_aint_t
+de_refc_inc_read(DistEntry *dep, erts_aint_t min)
+{
+ return erts_refc_inctest(&ErtsDistEntry2Bin(dep)->intern.refc, min);
+}
+
+static ERTS_INLINE void
+de_refc_inc(DistEntry *dep, erts_aint_t min)
+{
+ erts_refc_inc(&ErtsDistEntry2Bin(dep)->intern.refc, min);
+}
+
+static ERTS_INLINE void
+de_refc_dec(DistEntry *dep, erts_aint_t min)
+{
+#ifdef DEBUG
+ (void) erts_refc_read(&ErtsDistEntry2Bin(dep)->intern.refc, min+1);
+#endif
+ erts_bin_release(ErtsDistEntry2Bin(dep));
+}
+
+static ERTS_INLINE erts_aint_t
+de_refc_dec_read(DistEntry *dep, erts_aint_t min)
+{
+ return erts_refc_dectest(&ErtsDistEntry2Bin(dep)->intern.refc, min);
+}
+
+void
+erts_ref_dist_entry(DistEntry *dep)
+{
+ ASSERT(dep);
+ de_refc_inc(dep, 1);
+}
+
+void
+erts_deref_dist_entry(DistEntry *dep)
+{
+ ASSERT(dep);
+ de_refc_dec(dep, 0);
+}
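In sketch form, the ownership rule these helpers implement (erts_find_or_insert_dist_entry takes a reference for the caller; the plain lookup functions below do not):

/* keep a DistEntry alive across use outside the table lock */
DistEntry *dep = erts_find_or_insert_dist_entry(sysname);  /* refc++ */
/* ... use dep ... */
erts_deref_dist_entry(dep);  /* refc--; may trigger delayed deletion */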
+
#ifdef DEBUG
static int
is_in_de_list(DistEntry *dep, DistEntry *dep_list)
@@ -81,54 +139,75 @@ dist_table_cmp(void *dep1, void *dep2)
static void*
dist_table_alloc(void *dep_tmpl)
{
- Eterm chnl_nr;
+#ifdef DEBUG
+ erts_aint_t refc;
+#endif
Eterm sysname;
+ Binary *bin;
DistEntry *dep;
- erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
- rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
-
- if(((DistEntry *) dep_tmpl) == erts_this_dist_entry)
- return dep_tmpl;
+ erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ;
sysname = ((DistEntry *) dep_tmpl)->sysname;
- chnl_nr = make_small((Uint) atom_val(sysname));
- dep = (DistEntry *) erts_alloc(ERTS_ALC_T_DIST_ENTRY, sizeof(DistEntry));
+
+ bin = erts_create_magic_binary_x(sizeof(DistEntry),
+ erts_dist_entry_destructor,
+ ERTS_ALC_T_DIST_ENTRY,
+ 0);
+ dep = ErtsBin2DistEntry(bin);
dist_entries++;
+#ifdef DEBUG
+ refc =
+#else
+ (void)
+#endif
+ de_refc_dec_read(dep, -1);
+ ASSERT(refc == -1);
+
dep->prev = NULL;
- erts_refc_init(&dep->refc, -1);
- erts_smp_rwmtx_init_opt_x(&dep->rwmtx, &rwmtx_opt, "dist_entry", chnl_nr);
+ erts_rwmtx_init_opt(&dep->rwmtx, &rwmtx_opt, "dist_entry", sysname,
+ ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
dep->sysname = sysname;
dep->cid = NIL;
+ erts_atomic_init_nob(&dep->input_handler, (erts_aint_t) NIL);
dep->connection_id = 0;
dep->status = 0;
dep->flags = 0;
dep->version = 0;
- erts_smp_mtx_init_x(&dep->lnk_mtx, "dist_entry_links", chnl_nr);
+ erts_mtx_init(&dep->lnk_mtx, "dist_entry_links", sysname,
+ ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
dep->node_links = NULL;
dep->nlinks = NULL;
dep->monitors = NULL;
- erts_smp_mtx_init_x(&dep->qlock, "dist_entry_out_queue", chnl_nr);
- dep->qflgs = 0;
- dep->qsize = 0;
+ erts_mtx_init(&dep->qlock, "dist_entry_out_queue", sysname,
+ ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
+ erts_atomic32_init_nob(&dep->qflgs, 0);
+ erts_atomic_init_nob(&dep->qsize, 0);
+ erts_atomic64_init_nob(&dep->in, 0);
+ erts_atomic64_init_nob(&dep->out, 0);
dep->out_queue.first = NULL;
dep->out_queue.last = NULL;
dep->suspended = NULL;
+ dep->tmp_out_queue.first = NULL;
+ dep->tmp_out_queue.last = NULL;
dep->finalized_out_queue.first = NULL;
dep->finalized_out_queue.last = NULL;
- erts_smp_atomic_init_nob(&dep->dist_cmd_scheduled, 0);
+ erts_atomic_init_nob(&dep->dist_cmd_scheduled, 0);
erts_port_task_handle_init(&dep->dist_cmd);
dep->send = NULL;
dep->cache = NULL;
/* Link in */
- /* All new dist entries are "not connected" */
+ /* All new dist entries are "not connected".
+ * erts_this_dist_entry is also always included among "not connected"
+ */
dep->next = erts_not_connected_dist_entries;
if(erts_not_connected_dist_entries) {
ASSERT(erts_not_connected_dist_entries->prev == NULL);
@@ -145,9 +224,6 @@ dist_table_free(void *vdep)
{
DistEntry *dep = (DistEntry *) vdep;
- if(dep == erts_this_dist_entry)
- return;
-
ASSERT(is_nil(dep->cid));
ASSERT(dep->nlinks == NULL);
ASSERT(dep->node_links == NULL);
@@ -173,44 +249,77 @@ dist_table_free(void *vdep)
erts_no_of_not_connected_dist_entries--;
ASSERT(!dep->cache);
- erts_smp_rwmtx_destroy(&dep->rwmtx);
- erts_smp_mtx_destroy(&dep->lnk_mtx);
- erts_smp_mtx_destroy(&dep->qlock);
+ erts_rwmtx_destroy(&dep->rwmtx);
+ erts_mtx_destroy(&dep->lnk_mtx);
+ erts_mtx_destroy(&dep->qlock);
#ifdef DEBUG
sys_memset(vdep, 0x77, sizeof(DistEntry));
#endif
- erts_free(ERTS_ALC_T_DIST_ENTRY, (void *) dep);
+ erts_bin_free(ErtsDistEntry2Bin(dep));
- ASSERT(dist_entries > 1);
+ ASSERT(dist_entries > 0);
dist_entries--;
}
void
-erts_dist_table_info(int to, void *to_arg)
+erts_dist_table_info(fmtfn_t to, void *to_arg)
{
int lock = !ERTS_IS_CRASH_DUMPING;
if (lock)
- erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
+ erts_rwmtx_rlock(&erts_dist_table_rwmtx);
hash_info(to, to_arg, &erts_dist_table);
if (lock)
- erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
+ erts_rwmtx_runlock(&erts_dist_table_rwmtx);
+}
+
+static ERTS_INLINE DistEntry *find_dist_entry(Eterm sysname,
+ int inc_refc,
+ int connected_only)
+{
+ DistEntry *res;
+ DistEntry de;
+ de.sysname = sysname;
+ erts_rwmtx_rlock(&erts_dist_table_rwmtx);
+ res = hash_get(&erts_dist_table, (void *) &de);
+ if (res) {
+ if (connected_only && is_nil(res->cid))
+ res = NULL;
+ else {
+ int pend_delete;
+ erts_aint_t refc;
+ if (inc_refc) {
+ refc = de_refc_inc_read(res, 1);
+ pend_delete = refc < 2;
+ }
+ else {
+ refc = de_refc_read(res, 0);
+ pend_delete = refc < 1;
+ }
+ if (pend_delete) /* Pending delete */
+ de_refc_inc(res, 1);
+ }
+ }
+ erts_rwmtx_runlock(&erts_dist_table_rwmtx);
+ return res;
}
DistEntry *
erts_channel_no_to_dist_entry(Uint cno)
{
+ /*
+ * Does NOT increase reference count!
+ */
+
/*
* For this node (and previous incarnations of this node),
* ERST_INTERNAL_CHANNEL_NO (will always be 0 I guess) is used as
* channel no. For other nodes, the atom index of the atom corresponding
* to the node name is used as channel no.
*/
- if(cno == ERST_INTERNAL_CHANNEL_NO) {
- erts_refc_inc(&erts_this_dist_entry->refc, 2);
+ if (cno == ERST_INTERNAL_CHANNEL_NO)
return erts_this_dist_entry;
- }
if((cno > MAX_ATOM_INDEX)
|| (cno >= atom_table_size())
@@ -219,95 +328,150 @@ erts_channel_no_to_dist_entry(Uint cno)
/* cno is a valid atom index; find corresponding dist entry (if there
is one) */
- return erts_find_dist_entry(make_atom(cno));
+ return find_dist_entry(make_atom(cno), 0, 0);
}
-
DistEntry *
erts_sysname_to_connected_dist_entry(Eterm sysname)
{
- DistEntry de;
- DistEntry *res_dep;
- de.sysname = sysname;
-
- if(erts_this_dist_entry->sysname == sysname) {
- erts_refc_inc(&erts_this_dist_entry->refc, 2);
+ /*
+ * Does NOT increase reference count!
+ */
+ if(erts_this_dist_entry->sysname == sysname)
return erts_this_dist_entry;
- }
-
- erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
- res_dep = (DistEntry *) hash_get(&erts_dist_table, (void *) &de);
- if (res_dep) {
- erts_aint_t refc = erts_refc_inctest(&res_dep->refc, 1);
- if (refc < 2) /* Pending delete */
- erts_refc_inc(&res_dep->refc, 1);
- }
- erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
- if (res_dep) {
- int deref;
- erts_smp_rwmtx_rlock(&res_dep->rwmtx);
- deref = is_nil(res_dep->cid);
- erts_smp_rwmtx_runlock(&res_dep->rwmtx);
- if (deref) {
- erts_deref_dist_entry(res_dep);
- res_dep = NULL;
- }
- }
- return res_dep;
+ return find_dist_entry(sysname, 0, 1);
}
DistEntry *erts_find_or_insert_dist_entry(Eterm sysname)
{
+ /*
+ * This function DOES increase reference count!
+ */
DistEntry *res;
DistEntry de;
erts_aint_t refc;
- res = erts_find_dist_entry(sysname);
+ res = find_dist_entry(sysname, 1, 0);
if (res)
return res;
de.sysname = sysname;
- erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
+ erts_rwmtx_rwlock(&erts_dist_table_rwmtx);
res = hash_put(&erts_dist_table, (void *) &de);
- refc = erts_refc_inctest(&res->refc, 0);
+ refc = de_refc_inc_read(res, 0);
if (refc < 2) /* New or pending delete */
- erts_refc_inc(&res->refc, 1);
- erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+ de_refc_inc(res, 1);
+ erts_rwmtx_rwunlock(&erts_dist_table_rwmtx);
return res;
}
DistEntry *erts_find_dist_entry(Eterm sysname)
{
- DistEntry *res;
- DistEntry de;
- de.sysname = sysname;
- erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
- res = hash_get(&erts_dist_table, (void *) &de);
- if (res) {
- erts_aint_t refc = erts_refc_inctest(&res->refc, 1);
- if (refc < 2) /* Pending delete */
- erts_refc_inc(&res->refc, 1);
- }
- erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
- return res;
+ /*
+ * Does NOT increase reference count!
+ */
+ return find_dist_entry(sysname, 0, 0);
+}
+
+DistEntry *
+erts_dhandle_to_dist_entry(Eterm dhandle)
+{
+ Binary *bin;
+ if (!is_internal_magic_ref(dhandle))
+ return NULL;
+ bin = erts_magic_ref2bin(dhandle);
+ if (ERTS_MAGIC_BIN_DESTRUCTOR(bin) != erts_dist_entry_destructor)
+ return NULL;
+ return ErtsBin2DistEntry(bin);
}
-void erts_delete_dist_entry(DistEntry *dep)
+Eterm
+erts_make_dhandle(Process *c_p, DistEntry *dep)
{
- ASSERT(dep != erts_this_dist_entry);
- if(dep != erts_this_dist_entry) {
- erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
- /*
- * Another thread might have looked up this dist entry after
- * we decided to delete it (refc became zero). If so, the other
- * thread incremented refc twice. Once for the new reference
- * and once for this thread. Therefore, delete dist entry if
- * refc is 0 or -1 after a decrement.
- */
- if (erts_refc_dectest(&dep->refc, -1) <= 0)
- (void) hash_erase(&erts_dist_table, (void *) dep);
- erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+ Binary *bin;
+ Eterm *hp;
+
+ bin = ErtsDistEntry2Bin(dep);
+ ASSERT(bin);
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == erts_dist_entry_destructor);
+ hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE);
+ return erts_mk_magic_ref(&hp, &c_p->off_heap, bin);
+}
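The two functions above form a round trip between a DistEntry and the magic-ref "dhandle" term handed to Erlang code; in sketch form:

/* hand a distribution handle to Erlang ... */
Eterm dh = erts_make_dhandle(c_p, dep);
/* ... and resolve it when it comes back as a BIF argument;
 * NULL means the term was not a dist-entry magic ref */
DistEntry *dep2 = erts_dhandle_to_dist_entry(dh);
ASSERT(dep2 == dep);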
+
+static void try_delete_dist_entry(void *vbin);
+
+static void
+prepare_try_delete_dist_entry(void *vbin)
+{
+ Binary *bin = (Binary *) vbin;
+ DistEntry *dep = ErtsBin2DistEntry(bin);
+ Uint size;
+ erts_aint_t refc;
+
+ refc = de_refc_read(dep, 0);
+ if (refc > 0)
+ return;
+
+ size = ERTS_MAGIC_BIN_SIZE(sizeof(DistEntry));
+ erts_schedule_thr_prgr_later_cleanup_op(try_delete_dist_entry,
+ vbin, &dep->later_op, size);
+}
+
+static void try_delete_dist_entry(void *vbin)
+{
+ Binary *bin = (Binary *) vbin;
+ DistEntry *dep = ErtsBin2DistEntry(bin);
+ erts_aint_t refc;
+
+ erts_rwmtx_rwlock(&erts_dist_table_rwmtx);
+ /*
+ * Another thread might have looked up this dist entry after
+ * we decided to delete it (refc became zero). If so, the other
+ * thread incremented refc twice. Once for the new reference
+ * and once for this thread.
+ *
+     * If refc reaches -1, no one has used the entry since we
+     * set up the timer. Delete the entry.
+     *
+     * If refc reaches 0, the entry is currently not in use
+ * but has been used since we set up the timer. Set up a
+ * new timer.
+ *
+ * If refc > 0, the entry is in use. Keep the entry.
+ */
+ refc = de_refc_dec_read(dep, -1);
+ if (refc == -1)
+ (void) hash_erase(&erts_dist_table, (void *) dep);
+ erts_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+
+ if (refc == 0) {
+ if (node_tab_delete_delay == 0)
+ prepare_try_delete_dist_entry(vbin);
+ else if (node_tab_delete_delay > 0)
+ erts_start_timer_callback(node_tab_delete_delay,
+ prepare_try_delete_dist_entry,
+ vbin);
}
}
+int erts_dist_entry_destructor(Binary *bin)
+{
+ DistEntry *dep = ErtsBin2DistEntry(bin);
+ erts_aint_t refc;
+
+ refc = de_refc_read(dep, -1);
+
+ if (refc == -1)
+ return 1; /* Allow deallocation of structure... */
+
+ if (node_tab_delete_delay == 0)
+ prepare_try_delete_dist_entry((void *) bin);
+ else if (node_tab_delete_delay > 0)
+ erts_start_timer_callback(node_tab_delete_delay,
+ prepare_try_delete_dist_entry,
+ (void *) bin);
+
+ return 0;
+}
+
Uint
erts_dist_table_size(void)
{
@@ -320,7 +484,7 @@ erts_dist_table_size(void)
int lock = !ERTS_IS_CRASH_DUMPING;
if (lock)
- erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
+ erts_rwmtx_rlock(&erts_dist_table_rwmtx);
#ifdef DEBUG
hash_get_info(&hi, &erts_dist_table);
ASSERT(dist_entries == hi.objs);
@@ -340,26 +504,25 @@ erts_dist_table_size(void)
ASSERT(dist_entries == (erts_no_of_visible_dist_entries
+ erts_no_of_hidden_dist_entries
- + erts_no_of_not_connected_dist_entries
- + 1 /* erts_this_dist_entry */));
+ + erts_no_of_not_connected_dist_entries));
#endif
res = (hash_table_sz(&erts_dist_table)
+ dist_entries*sizeof(DistEntry)
+ erts_dist_cache_size());
if (lock)
- erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
+ erts_rwmtx_runlock(&erts_dist_table_rwmtx);
return res;
}
void
erts_set_dist_entry_not_connected(DistEntry *dep)
{
- ERTS_SMP_LC_ASSERT(erts_lc_is_de_rwlocked(dep));
- erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
+ ERTS_LC_ASSERT(erts_lc_is_de_rwlocked(dep));
+ erts_rwmtx_rwlock(&erts_dist_table_rwmtx);
ASSERT(dep != erts_this_dist_entry);
- ASSERT(is_internal_port(dep->cid));
+ ASSERT(is_internal_port(dep->cid) || is_internal_pid(dep->cid));
if(dep->flags & DFLAG_PUBLISHED) {
if(dep->prev) {
@@ -403,18 +566,18 @@ erts_set_dist_entry_not_connected(DistEntry *dep)
}
erts_not_connected_dist_entries = dep;
erts_no_of_not_connected_dist_entries++;
- erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+ erts_rwmtx_rwunlock(&erts_dist_table_rwmtx);
}
void
erts_set_dist_entry_connected(DistEntry *dep, Eterm cid, Uint flags)
{
- ERTS_SMP_LC_ASSERT(erts_lc_is_de_rwlocked(dep));
- erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
+ ERTS_LC_ASSERT(erts_lc_is_de_rwlocked(dep));
+ erts_rwmtx_rwlock(&erts_dist_table_rwmtx);
ASSERT(dep != erts_this_dist_entry);
ASSERT(is_nil(dep->cid));
- ASSERT(is_internal_port(cid));
+ ASSERT(is_internal_port(cid) || is_internal_pid(cid));
if(dep->prev) {
ASSERT(is_in_de_list(dep, erts_not_connected_dist_entries));
@@ -434,10 +597,19 @@ erts_set_dist_entry_connected(DistEntry *dep, Eterm cid, Uint flags)
dep->status |= ERTS_DE_SFLG_CONNECTED;
dep->flags = flags;
dep->cid = cid;
+ erts_atomic_set_nob(&dep->input_handler,
+ (erts_aint_t) cid);
+
dep->connection_id++;
dep->connection_id &= ERTS_DIST_EXT_CON_ID_MASK;
dep->prev = NULL;
+ erts_atomic64_set_nob(&dep->in, 0);
+ erts_atomic64_set_nob(&dep->out, 0);
+ erts_atomic32_set_nob(&dep->qflgs,
+ (is_internal_port(cid)
+ ? ERTS_DE_QFLG_PORT_CTRL
+ : ERTS_DE_QFLG_PROC_CTRL));
if(flags & DFLAG_PUBLISHED) {
dep->next = erts_visible_dist_entries;
if(erts_visible_dist_entries) {
@@ -456,7 +628,7 @@ erts_set_dist_entry_connected(DistEntry *dep, Eterm cid, Uint flags)
erts_hidden_dist_entries = dep;
erts_no_of_hidden_dist_entries++;
}
- erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+ erts_rwmtx_rwunlock(&erts_dist_table_rwmtx);
}
/* -- Node table --------------------------------------------------------- */
@@ -473,31 +645,7 @@ node_table_hash(void *venp)
Uint32 cre = ((ErlNode *) venp)->creation;
HashValue h = atom_tab(atom_val(((ErlNode *) venp)->sysname))->slot.bucket.hvalue;
- h *= PRIME0;
- h += cre & 0xff;
-
-#if MAX_CREATION >= (1 << 8)
- h *= PRIME1;
- h += (cre >> 8) & 0xff;
-#endif
-
-#if MAX_CREATION >= (1 << 16)
- h *= PRIME2;
- h += (cre >> 16) & 0xff;
-#endif
-
-#if MAX_CREATION >= (1 << 24)
- h *= PRIME3;
- h += (cre >> 24) & 0xff;
-#endif
-
-#if 0
-/* XXX Problems in older versions of GCC */
- #if MAX_CREATION >= (1UL << 32)
- #error "MAX_CREATION larger than size of expected creation storage (Uint32)"
- #endif
-#endif
- return h;
+ return (h + cre) * PRIME0;
}
static int
@@ -514,9 +662,6 @@ node_table_alloc(void *venp_tmpl)
{
ErlNode *enp;
- if(((ErlNode *) venp_tmpl) == erts_this_node)
- return venp_tmpl;
-
enp = (ErlNode *) erts_alloc(ERTS_ALC_T_NODE_ENTRY, sizeof(ErlNode));
node_entries++;
@@ -534,8 +679,7 @@ node_table_free(void *venp)
{
ErlNode *enp = (ErlNode *) venp;
- if(enp == erts_this_node)
- return;
+ ERTS_LC_ASSERT(enp != erts_this_node || erts_thr_progress_is_blocking());
erts_deref_dist_entry(enp->dist_entry);
#ifdef DEBUG
@@ -543,7 +687,7 @@ node_table_free(void *venp)
#endif
erts_free(ERTS_ALC_T_NODE_ENTRY, venp);
- ASSERT(node_entries > 1);
+ ASSERT(node_entries > 0);
node_entries--;
}
@@ -556,48 +700,48 @@ erts_node_table_size(void)
#endif
int lock = !ERTS_IS_CRASH_DUMPING;
if (lock)
- erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
+ erts_rwmtx_rlock(&erts_node_table_rwmtx);
#ifdef DEBUG
hash_get_info(&hi, &erts_node_table);
ASSERT(node_entries == hi.objs);
#endif
res = hash_table_sz(&erts_node_table) + node_entries*sizeof(ErlNode);
if (lock)
- erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx);
+ erts_rwmtx_runlock(&erts_node_table_rwmtx);
return res;
}
void
-erts_node_table_info(int to, void *to_arg)
+erts_node_table_info(fmtfn_t to, void *to_arg)
{
int lock = !ERTS_IS_CRASH_DUMPING;
if (lock)
- erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
+ erts_rwmtx_rlock(&erts_node_table_rwmtx);
hash_info(to, to_arg, &erts_node_table);
if (lock)
- erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx);
+ erts_rwmtx_runlock(&erts_node_table_rwmtx);
}
-ErlNode *erts_find_or_insert_node(Eterm sysname, Uint creation)
+ErlNode *erts_find_or_insert_node(Eterm sysname, Uint32 creation)
{
ErlNode *res;
ErlNode ne;
ne.sysname = sysname;
ne.creation = creation;
- erts_smp_rwmtx_rlock(&erts_node_table_rwmtx);
+ erts_rwmtx_rlock(&erts_node_table_rwmtx);
res = hash_get(&erts_node_table, (void *) &ne);
if (res && res != erts_this_node) {
erts_aint_t refc = erts_refc_inctest(&res->refc, 0);
if (refc < 2) /* New or pending delete */
erts_refc_inc(&res->refc, 1);
}
- erts_smp_rwmtx_runlock(&erts_node_table_rwmtx);
+ erts_rwmtx_runlock(&erts_node_table_rwmtx);
if (res)
return res;
- erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
+ erts_rwmtx_rwlock(&erts_node_table_rwmtx);
res = hash_put(&erts_node_table, (void *) &ne);
ASSERT(res);
if (res != erts_this_node) {
@@ -605,30 +749,55 @@ ErlNode *erts_find_or_insert_node(Eterm sysname, Uint creation)
if (refc < 2) /* New or pending delete */
erts_refc_inc(&res->refc, 1);
}
- erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx);
+ erts_rwmtx_rwunlock(&erts_node_table_rwmtx);
return res;
}
-void erts_delete_node(ErlNode *enp)
+static void try_delete_node(void *venp)
+{
+ ErlNode *enp = (ErlNode *) venp;
+ erts_aint_t refc;
+
+ erts_rwmtx_rwlock(&erts_node_table_rwmtx);
+ /*
+ * Another thread might have looked up this node after we
+ * decided to delete it (refc became zero). If so, the other
+ * thread incremented refc twice. Once for the new reference
+ * and once for this thread.
+ *
+     * If refc reaches -1, no one has used the entry since we
+     * set up the timer. Delete the entry.
+     *
+     * If refc reaches 0, the entry is currently not in use
+ * but has been used since we set up the timer. Set up a
+ * new timer.
+ *
+ * If refc > 0, the entry is in use. Keep the entry.
+ */
+ refc = erts_refc_dectest(&enp->refc, -1);
+ if (refc == -1)
+ (void) hash_erase(&erts_node_table, (void *) enp);
+ erts_rwmtx_rwunlock(&erts_node_table_rwmtx);
+
+ if (refc == 0)
+ erts_schedule_delete_node(enp);
+}
+
+void erts_schedule_delete_node(ErlNode *enp)
{
ASSERT(enp != erts_this_node);
- if(enp != erts_this_node) {
- erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
- /*
- * Another thread might have looked up this node after we
- * decided to delete it (refc became zero). If so, the other
- * thread incremented refc twice. Once for the new reference
- * and once for this thread. Therefore, delete node if refc
- * is 0 or -1 after a decrement.
- */
- if (erts_refc_dectest(&enp->refc, -1) <= 0)
- (void) hash_erase(&erts_node_table, (void *) enp);
- erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx);
+ if (enp != erts_this_node) {
+ if (node_tab_delete_delay == 0)
+ try_delete_node((void *) enp);
+ else if (node_tab_delete_delay > 0)
+ erts_start_timer_callback(node_tab_delete_delay,
+ try_delete_node,
+ (void *) enp);
}
}
struct pn_data {
- int to;
+ fmtfn_t to;
void *to_arg;
Eterm sysname;
int no_sysname;
@@ -651,14 +820,14 @@ static void print_node(void *venp, void *vpndp)
erts_print(pndp->to, pndp->to_arg, " %d", enp->creation);
#ifdef DEBUG
erts_print(pndp->to, pndp->to_arg, " (refc=%ld)",
- erts_refc_read(&enp->refc, 1));
+ erts_refc_read(&enp->refc, 0));
#endif
pndp->no_sysname++;
}
pndp->no_total++;
}
-void erts_print_node_info(int to,
+void erts_print_node_info(fmtfn_t to,
void *to_arg,
Eterm sysname,
int *no_sysname,
@@ -674,13 +843,13 @@ void erts_print_node_info(int to,
pnd.no_total = 0;
if (lock)
- erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
+ erts_rwmtx_rlock(&erts_node_table_rwmtx);
hash_foreach(&erts_node_table, print_node, (void *) &pnd);
if (pnd.no_sysname != 0) {
erts_print(to, to_arg, "\n");
}
if (lock)
- erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx);
+ erts_rwmtx_runlock(&erts_node_table_rwmtx);
if(no_sysname)
*no_sysname = pnd.no_sysname;
@@ -693,45 +862,71 @@ void erts_print_node_info(int to,
void
erts_set_this_node(Eterm sysname, Uint creation)
{
- erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
- erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
+ ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
+ ASSERT(2 <= de_refc_read(erts_this_dist_entry, 2));
- (void) hash_erase(&erts_dist_table, (void *) erts_this_dist_entry);
- erts_this_dist_entry->sysname = sysname;
- erts_this_dist_entry->creation = creation;
- (void) hash_put(&erts_dist_table, (void *) erts_this_dist_entry);
+ if (erts_refc_dectest(&erts_this_node->refc, 0) == 0)
+ try_delete_node(erts_this_node);
- (void) hash_erase(&erts_node_table, (void *) erts_this_node);
- erts_this_node->sysname = sysname;
- erts_this_node->creation = creation;
- erts_this_node_sysname = erts_this_node_sysname_BUFFER;
- erts_snprintf(erts_this_node_sysname, sizeof(erts_this_node_sysname),
- "%T", sysname);
- (void) hash_put(&erts_node_table, (void *) erts_this_node);
+ erts_deref_dist_entry(erts_this_dist_entry);
- erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
- erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx);
+ erts_this_node = NULL; /* to make sure refc is bumped for this node */
+ erts_this_node = erts_find_or_insert_node(sysname, creation);
+ erts_this_dist_entry = erts_this_node->dist_entry;
+ erts_ref_dist_entry(erts_this_dist_entry);
+
+ erts_this_node_sysname = erts_this_node_sysname_BUFFER;
+ erts_snprintf(erts_this_node_sysname, sizeof(erts_this_node_sysname_BUFFER),
+ "%T", sysname);
}
-void erts_init_node_tables(void)
+Uint
+erts_delayed_node_table_gc(void)
{
- erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
- HashFunctions f;
-
- rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
- rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
-
- f.hash = (H_FUN) dist_table_hash;
- f.cmp = (HCMP_FUN) dist_table_cmp;
- f.alloc = (HALLOC_FUN) dist_table_alloc;
- f.free = (HFREE_FUN) dist_table_free;
-
- erts_this_dist_entry = erts_alloc(ERTS_ALC_T_DIST_ENTRY, sizeof(DistEntry));
- dist_entries = 1;
+ if (node_tab_delete_delay < 0)
+ return (Uint) ERTS_NODE_TAB_DELAY_GC_INFINITY;
+ if (node_tab_delete_delay == 0)
+ return (Uint) 0;
+ return (Uint) ((node_tab_delete_delay-1)/1000 + 1);
+}
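The last branch is ceiling division, rounding a positive millisecond delay up to whole seconds:

/* (node_tab_delete_delay - 1)/1000 + 1, for example:
 *     1 ms -> (0)/1000    + 1 = 1 s
 *  1000 ms -> (999)/1000  + 1 = 1 s
 *  1001 ms -> (1000)/1000 + 1 = 2 s
 */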
+void erts_init_node_tables(int dd_sec)
+{
+ erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
+ HashFunctions f;
+ ErlNode node_tmpl;
+
+ if (dd_sec == ERTS_NODE_TAB_DELAY_GC_INFINITY)
+ node_tab_delete_delay = (ErtsMonotonicTime) -1;
+ else
+ node_tab_delete_delay = ((ErtsMonotonicTime) dd_sec)*1000;
+
+ orig_node_tab_delete_delay = node_tab_delete_delay;
+
+ rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED;
+
+ erts_rwmtx_init_opt(&erts_node_table_rwmtx, &rwmtx_opt, "node_table", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
+ erts_rwmtx_init_opt(&erts_dist_table_rwmtx, &rwmtx_opt, "dist_table", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
+
+ f.hash = (H_FUN) dist_table_hash;
+ f.cmp = (HCMP_FUN) dist_table_cmp;
+ f.alloc = (HALLOC_FUN) dist_table_alloc;
+ f.free = (HFREE_FUN) dist_table_free;
+ f.meta_alloc = (HMALLOC_FUN) erts_alloc;
+ f.meta_free = (HMFREE_FUN) erts_free;
+ f.meta_print = (HMPRINT_FUN) erts_print;
hash_init(ERTS_ALC_T_DIST_TABLE, &erts_dist_table, "dist_table", 11, f);
+ f.hash = (H_FUN) node_table_hash;
+ f.cmp = (HCMP_FUN) node_table_cmp;
+ f.alloc = (HALLOC_FUN) node_table_alloc;
+ f.free = (HFREE_FUN) node_table_free;
+ hash_init(ERTS_ALC_T_NODE_TABLE, &erts_node_table, "node_table", 11, f);
+
erts_hidden_dist_entries = NULL;
erts_visible_dist_entries = NULL;
erts_not_connected_dist_entries = NULL;
@@ -739,83 +934,62 @@ void erts_init_node_tables(void)
erts_no_of_visible_dist_entries = 0;
erts_no_of_not_connected_dist_entries = 0;
- erts_this_dist_entry->next = NULL;
- erts_this_dist_entry->prev = NULL;
- erts_refc_init(&erts_this_dist_entry->refc, 1); /* erts_this_node */
-
- erts_smp_rwmtx_init_opt_x(&erts_this_dist_entry->rwmtx,
- &rwmtx_opt,
- "dist_entry",
- make_small(ERST_INTERNAL_CHANNEL_NO));
- erts_this_dist_entry->sysname = am_Noname;
- erts_this_dist_entry->cid = NIL;
- erts_this_dist_entry->connection_id = 0;
- erts_this_dist_entry->status = 0;
- erts_this_dist_entry->flags = 0;
- erts_this_dist_entry->version = 0;
-
- erts_smp_mtx_init_x(&erts_this_dist_entry->lnk_mtx,
- "dist_entry_links",
- make_small(ERST_INTERNAL_CHANNEL_NO));
- erts_this_dist_entry->node_links = NULL;
- erts_this_dist_entry->nlinks = NULL;
- erts_this_dist_entry->monitors = NULL;
-
- erts_smp_mtx_init_x(&erts_this_dist_entry->qlock,
- "dist_entry_out_queue",
- make_small(ERST_INTERNAL_CHANNEL_NO));
- erts_this_dist_entry->qflgs = 0;
- erts_this_dist_entry->qsize = 0;
- erts_this_dist_entry->out_queue.first = NULL;
- erts_this_dist_entry->out_queue.last = NULL;
- erts_this_dist_entry->suspended = NULL;
-
- erts_this_dist_entry->finalized_out_queue.first = NULL;
- erts_this_dist_entry->finalized_out_queue.last = NULL;
- erts_smp_atomic_init_nob(&erts_this_dist_entry->dist_cmd_scheduled, 0);
- erts_port_task_handle_init(&erts_this_dist_entry->dist_cmd);
- erts_this_dist_entry->send = NULL;
- erts_this_dist_entry->cache = NULL;
-
- (void) hash_put(&erts_dist_table, (void *) erts_this_dist_entry);
-
- f.hash = (H_FUN) node_table_hash;
- f.cmp = (HCMP_FUN) node_table_cmp;
- f.alloc = (HALLOC_FUN) node_table_alloc;
- f.free = (HFREE_FUN) node_table_free;
+ node_tmpl.sysname = am_Noname;
+ node_tmpl.creation = 0;
+ erts_this_node = hash_put(&erts_node_table, &node_tmpl);
+ /* +1 for erts_this_node */
+ erts_refc_init(&erts_this_node->refc, 1);
- hash_init(ERTS_ALC_T_NODE_TABLE, &erts_node_table, "node_table", 11, f);
+ ASSERT(erts_this_node->dist_entry != NULL);
+ erts_this_dist_entry = erts_this_node->dist_entry;
+ /* +1 for erts_this_dist_entry */
+ erts_ref_dist_entry(erts_this_dist_entry);
- erts_this_node = erts_alloc(ERTS_ALC_T_NODE_ENTRY, sizeof(ErlNode));
- node_entries = 1;
+ ASSERT(2 == de_refc_read(erts_this_dist_entry, 2));
- erts_refc_init(&erts_this_node->refc, 1); /* The system itself */
- erts_this_node->sysname = am_Noname;
- erts_this_node->creation = 0;
- erts_this_node->dist_entry = erts_this_dist_entry;
erts_this_node_sysname = erts_this_node_sysname_BUFFER;
- erts_snprintf(erts_this_node_sysname, sizeof(erts_this_node_sysname),
+ erts_snprintf(erts_this_node_sysname, sizeof(erts_this_node_sysname_BUFFER),
"%T", erts_this_node->sysname);
- (void) hash_put(&erts_node_table, (void *) erts_this_node);
-
- erts_smp_rwmtx_init_opt(&erts_node_table_rwmtx, &rwmtx_opt, "node_table");
- erts_smp_rwmtx_init_opt(&erts_dist_table_rwmtx, &rwmtx_opt, "dist_table");
-
references_atoms_need_init = 1;
}
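
The bootstrap above ends with erts_this_node holding one reference and its dist entry holding two: one taken implicitly when the node entry created the dist entry, and one taken explicitly for the global erts_this_dist_entry alias, which is what the ASSERT on de_refc_read() verifies. A minimal sketch of that "owner plus alias" counting, assuming C11 atomics and illustrative names:

    /* Sketch only: Entry stands in for DistEntry, not the ERTS type. */
    #include <stdatomic.h>
    #include <assert.h>

    typedef struct { atomic_long refc; } Entry;

    static void entry_ref(Entry *e)   { atomic_fetch_add(&e->refc, 1); }
    static long entry_deref(Entry *e) { return atomic_fetch_sub(&e->refc, 1) - 1; }

    int main(void) {
        Entry dist = { 1 };    /* +1 held by the owning node entry */
        entry_ref(&dist);      /* +1 for the global alias pointer  */
        assert(atomic_load(&dist.refc) == 2);   /* the ASSERT's view */
        return entry_deref(&dist) == 1 ? 0 : 1;
    }
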
-#ifdef ERTS_SMP
#ifdef ERTS_ENABLE_LOCK_CHECK
int erts_lc_is_de_rwlocked(DistEntry *dep)
{
- return erts_smp_lc_rwmtx_is_rwlocked(&dep->rwmtx);
+ return erts_lc_rwmtx_is_rwlocked(&dep->rwmtx);
}
int erts_lc_is_de_rlocked(DistEntry *dep)
{
- return erts_smp_lc_rwmtx_is_rlocked(&dep->rwmtx);
+ return erts_lc_rwmtx_is_rlocked(&dep->rwmtx);
}
#endif
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+
+static void erts_lcnt_enable_dist_lock_count(void *dep_raw, void *enable) {
+ DistEntry *dep = (DistEntry*)dep_raw;
+
+ if(enable) {
+ erts_lcnt_install_new_lock_info(&dep->rwmtx.lcnt, "dist_entry", dep->sysname,
+ ERTS_LOCK_TYPE_RWMUTEX | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
+ erts_lcnt_install_new_lock_info(&dep->lnk_mtx.lcnt, "dist_entry_links", dep->sysname,
+ ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
+ erts_lcnt_install_new_lock_info(&dep->qlock.lcnt, "dist_entry_out_queue", dep->sysname,
+ ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
+ } else {
+ erts_lcnt_uninstall(&dep->rwmtx.lcnt);
+ erts_lcnt_uninstall(&dep->lnk_mtx.lcnt);
+ erts_lcnt_uninstall(&dep->qlock.lcnt);
+ }
+}
+
+void erts_lcnt_update_distribution_locks(int enable) {
+ erts_rwmtx_rlock(&erts_dist_table_rwmtx);
+ hash_foreach(&erts_dist_table, erts_lcnt_enable_dist_lock_count,
+ (void*)(UWord)enable);
+ erts_rwmtx_runlock(&erts_dist_table_rwmtx);
+}
#endif
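
erts_lcnt_update_distribution_locks() walks every dist entry under the table's read lock and smuggles the enable flag to the callback through the generic void* argument via a UWord round trip. A standalone sketch of that foreach-with-flag pattern, with stand-ins for hash_foreach() and the entry type:

    /* Sketch; visit() and the loop play the roles of the callback and
     * hash_foreach(); they are not the ERTS declarations. */
    #include <stdio.h>
    #include <stdint.h>

    static void visit(void *item, void *arg) {
        int enable = (int)(uintptr_t)arg;   /* decode the smuggled flag */
        printf("%s lock counting for %s\n",
               enable ? "install" : "uninstall", (const char *)item);
    }

    int main(void) {
        const char *entries[] = { "node_a", "node_b" };
        for (size_t i = 0; i < 2; i++)      /* stands in for hash_foreach() */
            visit((void *)entries[i], (void *)(uintptr_t)1);
        return 0;
    }
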
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
@@ -847,16 +1021,19 @@ static Eterm AM_dist_references;
static Eterm AM_node_references;
static Eterm AM_system;
static Eterm AM_timer;
+static Eterm AM_delayed_delete_timer;
+static Eterm AM_thread_progress_delete_timer;
static void setup_reference_table(void);
-static Eterm reference_table_term(Uint **hpp, Uint *szp);
+static Eterm reference_table_term(Uint **hpp, ErlOffHeap *ohp, Uint *szp);
static void delete_reference_table(void);
-#if BIG_UINT_HEAP_SIZE > 3 /* 2-tuple */
-#define ID_HEAP_SIZE BIG_UINT_HEAP_SIZE
-#else
-#define ID_HEAP_SIZE 3 /* 2-tuple */
-#endif
+#undef ERTS_MAX__
+#define ERTS_MAX__(A, B) ((A) > (B) ? (A) : (B))
+
+#define ID_HEAP_SIZE \
+ ERTS_MAX__(ERTS_MAGIC_REF_THING_SIZE, \
+ ERTS_MAX__(BIG_UINT_HEAP_SIZE, 3))
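
ID_HEAP_SIZE now has to cover a magic-ref thing as well, so it is computed as the compile-time maximum of the three candidate sizes via the nested ERTS_MAX__ expansion. A tiny self-contained check of the idiom (MAX2 and ID_SIZE are local stand-ins; C11 is assumed for _Static_assert):

    #define MAX2(A, B) ((A) > (B) ? (A) : (B))
    #define ID_SIZE MAX2(4, MAX2(2, 3))   /* mirrors the ID_HEAP_SIZE shape */

    _Static_assert(ID_SIZE == 4, "nested MAX2 yields the largest operand");

    int main(void) { return 0; }
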
typedef struct node_referrer_ {
struct node_referrer_ *next;
@@ -869,6 +1046,7 @@ typedef struct node_referrer_ {
int system_ref;
Eterm id;
Uint id_heap[ID_HEAP_SIZE];
+ ErlOffHeap off_heap;
} NodeReferrer;
typedef struct {
@@ -881,8 +1059,10 @@ typedef struct dist_referrer_ {
int heap_ref;
int node_ref;
int ctrl_ref;
+ int system_ref;
Eterm id;
Uint creation;
+ Uint id_heap[ID_HEAP_SIZE];
} DistReferrer;
typedef struct {
@@ -911,8 +1091,8 @@ erts_get_node_and_dist_references(struct process *proc)
Uint *endp;
#endif
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
    /* No need to lock anything since we are alone... */
if (references_atoms_need_init) {
@@ -931,6 +1111,8 @@ erts_get_node_and_dist_references(struct process *proc)
INIT_AM(node_references);
INIT_AM(timer);
INIT_AM(system);
+ INIT_AM(delayed_delete_timer);
+ INIT_AM(thread_progress_delete_timer);
references_atoms_need_init = 0;
}
@@ -938,7 +1120,7 @@ erts_get_node_and_dist_references(struct process *proc)
/* Get term size */
size = 0;
- (void) reference_table_term(NULL, &size);
+ (void) reference_table_term(NULL, NULL, &size);
hp = HAlloc(proc, size);
#ifdef DEBUG
@@ -947,14 +1129,14 @@ erts_get_node_and_dist_references(struct process *proc)
#endif
/* Write term */
- res = reference_table_term(&hp, NULL);
+ res = reference_table_term(&hp, &proc->off_heap, NULL);
ASSERT(endp == hp);
delete_reference_table();
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
return res;
}
@@ -988,17 +1170,25 @@ insert_dist_referrer(ReferredDist *referred_dist,
sizeof(DistReferrer));
drp->next = referred_dist->referrers;
referred_dist->referrers = drp;
- drp->id = id;
+ if(IS_CONST(id))
+ drp->id = id;
+ else {
+ Uint *hp = &drp->id_heap[0];
+ ASSERT(is_tuple(id));
+ drp->id = copy_struct(id, size_object(id), &hp, NULL);
+ }
drp->creation = creation;
drp->heap_ref = 0;
drp->node_ref = 0;
drp->ctrl_ref = 0;
+ drp->system_ref = 0;
}
switch (type) {
case NODE_REF: drp->node_ref++; break;
case CTRL_REF: drp->ctrl_ref++; break;
case HEAP_REF: drp->heap_ref++; break;
+ case SYSTEM_REF: drp->system_ref++; break;
default: ASSERT(0);
}
}
@@ -1017,7 +1207,7 @@ insert_dist_entry(DistEntry *dist, int type, Eterm id, Uint creation)
}
if(!rdp)
- erl_exit(1,
+ erts_exit(ERTS_ERROR_EXIT,
"Reference to non-existing distribution table entry found!\n");
insert_dist_referrer(rdp, type, id, creation);
@@ -1036,13 +1226,14 @@ insert_node_referrer(ReferredNode *referred_node, int type, Eterm id)
nrp = (NodeReferrer *) erts_alloc(ERTS_ALC_T_NC_TMP,
sizeof(NodeReferrer));
nrp->next = referred_node->referrers;
+ ERTS_INIT_OFF_HEAP(&nrp->off_heap);
referred_node->referrers = nrp;
if(IS_CONST(id))
nrp->id = id;
else {
Uint *hp = &nrp->id_heap[0];
- ASSERT(is_big(id) || is_tuple(id));
- nrp->id = copy_struct(id, size_object(id), &hp, NULL);
+ ASSERT(is_big(id) || is_tuple(id) || is_internal_magic_ref(id));
+ nrp->id = copy_struct(id, size_object(id), &hp, &nrp->off_heap);
}
nrp->heap_ref = 0;
nrp->link_ref = 0;
@@ -1078,7 +1269,7 @@ insert_node(ErlNode *node, int type, Eterm id)
}
if (!rnp)
- erl_exit(1, "Reference to non-existing node table entry found!\n");
+ erts_exit(ERTS_ERROR_EXIT, "Reference to non-existing node table entry found!\n");
insert_node_referrer(rnp, type, id);
}
@@ -1105,6 +1296,10 @@ insert_offheap2(ErlOffHeap *oh, void *arg)
insert_offheap(oh, a->type, a->id);
}
+#define ErtsIsDistEntryBinary(Bin) \
+ (((Bin)->intern.flags & BIN_FLAG_MAGIC) \
+ && ERTS_MAGIC_BIN_DESTRUCTOR((Bin)) == erts_dist_entry_destructor)
+
static void
insert_offheap(ErlOffHeap *oh, int type, Eterm id)
{
@@ -1114,48 +1309,36 @@ insert_offheap(ErlOffHeap *oh, int type, Eterm id)
for (u.hdr = oh->first; u.hdr; u.hdr = u.hdr->next) {
switch (thing_subtag(u.hdr->thing_word)) {
- case REFC_BINARY_SUBTAG:
- if(IsMatchProgBinary(u.pb->val)) {
+ case REF_SUBTAG:
+ if (ErtsIsDistEntryBinary(u.mref->mb))
+ insert_dist_entry(ErtsBin2DistEntry(u.mref->mb),
+ type, id, 0);
+ else if(IsMatchProgBinary(u.mref->mb)) {
InsertedBin *ib;
int insert_bin = 1;
for (ib = inserted_bins; ib; ib = ib->next)
- if(ib->bin_val == u.pb->val) {
+ if(ib->bin_val == (Binary *) u.mref->mb) {
insert_bin = 0;
break;
}
if (insert_bin) {
-#if HALFWORD_HEAP
- UWord val = (UWord) u.pb->val;
- DeclareTmpHeapNoproc(id_heap,BIG_UINT_HEAP_SIZE*2); /* extra place allocated */
-#else
DeclareTmpHeapNoproc(id_heap,BIG_UINT_HEAP_SIZE);
-#endif
Uint *hp = &id_heap[0];
InsertedBin *nib;
-#if HALFWORD_HEAP
- int actual_need = BIG_UWORD_HEAP_SIZE(val);
- ASSERT(actual_need <= (BIG_UINT_HEAP_SIZE*2));
- UseTmpHeapNoproc(actual_need);
- a.id = erts_bld_uword(&hp, NULL, (UWord) val);
-#else
UseTmpHeapNoproc(BIG_UINT_HEAP_SIZE);
- a.id = erts_bld_uint(&hp, NULL, (Uint) u.pb->val);
-#endif
- erts_match_prog_foreach_offheap(u.pb->val,
+ a.id = erts_bld_uint(&hp, NULL, (Uint) u.mref->mb);
+ erts_match_prog_foreach_offheap((Binary *) u.mref->mb,
insert_offheap2,
(void *) &a);
nib = erts_alloc(ERTS_ALC_T_NC_TMP, sizeof(InsertedBin));
- nib->bin_val = u.pb->val;
+ nib->bin_val = (Binary *) u.mref->mb;
nib->next = inserted_bins;
inserted_bins = nib;
-#if HALFWORD_HEAP
- UnUseTmpHeapNoproc(actual_need);
-#else
UnUseTmpHeapNoproc(BIG_UINT_HEAP_SIZE);
-#endif
}
}
break;
+ case REFC_BINARY_SUBTAG:
case FUN_SUBTAG:
	    break; /* No need to insert these */
default:
@@ -1169,8 +1352,8 @@ insert_offheap(ErlOffHeap *oh, int type, Eterm id)
static void doit_insert_monitor(ErtsMonitor *monitor, void *p)
{
Eterm *idp = p;
- if(is_external(monitor->pid))
- insert_node(external_thing_ptr(monitor->pid)->node, MONITOR_REF, *idp);
+ if(monitor->type != MON_NIF_TARGET && is_external(monitor->u.pid))
+ insert_node(external_thing_ptr(monitor->u.pid)->node, MONITOR_REF, *idp);
if(is_external(monitor->ref))
insert_node(external_thing_ptr(monitor->ref)->node, MONITOR_REF, *idp);
}
@@ -1214,10 +1397,20 @@ insert_links2(ErtsLink *lnk, Eterm id)
static void
insert_ets_table(DbTable *tab, void *unused)
{
+ ErlOffHeap off_heap;
+ Eterm heap[ERTS_MAGIC_REF_THING_SIZE];
struct insert_offheap2_arg a;
a.type = ETS_REF;
- a.id = tab->common.id;
+ if (tab->common.status & DB_NAMED_TABLE)
+ a.id = tab->common.the_name;
+ else {
+ Eterm *hp = &heap[0];
+ ERTS_INIT_OFF_HEAP(&off_heap);
+ a.id = erts_mk_magic_ref(&hp, &off_heap, tab->common.btid);
+ }
erts_db_foreach_offheap(tab, insert_offheap2, (void *) &a);
+ if (is_not_atom(a.id))
+ erts_cleanup_offheap(&off_heap);
}
static void
@@ -1252,13 +1445,46 @@ init_referred_dist(void *dist, void *unused)
no_referred_dists++;
}
-#ifdef ERTS_SMP
static void
insert_sys_msg(Eterm from, Eterm to, Eterm msg, ErlHeapFragment *bp)
{
insert_offheap(&bp->off_heap, HEAP_REF, to);
}
-#endif
+
+static void
+insert_delayed_delete_node(void *state,
+ ErtsMonotonicTime timeout_pos,
+ void *vnp)
+{
+ Eterm heap[3];
+ insert_node((ErlNode *) vnp,
+ SYSTEM_REF,
+ TUPLE2(&heap[0], AM_system, AM_delayed_delete_timer));
+}
+
+static void
+insert_thr_prgr_delete_dist_entry(void *arg, ErtsThrPrgrVal thr_prgr, void *vbin)
+{
+ DistEntry *dep = ErtsBin2DistEntry(vbin);
+ Eterm heap[3];
+ insert_dist_entry(dep,
+ SYSTEM_REF,
+ TUPLE2(&heap[0], AM_system, AM_thread_progress_delete_timer),
+ 0);
+}
+
+static void
+insert_delayed_delete_dist_entry(void *state,
+ ErtsMonotonicTime timeout_pos,
+ void *vbin)
+{
+ DistEntry *dep = ErtsBin2DistEntry(vbin);
+ Eterm heap[3];
+ insert_dist_entry(dep,
+ SYSTEM_REF,
+ TUPLE2(&heap[0], AM_system, AM_delayed_delete_timer),
+ 0);
+}
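
Each helper above materializes its {system, Reason} identifier as a 2-tuple built on a three-word stack array: one arity header plus two element words, which is why Eterm heap[3] suffices. A standalone sketch of that layout, with Word and mk_tuple2 as illustrative stand-ins for Eterm and TUPLE2:

    #include <stdio.h>
    #include <stdint.h>

    typedef uintptr_t Word;

    static Word *mk_tuple2(Word *hp, Word a, Word b) {
        hp[0] = 2;   /* arity header word */
        hp[1] = a;   /* first element     */
        hp[2] = b;   /* second element    */
        return hp;
    }

    int main(void) {
        Word heap[3];   /* same three-word budget as Eterm heap[3] above */
        Word *t = mk_tuple2(heap, 'x', 'y');
        printf("{%c,%c} arity %lu\n", (int)t[1], (int)t[2], (unsigned long)t[0]);
        return 0;
    }
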
static void
setup_reference_table(void)
@@ -1288,11 +1514,25 @@ setup_reference_table(void)
    /* Go through the whole system and build a table of all references
to ErlNode and DistEntry structures */
+ erts_debug_callback_timer_foreach(try_delete_node,
+ insert_delayed_delete_node,
+ NULL);
+ erts_debug_callback_timer_foreach(prepare_try_delete_dist_entry,
+ insert_delayed_delete_dist_entry,
+ NULL);
+ erts_debug_later_op_foreach(try_delete_dist_entry,
+ insert_thr_prgr_delete_dist_entry,
+ NULL);
+
UseTmpHeapNoproc(3);
insert_node(erts_this_node,
SYSTEM_REF,
TUPLE2(&heap[0], AM_system, am_undefined));
+ insert_dist_entry(erts_this_dist_entry,
+ SYSTEM_REF,
+ TUPLE2(&heap[0], AM_system, am_undefined),
+ erts_this_node->creation);
UnUseTmpHeapNoproc(3);
max = erts_ptab_max(&erts_proc);
@@ -1300,73 +1540,65 @@ setup_reference_table(void)
for (i = 0; i < max; i++) {
Process *proc = erts_pix2proc(i);
if (proc) {
- ErlMessage *msg;
+ int mli;
+ ErtsMessage *msg_list[] = {
+ proc->msg.first,
+ proc->msg_inq.first,
+ proc->msg_frag};
/* Insert Heap */
insert_offheap(&(proc->off_heap),
HEAP_REF,
proc->common.id);
- /* Insert message buffers */
+	    /* Insert heap fragment buffers */
for(hfp = proc->mbuf; hfp; hfp = hfp->next)
insert_offheap(&(hfp->off_heap),
HEAP_REF,
proc->common.id);
- /* Insert msg msg buffers */
- for (msg = proc->msg.first; msg; msg = msg->next) {
- ErlHeapFragment *heap_frag = NULL;
- if (msg->data.attached) {
- if (is_value(ERL_MESSAGE_TERM(msg)))
- heap_frag = msg->data.heap_frag;
- else {
- if (msg->data.dist_ext->dep)
- insert_dist_entry(msg->data.dist_ext->dep,
- HEAP_REF, proc->common.id, 0);
- if (is_not_nil(ERL_MESSAGE_TOKEN(msg)))
- heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
+
+ /* Insert msg buffers */
+ for (mli = 0; mli < sizeof(msg_list)/sizeof(msg_list[0]); mli++) {
+ ErtsMessage *msg;
+ for (msg = msg_list[mli]; msg; msg = msg->next) {
+ ErlHeapFragment *heap_frag = NULL;
+ if (msg->data.attached) {
+ if (msg->data.attached == ERTS_MSG_COMBINED_HFRAG)
+ heap_frag = &msg->hfrag;
+ else if (is_value(ERL_MESSAGE_TERM(msg)))
+ heap_frag = msg->data.heap_frag;
+ else {
+ if (msg->data.dist_ext->dep)
+ insert_dist_entry(msg->data.dist_ext->dep,
+ HEAP_REF, proc->common.id, 0);
+ if (is_not_nil(ERL_MESSAGE_TOKEN(msg)))
+ heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
+ }
}
- }
- if (heap_frag)
- insert_offheap(&(heap_frag->off_heap),
- HEAP_REF,
- proc->common.id);
- }
-#ifdef ERTS_SMP
- for (msg = proc->msg_inq.first; msg; msg = msg->next) {
- ErlHeapFragment *heap_frag = NULL;
- if (msg->data.attached) {
- if (is_value(ERL_MESSAGE_TERM(msg)))
- heap_frag = msg->data.heap_frag;
- else {
- if (msg->data.dist_ext->dep)
- insert_dist_entry(msg->data.dist_ext->dep,
- HEAP_REF, proc->common.id, 0);
- if (is_not_nil(ERL_MESSAGE_TOKEN(msg)))
- heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
+ while (heap_frag) {
+ insert_offheap(&(heap_frag->off_heap),
+ HEAP_REF,
+ proc->common.id);
+ heap_frag = heap_frag->next;
}
}
- if (heap_frag)
- insert_offheap(&(heap_frag->off_heap),
- HEAP_REF,
- proc->common.id);
}
-#endif
/* Insert links */
if (ERTS_P_LINKS(proc))
insert_links(ERTS_P_LINKS(proc), proc->common.id);
if (ERTS_P_MONITORS(proc))
insert_monitors(ERTS_P_MONITORS(proc), proc->common.id);
- /* Insert controller */
- {
- DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(proc);
- if (dep)
- insert_dist_entry(dep, CTRL_REF, proc->common.id, 0);
- }
+ {
+ DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(proc);
+ if (dep)
+ insert_dist_entry(dep,
+ CTRL_REF,
+ proc->common.id,
+ 0);
+ }
}
}
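
The rewrite above folds what used to be two #ifdef-split copies of the message walk into one loop over a small table of queue heads (the inner queue, the in queue, and the combined message fragments), and it now also follows heap-fragment chains to the end. A self-contained sketch of the table-of-heads refactor, with illustrative types:

    #include <stdio.h>
    #include <stddef.h>

    typedef struct Msg { int v; struct Msg *next; } Msg;

    int main(void) {
        Msg c = {3, NULL}, b = {2, NULL}, a = {1, &b};
        Msg *queues[] = { &a, &c, NULL };   /* inner, in-queue, frag heads */
        for (size_t i = 0; i < sizeof(queues)/sizeof(queues[0]); i++)
            for (Msg *m = queues[i]; m; m = m->next)
                printf("msg %d\n", m->v);   /* one walk serves all queues */
        return 0;
    }
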
-#ifdef ERTS_SMP
erts_foreach_sys_msg_in_q(insert_sys_msg);
-#endif
/* Insert all ports */
max = erts_ptab_max(&erts_port);
@@ -1469,7 +1701,7 @@ setup_reference_table(void)
erts_db_foreach_table(insert_ets_table, NULL);
/* Insert all bif timers */
- erts_bif_timer_foreach(insert_bif_timer, NULL);
+ erts_debug_bif_timer_foreach(insert_bif_timer, NULL);
/* Insert node table (references to dist) */
hash_foreach(&erts_node_table, insert_erl_node, NULL);
@@ -1495,7 +1727,7 @@ setup_reference_table(void)
*/
static Eterm
-reference_table_term(Uint **hpp, Uint *szp)
+reference_table_term(Uint **hpp, ErlOffHeap *ohp, Uint *szp)
{
#undef MK_2TUP
#undef MK_3TUP
@@ -1550,12 +1782,11 @@ reference_table_term(Uint **hpp, Uint *szp)
nrid = nrp->id;
if (!IS_CONST(nrp->id)) {
-
Uint nrid_sz = size_object(nrp->id);
if (szp)
*szp += nrid_sz;
if (hpp)
- nrid = copy_struct(nrp->id, nrid_sz, hpp, NULL);
+ nrid = copy_struct(nrp->id, nrid_sz, hpp, ohp);
}
if (is_internal_pid(nrid) || nrid == am_error_logger) {
@@ -1601,7 +1832,7 @@ reference_table_term(Uint **hpp, Uint *szp)
tup = MK_2TUP(referred_nodes[i].node->sysname,
MK_UINT(referred_nodes[i].node->creation));
- tup = MK_3TUP(tup, MK_UINT(erts_refc_read(&referred_nodes[i].node->refc, 1)), nril);
+ tup = MK_3TUP(tup, MK_UINT(erts_refc_read(&referred_nodes[i].node->refc, 0)), nril);
nl = MK_CONS(tup, nl);
}
@@ -1624,6 +1855,10 @@ reference_table_term(Uint **hpp, Uint *szp)
tup = MK_2TUP(AM_heap, MK_UINT(drp->heap_ref));
drl = MK_CONS(tup, drl);
}
+ if(drp->system_ref) {
+ tup = MK_2TUP(AM_system, MK_UINT(drp->system_ref));
+ drl = MK_CONS(tup, drl);
+ }
if (is_internal_pid(drp->id)) {
ASSERT(!drp->node_ref);
@@ -1633,6 +1868,14 @@ reference_table_term(Uint **hpp, Uint *szp)
ASSERT(drp->ctrl_ref && !drp->node_ref);
tup = MK_2TUP(AM_port, drp->id);
}
+ else if (is_tuple(drp->id)) {
+ Eterm *t;
+ ASSERT(drp->system_ref && !drp->node_ref
+ && !drp->ctrl_ref && !drp->heap_ref);
+ t = tuple_val(drp->id);
+ ASSERT(2 == arityval(t[0]));
+ tup = MK_2TUP(t[1], t[2]);
+ }
else {
ASSERT(!drp->ctrl_ref && drp->node_ref);
ASSERT(is_atom(drp->id));
@@ -1650,7 +1893,7 @@ reference_table_term(Uint **hpp, Uint *szp)
/* DistList = [{Dist, Refc, ReferenceIdList}] */
tup = MK_3TUP(referred_dists[i].dist->sysname,
- MK_UINT(erts_refc_read(&referred_dists[i].dist->refc, 1)),
+ MK_UINT(de_refc_read(referred_dists[i].dist, 0)),
dril);
dl = MK_CONS(tup, dl);
}
@@ -1678,6 +1921,7 @@ delete_reference_table(void)
NodeReferrer *tnrp;
nrp = referred_nodes[i].referrers;
while(nrp) {
+ erts_cleanup_offheap(&nrp->off_heap);
tnrp = nrp;
nrp = nrp->next;
erts_free(ERTS_ALC_T_NC_TMP, (void *) tnrp);
@@ -1705,3 +1949,15 @@ delete_reference_table(void)
}
}
+void
+erts_debug_test_node_tab_delayed_delete(Sint64 millisecs)
+{
+ erts_thr_progress_block();
+
+ if (millisecs < 0)
+ node_tab_delete_delay = orig_node_tab_delete_delay;
+ else
+ node_tab_delete_delay = millisecs;
+
+ erts_thr_progress_unblock();
+}
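
The debug hook above brackets the write to node_tab_delete_delay with a thread-progress block/unblock pair so no scheduler observes a half-updated tunable, and a negative argument restores the saved default. A minimal sketch of the shape of that pattern, assuming POSIX threads and using a mutex as a crude stand-in for blocking thread progress:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t blk = PTHREAD_MUTEX_INITIALIZER;
    static long delete_delay = 60, orig_delete_delay = 60;

    static void set_delay(long millisecs) {
        pthread_mutex_lock(&blk);     /* stand-in for the progress block */
        delete_delay = millisecs < 0 ? orig_delete_delay : millisecs;
        pthread_mutex_unlock(&blk);   /* stand-in for the unblock        */
    }

    int main(void) {
        set_delay(100);
        set_delay(-1);                /* negative restores the default */
        printf("%ld\n", delete_delay);
        return 0;
    }
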
diff --git a/erts/emulator/beam/erl_node_tables.h b/erts/emulator/beam/erl_node_tables.h
index af60071ea5..ee8277b5ea 100644
--- a/erts/emulator/beam/erl_node_tables.h
+++ b/erts/emulator/beam/erl_node_tables.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2001-2012. All Rights Reserved.
+ * Copyright Ericsson AB 2001-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -40,12 +41,19 @@
#include "sys.h"
#include "hash.h"
+#include "erl_alloc.h"
#include "erl_process.h"
#include "erl_monitors.h"
-#include "erl_smp.h"
#define ERTS_PORT_TASK_ONLY_BASIC_TYPES__
#include "erl_port_task.h"
#undef ERTS_PORT_TASK_ONLY_BASIC_TYPES__
+#define ERTS_BINARY_TYPES_ONLY__
+#include "erl_binary.h"
+#undef ERTS_BINARY_TYPES_ONLY__
+
+#define ERTS_NODE_TAB_DELAY_GC_DEFAULT (60)
+#define ERTS_NODE_TAB_DELAY_GC_MAX (100*1000*1000)
+#define ERTS_NODE_TAB_DELAY_GC_INFINITY (ERTS_NODE_TAB_DELAY_GC_MAX+1)
#define ERST_INTERNAL_CHANNEL_NO 0
@@ -55,13 +63,19 @@
#define ERTS_DE_SFLGS_ALL (ERTS_DE_SFLG_CONNECTED \
| ERTS_DE_SFLG_EXITING)
-#define ERTS_DE_QFLG_BUSY (((Uint32) 1) << 0)
-#define ERTS_DE_QFLG_EXIT (((Uint32) 1) << 1)
+#define ERTS_DE_QFLG_BUSY (((erts_aint32_t) 1) << 0)
+#define ERTS_DE_QFLG_EXIT (((erts_aint32_t) 1) << 1)
+#define ERTS_DE_QFLG_REQ_INFO (((erts_aint32_t) 1) << 2)
+#define ERTS_DE_QFLG_PORT_CTRL (((erts_aint32_t) 1) << 3)
+#define ERTS_DE_QFLG_PROC_CTRL (((erts_aint32_t) 1) << 4)
#define ERTS_DE_QFLGS_ALL (ERTS_DE_QFLG_BUSY \
- | ERTS_DE_QFLG_EXIT)
+ | ERTS_DE_QFLG_EXIT \
+ | ERTS_DE_QFLG_REQ_INFO \
+ | ERTS_DE_QFLG_PORT_CTRL \
+ | ERTS_DE_QFLG_PROC_CTRL)
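
The queue flags switch from plain Uint32 to erts_aint32_t because qflgs is now an atomic word with one bit per condition, manipulated with read-modify-write operations instead of under qlock. A standalone sketch of the set/test/clear cycle, assuming C11 atomics and locally redefined flag names:

    #include <stdatomic.h>
    #include <stdio.h>

    #define QFLG_BUSY (1u << 0)   /* local stand-ins, not the header's */
    #define QFLG_EXIT (1u << 1)

    int main(void) {
        atomic_uint qflgs = 0;
        atomic_fetch_or(&qflgs, QFLG_BUSY);            /* set a flag   */
        unsigned cur = atomic_load(&qflgs);
        printf("busy=%d exit=%d\n",
               !!(cur & QFLG_BUSY), !!(cur & QFLG_EXIT));
        atomic_fetch_and(&qflgs, ~QFLG_BUSY);          /* clear a flag */
        return 0;
    }
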
-#if defined(ARCH_64) && !HALFWORD_HEAP
+#if defined(ARCH_64)
#define ERTS_DIST_OUTPUT_BUF_DBG_PATTERN ((Uint) 0xf713f713f713f713UL)
#else
#define ERTS_DIST_OUTPUT_BUF_DBG_PATTERN ((Uint) 0xf713f713)
@@ -101,12 +115,13 @@ typedef struct dist_entry_ {
HashBucket hash_bucket; /* Hash bucket */
struct dist_entry_ *next; /* Next entry in dist_table (not sorted) */
struct dist_entry_ *prev; /* Previous entry in dist_table (not sorted) */
- erts_refc_t refc; /* Reference count */
- erts_smp_rwmtx_t rwmtx; /* Protects all fields below until lck_mtx. */
+ erts_rwmtx_t rwmtx; /* Protects all fields below until lck_mtx. */
Eterm sysname; /* name@host atom for efficiency */
Uint32 creation; /* creation of connected node */
- Eterm cid; /* connection handler (pid or port), NIL == free */
+ erts_atomic_t input_handler; /* Input handler */
+ Eterm cid; /* connection handler (pid or port),
+ NIL == free */
Uint32 connection_id; /* Connection id incremented on connect */
Uint32 status; /* Slot status, like exiting reserved etc */
Uint32 flags; /* Distribution flags, like hidden,
@@ -114,7 +129,7 @@ typedef struct dist_entry_ {
unsigned long version; /* Protocol version */
- erts_smp_mtx_t lnk_mtx; /* Protects node_links, nlinks, and
+ erts_mtx_t lnk_mtx; /* Protects node_links, nlinks, and
monitors. */
ErtsLink *node_links; /* In a dist entry, node links are kept
in a separate tree, while they are
@@ -126,19 +141,24 @@ typedef struct dist_entry_ {
ErtsLink *nlinks; /* Link tree with subtrees */
ErtsMonitor *monitors; /* Monitor tree */
- erts_smp_mtx_t qlock; /* Protects qflgs and out_queue */
- Uint32 qflgs;
- Sint qsize;
+ erts_mtx_t qlock; /* Protects qflgs and out_queue */
+ erts_atomic32_t qflgs;
+ erts_atomic_t qsize;
+ erts_atomic64_t in;
+ erts_atomic64_t out;
ErtsDistOutputQueue out_queue;
struct ErtsProcList_ *suspended;
+ ErtsDistOutputQueue tmp_out_queue;
ErtsDistOutputQueue finalized_out_queue;
- erts_smp_atomic_t dist_cmd_scheduled;
+ erts_atomic_t dist_cmd_scheduled;
ErtsPortTaskHandle dist_cmd;
Uint (*send)(Port *prt, ErtsDistOutputBuf *obuf);
struct cache* cache; /* The atom cache */
+
+ ErtsThrPrgrLaterOp later_op;
} DistEntry;
typedef struct erl_node_ {
@@ -152,8 +172,8 @@ typedef struct erl_node_ {
extern Hash erts_dist_table;
extern Hash erts_node_table;
-extern erts_smp_rwmtx_t erts_dist_table_rwmtx;
-extern erts_smp_rwmtx_t erts_node_table_rwmtx;
+extern erts_rwmtx_t erts_dist_table_rwmtx;
+extern erts_rwmtx_t erts_node_table_rwmtx;
extern DistEntry *erts_hidden_dist_entries;
extern DistEntry *erts_visible_dist_entries;
@@ -166,92 +186,91 @@ extern DistEntry *erts_this_dist_entry;
extern ErlNode *erts_this_node;
extern char *erts_this_node_sysname; /* must match erl_node_tables.c */
+Uint erts_delayed_node_table_gc(void);
DistEntry *erts_channel_no_to_dist_entry(Uint);
DistEntry *erts_sysname_to_connected_dist_entry(Eterm);
DistEntry *erts_find_or_insert_dist_entry(Eterm);
DistEntry *erts_find_dist_entry(Eterm);
-void erts_delete_dist_entry(DistEntry *);
+void erts_schedule_delete_dist_entry(DistEntry *);
Uint erts_dist_table_size(void);
-void erts_dist_table_info(int, void *);
+void erts_dist_table_info(fmtfn_t, void *);
void erts_set_dist_entry_not_connected(DistEntry *);
void erts_set_dist_entry_connected(DistEntry *, Eterm, Uint);
-ErlNode *erts_find_or_insert_node(Eterm, Uint);
-void erts_delete_node(ErlNode *);
+ErlNode *erts_find_or_insert_node(Eterm, Uint32);
+void erts_schedule_delete_node(ErlNode *);
void erts_set_this_node(Eterm, Uint);
Uint erts_node_table_size(void);
-void erts_init_node_tables(void);
-void erts_node_table_info(int, void *);
-void erts_print_node_info(int, void *, Eterm, int*, int*);
+void erts_init_node_tables(int);
+void erts_node_table_info(fmtfn_t, void *);
+void erts_print_node_info(fmtfn_t, void *, Eterm, int*, int*);
Eterm erts_get_node_and_dist_references(struct process *);
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
int erts_lc_is_de_rwlocked(DistEntry *);
int erts_lc_is_de_rlocked(DistEntry *);
#endif
+int erts_dist_entry_destructor(Binary *bin);
+DistEntry *erts_dhandle_to_dist_entry(Eterm dhandle);
+Eterm erts_make_dhandle(Process *c_p, DistEntry *dep);
+void erts_ref_dist_entry(DistEntry *dep);
+void erts_deref_dist_entry(DistEntry *dep);
-ERTS_GLB_INLINE void erts_deref_dist_entry(DistEntry *dep);
ERTS_GLB_INLINE void erts_deref_node_entry(ErlNode *np);
-ERTS_GLB_INLINE void erts_smp_de_rlock(DistEntry *dep);
-ERTS_GLB_INLINE void erts_smp_de_runlock(DistEntry *dep);
-ERTS_GLB_INLINE void erts_smp_de_rwlock(DistEntry *dep);
-ERTS_GLB_INLINE void erts_smp_de_rwunlock(DistEntry *dep);
-ERTS_GLB_INLINE void erts_smp_de_links_lock(DistEntry *dep);
-ERTS_GLB_INLINE void erts_smp_de_links_unlock(DistEntry *dep);
+ERTS_GLB_INLINE void erts_de_rlock(DistEntry *dep);
+ERTS_GLB_INLINE void erts_de_runlock(DistEntry *dep);
+ERTS_GLB_INLINE void erts_de_rwlock(DistEntry *dep);
+ERTS_GLB_INLINE void erts_de_rwunlock(DistEntry *dep);
+ERTS_GLB_INLINE void erts_de_links_lock(DistEntry *dep);
+ERTS_GLB_INLINE void erts_de_links_unlock(DistEntry *dep);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
-erts_deref_dist_entry(DistEntry *dep)
-{
- ASSERT(dep);
- if (erts_refc_dectest(&dep->refc, 0) == 0)
- erts_delete_dist_entry(dep);
-}
-
-ERTS_GLB_INLINE void
erts_deref_node_entry(ErlNode *np)
{
ASSERT(np);
if (erts_refc_dectest(&np->refc, 0) == 0)
- erts_delete_node(np);
+ erts_schedule_delete_node(np);
}
ERTS_GLB_INLINE void
-erts_smp_de_rlock(DistEntry *dep)
+erts_de_rlock(DistEntry *dep)
{
- erts_smp_rwmtx_rlock(&dep->rwmtx);
+ erts_rwmtx_rlock(&dep->rwmtx);
}
ERTS_GLB_INLINE void
-erts_smp_de_runlock(DistEntry *dep)
+erts_de_runlock(DistEntry *dep)
{
- erts_smp_rwmtx_runlock(&dep->rwmtx);
+ erts_rwmtx_runlock(&dep->rwmtx);
}
ERTS_GLB_INLINE void
-erts_smp_de_rwlock(DistEntry *dep)
+erts_de_rwlock(DistEntry *dep)
{
- erts_smp_rwmtx_rwlock(&dep->rwmtx);
+ erts_rwmtx_rwlock(&dep->rwmtx);
}
ERTS_GLB_INLINE void
-erts_smp_de_rwunlock(DistEntry *dep)
+erts_de_rwunlock(DistEntry *dep)
{
- erts_smp_rwmtx_rwunlock(&dep->rwmtx);
+ erts_rwmtx_rwunlock(&dep->rwmtx);
}
ERTS_GLB_INLINE void
-erts_smp_de_links_lock(DistEntry *dep)
+erts_de_links_lock(DistEntry *dep)
{
- erts_smp_mtx_lock(&dep->lnk_mtx);
+ erts_mtx_lock(&dep->lnk_mtx);
}
ERTS_GLB_INLINE void
-erts_smp_de_links_unlock(DistEntry *dep)
+erts_de_links_unlock(DistEntry *dep)
{
- erts_smp_mtx_unlock(&dep->lnk_mtx);
+ erts_mtx_unlock(&dep->lnk_mtx);
}
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
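
The lock wrappers above are defined in the header under ERTS_GLB_INLINE_INCL_FUNC_DEF so that every includer sees an inlinable definition while exactly one translation unit emits the out-of-line copies. A compact sketch of that global-inline idiom, with illustrative macro names:

    /* One .c file defines INCL_DEF before including the header;
     * everyone else only sees the prototype. Names are illustrative. */
    #define GLB_INLINE static inline
    #define INCL_DEF 1

    GLB_INLINE int de_is_locked(int lockword);

    #if INCL_DEF
    GLB_INLINE int de_is_locked(int lockword) { return lockword != 0; }
    #endif

    int main(void) { return de_is_locked(0); }
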
+void erts_debug_test_node_tab_delayed_delete(Sint64 millisecs);
+void erts_lcnt_update_distribution_locks(int enable);
#endif
diff --git a/erts/emulator/beam/erl_port.h b/erts/emulator/beam/erl_port.h
index ad3f104a68..9117eb1f72 100644
--- a/erts/emulator/beam/erl_port.h
+++ b/erts/emulator/beam/erl_port.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2012-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2012-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -30,6 +31,9 @@ typedef struct ErtsProc2PortSigData_ ErtsProc2PortSigData;
#include "erl_ptab.h"
#include "erl_thr_progress.h"
#include "erl_trace.h"
+#define ERTS_IO_QUEUE_TYPES_ONLY__
+#include "erl_io_queue.h"
+#undef ERTS_IO_QUEUE_TYPES_ONLY__
#ifndef __WIN32__
#define ERTS_DEFAULT_MAX_PORTS (1 << 16)
@@ -74,23 +78,8 @@ typedef struct erts_driver_t_ erts_driver_t;
#define ERTS_Port2ErlDrvPort(PH) ((ErlDrvPort) (PH))
#endif
-#define SMALL_IO_QUEUE 5 /* Number of fixed elements */
-
-typedef struct {
- ErlDrvSizeT size; /* total size in bytes */
-
- SysIOVec* v_start;
- SysIOVec* v_end;
- SysIOVec* v_head;
- SysIOVec* v_tail;
- SysIOVec v_small[SMALL_IO_QUEUE];
+typedef ErtsIOQueue ErlPortIOQueue;
- ErlDrvBinary** b_start;
- ErlDrvBinary** b_end;
- ErlDrvBinary** b_head;
- ErlDrvBinary** b_tail;
- ErlDrvBinary* b_small[SMALL_IO_QUEUE];
-} ErlIOQueue;
typedef struct line_buf { /* Buffer used in line oriented I/O */
ErlDrvSizeT bufsiz; /* Size of character buffer */
@@ -130,9 +119,7 @@ typedef struct {
void *data[ERTS_PRTSD_SIZE];
} ErtsPrtSD;
-#ifdef ERTS_SMP
typedef struct ErtsXPortsList_ ErtsXPortsList;
-#endif
/*
* Port locking:
@@ -157,21 +144,16 @@ struct _erl_drv_port {
ErtsPortTaskSched sched;
ErtsPortTaskHandle timeout_task;
-#ifdef ERTS_SMP
erts_mtx_t *lock;
ErtsXPortsList *xports;
- erts_smp_atomic_t run_queue;
-#else
- erts_atomic32_t refc;
- int cleanup;
-#endif
+ erts_atomic_t run_queue;
erts_atomic_t connected; /* A connected process */
Eterm caller; /* Current caller. */
- erts_smp_atomic_t data; /* Data associated with port. */
+ erts_atomic_t data; /* Data associated with port. */
Uint bytes_in; /* Number of bytes read */
Uint bytes_out; /* Number of bytes written */
- ErlIOQueue ioq; /* driver accessible i/o queue */
+ ErlPortIOQueue ioq; /* driver accessible i/o queue */
DistEntry *dist_entry; /* Dist entry used in DISTRIBUTION */
char *name; /* String used in the open */
erts_driver_t* drv_ptr;
@@ -184,8 +166,13 @@ struct _erl_drv_port {
int control_flags; /* Flags for port_control() */
ErlDrvPDL port_data_lock;
- ErtsPrtSD *psd; /* Port specific data */
+ erts_atomic_t psd; /* Port specific data */
int reds; /* Only used while executing driver callbacks */
+
+ struct {
+ Eterm to;
+ Uint32 ref[ERTS_MAX_REF_NUMBERS];
+ } *async_open_port; /* Reference used with async open port */
};
@@ -215,24 +202,20 @@ ERTS_GLB_INLINE ErtsRunQueue *erts_port_runq(Port *prt);
ERTS_GLB_INLINE ErtsRunQueue *
erts_port_runq(Port *prt)
{
-#ifdef ERTS_SMP
ErtsRunQueue *rq1, *rq2;
- rq1 = (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue);
+ rq1 = (ErtsRunQueue *) erts_atomic_read_nob(&prt->run_queue);
if (!rq1)
return NULL;
while (1) {
- erts_smp_runq_lock(rq1);
- rq2 = (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue);
+ erts_runq_lock(rq1);
+ rq2 = (ErtsRunQueue *) erts_atomic_read_nob(&prt->run_queue);
if (rq1 == rq2)
return rq1;
- erts_smp_runq_unlock(rq1);
+ erts_runq_unlock(rq1);
rq1 = rq2;
if (!rq1)
return NULL;
}
-#else
- return ERTS_RUNQ_IX(0);
-#endif
}
#endif
@@ -246,28 +229,54 @@ ERTS_GLB_INLINE void *erts_prtsd_set(Port *p, int ix, void *new);
ERTS_GLB_INLINE void *
erts_prtsd_get(Port *prt, int ix)
{
- return prt->psd ? prt->psd->data[ix] : NULL;
+ ErtsPrtSD *psd = (ErtsPrtSD *) erts_atomic_read_nob(&prt->psd);
+ if (!psd)
+ return NULL;
+ ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER;
+ return psd->data[ix];
}
ERTS_GLB_INLINE void *
erts_prtsd_set(Port *prt, int ix, void *data)
{
- if (prt->psd) {
- void *old = prt->psd->data[ix];
- prt->psd->data[ix] = data;
+ ErtsPrtSD *psd, *new_psd;
+ void *old;
+ int i;
+
+ psd = (ErtsPrtSD *) erts_atomic_read_nob(&prt->psd);
+
+ if (psd) {
+#ifdef ETHR_ORDERED_READ_DEPEND
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreStore);
+#endif
+ old = psd->data[ix];
+ psd->data[ix] = data;
return old;
}
- else {
- prt->psd = erts_alloc(ERTS_ALC_T_PRTSD, sizeof(ErtsPrtSD));
- prt->psd->data[ix] = data;
+
+ if (!data)
return NULL;
- }
+
+ new_psd = erts_alloc(ERTS_ALC_T_PRTSD, sizeof(ErtsPrtSD));
+ for (i = 0; i < ERTS_PRTSD_SIZE; i++)
+ new_psd->data[i] = NULL;
+ psd = (ErtsPrtSD *) erts_atomic_cmpxchg_mb(&prt->psd,
+ (erts_aint_t) new_psd,
+ (erts_aint_t) NULL);
+ if (psd)
+ erts_free(ERTS_ALC_T_PRTSD, new_psd);
+ else
+ psd = new_psd;
+ old = psd->data[ix];
+ psd->data[ix] = data;
+ return old;
}
#endif
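
The rewritten erts_prtsd_set() installs the port-specific-data block without a lock: read the slot, and if it is empty, allocate a zeroed block and try to publish it with a single compare-and-swap, freeing the local copy when another thread wins the race. A standalone sketch of that install-once pattern, assuming C11 atomics:

    #include <stdatomic.h>
    #include <stdlib.h>
    #include <stdio.h>

    typedef struct { void *data[4]; } Psd;

    static _Atomic(Psd *) slot = NULL;

    static Psd *get_or_install(void) {
        Psd *psd = atomic_load(&slot);
        if (psd)
            return psd;                         /* fast path: already set */
        Psd *fresh = calloc(1, sizeof *fresh);  /* zeroed, like the loop above */
        Psd *expected = NULL;
        if (atomic_compare_exchange_strong(&slot, &expected, fresh))
            return fresh;                       /* we published our block  */
        free(fresh);                            /* lost the race: discard  */
        return expected;                        /* winner's pointer        */
    }

    int main(void) {
        printf("%p\n", (void *)get_or_install());
        free(atomic_load(&slot));
        return 0;
    }
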
-extern erts_smp_atomic_t erts_bytes_out; /* no bytes written out */
-extern erts_smp_atomic_t erts_bytes_in; /* no bytes sent into the system */
+Eterm erts_request_io_bytes(Process *c_p);
/* port status flags */
@@ -327,101 +336,80 @@ extern erts_smp_atomic_t erts_bytes_in; /* no bytes sent into the system */
#define ERTS_PORT_REDS_CONNECT (CONTEXT_REDS/200)
#define ERTS_PORT_REDS_UNLINK (CONTEXT_REDS/200)
#define ERTS_PORT_REDS_LINK (CONTEXT_REDS/200)
+#define ERTS_PORT_REDS_MONITOR (CONTEXT_REDS/200)
+#define ERTS_PORT_REDS_DEMONITOR (CONTEXT_REDS/200)
#define ERTS_PORT_REDS_BADSIG (CONTEXT_REDS/200)
#define ERTS_PORT_REDS_CONTROL (CONTEXT_REDS/100)
#define ERTS_PORT_REDS_CALL (CONTEXT_REDS/50)
#define ERTS_PORT_REDS_INFO (CONTEXT_REDS/100)
#define ERTS_PORT_REDS_TERMINATE (CONTEXT_REDS/50)
-void print_port_info(Port *, int, void *);
+void print_port_info(Port *, fmtfn_t, void *);
void erts_port_free(Port *);
-#ifndef ERTS_SMP
-void erts_port_cleanup(Port *);
-#endif
void erts_fire_port_monitor(Port *prt, Eterm ref);
-#ifdef ERTS_SMP
int erts_port_handle_xports(Port *);
-#endif
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
int erts_lc_is_port_locked(Port *);
#endif
ERTS_GLB_INLINE void erts_port_inc_refc(Port *prt);
ERTS_GLB_INLINE void erts_port_dec_refc(Port *prt);
ERTS_GLB_INLINE void erts_port_add_refc(Port *prt, Sint32 add_refc);
+ERTS_GLB_INLINE Sint erts_port_read_refc(Port *prt);
-ERTS_GLB_INLINE int erts_smp_port_trylock(Port *prt);
-ERTS_GLB_INLINE void erts_smp_port_lock(Port *prt);
-ERTS_GLB_INLINE void erts_smp_port_unlock(Port *prt);
+ERTS_GLB_INLINE int erts_port_trylock(Port *prt);
+ERTS_GLB_INLINE void erts_port_lock(Port *prt);
+ERTS_GLB_INLINE void erts_port_unlock(Port *prt);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void erts_port_inc_refc(Port *prt)
{
-#ifdef ERTS_SMP
- erts_ptab_inc_refc(&prt->common);
-#else
- erts_atomic32_inc_nob(&prt->refc);
-#endif
+ erts_ptab_atmc_inc_refc(&prt->common);
}
ERTS_GLB_INLINE void erts_port_dec_refc(Port *prt)
{
-#ifdef ERTS_SMP
- int referred = erts_ptab_dec_test_refc(&prt->common);
+ int referred = erts_ptab_atmc_dec_test_refc(&prt->common);
if (!referred)
erts_port_free(prt);
-#else
- int refc = erts_atomic32_dec_read_nob(&prt->refc);
- if (refc == 0)
- erts_port_free(prt);
-#endif
}
ERTS_GLB_INLINE void erts_port_add_refc(Port *prt, Sint32 add_refc)
{
-#ifdef ERTS_SMP
- int referred = erts_ptab_add_test_refc(&prt->common, add_refc);
+ int referred = erts_ptab_atmc_add_test_refc(&prt->common, add_refc);
if (!referred)
erts_port_free(prt);
-#else
- int refc = erts_atomic32_add_read_nob(&prt->refc, add_refc);
- if (refc == 0)
- erts_port_free(prt);
-#endif
+}
+
+ERTS_GLB_INLINE Sint erts_port_read_refc(Port *prt)
+{
+ return erts_ptab_atmc_read_refc(&prt->common);
}
ERTS_GLB_INLINE int
-erts_smp_port_trylock(Port *prt)
+erts_port_trylock(Port *prt)
{
-#ifdef ERTS_SMP
/* *Need* to be a managed thread */
- ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
+ ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread());
return erts_mtx_trylock(prt->lock);
-#else
- return 0;
-#endif
}
ERTS_GLB_INLINE void
-erts_smp_port_lock(Port *prt)
+erts_port_lock(Port *prt)
{
-#ifdef ERTS_SMP
/* *Need* to be a managed thread */
- ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
+ ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread());
erts_mtx_lock(prt->lock);
-#endif
}
ERTS_GLB_INLINE void
-erts_smp_port_unlock(Port *prt)
+erts_port_unlock(Port *prt)
{
-#ifdef ERTS_SMP
/* *Need* to be a managed thread */
- ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
+ ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread());
erts_mtx_unlock(prt->lock);
-#endif
}
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
@@ -452,9 +440,7 @@ extern const Port erts_invalid_port;
int erts_is_port_ioq_empty(Port *);
void erts_terminate_port(Port *);
-#ifdef ERTS_SMP
Port *erts_de2port(DistEntry *, Process *, ErtsProcLocks);
-#endif
ERTS_GLB_INLINE Port *erts_pix2port(int);
ERTS_GLB_INLINE Port *erts_port_lookup_raw(Eterm);
@@ -462,10 +448,9 @@ ERTS_GLB_INLINE Port *erts_port_lookup(Eterm, Uint32);
ERTS_GLB_INLINE Port*erts_id2port(Eterm id);
ERTS_GLB_INLINE Port *erts_id2port_sflgs(Eterm, Process *, ErtsProcLocks, Uint32);
ERTS_GLB_INLINE void erts_port_release(Port *);
-#ifdef ERTS_SMP
+ERTS_GLB_INLINE Port *erts_thr_port_lookup(Eterm id, Uint32 invalid_sflgs);
ERTS_GLB_INLINE Port *erts_thr_id2port_sflgs(Eterm id, Uint32 invalid_sflgs);
ERTS_GLB_INLINE void erts_thr_port_release(Port *prt);
-#endif
ERTS_GLB_INLINE Port *erts_thr_drvport2port(ErlDrvPort, int);
ERTS_GLB_INLINE Port *erts_drvport2port_state(ErlDrvPort, erts_aint32_t *);
ERTS_GLB_INLINE Eterm erts_drvport2id(ErlDrvPort);
@@ -491,7 +476,7 @@ erts_port_lookup_raw(Eterm id)
{
Port *prt;
- ERTS_SMP_LC_ASSERT(erts_thr_progress_lc_is_delaying());
+ ERTS_LC_ASSERT(erts_thr_progress_lc_is_delaying());
if (is_not_internal_port(id))
return NULL;
@@ -520,7 +505,7 @@ erts_id2port(Eterm id)
Port *prt;
/* Only allowed to be called from managed threads */
- ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
+ ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread());
if (is_not_internal_port(id))
return NULL;
@@ -531,10 +516,10 @@ erts_id2port(Eterm id)
if (!prt || prt->common.id != id)
return NULL;
- erts_smp_port_lock(prt);
+ erts_port_lock(prt);
state = erts_atomic32_read_nob(&prt->state);
if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP) {
- erts_smp_port_unlock(prt);
+ erts_port_unlock(prt);
return NULL;
}
@@ -547,14 +532,12 @@ erts_id2port_sflgs(Eterm id,
Process *c_p, ErtsProcLocks c_p_locks,
Uint32 invalid_sflgs)
{
-#ifdef ERTS_SMP
int no_proc_locks = !c_p || !c_p_locks;
-#endif
erts_aint32_t state;
Port *prt;
/* Only allowed to be called from managed threads */
- ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
+ ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread());
if (is_not_internal_port(id))
return NULL;
@@ -565,21 +548,17 @@ erts_id2port_sflgs(Eterm id,
if (!prt || prt->common.id != id)
return NULL;
-#ifdef ERTS_SMP
if (no_proc_locks)
- erts_smp_port_lock(prt);
- else if (erts_smp_port_trylock(prt) == EBUSY) {
+ erts_port_lock(prt);
+ else if (erts_port_trylock(prt) == EBUSY) {
/* Unlock process locks, and acquire locks in lock order... */
- erts_smp_proc_unlock(c_p, c_p_locks);
- erts_smp_port_lock(prt);
- erts_smp_proc_lock(c_p, c_p_locks);
+ erts_proc_unlock(c_p, c_p_locks);
+ erts_port_lock(prt);
+ erts_proc_lock(c_p, c_p_locks);
}
-#endif
state = erts_atomic32_read_nob(&prt->state);
if (state & invalid_sflgs) {
-#ifdef ERTS_SMP
- erts_smp_port_unlock(prt);
-#endif
+ erts_port_unlock(prt);
return NULL;
}
@@ -590,18 +569,48 @@ ERTS_GLB_INLINE void
erts_port_release(Port *prt)
{
/* Only allowed to be called from managed threads */
- ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
-#ifdef ERTS_SMP
- erts_smp_port_unlock(prt);
-#else
- if (prt->cleanup) {
- prt->cleanup = 0;
- erts_port_cleanup(prt);
- }
-#endif
+ ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread());
+ erts_port_unlock(prt);
}
-#ifdef ERTS_SMP
+/*
+ * erts_thr_port_lookup() and erts_port_dec_refc(prt) can
+ * be used by unmanaged threads in the SMP case.
+ */
+ERTS_GLB_INLINE Port *
+erts_thr_port_lookup(Eterm id, Uint32 invalid_sflgs)
+{
+ Port *prt;
+ ErtsThrPrgrDelayHandle dhndl;
+
+ if (is_not_internal_port(id))
+ return NULL;
+
+ dhndl = erts_thr_progress_unmanaged_delay();
+
+ prt = (Port *) erts_ptab_pix2intptr_ddrb(&erts_port,
+ internal_port_index(id));
+
+ if (!prt || prt->common.id != id) {
+ erts_thr_progress_unmanaged_continue(dhndl);
+ return NULL;
+ }
+ else {
+ erts_aint32_t state;
+ erts_port_inc_refc(prt);
+
+ if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
+ erts_thr_progress_unmanaged_continue(dhndl);
+
+ state = erts_atomic32_read_acqb(&prt->state);
+ if (state & invalid_sflgs) {
+ erts_port_dec_refc(prt);
+ return NULL;
+ }
+
+ return prt;
+ }
+}
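
The ordering in erts_thr_port_lookup() is the point: reclamation is pinned with the unmanaged thread-progress delay, the reference count is bumped while still pinned, the pin is released, and only then is the state validated, dropping the reference on failure. A minimal sketch of that protocol, with comments marking where the ERTS calls would sit:

    #include <stdatomic.h>
    #include <stdio.h>

    typedef struct { atomic_int refc; atomic_int state; } Obj;

    static Obj the_port = { 1, 0 };        /* state 0 == valid */

    static Obj *lookup(void) {
        /* pin: erts_thr_progress_unmanaged_delay() would go here */
        Obj *p = &the_port;
        atomic_fetch_add(&p->refc, 1);     /* take the ref before unpinning */
        /* unpin: erts_thr_progress_unmanaged_continue() */
        if (atomic_load(&p->state) != 0) { /* validate only after the ref   */
            atomic_fetch_sub(&p->refc, 1);
            return NULL;
        }
        return p;
    }

    int main(void) {
        Obj *p = lookup();
        printf("%s\n", p ? "got port" : "invalid");
        if (p)
            atomic_fetch_sub(&p->refc, 1);
        return 0;
    }
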
/*
* erts_thr_id2port_sflgs() and erts_thr_port_release() can
@@ -649,13 +658,10 @@ ERTS_GLB_INLINE void
erts_thr_port_release(Port *prt)
{
erts_mtx_unlock(prt->lock);
-#ifdef ERTS_SMP
if (!erts_thr_progress_is_managed_thread())
erts_port_dec_refc(prt);
-#endif
}
-#endif
ERTS_GLB_INLINE Port *
erts_thr_drvport2port(ErlDrvPort drvport, int lock_pdl)
@@ -668,10 +674,10 @@ erts_thr_drvport2port(ErlDrvPort drvport, int lock_pdl)
if (lock_pdl && prt->port_data_lock)
driver_pdl_lock(prt->port_data_lock);
-#if ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_CHECK
if (!ERTS_IS_CRASH_DUMPING) {
if (erts_lc_is_emu_thr()) {
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
ERTS_LC_ASSERT(!prt->port_data_lock
|| erts_lc_mtx_is_locked(&prt->port_data_lock->mtx));
}
@@ -697,10 +703,10 @@ erts_drvport2port_state(ErlDrvPort drvport, erts_aint32_t *statep)
Port *prt = ERTS_ErlDrvPort2Port(drvport);
erts_aint32_t state;
ASSERT(prt);
- ERTS_LC_ASSERT(erts_lc_is_emu_thr());
+// ERTS_LC_ASSERT(erts_lc_is_emu_thr());
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return ERTS_INVALID_ERL_DRV_PORT;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)
|| ERTS_IS_CRASH_DUMPING);
/*
* This state check is only needed since a driver callback
@@ -757,23 +763,21 @@ erts_port_driver_callback_epilogue(Port *prt, erts_aint32_t *statep)
int reds = 0;
erts_aint32_t state;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
state = erts_atomic32_read_nob(&prt->state);
if ((state & ERTS_PORT_SFLG_CLOSING) && erts_is_port_ioq_empty(prt)) {
reds += ERTS_PORT_REDS_TERMINATE;
erts_terminate_port(prt);
state = erts_atomic32_read_nob(&prt->state);
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
}
-#ifdef ERTS_SMP
if (prt->xports) {
reds += erts_port_handle_xports(prt);
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
ASSERT(!prt->xports);
}
-#endif
if (statep)
*statep = state;
@@ -787,16 +791,20 @@ void erts_port_resume_procs(Port *);
struct binary;
-#define ERTS_P2P_SIG_TYPE_BAD 0
-#define ERTS_P2P_SIG_TYPE_OUTPUT 1
-#define ERTS_P2P_SIG_TYPE_OUTPUTV 2
-#define ERTS_P2P_SIG_TYPE_CONNECT 3
-#define ERTS_P2P_SIG_TYPE_EXIT 4
-#define ERTS_P2P_SIG_TYPE_CONTROL 5
-#define ERTS_P2P_SIG_TYPE_CALL 6
-#define ERTS_P2P_SIG_TYPE_INFO 7
-#define ERTS_P2P_SIG_TYPE_LINK 8
-#define ERTS_P2P_SIG_TYPE_UNLINK 9
+enum {
+ ERTS_P2P_SIG_TYPE_BAD = 0,
+ ERTS_P2P_SIG_TYPE_OUTPUT = 1,
+ ERTS_P2P_SIG_TYPE_OUTPUTV = 2,
+ ERTS_P2P_SIG_TYPE_CONNECT = 3,
+ ERTS_P2P_SIG_TYPE_EXIT = 4,
+ ERTS_P2P_SIG_TYPE_CONTROL = 5,
+ ERTS_P2P_SIG_TYPE_CALL = 6,
+ ERTS_P2P_SIG_TYPE_INFO = 7,
+ ERTS_P2P_SIG_TYPE_LINK = 8,
+ ERTS_P2P_SIG_TYPE_UNLINK = 9,
+ ERTS_P2P_SIG_TYPE_MONITOR = 10,
+ ERTS_P2P_SIG_TYPE_DEMONITOR = 11
+};
#define ERTS_P2P_SIG_TYPE_BITS 4
#define ERTS_P2P_SIG_TYPE_MASK \
@@ -858,6 +866,15 @@ struct ErtsProc2PortSigData_ {
struct {
Eterm from;
} unlink;
+ struct {
+ Eterm origin; /* who receives monitor event, pid */
+ Eterm name; /* either name for named monitor, or port id */
+ } monitor;
+ struct {
+ Eterm origin; /* who is at the other end of the monitor, pid */
+ Eterm name; /* port id */
+ Uint32 ref[ERTS_MAX_REF_NUMBERS]; /* box contents of a ref */
+ } demonitor;
} u;
} ;
@@ -910,17 +927,7 @@ typedef enum {
ERTS_PORT_OP_DONE
} ErtsPortOpResult;
-ErtsPortOpResult
-erts_schedule_proc2port_signal(Process *,
- Port *,
- Eterm,
- Eterm *,
- ErtsProc2PortSigData *,
- int,
- ErtsPortTaskHandle *,
- ErtsProc2PortSigCallback);
-
-int erts_deliver_port_exit(Port *, Eterm, Eterm, int);
+int erts_deliver_port_exit(Port *, Eterm, Eterm, int, int);
/*
* Port signal flags
@@ -954,4 +961,34 @@ ErtsPortOpResult erts_port_control(Process *, Port *, unsigned int, Eterm, Eterm
ErtsPortOpResult erts_port_call(Process *, Port *, unsigned int, Eterm, Eterm *);
ErtsPortOpResult erts_port_info(Process *, Port *, Eterm, Eterm *);
+/* Creates a monitor between Origin and Target. Ref must be initialized to
+ * a reference (the ref may be rewritten so that it can additionally serve
+ * as a signal id). Name is an atom if the user monitors the port by name,
+ * otherwise NIL */
+ErtsPortOpResult erts_port_monitor(Process *origin, Port *target, Eterm name,
+ Eterm *ref);
+
+typedef enum {
+    /* Normal demonitor rules apply, with locking and a reductions bump */
+    ERTS_PORT_DEMONITOR_NORMAL = 1,
+    /* Relaxed demonitor rules for a process that is about to die: pid
+     * lookup and locking are unavailable, and no reductions are bumped. */
+    ERTS_PORT_DEMONITOR_ORIGIN_ON_DEATHBED = 2,
+} ErtsDemonitorMode;
+
+/* Removes the monitor between origin and target, identified by ref.
+ * mode selects either the normal rules (locking and a reductions bump
+ * apply) or the relaxed deathbed rules that avoid locking the origin */
+ErtsPortOpResult erts_port_demonitor(Process *origin, ErtsDemonitorMode mode,
+ Port *target, Eterm ref,
+ Eterm *trap_ref);
+/* defined in erl_bif_port.c */
+Port *erts_sig_lookup_port(Process *c_p, Eterm id_or_name);
+
+int erts_port_output_async(Port *, Eterm, Eterm);
+
+/*
+ * Signals from ports to ports. Used by sys drivers.
+ */
+int erl_drv_port_control(Eterm, char, char*, ErlDrvSizeT);
+
#endif
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 31d9a1e26e..a588477320 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -32,7 +33,10 @@
#include "global.h"
#include "erl_port_task.h"
#include "dist.h"
+#include "erl_check_io.h"
#include "dtrace-wrapper.h"
+#include "lttng-wrapper.h"
+#include "erl_check_io.h"
#include <stdarg.h>
/*
@@ -67,8 +71,25 @@ static void chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_q
#else
#define DTRACE_DRIVER(PROBE_NAME, PP) do {} while(0)
#endif
+#ifdef USE_LTTNG_VM_TRACEPOINTS
+#define LTTNG_DRIVER(TRACEPOINT, PP) \
+ if (LTTNG_ENABLED(TRACEPOINT)) { \
+ lttng_decl_portbuf(port_str); \
+ lttng_decl_procbuf(proc_str); \
+ lttng_pid_to_str(ERTS_PORT_GET_CONNECTED(PP), proc_str); \
+ lttng_port_to_str((PP), port_str); \
+ LTTNG3(TRACEPOINT, proc_str, port_str, (PP)->name); \
+ }
+#else
+#define LTTNG_DRIVER(TRACEPOINT, PP) do {} while(0)
+#endif
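
Like DTRACE_DRIVER just above it, LTTNG_DRIVER collapses to a do { } while(0) no-op when the tracepoints are compiled out, so call sites never need their own #ifdefs. A self-contained sketch of that switch, with TRACE as an illustrative macro rather than the LTTNG wrapper:

    #include <stdio.h>

    #ifdef USE_TRACE
    #define TRACE(msg) do { printf("trace: %s\n", msg); } while (0)
    #else
    #define TRACE(msg) do { } while (0)   /* swallowed, still a statement */
    #endif

    int main(void) {
        TRACE("port run");   /* legal in if/else bodies either way */
        return 0;
    }
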
-erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
+#define ERTS_LC_VERIFY_RQ(RQ, PP) \
+ do { \
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq)); \
+ ERTS_LC_ASSERT((RQ) == ((ErtsRunQueue *) \
+ erts_atomic_read_nob(&(PP)->run_queue))); \
+ } while (0)
#define ERTS_PT_STATE_SCHEDULED 0
#define ERTS_PT_STATE_ABORTED 1
@@ -77,7 +98,6 @@ erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
typedef union {
struct { /* I/O tasks */
ErlDrvEvent event;
- ErlDrvEventData event_data;
} io;
struct {
ErtsProc2PortSigCallback callback;
@@ -86,7 +106,7 @@ typedef union {
} ErtsPortTaskTypeData;
struct ErtsPortTask_ {
- erts_smp_atomic32_t state;
+ erts_atomic32_t state;
ErtsPortTaskType type;
union {
struct {
@@ -104,9 +124,7 @@ struct ErtsPortTaskHandleList_ {
ErtsPortTaskHandle handle;
union {
ErtsPortTaskHandleList *next;
-#ifdef ERTS_SMP
ErtsThrPrgrLaterOp release;
-#endif
} u;
};
@@ -129,35 +147,29 @@ static void begin_port_cleanup(Port *pp,
ErtsPortTask **execq,
int *processing_busy_q_p);
-ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(port_task,
- ErtsPortTask,
- 1000,
- ERTS_ALC_T_PORT_TASK)
+ERTS_THR_PREF_QUICK_ALLOC_IMPL(port_task,
+ ErtsPortTask,
+ 1000,
+ ERTS_ALC_T_PORT_TASK)
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(busy_caller_table,
ErtsPortTaskBusyCallerTable,
50,
ERTS_ALC_T_BUSY_CALLER_TAB)
-#ifdef ERTS_SMP
static void
call_port_task_free(void *vptp)
{
port_task_free((ErtsPortTask *) vptp);
}
-#endif
static ERTS_INLINE void
schedule_port_task_free(ErtsPortTask *ptp)
{
-#ifdef ERTS_SMP
erts_schedule_thr_prgr_later_cleanup_op(call_port_task_free,
(void *) ptp,
&ptp->u.release,
sizeof(ErtsPortTask));
-#else
- port_task_free(ptp);
-#endif
}
static ERTS_INLINE ErtsPortTask *
@@ -171,20 +183,44 @@ p2p_sig_data_to_task(ErtsProc2PortSigData *sigdp)
return ptp;
}
-ErtsProc2PortSigData *
-erts_port_task_alloc_p2p_sig_data(void)
+static ERTS_INLINE ErtsProc2PortSigData *
+p2p_sig_data_init(ErtsPortTask *ptp)
{
- ErtsPortTask *ptp = port_task_alloc();
ptp->type = ERTS_PORT_TASK_PROC_SIG;
ptp->u.alive.flags = ERTS_PT_FLG_SIG_DEP;
- erts_smp_atomic32_init_nob(&ptp->state, ERTS_PT_STATE_SCHEDULED);
+ erts_atomic32_init_nob(&ptp->state, ERTS_PT_STATE_SCHEDULED);
ASSERT(ptp == p2p_sig_data_to_task(&ptp->u.alive.td.psig.data));
return &ptp->u.alive.td.psig.data;
}
+ErtsProc2PortSigData *
+erts_port_task_alloc_p2p_sig_data(void)
+{
+ ErtsPortTask *ptp = port_task_alloc();
+
+ return p2p_sig_data_init(ptp);
+}
+
+ErtsProc2PortSigData *
+erts_port_task_alloc_p2p_sig_data_extra(size_t extra, void **extra_ptr)
+{
+ ErtsPortTask *ptp = erts_alloc(ERTS_ALC_T_PORT_TASK,
+ sizeof(ErtsPortTask) + extra);
+
+ *extra_ptr = ptp+1;
+
+ return p2p_sig_data_init(ptp);
+}
+
+void
+erts_port_task_free_p2p_sig_data(ErtsProc2PortSigData *sigdp)
+{
+ schedule_port_task_free(p2p_sig_data_to_task(sigdp));
+}
+
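
erts_port_task_alloc_p2p_sig_data_extra() carves the task header and its variable-size payload out of one allocation, returning ptp+1 as the payload pointer so a single free releases both. A standalone sketch of that trailing-payload idiom (note the payload only inherits the header struct's alignment):

    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>

    typedef struct { int type; } Task;

    static Task *alloc_with_extra(size_t extra, void **extra_ptr) {
        Task *t = malloc(sizeof(Task) + extra);
        *extra_ptr = t + 1;     /* first byte past the header, as ptp+1 */
        return t;
    }

    int main(void) {
        void *payload;
        Task *t = alloc_with_extra(8, &payload);
        t->type = 1;
        memcpy(payload, "hi", 3);
        printf("%d %s\n", t->type, (char *)payload);
        free(t);                /* one free releases header and payload */
        return 0;
    }
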
static ERTS_INLINE Eterm
task_caller(ErtsPortTask *ptp)
{
@@ -244,7 +280,7 @@ popped_from_busy_queue(Port *pp, ErtsPortTask *ptp, int last)
#ifdef DEBUG
erts_aint32_t flags =
#endif
- erts_smp_atomic32_read_band_nob(
+ erts_atomic32_read_band_nob(
&pp->sched.flags,
~ERTS_PTS_FLG_HAVE_BUSY_TASKS);
ASSERT(flags & ERTS_PTS_FLG_HAVE_BUSY_TASKS);
@@ -291,7 +327,7 @@ busy_wait_move_to_busy_queue(Port *pp, ErtsPortTask *ptp)
#ifdef DEBUG
flags =
#endif
- erts_smp_atomic32_read_bor_nob(&pp->sched.flags,
+ erts_atomic32_read_bor_nob(&pp->sched.flags,
ERTS_PTS_FLG_HAVE_BUSY_TASKS);
ASSERT(!(flags & ERTS_PTS_FLG_HAVE_BUSY_TASKS));
@@ -431,7 +467,7 @@ no_sig_dep_move_from_busyq(Port *pp)
int bix;
erts_aint32_t flags =
#endif
- erts_smp_atomic32_read_band_nob(
+ erts_atomic32_read_band_nob(
&pp->sched.flags,
~ERTS_PTS_FLG_HAVE_BUSY_TASKS);
ASSERT(flags & ERTS_PTS_FLG_HAVE_BUSY_TASKS);
@@ -464,11 +500,11 @@ chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_queue)
if (!first) {
ASSERT(!tabp);
ASSERT(!pp->sched.taskq.local.busy.last);
- ASSERT(!(erts_smp_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_HAVE_BUSY_TASKS));
+ ASSERT(!(erts_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_HAVE_BUSY_TASKS));
return;
}
- ASSERT(erts_smp_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_HAVE_BUSY_TASKS);
+ ASSERT(erts_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_HAVE_BUSY_TASKS);
ASSERT(tabp);
tot_count = 0;
@@ -524,13 +560,13 @@ chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_queue)
static ERTS_INLINE void
reset_port_task_handle(ErtsPortTaskHandle *pthp)
{
- erts_smp_atomic_set_relb(pthp, (erts_aint_t) NULL);
+ erts_atomic_set_relb(pthp, (erts_aint_t) NULL);
}
static ERTS_INLINE ErtsPortTask *
handle2task(ErtsPortTaskHandle *pthp)
{
- return (ErtsPortTask *) erts_smp_atomic_read_acqb(pthp);
+ return (ErtsPortTask *) erts_atomic_read_acqb(pthp);
}
static ERTS_INLINE void
@@ -543,11 +579,22 @@ reset_handle(ErtsPortTask *ptp)
}
static ERTS_INLINE void
+reset_executed_io_task_handle(ErtsPortTask *ptp)
+{
+ if (ptp->u.alive.handle) {
+ ASSERT(ptp == handle2task(ptp->u.alive.handle));
+ /* The port task handle is reset inside task_executed */
+ erts_io_notify_port_task_executed(ptp->type, ptp->u.alive.handle,
+ reset_port_task_handle);
+ }
+}
+
+static ERTS_INLINE void
set_handle(ErtsPortTask *ptp, ErtsPortTaskHandle *pthp)
{
ptp->u.alive.handle = pthp;
if (pthp) {
- erts_smp_atomic_set_relb(pthp, (erts_aint_t) ptp);
+ erts_atomic_set_relb(pthp, (erts_aint_t) ptp);
ASSERT(ptp == handle2task(ptp->u.alive.handle));
}
}
@@ -561,7 +608,7 @@ set_tmp_handle(ErtsPortTask *ptp, ErtsPortTaskHandle *pthp)
* IMPORTANT! Task either need to be aborted, or task handle
* need to be detached before thread progress has been made.
*/
- erts_smp_atomic_set_relb(pthp, (erts_aint_t) ptp);
+ erts_atomic_set_relb(pthp, (erts_aint_t) ptp);
}
}
@@ -579,20 +626,20 @@ check_unset_busy_port_q(Port *pp,
int resume_procs = 0;
ASSERT(bpq);
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(pp));
erts_port_task_sched_lock(&pp->sched);
- qsize = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->size);
- low = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low);
+ qsize = (ErlDrvSizeT) erts_atomic_read_nob(&bpq->size);
+ low = (ErlDrvSizeT) erts_atomic_read_nob(&bpq->low);
if (qsize < low) {
erts_aint32_t mask = ~(ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q
| ERTS_PTS_FLG_BUSY_PORT_Q);
- flags = erts_smp_atomic32_read_band_relb(&pp->sched.flags, mask);
+ flags = erts_atomic32_read_band_relb(&pp->sched.flags, mask);
if ((flags & ERTS_PTS_FLGS_BUSY) == ERTS_PTS_FLG_BUSY_PORT_Q)
resume_procs = 1;
}
else if (flags & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q) {
- flags = erts_smp_atomic32_read_band_relb(&pp->sched.flags,
+ flags = erts_atomic32_read_band_relb(&pp->sched.flags,
~ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q);
flags &= ~ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q;
}
@@ -617,16 +664,16 @@ aborted_proc2port_data(Port *pp, ErlDrvSizeT size)
bpq = pp->sched.taskq.bpq;
- qsz = (ErlDrvSizeT) erts_smp_atomic_add_read_acqb(&bpq->size,
+ qsz = (ErlDrvSizeT) erts_atomic_add_read_acqb(&bpq->size,
(erts_aint_t) -size);
ASSERT(qsz + size > qsz);
- flags = erts_smp_atomic32_read_nob(&pp->sched.flags);
+ flags = erts_atomic32_read_nob(&pp->sched.flags);
ASSERT(pp->sched.taskq.bpq);
if ((flags & (ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q
| ERTS_PTS_FLG_BUSY_PORT_Q)) != ERTS_PTS_FLG_BUSY_PORT_Q)
return;
- if (qsz < (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low))
- erts_smp_atomic32_read_bor_nob(&pp->sched.flags,
+ if (qsz < (ErlDrvSizeT) erts_atomic_read_nob(&bpq->low))
+ erts_atomic32_read_bor_nob(&pp->sched.flags,
ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q);
}
@@ -644,13 +691,13 @@ dequeued_proc2port_data(Port *pp, ErlDrvSizeT size)
bpq = pp->sched.taskq.bpq;
- qsz = (ErlDrvSizeT) erts_smp_atomic_add_read_acqb(&bpq->size,
+ qsz = (ErlDrvSizeT) erts_atomic_add_read_acqb(&bpq->size,
(erts_aint_t) -size);
ASSERT(qsz + size > qsz);
- flags = erts_smp_atomic32_read_nob(&pp->sched.flags);
+ flags = erts_atomic32_read_nob(&pp->sched.flags);
if (!(flags & ERTS_PTS_FLG_BUSY_PORT_Q))
return;
- if (qsz < (ErlDrvSizeT) erts_smp_atomic_read_acqb(&bpq->low))
+ if (qsz < (ErlDrvSizeT) erts_atomic_read_acqb(&bpq->low))
check_unset_busy_port_q(pp, flags, bpq);
}
@@ -663,19 +710,19 @@ enqueue_proc2port_data(Port *pp,
if (sigdp && bpq) {
ErlDrvSizeT size = erts_proc2port_sig_command_data_size(sigdp);
if (size) {
- erts_aint_t asize = erts_smp_atomic_add_read_acqb(&bpq->size,
+ erts_aint_t asize = erts_atomic_add_read_acqb(&bpq->size,
(erts_aint_t) size);
ErlDrvSizeT qsz = (ErlDrvSizeT) asize;
ASSERT(qsz - size < qsz);
if (!(flags & ERTS_PTS_FLG_BUSY_PORT_Q) && qsz > bpq->high) {
- flags = erts_smp_atomic32_read_bor_acqb(&pp->sched.flags,
+ flags = erts_atomic32_read_bor_acqb(&pp->sched.flags,
ERTS_PTS_FLG_BUSY_PORT_Q);
flags |= ERTS_PTS_FLG_BUSY_PORT_Q;
- qsz = (ErlDrvSizeT) erts_smp_atomic_read_acqb(&bpq->size);
- if (qsz < (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low)) {
- flags = (erts_smp_atomic32_read_bor_relb(
+ qsz = (ErlDrvSizeT) erts_atomic_read_acqb(&bpq->size);
+ if (qsz < (ErlDrvSizeT) erts_atomic_read_nob(&bpq->low)) {
+ flags = (erts_atomic32_read_bor_relb(
&pp->sched.flags,
ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q));
flags |= ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q;
@@ -723,18 +770,18 @@ erl_drv_busy_msgq_limits(ErlDrvPort dport, ErlDrvSizeT *lowp, ErlDrvSizeT *highp
erts_aint32_t flags;
pp->sched.taskq.bpq = NULL;
flags = ~(ERTS_PTS_FLG_BUSY_PORT_Q|ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q);
- flags = erts_smp_atomic32_read_band_acqb(&pp->sched.flags, flags);
+ flags = erts_atomic32_read_band_acqb(&pp->sched.flags, flags);
if ((flags & ERTS_PTS_FLGS_BUSY) == ERTS_PTS_FLG_BUSY_PORT_Q)
resume_procs = 1;
}
else {
if (!low)
- low = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low);
+ low = (ErlDrvSizeT) erts_atomic_read_nob(&bpq->low);
else {
if (bpq->high < low)
bpq->high = low;
- erts_smp_atomic_set_relb(&bpq->low, (erts_aint_t) low);
+ erts_atomic_set_relb(&bpq->low, (erts_aint_t) low);
written = 1;
}
@@ -743,19 +790,19 @@ erl_drv_busy_msgq_limits(ErlDrvPort dport, ErlDrvSizeT *lowp, ErlDrvSizeT *highp
else {
if (low > high) {
low = high;
- erts_smp_atomic_set_relb(&bpq->low, (erts_aint_t) low);
+ erts_atomic_set_relb(&bpq->low, (erts_aint_t) low);
}
bpq->high = high;
written = 1;
}
if (written) {
- ErlDrvSizeT size = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->size);
+ ErlDrvSizeT size = (ErlDrvSizeT) erts_atomic_read_nob(&bpq->size);
if (size > high)
- erts_smp_atomic32_read_bor_relb(&pp->sched.flags,
+ erts_atomic32_read_bor_relb(&pp->sched.flags,
ERTS_PTS_FLG_BUSY_PORT_Q);
else if (size < low)
- erts_smp_atomic32_read_bor_relb(&pp->sched.flags,
+ erts_atomic32_read_bor_relb(&pp->sched.flags,
ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q);
}
}
@@ -774,56 +821,62 @@ erl_drv_busy_msgq_limits(ErlDrvPort dport, ErlDrvSizeT *lowp, ErlDrvSizeT *highp
* No-suspend handles.
*/
-#ifdef ERTS_SMP
static void
free_port_task_handle_list(void *vpthlp)
{
erts_free(ERTS_ALC_T_PT_HNDL_LIST, vpthlp);
}
-#endif
static void
schedule_port_task_handle_list_free(ErtsPortTaskHandleList *pthlp)
{
-#ifdef ERTS_SMP
erts_schedule_thr_prgr_later_cleanup_op(free_port_task_handle_list,
(void *) pthlp,
&pthlp->u.release,
sizeof(ErtsPortTaskHandleList));
-#else
- erts_free(ERTS_ALC_T_PT_HNDL_LIST, pthlp);
-#endif
}
static ERTS_INLINE void
-abort_nosuspend_task(Port *pp,
- ErtsPortTaskType type,
- ErtsPortTaskTypeData *tdp)
+abort_signal_task(Port *pp,
+ int abort_type,
+ ErtsPortTaskType type,
+ ErtsPortTaskTypeData *tdp,
+ int bpq_data)
{
ASSERT(type == ERTS_PORT_TASK_PROC_SIG);
- if (!pp->sched.taskq.bpq)
+ if (!bpq_data)
tdp->psig.callback(NULL,
ERTS_PORT_SFLG_INVALID,
- ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND,
+ abort_type,
&tdp->psig.data);
else {
ErlDrvSizeT size = erts_proc2port_sig_command_data_size(&tdp->psig.data);
tdp->psig.callback(NULL,
ERTS_PORT_SFLG_INVALID,
- ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND,
+ abort_type,
&tdp->psig.data);
aborted_proc2port_data(pp, size);
}
}
+
+static ERTS_INLINE void
+abort_nosuspend_task(Port *pp,
+ ErtsPortTaskType type,
+ ErtsPortTaskTypeData *tdp,
+ int bpq_data)
+{
+ abort_signal_task(pp, ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND, type, tdp, bpq_data);
+}
+
static ErtsPortTaskHandleList *
get_free_nosuspend_handles(Port *pp)
{
ErtsPortTaskHandleList *nshp, *last_nshp = NULL;
- ERTS_SMP_LC_ASSERT(erts_port_task_sched_lock_is_locked(&pp->sched));
+ ERTS_LC_ASSERT(erts_port_task_sched_lock_is_locked(&pp->sched));
nshp = pp->sched.taskq.local.busy.nosuspend;
@@ -839,7 +892,7 @@ get_free_nosuspend_handles(Port *pp)
pp->sched.taskq.local.busy.nosuspend = last_nshp->u.next;
last_nshp->u.next = NULL;
if (!pp->sched.taskq.local.busy.nosuspend)
- erts_smp_atomic32_read_band_nob(&pp->sched.flags,
+ erts_atomic32_read_band_nob(&pp->sched.flags,
~ERTS_PTS_FLG_HAVE_NS_TASKS);
}
return nshp;
@@ -862,7 +915,7 @@ free_nosuspend_handles(ErtsPortTaskHandleList *free_nshp)
static ERTS_INLINE void
enqueue_port(ErtsRunQueue *runq, Port *pp)
{
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq));
pp->sched.next = NULL;
if (runq->ports.end) {
ASSERT(runq->ports.start);
@@ -876,19 +929,17 @@ enqueue_port(ErtsRunQueue *runq, Port *pp)
runq->ports.end = pp;
ASSERT(runq->ports.start && runq->ports.end);
- erts_smp_inc_runq_len(runq, &runq->ports.info, ERTS_PORT_PRIO_LEVEL);
+ erts_inc_runq_len(runq, &runq->ports.info, ERTS_PORT_PRIO_LEVEL);
-#ifdef ERTS_SMP
- if (runq->halt_in_progress)
+ if (ERTS_RUNQ_FLGS_GET_NOB(runq) & ERTS_RUNQ_FLG_HALTING)
erts_non_empty_runq(runq);
-#endif
}
static ERTS_INLINE Port *
pop_port(ErtsRunQueue *runq)
{
Port *pp = runq->ports.start;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq));
if (!pp) {
ASSERT(!runq->ports.end);
}
@@ -898,7 +949,7 @@ pop_port(ErtsRunQueue *runq)
ASSERT(runq->ports.end == pp);
runq->ports.end = NULL;
}
- erts_smp_dec_runq_len(runq, &runq->ports.info, ERTS_PORT_PRIO_LEVEL);
+ erts_dec_runq_len(runq, &runq->ports.info, ERTS_PORT_PRIO_LEVEL);
}
ASSERT(runq->ports.start || !runq->ports.end);
@@ -925,7 +976,7 @@ enqueue_task(Port *pp,
if (ns_pthlp)
fail_flags |= ERTS_PTS_FLG_BUSY_PORT;
erts_port_task_sched_lock(&pp->sched);
- flags = erts_smp_atomic32_read_nob(&pp->sched.flags);
+ flags = erts_atomic32_read_nob(&pp->sched.flags);
if (flags & fail_flags)
res = 0;
else {
@@ -956,7 +1007,7 @@ enqueue_task(Port *pp,
static ERTS_INLINE void
prepare_exec(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
{
- erts_aint32_t act = erts_smp_atomic32_read_nob(&pp->sched.flags);
+ erts_aint32_t act = erts_atomic32_read_nob(&pp->sched.flags);
if (!pp->sched.taskq.local.busy.first || (act & ERTS_PTS_FLG_BUSY_PORT)) {
*execqp = pp->sched.taskq.local.first;
@@ -977,7 +1028,7 @@ prepare_exec(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
new &= ~ERTS_PTS_FLG_IN_RUNQ;
new |= ERTS_PTS_FLG_EXEC;
- act = erts_smp_atomic32_cmpxchg_nob(&pp->sched.flags, new, exp);
+ act = erts_atomic32_cmpxchg_nob(&pp->sched.flags, new, exp);
ASSERT(act & ERTS_PTS_FLG_IN_RUNQ);
@@ -991,6 +1042,7 @@ static ERTS_INLINE int
finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q)
{
erts_aint32_t act;
+ unsigned int prof_runnable_ports;
if (!processing_busy_q)
pp->sched.taskq.local.first = *execq;
@@ -1003,10 +1055,14 @@ finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q)
*execq = NULL;
- act = erts_smp_atomic32_read_nob(&pp->sched.flags);
+ act = erts_atomic32_read_nob(&pp->sched.flags);
if (act & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q)
act = check_unset_busy_port_q(pp, act, pp->sched.taskq.bpq);
+ prof_runnable_ports = erts_system_profile_flags.runnable_ports;
+ if (prof_runnable_ports)
+ erts_port_task_sched_lock(&pp->sched);
+
while (1) {
erts_aint32_t new, exp;
@@ -1016,21 +1072,33 @@ finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q)
if (act & ERTS_PTS_FLG_HAVE_TASKS)
new |= ERTS_PTS_FLG_IN_RUNQ;
- act = erts_smp_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp);
+ act = erts_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp);
- ASSERT(!(act & ERTS_PTS_FLG_IN_RUNQ));
+ ERTS_LC_ASSERT(!(act & ERTS_PTS_FLG_IN_RUNQ));
+ ERTS_LC_ASSERT(!(act & ERTS_PTS_FLG_EXEC_IMM));
if (exp == act)
break;
}
+ if (prof_runnable_ports | IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
+ /* trace port scheduling, out */
+ if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS))
+ trace_sched_ports(pp, am_out);
+ if (prof_runnable_ports) {
+ if (!(act & (ERTS_PTS_FLG_EXEC_IMM|ERTS_PTS_FLG_HAVE_TASKS)))
+ profile_runnable_port(pp, am_inactive);
+ erts_port_task_sched_unlock(&pp->sched);
+ }
+ }
+
return (act & ERTS_PTS_FLG_HAVE_TASKS) != 0;
}
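Editorial note: finalize_exec() above uses the standard compare-and-swap retry loop — read the flag word, compute the desired successor value, attempt the exchange with release ordering, and retry if another thread changed the word in the meantime. The same shape in portable C11 atomics, as an illustrative sketch only (flag names and values are invented; ERTS uses its own erts_atomic32_* wrappers):

    #include <stdatomic.h>
    #include <stdint.h>

    #define FLG_IN_RUNQ   (1u << 0)
    #define FLG_EXEC      (1u << 1)
    #define FLG_HAVE_WORK (1u << 2)

    /* Sketch of the CAS retry pattern; not the ERTS implementation.
     * Clear EXEC and, if work remains, claim the run-queue slot --
     * all in one atomic step, retrying on contention. */
    static uint32_t leave_exec(_Atomic uint32_t *flags)
    {
        uint32_t act = atomic_load_explicit(flags, memory_order_relaxed);
        for (;;) {
            uint32_t new = act & ~FLG_EXEC;
            if (act & FLG_HAVE_WORK)
                new |= FLG_IN_RUNQ;
            /* On failure, act is reloaded with the current value. */
            if (atomic_compare_exchange_weak_explicit(
                    flags, &act, new,
                    memory_order_release, memory_order_relaxed))
                return act;   /* the value we transitioned from */
        }
    }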
static ERTS_INLINE erts_aint32_t
select_queue_for_exec(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
{
- erts_aint32_t flags = erts_smp_atomic32_read_nob(&pp->sched.flags);
+ erts_aint32_t flags = erts_atomic32_read_nob(&pp->sched.flags);
if (flags & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q)
flags = check_unset_busy_port_q(pp, flags, pp->sched.taskq.bpq);
@@ -1140,7 +1208,7 @@ fetch_in_queue(Port *pp, ErtsPortTask **execqp)
if (ptp)
*execqp = ptp->u.alive.next;
else
- erts_smp_atomic32_read_band_nob(&pp->sched.flags,
+ erts_atomic32_read_band_nob(&pp->sched.flags,
~ERTS_PTS_FLG_HAVE_TASKS);
@@ -1203,7 +1271,7 @@ erl_drv_consume_timeslice(ErlDrvPort dprt, int percent)
void
erts_port_task_tmp_handle_detach(ErtsPortTaskHandle *pthp)
{
- ERTS_SMP_LC_ASSERT(erts_thr_progress_lc_is_delaying());
+ ERTS_LC_ASSERT(erts_thr_progress_lc_is_delaying());
reset_port_task_handle(pthp);
}
@@ -1216,9 +1284,7 @@ erts_port_task_abort(ErtsPortTaskHandle *pthp)
{
int res;
ErtsPortTask *ptp;
-#ifdef ERTS_SMP
ErtsThrPrgrDelayHandle dhndl = erts_thr_progress_unmanaged_delay();
-#endif
ptp = handle2task(pthp);
if (!ptp)
@@ -1228,41 +1294,25 @@ erts_port_task_abort(ErtsPortTaskHandle *pthp)
#ifdef DEBUG
ErtsPortTaskHandle *saved_pthp = ptp->u.alive.handle;
- ERTS_SMP_READ_MEMORY_BARRIER;
- old_state = erts_smp_atomic32_read_nob(&ptp->state);
+ ERTS_THR_READ_MEMORY_BARRIER;
+ old_state = erts_atomic32_read_nob(&ptp->state);
if (old_state == ERTS_PT_STATE_SCHEDULED) {
ASSERT(!saved_pthp || saved_pthp == pthp);
}
#endif
- old_state = erts_smp_atomic32_cmpxchg_nob(&ptp->state,
+ old_state = erts_atomic32_cmpxchg_nob(&ptp->state,
ERTS_PT_STATE_ABORTED,
ERTS_PT_STATE_SCHEDULED);
if (old_state != ERTS_PT_STATE_SCHEDULED)
res = - 1; /* Task already aborted, executing, or executed */
else {
-
reset_port_task_handle(pthp);
-
- switch (ptp->type) {
- case ERTS_PORT_TASK_INPUT:
- case ERTS_PORT_TASK_OUTPUT:
- case ERTS_PORT_TASK_EVENT:
- ASSERT(erts_smp_atomic_read_nob(
- &erts_port_task_outstanding_io_tasks) > 0);
- erts_smp_atomic_dec_relb(&erts_port_task_outstanding_io_tasks);
- break;
- default:
- break;
- }
-
res = 0;
}
}
-#ifdef ERTS_SMP
erts_thr_progress_unmanaged_continue(dhndl);
-#endif
return res;
}
@@ -1271,12 +1321,10 @@ void
erts_port_task_abort_nosuspend_tasks(Port *pp)
{
ErtsPortTaskHandleList *abort_list;
-#ifdef ERTS_SMP
ErtsThrPrgrDelayHandle dhndl = ERTS_THR_PRGR_DHANDLE_INVALID;
-#endif
erts_port_task_sched_lock(&pp->sched);
- erts_smp_atomic32_read_band_nob(&pp->sched.flags,
+ erts_atomic32_read_band_nob(&pp->sched.flags,
~ERTS_PTS_FLG_HAVE_NS_TASKS);
abort_list = pp->sched.taskq.local.busy.nosuspend;
pp->sched.taskq.local.busy.nosuspend = NULL;
@@ -1296,40 +1344,34 @@ erts_port_task_abort_nosuspend_tasks(Port *pp)
pthlp = abort_list;
abort_list = pthlp->u.next;
-#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
dhndl = erts_thr_progress_unmanaged_delay();
-#endif
pthp = &pthlp->handle;
ptp = handle2task(pthp);
if (!ptp) {
-#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
erts_thr_progress_unmanaged_continue(dhndl);
-#endif
schedule_port_task_handle_list_free(pthlp);
continue;
}
#ifdef DEBUG
saved_pthp = ptp->u.alive.handle;
- ERTS_SMP_READ_MEMORY_BARRIER;
- old_state = erts_smp_atomic32_read_nob(&ptp->state);
+ ERTS_THR_READ_MEMORY_BARRIER;
+ old_state = erts_atomic32_read_nob(&ptp->state);
if (old_state == ERTS_PT_STATE_SCHEDULED) {
ASSERT(saved_pthp == pthp);
}
#endif
- old_state = erts_smp_atomic32_cmpxchg_nob(&ptp->state,
+ old_state = erts_atomic32_cmpxchg_nob(&ptp->state,
ERTS_PT_STATE_ABORTED,
ERTS_PT_STATE_SCHEDULED);
if (old_state != ERTS_PT_STATE_SCHEDULED) {
/* Task already aborted, executing, or executed */
-#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
erts_thr_progress_unmanaged_continue(dhndl);
-#endif
schedule_port_task_handle_list_free(pthlp);
continue;
}
@@ -1339,13 +1381,11 @@ erts_port_task_abort_nosuspend_tasks(Port *pp)
type = ptp->type;
td = ptp->u.alive.td;
-#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
erts_thr_progress_unmanaged_continue(dhndl);
-#endif
schedule_port_task_handle_list_free(pthlp);
- abort_nosuspend_task(pp, type, &td);
+ abort_nosuspend_task(pp, type, &td, pp->sched.taskq.bpq != NULL);
}
}
@@ -1361,46 +1401,38 @@ erts_port_task_schedule(Eterm id,
{
ErtsProc2PortSigData *sigdp = NULL;
ErtsPortTaskHandleList *ns_pthlp = NULL;
-#ifdef ERTS_SMP
ErtsRunQueue *xrunq;
ErtsThrPrgrDelayHandle dhndl;
-#endif
ErtsRunQueue *runq;
Port *pp;
ErtsPortTask *ptp = NULL;
erts_aint32_t act, add_flags;
+ unsigned int prof_runnable_ports;
- if (pthp && erts_port_task_is_scheduled(pthp)) {
- ASSERT(0);
- erts_port_task_abort(pthp);
- }
+ ERTS_LC_ASSERT(!pthp || !erts_port_task_is_scheduled(pthp));
ASSERT(is_internal_port(id));
-#ifdef ERTS_SMP
dhndl = erts_thr_progress_unmanaged_delay();
-#endif
pp = erts_port_lookup_raw(id);
-#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) {
if (pp)
erts_port_inc_refc(pp);
erts_thr_progress_unmanaged_continue(dhndl);
}
-#endif
-
- if (!pp)
- goto fail;
if (type != ERTS_PORT_TASK_PROC_SIG) {
+ if (!pp)
+ goto fail;
+
ptp = port_task_alloc();
ptp->type = type;
ptp->u.alive.flags = 0;
- erts_smp_atomic32_init_nob(&ptp->state, ERTS_PT_STATE_SCHEDULED);
+ erts_atomic32_init_nob(&ptp->state, ERTS_PT_STATE_SCHEDULED);
set_handle(ptp, pthp);
}
@@ -1412,16 +1444,6 @@ erts_port_task_schedule(Eterm id,
va_start(argp, type);
ptp->u.alive.td.io.event = va_arg(argp, ErlDrvEvent);
va_end(argp);
- erts_smp_atomic_inc_relb(&erts_port_task_outstanding_io_tasks);
- break;
- }
- case ERTS_PORT_TASK_EVENT: {
- va_list argp;
- va_start(argp, type);
- ptp->u.alive.td.io.event = va_arg(argp, ErlDrvEvent);
- ptp->u.alive.td.io.event_data = va_arg(argp, ErlDrvEventData);
- va_end(argp);
- erts_smp_atomic_inc_relb(&erts_port_task_outstanding_io_tasks);
break;
}
case ERTS_PORT_TASK_PROC_SIG: {
@@ -1432,6 +1454,9 @@ erts_port_task_schedule(Eterm id,
ptp->u.alive.td.psig.callback = va_arg(argp, ErtsProc2PortSigCallback);
ptp->u.alive.flags |= va_arg(argp, int);
va_end(argp);
+ if (!pp)
+ goto fail;
+
if (!(ptp->u.alive.flags & ERTS_PT_FLG_NOSUSPEND))
set_tmp_handle(ptp, pthp);
else {
@@ -1457,6 +1482,10 @@ erts_port_task_schedule(Eterm id,
if (ns_pthlp)
add_flags |= ERTS_PTS_FLG_HAVE_NS_TASKS;
+ prof_runnable_ports = erts_system_profile_flags.runnable_ports;
+ if (prof_runnable_ports)
+ erts_port_task_sched_lock(&pp->sched);
+
while (1) {
erts_aint32_t new, exp;
@@ -1469,7 +1498,7 @@ erts_port_task_schedule(Eterm id,
if (!(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)))
new |= ERTS_PTS_FLG_IN_RUNQ;
- act = erts_smp_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp);
+ act = erts_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp);
if (exp == act) {
if (!(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)))
@@ -1481,72 +1510,76 @@ erts_port_task_schedule(Eterm id,
goto done; /* Died after our task insert... */
}
+ if (prof_runnable_ports) {
+ if (!(act & ERTS_PTS_FLG_EXEC_IMM))
+ profile_runnable_port(pp, am_active);
+ erts_port_task_sched_unlock(&pp->sched);
+ prof_runnable_ports = 0;
+ }
+
/* Enqueue port on run-queue */
runq = erts_port_runq(pp);
if (!runq)
ERTS_INTERNAL_ERROR("Missing run-queue");
-#ifdef ERTS_SMP
xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
+ ERTS_LC_ASSERT(runq != xrunq);
+ ERTS_LC_VERIFY_RQ(runq, pp);
if (xrunq) {
- /* Port emigrated ... */
- erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
- erts_smp_runq_unlock(runq);
+ /* Emigrate port ... */
+ erts_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
+ erts_runq_unlock(runq);
runq = erts_port_runq(pp);
if (!runq)
ERTS_INTERNAL_ERROR("Missing run-queue");
}
-#endif
enqueue_port(runq, pp);
-
- if (erts_system_profile_flags.runnable_ports) {
- profile_runnable_port(pp, am_active);
- }
- erts_smp_runq_unlock(runq);
+ erts_runq_unlock(runq);
- erts_smp_notify_inc_runq(runq);
+ erts_notify_inc_runq(runq);
done:
-#ifdef ERTS_SMP
+ if (prof_runnable_ports)
+ erts_port_task_sched_unlock(&pp->sched);
+
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
erts_port_dec_refc(pp);
-#endif
return 0;
abort_nosuspend:
-#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
erts_port_dec_refc(pp);
-#endif
- abort_nosuspend_task(pp, ptp->type, &ptp->u.alive.td);
+ abort_nosuspend_task(pp, ptp->type, &ptp->u.alive.td, 0);
ASSERT(ns_pthlp);
erts_free(ERTS_ALC_T_PT_HNDL_LIST, ns_pthlp);
- if (ptp)
- port_task_free(ptp);
+
+ ASSERT(ptp);
+ port_task_free(ptp);
return 0;
fail:
-#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
erts_port_dec_refc(pp);
-#endif
+
+ if (ptp) {
+ abort_signal_task(pp, ERTS_PROC2PORT_SIG_ABORT,
+ ptp->type, &ptp->u.alive.td, 0);
+ port_task_free(ptp);
+ }
if (ns_pthlp)
erts_free(ERTS_ALC_T_PT_HNDL_LIST, ns_pthlp);
- if (ptp)
- port_task_free(ptp);
-
return -1;
}
@@ -1556,14 +1589,14 @@ erts_port_task_free_port(Port *pp)
erts_aint32_t flags;
ErtsRunQueue *runq;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(pp));
ASSERT(!(erts_atomic32_read_nob(&pp->state) & ERTS_PORT_SFLGS_DEAD));
runq = erts_port_runq(pp);
if (!runq)
ERTS_INTERNAL_ERROR("Missing run-queue");
erts_port_task_sched_lock(&pp->sched);
- flags = erts_smp_atomic32_read_bor_relb(&pp->sched.flags,
+ flags = erts_atomic32_read_bor_relb(&pp->sched.flags,
ERTS_PTS_FLG_EXIT);
erts_port_task_sched_unlock(&pp->sched);
erts_atomic32_read_bset_relb(&pp->state,
@@ -1573,7 +1606,7 @@ erts_port_task_free_port(Port *pp)
| ERTS_PORT_SFLG_FREE),
ERTS_PORT_SFLG_FREE);
- erts_smp_runq_unlock(runq);
+ erts_runq_unlock(runq);
if (!(flags & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)))
begin_port_cleanup(pp, NULL, NULL);
@@ -1586,13 +1619,12 @@ erts_port_task_free_port(Port *pp)
* scheduling of processes. Run-queue lock should be held by caller.
*/
-int
+void
erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
{
Port *pp;
ErtsPortTask *execq;
int processing_busy_q;
- int res = 0;
int vreds = 0;
int reds = 0;
erts_aint_t io_tasks_executed = 0;
@@ -1600,37 +1632,39 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
erts_aint32_t state;
int active;
Uint64 start_time = 0;
+ ErtsSchedulerData *esdp = runq->scheduler;
+ ERTS_MSACC_PUSH_STATE_M();
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq));
pp = pop_port(runq);
if (!pp) {
- res = 0;
goto done;
}
- erts_smp_runq_unlock(runq);
+ ERTS_LC_VERIFY_RQ(runq, pp);
+
+ erts_runq_unlock(runq);
*curr_port_pp = pp;
if (erts_sched_stat.enabled) {
- ErtsSchedulerData *esdp = erts_get_scheduler_data();
Uint old = ERTS_PORT_SCHED_ID(pp, esdp->no);
int migrated = old && old != esdp->no;
- erts_smp_spin_lock(&erts_sched_stat.lock);
+ erts_spin_lock(&erts_sched_stat.lock);
erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].total_executed++;
erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].executed++;
if (migrated) {
erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].total_migrated++;
erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].migrated++;
}
- erts_smp_spin_unlock(&erts_sched_stat.lock);
+ erts_spin_unlock(&erts_sched_stat.lock);
}
prepare_exec(pp, &execq, &processing_busy_q);
- erts_smp_port_lock(pp);
+ erts_port_lock(pp);
/* trace port scheduling, in */
if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
@@ -1641,6 +1675,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
state = erts_atomic32_read_nob(&pp->state);
pp->reds = ERTS_PORT_REDS_EXECUTE;
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_PORT);
goto begin_handle_tasks;
while (1) {
@@ -1651,7 +1686,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
if (!ptp)
break;
- task_state = erts_smp_atomic32_cmpxchg_nob(&ptp->state,
+ task_state = erts_atomic32_cmpxchg_nob(&ptp->state,
ERTS_PT_STATE_EXECUTING,
ERTS_PT_STATE_SCHEDULED);
if (task_state != ERTS_PT_STATE_SCHEDULED) {
@@ -1659,53 +1694,56 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
goto aborted_port_task;
}
- reset_handle(ptp);
-
if (erts_system_monitor_long_schedule != 0) {
start_time = erts_timestamp_millis();
}
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(pp));
+ ERTS_CHK_NO_PROC_LOCKS;
ASSERT(pp->drv_ptr);
switch (ptp->type) {
case ERTS_PORT_TASK_TIMEOUT:
- reds = ERTS_PORT_REDS_TIMEOUT;
- if (!(state & ERTS_PORT_SFLGS_DEAD)) {
- DTRACE_DRIVER(driver_timeout, pp);
- (*pp->drv_ptr->timeout)((ErlDrvData) pp->drv_data);
- }
+ reset_handle(ptp);
+ if (!ERTS_PTMR_IS_TIMED_OUT(pp))
+ reds = 0;
+ else {
+ ERTS_PTMR_CLEAR(pp);
+ reds = ERTS_PORT_REDS_TIMEOUT;
+ if (!(state & ERTS_PORT_SFLGS_DEAD)) {
+ DTRACE_DRIVER(driver_timeout, pp);
+ LTTNG_DRIVER(driver_timeout, pp);
+ if (IS_TRACED_FL(pp, F_TRACE_RECEIVE))
+ trace_port(pp, am_receive, am_timeout);
+ (*pp->drv_ptr->timeout)((ErlDrvData) pp->drv_data);
+ }
+ }
break;
case ERTS_PORT_TASK_INPUT:
reds = ERTS_PORT_REDS_INPUT;
ASSERT((state & ERTS_PORT_SFLGS_DEAD) == 0);
DTRACE_DRIVER(driver_ready_input, pp);
- /* NOTE some windows/ose drivers use ->ready_input
+ LTTNG_DRIVER(driver_ready_input, pp);
+ /* NOTE some windows drivers use ->ready_input
for input and output */
(*pp->drv_ptr->ready_input)((ErlDrvData) pp->drv_data,
ptp->u.alive.td.io.event);
+ reset_executed_io_task_handle(ptp);
io_tasks_executed++;
break;
case ERTS_PORT_TASK_OUTPUT:
reds = ERTS_PORT_REDS_OUTPUT;
ASSERT((state & ERTS_PORT_SFLGS_DEAD) == 0);
DTRACE_DRIVER(driver_ready_output, pp);
+ LTTNG_DRIVER(driver_ready_output, pp);
(*pp->drv_ptr->ready_output)((ErlDrvData) pp->drv_data,
ptp->u.alive.td.io.event);
- io_tasks_executed++;
- break;
- case ERTS_PORT_TASK_EVENT:
- reds = ERTS_PORT_REDS_EVENT;
- ASSERT((state & ERTS_PORT_SFLGS_DEAD) == 0);
- DTRACE_DRIVER(driver_event, pp);
- (*pp->drv_ptr->event)((ErlDrvData) pp->drv_data,
- ptp->u.alive.td.io.event,
- ptp->u.alive.td.io.event_data);
+ reset_executed_io_task_handle(ptp);
io_tasks_executed++;
break;
case ERTS_PORT_TASK_PROC_SIG: {
ErtsProc2PortSigData *sigdp = &ptp->u.alive.td.psig.data;
+ reset_handle(ptp);
ASSERT((state & ERTS_PORT_SFLGS_DEAD) == 0);
if (!pp->sched.taskq.bpq)
reds = ptp->u.alive.td.psig.callback(pp,
@@ -1723,10 +1761,11 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
break;
}
case ERTS_PORT_TASK_DIST_CMD:
+ reset_handle(ptp);
reds = erts_dist_command(pp, CONTEXT_REDS - pp->reds);
break;
default:
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"Invalid port task type: %d\n",
(int) ptp->type);
break;
@@ -1764,22 +1803,9 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
}
erts_unblock_fpe(fpe_was_unmasked);
+ ERTS_MSACC_POP_STATE_M();
- /* trace port scheduling, out */
- if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
- trace_sched_ports(pp, am_out);
- }
-
- if (io_tasks_executed) {
- ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
- >= io_tasks_executed);
- erts_smp_atomic_add_relb(&erts_port_task_outstanding_io_tasks,
- -1*io_tasks_executed);
- }
-
-#ifdef ERTS_SMP
- ASSERT(runq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue));
-#endif
+ ASSERT(runq == (ErtsRunQueue *) erts_atomic_read_nob(&pp->run_queue));
active = finalize_exec(pp, &execq, processing_busy_q);
@@ -1789,56 +1815,43 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
*curr_port_pp = NULL;
- erts_smp_runq_lock(runq);
+ erts_runq_lock(runq);
- if (!active) {
- if (erts_system_profile_flags.runnable_ports)
- profile_runnable_port(pp, am_inactive);
- }
- else {
-#ifdef ERTS_SMP
+ if (active) {
ErtsRunQueue *xrunq;
-#endif
ASSERT(!(erts_atomic32_read_nob(&pp->state) & ERTS_PORT_SFLGS_DEAD));
-#ifdef ERTS_SMP
xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
+ ERTS_LC_ASSERT(runq != xrunq);
+ ERTS_LC_VERIFY_RQ(runq, pp);
if (!xrunq) {
-#endif
enqueue_port(runq, pp);
/* No need to notify ourselves about inc in runq. */
-#ifdef ERTS_SMP
}
else {
- /* Port emigrated ... */
- erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
- erts_smp_runq_unlock(runq);
+ /* Emigrate port... */
+ erts_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
+ erts_runq_unlock(runq);
xrunq = erts_port_runq(pp);
ASSERT(xrunq);
enqueue_port(xrunq, pp);
- erts_smp_runq_unlock(xrunq);
- erts_smp_notify_inc_runq(xrunq);
+ erts_runq_unlock(xrunq);
+ erts_notify_inc_runq(xrunq);
- erts_smp_runq_lock(runq);
+ erts_runq_lock(runq);
}
-#endif
}
done:
- res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
- != (erts_aint_t) 0);
runq->scheduler->reductions += reds;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
- ERTS_PORT_REDUCTIONS_EXECUTED(runq, reds);
-
- return res;
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq));
+ ERTS_PORT_REDUCTIONS_EXECUTED(esdp, runq, reds);
}
-#ifdef ERTS_SMP
static void
release_port(void *vport)
{
@@ -1854,7 +1867,6 @@ schedule_release_port(void *vport) {
&pp->common.u.release);
}
-#endif
static void
begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
@@ -1865,7 +1877,7 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
ErtsPortTaskHandleList *free_nshp = NULL;
ErtsProcList *plp;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(pp));
/*
* Abort remaining tasks...
@@ -1938,11 +1950,11 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
qs[i] = ptp->u.alive.next;
/* Normal case here is aborted tasks... */
- state = erts_smp_atomic32_read_nob(&ptp->state);
+ state = erts_atomic32_read_nob(&ptp->state);
if (state == ERTS_PT_STATE_ABORTED)
goto aborted_port_task;
- state = erts_smp_atomic32_cmpxchg_nob(&ptp->state,
+ state = erts_atomic32_cmpxchg_nob(&ptp->state,
ERTS_PT_STATE_EXECUTING,
ERTS_PT_STATE_SCHEDULED);
if (state != ERTS_PT_STATE_SCHEDULED) {
@@ -1969,13 +1981,6 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
DO_WRITE,
1);
break;
- case ERTS_PORT_TASK_EVENT:
- erts_stale_drv_select(pp->common.id,
- ERTS_Port2ErlDrvPort(pp),
- ptp->u.alive.td.io.event,
- 0,
- 1);
- break;
case ERTS_PORT_TASK_DIST_CMD:
break;
case ERTS_PORT_TASK_PROC_SIG: {
@@ -1996,7 +2001,7 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
break;
}
default:
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"Invalid port task type: %d\n",
(int) ptp->type);
}
@@ -2006,7 +2011,7 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
}
}
- erts_smp_atomic32_read_band_nob(&pp->sched.flags,
+ erts_atomic32_read_band_nob(&pp->sched.flags,
~(ERTS_PTS_FLG_HAVE_BUSY_TASKS
|ERTS_PTS_FLG_HAVE_TASKS
|ERTS_PTS_FLGS_BUSY));
@@ -2048,7 +2053,6 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
/*
* Schedule cleanup of port structure...
*/
-#ifdef ERTS_SMP
    /* We might not be a scheduler, e.g. when tracing to a port from the sys_msg_dispatcher */
if (!erts_get_scheduler_data()) {
erts_schedule_misc_aux_work(1, schedule_release_port, (void*)pp);
@@ -2058,26 +2062,15 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
(void *) pp,
&pp->common.u.release);
}
-#else
- pp->cleanup = 1;
-#endif
-}
-
-int
-erts_port_is_scheduled(Port *pp)
-{
- erts_aint32_t flags = erts_smp_atomic32_read_acqb(&pp->sched.flags);
- return (flags & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)) != 0;
}
-#ifdef ERTS_SMP
void
erts_enqueue_port(ErtsRunQueue *rq, Port *pp)
{
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
- ASSERT(rq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue));
- ASSERT(erts_smp_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_IN_RUNQ);
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
+ ASSERT(rq == (ErtsRunQueue *) erts_atomic_read_nob(&pp->run_queue));
+ ASSERT(erts_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_IN_RUNQ);
enqueue_port(rq, pp);
}
@@ -2085,16 +2078,15 @@ Port *
erts_dequeue_port(ErtsRunQueue *rq)
{
Port *pp;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
pp = pop_port(rq);
ASSERT(!pp
- || rq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue));
- ASSERT(!pp || (erts_smp_atomic32_read_nob(&pp->sched.flags)
+ || rq == (ErtsRunQueue *) erts_atomic_read_nob(&pp->run_queue));
+ ASSERT(!pp || (erts_atomic32_read_nob(&pp->sched.flags)
& ERTS_PTS_FLG_IN_RUNQ));
return pp;
}
-#endif
/*
* Initialize the module.
@@ -2102,8 +2094,7 @@ erts_dequeue_port(ErtsRunQueue *rq)
void
erts_port_task_init(void)
{
- erts_smp_atomic_init_nob(&erts_port_task_outstanding_io_tasks,
- (erts_aint_t) 0);
- init_port_task_alloc();
+ init_port_task_alloc(erts_no_schedulers + erts_no_poll_threads
+ + 1); /* aux_thread */
init_busy_caller_table_alloc();
}
diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h
index 1d30465ec9..ae78a7d8a3 100644
--- a/erts/emulator/beam/erl_port_task.h
+++ b/erts/emulator/beam/erl_port_task.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -26,11 +27,11 @@
#ifndef ERTS_PORT_TASK_H_BASIC_TYPES__
#define ERTS_PORT_TASK_H_BASIC_TYPES__
#include "erl_sys_driver.h"
-#include "erl_smp.h"
+#include "erl_threads.h"
#define ERL_PORT_GET_PORT_TYPE_ONLY__
#include "erl_port.h"
#undef ERL_PORT_GET_PORT_TYPE_ONLY__
-typedef erts_smp_atomic_t ErtsPortTaskHandle;
+typedef erts_atomic_t ErtsPortTaskHandle;
#endif
#ifndef ERTS_PORT_TASK_ONLY_BASIC_TYPES__
@@ -55,17 +56,11 @@ typedef erts_smp_atomic_t ErtsPortTaskHandle;
typedef enum {
ERTS_PORT_TASK_INPUT,
ERTS_PORT_TASK_OUTPUT,
- ERTS_PORT_TASK_EVENT,
ERTS_PORT_TASK_TIMEOUT,
ERTS_PORT_TASK_DIST_CMD,
ERTS_PORT_TASK_PROC_SIG
} ErtsPortTaskType;
-#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
-/* NOTE: Do not access any of the exported variables directly */
-extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
-#endif
-
#define ERTS_PTS_FLG_IN_RUNQ (((erts_aint32_t) 1) << 0)
#define ERTS_PTS_FLG_EXEC (((erts_aint32_t) 1) << 1)
#define ERTS_PTS_FLG_HAVE_TASKS (((erts_aint32_t) 1) << 2)
@@ -78,6 +73,7 @@ extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
#define ERTS_PTS_FLG_PARALLELISM (((erts_aint32_t) 1) << 9)
#define ERTS_PTS_FLG_FORCE_SCHED (((erts_aint32_t) 1) << 10)
#define ERTS_PTS_FLG_EXITING (((erts_aint32_t) 1) << 11)
+#define ERTS_PTS_FLG_EXEC_IMM (((erts_aint32_t) 1) << 12)
#define ERTS_PTS_FLGS_BUSY \
(ERTS_PTS_FLG_BUSY_PORT | ERTS_PTS_FLG_BUSY_PORT_Q)
@@ -87,6 +83,7 @@ extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
| ERTS_PTS_FLG_HAVE_BUSY_TASKS \
| ERTS_PTS_FLG_HAVE_TASKS \
| ERTS_PTS_FLG_EXEC \
+ | ERTS_PTS_FLG_EXEC_IMM \
| ERTS_PTS_FLG_FORCE_SCHED \
| ERTS_PTS_FLG_EXITING)
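Editorial note: a detail worth keeping in mind for the flag bits defined above is that the erts_atomic32_read_bor_*/read_band_* operations return the value the word held *before* the modification. That is why call sites in the .c file re-apply the bit to their local copy afterwards (e.g. `flags |= ERTS_PTS_FLG_BUSY_PORT_Q` right after the read_bor). A hedged sketch of the equivalent C11 primitive:

    #include <stdatomic.h>
    #include <stdint.h>

    /* Illustrative equivalent of erts_atomic32_read_bor_acqb():
     * atomically OR the bits in and return the previous value. */
    static uint32_t read_bor(_Atomic uint32_t *word, uint32_t bits)
    {
        return atomic_fetch_or_explicit(word, bits, memory_order_acq_rel);
    }

    /* Typical call-site pattern from the diff (names invented):
     *   flags = read_bor(&sched_flags, BUSY_BIT);  // value before the OR
     *   flags |= BUSY_BIT;                         // our post-OR view
     */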
@@ -95,8 +92,8 @@ extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
typedef struct {
ErlDrvSizeT high;
- erts_smp_atomic_t low;
- erts_smp_atomic_t size;
+ erts_atomic_t low;
+ erts_atomic_t size;
} ErtsPortTaskBusyPortQ;
typedef struct ErtsPortTask_ ErtsPortTask;
@@ -121,10 +118,8 @@ typedef struct {
} in;
ErtsPortTaskBusyPortQ *bpq;
} taskq;
- erts_smp_atomic32_t flags;
-#ifdef ERTS_SMP
+ erts_atomic32_t flags;
erts_mtx_t mtx;
-#endif
} ErtsPortTaskSched;
ERTS_GLB_INLINE void erts_port_task_handle_init(ErtsPortTaskHandle *pthp);
@@ -139,22 +134,18 @@ ERTS_GLB_INLINE void erts_port_task_sched_unlock(ErtsPortTaskSched *ptsp);
ERTS_GLB_INLINE int erts_port_task_sched_lock_is_locked(ErtsPortTaskSched *ptsp);
ERTS_GLB_INLINE void erts_port_task_sched_enter_exiting_state(ErtsPortTaskSched *ptsp);
-#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
-ERTS_GLB_INLINE int erts_port_task_have_outstanding_io_tasks(void);
-#endif
-
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
erts_port_task_handle_init(ErtsPortTaskHandle *pthp)
{
- erts_smp_atomic_init_nob(pthp, (erts_aint_t) NULL);
+ erts_atomic_init_nob(pthp, (erts_aint_t) NULL);
}
ERTS_GLB_INLINE int
erts_port_task_is_scheduled(ErtsPortTaskHandle *pthp)
{
- return ((void *) erts_smp_atomic_read_nob(pthp)) != NULL;
+ return ((void *) erts_atomic_read_acqb(pthp)) != NULL;
}
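Editorial note: the change from erts_atomic_read_nob() to erts_atomic_read_acqb() here gives the handle read acquire semantics, pairing it with the release store performed when a task is published; a reader that observes a non-NULL handle is then also guaranteed to observe the task fields written before publication. A small C11 sketch of that acquire/release pairing (types and names invented for illustration):

    #include <stdatomic.h>
    #include <stddef.h>

    /* Illustrative sketch only; not the ERTS types. */
    typedef struct { int payload; } task;

    static _Atomic(task *) handle;

    /* Publisher: initialize the task, then release-store the pointer. */
    static void publish(task *t)
    {
        t->payload = 42;
        atomic_store_explicit(&handle, t, memory_order_release);
    }

    /* Consumer: the acquire load pairs with the release store, so
     * payload is visible whenever the pointer is. */
    static int is_scheduled(void)
    {
        task *t = atomic_load_explicit(&handle, memory_order_acquire);
        return t != NULL;
    }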
ERTS_GLB_INLINE void erts_port_task_pre_init_sched(ErtsPortTaskSched *ptsp,
@@ -162,9 +153,9 @@ ERTS_GLB_INLINE void erts_port_task_pre_init_sched(ErtsPortTaskSched *ptsp,
{
if (bpq) {
erts_aint_t low = (erts_aint_t) ERTS_PORT_TASK_DEFAULT_BUSY_PORT_Q_LOW;
- erts_smp_atomic_init_nob(&bpq->low, low);
+ erts_atomic_init_nob(&bpq->low, low);
bpq->high = (ErlDrvSizeT) ERTS_PORT_TASK_DEFAULT_BUSY_PORT_Q_HIGH;
- erts_smp_atomic_init_nob(&bpq->size, (erts_aint_t) 0);
+ erts_atomic_init_nob(&bpq->size, (erts_aint_t) 0);
}
ptsp->taskq.bpq = bpq;
}
@@ -172,9 +163,7 @@ ERTS_GLB_INLINE void erts_port_task_pre_init_sched(ErtsPortTaskSched *ptsp,
ERTS_GLB_INLINE void
erts_port_task_init_sched(ErtsPortTaskSched *ptsp, Eterm instr_id)
{
-#ifdef ERTS_SMP
char *lock_str = "port_sched_lock";
-#endif
ptsp->next = NULL;
ptsp->taskq.local.busy.first = NULL;
ptsp->taskq.local.busy.last = NULL;
@@ -183,38 +172,26 @@ erts_port_task_init_sched(ErtsPortTaskSched *ptsp, Eterm instr_id)
ptsp->taskq.local.first = NULL;
ptsp->taskq.in.first = NULL;
ptsp->taskq.in.last = NULL;
- erts_smp_atomic32_init_nob(&ptsp->flags, 0);
-#ifdef ERTS_SMP
- erts_mtx_init_x(&ptsp->mtx, lock_str, instr_id,
-#ifdef ERTS_ENABLE_LOCK_COUNT
- (erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK)
-#else
- 1
-#endif
- );
-#endif
+ erts_atomic32_init_nob(&ptsp->flags, 0);
+ erts_mtx_init(&ptsp->mtx, lock_str, instr_id, ERTS_LOCK_FLAGS_CATEGORY_IO);
}
ERTS_GLB_INLINE void
erts_port_task_sched_lock(ErtsPortTaskSched *ptsp)
{
-#ifdef ERTS_SMP
erts_mtx_lock(&ptsp->mtx);
-#endif
}
ERTS_GLB_INLINE void
erts_port_task_sched_unlock(ErtsPortTaskSched *ptsp)
{
-#ifdef ERTS_SMP
erts_mtx_unlock(&ptsp->mtx);
-#endif
}
ERTS_GLB_INLINE int
erts_port_task_sched_lock_is_locked(ErtsPortTaskSched *ptsp)
{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
return erts_lc_mtx_is_locked(&ptsp->mtx);
#else
return 0;
@@ -225,35 +202,25 @@ erts_port_task_sched_lock_is_locked(ErtsPortTaskSched *ptsp)
ERTS_GLB_INLINE void
erts_port_task_fini_sched(ErtsPortTaskSched *ptsp)
{
-#ifdef ERTS_SMP
erts_mtx_destroy(&ptsp->mtx);
-#endif
}
ERTS_GLB_INLINE void
erts_port_task_sched_enter_exiting_state(ErtsPortTaskSched *ptsp)
{
- erts_smp_atomic32_read_bor_nob(&ptsp->flags, ERTS_PTS_FLG_EXITING);
+ erts_atomic32_read_bor_nob(&ptsp->flags, ERTS_PTS_FLG_EXITING);
}
-#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
-
-ERTS_GLB_INLINE int
-erts_port_task_have_outstanding_io_tasks(void)
-{
- return (erts_smp_atomic_read_acqb(&erts_port_task_outstanding_io_tasks)
- != 0);
-}
-
-#endif /* ERTS_INCLUDE_SCHEDULER_INTERNALS */
-
#endif
#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
-int erts_port_task_execute(ErtsRunQueue *, Port **);
+void erts_port_task_execute(ErtsRunQueue *, Port **);
void erts_port_task_init(void);
#endif
+/* generated for 'port_task' quick allocator */
+void erts_port_task_pre_alloc_init_thread(void);
+
void erts_port_task_tmp_handle_detach(ErtsPortTaskHandle *);
int erts_port_task_abort(ErtsPortTaskHandle *);
@@ -264,13 +231,12 @@ int erts_port_task_schedule(Eterm,
ErtsPortTaskType,
...);
void erts_port_task_free_port(Port *);
-int erts_port_is_scheduled(Port *);
ErtsProc2PortSigData *erts_port_task_alloc_p2p_sig_data(void);
+ErtsProc2PortSigData *erts_port_task_alloc_p2p_sig_data_extra(size_t extra, void **extra_ptr);
+void erts_port_task_free_p2p_sig_data(ErtsProc2PortSigData *sigdp);
-#ifdef ERTS_SMP
void erts_enqueue_port(ErtsRunQueue *rq, Port *pp);
Port *erts_dequeue_port(ErtsRunQueue *rq);
-#endif
#undef ERTS_INCLUDE_SCHEDULER_INTERNALS
#endif /* ERL_PORT_TASK_H__ */
#endif /* ERTS_PORT_TASK_ONLY_BASIC_TYPES__ */
diff --git a/erts/emulator/beam/erl_printf_term.c b/erts/emulator/beam/erl_printf_term.c
index d18760dc43..e6f8460164 100644
--- a/erts/emulator/beam/erl_printf_term.c
+++ b/erts/emulator/beam/erl_printf_term.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -25,6 +26,7 @@
#include "sys.h"
#include "big.h"
#include "erl_map.h"
+#include "erl_binary.h"
#define PRINT_CHAR(CNT, FN, ARG, C) \
do { \
@@ -115,13 +117,12 @@ do { \
/* return 0 unless the list is a non-empty flat list of printable characters */
static int
-is_printable_string(Eterm list, Eterm* base)
-{
+is_printable_string(Eterm list) {
int len = 0;
int c;
while(is_list(list)) {
- Eterm* consp = list_val_rel(list, base);
+ Eterm* consp = list_val(list);
Eterm hd = CAR(consp);
if (!is_byte(hd))
@@ -138,6 +139,25 @@ is_printable_string(Eterm list, Eterm* base)
return 0;
}
+static int is_printable_ascii(byte* bytep, Uint bytesize, Uint bitoffs)
+{
+ if (!bitoffs) {
+ while (bytesize--) {
+ if (*bytep < ' ' || *bytep >= 127)
+ return 0;
+ bytep++;
+ }
+ } else {
+ while (bytesize--) {
+ byte octet = (bytep[0] << bitoffs) | (bytep[1] >> (8-bitoffs));
+ if (octet < ' ' || octet >= 127)
+ return 0;
+ bytep++;
+ }
+ }
+ return 1;
+}
+
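Editorial note: is_printable_ascii() above has to cope with binaries whose data does not start on a byte boundary. When `bitoffs` is non-zero, each logical octet straddles two storage bytes and is reassembled by shifting the pair together, exactly as in the expression the diff adds. The same extraction as a stand-alone helper (illustrative, not the ERTS API):

    #include <stdint.h>

    /* Illustrative sketch: read the octet that starts bitoffs bits
     * (0..7) into buf[i]. For bitoffs > 0 the octet straddles
     * buf[i] and buf[i+1]. */
    static uint8_t get_octet(const uint8_t *buf, unsigned i, unsigned bitoffs)
    {
        if (bitoffs == 0)
            return buf[i];
        return (uint8_t)((buf[i] << bitoffs) | (buf[i+1] >> (8 - bitoffs)));
    }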
/* print an atom, quoting it where necessary */
static int print_atom_name(fmtfn_t fn, void* arg, Eterm atom, long *dcount)
{
@@ -227,10 +247,19 @@ static int print_atom_name(fmtfn_t fn, void* arg, Eterm atom, long *dcount)
#define PRT_PATCH_FUN_SIZE ((Eterm) 7)
#define PRT_LAST_ARRAY_ELEMENT ((Eterm) 8) /* Note! Must be last... */
+#if 0
+static char *format_binary(Uint16 x, char *b) {
+ int z;
+ b[16] = '\0';
+ for (z = 0; z < 16; z++) {
+ b[15-z] = ((x>>z) & 0x1) ? '1' : '0';
+ }
+ return b;
+}
+#endif
+
static int
-print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount,
- Eterm* obj_base) /* ignored if !HALFWORD_HEAP */
-{
+print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount) {
DECLARE_WSTACK(s);
int res;
int i;
@@ -276,20 +305,16 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount,
obj = (Eterm) popped.word;
L_print_one_cons:
{
- Eterm* cons = list_val_rel(obj, obj_base);
+ Eterm* cons = list_val(obj);
Eterm tl;
obj = CAR(cons);
tl = CDR(cons);
if (is_not_nil(tl)) {
if (is_list(tl)) {
- WSTACK_PUSH(s, tl);
- WSTACK_PUSH(s, PRT_ONE_CONS);
- WSTACK_PUSH(s, PRT_COMMA);
+ WSTACK_PUSH3(s, tl, PRT_ONE_CONS, PRT_COMMA);
} else {
- WSTACK_PUSH(s, tl);
- WSTACK_PUSH(s, PRT_TERM);
- WSTACK_PUSH(s, PRT_BAR);
+ WSTACK_PUSH3(s, tl, PRT_TERM, PRT_BAR);
}
}
}
@@ -299,9 +324,7 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount,
break;
default: /* PRT_LAST_ARRAY_ELEMENT+1 and upwards */
obj = *popped.ptr;
- WSTACK_PUSH(s, (UWord) (popped.ptr + 1));
- WSTACK_PUSH(s, val-1);
- WSTACK_PUSH(s, PRT_COMMA);
+ WSTACK_PUSH3(s, (UWord) (popped.ptr + 1), val-1, PRT_COMMA);
break;
}
break;
@@ -318,11 +341,7 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount,
PRINT_CHAR(res, fn, arg, '>');
goto L_done;
}
-#if HALFWORD_HEAP
- wobj = is_immed(obj) ? (Wterm)obj : rterm2wterm(obj, obj_base);
-#else
wobj = (Wterm)obj;
-#endif
switch (tag_val_def(wobj)) {
case NIL_DEF:
PRINT_STRING(res, fn, arg, "[]");
@@ -363,12 +382,15 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount,
break;
}
case REF_DEF:
+ if (!ERTS_IS_CRASH_DUMPING)
+ erts_magic_ref_save_bin(obj);
+ /* fall through... */
case EXTERNAL_REF_DEF:
PRINT_STRING(res, fn, arg, "#Ref<");
PRINT_UWORD(res, fn, arg, 'u', 0, 1,
(ErlPfUWord) ref_channel_no(wobj));
ref_num = ref_numbers(wobj);
- for (i = ref_no_of_numbers(wobj)-1; i >= 0; i--) {
+ for (i = ref_no_numbers(wobj)-1; i >= 0; i--) {
PRINT_CHAR(res, fn, arg, '.');
PRINT_UWORD(res, fn, arg, 'u', 0, 1, (ErlPfUWord) ref_num[i]);
}
@@ -398,10 +420,10 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount,
PRINT_CHAR(res, fn, arg, '>');
break;
case LIST_DEF:
- if (is_printable_string(obj, obj_base)) {
+ if (is_printable_string(obj)) {
int c;
PRINT_CHAR(res, fn, arg, '"');
- nobj = list_val_rel(obj, obj_base);
+ nobj = list_val(obj);
while (1) {
if ((*dcount)-- <= 0)
goto L_done;
@@ -415,7 +437,7 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount,
}
if (is_not_list(*nobj))
break;
- nobj = list_val_rel(*nobj, obj_base);
+ nobj = list_val(*nobj);
}
PRINT_CHAR(res, fn, arg, '"');
} else {
@@ -431,8 +453,7 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount,
WSTACK_PUSH(s,PRT_CLOSE_TUPLE);
++nobj;
if (i > 0) {
- WSTACK_PUSH(s, (UWord) nobj);
- WSTACK_PUSH(s, PRT_LAST_ARRAY_ELEMENT+i-1);
+ WSTACK_PUSH2(s, (UWord) nobj, PRT_LAST_ARRAY_ELEMENT+i-1);
}
break;
case FLOAT_DEF: {
@@ -442,25 +463,74 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount,
}
break;
case BINARY_DEF:
- if (header_is_bin_matchstate(*boxed_val(wobj))) {
- PRINT_STRING(res, fn, arg, "#MatchState");
- }
- else {
- ProcBin* pb = (ProcBin *) binary_val(wobj);
- if (pb->size == 1)
- PRINT_STRING(res, fn, arg, "<<1 byte>>");
- else {
+ {
+ byte* bytep;
+ Uint bytesize = binary_size(obj);
+ Uint bitoffs;
+ Uint bitsize;
+ byte octet;
+ ERTS_GET_BINARY_BYTES(obj, bytep, bitoffs, bitsize);
+
+ if (bitsize || !bytesize
+ || !is_printable_ascii(bytep, bytesize, bitoffs)) {
+ int is_first = 1;
PRINT_STRING(res, fn, arg, "<<");
- PRINT_UWORD(res, fn, arg, 'u', 0, 1, (ErlPfUWord) pb->size);
- PRINT_STRING(res, fn, arg, " bytes>>");
+ while (bytesize) {
+ if (is_first)
+ is_first = 0;
+ else
+ PRINT_CHAR(res, fn, arg, ',');
+ if (bitoffs)
+ octet = (bytep[0] << bitoffs) | (bytep[1] >> (8-bitoffs));
+ else
+ octet = bytep[0];
+ PRINT_UWORD(res, fn, arg, 'u', 0, 1, octet);
+ ++bytep;
+ --bytesize;
+ }
+ if (bitsize) {
+ Uint bits = bitoffs + bitsize;
+ octet = bytep[0];
+ if (bits < 8)
+ octet >>= 8 - bits;
+ else if (bits > 8) {
+ bits -= 8; /* bits in last byte */
+ octet <<= bits;
+ octet |= bytep[1] >> (8 - bits);
+ }
+ octet &= (1 << bitsize) - 1;
+ if (is_first)
+ is_first = 0;
+ else
+ PRINT_CHAR(res, fn, arg, ',');
+ PRINT_UWORD(res, fn, arg, 'u', 0, 1, octet);
+ PRINT_CHAR(res, fn, arg, ':');
+ PRINT_UWORD(res, fn, arg, 'u', 0, 1, bitsize);
+ }
+ PRINT_STRING(res, fn, arg, ">>");
+ }
+ else {
+ PRINT_STRING(res, fn, arg, "<<\"");
+ while (bytesize) {
+ if (bitoffs)
+ octet = (bytep[0] << bitoffs) | (bytep[1] >> (8-bitoffs));
+ else
+ octet = bytep[0];
+ if (octet == '"')
+ PRINT_CHAR(res, fn, arg, '\\');
+ PRINT_CHAR(res, fn, arg, octet);
+ ++bytep;
+ --bytesize;
+ }
+ PRINT_STRING(res, fn, arg, "\">>");
}
}
break;
case EXPORT_DEF:
{
Export* ep = *((Export **) (export_val(wobj) + 1));
- Atom* module = atom_tab(atom_val(ep->code[0]));
- Atom* name = atom_tab(atom_val(ep->code[1]));
+ Atom* module = atom_tab(atom_val(ep->info.mfa.module));
+ Atom* name = atom_tab(atom_val(ep->info.mfa.function));
PRINT_STRING(res, fn, arg, "#Fun<");
PRINT_BUF(res, fn, arg, module->name, module->len);
@@ -468,7 +538,7 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount,
PRINT_BUF(res, fn, arg, name->name, name->len);
PRINT_CHAR(res, fn, arg, '.');
PRINT_SWORD(res, fn, arg, 'd', 0, 1,
- (ErlPfSWord) ep->code[2]);
+ (ErlPfSWord) ep->info.mfa.arity);
PRINT_CHAR(res, fn, arg, '>');
}
break;
@@ -489,37 +559,77 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount,
}
break;
case MAP_DEF:
- {
- Uint n;
- Eterm *ks, *vs;
- map_t *mp = (map_t *)map_val(wobj);
- n = map_get_size(mp);
- ks = map_get_keys(mp);
- vs = map_get_values(mp);
-
- PRINT_CHAR(res, fn, arg, '#');
- PRINT_CHAR(res, fn, arg, '{');
- WSTACK_PUSH(s, PRT_CLOSE_TUPLE);
- if (n > 0) {
- n--;
- WSTACK_PUSH(s, vs[n]);
- WSTACK_PUSH(s, PRT_TERM);
- WSTACK_PUSH(s, PRT_ASSOC);
- WSTACK_PUSH(s, ks[n]);
- WSTACK_PUSH(s, PRT_TERM);
-
- while (n--) {
- WSTACK_PUSH(s, PRT_COMMA);
- WSTACK_PUSH(s, vs[n]);
- WSTACK_PUSH(s, PRT_TERM);
- WSTACK_PUSH(s, PRT_ASSOC);
- WSTACK_PUSH(s, ks[n]);
- WSTACK_PUSH(s, PRT_TERM);
- }
- }
- }
+ if (is_flatmap(wobj)) {
+ Uint n;
+ Eterm *ks, *vs;
+ flatmap_t *mp = (flatmap_t *)flatmap_val(wobj);
+ n = flatmap_get_size(mp);
+ ks = flatmap_get_keys(mp);
+ vs = flatmap_get_values(mp);
+
+ PRINT_CHAR(res, fn, arg, '#');
+ PRINT_CHAR(res, fn, arg, '{');
+ WSTACK_PUSH(s, PRT_CLOSE_TUPLE);
+ if (n > 0) {
+ n--;
+ WSTACK_PUSH5(s, vs[n], PRT_TERM, PRT_ASSOC, ks[n], PRT_TERM);
+ while (n--) {
+ WSTACK_PUSH6(s, PRT_COMMA, vs[n], PRT_TERM, PRT_ASSOC,
+ ks[n], PRT_TERM);
+ }
+ }
+ } else {
+ Uint n, mapval;
+ Eterm *head;
+ head = hashmap_val(wobj);
+ mapval = MAP_HEADER_VAL(*head);
+ switch (MAP_HEADER_TYPE(*head)) {
+ case MAP_HEADER_TAG_HAMT_HEAD_ARRAY:
+ case MAP_HEADER_TAG_HAMT_HEAD_BITMAP:
+ PRINT_STRING(res, fn, arg, "#<");
+ PRINT_UWORD(res, fn, arg, 'x', 0, 1, mapval);
+ PRINT_STRING(res, fn, arg, ">{");
+ WSTACK_PUSH(s,PRT_CLOSE_TUPLE);
+ n = hashmap_bitcount(mapval);
+ ASSERT(n < 17);
+ head += 2;
+ if (n > 0) {
+ n--;
+ WSTACK_PUSH(s, head[n]);
+ WSTACK_PUSH(s, PRT_TERM);
+ while (n--) {
+ WSTACK_PUSH(s, PRT_COMMA);
+ WSTACK_PUSH(s, head[n]);
+ WSTACK_PUSH(s, PRT_TERM);
+ }
+ }
+ break;
+ case MAP_HEADER_TAG_HAMT_NODE_BITMAP:
+ n = hashmap_bitcount(mapval);
+ head++;
+ PRINT_CHAR(res, fn, arg, '<');
+ PRINT_UWORD(res, fn, arg, 'x', 0, 1, mapval);
+ PRINT_STRING(res, fn, arg, ">{");
+ WSTACK_PUSH(s,PRT_CLOSE_TUPLE);
+ ASSERT(n < 17);
+ if (n > 0) {
+ n--;
+ WSTACK_PUSH(s, head[n]);
+ WSTACK_PUSH(s, PRT_TERM);
+ while (n--) {
+ WSTACK_PUSH(s, PRT_COMMA);
+ WSTACK_PUSH(s, head[n]);
+ WSTACK_PUSH(s, PRT_TERM);
+ }
+ }
+ break;
+ }
+ }
+ break;
+ case MATCHSTATE_DEF:
+ PRINT_STRING(res, fn, arg, "#MatchState");
break;
- default:
+ default:
PRINT_STRING(res, fn, arg, "<unknown:");
PRINT_POINTER(res, fn, arg, wobj);
PRINT_CHAR(res, fn, arg, '>');
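Editorial note: the hashmap branch added above sizes a HAMT node by taking the population count of its 16-bit bitmap (hence the `ASSERT(n < 17)`): each set bit marks an occupied child slot. A portable sketch of that bit count (the real hashmap_bitcount() may well use a hardware popcount instead):

    #include <stdint.h>

    /* Illustrative only: number of occupied child slots in a HAMT
     * node is the population count of its bitmap. */
    static unsigned bitcount16(uint16_t bm)
    {
        unsigned n = 0;
        while (bm) {
            bm &= bm - 1;   /* clear the lowest set bit */
            n++;
        }
        return n;
    }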
@@ -528,19 +638,17 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount,
}
L_done:
-
DESTROY_WSTACK(s);
return res;
}
+
int
-erts_printf_term(fmtfn_t fn, void* arg, ErlPfEterm term, long precision,
- ErlPfEterm* term_base)
-{
+erts_printf_term(fmtfn_t fn, void* arg, ErlPfEterm term, long precision) {
int res;
- ASSERT(sizeof(ErlPfEterm) == sizeof(Eterm));
+ ERTS_CT_ASSERT(sizeof(ErlPfEterm) == sizeof(Eterm));
- res = print_term(fn, arg, (Eterm)term, &precision, (Eterm*)term_base);
+ res = print_term(fn, arg, (Eterm)term, &precision);
if (res < 0)
return res;
if (precision <= 0)
diff --git a/erts/emulator/beam/erl_printf_term.h b/erts/emulator/beam/erl_printf_term.h
index f92c99d713..8a30286fd8 100644
--- a/erts/emulator/beam/erl_printf_term.h
+++ b/erts/emulator/beam/erl_printf_term.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -21,6 +22,5 @@
#define ERL_PRINTF_TERM_H__
#include "erl_printf_format.h"
-int erts_printf_term(fmtfn_t fn, void* arg, ErlPfEterm term, long precision,
- ErlPfEterm* term_base);
+int erts_printf_term(fmtfn_t fn, void* arg, ErlPfEterm term, long precision);
#endif
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index fd02f10540..3c0a126fe2 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2014. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -23,6 +24,8 @@
# include "config.h"
#endif
+#define ERTS_WANT_BREAK_HANDLING
+
#include <stddef.h> /* offsetof() */
#include "sys.h"
#include "erl_vm.h"
@@ -42,9 +45,16 @@
#include "erl_thr_queue.h"
#include "erl_async.h"
#include "dtrace-wrapper.h"
+#include "lttng-wrapper.h"
#include "erl_ptab.h"
-
-
+#include "erl_bif_unique.h"
+#define ERTS_WANT_TIMER_WHEEL_API
+#include "erl_time.h"
+#include "erl_nfunc_sched.h"
+#include "erl_check_io.h"
+#include "erl_poll.h"
+
+#define ERTS_CHECK_TIME_REDS CONTEXT_REDS
#define ERTS_DELAYED_WAKEUP_INFINITY (~(Uint64) 0)
#define ERTS_DELAYED_WAKEUP_REDUCTIONS ((Uint64) CONTEXT_REDS/2)
@@ -54,11 +64,7 @@
#define ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST (CONTEXT_REDS/10)
-#ifndef ERTS_SCHED_MIN_SPIN
#define ERTS_SCHED_SPIN_UNTIL_YIELD 100
-#else
-#define ERTS_SCHED_SPIN_UNTIL_YIELD 1
-#endif
#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT_VERY_LONG 40
#define ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_VERY_LONG 1000
@@ -105,17 +111,39 @@
#define LOW_BIT (1 << PRIORITY_LOW)
#define PORT_BIT (1 << ERTS_PORT_PRIO_LEVEL)
-#define ERTS_EMPTY_RUNQ(RQ) \
- ((ERTS_RUNQ_FLGS_GET_NOB((RQ)) & ERTS_RUNQ_FLGS_QMASK) == 0 \
- && (RQ)->misc.start == NULL)
+#define ERTS_IS_RUNQ_EMPTY_FLGS(FLGS) \
+ (!((FLGS) & (ERTS_RUNQ_FLGS_QMASK|ERTS_RUNQ_FLG_MISC_OP)))
+
+#define ERTS_IS_RUNQ_EMPTY_PORTS_FLGS(FLGS) \
+ (!((FLGS) & (PORT_BIT|ERTS_RUNQ_FLG_MISC_OP)))
+
+#define ERTS_EMPTY_RUNQ(RQ) \
+ ERTS_IS_RUNQ_EMPTY_FLGS(ERTS_RUNQ_FLGS_GET_NOB((RQ)))
+
+#define ERTS_EMPTY_RUNQ_PORTS(RQ) \
+    ERTS_IS_RUNQ_EMPTY_PORTS_FLGS(ERTS_RUNQ_FLGS_GET_NOB((RQ)))
+
+static ERTS_INLINE int
+runq_got_work_to_execute_flags(Uint32 flags)
+{
+ if (flags & ERTS_RUNQ_FLG_HALTING)
+ return !ERTS_IS_RUNQ_EMPTY_PORTS_FLGS(flags);
+ return !ERTS_IS_RUNQ_EMPTY_FLGS(flags);
+}
+
+static ERTS_INLINE int
+runq_got_work_to_execute(ErtsRunQueue *rq)
+{
+ return runq_got_work_to_execute_flags(ERTS_RUNQ_FLGS_GET_NOB(rq));
+}
#undef RUNQ_READ_RQ
#undef RUNQ_SET_RQ
-#define RUNQ_READ_RQ(X) ((ErtsRunQueue *) erts_smp_atomic_read_nob((X)))
-#define RUNQ_SET_RQ(X, RQ) erts_smp_atomic_set_nob((X), (erts_aint_t) (RQ))
+#define RUNQ_READ_RQ(X) ((ErtsRunQueue *) erts_atomic_read_nob((X)))
+#define RUNQ_SET_RQ(X, RQ) erts_atomic_set_nob((X), (erts_aint_t) (RQ))
#ifdef DEBUG
-# if defined(ARCH_64) && !HALFWORD_HEAP
+# if defined(ARCH_64)
# define ERTS_DBG_SET_INVALID_RUNQP(RQP, N) \
(RUNQ_SET_RQ((RQP), (0xdeadbeefdead0003LL | ((N) << 4)))
# define ERTS_DBG_VERIFY_VALID_RUNQP(RQP) \
@@ -139,22 +167,22 @@ do { \
# define ERTS_DBG_VERIFY_VALID_RUNQP(RQP)
#endif
-#define ERTS_EMPTY_RUNQ_PORTS(RQ) \
- (RUNQ_READ_LEN(&(RQ)->ports.info.len) == 0 && (RQ)->misc.start == NULL)
-
const Process erts_invalid_process = {{ERTS_INVALID_PID}};
extern BeamInstr beam_apply[];
extern BeamInstr beam_exit[];
extern BeamInstr beam_continue_exit[];
-int erts_sched_compact_load;
-int erts_sched_balance_util = 0;
-Uint erts_no_schedulers;
-#ifdef ERTS_DIRTY_SCHEDULERS
-Uint erts_no_dirty_cpu_schedulers;
-Uint erts_no_dirty_io_schedulers;
-#endif
+int ERTS_WRITE_UNLIKELY(erts_default_spo_flags) = SPO_ON_HEAP_MSGQ;
+int ERTS_WRITE_UNLIKELY(erts_sched_compact_load);
+int ERTS_WRITE_UNLIKELY(erts_sched_balance_util) = 0;
+Uint ERTS_WRITE_UNLIKELY(erts_no_schedulers);
+Uint ERTS_WRITE_UNLIKELY(erts_no_total_schedulers);
+Uint ERTS_WRITE_UNLIKELY(erts_no_dirty_cpu_schedulers) = 0;
+Uint ERTS_WRITE_UNLIKELY(erts_no_dirty_io_schedulers) = 0;
+
+static char *erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_NO_FLAGS] = {0};
+int erts_aux_work_no_flags = ERTS_SSI_AUX_WORK_NO_FLAGS;
#define ERTS_THR_PRGR_LATER_CLEANUP_OP_THRESHOLD_VERY_LAZY (4*1024*1024)
#define ERTS_THR_PRGR_LATER_CLEANUP_OP_THRESHOLD_LAZY (512*1024)
@@ -167,107 +195,157 @@ static UWord thr_prgr_later_cleanup_op_threshold = ERTS_THR_PRGR_LATER_CLEANUP_O
ErtsPTab erts_proc erts_align_attribute(ERTS_CACHE_LINE_SIZE);
int erts_sched_thread_suggested_stack_size = -1;
-
+int erts_dcpu_sched_thread_suggested_stack_size = -1;
+int erts_dio_sched_thread_suggested_stack_size = -1;
#ifdef ERTS_ENABLE_LOCK_CHECK
ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE];
#endif
-static struct {
+static struct ErtsSchedBusyWait_ {
int aux_work;
int tse;
int sys_schedule;
} sched_busy_wait;
-#ifdef ERTS_SMP
int erts_disable_proc_not_running_opt;
static ErtsAuxWorkData *aux_thread_aux_work_data;
+static ErtsAuxWorkData *poll_thread_aux_work_data;
-#define ERTS_SCHDLR_SSPND_CHNG_WAITER (((erts_aint32_t) 1) << 0)
+#define ERTS_SCHDLR_SSPND_CHNG_NMSB (((erts_aint32_t) 1) << 0)
#define ERTS_SCHDLR_SSPND_CHNG_MSB (((erts_aint32_t) 1) << 1)
#define ERTS_SCHDLR_SSPND_CHNG_ONLN (((erts_aint32_t) 1) << 2)
+#define ERTS_SCHDLR_SSPND_CHNG_DCPU_ONLN (((erts_aint32_t) 1) << 3)
+
+typedef struct ErtsMultiSchedulingBlock_ {
+ int ongoing;
+ ErtsProcList *blckrs;
+ ErtsProcList *chngq;
+} ErtsMultiSchedulingBlock;
+
+typedef struct ErtsSchedTypeCounters_ {
+ Uint32 normal;
+ Uint32 dirty_cpu;
+ Uint32 dirty_io;
+} ErtsSchedTypeCounters;
+
+static struct ErtsSchedSuspend_ {
+ erts_mtx_t mtx;
+ ErtsSchedTypeCounters online;
+ ErtsSchedTypeCounters curr_online;
+ ErtsSchedTypeCounters active;
+ erts_atomic32_t changing;
+ ErtsProcList *chngq;
+ Eterm changer;
+ ErtsMultiSchedulingBlock nmsb; /* Normal multi Scheduling Block */
+ ErtsMultiSchedulingBlock msb; /* Multi Scheduling Block */
+ ErtsSchedType last_msb_dirty_type;
+} schdlr_sspnd;
-#ifndef DEBUG
-
-#define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \
- erts_smp_atomic32_set_nob(&schdlr_sspnd.changing, (VAL))
+static void init_scheduler_suspend(void);
-#ifdef ERTS_DIRTY_SCHEDULERS
-#define ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(VAL, OLD_VAL) \
- erts_smp_atomic32_set_nob(&schdlr_sspnd.dirty_cpu_changing, (VAL))
-#define ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(VAL, OLD_VAL) \
- erts_smp_atomic32_set_nob(&schdlr_sspnd.dirty_io_changing, (VAL))
-#endif
-
-#else
+static ERTS_INLINE Uint32
+schdlr_sspnd_eq_nscheds(ErtsSchedTypeCounters *val1p, ErtsSchedTypeCounters *val2p)
+{
+ int res = val1p->normal == val2p->normal;
+ res &= val1p->dirty_cpu == val2p->dirty_cpu;
+ res &= val1p->dirty_io == val2p->dirty_io;
+ return res;
+}
-#define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \
-do { \
- erts_aint32_t old_val__; \
- old_val__ = erts_smp_atomic32_xchg_nob(&schdlr_sspnd.changing, \
- (VAL)); \
- ASSERT(old_val__ == (OLD_VAL)); \
-} while (0)
+static ERTS_INLINE Uint32
+schdlr_sspnd_get_nscheds(ErtsSchedTypeCounters *valp,
+ ErtsSchedType type)
+{
+ switch (type) {
+ case ERTS_SCHED_NORMAL:
+ return valp->normal;
+ case ERTS_SCHED_DIRTY_CPU:
+ return valp->dirty_cpu;
+ case ERTS_SCHED_DIRTY_IO:
+ return valp->dirty_io;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid scheduler type");
+ return 0;
+ }
+}
-#ifdef ERTS_DIRTY_SCHEDULERS
-#define ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(VAL, OLD_VAL) \
-do { \
- erts_aint32_t old_val__; \
- old_val__ = erts_smp_atomic32_xchg_nob(&schdlr_sspnd.dirty_cpu_changing, \
- (VAL)); \
- ASSERT(old_val__ == (OLD_VAL)); \
-} while (0)
-#define ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(VAL, OLD_VAL) \
-do { \
- erts_aint32_t old_val__; \
- old_val__ = erts_smp_atomic32_xchg_nob(&schdlr_sspnd.dirty_io_changing, \
- (VAL)); \
- ASSERT(old_val__ == (OLD_VAL)); \
-} while (0)
+#ifdef DEBUG
+static ERTS_INLINE Uint32
+schdlr_sspnd_get_nscheds_tot(ErtsSchedTypeCounters *valp)
+{
+ Uint32 res = valp->normal;
+ res += valp->dirty_cpu;
+ res += valp->dirty_io;
+ return res;
+}
#endif
-#endif
+static ERTS_INLINE void
+schdlr_sspnd_dec_nscheds(ErtsSchedTypeCounters *valp,
+ ErtsSchedType type)
+{
+ ASSERT(schdlr_sspnd_get_nscheds(valp, type) > 0);
+ switch (type) {
+ case ERTS_SCHED_NORMAL:
+ valp->normal--;
+ break;
+ case ERTS_SCHED_DIRTY_CPU:
+ valp->dirty_cpu--;
+ break;
+ case ERTS_SCHED_DIRTY_IO:
+ valp->dirty_io--;
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid scheduler type");
+ }
+}
-static struct {
- erts_smp_mtx_t mtx;
- erts_smp_cnd_t cnd;
- int online;
- int curr_online;
- int wait_curr_online;
-#ifdef ERTS_DIRTY_SCHEDULERS
- int dirty_cpu_online;
- int dirty_cpu_curr_online;
- int dirty_cpu_wait_curr_online;
- int dirty_io_online;
- int dirty_io_curr_online;
- int dirty_io_wait_curr_online;
-#endif
- erts_smp_atomic32_t changing;
- erts_smp_atomic32_t active;
-#ifdef ERTS_DIRTY_SCHEDULERS
- erts_smp_atomic32_t dirty_cpu_changing;
- erts_smp_atomic32_t dirty_cpu_active;
- erts_smp_atomic32_t dirty_io_changing;
- erts_smp_atomic32_t dirty_io_active;
-#endif
- struct {
- int ongoing;
- long wait_active;
-#ifdef ERTS_DIRTY_SCHEDULERS
- long dirty_cpu_wait_active;
- long dirty_io_wait_active;
-#endif
- ErtsProcList *procs;
- } msb; /* Multi Scheduling Block */
-} schdlr_sspnd;
+static ERTS_INLINE void
+schdlr_sspnd_inc_nscheds(ErtsSchedTypeCounters *valp,
+ ErtsSchedType type)
+{
+ switch (type) {
+ case ERTS_SCHED_NORMAL:
+ valp->normal++;
+ break;
+ case ERTS_SCHED_DIRTY_CPU:
+ valp->dirty_cpu++;
+ break;
+ case ERTS_SCHED_DIRTY_IO:
+ valp->dirty_io++;
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid scheduler type");
+ }
+}
+
+static ERTS_INLINE void
+schdlr_sspnd_set_nscheds(ErtsSchedTypeCounters *valp,
+ ErtsSchedType type, Uint32 no)
+{
+ switch (type) {
+ case ERTS_SCHED_NORMAL:
+ valp->normal = no;
+ break;
+ case ERTS_SCHED_DIRTY_CPU:
+ valp->dirty_cpu = no;
+ break;
+ case ERTS_SCHED_DIRTY_IO:
+ valp->dirty_io = no;
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid scheduler type");
+ }
+}
static struct {
- erts_smp_mtx_t update_mtx;
- erts_smp_atomic32_t no_runqs;
+ erts_mtx_t update_mtx;
+ erts_atomic32_t no_runqs;
int last_active_runqs;
int forced_check_balance;
- erts_smp_atomic32_t checking_balance;
+ erts_atomic32_t checking_balance;
int halftime;
int full_reds_history_index;
struct {
@@ -285,35 +363,69 @@ do { \
balance_info.prev_rise.reds = (REDS); \
} while (0)
-#endif
erts_sched_stat_t erts_sched_stat;
-#ifdef USE_THREADS
-static erts_tsd_key_t sched_data_key;
-#endif
+static erts_tsd_key_t ERTS_WRITE_UNLIKELY(sched_data_key);
-static erts_smp_atomic32_t function_calls;
-
-#ifdef ERTS_SMP
-static erts_smp_atomic32_t doing_sys_schedule;
-static erts_smp_atomic32_t no_empty_run_queues;
+static erts_atomic32_t no_empty_run_queues;
long erts_runq_supervision_interval = 0;
static ethr_event runq_supervision_event;
static erts_tid_t runq_supervisor_tid;
static erts_atomic_t runq_supervisor_sleeping;
-#else /* !ERTS_SMP */
-ErtsSchedulerData *erts_scheduler_data;
-#endif
-ErtsAlignedRunQueue *erts_aligned_run_queues;
-Uint erts_no_run_queues;
+ErtsAlignedRunQueue * ERTS_WRITE_UNLIKELY(erts_aligned_run_queues);
+Uint ERTS_WRITE_UNLIKELY(erts_no_run_queues);
-ErtsAlignedSchedulerData *erts_aligned_scheduler_data;
-#ifdef ERTS_DIRTY_SCHEDULERS
-ErtsAlignedSchedulerData *erts_aligned_dirty_cpu_scheduler_data;
-ErtsAlignedSchedulerData *erts_aligned_dirty_io_scheduler_data;
-#endif
+
+struct {
+ union {
+ erts_atomic32_t active;
+ char align__[ERTS_CACHE_LINE_SIZE];
+ } cpu;
+ union {
+ erts_atomic32_t active;
+ char align__[ERTS_CACHE_LINE_SIZE];
+ } io;
+} dirty_count erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+
+
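The `dirty_count` struct above wraps each counter in a union padded out to a cache line, the usual defence against false sharing between the dirty-CPU and dirty-I/O scheduler groups. A minimal sketch of the same idiom in portable C11 (the 64-byte line size and the names are illustrative stand-ins for ERTS_CACHE_LINE_SIZE and the erts_atomic32_t fields):

#include <stdatomic.h>

#define CACHE_LINE_SIZE 64   /* stand-in for ERTS_CACHE_LINE_SIZE */

/* Each counter owns a full cache line, so dirty-CPU and dirty-I/O
 * schedulers updating their own counter never invalidate the line
 * the other group is writing to. */
struct padded_counters {
    union {
        atomic_int active;
        char pad[CACHE_LINE_SIZE];
    } cpu;
    union {
        atomic_int active;
        char pad[CACHE_LINE_SIZE];
    } io;
};
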
+static ERTS_INLINE void
+dirty_active(ErtsSchedulerData *esdp, erts_aint32_t add)
+{
+ erts_aint32_t val;
+ erts_atomic32_t *ap;
+ switch (esdp->type) {
+ case ERTS_SCHED_DIRTY_CPU:
+ ap = &dirty_count.cpu.active;
+ break;
+ case ERTS_SCHED_DIRTY_IO:
+ ap = &dirty_count.io.active;
+ break;
+ default:
+ ap = NULL;
+ ERTS_INTERNAL_ERROR("Not a dirty scheduler");
+ break;
+ }
+
+ /*
+     * All updates are done under the run-queue lock,
+     * so no atomic inc or dec is needed...
+ */
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(esdp->run_queue));
+
+ val = erts_atomic32_read_nob(ap);
+ val += add;
+ erts_atomic32_set_nob(ap, val);
+}
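
dirty_active() relies on the run-queue lock to serialize all writers, which is why a plain read_nob/set_nob pair suffices instead of a more expensive atomic increment. A sketch of the same reasoning with C11 atomics and a pthread mutex standing in for the run-queue lock:

#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int active_count;

/* Every writer holds rq_lock, so a relaxed load + store is enough;
 * no atomic read-modify-write is required.  The atomic type still
 * guarantees that lock-free readers never observe a torn value. */
static void adjust_active(int add)
{
    int val;

    pthread_mutex_lock(&rq_lock);
    val = atomic_load_explicit(&active_count, memory_order_relaxed);
    atomic_store_explicit(&active_count, val + add, memory_order_relaxed);
    pthread_mutex_unlock(&rq_lock);
}
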
+
+ErtsAlignedSchedulerData * ERTS_WRITE_UNLIKELY(erts_aligned_scheduler_data);
+ErtsAlignedSchedulerData * ERTS_WRITE_UNLIKELY(erts_aligned_dirty_cpu_scheduler_data);
+ErtsAlignedSchedulerData * ERTS_WRITE_UNLIKELY(erts_aligned_dirty_io_scheduler_data);
+typedef union {
+ Process dsp;
+ char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(Process))];
+} ErtsAlignedDirtyShadowProcess;
typedef union {
ErtsSchedulerSleepInfo ssi;
@@ -321,33 +433,35 @@ typedef union {
} ErtsAlignedSchedulerSleepInfo;
static ErtsAlignedSchedulerSleepInfo *aligned_sched_sleep_info;
-#ifdef ERTS_DIRTY_SCHEDULERS
-#ifdef ERTS_SMP
static ErtsAlignedSchedulerSleepInfo *aligned_dirty_cpu_sched_sleep_info;
static ErtsAlignedSchedulerSleepInfo *aligned_dirty_io_sched_sleep_info;
-#endif
-#endif
+static ErtsAlignedSchedulerSleepInfo *aligned_poll_thread_sleep_info;
static Uint last_reductions;
static Uint last_exact_reductions;
-Uint erts_default_process_flags;
-Eterm erts_system_monitor;
-Eterm erts_system_monitor_long_gc;
-Uint erts_system_monitor_long_schedule;
-Eterm erts_system_monitor_large_heap;
+Eterm ERTS_WRITE_UNLIKELY(erts_system_monitor);
+Eterm ERTS_WRITE_UNLIKELY(erts_system_monitor_long_gc);
+Uint ERTS_WRITE_UNLIKELY(erts_system_monitor_long_schedule);
+Eterm ERTS_WRITE_UNLIKELY(erts_system_monitor_large_heap);
struct erts_system_monitor_flags_t erts_system_monitor_flags;
/* system performance monitor */
Eterm erts_system_profile;
struct erts_system_profile_flags_t erts_system_profile_flags;
+int erts_system_profile_ts_type = ERTS_TRACE_FLG_NOW_TIMESTAMP;
#if ERTS_MAX_PROCESSES > 0x7fffffff
#error "Need to store process_count in another type"
#endif
typedef enum {
- ERTS_PSTT_GC, /* Garbage Collect */
- ERTS_PSTT_CPC /* Check Process Code */
+ ERTS_PSTT_GC_MAJOR, /* Garbage Collect: Fullsweep */
+ ERTS_PSTT_GC_MINOR, /* Garbage Collect: Generational */
+ ERTS_PSTT_CPC, /* Check Process Code */
+ ERTS_PSTT_CLA, /* Copy Literal Area */
+ ERTS_PSTT_COHMQ, /* Change off heap message queue */
+ ERTS_PSTT_FTMQ, /* Flush trace msg queue */
+ ERTS_PSTT_ETS_FREE_FIXATION
} ErtsProcSysTaskType;
#define ERTS_MAX_PROC_SYS_TASK_ARGS 2
@@ -389,11 +503,14 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(proclist,
200,
ERTS_ALC_T_PROC_LIST)
+#define ERTS_POLL_THREAD_SLEEP_INFO_IX(IX) \
+ (ASSERT(0 <= ((int) (IX)) \
+ && ((int) (IX)) < ((int) erts_no_poll_threads)), \
+ &aligned_poll_thread_sleep_info[(IX)].ssi)
#define ERTS_SCHED_SLEEP_INFO_IX(IX) \
- (ASSERT(-1 <= ((int) (IX)) \
- && ((int) (IX)) < ((int) erts_no_schedulers)), \
+ (ASSERT(((int)-1) <= ((int) (IX)) \
+ && ((int) (IX)) < ((int) erts_no_schedulers)), \
&aligned_sched_sleep_info[(IX)].ssi)
-#ifdef ERTS_DIRTY_SCHEDULERS
#define ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(IX) \
(ASSERT(0 <= ((int) (IX)) \
&& ((int) (IX)) < ((int) erts_no_dirty_cpu_schedulers)), \
@@ -402,7 +519,6 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(proclist,
(ASSERT(0 <= ((int) (IX)) \
&& ((int) (IX)) < ((int) erts_no_dirty_io_schedulers)), \
&aligned_dirty_io_sched_sleep_info[(IX)].ssi)
-#endif
#define ERTS_FOREACH_RUNQ(RQVAR, DO) \
do { \
@@ -410,9 +526,9 @@ do { \
int ix__; \
for (ix__ = 0; ix__ < erts_no_run_queues; ix__++) { \
RQVAR = ERTS_RUNQ_IX(ix__); \
- erts_smp_runq_lock(RQVAR); \
+ erts_runq_lock(RQVAR); \
{ DO; } \
- erts_smp_runq_unlock(RQVAR); \
+ erts_runq_unlock(RQVAR); \
} \
} while (0)
@@ -420,44 +536,50 @@ do { \
do { \
ErtsRunQueue *RQVAR; \
int ix__; \
- ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&schdlr_sspnd.mtx)); \
- for (ix__ = 0; ix__ < schdlr_sspnd.online; ix__++) { \
+ int online__ = (int) schdlr_sspnd_get_nscheds(&schdlr_sspnd.online, \
+ ERTS_SCHED_NORMAL); \
+ ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&schdlr_sspnd.mtx)); \
+ for (ix__ = 0; ix__ < online__; ix__++) { \
RQVAR = ERTS_RUNQ_IX(ix__); \
- erts_smp_runq_lock(RQVAR); \
+ erts_runq_lock(RQVAR); \
{ DO; } \
- erts_smp_runq_unlock(RQVAR); \
+ erts_runq_unlock(RQVAR); \
} \
} while (0)
-#define ERTS_ATOMIC_FOREACH_RUNQ_X(RQVAR, DO, DOX) \
+#define ERTS_ATOMIC_FOREACH_RUNQ_X(RQVAR, NRQS, DO, DOX) \
do { \
ErtsRunQueue *RQVAR; \
+ int nrqs = (NRQS); \
int ix__; \
- for (ix__ = 0; ix__ < erts_no_run_queues; ix__++) { \
+ for (ix__ = 0; ix__ < nrqs; ix__++) { \
RQVAR = ERTS_RUNQ_IX(ix__); \
- erts_smp_runq_lock(RQVAR); \
+ erts_runq_lock(RQVAR); \
{ DO; } \
} \
{ DOX; } \
- for (ix__ = 0; ix__ < erts_no_run_queues; ix__++) \
- erts_smp_runq_unlock(ERTS_RUNQ_IX(ix__)); \
+ for (ix__ = 0; ix__ < nrqs; ix__++) \
+ erts_runq_unlock(ERTS_RUNQ_IX(ix__)); \
} while (0)
-#define ERTS_ATOMIC_FOREACH_RUNQ(RQVAR, DO) \
- ERTS_ATOMIC_FOREACH_RUNQ_X(RQVAR, DO, )
+#define ERTS_ATOMIC_FOREACH_RUNQ(RQVAR, DO) \
+ ERTS_ATOMIC_FOREACH_RUNQ_X(RQVAR, erts_no_run_queues + ERTS_NUM_DIRTY_RUNQS, DO, )
+
+#define ERTS_ATOMIC_FOREACH_NORMAL_RUNQ(RQVAR, DO) \
+ ERTS_ATOMIC_FOREACH_RUNQ_X(RQVAR, erts_no_run_queues, DO, )
+
+
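ERTS_ATOMIC_FOREACH_RUNQ_X differs from ERTS_FOREACH_RUNQ in that it acquires every run-queue lock before releasing any, so DO and DOX observe one consistent snapshot across all queues rather than a per-queue view. A hedged sketch of that locking shape, with pthread mutexes standing in for the run-queue locks:

#include <pthread.h>

#define NQUEUES 4
static pthread_mutex_t rq_mtx[NQUEUES] = {
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};
static int rq_len[NQUEUES];

/* Lock every queue before unlocking any: the sum below is one
 * consistent snapshot of all queues at a single point in time. */
static int total_len_atomic(void)
{
    int ix, total = 0;

    for (ix = 0; ix < NQUEUES; ix++) {
        pthread_mutex_lock(&rq_mtx[ix]);
        total += rq_len[ix];            /* the per-queue DO */
    }
    /* DOX would run here, with all queues still locked */
    for (ix = 0; ix < NQUEUES; ix++)
        pthread_mutex_unlock(&rq_mtx[ix]);
    return total;
}
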
/*
* Local functions.
*/
static void exec_misc_ops(ErtsRunQueue *);
-static void print_function_from_pc(int to, void *to_arg, BeamInstr* x);
-static int stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp,
- int yreg);
+static void print_function_from_pc(fmtfn_t to, void *to_arg, BeamInstr* x);
+static int stack_element_dump(fmtfn_t to, void *to_arg, Eterm* sp, int yreg);
static void aux_work_timeout(void *unused);
static void aux_work_timeout_early_init(int no_schedulers);
-static void aux_work_timeout_late_init(void);
-static void setup_aux_work_timer(void);
+static void setup_aux_work_timer(ErtsSchedulerData *esdp);
static int execute_sys_tasks(Process *c_p,
erts_aint32_t *statep,
@@ -478,29 +600,27 @@ dbg_chk_aux_work_val(erts_aint32_t value)
valid |= ERTS_SSI_AUX_WORK_MISC;
valid |= ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM;
valid |= ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC;
-#if ERTS_USE_ASYNC_READY_Q
valid |= ERTS_SSI_AUX_WORK_ASYNC_READY;
valid |= ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN;
-#endif
-#ifdef ERTS_SMP
valid |= ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP;
valid |= ERTS_SSI_AUX_WORK_MISC_THR_PRGR;
valid |= ERTS_SSI_AUX_WORK_DD;
valid |= ERTS_SSI_AUX_WORK_DD_THR_PRGR;
+ valid |= ERTS_SSI_AUX_WORK_CNCLD_TMRS;
+ valid |= ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR;
valid |= ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP;
-#endif
+ valid |= ERTS_SSI_AUX_WORK_PENDING_EXITERS;
#if HAVE_ERTS_MSEG
valid |= ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK;
#endif
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- valid |= ERTS_SSI_AUX_WORK_CHECK_CHILDREN;
-#endif
#ifdef ERTS_SSI_AUX_WORK_REAP_PORTS
valid |= ERTS_SSI_AUX_WORK_REAP_PORTS;
#endif
+ valid |= ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED;
+ valid |= ERTS_SSI_AUX_WORK_YIELD;
if (~valid & value)
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"Invalid aux_work value found: 0x%x\n",
~valid & value);
}
@@ -511,16 +631,14 @@ dbg_chk_aux_work_val(erts_aint32_t value)
#define ERTS_DBG_CHK_SSI_AUX_WORK(SSI)
#endif
-#ifdef ERTS_SMP
-static void handle_pending_exiters(ErtsProcList *);
+static void do_handle_pending_exiters(ErtsProcList *);
static void wake_scheduler(ErtsRunQueue *rq);
-#endif
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
int
-erts_smp_lc_runq_is_locked(ErtsRunQueue *runq)
+erts_lc_runq_is_locked(ErtsRunQueue *runq)
{
- return erts_smp_lc_mtx_is_locked(&runq->mtx);
+ return erts_lc_mtx_is_locked(&runq->mtx);
}
#endif
@@ -528,13 +646,13 @@ erts_smp_lc_runq_is_locked(ErtsRunQueue *runq)
static ERTS_INLINE Uint64
ensure_later_proc_interval(Uint64 interval)
{
- return erts_smp_ensure_later_interval_nob(erts_ptab_interval(&erts_proc), interval);
+ return erts_ensure_later_interval_nob(erts_ptab_interval(&erts_proc), interval);
}
Uint64
erts_get_proc_interval(void)
{
- return erts_smp_current_interval_nob(erts_ptab_interval(&erts_proc));
+ return erts_current_interval_nob(erts_ptab_interval(&erts_proc));
}
Uint64
@@ -546,102 +664,129 @@ erts_ensure_later_proc_interval(Uint64 interval)
Uint64
erts_step_proc_interval(void)
{
- return erts_smp_step_interval_nob(erts_ptab_interval(&erts_proc));
+ return erts_step_interval_nob(erts_ptab_interval(&erts_proc));
}
void
erts_pre_init_process(void)
{
-#ifdef USE_THREADS
erts_tsd_key_create(&sched_data_key, "erts_sched_data_key");
-#endif
+
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP_IX]
+ = "DELAYED_AW_WAKEUP";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_DD_IX]
+ = "DD";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_DD_THR_PRGR_IX]
+ = "DD_THR_PRGR";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC_IX]
+ = "FIX_ALLOC_DEALLOC";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM_IX]
+ = "FIX_ALLOC_LOWER_LIM";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP_IX]
+ = "THR_PRGR_LATER_OP";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_CNCLD_TMRS_IX]
+ = "CNCLD_TMRS";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR_IX]
+ = "CNCLD_TMRS_THR_PRGR";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_ASYNC_READY_IX]
+ = "ASYNC_READY";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN_IX]
+ = "ASYNC_READY_CLEAN";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_MISC_THR_PRGR_IX]
+ = "MISC_THR_PRGR";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_MISC_IX]
+ = "MISC";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_PENDING_EXITERS_IX]
+ = "PENDING_EXITERS";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_SET_TMO_IX]
+ = "SET_TMO";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK_IX]
+ = "MSEG_CACHE_CHECK";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_YIELD_IX]
+ = "YIELD";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_REAP_PORTS_IX]
+ = "REAP_PORTS";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED_IX]
+ = "DEBUG_WAIT_COMPLETED";
#ifdef ERTS_ENABLE_LOCK_CHECK
- {
- int ix;
-
- erts_psd_required_locks[ERTS_PSD_ERROR_HANDLER].get_locks
- = ERTS_PSD_ERROR_HANDLER_BUF_GET_LOCKS;
- erts_psd_required_locks[ERTS_PSD_ERROR_HANDLER].set_locks
- = ERTS_PSD_ERROR_HANDLER_BUF_SET_LOCKS;
-
- erts_psd_required_locks[ERTS_PSD_SAVED_CALLS_BUF].get_locks
- = ERTS_PSD_SAVED_CALLS_BUF_GET_LOCKS;
- erts_psd_required_locks[ERTS_PSD_SAVED_CALLS_BUF].set_locks
- = ERTS_PSD_SAVED_CALLS_BUF_SET_LOCKS;
-
- erts_psd_required_locks[ERTS_PSD_SCHED_ID].get_locks
- = ERTS_PSD_SCHED_ID_GET_LOCKS;
- erts_psd_required_locks[ERTS_PSD_SCHED_ID].set_locks
- = ERTS_PSD_SCHED_ID_SET_LOCKS;
-
- erts_psd_required_locks[ERTS_PSD_DIST_ENTRY].get_locks
- = ERTS_PSD_DIST_ENTRY_GET_LOCKS;
- erts_psd_required_locks[ERTS_PSD_DIST_ENTRY].set_locks
- = ERTS_PSD_DIST_ENTRY_SET_LOCKS;
-
- erts_psd_required_locks[ERTS_PSD_CALL_TIME_BP].get_locks
- = ERTS_PSD_CALL_TIME_BP_GET_LOCKS;
- erts_psd_required_locks[ERTS_PSD_CALL_TIME_BP].set_locks
- = ERTS_PSD_CALL_TIME_BP_SET_LOCKS;
-
- erts_psd_required_locks[ERTS_PSD_DELAYED_GC_TASK_QS].get_locks
- = ERTS_PSD_DELAYED_GC_TASK_QS_GET_LOCKS;
- erts_psd_required_locks[ERTS_PSD_DELAYED_GC_TASK_QS].set_locks
- = ERTS_PSD_DELAYED_GC_TASK_QS_SET_LOCKS;
-
-#ifdef ERTS_DIRTY_SCHEDULERS
- erts_psd_required_locks[ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT].get_locks
- = ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_GET_LOCKS;
- erts_psd_required_locks[ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT].set_locks
- = ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_SET_LOCKS;
-#endif
- /* Check that we have locks for all entries */
- for (ix = 0; ix < ERTS_PSD_SIZE; ix++) {
- ERTS_SMP_LC_ASSERT(erts_psd_required_locks[ix].get_locks);
- ERTS_SMP_LC_ASSERT(erts_psd_required_locks[ix].set_locks);
- }
- }
+ erts_psd_required_locks[ERTS_PSD_ERROR_HANDLER].get_locks
+ = ERTS_PSD_ERROR_HANDLER_BUF_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_ERROR_HANDLER].set_locks
+ = ERTS_PSD_ERROR_HANDLER_BUF_SET_LOCKS;
+
+ erts_psd_required_locks[ERTS_PSD_SAVED_CALLS_BUF].get_locks
+ = ERTS_PSD_SAVED_CALLS_BUF_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_SAVED_CALLS_BUF].set_locks
+ = ERTS_PSD_SAVED_CALLS_BUF_SET_LOCKS;
+
+ erts_psd_required_locks[ERTS_PSD_SCHED_ID].get_locks
+ = ERTS_PSD_SCHED_ID_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_SCHED_ID].set_locks
+ = ERTS_PSD_SCHED_ID_SET_LOCKS;
+
+ erts_psd_required_locks[ERTS_PSD_CALL_TIME_BP].get_locks
+ = ERTS_PSD_CALL_TIME_BP_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_CALL_TIME_BP].set_locks
+ = ERTS_PSD_CALL_TIME_BP_SET_LOCKS;
+
+ erts_psd_required_locks[ERTS_PSD_DELAYED_GC_TASK_QS].get_locks
+ = ERTS_PSD_DELAYED_GC_TASK_QS_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_DELAYED_GC_TASK_QS].set_locks
+ = ERTS_PSD_DELAYED_GC_TASK_QS_SET_LOCKS;
+
+ erts_psd_required_locks[ERTS_PSD_NIF_TRAP_EXPORT].get_locks
+ = ERTS_PSD_NIF_TRAP_EXPORT_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_NIF_TRAP_EXPORT].set_locks
+ = ERTS_PSD_NIF_TRAP_EXPORT_SET_LOCKS;
+
+ erts_psd_required_locks[ERTS_PSD_ETS_OWNED_TABLES].get_locks
+ = ERTS_PSD_ETS_OWNED_TABLES_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_ETS_OWNED_TABLES].set_locks
+ = ERTS_PSD_ETS_OWNED_TABLES_SET_LOCKS;
+
+ erts_psd_required_locks[ERTS_PSD_ETS_FIXED_TABLES].get_locks
+ = ERTS_PSD_ETS_FIXED_TABLES_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_ETS_FIXED_TABLES].set_locks
+ = ERTS_PSD_ETS_FIXED_TABLES_SET_LOCKS;
+
+ erts_psd_required_locks[ERTS_PSD_DIST_ENTRY].get_locks
+ = ERTS_PSD_DIST_ENTRY_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_DIST_ENTRY].set_locks
+ = ERTS_PSD_DIST_ENTRY_SET_LOCKS;
#endif
}
-#ifdef ERTS_SMP
static void
release_process(void *vproc)
{
- erts_smp_proc_dec_refc((Process *) vproc);
+ erts_proc_dec_refc((Process *) vproc);
}
-#endif
/* initialize the scheduler */
void
erts_init_process(int ncpu, int proc_tab_size, int legacy_proc_tab)
{
-#ifdef ERTS_SMP
erts_disable_proc_not_running_opt = 0;
erts_init_proc_lock(ncpu);
-#endif
init_proclist_alloc();
erts_ptab_init_table(&erts_proc,
ERTS_ALC_T_PROC_TABLE,
-#ifdef ERTS_SMP
release_process,
-#else
- NULL,
-#endif
(ErtsPTabElementCommon *) &erts_invalid_process.common,
proc_tab_size,
sizeof(Process),
"process_table",
- legacy_proc_tab);
+ legacy_proc_tab,
+ 1
+ );
last_reductions = 0;
last_exact_reductions = 0;
- erts_default_process_flags = 0;
}
void
@@ -649,7 +794,9 @@ erts_late_init_process(void)
{
int ix;
- erts_smp_spinlock_init(&erts_sched_stat.lock, "sched_stat");
+ erts_spinlock_init(&erts_sched_stat.lock, "sched_stat", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
+
for (ix = 0; ix < ERTS_NO_PRIO_LEVELS; ix++) {
Eterm atom;
char *atom_str;
@@ -684,22 +831,33 @@ erts_late_init_process(void)
}
+#define ERTS_SCHED_WTIME_IDLE ~((Uint64) 0)
+
static void
-init_sched_wall_time(ErtsSchedWallTime *swtp)
+init_sched_wall_time(ErtsSchedulerData *esdp, Uint64 time_stamp)
{
- swtp->need = erts_sched_balance_util;
- swtp->enabled = 0;
- swtp->start = 0;
- swtp->working.total = 0;
- swtp->working.start = 0;
- swtp->working.currently = 0;
+ if (esdp->type != ERTS_SCHED_NORMAL) {
+ erts_atomic32_init_nob(&esdp->sched_wall_time.u.mod, 0);
+ esdp->sched_wall_time.enabled = 1;
+ esdp->sched_wall_time.start = time_stamp;
+ esdp->sched_wall_time.working.total = 0;
+ esdp->sched_wall_time.working.start = ERTS_SCHED_WTIME_IDLE;
+ }
+ else
+ {
+ esdp->sched_wall_time.u.need = erts_sched_balance_util;
+ esdp->sched_wall_time.enabled = 0;
+ esdp->sched_wall_time.start = 0;
+ esdp->sched_wall_time.working.total = 0;
+ esdp->sched_wall_time.working.start = 0;
+ }
}
static ERTS_INLINE Uint64
sched_wall_time_ts(void)
{
-#ifdef HAVE_GETHRTIME
- return (Uint64) sys_gethrtime();
+#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+ return (Uint64) erts_os_monotonic_time();
#else
Uint64 res;
SysTimeval tv;
@@ -712,72 +870,24 @@ sched_wall_time_ts(void)
#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
-#ifdef ARCH_64
-
static ERTS_INLINE Uint64
aschedtime_read(ErtsAtomicSchedTime *var)
{
- return (Uint64) erts_atomic_read_nob((erts_atomic_t *) var);
+ return (Uint64) erts_atomic64_read_nob((erts_atomic64_t *) var);
}
static ERTS_INLINE void
aschedtime_set(ErtsAtomicSchedTime *var, Uint64 val)
{
- erts_atomic_set_nob((erts_atomic_t *) var, (erts_aint_t) val);
+ erts_atomic64_set_nob((erts_atomic64_t *) var, (erts_aint64_t) val);
}
static ERTS_INLINE void
aschedtime_init(ErtsAtomicSchedTime *var)
{
- erts_atomic_init_nob((erts_atomic_t *) var, (erts_aint_t) 0);
+ erts_atomic64_init_nob((erts_atomic64_t *) var, (erts_aint64_t) 0);
}
-#elif defined(ARCH_32)
-
-static ERTS_INLINE Uint64
-aschedtime_read(ErtsAtomicSchedTime *var)
-{
- erts_dw_aint_t dw;
- erts_dw_atomic_read_nob((erts_dw_atomic_t *) var, &dw);
-#ifdef ETHR_SU_DW_NAINT_T__
- return (Uint64) dw.dw_sint;
-#else
- {
- Uint64 res;
- res = (Uint64) ((Uint32) dw.sint[ERTS_DW_AINT_HIGH_WORD]);
- res <<= 32;
- res |= (Uint64) ((Uint32) dw.sint[ERTS_DW_AINT_LOW_WORD]);
- return res;
- }
-#endif
-}
-
-static ERTS_INLINE void
-aschedtime_set(ErtsAtomicSchedTime *var, Uint64 val)
-{
- erts_dw_aint_t dw;
-#ifdef ETHR_SU_DW_NAINT_T__
- dw.dw_sint = (ETHR_SU_DW_NAINT_T__) val;
-#else
- dw.sint[ERTS_DW_AINT_LOW_WORD] = (erts_aint_t) (val & 0xffffffff);
- dw.sint[ERTS_DW_AINT_HIGH_WORD] = (erts_aint_t) ((val >> 32) & 0xffffffff);
-#endif
- erts_dw_atomic_set_nob((erts_dw_atomic_t *) var, &dw);
-}
-
-static ERTS_INLINE void
-aschedtime_init(ErtsAtomicSchedTime *var)
-{
- erts_dw_aint_t dw;
- dw.sint[ERTS_DW_AINT_LOW_WORD] = (erts_aint_t) 0;
- dw.sint[ERTS_DW_AINT_HIGH_WORD] = (erts_aint_t) 0;
- erts_dw_atomic_init_nob((erts_dw_atomic_t *) var, &dw);
-}
-
-#else
-# error :-/
-#endif
-
#define ERTS_GET_AVG_MAX_UNLOCKED_TRY 50
#define ERTS_SCHED_AVG_UTIL_WRITE_MARKER (~((Uint64) 0))
@@ -882,14 +992,14 @@ erts_get_sched_util(ErtsRunQueue *rq, int initially_locked, int short_interval)
if (!locked) {
if (++try >= ERTS_GET_AVG_MAX_UNLOCKED_TRY) {
/* Writer will eventually block on runq-lock */
- erts_smp_runq_lock(rq);
+ erts_runq_lock(rq);
locked = 1;
}
}
}
if (!initially_locked && locked)
- erts_smp_runq_unlock(rq);
+ erts_runq_unlock(rq);
now = sched_wall_time_ts();
worktime = calc_sched_worktime(is_working, now, last, interval, old_worktime);
@@ -931,34 +1041,148 @@ init_runq_sched_util(ErtsRunQueueSchedUtil *rqsu, int enabled)
#endif /* ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT */
-static ERTS_INLINE void
+
+typedef struct {
+ Uint64 working;
+ Uint64 total;
+} ErtsDirtySchedWallTime;
+
+static void
+read_dirty_sched_wall_time(ErtsSchedulerData *esdp, ErtsDirtySchedWallTime *info)
+{
+ erts_aint32_t mod1;
+ Uint64 working, start, ts;
+
+ mod1 = erts_atomic32_read_nob(&esdp->sched_wall_time.u.mod);
+
+ while (1) {
+ erts_aint32_t mod2;
+
+        /* Spin while a write is in progress... */
+ while (1) {
+ if ((mod1 & 1) == 0)
+ break;
+ ERTS_SPIN_BODY;
+ mod1 = erts_atomic32_read_nob(&esdp->sched_wall_time.u.mod);
+ }
+
+ ERTS_THR_READ_MEMORY_BARRIER;
+
+ working = esdp->sched_wall_time.working.total;
+ start = esdp->sched_wall_time.working.start;
+
+ ERTS_THR_READ_MEMORY_BARRIER;
+
+ mod2 = erts_atomic32_read_nob(&esdp->sched_wall_time.u.mod);
+ if (mod1 == mod2)
+ break;
+ mod1 = mod2;
+ }
+
+ ts = sched_wall_time_ts();
+ ts -= esdp->sched_wall_time.start;
+
+ info->total = ts;
+
+ if (start == ERTS_SCHED_WTIME_IDLE || ts < start)
+ info->working = working;
+ else
+ info->working = working + (ts - start);
+
+ if (info->working > info->total)
+ info->working = info->total;
+}
+
+
+
+static void
+dirty_sched_wall_time_change(ErtsSchedulerData *esdp, int working)
+{
+ erts_aint32_t mod;
+ Uint64 ts = sched_wall_time_ts();
+
+ ts -= esdp->sched_wall_time.start;
+
+ /*
+     * This thread is the only one writing to this
+     * sched_wall_time struct; 'mod' is set to an odd
+     * value while a write is in progress...
+ */
+ mod = erts_atomic32_read_dirty(&esdp->sched_wall_time.u.mod);
+ ASSERT((mod & 1) == 0);
+ mod++;
+
+ erts_atomic32_set_nob(&esdp->sched_wall_time.u.mod, mod);
+ ERTS_THR_WRITE_MEMORY_BARRIER;
+
+ if (working) {
+ ASSERT(esdp->sched_wall_time.working.start
+ == ERTS_SCHED_WTIME_IDLE);
+
+ esdp->sched_wall_time.working.start = ts;
+
+ }
+ else {
+ Uint64 total;
+
+ ASSERT(esdp->sched_wall_time.working.start
+ != ERTS_SCHED_WTIME_IDLE);
+
+ total = esdp->sched_wall_time.working.total;
+ total += ts - esdp->sched_wall_time.working.start;
+
+ esdp->sched_wall_time.working.total = total;
+ esdp->sched_wall_time.working.start = ERTS_SCHED_WTIME_IDLE;
+
+
+ }
+
+ ERTS_THR_WRITE_MEMORY_BARRIER;
+ mod++;
+ erts_atomic32_set_nob(&esdp->sched_wall_time.u.mod, mod);
+
+ if (!working) {
+ ERTS_MSACC_SET_STATE_X(ERTS_MSACC_STATE_BUSY_WAIT);
+ } else {
+ ERTS_MSACC_SET_STATE_X(ERTS_MSACC_STATE_OTHER);
+ }
+}
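
read_dirty_sched_wall_time() and dirty_sched_wall_time_change() together form a single-writer sequence lock: the writer bumps 'mod' to an odd value before touching the payload and back to an even value afterwards, while readers retry whenever they see an odd value or the counter changed under them. A minimal sketch of the protocol, using C11 fences where the code above uses ERTS_THR_{READ,WRITE}_MEMORY_BARRIER (seq_cst fences are used for brevity, and the plain payload reads are formally racy in C11, just as the originals rely on the platform barriers):

#include <stdatomic.h>
#include <stdint.h>

struct seq_times {
    atomic_uint seq;        /* odd while a write is in progress */
    uint64_t working_total; /* payload, written by one thread only */
    uint64_t working_start;
};

static void writer_update(struct seq_times *t, uint64_t total, uint64_t start)
{
    unsigned s = atomic_load_explicit(&t->seq, memory_order_relaxed);
    atomic_store_explicit(&t->seq, s + 1, memory_order_relaxed); /* odd */
    atomic_thread_fence(memory_order_seq_cst);
    t->working_total = total;
    t->working_start = start;
    atomic_thread_fence(memory_order_seq_cst);
    atomic_store_explicit(&t->seq, s + 2, memory_order_relaxed); /* even */
}

static void reader_snapshot(struct seq_times *t,
                            uint64_t *total, uint64_t *start)
{
    unsigned s1, s2;
    do {
        s1 = atomic_load_explicit(&t->seq, memory_order_relaxed);
        while (s1 & 1) { /* spin while a write is in progress */
            s1 = atomic_load_explicit(&t->seq, memory_order_relaxed);
        }
        atomic_thread_fence(memory_order_seq_cst);
        *total = t->working_total;
        *start = t->working_start;
        atomic_thread_fence(memory_order_seq_cst);
        s2 = atomic_load_explicit(&t->seq, memory_order_relaxed);
    } while (s1 != s2); /* writer intervened: retry */
}
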
+
+
+static void
sched_wall_time_change(ErtsSchedulerData *esdp, int working)
{
- if (esdp->sched_wall_time.need) {
+ if (esdp->sched_wall_time.u.need) {
Uint64 ts = sched_wall_time_ts();
#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
- update_avg_sched_util(esdp, ts, working);
+ update_avg_sched_util(esdp, ts, working);
#endif
if (esdp->sched_wall_time.enabled) {
if (working) {
-#ifdef DEBUG
- ASSERT(!esdp->sched_wall_time.working.currently);
- esdp->sched_wall_time.working.currently = 1;
-#endif
+ ASSERT(esdp->sched_wall_time.working.start
+ == ERTS_SCHED_WTIME_IDLE);
ts -= esdp->sched_wall_time.start;
esdp->sched_wall_time.working.start = ts;
}
else {
-#ifdef DEBUG
- ASSERT(esdp->sched_wall_time.working.currently);
- esdp->sched_wall_time.working.currently = 0;
-#endif
+ ASSERT(esdp->sched_wall_time.working.start
+ != ERTS_SCHED_WTIME_IDLE);
ts -= esdp->sched_wall_time.start;
ts -= esdp->sched_wall_time.working.start;
esdp->sched_wall_time.working.total += ts;
+#ifdef DEBUG
+ esdp->sched_wall_time.working.start
+ = ERTS_SCHED_WTIME_IDLE;
+#endif
}
}
}
+ if (!working) {
+ ERTS_MSACC_SET_STATE_M_X(ERTS_MSACC_STATE_BUSY_WAIT);
+ } else {
+ ERTS_MSACC_SET_STATE_M_X(ERTS_MSACC_STATE_OTHER);
+ }
+
}
typedef struct {
@@ -966,30 +1190,32 @@ typedef struct {
int enable;
Process *proc;
Eterm ref;
- Eterm ref_heap[REF_THING_SIZE];
+ Eterm ref_heap[ERTS_REF_THING_SIZE];
Uint req_sched;
- erts_smp_atomic32_t refc;
+ erts_atomic32_t refc;
+ int want_dirty_cpu;
+ int want_dirty_io;
} ErtsSchedWallTimeReq;
-#if !HALFWORD_HEAP
+typedef struct {
+ Process *proc;
+ Eterm ref;
+ Eterm ref_heap[ERTS_REF_THING_SIZE];
+ Uint req_sched;
+ erts_atomic32_t refc;
+} ErtsSystemCheckReq;
+
+
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(swtreq,
ErtsSchedWallTimeReq,
5,
ERTS_ALC_T_SCHED_WTIME_REQ)
-#else
-static ERTS_INLINE ErtsSchedWallTimeReq *
-swtreq_alloc(void)
-{
- return erts_alloc(ERTS_ALC_T_SCHED_WTIME_REQ,
- sizeof(ErtsSchedWallTimeReq));
-}
-static ERTS_INLINE void
-swtreq_free(ErtsSchedWallTimeReq *ptr)
-{
- erts_free(ERTS_ALC_T_SCHED_WTIME_REQ, ptr);
-}
-#endif
+ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(screq,
+ ErtsSystemCheckReq,
+ 5,
+ ERTS_ALC_T_SYS_CHECK_REQ)
+
static void
reply_sched_wall_time(void *vswtrp)
@@ -1006,31 +1232,29 @@ reply_sched_wall_time(void *vswtrp)
Eterm **hpp;
Uint sz, *szp;
ErlOffHeap *ohp = NULL;
- ErlHeapFragment *bp = NULL;
+ ErtsMessage *mp = NULL;
+
+ ASSERT(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp));
- ASSERT(esdp);
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
-#endif
if (swtrp->set) {
if (!swtrp->enable && esdp->sched_wall_time.enabled) {
- esdp->sched_wall_time.need = erts_sched_balance_util;
+ esdp->sched_wall_time.u.need = erts_sched_balance_util;
esdp->sched_wall_time.enabled = 0;
}
else if (swtrp->enable && !esdp->sched_wall_time.enabled) {
Uint64 ts = sched_wall_time_ts();
- esdp->sched_wall_time.need = 1;
+ esdp->sched_wall_time.u.need = 1;
esdp->sched_wall_time.enabled = 1;
esdp->sched_wall_time.start = ts;
esdp->sched_wall_time.working.total = 0;
esdp->sched_wall_time.working.start = 0;
- esdp->sched_wall_time.working.currently = 1;
}
}
if (esdp->sched_wall_time.enabled) {
Uint64 ts = sched_wall_time_ts();
- ASSERT(esdp->sched_wall_time.working.currently);
+ ASSERT(esdp->sched_wall_time.working.start
+ != ERTS_SCHED_WTIME_IDLE);
ts -= esdp->sched_wall_time.start;
total = ts;
ts -= esdp->sched_wall_time.working.start;
@@ -1041,63 +1265,144 @@ reply_sched_wall_time(void *vswtrp)
hpp = NULL;
szp = &sz;
- while (1) {
- if (hpp)
- ref_copy = STORE_NC(hpp, ohp, swtrp->ref);
- else
- *szp += REF_THING_SIZE;
+ if (esdp->sched_wall_time.enabled
+ && swtrp->req_sched == esdp->no
+ && (swtrp->want_dirty_cpu || swtrp->want_dirty_io)) {
+ /* Reply with info about this scheduler and all dirty schedulers... */
+ ErtsDirtySchedWallTime *dswt;
+ int ix, no_dirty_scheds, want_dcpu, want_dio, soffset;
+
+ want_dcpu = swtrp->want_dirty_cpu;
+ want_dio = swtrp->want_dirty_io;
+
+ no_dirty_scheds = 0;
+ if (want_dcpu)
+ no_dirty_scheds += erts_no_dirty_cpu_schedulers;
+ if (want_dio)
+ no_dirty_scheds += erts_no_dirty_io_schedulers;
+
+ ASSERT(no_dirty_scheds);
+
+ dswt = erts_alloc(ERTS_ALC_T_TMP,
+ sizeof(ErtsDirtySchedWallTime)
+ * no_dirty_scheds);
+
+ for (ix = 0; ix < no_dirty_scheds; ix++) {
+ ErtsSchedulerData *esdp;
+ if (want_dcpu && ix < erts_no_dirty_cpu_schedulers)
+ esdp = &erts_aligned_dirty_cpu_scheduler_data[ix].esd;
+ else {
+ int dio_ix = ix - erts_no_dirty_cpu_schedulers;
+ esdp = &erts_aligned_dirty_io_scheduler_data[dio_ix].esd;
+ }
+ read_dirty_sched_wall_time(esdp, &dswt[ix]);
+ }
- if (swtrp->set)
- msg = ref_copy;
- else {
- msg = (!esdp->sched_wall_time.enabled
- ? am_notsup
- : erts_bld_tuple(hpp, szp, 3,
- make_small(esdp->no),
- erts_bld_uint64(hpp, szp, working),
- erts_bld_uint64(hpp, szp, total)));
+ soffset = erts_no_schedulers + 1;
- msg = erts_bld_tuple(hpp, szp, 2, ref_copy, msg);
- }
- if (hpp)
- break;
+ if (!want_dcpu) {
+ ASSERT(want_dio);
+ soffset += erts_no_dirty_cpu_schedulers;
+ }
- hp = erts_alloc_message_heap(sz, &bp, &ohp, rp, &rp_locks);
- szp = NULL;
- hpp = &hp;
+ while (1) {
+ if (hpp)
+ ref_copy = STORE_NC(hpp, ohp, swtrp->ref);
+ else
+ *szp += ERTS_REF_THING_SIZE;
+
+ ASSERT(!swtrp->set);
+
+ /* info about dirty schedulers... */
+ msg = NIL;
+ for (ix = no_dirty_scheds-1; ix >= 0; ix--) {
+ msg = erts_bld_cons(hpp, szp,
+ erts_bld_tuple(hpp, szp, 3,
+ make_small(ix+soffset),
+ erts_bld_uint64(hpp, szp,
+ dswt[ix].working),
+ erts_bld_uint64(hpp, szp,
+ dswt[ix].total)),
+ msg);
+ }
+ /* info about this scheduler... */
+ msg = erts_bld_cons(hpp, szp,
+ erts_bld_tuple(hpp, szp, 3,
+ make_small(esdp->no),
+ erts_bld_uint64(hpp, szp, working),
+ erts_bld_uint64(hpp, szp, total)),
+ msg);
+
+ msg = erts_bld_tuple(hpp, szp, 2, ref_copy, msg);
+
+ if (hpp)
+ break;
+
+ mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
+ szp = NULL;
+ hpp = &hp;
+ }
+
+ erts_free(ERTS_ALC_T_TMP, dswt);
+ }
+ else
+ {
+ /* Reply with info about this scheduler only... */
+
+ while (1) {
+ if (hpp)
+ ref_copy = STORE_NC(hpp, ohp, swtrp->ref);
+ else
+ *szp += ERTS_REF_THING_SIZE;
+
+ if (swtrp->set)
+ msg = ref_copy;
+ else {
+ msg = (!esdp->sched_wall_time.enabled
+ ? am_undefined
+ : erts_bld_tuple(hpp, szp, 3,
+ make_small(esdp->no),
+ erts_bld_uint64(hpp, szp, working),
+ erts_bld_uint64(hpp, szp, total)));
+
+ msg = erts_bld_tuple(hpp, szp, 2, ref_copy, msg);
+ }
+ if (hpp)
+ break;
+
+ mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
+ szp = NULL;
+ hpp = &hp;
+ }
}
- erts_queue_message(rp, &rp_locks, bp, msg, NIL
-#ifdef USE_VM_PROBES
- , NIL
-#endif
- );
+ erts_queue_message(rp, rp_locks, mp, msg, am_system);
if (swtrp->req_sched == esdp->no)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
- erts_smp_proc_dec_refc(rp);
+ erts_proc_dec_refc(rp);
- if (erts_smp_atomic32_dec_read_nob(&swtrp->refc) == 0)
+ if (erts_atomic32_dec_read_nob(&swtrp->refc) == 0)
swtreq_free(vswtrp);
}
Eterm
-erts_sched_wall_time_request(Process *c_p, int set, int enable)
+erts_sched_wall_time_request(Process *c_p, int set, int enable,
+ int want_dirty_cpu, int want_dirty_io)
{
- ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p);
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
Eterm ref;
ErtsSchedWallTimeReq *swtrp;
Eterm *hp;
+ ASSERT(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp));
+
if (!set && !esdp->sched_wall_time.enabled)
return THE_NON_VALUE;
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
-#endif
swtrp = swtreq_alloc();
ref = erts_make_ref(c_p);
@@ -1108,24 +1413,88 @@ erts_sched_wall_time_request(Process *c_p, int set, int enable)
swtrp->proc = c_p;
swtrp->ref = STORE_NC(&hp, NULL, ref);
swtrp->req_sched = esdp->no;
- erts_smp_atomic32_init_nob(&swtrp->refc,
+ swtrp->want_dirty_cpu = want_dirty_cpu;
+ swtrp->want_dirty_io = want_dirty_io;
+ erts_atomic32_init_nob(&swtrp->refc,
(erts_aint32_t) erts_no_schedulers);
- erts_smp_proc_add_refc(c_p, (Sint32) erts_no_schedulers);
+ erts_proc_add_refc(c_p, (Sint32) erts_no_schedulers);
-#ifdef ERTS_SMP
if (erts_no_schedulers > 1)
erts_schedule_multi_misc_aux_work(1,
erts_no_schedulers,
reply_sched_wall_time,
(void *) swtrp);
-#endif
reply_sched_wall_time((void *) swtrp);
return ref;
}
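
erts_sched_wall_time_request() uses a request block whose reference count starts at the number of schedulers; every scheduler runs reply_sched_wall_time() once, and whichever decrement reaches zero frees the block. A sketch of that fan-out/last-one-out pattern (schedule_on() is a hypothetical stand-in for erts_schedule_multi_misc_aux_work()):

#include <stdatomic.h>
#include <stdlib.h>

struct request {
    atomic_int refc;
    /* ... request payload ... */
};

/* Hypothetical dispatcher: run fn(arg) on the given worker. */
extern void schedule_on(int worker, void (*fn)(void *), void *arg);

static void reply(void *vreq)
{
    struct request *req = vreq;
    /* ... build and send this worker's part of the reply ... */
    if (atomic_fetch_sub_explicit(&req->refc, 1,
                                  memory_order_acq_rel) == 1)
        free(req);              /* last worker out cleans up */
}

static void fan_out(int nworkers)
{
    struct request *req = malloc(sizeof *req);
    atomic_init(&req->refc, nworkers);
    for (int i = 1; i < nworkers; i++)
        schedule_on(i, reply, req);
    reply(req);                 /* the requester answers synchronously */
}

erts_system_check_request() below follows exactly the same shape.
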
+static void
+reply_system_check(void *vscrp)
+{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ErtsSystemCheckReq *scrp = (ErtsSystemCheckReq *) vscrp;
+ ErtsProcLocks rp_locks = (scrp->req_sched == esdp->no ? ERTS_PROC_LOCK_MAIN : 0);
+ Process *rp = scrp->proc;
+ Eterm msg;
+ Eterm *hp = NULL;
+ Eterm **hpp;
+ Uint sz;
+ ErlOffHeap *ohp = NULL;
+ ErtsMessage *mp = NULL;
+
+ ASSERT(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp));
+
+ sz = ERTS_REF_THING_SIZE;
+ mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
+ hpp = &hp;
+ msg = STORE_NC(hpp, ohp, scrp->ref);
+
+ erts_queue_message(rp, rp_locks, mp, msg, am_system);
+
+ if (scrp->req_sched == esdp->no)
+ rp_locks &= ~ERTS_PROC_LOCK_MAIN;
+
+ if (rp_locks)
+ erts_proc_unlock(rp, rp_locks);
+
+ erts_proc_dec_refc(rp);
+
+ if (erts_atomic32_dec_read_nob(&scrp->refc) == 0)
+ screq_free(vscrp);
+}
+
+
+Eterm erts_system_check_request(Process *c_p) {
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
+ Eterm ref;
+ ErtsSystemCheckReq *scrp;
+ Eterm *hp;
+
+ scrp = screq_alloc();
+ ref = erts_make_ref(c_p);
+ hp = &scrp->ref_heap[0];
+
+ scrp->proc = c_p;
+ scrp->ref = STORE_NC(&hp, NULL, ref);
+ scrp->req_sched = esdp->no;
+ erts_atomic32_init_nob(&scrp->refc, (erts_aint32_t) erts_no_schedulers);
+
+ erts_proc_add_refc(c_p, (Sint) erts_no_schedulers);
+
+ if (erts_no_schedulers > 1)
+ erts_schedule_multi_misc_aux_work(1,
+ erts_no_schedulers,
+ reply_system_check,
+ (void *) scrp);
+
+ reply_system_check((void *) scrp);
+
+ return ref;
+}
+
static ERTS_INLINE ErtsProcList *
proclist_create(Process *p)
{
@@ -1136,6 +1505,15 @@ proclist_create(Process *p)
return plp;
}
+static ERTS_INLINE ErtsProcList *
+proclist_copy(ErtsProcList *plp0)
+{
+ ErtsProcList *plp1 = proclist_alloc();
+ plp1->pid = plp0->pid;
+ plp1->started_interval = plp0->started_interval;
+ return plp1;
+}
+
static ERTS_INLINE void
proclist_destroy(ErtsProcList *plp)
{
@@ -1148,6 +1526,12 @@ erts_proclist_create(Process *p)
return proclist_create(p);
}
+ErtsProcList *
+erts_proclist_copy(ErtsProcList *plp)
+{
+ return proclist_copy(plp);
+}
+
void
erts_proclist_destroy(ErtsProcList *plp)
{
@@ -1155,64 +1539,43 @@ erts_proclist_destroy(ErtsProcList *plp)
}
void *
-erts_psd_set_init(Process *p, ErtsProcLocks plocks, int ix, void *data)
+erts_psd_set_init(Process *p, int ix, void *data)
{
void *old;
- ErtsProcLocks xplocks;
- int refc = 0;
- ErtsPSD *psd = erts_alloc(ERTS_ALC_T_PSD, sizeof(ErtsPSD));
+ ErtsPSD *psd, *new_psd;
int i;
- for (i = 0; i < ERTS_PSD_SIZE; i++)
- psd->data[i] = NULL;
- ERTS_SMP_LC_ASSERT(plocks);
- ERTS_SMP_LC_ASSERT(plocks == erts_proc_lc_my_proc_locks(p));
+ new_psd = erts_alloc(ERTS_ALC_T_PSD, sizeof(ErtsPSD));
+ for (i = 0; i < ERTS_PSD_SIZE; i++)
+ new_psd->data[i] = NULL;
- xplocks = ERTS_PROC_LOCKS_ALL;
- xplocks &= ~plocks;
- if (xplocks && erts_smp_proc_trylock(p, xplocks) == EBUSY) {
- if (xplocks & ERTS_PROC_LOCK_MAIN) {
- erts_smp_proc_inc_refc(p);
- erts_smp_proc_unlock(p, plocks);
- erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL);
- refc = 1;
- }
- else {
- if (plocks & ERTS_PROC_LOCKS_ALL_MINOR)
- erts_smp_proc_unlock(p, plocks & ERTS_PROC_LOCKS_ALL_MINOR);
- erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- }
- }
- if (!p->psd)
- p->psd = psd;
- if (xplocks)
- erts_smp_proc_unlock(p, xplocks);
- if (refc)
- erts_smp_proc_dec_refc(p);
- ASSERT(p->psd);
- if (p->psd != psd)
- erts_free(ERTS_ALC_T_PSD, psd);
- old = p->psd->data[ix];
- p->psd->data[ix] = data;
- ERTS_SMP_LC_ASSERT(plocks == erts_proc_lc_my_proc_locks(p));
+ psd = (ErtsPSD *) erts_atomic_cmpxchg_mb(&p->psd,
+ (erts_aint_t) new_psd,
+ (erts_aint_t) NULL);
+ if (psd)
+ erts_free(ERTS_ALC_T_PSD, new_psd);
+ else
+ psd = new_psd;
+ old = psd->data[ix];
+ psd->data[ix] = data;
return old;
}
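
The rewritten erts_psd_set_init() no longer juggles process locks; it installs the PSD block with a single compare-and-exchange, and the loser of the race simply frees its speculative allocation. The same lazy-initialization idiom in C11 (types and names are illustrative):

#include <stdatomic.h>
#include <stdlib.h>

struct psd { void *data[8]; };

/* Lock-free lazy init: allocate speculatively, try to install with a
 * CAS, and free the allocation if another thread won the race. */
static struct psd *psd_get_or_init(_Atomic(struct psd *) *slot)
{
    struct psd *psd = atomic_load_explicit(slot, memory_order_acquire);
    struct psd *fresh, *expected = NULL;

    if (psd)
        return psd;

    fresh = calloc(1, sizeof *fresh);
    if (atomic_compare_exchange_strong_explicit(slot, &expected, fresh,
                                                memory_order_acq_rel,
                                                memory_order_acquire))
        return fresh;       /* we installed it */

    free(fresh);            /* lost the race... */
    return expected;        /* ...the CAS left the winner's block here */
}
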
-#ifdef ERTS_SMP
void
-erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flags)
+erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi,
+ erts_aint32_t flags)
{
switch (flags & ERTS_SSI_FLGS_SLEEP_TYPE) {
case ERTS_SSI_FLG_POLL_SLEEPING:
- erts_sys_schedule_interrupt(1);
+ erts_check_io_interrupt(ssi->psi, 1);
break;
case ERTS_SSI_FLG_POLL_SLEEPING|ERTS_SSI_FLG_TSE_SLEEPING:
/*
* Thread progress blocking while poll sleeping; need
* to signal on both...
*/
- erts_sys_schedule_interrupt(1);
+ erts_check_io_interrupt(ssi->psi, 1);
/* fall through */
case ERTS_SSI_FLG_TSE_SLEEPING:
erts_tse_set(ssi->event);
@@ -1220,13 +1583,12 @@ erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flags)
case 0:
break;
default:
- erl_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error\n",
+ erts_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error\n",
__FILE__, __LINE__);
break;
}
}
-#endif
static ERTS_INLINE void
set_aux_work_flags_wakeup_nob(ErtsSchedulerSleepInfo *ssi,
@@ -1237,16 +1599,12 @@ set_aux_work_flags_wakeup_nob(ErtsSchedulerSleepInfo *ssi,
ERTS_DBG_CHK_SSI_AUX_WORK(ssi);
old_flgs = erts_atomic32_read_nob(&ssi->aux_work);
- if ((old_flgs & flgs) == 0) {
+ if ((old_flgs & flgs) != flgs) {
old_flgs = erts_atomic32_read_bor_nob(&ssi->aux_work, flgs);
- if ((old_flgs & flgs) == 0) {
-#ifdef ERTS_SMP
+ if ((old_flgs & flgs) != flgs) {
erts_sched_poke(ssi);
-#else
- erts_sys_schedule_interrupt(1);
-#endif
}
}
}
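
Note the condition change from (old_flgs & flgs) == 0 to (old_flgs & flgs) != flgs: the sleeper is now poked whenever any requested flag was missing, not only when all of them were. A sketch of the set-then-wake-on-transition idiom (poke() is a hypothetical stand-in for erts_sched_poke()):

#include <stdatomic.h>

extern void poke(void);   /* hypothetical stand-in for erts_sched_poke() */

/* Wake the sleeper only when this call actually added a flag; if all
 * requested bits were already set, a wakeup is already on its way. */
static void set_flags_wakeup(atomic_uint *aux_work, unsigned flgs)
{
    unsigned old = atomic_load_explicit(aux_work, memory_order_relaxed);
    if ((old & flgs) != flgs) {
        old = atomic_fetch_or_explicit(aux_work, flgs,
                                       memory_order_relaxed);
        if ((old & flgs) != flgs)
            poke();
    }
}
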
@@ -1261,12 +1619,8 @@ set_aux_work_flags_wakeup_relb(ErtsSchedulerSleepInfo *ssi,
old_flgs = erts_atomic32_read_bor_relb(&ssi->aux_work, flgs);
- if ((old_flgs & flgs) == 0) {
-#ifdef ERTS_SMP
+ if ((old_flgs & flgs) != flgs) {
erts_sched_poke(ssi);
-#else
- erts_sys_schedule_interrupt(1);
-#endif
}
}
@@ -1282,7 +1636,6 @@ unset_aux_work_flags(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flgs)
return erts_atomic32_read_band_nob(&ssi->aux_work, ~flgs);
}
-#ifdef ERTS_SMP
static ERTS_INLINE void
haw_chk_later_cleanup_op_wakeup(ErtsAuxWorkData *awdp, ErtsThrPrgrVal val)
@@ -1352,9 +1705,9 @@ static ERTS_INLINE void
haw_thr_prgr_current_check_progress(ErtsAuxWorkData *awdp)
{
ErtsThrPrgrVal current = awdp->current_thr_prgr;
-#ifdef ERTS_DIRTY_SCHEDULERS
+
ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
-#endif
+
if (current != ERTS_THR_PRGR_INVALID
&& !erts_thr_progress_equal(current, erts_thr_progress_current())) {
/*
@@ -1371,9 +1724,7 @@ handle_delayed_aux_work_wakeup(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, in
{
int jix, max_jix;
-#ifdef ERTS_DIRTY_SCHEDULERS
ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
-#endif
ASSERT(awdp->delayed_wakeup.next != ERTS_DELAYED_WAKEUP_INFINITY);
@@ -1431,7 +1782,6 @@ schedule_aux_work_wakeup(ErtsAuxWorkData *awdp,
}
}
-#endif
typedef struct erts_misc_aux_work_t_ erts_misc_aux_work_t;
struct erts_misc_aux_work_t_ {
@@ -1472,11 +1822,7 @@ init_misc_aux_work(void)
sizeof(erts_algnd_misc_aux_work_q_t)
* (erts_no_schedulers+1));
-#ifdef ERTS_SMP
ix = 0; /* aux_thread + schedulers */
-#else
- ix = 1; /* scheduler only */
-#endif
for (; ix <= erts_no_schedulers; ix++) {
qinit.arg = (void *) ERTS_SCHED_SLEEP_INFO_IX(ix-1);
@@ -1494,10 +1840,8 @@ misc_aux_work_clean(ErtsThrQ_t *q,
set_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MISC);
return aux_work | ERTS_SSI_AUX_WORK_MISC;
case ERTS_THR_Q_NEED_THR_PRGR:
-#ifdef ERTS_SMP
set_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MISC_THR_PRGR);
haw_thr_prgr_soft_wakeup(awdp, erts_thr_q_need_thr_progress(q));
-#endif
case ERTS_THR_Q_CLEAN:
break;
}
@@ -1523,16 +1867,14 @@ handle_misc_aux_work(ErtsAuxWorkData *awdp,
return misc_aux_work_clean(q, awdp, aux_work & ~ERTS_SSI_AUX_WORK_MISC);
}
-#ifdef ERTS_SMP
static ERTS_INLINE erts_aint32_t
handle_misc_aux_work_thr_prgr(ErtsAuxWorkData *awdp,
erts_aint32_t aux_work,
int waiting)
{
-#ifdef ERTS_DIRTY_SCHEDULERS
ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
-#endif
+
if (!erts_thr_progress_has_reached_this(haw_thr_prgr_current(awdp),
awdp->misc.thr_prgr))
return aux_work & ~ERTS_SSI_AUX_WORK_MISC_THR_PRGR;
@@ -1544,7 +1886,6 @@ handle_misc_aux_work_thr_prgr(ErtsAuxWorkData *awdp,
aux_work & ~ERTS_SSI_AUX_WORK_MISC_THR_PRGR);
}
-#endif
static ERTS_INLINE void
schedule_misc_aux_work(int sched_id,
@@ -1554,11 +1895,7 @@ schedule_misc_aux_work(int sched_id,
ErtsThrQ_t *q;
erts_misc_aux_work_t *mawp;
-#ifdef ERTS_SMP
ASSERT(0 <= sched_id && sched_id <= erts_no_schedulers);
-#else
- ASSERT(sched_id == 1);
-#endif
q = &misc_aux_work_queues[sched_id].q;
mawp = misc_aux_work_alloc();
@@ -1584,12 +1921,13 @@ erts_schedule_multi_misc_aux_work(int ignore_self,
int id, self = 0;
if (ignore_self) {
- ErtsSchedulerData *esdp = erts_get_scheduler_data();
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
-#endif
- if (esdp)
- self = (int) esdp->no;
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+
+ /* ignore_self is meaningless on dirty schedulers since aux work can
+ * only run on normal schedulers, and their ids do not translate. */
+ if(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ self = (int)esdp->no;
+ }
}
ASSERT(0 < max_sched && max_sched <= erts_no_schedulers);
@@ -1601,7 +1939,6 @@ erts_schedule_multi_misc_aux_work(int ignore_self,
}
}
-#if ERTS_USE_ASYNC_READY_Q
void
erts_notify_check_async_ready_queue(void *vno)
@@ -1617,9 +1954,9 @@ handle_async_ready(ErtsAuxWorkData *awdp,
int waiting)
{
ErtsSchedulerSleepInfo *ssi = awdp->ssi;
-#ifdef ERTS_DIRTY_SCHEDULERS
+
ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
-#endif
+
unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY);
if (erts_check_async_ready(awdp->async_ready.queue)) {
if (set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY)
@@ -1629,9 +1966,7 @@ handle_async_ready(ErtsAuxWorkData *awdp,
}
return aux_work;
}
-#ifdef ERTS_SMP
awdp->async_ready.need_thr_prgr = 0;
-#endif
set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN);
return ((aux_work & ~ERTS_SSI_AUX_WORK_ASYNC_READY)
| ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN);
@@ -1644,10 +1979,8 @@ handle_async_ready_clean(ErtsAuxWorkData *awdp,
{
void *thr_prgr_p;
-#ifdef ERTS_DIRTY_SCHEDULERS
ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
-#endif
-#ifdef ERTS_SMP
+
if (awdp->async_ready.need_thr_prgr
&& !erts_thr_progress_has_reached_this(haw_thr_prgr_current(awdp),
awdp->async_ready.thr_prgr)) {
@@ -1656,26 +1989,20 @@ handle_async_ready_clean(ErtsAuxWorkData *awdp,
awdp->async_ready.need_thr_prgr = 0;
thr_prgr_p = (void *) &awdp->async_ready.thr_prgr;
-#else
- thr_prgr_p = NULL;
-#endif
switch (erts_async_ready_clean(awdp->async_ready.queue, thr_prgr_p)) {
case ERTS_ASYNC_READY_CLEAN:
unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN);
return aux_work & ~ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN;
-#ifdef ERTS_SMP
case ERTS_ASYNC_READY_NEED_THR_PRGR:
haw_thr_prgr_soft_wakeup(awdp, awdp->async_ready.thr_prgr);
awdp->async_ready.need_thr_prgr = 1;
return aux_work & ~ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN;
-#endif
default:
return aux_work;
}
}
-#endif /* ERTS_USE_ASYNC_READY_Q */
static ERTS_INLINE erts_aint32_t
@@ -1684,9 +2011,8 @@ handle_fix_alloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
ErtsSchedulerSleepInfo *ssi = awdp->ssi;
erts_aint32_t res;
-#ifdef ERTS_DIRTY_SCHEDULERS
ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
-#endif
+
unset_aux_work_flags(ssi, (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
| ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC));
aux_work &= ~(ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
@@ -1700,7 +2026,6 @@ handle_fix_alloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
return aux_work;
}
-#ifdef ERTS_SMP
void
erts_alloc_notify_delayed_dealloc(int ix)
@@ -1733,15 +2058,17 @@ handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waitin
int need_thr_progress = 0;
ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID;
int more_work = 0;
+ ERTS_MSACC_PUSH_STATE_M_X();
-#ifdef ERTS_DIRTY_SCHEDULERS
ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
-#endif
+
unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD);
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_ALLOC);
erts_alloc_scheduler_handle_delayed_dealloc((void *) awdp->esdp,
&need_thr_progress,
&wakeup,
&more_work);
+ ERTS_MSACC_POP_STATE_M_X();
if (more_work) {
if (set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD)
& ERTS_SSI_AUX_WORK_DD_THR_PRGR) {
@@ -1759,11 +2086,6 @@ handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waitin
awdp->dd.thr_prgr = wakeup;
haw_thr_prgr_soft_wakeup(awdp, wakeup);
}
- else if (awdp->dd.completed_callback) {
- awdp->dd.completed_callback(awdp->dd.completed_arg);
- awdp->dd.completed_callback = NULL;
- awdp->dd.completed_arg = NULL;
- }
return aux_work & ~ERTS_SSI_AUX_WORK_DD;
}
@@ -1776,9 +2098,8 @@ handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, i
ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID;
ErtsThrPrgrVal current = haw_thr_prgr_current(awdp);
-#ifdef ERTS_DIRTY_SCHEDULERS
ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
-#endif
+
if (!erts_thr_progress_has_reached_this(current, awdp->dd.thr_prgr))
return aux_work & ~ERTS_SSI_AUX_WORK_DD_THR_PRGR;
@@ -1805,17 +2126,105 @@ handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, i
}
else {
unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD_THR_PRGR);
- if (awdp->dd.completed_callback) {
- awdp->dd.completed_callback(awdp->dd.completed_arg);
- awdp->dd.completed_callback = NULL;
- awdp->dd.completed_arg = NULL;
- }
}
return aux_work & ~ERTS_SSI_AUX_WORK_DD_THR_PRGR;
}
/*
+ * Canceled timers
+ */
+
+void
+erts_notify_canceled_timer(ErtsSchedulerData *esdp, int rsid)
+{
+ ASSERT(esdp && esdp == erts_get_scheduler_data());
+ if (esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp))
+ schedule_aux_work_wakeup(&esdp->aux_work_data,
+ rsid,
+ ERTS_SSI_AUX_WORK_CNCLD_TMRS);
+ else
+ set_aux_work_flags_wakeup_relb(ERTS_SCHED_SLEEP_INFO_IX(rsid-1),
+ ERTS_SSI_AUX_WORK_CNCLD_TMRS);
+}
+
+static ERTS_INLINE erts_aint32_t
+handle_canceled_timers(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
+{
+ ErtsSchedulerSleepInfo *ssi = awdp->ssi;
+ int need_thr_progress = 0;
+ ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID;
+ int more_work = 0;
+
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+
+ unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS);
+ erts_handle_canceled_timers((void *) awdp->esdp,
+ &need_thr_progress,
+ &wakeup,
+ &more_work);
+ if (more_work) {
+ if (set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS)
+ & ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR) {
+ unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR);
+ aux_work &= ~ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR;
+ }
+ return aux_work;
+ }
+
+ if (need_thr_progress) {
+ if (wakeup == ERTS_THR_PRGR_INVALID)
+ wakeup = erts_thr_progress_later(awdp->esdp);
+ awdp->cncld_tmrs.thr_prgr = wakeup;
+ set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR);
+ haw_thr_prgr_soft_wakeup(awdp, wakeup);
+ }
+ return aux_work & ~ERTS_SSI_AUX_WORK_CNCLD_TMRS;
+}
+
+static ERTS_INLINE erts_aint32_t
+handle_canceled_timers_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
+{
+ ErtsSchedulerSleepInfo *ssi;
+ int need_thr_progress;
+ int more_work;
+ ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID;
+ ErtsThrPrgrVal current = haw_thr_prgr_current(awdp);
+
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+
+ if (!erts_thr_progress_has_reached_this(current, awdp->cncld_tmrs.thr_prgr))
+ return aux_work & ~ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR;
+
+ ssi = awdp->ssi;
+ need_thr_progress = 0;
+ more_work = 0;
+
+ erts_handle_canceled_timers((void *) awdp->esdp,
+ &need_thr_progress,
+ &wakeup,
+ &more_work);
+ if (more_work) {
+ set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS);
+ unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR);
+ return ((aux_work & ~ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR)
+ | ERTS_SSI_AUX_WORK_CNCLD_TMRS);
+ }
+
+ if (need_thr_progress) {
+ if (wakeup == ERTS_THR_PRGR_INVALID)
+ wakeup = erts_thr_progress_later(awdp->esdp);
+ awdp->cncld_tmrs.thr_prgr = wakeup;
+ haw_thr_prgr_soft_wakeup(awdp, wakeup);
+ }
+ else {
+ unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR);
+ }
+
+ return aux_work & ~ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR;
+}
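
Both canceled-timer handlers follow the thread-progress handshake used throughout this file: work that cannot complete yet records a progress value in the future, raises the *_THR_PRGR companion flag, and is retried once every scheduler has passed that point. A deliberately simplified epoch-based sketch of the idea (the real erts_thr_progress_* machinery is considerably richer; try_cleanup() is hypothetical):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_ulong global_epoch;   /* advanced as all threads progress */

struct deferred {
    unsigned long ready_epoch;      /* retry once global_epoch >= this */
    bool pending;
};

/* Hypothetical: returns false if cleanup must wait for other threads. */
extern bool try_cleanup(void);

static void handle(struct deferred *d)
{
    if (d->pending
        && atomic_load_explicit(&global_epoch,
                                memory_order_acquire) < d->ready_epoch)
        return;                     /* not everyone has passed yet */

    if (try_cleanup()) {
        d->pending = false;         /* done */
    }
    else {
        /* record a point strictly in the future and wait for it */
        d->ready_epoch = atomic_load_explicit(&global_epoch,
                                              memory_order_relaxed) + 1;
        d->pending = true;
    }
}
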
+
+/*
* Handle scheduled thread progress later operations.
*/
#define ERTS_MAX_THR_PRGR_LATER_OPS 50
@@ -1826,11 +2235,11 @@ handle_thr_prgr_later_op(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int wait
int lops;
ErtsThrPrgrVal current = haw_thr_prgr_current(awdp);
-#ifdef ERTS_DIRTY_SCHEDULERS
ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
-#endif
+
for (lops = 0; lops < ERTS_MAX_THR_PRGR_LATER_OPS; lops++) {
ErtsThrPrgrLaterOp *lop = awdp->later_op.first;
+
if (!erts_thr_progress_has_reached_this(current, lop->later))
return aux_work & ~ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP;
awdp->later_op.first = lop->next;
@@ -1857,7 +2266,7 @@ enqueue_later_op(ErtsSchedulerData *esdp,
ErtsThrPrgrLaterOp *lop)
{
ErtsThrPrgrVal later = erts_thr_progress_later(esdp);
- ASSERT(esdp);
+ ASSERT(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp));
lop->func = later_func;
lop->data = later_data;
@@ -1873,20 +2282,15 @@ enqueue_later_op(ErtsSchedulerData *esdp,
return later;
}
-#endif /* ERTS_SMP */
void
erts_schedule_thr_prgr_later_op(void (*later_func)(void *),
void *later_data,
ErtsThrPrgrLaterOp *lop)
{
-#ifndef ERTS_SMP
- later_func(later_data);
-#else
ErtsSchedulerData *esdp = erts_get_scheduler_data();
ErtsThrPrgrVal later = enqueue_later_op(esdp, later_func, later_data, lop);
haw_thr_prgr_wakeup(&esdp->aux_work_data, later);
-#endif
}
void
@@ -1895,118 +2299,143 @@ erts_schedule_thr_prgr_later_cleanup_op(void (*later_func)(void *),
ErtsThrPrgrLaterOp *lop,
UWord size)
{
-#ifndef ERTS_SMP
- later_func(later_data);
-#else
ErtsSchedulerData *esdp = erts_get_scheduler_data();
ErtsThrPrgrVal later = enqueue_later_op(esdp, later_func, later_data, lop);
haw_thr_prgr_later_cleanup_op_wakeup(&esdp->aux_work_data, later, size);
-#endif
}
-#ifdef ERTS_SMP
+static ERTS_INLINE erts_aint32_t
+handle_debug_wait_completed(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
+{
+ ErtsSchedulerSleepInfo *ssi = awdp->ssi;
+ erts_aint32_t saved_aux_work, flags;
+
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+
+ flags = awdp->debug.wait_completed.flags;
+
+ if (aux_work & flags)
+ return aux_work;
-static erts_atomic32_t completed_dealloc_count;
+ saved_aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
+
+ if (saved_aux_work & flags)
+ return aux_work & ~ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED;
+
+ awdp->debug.wait_completed.callback(awdp->debug.wait_completed.arg);
+
+ awdp->debug.wait_completed.flags = 0;
+ awdp->debug.wait_completed.callback = NULL;
+ awdp->debug.wait_completed.arg = NULL;
+
+ unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED);
+
+ return aux_work & ~ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED;
+}
+
+static erts_atomic32_t debug_wait_completed_count;
+static int debug_wait_completed_flags;
static void
-completed_dealloc(void *vproc)
+thr_debug_wait_completed(void *vproc)
{
- if (erts_atomic32_dec_read_mb(&completed_dealloc_count) == 0) {
+ if (erts_atomic32_dec_read_mb(&debug_wait_completed_count) == 0) {
erts_resume((Process *) vproc, (ErtsProcLocks) 0);
- erts_smp_proc_dec_refc((Process *) vproc);
+ erts_proc_dec_refc((Process *) vproc);
}
}
static void
-setup_completed_dealloc(void *vproc)
+setup_thr_debug_wait_completed(void *vproc)
{
ErtsSchedulerData *esdp = erts_get_scheduler_data();
- ErtsAuxWorkData *awdp = (esdp
- ? &esdp->aux_work_data
- : aux_thread_aux_work_data);
- erts_alloc_fix_alloc_shrink(awdp->sched_id, 0);
- set_aux_work_flags_wakeup_nob(awdp->ssi, ERTS_SSI_AUX_WORK_DD);
- awdp->dd.completed_callback = completed_dealloc;
- awdp->dd.completed_arg = vproc;
+ ErtsAuxWorkData *awdp;
+ erts_aint32_t wait_flags, aux_work_flags;
+ awdp = esdp ? &esdp->aux_work_data : aux_thread_aux_work_data;
+
+ wait_flags = 0;
+ aux_work_flags = ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED;
+
+ if (debug_wait_completed_flags & ERTS_DEBUG_WAIT_COMPLETED_DEALLOCATIONS) {
+ erts_alloc_fix_alloc_shrink(awdp->sched_id, 0);
+ wait_flags |= (ERTS_SSI_AUX_WORK_DD
+ | ERTS_SSI_AUX_WORK_DD_THR_PRGR);
+ aux_work_flags |= ERTS_SSI_AUX_WORK_DD;
+ }
+
+ if (debug_wait_completed_flags & ERTS_DEBUG_WAIT_COMPLETED_TIMER_CANCELLATIONS) {
+ wait_flags |= (ERTS_SSI_AUX_WORK_CNCLD_TMRS
+ | ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR);
+ if (awdp->esdp && !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp))
+ aux_work_flags |= ERTS_SSI_AUX_WORK_CNCLD_TMRS;
+ }
+
+ set_aux_work_flags_wakeup_nob(awdp->ssi, aux_work_flags);
+
+ awdp->debug.wait_completed.flags = wait_flags;
+ awdp->debug.wait_completed.callback = thr_debug_wait_completed;
+ awdp->debug.wait_completed.arg = vproc;
}
-static void
-prep_setup_completed_dealloc(void *vproc)
+struct debug_lop {
+ ErtsThrPrgrLaterOp lop;
+ Process *proc;
+};
+
+static void later_thr_debug_wait_completed(void *vlop)
{
- erts_aint32_t count = (erts_aint32_t) (erts_no_schedulers+1);
- if (erts_atomic32_dec_read_mb(&completed_dealloc_count) == count) {
- /* scheduler threads */
- erts_schedule_multi_misc_aux_work(0,
- erts_no_schedulers,
- setup_completed_dealloc,
- vproc);
- /* aux_thread */
- erts_schedule_misc_aux_work(0,
- setup_completed_dealloc,
- vproc);
+ struct debug_lop *lop = vlop;
+ erts_aint32_t count = (erts_aint32_t) erts_no_schedulers;
+ count += 1; /* aux thread */
+ if (erts_atomic32_dec_read_mb(&debug_wait_completed_count) == count) {
+ /* scheduler threads */
+ erts_schedule_multi_misc_aux_work(0,
+ erts_no_schedulers,
+ setup_thr_debug_wait_completed,
+ lop->proc);
+ /* aux_thread */
+ erts_schedule_misc_aux_work(0,
+ setup_thr_debug_wait_completed,
+ lop->proc);
}
+ erts_free(ERTS_ALC_T_DEBUG, lop);
+}
+
+
+static void
+init_thr_debug_wait_completed(void *vproc)
+{
+ struct debug_lop* lop = erts_alloc(ERTS_ALC_T_DEBUG,
+ sizeof(struct debug_lop));
+ lop->proc = vproc;
+ erts_schedule_thr_prgr_later_op(later_thr_debug_wait_completed, lop, &lop->lop);
}
-#endif /* ERTS_SMP */
int
-erts_debug_wait_deallocations(Process *c_p)
+erts_debug_wait_completed(Process *c_p, int flags)
{
-#ifndef ERTS_SMP
- erts_alloc_fix_alloc_shrink(1, 0);
- return 1;
-#else
/* Only one process at a time can do this */
- erts_aint32_t count = (erts_aint32_t) (2*(erts_no_schedulers+1));
- if (0 == erts_atomic32_cmpxchg_mb(&completed_dealloc_count,
+ erts_aint32_t count = (erts_aint32_t) (2*erts_no_schedulers);
+ count += 1; /* aux thread */
+ if (0 == erts_atomic32_cmpxchg_mb(&debug_wait_completed_count,
count,
0)) {
+ debug_wait_completed_flags = flags;
erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
- erts_smp_proc_inc_refc(c_p);
- /* scheduler threads */
+ erts_proc_inc_refc(c_p);
+
+ /* First flush later-ops on all scheduler threads */
erts_schedule_multi_misc_aux_work(0,
erts_no_schedulers,
- prep_setup_completed_dealloc,
+ init_thr_debug_wait_completed,
(void *) c_p);
- /* aux_thread */
- erts_schedule_misc_aux_work(0,
- prep_setup_completed_dealloc,
- (void *) c_p);
return 1;
}
return 0;
-#endif
}
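
The cmpxchg from 0 in erts_debug_wait_completed() doubles as a try-lock: only the caller that atomically swings the counter from 0 to the participant count owns the operation, and the counter then serves as the countdown for the phases above. A hedged sketch of the same gate; the field name and participant handling are assumptions:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int in_progress;  /* 0 while the operation is free */

    /* Returns true if we claimed the single-owner operation. */
    static bool try_claim(int participants)
    {
        int expected = 0;
        /* succeeds only when no other caller holds the counter */
        return atomic_compare_exchange_strong(&in_progress,
                                              &expected, participants);
    }
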
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
-void
-erts_smp_notify_check_children_needed(void)
-{
- int i;
- for (i = 0; i < erts_no_schedulers; i++)
- set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(i),
- ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
-#ifdef ERTS_DIRTY_SCHEDULERS
- for (i = 0; i < erts_no_dirty_cpu_schedulers; i++)
- set_aux_work_flags_wakeup_nob(ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(i),
- ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
- for (i = 0; i < erts_no_dirty_io_schedulers; i++)
- set_aux_work_flags_wakeup_nob(ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(i),
- ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
-#endif
-}
-
-static ERTS_INLINE erts_aint32_t
-handle_check_children(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
-{
- unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
- erts_check_children();
- return aux_work & ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN;
-}
-
-#endif
-
static void
notify_reap_ports_relb(void)
{
@@ -2017,17 +2446,18 @@ notify_reap_ports_relb(void)
}
}
-erts_smp_atomic32_t erts_halt_progress;
+erts_atomic32_t erts_halt_progress;
int erts_halt_code;
static ERTS_INLINE erts_aint32_t
handle_reap_ports(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
{
unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_REAP_PORTS);
- awdp->esdp->run_queue->halt_in_progress = 1;
- if (erts_smp_atomic32_dec_read_acqb(&erts_halt_progress) == 0) {
+ ERTS_RUNQ_FLGS_SET(awdp->esdp->run_queue, ERTS_RUNQ_FLG_HALTING);
+
+ if (erts_atomic32_dec_read_acqb(&erts_halt_progress) == 0) {
int i, max = erts_ptab_max(&erts_port);
- erts_smp_atomic32_set_nob(&erts_halt_progress, 1);
+ erts_atomic32_set_nob(&erts_halt_progress, 1);
for (i = 0; i < max; i++) {
erts_aint32_t state;
Port *prt = erts_pix2port(i);
@@ -2040,27 +2470,69 @@ handle_reap_ports(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
/* We need to set the halt flag - get the port lock */
- erts_smp_port_lock(prt);
+ erts_port_lock(prt);
state = erts_atomic32_read_nob(&prt->state);
if (!(state & (ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
| ERTS_PORT_SFLG_HALT))) {
state = erts_atomic32_read_bor_relb(&prt->state,
ERTS_PORT_SFLG_HALT);
- erts_smp_atomic32_inc_nob(&erts_halt_progress);
+ erts_atomic32_inc_nob(&erts_halt_progress);
if (!(state & (ERTS_PORT_SFLG_EXITING|ERTS_PORT_SFLG_CLOSING)))
- erts_deliver_port_exit(prt, prt->common.id, am_killed, 0);
+ erts_deliver_port_exit(prt, prt->common.id, am_killed, 0, 1);
}
erts_port_release(prt);
}
- if (erts_smp_atomic32_dec_read_nob(&erts_halt_progress) == 0) {
- erl_exit_flush_async(erts_halt_code, "");
+ if (erts_atomic32_dec_read_nob(&erts_halt_progress) == 0) {
+ erts_flush_async_exit(erts_halt_code, "");
}
}
return aux_work & ~ERTS_SSI_AUX_WORK_REAP_PORTS;
}
+void
+erts_notify_new_aux_yield_work(ErtsSchedulerData *esdp)
+{
+ ASSERT(esdp == erts_get_scheduler_data());
+ /* Always called by the scheduler itself... */
+ set_aux_work_flags_wakeup_nob(esdp->ssi, ERTS_SSI_AUX_WORK_YIELD);
+}
+
+static ERTS_INLINE erts_aint32_t
+handle_yield(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
+{
+ int yield = 0;
+ /*
+ * Yield operations are always requested by the scheduler itself.
+ *
+ * The following handlers should *not* set the ERTS_SSI_AUX_WORK_YIELD
+ * flag in order to indicate more work. They should instead return
+ * information so this "main handler" can manipulate the flag...
+ *
+ * The following handlers should be able to handle being called
+ * even though no work is to be done...
+ */
+
+ /* Various yielding operations... */
+
+ yield |= erts_handle_yielded_ets_all_request(awdp->esdp,
+ &awdp->yield.ets_all);
+
+ /*
+ * Other yielding operations...
+ *
+ */
+
+ if (!yield) {
+ unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_YIELD);
+ return aux_work & ~ERTS_SSI_AUX_WORK_YIELD;
+ }
+
+ return aux_work;
+}
+
+
#if HAVE_ERTS_MSEG
static ERTS_INLINE erts_aint32_t
@@ -2073,11 +2545,33 @@ handle_mseg_cache_check(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiti
#endif
+
+static ERTS_INLINE erts_aint32_t
+handle_pending_exiters(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
+{
+ ErtsProcList *pnd_xtrs;
+ ErtsRunQueue *rq;
+
+ rq = awdp->esdp->run_queue;
+ unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_PENDING_EXITERS);
+
+ erts_runq_lock(rq);
+ pnd_xtrs = rq->procs.pending_exiters;
+ rq->procs.pending_exiters = NULL;
+ erts_runq_unlock(rq);
+
+ if (erts_proclist_fetch(&pnd_xtrs, NULL))
+ do_handle_pending_exiters(pnd_xtrs);
+
+ return aux_work & ~ERTS_SSI_AUX_WORK_PENDING_EXITERS;
+}
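
handle_pending_exiters() above uses the detach-then-process idiom: the whole pending list is unhooked while the run-queue lock is held, and the per-process work runs only after the lock is dropped, so enqueuers are never blocked by it. A generic sketch, assuming a plain singly linked list guarded by a pthread mutex:

    #include <pthread.h>
    #include <stddef.h>

    struct node { struct node *next; /* payload ... */ };

    static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *q_head;

    static void drain(void)
    {
        struct node *list;

        /* detach the whole list under the lock ... */
        pthread_mutex_lock(&q_lock);
        list = q_head;
        q_head = NULL;
        pthread_mutex_unlock(&q_lock);

        /* ... and walk it unlocked, so expensive per-node work
         * never delays concurrent enqueuers */
        while (list) {
            struct node *next = list->next;
            /* handle node here */
            list = next;
        }
    }
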
+
+
static ERTS_INLINE erts_aint32_t
handle_setup_aux_work_timer(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
{
unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_SET_TMO);
- setup_aux_work_timer();
+ setup_aux_work_timer(awdp->esdp);
return aux_work & ~ERTS_SSI_AUX_WORK_SET_TMO;
}
@@ -2092,16 +2586,18 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
ERTS_DBG_CHK_AUX_WORK_VAL(aux_work); \
if (!(aux_work & ~ignore)) { \
ERTS_DBG_CHK_AUX_WORK_VAL(aux_work); \
+ ERTS_MSACC_UPDATE_CACHE(); \
+ ERTS_MSACC_POP_STATE_M(); \
return aux_work; \
} \
}
erts_aint32_t aux_work = orig_aux_work;
erts_aint32_t ignore = 0;
+ ERTS_MSACC_PUSH_AND_SET_STATE_M(ERTS_MSACC_STATE_AUX);
-#ifdef ERTS_SMP
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
haw_thr_prgr_current_reset(awdp);
-#endif
ERTS_DBG_CHK_AUX_WORK_VAL(aux_work);
ASSERT(aux_work);
@@ -2120,7 +2616,6 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
 * Keep ERTS_SSI_AUX_WORK flags in expected frequency order relative
 * to each other. Most frequent first.
*/
-#ifdef ERTS_SMP
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP,
handle_delayed_aux_work_wakeup);
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_DD,
@@ -2128,37 +2623,33 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
/* DD must be before DD_THR_PRGR */
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_DD_THR_PRGR,
handle_delayed_dealloc_thr_prgr);
-#endif
HANDLE_AUX_WORK((ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
| ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC),
handle_fix_alloc);
-#ifdef ERTS_SMP
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP,
handle_thr_prgr_later_op);
-#endif
+ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_CNCLD_TMRS,
+ handle_canceled_timers);
+ /* CNCLD_TMRS must be before CNCLD_TMRS_THR_PRGR */
+ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR,
+ handle_canceled_timers_thr_prgr);
-#if ERTS_USE_ASYNC_READY_Q
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_ASYNC_READY,
handle_async_ready);
/* ASYNC_READY must be before ASYNC_READY_CLEAN */
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN,
handle_async_ready_clean);
-#endif
-#ifdef ERTS_SMP
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_MISC_THR_PRGR,
handle_misc_aux_work_thr_prgr);
-#endif
/* MISC_THR_PRGR must be before MISC */
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_MISC,
handle_misc_aux_work);
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_CHECK_CHILDREN,
- handle_check_children);
-#endif
+ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_PENDING_EXITERS,
+ handle_pending_exiters);
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_SET_TMO,
handle_setup_aux_work_timer);
@@ -2168,16 +2659,27 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
handle_mseg_cache_check);
#endif
+ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_YIELD,
+ handle_yield);
+
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_REAP_PORTS,
handle_reap_ports);
+ /*
+     * ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED *needs* to be
+ * the last flag checked!
+ */
+
+ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED,
+ handle_debug_wait_completed);
+
ERTS_DBG_CHK_AUX_WORK_VAL(aux_work);
-#ifdef ERTS_SMP
if (waiting && !aux_work)
haw_thr_prgr_current_check_progress(awdp);
-#endif
+ ERTS_MSACC_UPDATE_CACHE();
+ ERTS_MSACC_POP_STATE_M();
return aux_work;
#undef HANDLE_AUX_WORK
@@ -2186,17 +2688,42 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
typedef struct {
union {
- ErlTimer data;
- char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErlTimer))];
+ ErtsTWheelTimer data;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsTWheelTimer))];
} timer;
int initialized;
erts_atomic32_t refc;
+#ifdef DEBUG
+ erts_atomic32_t used;
+#endif
erts_atomic32_t type[1];
} ErtsAuxWorkTmo;
static ErtsAuxWorkTmo *aux_work_tmo;
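
ErtsAuxWorkTmo pads the timer with a union so the per-scheduler type[] counters that follow land on their own cache line, and declares type[1] while over-allocating the trailing array (the pre-C99 flexible-array idiom). A self-contained sketch of both tricks, assuming a 64-byte cache line:

    #include <stdatomic.h>
    #include <stdlib.h>

    #define CACHE_LINE 64   /* assumed line size */
    #define ALIGN_UP(sz) (((sz) + CACHE_LINE - 1) & ~(size_t)(CACHE_LINE - 1))

    struct padded_timer {
        union {
            long data;                         /* stand-in for the timer */
            char pad[ALIGN_UP(sizeof(long))];  /* round size up to full lines */
        } timer;
        atomic_int type[1];   /* declared with one slot, over-allocated below */
    };

    static struct padded_timer *
    alloc_padded(int extra_slots)
    {
        /* extra room extends the trailing array past its declared size */
        return malloc(sizeof(struct padded_timer)
                      + sizeof(atomic_int) * (size_t) extra_slots);
    }
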
+static ERTS_INLINE void
+start_aux_work_timer(ErtsSchedulerData *esdp)
+{
+ ErtsMonotonicTime tmo = erts_get_monotonic_time(esdp);
+#ifdef DEBUG
+ Uint no = (Uint) erts_atomic32_xchg_mb(&aux_work_tmo->used,
+ (erts_aint32_t) esdp->no);
+ ASSERT(esdp->type == ERTS_SCHED_NORMAL);
+ ASSERT(!no);
+#endif
+
+ tmo = ERTS_MONOTONIC_TO_CLKTCKS(tmo-1);
+ tmo += ERTS_MSEC_TO_CLKTCKS(1000) + 1;
+ erts_twheel_init_timer(&aux_work_tmo->timer.data);
+ ASSERT(esdp);
+ erts_twheel_set_timer(esdp->timer_wheel,
+ &aux_work_tmo->timer.data,
+ aux_work_timeout,
+ (void *) esdp,
+ tmo);
+}
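
The two conversion lines in start_aux_work_timer() round the wakeup time up to whole clock ticks so the one-second timeout can never fire early: floor((t - 1)/tick) + 1 equals ceil(t/tick) for positive t. A worked helper under the assumption that both arguments are positive nanosecond counts:

    /* Smallest tick count that is not before time_ns:
     * ceil(time_ns / tick_ns) without floating point. */
    static long long
    to_ticks_ceil(long long time_ns, long long tick_ns)
    {
        return (time_ns - 1) / tick_ns + 1;
    }
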
+
static void
aux_work_timeout_early_init(int no_schedulers)
{
@@ -2211,6 +2738,9 @@ aux_work_timeout_early_init(int no_schedulers)
p = (UWord) malloc((sizeof(ErtsAuxWorkTmo)
+ sizeof(erts_atomic32_t)*(no_schedulers+1))
+ ERTS_CACHE_LINE_SIZE-1);
+ if (!p) {
+ ERTS_INTERNAL_ERROR("malloc failed to allocate memory!");
+ }
if (p & ERTS_CACHE_LINE_MASK)
p = (p & ~ERTS_CACHE_LINE_MASK) + ERTS_CACHE_LINE_SIZE;
ASSERT((p & ERTS_CACHE_LINE_MASK) == 0);
@@ -2218,35 +2748,35 @@ aux_work_timeout_early_init(int no_schedulers)
aux_work_tmo = (ErtsAuxWorkTmo *) p;
aux_work_tmo->initialized = 0;
erts_atomic32_init_nob(&aux_work_tmo->refc, 0);
+#ifdef DEBUG
+ erts_atomic32_init_nob(&aux_work_tmo->used, 0);
+#endif
for (i = 0; i <= no_schedulers; i++)
erts_atomic32_init_nob(&aux_work_tmo->type[i], 0);
}
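
The malloc/round-up sequence in aux_work_timeout_early_init() is manual cache-line alignment: over-allocate by one line, then advance the pointer to the next boundary. A hedged sketch of the same trick; note that the raw malloc pointer is discarded, which is only acceptable because, as in the patch, this boot-time block is never freed:

    #include <stdint.h>
    #include <stdlib.h>

    #define CACHE_LINE 64                       /* assumed line size */
    #define LINE_MASK  ((uintptr_t)(CACHE_LINE - 1))

    static void *
    cache_aligned_alloc(size_t size)
    {
        uintptr_t p = (uintptr_t) malloc(size + CACHE_LINE - 1);
        if (!p)
            return NULL;
        if (p & LINE_MASK)                      /* not on a boundary yet? */
            p = (p & ~LINE_MASK) + CACHE_LINE;  /* round up to the next one */
        return (void *) p;
    }
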
void
-aux_work_timeout_late_init(void)
+erts_aux_work_timeout_late_init(ErtsSchedulerData *esdp)
{
aux_work_tmo->initialized = 1;
- if (erts_atomic32_read_nob(&aux_work_tmo->refc)) {
- aux_work_tmo->timer.data.active = 0;
- erts_set_timer(&aux_work_tmo->timer.data,
- aux_work_timeout,
- NULL,
- NULL,
- 1000);
- }
+ if (erts_atomic32_read_acqb(&aux_work_tmo->refc))
+ start_aux_work_timer(esdp);
}
static void
-aux_work_timeout(void *unused)
+aux_work_timeout(void *vesdp)
{
erts_aint32_t refc;
int i;
-#ifdef ERTS_SMP
- i = 0;
-#else
- i = 1;
+#ifdef DEBUG
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ Uint no = (Uint) erts_atomic32_xchg_mb(&aux_work_tmo->used, 0);
+ ASSERT(no == esdp->no);
+ ASSERT(esdp == (ErtsSchedulerData *) vesdp);
#endif
+ i = 0;
+
for (; i <= erts_no_schedulers; i++) {
erts_aint32_t type;
type = erts_atomic32_read_acqb(&aux_work_tmo->type[i]);
@@ -2260,32 +2790,18 @@ aux_work_timeout(void *unused)
if (refc != 1
|| 1 != erts_atomic32_cmpxchg_relb(&aux_work_tmo->refc, 0, 1)) {
/* Setup next timeout... */
- aux_work_tmo->timer.data.active = 0;
- erts_set_timer(&aux_work_tmo->timer.data,
- aux_work_timeout,
- NULL,
- NULL,
- 1000);
+ start_aux_work_timer((ErtsSchedulerData *) vesdp);
}
}
static void
-setup_aux_work_timer(void)
+setup_aux_work_timer(ErtsSchedulerData *esdp)
{
-#ifndef ERTS_SMP
- if (!erts_get_scheduler_data())
+ if (!esdp || !esdp->timer_wheel)
set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(0),
ERTS_SSI_AUX_WORK_SET_TMO);
else
-#endif
- {
- aux_work_tmo->timer.data.active = 0;
- erts_set_timer(&aux_work_tmo->timer.data,
- aux_work_timeout,
- NULL,
- NULL,
- 1000);
- }
+ start_aux_work_timer(esdp);
}
erts_aint32_t
@@ -2293,9 +2809,6 @@ erts_set_aux_work_timeout(int ix, erts_aint32_t type, int enable)
{
erts_aint32_t old, refc;
-#ifndef ERTS_SMP
- ix = 1;
-#endif
ERTS_DBG_CHK_AUX_WORK_VAL(type);
ERTS_DBG_CHK_AUX_WORK_VAL(erts_atomic32_read_nob(&aux_work_tmo->type[ix]));
@@ -2316,112 +2829,26 @@ erts_set_aux_work_timeout(int ix, erts_aint32_t type, int enable)
if (refc == 1) {
erts_atomic32_inc_acqb(&aux_work_tmo->refc);
if (aux_work_tmo->initialized)
- setup_aux_work_timer();
+ setup_aux_work_timer(erts_get_scheduler_data());
}
}
return old;
}
-
-
-static ERTS_INLINE void
-sched_waiting_sys(Uint no, ErtsRunQueue *rq)
-{
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
- ASSERT(rq->waiting >= 0);
- (void) ERTS_RUNQ_FLGS_SET(rq, (ERTS_RUNQ_FLG_OUT_OF_WORK
- | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK));
- rq->waiting++;
- rq->waiting *= -1;
- rq->woken = 0;
- if (erts_system_profile_flags.scheduler)
- profile_scheduler(make_small(no), am_inactive);
-}
-
-static ERTS_INLINE void
-sched_active_sys(Uint no, ErtsRunQueue *rq)
-{
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
-#endif
- ASSERT(rq->waiting < 0);
- rq->waiting *= -1;
- rq->waiting--;
- if (erts_system_profile_flags.scheduler)
- profile_scheduler(make_small(no), am_active);
-}
-
Uint
erts_active_schedulers(void)
{
Uint as = erts_no_schedulers;
- ERTS_ATOMIC_FOREACH_RUNQ(rq, as -= abs(rq->waiting));
+ ERTS_ATOMIC_FOREACH_NORMAL_RUNQ(rq, as -= abs(rq->waiting));
- ASSERT(as >= 0);
return as;
}
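
The abs(rq->waiting) above works because the waiting field appears to encode an extra bit in its sign: the removed sched_waiting_sys() negated it to mark a scheduler sleeping in the poll loop, while the magnitude stays the waiter count either way. A sketch of the decode step, under that reading:

    /* rq->waiting < 0: scheduler sleeps in the poll loop;
     * rq->waiting > 0: scheduler sleeps on its event.
     * |rq->waiting| is the waiter count in both cases. */
    static int waiting_count(int waiting)
    {
        return waiting < 0 ? -waiting : waiting;
    }
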
-#ifdef ERTS_SMP
-
-static ERTS_INLINE void
-clear_sys_scheduling(void)
-{
- erts_smp_atomic32_set_mb(&doing_sys_schedule, 0);
-}
-
-static ERTS_INLINE int
-try_set_sys_scheduling(void)
-{
- return 0 == erts_smp_atomic32_cmpxchg_acqb(&doing_sys_schedule, 1, 0);
-}
-
-#endif
-
-static ERTS_INLINE int
-prepare_for_sys_schedule(ErtsSchedulerData *esdp)
-{
-#ifdef ERTS_SMP
- while (!erts_port_task_have_outstanding_io_tasks()
- && try_set_sys_scheduling()) {
-#ifdef ERTS_SCHED_ONLY_POLL_SCHED_1
- if (esdp->no != 1) {
- /* If we are not scheduler 1 and ERTS_SCHED_ONLY_POLL_SCHED_1 is used
- then we make sure to wake scheduler 1 */
- ErtsRunQueue *rq = ERTS_RUNQ_IX(0);
- clear_sys_scheduling();
- wake_scheduler(rq);
- return 0;
- }
-#endif
- if (!erts_port_task_have_outstanding_io_tasks())
- return 1;
- clear_sys_scheduling();
- }
- return 0;
-#else
- return !erts_port_task_have_outstanding_io_tasks();
-#endif
-}
-
-#ifdef ERTS_SMP
-
-static ERTS_INLINE void
-sched_change_waiting_sys_to_waiting(Uint no, ErtsRunQueue *rq)
-{
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
-#endif
- ASSERT(rq->waiting < 0);
- rq->waiting *= -1;
-}
-
static ERTS_INLINE void
sched_waiting(Uint no, ErtsRunQueue *rq)
{
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
(void) ERTS_RUNQ_FLGS_SET(rq, (ERTS_RUNQ_FLG_OUT_OF_WORK
| ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK));
if (rq->waiting < 0)
@@ -2436,7 +2863,7 @@ sched_waiting(Uint no, ErtsRunQueue *rq)
static ERTS_INLINE void
sched_active(Uint no, ErtsRunQueue *rq)
{
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
if (rq->waiting < 0)
rq->waiting++;
else
@@ -2445,19 +2872,12 @@ sched_active(Uint no, ErtsRunQueue *rq)
profile_scheduler(make_small(no), am_active);
}
-static int ERTS_INLINE
-ongoing_multi_scheduling_block(void)
-{
- ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&schdlr_sspnd.mtx));
- return schdlr_sspnd.msb.ongoing;
-}
-
static ERTS_INLINE void
empty_runq_aux(ErtsRunQueue *rq, Uint32 old_flags)
{
if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && old_flags & ERTS_RUNQ_FLG_NONEMPTY) {
#ifdef DEBUG
- erts_aint32_t empty = erts_smp_atomic32_read_nob(&no_empty_run_queues);
+ erts_aint32_t empty = erts_atomic32_read_nob(&no_empty_run_queues);
/*
* For a short period of time no_empty_run_queues may have
* been increased twice for a specific run queue.
@@ -2465,9 +2885,9 @@ empty_runq_aux(ErtsRunQueue *rq, Uint32 old_flags)
ASSERT(0 <= empty && empty < 2*erts_no_run_queues);
#endif
if (!erts_runq_supervision_interval)
- erts_smp_atomic32_inc_relb(&no_empty_run_queues);
+ erts_atomic32_inc_relb(&no_empty_run_queues);
else {
- erts_smp_atomic32_inc_mb(&no_empty_run_queues);
+ erts_atomic32_inc_mb(&no_empty_run_queues);
if (erts_atomic_read_nob(&runq_supervisor_sleeping))
ethr_event_set(&runq_supervision_event);
}
@@ -2497,7 +2917,7 @@ non_empty_runq(ErtsRunQueue *rq)
Uint32 old_flags = ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_NONEMPTY);
if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && (!(old_flags & ERTS_RUNQ_FLG_NONEMPTY))) {
#ifdef DEBUG
- erts_aint32_t empty = erts_smp_atomic32_read_nob(&no_empty_run_queues);
+ erts_aint32_t empty = erts_atomic32_read_nob(&no_empty_run_queues);
/*
* For a short period of time no_empty_run_queues may have
* been increased twice for a specific run queue.
@@ -2505,10 +2925,10 @@ non_empty_runq(ErtsRunQueue *rq)
ASSERT(0 < empty && empty <= 2*erts_no_run_queues);
#endif
if (!erts_runq_supervision_interval)
- erts_smp_atomic32_dec_relb(&no_empty_run_queues);
+ erts_atomic32_dec_relb(&no_empty_run_queues);
else {
erts_aint32_t no;
- no = erts_smp_atomic32_dec_read_mb(&no_empty_run_queues);
+ no = erts_atomic32_dec_read_mb(&no_empty_run_queues);
if (no > 0 && erts_atomic_read_nob(&runq_supervisor_sleeping))
ethr_event_set(&runq_supervision_event);
}
@@ -2531,12 +2951,13 @@ static erts_aint32_t
sched_prep_spin_wait(ErtsSchedulerSleepInfo *ssi)
{
erts_aint32_t oflgs;
- erts_aint32_t nflgs = (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_WAITING);
+ erts_aint32_t nflgs;
erts_aint32_t xflgs = 0;
do {
- oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
+ nflgs = (xflgs & ERTS_SSI_FLG_MSB_EXEC);
+ nflgs |= ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING;
+ oflgs = erts_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
xflgs = oflgs;
@@ -2553,11 +2974,11 @@ sched_prep_cont_spin_wait(ErtsSchedulerSleepInfo *ssi)
erts_aint32_t xflgs = ERTS_SSI_FLG_WAITING;
do {
- oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
xflgs = oflgs;
- nflgs |= oflgs & ERTS_SSI_FLG_SUSPENDED;
+ nflgs |= oflgs & (ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC);
} while (oflgs & ERTS_SSI_FLG_WAITING);
return oflgs;
}
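
sched_prep_spin_wait() and sched_prep_cont_spin_wait() follow the standard cmpxchg retry shape: rebuild the desired flag word from the last observed value, carry over bits that other threads own (now including ERTS_SSI_FLG_MSB_EXEC), and retry until the exchange succeeds. A condensed C11 rendering with made-up bit values:

    #include <stdatomic.h>
    #include <stdint.h>

    #define FLG_SLEEPING  (1u << 0)   /* illustrative bit assignments */
    #define FLG_WAITING   (1u << 1)
    #define FLG_MSB_EXEC  (1u << 3)

    static uint32_t
    prep_spin_wait(_Atomic uint32_t *flags)
    {
        uint32_t old = 0, new;
        do {
            /* preserve the bit another thread may have set for us */
            new = (old & FLG_MSB_EXEC) | FLG_SLEEPING | FLG_WAITING;
            if (atomic_compare_exchange_weak(flags, &old, new))
                return new;
            /* on failure, old now holds the freshly observed value */
        } while (!(old & FLG_WAITING));
        return old;
    }
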
@@ -2570,7 +2991,7 @@ sched_spin_wait(ErtsSchedulerSleepInfo *ssi, int spincount)
erts_aint32_t flgs;
do {
- flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
+ flgs = erts_atomic32_read_acqb(&ssi->flags);
if ((flgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING))
!= (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING)) {
break;
@@ -2595,11 +3016,11 @@ sched_set_sleeptype(ErtsSchedulerSleepInfo *ssi, erts_aint32_t sleep_type)
erts_tse_reset(ssi->event);
else {
ASSERT(sleep_type == ERTS_SSI_FLG_POLL_SLEEPING);
- erts_sys_schedule_interrupt(0);
+ erts_check_io_interrupt(ssi->psi, 0);
}
while (1) {
- oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
if ((oflgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING))
@@ -2607,7 +3028,7 @@ sched_set_sleeptype(ErtsSchedulerSleepInfo *ssi, erts_aint32_t sleep_type)
return oflgs;
}
xflgs = oflgs;
- nflgs |= oflgs & ERTS_SSI_FLG_SUSPENDED;
+ nflgs |= oflgs & (ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC);
}
}
@@ -2626,7 +3047,7 @@ static void
thr_prgr_prep_wait(void *vssi)
{
ErtsSchedulerSleepInfo *ssi = (ErtsSchedulerSleepInfo *) vssi;
- erts_smp_atomic32_read_bor_acqb(&ssi->flags,
+ erts_atomic32_read_bor_acqb(&ssi->flags,
ERTS_SSI_FLG_SLEEPING);
}
@@ -2641,7 +3062,7 @@ thr_prgr_wait(void *vssi)
while (1) {
erts_aint32_t aflgs, nflgs;
nflgs = xflgs | ERTS_SSI_FLG_TSE_SLEEPING;
- aflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
+ aflgs = erts_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
if (aflgs == xflgs) {
erts_tse_wait(ssi->event);
break;
@@ -2656,13 +3077,19 @@ static void
thr_prgr_fin_wait(void *vssi)
{
ErtsSchedulerSleepInfo *ssi = (ErtsSchedulerSleepInfo *) vssi;
- erts_smp_atomic32_read_band_nob(&ssi->flags,
+ erts_atomic32_read_band_nob(&ssi->flags,
~(ERTS_SSI_FLG_SLEEPING
| ERTS_SSI_FLG_TSE_SLEEPING));
}
static void init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp);
+void
+erts_aux_thread_poke()
+{
+ erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(-1));
+}
+
static void *
aux_thread(void *unused)
{
@@ -2671,9 +3098,20 @@ aux_thread(void *unused)
erts_aint32_t aux_work;
ErtsThrPrgrCallbacks callbacks;
int thr_prgr_active = 1;
+ ERTS_MSACC_DECLARE_CACHE();
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ {
+ char buf[] = "aux_thread";
+ erts_lc_set_thread_name(buf);
+ }
+#endif
+ erts_port_task_pre_alloc_init_thread();
ssi->event = erts_tse_fetch();
+ erts_msacc_init_thread("aux", 1, 1);
+
callbacks.arg = (void *) ssi;
callbacks.wakeup = thr_prgr_wakeup;
callbacks.prepare_wait = thr_prgr_prep_wait;
@@ -2684,8 +3122,14 @@ aux_thread(void *unused)
init_aux_work_data(awdp, NULL, NULL);
awdp->ssi = ssi;
+#if ERTS_POLL_USE_FALLBACK
+ ssi->psi = erts_create_pollset_thread(-1);
+#endif
+
sched_prep_spin_wait(ssi);
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_OTHER);
+
while (1) {
erts_aint32_t flgs;
@@ -2694,32 +3138,54 @@ aux_thread(void *unused)
if (!thr_prgr_active)
erts_thr_progress_active(NULL, thr_prgr_active = 1);
aux_work = handle_aux_work(awdp, aux_work, 1);
+ ERTS_MSACC_UPDATE_CACHE();
if (aux_work && erts_thr_progress_update(NULL))
erts_thr_progress_leader_update(NULL);
}
if (!aux_work) {
+
+#ifdef ERTS_BREAK_REQUESTED
+ if (ERTS_BREAK_REQUESTED)
+ erts_do_break_handling();
+#endif
+
if (thr_prgr_active)
erts_thr_progress_active(NULL, thr_prgr_active = 0);
- erts_thr_progress_prepare_wait(NULL);
- ERTS_SCHED_FAIR_YIELD();
+#if ERTS_POLL_USE_FALLBACK
flgs = sched_spin_wait(ssi, 0);
if (flgs & ERTS_SSI_FLG_SLEEPING) {
ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_POLL_SLEEPING);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ erts_check_io(ssi->psi);
+ }
+ }
+#else
+ erts_thr_progress_prepare_wait(NULL);
+
+ flgs = sched_spin_wait(ssi, 0);
+
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
if (flgs & ERTS_SSI_FLG_SLEEPING) {
- int res;
+ int res;
ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING);
ASSERT(flgs & ERTS_SSI_FLG_WAITING);
- do {
- res = erts_tse_wait(ssi->event);
- } while (res == EINTR);
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_SLEEP);
+ do {
+ res = erts_tse_wait(ssi->event);
+ } while (res == EINTR);
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_OTHER);
}
- }
- erts_thr_progress_finalize_wait(NULL);
+ }
+ erts_thr_progress_finalize_wait(NULL);
+#endif
}
flgs = sched_prep_spin_wait(ssi);
@@ -2727,7 +3193,79 @@ aux_thread(void *unused)
return NULL;
}
-#endif /* ERTS_SMP */
+static void *
+poll_thread(void *arg)
+{
+ int id = (int)(UWord)arg;
+ ErtsAuxWorkData *awdp = poll_thread_aux_work_data+id;
+ ErtsSchedulerSleepInfo *ssi = ERTS_POLL_THREAD_SLEEP_INFO_IX(id);
+ erts_aint32_t aux_work;
+ ErtsThrPrgrCallbacks callbacks;
+ int thr_prgr_active = 1;
+ struct erts_poll_thread *psi = erts_create_pollset_thread(id);
+ ERTS_MSACC_DECLARE_CACHE();
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ {
+ char buf[] = "poll_thread";
+ erts_lc_set_thread_name(buf);
+ }
+#endif
+
+ erts_port_task_pre_alloc_init_thread();
+ ssi->event = erts_tse_fetch();
+
+ erts_msacc_init_thread("poll", id, 0);
+
+ callbacks.arg = (void *) ssi;
+ callbacks.wakeup = thr_prgr_wakeup;
+ callbacks.prepare_wait = thr_prgr_prep_wait;
+ callbacks.wait = thr_prgr_wait;
+ callbacks.finalize_wait = thr_prgr_fin_wait;
+
+ erts_thr_progress_register_managed_thread(NULL, &callbacks, 0);
+ init_aux_work_data(awdp, NULL, NULL);
+ awdp->ssi = ssi;
+ ssi->psi = psi;
+
+ sched_prep_spin_wait(ssi);
+
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_OTHER);
+
+ while (1) {
+ erts_aint32_t flgs;
+
+ aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
+ if (aux_work) {
+ if (!thr_prgr_active)
+ erts_thr_progress_active(NULL, thr_prgr_active = 1);
+ aux_work = handle_aux_work(awdp, aux_work, 1);
+ ERTS_MSACC_UPDATE_CACHE();
+ if (aux_work && erts_thr_progress_update(NULL))
+ erts_thr_progress_leader_update(NULL);
+ }
+
+ if (!aux_work) {
+ if (thr_prgr_active)
+ erts_thr_progress_active(NULL, thr_prgr_active = 0);
+
+ flgs = sched_spin_wait(ssi, 0);
+
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_POLL_SLEEPING);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ erts_check_io(psi);
+ }
+ }
+ }
+
+ flgs = sched_prep_spin_wait(ssi);
+ }
+ return NULL;
+}
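
aux_thread() and poll_thread() share a two-step sleep protocol: first announce SLEEPING|WAITING, then upgrade to the concrete sleep type (poll or event), re-checking the flag word at each step so a concurrent waker that clears the flags aborts the block before it happens. A condensed sketch under assumed flag values:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define SLEEPING (1u << 0)   /* illustrative bit assignments */
    #define WAITING  (1u << 1)
    #define POLLING  (1u << 2)

    /* Returns true only if it is still safe to block in poll. */
    static bool may_block(_Atomic unsigned *flags)
    {
        unsigned exp;

        /* step 1: announce the intent to sleep */
        exp = 0;
        if (!atomic_compare_exchange_strong(flags, &exp, SLEEPING | WAITING))
            return false;           /* someone poked us already */

        /* step 2: publish the concrete sleep type */
        exp = SLEEPING | WAITING;
        if (!atomic_compare_exchange_strong(flags, &exp,
                                            SLEEPING | WAITING | POLLING))
            return false;           /* woken between the two steps */

        return true;                /* step 3: block in the kernel */
    }
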
static void
scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
@@ -2736,314 +3274,168 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
ErtsSchedulerSleepInfo *ssi = esdp->ssi;
int spincount;
erts_aint32_t aux_work = 0;
-#ifdef ERTS_SMP
int thr_prgr_active = 1;
erts_aint32_t flgs;
+ ERTS_MSACC_PUSH_STATE();
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
-#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
- erts_smp_spin_lock(&rq->sleepers.lock);
-#endif
+ erts_spin_lock(&rq->sleepers.lock);
flgs = sched_prep_spin_wait(ssi);
if (flgs & ERTS_SSI_FLG_SUSPENDED) {
/* Go suspend instead... */
-#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
- erts_smp_spin_unlock(&rq->sleepers.lock);
-#endif
+ erts_spin_unlock(&rq->sleepers.lock);
return;
}
-#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) {
ssi->prev = NULL;
ssi->next = rq->sleepers.list;
if (rq->sleepers.list)
rq->sleepers.list->prev = ssi;
rq->sleepers.list = ssi;
- erts_smp_spin_unlock(&rq->sleepers.lock);
+ erts_spin_unlock(&rq->sleepers.lock);
+ dirty_active(esdp, -1);
}
-#endif
-
- /*
- * If all schedulers are waiting, one of them *should*
- * be waiting in erl_sys_schedule()
- */
-
- if (ERTS_SCHEDULER_IS_DIRTY(esdp) || !prepare_for_sys_schedule(esdp)) {
-
- sched_waiting(esdp->no, rq);
-
- erts_smp_runq_unlock(rq);
-
- spincount = sched_busy_wait.tse;
-
- tse_wait:
-
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && thr_prgr_active != working)
- sched_wall_time_change(esdp, thr_prgr_active);
-
- while (1) {
-
- aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
- if (aux_work) {
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- sched_wall_time_change(esdp, 1);
- }
- aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1);
- if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp)
- && erts_thr_progress_update(esdp))
- erts_thr_progress_leader_update(esdp);
- }
- if (aux_work)
- flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
- else {
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- if (thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 0);
- sched_wall_time_change(esdp, 0);
- }
- erts_thr_progress_prepare_wait(esdp);
- }
+ sched_waiting(esdp->no, rq);
- ERTS_SCHED_FAIR_YIELD();
+ erts_runq_unlock(rq);
- flgs = sched_spin_wait(ssi, spincount);
- if (flgs & ERTS_SSI_FLG_SLEEPING) {
- ASSERT(flgs & ERTS_SSI_FLG_WAITING);
- flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
- if (flgs & ERTS_SSI_FLG_SLEEPING) {
- int res;
- ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING);
- ASSERT(flgs & ERTS_SSI_FLG_WAITING);
- do {
- res = erts_tse_wait(ssi->event);
- } while (res == EINTR);
- }
- }
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
- erts_thr_progress_finalize_wait(esdp);
- }
+ spincount = sched_busy_wait.tse;
- if (!(flgs & ERTS_SSI_FLG_WAITING)) {
- ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
- break;
- }
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+ dirty_sched_wall_time_change(esdp, working = 0);
+ else if (thr_prgr_active != working)
+ sched_wall_time_change(esdp, working = thr_prgr_active);
- flgs = sched_prep_cont_spin_wait(ssi);
- spincount = sched_busy_wait.aux_work;
+ while (1) {
+ ErtsMonotonicTime current_time = 0;
- if (!(flgs & ERTS_SSI_FLG_WAITING)) {
- ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
- break;
- }
+ aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
+ if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
+ aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1);
+ ERTS_MSACC_UPDATE_CACHE();
+ if (aux_work && erts_thr_progress_update(esdp))
+ erts_thr_progress_leader_update(esdp);
+ }
- }
+ if (aux_work) {
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ flgs = erts_atomic32_read_acqb(&ssi->flags);
+ current_time = erts_get_monotonic_time(esdp);
+ if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) {
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
+ erts_bump_timers(esdp->timer_wheel, current_time);
+ }
+ }
+ }
+ else {
+ ErtsMonotonicTime timeout_time;
+ int do_timeout = 0;
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ timeout_time = erts_check_next_timeout_time(esdp);
+ current_time = erts_get_monotonic_time(esdp);
+ do_timeout = (current_time >= timeout_time);
+ } else {
+ current_time = 0;
+ timeout_time = ERTS_MONOTONIC_TIME_MAX;
+ }
+ if (do_timeout) {
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
+ }
+ else {
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ if (thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 0);
+ sched_wall_time_change(esdp, 0);
+ }
+ erts_thr_progress_prepare_wait(esdp);
+ }
+
+ flgs = sched_spin_wait(ssi, spincount);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ int res;
+ ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ current_time = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0 :
+ erts_get_monotonic_time(esdp);
+ do {
+ Sint64 timeout;
+ if (current_time >= timeout_time)
+ break;
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ timeout = ERTS_MONOTONIC_TO_NSEC(timeout_time
+ - current_time
+ - 1) + 1;
+ } else
+ timeout = -1;
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_SLEEP);
+ res = erts_tse_twait(ssi->event, timeout);
+ ERTS_MSACC_POP_STATE();
+ current_time = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0 :
+ erts_get_monotonic_time(esdp);
+ } while (res == EINTR);
+ }
+ }
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
+ erts_thr_progress_finalize_wait(esdp);
+ }
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && current_time >= timeout_time)
+ erts_bump_timers(esdp->timer_wheel, current_time);
+ }
- if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
- erts_smp_atomic32_read_band_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ break;
+ }
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- sched_wall_time_change(esdp, 1);
- }
+ flgs = sched_prep_cont_spin_wait(ssi);
+ spincount = sched_busy_wait.aux_work;
- erts_smp_runq_lock(rq);
- sched_active(esdp->no, rq);
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ break;
+ }
}
- else
-#endif
- {
- erts_aint_t dt;
-
- erts_smp_atomic32_set_relb(&function_calls, 0);
- *fcalls = 0;
-
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
-#endif
-
-#ifdef ERTS_SCHED_ONLY_POLL_SCHED_1
- ASSERT(esdp->no == 1);
-#endif
- sched_waiting_sys(esdp->no, rq);
-
-
- erts_smp_runq_unlock(rq);
-
- ASSERT(working);
- sched_wall_time_change(esdp, working = 0);
-
- spincount = sched_busy_wait.sys_schedule;
- if (spincount == 0)
- goto sys_aux_work;
-
- while (spincount-- > 0) {
-
- sys_poll_aux_work:
-
- if (working)
- sched_wall_time_change(esdp, working = 0);
- ASSERT(!erts_port_task_have_outstanding_io_tasks());
- erl_sys_schedule(1); /* Might give us something to do */
+ if (flgs & ~(ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC))
+ erts_atomic32_read_band_nob(&ssi->flags,
+ (ERTS_SSI_FLG_SUSPENDED
+ | ERTS_SSI_FLG_MSB_EXEC));
- dt = erts_do_time_read_and_reset();
- if (dt) erts_bump_timer(dt);
-
- sys_aux_work:
-#ifndef ERTS_SMP
- erts_sys_schedule_interrupt(0);
-#endif
-
- aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
- if (aux_work) {
- if (!working)
- sched_wall_time_change(esdp, working = 1);
-#ifdef ERTS_SMP
- if (!thr_prgr_active)
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
-#endif
- aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1);
-#ifdef ERTS_SMP
- if (aux_work && erts_thr_progress_update(esdp))
- erts_thr_progress_leader_update(esdp);
-#endif
- }
-
-#ifndef ERTS_SMP
- if (rq->len != 0 || rq->misc.start)
- goto sys_woken;
-#else
- flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
- if (!(flgs & ERTS_SSI_FLG_WAITING)) {
- ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
- goto sys_woken;
- }
-
- /*
- * If we got new I/O tasks we aren't allowed to
- * call erl_sys_schedule() until it is handled.
- */
- if (erts_port_task_have_outstanding_io_tasks()) {
- clear_sys_scheduling();
- /*
- * Got to check that we still got I/O tasks; otherwise
- * we have to continue checking for I/O...
- */
- if (!prepare_for_sys_schedule(esdp)) {
- spincount *= ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT;
- goto tse_wait;
- }
- }
-#endif
- }
-
- erts_smp_runq_lock(rq);
-
-#ifdef ERTS_SMP
- /*
- * If we got new I/O tasks we aren't allowed to
- * sleep in erl_sys_schedule().
- */
- if (erts_port_task_have_outstanding_io_tasks()) {
- clear_sys_scheduling();
-
- /*
- * Got to check that we still got I/O tasks; otherwise
- * we have to wait in erl_sys_schedule() after all...
- */
- if (!prepare_for_sys_schedule(esdp)) {
- /*
- * Not allowed to wait in erl_sys_schedule;
- * do tse wait instead...
- */
- sched_change_waiting_sys_to_waiting(esdp->no, rq);
- erts_smp_runq_unlock(rq);
- spincount = 0;
- goto tse_wait;
- }
- }
-#endif
- if (aux_work) {
- erts_smp_runq_unlock(rq);
- goto sys_poll_aux_work;
- }
-#ifdef ERTS_SMP
- flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_POLL_SLEEPING);
- if (!(flgs & ERTS_SSI_FLG_SLEEPING)) {
- if (!(flgs & ERTS_SSI_FLG_WAITING)) {
- ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
- goto sys_locked_woken;
- }
- erts_smp_runq_unlock(rq);
- flgs = sched_prep_cont_spin_wait(ssi);
- if (!(flgs & ERTS_SSI_FLG_WAITING)) {
- ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
- goto sys_woken;
- }
- ASSERT(!erts_port_task_have_outstanding_io_tasks());
- goto sys_poll_aux_work;
- }
-
- ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
- ASSERT(flgs & ERTS_SSI_FLG_WAITING);
-#endif
-
- erts_smp_runq_unlock(rq);
-
- if (working)
- sched_wall_time_change(esdp, working = 0);
-
-#ifdef ERTS_SMP
- if (thr_prgr_active)
- erts_thr_progress_active(esdp, thr_prgr_active = 0);
-#endif
-
- ASSERT(!erts_port_task_have_outstanding_io_tasks());
-
- erl_sys_schedule(0);
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+ dirty_sched_wall_time_change(esdp, working = 1);
+ else if (!thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
- dt = erts_do_time_read_and_reset();
- if (dt) erts_bump_timer(dt);
+ erts_runq_lock(rq);
+ sched_active(esdp->no, rq);
-#ifndef ERTS_SMP
- if (rq->len == 0 && !rq->misc.start)
- goto sys_aux_work;
- sys_woken:
-#else
- flgs = sched_prep_cont_spin_wait(ssi);
- if (flgs & ERTS_SSI_FLG_WAITING)
- goto sys_aux_work;
-
- sys_woken:
- if (!thr_prgr_active)
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- erts_smp_runq_lock(rq);
- sys_locked_woken:
- if (!thr_prgr_active) {
- erts_smp_runq_unlock(rq);
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- erts_smp_runq_lock(rq);
- }
- clear_sys_scheduling();
- if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
- erts_smp_atomic32_read_band_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
-#endif
- if (!working)
- sched_wall_time_change(esdp, working = 1);
- sched_active_sys(esdp->no, rq);
- }
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+ dirty_active(esdp, 1);
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
}
-#ifdef ERTS_SMP
static ERTS_INLINE erts_aint32_t
ssi_flags_set_wake(ErtsSchedulerSleepInfo *ssi)
@@ -3053,20 +3445,55 @@ ssi_flags_set_wake(ErtsSchedulerSleepInfo *ssi)
erts_aint32_t nflgs = 0;
erts_aint32_t xflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING;
while (1) {
- oflgs = erts_smp_atomic32_cmpxchg_relb(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_atomic32_cmpxchg_relb(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return oflgs;
- nflgs = oflgs & ERTS_SSI_FLG_SUSPENDED;
+ nflgs = oflgs & (ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC);
xflgs = oflgs;
}
}
+static ERTS_INLINE void
+ssi_wake(ErtsSchedulerSleepInfo *ssi)
+{
+ erts_sched_finish_poke(ssi, ssi_flags_set_wake(ssi));
+}
+
+
static void
-wake_scheduler(ErtsRunQueue *rq)
+dcpu_sched_ix_suspend_wake(Uint ix)
{
- ErtsSchedulerSleepInfo *ssi;
- erts_aint32_t flgs;
+ ErtsSchedulerSleepInfo* ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
+ erts_atomic32_read_bor_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ ssi_wake(ssi);
+}
+
+static void
+dio_sched_ix_suspend_wake(Uint ix)
+{
+ ErtsSchedulerSleepInfo* ssi = ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix);
+ erts_atomic32_read_bor_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ ssi_wake(ssi);
+}
+static void
+dcpu_sched_ix_wake(Uint ix)
+{
+ ssi_wake(ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix));
+}
+
+#if 0
+static void
+dio_sched_ix_wake(Uint ix)
+{
+ ssi_wake(ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix));
+}
+#endif
+
+
+static void
+wake_scheduler(ErtsRunQueue *rq)
+{
/*
* The unlocked run queue is not strictly necessary
* from a thread safety or deadlock prevention
@@ -3075,15 +3502,12 @@ wake_scheduler(ErtsRunQueue *rq)
* so all code *should* handle this without having
* the lock on the run queue.
*/
- ERTS_SMP_LC_ASSERT(!erts_smp_lc_runq_is_locked(rq));
-
- ssi = rq->scheduler->ssi;
+ ERTS_LC_ASSERT(!erts_lc_runq_is_locked(rq)
+ || ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
- flgs = ssi_flags_set_wake(ssi);
- erts_sched_finish_poke(ssi, flgs);
+ ssi_wake(rq->scheduler->ssi);
}
-#ifdef ERTS_DIRTY_SCHEDULERS
static void
wake_dirty_schedulers(ErtsRunQueue *rq, int one)
{
@@ -3093,10 +3517,10 @@ wake_dirty_schedulers(ErtsRunQueue *rq, int one)
ASSERT(ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
sl = &rq->sleepers;
- erts_smp_spin_lock(&sl->lock);
+ erts_spin_lock(&sl->lock);
ssi = sl->list;
if (!ssi) {
- erts_smp_spin_unlock(&sl->lock);
+ erts_spin_unlock(&sl->lock);
if (one)
wake_scheduler(rq);
} else if (one) {
@@ -3110,14 +3534,14 @@ wake_dirty_schedulers(ErtsRunQueue *rq, int one)
if (ssi->next)
ssi->next->prev = ssi->prev;
- erts_smp_spin_unlock(&sl->lock);
+ erts_spin_unlock(&sl->lock);
ERTS_THR_MEMORY_BARRIER;
flgs = ssi_flags_set_wake(ssi);
erts_sched_finish_poke(ssi, flgs);
} else {
sl->list = NULL;
- erts_smp_spin_unlock(&sl->lock);
+ erts_spin_unlock(&sl->lock);
ERTS_THR_MEMORY_BARRIER;
do {
@@ -3127,10 +3551,16 @@ wake_dirty_schedulers(ErtsRunQueue *rq, int one)
} while (ssi);
}
}
-#endif
+
+static void
+wake_dirty_scheduler(ErtsRunQueue *rq)
+{
+ wake_dirty_schedulers(rq, 1);
+}
+
#define ERTS_NO_USED_RUNQS_SHIFT 16
-#define ERTS_NO_RUNQS_MASK 0xffff
+#define ERTS_NO_RUNQS_MASK 0xffffU
#if ERTS_MAX_NO_OF_SCHEDULERS > ERTS_NO_RUNQS_MASK
# error "Too large amount of schedulers allowed"
@@ -3141,13 +3571,13 @@ init_no_runqs(int active, int used)
{
erts_aint32_t no_runqs = (erts_aint32_t) (active & ERTS_NO_RUNQS_MASK);
no_runqs |= (erts_aint32_t) ((used & ERTS_NO_RUNQS_MASK) << ERTS_NO_USED_RUNQS_SHIFT);
- erts_smp_atomic32_init_nob(&balance_info.no_runqs, no_runqs);
+ erts_atomic32_init_nob(&balance_info.no_runqs, no_runqs);
}
static ERTS_INLINE void
get_no_runqs(int *active, int *used)
{
- erts_aint32_t no_runqs = erts_smp_atomic32_read_nob(&balance_info.no_runqs);
+ erts_aint32_t no_runqs = erts_atomic32_read_nob(&balance_info.no_runqs);
if (active)
*active = (int) (no_runqs & ERTS_NO_RUNQS_MASK);
if (used)
@@ -3157,12 +3587,12 @@ get_no_runqs(int *active, int *used)
static ERTS_INLINE void
set_no_used_runqs(int used)
{
- erts_aint32_t exp = erts_smp_atomic32_read_nob(&balance_info.no_runqs);
+ erts_aint32_t exp = erts_atomic32_read_nob(&balance_info.no_runqs);
while (1) {
erts_aint32_t act, new;
new = (used & ERTS_NO_RUNQS_MASK) << ERTS_NO_USED_RUNQS_SHIFT;
new |= exp & ERTS_NO_RUNQS_MASK;
- act = erts_smp_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp);
+ act = erts_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp);
if (act == exp)
break;
exp = act;
@@ -3172,14 +3602,14 @@ set_no_used_runqs(int used)
static ERTS_INLINE void
set_no_active_runqs(int active)
{
- erts_aint32_t exp = erts_smp_atomic32_read_nob(&balance_info.no_runqs);
+ erts_aint32_t exp = erts_atomic32_read_nob(&balance_info.no_runqs);
while (1) {
erts_aint32_t act, new;
if ((exp & ERTS_NO_RUNQS_MASK) == active)
break;
new = exp & (ERTS_NO_RUNQS_MASK << ERTS_NO_USED_RUNQS_SHIFT);
new |= active & ERTS_NO_RUNQS_MASK;
- act = erts_smp_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp);
+ act = erts_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp);
if (act == exp)
break;
exp = act;
@@ -3189,14 +3619,14 @@ set_no_active_runqs(int active)
static ERTS_INLINE int
try_inc_no_active_runqs(int active)
{
- erts_aint32_t exp = erts_smp_atomic32_read_nob(&balance_info.no_runqs);
+ erts_aint32_t exp = erts_atomic32_read_nob(&balance_info.no_runqs);
if (((exp >> ERTS_NO_USED_RUNQS_SHIFT) & ERTS_NO_RUNQS_MASK) < active)
return 0;
if ((exp & ERTS_NO_RUNQS_MASK) + 1 == active) {
erts_aint32_t new, act;
new = exp & (ERTS_NO_RUNQS_MASK << ERTS_NO_USED_RUNQS_SHIFT);
new |= active & ERTS_NO_RUNQS_MASK;
- act = erts_smp_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp);
+ act = erts_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp);
if (act == exp)
return 1;
}
@@ -3212,11 +3642,11 @@ chk_wake_sched(ErtsRunQueue *crq, int ix, int activate)
return 0;
wrq = ERTS_RUNQ_IX(ix);
flags = ERTS_RUNQ_FLGS_GET(wrq);
+ if (activate && !(flags & ERTS_RUNQ_FLG_SUSPENDED)) {
+ if (try_inc_no_active_runqs(ix+1))
+ (void) ERTS_RUNQ_FLGS_UNSET(wrq, ERTS_RUNQ_FLG_INACTIVE);
+ }
if (!(flags & (ERTS_RUNQ_FLG_SUSPENDED|ERTS_RUNQ_FLG_NONEMPTY))) {
- if (activate) {
- if (try_inc_no_active_runqs(ix+1))
- (void) ERTS_RUNQ_FLGS_UNSET(wrq, ERTS_RUNQ_FLG_INACTIVE);
- }
wake_scheduler(wrq);
return 1;
}
@@ -3258,25 +3688,20 @@ wake_scheduler_on_empty_runq(ErtsRunQueue *crq)
}
}
-#endif /* ERTS_SMP */
static ERTS_INLINE void
smp_notify_inc_runq(ErtsRunQueue *runq)
{
-#ifdef ERTS_SMP
if (runq) {
-#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix))
- wake_dirty_schedulers(runq, 1);
+ wake_dirty_scheduler(runq);
else
-#endif
wake_scheduler(runq);
}
-#endif
}
void
-erts_smp_notify_inc_runq(ErtsRunQueue *runq)
+erts_notify_inc_runq(ErtsRunQueue *runq)
{
smp_notify_inc_runq(runq);
}
@@ -3284,16 +3709,12 @@ erts_smp_notify_inc_runq(ErtsRunQueue *runq)
void
erts_sched_notify_check_cpu_bind(void)
{
-#ifdef ERTS_SMP
int ix;
for (ix = 0; ix < erts_no_run_queues; ix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
(void) ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_CHK_CPU_BIND);
wake_scheduler(rq);
}
-#else
- erts_sched_check_cpu_bind(erts_get_scheduler_data());
-#endif
}
@@ -3302,9 +3723,9 @@ enqueue_process(ErtsRunQueue *runq, int prio, Process *p)
{
ErtsRunPrioQueue *rpq;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq));
- erts_smp_inc_runq_len(runq, &runq->procs.prio_info[prio], prio);
+ erts_inc_runq_len(runq, &runq->procs.prio_info[prio], prio);
if (prio == PRIORITY_LOW) {
p->schedule_count = RESCHEDULE_LOW;
@@ -3332,7 +3753,7 @@ unqueue_process(ErtsRunQueue *runq,
Process *prev_proc,
Process *proc)
{
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq));
if (prev_proc)
prev_proc->next = proc->next;
@@ -3344,7 +3765,7 @@ unqueue_process(ErtsRunQueue *runq,
if (!rpq->first)
rpq->last = NULL;
- erts_smp_dec_runq_len(runq, rqi, prio);
+ erts_dec_runq_len(runq, rqi, prio);
}
@@ -3357,7 +3778,7 @@ dequeue_process(ErtsRunQueue *runq, int prio_q, erts_aint32_t *statep)
ErtsRunQueueInfo *rqi;
Process *p;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq));
ASSERT(PRIORITY_NORMAL == prio_q
|| PRIORITY_HIGH == prio_q
@@ -3368,9 +3789,9 @@ dequeue_process(ErtsRunQueue *runq, int prio_q, erts_aint32_t *statep)
if (!p)
return NULL;
- ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER;
+ ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER;
- state = erts_smp_atomic32_read_nob(&p->state);
+ state = erts_atomic32_read_nob(&p->state);
if (statep)
*statep = state;
@@ -3400,7 +3821,13 @@ check_requeue_process(ErtsRunQueue *rq, int prio_q)
return 0;
}
-#ifdef ERTS_SMP
+static ERTS_INLINE void
+free_proxy_proc(Process *proxy)
+{
+ ASSERT(erts_atomic32_read_nob(&proxy->state) & ERTS_PSFLG_PROXY);
+ erts_free(ERTS_ALC_T_PROC, proxy);
+}
+
static ErtsRunQueue *
check_immigration_need(ErtsRunQueue *c_rq, ErtsMigrationPath *mp, int prio)
@@ -3453,7 +3880,7 @@ static void
immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp)
{
Uint32 iflags, iflag;
- erts_smp_runq_unlock(c_rq);
+ erts_runq_unlock(c_rq);
ASSERT(erts_thr_progress_is_managed_thread());
@@ -3482,7 +3909,7 @@ immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp)
prio = ERTS_PORT_PRIO_LEVEL;
break;
default:
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"%s:%d:%s(): Invalid immigrate queue mask",
__FILE__, __LINE__, __func__);
prio = 0;
@@ -3494,25 +3921,25 @@ immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp)
rq = check_immigration_need(c_rq, mp, prio);
if (rq) {
- erts_smp_runq_lock(rq);
+ erts_runq_lock(rq);
if (prio == ERTS_PORT_PRIO_LEVEL) {
Port *prt;
prt = erts_dequeue_port(rq);
if (prt)
RUNQ_SET_RQ(&prt->run_queue, c_rq);
- erts_smp_runq_unlock(rq);
+ erts_runq_unlock(rq);
if (prt) {
/* port might terminate while we have no lock... */
rq = erts_port_runq(prt);
if (rq) {
if (rq != c_rq)
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"%s:%d:%s(): Internal error",
__FILE__, __LINE__, __func__);
erts_enqueue_port(c_rq, prt);
if (!iflag)
return; /* done */
- erts_smp_runq_unlock(c_rq);
+ erts_runq_unlock(c_rq);
}
}
}
@@ -3526,76 +3953,84 @@ immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp)
while (proc) {
erts_aint32_t state;
- state = erts_smp_atomic32_read_acqb(&proc->state);
+ state = erts_atomic32_read_acqb(&proc->state);
if (!(ERTS_PSFLG_BOUND & state)
&& (prio == (int) ERTS_PSFLGS_GET_PRQ_PRIO(state))) {
ErtsRunQueueInfo *rqi = &rq->procs.prio_info[prio];
unqueue_process(rq, rpq, rqi, prio, prev_proc, proc);
- erts_smp_runq_unlock(rq);
+ erts_runq_unlock(rq);
RUNQ_SET_RQ(&proc->run_queue, c_rq);
rq_locked = 0;
- erts_smp_runq_lock(c_rq);
+ erts_runq_lock(c_rq);
enqueue_process(c_rq, prio, proc);
if (!iflag)
return; /* done */
- erts_smp_runq_unlock(c_rq);
+ erts_runq_unlock(c_rq);
break;
}
prev_proc = proc;
proc = proc->next;
}
if (rq_locked)
- erts_smp_runq_unlock(rq);
+ erts_runq_unlock(rq);
}
}
}
- erts_smp_runq_lock(c_rq);
+ erts_runq_lock(c_rq);
}
static ERTS_INLINE void
suspend_run_queue(ErtsRunQueue *rq)
{
- erts_smp_atomic32_read_bor_nob(&rq->scheduler->ssi->flags,
+ erts_atomic32_read_bor_nob(&rq->scheduler->ssi->flags,
ERTS_SSI_FLG_SUSPENDED);
(void) ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_SUSPENDED);
wake_scheduler(rq);
}
-static void scheduler_ix_resume_wake(Uint ix);
-static void scheduler_ssi_resume_wake(ErtsSchedulerSleepInfo *ssi);
+static void nrml_sched_ix_resume_wake(Uint ix);
static ERTS_INLINE void
resume_run_queue(ErtsRunQueue *rq)
{
int pix;
+ Uint32 oflgs;
- erts_smp_runq_lock(rq);
+ ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
+
+ erts_runq_lock(rq);
+
+ oflgs = ERTS_RUNQ_FLGS_READ_BSET(rq,
+ (ERTS_RUNQ_FLG_OUT_OF_WORK
+ | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK
+ | ERTS_RUNQ_FLG_SUSPENDED
+ | ERTS_RUNQ_FLG_MSB_EXEC),
+ (ERTS_RUNQ_FLG_OUT_OF_WORK
+ | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK));
- (void) ERTS_RUNQ_FLGS_READ_BSET(rq,
- (ERTS_RUNQ_FLG_OUT_OF_WORK
- | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK
- | ERTS_RUNQ_FLG_SUSPENDED),
- (ERTS_RUNQ_FLG_OUT_OF_WORK
- | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK));
+ if (oflgs & (ERTS_RUNQ_FLG_SUSPENDED|ERTS_RUNQ_FLG_MSB_EXEC)) {
+ erts_aint32_t len;
+
+ rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS;
+ for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) {
+ len = erts_atomic32_read_dirty(&rq->procs.prio_info[pix].len);
+ rq->procs.prio_info[pix].max_len = len;
+ rq->procs.prio_info[pix].reds = 0;
+ }
+ len = erts_atomic32_read_dirty(&rq->ports.info.len);
+ rq->ports.info.max_len = len;
+ rq->ports.info.reds = 0;
+ len = erts_atomic32_read_dirty(&rq->len);
+ rq->max_len = len;
- rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS;
- for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) {
- rq->procs.prio_info[pix].max_len = 0;
- rq->procs.prio_info[pix].reds = 0;
}
- rq->ports.info.max_len = 0;
- rq->ports.info.reds = 0;
- rq->max_len = 0;
- erts_smp_runq_unlock(rq);
+ erts_runq_unlock(rq);
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
-#endif
- scheduler_ix_resume_wake(rq->ix);
+ nrml_sched_ix_resume_wake(rq->ix);
}
typedef struct {
@@ -3608,17 +4043,42 @@ schedule_bound_processes(ErtsRunQueue *rq,
ErtsStuckBoundProcesses *sbpp)
{
Process *proc, *next;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
proc = sbpp->first;
while (proc) {
- erts_aint32_t state = erts_smp_atomic32_read_acqb(&proc->state);
+ erts_aint32_t state = erts_atomic32_read_acqb(&proc->state);
next = proc->next;
enqueue_process(rq, (int) ERTS_PSFLGS_GET_PRQ_PRIO(state), proc);
proc = next;
}
}
+
+static ERTS_INLINE void
+clear_proc_dirty_queue_bit(Process *p, ErtsRunQueue *rq, int prio_bit)
+{
+#ifdef DEBUG
+ erts_aint32_t old;
+#endif
+ erts_aint32_t qb = prio_bit;
+ if (rq == ERTS_DIRTY_CPU_RUNQ)
+ qb <<= ERTS_PDSFLGS_IN_CPU_PRQ_MASK_OFFSET;
+ else {
+ ASSERT(rq == ERTS_DIRTY_IO_RUNQ);
+ qb <<= ERTS_PDSFLGS_IN_IO_PRQ_MASK_OFFSET;
+ }
+#ifdef DEBUG
+ old = (int)
+#else
+ (void)
+#endif
+ erts_atomic32_read_band_mb(&p->dirty_state, ~qb);
+ ASSERT(old & qb);
+}
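
clear_proc_dirty_queue_bit() shows the #ifdef DEBUG capture idiom: the previous value returned by the atomic RMW is bound to a variable only in debug builds, and explicitly discarded with (void) otherwise. A minimal rendering, where assert() stands in for ASSERT and debug builds are assumed not to define NDEBUG:

    #include <assert.h>
    #include <stdatomic.h>

    static void
    clear_bit_checked(_Atomic unsigned *word, unsigned bit)
    {
    #ifdef DEBUG
        unsigned old =
    #else
        (void)
    #endif
            atomic_fetch_and(word, ~bit);
    #ifdef DEBUG
        assert(old & bit);   /* the enqueue side must have set the bit */
    #endif
    }
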
+
+
+
static void
evacuate_run_queue(ErtsRunQueue *rq,
ErtsStuckBoundProcesses *sbpp)
@@ -3626,28 +4086,22 @@ evacuate_run_queue(ErtsRunQueue *rq,
int prio_q;
ErtsRunQueue *to_rq;
ErtsMigrationPaths *mps;
- ErtsMigrationPath *mp = NULL;
+ ErtsMigrationPath *mp;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
(void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
-#endif
- {
- mps = erts_get_migration_paths_managed();
- mp = &mps->mpath[rq->ix];
- }
+ ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
+
+ mps = erts_get_migration_paths_managed();
+ mp = &mps->mpath[rq->ix];
/* Evacuate scheduled misc ops */
if (rq->misc.start) {
ErtsMiscOpList *start, *end;
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
-#endif
to_rq = mp->misc_evac_runq;
if (!to_rq)
return;
@@ -3656,9 +4110,10 @@ evacuate_run_queue(ErtsRunQueue *rq,
end = rq->misc.end;
rq->misc.start = NULL;
rq->misc.end = NULL;
- erts_smp_runq_unlock(rq);
+ ERTS_RUNQ_FLGS_UNSET_NOB(rq, ERTS_RUNQ_FLG_MISC_OP);
+ erts_runq_unlock(rq);
- erts_smp_runq_lock(to_rq);
+ erts_runq_lock(to_rq);
if (to_rq->misc.end)
to_rq->misc.end->next = start;
else
@@ -3668,17 +4123,14 @@ evacuate_run_queue(ErtsRunQueue *rq,
non_empty_runq(to_rq);
- erts_smp_runq_unlock(to_rq);
+ erts_runq_unlock(to_rq);
smp_notify_inc_runq(to_rq);
- erts_smp_runq_lock(to_rq);
+ erts_runq_lock(to_rq);
}
if (rq->ports.start) {
Port *prt;
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
-#endif
to_rq = mp->prio[ERTS_PORT_PRIO_LEVEL].runq;
if (!to_rq)
return;
@@ -3689,7 +4141,7 @@ evacuate_run_queue(ErtsRunQueue *rq,
ErtsRunQueue *prt_rq;
prt = erts_dequeue_port(rq);
RUNQ_SET_RQ(&prt->run_queue, to_rq);
- erts_smp_runq_unlock(rq);
+ erts_runq_unlock(rq);
/*
* The port might terminate while
* we have no lock on it...
@@ -3697,13 +4149,13 @@ evacuate_run_queue(ErtsRunQueue *rq,
prt_rq = erts_port_runq(prt);
if (prt_rq) {
if (prt_rq != to_rq)
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"%s:%d:%s() internal error\n",
__FILE__, __LINE__, __func__);
erts_enqueue_port(to_rq, prt);
- erts_smp_runq_unlock(to_rq);
+ erts_runq_unlock(to_rq);
}
- erts_smp_runq_lock(rq);
+ erts_runq_lock(rq);
prt = rq->ports.start;
}
smp_notify_inc_runq(to_rq);
@@ -3714,27 +4166,68 @@ evacuate_run_queue(ErtsRunQueue *rq,
erts_aint32_t state;
Process *proc;
int notify = 0;
-#ifdef ERTS_DIRTY_SCHEDULERS
- int requeue;
-#endif
to_rq = NULL;
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
-#endif
- {
- if (!mp->prio[prio_q].runq)
- return;
- if (prio_q == PRIORITY_NORMAL && !mp->prio[PRIORITY_LOW].runq)
- return;
- }
+ if (!mp->prio[prio_q].runq)
+ return;
+ if (prio_q == PRIORITY_NORMAL && !mp->prio[PRIORITY_LOW].runq)
+ return;
proc = dequeue_process(rq, prio_q, &state);
while (proc) {
-#ifdef ERTS_DIRTY_SCHEDULERS
- requeue = 1;
+ Process *real_proc;
+ int prio;
+ erts_aint32_t max_qbit, qbit, real_state;
+
+ prio = ERTS_PSFLGS_GET_PRQ_PRIO(state);
+ qbit = ((erts_aint32_t) 1) << prio;
+
+ if (!(state & ERTS_PSFLG_PROXY)) {
+ real_proc = proc;
+ real_state = state;
+ }
+ else {
+ real_proc = erts_proc_lookup_raw(proc->common.id);
+ if (!real_proc) {
+ free_proxy_proc(proc);
+ goto handle_next_proc;
+ }
+ real_state = erts_atomic32_read_acqb(&real_proc->state);
+ }
+
+ max_qbit = (state >> ERTS_PSFLGS_IN_PRQ_MASK_OFFSET);
+ max_qbit &= ERTS_PSFLGS_QMASK;
+ max_qbit |= 1 << ERTS_PSFLGS_QMASK_BITS;
+ max_qbit &= -max_qbit;
+
+ if (qbit > max_qbit) {
+ /* Process already queued with higher prio; drop it... */
+ if (real_proc != proc)
+ free_proxy_proc(proc);
+ else {
+ erts_aint32_t clr_bits;
+#ifdef DEBUG
+ erts_aint32_t old;
+#endif
+
+ clr_bits = ERTS_PSFLG_IN_RUNQ;
+ clr_bits |= qbit << ERTS_PSFLGS_IN_PRQ_MASK_OFFSET;
+
+#ifdef DEBUG
+ old =
+#else
+ (void)
#endif
- if (ERTS_PSFLG_BOUND & state) {
+ erts_atomic32_read_band_mb(&proc->state,
+ ~clr_bits);
+ ASSERT((old & clr_bits) == clr_bits);
+
+ }
+
+ goto handle_next_proc;
+ }
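
The max_qbit computation just above leans on two's-complement bit tricks: OR-ing in a sentinel bit above the queue mask guarantees a non-zero value, and x & -x then isolates the lowest set bit, which with bit 0 as the highest priority is the best queue the process already occupies. Sketch:

    #include <stdint.h>

    /* Isolate the lowest set bit.  The caller ORs in a sentinel above
     * the mask first, so x is never 0. */
    static uint32_t lowest_set_bit(uint32_t x)
    {
        return x & -x;    /* e.g. 0x6 (0b0110) -> 0x2 (0b0010) */
    }
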
+
+ if (ERTS_PSFLG_BOUND & real_state) {
/* Bound processes get stuck here... */
proc->next = NULL;
if (sbpp->last)
@@ -3742,52 +4235,23 @@ evacuate_run_queue(ErtsRunQueue *rq,
else
sbpp->first = proc;
sbpp->last = proc;
-#ifdef ERTS_DIRTY_SCHEDULERS
- requeue = 0;
-#endif
- }
-#ifdef ERTS_DIRTY_SCHEDULERS
- else if (state & ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q) {
- erts_aint32_t old;
- old = erts_smp_atomic32_read_band_nob(&proc->state,
- ~(ERTS_PSFLG_DIRTY_CPU_PROC
- | ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q));
- /* assert that no other dirty flags are set */
- ASSERT(!(old & (ERTS_PSFLG_DIRTY_IO_PROC|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q)));
- } else if (state & ERTS_PSFLG_DIRTY_IO_PROC_IN_Q) {
- erts_aint32_t old;
- old = erts_smp_atomic32_read_band_nob(&proc->state,
- ~(ERTS_PSFLG_DIRTY_IO_PROC
- | ERTS_PSFLG_DIRTY_IO_PROC_IN_Q));
- /* assert that no other dirty flags are set */
- ASSERT(!(old & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q)));
}
- if (requeue) {
-#else
else {
-#endif
int prio = (int) ERTS_PSFLGS_GET_PRQ_PRIO(state);
- erts_smp_runq_unlock(rq);
+ erts_runq_unlock(rq);
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
- /*
- * dirty run queues evacuate only to run
- * queue 0 during multi-scheduling blocking
- */
- to_rq = ERTS_RUNQ_IX(0);
- else
-#endif
- to_rq = mp->prio[prio].runq;
+ to_rq = mp->prio[prio].runq;
RUNQ_SET_RQ(&proc->run_queue, to_rq);
- erts_smp_runq_lock(to_rq);
+ erts_runq_lock(to_rq);
enqueue_process(to_rq, prio, proc);
- erts_smp_runq_unlock(to_rq);
+ erts_runq_unlock(to_rq);
notify = 1;
- erts_smp_runq_lock(rq);
+ erts_runq_lock(rq);
}
+
+ handle_next_proc:
proc = dequeue_process(rq, prio_q, &state);
}
if (notify)
@@ -3803,15 +4267,15 @@ try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq,
ErtsRunPrioQueue *rpq;
if (*rq_lockedp) {
- erts_smp_runq_unlock(rq);
+ erts_runq_unlock(rq);
*rq_lockedp = 0;
}
- ERTS_SMP_LC_ASSERT(!erts_smp_lc_runq_is_locked(rq));
+ ERTS_LC_ASSERT(!erts_lc_runq_is_locked(rq));
- erts_smp_runq_lock(vrq);
+ erts_runq_lock(vrq);
- if (rq->halt_in_progress)
+ if (ERTS_RUNQ_FLGS_GET_NOB(rq) & ERTS_RUNQ_FLG_HALTING)
goto no_procs;
/*
@@ -3845,16 +4309,16 @@ try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq,
proc = rpq->first;
while (proc) {
- erts_aint32_t state = erts_smp_atomic32_read_acqb(&proc->state);
+ erts_aint32_t state = erts_atomic32_read_acqb(&proc->state);
if (!(ERTS_PSFLG_BOUND & state)) {
/* Steal process */
int prio = (int) ERTS_PSFLGS_GET_PRQ_PRIO(state);
ErtsRunQueueInfo *rqi = &vrq->procs.prio_info[prio];
unqueue_process(vrq, rpq, rqi, prio, prev_proc, proc);
- erts_smp_runq_unlock(vrq);
+ erts_runq_unlock(vrq);
RUNQ_SET_RQ(&proc->run_queue, rq);
- erts_smp_runq_lock(rq);
+ erts_runq_lock(rq);
*rq_lockedp = 1;
enqueue_process(rq, prio, proc);
return !0;
@@ -3868,7 +4332,7 @@ try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq,
no_procs:
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(vrq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(vrq));
/*
* Check for a runnable port to steal...
@@ -3878,7 +4342,7 @@ no_procs:
ErtsRunQueue *prt_rq;
Port *prt = erts_dequeue_port(vrq);
RUNQ_SET_RQ(&prt->run_queue, rq);
- erts_smp_runq_unlock(vrq);
+ erts_runq_unlock(vrq);
/*
* The port might terminate while
@@ -3890,7 +4354,7 @@ no_procs:
return 0;
else {
if (prt_rq != rq)
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"%s:%d:%s() internal error\n",
__FILE__, __LINE__, __func__);
*rq_lockedp = 1;
@@ -3899,7 +4363,7 @@ no_procs:
}
}
- erts_smp_runq_unlock(vrq);
+ erts_runq_unlock(vrq);
return 0;
}
@@ -3910,8 +4374,7 @@ check_possible_steal_victim(ErtsRunQueue *rq, int *rq_lockedp, int vix)
{
ErtsRunQueue *vrq = ERTS_RUNQ_IX(vix);
Uint32 flags = ERTS_RUNQ_FLGS_GET(vrq);
- if ((flags & (ERTS_RUNQ_FLG_NONEMPTY
- | ERTS_RUNQ_FLG_PROTECTED)) == ERTS_RUNQ_FLG_NONEMPTY)
+ if (runq_got_work_to_execute_flags(flags) & (!(flags & ERTS_RUNQ_FLG_PROTECTED)))
return try_steal_task_from_victim(rq, rq_lockedp, vrq, flags);
else
return 0;
@@ -3932,7 +4395,7 @@ try_steal_task(ErtsRunQueue *rq)
res = 0;
rq_locked = 1;
- ERTS_SMP_LC_CHK_RUNQ_LOCK(rq, rq_locked);
+ ERTS_LC_CHK_RUNQ_LOCK(rq, rq_locked);
get_no_runqs(&active_rqs, &blnc_rqs);
@@ -3945,7 +4408,7 @@ try_steal_task(ErtsRunQueue *rq)
if (active_rqs < blnc_rqs) {
int no = blnc_rqs - active_rqs;
int stop_ix = vix = active_rqs + rq->ix % no;
- while (erts_smp_atomic32_read_acqb(&no_empty_run_queues) < blnc_rqs) {
+ while (erts_atomic32_read_acqb(&no_empty_run_queues) < blnc_rqs) {
res = check_possible_steal_victim(rq, &rq_locked, vix);
if (res)
goto done;
@@ -3960,7 +4423,7 @@ try_steal_task(ErtsRunQueue *rq)
vix = rq->ix;
/* ... then try to steal a job from another active queue... */
- while (erts_smp_atomic32_read_acqb(&no_empty_run_queues) < blnc_rqs) {
+ while (erts_atomic32_read_acqb(&no_empty_run_queues) < blnc_rqs) {
vix++;
if (vix >= active_rqs)
vix = 0;
@@ -3977,13 +4440,11 @@ try_steal_task(ErtsRunQueue *rq)
done:
if (!rq_locked)
- erts_smp_runq_lock(rq);
-
- if (!res)
- res = rq->halt_in_progress ?
- !ERTS_EMPTY_RUNQ_PORTS(rq) : !ERTS_EMPTY_RUNQ(rq);
+ erts_runq_lock(rq);
- return res;
+ if (res)
+ return res;
+ return runq_got_work_to_execute(rq);
}
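
The second stealing pass in try_steal_task() walks the other active run queues in a circular order starting just after the thief's own index, so contention is spread instead of every scheduler hammering queue 0. A sketch of the index arithmetic only, with made-up queue counts:

#include <stdio.h>

int main(void)
{
    int active_rqs = 4;   /* assumed number of active run queues */
    int my_ix = 1;        /* this scheduler's run queue index    */
    int vix = my_ix;

    /* Visit every other active queue once, wrapping around. */
    for (;;) {
        vix++;
        if (vix >= active_rqs)
            vix = 0;
        if (vix == my_ix)
            break;                        /* full circle, nothing stolen */
        printf("try victim %d\n", vix);   /* prints 2, 3, 0 */
    }
    return 0;
}
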
/* Run queue balancing */
@@ -4105,7 +4566,7 @@ alloc_mpaths(void)
{
void *block;
ErtsMigrationPaths *res;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&balance_info.update_mtx));
+ ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&balance_info.update_mtx));
res = mpaths.freelist;
if (res) {
@@ -4128,7 +4589,7 @@ retire_mpaths(ErtsMigrationPaths *mps)
{
ErtsThrPrgrVal current;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&balance_info.update_mtx));
+ ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&balance_info.update_mtx));
current = erts_thr_progress_current();
@@ -4174,7 +4635,7 @@ check_balance(ErtsRunQueue *c_rq)
int sched_util_balancing;
#endif
- if (erts_smp_atomic32_xchg_nob(&balance_info.checking_balance, 1)) {
+ if (erts_atomic32_xchg_nob(&balance_info.checking_balance, 1)) {
c_rq->check_balance_reds = INT_MAX;
return;
}
@@ -4182,15 +4643,15 @@ check_balance(ErtsRunQueue *c_rq)
get_no_runqs(NULL, &blnc_no_rqs);
if (blnc_no_rqs == 1) {
c_rq->check_balance_reds = INT_MAX;
- erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0);
+ erts_atomic32_set_nob(&balance_info.checking_balance, 0);
return;
}
- erts_smp_runq_unlock(c_rq);
+ erts_runq_unlock(c_rq);
if (balance_info.halftime) {
balance_info.halftime = 0;
- erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0);
+ erts_atomic32_set_nob(&balance_info.checking_balance, 0);
ERTS_FOREACH_RUNQ(rq,
{
if (rq->waiting)
@@ -4200,7 +4661,7 @@ check_balance(ErtsRunQueue *c_rq)
rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS;
});
- erts_smp_runq_lock(c_rq);
+ erts_runq_lock(c_rq);
return;
}
@@ -4213,7 +4674,7 @@ check_balance(ErtsRunQueue *c_rq)
* is manipulated. Such updates of the migration information
* might clash with balancing.
*/
- erts_smp_mtx_lock(&balance_info.update_mtx);
+ erts_mtx_lock(&balance_info.update_mtx);
forced = balance_info.forced_check_balance;
balance_info.forced_check_balance = 0;
@@ -4221,10 +4682,10 @@ check_balance(ErtsRunQueue *c_rq)
get_no_runqs(&current_active, &blnc_no_rqs);
if (blnc_no_rqs == 1) {
- erts_smp_mtx_unlock(&balance_info.update_mtx);
- erts_smp_runq_lock(c_rq);
+ erts_mtx_unlock(&balance_info.update_mtx);
+ erts_runq_lock(c_rq);
c_rq->check_balance_reds = INT_MAX;
- erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0);
+ erts_atomic32_set_nob(&balance_info.checking_balance, 0);
return;
}
@@ -4240,7 +4701,7 @@ check_balance(ErtsRunQueue *c_rq)
/* Read balance information for all run queues */
for (qix = 0; qix < blnc_no_rqs; qix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(qix);
- erts_smp_runq_lock(rq);
+ erts_runq_lock(rq);
run_queue_info[qix].flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) {
@@ -4268,7 +4729,7 @@ check_balance(ErtsRunQueue *c_rq)
run_queue_info[qix].sched_util = erts_get_sched_util(rq, 1, 0);
#endif
- erts_smp_runq_unlock(rq);
+ erts_runq_unlock(rq);
}
full_scheds = 0;
@@ -4350,7 +4811,7 @@ check_balance(ErtsRunQueue *c_rq)
sched_util_balancing = 1;
/*
* In order to avoid renaming a large amount of fields
- * we write utilization values instead of lenght values
+ * we write utilization values instead of length values
* in the 'max_len' and 'migration_limit' fields...
*/
for (qix = 0; qix < blnc_no_rqs; qix++) {
@@ -4707,7 +5168,7 @@ erts_fprintf(stderr, "--------------------------------\n");
Uint32 flags = run_queue_info[qix].flags;
ErtsRunQueue *rq = ERTS_RUNQ_IX(qix);
- erts_smp_runq_lock(rq);
+ erts_runq_lock(rq);
ASSERT(!(flags & ERTS_RUNQ_FLG_OUT_OF_WORK));
if (rq->waiting)
flags |= ERTS_RUNQ_FLG_OUT_OF_WORK;
@@ -4722,27 +5183,27 @@ erts_fprintf(stderr, "--------------------------------\n");
rq->out_of_work_count = 0;
(void) ERTS_RUNQ_FLGS_READ_BSET(rq, ERTS_RUNQ_FLGS_MIGRATION_INFO, flags);
- rq->max_len = rq->len;
+ rq->max_len = erts_atomic32_read_dirty(&rq->len);
for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) {
ErtsRunQueueInfo *rqi;
rqi = (pix == ERTS_PORT_PRIO_LEVEL
? &rq->ports.info
: &rq->procs.prio_info[pix]);
- erts_smp_reset_max_len(rq, rqi);
+ erts_reset_max_len(rq, rqi);
rqi->reds = 0;
}
rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS;
- erts_smp_runq_unlock(rq);
+ erts_runq_unlock(rq);
}
- erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0);
+ erts_atomic32_set_nob(&balance_info.checking_balance, 0);
balance_info.n++;
retire_mpaths(old_mpaths);
- erts_smp_mtx_unlock(&balance_info.update_mtx);
+ erts_mtx_unlock(&balance_info.update_mtx);
- erts_smp_runq_lock(c_rq);
+ erts_runq_lock(c_rq);
}
static void
@@ -4750,7 +5211,7 @@ change_no_used_runqs(int used)
{
ErtsMigrationPaths *new_mpaths, *old_mpaths;
int qix;
- erts_smp_mtx_lock(&balance_info.update_mtx);
+ erts_mtx_lock(&balance_info.update_mtx);
set_no_used_runqs(used);
old_mpaths = erts_get_migration_paths_managed();
@@ -4797,28 +5258,23 @@ change_no_used_runqs(int used)
/* Make sure that we balance soon... */
balance_info.forced_check_balance = 1;
- erts_smp_mtx_unlock(&balance_info.update_mtx);
+ erts_mtx_unlock(&balance_info.update_mtx);
- erts_smp_runq_lock(ERTS_RUNQ_IX(0));
+ erts_runq_lock(ERTS_RUNQ_IX(0));
ERTS_RUNQ_IX(0)->check_balance_reds = 0;
- erts_smp_runq_unlock(ERTS_RUNQ_IX(0));
+ erts_runq_unlock(ERTS_RUNQ_IX(0));
}
-#endif /* #ifdef ERTS_SMP */
Uint
erts_debug_nbalance(void)
{
-#ifdef ERTS_SMP
Uint n;
- erts_smp_mtx_lock(&balance_info.update_mtx);
+ erts_mtx_lock(&balance_info.update_mtx);
n = balance_info.n;
- erts_smp_mtx_unlock(&balance_info.update_mtx);
+ erts_mtx_unlock(&balance_info.update_mtx);
return n;
-#else
- return 0;
-#endif
}
/* Wakeup other schedulers */
@@ -4864,7 +5320,6 @@ typedef enum {
#define ERTS_WAKEUP_OTHER_DEC_LEGACY 10
#define ERTS_WAKEUP_OTHER_FIXED_INC_LEGACY (CONTEXT_REDS/10)
-#ifdef ERTS_SMP
static struct {
ErtsSchedWakeupOtherThreshold threshold;
@@ -4880,7 +5335,7 @@ wakeup_other_check(ErtsRunQueue *rq, Uint32 flags)
{
int wo_reds = rq->wakeup_other_reds;
if (wo_reds) {
- int left_len = rq->len - 1;
+ int left_len = erts_atomic32_read_dirty(&rq->len) - 1;
if (left_len < 1) {
int wo_reduce = wo_reds << wakeup_other.dec_shift;
wo_reduce &= wakeup_other.dec_mask;
@@ -4892,14 +5347,14 @@ wakeup_other_check(ErtsRunQueue *rq, Uint32 flags)
rq->wakeup_other += (left_len*wo_reds
+ ERTS_WAKEUP_OTHER_FIXED_INC);
if (rq->wakeup_other > wakeup_other.limit) {
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && rq->waiting)
- wake_dirty_schedulers(rq, 1);
- else
-#endif
+ if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) {
+ if (rq->waiting) {
+ wake_dirty_scheduler(rq);
+ }
+ } else
{
int empty_rqs =
- erts_smp_atomic32_read_acqb(&no_empty_run_queues);
+ erts_atomic32_read_acqb(&no_empty_run_queues);
if (flags & ERTS_RUNQ_FLG_PROTECTED)
(void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
if (empty_rqs != 0)
@@ -4951,7 +5406,7 @@ wakeup_other_check_legacy(ErtsRunQueue *rq, Uint32 flags)
{
int wo_reds = rq->wakeup_other_reds;
if (wo_reds) {
- erts_aint32_t len = rq->len;
+ erts_aint32_t len = erts_atomic32_read_dirty(&rq->len);
if (len < 2) {
rq->wakeup_other -= ERTS_WAKEUP_OTHER_DEC_LEGACY*wo_reds;
if (rq->wakeup_other < 0)
@@ -4962,7 +5417,7 @@ wakeup_other_check_legacy(ErtsRunQueue *rq, Uint32 flags)
else {
if (flags & ERTS_RUNQ_FLG_PROTECTED)
(void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
- if (erts_smp_atomic32_read_acqb(&no_empty_run_queues) != 0) {
+ if (erts_atomic32_read_acqb(&no_empty_run_queues) != 0) {
wake_scheduler_on_empty_runq(rq);
rq->wakeup_other = 0;
}
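
Both wakeup_other_check variants accumulate "pressure" proportional to queue length times executed reductions, and wake another scheduler once the accumulator crosses a limit. A toy run of that arithmetic; the threshold constants here are invented stand-ins, not the ERTS tuning values:

#include <stdio.h>

#define FIXED_INC 200   /* assumed per-check boost   */
#define LIMIT     1000  /* assumed wakeup threshold  */

int main(void)
{
    int wakeup_other = 0;
    int steps[][2] = { {2, 100}, {3, 150}, {1, 120} }; /* {left_len, wo_reds} */

    for (int i = 0; i < 3; i++) {
        int left_len = steps[i][0], wo_reds = steps[i][1];
        wakeup_other += left_len * wo_reds + FIXED_INC;
        printf("step %d: wakeup_other = %d%s\n", i, wakeup_other,
               wakeup_other > LIMIT ? "  -> wake another scheduler" : "");
    }
    return 0;
}
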
@@ -5013,7 +5468,7 @@ static int
no_runqs_to_supervise(void)
{
int used;
- erts_aint32_t nerq = erts_smp_atomic32_read_acqb(&no_empty_run_queues);
+ erts_aint32_t nerq = erts_atomic32_read_acqb(&no_empty_run_queues);
if (nerq <= 0)
return 0;
get_no_runqs(NULL, &used);
@@ -5046,43 +5501,33 @@ runq_supervisor(void *unused)
for (ix = 0; ix < no_rqs; ix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
if (ERTS_RUNQ_FLGS_GET(rq) & ERTS_RUNQ_FLG_NONEMPTY) {
- erts_smp_runq_lock(rq);
- if (rq->len != 0)
+ erts_runq_lock(rq);
+ if (erts_atomic32_read_dirty(&rq->len) != 0)
wake_scheduler_on_empty_runq(rq); /* forced wakeup... */
- erts_smp_runq_unlock(rq);
+ erts_runq_unlock(rq);
}
}
}
return NULL;
}
-#endif
void
erts_early_init_scheduling(int no_schedulers)
{
aux_work_timeout_early_init(no_schedulers);
-#ifdef ERTS_SMP
wakeup_other.threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM;
wakeup_other.type = ERTS_SCHED_WAKEUP_OTHER_TYPE_DEFAULT;
-#endif
-#ifndef ERTS_SCHED_MIN_SPIN
sched_busy_wait.sys_schedule = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM;
sched_busy_wait.tse = (ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM
* ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT);
sched_busy_wait.aux_work = (ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM
* ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_MEDIUM);
-#else
- sched_busy_wait.sys_schedule = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_NONE;
- sched_busy_wait.tse = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_NONE;
- sched_busy_wait.aux_work = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_NONE;
-#endif
}
int
erts_sched_set_wakeup_other_thresold(char *str)
{
-#ifdef ERTS_SMP
ErtsSchedWakeupOtherThreshold threshold;
if (sys_strcmp(str, "very_high") == 0)
threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_HIGH;
@@ -5099,20 +5544,11 @@ erts_sched_set_wakeup_other_thresold(char *str)
wakeup_other.threshold = threshold;
set_wakeup_other_data();
return 0;
-#else
- if (sys_strcmp(str, "very_high") == 0 || sys_strcmp(str, "high") == 0 ||
- sys_strcmp(str, "medium") == 0 || sys_strcmp(str, "low") == 0 ||
- sys_strcmp(str, "very_low") == 0) {
- return 0;
- }
- return EINVAL;
-#endif
}
int
erts_sched_set_wakeup_other_type(char *str)
{
-#ifdef ERTS_SMP
ErtsSchedWakeupOtherType type;
if (sys_strcmp(str, "default") == 0)
type = ERTS_SCHED_WAKEUP_OTHER_TYPE_DEFAULT;
@@ -5122,12 +5558,6 @@ erts_sched_set_wakeup_other_type(char *str)
return EINVAL;
wakeup_other.type = type;
return 0;
-#else
- if (sys_strcmp(str, "default") == 0 || sys_strcmp(str, "legacy") == 0) {
- return 0;
- }
- return EINVAL;
-#endif
}
int
@@ -5192,35 +5622,41 @@ erts_sched_set_wake_cleanup_threshold(char *str)
static void
init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp)
{
- if (!esdp)
- awdp->sched_id = 0;
-#ifdef ERTS_DIRTY_SCHEDULERS
- else if (ERTS_SCHEDULER_IS_DIRTY(esdp))
- awdp->sched_id = (int) ERTS_DIRTY_SCHEDULER_NO(esdp);
-#endif
- else
- awdp->sched_id = (int) esdp->no;
+ int id = 0;
+ if (esdp) {
+ switch (esdp->type) {
+ case ERTS_SCHED_NORMAL:
+ id = (int) esdp->no;
+ break;
+ case ERTS_SCHED_DIRTY_CPU:
+ id = (int) erts_no_schedulers;
+ id += (int) esdp->dirty_no;
+ break;
+ case ERTS_SCHED_DIRTY_IO:
+ id = (int) erts_no_schedulers;
+ id += (int) erts_no_dirty_cpu_schedulers;
+ id += (int) esdp->dirty_no;
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid scheduler type");
+ break;
+ }
+ }
+
+ awdp->sched_id = id;
awdp->esdp = esdp;
awdp->ssi = esdp ? esdp->ssi : NULL;
-#ifdef ERTS_SMP
awdp->latest_wakeup = ERTS_THR_PRGR_VAL_FIRST;
awdp->misc.thr_prgr = ERTS_THR_PRGR_VAL_WAITING;
awdp->dd.thr_prgr = ERTS_THR_PRGR_VAL_WAITING;
- awdp->dd.completed_callback = NULL;
- awdp->dd.completed_arg = NULL;
+ awdp->cncld_tmrs.thr_prgr = ERTS_THR_PRGR_VAL_WAITING;
awdp->later_op.thr_prgr = ERTS_THR_PRGR_VAL_FIRST;
awdp->later_op.size = 0;
awdp->later_op.first = NULL;
awdp->later_op.last = NULL;
-#endif
-#ifdef ERTS_USE_ASYNC_READY_Q
-#ifdef ERTS_SMP
awdp->async_ready.need_thr_prgr = 0;
awdp->async_ready.thr_prgr = ERTS_THR_PRGR_VAL_WAITING;
-#endif
awdp->async_ready.queue = NULL;
-#endif
-#ifdef ERTS_SMP
awdp->delayed_wakeup.next = ERTS_DELAYED_WAKEUP_INFINITY;
if (!dawwp) {
awdp->delayed_wakeup.job = NULL;
@@ -5236,20 +5672,23 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp)
for (i = 0; i <= erts_no_schedulers; i++)
awdp->delayed_wakeup.sched2jix[i] = -1;
}
-#endif
+ awdp->debug.wait_completed.flags = 0;
+ awdp->debug.wait_completed.callback = NULL;
+ awdp->debug.wait_completed.arg = NULL;
}
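
The new switch in init_aux_work_data() flattens all scheduler kinds into one id space: normal schedulers take 1..N, dirty CPU schedulers follow at N+1, and dirty IO schedulers come after those. A minimal sketch of that mapping with example counts:

#include <stdio.h>

enum sched_type { SCHED_NORMAL, SCHED_DIRTY_CPU, SCHED_DIRTY_IO };

static int aux_work_id(enum sched_type t, int no,
                       int n_normal, int n_dirty_cpu)
{
    switch (t) {
    case SCHED_NORMAL:    return no;                          /* 1..N       */
    case SCHED_DIRTY_CPU: return n_normal + no;               /* N+1..      */
    case SCHED_DIRTY_IO:  return n_normal + n_dirty_cpu + no; /* after both */
    }
    return 0;
}

int main(void)
{
    /* assumed: 4 normal, 2 dirty CPU, 3 dirty IO schedulers */
    printf("%d\n", aux_work_id(SCHED_NORMAL, 3, 4, 2));    /* 3 */
    printf("%d\n", aux_work_id(SCHED_DIRTY_CPU, 1, 4, 2)); /* 5 */
    printf("%d\n", aux_work_id(SCHED_DIRTY_IO, 2, 4, 2));  /* 8 */
    return 0;
}
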
static void
init_scheduler_data(ErtsSchedulerData* esdp, int num,
ErtsSchedulerSleepInfo* ssi,
ErtsRunQueue* runq,
- char** daww_ptr, size_t daww_sz)
+ char** daww_ptr, size_t daww_sz,
+ Process *shadow_proc,
+ Uint64 time_stamp)
{
-#ifdef ERTS_SMP
+ esdp->timer_wheel = NULL;
erts_bits_init_state(&esdp->erl_bits_state);
esdp->match_pseudo_process = NULL;
esdp->free_process = NULL;
-#endif
esdp->x_reg_array =
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_BEAM_REGISTER,
ERTS_X_REGS_ALLOCATED *
@@ -5257,21 +5696,41 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
esdp->f_reg_array =
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_BEAM_REGISTER,
MAX_REG * sizeof(FloatDef));
-#if !HEAP_ON_C_STACK
- esdp->num_tmp_heap_used = 0;
-#endif
-#ifdef ERTS_DIRTY_SCHEDULERS
+ esdp->run_queue = runq;
if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix)) {
esdp->no = 0;
- ERTS_DIRTY_SCHEDULER_NO(esdp) = (Uint) num;
+ if (runq == ERTS_DIRTY_CPU_RUNQ)
+ esdp->type = ERTS_SCHED_DIRTY_CPU;
+ else {
+ ASSERT(runq == ERTS_DIRTY_IO_RUNQ);
+ esdp->type = ERTS_SCHED_DIRTY_IO;
+ }
+ esdp->dirty_no = (Uint) num;
+ if (num == 1) {
+ /*
+ * Multi-scheduling block functionality depends
+ * on finding dirty scheduler number 1 here...
+ */
+ runq->scheduler = esdp;
+ }
}
else {
+ esdp->type = ERTS_SCHED_NORMAL;
esdp->no = (Uint) num;
- ERTS_DIRTY_SCHEDULER_NO(esdp) = 0;
+ esdp->dirty_no = 0;
+ runq->scheduler = esdp;
}
-#else
- esdp->no = (Uint) num;
-#endif
+ esdp->dirty_shadow_process = shadow_proc;
+ if (shadow_proc) {
+ erts_init_empty_process(shadow_proc);
+ erts_atomic32_init_nob(&shadow_proc->state,
+ (ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_PROXY));
+ shadow_proc->static_flags = ERTS_STC_FLG_SHADOW_PROC;
+ }
+
+ ssi->esdp = esdp;
esdp->ssi = ssi;
esdp->current_process = NULL;
esdp->current_port = NULL;
@@ -5281,41 +5740,42 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
erts_init_atom_cache_map(&esdp->atom_cache_map);
- esdp->run_queue = runq;
- esdp->run_queue->scheduler = esdp;
+ esdp->last_monotonic_time = 0;
+ esdp->check_time_reds = 0;
+
+ esdp->thr_id = (Uint32) num;
+ erts_sched_bif_unique_init(esdp);
+
+ esdp->io.out = (Uint64) 0;
+ esdp->io.in = (Uint64) 0;
if (daww_ptr) {
init_aux_work_data(&esdp->aux_work_data, esdp, *daww_ptr);
-#ifdef ERTS_SMP
*daww_ptr += daww_sz;
-#endif
}
esdp->reductions = 0;
- init_sched_wall_time(&esdp->sched_wall_time);
+ init_sched_wall_time(esdp, time_stamp);
erts_port_task_handle_init(&esdp->nosuspend_port_task_handle);
}
void
-erts_init_scheduling(int no_schedulers, int no_schedulers_online
-#ifdef ERTS_DIRTY_SCHEDULERS
- , int no_dirty_cpu_schedulers, int no_dirty_cpu_schedulers_online,
+erts_init_scheduling(int no_schedulers, int no_schedulers_online, int no_poll_threads,
+ int no_dirty_cpu_schedulers, int no_dirty_cpu_schedulers_online,
int no_dirty_io_schedulers
-#endif
)
{
- int ix, n, no_ssi;
+ int ix, n, no_ssi, tot_rqs;
char *daww_ptr;
size_t daww_sz;
size_t size_runqs;
+ erts_aint32_t set_schdlr_sspnd_change_flags;
init_misc_op_list_alloc();
init_proc_sys_task_queues_alloc();
-#ifdef ERTS_SMP
set_wakeup_other_data();
-#endif
#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
if (erts_sched_balance_util)
@@ -5325,36 +5785,26 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
ASSERT(no_schedulers_online <= no_schedulers);
ASSERT(no_schedulers_online >= 1);
ASSERT(no_schedulers >= 1);
-#ifdef ERTS_DIRTY_SCHEDULERS
ASSERT(no_dirty_cpu_schedulers <= no_schedulers);
ASSERT(no_dirty_cpu_schedulers >= 1);
ASSERT(no_dirty_cpu_schedulers_online <= no_schedulers_online);
ASSERT(no_dirty_cpu_schedulers_online >= 1);
-#endif
+ ASSERT(erts_no_poll_threads == no_poll_threads);
/* Create and initialize run queues */
n = no_schedulers;
- size_runqs = sizeof(ErtsAlignedRunQueue) * (n + ERTS_NUM_DIRTY_RUNQS);
+ tot_rqs = (n + ERTS_NUM_DIRTY_RUNQS);
+ size_runqs = sizeof(ErtsAlignedRunQueue) * tot_rqs;
erts_aligned_run_queues =
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_RUNQS, size_runqs);
-#ifdef ERTS_SMP
-#ifdef ERTS_DIRTY_SCHEDULERS
- erts_aligned_run_queues += ERTS_NUM_DIRTY_RUNQS;
-#endif
- erts_smp_atomic32_init_nob(&no_empty_run_queues, 0);
-#endif
+ erts_atomic32_init_nob(&no_empty_run_queues, 0);
erts_no_run_queues = n;
- for (ix = -(ERTS_NUM_DIRTY_RUNQS); ix < n; ix++) {
+ for (ix = 0; ix < tot_rqs; ix++) {
int pix, rix;
-#ifdef ERTS_DIRTY_SCHEDULERS
- ErtsRunQueue *rq = ERTS_RUNQ_IX_IS_DIRTY(ix) ?
- ERTS_DIRTY_RUNQ_IX(ix) : ERTS_RUNQ_IX(ix);
-#else
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
-#endif
rq->ix = ix;
@@ -5362,16 +5812,16 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
* id if the esdp->no <-> ix+1 mapping changes.
*/
- erts_smp_mtx_init_x(&rq->mtx, "run_queue", make_small(ix + 1));
- erts_smp_cnd_init(&rq->cnd);
+ erts_mtx_init(&rq->mtx, "run_queue", make_small(ix + 1),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
+ erts_cnd_init(&rq->cnd);
-#ifdef ERTS_DIRTY_SCHEDULERS
-#ifdef ERTS_SMP
- if (ERTS_RUNQ_IX_IS_DIRTY(ix))
- erts_smp_spinlock_init(&rq->sleepers.lock, "dirty_run_queue_sleep_list");
+ if (ERTS_RUNQ_IX_IS_DIRTY(ix)) {
+ erts_spinlock_init(&rq->sleepers.lock, "dirty_run_queue_sleep_list",
+ make_small(ix + 1),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
+ }
rq->sleepers.list = NULL;
-#endif
-#endif
rq->waiting = 0;
rq->woken = 0;
@@ -5384,17 +5834,16 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
}
rq->out_of_work_count = 0;
rq->max_len = 0;
- rq->len = 0;
+ erts_atomic32_set_nob(&rq->len, 0);
rq->wakeup_other = 0;
rq->wakeup_other_reds = 0;
- rq->halt_in_progress = 0;
rq->procs.pending_exiters = NULL;
rq->procs.context_switches = 0;
rq->procs.reductions = 0;
for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) {
- erts_smp_atomic32_init_nob(&rq->procs.prio_info[pix].len, 0);
+ erts_atomic32_init_nob(&rq->procs.prio_info[pix].len, 0);
rq->procs.prio_info[pix].max_len = 0;
rq->procs.prio_info[pix].reds = 0;
if (pix < ERTS_NO_PROC_PRIO_LEVELS - 1) {
@@ -5406,7 +5855,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
rq->misc.start = NULL;
rq->misc.end = NULL;
- erts_smp_atomic32_init_nob(&rq->ports.info.len, 0);
+ erts_atomic32_init_nob(&rq->ports.info.len, 0);
rq->ports.info.max_len = 0;
rq->ports.info.reds = 0;
rq->ports.start = NULL;
@@ -5418,7 +5867,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
}
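
With the negative-index scheme gone, the initialization loop above walks one flat array: a run queue per normal scheduler first, then the dirty run queues at the tail. A sketch of that layout, with example counts:

#include <stdio.h>

int main(void)
{
    int n = 4;                  /* assumed normal schedulers     */
    int num_dirty_rqs = 2;      /* one dirty CPU + one dirty IO  */
    int tot_rqs = n + num_dirty_rqs;

    for (int ix = 0; ix < tot_rqs; ix++)
        printf("rq[%d]: %s\n", ix,
               ix < n  ? "normal"
             : ix == n ? "dirty CPU"
             :           "dirty IO");
    return 0;
}
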
-#ifdef ERTS_SMP
if (erts_no_run_queues != 1) {
run_queue_info = erts_alloc(ERTS_ALC_T_RUNQ_BLNS,
@@ -5429,49 +5877,42 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
* erts_no_run_queues));
}
-#endif
n = (int) no_schedulers;
erts_no_schedulers = n;
-#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_no_total_schedulers = n;
erts_no_dirty_cpu_schedulers = no_dirty_cpu_schedulers;
+ erts_no_total_schedulers += no_dirty_cpu_schedulers;
erts_no_dirty_io_schedulers = no_dirty_io_schedulers;
-#endif
+ erts_no_total_schedulers += no_dirty_io_schedulers;
/* Create and initialize scheduler sleep info */
-#ifdef ERTS_SMP
- no_ssi = n+1;
-#else
- no_ssi = 1;
-#endif
+ no_ssi = n + 1 /* aux thread */;
aligned_sched_sleep_info =
erts_alloc_permanent_cache_aligned(
ERTS_ALC_T_SCHDLR_SLP_INFO,
no_ssi*sizeof(ErtsAlignedSchedulerSleepInfo));
for (ix = 0; ix < no_ssi; ix++) {
ErtsSchedulerSleepInfo *ssi = &aligned_sched_sleep_info[ix].ssi;
-#ifdef ERTS_SMP
#if 0 /* no need to initialize these... */
ssi->next = NULL;
ssi->prev = NULL;
#endif
- erts_smp_atomic32_init_nob(&ssi->flags, 0);
+ ssi->esdp = NULL;
+ erts_atomic32_init_nob(&ssi->flags, 0);
ssi->event = NULL; /* initialized in sched_thread_func */
-#endif
erts_atomic32_init_nob(&ssi->aux_work, 0);
}
-#ifdef ERTS_SMP
- aligned_sched_sleep_info++;
+ aligned_sched_sleep_info += 1 /* aux thread */;
-#ifdef ERTS_DIRTY_SCHEDULERS
aligned_dirty_cpu_sched_sleep_info =
erts_alloc_permanent_cache_aligned(
ERTS_ALC_T_SCHDLR_SLP_INFO,
no_dirty_cpu_schedulers*sizeof(ErtsAlignedSchedulerSleepInfo));
for (ix = 0; ix < no_dirty_cpu_schedulers; ix++) {
ErtsSchedulerSleepInfo *ssi = &aligned_dirty_cpu_sched_sleep_info[ix].ssi;
- erts_smp_atomic32_init_nob(&ssi->flags, 0);
+ erts_atomic32_init_nob(&ssi->flags, 0);
ssi->event = NULL; /* initialized in sched_dirty_cpu_thread_func */
erts_atomic32_init_nob(&ssi->aux_work, 0);
}
@@ -5481,98 +5922,95 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
no_dirty_io_schedulers*sizeof(ErtsAlignedSchedulerSleepInfo));
for (ix = 0; ix < no_dirty_io_schedulers; ix++) {
ErtsSchedulerSleepInfo *ssi = &aligned_dirty_io_sched_sleep_info[ix].ssi;
- erts_smp_atomic32_init_nob(&ssi->flags, 0);
+ erts_atomic32_init_nob(&ssi->flags, 0);
ssi->event = NULL; /* initialized in sched_dirty_io_thread_func */
erts_atomic32_init_nob(&ssi->aux_work, 0);
}
-#endif
-#endif
+
+ aligned_poll_thread_sleep_info =
+ erts_alloc_permanent_cache_aligned(
+ ERTS_ALC_T_SCHDLR_SLP_INFO,
+ no_poll_threads*sizeof(ErtsAlignedSchedulerSleepInfo));
+ for (ix = 0; ix < no_poll_threads; ix++) {
+ ErtsSchedulerSleepInfo *ssi = &aligned_poll_thread_sleep_info[ix].ssi;
+ ssi->esdp = NULL;
+ erts_atomic32_init_nob(&ssi->flags, 0);
+ ssi->event = NULL; /* initialized in poll_thread */
+ erts_atomic32_init_nob(&ssi->aux_work, 0);
+ }
/* Create and initialize scheduler specific data */
-#ifdef ERTS_SMP
daww_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE((sizeof(ErtsDelayedAuxWorkWakeupJob)
+ sizeof(int))*(n+1));
daww_ptr = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA,
daww_sz*n);
-#else
- daww_sz = 0;
- daww_ptr = NULL;
-#endif
erts_aligned_scheduler_data =
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA,
- n*sizeof(ErtsAlignedSchedulerData));
+ n*sizeof(ErtsAlignedSchedulerData));
for (ix = 0; ix < n; ix++) {
ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(ix);
init_scheduler_data(esdp, ix+1, ERTS_SCHED_SLEEP_INFO_IX(ix),
- ERTS_RUNQ_IX(ix), &daww_ptr, daww_sz);
+ ERTS_RUNQ_IX(ix), &daww_ptr, daww_sz,
+ NULL, 0);
}
-#ifdef ERTS_DIRTY_SCHEDULERS
-#ifdef ERTS_SMP
- erts_aligned_dirty_cpu_scheduler_data =
- erts_alloc_permanent_cache_aligned(
- ERTS_ALC_T_SCHDLR_DATA,
- no_dirty_cpu_schedulers*sizeof(ErtsAlignedSchedulerData));
- for (ix = 0; ix < no_dirty_cpu_schedulers; ix++) {
- ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix);
- init_scheduler_data(esdp, ix+1, ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix),
- ERTS_DIRTY_CPU_RUNQ, NULL, 0);
- }
- erts_aligned_dirty_io_scheduler_data =
- erts_alloc_permanent_cache_aligned(
- ERTS_ALC_T_SCHDLR_DATA,
- no_dirty_io_schedulers*sizeof(ErtsAlignedSchedulerData));
- for (ix = 0; ix < no_dirty_io_schedulers; ix++) {
- ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix);
- init_scheduler_data(esdp, ix+1, ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix),
- ERTS_DIRTY_IO_RUNQ, NULL, 0);
+ {
+ Uint64 ts = sched_wall_time_ts();
+ int dirty_scheds = no_dirty_cpu_schedulers + no_dirty_io_schedulers;
+ int adspix = 0;
+ ErtsAlignedDirtyShadowProcess *adsp =
+ erts_alloc_permanent_cache_aligned(
+ ERTS_ALC_T_SCHDLR_DATA,
+ dirty_scheds * sizeof(ErtsAlignedDirtyShadowProcess));
+
+ erts_aligned_dirty_cpu_scheduler_data =
+ erts_alloc_permanent_cache_aligned(
+ ERTS_ALC_T_SCHDLR_DATA,
+ dirty_scheds * sizeof(ErtsAlignedSchedulerData));
+
+ erts_aligned_dirty_io_scheduler_data =
+ &erts_aligned_dirty_cpu_scheduler_data[no_dirty_cpu_schedulers];
+
+ for (ix = 0; ix < no_dirty_cpu_schedulers; ix++) {
+ ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix);
+ init_scheduler_data(esdp, ix+1, ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix),
+ ERTS_DIRTY_CPU_RUNQ, NULL, 0,
+ &adsp[adspix++].dsp, ts);
+ }
+ for (ix = 0; ix < no_dirty_io_schedulers; ix++) {
+ ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix);
+ init_scheduler_data(esdp, ix+1, ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix),
+ ERTS_DIRTY_IO_RUNQ, NULL, 0,
+ &adsp[adspix++].dsp, ts);
+ }
}
-#endif
-#endif
init_misc_aux_work();
-#if !HALFWORD_HEAP
init_swtreq_alloc();
-#endif
-
-
-#ifdef ERTS_SMP
+ init_screq_alloc();
- erts_atomic32_init_nob(&completed_dealloc_count, 0); /* debug only */
+ erts_atomic32_init_nob(&debug_wait_completed_count, 0); /* debug only */
+ debug_wait_completed_flags = 0;
aux_thread_aux_work_data =
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA,
sizeof(ErtsAuxWorkData));
- erts_smp_mtx_init(&schdlr_sspnd.mtx, "schdlr_sspnd");
- erts_smp_cnd_init(&schdlr_sspnd.cnd);
+ poll_thread_aux_work_data =
+ erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA,
+ no_poll_threads * sizeof(ErtsAuxWorkData));
- erts_smp_atomic32_init_nob(&schdlr_sspnd.changing, 0);
- schdlr_sspnd.online = no_schedulers_online;
- schdlr_sspnd.curr_online = no_schedulers;
- schdlr_sspnd.msb.ongoing = 0;
- erts_smp_atomic32_init_nob(&schdlr_sspnd.active, no_schedulers);
-#ifdef ERTS_DIRTY_SCHEDULERS
- erts_smp_atomic32_init_nob(&schdlr_sspnd.dirty_cpu_changing, 0);
- schdlr_sspnd.dirty_cpu_online = no_dirty_cpu_schedulers_online;
- schdlr_sspnd.dirty_cpu_curr_online = no_dirty_cpu_schedulers;
- erts_smp_atomic32_init_nob(&schdlr_sspnd.dirty_cpu_active, no_dirty_cpu_schedulers);
- erts_smp_atomic32_init_nob(&schdlr_sspnd.dirty_io_changing, 0);
- schdlr_sspnd.dirty_io_online = no_dirty_io_schedulers;
- schdlr_sspnd.dirty_io_curr_online = no_dirty_io_schedulers;
- erts_smp_atomic32_init_nob(&schdlr_sspnd.dirty_io_active, no_dirty_io_schedulers);
-#endif
- schdlr_sspnd.msb.procs = NULL;
init_no_runqs(no_schedulers_online, no_schedulers_online);
balance_info.last_active_runqs = no_schedulers;
- erts_smp_mtx_init(&balance_info.update_mtx, "migration_info_update");
+ erts_mtx_init(&balance_info.update_mtx, "migration_info_update", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
balance_info.forced_check_balance = 0;
balance_info.halftime = 1;
balance_info.full_reds_history_index = 0;
- erts_smp_atomic32_init_nob(&balance_info.checking_balance, 0);
+ erts_atomic32_init_nob(&balance_info.checking_balance, 0);
balance_info.prev_rise.active_runqs = 0;
balance_info.prev_rise.max_len = 0;
balance_info.prev_rise.reds = 0;
@@ -5580,74 +6018,79 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
init_migration_paths();
- if (no_schedulers_online < no_schedulers) {
+ init_scheduler_suspend();
+
+ set_schdlr_sspnd_change_flags = 0;
+
+ schdlr_sspnd_set_nscheds(&schdlr_sspnd.online,
+ ERTS_SCHED_NORMAL,
+ no_schedulers_online);
+ schdlr_sspnd_set_nscheds(&schdlr_sspnd.curr_online,
+ ERTS_SCHED_NORMAL,
+ no_schedulers);
+ schdlr_sspnd_set_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_NORMAL,
+ no_schedulers);
+
+ if (no_schedulers_online != no_schedulers) {
+ ASSERT(no_schedulers_online < no_schedulers);
+ set_schdlr_sspnd_change_flags |= ERTS_SCHDLR_SSPND_CHNG_ONLN;
+ schdlr_sspnd.changer = am_init;
change_no_used_runqs(no_schedulers_online);
for (ix = no_schedulers_online; ix < erts_no_run_queues; ix++)
suspend_run_queue(ERTS_RUNQ_IX(ix));
}
- schdlr_sspnd.wait_curr_online = no_schedulers_online;
- schdlr_sspnd.curr_online *= 2; /* Boot strapping... */
- ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
- | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
-#ifdef ERTS_DIRTY_SCHEDULERS
- schdlr_sspnd.dirty_cpu_wait_curr_online = no_dirty_cpu_schedulers_online;
- schdlr_sspnd.dirty_cpu_curr_online *= 2;
- ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
- | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
- for (ix = no_dirty_cpu_schedulers_online; ix < no_dirty_cpu_schedulers; ix++) {
- ErtsSchedulerData* esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix);
- erts_smp_atomic32_read_bor_nob(&esdp->ssi->flags, ERTS_SSI_FLG_SUSPENDED);
- }
-
- schdlr_sspnd.dirty_io_wait_curr_online = no_dirty_io_schedulers;
- schdlr_sspnd.dirty_io_curr_online *= 2;
- ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
- | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
-#endif
- erts_smp_atomic32_init_nob(&doing_sys_schedule, 0);
+ schdlr_sspnd_set_nscheds(&schdlr_sspnd.online,
+ ERTS_SCHED_DIRTY_CPU,
+ no_dirty_cpu_schedulers_online);
+ schdlr_sspnd_set_nscheds(&schdlr_sspnd.curr_online,
+ ERTS_SCHED_DIRTY_CPU,
+ no_dirty_cpu_schedulers);
+ schdlr_sspnd_set_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_DIRTY_CPU,
+ no_dirty_cpu_schedulers);
- init_misc_aux_work();
-
-#else /* !ERTS_SMP */
- {
- ErtsSchedulerData *esdp;
- esdp = ERTS_SCHEDULER_IX(0);
- erts_scheduler_data = esdp;
-#ifdef USE_THREADS
- erts_tsd_set(sched_data_key, (void *) esdp);
-#endif
+ if (no_dirty_cpu_schedulers_online != no_dirty_cpu_schedulers) {
+ ASSERT(no_dirty_cpu_schedulers_online < no_dirty_cpu_schedulers);
+ set_schdlr_sspnd_change_flags |= ERTS_SCHDLR_SSPND_CHNG_DCPU_ONLN;
+ for (ix = no_dirty_cpu_schedulers_online; ix < no_dirty_cpu_schedulers; ix++) {
+ ErtsSchedulerData* esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix);
+ erts_atomic32_read_bor_nob(&esdp->ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ }
}
- erts_no_schedulers = 1;
-#ifdef ERTS_DIRTY_SCHEDULERS
- erts_no_dirty_cpu_schedulers = 0;
- erts_no_dirty_io_schedulers = 0;
-#endif
-#endif
- erts_smp_atomic32_init_nob(&function_calls, 0);
+ schdlr_sspnd_set_nscheds(&schdlr_sspnd.online,
+ ERTS_SCHED_DIRTY_IO,
+ no_dirty_io_schedulers);
+ schdlr_sspnd_set_nscheds(&schdlr_sspnd.curr_online,
+ ERTS_SCHED_DIRTY_IO,
+ no_dirty_io_schedulers);
+ schdlr_sspnd_set_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_DIRTY_IO,
+ no_dirty_io_schedulers);
+
+ erts_atomic32_init_nob(&dirty_count.cpu.active,
+ (erts_aint32_t) no_dirty_cpu_schedulers);
+ erts_atomic32_init_nob(&dirty_count.io.active,
+ (erts_aint32_t) no_dirty_io_schedulers);
+
+
+ if (set_schdlr_sspnd_change_flags)
+ erts_atomic32_set_nob(&schdlr_sspnd.changing,
+ set_schdlr_sspnd_change_flags);
+
+ init_misc_aux_work();
+
/* init port tasks */
erts_port_task_init();
- aux_work_timeout_late_init();
-#ifndef ERTS_SMP
-#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC
- erts_scheduler_data->verify_unused_temp_alloc
- = erts_alloc_get_verify_unused_temp_alloc(
- &erts_scheduler_data->verify_unused_temp_alloc_data);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(NULL);
-#endif
-#endif
-
- erts_smp_atomic32_init_relb(&erts_halt_progress, -1);
+ erts_atomic32_init_relb(&erts_halt_progress, -1);
erts_halt_code = 0;
-#if !defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
- erts_lc_set_thread_name("scheduler 1");
-#endif
}
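
The schdlr_sspnd_set_nscheds() calls above maintain one online/curr_online/active counter per scheduler type instead of the old separate dirty_cpu_*/dirty_io_* fields. A minimal sketch of such a per-type setter; the field names follow init_scheduler_suspend() further down, the rest is assumed:

#include <stdio.h>

typedef struct { unsigned normal, dirty_cpu, dirty_io; } SchedCounters;
enum sched_type { SCHED_NORMAL, SCHED_DIRTY_CPU, SCHED_DIRTY_IO };

static void set_nscheds(SchedCounters *c, enum sched_type t, unsigned no)
{
    switch (t) {
    case SCHED_NORMAL:    c->normal = no;    break;
    case SCHED_DIRTY_CPU: c->dirty_cpu = no; break;
    case SCHED_DIRTY_IO:  c->dirty_io = no;  break;
    }
}

int main(void)
{
    SchedCounters online = {0, 0, 0};
    set_nscheds(&online, SCHED_NORMAL, 8);
    set_nscheds(&online, SCHED_DIRTY_CPU, 8);
    set_nscheds(&online, SCHED_DIRTY_IO, 10);
    printf("online: %u normal, %u dirty cpu, %u dirty io\n",
           online.normal, online.dirty_cpu, online.dirty_io);
    return 0;
}
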
@@ -5660,7 +6103,6 @@ erts_schedid2runq(Uint id)
return ERTS_RUNQ_IX(ix);
}
-#ifdef USE_THREADS
ErtsSchedulerData *
erts_get_scheduler_data(void)
@@ -5668,16 +6110,13 @@ erts_get_scheduler_data(void)
return (ErtsSchedulerData *) erts_tsd_get(sched_data_key);
}
-#endif
static Process *
make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio)
{
erts_aint32_t state;
Process *proxy;
-#ifdef ERTS_SMP
ErtsRunQueue *rq = RUNQ_READ_RQ(&proc->run_queue);
-#endif
state = (ERTS_PSFLG_PROXY
| ERTS_PSFLG_IN_RUNQ
@@ -5688,11 +6127,9 @@ make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio)
if (prev_proxy) {
proxy = prev_proxy;
- ASSERT(erts_smp_atomic32_read_nob(&proxy->state) & ERTS_PSFLG_PROXY);
- erts_smp_atomic32_set_nob(&proxy->state, state);
-#ifdef ERTS_SMP
+ ASSERT(erts_atomic32_read_nob(&proxy->state) & ERTS_PSFLG_PROXY);
+ erts_atomic32_set_nob(&proxy->state, state);
RUNQ_SET_RQ(&proc->run_queue, rq);
-#endif
}
else {
proxy = erts_alloc(ERTS_ALC_T_PROC, sizeof(Process));
@@ -5704,11 +6141,9 @@ make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio)
ui32[i] = (Uint32) 0xdeadbeef;
}
#endif
- erts_smp_atomic32_init_nob(&proxy->state, state);
-#ifdef ERTS_SMP
- erts_smp_atomic_init_nob(&proxy->run_queue,
- erts_smp_atomic_read_nob(&proc->run_queue));
-#endif
+ erts_atomic32_init_nob(&proxy->state, state);
+ erts_atomic_init_nob(&proxy->run_queue,
+ erts_atomic_read_nob(&proc->run_queue));
}
proxy->common.id = proc->common.id;
@@ -5716,19 +6151,104 @@ make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio)
return proxy;
}
-static ERTS_INLINE void
-free_proxy_proc(Process *proxy)
-{
- ASSERT(erts_smp_atomic32_read_nob(&proxy->state) & ERTS_PSFLG_PROXY);
- erts_free(ERTS_ALC_T_PROC, proxy);
-}
-
#define ERTS_ENQUEUE_NOT 0
#define ERTS_ENQUEUE_NORMAL_QUEUE 1
-#ifdef ERTS_DIRTY_SCHEDULERS
#define ERTS_ENQUEUE_DIRTY_CPU_QUEUE 2
#define ERTS_ENQUEUE_DIRTY_IO_QUEUE 3
+
+
+static int
+check_dirty_enqueue_in_prio_queue(Process *c_p,
+ erts_aint32_t *newp,
+ erts_aint32_t actual,
+ erts_aint32_t aprio,
+ erts_aint32_t qbit)
+{
+ int queue;
+ erts_aint32_t dact, max_qbit;
+
+ /* Do not enqueue a free process... */
+ if (actual & ERTS_PSFLG_FREE) {
+ *newp &= ~ERTS_PSFLGS_DIRTY_WORK;
+ return ERTS_ENQUEUE_NOT;
+ }
+
+ /* Termination should be done on an ordinary scheduler */
+ if ((*newp) & ERTS_PSFLG_EXITING) {
+ *newp &= ~ERTS_PSFLGS_DIRTY_WORK;
+ return ERTS_ENQUEUE_NORMAL_QUEUE;
+ }
+
+ /*
+ * If we have system tasks, we enqueue on ordinary run-queue
+ * and take care of those system tasks first.
+ */
+ if ((*newp) & ERTS_PSFLG_ACTIVE_SYS)
+ return ERTS_ENQUEUE_NORMAL_QUEUE;
+
+ dact = erts_atomic32_read_mb(&c_p->dirty_state);
+ if (actual & (ERTS_PSFLG_DIRTY_ACTIVE_SYS
+ | ERTS_PSFLG_DIRTY_CPU_PROC)) {
+ max_qbit = ((dact >> ERTS_PDSFLGS_IN_CPU_PRQ_MASK_OFFSET)
+ & ERTS_PDSFLGS_QMASK);
+ queue = ERTS_ENQUEUE_DIRTY_CPU_QUEUE;
+ }
+ else {
+ ASSERT(actual & ERTS_PSFLG_DIRTY_IO_PROC);
+ max_qbit = ((dact >> ERTS_PDSFLGS_IN_IO_PRQ_MASK_OFFSET)
+ & ERTS_PDSFLGS_QMASK);
+ queue = ERTS_ENQUEUE_DIRTY_IO_QUEUE;
+ }
+
+ max_qbit |= 1 << ERTS_PSFLGS_QMASK_BITS;
+ max_qbit &= -max_qbit;
+
+ if (qbit >= max_qbit)
+ return ERTS_ENQUEUE_NOT; /* Already queued in higher or equal prio */
+ if ((actual & (ERTS_PSFLG_IN_RUNQ|ERTS_PSFLGS_USR_PRIO_MASK))
+ != (aprio << ERTS_PSFLGS_USR_PRIO_OFFSET)) {
+ /*
+ * Process struct already enqueued, or actual prio not
+ * equal to user prio, i.e., enqueue using proxy.
+ */
+ return -1*queue;
+ }
+
+ /*
+ * Enqueue using process struct.
+ */
+ *newp &= ~ERTS_PSFLGS_PRQ_PRIO_MASK;
+ *newp |= ERTS_PSFLG_IN_RUNQ | (aprio << ERTS_PSFLGS_PRQ_PRIO_OFFSET);
+ return queue;
+}
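
The dirty membership test uses the same lowest-set-bit trick as before, but note the comparison is qbit >= max_qbit: a process already queued at an equal priority is also dropped. A sketch of that decision, with an assumed mask width:

#include <stdio.h>

#define QMASK_BITS 4

static int should_enqueue(int in_prq_mask, int qbit)
{
    int max_qbit = in_prq_mask;
    max_qbit |= 1 << QMASK_BITS;  /* sentinel: empty mask never wins */
    max_qbit &= -max_qbit;        /* lowest (best) queued bit        */
    return qbit < max_qbit;       /* ">= max_qbit" means drop        */
}

int main(void)
{
    printf("%d\n", should_enqueue(0, 1 << 2));        /* 1: not queued yet  */
    printf("%d\n", should_enqueue(1 << 2, 1 << 2));   /* 0: equal prio      */
    printf("%d\n", should_enqueue(1 << 1, 1 << 2));   /* 0: better queued   */
    printf("%d\n", should_enqueue(1 << 3, 1 << 2));   /* 1: worse queued    */
    return 0;
}
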
+
+static ERTS_INLINE int
+fin_dirty_enq_s_change(Process *p,
+ int pstruct_reserved,
+ erts_aint32_t enq_prio,
+ int qmask_offset)
+{
+ erts_aint32_t qbit = 1 << enq_prio;
+ qbit <<= qmask_offset;
+
+ if (qbit & erts_atomic32_read_bor_mb(&p->dirty_state, qbit)) {
+ /* Already enqueued by someone else... */
+ if (pstruct_reserved) {
+ /* We reserved process struct for enqueue; clear it... */
+#ifdef DEBUG
+ erts_aint32_t old =
+#else
+ (void)
#endif
+ erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_IN_RUNQ);
+ ASSERT(old & ERTS_PSFLG_IN_RUNQ);
+ }
+ return 0;
+ }
+
+ return !0;
+}
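
fin_dirty_enq_s_change() resolves the enqueue race with a single atomic fetch-OR: setting the queue bit and learning whether someone else already held it happen in one step. A sketch of the claim-bit pattern, written with C11 atomics for illustration:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int dirty_state;

static int try_claim(int qbit)
{
    int old = atomic_fetch_or(&dirty_state, qbit);
    if (old & qbit)
        return 0;   /* bit was already set: someone else enqueued */
    return 1;       /* we won the race and own the enqueue        */
}

int main(void)
{
    atomic_init(&dirty_state, 0);
    printf("%d\n", try_claim(1 << 2)); /* 1: first claim succeeds */
    printf("%d\n", try_claim(1 << 2)); /* 0: bit was already set  */
    return 0;
}
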
+
static ERTS_INLINE int
check_enqueue_in_prio_queue(Process *c_p,
@@ -5743,62 +6263,13 @@ check_enqueue_in_prio_queue(Process *c_p,
*prq_prio_p = aprio;
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (actual & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)) {
- /*
- * If we have system tasks of a priority higher
- * or equal to the user priority, we enqueue
- * on ordinary run-queue and take care of
- * those system tasks first.
- */
- if (actual & ERTS_PSFLG_ACTIVE_SYS) {
- erts_aint32_t uprio, stprio, qmask;
- uprio = (actual >> ERTS_PSFLGS_USR_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK;
- if (aprio < uprio)
- goto enqueue_normal_runq; /* system tasks with higher prio */
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
- qmask = c_p->sys_task_qs->qmask;
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
- switch (qmask & -qmask) {
- case MAX_BIT:
- stprio = PRIORITY_MAX;
- break;
- case HIGH_BIT:
- stprio = PRIORITY_HIGH;
- break;
- case NORMAL_BIT:
- stprio = PRIORITY_NORMAL;
- break;
- case LOW_BIT:
- stprio = PRIORITY_LOW;
- break;
- default:
- stprio = PRIORITY_LOW+1;
- break;
- }
- if (stprio <= uprio)
- goto enqueue_normal_runq; /* system tasks with higher prio */
- }
-
- /* Enqueue in dirty run queue if not already enqueued */
- if (actual & (ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q))
- return ERTS_ENQUEUE_NOT; /* already in queue */
- if (actual & ERTS_PSFLG_DIRTY_CPU_PROC) {
- *newp |= ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q;
- if (actual & ERTS_PSFLG_IN_RUNQ)
- return -ERTS_ENQUEUE_DIRTY_CPU_QUEUE; /* use proxy */
- *newp |= ERTS_PSFLG_IN_RUNQ;
- return ERTS_ENQUEUE_DIRTY_CPU_QUEUE;
- }
- *newp |= ERTS_PSFLG_DIRTY_IO_PROC_IN_Q;
- if (actual & ERTS_PSFLG_IN_RUNQ)
- return -ERTS_ENQUEUE_DIRTY_IO_QUEUE; /* use proxy */
- *newp |= ERTS_PSFLG_IN_RUNQ;
- return ERTS_ENQUEUE_DIRTY_IO_QUEUE;
+ if (actual & ERTS_PSFLGS_DIRTY_WORK) {
+ int res = check_dirty_enqueue_in_prio_queue(c_p, newp, actual,
+ aprio, qbit);
+ if (res != ERTS_ENQUEUE_NORMAL_QUEUE)
+ return res;
}
- enqueue_normal_runq:
-#endif
max_qbit = (actual >> ERTS_PSFLGS_IN_PRQ_MASK_OFFSET) & ERTS_PSFLGS_QMASK;
max_qbit |= 1 << ERTS_PSFLGS_QMASK_BITS;
max_qbit &= -max_qbit;
@@ -5830,46 +6301,128 @@ check_enqueue_in_prio_queue(Process *c_p,
return ERTS_ENQUEUE_NORMAL_QUEUE;
}
+static ERTS_INLINE ErtsRunQueue *
+select_enqueue_run_queue(int enqueue, int enq_prio, Process *p, erts_aint32_t state)
+{
+
+ switch (enqueue) {
+
+ case ERTS_ENQUEUE_NOT:
+
+ return NULL;
+
+
+ case ERTS_ENQUEUE_DIRTY_CPU_QUEUE:
+ case -ERTS_ENQUEUE_DIRTY_CPU_QUEUE:
+
+ if (fin_dirty_enq_s_change(p, enqueue > 0, enq_prio,
+ ERTS_PDSFLGS_IN_CPU_PRQ_MASK_OFFSET))
+ return ERTS_DIRTY_CPU_RUNQ;
+
+ return NULL;
+
+
+ case ERTS_ENQUEUE_DIRTY_IO_QUEUE:
+ case -ERTS_ENQUEUE_DIRTY_IO_QUEUE:
+
+ if (fin_dirty_enq_s_change(p, enqueue > 0, enq_prio,
+ ERTS_PDSFLGS_IN_IO_PRQ_MASK_OFFSET))
+ return ERTS_DIRTY_IO_RUNQ;
+
+ return NULL;
+
+
+ default: {
+ ErtsRunQueue* runq;
+
+ ASSERT(enqueue == ERTS_ENQUEUE_NORMAL_QUEUE
+ || enqueue == -ERTS_ENQUEUE_NORMAL_QUEUE);
+
+ runq = erts_get_runq_proc(p);
+
+ if (!(ERTS_PSFLG_BOUND & state)) {
+ ErtsRunQueue *new_runq = erts_check_emigration_need(runq, enq_prio);
+ if (new_runq) {
+ RUNQ_SET_RQ(&p->run_queue, new_runq);
+ runq = new_runq;
+ }
+ }
+
+ ASSERT(runq);
+
+ return runq;
+ }
+ }
+}
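
select_enqueue_run_queue() decodes the convention carried by the enqueue value: the magnitude picks the queue, and a negative sign means the caller must enqueue a proxy struct rather than the process itself. A small sketch of that sign convention:

#include <stdio.h>
#include <stdlib.h>

#define ENQUEUE_NOT          0
#define ENQUEUE_NORMAL_QUEUE 1
#define ENQUEUE_DIRTY_CPU    2
#define ENQUEUE_DIRTY_IO     3

static const char *queue_name(int enqueue)
{
    switch (abs(enqueue)) {
    case ENQUEUE_NORMAL_QUEUE: return "normal";
    case ENQUEUE_DIRTY_CPU:    return "dirty CPU";
    case ENQUEUE_DIRTY_IO:     return "dirty IO";
    default:                   return "none";
    }
}

int main(void)
{
    int codes[] = { ENQUEUE_NOT, ENQUEUE_DIRTY_CPU, -ENQUEUE_DIRTY_CPU };
    for (int i = 0; i < 3; i++)
        printf("code %+d: queue=%s, via %s\n",
               codes[i], queue_name(codes[i]),
               codes[i] < 0 ? "proxy" : "process struct");
    return 0;
}
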
+
+
/*
* schedule_out_process() returns with c_rq locked.
+ *
+ * Return a non-zero value if the caller should decrease the
+ * reference count on the process when done with it...
*/
static ERTS_INLINE int
-schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Process *proxy)
+schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
+ Process *proxy, int is_normal_sched)
{
- erts_aint32_t a, e, n, enq_prio = -1;
+ erts_aint32_t a, e, n, enq_prio = -1, running_flgs;
int enqueue; /* < 0 -> use proxy */
- Process* sched_p;
ErtsRunQueue* runq;
-#ifdef ERTS_SMP
- int check_emigration_need;
-#endif
+
+ if (!is_normal_sched)
+ running_flgs = ERTS_PSFLG_DIRTY_RUNNING|ERTS_PSFLG_DIRTY_RUNNING_SYS;
+ else {
+ running_flgs = ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS;
+ if (state & ERTS_PSFLG_DIRTY_ACTIVE_SYS
+ && (p->flags & (F_DELAY_GC|F_DISABLE_GC))) {
+ /*
+ * Delay dirty GC; will be enabled automatically
+ * again by next GC...
+ */
+
+ /*
+ * No normal execution until dirty CLA or hibernate has
+ * been handled...
+ */
+ ASSERT(!(p->flags & (F_DIRTY_CLA | F_DIRTY_GC_HIBERNATE)));
+
+ state = erts_atomic32_read_band_nob(&p->state,
+ ~ERTS_PSFLG_DIRTY_ACTIVE_SYS);
+ state &= ~ERTS_PSFLG_DIRTY_ACTIVE_SYS;
+ }
+ }
a = state;
while (1) {
n = e = a;
- ASSERT(a & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS));
+ ASSERT(a & running_flgs);
enqueue = ERTS_ENQUEUE_NOT;
- n &= ~(ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS);
- if (a & ERTS_PSFLG_ACTIVE_SYS
+ n &= ~running_flgs;
+ if ((a & (ERTS_PSFLG_ACTIVE_SYS|ERTS_PSFLG_DIRTY_ACTIVE_SYS))
|| (a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE) {
enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a);
}
- a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
+ a = erts_atomic32_cmpxchg_mb(&p->state, n, e);
if (a == e)
break;
}
- switch (enqueue) {
- case ERTS_ENQUEUE_NOT:
+ runq = select_enqueue_run_queue(enqueue, enq_prio, p, n);
+
+ if (!runq) {
+
if (erts_system_profile_flags.runnable_procs) {
- if (!(a & ERTS_PSFLG_ACTIVE_SYS)
- && (!(a & ERTS_PSFLG_ACTIVE)
- || (a & ERTS_PSFLG_SUSPENDED))) {
+ /* Status lock prevents out of order "runnable proc" trace msgs */
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+
+ if (!(a & (ERTS_PSFLG_ACTIVE_SYS|ERTS_PSFLG_DIRTY_ACTIVE_SYS))
+ && (!(a & ERTS_PSFLG_ACTIVE) || (a & ERTS_PSFLG_SUSPENDED))) {
/* Process inactive */
profile_runnable_proc(p, am_inactive);
}
@@ -5878,101 +6431,99 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Proces
if (proxy)
free_proxy_proc(proxy);
- erts_smp_runq_lock(c_rq);
- return 0;
+ erts_runq_lock(c_rq);
-#ifdef ERTS_DIRTY_SCHEDULERS
-#ifdef ERTS_SMP
- case ERTS_ENQUEUE_DIRTY_CPU_QUEUE:
- case -ERTS_ENQUEUE_DIRTY_CPU_QUEUE:
- runq = ERTS_DIRTY_CPU_RUNQ;
- ASSERT(ERTS_SCHEDULER_IS_DIRTY_CPU(runq->scheduler));
-#ifdef ERTS_SMP
- check_emigration_need = 0;
-#endif
- break;
+ /* Decrement refc if scheduled out from dirty scheduler... */
+ return !is_normal_sched;
+ }
+ else {
+ Process* sched_p;
- case ERTS_ENQUEUE_DIRTY_IO_QUEUE:
- case -ERTS_ENQUEUE_DIRTY_IO_QUEUE:
- runq = ERTS_DIRTY_IO_RUNQ;
- ASSERT(ERTS_SCHEDULER_IS_DIRTY_IO(runq->scheduler));
-#ifdef ERTS_SMP
- check_emigration_need = 0;
-#endif
- break;
-#endif
-#endif
+ ASSERT(!(n & ERTS_PSFLG_FREE));
+ ASSERT(!(n & ERTS_PSFLG_SUSPENDED) || (n & (ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_DIRTY_ACTIVE_SYS)));
- default:
- ASSERT(enqueue == ERTS_ENQUEUE_NORMAL_QUEUE
- || enqueue == -ERTS_ENQUEUE_NORMAL_QUEUE);
+ if (enqueue < 0)
+ sched_p = make_proxy_proc(proxy, p, enq_prio);
+ else {
+ sched_p = p;
+ if (proxy)
+ free_proxy_proc(proxy);
+ }
- runq = erts_get_runq_proc(p);
-#ifdef ERTS_SMP
- check_emigration_need = !(ERTS_PSFLG_BOUND & n);
-#endif
- break;
- }
+ ASSERT(runq);
- ASSERT(!(n & ERTS_PSFLG_SUSPENDED) || (n & ERTS_PSFLG_ACTIVE_SYS));
+ erts_runq_lock(runq);
- if (enqueue < 0)
- sched_p = make_proxy_proc(proxy, p, enq_prio);
- else {
- sched_p = p;
- if (proxy)
- free_proxy_proc(proxy);
- }
+ if (is_normal_sched && sched_p == p && ERTS_RUNQ_IX_IS_DIRTY(runq->ix))
+ erts_proc_inc_refc(p); /* Needs to be done before enqueue_process() */
-#ifdef ERTS_SMP
- if (check_emigration_need) {
- ErtsRunQueue *new_runq = erts_check_emigration_need(runq, enq_prio);
- if (new_runq) {
- RUNQ_SET_RQ(&sched_p->run_queue, new_runq);
- runq = new_runq;
- }
- }
-#endif
+ /* Enqueue the process */
+ enqueue_process(runq, (int) enq_prio, sched_p);
- ASSERT(runq);
+ if (runq == c_rq)
+ return 0;
- erts_smp_runq_lock(runq);
+ erts_runq_unlock(runq);
- /* Enqueue the process */
- enqueue_process(runq, (int) enq_prio, sched_p);
+ smp_notify_inc_runq(runq);
- if (runq == c_rq)
- return 1;
- erts_smp_runq_unlock(runq);
- smp_notify_inc_runq(runq);
- erts_smp_runq_lock(c_rq);
- return 1;
+ erts_runq_lock(c_rq);
+
+ /*
+ * Decrement refc if process is scheduled out by a
+ * dirty scheduler, and we have not just scheduled
+ * the process using the ordinary process struct
+ * on a dirty run-queue again...
+ */
+ return !is_normal_sched && (sched_p != p
+ || !ERTS_RUNQ_IX_IS_DIRTY(runq->ix));
+ }
}
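
The return value of schedule_out_process() encodes a reference-count rule: a dirty scheduler holds a refc on the process, and must drop it unless the real struct was just requeued on a dirty run queue (which keeps that refc alive). A truth-table sketch of the final expression, where used_proxy stands for sched_p != p:

#include <stdio.h>

static int must_dec_refc(int is_normal_sched, int used_proxy, int dirty_runq)
{
    return !is_normal_sched && (used_proxy || !dirty_runq);
}

int main(void)
{
    printf("%d\n", must_dec_refc(1, 0, 0)); /* normal sched: never        */
    printf("%d\n", must_dec_refc(0, 1, 1)); /* proxy requeued: dec        */
    printf("%d\n", must_dec_refc(0, 0, 1)); /* struct on dirty rq: keep   */
    printf("%d\n", must_dec_refc(0, 0, 0)); /* struct on normal rq: dec   */
    return 0;
}
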
static ERTS_INLINE void
-add2runq(Process *p, erts_aint32_t state, erts_aint32_t prio)
+add2runq(int enqueue, erts_aint32_t prio,
+ Process *proc, erts_aint32_t state,
+ Process **proxy)
{
- ErtsRunQueue *runq = erts_get_runq_proc(p);
+ ErtsRunQueue *runq;
-#ifdef ERTS_SMP
- if (!(ERTS_PSFLG_BOUND & state)) {
- ErtsRunQueue *new_runq = erts_check_emigration_need(runq, (int) prio);
- if (new_runq) {
- RUNQ_SET_RQ(&p->run_queue, new_runq);
- runq = new_runq;
- }
- }
-#endif
- ASSERT(runq);
+ runq = select_enqueue_run_queue(enqueue, prio, proc, state);
- erts_smp_runq_lock(runq);
+ if (runq) {
+ Process *sched_p;
- /* Enqueue the process */
- enqueue_process(runq, (int) prio, p);
+ if (enqueue > 0) {
+ sched_p = proc;
+ /*
+ * Refc on process struct (i.e. true struct,
+ * not proxy-struct) increased while in a
+ * dirty run-queue or executing on a dirty
+ * scheduler.
+ */
+ if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix))
+ erts_proc_inc_refc(proc);
+ }
+ else {
+ Process *pxy;
- erts_smp_runq_unlock(runq);
- smp_notify_inc_runq(runq);
+ if (!proxy)
+ pxy = NULL;
+ else {
+ pxy = *proxy;
+ *proxy = NULL;
+ }
+ sched_p = make_proxy_proc(pxy, proc, prio);
+ }
+
+ erts_runq_lock(runq);
+ /* Enqueue the process */
+ enqueue_process(runq, (int) prio, sched_p);
+
+ erts_runq_unlock(runq);
+ smp_notify_inc_runq(runq);
+ }
}
static ERTS_INLINE int
@@ -5980,24 +6531,38 @@ change_proc_schedule_state(Process *p,
erts_aint32_t clear_state_flags,
erts_aint32_t set_state_flags,
erts_aint32_t *statep,
- erts_aint32_t *enq_prio_p)
+ erts_aint32_t *enq_prio_p,
+ ErtsProcLocks locks)
{
/*
- * NOTE: ERTS_PSFLG_RUNNING, ERTS_PSFLG_RUNNING_SYS and
- * ERTS_PSFLG_ACTIVE_SYS are not allowed to be
+ * NOTE: ERTS_PSFLG_RUNNING, ERTS_PSFLG_RUNNING_SYS,
+ * ERTS_PSFLG_DIRTY_RUNNING, ERTS_PSFLG_DIRTY_RUNNING_SYS
+ * and ERTS_PSFLG_ACTIVE_SYS are not allowed to be
* altered by this function!
*/
erts_aint32_t a = *statep, n;
int enqueue; /* < 0 -> use proxy */
+ unsigned int prof_runnable_procs = erts_system_profile_flags.runnable_procs;
+ unsigned int lock_status = (prof_runnable_procs
+ && !(locks & ERTS_PROC_LOCK_STATUS));
+
+ ERTS_LC_ASSERT(locks == erts_proc_lc_my_proc_locks(p));
ASSERT(!(a & ERTS_PSFLG_PROXY));
ASSERT((clear_state_flags & (ERTS_PSFLG_RUNNING
| ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS
| ERTS_PSFLG_ACTIVE_SYS)) == 0);
ASSERT((set_state_flags & (ERTS_PSFLG_RUNNING
| ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS
| ERTS_PSFLG_ACTIVE_SYS)) == 0);
+ if (lock_status)
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+
while (1) {
erts_aint32_t e;
n = e = a;
@@ -6016,8 +6581,14 @@ change_proc_schedule_state(Process *p,
if ((n & (ERTS_PSFLG_SUSPENDED
| ERTS_PSFLG_RUNNING
| ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS
| ERTS_PSFLG_IN_RUNQ
- | ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE) {
+ | ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE
+ || (n & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_EXITING)) == ERTS_PSFLG_EXITING
+ ) {
/*
* Active and seemingly need to be enqueued, but
* process may be in a run queue via proxy, need
@@ -6026,35 +6597,42 @@ change_proc_schedule_state(Process *p,
enqueue = check_enqueue_in_prio_queue(p, enq_prio_p, &n, a);
}
- a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
+ a = erts_atomic32_cmpxchg_mb(&p->state, n, e);
if (a == e)
break;
if (enqueue == ERTS_ENQUEUE_NOT && n == a)
break;
}
- if (erts_system_profile_flags.runnable_procs) {
+ if (prof_runnable_procs) {
+
+ /* Status lock prevents out of order "runnable proc" trace msgs */
if (((n & (ERTS_PSFLG_SUSPENDED
| ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE)
&& (!(a & (ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_RUNNING_SYS)
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)
&& (!(a & ERTS_PSFLG_ACTIVE)
|| (a & ERTS_PSFLG_SUSPENDED))))) {
/* We activated a previously inactive process */
profile_runnable_proc(p, am_active);
}
+ if (lock_status)
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
}
+
*statep = a;
return enqueue;
}
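
All the state transitions in this file share the same compare-and-swap retry shape: compute a desired state from the last observed one, attempt the exchange, and recompute on failure. A minimal sketch of that loop with C11 atomics; the flag values are invented for illustration:

#include <stdatomic.h>
#include <stdio.h>

#define FLG_SUSPENDED (1 << 0)
#define FLG_ACTIVE    (1 << 1)

static atomic_int state;

static int clear_and_set(int clear, int set)
{
    int a = atomic_load(&state);
    for (;;) {
        int n = (a & ~clear) | set;      /* compute desired state       */
        /* On failure 'a' is reloaded with the current value and we
         * recompute; on success the exchange happened atomically.      */
        if (atomic_compare_exchange_strong(&state, &a, n))
            return a;                    /* old state, like cmpxchg_mb  */
    }
}

int main(void)
{
    atomic_init(&state, FLG_SUSPENDED);
    int old = clear_and_set(FLG_SUSPENDED, FLG_ACTIVE);
    printf("old=0x%x new=0x%x\n", old, atomic_load(&state));
    return 0;
}
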
static ERTS_INLINE void
-schedule_process(Process *p, erts_aint32_t in_state)
+schedule_process(Process *p, erts_aint32_t in_state, ErtsProcLocks locks)
{
erts_aint32_t enq_prio = -1;
erts_aint32_t state = in_state;
@@ -6062,24 +6640,114 @@ schedule_process(Process *p, erts_aint32_t in_state)
0,
ERTS_PSFLG_ACTIVE,
&state,
- &enq_prio);
- if (enqueue != ERTS_ENQUEUE_NOT)
- add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio),
- state,
- enq_prio);
+ &enq_prio,
+ locks);
+ add2runq(enqueue, enq_prio, p, state, NULL);
}
void
-erts_schedule_process(Process *p, erts_aint32_t state)
+erts_schedule_process(Process *p, erts_aint32_t state, ErtsProcLocks locks)
{
- schedule_process(p, state);
+ schedule_process(p, state, locks);
}
-static void
-schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
+static int
+schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st,
+ erts_aint32_t *fail_state_p)
{
- erts_aint32_t a = state, n, enq_prio = -1;
+ int res;
+ int locked;
+ ErtsProcSysTaskQs *stqs, *free_stqs;
+ erts_aint32_t fail_state, state, a, n, enq_prio;
int enqueue; /* < 0 -> use proxy */
+ unsigned int prof_runnable_procs;
+
+ fail_state = *fail_state_p;
+
+ res = 1; /* prepare for success */
+ st->next = st->prev = st; /* Prep for empty prio queue */
+ state = erts_atomic32_read_nob(&p->state);
+ prof_runnable_procs = erts_system_profile_flags.runnable_procs;
+ locked = 0;
+ free_stqs = NULL;
+ if (state & ERTS_PSFLG_ACTIVE_SYS)
+ stqs = NULL;
+ else {
+ alloc_qs:
+ stqs = proc_sys_task_queues_alloc();
+ stqs->qmask = 1 << prio;
+ stqs->ncount = 0;
+ stqs->q[PRIORITY_MAX] = NULL;
+ stqs->q[PRIORITY_HIGH] = NULL;
+ stqs->q[PRIORITY_NORMAL] = NULL;
+ stqs->q[PRIORITY_LOW] = NULL;
+ stqs->q[prio] = st;
+ }
+
+ if (!locked) {
+ locked = 1;
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+
+ state = erts_atomic32_read_nob(&p->state);
+ if (state & fail_state) {
+ *fail_state_p = (state & fail_state);
+ free_stqs = stqs;
+ res = 0;
+ goto cleanup;
+ }
+ }
+
+ if (!p->sys_task_qs) {
+ if (stqs)
+ p->sys_task_qs = stqs;
+ else
+ goto alloc_qs;
+ }
+ else {
+ free_stqs = stqs;
+ stqs = p->sys_task_qs;
+ if (!stqs->q[prio]) {
+ stqs->q[prio] = st;
+ stqs->qmask |= 1 << prio;
+ }
+ else {
+ st->next = stqs->q[prio];
+ st->prev = stqs->q[prio]->prev;
+ st->next->prev = st;
+ st->prev->next = st;
+ ASSERT(stqs->qmask & (1 << prio));
+ }
+ }
+
+ if (ERTS_PSFLGS_GET_ACT_PRIO(state) > prio) {
+ erts_aint32_t n, a, e;
+ /* Need to elevate actual prio */
+
+ a = state;
+ do {
+ if (ERTS_PSFLGS_GET_ACT_PRIO(a) <= prio) {
+ n = a;
+ break;
+ }
+ n = e = a;
+ n &= ~ERTS_PSFLGS_ACT_PRIO_MASK;
+ n |= (prio << ERTS_PSFLGS_ACT_PRIO_OFFSET);
+ a = erts_atomic32_cmpxchg_nob(&p->state, n, e);
+ } while (a != e);
+ state = n;
+ }
+
+
+ a = state;
+ enq_prio = -1;
+
+ /* Status lock prevents out of order "runnable proc" trace msgs */
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+
+ if (!prof_runnable_procs) {
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ locked = 0;
+ }
ASSERT(!(state & ERTS_PSFLG_PROXY));
@@ -6088,45 +6756,51 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
n = e = a;
if (a & ERTS_PSFLG_FREE)
- return; /* We don't want to schedule free processes... */
+ goto cleanup; /* We don't want to schedule free processes... */
enqueue = ERTS_ENQUEUE_NOT;
n |= ERTS_PSFLG_ACTIVE_SYS;
- if (!(a & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)))
+ if (!(a & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)))
enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a);
- a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
+ a = erts_atomic32_cmpxchg_mb(&p->state, n, e);
if (a == e)
break;
if (a == n && enqueue == ERTS_ENQUEUE_NOT)
goto cleanup;
}
- if (erts_system_profile_flags.runnable_procs) {
+ if (prof_runnable_procs) {
if (!(a & (ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_RUNNING_SYS))
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS))
&& (!(a & ERTS_PSFLG_ACTIVE) || (a & ERTS_PSFLG_SUSPENDED))) {
/* We activated a previously inactive process */
profile_runnable_proc(p, am_active);
}
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ locked = 0;
}
- if (enqueue != ERTS_ENQUEUE_NOT) {
- Process *sched_p;
- if (enqueue > 0)
- sched_p = p;
- else {
- sched_p = make_proxy_proc(proxy, p, enq_prio);
- proxy = NULL;
- }
- add2runq(sched_p, n, enq_prio);
- }
+ add2runq(enqueue, enq_prio, p, n, NULL);
cleanup:
- if (proxy)
- free_proxy_proc(proxy);
+
+ if (locked)
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+
+ if (free_stqs)
+ proc_sys_task_queues_free(free_stqs);
+
+ ERTS_LC_ASSERT(!(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)));
+
+ return res;
}
static ERTS_INLINE int
@@ -6134,26 +6808,31 @@ suspend_process(Process *c_p, Process *p)
{
erts_aint32_t state;
int suspended = 0;
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
- state = erts_smp_atomic32_read_acqb(&p->state);
+ state = erts_atomic32_read_acqb(&p->state);
if ((state & ERTS_PSFLG_SUSPENDED))
suspended = -1;
else {
if (c_p == p) {
- state = erts_smp_atomic32_read_bor_relb(&p->state,
+ state = erts_atomic32_read_bor_relb(&p->state,
ERTS_PSFLG_SUSPENDED);
- ASSERT(state & ERTS_PSFLG_RUNNING);
+ ASSERT(state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS));
suspended = (state & ERTS_PSFLG_SUSPENDED) ? -1: 1;
}
else {
- while (!(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_EXITING))) {
+ while (!(state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_EXITING))) {
erts_aint32_t n, e;
n = e = state;
n |= ERTS_PSFLG_SUSPENDED;
- state = erts_smp_atomic32_cmpxchg_relb(&p->state, n, e);
+ state = erts_atomic32_cmpxchg_relb(&p->state, n, e);
if (state == e) {
suspended = 1;
break;
@@ -6168,17 +6847,17 @@ suspend_process(Process *c_p, Process *p)
if (suspended) {
- ASSERT(!(ERTS_PSFLG_RUNNING & state)
- || p == erts_get_current_process());
-
if (suspended > 0 && erts_system_profile_flags.runnable_procs) {
/* 'state' is before our change... */
if ((state & (ERTS_PSFLG_ACTIVE
| ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_DIRTY_ACTIVE_SYS
| ERTS_PSFLG_RUNNING
| ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS
| ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE) {
/* We made process inactive */
profile_runnable_proc(p, am_inactive);
@@ -6193,54 +6872,31 @@ suspend_process(Process *c_p, Process *p)
}
static ERTS_INLINE void
-resume_process(Process *p)
+resume_process(Process *p, ErtsProcLocks locks)
{
erts_aint32_t state, enq_prio = -1;
int enqueue;
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
ASSERT(p->rcount > 0);
if (--p->rcount > 0) /* multiple suspend */
return;
- state = erts_smp_atomic32_read_nob(&p->state);
+ state = erts_atomic32_read_nob(&p->state);
enqueue = change_proc_schedule_state(p,
ERTS_PSFLG_SUSPENDED,
0,
&state,
- &enq_prio);
- if (enqueue)
- add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio),
- state,
- enq_prio);
-}
-
-int
-erts_get_max_no_executing_schedulers(void)
-{
-#ifdef ERTS_SMP
- if (erts_smp_atomic32_read_nob(&schdlr_sspnd.changing))
- return (int) erts_no_schedulers;
- ERTS_THR_MEMORY_BARRIER;
- return (int) erts_smp_atomic32_read_nob(&schdlr_sspnd.active);
-#else
- return 1;
-#endif
+ &enq_prio,
+ locks);
+ add2runq(enqueue, enq_prio, p, state, NULL);
}
-#ifdef ERTS_SMP
-
-static void
-scheduler_ix_resume_wake(Uint ix)
-{
- ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
- scheduler_ssi_resume_wake(ssi);
-}
-static void
-scheduler_ssi_resume_wake(ErtsSchedulerSleepInfo *ssi)
+static ERTS_INLINE void
+sched_resume_wake__(ErtsSchedulerSleepInfo *ssi)
{
erts_aint32_t xflgs = (ERTS_SSI_FLG_SLEEPING
| ERTS_SSI_FLG_TSE_SLEEPING
@@ -6248,15 +6904,35 @@ scheduler_ssi_resume_wake(ErtsSchedulerSleepInfo *ssi)
| ERTS_SSI_FLG_SUSPENDED);
erts_aint32_t oflgs;
do {
- oflgs = erts_smp_atomic32_cmpxchg_relb(&ssi->flags, 0, xflgs);
+ oflgs = erts_atomic32_cmpxchg_relb(&ssi->flags, 0, xflgs);
if (oflgs == xflgs) {
erts_sched_finish_poke(ssi, oflgs);
break;
}
xflgs = oflgs;
- } while (oflgs & ERTS_SSI_FLG_SUSPENDED);
+ } while (oflgs & (ERTS_SSI_FLG_MSB_EXEC|ERTS_SSI_FLG_SUSPENDED));
+}
+
+static void
+nrml_sched_ix_resume_wake(Uint ix)
+{
+ sched_resume_wake__(ERTS_SCHED_SLEEP_INFO_IX(ix));
}
+
+static void
+dcpu_sched_ix_resume_wake(Uint ix)
+{
+ sched_resume_wake__(ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix));
+}
+
+static void
+dio_sched_ix_resume_wake(Uint ix)
+{
+ sched_resume_wake__(ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix));
+}
+
+
static erts_aint32_t
sched_prep_spin_suspended(ErtsSchedulerSleepInfo *ssi, erts_aint32_t xpct)
{
@@ -6267,7 +6943,7 @@ sched_prep_spin_suspended(ErtsSchedulerSleepInfo *ssi, erts_aint32_t xpct)
erts_aint32_t xflgs = xpct;
do {
- oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
xflgs = oflgs;
@@ -6284,7 +6960,7 @@ sched_spin_suspended(ErtsSchedulerSleepInfo *ssi, int spincount)
erts_aint32_t flgs;
do {
- flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
+ flgs = erts_atomic32_read_acqb(&ssi->flags);
if ((flgs & (ERTS_SSI_FLG_SLEEPING
| ERTS_SSI_FLG_WAITING
| ERTS_SSI_FLG_SUSPENDED))
@@ -6303,21 +6979,23 @@ sched_spin_suspended(ErtsSchedulerSleepInfo *ssi, int spincount)
}
static erts_aint32_t
-sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi)
+sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi,
+ erts_aint32_t sleep_type)
{
erts_aint32_t oflgs;
- erts_aint32_t nflgs = (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_TSE_SLEEPING
- | ERTS_SSI_FLG_WAITING
- | ERTS_SSI_FLG_SUSPENDED);
+ erts_aint32_t nflgs = ((ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)
+ | sleep_type);
erts_aint32_t xflgs = (ERTS_SSI_FLG_SLEEPING
| ERTS_SSI_FLG_WAITING
| ERTS_SSI_FLG_SUSPENDED);
+ ASSERT(sleep_type == ERTS_SSI_FLG_TSE_SLEEPING);
erts_tse_reset(ssi->event);
while (1) {
- oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
if ((oflgs & (ERTS_SSI_FLG_SLEEPING
@@ -6332,1154 +7010,1143 @@ sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi)
}
}
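
[As the ASSERT above shows, the new sleep_type parameter currently accepts only ERTS_SSI_FLG_TSE_SLEEPING; it reads as a hook for future sleep types rather than a behavioural change in this patch.]
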
-#ifdef ERTS_DIRTY_SCHEDULERS
+static void
+init_scheduler_suspend(void)
+{
+ erts_mtx_init(&schdlr_sspnd.mtx, "schdlr_sspnd", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
+ schdlr_sspnd.online.normal = 1;
+ schdlr_sspnd.curr_online.normal = 1;
+ schdlr_sspnd.active.normal = 1;
+ schdlr_sspnd.online.dirty_cpu = 0;
+ schdlr_sspnd.curr_online.dirty_cpu = 0;
+ schdlr_sspnd.active.dirty_cpu = 0;
+ schdlr_sspnd.online.dirty_io = 0;
+ schdlr_sspnd.curr_online.dirty_io = 0;
+ schdlr_sspnd.active.dirty_io = 0;
+ schdlr_sspnd.last_msb_dirty_type = ERTS_SCHED_DIRTY_IO;
+ erts_atomic32_init_nob(&schdlr_sspnd.changing, 0);
+ schdlr_sspnd.chngq = NULL;
+ schdlr_sspnd.changer = am_false;
+ schdlr_sspnd.nmsb.ongoing = 0;
+ schdlr_sspnd.nmsb.blckrs = NULL;
+ schdlr_sspnd.nmsb.chngq = NULL;
+ schdlr_sspnd.msb.ongoing = 0;
+ schdlr_sspnd.msb.blckrs = NULL;
+ schdlr_sspnd.msb.chngq = NULL;
+}
+
+typedef struct {
+ struct {
+ Eterm chngr;
+ Eterm nxt;
+ } onln;
+ struct {
+ ErtsProcList *chngrs;
+ } msb;
+} ErtsSchdlrSspndResume;
static void
-suspend_scheduler(ErtsSchedulerData *esdp)
+schdlr_sspnd_resume_proc(ErtsSchedType sched_type, Eterm pid)
{
- erts_aint32_t flgs;
- erts_aint32_t changing;
-#ifdef ERTS_DIRTY_SCHEDULERS
- long no = (long) (ERTS_SCHEDULER_IS_DIRTY(esdp)
- ? ERTS_DIRTY_SCHEDULER_NO(esdp)
- : esdp->no);
-#else
- long no = (long) esdp->no;
-#endif
- ErtsSchedulerSleepInfo *ssi = esdp->ssi;
- long active_schedulers;
- int curr_online = 1;
- int wake = 0;
- erts_aint32_t aux_work;
- int thr_prgr_active = 1;
- ErtsStuckBoundProcesses sbp = {NULL, NULL};
- int* ss_onlinep;
- int* ss_curr_onlinep;
- int* ss_wait_curr_onlinep;
- long* ss_wait_activep;
- long ss_wait_active_target;
- erts_smp_atomic32_t* ss_changingp;
- erts_smp_atomic32_t* ss_activep;
+ Process *p;
+ p = erts_pid2proc_opt(NULL, 0, pid, ERTS_PROC_LOCK_STATUS,
+ (sched_type != ERTS_SCHED_NORMAL
+ ? ERTS_P2P_FLG_INC_REFC
+ : 0));
+ if (p) {
+ resume_process(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ if (sched_type != ERTS_SCHED_NORMAL)
+ erts_proc_dec_refc(p);
+ }
+}
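
[Note the ERTS_P2P_FLG_INC_REFC taken above for non-normal scheduler types: presumably a reference count, rather than a thread-progress-delayed free, must keep the process structure alive here, since dirty schedulers do not drive thread progress; the matching erts_proc_dec_refc() follows the resume.]
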
- /*
- * Schedulers may be suspended in two different ways:
- * - A scheduler may be suspended since it is not online.
- * All schedulers with scheduler ids greater than
- * schdlr_sspnd.online are suspended; same for dirty
- * schedulers and schdlr_sspnd.dirty_cpu_online and
- * schdlr_sspnd.dirty_io_online.
- * - Multi scheduling is blocked. All schedulers except the
- * scheduler with scheduler id 1 are suspended, and all
- * dirty CPU and dirty I/O schedulers are suspended.
- *
- * Regardless of why a scheduler is suspended, it ends up here.
- */
+static ERTS_INLINE void
+schdlr_sspnd_resume_procs(ErtsSchedType sched_type,
+ ErtsSchdlrSspndResume *resume)
+{
+ if (is_internal_pid(resume->onln.chngr)) {
+ schdlr_sspnd_resume_proc(sched_type,
+ resume->onln.chngr);
+ resume->onln.chngr = NIL;
+ }
+ if (is_internal_pid(resume->onln.nxt)) {
+ schdlr_sspnd_resume_proc(sched_type,
+ resume->onln.nxt);
+ resume->onln.nxt = NIL;
+ }
+ while (resume->msb.chngrs) {
+ ErtsProcList *plp = resume->msb.chngrs;
+ resume->msb.chngrs = plp->next;
+ schdlr_sspnd_resume_proc(sched_type,
+ plp->pid);
+ proclist_destroy(plp);
+ }
+}
- ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp) || no != 1);
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- if (erts_smp_mtx_trylock(&schdlr_sspnd.mtx) == EBUSY) {
- erts_smp_runq_unlock(esdp->run_queue);
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- erts_smp_runq_lock(esdp->run_queue);
- }
- if (ongoing_multi_scheduling_block())
- evacuate_run_queue(esdp->run_queue, &sbp);
- } else
-#endif
- evacuate_run_queue(esdp->run_queue, &sbp);
+static ERTS_INLINE int
+have_dirty_work(void)
+{
+ return !(ERTS_EMPTY_RUNQ(ERTS_DIRTY_CPU_RUNQ)
+ | ERTS_EMPTY_RUNQ(ERTS_DIRTY_IO_RUNQ));
+}
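
[The single `|` (rather than `||`) in have_dirty_work() looks deliberate: both ERTS_EMPTY_RUNQ() checks are cheap reads, so evaluating them unconditionally avoids a branch.]
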
- erts_smp_runq_unlock(esdp->run_queue);
+#define ERTS_MSB_NONE_PRIO_BIT PORT_BIT
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
-#endif
- {
- erts_sched_check_cpu_bind_prep_suspend(esdp);
+static ERTS_INLINE Uint32
+msb_runq_prio_bit(Uint32 flgs)
+{
+ int pbit;
- if (erts_system_profile_flags.scheduler)
- profile_scheduler(make_small(esdp->no), am_inactive);
+ pbit = (int) (flgs & ERTS_RUNQ_FLGS_PROCS_QMASK);
+ if (flgs & PORT_BIT) {
+ /* rate ports as proc prio high */
+ pbit |= HIGH_BIT;
+ }
+ if (flgs & ERTS_RUNQ_FLG_MISC_OP) {
+ /* rate misc ops as proc prio normal */
+ pbit |= NORMAL_BIT;
+ }
+ if (flgs & LOW_BIT) {
+ /* rate low prio as normal (avoid starvation) */
+ pbit |= NORMAL_BIT;
+ }
+ if (!pbit)
+ pbit = (int) ERTS_MSB_NONE_PRIO_BIT;
+ else
+ pbit &= -pbit; /* least significant bit set... */
+ ASSERT(pbit);
- sched_wall_time_change(esdp, 0);
+ /* High prio maps to a low value; low prio to a high value... */
+ return (Uint32) pbit;
+}
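
[The `pbit &= -pbit` step relies on the two's-complement trick that isolates the least significant set bit, which here is the highest queued priority since high priority maps to a low bit value. A standalone demonstration with illustrative values (not the real ERTS bit layout):

    #include <stdio.h>

    int main(void)
    {
        unsigned qmask = 0x0c;           /* two priority bits set */
        unsigned prio  = qmask & -qmask; /* keeps 0x04: lowest set bit */
        printf("qmask=%#x -> prio bit=%#x\n", qmask, prio);
        return 0;
    }
]
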
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+static ERTS_INLINE void
+msb_runq_prio_bits(Uint32 *nrmlp, Uint32 *dcpup, Uint32 *diop)
+{
+ Uint32 flgs = ERTS_RUNQ_FLGS_GET(ERTS_RUNQ_IX(0));
+ if (flgs & ERTS_RUNQ_FLG_HALTING) {
+ /*
+ * The emulator is halting; only execute port jobs
+ * on the normal scheduler. Ensure that we switch
+ * to the normal scheduler.
+ */
+ *nrmlp = HIGH_BIT;
+ *dcpup = ERTS_MSB_NONE_PRIO_BIT;
+ *diop = ERTS_MSB_NONE_PRIO_BIT;
}
+ else {
+ *nrmlp = msb_runq_prio_bit(flgs);
- flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED);
- if (flgs & ERTS_SSI_FLG_SUSPENDED) {
+ flgs = ERTS_RUNQ_FLGS_GET(ERTS_DIRTY_CPU_RUNQ);
+ *dcpup = msb_runq_prio_bit(flgs);
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- if (ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(esdp->run_queue)) {
- active_schedulers = erts_smp_atomic32_dec_read_nob(&schdlr_sspnd.dirty_cpu_active);
- ASSERT(active_schedulers >= 0);
- changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_changing);
- ss_onlinep = &schdlr_sspnd.dirty_cpu_online;
- ss_curr_onlinep = &schdlr_sspnd.dirty_cpu_curr_online;
- ss_wait_curr_onlinep = &schdlr_sspnd.dirty_cpu_wait_curr_online;
- ss_changingp = &schdlr_sspnd.dirty_cpu_changing;
- ss_wait_activep = &schdlr_sspnd.msb.dirty_cpu_wait_active;
- ss_activep = &schdlr_sspnd.dirty_cpu_active;
- } else {
- active_schedulers = erts_smp_atomic32_dec_read_nob(&schdlr_sspnd.dirty_io_active);
- ASSERT(active_schedulers >= 0);
- changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_changing);
- ss_onlinep = &schdlr_sspnd.dirty_io_online;
- ss_curr_onlinep = &schdlr_sspnd.dirty_io_curr_online;
- ss_wait_curr_onlinep = &schdlr_sspnd.dirty_io_wait_curr_online;
- ss_changingp = &schdlr_sspnd.dirty_io_changing;
- ss_wait_activep = &schdlr_sspnd.msb.dirty_io_wait_active;
- ss_activep = &schdlr_sspnd.dirty_io_active;
- }
- ss_wait_active_target = 0;
- }
- else
+ flgs = ERTS_RUNQ_FLGS_GET(ERTS_DIRTY_IO_RUNQ);
+ *diop = msb_runq_prio_bit(flgs);
+ }
+}
+
+static int
+msb_scheduler_type_switch(ErtsSchedType sched_type,
+ ErtsSchedulerData *esdp,
+ long no)
+{
+ Uint32 nrml_prio, dcpu_prio, dio_prio;
+ ErtsSchedType exec_type;
+ ErtsRunQueue *exec_rq;
+#ifdef DEBUG
+ erts_aint32_t dbg_val;
#endif
- {
- active_schedulers = erts_smp_atomic32_dec_read_nob(&schdlr_sspnd.active);
- ASSERT(active_schedulers >= 1);
- changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
- ss_onlinep = &schdlr_sspnd.online;
- ss_curr_onlinep = &schdlr_sspnd.curr_online;
- ss_wait_curr_onlinep = &schdlr_sspnd.wait_curr_online;
- ss_changingp = &schdlr_sspnd.changing;
- ss_wait_activep = &schdlr_sspnd.msb.wait_active;
- ss_activep = &schdlr_sspnd.active;
- ss_wait_active_target = 1;
- }
- if (changing & ERTS_SCHDLR_SSPND_CHNG_MSB) {
- if (active_schedulers == *ss_wait_activep)
- wake = 1;
- if (active_schedulers == ss_wait_active_target) {
- changing = erts_smp_atomic32_read_band_nob(ss_changingp,
- ~ERTS_SCHDLR_SSPND_CHNG_MSB);
- changing &= ~ERTS_SCHDLR_SSPND_CHNG_MSB;
- }
- }
- while (1) {
- if (changing & ERTS_SCHDLR_SSPND_CHNG_ONLN) {
- int changed = 0;
- if (no > *ss_onlinep && curr_online) {
- (*ss_curr_onlinep)--;
- curr_online = 0;
- changed = 1;
- }
- else if (no <= *ss_onlinep && !curr_online) {
- (*ss_curr_onlinep)++;
- curr_online = 1;
- changed = 1;
- }
- if (changed
- && *ss_curr_onlinep == *ss_wait_curr_onlinep)
- wake = 1;
- if (*ss_onlinep == *ss_curr_onlinep) {
- changing = erts_smp_atomic32_read_band_nob(ss_changingp,
- ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
- changing &= ~ERTS_SCHDLR_SSPND_CHNG_ONLN;
- }
- }
+ ASSERT(schdlr_sspnd.msb.ongoing);
- if (wake) {
- erts_smp_cnd_signal(&schdlr_sspnd.cnd);
- wake = 0;
- }
+ /*
+ * This function determines how to switch
+ * between scheduler types when multi-scheduling
+ * is blocked.
+ *
+ * If no dirty work exists, we always select
+ * execution on a normal scheduler. If nothing
+ * executes, normal scheduler 1 should be waiting
+ * in sys_schedule(); otherwise we cannot react
+ * to I/O events.
+ *
+ * We unconditionally switch back to a normal
+ * scheduler after executing dirty work in order
+ * to make sure we check for I/O...
+ */
- if (curr_online && !ongoing_multi_scheduling_block()) {
- flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
- if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
- break;
- }
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ msb_runq_prio_bits(&nrml_prio, &dcpu_prio, &dio_prio);
- while (1) {
- erts_aint32_t qmask;
- erts_aint32_t flgs;
-
- qmask = (ERTS_RUNQ_FLGS_GET(esdp->run_queue)
- & ERTS_RUNQ_FLGS_QMASK);
- aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
- if (aux_work|qmask) {
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- sched_wall_time_change(esdp, 1);
- }
- if (aux_work)
- aux_work = handle_aux_work(&esdp->aux_work_data,
- aux_work,
- 1);
-
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp) &&
- (aux_work && erts_thr_progress_update(esdp)))
- erts_thr_progress_leader_update(esdp);
- if (qmask) {
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- erts_smp_runq_lock(esdp->run_queue);
- if (ongoing_multi_scheduling_block())
- evacuate_run_queue(esdp->run_queue, &sbp);
- erts_smp_runq_unlock(esdp->run_queue);
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- } else
-#endif
- {
- erts_smp_runq_lock(esdp->run_queue);
- evacuate_run_queue(esdp->run_queue, &sbp);
- erts_smp_runq_unlock(esdp->run_queue);
- }
- }
- }
+ exec_type = ERTS_SCHED_NORMAL;
+ if (sched_type == ERTS_SCHED_NORMAL) {
- if (!aux_work) {
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
-#endif
- {
- if (thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 0);
- sched_wall_time_change(esdp, 0);
- }
- erts_thr_progress_prepare_wait(esdp);
- }
- flgs = sched_spin_suspended(ssi,
- ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT);
- if (flgs == (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_WAITING
- | ERTS_SSI_FLG_SUSPENDED)) {
- flgs = sched_set_suspended_sleeptype(ssi);
- if (flgs == (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_TSE_SLEEPING
- | ERTS_SSI_FLG_WAITING
- | ERTS_SSI_FLG_SUSPENDED)) {
- int res;
-
- do {
- res = erts_tse_wait(ssi->event);
- } while (res == EINTR);
- }
- }
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
-#endif
- erts_thr_progress_finalize_wait(esdp);
- }
+ /*
+ * Check priorities of work in the
+ * different run-queues and determine
+ * the run-queue with the highest prio job...
+ */
- flgs = sched_prep_spin_suspended(ssi, (ERTS_SSI_FLG_WAITING
- | ERTS_SSI_FLG_SUSPENDED));
- if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
- break;
- changing = erts_smp_atomic32_read_nob(ss_changingp);
- if (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER)
- break;
- }
+ if ((dcpu_prio == ERTS_MSB_NONE_PRIO_BIT)
+ & (dio_prio == ERTS_MSB_NONE_PRIO_BIT)) {
+ /*
+ * No dirty work exists; continue on the
+ * normal scheduler...
+ */
+ return 0;
+ }
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- changing = erts_smp_atomic32_read_nob(ss_changingp);
- }
+ if (dcpu_prio < nrml_prio) {
+ exec_type = ERTS_SCHED_DIRTY_CPU;
+ if (dio_prio < dcpu_prio)
+ exec_type = ERTS_SCHED_DIRTY_IO;
+ }
+ else {
+ if (dio_prio < nrml_prio)
+ exec_type = ERTS_SCHED_DIRTY_IO;
+ }
- active_schedulers = erts_smp_atomic32_inc_read_nob(ss_activep);
- changing = erts_smp_atomic32_read_nob(ss_changingp);
- if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
- && *ss_onlinep == active_schedulers) {
- erts_smp_atomic32_read_band_nob(ss_changingp,
- ~ERTS_SCHDLR_SSPND_CHNG_MSB);
- }
+ /*
+ * Make sure to alternate between dirty types
+ * in between normal execution when the highest
+ * priorities are equal.
+ */
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
-#endif
- ASSERT(no <= *ss_onlinep);
- ASSERT(!ongoing_multi_scheduling_block());
+ if (exec_type == ERTS_SCHED_NORMAL) {
+ if (dcpu_prio == nrml_prio)
+ exec_type = ERTS_SCHED_DIRTY_CPU;
+ else if (dio_prio == nrml_prio)
+ exec_type = ERTS_SCHED_DIRTY_IO;
+ else {
+ /*
+ * Normal work has higher prio than
+ * dirty work; continue on the normal
+ * scheduler...
+ */
+ return 0;
+ }
+ }
+ ASSERT(exec_type != ERTS_SCHED_NORMAL);
+ if (dio_prio == dcpu_prio) {
+ /* Alternate between dirty types... */
+ if (schdlr_sspnd.last_msb_dirty_type == ERTS_SCHED_DIRTY_IO)
+ exec_type = ERTS_SCHED_DIRTY_CPU;
+ else
+ exec_type = ERTS_SCHED_DIRTY_IO;
+ }
}
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
-
- ASSERT(curr_online);
+ ASSERT(sched_type != exec_type);
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
-#endif
- {
- if (erts_system_profile_flags.scheduler)
- profile_scheduler(make_small(esdp->no), am_active);
+ if (exec_type != ERTS_SCHED_NORMAL)
+ schdlr_sspnd.last_msb_dirty_type = exec_type;
+ else {
- if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- sched_wall_time_change(esdp, 1);
- }
+ if ((nrml_prio == ERTS_MSB_NONE_PRIO_BIT)
+ & ((dcpu_prio != ERTS_MSB_NONE_PRIO_BIT)
+ | (dio_prio != ERTS_MSB_NONE_PRIO_BIT))) {
+ /*
+ * We have dirty work, but an empty
+ * normal run-queue.
+ *
+ * Since the normal run-queue is
+ * empty, the normal scheduler will
+ * go to sleep when selected for
+ * execution. We have dirty work to
+ * do, so we only want it to check
+ * I/O, and then come back here and
+ * switch to dirty execution.
+ *
+ * To prevent the scheduler from going
+ * to sleep we trick it into believing
+ * it has work to do...
+ */
+ ERTS_RUNQ_FLGS_SET_NOB(ERTS_RUNQ_IX(0),
+ ERTS_RUNQ_FLG_MISC_OP);
+ }
}
- erts_smp_runq_lock(esdp->run_queue);
- non_empty_runq(esdp->run_queue);
+ /*
+ * Suspend this scheduler and wake up scheduler
+ * number one of the selected type...
+ */
+#ifdef DEBUG
+ dbg_val =
+#else
+ (void)
+#endif
+ erts_atomic32_read_bset_mb(&esdp->ssi->flags,
+ (ERTS_SSI_FLG_SUSPENDED
+ | ERTS_SSI_FLG_MSB_EXEC),
+ ERTS_SSI_FLG_SUSPENDED);
+ ASSERT(dbg_val & ERTS_SSI_FLG_MSB_EXEC);
+
+ switch (exec_type) {
+ case ERTS_SCHED_NORMAL:
+ exec_rq = ERTS_RUNQ_IX(0);
+ break;
+ case ERTS_SCHED_DIRTY_CPU:
+ exec_rq = ERTS_DIRTY_CPU_RUNQ;
+ break;
+ case ERTS_SCHED_DIRTY_IO:
+ exec_rq = ERTS_DIRTY_IO_RUNQ;
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid scheduler type");
+ exec_rq = NULL;
+ break;
+ }
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
+#ifdef DEBUG
+ dbg_val =
+#else
+ (void)
#endif
- {
- schedule_bound_processes(esdp->run_queue, &sbp);
+ erts_atomic32_read_bset_mb(&exec_rq->scheduler->ssi->flags,
+ (ERTS_SSI_FLG_SUSPENDED
+ | ERTS_SSI_FLG_MSB_EXEC),
+ ERTS_SSI_FLG_MSB_EXEC);
+ ASSERT(dbg_val & ERTS_SSI_FLG_SUSPENDED);
- erts_sched_check_cpu_bind_post_suspend(esdp);
+ wake_scheduler(exec_rq);
+
+ return 1; /* suspend this scheduler... */
+
+}
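
[Condensed, the selection rule above reads as the following sketch (hypothetical names; it omits the no-dirty-work early return and the halting case; a numerically lower bit means higher priority):

    enum sched { NORMAL, DIRTY_CPU, DIRTY_IO };

    static enum sched
    pick_exec(unsigned nrml, unsigned dcpu, unsigned dio,
              enum sched last_dirty)
    {
        enum sched exec = NORMAL;
        if (dcpu < nrml)
            exec = (dio < dcpu) ? DIRTY_IO : DIRTY_CPU;
        else if (dio < nrml)
            exec = DIRTY_IO;
        else if (dcpu == nrml)             /* ties prefer dirty work... */
            exec = DIRTY_CPU;
        else if (dio == nrml)
            exec = DIRTY_IO;
        if (exec != NORMAL && dcpu == dio) /* ...equal dirty prios alternate */
            exec = (last_dirty == DIRTY_IO) ? DIRTY_CPU : DIRTY_IO;
        return exec;
    }
]
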
+
+static ERTS_INLINE void
+suspend_normal_scheduler_sleep(ErtsSchedulerData *esdp)
+{
+ ErtsSchedulerSleepInfo *ssi = esdp->ssi;
+ erts_aint32_t flgs = sched_spin_suspended(ssi,
+ ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT);
+ if (flgs == (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ flgs = sched_set_suspended_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
+ if (flgs == (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_TSE_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ int res;
+
+ do {
+ res = erts_tse_wait(ssi->event);
+ } while (res == EINTR);
+ }
}
}
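
[suspend_normal_scheduler_sleep() follows the usual spin-then-block pattern: spin on the flag word a bounded number of times, then publish the TSE-sleeping state and block on the thread event. A self-contained POSIX-threads analogue of the same idea (not ERTS code):

    #include <pthread.h>
    #include <stdatomic.h>

    static atomic_int resumed;
    static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cnd = PTHREAD_COND_INITIALIZER;

    static void wait_until_resumed(int spincount)
    {
        while (spincount-- > 0)              /* cheap spin phase first */
            if (atomic_load_explicit(&resumed, memory_order_acquire))
                return;
        pthread_mutex_lock(&mtx);            /* then fall back to blocking */
        while (!atomic_load_explicit(&resumed, memory_order_acquire))
            pthread_cond_wait(&cnd, &mtx);
        pthread_mutex_unlock(&mtx);
    }

    static void resume_sleeper(void)
    {
        atomic_store_explicit(&resumed, 1, memory_order_release);
        pthread_mutex_lock(&mtx);
        pthread_cond_broadcast(&cnd);        /* poke the sleeper */
        pthread_mutex_unlock(&mtx);
    }
]
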
-#else /* !ERTS_DIRTY_SCHEDULERS */
+static ERTS_INLINE void
+suspend_dirty_scheduler_sleep(ErtsSchedulerData *esdp)
+{
+ suspend_normal_scheduler_sleep(esdp);
+}
static void
suspend_scheduler(ErtsSchedulerData *esdp)
{
erts_aint32_t flgs;
erts_aint32_t changing;
- long no = (long) esdp->no;
+ long no;
ErtsSchedulerSleepInfo *ssi = esdp->ssi;
- long active_schedulers;
int curr_online = 1;
- int wake = 0;
+ ErtsSchdlrSspndResume resume = {{NIL, NIL}, {NULL}};
erts_aint32_t aux_work;
int thr_prgr_active = 1;
ErtsStuckBoundProcesses sbp = {NULL, NULL};
+ ErtsSchedType sched_type;
+ erts_aint32_t online_flag;
/*
* Schedulers may be suspended in two different ways:
* - A scheduler may be suspended since it is not online.
- * All schedulers with scheduler ids greater than
- * schdlr_sspnd.online are suspended.
* - Multi scheduling is blocked. All schedulers except the
- * scheduler with scheduler id 1 are suspended.
+ * scheduler with scheduler id 1 are suspended, and all
+ * dirty CPU and dirty I/O schedulers are suspended.
*
* Regardless of why a scheduler is suspended, it ends up here.
*/
- ASSERT(no != 1);
- evacuate_run_queue(esdp->run_queue, &sbp);
- erts_smp_runq_unlock(esdp->run_queue);
+ sched_type = esdp->type;
+ switch (sched_type) {
+ case ERTS_SCHED_NORMAL:
+ online_flag = ERTS_SCHDLR_SSPND_CHNG_ONLN;
+ no = esdp->no;
+ break;
+ case ERTS_SCHED_DIRTY_CPU:
+ online_flag = ERTS_SCHDLR_SSPND_CHNG_DCPU_ONLN;
+ no = esdp->dirty_no;
+ break;
+ case ERTS_SCHED_DIRTY_IO:
+ online_flag = 0;
+ no = esdp->dirty_no;
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid scheduler type");
+ return;
+ }
+
+ if (erts_atomic32_read_nob(&ssi->flags) & ERTS_SSI_FLG_MSB_EXEC) {
+ ASSERT(no == 1);
+ if (!msb_scheduler_type_switch(sched_type, esdp, no))
+ return;
+ /* Suspend and let scheduler 1 of another type execute... */
+ }
- erts_sched_check_cpu_bind_prep_suspend(esdp);
- if (erts_system_profile_flags.scheduler)
- profile_scheduler(make_small(esdp->no), am_inactive);
+ if (sched_type != ERTS_SCHED_NORMAL) {
+ dirty_active(esdp, -1);
+ erts_runq_unlock(esdp->run_queue);
+ dirty_sched_wall_time_change(esdp, 0);
+ }
+ else {
+ if (no != 1)
+ evacuate_run_queue(esdp->run_queue, &sbp);
- sched_wall_time_change(esdp, 0);
+ erts_runq_unlock(esdp->run_queue);
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ erts_sched_check_cpu_bind_prep_suspend(esdp);
+
+ if (erts_system_profile_flags.scheduler)
+ profile_scheduler(make_small(esdp->no), am_inactive);
+ }
+
+ erts_mtx_lock(&schdlr_sspnd.mtx);
flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED);
if (flgs & ERTS_SSI_FLG_SUSPENDED) {
- active_schedulers = erts_smp_atomic32_dec_read_nob(&schdlr_sspnd.active);
- ASSERT(active_schedulers >= 1);
- changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
- if (changing & ERTS_SCHDLR_SSPND_CHNG_MSB) {
- if (active_schedulers == schdlr_sspnd.msb.wait_active)
- wake = 1;
- if (active_schedulers == 1) {
- changing = erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_MSB);
- changing &= ~ERTS_SCHDLR_SSPND_CHNG_MSB;
- }
- }
+ schdlr_sspnd_dec_nscheds(&schdlr_sspnd.active, sched_type);
+
+ changing = erts_atomic32_read_nob(&schdlr_sspnd.changing);
while (1) {
- if (changing & ERTS_SCHDLR_SSPND_CHNG_ONLN) {
+
+ if (changing & (ERTS_SCHDLR_SSPND_CHNG_NMSB
+ | ERTS_SCHDLR_SSPND_CHNG_MSB)) {
+ int i = 0;
+ ErtsMultiSchedulingBlock *msb[3] = {0};
+ if (changing & ERTS_SCHDLR_SSPND_CHNG_NMSB)
+ msb[i++] = &schdlr_sspnd.nmsb;
+ if (changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
+ msb[i++] = &schdlr_sspnd.msb;
+
+ for (i = 0; msb[i]; i++) {
+ erts_aint32_t clr_flg = 0;
+
+ if (msb[i] == &schdlr_sspnd.nmsb
+ && schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_NORMAL) == 1) {
+ clr_flg = ERTS_SCHDLR_SSPND_CHNG_NMSB;
+ }
+ else if (schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_NORMAL) == 1
+ && schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_DIRTY_CPU) == 0
+ && schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_DIRTY_IO) == 0) {
+ clr_flg = ERTS_SCHDLR_SSPND_CHNG_MSB;
+ }
+
+ if (clr_flg) {
+ ErtsProcList *plp, *end_plp;
+ changing = erts_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~clr_flg);
+ changing &= ~clr_flg;
+ (void) erts_proclist_fetch(&msb[i]->chngq, &end_plp);
+ /* resume processes that initiated the multi-scheduling block... */
+ plp = msb[i]->chngq;
+ if (plp) {
+ ASSERT(end_plp);
+ ASSERT(msb[i]->ongoing);
+ do {
+ erts_proclist_store_last(&msb[i]->blckrs,
+ proclist_copy(plp));
+ plp = plp->next;
+ } while (plp);
+ end_plp->next = resume.msb.chngrs;
+ resume.msb.chngrs = msb[i]->chngq;
+ msb[i]->chngq = NULL;
+ }
+ }
+ }
+ }
+
+ if (changing & online_flag) {
int changed = 0;
- if (no > schdlr_sspnd.online && curr_online) {
- schdlr_sspnd.curr_online--;
+ Uint32 st_online;
+
+ st_online = schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
+ sched_type);
+ if (no > st_online && curr_online) {
+ schdlr_sspnd_dec_nscheds(&schdlr_sspnd.curr_online,
+ sched_type);
curr_online = 0;
changed = 1;
}
- else if (no <= schdlr_sspnd.online && !curr_online) {
- schdlr_sspnd.curr_online++;
+ else if (no <= st_online && !curr_online) {
+ schdlr_sspnd_inc_nscheds(&schdlr_sspnd.curr_online,
+ sched_type);
curr_online = 1;
changed = 1;
}
if (changed
- && schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online)
- wake = 1;
- if (schdlr_sspnd.online == schdlr_sspnd.curr_online) {
- changing = erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
- changing &= ~ERTS_SCHDLR_SSPND_CHNG_ONLN;
+ && (schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
+ sched_type)
+ == schdlr_sspnd_get_nscheds(&schdlr_sspnd.curr_online,
+ sched_type))) {
+ ErtsProcList *plp;
+ changing = erts_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~online_flag);
+ changing &= ~online_flag;
+ if (sched_type == ERTS_SCHED_NORMAL) {
+ ASSERT(is_internal_pid(schdlr_sspnd.changer)
+ || schdlr_sspnd.changer == am_init);
+ /* resume process that initiated this change... */
+ resume.onln.chngr = schdlr_sspnd.changer;
+ plp = erts_proclist_peek_first(schdlr_sspnd.chngq);
+ if (!plp)
+ schdlr_sspnd.changer = am_false;
+ else {
+ schdlr_sspnd.changer = am_true; /* change right in transit */
+ /* resume process that is queued for the next change... */
+ resume.onln.nxt = plp->pid;
+ ASSERT(is_internal_pid(resume.onln.nxt));
+ }
+ }
}
}
- if (wake) {
- erts_smp_cnd_signal(&schdlr_sspnd.cnd);
- wake = 0;
- }
-
- if (curr_online && !ongoing_multi_scheduling_block()) {
- flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
+ if (curr_online) {
+ flgs = erts_atomic32_read_acqb(&ssi->flags);
if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
break;
}
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ erts_mtx_unlock(&schdlr_sspnd.mtx);
+
+ schdlr_sspnd_resume_procs(sched_type, &resume);
while (1) {
- erts_aint32_t qmask;
- erts_aint32_t flgs;
-
- qmask = (ERTS_RUNQ_FLGS_GET(esdp->run_queue)
- & ERTS_RUNQ_FLGS_QMASK);
- aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
- if (aux_work|qmask) {
- if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- sched_wall_time_change(esdp, 1);
- }
- if (aux_work)
- aux_work = handle_aux_work(&esdp->aux_work_data,
- aux_work,
- 1);
- if (aux_work && erts_thr_progress_update(esdp))
- erts_thr_progress_leader_update(esdp);
- if (qmask) {
- erts_smp_runq_lock(esdp->run_queue);
- evacuate_run_queue(esdp->run_queue, &sbp);
- erts_smp_runq_unlock(esdp->run_queue);
- }
- }
+ if (sched_type != ERTS_SCHED_NORMAL)
+ suspend_dirty_scheduler_sleep(esdp);
+ else
+ {
+ ErtsMonotonicTime current_time, timeout_time;
+ int evacuate = no == 1 ? 0 : !ERTS_EMPTY_RUNQ(esdp->run_queue);
- if (!aux_work) {
- if (thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 0);
- sched_wall_time_change(esdp, 0);
- }
- erts_thr_progress_prepare_wait(esdp);
- flgs = sched_spin_suspended(ssi,
- ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT);
- if (flgs == (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_WAITING
- | ERTS_SSI_FLG_SUSPENDED)) {
- flgs = sched_set_suspended_sleeptype(ssi);
- if (flgs == (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_TSE_SLEEPING
- | ERTS_SSI_FLG_WAITING
- | ERTS_SSI_FLG_SUSPENDED)) {
- int res;
-
- do {
- res = erts_tse_wait(ssi->event);
- } while (res == EINTR);
+ ASSERT(sched_type == ERTS_SCHED_NORMAL);
+
+ aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
+
+ if (aux_work|evacuate) {
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
+ if (aux_work)
+ aux_work = handle_aux_work(&esdp->aux_work_data,
+ aux_work,
+ 1);
+
+ if (aux_work && erts_thr_progress_update(esdp))
+ erts_thr_progress_leader_update(esdp);
+ if (evacuate) {
+ erts_runq_lock(esdp->run_queue);
+ evacuate_run_queue(esdp->run_queue, &sbp);
+ erts_runq_unlock(esdp->run_queue);
}
}
- erts_thr_progress_finalize_wait(esdp);
- }
+
+
+ if (aux_work)
+ timeout_time = erts_next_timeout_time(esdp->next_tmo_ref);
+ else
+ timeout_time = erts_check_next_timeout_time(esdp);
+
+ current_time = erts_get_monotonic_time(esdp);
+
+ if (!aux_work && current_time < timeout_time) {
+ /* go to sleep... */
+ if (thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 0);
+ sched_wall_time_change(esdp, 0);
+ }
+ erts_thr_progress_prepare_wait(NULL);
+ suspend_normal_scheduler_sleep(esdp);
+ erts_thr_progress_finalize_wait(NULL);
+ current_time = erts_get_monotonic_time(esdp);
+ }
+
+ if (current_time >= timeout_time) {
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
+ erts_bump_timers(esdp->timer_wheel, current_time);
+ }
+ }
flgs = sched_prep_spin_suspended(ssi, (ERTS_SSI_FLG_WAITING
| ERTS_SSI_FLG_SUSPENDED));
if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
break;
- changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
- if (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER)
+ changing = erts_atomic32_read_nob(&schdlr_sspnd.changing);
+ if (changing)
break;
}
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
- }
-
- active_schedulers = erts_smp_atomic32_inc_read_nob(&schdlr_sspnd.active);
- changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
- if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
- && schdlr_sspnd.online == active_schedulers) {
- erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ erts_mtx_lock(&schdlr_sspnd.mtx);
+ changing = erts_atomic32_read_nob(&schdlr_sspnd.changing);
}
- ASSERT(no <= schdlr_sspnd.online);
- ASSERT(!ongoing_multi_scheduling_block());
-
+ schdlr_sspnd_inc_nscheds(&schdlr_sspnd.active, sched_type);
+ changing = erts_atomic32_read_nob(&schdlr_sspnd.changing);
+ if (changing) {
+ if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
+ && !schdlr_sspnd.msb.ongoing
+ && schdlr_sspnd_eq_nscheds(&schdlr_sspnd.online,
+ &schdlr_sspnd.active)) {
+ erts_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ }
+ if ((changing & ERTS_SCHDLR_SSPND_CHNG_NMSB)
+ && !schdlr_sspnd.nmsb.ongoing
+ && (schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
+ ERTS_SCHED_NORMAL)
+ == schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_NORMAL))) {
+ erts_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_NMSB);
+ }
+ }
+ ASSERT(no <= schdlr_sspnd_get_nscheds(&schdlr_sspnd.online, sched_type));
}
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ erts_mtx_unlock(&schdlr_sspnd.mtx);
+
+ schdlr_sspnd_resume_procs(sched_type, &resume);
ASSERT(curr_online);
- if (erts_system_profile_flags.scheduler)
- profile_scheduler(make_small(esdp->no), am_active);
+ if (sched_type != ERTS_SCHED_NORMAL)
+ dirty_sched_wall_time_change(esdp, 1);
+ else {
+ (void) erts_get_monotonic_time(esdp);
+ if (erts_system_profile_flags.scheduler)
+ profile_scheduler(make_small(esdp->no), am_active);
- if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- sched_wall_time_change(esdp, 1);
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
}
- erts_smp_runq_lock(esdp->run_queue);
+ erts_runq_lock(esdp->run_queue);
non_empty_runq(esdp->run_queue);
- schedule_bound_processes(esdp->run_queue, &sbp);
+ if (sched_type != ERTS_SCHED_NORMAL)
+ dirty_active(esdp, 1);
+ else {
+ schedule_bound_processes(esdp->run_queue, &sbp);
- erts_sched_check_cpu_bind_post_suspend(esdp);
+ erts_sched_check_cpu_bind_post_suspend(esdp);
+ }
}
-#endif
-
-ErtsSchedSuspendResult
+void
erts_schedulers_state(Uint *total,
Uint *online,
Uint *active,
Uint *dirty_cpu,
Uint *dirty_cpu_online,
+ Uint *dirty_cpu_active,
Uint *dirty_io,
- int yield_allowed)
+ Uint *dirty_io_active)
{
- int res = ERTS_SCHDLR_SSPND_EINVAL;
- erts_aint32_t changing;
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
-#ifdef ERTS_DIRTY_SCHEDULERS
- changing |= (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_changing)
- | erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_changing));
-#endif
- if (yield_allowed && (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER))
- res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
- else {
+ if (active || online || dirty_cpu_online
+ || dirty_cpu_active || dirty_io_active) {
+ erts_mtx_lock(&schdlr_sspnd.mtx);
if (active)
- *active = schdlr_sspnd.online;
+ *active = schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_NORMAL);
if (online)
- *online = schdlr_sspnd.online;
- if (ongoing_multi_scheduling_block() && active)
- *active = 1;
-#ifdef ERTS_DIRTY_SCHEDULERS
+ *online = schdlr_sspnd_get_nscheds(&schdlr_sspnd.curr_online,
+ ERTS_SCHED_NORMAL);
+ if (dirty_cpu_active)
+ *dirty_cpu_active = schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_DIRTY_CPU);
if (dirty_cpu_online)
- *dirty_cpu_online = schdlr_sspnd.dirty_cpu_online;
-#endif
- res = ERTS_SCHDLR_SSPND_DONE;
+ *dirty_cpu_online = schdlr_sspnd_get_nscheds(&schdlr_sspnd.curr_online,
+ ERTS_SCHED_DIRTY_CPU);
+ if (dirty_io_active)
+ *dirty_io_active = schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_DIRTY_IO);
+ erts_mtx_unlock(&schdlr_sspnd.mtx);
}
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+
if (total)
*total = erts_no_schedulers;
-#ifdef ERTS_DIRTY_SCHEDULERS
if (dirty_cpu)
*dirty_cpu = erts_no_dirty_cpu_schedulers;
if (dirty_io)
*dirty_io = erts_no_dirty_io_schedulers;
-#endif
- return res;
}
-#ifdef ERTS_DIRTY_SCHEDULERS
+static void
+abort_sched_onln_chng_waitq(Process *p)
+{
+ Eterm resume = NIL;
+
+ erts_mtx_lock(&schdlr_sspnd.mtx);
+
+#ifdef DEBUG
+ {
+ int found_it = 0;
+ ErtsProcList *plp = erts_proclist_peek_first(schdlr_sspnd.chngq);
+ while (plp) {
+ if (erts_proclist_same(plp, p))
+ found_it++;
+ plp = erts_proclist_peek_next(schdlr_sspnd.chngq, plp);
+ }
+ ASSERT(found_it == !!(p->flags & F_SCHDLR_ONLN_WAITQ));
+ }
+#endif
+
+ if (p->flags & F_SCHDLR_ONLN_WAITQ) {
+ ErtsProcList *plp = NULL;
+
+ plp = erts_proclist_peek_first(schdlr_sspnd.chngq);
+ if (plp) {
+ if (erts_proclist_same(plp, p)
+ && schdlr_sspnd.changer == am_true) {
+ p->flags &= ~F_SCHDLR_ONLN_WAITQ;
+ /*
+ * The change right was in transit to us;
+ * transfer it to the next process by
+ * resuming it...
+ */
+ erts_proclist_remove(&schdlr_sspnd.chngq, plp);
+ proclist_destroy(plp);
+ plp = erts_proclist_peek_first(schdlr_sspnd.chngq);
+ if (plp)
+ resume = plp->pid;
+ else
+ schdlr_sspnd.changer = am_false;
+ }
+ else {
+ do {
+ if (erts_proclist_same(plp, p)) {
+ p->flags &= ~F_SCHDLR_ONLN_WAITQ;
+ erts_proclist_remove(&schdlr_sspnd.chngq, plp);
+ proclist_destroy(plp);
+ break;
+ }
+ plp = erts_proclist_peek_next(schdlr_sspnd.chngq, plp);
+ } while (plp);
+ }
+ }
+ }
+
+ erts_mtx_unlock(&schdlr_sspnd.mtx);
+
+ if (is_internal_pid(resume))
+ schdlr_sspnd_resume_proc(ERTS_SCHED_NORMAL, resume);
+}
ErtsSchedSuspendResult
erts_set_schedulers_online(Process *p,
ErtsProcLocks plocks,
Sint new_no,
- Sint *old_no
-#ifdef ERTS_DIRTY_SCHEDULERS
- , int dirty_only
-#endif
- )
+ Sint *old_no,
+ int dirty_only)
{
- ErtsSchedulerData *esdp;
- int ix, res = -1, no, have_unlocked_plocks, end_wait;
- erts_aint32_t changing = 0;
-#ifdef ERTS_DIRTY_SCHEDULERS
- ErtsSchedulerSleepInfo* ssi;
- int dirty_no, change_dirty;
-#endif
+ int resume_proc, ix, res = -1, no, have_unlocked_plocks;
+ erts_aint32_t changing = 0, change_flags;
+ int online, increase;
+ ErtsProcList *plp;
+ int dirty_no, change_dirty, dirty_online;
if (new_no < 1)
return ERTS_SCHDLR_SSPND_EINVAL;
-#ifdef ERTS_DIRTY_SCHEDULERS
else if (dirty_only && erts_no_dirty_cpu_schedulers < new_no)
return ERTS_SCHDLR_SSPND_EINVAL;
-#endif
else if (erts_no_schedulers < new_no)
return ERTS_SCHDLR_SSPND_EINVAL;
- esdp = ERTS_PROC_GET_SCHDATA(p);
- end_wait = 0;
+ if (dirty_only)
+ resume_proc = 0;
+ else
+ {
+ resume_proc = 1;
+ /*
+ * If we suspend current process we need to suspend before
+ * requesting the change; otherwise, we got a resume/suspend
+ * race...
+ */
+ if (!(plocks & ERTS_PROC_LOCK_STATUS))
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ suspend_process(p, p);
+ if (!(plocks & ERTS_PROC_LOCK_STATUS))
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ }
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ erts_mtx_lock(&schdlr_sspnd.mtx);
+ change_flags = 0;
have_unlocked_plocks = 0;
no = (int) new_no;
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(schdlr_sspnd.dirty_cpu_online <= erts_no_dirty_cpu_schedulers);
+ if (!dirty_only)
+ {
+ changing = erts_atomic32_read_nob(&schdlr_sspnd.changing);
+ if (changing & ERTS_SCHDLR_SSPND_CHNG_ONLN) {
+ enqueue_wait:
+ p->flags |= F_SCHDLR_ONLN_WAITQ;
+ plp = proclist_create(p);
+ erts_proclist_store_last(&schdlr_sspnd.chngq, plp);
+ resume_proc = 0;
+ res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
+ goto done;
+ }
+ plp = erts_proclist_peek_first(schdlr_sspnd.chngq);
+ if (!plp) {
+ ASSERT(schdlr_sspnd.changer == am_false);
+ }
+ else {
+ ASSERT(schdlr_sspnd.changer == am_true);
+ if (!erts_proclist_same(plp, p))
+ goto enqueue_wait;
+ p->flags &= ~F_SCHDLR_ONLN_WAITQ;
+ erts_proclist_remove(&schdlr_sspnd.chngq, plp);
+ proclist_destroy(plp);
+ }
+ }
+
+ *old_no = online = schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
+ ERTS_SCHED_NORMAL);
+ dirty_online = schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
+ ERTS_SCHED_DIRTY_CPU);
+ if (dirty_only)
+ *old_no = dirty_online;
+
+ ASSERT(dirty_online <= erts_no_dirty_cpu_schedulers);
+
if (dirty_only) {
- if (no > schdlr_sspnd.online) {
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- return ERTS_SCHDLR_SSPND_EINVAL;
+ if (no > online) {
+ res = ERTS_SCHDLR_SSPND_EINVAL;
+ goto done;
}
dirty_no = no;
+ if (dirty_no == dirty_online) {
+ res = ERTS_SCHDLR_SSPND_DONE;
+ goto done;
+ }
+ change_dirty = 1;
} else {
/*
* Adjust the number of dirty CPU schedulers online relative to the
* adjustment made to the number of normal schedulers online.
*/
int total_pct = erts_no_dirty_cpu_schedulers*100/erts_no_schedulers;
- int onln_pct = no*total_pct/schdlr_sspnd.online;
- dirty_no = schdlr_sspnd.dirty_cpu_online*onln_pct/100;
+ int onln_pct = no*total_pct/online;
+ dirty_no = dirty_online*onln_pct/100;
if (dirty_no == 0)
dirty_no = 1;
ASSERT(dirty_no <= erts_no_dirty_cpu_schedulers);
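[Worked example of the scaling above, with hypothetical numbers: 8 normal and 8 dirty CPU schedulers give total_pct = 100; lowering normal online from 8 to 4 gives onln_pct = 4*100/8 = 50, so dirty CPU online scales to 8*50/100 = 4.]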
- }
-#endif
- changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
-#ifdef ERTS_DIRTY_SCHEDULERS
- changing |= erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_changing);
-#endif
- if (changing) {
- res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
- }
- else {
- int online = *old_no = schdlr_sspnd.online;
-#ifdef ERTS_DIRTY_SCHEDULERS
- int dirty_online = schdlr_sspnd.dirty_cpu_online;
- if (dirty_only) {
- *old_no = schdlr_sspnd.dirty_cpu_online;
- if (dirty_no == schdlr_sspnd.dirty_cpu_online) {
+ if (no != online)
+ change_dirty = (dirty_no != dirty_online);
+ else {
+ dirty_only = 1;
+ if (dirty_no == dirty_online) {
res = ERTS_SCHDLR_SSPND_DONE;
+ goto done;
}
change_dirty = 1;
- } else {
-#endif
- if (no == schdlr_sspnd.online) {
-#ifdef ERTS_DIRTY_SCHEDULERS
- dirty_only = 1;
- if (dirty_no == schdlr_sspnd.dirty_cpu_online)
-#endif
- res = ERTS_SCHDLR_SSPND_DONE;
-#ifdef ERTS_DIRTY_SCHEDULERS
- else
- change_dirty = 1;
-#endif
- }
-#ifdef ERTS_DIRTY_SCHEDULERS
- else
- change_dirty = (dirty_no != schdlr_sspnd.dirty_cpu_online);
}
-#endif
- if (res == -1)
- {
- int increase = (no > online);
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (!dirty_only) {
-#endif
- ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
- | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
- schdlr_sspnd.online = no;
-#ifdef ERTS_DIRTY_SCHEDULERS
- } else
- increase = (dirty_no > dirty_online);
- if (change_dirty) {
- ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
- | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
- schdlr_sspnd.dirty_cpu_online = dirty_no;
- }
-#endif
- if (increase) {
- int ix;
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (!dirty_only) {
-#endif
- schdlr_sspnd.wait_curr_online = no;
- if (ongoing_multi_scheduling_block()) {
- for (ix = online; ix < no; ix++)
- erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix));
- }
- else {
- if (plocks) {
- have_unlocked_plocks = 1;
- erts_smp_proc_unlock(p, plocks);
- }
- change_no_used_runqs(no);
-
- for (ix = online; ix < no; ix++)
- resume_run_queue(ERTS_RUNQ_IX(ix));
-
- for (ix = no; ix < erts_no_run_queues; ix++)
- suspend_run_queue(ERTS_RUNQ_IX(ix));
- }
-#ifdef ERTS_DIRTY_SCHEDULERS
- }
- if (change_dirty) {
- schdlr_sspnd.dirty_cpu_wait_curr_online = dirty_no;
- ASSERT(schdlr_sspnd.dirty_cpu_curr_online !=
- schdlr_sspnd.dirty_cpu_wait_curr_online);
- if (ongoing_multi_scheduling_block()) {
- for (ix = dirty_online; ix < dirty_no; ix++) {
- ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
- erts_sched_poke(ssi);
- }
- } else {
- for (ix = dirty_online; ix < dirty_no; ix++) {
- ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
- scheduler_ssi_resume_wake(ssi);
- erts_smp_atomic32_read_band_nob(&ssi->flags,
- ~ERTS_SSI_FLG_SUSPENDED);
- }
- wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0);
- }
- }
-#endif
- res = ERTS_SCHDLR_SSPND_DONE;
- }
- else /* if (no < online) */ {
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (change_dirty) {
- schdlr_sspnd.dirty_cpu_wait_curr_online = dirty_no;
- ASSERT(schdlr_sspnd.dirty_cpu_curr_online !=
- schdlr_sspnd.dirty_cpu_wait_curr_online);
- if (ongoing_multi_scheduling_block()) {
- for (ix = dirty_no; ix < dirty_online; ix++) {
- ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
- erts_sched_poke(ssi);
- }
- } else {
- for (ix = dirty_no; ix < dirty_online; ix++) {
- ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
- erts_smp_atomic32_read_bor_nob(&ssi->flags,
- ERTS_SSI_FLG_SUSPENDED);
- }
- wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0);
- }
- }
- if (dirty_only) {
- res = ERTS_SCHDLR_SSPND_DONE;
- }
- else
-#endif
- {
- if (p->scheduler_data->no <= no) {
- res = ERTS_SCHDLR_SSPND_DONE;
- schdlr_sspnd.wait_curr_online = no;
- }
- else {
- /*
- * Yield! Current process needs to migrate
- * before bif returns.
- */
- res = ERTS_SCHDLR_SSPND_YIELD_DONE;
- schdlr_sspnd.wait_curr_online = no+1;
- }
+ }
+ if (change_dirty) {
+ change_flags |= ERTS_SCHDLR_SSPND_CHNG_DCPU_ONLN;
+ schdlr_sspnd_set_nscheds(&schdlr_sspnd.online,
+ ERTS_SCHED_DIRTY_CPU,
+ dirty_no);
+ }
- if (ongoing_multi_scheduling_block()) {
- for (ix = no; ix < online; ix++)
- erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix));
- }
- else {
- if (plocks) {
- have_unlocked_plocks = 1;
- erts_smp_proc_unlock(p, plocks);
- }
+ if (dirty_only)
+ increase = (dirty_no > dirty_online);
+ else
+ {
+ change_flags |= ERTS_SCHDLR_SSPND_CHNG_ONLN;
+ schdlr_sspnd_set_nscheds(&schdlr_sspnd.online,
+ ERTS_SCHED_NORMAL,
+ no);
+ increase = (no > online);
+ }
- change_no_used_runqs(no);
- for (ix = no; ix < erts_no_run_queues; ix++)
- suspend_run_queue(ERTS_RUNQ_IX(ix));
+ erts_atomic32_read_bor_nob(&schdlr_sspnd.changing, change_flags);
- for (ix = no; ix < online; ix++) {
- ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- wake_scheduler(rq);
- }
- }
+ res = ERTS_SCHDLR_SSPND_DONE;
+ if (increase) {
+ int ix;
+ if (change_dirty) {
+ ErtsSchedulerSleepInfo* ssi;
+ if (schdlr_sspnd.msb.ongoing) {
+ for (ix = dirty_online; ix < dirty_no; ix++) {
+ ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
+ erts_sched_poke(ssi);
}
+ } else {
+ for (ix = dirty_online; ix < dirty_no; ix++)
+ dcpu_sched_ix_resume_wake(ix);
}
-
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (change_dirty) {
- while (schdlr_sspnd.dirty_cpu_curr_online != schdlr_sspnd.dirty_cpu_wait_curr_online)
- erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
- ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
- erts_smp_atomic32_read_band_nob(&schdlr_sspnd.dirty_cpu_changing,
- ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ }
+ if (!dirty_only)
+ {
+ if (schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing) {
+ for (ix = online; ix < no; ix++)
+ erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix));
}
- if (!dirty_only)
-#endif
- {
- if (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online) {
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- if (plocks && !have_unlocked_plocks) {
- have_unlocked_plocks = 1;
- erts_smp_proc_unlock(p, plocks);
- }
- erts_thr_progress_active(esdp, 0);
- erts_thr_progress_prepare_wait(esdp);
- end_wait = 1;
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ else {
+ if (plocks) {
+ have_unlocked_plocks = 1;
+ erts_proc_unlock(p, plocks);
}
+ change_no_used_runqs(no);
- while (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online)
- erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ for (ix = online; ix < no; ix++)
+ resume_run_queue(ERTS_RUNQ_IX(ix));
- ASSERT(res != ERTS_SCHDLR_SSPND_DONE
- ? (ERTS_SCHDLR_SSPND_CHNG_WAITER
- & erts_smp_atomic32_read_nob(&schdlr_sspnd.changing))
- : (ERTS_SCHDLR_SSPND_CHNG_WAITER
- == erts_smp_atomic32_read_nob(&schdlr_sspnd.changing)));
- erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ for (ix = no; ix < erts_no_run_queues; ix++)
+ suspend_run_queue(ERTS_RUNQ_IX(ix));
}
}
}
-
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(schdlr_sspnd.dirty_cpu_online <= schdlr_sspnd.online);
- if (!dirty_only)
-#endif
- {
- if (end_wait) {
- erts_thr_progress_finalize_wait(esdp);
- erts_thr_progress_active(esdp, 1);
- }
- if (have_unlocked_plocks)
- erts_smp_proc_lock(p, plocks);
- }
-
- return res;
-}
-
-#else /* !ERTS_DIRTY_SCHEDULERS */
-
-ErtsSchedSuspendResult
-erts_set_schedulers_online(Process *p,
- ErtsProcLocks plocks,
- Sint new_no,
- Sint *old_no)
-{
- ErtsSchedulerData *esdp;
- int ix, res, no, have_unlocked_plocks, end_wait;
- erts_aint32_t changing;
-
- if (new_no < 1 || erts_no_schedulers < new_no)
- return ERTS_SCHDLR_SSPND_EINVAL;
-
- esdp = ERTS_PROC_GET_SCHDATA(p);
- end_wait = 0;
-
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
-
- have_unlocked_plocks = 0;
- no = (int) new_no;
-
- changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
- if (changing) {
- res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
- }
- else {
- int online = *old_no = schdlr_sspnd.online;
- if (no == schdlr_sspnd.online) {
- res = ERTS_SCHDLR_SSPND_DONE;
+ else /* if decrease */ {
+ if (change_dirty) {
+ if (schdlr_sspnd.msb.ongoing) {
+ for (ix = dirty_no; ix < dirty_online; ix++)
+ erts_sched_poke(ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix));
+ }
+ else {
+ for (ix = dirty_no; ix < dirty_online; ix++)
+ dcpu_sched_ix_suspend_wake(ix);
+ /*
+ * A newly suspended scheduler may have just been
+ * about to handle a task. Make sure someone takes
+ * care of such a task...
+ */
+ dcpu_sched_ix_wake(0);
+ }
}
- else {
- ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
- | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
- schdlr_sspnd.online = no;
- if (no > online) {
- int ix;
- schdlr_sspnd.wait_curr_online = no;
- if (ongoing_multi_scheduling_block()) {
- for (ix = online; ix < no; ix++)
- erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix));
- }
- else {
- if (plocks) {
- have_unlocked_plocks = 1;
- erts_smp_proc_unlock(p, plocks);
- }
- change_no_used_runqs(no);
-
- for (ix = online; ix < no; ix++)
- resume_run_queue(ERTS_RUNQ_IX(ix));
-
- for (ix = no; ix < erts_no_run_queues; ix++)
- suspend_run_queue(ERTS_RUNQ_IX(ix));
- }
- res = ERTS_SCHDLR_SSPND_DONE;
+ if (!dirty_only)
+ {
+ if (schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing) {
+ for (ix = no; ix < online; ix++)
+ erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix));
}
- else /* if (no < online) */ {
- if (p->scheduler_data->no <= no) {
- res = ERTS_SCHDLR_SSPND_DONE;
- schdlr_sspnd.wait_curr_online = no;
- }
- else {
- /*
- * Yield! Current process needs to migrate
- * before bif returns.
- */
- res = ERTS_SCHDLR_SSPND_YIELD_DONE;
- schdlr_sspnd.wait_curr_online = no+1;
- }
-
- if (ongoing_multi_scheduling_block()) {
- for (ix = no; ix < online; ix++)
- erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix));
+ else {
+ if (plocks) {
+ have_unlocked_plocks = 1;
+ erts_proc_unlock(p, plocks);
}
- else {
- if (plocks) {
- have_unlocked_plocks = 1;
- erts_smp_proc_unlock(p, plocks);
- }
- change_no_used_runqs(no);
- for (ix = no; ix < erts_no_run_queues; ix++)
- suspend_run_queue(ERTS_RUNQ_IX(ix));
+ change_no_used_runqs(no);
+ for (ix = no; ix < erts_no_run_queues; ix++)
+ suspend_run_queue(ERTS_RUNQ_IX(ix));
- for (ix = no; ix < online; ix++) {
- ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- wake_scheduler(rq);
- }
+ for (ix = no; ix < online; ix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
+ wake_scheduler(rq);
}
}
+ }
+ }
- if (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online) {
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- if (plocks && !have_unlocked_plocks) {
- have_unlocked_plocks = 1;
- erts_smp_proc_unlock(p, plocks);
- }
- erts_thr_progress_active(esdp, 0);
- erts_thr_progress_prepare_wait(esdp);
- end_wait = 1;
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- }
+ if (change_flags & ERTS_SCHDLR_SSPND_CHNG_ONLN) {
+ /* Suspend and wait for requested change to complete... */
+ schdlr_sspnd.changer = p->common.id;
+ resume_proc = 0;
+ res = ERTS_SCHDLR_SSPND_YIELD_DONE;
+ }
+
+done:
- while (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online)
- erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ ASSERT(schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
+ ERTS_SCHED_DIRTY_CPU)
+ <= schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
+ ERTS_SCHED_NORMAL));
- ASSERT(res != ERTS_SCHDLR_SSPND_DONE
- ? (ERTS_SCHDLR_SSPND_CHNG_WAITER
- & erts_smp_atomic32_read_nob(&schdlr_sspnd.changing))
- : (ERTS_SCHDLR_SSPND_CHNG_WAITER
- == erts_smp_atomic32_read_nob(&schdlr_sspnd.changing)));
- erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ erts_mtx_unlock(&schdlr_sspnd.mtx);
- }
- }
+ if (have_unlocked_plocks)
+ erts_proc_lock(p, plocks);
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- if (end_wait) {
- erts_thr_progress_finalize_wait(esdp);
- erts_thr_progress_active(esdp, 1);
+ if (resume_proc) {
+ if (!(plocks & ERTS_PROC_LOCK_STATUS))
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ resume_process(p, plocks|ERTS_PROC_LOCK_STATUS);
+ if (!(plocks & ERTS_PROC_LOCK_STATUS))
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
}
- if (have_unlocked_plocks)
- erts_smp_proc_lock(p, plocks);
return res;
}
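
[Note how this replaces the old condition-variable wait: the requesting process records itself as schdlr_sspnd.changer, stays suspended, and the BIF returns ERTS_SCHDLR_SSPND_YIELD_DONE; suspend_scheduler() later resumes it (via resume.onln.chngr) once curr_online matches online, so the change completes asynchronously instead of blocking a scheduler thread.]
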
-#endif
-
ErtsSchedSuspendResult
-erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
+erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal, int all)
{
- int ix, res, have_unlocked_plocks = 0, online;
- erts_aint32_t changing;
+ int resume_proc, ix, res, have_unlocked_plocks = 0;
ErtsProcList *plp;
-#ifdef ERTS_DIRTY_SCHEDULERS
- ErtsSchedulerSleepInfo* ssi;
-#endif
+ ErtsMultiSchedulingBlock *msbp;
+ erts_aint32_t chng_flg;
+ int have_blckd_flg;
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
-#ifdef ERTS_DIRTY_SCHEDULERS
- changing |= (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_changing)
- | erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_changing));
-#endif
- if (changing) {
- res = ERTS_SCHDLR_SSPND_YIELD_RESTART; /* Yield */
+ if (normal) {
+ chng_flg = ERTS_SCHDLR_SSPND_CHNG_NMSB;
+ have_blckd_flg = F_HAVE_BLCKD_NMSCHED;
+ msbp = &schdlr_sspnd.nmsb;
+ }
+ else {
+ chng_flg = ERTS_SCHDLR_SSPND_CHNG_MSB;
+ have_blckd_flg = F_HAVE_BLCKD_MSCHED;
+ msbp = &schdlr_sspnd.msb;
+ }
+
+ /*
+ * If we suspend the current process, we need to suspend it
+ * before requesting the change; otherwise, we get a
+ * resume/suspend race...
+ */
+ if (!on) {
+ /* We never suspend the current process when unblocking... */
+ resume_proc = 0;
}
- else if (on) { /* ------ BLOCK ------ */
- if (schdlr_sspnd.msb.procs) {
+ else {
+ resume_proc = 1;
+ if (!(plocks & ERTS_PROC_LOCK_STATUS))
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ suspend_process(p, p);
+ if (!(plocks & ERTS_PROC_LOCK_STATUS))
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ }
+
+ erts_mtx_lock(&schdlr_sspnd.mtx);
+ if (on) { /* ------ BLOCK ------ */
+ if (msbp->chngq) {
+ ASSERT(msbp->ongoing);
+ p->flags |= have_blckd_flg;
+ goto wait_until_msb;
+ }
+ else if (msbp->blckrs || (normal && erts_no_schedulers == 1)) {
+ ASSERT(!msbp->blckrs || msbp->ongoing);
+ msbp->ongoing = 1;
plp = proclist_create(p);
- erts_proclist_store_last(&schdlr_sspnd.msb.procs, plp);
- p->flags |= F_HAVE_BLCKD_MSCHED;
- ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1);
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_active) == 0);
- ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_active) == 0);
-#endif
- ASSERT(p->scheduler_data->no == 1);
- res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
- } else {
- int online = schdlr_sspnd.online;
- p->flags |= F_HAVE_BLCKD_MSCHED;
+ erts_proclist_store_last(&msbp->blckrs, plp);
+ p->flags |= have_blckd_flg;
+ ASSERT(normal
+ ? 1 == schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_NORMAL)
+ : schdlr_sspnd_get_nscheds_tot(&schdlr_sspnd.active) == 1);
+ ASSERT(erts_proc_sched_data(p)->no == 1);
+ if (schdlr_sspnd.msb.ongoing)
+ res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
+ else
+ res = ERTS_SCHDLR_SSPND_DONE_NMSCHED_BLOCKED;
+ }
+ else {
+ int online = (int) schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
+ ERTS_SCHED_NORMAL);
+ ASSERT(!msbp->ongoing);
+ p->flags |= have_blckd_flg;
if (plocks) {
have_unlocked_plocks = 1;
- erts_smp_proc_unlock(p, plocks);
- }
- ASSERT(!ongoing_multi_scheduling_block());
- schdlr_sspnd.msb.ongoing = 1;
- if (online == 1) {
- res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
- ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1);
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_active) == 1);
- ASSERT(!(erts_smp_atomic32_read_nob(&ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(0)->flags)
- & ERTS_SSI_FLG_SUSPENDED));
- schdlr_sspnd.msb.dirty_cpu_wait_active = 0;
- ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_MSB
- | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
- ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(0);
- erts_smp_atomic32_read_bor_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
- wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0);
- while (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_active)
- != schdlr_sspnd.msb.dirty_cpu_wait_active)
- erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
- ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
-
- schdlr_sspnd.msb.dirty_io_wait_active = 0;
- ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_MSB
- | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
- for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
- ssi = ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix);
- erts_smp_atomic32_read_bor_nob(&ssi->flags,
- ERTS_SSI_FLG_SUSPENDED);
- }
- wake_dirty_schedulers(ERTS_DIRTY_IO_RUNQ, 0);
- while (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_active)
- != schdlr_sspnd.msb.dirty_io_wait_active)
- erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
- ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
-#endif
- ASSERT(p->scheduler_data->no == 1);
+ erts_proc_unlock(p, plocks);
}
- else {
- ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_MSB
- | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
- if (p->scheduler_data->no == 1) {
- res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
- schdlr_sspnd.msb.wait_active = 1;
- }
- else {
- /*
- * Yield! Current process needs to migrate
- * before bif returns.
- */
- res = ERTS_SCHDLR_SSPND_YIELD_DONE_MSCHED_BLOCKED;
- schdlr_sspnd.msb.wait_active = 2;
- }
+ ASSERT(!msbp->ongoing);
+ msbp->ongoing = 1;
-#ifdef ERTS_DIRTY_SCHEDULERS
- schdlr_sspnd.msb.dirty_cpu_wait_active = 0;
- ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_MSB
- | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
- for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) {
- ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
- erts_smp_atomic32_read_bor_nob(&ssi->flags,
- ERTS_SSI_FLG_SUSPENDED);
- }
- wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0);
- while (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_active)
- != schdlr_sspnd.msb.dirty_cpu_wait_active)
- erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
- ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
- ASSERT(schdlr_sspnd.dirty_cpu_curr_online == schdlr_sspnd.dirty_cpu_online);
-
- schdlr_sspnd.msb.dirty_io_wait_active = 0;
- ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_MSB
- | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
- for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
- ssi = ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix);
- erts_smp_atomic32_read_bor_nob(&ssi->flags,
- ERTS_SSI_FLG_SUSPENDED);
- }
- wake_dirty_schedulers(ERTS_DIRTY_IO_RUNQ, 0);
- while (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_active)
- != schdlr_sspnd.msb.dirty_io_wait_active)
- erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
- ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
- ASSERT(schdlr_sspnd.dirty_io_curr_online == schdlr_sspnd.dirty_io_online);
-#endif
- change_no_used_runqs(1);
- for (ix = 1; ix < erts_no_run_queues; ix++)
- suspend_run_queue(ERTS_RUNQ_IX(ix));
-
- for (ix = 1; ix < online; ix++) {
- ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- wake_scheduler(rq);
- }
-
- if (erts_smp_atomic32_read_nob(&schdlr_sspnd.active)
- != schdlr_sspnd.msb.wait_active) {
- ErtsSchedulerData *esdp;
-
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
-
- if (plocks && !have_unlocked_plocks) {
- have_unlocked_plocks = 1;
- erts_smp_proc_unlock(p, plocks);
- }
+ erts_atomic32_read_bor_nob(&schdlr_sspnd.changing,
+ chng_flg);
+ change_no_used_runqs(1);
+ for (ix = 1; ix < erts_no_run_queues; ix++)
+ suspend_run_queue(ERTS_RUNQ_IX(ix));
- esdp = ERTS_PROC_GET_SCHDATA(p);
-
- erts_thr_progress_active(esdp, 0);
- erts_thr_progress_prepare_wait(esdp);
-
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ for (ix = 1; ix < online; ix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
+ wake_scheduler(rq);
+ }
- while (erts_smp_atomic32_read_nob(&schdlr_sspnd.active)
- != schdlr_sspnd.msb.wait_active)
- erts_smp_cnd_wait(&schdlr_sspnd.cnd,
- &schdlr_sspnd.mtx);
-
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
-
- erts_thr_progress_active(esdp, 1);
- erts_thr_progress_finalize_wait(esdp);
+ if (!normal) {
+ ERTS_RUNQ_FLGS_SET_NOB(ERTS_RUNQ_IX(0), ERTS_RUNQ_FLG_MSB_EXEC);
+ erts_atomic32_read_bor_nob(&ERTS_RUNQ_IX(0)->scheduler->ssi->flags,
+ ERTS_SSI_FLG_MSB_EXEC);
+ for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++)
+ dcpu_sched_ix_suspend_wake(ix);
+ for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++)
+ dio_sched_ix_suspend_wake(ix);
+ }
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ wait_until_msb:
- }
+ ASSERT(chng_flg & erts_atomic32_read_nob(&schdlr_sspnd.changing));
- ASSERT(res != ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED
- ? (ERTS_SCHDLR_SSPND_CHNG_WAITER
- & erts_smp_atomic32_read_nob(&schdlr_sspnd.changing))
- : (ERTS_SCHDLR_SSPND_CHNG_WAITER
- == erts_smp_atomic32_read_nob(&schdlr_sspnd.changing)));
- erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
- }
- plp = proclist_create(p);
- erts_proclist_store_last(&schdlr_sspnd.msb.procs, plp);
- ASSERT(p->scheduler_data);
+ plp = proclist_create(p);
+ erts_proclist_store_last(&msbp->chngq, plp);
+ resume_proc = 0;
+ if (schdlr_sspnd.msb.ongoing)
+ res = ERTS_SCHDLR_SSPND_YIELD_DONE_MSCHED_BLOCKED;
+ else
+ res = ERTS_SCHDLR_SSPND_YIELD_DONE_NMSCHED_BLOCKED;
+ ASSERT(erts_proc_sched_data(p));
}
}
- else if (!ongoing_multi_scheduling_block()) {
- /* unblock not ongoing */
- ASSERT(!schdlr_sspnd.msb.procs);
- res = ERTS_SCHDLR_SSPND_DONE;
+ else if (!msbp->ongoing) {
+ ASSERT(!msbp->blckrs);
+ goto unblock_res;
}
else { /* ------ UNBLOCK ------ */
- if (p->flags & F_HAVE_BLCKD_MSCHED) {
- ErtsProcList *plp = erts_proclist_peek_first(schdlr_sspnd.msb.procs);
-
- while (plp) {
- ErtsProcList *tmp_plp = plp;
- plp = erts_proclist_peek_next(schdlr_sspnd.msb.procs, plp);
- if (erts_proclist_same(tmp_plp, p)) {
- erts_proclist_remove(&schdlr_sspnd.msb.procs, tmp_plp);
- proclist_destroy(tmp_plp);
- if (!all)
- break;
+ if (p->flags & have_blckd_flg) {
+ ErtsProcList **plpps[3] = {0};
+ ErtsProcList *plp;
+
+ plpps[0] = &msbp->blckrs;
+ if (all)
+ plpps[1] = &msbp->chngq;
+
+ for (ix = 0; plpps[ix]; ix++) {
+ plp = erts_proclist_peek_first(*plpps[ix]);
+ while (plp) {
+ ErtsProcList *tmp_plp = plp;
+ plp = erts_proclist_peek_next(*plpps[ix], plp);
+ if (erts_proclist_same(tmp_plp, p)) {
+ erts_proclist_remove(plpps[ix], tmp_plp);
+ proclist_destroy(tmp_plp);
+ if (!all)
+ break;
+ }
}
}
}
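/*
 * The unblock loop above advances the cursor with peek_next() before
 * possibly unlinking the current element, so a removal never
 * invalidates the iterator. A minimal sketch of the same idiom on a
 * plain singly linked list (hypothetical types; not the ErtsProcList
 * API):
 */
#include <stdlib.h>

struct node { struct node *next; int key; };

static void remove_matching(struct node **headp, int key)
{
    struct node **prevp = headp;
    struct node *cur = *headp;
    while (cur) {
        struct node *next = cur->next;     /* advance first... */
        if (cur->key == key) {
            *prevp = next;                 /* ...then unlink safely */
            free(cur);
        }
        else
            prevp = &cur->next;
        cur = next;
    }
}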
- if (schdlr_sspnd.msb.procs)
- res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
- else {
- ERTS_SCHDLR_SSPND_CHNG_SET(ERTS_SCHDLR_SSPND_CHNG_MSB, 0);
- p->flags &= ~F_HAVE_BLCKD_MSCHED;
- schdlr_sspnd.msb.ongoing = 0;
- if (schdlr_sspnd.online == 1) {
- /* No normal schedulers to resume */
- ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1);
- ERTS_SCHDLR_SSPND_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_MSB);
- }
- else {
- online = schdlr_sspnd.online;
- if (plocks) {
+ if (!msbp->blckrs && !msbp->chngq) {
+ int online;
+ erts_atomic32_read_bor_nob(&schdlr_sspnd.changing,
+ chng_flg);
+ p->flags &= ~have_blckd_flg;
+ msbp->ongoing = 0;
+ if (!(schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing)) {
+ if (plocks) {
have_unlocked_plocks = 1;
- erts_smp_proc_unlock(p, plocks);
+ erts_proc_unlock(p, plocks);
}
+ online = (int) schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
+ ERTS_SCHED_NORMAL);
change_no_used_runqs(online);
/* Resume all online run queues */
@@ -7489,84 +8156,86 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
for (ix = online; ix < erts_no_run_queues; ix++)
suspend_run_queue(ERTS_RUNQ_IX(ix));
}
-#ifdef ERTS_DIRTY_SCHEDULERS
- ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(ERTS_SCHDLR_SSPND_CHNG_MSB, 0);
- schdlr_sspnd.msb.dirty_cpu_wait_active = schdlr_sspnd.dirty_cpu_online;
- for (ix = 0; ix < schdlr_sspnd.dirty_cpu_online; ix++) {
- ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
- scheduler_ssi_resume_wake(ssi);
- erts_smp_atomic32_read_band_nob(&ssi->flags,
- ~ERTS_SSI_FLG_SUSPENDED);
- }
- wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0);
-
- ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(ERTS_SCHDLR_SSPND_CHNG_MSB, 0);
- schdlr_sspnd.msb.dirty_io_wait_active = erts_no_dirty_io_schedulers;
- for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
- ssi = ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix);
- scheduler_ssi_resume_wake(ssi);
- erts_smp_atomic32_read_band_nob(&ssi->flags,
- ~ERTS_SSI_FLG_SUSPENDED);
+ if (!schdlr_sspnd.msb.ongoing) {
+ /* Get rid of msb-exec flag in run-queue of scheduler 1 */
+ resume_run_queue(ERTS_RUNQ_IX(0));
+ online = (int) schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
+ ERTS_SCHED_DIRTY_CPU);
+ for (ix = 0; ix < online; ix++)
+ dcpu_sched_ix_resume_wake(ix);
+ for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++)
+ dio_sched_ix_resume_wake(ix);
}
- wake_dirty_schedulers(ERTS_DIRTY_IO_RUNQ, 0);
-#endif
- res = ERTS_SCHDLR_SSPND_DONE;
}
+
+ unblock_res:
+ if (schdlr_sspnd.msb.ongoing)
+ res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
+ else if (schdlr_sspnd.nmsb.ongoing)
+ res = ERTS_SCHDLR_SSPND_DONE_NMSCHED_BLOCKED;
+ else
+ res = ERTS_SCHDLR_SSPND_DONE;
}
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ erts_mtx_unlock(&schdlr_sspnd.mtx);
+
if (have_unlocked_plocks)
- erts_smp_proc_lock(p, plocks);
- return res;
-}
+ erts_proc_lock(p, plocks);
-#ifdef DEBUG
-void
-erts_dbg_multi_scheduling_return_trap(Process *p, Eterm return_value)
-{
- if (return_value == am_blocked) {
- erts_aint32_t active = erts_smp_atomic32_read_nob(&schdlr_sspnd.active);
- ASSERT(1 <= active && active <= 2);
- ASSERT(ERTS_PROC_GET_SCHDATA(p)->no == 1);
+ if (resume_proc) {
+ if (!(plocks & ERTS_PROC_LOCK_STATUS))
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ resume_process(p, plocks|ERTS_PROC_LOCK_STATUS);
+ if (!(plocks & ERTS_PROC_LOCK_STATUS))
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
}
+
+ return res;
}
-#endif
int
erts_is_multi_scheduling_blocked(void)
{
int res;
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- res = schdlr_sspnd.msb.procs != NULL;
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ erts_mtx_lock(&schdlr_sspnd.mtx);
+ if (schdlr_sspnd.msb.blckrs)
+ res = 1;
+ else if (schdlr_sspnd.nmsb.blckrs)
+ res = -1;
+ else
+ res = 0;
+ erts_mtx_unlock(&schdlr_sspnd.mtx);
return res;
}
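/*
 * The function now packs three states into one int: 1 when full
 * multi-scheduling blocking is ongoing, -1 when only normal schedulers
 * are blocked, and 0 otherwise. A hypothetical caller could decode it
 * as below (the strings mirror the system_info(multi_scheduling)
 * atoms, which is an assumption, not something this patch shows):
 */
static const char *msb_state_str(int res)
{
    if (res > 0)
        return "blocked";           /* normal and dirty blocked */
    if (res < 0)
        return "blocked_normal";    /* only normal blocked */
    return "enabled";
}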
Eterm
-erts_multi_scheduling_blockers(Process *p)
+erts_multi_scheduling_blockers(Process *p, int normal)
{
Eterm res = NIL;
+ ErtsMultiSchedulingBlock *msbp;
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- if (!erts_proclist_is_empty(schdlr_sspnd.msb.procs)) {
+ msbp = normal ? &schdlr_sspnd.nmsb : &schdlr_sspnd.msb;
+
+ erts_mtx_lock(&schdlr_sspnd.mtx);
+ if (!erts_proclist_is_empty(msbp->blckrs)) {
Eterm *hp, *hp_end;
ErtsProcList *plp1, *plp2;
Uint max_size = 0;
- for (plp1 = erts_proclist_peek_first(schdlr_sspnd.msb.procs);
+ for (plp1 = erts_proclist_peek_first(msbp->blckrs);
plp1;
- plp1 = erts_proclist_peek_next(schdlr_sspnd.msb.procs, plp1)) {
+ plp1 = erts_proclist_peek_next(msbp->blckrs, plp1)) {
max_size += 2;
}
ASSERT(max_size);
hp = HAlloc(p, max_size);
hp_end = hp + max_size;
- for (plp1 = erts_proclist_peek_first(schdlr_sspnd.msb.procs);
+ for (plp1 = erts_proclist_peek_first(msbp->blckrs);
plp1;
- plp1 = erts_proclist_peek_next(schdlr_sspnd.msb.procs, plp1)) {
- for (plp2 = erts_proclist_peek_first(schdlr_sspnd.msb.procs);
+ plp1 = erts_proclist_peek_next(msbp->blckrs, plp1)) {
+ for (plp2 = erts_proclist_peek_first(msbp->blckrs);
plp2->pid != plp1->pid;
- plp2 = erts_proclist_peek_next(schdlr_sspnd.msb.procs, plp2));
+ plp2 = erts_proclist_peek_next(msbp->blckrs, plp2));
if (plp2 == plp1) {
res = CONS(hp, plp1->pid, res);
hp += 2;
@@ -7575,7 +8244,7 @@ erts_multi_scheduling_blockers(Process *p)
}
HRelease(p, hp_end, hp);
}
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ erts_mtx_unlock(&schdlr_sspnd.mtx);
return res;
}
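/*
 * The nested proclist scan above reports a pid only when the inner
 * walk stops at the element itself, i.e. at its first occurrence: an
 * O(n^2) de-duplication that needs no extra memory. The same idea on
 * an int array (illustrative sketch):
 */
static size_t dedup_first(const int *in, size_t n, int *out)
{
    size_t i, j, m = 0;
    for (i = 0; i < n; i++) {
        for (j = 0; in[j] != in[i]; j++)
            ;                        /* stops at first occurrence */
        if (j == i)
            out[m++] = in[i];        /* i *is* the first occurrence */
    }
    return m;
}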
@@ -7585,17 +8254,30 @@ sched_thread_func(void *vesdp)
ErtsThrPrgrCallbacks callbacks;
ErtsSchedulerData *esdp = vesdp;
Uint no = esdp->no;
-#ifdef ERTS_SMP
- ERTS_SCHED_SLEEP_INFO_IX(no - 1)->event = erts_tse_fetch();
+ erts_tse_t *tse;
+
+ erts_port_task_pre_alloc_init_thread();
+ erts_sched_init_time_sup(esdp);
+
+ if (no == 1)
+ erts_aux_work_timeout_late_init(esdp);
+
+ (void) ERTS_RUNQ_FLGS_SET_NOB(esdp->run_queue,
+ ERTS_RUNQ_FLG_EXEC);
+
+ tse = erts_tse_fetch();
+ erts_tse_prepare_timed(tse);
+ ERTS_SCHED_SLEEP_INFO_IX(no - 1)->event = tse;
callbacks.arg = (void *) esdp->ssi;
callbacks.wakeup = thr_prgr_wakeup;
callbacks.prepare_wait = thr_prgr_prep_wait;
callbacks.wait = thr_prgr_wait;
callbacks.finalize_wait = thr_prgr_fin_wait;
+ erts_msacc_init_thread("scheduler", no, 1);
+
erts_thr_progress_register_managed_thread(esdp, &callbacks, 0);
erts_alloc_register_scheduler(vesdp);
-#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
{
char buf[31];
@@ -7604,57 +8286,20 @@ sched_thread_func(void *vesdp)
}
#endif
erts_tsd_set(sched_data_key, vesdp);
-#ifdef ERTS_SMP
#if HAVE_ERTS_MSEG
erts_mseg_late_init();
#endif
-#if ERTS_USE_ASYNC_READY_Q
esdp->aux_work_data.async_ready.queue = erts_get_async_ready_queue(no);
-#endif
erts_sched_init_check_cpu_bind(esdp);
erts_proc_lock_prepare_proc_lock_waiter();
-#endif
#ifdef HIPE
hipe_thread_signal_init();
#endif
erts_thread_init_float();
- if (no == 1) {
- erts_thr_progress_active(esdp, 0);
- erts_thr_progress_prepare_wait(esdp);
- }
-
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
-
- ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.changing)
- & ERTS_SCHDLR_SSPND_CHNG_ONLN);
-
- if (--schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online) {
- erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
- if (no != 1)
-#ifdef ERTS_DIRTY_SCHEDULERS
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
-#else
- erts_smp_cnd_signal(&schdlr_sspnd.cnd);
-#endif
- }
-
- if (no == 1) {
- while (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online)
- erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
- ERTS_SCHDLR_SSPND_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
- }
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
-
- if (no == 1) {
- erts_thr_progress_finalize_wait(esdp);
- erts_thr_progress_active(esdp, 1);
- }
-
#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC
esdp->verify_unused_temp_alloc
= erts_alloc_get_verify_unused_temp_alloc(
@@ -7662,23 +8307,23 @@ sched_thread_func(void *vesdp)
ERTS_VERIFY_UNUSED_TEMP_ALLOC(NULL);
#endif
- process_main();
+ erts_ets_sched_spec_data_init(esdp);
+
+ process_main(esdp->x_reg_array, esdp->f_reg_array);
+
/* No schedulers should *ever* terminate */
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"Scheduler thread number %beu terminated\n",
no);
return NULL;
}
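/*
 * sched_thread_func() follows a fixed bring-up order: per-thread init
 * (time, allocators, msacc), sleep-event fetch, thread-progress
 * registration, thread-specific data, then a loop that must never
 * return. A stripped-down pthread skeleton of that shape (all names
 * hypothetical; only the ordering is taken from the code above):
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_key_t sched_key;     /* assumed created at VM boot */

static void *sched_skeleton(void *arg)
{
    /* 1. per-thread init (timers, allocators, ...) would go here */
    /* 2. register with thread-progress / lock checker            */
    /* 3. publish this thread's scheduler data                    */
    pthread_setspecific(sched_key, arg);
    /* 4. run the scheduling loop; it must never return           */
    for (;;) {
        /* pick work, execute, sleep when idle ...                */
    }
    /* unreachable; returning would mirror erts_exit(ABORT) above */
    abort();
}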
-#ifdef ERTS_DIRTY_SCHEDULERS
-#ifdef ERTS_SMP
static void*
sched_dirty_cpu_thread_func(void *vesdp)
{
ErtsThrPrgrCallbacks callbacks;
ErtsSchedulerData *esdp = vesdp;
- Uint no = ERTS_DIRTY_SCHEDULER_NO(esdp);
- ERTS_DIRTY_SCHEDULER_TYPE(esdp) = ERTS_DIRTY_CPU_SCHEDULER;
+ Uint no = esdp->dirty_no;
ASSERT(no != 0);
ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(no-1)->event = erts_tse_fetch();
callbacks.arg = (void *) esdp->ssi;
@@ -7687,6 +8332,12 @@ sched_dirty_cpu_thread_func(void *vesdp)
callbacks.wait = NULL;
callbacks.finalize_wait = NULL;
+ dirty_sched_wall_time_change(esdp, 1);
+
+ esdp->thr_id += erts_no_schedulers;
+
+ erts_msacc_init_thread("dirty_cpu_scheduler", no, 0);
+
erts_thr_progress_register_unmanaged_thread(&callbacks);
#ifdef ERTS_ENABLE_LOCK_CHECK
{
@@ -7696,9 +8347,7 @@ sched_dirty_cpu_thread_func(void *vesdp)
}
#endif
erts_tsd_set(sched_data_key, vesdp);
-#if ERTS_USE_ASYNC_READY_Q
esdp->aux_work_data.async_ready.queue = NULL;
-#endif
erts_proc_lock_prepare_proc_lock_waiter();
@@ -7707,27 +8356,9 @@ sched_dirty_cpu_thread_func(void *vesdp)
#endif
erts_thread_init_float();
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_changing)
- & ERTS_SCHDLR_SSPND_CHNG_ONLN);
-
- if (--schdlr_sspnd.dirty_cpu_curr_online == schdlr_sspnd.dirty_cpu_wait_curr_online) {
- erts_smp_atomic32_read_band_nob(&schdlr_sspnd.dirty_cpu_changing,
- ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
- if (no != 1)
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
- }
-
- if (no == 1) {
- while (schdlr_sspnd.dirty_cpu_curr_online != schdlr_sspnd.dirty_cpu_wait_curr_online)
- erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
- ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
- }
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
-
- process_main();
+ erts_dirty_process_main(esdp);
/* No schedulers should *ever* terminate */
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"Dirty CPU scheduler thread number %beu terminated\n",
no);
return NULL;
@@ -7738,8 +8369,7 @@ sched_dirty_io_thread_func(void *vesdp)
{
ErtsThrPrgrCallbacks callbacks;
ErtsSchedulerData *esdp = vesdp;
- Uint no = ERTS_DIRTY_SCHEDULER_NO(esdp);
- ERTS_DIRTY_SCHEDULER_TYPE(esdp) = ERTS_DIRTY_IO_SCHEDULER;
+ Uint no = esdp->dirty_no;
ASSERT(no != 0);
ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(no-1)->event = erts_tse_fetch();
callbacks.arg = (void *) esdp->ssi;
@@ -7748,6 +8378,12 @@ sched_dirty_io_thread_func(void *vesdp)
callbacks.wait = NULL;
callbacks.finalize_wait = NULL;
+ dirty_sched_wall_time_change(esdp, 1);
+
+ esdp->thr_id += erts_no_schedulers + erts_no_dirty_cpu_schedulers;
+
+ erts_msacc_init_thread("dirty_io_scheduler", no, 0);
+
erts_thr_progress_register_unmanaged_thread(&callbacks);
#ifdef ERTS_ENABLE_LOCK_CHECK
{
@@ -7757,9 +8393,7 @@ sched_dirty_io_thread_func(void *vesdp)
}
#endif
erts_tsd_set(sched_data_key, vesdp);
-#if ERTS_USE_ASYNC_READY_Q
esdp->aux_work_data.async_ready.queue = NULL;
-#endif
erts_proc_lock_prepare_proc_lock_waiter();
@@ -7768,68 +8402,45 @@ sched_dirty_io_thread_func(void *vesdp)
#endif
erts_thread_init_float();
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_changing)
- & ERTS_SCHDLR_SSPND_CHNG_ONLN);
-
- if (--schdlr_sspnd.dirty_io_curr_online == schdlr_sspnd.dirty_io_wait_curr_online) {
- erts_smp_atomic32_read_band_nob(&schdlr_sspnd.dirty_io_changing,
- ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
- if (no != 1)
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
- }
-
- if (no == 1) {
- while (schdlr_sspnd.dirty_io_curr_online != schdlr_sspnd.dirty_io_wait_curr_online)
- erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
- ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
- }
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
-
- process_main();
+ erts_dirty_process_main(esdp);
/* No schedulers should *ever* terminate */
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"Dirty I/O scheduler thread number %beu terminated\n",
no);
return NULL;
}
-#endif
-#endif
-
-static ethr_tid aux_tid;
void
erts_start_schedulers(void)
{
+ ethr_tid tid;
int res = 0;
Uint actual;
Uint wanted = erts_no_schedulers;
Uint wanted_no_schedulers = erts_no_schedulers;
+ char name[16];
ethr_thr_opts opts = ETHR_THR_OPTS_DEFAULT_INITER;
+ int ix;
opts.detached = 1;
-#ifdef ETHR_HAVE_THREAD_NAMES
- opts.name = malloc(80);
-#endif
+ opts.name = name;
-#ifdef ERTS_SMP
if (erts_runq_supervision_interval) {
opts.suggested_stack_size = 16;
-#ifdef ETHR_HAVE_THREAD_NAMES
- sprintf(opts.name, "runq_supervisor");
-#endif
+ erts_snprintf(opts.name, 16, "runq_supervisor");
erts_atomic_init_nob(&runq_supervisor_sleeping, 0);
if (0 != ethr_event_init(&runq_supervision_event))
- erl_exit(1, "Failed to create run-queue supervision event\n");
- if (0 != ethr_thr_create(&runq_supervisor_tid,
- runq_supervisor,
- NULL,
- &opts))
- erl_exit(1, "Failed to create run-queue supervision thread\n");
+ erts_exit(ERTS_ERROR_EXIT, "Failed to create run-queue supervision event\n");
+ res = ethr_thr_create(&runq_supervisor_tid,
+ runq_supervisor,
+ NULL,
+ &opts);
+ if (0 != res)
+ erts_exit(ERTS_ERROR_EXIT, "Failed to create run-queue supervision thread, "
+ "error = %d\n", res);
}
-#endif
opts.suggested_stack_size = erts_sched_thread_suggested_stack_size;
@@ -7845,14 +8456,7 @@ erts_start_schedulers(void)
ASSERT(actual == esdp->no - 1);
-#ifdef ETHR_HAVE_THREAD_NAMES
- sprintf(opts.name, "scheduler_%d", actual + 1);
-#endif
-
-#ifdef __OSE__
- /* This should be done in the bind strategy */
- opts.coreNo = (actual+1) % ose_num_cpus();
-#endif
+ erts_snprintf(opts.name, 16, "%lu_scheduler", actual + 1);
res = ethr_thr_create(&esdp->tid, sched_thread_func, (void*)esdp, &opts);
@@ -7860,51 +8464,45 @@ erts_start_schedulers(void)
break;
}
}
-
erts_no_schedulers = actual;
-#ifdef ERTS_DIRTY_SCHEDULERS
-#ifdef ERTS_SMP
{
- int ix;
for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) {
ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix);
-#ifdef ETHR_HAVE_THREAD_NAMES
- sprintf(opts.name,"dirty_cpu_scheduler_%d", ix + 1);
-#endif
+ erts_snprintf(opts.name, 16, "%d_dirty_cpu_scheduler", ix + 1);
+ opts.suggested_stack_size = erts_dcpu_sched_thread_suggested_stack_size;
res = ethr_thr_create(&esdp->tid,sched_dirty_cpu_thread_func,(void*)esdp,&opts);
if (res != 0)
- erl_exit(1, "Failed to create dirty cpu scheduler thread %d\n", ix);
+ erts_exit(ERTS_ERROR_EXIT, "Failed to create dirty cpu scheduler thread %d, error = %d\n", ix, res);
}
for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix);
-#ifdef ETHR_HAVE_THREAD_NAMES
- sprintf(opts.name,"dirty_io_scheduler_%d", ix + 1);
-#endif
+ erts_snprintf(opts.name, 16, "%d_dirty_io_scheduler", ix + 1);
+ opts.suggested_stack_size = erts_dio_sched_thread_suggested_stack_size;
res = ethr_thr_create(&esdp->tid,sched_dirty_io_thread_func,(void*)esdp,&opts);
if (res != 0)
- erl_exit(1, "Failed to create dirty io scheduler thread %d\n", ix);
+ erts_exit(ERTS_ERROR_EXIT, "Failed to create dirty io scheduler thread %d, error = %d\n", ix, res);
}
}
-#endif
-#endif
ERTS_THR_MEMORY_BARRIER;
-#ifdef ETHR_HAVE_THREAD_NAMES
- sprintf(opts.name, "aux");
-#endif
-
-#ifdef __OSE__
- opts.coreNo = 0;
-#endif /* __OSE__ */
+ erts_snprintf(opts.name, 16, "aux");
- res = ethr_thr_create(&aux_tid, aux_thread, NULL, &opts);
+ res = ethr_thr_create(&tid, aux_thread, NULL, &opts);
if (res != 0)
- erl_exit(1, "Failed to create aux thread\n");
+ erts_exit(ERTS_ERROR_EXIT, "Failed to create aux thread, error = %d\n", res);
+
+ for (ix = 0; ix < erts_no_poll_threads; ix++) {
+ erts_snprintf(opts.name, 16, "%d_poller", ix);
+
+ res = ethr_thr_create(&tid, poll_thread, (void*)(UWord)ix, &opts);
+ if (res != 0)
+ erts_exit(ERTS_ERROR_EXIT, "Failed to create poll thread\n");
+ }
if (actual < 1)
- erl_exit(1,
+ erts_exit(ERTS_ERROR_EXIT,
"Failed to create any scheduler-threads: %s (%d)\n",
erl_errno_id(res),
res);
@@ -7918,15 +8516,9 @@ erts_start_schedulers(void)
actual, actual == 1 ? " was" : "s were");
erts_send_error_to_logger_nogl(dsbufp);
}
-
-#ifdef ETHR_HAVE_THREAD_NAMES
- free(opts.name);
-#endif
}
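/*
 * All thread names are rendered into a 16-byte buffer; on Linux,
 * pthread thread names are limited to 16 bytes including the
 * terminating NUL, which is presumably why "char name[16]" suffices.
 * A hedged sketch of creating one named thread with plain pthreads
 * (not the ethr wrappers used above):
 */
#define _GNU_SOURCE             /* for pthread_setname_np on Linux */
#include <pthread.h>
#include <stdio.h>

static int start_named(pthread_t *tid, void *(*fn)(void *),
                       void *arg, unsigned ix)
{
    char name[16];
    int res = pthread_create(tid, NULL, fn, arg);
    if (res != 0)
        return res;
    snprintf(name, sizeof(name), "%u_scheduler", ix + 1);
#if defined(__linux__)
    (void) pthread_setname_np(*tid, name);  /* best effort */
#endif
    return 0;
}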
-#endif /* ERTS_SMP */
-#ifdef ERTS_SMP
static void
add_pend_suspend(Process *suspendee,
@@ -7940,7 +8532,7 @@ add_pend_suspend(Process *suspendee,
sizeof(ErtsPendingSuspend));
psp->next = NULL;
#ifdef DEBUG
-#if defined(ARCH_64) && !HALFWORD_HEAP
+#if defined(ARCH_64)
psp->end = (ErtsPendingSuspend *) 0xdeaddeaddeaddead;
#else
psp->end = (ErtsPendingSuspend *) 0xdeaddead;
@@ -7962,7 +8554,7 @@ handle_pending_suspend(Process *p, ErtsProcLocks p_locks)
ErtsPendingSuspend *psp;
int is_alive = !ERTS_PROC_IS_EXITING(p);
- ERTS_SMP_LC_ASSERT(p_locks & ERTS_PROC_LOCK_STATUS);
+ ERTS_LC_ASSERT(p_locks & ERTS_PROC_LOCK_STATUS);
/*
* New pending suspenders might appear while we are processing
@@ -7988,15 +8580,15 @@ cancel_suspend_of_suspendee(Process *p, ErtsProcLocks p_locks)
if (is_not_nil(p->suspendee)) {
Process *rp;
if (!(p_locks & ERTS_PROC_LOCK_STATUS))
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
rp = erts_pid2proc(p, p_locks|ERTS_PROC_LOCK_STATUS,
p->suspendee, ERTS_PROC_LOCK_STATUS);
if (rp) {
erts_resume(rp, ERTS_PROC_LOCK_STATUS);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
}
if (!(p_locks & ERTS_PROC_LOCK_STATUS))
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
p->suspendee = NIL;
}
}
@@ -8009,7 +8601,7 @@ handle_pend_sync_suspend(Process *suspendee,
{
Process *suspender;
- ERTS_SMP_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS);
+ ERTS_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS);
suspender = erts_pid2proc(suspendee,
suspendee_locks,
@@ -8023,8 +8615,9 @@ handle_pend_sync_suspend(Process *suspendee,
}
/* suspender is suspended waiting for suspendee to suspend;
resume suspender */
- resume_process(suspender);
- erts_smp_proc_unlock(suspender, ERTS_PROC_LOCK_STATUS);
+ ASSERT(suspendee != suspender);
+ resume_process(suspender, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(suspender, ERTS_PROC_LOCK_STATUS);
}
}
@@ -8035,10 +8628,10 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
Process *rp;
int unlock_c_p_status;
- ERTS_SMP_LC_ASSERT(c_p_locks == erts_proc_lc_my_proc_locks(c_p));
+ ERTS_LC_ASSERT(c_p_locks == erts_proc_lc_my_proc_locks(c_p));
- ERTS_SMP_LC_ASSERT(c_p_locks & ERTS_PROC_LOCK_MAIN);
- ERTS_SMP_LC_ASSERT(pid_locks & (ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS));
+ ERTS_LC_ASSERT(c_p_locks & ERTS_PROC_LOCK_MAIN);
+ ERTS_LC_ASSERT(pid_locks & (ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS));
if (c_p->common.id == pid)
return erts_pid2proc(c_p, c_p_locks, pid, pid_locks);
@@ -8047,7 +8640,7 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
unlock_c_p_status = 0;
else {
unlock_c_p_status = 1;
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
}
if (c_p->suspendee == pid) {
@@ -8058,12 +8651,11 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
ASSERT(c_p->flags & F_P2PNR_RESCHED);
c_p->flags &= ~F_P2PNR_RESCHED;
if (!suspend && rp)
- resume_process(rp);
+ resume_process(rp, rp_locks);
}
else {
-
rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS,
- pid, pid_locks|ERTS_PROC_LOCK_STATUS);
+ pid, ERTS_PROC_LOCK_STATUS);
if (!rp) {
c_p->flags &= ~F_P2PNR_RESCHED;
@@ -8072,44 +8664,106 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
ASSERT(!(c_p->flags & F_P2PNR_RESCHED));
- if (suspend) {
- if (suspend_process(c_p, rp))
- goto done;
- }
- else {
- if (!((ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)
- & erts_smp_atomic32_read_acqb(&rp->state)))
+ /*
+ * Suspend the other process in order to prevent
+ * it from being selected for normal execution.
+ * This will, however, not prevent it from being
+ * selected for execution of a system task. If it
+ * is selected for such execution, we might be
+ * blocked for quite a while should the try-lock
+ * below fail. That is, there is room for
+ * improvement here...
+ */
+
+ if (!suspend_process(c_p, rp)) {
+ /* Other process running */
+
+ ASSERT((ERTS_PSFLG_RUNNING | ERTS_PSFLG_DIRTY_RUNNING)
+ & erts_atomic32_read_nob(&rp->state));
+
+ if (!suspend
+ && (erts_atomic32_read_nob(&rp->state)
+ & ERTS_PSFLG_DIRTY_RUNNING)) {
+ ErtsProcLocks need_locks = pid_locks & ~ERTS_PROC_LOCK_STATUS;
+ if (need_locks && erts_proc_trylock(rp, need_locks) == EBUSY) {
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+ rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS,
+ pid, pid_locks|ERTS_PROC_LOCK_STATUS);
+ }
goto done;
+ }
- }
+ running:
- /* Other process running */
+ /*
+ * If we have pending suspenders and suspend ourselves
+ * while waiting to suspend another process, we might
+ * deadlock. In that case we have to yield, be suspended
+ * by someone else, and then do it all over again.
+ */
+ if (!c_p->pending_suspenders) {
+ /* Mark rp pending for suspend by c_p */
+ add_pend_suspend(rp, c_p->common.id, handle_pend_sync_suspend);
+ ASSERT(is_nil(c_p->suspendee));
- /*
- * If we got pending suspenders and suspend ourselves waiting
- * to suspend another process we might deadlock.
- * In this case we have to yield, be suspended by
- * someone else and then do it all over again.
- */
- if (!c_p->pending_suspenders) {
- /* Mark rp pending for suspend by c_p */
- add_pend_suspend(rp, c_p->common.id, handle_pend_sync_suspend);
- ASSERT(is_nil(c_p->suspendee));
+ /* Suspend c_p; when rp is suspended c_p will be resumed. */
+ suspend_process(c_p, c_p);
+ c_p->flags |= F_P2PNR_RESCHED;
+ }
+ /* Yield (caller is assumed to yield immediately in bif). */
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+ rp = ERTS_PROC_LOCK_BUSY;
+ }
+ else {
+ ErtsProcLocks need_locks = pid_locks & ~ERTS_PROC_LOCK_STATUS;
+ if (need_locks && erts_proc_trylock(rp, need_locks) == EBUSY) {
+ if ((ERTS_PSFLG_RUNNING_SYS|ERTS_PSFLG_DIRTY_RUNNING_SYS)
+ & erts_atomic32_read_nob(&rp->state)) {
+ /* Executing system task... */
+ resume_process(rp, ERTS_PROC_LOCK_STATUS);
+ goto running;
+ }
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+ /*
+ * If we are unlucky, the process just got selected for
+ * execution of a system task. In this case we may be
+ * blocked here for quite a while... Execution of system
+ * tasks is fortunately quite a rare event. We try to
+ * avoid this by checking if it is in a state executing
+ * system tasks (above), but that will not prevent all
+ * scenarios that lead to a long block here...
+ */
+ rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS,
+ pid, pid_locks|ERTS_PROC_LOCK_STATUS);
+ if (!rp)
+ goto done;
+ }
- /* Suspend c_p; when rp is suspended c_p will be resumed. */
- suspend_process(c_p, c_p);
- c_p->flags |= F_P2PNR_RESCHED;
+ /*
+ * The previous suspend has prevented the process
+ * from being selected for normal execution regardless
+ * of locks held or not held on it...
+ */
+#ifdef DEBUG
+ {
+ erts_aint32_t state;
+ state = erts_atomic32_read_nob(&rp->state);
+ ASSERT((state & ERTS_PSFLG_PENDING_EXIT)
+ || !(state & ERTS_PSFLG_RUNNING));
+ }
+#endif
+
+ if (!suspend)
+ resume_process(rp, pid_locks|ERTS_PROC_LOCK_STATUS);
}
- /* Yield (caller is assumed to yield immediately in bif). */
- erts_smp_proc_unlock(rp, pid_locks|ERTS_PROC_LOCK_STATUS);
- rp = ERTS_PROC_LOCK_BUSY;
}
done:
+
if (rp && rp != ERTS_PROC_LOCK_BUSY && !(pid_locks & ERTS_PROC_LOCK_STATUS))
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
if (unlock_c_p_status)
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
return rp;
}
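/*
 * The trylock dance above is the standard deadlock-avoidance pattern:
 * try to grab the extra locks while still holding one; on EBUSY, drop
 * what you hold and reacquire everything in the canonical lock order.
 * A minimal pthread sketch (the a-before-b order is the assumption):
 */
#include <errno.h>
#include <pthread.h>

/* Canonical order: lock `a` before `b`. Caller holds `b`, wants `a`. */
static void lock_both_holding_b(pthread_mutex_t *a, pthread_mutex_t *b)
{
    if (pthread_mutex_trylock(a) == EBUSY) {
        pthread_mutex_unlock(b);   /* back out of the wrong order */
        pthread_mutex_lock(a);     /* reacquire in canonical order */
        pthread_mutex_lock(b);
    }
}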
@@ -8132,17 +8786,6 @@ erts_pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
}
/*
- * Like erts_pid2proc_not_running(), but hands over the process
- * in a suspended state unless (c_p is looked up).
- */
-Process *
-erts_pid2proc_suspend(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm pid, ErtsProcLocks pid_locks)
-{
- return pid2proc_not_running(c_p, c_p_locks, pid, pid_locks, 1);
-}
-
-/*
* erts_pid2proc_nropt() is normally the same as
* erts_pid2proc_not_running(). However it is only
* to be used when 'not running' is a pure optimization,
@@ -8166,7 +8809,7 @@ do_bif_suspend_process(Process *c_p,
{
ASSERT(suspendee);
ASSERT(!ERTS_PROC_IS_EXITING(suspendee));
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS
& erts_proc_lc_my_proc_locks(suspendee));
if (smon) {
if (!smon->active) {
@@ -8189,7 +8832,7 @@ handle_pend_bif_sync_suspend(Process *suspendee,
{
Process *suspender;
- ERTS_SMP_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS);
+ ERTS_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS);
suspender = erts_pid2proc(suspendee,
suspendee_locks,
@@ -8216,8 +8859,9 @@ handle_pend_bif_sync_suspend(Process *suspendee,
}
/* suspender is suspended waiting for suspendee to suspend;
resume suspender */
- resume_process(suspender);
- erts_smp_proc_unlock(suspender,
+ ASSERT(suspender != suspendee);
+ resume_process(suspender, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(suspender,
ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
}
}
@@ -8231,7 +8875,7 @@ handle_pend_bif_async_suspend(Process *suspendee,
Process *suspender;
- ERTS_SMP_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS);
+ ERTS_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS);
suspender = erts_pid2proc(suspendee,
suspendee_locks,
@@ -8255,26 +8899,10 @@ handle_pend_bif_async_suspend(Process *suspendee,
do_bif_suspend_process(suspendee, smon, suspendee);
ASSERT(!smon || res != 0);
}
- erts_smp_proc_unlock(suspender, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(suspender, ERTS_PROC_LOCK_LINK);
}
}
-#else
-
-/*
- * Non-smp version of erts_pid2proc_suspend().
- */
-Process *
-erts_pid2proc_suspend(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm pid, ErtsProcLocks pid_locks)
-{
- Process *rp = erts_pid2proc(c_p, c_p_locks, pid, pid_locks);
- if (rp)
- erts_suspend(rp, pid_locks, NULL);
- return rp;
-}
-
-#endif /* ERTS_SMP */
/*
* The erlang:suspend_process/2 BIF
@@ -8323,7 +8951,7 @@ suspend_process_2(BIF_ALIST_2)
? (ErtsProcLocks) 0
: ERTS_PROC_LOCK_STATUS);
- erts_smp_proc_lock(BIF_P, xlocks);
+ erts_proc_lock(BIF_P, xlocks);
suspendee = erts_pid2proc(BIF_P,
ERTS_PROC_LOCK_MAIN|xlocks,
@@ -8334,34 +8962,15 @@ suspend_process_2(BIF_ALIST_2)
smon = erts_add_or_lookup_suspend_monitor(&BIF_P->suspend_monitors,
BIF_ARG_1);
-#ifndef ERTS_SMP /* no ERTS_SMP */
-
- /* This is really a piece of cake without SMP support... */
- if (!smon->active) {
- erts_smp_atomic32_read_bor_nob(&suspendee->state, ERTS_PSFLG_SUSPENDED);
- suspend_process(BIF_P, suspendee);
- smon->active++;
- res = am_true;
- }
- else if (unless_suspending)
- res = am_false;
- else if (smon->active == INT_MAX)
- goto system_limit;
- else {
- smon->active++;
- res = am_true;
- }
-
-#else /* ERTS_SMP */
/* This is a little trickier with SMP support ... */
if (asynchronous) {
/* --- Asynchronous suspend begin ---------------------------------- */
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_LINK
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_LINK
& erts_proc_lc_my_proc_locks(BIF_P));
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS
== erts_proc_lc_my_proc_locks(suspendee));
if (smon->active) {
@@ -8401,10 +9010,10 @@ suspend_process_2(BIF_ALIST_2)
else /* if (!asynchronous) */ {
/* --- Synchronous suspend begin ----------------------------------- */
- ERTS_SMP_LC_ASSERT(((ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS)
+ ERTS_LC_ASSERT(((ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS)
& erts_proc_lc_my_proc_locks(BIF_P))
== (ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS));
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS
== erts_proc_lc_my_proc_locks(suspendee));
if (BIF_P->suspendee == BIF_ARG_1) {
@@ -8470,10 +9079,9 @@ suspend_process_2(BIF_ALIST_2)
/* --- Synchronous suspend end ------------------------------------- */
}
-#endif /* ERTS_SMP */
#ifdef DEBUG
{
- erts_aint32_t state = erts_smp_atomic32_read_acqb(&suspendee->state);
+ erts_aint32_t state = erts_atomic32_read_acqb(&suspendee->state);
ASSERT((state & ERTS_PSFLG_SUSPENDED)
|| (asynchronous && smon->pending));
ASSERT((state & ERTS_PSFLG_SUSPENDED)
@@ -8481,8 +9089,8 @@ suspend_process_2(BIF_ALIST_2)
}
#endif
- erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
- erts_smp_proc_unlock(BIF_P, xlocks);
+ erts_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(BIF_P, xlocks);
BIF_RET(res);
system_limit:
@@ -8490,26 +9098,22 @@ suspend_process_2(BIF_ALIST_2)
goto do_return;
no_suspendee:
-#ifdef ERTS_SMP
BIF_P->suspendee = NIL;
-#endif
erts_delete_suspend_monitor(&BIF_P->suspend_monitors, BIF_ARG_1);
badarg:
ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
-#ifdef ERTS_SMP
goto do_return;
yield:
ERTS_BIF_PREP_YIELD2(res, bif_export[BIF_suspend_process_2],
BIF_P, BIF_ARG_1, BIF_ARG_2);
-#endif
do_return:
if (suspendee)
- erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
if (xlocks)
- erts_smp_proc_unlock(BIF_P, xlocks);
+ erts_proc_unlock(BIF_P, xlocks);
return res;
}
@@ -8529,7 +9133,7 @@ resume_process_1(BIF_ALIST_1)
if (BIF_P->common.id == BIF_ARG_1)
BIF_ERROR(BIF_P, BADARG);
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
smon = erts_lookup_suspend_monitor(BIF_P->suspend_monitors, BIF_ARG_1);
if (!smon) {
@@ -8575,16 +9179,17 @@ resume_process_1(BIF_ALIST_1)
goto no_suspendee;
ASSERT(ERTS_PSFLG_SUSPENDED
- & erts_smp_atomic32_read_nob(&suspendee->state));
- resume_process(suspendee);
+ & erts_atomic32_read_nob(&suspendee->state));
+ ASSERT(BIF_P != suspendee);
+ resume_process(suspendee, ERTS_PROC_LOCK_STATUS);
- erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
}
if (!smon->active && !smon->pending)
erts_delete_suspend_monitor(&BIF_P->suspend_monitors, BIF_ARG_1);
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
BIF_RET(am_true);
@@ -8593,74 +9198,148 @@ resume_process_1(BIF_ALIST_1)
erts_delete_suspend_monitor(&BIF_P->suspend_monitors, BIF_ARG_1);
error:
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
BIF_ERROR(BIF_P, BADARG);
}
+BIF_RETTYPE
+erts_internal_is_process_executing_dirty_1(BIF_ALIST_1)
+{
+ if (is_not_internal_pid(BIF_ARG_1))
+ BIF_ERROR(BIF_P, BADARG);
+ else {
+ Process *rp = erts_proc_lookup(BIF_ARG_1);
+ if (rp) {
+ erts_aint32_t state = erts_atomic32_read_nob(&rp->state);
+ if (state & (ERTS_PSFLG_DIRTY_RUNNING
+ |ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ BIF_RET(am_true);
+ }
+ }
+ }
+ BIF_RET(am_false);
+}
+
+static ERTS_INLINE void
+run_queues_len_aux(ErtsRunQueue *rq, Uint *tot_len, Uint *qlen, int *ip, int incl_active_sched, int locked)
+{
+ Sint rq_len;
+
+ if (locked)
+ rq_len = (Sint) erts_atomic32_read_dirty(&rq->len);
+ else
+ rq_len = (Sint) erts_atomic32_read_nob(&rq->len);
+ ASSERT(rq_len >= 0);
+
+ if (incl_active_sched) {
+ if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) {
+ erts_aint32_t dcnt;
+ if (ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(rq)) {
+ dcnt = erts_atomic32_read_nob(&dirty_count.cpu.active);
+ ASSERT(0 <= dcnt && dcnt <= erts_no_dirty_cpu_schedulers);
+ }
+ else {
+ ASSERT(ERTS_RUNQ_IS_DIRTY_IO_RUNQ(rq));
+ dcnt = erts_atomic32_read_nob(&dirty_count.io.active);
+ ASSERT(0 <= dcnt && dcnt <= erts_no_dirty_io_schedulers);
+ }
+ rq_len += (Sint) dcnt;
+ }
+ else
+ {
+ if (ERTS_RUNQ_FLGS_GET_NOB(rq) & ERTS_RUNQ_FLG_EXEC)
+ rq_len++;
+ }
+ }
+ if (qlen)
+ qlen[(*ip)++] = rq_len;
+ *tot_len += (Uint) rq_len;
+}
+
Uint
-erts_run_queues_len(Uint *qlen)
+erts_run_queues_len(Uint *qlen, int atomic_queues_read, int incl_active_sched,
+ int incl_dirty_io)
{
- int i = 0;
+ int i = 0, j = 0;
Uint len = 0;
- ERTS_ATOMIC_FOREACH_RUNQ(rq,
- {
- Sint pqlen = 0;
- int pix;
- for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++)
- pqlen += RUNQ_READ_LEN(&rq->procs.prio_info[pix].len);
+ int no_rqs = erts_no_run_queues;
+
+ if (incl_dirty_io)
+ no_rqs += ERTS_NUM_DIRTY_RUNQS;
+ else
+ no_rqs += ERTS_NUM_DIRTY_CPU_RUNQS;
+
+ if (atomic_queues_read) {
+ ERTS_ATOMIC_FOREACH_RUNQ_X(rq, no_rqs,
+ run_queues_len_aux(rq, &len, qlen, &j,
+ incl_active_sched, 1),
+ /* Nothing... */);
+ }
+ else {
+ for (i = 0; i < no_rqs; i++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(i);
+ run_queues_len_aux(rq, &len, qlen, &j, incl_active_sched, 0);
+ }
- if (pqlen < 0)
- pqlen = 0;
- if (qlen)
- qlen[i++] = pqlen;
- len += pqlen;
}
- );
return len;
}
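/*
 * A hypothetical caller of the extended interface, sized for normal
 * plus dirty run queues. The new flags select a locked (consistent)
 * versus cheap racy snapshot, and whether an actively executing
 * scheduler counts towards its queue's length:
 */
void sample_run_queue_lengths(void)
{
    int nq = erts_no_run_queues + ERTS_NUM_DIRTY_RUNQS;
    Uint total;
    Uint *qlen = erts_alloc(ERTS_ALC_T_TMP, nq * sizeof(Uint));

    total = erts_run_queues_len(qlen,
                                0,  /* racy but lock-free read */
                                1,  /* count executing schedulers */
                                1); /* include the dirty io queue */
    /* ... use total/qlen ... */
    erts_free(ERTS_ALC_T_TMP, (void *) qlen);
}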
Eterm
-erts_process_status(Process *c_p, ErtsProcLocks c_p_locks,
- Process *rp, Eterm rpid)
+erts_process_state2status(erts_aint32_t state)
+{
+ if (state & ERTS_PSFLG_FREE)
+ return am_free;
+
+ if (state & ERTS_PSFLG_EXITING)
+ return am_exiting;
+
+ if (state & ERTS_PSFLG_GC)
+ return am_garbage_collecting;
+
+ if (state & ERTS_PSFLG_SUSPENDED)
+ return am_suspended;
+
+ if (state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS))
+ return am_running;
+
+ if (state & (ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_DIRTY_ACTIVE_SYS))
+ return am_runnable;
+
+ return am_waiting;
+}
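/*
 * The chain above encodes a strict precedence: free > exiting >
 * garbage_collecting > suspended > running > runnable > waiting.
 * An equivalent table-driven formulation (a sketch, not the actual
 * implementation), where the first matching mask wins:
 */
static Eterm status_of(erts_aint32_t state)
{
    const struct { erts_aint32_t mask; Eterm atom; } tab[] = {
        { ERTS_PSFLG_FREE,      am_free },
        { ERTS_PSFLG_EXITING,   am_exiting },
        { ERTS_PSFLG_GC,        am_garbage_collecting },
        { ERTS_PSFLG_SUSPENDED, am_suspended },
        { ERTS_PSFLG_RUNNING | ERTS_PSFLG_RUNNING_SYS
          | ERTS_PSFLG_DIRTY_RUNNING
          | ERTS_PSFLG_DIRTY_RUNNING_SYS,  am_running },
        { ERTS_PSFLG_ACTIVE | ERTS_PSFLG_ACTIVE_SYS
          | ERTS_PSFLG_DIRTY_ACTIVE_SYS,   am_runnable },
    };
    size_t i;
    for (i = 0; i < sizeof(tab)/sizeof(tab[0]); i++)
        if (state & tab[i].mask)
            return tab[i].atom;
    return am_waiting;
}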
+
+Eterm
+erts_process_status(Process *rp, Eterm rpid)
{
Eterm res = am_undefined;
Process *p = rp ? rp : erts_proc_lookup_raw(rpid);
if (p) {
- erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state);
- if (state & ERTS_PSFLG_FREE)
- res = am_free;
- else if (state & ERTS_PSFLG_EXITING)
- res = am_exiting;
- else if (state & ERTS_PSFLG_GC)
- res = am_garbage_collecting;
- else if (state & ERTS_PSFLG_SUSPENDED)
- res = am_suspended;
- else if (state & ERTS_PSFLG_RUNNING)
- res = am_running;
- else if (state & ERTS_PSFLG_ACTIVE)
- res = am_runnable;
- else
- res = am_waiting;
+ erts_aint32_t state = erts_atomic32_read_acqb(&p->state);
+ res = erts_process_state2status(state);
}
-#ifdef ERTS_SMP
else {
int i;
ErtsSchedulerData *esdp;
for (i = 0; i < erts_no_schedulers; i++) {
esdp = ERTS_SCHEDULER_IX(i);
- erts_smp_runq_lock(esdp->run_queue);
+ erts_runq_lock(esdp->run_queue);
if (esdp->free_process
&& esdp->free_process->common.id == rpid) {
res = am_free;
- erts_smp_runq_unlock(esdp->run_queue);
+ erts_runq_unlock(esdp->run_queue);
break;
}
- erts_smp_runq_unlock(esdp->run_queue);
+ erts_runq_unlock(esdp->run_queue);
}
}
-#endif
return res;
}
@@ -8676,9 +9355,9 @@ erts_suspend(Process* c_p, ErtsProcLocks c_p_locks, Port *busy_port)
int suspend;
ASSERT(c_p == erts_get_current_process());
- ERTS_SMP_LC_ASSERT(c_p_locks == erts_proc_lc_my_proc_locks(c_p));
+ ERTS_LC_ASSERT(c_p_locks == erts_proc_lc_my_proc_locks(c_p));
if (!(c_p_locks & ERTS_PROC_LOCK_STATUS))
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
if (busy_port)
suspend = erts_save_suspend_process_on_port(busy_port, c_p);
@@ -8694,7 +9373,7 @@ erts_suspend(Process* c_p, ErtsProcLocks c_p_locks, Port *busy_port)
}
if (!(c_p_locks & ERTS_PROC_LOCK_STATUS))
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
if (suspend && busy_port && erts_system_monitor_flags.busy_port)
monitor_generic(c_p, am_busy_port, busy_port->common.id);
@@ -8703,12 +9382,12 @@ erts_suspend(Process* c_p, ErtsProcLocks c_p_locks, Port *busy_port)
void
erts_resume(Process* process, ErtsProcLocks process_locks)
{
- ERTS_SMP_LC_ASSERT(process_locks == erts_proc_lc_my_proc_locks(process));
+ ERTS_LC_ASSERT(process_locks == erts_proc_lc_my_proc_locks(process));
if (!(process_locks & ERTS_PROC_LOCK_STATUS))
- erts_smp_proc_lock(process, ERTS_PROC_LOCK_STATUS);
- resume_process(process);
+ erts_proc_lock(process, ERTS_PROC_LOCK_STATUS);
+ resume_process(process, process_locks|ERTS_PROC_LOCK_STATUS);
if (!(process_locks & ERTS_PROC_LOCK_STATUS))
- erts_smp_proc_unlock(process, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(process, ERTS_PROC_LOCK_STATUS);
}
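/*
 * Both helpers use the take-only-what-you-miss idiom: acquire the
 * status lock only if the caller does not already hold it, and
 * release only what was taken here. As a reusable sketch (a
 * hypothetical wrapper; not an existing erts helper):
 */
static void with_status_lock(Process *p, ErtsProcLocks held,
                             void (*fn)(Process *))
{
    int took = !(held & ERTS_PROC_LOCK_STATUS);
    if (took)
        erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
    fn(p);
    if (took)
        erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
}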
int
@@ -8725,10 +9404,10 @@ erts_resume_processes(ErtsProcList *list)
proc = erts_pid2proc(NULL, 0, plp->pid, ERTS_PROC_LOCK_STATUS);
if (proc) {
if (erts_proclist_same(plp, proc)) {
- resume_process(proc);
+ resume_process(proc, ERTS_PROC_LOCK_STATUS);
nresumed++;
}
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(proc, ERTS_PROC_LOCK_STATUS);
}
fplp = plp;
plp = plp->next;
@@ -8740,7 +9419,7 @@ erts_resume_processes(ErtsProcList *list)
Eterm
erts_get_process_priority(Process *p)
{
- erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
+ erts_aint32_t state = erts_atomic32_read_nob(&p->state);
switch (ERTS_PSFLGS_GET_USR_PRIO(state)) {
case PRIORITY_MAX: return am_max;
case PRIORITY_HIGH: return am_high;
@@ -8763,7 +9442,7 @@ erts_set_process_priority(Process *p, Eterm value)
default: return THE_NON_VALUE; break;
}
- a = erts_smp_atomic32_read_nob(&p->state);
+ a = erts_atomic32_read_nob(&p->state);
if (nprio == ERTS_PSFLGS_GET_USR_PRIO(a))
oprio = nprio;
else {
@@ -8771,7 +9450,7 @@ erts_set_process_priority(Process *p, Eterm value)
erts_aint32_t e, n, aprio;
if (a & ERTS_PSFLG_ACTIVE_SYS) {
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
slocked = 1;
}
@@ -8785,7 +9464,7 @@ erts_set_process_priority(Process *p, Eterm value)
int max_qbit;
if (!slocked) {
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
slocked = 1;
}
@@ -8826,8 +9505,12 @@ erts_set_process_priority(Process *p, Eterm value)
n |= ((nprio << ERTS_PSFLGS_USR_PRIO_OFFSET)
| (aprio << ERTS_PSFLGS_ACT_PRIO_OFFSET));
- a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
+ a = erts_atomic32_cmpxchg_mb(&p->state, n, e);
} while (a != e);
+
+ if (slocked)
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+
}
switch (oprio) {
@@ -8839,6 +9522,27 @@ erts_set_process_priority(Process *p, Eterm value)
}
}
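/*
 * The do/while above is the classic read-modify-CAS loop: derive the
 * new word from the last observed value and retry until the compare
 * and exchange confirms nobody interfered. A standalone C11 sketch of
 * the same shape (the field layout is hypothetical):
 */
#include <stdatomic.h>
#include <stdint.h>

static void set_prio_bits(_Atomic uint32_t *state, uint32_t nprio)
{
    uint32_t e = atomic_load_explicit(state, memory_order_relaxed);
    uint32_t n;
    do {
        /* replace the (hypothetical) 2-bit priority field */
        n = (e & ~(uint32_t) 0x3) | (nprio & 0x3);
    } while (!atomic_compare_exchange_weak_explicit(
                 state, &e, n,
                 memory_order_seq_cst,     /* like cmpxchg_mb above */
                 memory_order_relaxed));   /* e reloaded on failure */
}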
+#ifdef __WIN32__
+Sint64
+erts_time2reds(ErtsMonotonicTime start, ErtsMonotonicTime end)
+{
+ return ERTS_TIME2REDS_IMPL__(start, end);
+}
+#endif
+
+static int
+scheduler_gc_proc(Process *c_p, int reds_left)
+{
+ int fcalls, reds;
+ if (!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
+ fcalls = reds_left;
+ else
+ fcalls = reds_left - CONTEXT_REDS;
+ reds = erts_garbage_collect_nobump(c_p, 0, c_p->arg_reg, c_p->arity, fcalls);
+ ASSERT(reds_left >= reds);
+ return reds;
+}
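/*
 * The budget handed to the collector is shifted by CONTEXT_REDS when
 * call saving is active, mirroring how the reduction counter itself
 * runs offset in that mode; the caller then charges the returned
 * reductions against the schedule slice. Condensed (sketch only):
 */
static int gc_budget(int reds_left, int saved_calls_active)
{
    return saved_calls_active ? reds_left - CONTEXT_REDS : reds_left;
}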
+
/*
* schedule() is called from BEAM (process_main()) or HiPE
* (hipe_mode_switch()) when the current process is to be
@@ -8857,19 +9561,19 @@ erts_set_process_priority(Process *p, Eterm value)
* so that normal processes get to run more frequently.
*/
-Process *schedule(Process *p, int calls)
+Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
{
Process *proxy_p = NULL;
ErtsRunQueue *rq;
- erts_aint_t dt;
- ErtsSchedulerData *esdp;
int context_reds;
int fcalls;
- int input_reductions;
int actual_reds;
int reds;
Uint32 flags;
erts_aint32_t state = 0; /* Suppress warning... */
+ int is_normal_sched;
+
+ ERTS_MSACC_DECLARE_CACHE();
#ifdef USE_VM_PROBES
if (p != NULL && DTRACE_ENABLED(process_unscheduled)) {
@@ -8882,145 +9586,166 @@ Process *schedule(Process *p, int calls)
if (ERTS_USE_MODIFIED_TIMING()) {
context_reds = ERTS_MODIFIED_TIMING_CONTEXT_REDS;
- input_reductions = ERTS_MODIFIED_TIMING_INPUT_REDS;
}
else {
context_reds = CONTEXT_REDS;
- input_reductions = INPUT_REDUCTIONS;
}
- ERTS_SMP_LC_ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data())
+ ERTS_LC_ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data())
|| !erts_thr_progress_is_blocking());
/*
* Clean up after the process being scheduled out.
*/
if (!p) { /* NULL in the very first schedule() call */
- esdp = erts_get_scheduler_data();
+ is_normal_sched = !esdp;
+ if (is_normal_sched) {
+ esdp = erts_get_scheduler_data();
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
+ }
+ else {
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp));
+ }
rq = erts_get_runq_current(esdp);
ASSERT(esdp);
- fcalls = (int) erts_smp_atomic32_read_acqb(&function_calls);
actual_reds = reds = 0;
- erts_smp_runq_lock(rq);
+ erts_runq_lock(rq);
} else {
- sched_out_proc:
-
-#ifdef ERTS_SMP
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
- esdp = p->scheduler_data;
+ is_normal_sched = !esdp;
+ if (is_normal_sched) {
+ esdp = p->scheduler_data;
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
+ }
+ else {
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp));
+ }
ASSERT(esdp->current_process == p
|| esdp->free_process == p);
-#else
- esdp = erts_scheduler_data;
- ASSERT(esdp->current_process == p);
-#endif
+
+ sched_out_proc:
+
+ ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+
reds = actual_reds = calls - esdp->virtual_reds;
+
+ ASSERT(actual_reds >= 0);
if (reds < ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST)
reds = ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST;
esdp->virtual_reds = 0;
- fcalls = (int) erts_smp_atomic32_add_read_acqb(&function_calls, reds);
ASSERT(esdp && esdp == erts_get_scheduler_data());
rq = erts_get_runq_current(esdp);
p->reds += actual_reds;
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
-
- state = erts_smp_atomic32_read_acqb(&p->state);
+ state = erts_atomic32_read_nob(&p->state);
if (IS_TRACED(p)) {
if (IS_TRACED_FL(p, F_TRACE_CALLS) && !(state & ERTS_PSFLG_FREE))
erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_OUT);
- if (state & (ERTS_PSFLG_FREE|ERTS_PSFLG_EXITING)) {
+ if ((state & (ERTS_PSFLG_FREE|ERTS_PSFLG_EXITING)) == ERTS_PSFLG_EXITING) {
if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
- trace_sched(p, ((state & ERTS_PSFLG_FREE)
- ? am_out_exited
- : am_out_exiting));
+ trace_sched(p, ERTS_PROC_LOCK_MAIN,
+ ((state & ERTS_PSFLG_FREE)
+ ? am_out_exited
+ : am_out_exiting));
}
else {
- if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED))
- trace_sched(p, am_out);
- else if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS))
- trace_virtual_sched(p, am_out);
+ if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED) ||
+ ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS))
+ trace_sched(p, ERTS_PROC_LOCK_MAIN, am_out);
}
}
-#ifdef ERTS_SMP
- if (state & ERTS_PSFLG_PENDING_EXIT)
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+
+ if (p->trace_msg_q) {
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ erts_schedule_flush_trace_messages(p, 1);
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ }
+
+ /* have to re-read state after taking lock */
+ state = erts_atomic32_read_nob(&p->state);
+
+ if (is_normal_sched && (state & ERTS_PSFLG_PENDING_EXIT))
erts_handle_pending_exit(p, (ERTS_PROC_LOCK_MAIN
+ | ERTS_PROC_LOCK_TRACE
| ERTS_PROC_LOCK_STATUS));
if (p->pending_suspenders)
handle_pending_suspend(p, (ERTS_PROC_LOCK_MAIN
+ | ERTS_PROC_LOCK_TRACE
| ERTS_PROC_LOCK_STATUS));
-#endif
esdp->reductions += reds;
- schedule_out_process(rq, state, p, proxy_p); /* Returns with rq locked! */
- proxy_p = NULL;
-
- ERTS_PROC_REDUCTIONS_EXECUTED(rq,
- (int) ERTS_PSFLGS_GET_USR_PRIO(state),
- reds,
- actual_reds);
-
- esdp->current_process = NULL;
-#ifdef ERTS_SMP
- p->scheduler_data = NULL;
-#endif
-
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
+ {
+ int dec_refc;
+
+ /* schedule_out_process() returns with rq locked! */
+ dec_refc = schedule_out_process(rq, state, p,
+ proxy_p, is_normal_sched);
+ proxy_p = NULL;
+
+ ERTS_PROC_REDUCTIONS_EXECUTED(esdp, rq,
+ (int) ERTS_PSFLGS_GET_USR_PRIO(state),
+ reds,
+ actual_reds);
+
+ esdp->current_process = NULL;
+ if (is_normal_sched)
+ p->scheduler_data = NULL;
+
+ erts_proc_unlock(p, (ERTS_PROC_LOCK_MAIN
+ | ERTS_PROC_LOCK_STATUS
+ | ERTS_PROC_LOCK_TRACE));
+
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_OTHER);
+
+ if (state & ERTS_PSFLG_FREE) {
+ if (!is_normal_sched) {
+ ASSERT(p->flags & F_DELAYED_DEL_PROC);
+ }
+ else {
+ ASSERT(esdp->free_process == p);
+ esdp->free_process = NULL;
+ }
+ }
- if (state & ERTS_PSFLG_FREE) {
-#ifdef ERTS_SMP
- ASSERT(esdp->free_process == p);
- esdp->free_process = NULL;
-#else
- state = erts_smp_atomic32_read_nob(&p->state);
- if (!(state & ERTS_PSFLG_IN_RUNQ))
- erts_free_proc(p);
-#endif
- }
+ if (dec_refc)
+ erts_proc_dec_refc(p);
+ }
-#ifdef ERTS_SMP
ASSERT(!esdp->free_process);
-#endif
ASSERT(!esdp->current_process);
- ERTS_SMP_CHK_NO_PROC_LOCKS;
-
- dt = erts_do_time_read_and_reset();
- if (dt) {
- erts_smp_runq_unlock(rq);
- erts_bump_timer(dt);
- erts_smp_runq_lock(rq);
- }
- BM_STOP_TIMER(system);
+ ERTS_CHK_NO_PROC_LOCKS;
}
- ERTS_SMP_LC_ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp)
- || !erts_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(!is_normal_sched || !erts_thr_progress_is_blocking());
check_activities_to_run: {
-#ifdef ERTS_SMP
+ erts_aint32_t psflg_running, psflg_running_sys;
ErtsMigrationPaths *mps;
ErtsMigrationPath *mp;
- ErtsProcList *pnd_xtrs = rq->procs.pending_exiters;
- if (erts_proclist_fetch(&pnd_xtrs, NULL)) {
- rq->procs.pending_exiters = NULL;
- erts_smp_runq_unlock(rq);
- handle_pending_exiters(pnd_xtrs);
- erts_smp_runq_lock(rq);
- }
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ if (is_normal_sched) {
+
+ if (esdp->check_time_reds >= ERTS_CHECK_TIME_REDS)
+ (void) erts_get_monotonic_time(esdp);
+
+ if (esdp->last_monotonic_time >= erts_next_timeout_time(esdp->next_tmo_ref)) {
+ erts_runq_unlock(rq);
+ erts_bump_timers(esdp->timer_wheel, esdp->last_monotonic_time);
+ erts_runq_lock(rq);
+ }
+
if (rq->check_balance_reds <= 0)
check_balance(rq);
- ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(!erts_thr_progress_is_blocking());
mps = erts_get_migration_paths_managed();
mp = &mps->mpath[rq->ix];
@@ -9029,79 +9754,66 @@ Process *schedule(Process *p, int calls)
immigrate(rq, mp);
}
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
continue_check_activities_to_run:
flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
continue_check_activities_to_run_known_flags:
- ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp)
- || flags & ERTS_RUNQ_FLG_NONEMPTY);
+ ASSERT(!is_normal_sched || (flags & ERTS_RUNQ_FLG_NONEMPTY));
- if (flags & (ERTS_RUNQ_FLG_CHK_CPU_BIND|ERTS_RUNQ_FLG_SUSPENDED)) {
- if (flags & ERTS_RUNQ_FLG_SUSPENDED) {
+ if (!is_normal_sched) {
+ if (erts_atomic32_read_acqb(&esdp->ssi->flags)
+ & (ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC)) {
suspend_scheduler(esdp);
- flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
- }
- if (flags & ERTS_RUNQ_FLG_CHK_CPU_BIND) {
- flags = ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_CHK_CPU_BIND);
- flags &= ~ ERTS_RUNQ_FLG_CHK_CPU_BIND;
- erts_sched_check_cpu_bind(esdp);
}
}
-#ifdef ERTS_DIRTY_SCHEDULERS
- else if (ERTS_SCHEDULER_IS_DIRTY(esdp)
- && (erts_smp_atomic32_read_acqb(&esdp->ssi->flags)
- & ERTS_SSI_FLG_SUSPENDED))
- suspend_scheduler(esdp);
-#endif
-
- {
+ else {
erts_aint32_t aux_work;
- int leader_update = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0
- : erts_thr_progress_update(esdp);
+ int leader_update;
+
+ ASSERT(is_normal_sched);
+
+ if (flags & (ERTS_RUNQ_FLG_CHK_CPU_BIND
+ | ERTS_RUNQ_FLG_SUSPENDED
+ | ERTS_RUNQ_FLG_MSB_EXEC)) {
+ if (flags & (ERTS_RUNQ_FLG_SUSPENDED|ERTS_RUNQ_FLG_MSB_EXEC)) {
+ (void) ERTS_RUNQ_FLGS_UNSET_NOB(rq, ERTS_RUNQ_FLG_EXEC);
+ suspend_scheduler(esdp);
+ flags = ERTS_RUNQ_FLGS_SET_NOB(rq, ERTS_RUNQ_FLG_EXEC);
+ flags |= ERTS_RUNQ_FLG_EXEC;
+ }
+ if (flags & ERTS_RUNQ_FLG_CHK_CPU_BIND) {
+ flags = ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_CHK_CPU_BIND);
+ flags &= ~ERTS_RUNQ_FLG_CHK_CPU_BIND;
+ erts_sched_check_cpu_bind(esdp);
+ }
+ }
+
+ leader_update = erts_thr_progress_update(esdp);
aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work);
- if (aux_work | leader_update | ERTS_SCHED_FAIR) {
- erts_smp_runq_unlock(rq);
+ if (aux_work | leader_update) {
+ erts_runq_unlock(rq);
if (leader_update)
erts_thr_progress_leader_update(esdp);
- else if (ERTS_SCHED_FAIR)
- ERTS_SCHED_FAIR_YIELD();
if (aux_work)
handle_aux_work(&esdp->aux_work_data, aux_work, 0);
- erts_smp_runq_lock(rq);
+ erts_runq_lock(rq);
}
- ERTS_SMP_LC_ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp)
- || !erts_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(!erts_thr_progress_is_blocking());
}
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
-#else /* ERTS_SMP */
- {
- erts_aint32_t aux_work;
- aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work);
- if (aux_work)
- handle_aux_work(&esdp->aux_work_data, aux_work, 0);
- }
-#endif /* ERTS_SMP */
flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && rq->halt_in_progress) {
- /*
- * TODO: if halt in progress, need to put the dirty scheduler
- * to sleep somewhere around here to prevent it from picking up
- * new work
- */
+ if (!is_normal_sched & !!(flags & ERTS_RUNQ_FLG_HALTING)) {
+ /* Wait for emulator to terminate... */
+ while (1)
+ erts_milli_sleep(1000*1000);
}
- else
-#endif
-
- if ((!(flags & ERTS_RUNQ_FLGS_QMASK) && !rq->misc.start)
- || (rq->halt_in_progress && ERTS_EMPTY_RUNQ_PORTS(rq))) {
+ else if (!runq_got_work_to_execute_flags(flags)) {
/* Prepare for scheduler wait */
-#ifdef ERTS_SMP
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
rq->wakeup_other = 0;
rq->wakeup_other_reds = 0;
@@ -9112,94 +9824,69 @@ Process *schedule(Process *p, int calls)
if (flags & ERTS_RUNQ_FLG_INACTIVE)
empty_runq(rq);
else {
- if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && try_steal_task(rq))
- goto continue_check_activities_to_run;
-
- empty_runq(rq);
-
- /*
- * Check for ERTS_RUNQ_FLG_SUSPENDED has to be done
- * after trying to steal a task.
- */
- flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
- if (flags & ERTS_RUNQ_FLG_SUSPENDED) {
- non_empty_runq(rq);
- flags |= ERTS_RUNQ_FLG_NONEMPTY;
- goto continue_check_activities_to_run_known_flags;
+ ASSERT(!runq_got_work_to_execute(rq));
+ if (!is_normal_sched) {
+ /* Dirty scheduler */
+ if (erts_atomic32_read_acqb(&esdp->ssi->flags)
+ & (ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC)) {
+ /* Go suspend... */
+ goto continue_check_activities_to_run_known_flags;
+ }
+ }
+ else {
+ /* Normal scheduler */
+ if (try_steal_task(rq))
+ goto continue_check_activities_to_run;
+ /*
+ * The check for suspend has to be done after trying
+ * to steal a task...
+ */
+ flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
+ if ((flags & ERTS_RUNQ_FLG_SUSPENDED)
+ /* If a multi-scheduling block is active and
+ * we have dirty work, suspend and let a dirty
+ * scheduler handle the work... */
+ || ((((flags & (ERTS_RUNQ_FLG_HALTING
+ | ERTS_RUNQ_FLG_MSB_EXEC))
+ == ERTS_RUNQ_FLG_MSB_EXEC))
+ && have_dirty_work())
+ ) {
+ non_empty_runq(rq);
+ flags |= ERTS_RUNQ_FLG_NONEMPTY;
+ /*
+ * Go suspend...
+ */
+ goto continue_check_activities_to_run_known_flags;
+ }
}
+ empty_runq(rq);
}
-#endif
+ (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_EXEC);
scheduler_wait(&fcalls, esdp, rq);
-
-#ifdef ERTS_SMP
+ flags = ERTS_RUNQ_FLGS_SET_NOB(rq, ERTS_RUNQ_FLG_EXEC);
+ flags |= ERTS_RUNQ_FLG_EXEC;
+ ERTS_MSACC_UPDATE_CACHE();
non_empty_runq(rq);
-#endif
goto check_activities_to_run;
}
- else if (!ERTS_SCHEDULER_IS_DIRTY(esdp) &&
- (fcalls > input_reductions &&
- prepare_for_sys_schedule(esdp))) {
- /*
- * Schedule system-level activities.
- */
- erts_smp_atomic32_set_relb(&function_calls, 0);
- fcalls = 0;
-
- ASSERT(!erts_port_task_have_outstanding_io_tasks());
-
-#if 0 /* Not needed since we wont wait in sys schedule */
- erts_sys_schedule_interrupt(0);
-#endif
- erts_smp_runq_unlock(rq);
- erl_sys_schedule(1);
- dt = erts_do_time_read_and_reset();
- if (dt) erts_bump_timer(dt);
-
-#ifdef ERTS_SMP
- erts_smp_runq_lock(rq);
- clear_sys_scheduling();
- goto continue_check_activities_to_run;
-#else
- goto check_activities_to_run;
-#endif
- }
-
- if (rq->misc.start)
+ if (flags & ERTS_RUNQ_FLG_MISC_OP)
exec_misc_ops(rq);
-#ifdef ERTS_SMP
wakeup_other.check(rq, flags);
-#endif
/*
* Find a new port to run.
*/
- if (RUNQ_READ_LEN(&rq->ports.info.len)) {
- int have_outstanding_io;
- have_outstanding_io = erts_port_task_execute(rq, &esdp->current_port);
- if ((have_outstanding_io && fcalls > 2*input_reductions)
- || rq->halt_in_progress) {
- /*
- * If we have performed more than 2*INPUT_REDUCTIONS since
- * last call to erl_sys_schedule() and we still haven't
- * handled all I/O tasks we stop running processes and
- * focus completely on ports.
- *
- * One could argue that this is a strange behavior. The
- * reason for doing it this way is that it is similar
- * to the behavior before port tasks were introduced.
- * We don't want to change the behavior too much, at
- * least not at the time of writing. This behavior
- * might change in the future.
- *
- * /rickard
- */
- goto check_activities_to_run;
- }
+ flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
+
+ if (flags & PORT_BIT) {
+ erts_port_task_execute(rq, &esdp->current_port);
+ if (flags & ERTS_RUNQ_FLG_HALTING)
+ goto check_activities_to_run;
}
/*
@@ -9208,11 +9895,12 @@ Process *schedule(Process *p, int calls)
pick_next_process: {
erts_aint32_t psflg_band_mask;
int prio_q;
- int qmask;
+ int qmask, qbit;
flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
qmask = (int) (flags & ERTS_RUNQ_FLGS_PROCS_QMASK);
- switch (qmask & -qmask) {
+ qbit = qmask & -qmask;
+ switch (qbit) {
case MAX_BIT:
prio_q = PRIORITY_MAX;
break;
@@ -9228,11 +9916,10 @@ Process *schedule(Process *p, int calls)
case 0: /* No process at all */
default:
ASSERT(qmask == 0);
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_OTHER);
goto check_activities_to_run;
}
- BM_START_TIMER(system);
-
/*
* Take the chosen process out of the queue.
*/
@@ -9240,70 +9927,95 @@ Process *schedule(Process *p, int calls)
ASSERT(p); /* Wrong qmask in rq->flags? */
- psflg_band_mask = ~(((erts_aint32_t) 1) << (ERTS_PSFLGS_GET_PRQ_PRIO(state)
- + ERTS_PSFLGS_IN_PRQ_MASK_OFFSET));
-
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT((state & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)) !=
- (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC));
- if (state & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)) {
- ASSERT((ERTS_SCHEDULER_IS_DIRTY_CPU(esdp) && (state & ERTS_PSFLG_DIRTY_CPU_PROC)) ||
- (ERTS_SCHEDULER_IS_DIRTY_IO(esdp) && (state & ERTS_PSFLG_DIRTY_IO_PROC)));
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !(state & ERTS_PSFLG_ACTIVE_SYS))
- goto pick_next_process;
- state &= ~(ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q);
+ if (is_normal_sched) {
+ psflg_running = ERTS_PSFLG_RUNNING;
+ psflg_running_sys = ERTS_PSFLG_RUNNING_SYS;
+ psflg_band_mask = ~(((erts_aint32_t) 1) << (ERTS_PSFLGS_GET_PRQ_PRIO(state)
+ + ERTS_PSFLGS_IN_PRQ_MASK_OFFSET));
+ }
+ else {
+ psflg_running = ERTS_PSFLG_DIRTY_RUNNING;
+ psflg_running_sys = ERTS_PSFLG_DIRTY_RUNNING_SYS;
+ psflg_band_mask = ~((erts_aint32_t) 0);
}
-#endif
if (!(state & ERTS_PSFLG_PROXY))
psflg_band_mask &= ~ERTS_PSFLG_IN_RUNQ;
else {
+ Eterm pid = p->common.id;
proxy_p = p;
- p = erts_proc_lookup_raw(proxy_p->common.id);
+ p = (is_normal_sched
+ ? erts_proc_lookup_raw(pid)
+ : erts_pid2proc_opt(NULL, 0, pid, 0,
+ ERTS_P2P_FLG_INC_REFC));
if (!p) {
free_proxy_proc(proxy_p);
proxy_p = NULL;
goto pick_next_process;
}
- state = erts_smp_atomic32_read_nob(&p->state);
+ state = erts_atomic32_read_nob(&p->state);
}
+ if (!is_normal_sched)
+ clear_proc_dirty_queue_bit(p, rq, qbit);
+
while (1) {
- erts_aint32_t exp, new, tmp;
- tmp = new = exp = state;
+ erts_aint32_t exp, new;
+ int run_process;
+ new = exp = state;
new &= psflg_band_mask;
- if (!(state & (ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_RUNNING_SYS))) {
- tmp = state & (ERTS_PSFLG_SUSPENDED
- | ERTS_PSFLG_PENDING_EXIT
- | ERTS_PSFLG_ACTIVE_SYS);
- if (tmp != ERTS_PSFLG_SUSPENDED) {
- if (state & ERTS_PSFLG_ACTIVE_SYS)
- new |= ERTS_PSFLG_RUNNING_SYS;
- else
- new |= ERTS_PSFLG_RUNNING;
- }
+	    /*
+	     * Run the process if it is not already running (or free),
+	     * or if it is exiting but not running and we are on a
+	     * normal scheduler. In either case, do not run it if it
+	     * is suspended (unless it is in a state where the suspend
+	     * should be ignored).
+	     */
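+	    /* The subconditions below are 0/1 values combined with
+	     * bitwise '&'/'|' rather than '&&'/'||', presumably to
+	     * keep this hot path free of branches. */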
+ run_process = (((!(state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS
+ | ERTS_PSFLG_FREE)))
+ | (((state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_FREE
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS
+ | ERTS_PSFLG_EXITING))
+ == ERTS_PSFLG_EXITING)
+ & (!!is_normal_sched))
+ )
+ & ((state & (ERTS_PSFLG_SUSPENDED
+ | ERTS_PSFLG_EXITING
+ | ERTS_PSFLG_FREE
+ | ERTS_PSFLG_PENDING_EXIT
+ | ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_DIRTY_ACTIVE_SYS))
+ != ERTS_PSFLG_SUSPENDED)
+ & (!(state & (ERTS_PSFLG_EXITING
+ | ERTS_PSFLG_PENDING_EXIT))
+ | (!!is_normal_sched))
+ );
+
+ if (run_process) {
+ if (state & (ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_DIRTY_ACTIVE_SYS))
+ new |= psflg_running_sys;
+ else
+ new |= psflg_running;
}
- state = erts_smp_atomic32_cmpxchg_relb(&p->state, new, exp);
+ state = erts_atomic32_cmpxchg_relb(&p->state, new, exp);
if (state == exp) {
- if ((state & (ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_RUNNING_SYS
- | ERTS_PSFLG_FREE))
- || ((state & (ERTS_PSFLG_SUSPENDED
- | ERTS_PSFLG_PENDING_EXIT
- | ERTS_PSFLG_ACTIVE_SYS))
- == ERTS_PSFLG_SUSPENDED)) {
- if (state & ERTS_PSFLG_FREE) {
-#ifdef ERTS_SMP
- erts_smp_proc_dec_refc(p);
-#else
- erts_free_proc(p);
-#endif
- }
+ if (!run_process) {
if (proxy_p) {
free_proxy_proc(proxy_p);
proxy_p = NULL;
}
+ else if (state & ERTS_PSFLG_FREE) {
+ /* free and not queued by proxy */
+ erts_proc_dec_refc(p);
+ }
+ if (!is_normal_sched)
+ erts_proc_dec_refc(p);
goto pick_next_process;
}
state = new;
@@ -9315,145 +10027,219 @@ Process *schedule(Process *p, int calls)
esdp->current_process = p;
+ calls = 0;
+ reds = context_reds;
+
+ erts_runq_unlock(rq);
+
}
-#ifdef ERTS_SMP
- erts_smp_runq_unlock(rq);
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_EMULATOR);
+
if (flags & ERTS_RUNQ_FLG_PROTECTED)
(void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
+
+ state = erts_atomic32_read_nob(&p->state);
if (erts_sched_stat.enabled) {
int prio;
- UWord old = ERTS_PROC_SCHED_ID(p,
- (ERTS_PROC_LOCK_MAIN
- | ERTS_PROC_LOCK_STATUS),
- (UWord) esdp->no);
+ UWord old = ERTS_PROC_SCHED_ID(p, (UWord) esdp->no);
int migrated = old && old != esdp->no;
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
-#endif
+ ASSERT(is_normal_sched);
prio = (int) ERTS_PSFLGS_GET_USR_PRIO(state);
- erts_smp_spin_lock(&erts_sched_stat.lock);
+ erts_spin_lock(&erts_sched_stat.lock);
erts_sched_stat.prio[prio].total_executed++;
erts_sched_stat.prio[prio].executed++;
if (migrated) {
erts_sched_stat.prio[prio].total_migrated++;
erts_sched_stat.prio[prio].migrated++;
}
- erts_smp_spin_unlock(&erts_sched_stat.lock);
+ erts_spin_unlock(&erts_sched_stat.lock);
}
- if (ERTS_PROC_PENDING_EXIT(p)) {
+ state = erts_atomic32_read_nob(&p->state);
+
+ if (is_normal_sched) {
+ if ((!!(state & ERTS_PSFLGS_DIRTY_WORK))
+ & (!(state & ERTS_PSFLG_ACTIVE_SYS))) {
+ /* Migrate to dirty scheduler... */
+ sunlock_sched_out_proc:
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ goto sched_out_proc;
+ }
+ ASSERT(!p->scheduler_data);
+ p->scheduler_data = esdp;
+ }
+ else {
+ if (state & (ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_PENDING_EXIT
+ | ERTS_PSFLG_EXITING)) {
+ /*
+ * IMPORTANT! We need to take care of
+ * scheduled check-process-code requests
+ * before continuing with dirty execution!
+ */
+ /* Migrate to normal scheduler... */
+ goto sunlock_sched_out_proc;
+ }
+ if ((state & ERTS_PSFLG_DIRTY_ACTIVE_SYS)
+ && rq == ERTS_DIRTY_IO_RUNQ) {
+ /* Migrate to dirty cpu scheduler... */
+ goto sunlock_sched_out_proc;
+ }
+
+ ASSERT(rq == ERTS_DIRTY_CPU_RUNQ
+ ? (state & (ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_ACTIVE_SYS))
+ : (rq == ERTS_DIRTY_IO_RUNQ
+ && (state & ERTS_PSFLG_DIRTY_IO_PROC)));
+ }
+
+ if (state & ERTS_PSFLG_PENDING_EXIT) {
erts_handle_pending_exit(p,
ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
- state = erts_smp_atomic32_read_nob(&p->state);
+ state = erts_atomic32_read_nob(&p->state);
}
- ASSERT(!p->scheduler_data);
- p->scheduler_data = esdp;
-#endif
- reds = context_reds;
- if (IS_TRACED(p)) {
+
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+
+ /* Clear tracer if it has been removed */
+ if (IS_TRACED(p) && erts_is_tracer_proc_enabled(
+ p, ERTS_PROC_LOCK_MAIN, &p->common)) {
+
if (state & ERTS_PSFLG_EXITING) {
if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
- trace_sched(p, am_in_exiting);
+ trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in_exiting);
}
else {
- if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED))
- trace_sched(p, am_in);
- else if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS))
- trace_virtual_sched(p, am_in);
+ if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED) ||
+ ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS))
+ trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in);
}
if (IS_TRACED_FL(p, F_TRACE_CALLS)) {
erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_IN);
}
}
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
-
-#ifdef ERTS_SMP
- if (is_not_nil(ERTS_TRACER_PROC(p)))
- erts_check_my_tracer_proc(p);
-#endif
-
- if (state & ERTS_PSFLG_RUNNING_SYS) {
- reds -= execute_sys_tasks(p, &state, reds);
- if (reds <= 0
-#ifdef ERTS_DIRTY_SCHEDULERS
- || (state & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC))
-#endif
- ) {
- p->fcalls = reds;
- goto sched_out_proc;
- }
-
- ASSERT(state & ERTS_PSFLG_RUNNING_SYS);
- ASSERT(!(state & ERTS_PSFLG_RUNNING));
-
- while (1) {
- erts_aint32_t n, e;
-
- if (((state & (ERTS_PSFLG_SUSPENDED
- | ERTS_PSFLG_ACTIVE)) != ERTS_PSFLG_ACTIVE)
- && !(state & ERTS_PSFLG_EXITING))
- goto sched_out_proc;
-
- n = e = state;
- n &= ~ERTS_PSFLG_RUNNING_SYS;
- n |= ERTS_PSFLG_RUNNING;
-
- state = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
- if (state == e) {
- state = n;
- break;
- }
-
- ASSERT(state & ERTS_PSFLG_RUNNING_SYS);
- ASSERT(!(state & ERTS_PSFLG_RUNNING));
- }
- }
+ if (is_normal_sched) {
+
+ if (state & ERTS_PSFLG_RUNNING_SYS) {
+		/*
+		 * GC is normally never delayed when a process
+		 * is scheduled out, but it might be when executing
+		 * hand-written beam assembly in
+		 * prim_eval:'receive'. If GC is delayed we are
+		 * not allowed to execute system tasks.
+		 */
+ if (!(p->flags & F_DELAY_GC)) {
+ int cost = execute_sys_tasks(p, &state, reds);
+ calls += cost;
+ reds -= cost;
+ if (reds <= 0)
+ goto sched_out_proc;
+ if (state & ERTS_PSFLGS_DIRTY_WORK)
+ goto sched_out_proc;
+ }
+
+ ASSERT(state & psflg_running_sys);
+ ASSERT(!(state & psflg_running));
+
+ while (1) {
+ erts_aint32_t n, e;
+
+ if (((state & (ERTS_PSFLG_SUSPENDED
+ | ERTS_PSFLG_ACTIVE)) != ERTS_PSFLG_ACTIVE)
+ && !(state & ERTS_PSFLG_EXITING)) {
+ goto sched_out_proc;
+ }
+
+ n = e = state;
+ n &= ~psflg_running_sys;
+ n |= psflg_running;
+
+ state = erts_atomic32_cmpxchg_mb(&p->state, n, e);
+ if (state == e) {
+ state = n;
+ break;
+ }
+
+ ASSERT(state & psflg_running_sys);
+ ASSERT(!(state & psflg_running));
+ }
+ }
- if (!(state & ERTS_PSFLG_EXITING)
- && ((FLAGS(p) & F_FORCE_GC)
- || (MSO(p).overhead > BIN_VHEAP_SZ(p)))) {
- reds -= erts_garbage_collect(p, 0, p->arg_reg, p->arity);
- if (reds <= 0) {
- p->fcalls = reds;
- goto sched_out_proc;
- }
- }
+ if (ERTS_IS_GC_DESIRED(p)) {
+ if (!(state & ERTS_PSFLG_EXITING)
+ && !(p->flags & (F_DELAY_GC|F_DISABLE_GC))) {
+ int cost = scheduler_gc_proc(p, reds);
+ calls += cost;
+ reds -= cost;
+ if (reds <= 0)
+ goto sched_out_proc;
+ if (p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC))
+ goto sched_out_proc;
+ }
+ }
+ }
if (proxy_p) {
free_proxy_proc(proxy_p);
proxy_p = NULL;
}
-
+
p->fcalls = reds;
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+
+ ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
/* Never run a suspended process */
- ASSERT(!(ERTS_PSFLG_SUSPENDED & erts_smp_atomic32_read_nob(&p->state)));
+#ifdef DEBUG
+ {
+ erts_aint32_t dstate = erts_atomic32_read_nob(&p->state);
+ ASSERT(!(ERTS_PSFLG_SUSPENDED & dstate)
+ || (ERTS_PSFLG_DIRTY_RUNNING_SYS & dstate));
+ }
+#endif
+
+ ASSERT(erts_proc_read_refc(p) > 0);
+
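+    /* The process timer has fired: pick up the timeout continuation
+     * that the wait/timeout instruction left in def_arg_reg. The
+     * LoadLoad barrier orders this read after the timed-out flag. */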
+ if (!(state & ERTS_PSFLG_EXITING) && ERTS_PTMR_IS_TIMED_OUT(p)) {
+ BeamInstr** pi;
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+ pi = (BeamInstr **) p->def_arg_reg;
+ p->i = *pi;
+ p->flags &= ~F_INSLPQUEUE;
+ p->flags |= F_TIMO;
+ ERTS_PTMR_CLEAR(p);
+ }
return p;
}
}
static int
-notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st, Eterm st_result)
+notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st,
+ Eterm st_result, int normal_sched)
{
- Process *rp = erts_proc_lookup(st->requester);
+ Process *rp;
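+    /* On a dirty scheduler we cannot rely on thread progress to keep
+     * the requester alive while sending, so pin it with an extra
+     * reference count. */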
+ if (!normal_sched)
+ rp = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
+ st->requester, 0,
+ ERTS_P2P_FLG_INC_REFC);
+ else
+ rp = erts_proc_lookup(st->requester);
if (rp) {
ErtsProcLocks rp_locks;
ErlOffHeap *ohp;
- ErlHeapFragment* bp;
+ ErtsMessage *mp;
Eterm *hp, msg, req_id, result;
Uint st_result_sz, hsz;
#ifdef DEBUG
@@ -9465,11 +10251,7 @@ notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st, Eterm st_result)
st_result_sz = is_immed(st_result) ? 0 : size_object(st_result);
hsz = st->req_id_sz + st_result_sz + 4 /* 3-tuple */;
- hp = erts_alloc_message_heap(hsz,
- &bp,
- &ohp,
- rp,
- &rp_locks);
+ mp = erts_alloc_message_heap(rp, &rp_locks, hsz, &hp, &ohp);
#ifdef DEBUG
hp_start = hp;
@@ -9494,21 +10276,16 @@ notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st, Eterm st_result)
ASSERT(hp_start + hsz == hp);
#endif
- erts_queue_message(rp,
- &rp_locks,
- bp,
- msg,
- NIL
-#ifdef USE_VM_PROBES
- , NIL
-#endif
- );
+ erts_queue_message(rp, rp_locks, mp, msg, c_p->common.id);
if (c_p == rp)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
+
+ if (!normal_sched)
+ erts_proc_dec_refc(rp);
}
erts_cleanup_offheap(&st->off_heap);
@@ -9527,7 +10304,7 @@ fetch_sys_task(Process *c_p, erts_aint32_t state, int *qmaskp, int *priop)
*priop = -1; /* Shut up annoying erroneous warning */
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
if (!c_p->sys_task_qs) {
qmask = 0;
@@ -9647,13 +10424,13 @@ fetch_sys_task(Process *c_p, erts_aint32_t state, int *qmaskp, int *priop)
if (a == n)
break;
- a = erts_smp_atomic32_cmpxchg_nob(&c_p->state, n, e);
+ a = erts_atomic32_cmpxchg_nob(&c_p->state, n, e);
} while (a != e);
}
done:
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
if (unused_qs)
proc_sys_task_queues_free(unused_qs);
@@ -9664,28 +10441,28 @@ done:
}
static void save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio);
+static void save_dirty_task(Process *c_p, ErtsProcSysTask *st);
static int
execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
{
- int garbage_collected = 0;
+ int minor_gc = 0, major_gc = 0;
erts_aint32_t state = *statep;
- int max_reds = in_reds;
- int reds = 0;
+ int reds = in_reds;
int qmask = 0;
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p)));
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
do {
+ ErtsProcSysTaskType type;
ErtsProcSysTask *st;
int st_prio;
Eterm st_res;
if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) {
-#ifdef ERTS_SMP
if (state & ERTS_PSFLG_PENDING_EXIT)
erts_handle_pending_exit(c_p, ERTS_PROC_LOCK_MAIN);
-#endif
ASSERT(ERTS_PROC_IS_EXITING(c_p));
break;
}
@@ -9694,35 +10471,95 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
if (!st)
break;
- switch (st->type) {
- case ERTS_PSTT_GC:
+ type = st->type;
+
+ switch (type) {
+ case ERTS_PSTT_GC_MAJOR:
+ case ERTS_PSTT_GC_MINOR:
if (c_p->flags & F_DISABLE_GC) {
save_gc_task(c_p, st, st_prio);
st = NULL;
- reds++;
+ reds--;
}
else {
- if (!garbage_collected) {
- FLAGS(c_p) |= F_NEED_FULLSWEEP;
- reds += erts_garbage_collect(c_p,
- 0,
- c_p->arg_reg,
- c_p->arity);
- garbage_collected = 1;
+ if ((!minor_gc
+ || (!major_gc && type == ERTS_PSTT_GC_MAJOR))
+ && !(c_p->flags & F_HIBERNATED)) {
+ if (type == ERTS_PSTT_GC_MAJOR) {
+ FLAGS(c_p) |= F_NEED_FULLSWEEP;
+ }
+ reds -= scheduler_gc_proc(c_p, reds);
+ if (c_p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)) {
+ save_dirty_task(c_p, st);
+ st = NULL;
+ break;
+ }
+ if (type == ERTS_PSTT_GC_MAJOR)
+ minor_gc = major_gc = 1;
+ else
+ minor_gc = 1;
}
st_res = am_true;
}
break;
- case ERTS_PSTT_CPC:
+ case ERTS_PSTT_CPC: {
+ int fcalls;
+ int cpc_reds = 0;
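+	    /* When call saving is active the emulator runs with fcalls
+	     * biased by -CONTEXT_REDS; mirror that bias here. */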
+ if (!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
+ fcalls = reds;
+ else
+ fcalls = reds - CONTEXT_REDS;
st_res = erts_check_process_code(c_p,
st->arg[0],
- st->arg[1] == am_true,
- &reds);
+ &cpc_reds,
+ fcalls);
+ reds -= cpc_reds;
+ if (is_non_value(st_res)) {
+		/* GC was needed, but GC was disabled; save as a delayed GC task */
+ save_gc_task(c_p, st, st_prio);
+ st = NULL;
+ }
+ break;
+ }
+ case ERTS_PSTT_CLA: {
+ int fcalls;
+ int cla_reds = 0;
+ int do_gc;
+
+ if (!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
+ fcalls = reds;
+ else
+ fcalls = reds - CONTEXT_REDS;
+ do_gc = st->arg[0] == am_true;
+ st_res = erts_proc_copy_literal_area(c_p, &cla_reds,
+ fcalls, do_gc);
+ reds -= cla_reds;
if (is_non_value(st_res)) {
+ if (c_p->flags & F_DIRTY_CLA) {
+ save_dirty_task(c_p, st);
+ st = NULL;
+ break;
+ }
	    /* GC was needed, but GC was disabled; save as a delayed GC task */
save_gc_task(c_p, st, st_prio);
st = NULL;
+ break;
}
+ if (do_gc) /* We did a major gc */
+ minor_gc = major_gc = 1;
+ break;
+ }
+ case ERTS_PSTT_COHMQ:
+ reds -= erts_complete_off_heap_message_queue_change(c_p);
+ st_res = am_true;
+ break;
+ case ERTS_PSTT_FTMQ:
+ reds -= erts_flush_trace_messages(c_p, ERTS_PROC_LOCK_MAIN);
+ st_res = am_true;
+ break;
+ case ERTS_PSTT_ETS_FREE_FIXATION:
+ reds -= erts_db_execute_free_fixation(c_p, (DbFixation*)st->arg[0]);
+ st_res = am_true;
break;
default:
ERTS_INTERNAL_ERROR("Invalid process sys task type");
@@ -9730,14 +10567,17 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
}
if (st)
- reds += notify_sys_task_executed(c_p, st, st_res);
+ reds += notify_sys_task_executed(c_p, st, st_res, 1);
- state = erts_smp_atomic32_read_acqb(&c_p->state);
- } while (qmask && reds < max_reds);
+ state = erts_atomic32_read_acqb(&c_p->state);
+ } while (qmask && reds > 0);
*statep = state;
- return reds;
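+    /* Return the number of reductions consumed. 'reds' may exceed
+     * 'in_reds' since notify_sys_task_executed() gives reduction
+     * credit; in that case the whole input budget is reported. */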
+ if (in_reds < reds)
+ return in_reds;
+
+ return in_reds - reds;
}
static int
@@ -9746,58 +10586,218 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds)
erts_aint32_t state = in_state;
int max_reds = in_reds;
int reds = 0;
- int qmask = 0;
+ int qmask = 1; /* Set to 1 to force looping as long as there
+ * are dirty tasks.
+ */
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
do {
ErtsProcSysTask *st;
Eterm st_res;
int st_prio;
- st = fetch_sys_task(c_p, state, &qmask, &st_prio);
- if (!st)
- break;
+ if (c_p->dirty_sys_tasks) {
+ st = c_p->dirty_sys_tasks;
+ c_p->dirty_sys_tasks = st->next;
+ }
+ else
+ {
+ st = fetch_sys_task(c_p, state, &qmask, &st_prio);
+ if (!st)
+ break;
+ }
switch (st->type) {
- case ERTS_PSTT_GC:
- st_res = am_false;
- break;
+ case ERTS_PSTT_GC_MAJOR:
+ case ERTS_PSTT_GC_MINOR:
case ERTS_PSTT_CPC:
+ case ERTS_PSTT_COHMQ:
+ case ERTS_PSTT_ETS_FREE_FIXATION:
st_res = am_false;
break;
+ case ERTS_PSTT_CLA:
+ st_res = am_ok;
+ break;
+ case ERTS_PSTT_FTMQ:
+ reds -= erts_flush_trace_messages(c_p, ERTS_PROC_LOCK_MAIN);
+ st_res = am_true;
+ break;
default:
ERTS_INTERNAL_ERROR("Invalid process sys task type");
st_res = am_false;
break;
}
- reds += notify_sys_task_executed(c_p, st, st_res);
+ reds += notify_sys_task_executed(c_p, st, st_res, 1);
- state = erts_smp_atomic32_read_acqb(&c_p->state);
+ state = erts_atomic32_read_acqb(&c_p->state);
} while (qmask && reds < max_reds);
return reds;
}
-BIF_RETTYPE
-erts_internal_request_system_task_3(BIF_ALIST_3)
+
+void
+erts_execute_dirty_system_task(Process *c_p)
+{
+ Eterm cla_res = THE_NON_VALUE;
+ ErtsProcSysTask *stasks;
+
+ /*
+ * If multiple operations, perform them in the following
+ * order (in order to avoid unnecessary GC):
+ * 1. Copy Literal Area (implies major GC).
+ * 2. GC Hibernate (implies major GC if not woken).
+ * 3. Major GC (implies minor GC).
+ * 4. Minor GC.
+ *
+ * System task requests are handled after the actual
+ * operations have been performed...
+ */
+
+ ASSERT(!(c_p->flags & (F_DELAY_GC|F_DISABLE_GC)));
+
+ if (c_p->flags & F_DIRTY_CLA) {
+ int cla_reds = 0;
+ cla_res = erts_proc_copy_literal_area(c_p, &cla_reds, c_p->fcalls, 1);
+ ASSERT(is_value(cla_res));
+ }
+
+ if (c_p->flags & F_DIRTY_GC_HIBERNATE) {
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
+ ERTS_MSGQ_MV_INQ2PRIVQ(c_p);
+ if (c_p->msg.len)
+ c_p->flags &= ~F_DIRTY_GC_HIBERNATE; /* operation aborted... */
+ else {
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
+ c_p->fvalue = NIL;
+ erts_garbage_collect_hibernate(c_p);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ }
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
+ }
+
+ if (c_p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)) {
+ if (c_p->flags & F_DIRTY_MAJOR_GC)
+ c_p->flags |= F_NEED_FULLSWEEP;
+ (void) erts_garbage_collect_nobump(c_p, 0, c_p->arg_reg,
+ c_p->arity, c_p->fcalls);
+ }
+
+ ASSERT(!(c_p->flags & (F_DIRTY_CLA
+ | F_DIRTY_GC_HIBERNATE
+ | F_DIRTY_MAJOR_GC
+ | F_DIRTY_MINOR_GC)));
+
+ stasks = c_p->dirty_sys_tasks;
+ c_p->dirty_sys_tasks = NULL;
+
+ while (stasks) {
+ Eterm st_res;
+ ErtsProcSysTask *st = stasks;
+ stasks = st->next;
+
+ switch (st->type) {
+ case ERTS_PSTT_CLA:
+ ASSERT(is_value(cla_res));
+ st_res = cla_res;
+ break;
+ case ERTS_PSTT_GC_MAJOR:
+ st_res = am_true;
+ break;
+ case ERTS_PSTT_GC_MINOR:
+ st_res = am_true;
+ break;
+
+ default:
+	    ERTS_INTERNAL_ERROR("Unsupported dirty system task");
+ break;
+ }
+
+ (void) notify_sys_task_executed(c_p, st, st_res, 0);
+
+ }
+
+ erts_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_DIRTY_ACTIVE_SYS);
+}
+
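+/*
+ * Redirect a system task that could not be scheduled on its target to
+ * a proxy process. Currently only used for check-process-code requests
+ * targeting a process that executes dirty; these are sent to
+ * erts_dirty_process_code_checker.
+ */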
+static BIF_RETTYPE
+dispatch_system_task(Process *c_p, erts_aint_t fail_state,
+ ErtsProcSysTask *st, Eterm target,
+ Eterm prio, Eterm operation)
{
- Process *rp = erts_proc_lookup(BIF_ARG_1);
- ErtsProcSysTaskQs *stqs, *free_stqs = NULL;
+ Process *rp;
+ ErtsProcLocks rp_locks = 0;
+ ErlOffHeap *ohp;
+ ErtsMessage *mp;
+ Eterm *hp, msg;
+ Uint hsz, osz;
+ BIF_RETTYPE ret;
+
+ switch (st->type) {
+ case ERTS_PSTT_CPC:
+ rp = erts_dirty_process_code_checker;
+ ASSERT(fail_state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS));
+ if (c_p == rp) {
+ ERTS_BIF_PREP_RET(ret, am_dirty_execution);
+ return ret;
+ }
+ break;
+ default:
+ rp = NULL;
+ ERTS_INTERNAL_ERROR("Non-dispatchable system task");
+ break;
+ }
+
+ ERTS_BIF_PREP_RET(ret, am_ok);
+
+    /*
+     * Send a message of the form: {Requester, Target, Prio, Operation}
+     */
+
+ ASSERT(is_immed(st->requester));
+ ASSERT(is_immed(target));
+ ASSERT(is_immed(prio));
+
+ osz = size_object(operation);
+ hsz = 5 /* 4-tuple */ + osz;
+
+ mp = erts_alloc_message_heap(rp, &rp_locks, hsz, &hp, &ohp);
+
+ msg = copy_struct(operation, osz, &hp, ohp);
+ msg = TUPLE4(hp, st->requester, target, prio, msg);
+
+ erts_queue_message(rp, rp_locks, mp, msg, st->requester);
+
+ if (rp_locks)
+ erts_proc_unlock(rp, rp_locks);
+
+ return ret;
+}
+
+
+static BIF_RETTYPE
+request_system_task(Process *c_p, Eterm requester, Eterm target,
+ Eterm priority, Eterm operation)
+{
+ BIF_RETTYPE ret;
+ Process *rp = erts_proc_lookup(target);
ErtsProcSysTask *st = NULL;
- erts_aint32_t prio, rp_state;
- int rp_locked;
+ erts_aint32_t prio, fail_state = ERTS_PSFLG_EXITING;
Eterm noproc_res, req_type;
- if (!rp && !is_internal_pid(BIF_ARG_1)) {
- if (!is_external_pid(BIF_ARG_1))
+ if (!rp && !is_internal_pid(target)) {
+ if (!is_external_pid(target))
goto badarg;
- if (external_pid_dist_entry(BIF_ARG_1) != erts_this_dist_entry)
+ if (external_pid_dist_entry(target) != erts_this_dist_entry)
goto badarg;
}
- switch (BIF_ARG_2) {
+ switch (priority) {
case am_max: prio = PRIORITY_MAX; break;
case am_high: prio = PRIORITY_HIGH; break;
case am_normal: prio = PRIORITY_NORMAL; break;
@@ -9805,11 +10805,11 @@ erts_internal_request_system_task_3(BIF_ALIST_3)
default: goto badarg;
}
- if (is_not_tuple(BIF_ARG_3))
+ if (is_not_tuple(operation))
goto badarg;
else {
int i;
- Eterm *tp = tuple_val(BIF_ARG_3);
+ Eterm *tp = tuple_val(operation);
Uint arity = arityval(*tp);
Eterm req_id;
Uint req_id_sz;
@@ -9844,11 +10844,10 @@ erts_internal_request_system_task_3(BIF_ALIST_3)
}
st = erts_alloc(ERTS_ALC_T_PROC_SYS_TSK,
ERTS_PROC_SYS_TASK_SIZE(tot_sz));
- st->next = st->prev = st; /* Prep for empty prio queue */
ERTS_INIT_OFF_HEAP(&st->off_heap);
hp = &st->heap[0];
- st->requester = BIF_P->common.id;
+ st->requester = requester;
st->reply_tag = req_type;
st->req_id_sz = req_id_sz;
st->req_id = req_id_sz == 0 ? req_id : copy_struct(req_id,
@@ -9867,125 +10866,208 @@ erts_internal_request_system_task_3(BIF_ALIST_3)
switch (req_type) {
case am_garbage_collect:
- st->type = ERTS_PSTT_GC;
- noproc_res = am_false;
- if (!rp)
+ switch (st->arg[0]) {
+ case am_minor: st->type = ERTS_PSTT_GC_MINOR; break;
+ case am_major: st->type = ERTS_PSTT_GC_MAJOR; break;
+ default: goto badarg;
+ }
+ noproc_res = am_false;
+ if (!rp)
goto noproc;
break;
case am_check_process_code:
if (is_not_atom(st->arg[0]))
goto badarg;
- if (st->arg[1] != am_true && st->arg[1] != am_false)
- goto badarg;
noproc_res = am_false;
st->type = ERTS_PSTT_CPC;
if (!rp)
goto noproc;
+	/*
+	 * If the process starts executing dirty code, it is
+	 * important that this task is aborted; hence this
+	 * strict fail state...
+	 */
+ fail_state |= (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS);
+ break;
+
+ case am_copy_literals:
+ if (st->arg[0] != am_true && st->arg[0] != am_false)
+ goto badarg;
+ st->type = ERTS_PSTT_CLA;
+ noproc_res = am_ok;
+ if (!rp)
+ goto noproc;
break;
default:
goto badarg;
}
- rp_state = erts_smp_atomic32_read_nob(&rp->state);
+ if (!schedule_process_sys_task(rp, prio, st, &fail_state)) {
+ Eterm failure;
+ if (fail_state & ERTS_PSFLG_EXITING) {
+ noproc:
+ failure = noproc_res;
+ }
+ else if (fail_state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ ret = dispatch_system_task(c_p, fail_state, st,
+ target, priority, operation);
+ goto cleanup_return;
+ }
+ else {
+	    ERTS_INTERNAL_ERROR("Unknown failure in schedule_process_sys_task()");
+ failure = am_internal_error;
+ }
+ notify_sys_task_executed(c_p, st, failure, 1);
+ }
- rp_locked = 0;
+ ERTS_BIF_PREP_RET(ret, am_ok);
- free_stqs = NULL;
- if (rp_state & ERTS_PSFLG_ACTIVE_SYS)
- stqs = NULL;
- else {
- alloc_qs:
- stqs = proc_sys_task_queues_alloc();
- stqs->qmask = 1 << prio;
- stqs->ncount = 0;
- stqs->q[PRIORITY_MAX] = NULL;
- stqs->q[PRIORITY_HIGH] = NULL;
- stqs->q[PRIORITY_NORMAL] = NULL;
- stqs->q[PRIORITY_LOW] = NULL;
- stqs->q[prio] = st;
- }
+ return ret;
- if (!rp_locked) {
- rp_locked = 1;
- erts_smp_proc_lock(rp, ERTS_PROC_LOCK_STATUS);
+badarg:
- rp_state = erts_smp_atomic32_read_nob(&rp->state);
- if (rp_state & ERTS_PSFLG_EXITING) {
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
- rp = NULL;
- free_stqs = stqs;
- goto noproc;
- }
- }
+ ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
- if (!rp->sys_task_qs) {
- if (stqs)
- rp->sys_task_qs = stqs;
- else
- goto alloc_qs;
+cleanup_return:
+
+ if (st) {
+ erts_cleanup_offheap(&st->off_heap);
+ erts_free(ERTS_ALC_T_PROC_SYS_TSK, st);
}
- else {
- if (stqs)
- free_stqs = stqs;
- stqs = rp->sys_task_qs;
- if (!stqs->q[prio]) {
- stqs->q[prio] = st;
- stqs->qmask |= 1 << prio;
- }
- else {
- st->next = stqs->q[prio];
- st->prev = stqs->q[prio]->prev;
- st->next->prev = st;
- st->prev->next = st;
- ASSERT(stqs->qmask & (1 << prio));
- }
+
+ return ret;
+}
+
+BIF_RETTYPE
+erts_internal_request_system_task_3(BIF_ALIST_3)
+{
+ return request_system_task(BIF_P, BIF_P->common.id,
+ BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+}
+
+BIF_RETTYPE
+erts_internal_request_system_task_4(BIF_ALIST_4)
+{
+ return request_system_task(BIF_P, BIF_ARG_1,
+ BIF_ARG_2, BIF_ARG_3, BIF_ARG_4);
+}
+
+static void
+erts_schedule_generic_sys_task(Eterm pid, ErtsProcSysTaskType type, void* arg)
+{
+ Process *rp = erts_proc_lookup(pid);
+ if (rp) {
+ ErtsProcSysTask *st;
+ erts_aint32_t state, fail_state;
+
+ st = erts_alloc(ERTS_ALC_T_PROC_SYS_TSK,
+ ERTS_PROC_SYS_TASK_SIZE(0));
+ st->type = type;
+ st->requester = NIL;
+ st->reply_tag = NIL;
+ st->req_id = NIL;
+ st->req_id_sz = 0;
+ st->arg[0] = (Eterm)arg;
+ ERTS_INIT_OFF_HEAP(&st->off_heap);
+ state = erts_atomic32_read_nob(&rp->state);
+
+ fail_state = ERTS_PSFLG_EXITING;
+
+ if (!schedule_process_sys_task(rp, ERTS_PSFLGS_GET_USR_PRIO(state),
+ st, &fail_state))
+ erts_free(ERTS_ALC_T_PROC_SYS_TSK, st);
}
+}
- if (ERTS_PSFLGS_GET_ACT_PRIO(rp_state) > prio) {
- erts_aint32_t n, a, e;
- /* Need to elevate actual prio */
- a = rp_state;
- do {
- if (ERTS_PSFLGS_GET_ACT_PRIO(a) <= prio) {
- n = a;
- break;
- }
- n = e = a;
- n &= ~ERTS_PSFLGS_ACT_PRIO_MASK;
- n |= (prio << ERTS_PSFLGS_ACT_PRIO_OFFSET);
- a = erts_smp_atomic32_cmpxchg_nob(&rp->state, n, e);
- } while (a != e);
- rp_state = n;
+void
+erts_schedule_complete_off_heap_message_queue_change(Eterm pid)
+{
+ erts_schedule_generic_sys_task(pid, ERTS_PSTT_COHMQ, NULL);
+}
+
+void
+erts_schedule_ets_free_fixation(Eterm pid, DbFixation* fix)
+{
+ erts_schedule_generic_sys_task(pid, ERTS_PSTT_ETS_FREE_FIXATION, fix);
+}
+
+
+static void
+flush_dirty_trace_messages(void *vpid)
+{
+ Process *proc;
+ Eterm pid;
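+    /* On 64-bit platforms the pid travels directly in the void *
+     * argument; otherwise it arrives in allocated memory which is
+     * freed here. */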
+#ifdef ARCH_64
+ pid = (Eterm) vpid;
+#else
+ pid = *((Eterm *) vpid);
+ erts_free(ERTS_ALC_T_DIRTY_SL, vpid);
+#endif
+
+ proc = erts_pid2proc_opt(NULL, 0, pid, ERTS_PROC_LOCK_MAIN, 0);
+ if (proc) {
+ (void) erts_flush_trace_messages(proc, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
}
+}
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
- schedule_process_sys_task(rp, rp_state, NULL);
+void
+erts_schedule_flush_trace_messages(Process *proc, int force_on_proc)
+{
+ ErtsThrPrgrDelayHandle dhndl;
+ Eterm pid = proc->common.id;
- if (free_stqs)
- proc_sys_task_queues_free(free_stqs);
+ erts_aint32_t state;
- BIF_RET(am_ok);
+ if (!force_on_proc) {
+ state = erts_atomic32_read_nob(&proc->state);
+ if (state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ goto sched_flush_dirty;
+ }
+ }
-noproc:
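+    /* Delay thread-progress reclamation across the unlocked lookup
+     * and scheduling below; the caller may be an unmanaged thread. */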
+ dhndl = erts_thr_progress_unmanaged_delay();
- notify_sys_task_executed(BIF_P, st, noproc_res);
- if (free_stqs)
- proc_sys_task_queues_free(free_stqs);
- BIF_RET(am_ok);
+ erts_schedule_generic_sys_task(pid, ERTS_PSTT_FTMQ, NULL);
-badarg:
+ erts_thr_progress_unmanaged_continue(dhndl);
- if (st) {
- erts_cleanup_offheap(&st->off_heap);
- erts_free(ERTS_ALC_T_PROC_SYS_TSK, st);
+ if (!force_on_proc) {
+ state = erts_atomic32_read_mb(&proc->state);
+ if (state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ void *vargp;
+
+ sched_flush_dirty:
+	    /*
+	     * We traced 'proc' from a thread other than the one
+	     * it is executing on, and it is executing on a dirty
+	     * scheduler. It might take a significant amount of
+	     * time before it is scheduled out (which is when it
+	     * gets the opportunity to flush messages). We
+	     * therefore schedule the flush on the first ordinary
+	     * scheduler.
+	     */
+
+#ifdef ARCH_64
+ vargp = (void *) pid;
+#else
+ {
+ Eterm *argp = erts_alloc(ERTS_ALC_T_DIRTY_SL, sizeof(Eterm));
+ *argp = pid;
+ vargp = (void *) argp;
+ }
+#endif
+ erts_schedule_misc_aux_work(1, flush_dirty_trace_messages, vargp);
+ }
}
- if (free_stqs)
- proc_sys_task_queues_free(free_stqs);
- BIF_ERROR(BIF_P, BADARG);
}
static void
@@ -9994,7 +11076,7 @@ save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio)
erts_aint32_t state;
ErtsProcSysTaskQs *qs;
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
qs = ERTS_PROC_GET_DELAYED_GC_TASK_QS(c_p);
if (!qs) {
@@ -10007,7 +11089,7 @@ save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio)
qs->q[PRIORITY_NORMAL] = NULL;
qs->q[PRIORITY_LOW] = NULL;
qs->q[prio] = st;
- (void) ERTS_PROC_SET_DELAYED_GC_TASK_QS(c_p, ERTS_PROC_LOCK_MAIN, qs);
+ (void) ERTS_PROC_SET_DELAYED_GC_TASK_QS(c_p, qs);
}
else {
if (!qs->q[prio]) {
@@ -10024,8 +11106,11 @@ save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio)
}
}
- state = erts_smp_atomic32_read_nob(&c_p->state);
- ASSERT((ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS) & state);
+ state = erts_atomic32_read_nob(&c_p->state);
+ ASSERT((ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS) & state);
while (!(state & ERTS_PSFLG_DELAYED_SYS)
|| prio < ERTS_PSFLGS_GET_ACT_PRIO(state)) {
@@ -10037,20 +11122,27 @@ save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio)
n &= ~ERTS_PSFLGS_ACT_PRIO_MASK;
n |= prio << ERTS_PSFLGS_ACT_PRIO_OFFSET;
}
- state = erts_smp_atomic32_cmpxchg_relb(&c_p->state, n, e);
+ state = erts_atomic32_cmpxchg_relb(&c_p->state, n, e);
if (state == e)
break;
}
}
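+/* Push a dirty sys task onto the process' dirty task stack; it is
+ * picked up later by erts_execute_dirty_system_task() or by
+ * cleanup_sys_tasks(). */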
+static void
+save_dirty_task(Process *c_p, ErtsProcSysTask *st)
+{
+ st->next = c_p->dirty_sys_tasks;
+ c_p->dirty_sys_tasks = st;
+}
+
int
erts_set_gc_state(Process *c_p, int enable)
{
ErtsProcSysTaskQs *dgc_tsk_qs;
ASSERT(c_p == erts_get_current_process());
ASSERT((ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)
- & erts_smp_atomic32_read_nob(&c_p->state));
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
+ & erts_atomic32_read_nob(&c_p->state));
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
if (!enable) {
c_p->flags |= F_DISABLE_GC;
@@ -10065,7 +11157,7 @@ erts_set_gc_state(Process *c_p, int enable)
/* Move delayed gc tasks into sys tasks queues. */
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
if (!c_p->sys_task_qs) {
c_p->sys_task_qs = dgc_tsk_qs;
@@ -10138,7 +11230,7 @@ erts_set_gc_state(Process *c_p, int enable)
erts_aint32_t aprio, state =
#endif
- erts_smp_atomic32_read_bset_nob(&c_p->state,
+ erts_atomic32_read_bset_nob(&c_p->state,
(ERTS_PSFLG_DELAYED_SYS
| ERTS_PSFLG_ACTIVE_SYS),
ERTS_PSFLG_ACTIVE_SYS);
@@ -10152,9 +11244,9 @@ erts_set_gc_state(Process *c_p, int enable)
}
#endif
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
- (void) ERTS_PROC_SET_DELAYED_GC_TASK_QS(c_p, ERTS_PROC_LOCK_MAIN, NULL);
+ (void) ERTS_PROC_SET_DELAYED_GC_TASK_QS(c_p, NULL);
if (dgc_tsk_qs)
proc_sys_task_queues_free(dgc_tsk_qs);
@@ -10168,24 +11260,24 @@ erts_sched_stat_modify(int what)
int ix;
switch (what) {
case ERTS_SCHED_STAT_MODIFY_ENABLE:
- erts_smp_thr_progress_block();
+ erts_thr_progress_block();
erts_sched_stat.enabled = 1;
- erts_smp_thr_progress_unblock();
+ erts_thr_progress_unblock();
break;
case ERTS_SCHED_STAT_MODIFY_DISABLE:
- erts_smp_thr_progress_block();
+ erts_thr_progress_block();
erts_sched_stat.enabled = 0;
- erts_smp_thr_progress_unblock();
+ erts_thr_progress_unblock();
break;
case ERTS_SCHED_STAT_MODIFY_CLEAR:
- erts_smp_spin_lock(&erts_sched_stat.lock);
+ erts_spin_lock(&erts_sched_stat.lock);
for (ix = 0; ix < ERTS_NO_PRIO_LEVELS; ix++) {
erts_sched_stat.prio[ix].total_executed = 0;
erts_sched_stat.prio[ix].executed = 0;
erts_sched_stat.prio[ix].total_migrated = 0;
erts_sched_stat.prio[ix].migrated = 0;
}
- erts_smp_spin_unlock(&erts_sched_stat.lock);
+ erts_spin_unlock(&erts_sched_stat.lock);
break;
}
}
@@ -10199,7 +11291,7 @@ erts_sched_stat_term(Process *p, int total)
Uint executed[ERTS_NO_PRIO_LEVELS];
Uint migrated[ERTS_NO_PRIO_LEVELS];
- erts_smp_spin_lock(&erts_sched_stat.lock);
+ erts_spin_lock(&erts_sched_stat.lock);
if (total) {
int i;
for (i = 0; i < ERTS_NO_PRIO_LEVELS; i++) {
@@ -10218,7 +11310,7 @@ erts_sched_stat_term(Process *p, int total)
erts_sched_stat.prio[i].migrated = 0;
}
}
- erts_smp_spin_unlock(&erts_sched_stat.lock);
+ erts_spin_unlock(&erts_sched_stat.lock);
sz = 0;
(void) erts_bld_atom_2uint_3tup_list(NULL, &sz, ERTS_NO_PRIO_LEVELS,
@@ -10238,7 +11330,6 @@ erts_schedule_misc_op(void (*func)(void *), void *arg)
ErtsSchedulerData *esdp = erts_get_scheduler_data();
ErtsRunQueue *rq = esdp ? esdp->run_queue : ERTS_RUNQ_IX(0);
ErtsMiscOpList *molp = misc_op_list_alloc();
-#ifdef ERTS_SMP
ErtsMigrationPaths *mpaths = erts_get_migration_paths();
if (!mpaths)
@@ -10248,9 +11339,8 @@ erts_schedule_misc_op(void (*func)(void *), void *arg)
if (erq)
rq = erq;
}
-#endif
- erts_smp_runq_lock(rq);
+ erts_runq_lock(rq);
molp->next = NULL;
molp->func = func;
@@ -10261,11 +11351,11 @@ erts_schedule_misc_op(void (*func)(void *), void *arg)
rq->misc.start = molp;
rq->misc.end = molp;
-#ifdef ERTS_SMP
non_empty_runq(rq);
-#endif
- erts_smp_runq_unlock(rq);
+ ERTS_RUNQ_FLGS_SET_NOB(rq, ERTS_RUNQ_FLG_MISC_OP);
+
+ erts_runq_unlock(rq);
smp_notify_inc_runq(rq);
}
@@ -10295,7 +11385,10 @@ exec_misc_ops(ErtsRunQueue *rq)
rq->misc.end = NULL;
}
- erts_smp_runq_unlock(rq);
+ if (!rq->misc.start)
+ ERTS_RUNQ_FLGS_UNSET_NOB(rq, ERTS_RUNQ_FLG_MISC_OP);
+
+ erts_runq_unlock(rq);
while (molp) {
tmp_molp = molp;
@@ -10304,7 +11397,7 @@ exec_misc_ops(ErtsRunQueue *rq)
misc_op_list_free(tmp_molp);
}
- erts_smp_runq_lock(rq);
+ erts_runq_lock(rq);
}
Uint
@@ -10321,6 +11414,8 @@ erts_get_total_reductions(Uint *redsp, Uint *diffp)
Uint reds = 0;
ERTS_ATOMIC_FOREACH_RUNQ_X(rq,
+ erts_no_run_queues + ERTS_NUM_DIRTY_RUNQS,
reds += rq->procs.reductions,
if (redsp) *redsp = reds;
@@ -10333,12 +11428,12 @@ erts_get_exact_total_reductions(Process *c_p, Uint *redsp, Uint *diffp)
{
Uint reds = erts_current_reductions(c_p, c_p);
int ix;
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
/*
* Wait for other schedulers to schedule out their processes
* and update 'reductions'.
*/
- erts_smp_thr_progress_block();
+ erts_thr_progress_block();
for (reds = 0, ix = 0; ix < erts_no_run_queues; ix++)
reds += ERTS_RUNQ_IX(ix)->procs.reductions;
if (redsp)
@@ -10346,16 +11441,20 @@ erts_get_exact_total_reductions(Process *c_p, Uint *redsp, Uint *diffp)
if (diffp)
*diffp = reds - last_exact_reductions;
last_exact_reductions = reds;
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
+static void delete_process(Process* p);
+
void
erts_free_proc(Process *p)
{
-#ifdef ERTS_SMP
erts_proc_lock_fin(p);
-#endif
+ ASSERT(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_FREE);
+ ASSERT(0 == erts_proc_read_refc(p));
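+    /* If the final deletion was postponed (F_DELAYED_DEL_PROC),
+     * complete it now that the last reference is gone. */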
+ if (p->flags & F_DELAYED_DEL_PROC)
+ delete_process(p);
erts_free(ERTS_ALC_T_PROC, (void *) p);
}
@@ -10371,13 +11470,13 @@ static void early_init_process_struct(void *varg, Eterm data)
Process *proc = arg->proc;
proc->common.id = make_internal_pid(data);
- erts_smp_atomic32_init_relb(&proc->state, arg->state);
+ erts_atomic32_init_nob(&proc->dirty_state, 0);
+ proc->dirty_sys_tasks = NULL;
+ erts_atomic32_init_relb(&proc->state, arg->state);
-#ifdef ERTS_SMP
RUNQ_SET_RQ(&proc->run_queue, arg->run_queue);
erts_proc_lock_init(proc); /* All locks locked */
-#endif
}
@@ -10398,7 +11497,7 @@ alloc_process(ErtsRunQueue *rq, erts_aint32_t state)
init_arg.run_queue = rq;
init_arg.state = state;
- ASSERT(((char *) p) == ((char *) &p->common));
+ ERTS_CT_ASSERT(offsetof(Process,common) == 0);
if (!erts_ptab_new_element(&erts_proc,
&p->common,
@@ -10408,6 +11507,8 @@ alloc_process(ErtsRunQueue *rq, erts_aint32_t state)
return NULL;
}
+ ASSERT(erts_proc_read_refc(p) > 0);
+
ASSERT(internal_pid_serial(p->common.id) <= ERTS_MAX_PID_SERIAL);
p->approx_started = erts_get_approx_time();
@@ -10429,6 +11530,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
Eterm args, /* Arguments for function (must be well-formed list). */
ErlSpawnOpts* so) /* Options for spawn. */
{
+ Uint flags = 0;
ErtsRunQueue *rq = NULL;
Process *p;
Sint arity; /* Number of arguments. */
@@ -10438,11 +11540,17 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
Eterm res = THE_NON_VALUE;
erts_aint32_t state = 0;
erts_aint32_t prio = (erts_aint32_t) PRIORITY_NORMAL;
-
-#ifdef ERTS_SMP
- erts_smp_proc_lock(parent, ERTS_PROC_LOCKS_ALL_MINOR);
+ ErtsProcLocks locks = ERTS_PROC_LOCKS_ALL;
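+    /* With sharing-preserving copy (SHCOPY_SPAWN) the spawn arguments
+     * are copied with the copy_shared_*() routines below; otherwise a
+     * plain copy is made, copying (rather than referencing) literals
+     * from any literal area being purged. */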
+#ifdef SHCOPY_SPAWN
+ erts_shcopy_t info;
+ INITIALIZE_SHCOPY(info);
+#else
+ erts_literal_area_t litarea;
+ INITIALIZE_LITERAL_PURGE_AREA(litarea);
#endif
+ erts_proc_lock(parent, ERTS_PROC_LOCKS_ALL_MINOR);
+
/*
* Check for errors.
*/
@@ -10457,6 +11565,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
int ix = so->scheduler-1;
ASSERT(0 <= ix && ix < erts_no_run_queues);
rq = ERTS_RUNQ_IX(ix);
+ /* Unsupported feature... */
state |= ERTS_PSFLG_BOUND;
}
prio = (erts_aint32_t) so->priority;
@@ -10465,6 +11574,17 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
state |= (((prio & ERTS_PSFLGS_PRIO_MASK) << ERTS_PSFLGS_ACT_PRIO_OFFSET)
| ((prio & ERTS_PSFLGS_PRIO_MASK) << ERTS_PSFLGS_USR_PRIO_OFFSET));
+ if (so->flags & SPO_OFF_HEAP_MSGQ) {
+ state |= ERTS_PSFLG_OFF_HEAP_MSGQ;
+ flags |= F_OFF_HEAP_MSGQ;
+ }
+ else if (so->flags & SPO_ON_HEAP_MSGQ) {
+ state |= ERTS_PSFLG_ON_HEAP_MSGQ;
+ flags |= F_ON_HEAP_MSGQ;
+ }
+
+ ASSERT((flags & F_ON_HEAP_MSGQ) || (flags & F_OFF_HEAP_MSGQ));
+
if (!rq)
rq = erts_get_runq_proc(parent);
@@ -10477,33 +11597,42 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
goto error;
}
-#ifdef BM_COUNTERS
- processes_busy++;
-#endif
- BM_COUNT(processes_spawned);
+ ASSERT((erts_atomic32_read_nob(&p->state)
+ & ERTS_PSFLG_ON_HEAP_MSGQ)
+ || (erts_atomic32_read_nob(&p->state)
+ & ERTS_PSFLG_OFF_HEAP_MSGQ));
- BM_SWAP_TIMER(system,size);
- arg_size = size_object(args);
- BM_SWAP_TIMER(size,system);
+#ifdef SHCOPY_SPAWN
+ arg_size = copy_shared_calculate(args, &info);
+#else
+ arg_size = size_object_litopt(args, &litarea);
+#endif
heap_need = arg_size;
- p->flags = erts_default_process_flags;
+ p->flags = flags;
+ p->static_flags = 0;
+ if (so->flags & SPO_SYSTEM_PROC)
+ p->static_flags |= ERTS_STC_FLG_SYSTEM_PROC;
if (so->flags & SPO_USE_ARGS) {
p->min_heap_size = so->min_heap_size;
p->min_vheap_size = so->min_vheap_size;
p->max_gen_gcs = so->max_gen_gcs;
+ MAX_HEAP_SIZE_SET(p, so->max_heap_size);
+ MAX_HEAP_SIZE_FLAGS_SET(p, so->max_heap_flags);
} else {
p->min_heap_size = H_MIN_SIZE;
p->min_vheap_size = BIN_VH_MIN_SIZE;
- p->max_gen_gcs = (Uint16) erts_smp_atomic32_read_nob(&erts_max_gen_gcs);
+ MAX_HEAP_SIZE_SET(p, H_MAX_SIZE);
+ MAX_HEAP_SIZE_FLAGS_SET(p, H_MAX_FLAGS);
+ p->max_gen_gcs = (Uint16) erts_atomic32_read_nob(&erts_max_gen_gcs);
}
p->schedule_count = 0;
ASSERT(p->min_heap_size == erts_next_heap_size(p->min_heap_size, 0));
-
- p->initial[INITIAL_MOD] = mod;
- p->initial[INITIAL_FUN] = func;
- p->initial[INITIAL_ARI] = (Uint) arity;
+
+ p->u.initial.module = mod;
+ p->u.initial.function = func;
+ p->u.initial.arity = (Uint) arity;
/*
* Must initialize binary lists here before copying binaries to process.
@@ -10522,10 +11651,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
#ifdef HIPE
hipe_init_process(&p->hipe);
-#ifdef ERTS_SMP
hipe_init_process_smp(&p->hipe_smp);
#endif
-#endif
p->heap = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP, sizeof(Eterm)*sz);
p->old_hend = p->old_htop = p->old_heap = NULL;
p->high_water = p->heap;
@@ -10533,18 +11660,19 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->stop = p->hend = p->heap + sz;
p->htop = p->heap;
p->heap_sz = sz;
+ p->abandoned_heap = NULL;
+ p->live_hf_end = ERTS_INVALID_HFRAG_PTR;
p->catches = 0;
p->bin_vheap_sz = p->min_vheap_size;
p->bin_old_vheap_sz = p->min_vheap_size;
p->bin_old_vheap = 0;
- p->bin_vheap_mature = 0;
p->sys_task_qs = NULL;
/* No need to initialize p->fcalls. */
- p->current = p->initial+INITIAL_MOD;
+ p->current = &p->u.initial;
p->i = (BeamInstr *) beam_apply;
p->cp = (BeamInstr *) beam_apply+1;
@@ -10553,13 +11681,12 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->max_arg_reg = sizeof(p->def_arg_reg)/sizeof(p->def_arg_reg[0]);
p->arg_reg[0] = mod;
p->arg_reg[1] = func;
- BM_STOP_TIMER(system);
- BM_MESSAGE(args,p,parent);
- BM_START_TIMER(system);
- BM_SWAP_TIMER(system,copy);
- p->arg_reg[2] = copy_struct(args, arg_size, &p->htop, &p->off_heap);
- BM_MESSAGE_COPIED(arg_size);
- BM_SWAP_TIMER(copy,system);
+#ifdef SHCOPY_SPAWN
+ p->arg_reg[2] = copy_shared_perform(args, arg_size, &info, &p->htop, &p->off_heap);
+ DESTROY_SHCOPY(info);
+#else
+ p->arg_reg[2] = copy_struct_litopt(args, arg_size, &p->htop, &p->off_heap, &litarea);
+#endif
p->arity = 3;
p->fvalue = NIL;
@@ -10567,11 +11694,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->ftrace = NIL;
p->reds = 0;
-#ifdef ERTS_SMP
- p->common.u.alive.ptimer = NULL;
-#else
- sys_memset(&p->common.u.alive.tm, 0, sizeof(ErlTimer));
-#endif
+ ERTS_PTMR_INIT(p);
p->common.u.alive.reg = NULL;
ERTS_P_LINKS(p) = NULL;
@@ -10591,21 +11714,21 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
: STORE_NC(&p->htop, &p->off_heap, parent->group_leader);
}
- erts_get_default_tracing(&ERTS_TRACE_FLAGS(p), &ERTS_TRACER_PROC(p));
+ erts_get_default_proc_tracing(&ERTS_TRACE_FLAGS(p), &ERTS_TRACER(p));
p->msg.first = NULL;
p->msg.last = &p->msg.first;
p->msg.save = &p->msg.first;
+ p->msg.saved_last = &p->msg.first;
p->msg.len = 0;
-#ifdef ERTS_SMP
p->msg_inq.first = NULL;
p->msg_inq.last = &p->msg_inq.first;
p->msg_inq.len = 0;
-#endif
- p->u.bif_timers = NULL;
+ p->bif_timers = NULL;
p->mbuf = NULL;
+ p->msg_frag = NULL;
p->mbuf_sz = 0;
- p->psd = NULL;
+ erts_atomic_init_nob(&p->psd, (erts_aint_t) NULL);
p->dictionary = NULL;
p->seq_trace_lastcnt = 0;
p->seq_trace_clock = 0;
@@ -10623,21 +11746,58 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->last_old_htop = NULL;
#endif
+ p->trace_msg_q = NULL;
+ p->scheduler_data = NULL;
+ p->suspendee = NIL;
+ p->pending_suspenders = NULL;
+ p->pending_exit.reason = THE_NON_VALUE;
+ p->pending_exit.bp = NULL;
+
+#if !defined(NO_FPE_SIGNALS) || defined(HIPE)
+ p->fp_exception = 0;
+#endif
+
if (IS_TRACED(parent)) {
if (ERTS_TRACE_FLAGS(parent) & F_TRACE_SOS) {
ERTS_TRACE_FLAGS(p) |= (ERTS_TRACE_FLAGS(parent) & TRACEE_FLAGS);
- ERTS_TRACER_PROC(p) = ERTS_TRACER_PROC(parent);
- }
- if (ARE_TRACE_FLAGS_ON(parent, F_TRACE_PROCS)) {
- trace_proc_spawn(parent, p->common.id, mod, func, args);
+ erts_tracer_replace(&p->common, ERTS_TRACER(parent));
}
- if (ERTS_TRACE_FLAGS(parent) & F_TRACE_SOS1) {
+ if (ERTS_TRACE_FLAGS(parent) & F_TRACE_SOS1) {
/* Overrides TRACE_CHILDREN */
ERTS_TRACE_FLAGS(p) |= (ERTS_TRACE_FLAGS(parent) & TRACEE_FLAGS);
- ERTS_TRACER_PROC(p) = ERTS_TRACER_PROC(parent);
+ erts_tracer_replace(&p->common, ERTS_TRACER(parent));
ERTS_TRACE_FLAGS(p) &= ~(F_TRACE_SOS1 | F_TRACE_SOS);
ERTS_TRACE_FLAGS(parent) &= ~(F_TRACE_SOS1 | F_TRACE_SOS);
}
+ if (so->flags & SPO_LINK && ERTS_TRACE_FLAGS(parent) & (F_TRACE_SOL|F_TRACE_SOL1)) {
+ ERTS_TRACE_FLAGS(p) |= (ERTS_TRACE_FLAGS(parent)&TRACEE_FLAGS);
+ erts_tracer_replace(&p->common, ERTS_TRACER(parent));
+ if (ERTS_TRACE_FLAGS(parent) & F_TRACE_SOL1) {/*maybe override*/
+ ERTS_TRACE_FLAGS(p) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
+ ERTS_TRACE_FLAGS(parent) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
+ }
+ }
+ if (ARE_TRACE_FLAGS_ON(parent, F_TRACE_PROCS)) {
+ locks &= ~(ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ erts_proc_unlock(parent, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ trace_proc_spawn(parent, am_spawn, p->common.id, mod, func, args);
+ if (so->flags & SPO_LINK)
+ trace_proc(parent, locks, parent, am_link, p->common.id);
+ }
+ }
+
+ if (IS_TRACED_FL(p, F_TRACE_PROCS)) {
+ if ((locks & (ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE))
+ == (ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE)) {
+ /* This happens when parent was not traced, but child is */
+ locks &= ~(ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ erts_proc_unlock(parent, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ }
+ trace_proc_spawn(p, am_spawned, parent->common.id, mod, func, args);
+ if (so->flags & SPO_LINK)
+ trace_proc(p, locks, p, am_getting_linked, parent->common.id);
}
/*
@@ -10648,10 +11808,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
#ifdef DEBUG
int ret;
#endif
- if (IS_TRACED_FL(parent, F_TRACE_PROCS)) {
- trace_proc(parent, parent, am_link, p->common.id);
- }
-
#ifdef DEBUG
ret = erts_add_link(&ERTS_P_LINKS(parent), LINK_PID, p->common.id);
ASSERT(ret == 0);
@@ -10662,17 +11818,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
erts_add_link(&ERTS_P_LINKS(p), LINK_PID, parent->common.id);
#endif
- if (IS_TRACED(parent)) {
- if (ERTS_TRACE_FLAGS(parent) & (F_TRACE_SOL|F_TRACE_SOL1)) {
- ERTS_TRACE_FLAGS(p) |= (ERTS_TRACE_FLAGS(parent)&TRACEE_FLAGS);
- ERTS_TRACER_PROC(p) = ERTS_TRACER_PROC(parent); /*maybe steal*/
-
- if (ERTS_TRACE_FLAGS(parent) & F_TRACE_SOL1) {/*maybe override*/
- ERTS_TRACE_FLAGS(p) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
- ERTS_TRACE_FLAGS(parent) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
- }
- }
- }
}
/*
@@ -10687,19 +11832,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
so->mref = mref;
}
-#ifdef ERTS_SMP
- p->scheduler_data = NULL;
- p->suspendee = NIL;
- p->pending_suspenders = NULL;
- p->pending_exit.reason = THE_NON_VALUE;
- p->pending_exit.bp = NULL;
-#endif
-
-#if !defined(NO_FPE_SIGNALS) || defined(HIPE)
- p->fp_exception = 0;
-#endif
-
- erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
+ erts_proc_unlock(p, locks);
res = p->common.id;
@@ -10707,23 +11840,27 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
* Schedule process for execution.
*/
- schedule_process(p, state);
+ erts_proc_unlock(parent, locks & ERTS_PROC_LOCKS_ALL_MINOR);
+
+ schedule_process(p, state, 0);
VERBOSE(DEBUG_PROCESSES, ("Created a new process: %T\n",p->common.id));
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(process_spawn)) {
+ ErtsCodeMFA cmfa = {mod, func, arity};
DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE);
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE);
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE);
- dtrace_fun_decode(p, mod, func, arity, process_name, mfa);
- DTRACE2(process_spawn, process_name, mfa);
+ dtrace_fun_decode(p, &cmfa, process_name, mfa_buf);
+ DTRACE2(process_spawn, process_name, mfa_buf);
}
#endif
+ return res;
error:
- erts_smp_proc_unlock(parent, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_unlock(parent, locks & ERTS_PROC_LOCKS_ALL_MINOR);
return res;
}
@@ -10739,6 +11876,8 @@ void erts_init_empty_process(Process *p)
p->stop = NULL;
p->hend = NULL;
p->heap = NULL;
+ p->abandoned_heap = NULL;
+ p->live_hf_end = ERTS_INVALID_HFRAG_PTR;
p->gen_gcs = 0;
p->max_gen_gcs = 0;
p->min_heap_size = 0;
@@ -10746,7 +11885,7 @@ void erts_init_empty_process(Process *p)
p->rcount = 0;
p->common.id = ERTS_INVALID_PID;
p->reds = 0;
- ERTS_TRACER_PROC(p) = NIL;
+ ERTS_TRACER(p) = erts_tracer_nil;
ERTS_TRACE_FLAGS(p) = F_INITIAL_TRACE_FLAGS;
p->group_leader = ERTS_INVALID_PID;
p->flags = 0;
@@ -10759,12 +11898,7 @@ void erts_init_empty_process(Process *p)
p->bin_old_vheap_sz = BIN_VH_MIN_SIZE;
p->bin_old_vheap = 0;
p->sys_task_qs = NULL;
- p->bin_vheap_mature = 0;
-#ifdef ERTS_SMP
- p->common.u.alive.ptimer = NULL;
-#else
- memset(&(p->common.u.alive.tm), 0, sizeof(ErlTimer));
-#endif
+ ERTS_PTMR_INIT(p);
p->next = NULL;
p->off_heap.first = NULL;
p->off_heap.overhead = 0;
@@ -10775,8 +11909,9 @@ void erts_init_empty_process(Process *p)
p->old_htop = NULL;
p->old_heap = NULL;
p->mbuf = NULL;
+ p->msg_frag = NULL;
p->mbuf_sz = 0;
- p->psd = NULL;
+ erts_atomic_init_nob(&p->psd, (erts_aint_t) NULL);
ERTS_P_MONITORS(p) = NULL;
ERTS_P_LINKS(p) = NULL; /* List of links */
p->nodes_monitors = NULL;
@@ -10785,14 +11920,14 @@ void erts_init_empty_process(Process *p)
p->msg.last = &p->msg.first;
p->msg.save = &p->msg.first;
p->msg.len = 0;
- p->u.bif_timers = NULL;
+ p->bif_timers = NULL;
p->dictionary = NULL;
p->seq_trace_clock = 0;
p->seq_trace_lastcnt = 0;
p->seq_trace_token = NIL;
- p->initial[0] = 0;
- p->initial[1] = 0;
- p->initial[2] = 0;
+ p->u.initial.module = 0;
+ p->u.initial.function = 0;
+ p->u.initial.arity = 0;
p->catches = 0;
p->cp = NULL;
p->i = NULL;
@@ -10813,23 +11948,24 @@ void erts_init_empty_process(Process *p)
p->parent = NIL;
p->approx_started = 0;
+ p->static_flags = 0;
+
p->common.u.alive.started_interval = 0;
#ifdef HIPE
hipe_init_process(&p->hipe);
-#ifdef ERTS_SMP
hipe_init_process_smp(&p->hipe_smp);
#endif
-#endif
INIT_HOLE_CHECK(p);
#ifdef DEBUG
p->last_old_htop = NULL;
#endif
- erts_smp_atomic32_init_nob(&p->state, (erts_aint32_t) PRIORITY_NORMAL);
+ erts_atomic32_init_nob(&p->dirty_state, 0);
+ p->dirty_sys_tasks = NULL;
+ erts_atomic32_init_nob(&p->state, (erts_aint32_t) PRIORITY_NORMAL);
-#ifdef ERTS_SMP
p->scheduler_data = NULL;
p->msg_inq.first = NULL;
p->msg_inq.last = &p->msg_inq.first;
@@ -10839,9 +11975,8 @@ void erts_init_empty_process(Process *p)
p->pending_exit.reason = THE_NON_VALUE;
p->pending_exit.bp = NULL;
erts_proc_lock_init(p);
- erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
+ erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
RUNQ_SET_RQ(&p->run_queue, ERTS_RUNQ_IX(0));
-#endif
#if !defined(NO_FPE_SIGNALS) || defined(HIPE)
p->fp_exception = 0;
@@ -10858,9 +11993,11 @@ erts_debug_verify_clean_empty_process(Process* p)
ASSERT(p->htop == NULL);
ASSERT(p->stop == NULL);
ASSERT(p->hend == NULL);
+ ASSERT(p->abandoned_heap == NULL);
+ ASSERT(p->live_hf_end == ERTS_INVALID_HFRAG_PTR);
ASSERT(p->heap == NULL);
ASSERT(p->common.id == ERTS_INVALID_PID);
- ASSERT(ERTS_TRACER_PROC(p) == NIL);
+ ASSERT(ERTS_TRACER_IS_NIL(ERTS_TRACER(p)));
ASSERT(ERTS_TRACE_FLAGS(p) == F_INITIAL_TRACE_FLAGS);
ASSERT(p->group_leader == ERTS_INVALID_PID);
ASSERT(p->next == NULL);
@@ -10877,7 +12014,7 @@ erts_debug_verify_clean_empty_process(Process* p)
ASSERT(p->suspend_monitors == NULL);
ASSERT(p->msg.first == NULL);
ASSERT(p->msg.len == 0);
- ASSERT(p->u.bif_timers == NULL);
+ ASSERT(p->bif_timers == NULL);
ASSERT(p->dictionary == NULL);
ASSERT(p->catches == 0);
ASSERT(p->cp == NULL);
@@ -10886,14 +12023,12 @@ erts_debug_verify_clean_empty_process(Process* p)
ASSERT(p->parent == NIL);
-#ifdef ERTS_SMP
ASSERT(p->msg_inq.first == NULL);
ASSERT(p->msg_inq.len == 0);
ASSERT(p->suspendee == NIL);
ASSERT(p->pending_suspenders == NULL);
ASSERT(p->pending_exit.reason == THE_NON_VALUE);
ASSERT(p->pending_exit.bp == NULL);
-#endif
/* Thing that erts_cleanup_empty_process() cleans up */
@@ -10918,28 +12053,44 @@ erts_cleanup_empty_process(Process* p)
free_message_buffer(p->mbuf);
p->mbuf = NULL;
}
-#ifdef ERTS_SMP
erts_proc_lock_fin(p);
-#endif
#ifdef DEBUG
erts_debug_verify_clean_empty_process(p);
#endif
}
-/*
- * p must be the currently executing process.
- */
static void
delete_process(Process* p)
{
- ErlMessage* mp;
+ ErtsPSD *psd;
+ struct saved_calls *scb;
+ process_breakpoint_time_t *pbt;
VERBOSE(DEBUG_PROCESSES, ("Removing process: %T\n",p->common.id));
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] delete process: %p %p %p %p\n", p->common.id,
+ HEAP_START(p), HEAP_END(p), OLD_HEAP(p), OLD_HEND(p)));
+
+ scb = ERTS_PROC_SET_SAVED_CALLS_BUF(p, NULL);
+
+ if (scb) {
+ p->fcalls += CONTEXT_REDS; /* Reduction counting depends on this... */
+ erts_free(ERTS_ALC_T_CALLS_BUF, (void *) scb);
+ }
+
+ pbt = ERTS_PROC_SET_CALL_TIME(p, NULL);
+ if (pbt)
+ erts_free(ERTS_ALC_T_BPD, (void *) pbt);
+
+ erts_destroy_nif_export(p);
/* Cleanup psd */
- if (p->psd)
- erts_free(ERTS_ALC_T_PSD, p->psd);
+ psd = (ErtsPSD *) erts_atomic_read_nob(&p->psd);
+
+ if (psd) {
+ erts_atomic_set_nob(&p->psd, (erts_aint_t) NULL); /* Reduction counting depends on this... */
+ erts_free(ERTS_ALC_T_PSD, psd);
+ }
/* Clean binaries and funs */
erts_cleanup_offheap(&p->off_heap);
@@ -10958,16 +12109,12 @@ delete_process(Process* p)
* Release heaps. Clobber contents in DEBUG build.
*/
-
-#ifdef DEBUG
- sys_memset(p->heap, DEBUG_BAD_BYTE, p->heap_sz*sizeof(Eterm));
-#endif
-
#ifdef HIPE
hipe_delete_process(&p->hipe);
#endif
- ERTS_HEAP_FREE(ERTS_ALC_T_HEAP, (void*) p->heap, p->heap_sz*sizeof(Eterm));
+ erts_deallocate_young_generation(p);
+
if (p->old_heap != NULL) {
#ifdef DEBUG
@@ -10979,34 +12126,11 @@ delete_process(Process* p)
(p->old_hend-p->old_heap)*sizeof(Eterm));
}
- /*
- * Free all pending message buffers.
- */
- if (p->mbuf != NULL) {
- free_message_buffer(p->mbuf);
- }
-
erts_erase_dicts(p);
/* free all pending messages */
- mp = p->msg.first;
- while(mp != NULL) {
- ErlMessage* next_mp = mp->next;
- if (mp->data.attached) {
- if (is_value(mp->m[0]))
- free_message_buffer(mp->data.heap_frag);
- else {
- if (is_not_nil(mp->m[1])) {
- ErlHeapFragment *heap_frag;
- heap_frag = (ErlHeapFragment *) mp->data.dist_ext->ext_endp;
- erts_cleanup_offheap(&heap_frag->off_heap);
- }
- erts_free_dist_ext_copy(mp->data.dist_ext);
- }
- }
- free_message(mp);
- mp = next_mp;
- }
+ erts_cleanup_messages(p->msg.first);
+ p->msg.first = NULL;
ASSERT(!p->nodes_monitors);
ASSERT(!p->suspend_monitors);
@@ -11022,13 +12146,16 @@ set_proc_exiting(Process *p,
{
erts_aint32_t state = in_state, enq_prio = -1;
int enqueue;
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(p) == ERTS_PROC_LOCKS_ALL);
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(p) == ERTS_PROC_LOCKS_ALL);
enqueue = change_proc_schedule_state(p,
- ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT,
+ (ERTS_PSFLG_SUSPENDED
+ | ERTS_PSFLG_PENDING_EXIT
+ | ERTS_PSFLGS_DIRTY_WORK),
ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE,
&state,
- &enq_prio);
+ &enq_prio,
+ ERTS_PROC_LOCKS_ALL);
p->fvalue = reason;
if (bp)
@@ -11040,13 +12167,10 @@ set_proc_exiting(Process *p,
*/
p->freason = EXTAG_EXIT;
KILL_CATCHES(p);
- cancel_timer(p);
p->i = (BeamInstr *) beam_exit;
- if (enqueue)
- add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio),
- state,
- enq_prio);
+
+ add2runq(enqueue, enq_prio, p, state, NULL);
}
static ERTS_INLINE erts_aint32_t
@@ -11057,10 +12181,13 @@ set_proc_self_exiting(Process *c_p)
#endif
erts_aint32_t state, enq_prio = -1;
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCKS_ALL);
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCKS_ALL);
- state = erts_smp_atomic32_read_nob(&c_p->state);
- ASSERT(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS));
+ state = erts_atomic32_read_nob(&c_p->state);
+ ASSERT(state & (ERTS_PSFLG_RUNNING
+ |ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS));
#ifdef DEBUG
enqueue =
@@ -11069,99 +12196,127 @@ set_proc_self_exiting(Process *c_p)
ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT,
ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE,
&state,
- &enq_prio);
+ &enq_prio,
+ ERTS_PROC_LOCKS_ALL);
ASSERT(!enqueue);
return state;
}
-#ifdef ERTS_SMP
void
erts_handle_pending_exit(Process *c_p, ErtsProcLocks locks)
{
ErtsProcLocks xlocks;
ASSERT(is_value(c_p->pending_exit.reason));
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == locks);
- ERTS_SMP_LC_ASSERT(locks & ERTS_PROC_LOCK_MAIN);
- ERTS_SMP_LC_ASSERT(!((ERTS_PSFLG_EXITING|ERTS_PSFLG_FREE)
- & erts_smp_atomic32_read_nob(&c_p->state)));
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == locks);
+ ERTS_LC_ASSERT(locks & ERTS_PROC_LOCK_MAIN);
+ ERTS_LC_ASSERT(!((ERTS_PSFLG_EXITING|ERTS_PSFLG_FREE)
+ & erts_atomic32_read_nob(&c_p->state)));
/* Ensure that all locks on c_p are locked before proceeding... */
if (locks == ERTS_PROC_LOCKS_ALL)
xlocks = 0;
else {
xlocks = ~locks & ERTS_PROC_LOCKS_ALL;
- if (erts_smp_proc_trylock(c_p, xlocks) == EBUSY) {
- erts_smp_proc_unlock(c_p, locks & ~ERTS_PROC_LOCK_MAIN);
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ if (erts_proc_trylock(c_p, xlocks) == EBUSY) {
+ erts_proc_unlock(c_p, locks & ~ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
}
}
set_proc_exiting(c_p,
- erts_smp_atomic32_read_acqb(&c_p->state),
+ erts_atomic32_read_acqb(&c_p->state),
c_p->pending_exit.reason,
c_p->pending_exit.bp);
c_p->pending_exit.reason = THE_NON_VALUE;
c_p->pending_exit.bp = NULL;
if (xlocks)
- erts_smp_proc_unlock(c_p, xlocks);
+ erts_proc_unlock(c_p, xlocks);
}
+static void save_pending_exiter(Process *p, ErtsProcList *plp);
+
static void
-handle_pending_exiters(ErtsProcList *pnd_xtrs)
+do_handle_pending_exiters(ErtsProcList *pnd_xtrs)
{
/* 'list' is expected to have been fetched (i.e. not a ring anymore) */
ErtsProcList *plp = pnd_xtrs;
while (plp) {
- ErtsProcList *free_plp;
- Process *p = erts_pid2proc(NULL, 0, plp->pid, ERTS_PROC_LOCKS_ALL);
+ ErtsProcList *next_plp = plp->next;
+ Process *p = erts_proc_lookup(plp->pid);
if (p) {
- if (erts_proclist_same(plp, p)) {
- erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state);
- if (!(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS))) {
- ASSERT(state & ERTS_PSFLG_PENDING_EXIT);
- erts_handle_pending_exit(p, ERTS_PROC_LOCKS_ALL);
+ erts_aint32_t state;
+ /*
+ * If the process is running on a normal scheduler, the
+ * pending exit will soon be detected and handled by the
+ * scheduler running the process (at schedule in/out).
+ */
+ if (erts_proc_trylock(p, ERTS_PROC_LOCKS_ALL) != EBUSY) {
+ if (erts_proclist_same(plp, p)) {
+ state = erts_atomic32_read_acqb(&p->state);
+ if (!(state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_EXITING))) {
+ ASSERT(state & ERTS_PSFLG_PENDING_EXIT);
+ erts_handle_pending_exit(p, ERTS_PROC_LOCKS_ALL);
+ }
}
+ erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
+ }
+ else {
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ if (erts_proclist_same(plp, p)) {
+ state = erts_atomic32_read_acqb(&p->state);
+ if (!(state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_EXITING))) {
+ /*
+ * Save process and try to acquire all
+ * locks at a later time...
+ */
+ save_pending_exiter(p, plp);
+ plp = NULL;
+ }
+ }
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
}
- erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
}
- free_plp = plp;
- plp = plp->next;
- proclist_destroy(free_plp);
+ if (plp)
+ proclist_destroy(plp);
+ plp = next_plp;
}
}
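
The rewrite above turns the pending-exiters pass into a non-blocking one: erts_proc_trylock is attempted first, and on EBUSY the entry is re-queued via save_pending_exiter instead of stalling the scheduler thread. A minimal standalone sketch of this trylock-or-defer pattern, using POSIX threads and invented names (not ERTS APIs):

#include <pthread.h>
#include <stdio.h>

typedef struct job { struct job *next; int id; } job;

static job *deferred; /* jobs whose lock was busy; retried on a later pass */

/* Try to handle a job without blocking; defer it if the resource
 * is currently locked by someone else. */
static void handle_or_defer(pthread_mutex_t *res, job *j)
{
    if (pthread_mutex_trylock(res) == 0) {
        printf("handled job %d\n", j->id); /* the real work */
        pthread_mutex_unlock(res);
    } else {
        j->next = deferred; /* push onto the retry list */
        deferred = j;
    }
}
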
static void
-save_pending_exiter(Process *p)
+save_pending_exiter(Process *p, ErtsProcList *plp)
{
- ErtsProcList *plp;
+ ErtsSchedulerSleepInfo *ssi;
ErtsRunQueue *rq;
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
- rq = erts_get_runq_current(NULL);
+ rq = RUNQ_READ_RQ(&p->run_queue);
+ ASSERT(rq && !ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
- plp = proclist_create(p);
+ if (!plp)
+ plp = proclist_create(p);
- erts_smp_runq_lock(rq);
+ erts_runq_lock(rq);
erts_proclist_store_last(&rq->procs.pending_exiters, plp);
non_empty_runq(rq);
- erts_smp_runq_unlock(rq);
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
- wake_dirty_schedulers(rq, 0);
- else
-#endif
- wake_scheduler(rq);
+ ssi = rq->scheduler->ssi;
+
+ erts_runq_unlock(rq);
+
+ set_aux_work_flags_wakeup_nob(ssi, ERTS_SSI_AUX_WORK_PENDING_EXITERS);
}
-#endif
/*
* This function delivers an EXIT message to a process
@@ -11172,43 +12327,47 @@ static ERTS_INLINE void
send_exit_message(Process *to, ErtsProcLocks *to_locksp,
Eterm exit_term, Uint term_size, Eterm token)
{
- if (token == NIL
-#ifdef USE_VM_PROBES
- || token == am_have_dt_utag
-#endif
- ) {
- Eterm* hp;
- Eterm mess;
- ErlHeapFragment* bp;
- ErlOffHeap *ohp;
-
- hp = erts_alloc_message_heap(term_size, &bp, &ohp, to, to_locksp);
+ ErtsMessage *mp;
+ ErlOffHeap *ohp;
+ Eterm* hp;
+ Eterm mess;
+#ifdef SHCOPY_SEND
+ erts_shcopy_t info;
+#endif
+
+ if (!have_seqtrace(token)) {
+#ifdef SHCOPY_SEND
+ INITIALIZE_SHCOPY(info);
+ term_size = copy_shared_calculate(exit_term, &info);
+ mp = erts_alloc_message_heap(to, to_locksp, term_size, &hp, &ohp);
+ mess = copy_shared_perform(exit_term, term_size, &info, &hp, ohp);
+ DESTROY_SHCOPY(info);
+#else
+ mp = erts_alloc_message_heap(to, to_locksp, term_size, &hp, &ohp);
mess = copy_struct(exit_term, term_size, &hp, ohp);
- erts_queue_message(to, to_locksp, bp, mess, NIL
-#ifdef USE_VM_PROBES
- , NIL
#endif
- );
+ erts_queue_message(to, *to_locksp, mp, mess, am_system);
} else {
- ErlHeapFragment* bp;
- Eterm* hp;
- Eterm mess;
Eterm temp_token;
Uint sz_token;
ASSERT(is_tuple(token));
sz_token = size_object(token);
- bp = new_message_buffer(term_size+sz_token);
- hp = bp->mem;
- mess = copy_struct(exit_term, term_size, &hp, &bp->off_heap);
- /* the trace token must in this case be updated by the caller */
- seq_trace_output(token, mess, SEQ_TRACE_SEND, to->common.id, NULL);
- temp_token = copy_struct(token, sz_token, &hp, &bp->off_heap);
- erts_queue_message(to, to_locksp, bp, mess, temp_token
-#ifdef USE_VM_PROBES
- , NIL
+#ifdef SHCOPY_SEND
+ INITIALIZE_SHCOPY(info);
+ term_size = copy_shared_calculate(exit_term, &info);
+ mp = erts_alloc_message_heap(to, to_locksp, term_size+sz_token, &hp, &ohp);
+ mess = copy_shared_perform(exit_term, term_size, &info, &hp, ohp);
+ DESTROY_SHCOPY(info);
+#else
+ mp = erts_alloc_message_heap(to, to_locksp, term_size+sz_token, &hp, &ohp);
+ mess = copy_struct(exit_term, term_size, &hp, ohp);
#endif
- );
+ /* the trace token must in this case be updated by the caller */
+ seq_trace_output(token, mess, SEQ_TRACE_SEND, to->common.id, to);
+ temp_token = copy_struct(token, sz_token, &hp, ohp);
+ ERL_MESSAGE_TOKEN(mp) = temp_token;
+ erts_queue_message(to, *to_locksp, mp, mess, am_system);
}
}
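
When SHCOPY_SEND is enabled, the message copy above is done in two phases: copy_shared_calculate first sizes the term (preserving sharing), then copy_shared_perform fills a heap allocated to exactly that size. A hedged standalone sketch of the size-then-copy idea on a toy cons list (all names illustrative):

#include <stdlib.h>

typedef struct cell { int head; struct cell *tail; } cell;

/* Pass 1: compute how many cells the copy needs. */
static size_t list_size(const cell *l)
{
    size_t n = 0;
    for (; l; l = l->tail)
        n++;
    return n;
}

/* Pass 2: copy into a buffer sized by pass 1; no reallocation. */
static cell *list_copy(const cell *l, cell *buf)
{
    cell *dst = buf;
    for (; l; l = l->tail, dst++) {
        dst->head = l->head;
        dst->tail = l->tail ? dst + 1 : NULL;
    }
    return buf;
}

/* usage: cell *copy = list_copy(l, malloc(list_size(l) * sizeof(cell))); */
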
@@ -11293,11 +12452,11 @@ send_exit_signal(Process *c_p, /* current process if and only
Uint32 flags /* flags */
)
{
- erts_aint32_t state = erts_smp_atomic32_read_nob(&rp->state);
+ erts_aint32_t state = erts_atomic32_read_nob(&rp->state);
Eterm rsn = reason == am_kill ? am_killed : reason;
- ERTS_SMP_LC_ASSERT(*rp_locks == erts_proc_lc_my_proc_locks(rp));
- ERTS_SMP_LC_ASSERT((*rp_locks & ERTS_PROC_LOCKS_XSIG_SEND)
+ ERTS_LC_ASSERT(*rp_locks == erts_proc_lc_my_proc_locks(rp));
+ ERTS_LC_ASSERT((*rp_locks & ERTS_PROC_LOCKS_XSIG_SEND)
== ERTS_PROC_LOCKS_XSIG_SEND);
ASSERT(reason != THE_NON_VALUE);
@@ -11317,11 +12476,10 @@ send_exit_signal(Process *c_p, /* current process if and only
if ((state & ERTS_PSFLG_TRAP_EXIT)
&& (reason != am_kill || (flags & ERTS_XSIG_FLG_IGN_KILL))) {
- if (is_not_nil(token)
-#ifdef USE_VM_PROBES
- && token != am_have_dt_utag
-#endif
- && token_update)
+ /* have to release the status and trace lock in order to send the exit message */
+ erts_proc_unlock(rp, *rp_locks & (ERTS_PROC_LOCKS_XSIG_SEND|ERTS_PROC_LOCK_TRACE));
+ *rp_locks &= ~(ERTS_PROC_LOCKS_XSIG_SEND|ERTS_PROC_LOCK_TRACE);
+ if (have_seqtrace(token) && token_update)
seq_trace_update_send(token_update);
if (is_value(exit_tuple))
send_exit_message(rp, rp_locks, exit_tuple, exit_tuple_sz, token);
@@ -11330,7 +12488,6 @@ send_exit_signal(Process *c_p, /* current process if and only
return 1; /* Receiver will get a message */
}
else if (reason != am_normal || (flags & ERTS_XSIG_FLG_NO_IGN_NORMAL)) {
-#ifdef ERTS_SMP
if (!(state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))) {
ASSERT(!rp->pending_exit.bp);
@@ -11340,41 +12497,60 @@ send_exit_signal(Process *c_p, /* current process if and only
if (*rp_locks != ERTS_PROC_LOCKS_ALL) {
ErtsProcLocks need_locks = (~(*rp_locks)
& ERTS_PROC_LOCKS_ALL);
- if (erts_smp_proc_trylock(c_p, need_locks) == EBUSY) {
- erts_smp_proc_unlock(c_p,
+ if (erts_proc_trylock(c_p, need_locks) == EBUSY) {
+ erts_proc_unlock(c_p,
*rp_locks & ~ERTS_PROC_LOCK_MAIN);
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
}
*rp_locks = ERTS_PROC_LOCKS_ALL;
}
set_proc_exiting(c_p, state, rsn, NULL);
}
- else if (!(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS))) {
+ else if (!(state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS))) {
/* Process not running ... */
ErtsProcLocks need_locks = ~(*rp_locks) & ERTS_PROC_LOCKS_ALL;
+ ErlHeapFragment *bp = NULL;
+ Eterm rsn_cpy;
if (need_locks
- && erts_smp_proc_trylock(rp, need_locks) == EBUSY) {
+ && erts_proc_trylock(rp, need_locks) == EBUSY) {
/* ... but we haven't got all locks on it ... */
- save_pending_exiter(rp);
+ save_pending_exiter(rp, NULL);
/*
* The pending exit will be discovered when next
* process is scheduled in
*/
- goto set_pending_exit;
+ goto set_pending_exit;
}
+ /* ...and we have all locks on it... */
+ *rp_locks = ERTS_PROC_LOCKS_ALL;
+
+ state = erts_atomic32_read_nob(&rp->state);
+
+ if (is_immed(rsn))
+ rsn_cpy = rsn;
else {
- /* ...and we have all locks on it... */
- *rp_locks = ERTS_PROC_LOCKS_ALL;
- set_proc_exiting(rp,
- state,
- (is_immed(rsn)
- ? rsn
- : copy_object(rsn, rp)),
- NULL);
+ Eterm *hp;
+ ErlOffHeap *ohp;
+ Uint rsn_sz = size_object(rsn);
+ if (state & ERTS_PSFLG_DIRTY_RUNNING) {
+ bp = new_message_buffer(rsn_sz);
+ ohp = &bp->off_heap;
+ hp = &bp->mem[0];
+ }
+ else
+ {
+ hp = HAlloc(rp, rsn_sz);
+ ohp = &rp->off_heap;
+ }
+ rsn_cpy = copy_struct(rsn, rsn_sz, &hp, ohp);
}
+
+ set_proc_exiting(rp, state, rsn_cpy, bp);
}
else { /* Process running... */
-
+
/*
* The pending exit will be discovered when the process
* is scheduled out if not discovered earlier.
@@ -11396,8 +12572,30 @@ send_exit_signal(Process *c_p, /* current process if and only
&bp->off_heap);
rp->pending_exit.bp = bp;
}
- erts_smp_atomic32_read_bor_relb(&rp->state,
- ERTS_PSFLG_PENDING_EXIT);
+
+ /*
+ * If no dirty work has been scheduled, pending exit will
+ * be discovered when the process is scheduled. If dirty work
+ * has been scheduled, we may need to add it to a normal run
+ * queue...
+ */
+ {
+ erts_aint32_t a = erts_atomic32_read_nob(&rp->state);
+ while (1) {
+ erts_aint32_t n, e;
+ int dwork;
+ n = e = a;
+ n |= ERTS_PSFLG_PENDING_EXIT;
+ dwork = !!(n & ERTS_PSFLGS_DIRTY_WORK);
+ n &= ~ERTS_PSFLGS_DIRTY_WORK;
+ a = erts_atomic32_cmpxchg_mb(&rp->state, n, e);
+ if (a == e) {
+ if (dwork)
+ erts_schedule_process(rp, n, *rp_locks);
+ break;
+ }
+ }
+ }
}
}
/* else:
@@ -11409,17 +12607,6 @@ send_exit_signal(Process *c_p, /* current process if and only
* that the receiver *will* exit; either on the pending
* exit or by itself before seeing the pending exit.
*/
-#else /* !ERTS_SMP */
- erts_aint32_t state = erts_smp_atomic32_read_nob(&rp->state);
- if (!(state & ERTS_PSFLG_EXITING)) {
- set_proc_exiting(rp,
- state,
- (is_immed(rsn) || c_p == rp
- ? rsn
- : copy_object(rsn, rp)),
- NULL);
- }
-#endif
return -1; /* Receiver will exit */
}
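
The dirty-work handling added above relies on a cmpxchg retry loop: read the state word, compute the new flag word, and commit it only if nobody raced in between, rescheduling the process when dirty-work bits were stripped. A minimal C11 analogue of that pattern (flag values invented):

#include <stdatomic.h>
#include <stdio.h>

#define FLG_PENDING_EXIT (1u << 0)
#define FLG_DIRTY_WORK   (1u << 1)

static atomic_uint state;

/* Atomically set PENDING_EXIT and clear DIRTY_WORK; report whether
 * dirty work had been scheduled so the caller can reschedule. */
static int mark_pending_exit(void)
{
    unsigned e = atomic_load_explicit(&state, memory_order_relaxed);
    for (;;) {
        unsigned n = (e | FLG_PENDING_EXIT) & ~FLG_DIRTY_WORK;
        int dwork = !!(e & FLG_DIRTY_WORK);
        /* On failure, e is reloaded with the current value and we retry. */
        if (atomic_compare_exchange_weak(&state, &e, n))
            return dwork;
    }
}

int main(void)
{
    atomic_store(&state, FLG_DIRTY_WORK);
    printf("had dirty work: %d\n", mark_pending_exit());
    return 0;
}
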
@@ -11459,24 +12646,24 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
ExitMonitorContext *pcontext = vpcontext;
DistEntry *dep;
ErtsMonitor *rmon;
- Process *rp;
- if (mon->type == MON_ORIGIN) {
+ switch (mon->type) {
+ case MON_ORIGIN:
/* We are monitoring someone else, we need to demonitor that one.. */
- if (is_atom(mon->pid)) { /* remote by name */
- ASSERT(is_node_name_atom(mon->pid));
- dep = erts_sysname_to_connected_dist_entry(mon->pid);
+ if (is_atom(mon->u.pid)) { /* remote by name */
+ ASSERT(is_node_name_atom(mon->u.pid));
+ dep = erts_sysname_to_connected_dist_entry(mon->u.pid);
if (dep) {
- erts_smp_de_links_lock(dep);
+ erts_de_links_lock(dep);
rmon = erts_remove_monitor(&(dep->monitors), mon->ref);
- erts_smp_de_links_unlock(dep);
+ erts_de_links_unlock(dep);
if (rmon) {
ErtsDSigData dsd;
int code = erts_dsig_prepare(&dsd, dep, NULL,
ERTS_DSP_NO_LOCK, 0);
if (code == ERTS_DSIG_PREP_CONNECTED) {
code = erts_dsig_send_demonitor(&dsd,
- rmon->pid,
+ rmon->u.pid,
mon->name,
mon->ref,
1);
@@ -11484,37 +12671,46 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
}
erts_destroy_monitor(rmon);
}
- erts_deref_dist_entry(dep);
}
} else {
- ASSERT(is_pid(mon->pid));
- if (is_internal_pid(mon->pid)) { /* local by pid or name */
- rp = erts_pid2proc(NULL, 0, mon->pid, ERTS_PROC_LOCK_LINK);
+ ASSERT(is_pid(mon->u.pid) || is_port(mon->u.pid));
+ /* if is local by pid or name */
+ if (is_internal_pid(mon->u.pid)) {
+ Process *rp = erts_pid2proc(NULL, 0, mon->u.pid, ERTS_PROC_LOCK_LINK);
if (!rp) {
goto done;
}
rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
if (rmon == NULL) {
goto done;
}
erts_destroy_monitor(rmon);
- } else { /* remote by pid */
- ASSERT(is_external_pid(mon->pid));
- dep = external_pid_dist_entry(mon->pid);
+ } else if (is_internal_port(mon->u.pid)) {
+ /* Is a local port */
+ Port *prt = erts_port_lookup_raw(mon->u.pid);
+ if (!prt) {
+ goto done;
+ }
+ erts_port_demonitor(pcontext->p,
+ ERTS_PORT_DEMONITOR_ORIGIN_ON_DEATHBED,
+ prt, mon->ref, NULL);
+ } else { /* remote by pid */
+ ASSERT(is_external_pid(mon->u.pid));
+ dep = external_pid_dist_entry(mon->u.pid);
ASSERT(dep != NULL);
if (dep) {
- erts_smp_de_links_lock(dep);
+ erts_de_links_lock(dep);
rmon = erts_remove_monitor(&(dep->monitors), mon->ref);
- erts_smp_de_links_unlock(dep);
+ erts_de_links_unlock(dep);
if (rmon) {
ErtsDSigData dsd;
int code = erts_dsig_prepare(&dsd, dep, NULL,
ERTS_DSP_NO_LOCK, 0);
if (code == ERTS_DSIG_PREP_CONNECTED) {
code = erts_dsig_send_demonitor(&dsd,
- rmon->pid,
- mon->pid,
+ rmon->u.pid,
+ mon->u.pid,
mon->ref,
1);
ASSERT(code == ERTS_DSIG_SEND_OK);
@@ -11524,22 +12720,23 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
}
}
}
- } else { /* type == MON_TARGET */
- ASSERT(mon->type == MON_TARGET);
- ASSERT(is_pid(mon->pid) || is_internal_port(mon->pid));
- if (is_internal_port(mon->pid)) {
- Port *prt = erts_id2port(mon->pid);
+ break;
+ case MON_TARGET:
+ ASSERT(is_pid(mon->u.pid) || is_internal_port(mon->u.pid));
+ if (is_internal_port(mon->u.pid)) {
+ Port *prt = erts_id2port(mon->u.pid);
if (prt == NULL) {
goto done;
}
erts_fire_port_monitor(prt, mon->ref);
erts_port_release(prt);
- } else if (is_internal_pid(mon->pid)) {/* local by name or pid */
+ } else if (is_internal_pid(mon->u.pid)) {/* local by name or pid */
Eterm watched;
+ Process *rp;
DeclareTmpHeapNoproc(lhp,3);
ErtsProcLocks rp_locks = (ERTS_PROC_LOCK_LINK
| ERTS_PROC_LOCKS_MSG_SEND);
- rp = erts_pid2proc(NULL, 0, mon->pid, rp_locks);
+ rp = erts_pid2proc(NULL, 0, mon->u.pid, rp_locks);
if (rp == NULL) {
goto done;
}
@@ -11556,25 +12753,25 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
}
UnUseTmpHeapNoproc(3);
/* else: demonitor while we exited, i.e. do nothing... */
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
} else { /* external by pid or name */
- ASSERT(is_external_pid(mon->pid));
- dep = external_pid_dist_entry(mon->pid);
+ ASSERT(is_external_pid(mon->u.pid));
+ dep = external_pid_dist_entry(mon->u.pid);
ASSERT(dep != NULL);
if (dep) {
- erts_smp_de_links_lock(dep);
+ erts_de_links_lock(dep);
rmon = erts_remove_monitor(&(dep->monitors), mon->ref);
- erts_smp_de_links_unlock(dep);
+ erts_de_links_unlock(dep);
if (rmon) {
ErtsDSigData dsd;
int code = erts_dsig_prepare(&dsd, dep, NULL,
ERTS_DSP_NO_LOCK, 0);
if (code == ERTS_DSIG_PREP_CONNECTED) {
code = erts_dsig_send_m_exit(&dsd,
- mon->pid,
+ mon->u.pid,
(rmon->name != NIL
? rmon->name
- : rmon->pid),
+ : rmon->u.pid),
mon->ref,
pcontext->reason);
ASSERT(code == ERTS_DSIG_SEND_OK);
@@ -11583,6 +12780,17 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
}
}
}
+ break;
+ case MON_NIF_TARGET:
+ erts_fire_nif_monitor(mon->u.resource,
+ pcontext->p->common.id,
+ mon->ref);
+ break;
+ case MON_TIME_OFFSET:
+ erts_demonitor_time_offset(mon->ref);
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid monitor type");
}
done:
/* As the monitors are previously removed from the process,
@@ -11612,6 +12820,7 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext)
DistEntry *dep;
Process *rp;
+
switch(lnk->type) {
case LINK_PID:
if(is_internal_port(item)) {
@@ -11659,12 +12868,16 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext)
if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) {
/* We didn't exit the process and it is traced */
if (IS_TRACED_FL(rp, F_TRACE_PROCS)) {
- trace_proc(p, rp, am_getting_unlinked, p->common.id);
+ if (rp_locks & ERTS_PROC_LOCKS_XSIG_SEND) {
+ erts_proc_unlock(rp, ERTS_PROC_LOCKS_XSIG_SEND);
+ rp_locks &= ~ERTS_PROC_LOCKS_XSIG_SEND;
+ }
+ trace_proc(NULL, 0, rp, am_getting_unlinked, p->common.id);
}
}
}
ASSERT(rp != p);
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
}
}
else if (is_external_pid(item)) {
@@ -11674,14 +12887,14 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext)
int code;
ErtsDistLinkData dld;
erts_remove_dist_link(&dld, p->common.id, item, dep);
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
code = erts_dsig_prepare(&dsd, dep, p, ERTS_DSP_NO_LOCK, 0);
if (code == ERTS_DSIG_PREP_CONNECTED) {
code = erts_dsig_send_exit_tt(&dsd, p->common.id, item,
reason, SEQ_TRACE_TOKEN(p));
ASSERT(code == ERTS_DSIG_SEND_OK);
}
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
erts_destroy_dist_link(&dld);
}
}
@@ -11692,17 +12905,16 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext)
if(dep) {
/* dist entries have node links in a separate structure to
avoid confusion */
- erts_smp_de_links_lock(dep);
+ erts_de_links_lock(dep);
rlnk = erts_remove_link(&(dep->node_links), p->common.id);
- erts_smp_de_links_unlock(dep);
+ erts_de_links_unlock(dep);
if (rlnk)
erts_destroy_link(rlnk);
- erts_deref_dist_entry(dep);
}
break;
default:
- erl_exit(1, "bad type in link list\n");
+ erts_exit(ERTS_ERROR_EXIT, "bad type in link list\n");
break;
}
erts_destroy_link(lnk);
@@ -11714,9 +12926,10 @@ resume_suspend_monitor(ErtsSuspendMonitor *smon, void *vc_p)
Process *suspendee = erts_pid2proc((Process *) vc_p, ERTS_PROC_LOCK_MAIN,
smon->pid, ERTS_PROC_LOCK_STATUS);
if (suspendee) {
+ ASSERT(suspendee != vc_p);
if (smon->active)
- resume_process(suspendee);
- erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
+ resume_process(suspendee, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
}
erts_destroy_suspend_monitor(smon);
}
@@ -11728,6 +12941,7 @@ erts_do_exit_process(Process* p, Eterm reason)
{
p->arity = 0; /* No live registers */
p->fvalue = reason;
+
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(process_exit)) {
@@ -11740,18 +12954,17 @@ erts_do_exit_process(Process* p, Eterm reason)
}
#endif
-#ifdef ERTS_SMP
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+ if (p->static_flags & ERTS_STC_FLG_SYSTEM_PROC)
+ erts_exit(ERTS_DUMP_EXIT, "System process %T terminated: %T\n",
+ p->common.id, reason);
+
+ ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
/* By locking all locks (main lock is already locked) when going
to exiting state (ERTS_PSFLG_EXITING), it is enough to take any lock when
looking up a process (erts_pid2proc()) to prevent the looked up
process from exiting until the lock has been released. */
- erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
-#endif
+ erts_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
-#ifndef ERTS_SMP
- set_proc_self_exiting(p);
-#else
if (ERTS_PSFLG_PENDING_EXIT & set_proc_self_exiting(p)) {
/* Process exited before pending exit was received... */
p->pending_exit.reason = THE_NON_VALUE;
@@ -11763,15 +12976,12 @@ erts_do_exit_process(Process* p, Eterm reason)
cancel_suspend_of_suspendee(p, ERTS_PROC_LOCKS_ALL);
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p);
-#endif
+ ERTS_MSGQ_MV_INQ2PRIVQ(p);
if (IS_TRACED(p)) {
if (IS_TRACED_FL(p, F_TRACE_CALLS))
erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_EXITING);
- if (IS_TRACED_FL(p,F_TRACE_PROCS))
- trace_proc(p, p, am_exit, reason);
}
erts_trace_check_exiting(p->common.id);
@@ -11779,20 +12989,24 @@ erts_do_exit_process(Process* p, Eterm reason)
ASSERT((ERTS_TRACE_FLAGS(p) & F_INITIAL_TRACE_FLAGS)
== F_INITIAL_TRACE_FLAGS);
- cancel_timer(p); /* Always cancel timer just in case */
+ ASSERT(erts_proc_read_refc(p) > 0);
+ if (ERTS_PTMR_IS_SET(p)) {
+ erts_cancel_proc_timer(p);
+ ASSERT(erts_proc_read_refc(p) > 0);
+ }
+
+ erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- if (p->u.bif_timers)
- erts_cancel_bif_timers(p, ERTS_PROC_LOCKS_ALL);
+ if (IS_TRACED_FL(p,F_TRACE_PROCS))
+ trace_proc(p, ERTS_PROC_LOCK_MAIN, p, am_exit, reason);
- erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
/*
- * The p->u.bif_timers of this process can *not* be used anymore;
+ * p->u.initial of this process can *not* be used anymore;
* will be overwritten by misc termination data.
*/
p->u.terminate = NULL;
-
erts_continue_exit_process(p);
}
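
The invariant stated above, where the exit path takes ERTS_PROC_LOCKS_ALL before setting ERTS_PSFLG_EXITING, is what lets a lookup holding any single process lock trust that the process cannot start exiting underneath it. A toy sketch of the same invariant with two pthread mutexes (illustrative only; the locks are assumed initialized elsewhere):

#include <pthread.h>
#include <stdbool.h>

typedef struct {
    pthread_mutex_t lock[2];   /* stand-ins for MAIN, STATUS, ... */
    bool exiting;
} proc;

/* Exit path: take ALL locks before marking the process exiting. */
static void mark_exiting(proc *p)
{
    pthread_mutex_lock(&p->lock[0]);
    pthread_mutex_lock(&p->lock[1]);
    p->exiting = true;
    pthread_mutex_unlock(&p->lock[1]);
    pthread_mutex_unlock(&p->lock[0]);
}

/* Lookup path: holding ANY single lock is enough; the exiting flag
 * cannot flip while we hold it. */
static bool lookup_alive(proc *p, int which)
{
    bool alive;
    pthread_mutex_lock(&p->lock[which]);
    alive = !p->exiting;
    /* ... safe to use p here while the lock is held ... */
    pthread_mutex_unlock(&p->lock[which]);
    return alive;
}
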
@@ -11803,39 +13017,71 @@ erts_continue_exit_process(Process *p)
ErtsMonitor *mon;
ErtsProcLocks curr_locks = ERTS_PROC_LOCK_MAIN;
Eterm reason = p->fvalue;
- DistEntry *dep;
- struct saved_calls *scb;
- process_breakpoint_time_t *pbt;
+ DistEntry *dep = NULL;
erts_aint32_t state;
+ int delay_del_proc = 0;
#ifdef DEBUG
int yield_allowed = 1;
#endif
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p));
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p));
ASSERT(ERTS_PROC_IS_EXITING(p));
-#ifdef ERTS_SMP
+ ASSERT(erts_proc_read_refc(p) > 0);
+ if (p->bif_timers) {
+ if (erts_cancel_bif_timers(p, &p->bif_timers, &p->u.terminate)) {
+ ASSERT(erts_proc_read_refc(p) > 0);
+ goto yield;
+ }
+ ASSERT(erts_proc_read_refc(p) > 0);
+ p->bif_timers = NULL;
+ }
+
+ if (p->flags & F_SCHDLR_ONLN_WAITQ)
+ abort_sched_onln_chng_waitq(p);
+
if (p->flags & F_HAVE_BLCKD_MSCHED) {
ErtsSchedSuspendResult ssr;
- ssr = erts_block_multi_scheduling(p, ERTS_PROC_LOCK_MAIN, 0, 1);
+ ssr = erts_block_multi_scheduling(p, ERTS_PROC_LOCK_MAIN, 0, 0, 1);
+ switch (ssr) {
+ case ERTS_SCHDLR_SSPND_YIELD_RESTART:
+ goto yield;
+ case ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED:
+ case ERTS_SCHDLR_SSPND_DONE_NMSCHED_BLOCKED:
+ case ERTS_SCHDLR_SSPND_YIELD_DONE_MSCHED_BLOCKED:
+ case ERTS_SCHDLR_SSPND_YIELD_DONE_NMSCHED_BLOCKED:
+ case ERTS_SCHDLR_SSPND_DONE:
+ case ERTS_SCHDLR_SSPND_YIELD_DONE:
+ p->flags &= ~F_HAVE_BLCKD_MSCHED;
+ break;
+ case ERTS_SCHDLR_SSPND_EINVAL:
+ default:
+ erts_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error: %d\n",
+ __FILE__, __LINE__, (int) ssr);
+ }
+ }
+ if (p->flags & F_HAVE_BLCKD_NMSCHED) {
+ ErtsSchedSuspendResult ssr;
+ ssr = erts_block_multi_scheduling(p, ERTS_PROC_LOCK_MAIN, 0, 1, 1);
switch (ssr) {
case ERTS_SCHDLR_SSPND_YIELD_RESTART:
goto yield;
case ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED:
+ case ERTS_SCHDLR_SSPND_DONE_NMSCHED_BLOCKED:
case ERTS_SCHDLR_SSPND_YIELD_DONE_MSCHED_BLOCKED:
+ case ERTS_SCHDLR_SSPND_YIELD_DONE_NMSCHED_BLOCKED:
case ERTS_SCHDLR_SSPND_DONE:
case ERTS_SCHDLR_SSPND_YIELD_DONE:
p->flags &= ~F_HAVE_BLCKD_MSCHED;
break;
case ERTS_SCHDLR_SSPND_EINVAL:
default:
- erl_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error: %d\n",
+ erts_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error: %d\n",
__FILE__, __LINE__, (int) ssr);
}
}
-#endif
if (p->flags & F_USING_DB) {
if (erts_db_process_exiting(p, ERTS_PROC_LOCK_MAIN))
@@ -11844,12 +13090,22 @@ erts_continue_exit_process(Process *p)
}
erts_set_gc_state(p, 1);
- state = erts_smp_atomic32_read_acqb(&p->state);
- if (state & ERTS_PSFLG_ACTIVE_SYS) {
+ state = erts_atomic32_read_acqb(&p->state);
+ if (state & ERTS_PSFLG_ACTIVE_SYS
+ || p->dirty_sys_tasks
+ ) {
if (cleanup_sys_tasks(p, state, CONTEXT_REDS) >= CONTEXT_REDS/2)
goto yield;
}
+#ifdef DEBUG
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ ASSERT(p->sys_task_qs == NULL);
+ ASSERT(ERTS_PROC_GET_DELAYED_GC_TASK_QS(p) == NULL);
+ ASSERT(p->dirty_sys_tasks == NULL);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+#endif
+
if (p->flags & F_USING_DDLL) {
erts_ddll_proc_dead(p, ERTS_PROC_LOCK_MAIN);
p->flags &= ~F_USING_DDLL;
@@ -11877,7 +13133,10 @@ erts_continue_exit_process(Process *p)
ASSERT(!p->common.u.alive.reg);
}
- erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ if (IS_TRACED_FL(p, F_TRACE_SCHED_EXIT))
+ trace_sched(p, curr_locks, am_out_exited);
+
+ erts_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
curr_locks = ERTS_PROC_LOCKS_ALL;
/*
@@ -11898,23 +13157,21 @@ erts_continue_exit_process(Process *p)
{
/* Do *not* use erts_get_runq_proc() */
ErtsRunQueue *rq;
- rq = erts_get_runq_current(ERTS_GET_SCHEDULER_DATA_FROM_PROC(p));
+ rq = erts_get_runq_current(erts_proc_sched_data(p));
- erts_smp_runq_lock(rq);
+ erts_runq_lock(rq);
-#ifdef ERTS_SMP
ASSERT(p->scheduler_data);
ASSERT(p->scheduler_data->current_process == p);
ASSERT(p->scheduler_data->free_process == NULL);
p->scheduler_data->current_process = NULL;
p->scheduler_data->free_process = p;
-#endif
/* Time of death! */
erts_ptab_delete_element(&erts_proc, &p->common);
- erts_smp_runq_unlock(rq);
+ erts_runq_unlock(rq);
}
/*
@@ -11926,47 +13183,45 @@ erts_continue_exit_process(Process *p)
{
/* Inactivate and notify free */
- erts_aint32_t n, e, a = erts_smp_atomic32_read_nob(&p->state);
-#ifdef ERTS_SMP
+ erts_aint32_t n, e, a = erts_atomic32_read_nob(&p->state);
int refc_inced = 0;
-#endif
while (1) {
n = e = a;
ASSERT(a & ERTS_PSFLG_EXITING);
n |= ERTS_PSFLG_FREE;
n &= ~ERTS_PSFLG_ACTIVE;
-#ifdef ERTS_SMP
if ((n & ERTS_PSFLG_IN_RUNQ) && !refc_inced) {
- erts_smp_proc_inc_refc(p);
+ erts_proc_inc_refc(p);
refc_inced = 1;
}
-#endif
- a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
+ a = erts_atomic32_cmpxchg_mb(&p->state, n, e);
if (a == e)
break;
}
-#ifdef ERTS_SMP
+ if (a & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ p->flags |= F_DELAYED_DEL_PROC;
+ delay_del_proc = 1;
+ /*
+ * The dirty scheduler decrease refc
+ * when done with the process...
+ */
+ }
+
if (refc_inced && !(n & ERTS_PSFLG_IN_RUNQ))
- erts_smp_proc_dec_refc(p);
-#endif
+ erts_proc_dec_refc(p);
}
-
+
dep = ((p->flags & F_DISTRIBUTION)
- ? ERTS_PROC_SET_DIST_ENTRY(p, ERTS_PROC_LOCKS_ALL, NULL)
- : NULL);
- scb = ERTS_PROC_SET_SAVED_CALLS_BUF(p, ERTS_PROC_LOCKS_ALL, NULL);
- pbt = ERTS_PROC_SET_CALL_TIME(p, ERTS_PROC_LOCKS_ALL, NULL);
-
- erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
-#ifdef BM_COUNTERS
- processes_busy--;
-#endif
+ ? ERTS_PROC_SET_DIST_ENTRY(p, NULL)
+ : NULL);
+
+ erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
if (dep) {
- erts_do_net_exits(dep, reason);
- if(dep)
- erts_deref_dist_entry(dep);
+ erts_do_net_exits(dep, (reason == am_kill) ? am_killed : reason);
+ erts_deref_dist_entry(dep);
}
/*
@@ -11998,18 +13253,15 @@ erts_continue_exit_process(Process *p)
have none here */
}
- if (scb)
- erts_free(ERTS_ALC_T_CALLS_BUF, (void *) scb);
+ erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
- if (pbt)
- erts_free(ERTS_ALC_T_BPD, (void *) pbt);
+ erts_flush_trace_messages(p, ERTS_PROC_LOCK_MAIN);
- delete_process(p);
+ ERTS_TRACER_CLEAR(&ERTS_TRACER(p));
-#ifdef ERTS_SMP
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
-#endif
+ if (!delay_del_proc)
+ delete_process(p);
return;
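
If the exiting process is still executing on a dirty scheduler (ERTS_PSFLG_DIRTY_RUNNING*), delete_process is postponed: F_DELAYED_DEL_PROC is set and the dirty scheduler's reference keeps the struct alive until it is dropped. A standalone C11 sketch of refcount-deferred freeing (names invented):

#include <stdatomic.h>
#include <stdlib.h>

typedef struct {
    atomic_int refc;
    /* ... process state ... */
} proc;

static void proc_dec_refc(proc *p)
{
    /* Whoever drops the last reference performs the delayed delete. */
    if (atomic_fetch_sub(&p->refc, 1) == 1)
        free(p);
}

/* Exit path: if a dirty scheduler still runs p, leave one reference
 * for it instead of freeing immediately. */
static void maybe_delete(proc *p, int dirty_running)
{
    if (!dirty_running)
        proc_dec_refc(p);      /* may free now */
    /* else: the dirty scheduler calls proc_dec_refc() when done */
}
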
@@ -12019,79 +13271,22 @@ erts_continue_exit_process(Process *p)
ASSERT(yield_allowed);
#endif
- ERTS_SMP_LC_ASSERT(curr_locks == erts_proc_lc_my_proc_locks(p));
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & curr_locks);
+ ERTS_LC_ASSERT(curr_locks == erts_proc_lc_my_proc_locks(p));
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & curr_locks);
p->i = (BeamInstr *) beam_continue_exit;
if (!(curr_locks & ERTS_PROC_LOCK_STATUS)) {
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
curr_locks |= ERTS_PROC_LOCK_STATUS;
}
if (curr_locks != ERTS_PROC_LOCK_MAIN)
- erts_smp_proc_unlock(p, ~ERTS_PROC_LOCK_MAIN & curr_locks);
-
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p));
-
-}
-
-/* Callback for process timeout */
-static void
-timeout_proc(Process* p)
-{
- erts_aint32_t state;
- BeamInstr** pi = (BeamInstr **) p->def_arg_reg;
- p->i = *pi;
- p->flags |= F_TIMO;
- p->flags &= ~F_INSLPQUEUE;
-
- state = erts_smp_atomic32_read_acqb(&p->state);
- if (!(state & ERTS_PSFLG_ACTIVE))
- schedule_process(p, state);
-}
-
-
-void
-cancel_timer(Process* p)
-{
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));
- p->flags &= ~(F_INSLPQUEUE|F_TIMO);
-#ifdef ERTS_SMP
- erts_cancel_smp_ptimer(p->common.u.alive.ptimer);
-#else
- erts_cancel_timer(&p->common.u.alive.tm);
-#endif
-}
+ erts_proc_unlock(p, ~ERTS_PROC_LOCK_MAIN & curr_locks);
-/*
- * Insert a process into the time queue, with a timeout 'timeout' in ms.
- */
-void
-set_timer(Process* p, Uint timeout)
-{
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p));
- /* check for special case timeout=0 DONT ADD TO time queue */
- if (timeout == 0) {
- p->flags |= F_TIMO;
- return;
- }
- p->flags |= F_INSLPQUEUE;
- p->flags &= ~F_TIMO;
-
-#ifdef ERTS_SMP
- erts_create_smp_ptimer(&p->common.u.alive.ptimer,
- p->common.id,
- (ErlTimeoutProc) timeout_proc,
- timeout);
-#else
- erts_set_timer(&p->common.u.alive.tm,
- (ErlTimeoutProc) timeout_proc,
- NULL,
- (void*) p,
- timeout);
-#endif
+ BUMP_ALL_REDS(p);
}
/*
@@ -12099,7 +13294,7 @@ set_timer(Process* p, Uint timeout)
*/
void
-erts_stack_dump(int to, void *to_arg, Process *p)
+erts_stack_dump(fmtfn_t to, void *to_arg, Process *p)
{
Eterm* sp;
int yreg = -1;
@@ -12109,12 +13304,12 @@ erts_stack_dump(int to, void *to_arg, Process *p)
}
erts_program_counter_info(to, to_arg, p);
for (sp = p->stop; sp < STACK_START(p); sp++) {
- yreg = stack_element_dump(to, to_arg, p, sp, yreg);
+ yreg = stack_element_dump(to, to_arg, sp, yreg);
}
}
void
-erts_program_counter_info(int to, void *to_arg, Process *p)
+erts_program_counter_info(fmtfn_t to, void *to_arg, Process *p)
{
erts_aint32_t state;
int i;
@@ -12125,7 +13320,7 @@ erts_program_counter_info(int to, void *to_arg, Process *p)
erts_print(to, to_arg, "CP: %p (", p->cp);
print_function_from_pc(to, to_arg, p->cp);
erts_print(to, to_arg, ")\n");
- state = erts_smp_atomic32_read_acqb(&p->state);
+ state = erts_atomic32_read_acqb(&p->state);
if (!(state & (ERTS_PSFLG_RUNNING
| ERTS_PSFLG_RUNNING_SYS
| ERTS_PSFLG_GC))) {
@@ -12144,10 +13339,10 @@ erts_program_counter_info(int to, void *to_arg, Process *p)
}
static void
-print_function_from_pc(int to, void *to_arg, BeamInstr* x)
+print_function_from_pc(fmtfn_t to, void *to_arg, BeamInstr* x)
{
- BeamInstr* addr = find_function_from_pc(x);
- if (addr == NULL) {
+ ErtsCodeMFA *cmfa = find_function_from_pc(x);
+ if (cmfa == NULL) {
if (x == beam_exit) {
erts_print(to, to_arg, "<terminate process>");
} else if (x == beam_continue_exit) {
@@ -12161,12 +13356,13 @@ print_function_from_pc(int to, void *to_arg, BeamInstr* x)
}
} else {
erts_print(to, to_arg, "%T:%T/%d + %d",
- addr[0], addr[1], addr[2], ((x-addr)-2) * sizeof(Eterm));
+ cmfa->module, cmfa->function, cmfa->arity,
+ (x-(BeamInstr*)cmfa) * sizeof(Eterm));
}
}
static int
-stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp, int yreg)
+stack_element_dump(fmtfn_t to, void *to_arg, Eterm* sp, int yreg)
{
Eterm x = *sp;
@@ -12180,7 +13376,7 @@ stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp, int yreg)
}
if (is_CP(x)) {
- erts_print(to, to_arg, "Return addr %p (", (Eterm *) EXPAND_POINTER(x));
+ erts_print(to, to_arg, "Return addr %p (", (Eterm *) x);
print_function_from_pc(to, to_arg, cp_val(x));
erts_print(to, to_arg, ")\n");
yreg = 0;
@@ -12195,48 +13391,260 @@ stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp, int yreg)
}
/*
+ * Print scheduler information
+ */
+void
+erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) {
+ int i;
+ erts_aint32_t flg;
+ Process *p;
+
+ erts_print(to, to_arg, "=scheduler:%u\n", esdp->no);
+
+ flg = erts_atomic32_read_dirty(&esdp->ssi->flags);
+ erts_print(to, to_arg, "Scheduler Sleep Info Flags: ");
+ for (i = 0; i < ERTS_SSI_FLGS_MAX && flg; i++) {
+ erts_aint32_t chk = (1 << i);
+ if (flg & chk) {
+ switch (chk) {
+ case ERTS_SSI_FLG_SLEEPING:
+ erts_print(to, to_arg, "SLEEPING"); break;
+ case ERTS_SSI_FLG_POLL_SLEEPING:
+ erts_print(to, to_arg, "POLL_SLEEPING"); break;
+ case ERTS_SSI_FLG_TSE_SLEEPING:
+ erts_print(to, to_arg, "TSE_SLEEPING"); break;
+ case ERTS_SSI_FLG_WAITING:
+ erts_print(to, to_arg, "WAITING"); break;
+ case ERTS_SSI_FLG_SUSPENDED:
+ erts_print(to, to_arg, "SUSPENDED"); break;
+ case ERTS_SSI_FLG_MSB_EXEC:
+ erts_print(to, to_arg, "MSB_EXEC"); break;
+ default:
+ erts_print(to, to_arg, "UNKNOWN(%d)", flg); break;
+ }
+ if (flg > chk)
+ erts_print(to, to_arg, " | ");
+ flg -= chk;
+ }
+ }
+ erts_print(to, to_arg, "\n");
+
+ flg = erts_atomic32_read_dirty(&esdp->ssi->aux_work);
+ erts_print(to, to_arg, "Scheduler Sleep Info Aux Work: ");
+ for (i = 0; i < ERTS_SSI_AUX_WORK_NO_FLAGS && flg; i++) {
+ erts_aint32_t chk = (1 << i);
+ if (flg & chk) {
+ if (erts_aux_work_flag_descr[i])
+ erts_print(to, to_arg, "%s", erts_aux_work_flag_descr[i]);
+ else
+ erts_print(to, to_arg, "1<<%d", i);
+ if (flg > chk)
+ erts_print(to, to_arg, " | ");
+ flg -= chk;
+ }
+ }
+ erts_print(to, to_arg, "\n");
+
+ erts_print(to, to_arg, "Current Port: ");
+ if (esdp->current_port)
+ erts_print(to, to_arg, "%T", esdp->current_port->common.id);
+ erts_print(to, to_arg, "\n");
+
+ for (i = 0; i < ERTS_NO_PROC_PRIO_LEVELS; i++) {
+ erts_print(to, to_arg, "Run Queue ");
+ switch (i) {
+ case PRIORITY_MAX:
+ erts_print(to, to_arg, "Max ");
+ break;
+ case PRIORITY_HIGH:
+ erts_print(to, to_arg, "High ");
+ break;
+ case PRIORITY_NORMAL:
+ erts_print(to, to_arg, "Normal ");
+ break;
+ case PRIORITY_LOW:
+ erts_print(to, to_arg, "Low ");
+ break;
+ default:
+ erts_print(to, to_arg, "Unknown ");
+ break;
+ }
+ erts_print(to, to_arg, "Length: %d\n",
+ erts_atomic32_read_dirty(&esdp->run_queue->procs.prio_info[i].len));
+ }
+ erts_print(to, to_arg, "Run Queue Port Length: %d\n",
+ erts_atomic32_read_dirty(&esdp->run_queue->ports.info.len));
+
+ flg = erts_atomic32_read_dirty(&esdp->run_queue->flags);
+ erts_print(to, to_arg, "Run Queue Flags: ");
+ for (i = 0; i < ERTS_RUNQ_FLG_MAX && flg; i++) {
+ erts_aint32_t chk = (1 << i);
+ if (flg & chk) {
+ switch (chk) {
+ case (1 << PRIORITY_MAX):
+ erts_print(to, to_arg, "NONEMPTY_MAX"); break;
+ case (1 << PRIORITY_HIGH):
+ erts_print(to, to_arg, "NONEMPTY_HIGH"); break;
+ case (1 << PRIORITY_NORMAL):
+ erts_print(to, to_arg, "NONEMPTY_NORMAL"); break;
+ case (1 << PRIORITY_LOW):
+ erts_print(to, to_arg, "NONEMPTY_LOW"); break;
+ case (1 << (PRIORITY_MAX + ERTS_RUNQ_FLGS_EMIGRATE_SHFT)):
+ erts_print(to, to_arg, "EMIGRATE_MAX"); break;
+ case (1 << (PRIORITY_HIGH + ERTS_RUNQ_FLGS_EMIGRATE_SHFT)):
+ erts_print(to, to_arg, "EMIGRATE_HIGH"); break;
+ case (1 << (PRIORITY_NORMAL + ERTS_RUNQ_FLGS_EMIGRATE_SHFT)):
+ erts_print(to, to_arg, "EMIGRATE_NORMAL"); break;
+ case (1 << (PRIORITY_LOW + ERTS_RUNQ_FLGS_EMIGRATE_SHFT)):
+ erts_print(to, to_arg, "EMIGRATE_LOW"); break;
+ case (1 << (PRIORITY_MAX + ERTS_RUNQ_FLGS_IMMIGRATE_SHFT)):
+ erts_print(to, to_arg, "IMMIGRATE_MAX"); break;
+ case (1 << (PRIORITY_HIGH + ERTS_RUNQ_FLGS_IMMIGRATE_SHFT)):
+ erts_print(to, to_arg, "IMMIGRATE_HIGH"); break;
+ case (1 << (PRIORITY_NORMAL + ERTS_RUNQ_FLGS_IMMIGRATE_SHFT)):
+ erts_print(to, to_arg, "IMMIGRATE_NORMAL"); break;
+ case (1 << (PRIORITY_LOW + ERTS_RUNQ_FLGS_IMMIGRATE_SHFT)):
+ erts_print(to, to_arg, "IMMIGRATE_LOW"); break;
+ case (1 << (PRIORITY_MAX + ERTS_RUNQ_FLGS_EVACUATE_SHFT)):
+ erts_print(to, to_arg, "EVACUATE_MAX"); break;
+ case (1 << (PRIORITY_HIGH + ERTS_RUNQ_FLGS_EVACUATE_SHFT)):
+ erts_print(to, to_arg, "EVACUATE_HIGH"); break;
+ case (1 << (PRIORITY_NORMAL + ERTS_RUNQ_FLGS_EVACUATE_SHFT)):
+ erts_print(to, to_arg, "EVACUATE_NORMAL"); break;
+ case (1 << (PRIORITY_LOW + ERTS_RUNQ_FLGS_EVACUATE_SHFT)):
+ erts_print(to, to_arg, "EVACUATE_LOW"); break;
+ case ERTS_RUNQ_FLG_OUT_OF_WORK:
+ erts_print(to, to_arg, "OUT_OF_WORK"); break;
+ case ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK:
+ erts_print(to, to_arg, "HALFTIME_OUT_OF_WORK"); break;
+ case ERTS_RUNQ_FLG_SUSPENDED:
+ erts_print(to, to_arg, "SUSPENDED"); break;
+ case ERTS_RUNQ_FLG_CHK_CPU_BIND:
+ erts_print(to, to_arg, "CHK_CPU_BIND"); break;
+ case ERTS_RUNQ_FLG_INACTIVE:
+ erts_print(to, to_arg, "INACTIVE"); break;
+ case ERTS_RUNQ_FLG_NONEMPTY:
+ erts_print(to, to_arg, "NONEMPTY"); break;
+ case ERTS_RUNQ_FLG_PROTECTED:
+ erts_print(to, to_arg, "PROTECTED"); break;
+ case ERTS_RUNQ_FLG_EXEC:
+ erts_print(to, to_arg, "EXEC"); break;
+ case ERTS_RUNQ_FLG_MSB_EXEC:
+ erts_print(to, to_arg, "MSB_EXEC"); break;
+ case ERTS_RUNQ_FLG_MISC_OP:
+ erts_print(to, to_arg, "MISC_OP"); break;
+ default:
+ erts_print(to, to_arg, "UNKNOWN(%d)", flg); break;
+ }
+ if (flg > chk)
+ erts_print(to, to_arg, " | ");
+ flg -= chk;
+ }
+ }
+ erts_print(to, to_arg, "\n");
+
+ /* This *MUST* be the last information in the scheduler block */
+ p = esdp->current_process;
+ erts_print(to, to_arg, "Current Process: ");
+ if (esdp->current_process && !(ERTS_TRACE_FLAGS(p) & F_SENSITIVE)) {
+ flg = erts_atomic32_read_dirty(&p->state);
+ erts_print(to, to_arg, "%T\n", p->common.id);
+
+ erts_print(to, to_arg, "Current Process State: ");
+ erts_dump_process_state(to, to_arg, flg);
+
+ erts_print(to, to_arg, "Current Process Internal State: ");
+ erts_dump_extended_process_state(to, to_arg, flg);
+
+ erts_print(to, to_arg, "Current Process Program counter: %p (", p->i);
+ print_function_from_pc(to, to_arg, p->i);
+ erts_print(to, to_arg, ")\n");
+ erts_print(to, to_arg, "Current Process CP: %p (", p->cp);
+ print_function_from_pc(to, to_arg, p->cp);
+ erts_print(to, to_arg, ")\n");
+
+ /* Getting this stacktrace can segfault if we are very
+ unlucky when called while a process is being garbage collected.
+ Therefore we only call this on other schedulers if we either
+ have protection against segfaults, or we know that the process
+ is not garbage collecting. It *should* always be safe to call
+ on a process owned by us, even if it is currently being garbage
+ collected.
+ */
+ erts_print(to, to_arg, "Current Process Limited Stack Trace:\n");
+ erts_limited_stack_trace(to, to_arg, p);
+ } else
+ erts_print(to, to_arg, "\n");
+
+}
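
erts_print_scheduler_info decodes each flag word bit by bit: print the name of the lowest set bit, emit " | " while higher bits remain, and clear the handled bit so the loop can terminate early. A condensed standalone version of the same decoder (names invented):

#include <stdio.h>

static const char *flag_name[] = { "SLEEPING", "WAITING", "SUSPENDED" };

/* Print "A | B | C" for the set bits of flg, lowest bit first. */
static void print_flags(unsigned flg)
{
    int i;
    for (i = 0; i < 3 && flg; i++) {
        unsigned chk = 1u << i;
        if (flg & chk) {
            printf("%s", flag_name[i]);
            if (flg > chk)        /* higher bits remain */
                printf(" | ");
            flg -= chk;
        }
    }
    putchar('\n');
}

int main(void)
{
    print_flags(5u);   /* prints: SLEEPING | SUSPENDED */
    return 0;
}
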
+
+/*
* A nice system halt closing all open port goes as follows:
* 1) This function schedules the aux work ERTS_SSI_AUX_WORK_REAP_PORTS
* on all schedulers, then schedules itself out.
- * 2) All shedulers detect this and set the flag halt_in_progress
+ * 2) All schedulers detect this and set the flag ERTS_RUNQ_FLG_HALTING
* on their run queue. The last scheduler sets all non-closed ports
* ERTS_PORT_SFLG_HALT. Global atomic erts_halt_progress is used
* as refcount to determine which is last.
- * 3) While the run ques has flag halt_in_progress no processes
+ * 3) While the run queues have flag ERTS_RUNQ_FLG_HALTING no processes
* will be scheduled, only ports.
* 4) When the last port closes that scheduler calls erlang:halt/1.
* The same global atomic is used as refcount.
*
* A BIF that calls this should make sure to schedule out to never come back:
- * erl_halt((int)(- code));
+ * erts_halt(code);
* ERTS_BIF_YIELD1(bif_export[BIF_erlang_halt_1], BIF_P, NIL);
*/
-void erl_halt(int code)
+void erts_halt(int code)
{
- if (-1 == erts_smp_atomic32_cmpxchg_acqb(&erts_halt_progress,
+ if (-1 == erts_atomic32_cmpxchg_acqb(&erts_halt_progress,
erts_no_schedulers,
-1)) {
-#ifdef ERTS_DIRTY_SCHEDULERS
- ERTS_DIRTY_CPU_RUNQ->halt_in_progress = 1;
- ERTS_DIRTY_IO_RUNQ->halt_in_progress = 1;
-#endif
+ ERTS_RUNQ_FLGS_SET(ERTS_DIRTY_CPU_RUNQ, ERTS_RUNQ_FLG_HALTING);
+ ERTS_RUNQ_FLGS_SET(ERTS_DIRTY_IO_RUNQ, ERTS_RUNQ_FLG_HALTING);
erts_halt_code = code;
notify_reap_ports_relb();
}
}
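
erts_halt uses one atomic for two jobs: the cmpxchg from -1 makes the first caller the only one to arm the halt, and the value then counts schedulers down so the last one out performs the actual halt. A minimal C11 analogue of this last-one-out pattern (names illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int halt_progress = -1;   /* -1 == no halt requested */

/* Called by the halting BIF: only the first caller arms the countdown. */
static void request_halt(int nschedulers)
{
    int expect = -1;
    atomic_compare_exchange_strong(&halt_progress, &expect, nschedulers);
}

/* Called by each scheduler as it finishes closing its ports. */
static void scheduler_done(void)
{
    if (atomic_fetch_sub(&halt_progress, 1) == 1)
        printf("last scheduler out: halting now\n");
}
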
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
int
erts_dbg_check_halloc_lock(Process *p)
{
+ ErtsSchedulerData *esdp;
if (ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p))
return 1;
+ if ((p->static_flags & ERTS_STC_FLG_SHADOW_PROC)
+ && ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()))
+ return 1;
if (p->common.id == ERTS_INVALID_PID)
return 1;
- if (p->scheduler_data && p == p->scheduler_data->match_pseudo_process)
+ esdp = erts_proc_sched_data(p);
+ if (esdp && p == esdp->match_pseudo_process)
return 1;
if (erts_thr_progress_is_blocking())
return 1;
return 0;
}
#endif
+
+void
+erts_debug_later_op_foreach(void (*callback)(void*),
+ void (*func)(void *, ErtsThrPrgrVal, void *),
+ void *arg)
+{
+ int six;
+ if (!erts_thr_progress_is_blocking())
+ ERTS_INTERNAL_ERROR("Not blocking thread progress");
+
+ for (six = 0; six < erts_no_schedulers; six++) {
+ ErtsSchedulerData *esdp = &erts_aligned_scheduler_data[six].esd;
+ ErtsThrPrgrLaterOp *lop = esdp->aux_work_data.later_op.first;
+
+ while (lop) {
+ if (lop->func == callback)
+ func(arg, lop->later, lop->data);
+ lop = lop->next;
+ }
+ }
+}
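
erts_debug_later_op_foreach above scans every scheduler's queued later-ops for entries installed with a given callback, and must run while thread progress is blocked. A hypothetical usage sketch inside the emulator source (count_one and count_pending are assumptions, not real ERTS symbols):

/* Count how many later-ops with a given callback are still pending.
 * Must run while thread progress is blocked, as the function asserts. */
static void count_one(void *arg, ErtsThrPrgrVal later, void *data)
{
    (void) later; (void) data;
    (*(int *) arg)++;
}

static int count_pending(void (*cb)(void *))
{
    int n = 0;
    erts_debug_later_op_foreach(cb, count_one, &n);
    return n;
}
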
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index ed6dadbffa..66d7848f89 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2014. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -20,6 +21,8 @@
#ifndef __PROCESS_H__
#define __PROCESS_H__
+#include "sys.h"
+
#undef ERTS_INCLUDE_SCHEDULER_INTERNALS
#if (defined(ERL_PROCESS_C__) \
|| defined(ERL_PORT_TASK_C__) \
@@ -36,8 +39,6 @@
typedef struct process Process;
-#include "sys.h"
-
#define ERTS_PROCESS_LOCK_ONLY_PROC_LOCK_TYPE__
#include "erl_process_lock.h" /* Only pull out important types... */
#undef ERTS_PROCESS_LOCK_ONLY_PROC_LOCK_TYPE__
@@ -46,18 +47,24 @@ typedef struct process Process;
#include "erl_port.h"
#undef ERL_PORT_GET_PORT_TYPE_ONLY__
#include "erl_vm.h"
-#include "erl_smp.h"
#include "erl_message.h"
#include "erl_process_dict.h"
#include "erl_node_container_utils.h"
#include "erl_node_tables.h"
#include "erl_monitors.h"
-#include "erl_bif_timer.h"
+#include "erl_hl_timer.h"
#include "erl_time.h"
#include "erl_atom_table.h"
#include "external.h"
#include "erl_mseg.h"
#include "erl_async.h"
+#include "erl_gc.h"
+#define ERTS_ONLY_INCLUDE_TRACE_FLAGS
+#include "erl_trace.h"
+#undef ERTS_ONLY_INCLUDE_TRACE_FLAGS
+#define ERTS_ONLY_SCHED_SPEC_ETS_DATA
+#include "erl_db.h"
+#undef ERTS_ONLY_SCHED_SPEC_ETS_DATA
#ifdef HIPE
#include "hipe_process.h"
@@ -74,10 +81,8 @@ struct ErtsNodesMonitor_;
#define ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT 0
#define ERTS_MAX_NO_OF_SCHEDULERS 1024
-#ifdef ERTS_DIRTY_SCHEDULERS
#define ERTS_MAX_NO_OF_DIRTY_CPU_SCHEDULERS ERTS_MAX_NO_OF_SCHEDULERS
#define ERTS_MAX_NO_OF_DIRTY_IO_SCHEDULERS ERTS_MAX_NO_OF_SCHEDULERS
-#endif
#define ERTS_DEFAULT_MAX_PROCESSES (1 << 18)
@@ -90,10 +95,6 @@ struct ErtsNodesMonitor_;
#define ERTS_HEAP_FREE(Type, Ptr, Size) \
erts_free((Type), (Ptr))
-#define INITIAL_MOD 0
-#define INITIAL_FUN 1
-#define INITIAL_ARI 2
-
#include "export.h"
struct saved_calls {
@@ -107,18 +108,17 @@ extern Export exp_send, exp_receive, exp_timeout;
extern int erts_sched_compact_load;
extern int erts_sched_balance_util;
extern Uint erts_no_schedulers;
-#ifdef ERTS_DIRTY_SCHEDULERS
+extern Uint erts_no_total_schedulers;
extern Uint erts_no_dirty_cpu_schedulers;
extern Uint erts_no_dirty_io_schedulers;
-#endif
extern Uint erts_no_run_queues;
extern int erts_sched_thread_suggested_stack_size;
-#define ERTS_SCHED_THREAD_MIN_STACK_SIZE 4 /* Kilo words */
+extern int erts_dcpu_sched_thread_suggested_stack_size;
+extern int erts_dio_sched_thread_suggested_stack_size;
+#define ERTS_SCHED_THREAD_MIN_STACK_SIZE 20 /* Kilo words */
#define ERTS_SCHED_THREAD_MAX_STACK_SIZE 8192 /* Kilo words */
-#ifdef ERTS_SMP
#include "erl_bits.h"
-#endif
/* process priorities */
#define PRIORITY_MAX 0
@@ -167,6 +167,16 @@ extern int erts_sched_thread_suggested_stack_size;
(((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 5))
#define ERTS_RUNQ_FLG_PROTECTED \
(((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 6))
+#define ERTS_RUNQ_FLG_EXEC \
+ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 7))
+#define ERTS_RUNQ_FLG_MSB_EXEC \
+ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 8))
+#define ERTS_RUNQ_FLG_MISC_OP \
+ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 9))
+#define ERTS_RUNQ_FLG_HALTING \
+ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 10))
+
+#define ERTS_RUNQ_FLG_MAX (ERTS_RUNQ_FLG_BASE2 + 11)
#define ERTS_RUNQ_FLGS_MIGRATION_QMASKS \
(ERTS_RUNQ_FLGS_EMIGRATE_QMASK \
@@ -206,31 +216,39 @@ extern int erts_sched_thread_suggested_stack_size;
((FLGS) &= ~ERTS_RUNQ_FLG_EVACUATE((PRIO)))
#define ERTS_RUNQ_FLGS_INIT(RQ, INIT) \
- erts_smp_atomic32_init_nob(&(RQ)->flags, (erts_aint32_t) (INIT))
+ erts_atomic32_init_nob(&(RQ)->flags, (erts_aint32_t) (INIT))
#define ERTS_RUNQ_FLGS_SET(RQ, FLGS) \
- ((Uint32) erts_smp_atomic32_read_bor_relb(&(RQ)->flags, \
+ ((Uint32) erts_atomic32_read_bor_relb(&(RQ)->flags, \
(erts_aint32_t) (FLGS)))
+#define ERTS_RUNQ_FLGS_SET_NOB(RQ, FLGS) \
+ ((Uint32) erts_atomic32_read_bor_nob(&(RQ)->flags, \
+ (erts_aint32_t) (FLGS)))
#define ERTS_RUNQ_FLGS_BSET(RQ, MSK, FLGS) \
- ((Uint32) erts_smp_atomic32_read_bset_relb(&(RQ)->flags, \
+ ((Uint32) erts_atomic32_read_bset_relb(&(RQ)->flags, \
(erts_aint32_t) (MSK), \
(erts_aint32_t) (FLGS)))
#define ERTS_RUNQ_FLGS_UNSET(RQ, FLGS) \
- ((Uint32) erts_smp_atomic32_read_band_relb(&(RQ)->flags, \
+ ((Uint32) erts_atomic32_read_band_relb(&(RQ)->flags, \
(erts_aint32_t) ~(FLGS)))
+#define ERTS_RUNQ_FLGS_UNSET_NOB(RQ, FLGS) \
+ ((Uint32) erts_atomic32_read_band_nob(&(RQ)->flags, \
+ (erts_aint32_t) ~(FLGS)))
#define ERTS_RUNQ_FLGS_GET(RQ) \
- ((Uint32) erts_smp_atomic32_read_acqb(&(RQ)->flags))
+ ((Uint32) erts_atomic32_read_acqb(&(RQ)->flags))
#define ERTS_RUNQ_FLGS_GET_NOB(RQ) \
- ((Uint32) erts_smp_atomic32_read_nob(&(RQ)->flags))
+ ((Uint32) erts_atomic32_read_nob(&(RQ)->flags))
#define ERTS_RUNQ_FLGS_GET_MB(RQ) \
- ((Uint32) erts_smp_atomic32_read_mb(&(RQ)->flags))
+ ((Uint32) erts_atomic32_read_mb(&(RQ)->flags))
#define ERTS_RUNQ_FLGS_READ_BSET(RQ, MSK, FLGS) \
- ((Uint32) erts_smp_atomic32_read_bset_relb(&(RQ)->flags, \
+ ((Uint32) erts_atomic32_read_bset_relb(&(RQ)->flags, \
(erts_aint32_t) (MSK), \
(erts_aint32_t) (FLGS)))
typedef enum {
ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED,
+ ERTS_SCHDLR_SSPND_DONE_NMSCHED_BLOCKED,
ERTS_SCHDLR_SSPND_YIELD_DONE_MSCHED_BLOCKED,
+ ERTS_SCHDLR_SSPND_YIELD_DONE_NMSCHED_BLOCKED,
ERTS_SCHDLR_SSPND_DONE,
ERTS_SCHDLR_SSPND_YIELD_RESTART,
ERTS_SCHDLR_SSPND_YIELD_DONE,
@@ -249,6 +267,9 @@ typedef enum {
#define ERTS_SSI_FLG_TSE_SLEEPING (((erts_aint32_t) 1) << 2)
#define ERTS_SSI_FLG_WAITING (((erts_aint32_t) 1) << 3)
#define ERTS_SSI_FLG_SUSPENDED (((erts_aint32_t) 1) << 4)
+#define ERTS_SSI_FLG_MSB_EXEC (((erts_aint32_t) 1) << 5)
+
+#define ERTS_SSI_FLGS_MAX 6
#define ERTS_SSI_FLGS_SLEEP_TYPE \
(ERTS_SSI_FLG_TSE_SLEEPING|ERTS_SSI_FLG_POLL_SLEEPING)
@@ -259,44 +280,95 @@ typedef enum {
#define ERTS_SSI_FLGS_ALL \
(ERTS_SSI_FLGS_SLEEP \
| ERTS_SSI_FLG_WAITING \
- | ERTS_SSI_FLG_SUSPENDED)
+ | ERTS_SSI_FLG_SUSPENDED \
+ | ERTS_SSI_FLG_MSB_EXEC)
/*
- * Keep ERTS_SSI_AUX_WORK flags in expected frequency order relative
- * eachother. Most frequent - lowest bit number.
+ * Keep ERTS_SSI_AUX_WORK flags ordered in expected frequency
+ * order relative to each other. Most frequent at the lowest
+ * index.
+ *
+ * ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED_IX *needs* to be
+ * the highest index...
+ *
+ * Remember to update description in erts_pre_init_process()
+ * when adding new flags...
*/
-#define ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP (((erts_aint32_t) 1) << 0)
-#define ERTS_SSI_AUX_WORK_DD (((erts_aint32_t) 1) << 1)
-#define ERTS_SSI_AUX_WORK_DD_THR_PRGR (((erts_aint32_t) 1) << 2)
-#define ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC (((erts_aint32_t) 1) << 3)
-#define ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM (((erts_aint32_t) 1) << 4)
-#define ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP (((erts_aint32_t) 1) << 5)
-#define ERTS_SSI_AUX_WORK_ASYNC_READY (((erts_aint32_t) 1) << 6)
-#define ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN (((erts_aint32_t) 1) << 7)
-#define ERTS_SSI_AUX_WORK_MISC_THR_PRGR (((erts_aint32_t) 1) << 8)
-#define ERTS_SSI_AUX_WORK_MISC (((erts_aint32_t) 1) << 9)
-#define ERTS_SSI_AUX_WORK_CHECK_CHILDREN (((erts_aint32_t) 1) << 10)
-#define ERTS_SSI_AUX_WORK_SET_TMO (((erts_aint32_t) 1) << 11)
-#define ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK (((erts_aint32_t) 1) << 12)
-#define ERTS_SSI_AUX_WORK_REAP_PORTS (((erts_aint32_t) 1) << 13)
+typedef enum {
+ ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP_IX,
+ ERTS_SSI_AUX_WORK_DD_IX,
+ ERTS_SSI_AUX_WORK_DD_THR_PRGR_IX,
+ ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC_IX,
+ ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM_IX,
+ ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP_IX,
+ ERTS_SSI_AUX_WORK_CNCLD_TMRS_IX,
+ ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR_IX,
+ ERTS_SSI_AUX_WORK_ASYNC_READY_IX,
+ ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN_IX,
+ ERTS_SSI_AUX_WORK_MISC_THR_PRGR_IX,
+ ERTS_SSI_AUX_WORK_MISC_IX,
+ ERTS_SSI_AUX_WORK_PENDING_EXITERS_IX,
+ ERTS_SSI_AUX_WORK_SET_TMO_IX,
+ ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK_IX,
+ ERTS_SSI_AUX_WORK_YIELD_IX,
+ ERTS_SSI_AUX_WORK_REAP_PORTS_IX,
+ ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED_IX, /* SHOULD be last flag index */
+
+ ERTS_SSI_AUX_WORK_NO_FLAGS /* Not a flag index... */
+} ErtsSsiAuxWorkFlagIndex;
+
+#define ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP_IX)
+#define ERTS_SSI_AUX_WORK_DD \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_DD_IX)
+#define ERTS_SSI_AUX_WORK_DD_THR_PRGR \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_DD_THR_PRGR_IX)
+#define ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC_IX)
+#define ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM_IX)
+#define ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP_IX)
+#define ERTS_SSI_AUX_WORK_CNCLD_TMRS \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_CNCLD_TMRS_IX)
+#define ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR_IX)
+#define ERTS_SSI_AUX_WORK_ASYNC_READY \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_ASYNC_READY_IX)
+#define ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN_IX)
+#define ERTS_SSI_AUX_WORK_MISC_THR_PRGR \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_MISC_THR_PRGR_IX)
+#define ERTS_SSI_AUX_WORK_MISC \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_MISC_IX)
+#define ERTS_SSI_AUX_WORK_PENDING_EXITERS \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_PENDING_EXITERS_IX)
+#define ERTS_SSI_AUX_WORK_SET_TMO \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_SET_TMO_IX)
+#define ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK_IX)
+#define ERTS_SSI_AUX_WORK_YIELD \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_YIELD_IX)
+#define ERTS_SSI_AUX_WORK_REAP_PORTS \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_REAP_PORTS_IX)
+#define ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED_IX)
typedef struct ErtsSchedulerSleepInfo_ ErtsSchedulerSleepInfo;
-#ifdef ERTS_DIRTY_SCHEDULERS
typedef struct {
- erts_smp_spinlock_t lock;
+ erts_spinlock_t lock;
ErtsSchedulerSleepInfo *list;
} ErtsSchedulerSleepList;
-#endif
struct ErtsSchedulerSleepInfo_ {
-#ifdef ERTS_SMP
+ struct ErtsSchedulerData_ *esdp;
ErtsSchedulerSleepInfo *next;
ErtsSchedulerSleepInfo *prev;
- erts_smp_atomic32_t flags;
+ erts_atomic32_t flags;
erts_tse_t *event;
-#endif
+ struct erts_poll_thread *psi;
erts_atomic32_t aux_work;
};
@@ -329,34 +401,33 @@ typedef struct {
Process* last;
} ErtsRunPrioQueue;
+typedef enum {
+ ERTS_SCHED_NORMAL,
+ ERTS_SCHED_DIRTY_CPU,
+ ERTS_SCHED_DIRTY_IO
+} ErtsSchedType;
+
typedef struct ErtsSchedulerData_ ErtsSchedulerData;
typedef struct ErtsRunQueue_ ErtsRunQueue;
typedef struct {
- erts_smp_atomic32_t len;
+ erts_atomic32_t len;
erts_aint32_t max_len;
int reds;
} ErtsRunQueueInfo;
-#ifdef HAVE_GETHRTIME
+#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
# undef ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT
# define ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT 1
#endif
-#ifdef ERTS_SMP
#undef ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
#define ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT
-#ifdef ARCH_64
-typedef erts_atomic_t ErtsAtomicSchedTime;
-#elif defined(ARCH_32)
-typedef erts_dw_atomic_t ErtsAtomicSchedTime;
-#else
-# error :-/
-#endif
+typedef erts_atomic64_t ErtsAtomicSchedTime;
#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
typedef struct {
@@ -394,33 +465,27 @@ struct ErtsMigrationPaths_ {
ErtsMigrationPath mpath[1];
};
-#endif /* ERTS_SMP */
struct ErtsRunQueue_ {
int ix;
- erts_smp_mtx_t mtx;
- erts_smp_cnd_t cnd;
+ erts_mtx_t mtx;
+ erts_cnd_t cnd;
-#ifdef ERTS_DIRTY_SCHEDULERS
-#ifdef ERTS_SMP
ErtsSchedulerSleepList sleepers;
-#endif
-#endif
ErtsSchedulerData *scheduler;
int waiting; /* < 0 in sys schedule; > 0 on cnd variable */
int woken;
- erts_smp_atomic32_t flags;
+ erts_atomic32_t flags;
int check_balance_reds;
int full_reds_history_sum;
int full_reds_history[ERTS_FULL_REDS_HISTORY_SIZE];
int out_of_work_count;
erts_aint32_t max_len;
- erts_aint32_t len;
+ erts_atomic32_t len;
int wakeup_other;
int wakeup_other_reds;
- int halt_in_progress;
struct {
ErtsProcList *pending_exiters;
@@ -437,7 +502,7 @@ struct ErtsRunQueue_ {
struct {
ErtsMiscOpList *start;
ErtsMiscOpList *end;
- erts_smp_atomic_t evac_runq;
+ erts_atomic_t evac_runq;
} misc;
struct {
@@ -450,9 +515,7 @@ struct ErtsRunQueue_ {
#endif
};
-#ifdef ERTS_SMP
extern long erts_runq_supervision_interval;
-#endif
typedef union {
ErtsRunQueue runq;
@@ -461,38 +524,37 @@ typedef union {
extern ErtsAlignedRunQueue *erts_aligned_run_queues;
-#define ERTS_PROC_REDUCTIONS_EXECUTED(RQ, PRIO, REDS, AREDS) \
+#define ERTS_PROC_REDUCTIONS_EXECUTED(SD, RQ, PRIO, REDS, AREDS)\
do { \
(RQ)->procs.reductions += (AREDS); \
(RQ)->procs.prio_info[(PRIO)].reds += (REDS); \
(RQ)->check_balance_reds -= (REDS); \
(RQ)->wakeup_other_reds += (AREDS); \
+ (SD)->check_time_reds += (AREDS); \
} while (0)
-#define ERTS_PORT_REDUCTIONS_EXECUTED(RQ, REDS) \
+#define ERTS_PORT_REDUCTIONS_EXECUTED(SD, RQ, REDS) \
do { \
(RQ)->ports.info.reds += (REDS); \
(RQ)->check_balance_reds -= (REDS); \
(RQ)->wakeup_other_reds += (REDS); \
+ (SD)->check_time_reds += (REDS); \
} while (0)
typedef struct {
- int need; /* "+sbu true" or scheduler_wall_time enabled */
+ union {
+ erts_atomic32_t mod; /* on dirty schedulers */
+ int need; /* "+sbu true" or scheduler_wall_time enabled */
+ } u;
int enabled;
Uint64 start;
struct {
Uint64 total;
Uint64 start;
- int currently;
} working;
} ErtsSchedWallTime;
typedef struct {
- Uint64 reclaimed;
- Uint64 garbage_cols;
-} ErtsGCInfo;
-
-typedef struct {
int sched;
erts_aint32_t aux_work;
} ErtsDelayedAuxWorkWakeupJob;
@@ -501,62 +563,57 @@ typedef struct {
int sched_id;
ErtsSchedulerData *esdp;
ErtsSchedulerSleepInfo *ssi;
-#ifdef ERTS_SMP
ErtsThrPrgrVal current_thr_prgr;
ErtsThrPrgrVal latest_wakeup;
-#endif
struct {
int ix;
-#ifdef ERTS_SMP
ErtsThrPrgrVal thr_prgr;
-#endif
} misc;
-#ifdef ERTS_SMP
struct {
ErtsThrPrgrVal thr_prgr;
- void (*completed_callback)(void *);
- void (*completed_arg)(void *);
} dd;
struct {
ErtsThrPrgrVal thr_prgr;
+ } cncld_tmrs;
+ struct {
+ ErtsThrPrgrVal thr_prgr;
UWord size;
ErtsThrPrgrLaterOp *first;
ErtsThrPrgrLaterOp *last;
} later_op;
-#endif
-#ifdef ERTS_USE_ASYNC_READY_Q
struct {
-#ifdef ERTS_SMP
int need_thr_prgr;
ErtsThrPrgrVal thr_prgr;
-#endif
void *queue;
} async_ready;
-#endif
-#ifdef ERTS_SMP
struct {
Uint64 next;
int *sched2jix;
int jix;
ErtsDelayedAuxWorkWakeupJob *job;
} delayed_wakeup;
-#endif
+ struct {
+ ErtsEtsAllYieldData ets_all;
+ /* Other yielding operations... */
+ } yield;
+ struct {
+ struct {
+ erts_aint32_t flags;
+ void (*callback)(void *);
+ void *arg;
+ } wait_completed;
+ } debug;
} ErtsAuxWorkData;
-#ifdef ERTS_DIRTY_SCHEDULERS
+#define ERTS_SCHED_AUX_YIELD_DATA(ESDP, NAME) \
+ (&(ESDP)->aux_work_data.yield.NAME)
+void erts_notify_new_aux_yield_work(ErtsSchedulerData *esdp);
+
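+/*
+ * Illustrative sketch (editorial addition, not part of the patch): a
+ * yielding operation reaches its per-scheduler state through the
+ * accessor above, e.g.
+ * ErtsEtsAllYieldData *eyd = ERTS_SCHED_AUX_YIELD_DATA(esdp, ets_all);
+ */
+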
typedef enum {
ERTS_DIRTY_CPU_SCHEDULER,
ERTS_DIRTY_IO_SCHEDULER
} ErtsDirtySchedulerType;
-typedef union {
- struct {
- ErtsDirtySchedulerType type: 1;
- unsigned num: 31;
- } s;
- Uint no;
-} ErtsDirtySchedId;
-#endif
struct ErtsSchedulerData_ {
/*
@@ -567,25 +624,20 @@ struct ErtsSchedulerData_ {
Eterm* x_reg_array; /* X registers */
FloatDef* f_reg_array; /* Floating point registers. */
-#ifdef ERTS_SMP
+ ErtsTimerWheel *timer_wheel;
+ ErtsNextTimeoutRef next_tmo_ref;
+ ErtsHLTimerService *timer_service;
ethr_tid tid; /* Thread id */
struct erl_bits_state erl_bits_state; /* erl_bits.c state */
void *match_pseudo_process; /* erl_db_util.c:db_prog_match() */
Process *free_process;
ErtsThrPrgrData thr_progress_data;
-#endif
-#if !HEAP_ON_C_STACK
- Eterm tmp_heap[TMP_HEAP_SIZE];
- int num_tmp_heap_used;
- Eterm beam_emu_tmp_heap[BEAM_EMU_TMP_HEAP_SIZE];
- Eterm erl_arith_tmp_heap[ERL_ARITH_TMP_HEAP_SIZE];
-#endif
ErtsSchedulerSleepInfo *ssi;
Process *current_process;
+ ErtsSchedType type;
Uint no; /* Scheduler number for normal schedulers */
-#ifdef ERTS_DIRTY_SCHEDULERS
- ErtsDirtySchedId dirty_no; /* Scheduler number for dirty schedulers */
-#endif
+ Uint dirty_no; /* Scheduler number for dirty schedulers */
+ Process *dirty_shadow_process;
Port *current_port;
ErtsRunQueue *run_queue;
int virtual_reds;
@@ -593,13 +645,25 @@ struct ErtsSchedulerData_ {
ErtsAuxWorkData aux_work_data;
ErtsAtomCacheMap atom_cache_map;
+ ErtsMonotonicTime last_monotonic_time;
+ int check_time_reds;
+
+ Uint32 thr_id;
+ Uint64 unique;
+ Uint64 ref;
+
ErtsSchedAllocData alloc_data;
+ struct {
+ Uint64 out;
+ Uint64 in;
+ } io;
+
Uint64 reductions;
ErtsSchedWallTime sched_wall_time;
ErtsGCInfo gc_info;
ErtsPortTaskHandle nosuspend_port_task_handle;
-
+ ErtsEtsTables ets_tables;
#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC
erts_alloc_verify_func_t verify_unused_temp_alloc;
Allctr_t *verify_unused_temp_alloc_data;
@@ -612,32 +676,23 @@ typedef union {
} ErtsAlignedSchedulerData;
extern ErtsAlignedSchedulerData *erts_aligned_scheduler_data;
-#ifdef ERTS_DIRTY_SCHEDULERS
extern ErtsAlignedSchedulerData *erts_aligned_dirty_cpu_scheduler_data;
extern ErtsAlignedSchedulerData *erts_aligned_dirty_io_scheduler_data;
-#endif
-#ifndef ERTS_SMP
-extern ErtsSchedulerData *erts_scheduler_data;
-#endif
-#ifdef ERTS_SCHED_FAIR
-#define ERTS_SCHED_FAIR_YIELD() ETHR_YIELD()
-#else
-#define ERTS_SCHED_FAIR 0
-#define ERTS_SCHED_FAIR_YIELD()
+#if defined(ERTS_ENABLE_LOCK_CHECK)
+int erts_lc_runq_is_locked(ErtsRunQueue *);
#endif
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
-int erts_smp_lc_runq_is_locked(ErtsRunQueue *);
-#endif
+void
+erts_debug_later_op_foreach(void (*callback)(void*),
+ void (*func)(void *, ErtsThrPrgrVal, void *),
+ void *arg);
#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
-#ifdef ERTS_SMP
void erts_empty_runq(ErtsRunQueue *rq);
void erts_non_empty_runq(ErtsRunQueue *rq);
-#endif
/*
@@ -645,80 +700,84 @@ void erts_non_empty_runq(ErtsRunQueue *rq);
* other threads peek at values without run queue lock.
*/
-ERTS_GLB_INLINE void erts_smp_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio);
-ERTS_GLB_INLINE void erts_smp_dec_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio);
-ERTS_GLB_INLINE void erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi);
+ERTS_GLB_INLINE void erts_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio);
+ERTS_GLB_INLINE void erts_dec_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio);
+ERTS_GLB_INLINE void erts_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
-erts_smp_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio)
+erts_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio)
{
erts_aint32_t len;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
- len = erts_smp_atomic32_read_nob(&rqi->len);
+ len = erts_atomic32_read_dirty(&rq->len);
+
+ if (len == 0)
+ erts_non_empty_runq(rq);
+ len++;
+ if (rq->max_len < len)
+ rq->max_len = len;
+ ASSERT(len > 0);
+ erts_atomic32_set_nob(&rq->len, len);
+
+ len = erts_atomic32_read_dirty(&rqi->len);
ASSERT(len >= 0);
if (len == 0) {
- ASSERT((erts_smp_atomic32_read_nob(&rq->flags)
+ ASSERT((erts_atomic32_read_nob(&rq->flags)
& ((erts_aint32_t) (1 << prio))) == 0);
- erts_smp_atomic32_read_bor_nob(&rq->flags,
+ erts_atomic32_read_bor_nob(&rq->flags,
(erts_aint32_t) (1 << prio));
}
len++;
if (rqi->max_len < len)
rqi->max_len = len;
- erts_smp_atomic32_set_relb(&rqi->len, len);
-
-#ifdef ERTS_SMP
- if (rq->len == 0)
- erts_non_empty_runq(rq);
-#endif
- rq->len++;
- if (rq->max_len < rq->len)
- rq->max_len = len;
- ASSERT(rq->len > 0);
+ erts_atomic32_set_relb(&rqi->len, len);
}
ERTS_GLB_INLINE void
-erts_smp_dec_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio)
+erts_dec_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio)
{
erts_aint32_t len;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
+
+ len = erts_atomic32_read_dirty(&rq->len);
+ len--;
+ ASSERT(len >= 0);
+ erts_atomic32_set_nob(&rq->len, len);
- len = erts_smp_atomic32_read_nob(&rqi->len);
+ len = erts_atomic32_read_dirty(&rqi->len);
len--;
ASSERT(len >= 0);
if (len == 0) {
- ASSERT((erts_smp_atomic32_read_nob(&rq->flags)
+ ASSERT((erts_atomic32_read_nob(&rq->flags)
& ((erts_aint32_t) (1 << prio))));
- erts_smp_atomic32_read_band_nob(&rq->flags,
+ erts_atomic32_read_band_nob(&rq->flags,
~((erts_aint32_t) (1 << prio)));
}
- erts_smp_atomic32_set_relb(&rqi->len, len);
+ erts_atomic32_set_relb(&rqi->len, len);
- rq->len--;
- ASSERT(rq->len >= 0);
}
ERTS_GLB_INLINE void
-erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi)
+erts_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi)
{
erts_aint32_t len;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
- len = erts_smp_atomic32_read_nob(&rqi->len);
+ len = erts_atomic32_read_dirty(&rqi->len);
ASSERT(rqi->max_len >= len);
rqi->max_len = len;
}
#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
-#define RUNQ_READ_LEN(X) erts_smp_atomic32_read_nob((X))
+#define RUNQ_READ_LEN(X) erts_atomic32_read_nob((X))
#endif /* ERTS_INCLUDE_SCHEDULER_INTERNALS */
@@ -731,15 +790,20 @@ erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi)
#define ERTS_PSD_ERROR_HANDLER 0
#define ERTS_PSD_SAVED_CALLS_BUF 1
#define ERTS_PSD_SCHED_ID 2
-#define ERTS_PSD_DIST_ENTRY 3
-#define ERTS_PSD_CALL_TIME_BP 4
-#define ERTS_PSD_DELAYED_GC_TASK_QS 5
-#ifdef ERTS_DIRTY_SCHEDULERS
-#define ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT 6
+#define ERTS_PSD_CALL_TIME_BP 3
+#define ERTS_PSD_DELAYED_GC_TASK_QS 4
+#define ERTS_PSD_NIF_TRAP_EXPORT 5
+#define ERTS_PSD_ETS_OWNED_TABLES 6
+#define ERTS_PSD_ETS_FIXED_TABLES 7
+#define ERTS_PSD_DIST_ENTRY 8
+#define ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF 9 /* keep last... */
-#define ERTS_PSD_SIZE 7
-#else
-#define ERTS_PSD_SIZE 6
+#define ERTS_PSD_SIZE 10
+
+#if !defined(HIPE)
+# undef ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF
+# undef ERTS_PSD_SIZE
+# define ERTS_PSD_SIZE 9
#endif
typedef struct {
@@ -752,25 +816,29 @@ typedef struct {
#define ERTS_PSD_ERROR_HANDLER_BUF_GET_LOCKS ERTS_PROC_LOCK_MAIN
#define ERTS_PSD_ERROR_HANDLER_BUF_SET_LOCKS ERTS_PROC_LOCK_MAIN
-#define ERTS_PSD_SAVED_CALLS_BUF_GET_LOCKS ERTS_PROC_LOCK_MAIN
-#define ERTS_PSD_SAVED_CALLS_BUF_SET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_SAVED_CALLS_BUF_GET_LOCKS ((ErtsProcLocks) 0)
+#define ERTS_PSD_SAVED_CALLS_BUF_SET_LOCKS ((ErtsProcLocks) 0)
#define ERTS_PSD_SCHED_ID_GET_LOCKS ERTS_PROC_LOCK_STATUS
#define ERTS_PSD_SCHED_ID_SET_LOCKS ERTS_PROC_LOCK_STATUS
-#define ERTS_PSD_DIST_ENTRY_GET_LOCKS ERTS_PROC_LOCK_MAIN
-#define ERTS_PSD_DIST_ENTRY_SET_LOCKS ERTS_PROC_LOCK_MAIN
-
#define ERTS_PSD_CALL_TIME_BP_GET_LOCKS ERTS_PROC_LOCK_MAIN
#define ERTS_PSD_CALL_TIME_BP_SET_LOCKS ERTS_PROC_LOCK_MAIN
#define ERTS_PSD_DELAYED_GC_TASK_QS_GET_LOCKS ERTS_PROC_LOCK_MAIN
#define ERTS_PSD_DELAYED_GC_TASK_QS_SET_LOCKS ERTS_PROC_LOCK_MAIN
-#ifdef ERTS_DIRTY_SCHEDULERS
-#define ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_GET_LOCKS ERTS_PROC_LOCK_MAIN
-#define ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_SET_LOCKS ERTS_PROC_LOCK_MAIN
-#endif
+#define ERTS_PSD_NIF_TRAP_EXPORT_GET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_NIF_TRAP_EXPORT_SET_LOCKS ERTS_PROC_LOCK_MAIN
+
+#define ERTS_PSD_ETS_OWNED_TABLES_GET_LOCKS ERTS_PROC_LOCK_STATUS
+#define ERTS_PSD_ETS_OWNED_TABLES_SET_LOCKS ERTS_PROC_LOCK_STATUS
+
+#define ERTS_PSD_ETS_FIXED_TABLES_GET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_ETS_FIXED_TABLES_SET_LOCKS ERTS_PROC_LOCK_MAIN
+
+#define ERTS_PSD_DIST_ENTRY_GET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_DIST_ENTRY_SET_LOCKS ERTS_PROC_LOCK_MAIN
typedef struct {
ErtsProcLocks get_locks;
@@ -786,7 +854,7 @@ extern ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE];
#define ERTS_SCHED_STAT_MODIFY_CLEAR 3
typedef struct {
- erts_smp_spinlock_t lock;
+ erts_spinlock_t lock;
int enabled;
struct {
Eterm name;
@@ -807,7 +875,6 @@ typedef struct {
typedef struct ErtsProcSysTask_ ErtsProcSysTask;
typedef struct ErtsProcSysTaskQs_ ErtsProcSysTaskQs;
-#ifdef ERTS_SMP
typedef struct ErtsPendingSuspend_ ErtsPendingSuspend;
struct ErtsPendingSuspend_ {
@@ -820,7 +887,6 @@ struct ErtsPendingSuspend_ {
Eterm pid);
};
-#endif
/* Defines to ease the change of memory architecture */
@@ -847,10 +913,18 @@ struct ErtsPendingSuspend_ {
# define MIN_VHEAP_SIZE(p) (p)->min_vheap_size
# define BIN_VHEAP_SZ(p) (p)->bin_vheap_sz
-# define BIN_VHEAP_MATURE(p) (p)->bin_vheap_mature
# define BIN_OLD_VHEAP_SZ(p) (p)->bin_old_vheap_sz
# define BIN_OLD_VHEAP(p) (p)->bin_old_vheap
+# define MAX_HEAP_SIZE_GET(p) ((p)->max_heap_size >> 2)
+# define MAX_HEAP_SIZE_SET(p, sz) ((p)->max_heap_size = ((sz) << 2) | \
+ MAX_HEAP_SIZE_FLAGS_GET(p))
+# define MAX_HEAP_SIZE_FLAGS_GET(p) ((p)->max_heap_size & 0x3)
+# define MAX_HEAP_SIZE_FLAGS_SET(p, flags) ((p)->max_heap_size = flags | \
+ ((p)->max_heap_size & ~0x3))
+# define MAX_HEAP_SIZE_KILL 1
+# define MAX_HEAP_SIZE_LOG 2
+
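+/*
+ * Illustrative sketch (editorial addition, not part of the patch): the
+ * limit in words and the kill/log flags share one Uint; the flags occupy
+ * the two least significant bits and the size the bits above them, as
+ * the macros above encode.
+ */
+static Uint example_pack_max_heap(Uint sz_words, Uint flags)
+{
+ return (sz_words << 2) | (flags & 0x3); /* same layout as MAX_HEAP_SIZE_SET */
+}
+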
struct process {
ErtsPTabElementCommon common; /* *Need* to be first in struct */
@@ -865,9 +939,11 @@ struct process {
Eterm* stop; /* Stack top */
Eterm* heap; /* Heap start */
Eterm* hend; /* Heap end */
+ Eterm* abandoned_heap;
Uint heap_sz; /* Size of heap in words */
Uint min_heap_size; /* Minimum size of heap (in words). */
Uint min_vheap_size; /* Minimum size of virtual heap (in words). */
+ Uint max_heap_size; /* Maximum size of heap (in words). */
#if !defined(NO_FPE_SIGNALS) || defined(HIPE)
volatile unsigned long fp_exception;
@@ -899,8 +975,7 @@ struct process {
Uint32 rcount; /* suspend count */
int schedule_count; /* Times left to reschedule a low prio process */
Uint reds; /* No of reductions for this process */
- Eterm group_leader; /* Pid in charge
- (can be boxed) */
+ Eterm group_leader; /* Pid in charge (can be boxed) */
Uint flags; /* Trap exit, etc (no trace flags anymore) */
Eterm fvalue; /* Exit & Throw value (failure reason) */
Uint freason; /* Reason for detected failure */
@@ -916,10 +991,7 @@ struct process {
ErlMessageQueue msg; /* Message queue */
- union {
- ErtsBifTimer *bif_timers; /* Bif timers aiming at this process */
- void *terminate;
- } u;
+ ErtsBifTimers *bif_timers; /* Bif timers aiming at this process */
ProcDict *dictionary; /* Process dictionary, may be NULL */
@@ -930,21 +1002,27 @@ struct process {
#ifdef USE_VM_PROBES
 Eterm dt_utag; /* Place to store the dynamic trace user tag */
Uint dt_utag_flags; /* flag field for the dt_utag */
-#endif
- BeamInstr initial[3]; /* Initial module(0), function(1), arity(2), often used instead
- of pointer to funcinfo instruction, hence the BeamInstr datatype */
- BeamInstr* current; /* Current Erlang function, part of the funcinfo:
+#endif
+ union {
+ void *terminate;
+ ErtsCodeMFA initial; /* Initial module(0), function(1), arity(2),
+ often used instead of pointer to funcinfo
+ instruction. */
+ } u;
+ ErtsCodeMFA* current; /* Current Erlang function, part of the funcinfo:
* module(0), function(1), arity(2)
* (module and functions are tagged atoms;
- * arity an untagged integer). BeamInstr * because it references code
+ * arity an untagged integer).
*/
-
+
/*
* Information mainly for post-mortem use (erl crash dump).
*/
Eterm parent; /* Pid of process that created this process. */
erts_approx_time_t approx_started; /* Time when started. */
+ Uint32 static_flags; /* Flags that do *not* change */
+
 /* This is the place where all fields that differ between memory
 * architectures have gone.
*/
@@ -956,35 +1034,38 @@ struct process {
Uint16 gen_gcs; /* Number of (minor) generational GCs. */
Uint16 max_gen_gcs; /* Max minor gen GCs before fullsweep. */
ErlOffHeap off_heap; /* Off-heap data updated by copy_struct(). */
- ErlHeapFragment* mbuf; /* Pointer to message buffer list */
- Uint mbuf_sz; /* Size of all message buffers */
- ErtsPSD *psd; /* Rarely used process specific data */
+ ErlHeapFragment* mbuf; /* Pointer to heap fragment list */
+ ErlHeapFragment* live_hf_end;
+ ErtsMessage *msg_frag; /* Pointer to message fragment list */
+ Uint mbuf_sz; /* Total size of heap fragments and message fragments */
+ erts_atomic_t psd; /* Rarely used process specific data */
Uint64 bin_vheap_sz; /* Virtual heap block size for binaries */
- Uint64 bin_vheap_mature; /* Virtual heap block size for binaries */
Uint64 bin_old_vheap_sz; /* Virtual old heap block size for binaries */
Uint64 bin_old_vheap; /* Virtual old heap size for binaries */
ErtsProcSysTaskQs *sys_task_qs;
+ ErtsProcSysTask *dirty_sys_tasks;
- erts_smp_atomic32_t state; /* Process state flags (see ERTS_PSFLG_*) */
+ erts_atomic32_t state; /* Process state flags (see ERTS_PSFLG_*) */
+ erts_atomic32_t dirty_state; /* Process dirty state flags (see ERTS_PDSFLG_*) */
-#ifdef ERTS_SMP
ErlMessageInQueue msg_inq;
+ ErlTraceMessageQueue *trace_msg_q;
ErtsPendExit pending_exit;
erts_proc_lock_t lock;
ErtsSchedulerData *scheduler_data;
Eterm suspendee;
ErtsPendingSuspend *pending_suspenders;
- erts_smp_atomic_t run_queue;
+ erts_atomic_t run_queue;
#ifdef HIPE
struct hipe_process_state_smp hipe_smp;
#endif
-#endif
#ifdef CHECK_FOR_HOLES
Eterm* last_htop; /* No need to scan the heap below this point. */
ErlHeapFragment* last_mbuf; /* No need to scan beyond this mbuf. */
+ ErlHeapFragment* heap_hfrag; /* Heap abandoned, htop now lives in this frag */
#endif
#ifdef DEBUG
@@ -999,6 +1080,10 @@ struct process {
Uint space_verified; /* Avoid HAlloc forcing heap fragments when */
Eterm* space_verified_from; /* we rely on available heap space (TestHeap) */
#endif
+
+#ifdef DEBUG
+ Uint debug_reds_in;
+#endif
};
extern const Process erts_invalid_process;
@@ -1008,6 +1093,7 @@ extern const Process erts_invalid_process;
do { \
(p)->last_htop = 0; \
(p)->last_mbuf = 0; \
+ (p)->heap_hfrag = NULL; \
} while (0)
# define ERTS_HOLE_CHECK(p) erts_check_for_holes((p))
@@ -1060,6 +1146,9 @@ void erts_check_for_holes(Process* p);
* USR_PRIO -> User prio. i.e., prio the user has set.
* PRQ_PRIO -> Prio queue prio, i.e., prio queue currently
* enqueued in.
+ *
+ * Update etp-proc-state-int in $ERL_TOP/erts/etc/unix/etp-commands.in
+ * when changing ERTS_PSFLG_*.
*/
#define ERTS_PSFLGS_ACT_PRIO_MASK \
(ERTS_PSFLGS_PRIO_MASK << ERTS_PSFLGS_ACT_PRIO_OFFSET)
@@ -1085,24 +1174,70 @@ void erts_check_for_holes(Process* p);
#define ERTS_PSFLG_RUNNING_SYS ERTS_PSFLG_BIT(15)
#define ERTS_PSFLG_PROXY ERTS_PSFLG_BIT(16)
#define ERTS_PSFLG_DELAYED_SYS ERTS_PSFLG_BIT(17)
-#ifdef ERTS_DIRTY_SCHEDULERS
-#define ERTS_PSFLG_DIRTY_CPU_PROC ERTS_PSFLG_BIT(18)
-#define ERTS_PSFLG_DIRTY_IO_PROC ERTS_PSFLG_BIT(19)
-#define ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q ERTS_PSFLG_BIT(20)
-#define ERTS_PSFLG_DIRTY_IO_PROC_IN_Q ERTS_PSFLG_BIT(21)
-#endif
+#define ERTS_PSFLG_OFF_HEAP_MSGQ ERTS_PSFLG_BIT(18)
+#define ERTS_PSFLG_ON_HEAP_MSGQ ERTS_PSFLG_BIT(19)
+#define ERTS_PSFLG_DIRTY_CPU_PROC ERTS_PSFLG_BIT(20)
+#define ERTS_PSFLG_DIRTY_IO_PROC ERTS_PSFLG_BIT(21)
+#define ERTS_PSFLG_DIRTY_ACTIVE_SYS ERTS_PSFLG_BIT(22)
+#define ERTS_PSFLG_DIRTY_RUNNING ERTS_PSFLG_BIT(23)
+#define ERTS_PSFLG_DIRTY_RUNNING_SYS ERTS_PSFLG_BIT(24)
+
+#define ERTS_PSFLG_MAX (ERTS_PSFLGS_ZERO_BIT_OFFSET + 24)
+
+#define ERTS_PSFLGS_DIRTY_WORK (ERTS_PSFLG_DIRTY_CPU_PROC \
+ | ERTS_PSFLG_DIRTY_IO_PROC \
+ | ERTS_PSFLG_DIRTY_ACTIVE_SYS)
#define ERTS_PSFLGS_IN_PRQ_MASK (ERTS_PSFLG_IN_PRQ_MAX \
| ERTS_PSFLG_IN_PRQ_HIGH \
| ERTS_PSFLG_IN_PRQ_NORMAL \
| ERTS_PSFLG_IN_PRQ_LOW)
+#define ERTS_PSFLGS_VOLATILE_HEAP (ERTS_PSFLG_EXITING \
+ | ERTS_PSFLG_PENDING_EXIT \
+ | ERTS_PSFLG_DIRTY_RUNNING \
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)
+
#define ERTS_PSFLGS_GET_ACT_PRIO(PSFLGS) \
(((PSFLGS) >> ERTS_PSFLGS_ACT_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK)
#define ERTS_PSFLGS_GET_USR_PRIO(PSFLGS) \
(((PSFLGS) >> ERTS_PSFLGS_USR_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK)
#define ERTS_PSFLGS_GET_PRQ_PRIO(PSFLGS) \
- (((PSFLGS) >> ERTS_PSFLGS_USR_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK)
+ (((PSFLGS) >> ERTS_PSFLGS_PRQ_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK)
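+
+/*
+ * Illustrative sketch (editorial addition, not part of the patch): the
+ * three priority fields occupy disjoint bit ranges of the state word,
+ * so they decode independently.
+ */
+static void example_decode_prios(erts_aint32_t st, int *act, int *usr, int *prq)
+{
+ *act = (int) ERTS_PSFLGS_GET_ACT_PRIO(st); /* prio acted upon */
+ *usr = (int) ERTS_PSFLGS_GET_USR_PRIO(st); /* prio set by the user */
+ *prq = (int) ERTS_PSFLGS_GET_PRQ_PRIO(st); /* queue currently enqueued in */
+}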
+
+
+/*
+ * Flags in the dirty_state field.
+ */
+
+#define ERTS_PDSFLG_IN_CPU_PRQ_MAX (((erts_aint32_t) 1) << 0)
+#define ERTS_PDSFLG_IN_CPU_PRQ_HIGH (((erts_aint32_t) 1) << 1)
+#define ERTS_PDSFLG_IN_CPU_PRQ_NORMAL (((erts_aint32_t) 1) << 2)
+#define ERTS_PDSFLG_IN_CPU_PRQ_LOW (((erts_aint32_t) 1) << 3)
+#define ERTS_PDSFLG_IN_IO_PRQ_MAX (((erts_aint32_t) 1) << 4)
+#define ERTS_PDSFLG_IN_IO_PRQ_HIGH (((erts_aint32_t) 1) << 5)
+#define ERTS_PDSFLG_IN_IO_PRQ_NORMAL (((erts_aint32_t) 1) << 6)
+#define ERTS_PDSFLG_IN_IO_PRQ_LOW (((erts_aint32_t) 1) << 7)
+
+#define ERTS_PDSFLGS_QMASK ERTS_PSFLGS_QMASK
+#define ERTS_PDSFLGS_IN_CPU_PRQ_MASK_OFFSET 0
+#define ERTS_PDSFLGS_IN_IO_PRQ_MASK_OFFSET ERTS_PSFLGS_QMASK_BITS
+
+#define ERTS_PDSFLG_IN_CPU_PRQ_MASK (ERTS_PDSFLG_IN_CPU_PRQ_MAX \
+ | ERTS_PDSFLG_IN_CPU_PRQ_HIGH \
+ | ERTS_PDSFLG_IN_CPU_PRQ_NORMAL\
+ | ERTS_PDSFLG_IN_CPU_PRQ_LOW)
+#define ERTS_PDSFLG_IN_IO_PRQ_MASK (ERTS_PDSFLG_IN_IO_PRQ_MAX \
+ | ERTS_PDSFLG_IN_IO_PRQ_HIGH \
+ | ERTS_PDSFLG_IN_IO_PRQ_NORMAL \
+ | ERTS_PDSFLG_IN_IO_PRQ_LOW)
+
+
+/*
+ * Static flags that do not change after process creation.
+ */
+#define ERTS_STC_FLG_SYSTEM_PROC (((Uint32) 1) << 0)
+#define ERTS_STC_FLG_SHADOW_PROC (((Uint32) 1) << 1)
/* The sequential tracing token is a tuple of size 5:
*
@@ -1131,12 +1266,17 @@ void erts_check_for_holes(Process* p);
#define SPO_LINK 1
#define SPO_USE_ARGS 2
#define SPO_MONITOR 4
+#define SPO_SYSTEM_PROC 8
+#define SPO_OFF_HEAP_MSGQ 16
+#define SPO_ON_HEAP_MSGQ 32
+
+extern int erts_default_spo_flags;
/*
* The following struct contains options for a process to be spawned.
*/
typedef struct {
- Uint flags;
+ int flags;
int error_code; /* Error code returned from create_process(). */
Eterm mref; /* Monitor ref returned (if SPO_MONITOR was given). */
@@ -1148,6 +1288,8 @@ typedef struct {
Uint min_vheap_size; /* Minimum virtual heap size */
int priority; /* Priority for process. */
Uint16 max_gen_gcs; /* Maximum number of gen GCs before fullsweep. */
+ Uint max_heap_size; /* Maximum heap size in words */
+ Uint max_heap_flags; /* Maximum heap flags (kill | log) */
int scheduler;
} ErlSpawnOpts;
@@ -1164,10 +1306,13 @@ ERTS_GLB_INLINE void erts_heap_frag_shrink(Process* p, Eterm* hp);
ERTS_GLB_INLINE void erts_heap_frag_shrink(Process* p, Eterm* hp)
{
ErlHeapFragment* hf = MBUF(p);
+ Uint sz;
- ASSERT(hf!=NULL && (hp - hf->mem < (unsigned long)hf->alloc_size));
+ ASSERT(hf!=NULL && (hp - hf->mem < hf->alloc_size));
- hf->used_size = hp - hf->mem;
+ sz = hp - hf->mem;
+ p->mbuf_sz -= hf->used_size - sz;
+ hf->used_size = sz;
}
#endif /* inline */
@@ -1176,8 +1321,7 @@ Eterm* erts_heap_alloc(Process* p, Uint need, Uint xtra);
Eterm* erts_set_hole_marker(Eterm* ptr, Uint sz);
#endif
-extern Uint erts_default_process_flags;
-extern erts_smp_rwmtx_t erts_cpu_bind_rwmtx;
+extern erts_rwmtx_t erts_cpu_bind_rwmtx;
/* If any of the erts_system_monitor_* variables are set (enabled),
** erts_system_monitor must be != NIL, to allow testing on just
** the erts_system_monitor_* variables.
@@ -1204,6 +1348,7 @@ struct erts_system_profile_flags_t {
unsigned int exclusive : 1;
};
extern struct erts_system_profile_flags_t erts_system_profile_flags;
+extern int erts_system_profile_ts_type;
/* process flags */
#define F_HIBERNATE_SCHED (1 << 0) /* Schedule out after hibernate op */
@@ -1217,65 +1362,117 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags;
#define F_HAVE_BLCKD_MSCHED (1 << 8) /* Process has blocked multi-scheduling */
#define F_P2PNR_RESCHED (1 << 9) /* Process has been rescheduled via erts_pid2proc_not_running() */
#define F_FORCE_GC (1 << 10) /* Force gc at process in-scheduling */
-#define F_DISABLE_GC (1 << 11) /* Disable GC */
+#define F_DISABLE_GC (1 << 11) /* Disable GC (see below) */
+#define F_OFF_HEAP_MSGQ (1 << 12) /* Off heap msg queue */
+#define F_ON_HEAP_MSGQ (1 << 13) /* On heap msg queue */
+#define F_OFF_HEAP_MSGQ_CHNG (1 << 14) /* Off heap msg queue changing */
+#define F_ABANDONED_HEAP_USE (1 << 15) /* Have usage of abandoned heap */
+#define F_DELAY_GC (1 << 16) /* Similar to disable GC (see below) */
+#define F_SCHDLR_ONLN_WAITQ (1 << 17) /* Process enqueued waiting to change schedulers online */
+#define F_HAVE_BLCKD_NMSCHED (1 << 18) /* Process has blocked normal multi-scheduling */
+#define F_HIPE_MODE (1 << 19) /* Process is executing in HiPE mode */
+#define F_DELAYED_DEL_PROC (1 << 20) /* Delay delete process (dirty proc exit case) */
+#define F_DIRTY_CLA (1 << 21) /* Dirty copy literal area scheduled */
+#define F_DIRTY_GC_HIBERNATE (1 << 22) /* Dirty GC hibernate scheduled */
+#define F_DIRTY_MAJOR_GC (1 << 23) /* Dirty major GC scheduled */
+#define F_DIRTY_MINOR_GC (1 << 24) /* Dirty minor GC scheduled */
+#define F_HIBERNATED (1 << 25) /* Hibernated */
+
+/*
+ * F_DISABLE_GC and F_DELAY_GC are similar. Both will prevent
+ * GC of the process, but it is important to use the right
+ * one:
+ * - F_DISABLE_GC should *only* be used by BIFs, when a BIF
+ * needs to yield while preventing a GC.
+ * - F_DELAY_GC should only be used when GC is temporarily
+ * disabled while the process is scheduled. A process must
+ * not be scheduled out while F_DELAY_GC is set.
+ */
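+
+/*
+ * Illustrative sketch (editorial addition, not part of the patch): a
+ * yielding BIF is the intended user of F_DISABLE_GC, toggled via
+ * erts_set_gc_state() (declared further down in this header) rather
+ * than by writing p->flags directly. The example_ name is hypothetical.
+ */
+int erts_set_gc_state(Process *c_p, int enable);
+static int example_bif_continuation(Process *c_p, int done)
+{
+ if (!(c_p->flags & F_DISABLE_GC))
+ erts_set_gc_state(c_p, 0); /* disable GC before yielding */
+ if (done && (c_p->flags & F_DISABLE_GC))
+ erts_set_gc_state(c_p, 1); /* re-enable once the work is done */
+ return done;
+}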
+
+#define ERTS_TRACE_FLAGS_TS_TYPE_SHIFT 0
+
+#define F_TRACE_FLAG(N) (1 << (ERTS_TRACE_TS_TYPE_BITS + (N)))
/* process trace_flags */
-#define F_SENSITIVE (1 << 0)
-#define F_TRACE_SEND (1 << 1)
-#define F_TRACE_RECEIVE (1 << 2)
-#define F_TRACE_SOS (1 << 3) /* Set on spawn */
-#define F_TRACE_SOS1 (1 << 4) /* Set on first spawn */
-#define F_TRACE_SOL (1 << 5) /* Set on link */
-#define F_TRACE_SOL1 (1 << 6) /* Set on first link */
-#define F_TRACE_CALLS (1 << 7)
-#define F_TIMESTAMP (1 << 8)
-#define F_TRACE_PROCS (1 << 9)
-#define F_TRACE_FIRST_CHILD (1 << 10)
-#define F_TRACE_SCHED (1 << 11)
-#define F_TRACE_GC (1 << 12)
-#define F_TRACE_ARITY_ONLY (1 << 13)
-#define F_TRACE_RETURN_TO (1 << 14) /* Return_to trace when breakpoint tracing */
-#define F_TRACE_SILENT (1 << 15) /* No call trace msg suppress */
-#define F_TRACER (1 << 16) /* May be (has been) tracer */
-#define F_EXCEPTION_TRACE (1 << 17) /* May have exception trace on stack */
+#define F_NOW_TS (ERTS_TRACE_FLG_NOW_TIMESTAMP \
+ << ERTS_TRACE_FLAGS_TS_TYPE_SHIFT)
+#define F_STRICT_MON_TS (ERTS_TRACE_FLG_STRICT_MONOTONIC_TIMESTAMP \
+ << ERTS_TRACE_FLAGS_TS_TYPE_SHIFT)
+#define F_MON_TS (ERTS_TRACE_FLG_MONOTONIC_TIMESTAMP \
+ << ERTS_TRACE_FLAGS_TS_TYPE_SHIFT)
+#define F_SENSITIVE F_TRACE_FLAG(0)
+#define F_TRACE_SEND F_TRACE_FLAG(1)
+#define F_TRACE_RECEIVE F_TRACE_FLAG(2)
+#define F_TRACE_SOS F_TRACE_FLAG(3) /* Set on spawn */
+#define F_TRACE_SOS1 F_TRACE_FLAG(4) /* Set on first spawn */
+#define F_TRACE_SOL F_TRACE_FLAG(5) /* Set on link */
+#define F_TRACE_SOL1 F_TRACE_FLAG(6) /* Set on first link */
+#define F_TRACE_CALLS F_TRACE_FLAG(7)
+#define F_TRACE_PROCS F_TRACE_FLAG(8)
+#define F_TRACE_FIRST_CHILD F_TRACE_FLAG(9)
+#define F_TRACE_SCHED F_TRACE_FLAG(10)
+#define F_TRACE_GC F_TRACE_FLAG(11)
+#define F_TRACE_ARITY_ONLY F_TRACE_FLAG(12)
+#define F_TRACE_RETURN_TO F_TRACE_FLAG(13) /* Return_to trace when breakpoint tracing */
+#define F_TRACE_SILENT F_TRACE_FLAG(14) /* No call trace msg suppress */
+#define F_EXCEPTION_TRACE F_TRACE_FLAG(15) /* May have exception trace on stack */
/* port trace flags, currently the same as process trace flags */
-#define F_TRACE_SCHED_PORTS (1 << 18) /* Trace of port scheduling */
-#define F_TRACE_SCHED_PROCS (1 << 19) /* With virtual scheduling */
-#define F_TRACE_PORTS (1 << 20) /* Ports equivalent to F_TRACE_PROCS */
-#define F_TRACE_SCHED_NO (1 << 21) /* Trace with scheduler id */
-#define F_TRACE_SCHED_EXIT (1 << 22)
+#define F_TRACE_SCHED_PORTS F_TRACE_FLAG(17) /* Trace of port scheduling */
+#define F_TRACE_SCHED_PROCS F_TRACE_FLAG(18) /* With virtual scheduling */
+#define F_TRACE_PORTS F_TRACE_FLAG(19) /* Ports equivalent to F_TRACE_PROCS */
+#define F_TRACE_SCHED_NO F_TRACE_FLAG(20) /* Trace with scheduler id */
+#define F_TRACE_SCHED_EXIT F_TRACE_FLAG(21)
-#define F_NUM_FLAGS 23
+#define F_NUM_FLAGS (ERTS_TRACE_TS_TYPE_BITS + 22)
#ifdef DEBUG
# define F_INITIAL_TRACE_FLAGS (5 << F_NUM_FLAGS)
#else
# define F_INITIAL_TRACE_FLAGS 0
#endif
-
+/* F_TIMESTAMP_MASK is a bit-field of all timestamp types */
+#define F_TIMESTAMP_MASK \
+ (ERTS_TRACE_TS_TYPE_MASK << ERTS_TRACE_FLAGS_TS_TYPE_SHIFT)
#define TRACEE_FLAGS ( F_TRACE_PROCS | F_TRACE_CALLS \
| F_TRACE_SOS | F_TRACE_SOS1| F_TRACE_RECEIVE \
| F_TRACE_SOL | F_TRACE_SOL1 | F_TRACE_SEND \
- | F_TRACE_SCHED | F_TIMESTAMP | F_TRACE_GC \
+ | F_TRACE_SCHED | F_TIMESTAMP_MASK | F_TRACE_GC \
| F_TRACE_ARITY_ONLY | F_TRACE_RETURN_TO \
| F_TRACE_SILENT | F_TRACE_SCHED_PROCS | F_TRACE_PORTS \
| F_TRACE_SCHED_PORTS | F_TRACE_SCHED_NO \
| F_TRACE_SCHED_EXIT)
+
#define ERTS_TRACEE_MODIFIER_FLAGS \
- (F_TRACE_SILENT | F_TIMESTAMP | F_TRACE_SCHED_NO)
-#define ERTS_PORT_TRACEE_FLAGS \
- (ERTS_TRACEE_MODIFIER_FLAGS | F_TRACE_PORTS | F_TRACE_SCHED_PORTS)
+ (F_TRACE_SILENT | F_TIMESTAMP_MASK | F_TRACE_SCHED_NO \
+ | F_TRACE_RECEIVE | F_TRACE_SEND)
+#define ERTS_PORT_TRACEE_FLAGS \
+ (ERTS_TRACEE_MODIFIER_FLAGS | F_TRACE_PORTS | F_TRACE_SCHED_PORTS)
#define ERTS_PROC_TRACEE_FLAGS \
- ((TRACEE_FLAGS & ~ERTS_PORT_TRACEE_FLAGS) | ERTS_TRACEE_MODIFIER_FLAGS)
+ ((TRACEE_FLAGS & ~ERTS_PORT_TRACEE_FLAGS) | ERTS_TRACEE_MODIFIER_FLAGS)
+
+#define SEQ_TRACE_FLAG(N) (1 << (ERTS_TRACE_TS_TYPE_BITS + (N)))
/* Sequential trace flags */
+
+/* SEQ_TRACE_TIMESTAMP_MASK is a bit-field */
+#define SEQ_TRACE_TIMESTAMP_MASK \
+ (ERTS_TRACE_TS_TYPE_MASK << ERTS_SEQ_TRACE_FLAGS_TS_TYPE_SHIFT)
+
#define SEQ_TRACE_SEND (1 << 0)
#define SEQ_TRACE_RECEIVE (1 << 1)
#define SEQ_TRACE_PRINT (1 << 2)
-#define SEQ_TRACE_TIMESTAMP (1 << 3)
+
+#define ERTS_SEQ_TRACE_FLAGS_TS_TYPE_SHIFT 3
+
+#define SEQ_TRACE_NOW_TS (ERTS_TRACE_FLG_NOW_TIMESTAMP \
+ << ERTS_SEQ_TRACE_FLAGS_TS_TYPE_SHIFT)
+#define SEQ_TRACE_STRICT_MON_TS (ERTS_TRACE_FLG_STRICT_MONOTONIC_TIMESTAMP \
+ << ERTS_SEQ_TRACE_FLAGS_TS_TYPE_SHIFT)
+#define SEQ_TRACE_MON_TS (ERTS_TRACE_FLG_MONOTONIC_TIMESTAMP \
+ << ERTS_SEQ_TRACE_FLAGS_TS_TYPE_SHIFT)
#ifdef USE_VM_PROBES
#define DT_UTAG_PERMANENT (1 << 0)
@@ -1288,86 +1485,66 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags;
#define ERTS_XSIG_FLG_IGN_KILL (((Uint32) 1) << 0)
#define ERTS_XSIG_FLG_NO_IGN_NORMAL (((Uint32) 1) << 1)
-#define CANCEL_TIMER(p) \
- do { \
- if ((p)->flags & (F_INSLPQUEUE)) \
- cancel_timer(p); \
- else \
- (p)->flags &= ~F_TIMO; \
+#define CANCEL_TIMER(P) \
+ do { \
+ if ((P)->flags & (F_INSLPQUEUE|F_TIMO)) { \
+ if ((P)->flags & F_INSLPQUEUE) \
+ erts_cancel_proc_timer((P)); \
+ else \
+ (P)->flags &= ~F_TIMO; \
+ } \
} while (0)
-#if defined(ERTS_DIRTY_SCHEDULERS) && defined(ERTS_SMP)
-#define ERTS_NUM_DIRTY_RUNQS 2
-#else
-#define ERTS_NUM_DIRTY_RUNQS 0
-#endif
+#define ERTS_NUM_DIRTY_CPU_RUNQS 1
+#define ERTS_NUM_DIRTY_IO_RUNQS 1
+
+#define ERTS_NUM_DIRTY_RUNQS (ERTS_NUM_DIRTY_CPU_RUNQS+ERTS_NUM_DIRTY_IO_RUNQS)
#define ERTS_RUNQ_IX(IX) \
- (ASSERT(0 <= (IX) && (IX) < erts_no_run_queues), \
+ (ASSERT(0 <= (IX) && (IX) < erts_no_run_queues+ERTS_NUM_DIRTY_RUNQS), \
&erts_aligned_run_queues[(IX)].runq)
-#ifdef ERTS_DIRTY_SCHEDULERS
#define ERTS_RUNQ_IX_IS_DIRTY(IX) \
- (-(ERTS_NUM_DIRTY_RUNQS) <= (IX) && (IX) < 0)
+ (ASSERT(0 <= (IX) && (IX) < erts_no_run_queues+ERTS_NUM_DIRTY_RUNQS), \
+ (erts_no_run_queues <= (IX)))
#define ERTS_DIRTY_RUNQ_IX(IX) \
(ASSERT(ERTS_RUNQ_IX_IS_DIRTY(IX)), \
&erts_aligned_run_queues[(IX)].runq)
-#define ERTS_DIRTY_CPU_RUNQ (&erts_aligned_run_queues[-1].runq)
-#define ERTS_DIRTY_IO_RUNQ (&erts_aligned_run_queues[-2].runq)
-#define ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(RQ) ((RQ)->ix == -1)
-#define ERTS_RUNQ_IS_DIRTY_IO_RUNQ(RQ) ((RQ)->ix == -2)
-#else
-#define ERTS_RUNQ_IX_IS_DIRTY(IX) 0
-#endif
+#define ERTS_DIRTY_CPU_RUNQ (&erts_aligned_run_queues[erts_no_run_queues].runq)
+#define ERTS_DIRTY_IO_RUNQ (&erts_aligned_run_queues[erts_no_run_queues+1].runq)
+#define ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(RQ) ((RQ) == ERTS_DIRTY_CPU_RUNQ)
+#define ERTS_RUNQ_IS_DIRTY_IO_RUNQ(RQ) ((RQ) == ERTS_DIRTY_IO_RUNQ)
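+
+/*
+ * Editorial note (not part of the patch): with N == erts_no_run_queues
+ * normal run queues the array layout is [0 .. N-1] normal, [N] dirty
+ * CPU, [N+1] dirty I/O, replacing the old scheme of negative indices
+ * for the dirty queues.
+ */
+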
#define ERTS_SCHEDULER_IX(IX) \
(ASSERT(0 <= (IX) && (IX) < erts_no_schedulers), \
&erts_aligned_scheduler_data[(IX)].esd)
-#ifdef ERTS_DIRTY_SCHEDULERS
#define ERTS_DIRTY_CPU_SCHEDULER_IX(IX) \
(ASSERT(0 <= (IX) && (IX) < erts_no_dirty_cpu_schedulers), \
&erts_aligned_dirty_cpu_scheduler_data[(IX)].esd)
#define ERTS_DIRTY_IO_SCHEDULER_IX(IX) \
(ASSERT(0 <= (IX) && (IX) < erts_no_dirty_io_schedulers), \
&erts_aligned_dirty_io_scheduler_data[(IX)].esd)
-#define ERTS_DIRTY_SCHEDULER_NO(ESDP) \
- ((ESDP)->dirty_no.s.num)
-#define ERTS_DIRTY_SCHEDULER_TYPE(ESDP) \
- ((ESDP)->dirty_no.s.type)
-#ifdef ERTS_SMP
#define ERTS_SCHEDULER_IS_DIRTY(ESDP) \
- ((ESDP)->dirty_no.s.num != 0)
+ ((ESDP)->type != ERTS_SCHED_NORMAL)
#define ERTS_SCHEDULER_IS_DIRTY_CPU(ESDP) \
- ((ESDP)->dirty_no.s.type == 0)
+ ((ESDP)->type == ERTS_SCHED_DIRTY_CPU)
#define ERTS_SCHEDULER_IS_DIRTY_IO(ESDP) \
- ((ESDP)->dirty_no.s.type == 1)
-#else
-#define ERTS_SCHEDULER_IS_DIRTY(ESDP) 0
-#define ERTS_SCHEDULER_IS_DIRTY_CPU(ESDP) 0
-#define ERTS_SCHEDULER_IS_DIRTY_IO(ESDP) 0
-#endif
-#else
-#define ERTS_RUNQ_IX_IS_DIRTY(IX) 0
-#define ERTS_SCHEDULER_IS_DIRTY(ESDP) 0
-#define ERTS_SCHEDULER_IS_DIRTY_CPU(ESDP) 0
-#define ERTS_SCHEDULER_IS_DIRTY_IO(ESDP) 0
-#endif
+ ((ESDP)->type == ERTS_SCHED_DIRTY_IO)
void erts_pre_init_process(void);
void erts_late_init_process(void);
void erts_early_init_scheduling(int);
-void erts_init_scheduling(int, int
-#ifdef ERTS_DIRTY_SCHEDULERS
- , int, int, int
-#endif
- );
-
+void erts_init_scheduling(int, int, int, int, int, int);
+void erts_execute_dirty_system_task(Process *c_p);
int erts_set_gc_state(Process *c_p, int enable);
-Eterm erts_sched_wall_time_request(Process *c_p, int set, int enable);
+Eterm erts_sched_wall_time_request(Process *c_p, int set, int enable,
+ int dirty_cpu, int want_dirty_io);
+Eterm erts_system_check_request(Process *c_p);
Eterm erts_gc_info_request(Process *c_p);
Uint64 erts_get_proc_interval(void);
Uint64 erts_ensure_later_proc_interval(Uint64);
Uint64 erts_step_proc_interval(void);
ErtsProcList *erts_proclist_create(Process *);
+ErtsProcList *erts_proclist_copy(ErtsProcList *);
void erts_proclist_destroy(ErtsProcList *);
ERTS_GLB_INLINE int erts_proclist_same(ErtsProcList *, Process *);
@@ -1461,7 +1638,7 @@ ERTS_GLB_INLINE ErtsProcList *erts_proclist_fetch_first(ErtsProcList **list)
return NULL;
else {
ErtsProcList *res = *list;
- if (res == *list)
+ if (res->next == *list)
*list = NULL;
else
*list = res->next;
@@ -1548,44 +1725,34 @@ void erts_schedule_thr_prgr_later_cleanup_op(void (*)(void *),
void *,
ErtsThrPrgrLaterOp *,
UWord);
+void erts_schedule_complete_off_heap_message_queue_change(Eterm pid);
+struct db_fixation;
+void erts_schedule_ets_free_fixation(Eterm pid, struct db_fixation*);
+void erts_schedule_flush_trace_messages(Process *proc, int force_on_proc);
+int erts_flush_trace_messages(Process *c_p, ErtsProcLocks locks);
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
int erts_dbg_check_halloc_lock(Process *p);
#endif
-#ifdef DEBUG
-void erts_dbg_multi_scheduling_return_trap(Process *, Eterm);
-#endif
-int erts_get_max_no_executing_schedulers(void);
-#if defined(ERTS_SMP) || defined(ERTS_DIRTY_SCHEDULERS)
-ErtsSchedSuspendResult
-erts_schedulers_state(Uint *, Uint *, Uint *, Uint *, Uint *, Uint *, int);
-#endif
-#ifdef ERTS_SMP
+void
+erts_schedulers_state(Uint *, Uint *, Uint *, Uint *, Uint *, Uint *, Uint *, Uint *);
ErtsSchedSuspendResult
erts_set_schedulers_online(Process *p,
ErtsProcLocks plocks,
Sint new_no,
- Sint *old_no
-#ifdef ERTS_DIRTY_SCHEDULERS
- , int dirty_only
-#endif
- );
+ Sint *old_no,
+ int dirty_only);
ErtsSchedSuspendResult
-erts_block_multi_scheduling(Process *, ErtsProcLocks, int, int);
+erts_block_multi_scheduling(Process *, ErtsProcLocks, int, int, int);
int erts_is_multi_scheduling_blocked(void);
-Eterm erts_multi_scheduling_blockers(Process *);
+Eterm erts_multi_scheduling_blockers(Process *, int);
void erts_start_schedulers(void);
void erts_alloc_notify_delayed_dealloc(int);
void erts_alloc_ensure_handle_delayed_dealloc_call(int);
-void erts_smp_notify_check_children_needed(void);
-#endif
-#if ERTS_USE_ASYNC_READY_Q
+void erts_notify_canceled_timer(ErtsSchedulerData *, int);
void erts_notify_check_async_ready_queue(void *);
-#endif
-#ifdef ERTS_SMP
void erts_notify_code_ix_activation(Process* p, ErtsThrPrgrVal later);
void erts_notify_finish_breakpointing(Process* p);
-#endif
void erts_schedule_misc_aux_work(int sched_id,
void (*func)(void *),
void *arg);
@@ -1594,11 +1761,13 @@ void erts_schedule_multi_misc_aux_work(int ignore_self,
void (*func)(void *),
void *arg);
erts_aint32_t erts_set_aux_work_timeout(int, erts_aint32_t, int);
+void erts_aux_work_timeout_late_init(ErtsSchedulerData *esdp);
void erts_sched_notify_check_cpu_bind(void);
Uint erts_active_schedulers(void);
void erts_init_process(int, int, int);
-Eterm erts_process_status(Process *, ErtsProcLocks, Process *, Eterm);
-Uint erts_run_queues_len(Uint *);
+Eterm erts_process_state2status(erts_aint32_t);
+Eterm erts_process_status(Process *, Eterm);
+Uint erts_run_queues_len(Uint *, int, int, int);
void erts_add_to_runq(Process *);
Eterm erts_bound_schedulers_term(Process *c_p);
Eterm erts_get_cpu_topology_term(Process *c_p, Eterm which);
@@ -1606,13 +1775,11 @@ Eterm erts_get_schedulers_binds(Process *c_p);
Eterm erts_set_cpu_topology(Process *c_p, Eterm term);
Eterm erts_bind_schedulers(Process *c_p, Eterm how);
ErtsRunQueue *erts_schedid2runq(Uint);
-Process *schedule(Process*, int);
+Process *erts_schedule(ErtsSchedulerData *, Process*, int);
void erts_schedule_misc_op(void (*)(void *), void *);
Eterm erl_create_process(Process*, Eterm, Eterm, Eterm, ErlSpawnOpts*);
void erts_do_exit_process(Process*, Eterm);
void erts_continue_exit_process(Process *);
-void set_timer(Process*, Uint);
-void cancel_timer(Process*);
/* Begin System profile */
Uint erts_runnable_process_count(void);
/* End System profile */
@@ -1621,8 +1788,12 @@ void erts_cleanup_empty_process(Process* p);
#ifdef DEBUG
void erts_debug_verify_clean_empty_process(Process* p);
#endif
-void erts_stack_dump(int to, void *to_arg, Process *);
-void erts_program_counter_info(int to, void *to_arg, Process *);
+void erts_stack_dump(fmtfn_t to, void *to_arg, Process *);
+void erts_limited_stack_trace(fmtfn_t to, void *to_arg, Process *);
+void erts_program_counter_info(fmtfn_t to, void *to_arg, Process *);
+void erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp);
+void erts_dump_extended_process_state(fmtfn_t to, void *to_arg, erts_aint32_t psflg);
+void erts_dump_process_state(fmtfn_t to, void *to_arg, erts_aint32_t psflg);
Eterm erts_get_process_priority(Process *p);
Eterm erts_set_process_priority(Process *p, Eterm prio);
@@ -1650,37 +1821,29 @@ int erts_send_exit_signal(Process *,
Eterm,
Process *,
Uint32);
-#ifdef ERTS_SMP
void erts_handle_pending_exit(Process *, ErtsProcLocks);
#define ERTS_PROC_PENDING_EXIT(P) \
- (ERTS_PSFLG_PENDING_EXIT & erts_smp_atomic32_read_acqb(&(P)->state))
-#else
-#define ERTS_PROC_PENDING_EXIT(P) 0
-#endif
+ (ERTS_PSFLG_PENDING_EXIT & erts_atomic32_read_acqb(&(P)->state))
-void erts_deep_process_dump(int, void *);
+void erts_deep_process_dump(fmtfn_t, void *);
Eterm erts_get_reader_groups_map(Process *c_p);
Eterm erts_debug_reader_groups_map(Process *c_p, int groups);
Uint erts_debug_nbalance(void);
-int erts_debug_wait_deallocations(Process *c_p);
-Uint erts_process_memory(Process *c_p);
+#define ERTS_DEBUG_WAIT_COMPLETED_DEALLOCATIONS (1 << 0)
+#define ERTS_DEBUG_WAIT_COMPLETED_TIMER_CANCELLATIONS (1 << 1)
-#ifdef ERTS_SMP
-# define ERTS_GET_SCHEDULER_DATA_FROM_PROC(PROC) ((PROC)->scheduler_data)
-# define ERTS_PROC_GET_SCHDATA(PROC) ((PROC)->scheduler_data)
-#else
-# define ERTS_GET_SCHEDULER_DATA_FROM_PROC(PROC) (erts_scheduler_data)
-# define ERTS_PROC_GET_SCHDATA(PROC) (erts_scheduler_data)
-#endif
+int erts_debug_wait_completed(Process *c_p, int flags);
+
+Uint erts_process_memory(Process *c_p, int incl_msg_inq);
#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC
# define ERTS_VERIFY_UNUSED_TEMP_ALLOC(P) \
do { \
ErtsSchedulerData *esdp__ = ((P) \
- ? ERTS_PROC_GET_SCHDATA((Process *) (P)) \
+ ? erts_proc_sched_data((Process *) (P)) \
: erts_get_scheduler_data()); \
if (esdp__ && !ERTS_SCHEDULER_IS_DIRTY(esdp__)) \
esdp__->verify_unused_temp_alloc( \
@@ -1690,145 +1853,177 @@ do { \
# define ERTS_VERIFY_UNUSED_TEMP_ALLOC(ESDP)
#endif
-#if defined(ERTS_SMP) || defined(USE_THREADS)
ErtsSchedulerData *erts_get_scheduler_data(void);
-#else
-ERTS_GLB_INLINE ErtsSchedulerData *erts_get_scheduler_data(void);
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-ERTS_GLB_INLINE
-ErtsSchedulerData *erts_get_scheduler_data(void)
-{
- return erts_scheduler_data;
-}
-#endif
-#endif
+void erts_schedule_process(Process *, erts_aint32_t, ErtsProcLocks);
-void erts_schedule_process(Process *, erts_aint32_t);
+ERTS_GLB_INLINE void erts_proc_notify_new_message(Process *p, ErtsProcLocks locks);
+ERTS_GLB_INLINE void erts_schedule_dirty_sys_execution(Process *c_p);
-ERTS_GLB_INLINE void erts_proc_notify_new_message(Process *p);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
-erts_proc_notify_new_message(Process *p)
+erts_proc_notify_new_message(Process *p, ErtsProcLocks locks)
{
/* No barrier needed, due to msg lock */
- erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
+ erts_aint32_t state = erts_atomic32_read_nob(&p->state);
if (!(state & ERTS_PSFLG_ACTIVE))
- erts_schedule_process(p, state);
+ erts_schedule_process(p, state, locks);
}
+
+ERTS_GLB_INLINE void
+erts_schedule_dirty_sys_execution(Process *c_p)
+{
+ erts_aint32_t a, n, e;
+
+ a = erts_atomic32_read_nob(&c_p->state);
+
+ /*
+ * Only a currently executing process schedules
+ * itself for dirty-sys execution...
+ */
+
+ ASSERT(a & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS));
+
+ /* Don't set dirty-active-sys if we are about to exit... */
+
+ while (!(a & (ERTS_PSFLG_DIRTY_ACTIVE_SYS
+ | ERTS_PSFLG_EXITING
+ | ERTS_PSFLG_PENDING_EXIT))) {
+ e = a;
+ n = a | ERTS_PSFLG_DIRTY_ACTIVE_SYS;
+ a = erts_atomic32_cmpxchg_mb(&c_p->state, n, e);
+ if (a == e)
+ break; /* dirty-active-sys set */
+ }
+}
+
#endif
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
#define ERTS_PROCESS_LOCK_ONLY_LOCK_CHECK_PROTO__
#include "erl_process_lock.h"
#undef ERTS_PROCESS_LOCK_ONLY_LOCK_CHECK_PROTO__
-#define ERTS_SMP_LC_CHK_RUNQ_LOCK(RQ, L) \
+#define ERTS_LC_CHK_RUNQ_LOCK(RQ, L) \
do { \
if ((L)) \
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked((RQ))); \
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked((RQ))); \
else \
- ERTS_SMP_LC_ASSERT(!erts_smp_lc_runq_is_locked((RQ))); \
+ ERTS_LC_ASSERT(!erts_lc_runq_is_locked((RQ))); \
} while (0)
#else
-#define ERTS_SMP_LC_CHK_RUNQ_LOCK(RQ, L)
+#define ERTS_LC_CHK_RUNQ_LOCK(RQ, L)
#endif
-void *erts_psd_set_init(Process *p, ErtsProcLocks plocks, int ix, void *data);
+void *erts_psd_set_init(Process *p, int ix, void *data);
ERTS_GLB_INLINE void *
erts_psd_get(Process *p, int ix);
ERTS_GLB_INLINE void *
-erts_psd_set(Process *p, ErtsProcLocks plocks, int ix, void *new);
+erts_psd_set(Process *p, int ix, void *new);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void *
erts_psd_get(Process *p, int ix)
{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+ ErtsPSD *psd;
+#if defined(ERTS_ENABLE_LOCK_CHECK)
ErtsProcLocks locks = erts_proc_lc_my_proc_locks(p);
if (ERTS_LC_PSD_ANY_LOCK == erts_psd_required_locks[ix].get_locks)
- ERTS_SMP_LC_ASSERT(locks || erts_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(locks || erts_thr_progress_is_blocking());
else {
locks &= erts_psd_required_locks[ix].get_locks;
- ERTS_SMP_LC_ASSERT(erts_psd_required_locks[ix].get_locks == locks
+ ERTS_LC_ASSERT(erts_psd_required_locks[ix].get_locks == locks
|| erts_thr_progress_is_blocking());
}
#endif
+
+ psd = (ErtsPSD *) erts_atomic_read_nob(&p->psd);
ASSERT(0 <= ix && ix < ERTS_PSD_SIZE);
- return p->psd ? p->psd->data[ix] : NULL;
+ if (!psd)
+ return NULL;
+ ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER;
+ return psd->data[ix];
}
-
-/*
- * NOTE: erts_psd_set() might release and reacquire locks on 'p'.
- */
ERTS_GLB_INLINE void *
-erts_psd_set(Process *p, ErtsProcLocks plocks, int ix, void *data)
+erts_psd_set(Process *p, int ix, void *data)
{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+ ErtsPSD *psd;
+#if defined(ERTS_ENABLE_LOCK_CHECK)
ErtsProcLocks locks = erts_proc_lc_my_proc_locks(p);
- if (ERTS_LC_PSD_ANY_LOCK == erts_psd_required_locks[ix].set_locks)
- ERTS_SMP_LC_ASSERT(locks || erts_thr_progress_is_blocking());
- else {
- locks &= erts_psd_required_locks[ix].set_locks;
- ERTS_SMP_LC_ASSERT(erts_psd_required_locks[ix].set_locks == locks
- || erts_thr_progress_is_blocking());
+ erts_aint32_t state = erts_atomic32_read_nob(&p->state);
+ if (!(state & ERTS_PSFLG_FREE)) {
+ if (ERTS_LC_PSD_ANY_LOCK == erts_psd_required_locks[ix].set_locks)
+ ERTS_LC_ASSERT(locks || erts_thr_progress_is_blocking());
+ else {
+ locks &= erts_psd_required_locks[ix].set_locks;
+ ERTS_LC_ASSERT(erts_psd_required_locks[ix].set_locks == locks
+ || erts_thr_progress_is_blocking());
+ }
}
#endif
+ psd = (ErtsPSD *) erts_atomic_read_nob(&p->psd);
ASSERT(0 <= ix && ix < ERTS_PSD_SIZE);
- if (p->psd) {
- void *old = p->psd->data[ix];
- p->psd->data[ix] = data;
+ if (psd) {
+ void *old;
+#ifdef ETHR_ORDERED_READ_DEPEND
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreStore);
+#endif
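+ /* Editorial note (not part of the patch): this barrier pairs
+ * with the dependency read barrier in erts_psd_get(); the read
+ * of p->psd above must be ordered before the store to
+ * psd->data[ix] below. */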
+ old = psd->data[ix];
+ psd->data[ix] = data;
return old;
}
- else {
- if (!data)
- return NULL;
- else
- return erts_psd_set_init(p, plocks, ix, data);
- }
+
+ if (!data)
+ return NULL;
+
+ return erts_psd_set_init(p, ix, data);
}
#endif
-#define ERTS_PROC_SCHED_ID(P, L, ID) \
- ((UWord) erts_psd_set((P), (L), ERTS_PSD_SCHED_ID, (void *) (ID)))
-
-#define ERTS_PROC_GET_DIST_ENTRY(P) \
- ((DistEntry *) erts_psd_get((P), ERTS_PSD_DIST_ENTRY))
-#define ERTS_PROC_SET_DIST_ENTRY(P, L, D) \
- ((DistEntry *) erts_psd_set((P), (L), ERTS_PSD_DIST_ENTRY, (void *) (D)))
+#define ERTS_PROC_SCHED_ID(P, ID) \
+ ((UWord) erts_psd_set((P), ERTS_PSD_SCHED_ID, (void *) (ID)))
#define ERTS_PROC_GET_SAVED_CALLS_BUF(P) \
((struct saved_calls *) erts_psd_get((P), ERTS_PSD_SAVED_CALLS_BUF))
-#define ERTS_PROC_SET_SAVED_CALLS_BUF(P, L, SCB) \
- ((struct saved_calls *) erts_psd_set((P), (L), ERTS_PSD_SAVED_CALLS_BUF, (void *) (SCB)))
+#define ERTS_PROC_SET_SAVED_CALLS_BUF(P, SCB) \
+ ((struct saved_calls *) erts_psd_set((P), ERTS_PSD_SAVED_CALLS_BUF, (void *) (SCB)))
#define ERTS_PROC_GET_CALL_TIME(P) \
((process_breakpoint_time_t *) erts_psd_get((P), ERTS_PSD_CALL_TIME_BP))
-#define ERTS_PROC_SET_CALL_TIME(P, L, PBT) \
- ((process_breakpoint_time_t *) erts_psd_set((P), (L), ERTS_PSD_CALL_TIME_BP, (void *) (PBT)))
+#define ERTS_PROC_SET_CALL_TIME(P, PBT) \
+ ((process_breakpoint_time_t *) erts_psd_set((P), ERTS_PSD_CALL_TIME_BP, (void *) (PBT)))
#define ERTS_PROC_GET_DELAYED_GC_TASK_QS(P) \
((ErtsProcSysTaskQs *) erts_psd_get((P), ERTS_PSD_DELAYED_GC_TASK_QS))
-#define ERTS_PROC_SET_DELAYED_GC_TASK_QS(P, L, PBT) \
- ((ErtsProcSysTaskQs *) erts_psd_set((P), (L), ERTS_PSD_DELAYED_GC_TASK_QS, (void *) (PBT)))
-
-#ifdef ERTS_DIRTY_SCHEDULERS
-#define ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(P) \
- ((Export *) erts_psd_get((P), ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT))
-#define ERTS_PROC_SET_DIRTY_SCHED_TRAP_EXPORT(P, L, DSTE) \
- ((Export *) erts_psd_set((P), (L), ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT, (void *) (DSTE)))
-#endif
+#define ERTS_PROC_SET_DELAYED_GC_TASK_QS(P, PBT) \
+ ((ErtsProcSysTaskQs *) erts_psd_set((P), ERTS_PSD_DELAYED_GC_TASK_QS, (void *) (PBT)))
+
+#define ERTS_PROC_GET_NIF_TRAP_EXPORT(P) \
+ erts_psd_get((P), ERTS_PSD_NIF_TRAP_EXPORT)
+#define ERTS_PROC_SET_NIF_TRAP_EXPORT(P, NTE) \
+ erts_psd_set((P), ERTS_PSD_NIF_TRAP_EXPORT, (void *) (NTE))
+#define ERTS_PROC_GET_DIST_ENTRY(P) \
+ ((DistEntry *) erts_psd_get((P), ERTS_PSD_DIST_ENTRY))
+#define ERTS_PROC_SET_DIST_ENTRY(P, DE) \
+ ((DistEntry *) erts_psd_set((P), ERTS_PSD_DIST_ENTRY, (void *) (DE)))
+
+#ifdef HIPE
+#define ERTS_PROC_GET_SUSPENDED_SAVED_CALLS_BUF(P) \
+ ((struct saved_calls *) erts_psd_get((P), ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF))
+#define ERTS_PROC_SET_SUSPENDED_SAVED_CALLS_BUF(P, SCB) \
+ ((struct saved_calls *) erts_psd_set((P), ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF, (void *) (SCB)))
+#endif
ERTS_GLB_INLINE Eterm erts_proc_get_error_handler(Process *p);
-ERTS_GLB_INLINE Eterm erts_proc_set_error_handler(Process *p,
- ErtsProcLocks plocks,
- Eterm handler);
+ERTS_GLB_INLINE Eterm erts_proc_set_error_handler(Process *p, Eterm handler);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE Eterm
@@ -1844,13 +2039,13 @@ erts_proc_get_error_handler(Process *p)
}
ERTS_GLB_INLINE Eterm
-erts_proc_set_error_handler(Process *p, ErtsProcLocks plocks, Eterm handler)
+erts_proc_set_error_handler(Process *p, Eterm handler)
{
void *old_val;
void *new_val;
ASSERT(is_atom(handler));
new_val = (handler == am_error_handler) ? NULL : (void *) (UWord) handler;
- old_val = erts_psd_set(p, plocks, ERTS_PSD_ERROR_HANDLER, new_val);
+ old_val = erts_psd_set(p, ERTS_PSD_ERROR_HANDLER, new_val);
if (!old_val)
return am_error_handler;
else {
@@ -1863,7 +2058,6 @@ erts_proc_set_error_handler(Process *p, ErtsProcLocks plocks, Eterm handler)
#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
-#ifdef ERTS_SMP
#include "erl_thr_progress.h"
@@ -1965,31 +2159,62 @@ erts_check_emigration_need(ErtsRunQueue *c_rq, int prio)
#endif
-#endif
#endif
+ERTS_GLB_INLINE ErtsSchedulerData *erts_proc_sched_data(Process *c_p);
ERTS_GLB_INLINE int erts_is_scheduler_bound(ErtsSchedulerData *esdp);
ERTS_GLB_INLINE Process *erts_get_current_process(void);
ERTS_GLB_INLINE Eterm erts_get_current_pid(void);
ERTS_GLB_INLINE Uint erts_get_scheduler_id(void);
ERTS_GLB_INLINE ErtsRunQueue *erts_get_runq_proc(Process *p);
ERTS_GLB_INLINE ErtsRunQueue *erts_get_runq_current(ErtsSchedulerData *esdp);
-#ifndef ERTS_ENABLE_LOCK_COUNT
-ERTS_GLB_INLINE void erts_smp_runq_lock(ErtsRunQueue *rq);
-#endif
-ERTS_GLB_INLINE int erts_smp_runq_trylock(ErtsRunQueue *rq);
-ERTS_GLB_INLINE void erts_smp_runq_unlock(ErtsRunQueue *rq);
-#ifndef ERTS_ENABLE_LOCK_COUNT
-ERTS_GLB_INLINE void erts_smp_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq);
-#endif
-ERTS_GLB_INLINE void erts_smp_xrunq_unlock(ErtsRunQueue *rq, ErtsRunQueue *xrq);
-ERTS_GLB_INLINE void erts_smp_runqs_lock(ErtsRunQueue *rq1, ErtsRunQueue *rq2);
-ERTS_GLB_INLINE void erts_smp_runqs_unlock(ErtsRunQueue *rq1, ErtsRunQueue *rq2);
+ERTS_GLB_INLINE void erts_runq_lock(ErtsRunQueue *rq);
+ERTS_GLB_INLINE int erts_runq_trylock(ErtsRunQueue *rq);
+ERTS_GLB_INLINE void erts_runq_unlock(ErtsRunQueue *rq);
+ERTS_GLB_INLINE void erts_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq);
+ERTS_GLB_INLINE void erts_xrunq_unlock(ErtsRunQueue *rq, ErtsRunQueue *xrq);
+ERTS_GLB_INLINE void erts_runqs_lock(ErtsRunQueue *rq1, ErtsRunQueue *rq2);
+ERTS_GLB_INLINE void erts_runqs_unlock(ErtsRunQueue *rq1, ErtsRunQueue *rq2);
+
+ERTS_GLB_INLINE ErtsMessage *erts_alloc_message_heap_state(Process *pp,
+ erts_aint32_t *psp,
+ ErtsProcLocks *plp,
+ Uint sz,
+ Eterm **hpp,
+ ErlOffHeap **ohpp);
+ERTS_GLB_INLINE ErtsMessage *erts_alloc_message_heap(Process *pp,
+ ErtsProcLocks *plp,
+ Uint sz,
+ Eterm **hpp,
+ ErlOffHeap **ohpp);
+
+ERTS_GLB_INLINE void erts_shrink_message_heap(ErtsMessage **msgpp, Process *pp,
+ Eterm *start_hp, Eterm *used_hp, Eterm *end_hp,
+ Eterm *brefs, Uint brefs_size);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
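+/*
+ * Editorial note (not part of the patch): c_p->scheduler_data is only
+ * set while the process executes on a normal scheduler; during dirty
+ * execution it is NULL and the dirty scheduler's own data from
+ * erts_get_scheduler_data() is used instead, as the asserts in
+ * erts_proc_sched_data() encode.
+ */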
ERTS_GLB_INLINE
+ErtsSchedulerData *erts_proc_sched_data(Process *c_p)
+{
+ ErtsSchedulerData *esdp;
+ ASSERT(c_p);
+ esdp = c_p->scheduler_data;
+ if (esdp) {
+ ASSERT(esdp == erts_get_scheduler_data());
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
+ }
+ else {
+ esdp = erts_get_scheduler_data();
+ ASSERT(esdp);
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp));
+ }
+ ASSERT(esdp);
+ return esdp;
+}
+
+ERTS_GLB_INLINE
int erts_is_scheduler_bound(ErtsSchedulerData *esdp)
{
if (!esdp)
@@ -2015,157 +2240,154 @@ Eterm erts_get_current_pid(void)
ERTS_GLB_INLINE
Uint erts_get_scheduler_id(void)
{
-#ifdef ERTS_SMP
ErtsSchedulerData *esdp = erts_get_scheduler_data();
-#ifdef ERTS_DIRTY_SCHEDULERS
if (esdp && ERTS_SCHEDULER_IS_DIRTY(esdp))
return 0;
else
-#endif
return esdp ? esdp->no : (Uint) 0;
-#else
- return erts_get_scheduler_data() ? (Uint) 1 : (Uint) 0;
-#endif
}
ERTS_GLB_INLINE ErtsRunQueue *
erts_get_runq_proc(Process *p)
{
-#ifdef ERTS_SMP
ASSERT(ERTS_AINT_NULL != erts_atomic_read_nob(&p->run_queue));
return (ErtsRunQueue *) erts_atomic_read_nob(&p->run_queue);
-#else
- return ERTS_RUNQ_IX(0);
-#endif
}
ERTS_GLB_INLINE ErtsRunQueue *
erts_get_runq_current(ErtsSchedulerData *esdp)
{
ASSERT(!esdp || esdp == erts_get_scheduler_data());
-#ifdef ERTS_SMP
if (!esdp)
esdp = erts_get_scheduler_data();
return esdp->run_queue;
-#else
- return ERTS_RUNQ_IX(0);
-#endif
}
ERTS_GLB_INLINE void
-erts_smp_runq_lock(ErtsRunQueue *rq)
+erts_runq_lock(ErtsRunQueue *rq)
{
-#ifdef ERTS_SMP
- erts_smp_mtx_lock(&rq->mtx);
-#endif
+ erts_mtx_lock(&rq->mtx);
}
-#ifdef ERTS_ENABLE_LOCK_COUNT
-
-#define erts_smp_runq_lock(rq) erts_smp_mtx_lock_x(&(rq)->mtx, __FILE__, __LINE__)
-
-#endif
-
ERTS_GLB_INLINE int
-erts_smp_runq_trylock(ErtsRunQueue *rq)
-{
-#ifdef ERTS_SMP
- return erts_smp_mtx_trylock(&rq->mtx);
-#else
- return 0;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_runq_unlock(ErtsRunQueue *rq)
+erts_runq_trylock(ErtsRunQueue *rq)
{
-#ifdef ERTS_SMP
- erts_smp_mtx_unlock(&rq->mtx);
-#endif
+ return erts_mtx_trylock(&rq->mtx);
}
-#ifdef ERTS_ENABLE_LOCK_COUNT
-
-#define erts_smp_xrunq_lock(rq, xrq) erts_smp_xrunq_lock_x((rq), (xrq), __FILE__, __LINE__)
-
ERTS_GLB_INLINE void
-erts_smp_xrunq_lock_x(ErtsRunQueue *rq, ErtsRunQueue *xrq, char* file, int line)
+erts_runq_unlock(ErtsRunQueue *rq)
{
-#ifdef ERTS_SMP
- ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&rq->mtx));
- if (xrq != rq) {
- if (erts_smp_mtx_trylock(&xrq->mtx) == EBUSY) {
- if (rq < xrq)
- erts_smp_mtx_lock_x(&xrq->mtx, file, line);
- else {
- erts_smp_mtx_unlock(&rq->mtx);
- erts_smp_mtx_lock_x(&xrq->mtx, file, line);
- erts_smp_mtx_lock_x(&rq->mtx, file, line);
- }
- }
- }
-#endif
+ erts_mtx_unlock(&rq->mtx);
}
-#else
-
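+/*
+ * Editorial note (not part of the patch): erts_xrunq_lock() below
+ * avoids deadlock by taking the two run-queue mutexes in address
+ * order -- if the trylock on xrq fails and rq has the higher address,
+ * rq is dropped and both locks are retaken lowest-address first.
+ */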
ERTS_GLB_INLINE void
-erts_smp_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq)
+erts_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq)
{
-#ifdef ERTS_SMP
- ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&rq->mtx));
+ ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&rq->mtx));
if (xrq != rq) {
- if (erts_smp_mtx_trylock(&xrq->mtx) == EBUSY) {
+ if (erts_mtx_trylock(&xrq->mtx) == EBUSY) {
if (rq < xrq)
- erts_smp_mtx_lock(&xrq->mtx);
+ erts_mtx_lock(&xrq->mtx);
else {
- erts_smp_mtx_unlock(&rq->mtx);
- erts_smp_mtx_lock(&xrq->mtx);
- erts_smp_mtx_lock(&rq->mtx);
+ erts_mtx_unlock(&rq->mtx);
+ erts_mtx_lock(&xrq->mtx);
+ erts_mtx_lock(&rq->mtx);
}
}
}
-#endif
}
-#endif
-
ERTS_GLB_INLINE void
-erts_smp_xrunq_unlock(ErtsRunQueue *rq, ErtsRunQueue *xrq)
+erts_xrunq_unlock(ErtsRunQueue *rq, ErtsRunQueue *xrq)
{
-#ifdef ERTS_SMP
if (xrq != rq)
- erts_smp_mtx_unlock(&xrq->mtx);
-#endif
+ erts_mtx_unlock(&xrq->mtx);
}
ERTS_GLB_INLINE void
-erts_smp_runqs_lock(ErtsRunQueue *rq1, ErtsRunQueue *rq2)
+erts_runqs_lock(ErtsRunQueue *rq1, ErtsRunQueue *rq2)
{
-#ifdef ERTS_SMP
ASSERT(rq1 && rq2);
if (rq1 == rq2)
- erts_smp_mtx_lock(&rq1->mtx);
+ erts_mtx_lock(&rq1->mtx);
else if (rq1 < rq2) {
- erts_smp_mtx_lock(&rq1->mtx);
- erts_smp_mtx_lock(&rq2->mtx);
+ erts_mtx_lock(&rq1->mtx);
+ erts_mtx_lock(&rq2->mtx);
}
else {
- erts_smp_mtx_lock(&rq2->mtx);
- erts_smp_mtx_lock(&rq1->mtx);
+ erts_mtx_lock(&rq2->mtx);
+ erts_mtx_lock(&rq1->mtx);
}
-#endif
}
ERTS_GLB_INLINE void
-erts_smp_runqs_unlock(ErtsRunQueue *rq1, ErtsRunQueue *rq2)
+erts_runqs_unlock(ErtsRunQueue *rq1, ErtsRunQueue *rq2)
{
-#ifdef ERTS_SMP
ASSERT(rq1 && rq2);
- erts_smp_mtx_unlock(&rq1->mtx);
+ erts_mtx_unlock(&rq1->mtx);
if (rq1 != rq2)
- erts_smp_mtx_unlock(&rq2->mtx);
-#endif
+ erts_mtx_unlock(&rq2->mtx);
+}
+
+ERTS_GLB_INLINE ErtsMessage *
+erts_alloc_message_heap_state(Process *pp,
+ erts_aint32_t *psp,
+ ErtsProcLocks *plp,
+ Uint sz,
+ Eterm **hpp,
+ ErlOffHeap **ohpp)
+{
+ int on_heap;
+ ErtsMessage *mp;
+
+ if ((*psp) & ERTS_PSFLG_OFF_HEAP_MSGQ) {
+ mp = erts_alloc_message(sz, hpp);
+ *ohpp = sz == 0 ? NULL : &mp->hfrag.off_heap;
+ return mp;
+ }
+
+ mp = erts_try_alloc_message_on_heap(pp, psp, plp, sz, hpp, ohpp, &on_heap);
+ ASSERT(pp || !on_heap);
+ return mp;
+}
+
+ERTS_GLB_INLINE ErtsMessage *
+erts_alloc_message_heap(Process *pp,
+ ErtsProcLocks *plp,
+ Uint sz,
+ Eterm **hpp,
+ ErlOffHeap **ohpp)
+{
+ erts_aint32_t state = pp ? erts_atomic32_read_nob(&pp->state) : 0;
+ return erts_alloc_message_heap_state(pp, &state, plp, sz, hpp, ohpp);
+}
+
+ERTS_GLB_INLINE void
+erts_shrink_message_heap(ErtsMessage **msgpp, Process *pp,
+ Eterm *start_hp, Eterm *used_hp, Eterm *end_hp,
+ Eterm *brefs, Uint brefs_size)
+{
+ ASSERT(start_hp <= used_hp && used_hp <= end_hp);
+ if ((*msgpp)->data.attached == ERTS_MSG_COMBINED_HFRAG)
+ *msgpp = erts_shrink_message(*msgpp, used_hp - start_hp,
+ brefs, brefs_size);
+ else if (!(*msgpp)->data.attached) {
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN
+ & erts_proc_lc_my_proc_locks(pp));
+ HRelease(pp, end_hp, used_hp);
+ }
+ else {
+ ErlHeapFragment *hfrag = (*msgpp)->data.heap_frag;
+ if (start_hp != used_hp)
+ hfrag = erts_resize_message_buffer(hfrag, used_hp - start_hp,
+ brefs, brefs_size);
+ else {
+ free_message_buffer(hfrag);
+ hfrag = NULL;
+ }
+ (*msgpp)->data.heap_frag = hfrag;
+ }
}
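+
+/*
+ * Illustrative sketch (editorial addition, not part of the patch):
+ * typical send-path use of the two helpers above -- allocate room,
+ * build the term, then shrink to the words actually used. Assumes
+ * TUPLE2 and ERL_MESSAGE_TERM from erl_term.h/erl_message.h and that
+ * the caller holds the receiver's main lock; the example_ name is
+ * hypothetical.
+ */
+static ErtsMessage *example_build_pair_message(Process *rcvr,
+ ErtsProcLocks *locks,
+ Eterm a, Eterm b)
+{
+ Eterm *hp, *start;
+ ErlOffHeap *ohp;
+ ErtsMessage *mp = erts_alloc_message_heap(rcvr, locks, 3, &hp, &ohp);
+ start = hp;
+ ERL_MESSAGE_TERM(mp) = TUPLE2(hp, a, b); /* arity word + 2 elements */
+ hp += 3;
+ erts_shrink_message_heap(&mp, rcvr, start, hp, start + 3, NULL, 0);
+ return mp;
+}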
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
@@ -2177,18 +2399,42 @@ ERTS_GLB_INLINE ErtsAtomCacheMap *
erts_get_atom_cache_map(Process *c_p)
{
ErtsSchedulerData *esdp = (c_p
- ? ERTS_PROC_GET_SCHDATA(c_p)
+ ? erts_proc_sched_data(c_p)
: erts_get_scheduler_data());
ASSERT(esdp);
return &esdp->atom_cache_map;
}
#endif
-Process *erts_pid2proc_suspend(Process *,
- ErtsProcLocks,
- Eterm,
- ErtsProcLocks);
-#ifdef ERTS_SMP
+#ifdef __WIN32__
+/*
+ * Don't want erts_time2reds() inlined in beam_emu.c on windows since
+ * it is compiled with gcc which fails on it. Implementation is in
+ * erl_process.c on windows.
+ */
+# define ERTS_TIME2REDS_IMPL__ erts_time2reds__
+#else
+# define ERTS_TIME2REDS_IMPL__ erts_time2reds
+#endif
+
+ERTS_GLB_INLINE Sint64 ERTS_TIME2REDS_IMPL__(ErtsMonotonicTime start,
+ ErtsMonotonicTime end);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE Sint64
+ERTS_TIME2REDS_IMPL__(ErtsMonotonicTime start, ErtsMonotonicTime end)
+{
+ ErtsMonotonicTime time = end - start;
+ ASSERT(time >= 0);
+ time = ERTS_MONOTONIC_TO_USEC(time);
+ if (time == 0)
+ return (Sint64) 1; /* At least one reduction */
+ /* Currently two reductions per microsecond */
+ time *= (CONTEXT_REDS-1)/1000 + 1;
+ return (Sint64) time;
+}
+#endif
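
Worked out with CONTEXT_REDS at its long-standing default of 2000, the factor is (2000-1)/1000 + 1 = 1 + 1 = 2 reductions per microsecond (integer division), matching the comment above; a 150 us slice therefore charges 300 reductions, and anything under 1 us is clamped to one reduction by the early return.
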
+
Process *erts_pid2proc_not_running(Process *,
ErtsProcLocks,
@@ -2201,37 +2447,29 @@ Process *erts_pid2proc_nropt(Process *c_p,
extern int erts_disable_proc_not_running_opt;
#ifdef DEBUG
-#define ERTS_SMP_ASSERT_IS_NOT_EXITING(P) \
+#define ERTS_ASSERT_IS_NOT_EXITING(P) \
do { ASSERT(!ERTS_PROC_IS_EXITING((P))); } while (0)
#else
-#define ERTS_SMP_ASSERT_IS_NOT_EXITING(P)
+#define ERTS_ASSERT_IS_NOT_EXITING(P)
#endif
-#else /* !ERTS_SMP */
-
-#define ERTS_SMP_ASSERT_IS_NOT_EXITING(P)
-
-#define erts_pid2proc_not_running erts_pid2proc
-#define erts_pid2proc_nropt erts_pid2proc
-
-#endif
#define ERTS_PROC_IS_EXITING(P) \
- (ERTS_PSFLG_EXITING & erts_smp_atomic32_read_acqb(&(P)->state))
+ (ERTS_PSFLG_EXITING & erts_atomic32_read_acqb(&(P)->state))
/* Minimum NUMBER of processes for a small system to start */
#define ERTS_MIN_PROCESSES 1024
-#if defined(ERTS_SMP) && ERTS_MIN_PROCESSES < ERTS_NO_OF_PIX_LOCKS
+#if ERTS_MIN_PROCESSES < ERTS_NO_OF_PIX_LOCKS
#undef ERTS_MIN_PROCESSES
#define ERTS_MIN_PROCESSES ERTS_NO_OF_PIX_LOCKS
#endif
-void erts_smp_notify_inc_runq(ErtsRunQueue *runq);
+void erts_notify_inc_runq(ErtsRunQueue *runq);
-#ifdef ERTS_SMP
void erts_sched_finish_poke(ErtsSchedulerSleepInfo *, erts_aint32_t);
ERTS_GLB_INLINE void erts_sched_poke(ErtsSchedulerSleepInfo *ssi);
+void erts_aux_thread_poke(void);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -2240,9 +2478,9 @@ erts_sched_poke(ErtsSchedulerSleepInfo *ssi)
{
erts_aint32_t flags;
ERTS_THR_MEMORY_BARRIER;
- flags = erts_smp_atomic32_read_nob(&ssi->flags);
+ flags = erts_atomic32_read_nob(&ssi->flags);
if (flags & ERTS_SSI_FLG_SLEEPING) {
- flags = erts_smp_atomic32_read_band_nob(&ssi->flags, ~ERTS_SSI_FLGS_SLEEP);
+ flags = erts_atomic32_read_band_nob(&ssi->flags, ~ERTS_SSI_FLGS_SLEEP);
erts_sched_finish_poke(ssi, flags);
}
}
@@ -2250,7 +2488,6 @@ erts_sched_poke(ErtsSchedulerSleepInfo *ssi)
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-#endif /* #ifdef ERTS_SMP */
#include "erl_process_lock.h"
@@ -2259,6 +2496,6 @@ erts_sched_poke(ErtsSchedulerSleepInfo *ssi)
#endif
-void erl_halt(int code);
-extern erts_smp_atomic32_t erts_halt_progress;
+void erts_halt(int code);
+extern erts_atomic32_t erts_halt_progress;
extern int erts_halt_code;
diff --git a/erts/emulator/beam/erl_process_dict.c b/erts/emulator/beam/erl_process_dict.c
index 23e5bf737f..3c80f0e0f6 100644
--- a/erts/emulator/beam/erl_process_dict.c
+++ b/erts/emulator/beam/erl_process_dict.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -47,19 +48,22 @@
/* Hash constant macros */
#define MAX_HASH 1342177280UL
-#define INITIAL_SIZE 10
+#define INITIAL_SIZE (erts_pd_initial_size)
/* Hash utility macros */
-#define HASH_RANGE(PDict) ((PDict)->homeSize + (PDict)->splitPosition)
+#define HASH_RANGE(PDict) ((PDict)->usedSlots)
-#define MAKE_HASH(Term) \
-((is_small(Term)) ? unsigned_val(Term) : \
- ((is_atom(Term)) ? \
- (atom_tab(atom_val(term))->slot.bucket.hvalue) : \
- make_hash2(Term)))
+#define MAKE_HASH(Term) \
+ ((is_small(Term)) ? (Uint32) unsigned_val(Term) : \
+ ((is_atom(Term)) ? \
+ (Uint32) atom_val(Term) : \
+ make_internal_hash(Term, 0)))
#define PD_SZ2BYTES(Sz) (sizeof(ProcDict) + ((Sz) - 1)*sizeof(Eterm))
+#define pd_hash_value(Pdict, Key) \
+ pd_hash_value_to_ix(Pdict, MAKE_HASH((Key)))
+
/* Memory allocation macros */
#define PD_ALLOC(Sz) \
erts_alloc(ERTS_ALC_T_PROC_DICT, (Sz))
@@ -73,15 +77,21 @@
#define TCDR(Term) CDR(list_val(Term))
/* Array access macro */
-#define ARRAY_GET(PDict, Index) (((PDict)->size > (Index)) ? \
- (PDict)->data[Index] : NIL)
+#define ARRAY_GET(PDict, Index) (ASSERT((Index) < (PDict)->arraySize), \
+ (PDict)->data[Index])
+#define ARRAY_PUT(PDict, Index, Val) (ASSERT((Index) < (PDict)->arraySize), \
+ (PDict)->data[Index] = (Val))
+
+#define IS_POW2(X) ((X) && !((X) & ((X)-1)))
/*
 * Forward declarations
*/
static void pd_hash_erase(Process *p, Eterm id, Eterm *ret);
static void pd_hash_erase_all(Process *p);
+static Eterm pd_hash_get_with_hval(Process *p, Eterm bucket, Eterm id);
static Eterm pd_hash_get_keys(Process *p, Eterm value);
+static Eterm pd_hash_get_all_keys(Process *p, ProcDict *pd);
static Eterm pd_hash_get_all(Process *p, ProcDict *pd);
static Eterm pd_hash_put(Process *p, Eterm id, Eterm value);
@@ -89,9 +99,9 @@ static void shrink(Process *p, Eterm* ret);
static void grow(Process *p);
static void array_shrink(ProcDict **ppd, unsigned int need);
-static Eterm array_put(ProcDict **ppdict, unsigned int ndx, Eterm term);
+static void ensure_array_size(ProcDict**, unsigned int size);
-static unsigned int pd_hash_value(ProcDict *pdict, Eterm term);
+static unsigned int pd_hash_value_to_ix(ProcDict *pdict, Uint32 hx);
static unsigned int next_array_size(unsigned int need);
/*
@@ -132,11 +142,21 @@ static void pd_check(ProcDict *pd);
** External interface
*/
+int
+erts_pd_set_initial_size(int size)
+{
+ if (size <= 0)
+ return 0;
+
+ erts_pd_initial_size = 1 << erts_fit_in_bits_uint(size-1);
+ return 1;
+}
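
erts_fit_in_bits_uint(size-1) yields the number of bits needed to represent size-1, so the stored initial size is the request rounded up to a power of two (the hashing scheme below relies on power-of-two table sizes within a round). An equivalent stand-alone computation — round_up_pow2 is a hypothetical helper, not an ERTS function:

    /* Round up to the next power of two: 10 -> 16, 16 -> 16, 17 -> 32. */
    static unsigned int round_up_pow2(unsigned int size)
    {
        unsigned int bits = 0;
        for (size--; size; size >>= 1)
            bits++;
        return 1u << bits;
    }
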
+
/*
* Called from break handler
*/
void
-erts_dictionary_dump(int to, void *to_arg, ProcDict *pd)
+erts_dictionary_dump(fmtfn_t to, void *to_arg, ProcDict *pd)
{
unsigned int i;
#ifdef DEBUG
@@ -144,9 +164,9 @@ erts_dictionary_dump(int to, void *to_arg, ProcDict *pd)
/*PD_CHECK(pd);*/
if (pd == NULL)
return;
- erts_print(to, to_arg, "(size = %d, used = %d, homeSize = %d, "
+ erts_print(to, to_arg, "(size = %d, usedSlots = %d, "
"splitPosition = %d, numElements = %d)\n",
- pd->size, pd->used, pd->homeSize,
+ pd->arraySize, pd->usedSlots,
pd->splitPosition, (unsigned int) pd->numElements);
for (i = 0; i < HASH_RANGE(pd); ++i) {
erts_print(to, to_arg, "%d: %T\n", i, ARRAY_GET(pd, i));
@@ -176,8 +196,8 @@ erts_dictionary_dump(int to, void *to_arg, ProcDict *pd)
}
void
-erts_deep_dictionary_dump(int to, void *to_arg,
- ProcDict* pd, void (*cb)(int, void *, Eterm))
+erts_deep_dictionary_dump(fmtfn_t to, void *to_arg,
+ ProcDict* pd, void (*cb)(fmtfn_t, void *, Eterm))
{
unsigned int i;
Eterm t;
@@ -201,7 +221,7 @@ erts_dicts_mem_size(Process *p)
{
Uint size = 0;
if (p->dictionary)
- size += PD_SZ2BYTES(p->dictionary->size);
+ size += PD_SZ2BYTES(p->dictionary->arraySize);
return size;
}
@@ -275,6 +295,16 @@ BIF_RETTYPE get_1(BIF_ALIST_1)
BIF_RET(ret);
}
+BIF_RETTYPE get_keys_0(BIF_ALIST_0)
+{
+ Eterm ret;
+
+ PD_CHECK(BIF_P->dictionary);
+ ret = pd_hash_get_all_keys(BIF_P,BIF_P->dictionary);
+ PD_CHECK(BIF_P->dictionary);
+ BIF_RET(ret);
+}
+
BIF_RETTYPE get_keys_1(BIF_ALIST_1)
{
Eterm ret;
@@ -333,7 +363,7 @@ static void pd_hash_erase(Process *p, Eterm id, Eterm *ret)
if (is_boxed(old)) { /* Tuple */
ASSERT(is_tuple(old));
if (EQ(tuple_val(old)[1], id)) {
- array_put(&(p->dictionary), hval, NIL);
+ ARRAY_PUT(p->dictionary, hval, NIL);
--(p->dictionary->numElements);
*ret = tuple_val(old)[2];
}
@@ -353,7 +383,7 @@ static void pd_hash_erase(Process *p, Eterm id, Eterm *ret)
old = ARRAY_GET(p->dictionary, hval);
ASSERT(is_list(old));
if (is_nil(TCDR(old))) {
- array_put(&p->dictionary, hval, TCAR(old));
+ ARRAY_PUT(p->dictionary, hval, TCAR(old));
}
} else if (is_not_nil(old)) {
#ifdef DEBUG
@@ -362,7 +392,7 @@ static void pd_hash_erase(Process *p, Eterm id, Eterm *ret)
"display term found in line %d:\n"
"%T\n", p->common.id, __LINE__, old);
#endif
- erl_exit(1, "Damaged process dictionary found during erase/1.");
+ erts_exit(ERTS_ERROR_EXIT, "Damaged process dictionary found during erase/1.");
}
if ((range = HASH_RANGE(p->dictionary)) > INITIAL_SIZE &&
range / 2 > (p->dictionary->numElements)) {
@@ -378,40 +408,102 @@ static void pd_hash_erase_all(Process *p)
}
}
+Uint32 erts_pd_make_hx(Eterm key)
+{
+ return MAKE_HASH(key);
+}
+
+Eterm erts_pd_hash_get_with_hx(Process *p, Uint32 hx, Eterm id)
+{
+ unsigned int hval;
+ ProcDict *pd = p->dictionary;
+
+ ASSERT(hx == MAKE_HASH(id));
+ if (pd == NULL)
+ return am_undefined;
+ hval = pd_hash_value_to_ix(pd, hx);
+ return pd_hash_get_with_hval(p, ARRAY_GET(pd, hval), id);
+}
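
The _with_hx variants split hashing from lookup so a caller — in practice the loader, for get/1 with a literal key — can compute the hash once and reuse it; the ASSERT guards against a stale hash. A hedged sketch of such a call site (the key choice is illustrative):

    Eterm key = am_infinity;              /* any term; an atom here        */
    Uint32 hx = erts_pd_make_hx(key);     /* hash computed once            */
    Eterm val = erts_pd_hash_get_with_hx(p, hx, key); /* p: calling proc   */
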
+
Eterm erts_pd_hash_get(Process *p, Eterm id)
{
unsigned int hval;
- Eterm tmp;
ProcDict *pd = p->dictionary;
if (pd == NULL)
return am_undefined;
hval = pd_hash_value(pd, id);
- tmp = ARRAY_GET(pd, hval);
- if (is_boxed(tmp)) { /* Tuple */
- ASSERT(is_tuple(tmp));
- if (EQ(tuple_val(tmp)[1], id)) {
- return tuple_val(tmp)[2];
+ return pd_hash_get_with_hval(p, ARRAY_GET(pd, hval), id);
+}
+
+Eterm pd_hash_get_with_hval(Process *p, Eterm bucket, Eterm id)
+{
+ if (is_boxed(bucket)) { /* Tuple */
+ ASSERT(is_tuple(bucket));
+ if (EQ(tuple_val(bucket)[1], id)) {
+ return tuple_val(bucket)[2];
}
- } else if (is_list(tmp)) {
- for (; tmp != NIL && !EQ(tuple_val(TCAR(tmp))[1], id); tmp = TCDR(tmp)) {
+ } else if (is_list(bucket)) {
+ for (; bucket != NIL && !EQ(tuple_val(TCAR(bucket))[1], id); bucket = TCDR(bucket)) {
;
}
- if (tmp != NIL) {
- return tuple_val(TCAR(tmp))[2];
+ if (bucket != NIL) {
+ return tuple_val(TCAR(bucket))[2];
}
- } else if (is_not_nil(tmp)) {
+ } else if (is_not_nil(bucket)) {
#ifdef DEBUG
erts_fprintf(stderr,
"Process dictionary for process %T is broken, trying to "
"display term found in line %d:\n"
- "%T\n", p->common.id, __LINE__, tmp);
+ "%T\n", p->common.id, __LINE__, bucket);
#endif
- erl_exit(1, "Damaged process dictionary found during get/1.");
+ erts_exit(ERTS_ERROR_EXIT, "Damaged process dictionary found during get/1.");
}
return am_undefined;
}
+
+#define PD_GET_TKEY(Dst,Src) \
+do { \
+ ASSERT(is_tuple((Src))); \
+ ASSERT(arityval(*((Eterm*)tuple_val((Src)))) == 2); \
+ (Dst) = ((Eterm*)tuple_val((Src)))[1]; \
+} while(0)
+
+static Eterm pd_hash_get_all_keys(Process *p, ProcDict *pd) {
+ Eterm* hp;
+ Eterm res = NIL;
+ Eterm tmp, tmp2;
+ unsigned int i;
+ unsigned int num;
+
+ if (pd == NULL) {
+ return res;
+ }
+
+ num = HASH_RANGE(pd);
+ hp = HAlloc(p, pd->numElements * 2);
+
+ for (i = 0; i < num; ++i) {
+ tmp = ARRAY_GET(pd, i);
+ if (is_boxed(tmp)) {
+ PD_GET_TKEY(tmp,tmp);
+ res = CONS(hp, tmp, res);
+ hp += 2;
+ } else if (is_list(tmp)) {
+ while (tmp != NIL) {
+ tmp2 = TCAR(tmp);
+ PD_GET_TKEY(tmp2,tmp2);
+ res = CONS(hp, tmp2, res);
+ hp += 2;
+ tmp = TCDR(tmp);
+ }
+ }
+ }
+ return res;
+}
+#undef PD_GET_TKEY
+
static Eterm pd_hash_get_keys(Process *p, Eterm value)
{
Eterm *hp;
@@ -496,8 +588,11 @@ static Eterm pd_hash_put(Process *p, Eterm id, Eterm value)
if (p->dictionary == NULL) {
/* Create it */
- array_put(&(p->dictionary), INITIAL_SIZE - 1, NIL);
- p->dictionary->homeSize = INITIAL_SIZE;
+ ensure_array_size(&p->dictionary, INITIAL_SIZE);
+ p->dictionary->usedSlots = INITIAL_SIZE;
+ p->dictionary->sizeMask = INITIAL_SIZE*2 - 1;
+ p->dictionary->splitPosition = 0;
+ p->dictionary->numElements = 0;
}
hval = pd_hash_value(p->dictionary, id);
old = ARRAY_GET(p->dictionary, hval);
@@ -530,7 +625,7 @@ static Eterm pd_hash_put(Process *p, Eterm id, Eterm value)
root[0] = id;
root[1] = value;
root[2] = old;
- BUMP_REDS(p, erts_garbage_collect(p, needed, root, 3));
+ erts_garbage_collect(p, needed, root, 3);
id = root[0];
value = root[1];
old = root[2];
@@ -549,19 +644,19 @@ static Eterm pd_hash_put(Process *p, Eterm id, Eterm value)
* Update the dictionary.
*/
if (is_nil(old)) {
- array_put(&(p->dictionary), hval, tpl);
+ ARRAY_PUT(p->dictionary, hval, tpl);
++(p->dictionary->numElements);
} else if (is_boxed(old)) {
ASSERT(is_tuple(old));
if (EQ(tuple_val(old)[1],id)) {
- array_put(&(p->dictionary), hval, tpl);
+ ARRAY_PUT(p->dictionary, hval, tpl);
return tuple_val(old)[2];
} else {
hp = HeapOnlyAlloc(p, 4);
tmp = CONS(hp, old, NIL);
hp += 2;
++(p->dictionary->numElements);
- array_put(&(p->dictionary), hval, CONS(hp, tpl, tmp));
+ ARRAY_PUT(p->dictionary, hval, CONS(hp, tpl, tmp));
hp += 2;
ASSERT(hp <= hp_limit);
}
@@ -571,7 +666,7 @@ static Eterm pd_hash_put(Process *p, Eterm id, Eterm value)
* New key. Simply prepend the tuple to the beginning of the list.
*/
hp = HeapOnlyAlloc(p, 2);
- array_put(&(p->dictionary), hval, CONS(hp, tpl, old));
+ ARRAY_PUT(p->dictionary, hval, CONS(hp, tpl, old));
hp += 2;
ASSERT(hp <= hp_limit);
++(p->dictionary->numElements);
@@ -606,7 +701,7 @@ static Eterm pd_hash_put(Process *p, Eterm id, Eterm value)
nlist = CONS(hp, tpl, nlist);
hp += 2;
ASSERT(hp <= hp_limit);
- array_put(&(p->dictionary), hval, nlist);
+ ARRAY_PUT(p->dictionary, hval, nlist);
return tuple_val(TCAR(tmp))[2];
}
} else {
@@ -617,7 +712,7 @@ static Eterm pd_hash_put(Process *p, Eterm id, Eterm value)
"%T\n", p->common.id, __LINE__, old);
#endif
- erl_exit(1, "Damaged process dictionary found during put/2.");
+ erts_exit(ERTS_ERROR_EXIT, "Damaged process dictionary found during put/2.");
}
if (HASH_RANGE(p->dictionary) <= p->dictionary->numElements) {
grow(p);
@@ -631,6 +726,7 @@ static Eterm pd_hash_put(Process *p, Eterm id, Eterm value)
static void shrink(Process *p, Eterm* ret)
{
+ ProcDict *pd = p->dictionary;
unsigned int range = HASH_RANGE(p->dictionary);
unsigned int steps = (range*3) / 10;
Eterm hi, lo, tmp;
@@ -645,25 +741,26 @@ static void shrink(Process *p, Eterm* ret)
}
for (i = 0; i < steps; ++i) {
- ProcDict *pd = p->dictionary;
if (pd->splitPosition == 0) {
- pd->homeSize /= 2;
- pd->splitPosition = pd->homeSize;
+ ASSERT(IS_POW2(pd->usedSlots));
+ pd->sizeMask = pd->usedSlots - 1;
+ pd->splitPosition = pd->usedSlots / 2;
}
--(pd->splitPosition);
- hi = ARRAY_GET(pd, (pd->splitPosition + pd->homeSize));
+ /* Must wait to decrement 'usedSlots' for GC rootset below */
+ hi = ARRAY_GET(pd, pd->usedSlots - 1);
lo = ARRAY_GET(pd, pd->splitPosition);
if (hi != NIL) {
if (lo == NIL) {
- array_put(&(p->dictionary), pd->splitPosition, hi);
+ ARRAY_PUT(pd, pd->splitPosition, hi);
} else {
- int needed = 4;
+ Sint needed = 4;
if (is_list(hi) && is_list(lo)) {
needed = 2*erts_list_length(hi);
}
if (HeapWordsLeft(p) < needed) {
- BUMP_REDS(p, erts_garbage_collect(p, needed, ret, 1));
- hi = pd->data[(pd->splitPosition + pd->homeSize)];
+ erts_garbage_collect(p, needed, ret, 1);
+ hi = pd->data[pd->usedSlots - 1];
lo = pd->data[pd->splitPosition];
}
#ifdef DEBUG
@@ -674,13 +771,13 @@ static void shrink(Process *p, Eterm* ret)
hp = HeapOnlyAlloc(p, 4);
tmp = CONS(hp, hi, NIL);
hp += 2;
- array_put(&(p->dictionary), pd->splitPosition,
+ ARRAY_PUT(pd, pd->splitPosition,
CONS(hp,lo,tmp));
hp += 2;
ASSERT(hp <= hp_limit);
} else { /* hi is a list */
hp = HeapOnlyAlloc(p, 2);
- array_put(&(p->dictionary), pd->splitPosition,
+ ARRAY_PUT(pd, pd->splitPosition,
CONS(hp, lo, hi));
hp += 2;
ASSERT(hp <= hp_limit);
@@ -688,7 +785,7 @@ static void shrink(Process *p, Eterm* ret)
} else { /* lo is a list */
if (is_tuple(hi)) {
hp = HeapOnlyAlloc(p, 2);
- array_put(&(p->dictionary), pd->splitPosition,
+ ARRAY_PUT(pd, pd->splitPosition,
CONS(hp, hi, lo));
hp += 2;
ASSERT(hp <= hp_limit);
@@ -700,14 +797,15 @@ static void shrink(Process *p, Eterm* ret)
hp += 2;
}
ASSERT(hp <= hp_limit);
- array_put(&(p->dictionary), pd->splitPosition, lo);
+ ARRAY_PUT(pd, pd->splitPosition, lo);
}
}
}
}
- array_put(&(p->dictionary), (pd->splitPosition + pd->homeSize), NIL);
+ --pd->usedSlots;
+ ARRAY_PUT(pd, pd->usedSlots, NIL);
}
- if (HASH_RANGE(p->dictionary) <= (p->dictionary->size / 4)) {
+ if (HASH_RANGE(p->dictionary) <= (p->dictionary->arraySize / 4)) {
array_shrink(&(p->dictionary), (HASH_RANGE(p->dictionary) * 3) / 2);
}
}
@@ -715,14 +813,14 @@ static void shrink(Process *p, Eterm* ret)
static void grow(Process *p)
{
unsigned int i,j;
- unsigned int steps = p->dictionary->homeSize / 5;
+ unsigned int steps = (p->dictionary->usedSlots / 4) & 0xf;
Eterm l1,l2;
Eterm l;
Eterm *hp;
unsigned int pos;
unsigned int homeSize;
- int needed = 0;
- ProcDict *pd;
+ Sint needed = 0;
+ ProcDict *pd = p->dictionary;
#ifdef DEBUG
Eterm *hp_limit;
#endif
@@ -731,18 +829,20 @@ static void grow(Process *p)
if (steps == 0)
steps = 1;
 /* Don't grow over MAX_HASH */
- if ((MAX_HASH - steps) <= HASH_RANGE(p->dictionary)) {
+ if ((MAX_HASH - steps) <= HASH_RANGE(pd)) {
return;
}
+ ensure_array_size(&p->dictionary, HASH_RANGE(pd) + steps);
+ pd = p->dictionary;
+
/*
* Calculate total number of heap words needed, and garbage collect
* if necessary.
*/
- pd = p->dictionary;
pos = pd->splitPosition;
- homeSize = pd->homeSize;
+ homeSize = pd->usedSlots - pd->splitPosition;
for (i = 0; i < steps; ++i) {
if (pos == homeSize) {
homeSize *= 2;
@@ -758,7 +858,7 @@ static void grow(Process *p)
}
}
if (HeapWordsLeft(p) < needed) {
- BUMP_REDS(p, erts_garbage_collect(p, needed, 0, 0));
+ erts_garbage_collect(p, needed, 0, 0);
}
#ifdef DEBUG
hp_limit = p->htop + needed;
@@ -767,21 +867,22 @@ static void grow(Process *p)
/*
* Now grow.
*/
-
+ homeSize = pd->usedSlots - pd->splitPosition;
for (i = 0; i < steps; ++i) {
- ProcDict *pd = p->dictionary;
- if (pd->splitPosition == pd->homeSize) {
- pd->homeSize *= 2;
- pd->splitPosition = 0;
+ if (pd->splitPosition == homeSize) {
+ homeSize *= 2;
+ pd->sizeMask = homeSize*2 - 1;
+ pd->splitPosition = 0;
}
pos = pd->splitPosition;
++pd->splitPosition; /* For the hashes */
+ ++pd->usedSlots;
+ ASSERT(pos + homeSize == pd->usedSlots - 1);
l = ARRAY_GET(pd, pos);
if (is_tuple(l)) {
if (pd_hash_value(pd, tuple_val(l)[1]) != pos) {
- array_put(&(p->dictionary), pos +
- p->dictionary->homeSize, l);
- array_put(&(p->dictionary), pos, NIL);
+ ARRAY_PUT(pd, pos + homeSize, l);
+ ARRAY_PUT(pd, pos, NIL);
}
} else {
l2 = NIL;
@@ -803,10 +904,8 @@ static void grow(Process *p)
if (l2 != NIL && TCDR(l2) == NIL)
l2 = TCAR(l2);
ASSERT(hp <= hp_limit);
- /* After array_put pd is no longer valid */
- array_put(&(p->dictionary), pos, l1);
- array_put(&(p->dictionary), pos +
- p->dictionary->homeSize, l2);
+ ARRAY_PUT(pd, pos, l1);
+ ARRAY_PUT(pd, pos + homeSize, l2);
}
}
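
grow() is textbook linear hashing: each step splits the bucket at splitPosition, and every entry of the split bucket is rehashed with the doubled mask, landing either back at pos or at pos + homeSize. A sketch of that split decision (within a round, homeSize is a power of two; split_dest is hypothetical):

    /* Where does an entry with hash hx go when bucket pos is split?
     * Precondition: (hx & (homeSize-1)) == pos. */
    static unsigned int split_dest(Uint32 hx, unsigned int pos,
                                   unsigned int homeSize)
    {
        unsigned int ix = hx & (2*homeSize - 1);   /* widened mask */
        return ix;                      /* == pos or pos + homeSize */
    }
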
@@ -823,73 +922,65 @@ static void array_shrink(ProcDict **ppd, unsigned int need)
{
unsigned int siz = next_array_size(need);
- HDEBUGF(("array_shrink: size = %d, used = %d, need = %d",
- (*ppd)->size, (*ppd)->used, need));
+ HDEBUGF(("array_shrink: size = %d, need = %d",
+ (*ppd)->arraySize, need));
- if (siz > (*ppd)->size)
+ if (siz >= (*ppd)->arraySize)
return; /* Only shrink */
*ppd = PD_REALLOC(((void *) *ppd),
- PD_SZ2BYTES((*ppd)->size),
+ PD_SZ2BYTES((*ppd)->arraySize),
PD_SZ2BYTES(siz));
- (*ppd)->size = siz;
- if ((*ppd)->size < (*ppd)->used)
- (*ppd)->used = (*ppd)->size;
+ (*ppd)->arraySize = siz;
}
-static Eterm array_put(ProcDict **ppdict, unsigned int ndx, Eterm term)
+static void ensure_array_size(ProcDict **ppdict, unsigned int size)
{
+ ProcDict *pd = *ppdict;
unsigned int i;
- Eterm ret;
- if (*ppdict == NULL) {
- Uint siz = next_array_size(ndx+1);
- ProcDict *p;
- p = PD_ALLOC(PD_SZ2BYTES(siz));
+ if (pd == NULL) {
+ Uint siz = next_array_size(size);
+
+ pd = PD_ALLOC(PD_SZ2BYTES(siz));
for (i = 0; i < siz; ++i)
- p->data[i] = NIL;
- p->size = siz;
- p->homeSize = p->splitPosition = p->numElements = p->used = 0;
- *ppdict = p;
- } else if (ndx >= (*ppdict)->size) {
- Uint osize = (*ppdict)->size;
- Uint nsize = next_array_size(ndx+1);
- *ppdict = PD_REALLOC(((void *) *ppdict),
+ pd->data[i] = NIL;
+ pd->arraySize = siz;
+ *ppdict = pd;
+ } else if (size > pd->arraySize) {
+ Uint osize = pd->arraySize;
+ Uint nsize = next_array_size(size);
+ pd = PD_REALLOC(((void *) pd),
PD_SZ2BYTES(osize),
PD_SZ2BYTES(nsize));
for (i = osize; i < nsize; ++i)
- (*ppdict)->data[i] = NIL;
- (*ppdict)->size = nsize;
+ pd->data[i] = NIL;
+ pd->arraySize = nsize;
+ *ppdict = pd;
}
- ret = (*ppdict)->data[ndx];
- (*ppdict)->data[ndx] = term;
- if ((ndx + 1) > (*ppdict)->used)
- (*ppdict)->used = ndx + 1;
-#ifdef HARDDEBUG
- HDEBUGF(("array_put: (*ppdict)->size = %d, (*ppdict)->used = %d, ndx = %d",
- (*ppdict)->size, (*ppdict)->used, ndx));
- erts_fprintf(stderr, "%T", term);
-#endif /* HARDDEBUG */
- return ret;
}
/*
** Basic utilities
*/
-static unsigned int pd_hash_value(ProcDict *pdict, Eterm term)
+static unsigned int pd_hash_value_to_ix(ProcDict *pdict, Uint32 hx)
{
- Uint hash, high;
+ Uint high;
- hash = MAKE_HASH(term);
- high = hash % (pdict->homeSize*2);
+ ASSERT(IS_POW2(pdict->sizeMask+1));
+ ASSERT(HASH_RANGE(pdict) >= (pdict->sizeMask >> 1));
+ ASSERT(HASH_RANGE(pdict) <= (pdict->sizeMask + 1));
+
+ high = hx & pdict->sizeMask;
if (high >= HASH_RANGE(pdict))
- return hash % pdict->homeSize;
+ return hx & (pdict->sizeMask >> 1);
return high;
}
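
This is the lookup side of the same scheme: mask with the wide mask first, and if the result lands in a bucket that has not been split yet this round, retry with the narrow mask. A stand-alone sketch mirroring pd_hash_value_to_ix():

    /* Linear-hashing address calculation. sizeMask+1 is the table size
     * once the round completes; usedSlots is how far the split has
     * progressed (== HASH_RANGE). */
    static unsigned int hash_to_ix(Uint32 hx, unsigned int sizeMask,
                                   unsigned int usedSlots)
    {
        unsigned int ix = hx & sizeMask;     /* wide mask first       */
        if (ix >= usedSlots)                 /* bucket not split yet  */
            ix = hx & (sizeMask >> 1);       /* fall back to narrow   */
        return ix;
    }
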
+
static unsigned int next_array_size(unsigned int need)
{
static unsigned int tab[] =
@@ -949,35 +1040,39 @@ static unsigned int next_array_size(unsigned int need)
static void pd_check(ProcDict *pd)
{
unsigned int i;
+ unsigned int used;
Uint num;
if (pd == NULL)
return;
- ASSERT(pd->size >= pd->used);
+ used = HASH_RANGE(pd);
+ ASSERT(pd->arraySize >= used);
ASSERT(HASH_RANGE(pd) <= MAX_HASH);
- for (i = 0, num = 0; i < pd->used; ++i) {
+ for (i = 0, num = 0; i < used; ++i) {
Eterm t = pd->data[i];
if (is_nil(t)) {
continue;
} else if (is_tuple(t)) {
++num;
ASSERT(arityval(*tuple_val(t)) == 2);
+ ASSERT(pd_hash_value(pd, tuple_val(t)[1]) == i);
continue;
} else if (is_list(t)) {
while (t != NIL) {
++num;
ASSERT(is_tuple(TCAR(t)));
ASSERT(arityval(*(tuple_val(TCAR(t)))) == 2);
+ ASSERT(pd_hash_value(pd, tuple_val(TCAR(t))[1]) == i);
t = TCDR(t);
}
continue;
} else {
- erl_exit(1,
+ erts_exit(ERTS_ERROR_EXIT,
"Found tag 0x%08x in process dictionary at position %d",
(unsigned long) t, (int) i);
}
}
ASSERT(num == pd->numElements);
- ASSERT(pd->splitPosition <= pd->homeSize);
+ ASSERT(pd->usedSlots >= pd->splitPosition*2);
}
#endif /* DEBUG */
diff --git a/erts/emulator/beam/erl_process_dict.h b/erts/emulator/beam/erl_process_dict.h
index 8fad2a67ab..ab58f3c239 100644
--- a/erts/emulator/beam/erl_process_dict.h
+++ b/erts/emulator/beam/erl_process_dict.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -22,21 +23,27 @@
#include "sys.h"
typedef struct proc_dict {
- unsigned int size;
- unsigned int used;
- unsigned int homeSize;
+ unsigned int sizeMask;
+ unsigned int usedSlots;
+ unsigned int arraySize;
unsigned int splitPosition;
Uint numElements;
Eterm data[1]; /* The beginning of an array of erlang terms */
} ProcDict;
+#define ERTS_PD_START(PD) ((PD)->data)
+#define ERTS_PD_SIZE(PD) ((PD)->usedSlots)
+
+int erts_pd_set_initial_size(int size);
Uint erts_dicts_mem_size(struct process *p);
void erts_erase_dicts(struct process *p);
-void erts_dictionary_dump(int to, void *to_arg, ProcDict *pd);
-void erts_deep_dictionary_dump(int to, void *to_arg,
- ProcDict* pd, void (*cb)(int, void *, Eterm obj));
+void erts_dictionary_dump(fmtfn_t to, void *to_arg, ProcDict *pd);
+void erts_deep_dictionary_dump(fmtfn_t to, void *to_arg,
+ ProcDict* pd, void (*cb)(fmtfn_t, void *, Eterm obj));
Eterm erts_dictionary_copy(struct process *p, ProcDict *pd);
Eterm erts_pd_hash_get(struct process *p, Eterm id);
+Uint32 erts_pd_make_hx(Eterm key);
+Eterm erts_pd_hash_get_with_hx(Process *p, Uint32 hx, Eterm id);
#endif
diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c
index 2f3cf23b00..0b7f361622 100644
--- a/erts/emulator/beam/erl_process_dump.c
+++ b/erts/emulator/beam/erl_process_dump.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -31,6 +32,7 @@
#include "dist.h"
#include "beam_catches.h"
#include "erl_binary.h"
+#include "erl_map.h"
#define ERTS_WANT_EXTERNAL_TAGS
#include "external.h"
@@ -39,16 +41,22 @@
#define OUR_NIL _make_header(0,_TAG_HEADER_FLOAT)
-static void dump_process_info(int to, void *to_arg, Process *p);
-static void dump_element(int to, void *to_arg, Eterm x);
-static void dump_dist_ext(int to, void *to_arg, ErtsDistExternal *edep);
-static void dump_element_nl(int to, void *to_arg, Eterm x);
-static int stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp,
+static void dump_process_info(fmtfn_t to, void *to_arg, Process *p);
+static void dump_element(fmtfn_t to, void *to_arg, Eterm x);
+static void dump_dist_ext(fmtfn_t to, void *to_arg, ErtsDistExternal *edep);
+static void dump_element_nl(fmtfn_t to, void *to_arg, Eterm x);
+static int stack_element_dump(fmtfn_t to, void *to_arg, Eterm* sp,
int yreg);
-static void print_function_from_pc(int to, void *to_arg, BeamInstr* x);
-static void heap_dump(int to, void *to_arg, Eterm x);
-static void dump_binaries(int to, void *to_arg, Binary* root);
-static void dump_externally(int to, void *to_arg, Eterm term);
+static void stack_trace_dump(fmtfn_t to, void *to_arg, Eterm* sp);
+static void print_function_from_pc(fmtfn_t to, void *to_arg, BeamInstr* x);
+static void heap_dump(fmtfn_t to, void *to_arg, Eterm x);
+static void dump_binaries(fmtfn_t to, void *to_arg, Binary* root);
+static void dump_externally(fmtfn_t to, void *to_arg, Eterm term);
+static void mark_literal(Eterm* ptr);
+static void init_literal_areas(void);
+static void dump_literals(fmtfn_t to, void *to_arg);
+static void dump_module_literals(fmtfn_t to, void *to_arg,
+ ErtsLiteralArea* lit_area);
static Binary* all_binaries;
@@ -56,41 +64,46 @@ extern BeamInstr beam_apply[];
extern BeamInstr beam_exit[];
extern BeamInstr beam_continue_exit[];
-
void
-erts_deep_process_dump(int to, void *to_arg)
+erts_deep_process_dump(fmtfn_t to, void *to_arg)
{
int i, max = erts_ptab_max(&erts_proc);
all_binaries = NULL;
-
+ init_literal_areas();
+
for (i = 0; i < max; i++) {
Process *p = erts_pix2proc(i);
if (p && p->i != ENULL) {
- erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state);
+ erts_aint32_t state = erts_atomic32_read_acqb(&p->state);
if (!(state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_GC)))
dump_process_info(to, to_arg, p);
}
}
+ dump_literals(to, to_arg);
dump_binaries(to, to_arg, all_binaries);
}
-Uint erts_process_memory(Process *p) {
- ErlMessage *mp;
+Uint erts_process_memory(Process *p, int incl_msg_inq) {
+ ErtsMessage *mp;
Uint size = 0;
struct saved_calls *scb;
size += sizeof(Process);
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p);
+ if (incl_msg_inq)
+ ERTS_MSGQ_MV_INQ2PRIVQ(p);
erts_doforall_links(ERTS_P_LINKS(p), &erts_one_link_size, &size);
erts_doforall_monitors(ERTS_P_MONITORS(p), &erts_one_mon_size, &size);
size += (p->heap_sz + p->mbuf_sz) * sizeof(Eterm);
+ if (p->abandoned_heap)
+ size += (p->hend - p->heap) * sizeof(Eterm);
if (p->old_hend && p->old_heap)
size += (p->old_hend - p->old_heap) * sizeof(Eterm);
- size += p->msg.len * sizeof(ErlMessage);
+
+ size += p->msg.len * sizeof(ErtsMessage);
for (mp = p->msg.first; mp; mp = mp->next)
if (mp->data.attached)
@@ -100,7 +113,7 @@ Uint erts_process_memory(Process *p) {
size += p->arity * sizeof(p->arg_reg[0]);
}
- if (p->psd)
+ if (erts_atomic_read_nob(&p->psd) != (erts_aint_t) NULL)
size += sizeof(ErtsPSD);
scb = ERTS_PROC_GET_SAVED_CALLS_BUF(p);
@@ -114,13 +127,13 @@ Uint erts_process_memory(Process *p) {
}
static void
-dump_process_info(int to, void *to_arg, Process *p)
+dump_process_info(fmtfn_t to, void *to_arg, Process *p)
{
Eterm* sp;
- ErlMessage* mp;
+ ErtsMessage* mp;
int yreg = -1;
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p);
+ ERTS_MSGQ_MV_INQ2PRIVQ(p);
if ((ERTS_TRACE_FLAGS(p) & F_SENSITIVE) == 0 && p->msg.first) {
erts_print(to, to_arg, "=proc_messages:%T\n", p->common.id);
@@ -148,7 +161,7 @@ dump_process_info(int to, void *to_arg, Process *p)
if ((ERTS_TRACE_FLAGS(p) & F_SENSITIVE) == 0) {
erts_print(to, to_arg, "=proc_stack:%T\n", p->common.id);
for (sp = p->stop; sp < STACK_START(p); sp++) {
- yreg = stack_element_dump(to, to_arg, p, sp, yreg);
+ yreg = stack_element_dump(to, to_arg, sp, yreg);
}
erts_print(to, to_arg, "=proc_heap:%T\n", p->common.id);
@@ -173,7 +186,7 @@ dump_process_info(int to, void *to_arg, Process *p)
}
static void
-dump_dist_ext(int to, void *to_arg, ErtsDistExternal *edep)
+dump_dist_ext(fmtfn_t to, void *to_arg, ErtsDistExternal *edep)
{
if (!edep)
erts_print(to, to_arg, "D0:E0:");
@@ -207,7 +220,7 @@ dump_dist_ext(int to, void *to_arg, ErtsDistExternal *edep)
}
static void
-dump_element(int to, void *to_arg, Eterm x)
+dump_element(fmtfn_t to, void *to_arg, Eterm x)
{
if (is_list(x)) {
erts_print(to, to_arg, "H" PTR_FMT, list_val(x));
@@ -237,15 +250,71 @@ dump_element(int to, void *to_arg, Eterm x)
}
static void
-dump_element_nl(int to, void *to_arg, Eterm x)
+dump_element_nl(fmtfn_t to, void *to_arg, Eterm x)
{
dump_element(to, to_arg, x);
erts_putc(to, to_arg, '\n');
}
+static void
+stack_trace_dump(fmtfn_t to, void *to_arg, Eterm *sp) {
+ Eterm x = *sp;
+ if (is_CP(x)) {
+ erts_print(to, to_arg, "%p:", sp);
+ erts_print(to, to_arg, "SReturn addr 0x%X (", cp_val(x));
+ print_function_from_pc(to, to_arg, cp_val(x));
+ erts_print(to, to_arg, ")\n");
+ }
+}
+
+void
+erts_limited_stack_trace(fmtfn_t to, void *to_arg, Process *p)
+{
+ Eterm* sp;
+
+
+ return;
+ }
+
+ if (STACK_START(p) < STACK_TOP(p)) {
+ return;
+ }
+
+ if ((STACK_START(p) - STACK_TOP(p)) < 512) {
+ if (erts_sys_is_area_readable((char*)STACK_TOP(p),
+ (char*)STACK_START(p)))
+ for (sp = STACK_TOP(p); sp < STACK_START(p); sp++)
+ stack_trace_dump(to, to_arg, sp);
+ else
+ erts_print(to, to_arg, "Could not read from stack memory: %p - %p\n",
+ STACK_TOP(p), STACK_START(p));
+ } else {
+ sp = STACK_TOP(p);
+ if (erts_sys_is_area_readable((char*)STACK_TOP(p),
+ (char*)(STACK_TOP(p) + 25)))
+ for (; sp < (STACK_TOP(p) + 256); sp++)
+ stack_trace_dump(to, to_arg, sp);
+ else
+ erts_print(to, to_arg, "Could not read from stack memory: %p - %p\n",
+ STACK_TOP(p), STACK_TOP(p) + 256);
+
+ erts_print(to, to_arg, "%p: skipping %d frames\n",
+ sp, STACK_START(p) - STACK_TOP(p) - 512);
+
+ if (erts_sys_is_area_readable((char*)(STACK_START(p) - 256),
+ (char*)STACK_START(p)))
+ for (sp = STACK_START(p) - 256; sp < STACK_START(p); sp++)
+ stack_trace_dump(to, to_arg, sp);
+ else
+ erts_print(to, to_arg, "Could not read from stack memory: %p - %p\n",
+ STACK_START(p) - 256, STACK_START(p));
+ }
+
+}
static int
-stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp, int yreg)
+stack_element_dump(fmtfn_t to, void *to_arg, Eterm* sp, int yreg)
{
Eterm x = *sp;
@@ -273,10 +342,10 @@ stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp, int yreg)
}
static void
-print_function_from_pc(int to, void *to_arg, BeamInstr* x)
+print_function_from_pc(fmtfn_t to, void *to_arg, BeamInstr* x)
{
- BeamInstr* addr = find_function_from_pc(x);
- if (addr == NULL) {
+ ErtsCodeMFA* cmfa = find_function_from_pc(x);
+ if (cmfa == NULL) {
if (x == beam_exit) {
erts_print(to, to_arg, "<terminate process>");
} else if (x == beam_continue_exit) {
@@ -288,12 +357,13 @@ print_function_from_pc(int to, void *to_arg, BeamInstr* x)
}
} else {
erts_print(to, to_arg, "%T:%T/%bpu + %bpu",
- addr[0], addr[1], addr[2], ((x-addr)-2) * sizeof(Eterm));
+ cmfa->module, cmfa->function, cmfa->arity,
+ (x-(BeamInstr*)cmfa) * sizeof(Eterm));
}
}
static void
-heap_dump(int to, void *to_arg, Eterm x)
+heap_dump(fmtfn_t to, void *to_arg, Eterm x)
{
DeclareTmpHeapNoproc(last,1);
Eterm* next = last;
@@ -307,10 +377,12 @@ heap_dump(int to, void *to_arg, Eterm x)
while (x != OUR_NIL) {
if (is_CP(x)) {
- next = (Eterm *) EXPAND_POINTER(x);
+ next = (Eterm *) x;
} else if (is_list(x)) {
ptr = list_val(x);
- if (ptr[0] != OUR_NIL) {
+ if (erts_is_literal(x, ptr)) {
+ mark_literal(ptr);
+ } else if (ptr[0] != OUR_NIL) {
erts_print(to, to_arg, PTR_FMT ":l", ptr);
dump_element(to, to_arg, ptr[0]);
erts_putc(to, to_arg, '|');
@@ -320,7 +392,7 @@ heap_dump(int to, void *to_arg, Eterm x)
ptr[1] = make_small(0);
}
x = ptr[0];
- ptr[0] = (Eterm) COMPRESS_POINTER(next);
+ ptr[0] = (Eterm) next;
next = ptr + 1;
continue;
}
@@ -329,7 +401,9 @@ heap_dump(int to, void *to_arg, Eterm x)
ptr = boxed_val(x);
hdr = *ptr;
- if (hdr != OUR_NIL) { /* If not visited */
+ if (erts_is_literal(x, ptr)) {
+ mark_literal(ptr);
+ } else if (hdr != OUR_NIL) {
erts_print(to, to_arg, PTR_FMT ":", ptr);
if (is_arity_value(hdr)) {
Uint i;
@@ -350,7 +424,7 @@ heap_dump(int to, void *to_arg, Eterm x)
ptr[0] = OUR_NIL;
} else {
x = ptr[arity];
- ptr[0] = (Eterm) COMPRESS_POINTER(next);
+ ptr[0] = (Eterm) next;
next = ptr + arity - 1;
continue;
}
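
heap_dump() traverses arbitrarily deep terms without an auxiliary stack by pointer reversal (the Deutsch-Schorr-Waite trick): the continuation is stored in the visited cell itself as a CP-tagged word, and OUR_NIL marks cells already emitted. The same idea on a plain singly linked list — hypothetical node type; the real code threads tagged Eterm heap words:

    #include <stdio.h>

    struct node { int val; struct node *next; };

    /* Visit every cell with O(1) extra space: reverse the links on the
     * way down, restore them on the way back. */
    static void traverse(struct node *head)
    {
        struct node *prev = NULL, *cur = head;
        while (cur) {                        /* forward: reverse links */
            struct node *nxt = cur->next;
            printf("%d\n", cur->val);        /* "dump" the cell        */
            cur->next = prev;
            prev = cur; cur = nxt;
        }
        while (prev) {                       /* backward: restore      */
            struct node *nxt = prev->next;
            prev->next = cur;
            cur = prev; prev = nxt;
        }
    }
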
@@ -384,8 +458,8 @@ heap_dump(int to, void *to_arg, Eterm x)
ProcBin* pb = (ProcBin *) binary_val(x);
Binary* val = pb->val;
- if (erts_smp_atomic_xchg_nob(&val->refc, 0) != 0) {
- val->flags = (UWord) all_binaries;
+ if (erts_atomic_xchg_nob(&val->intern.refc, 0) != 0) {
+ val->intern.flags = (UWord) all_binaries;
all_binaries = val;
}
erts_print(to, to_arg,
@@ -435,11 +509,77 @@ heap_dump(int to, void *to_arg, Eterm x)
erts_print(to, to_arg, "p<%beu.%beu>\n",
port_channel_no(x), port_number(x));
*ptr = OUR_NIL;
+ } else if (is_map_header(hdr)) {
+ if (is_flatmap_header(hdr)) {
+ flatmap_t* fmp = (flatmap_t *) flatmap_val(x);
+ Eterm* values = ptr + sizeof(flatmap_t) / sizeof(Eterm);
+ Uint map_size = fmp->size;
+ int i;
+
+ erts_print(to, to_arg, "Mf" ETERM_FMT ":", map_size);
+ dump_element(to, to_arg, fmp->keys);
+ erts_putc(to, to_arg, ':');
+ for (i = 0; i < map_size; i++) {
+ dump_element(to, to_arg, values[i]);
+ if (is_immed(values[i])) {
+ values[i] = make_small(0);
+ }
+ if (i < map_size-1) {
+ erts_putc(to, to_arg, ',');
+ }
+ }
+ erts_putc(to, to_arg, '\n');
+ *ptr = OUR_NIL;
+ x = fmp->keys;
+ if (map_size) {
+ fmp->keys = (Eterm) next;
+ next = &values[map_size-1];
+ }
+ continue;
+ } else {
+ Uint i;
+ Uint sz = 0;
+ Eterm* nodes = ptr + 1;
+
+ switch (MAP_HEADER_TYPE(hdr)) {
+ case MAP_HEADER_TAG_HAMT_HEAD_ARRAY:
+ nodes++;
+ sz = 16;
+ erts_print(to, to_arg, "Mh" ETERM_FMT ":" ETERM_FMT ":",
+ hashmap_size(x), sz);
+ break;
+ case MAP_HEADER_TAG_HAMT_HEAD_BITMAP:
+ nodes++;
+ sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ erts_print(to, to_arg, "Mh" ETERM_FMT ":" ETERM_FMT ":",
+ hashmap_size(x), sz);
+ break;
+ case MAP_HEADER_TAG_HAMT_NODE_BITMAP:
+ sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ erts_print(to, to_arg, "Mn" ETERM_FMT ":", sz);
+ break;
+ }
+ *ptr = OUR_NIL;
+ for (i = 0; i < sz; i++) {
+ dump_element(to, to_arg, nodes[i]);
+ if (is_immed(nodes[i])) {
+ nodes[i] = make_small(0);
+ }
+ if (i < sz-1) {
+ erts_putc(to, to_arg, ',');
+ }
+ }
+ erts_putc(to, to_arg, '\n');
+ x = nodes[0];
+ nodes[0] = (Eterm) next;
+ next = &nodes[sz-1];
+ continue;
+ }
} else {
/*
* All other we dump in the external term format.
*/
- dump_externally(to, to_arg, x);
+ dump_externally(to, to_arg, x);
erts_putc(to, to_arg, '\n');
*ptr = OUR_NIL;
}
@@ -453,7 +593,7 @@ heap_dump(int to, void *to_arg, Eterm x)
}
static void
-dump_binaries(int to, void *to_arg, Binary* current)
+dump_binaries(fmtfn_t to, void *to_arg, Binary* current)
{
while (current) {
long i;
@@ -466,12 +606,12 @@ dump_binaries(int to, void *to_arg, Binary* current)
erts_print(to, to_arg, "%02X", bytes[i]);
}
erts_putc(to, to_arg, '\n');
- current = (Binary *) current->flags;
+ current = (Binary *) current->intern.flags;
}
}
static void
-dump_externally(int to, void *to_arg, Eterm term)
+dump_externally(fmtfn_t to, void *to_arg, Eterm term)
{
byte sbuf[1024]; /* encode and hope for the best ... */
byte* s;
@@ -508,3 +648,394 @@ dump_externally(int to, void *to_arg, Eterm term)
erts_print(to, to_arg, "%02X", *s++);
}
}
+
+/*
+ * Handle dumping of literal areas.
+ */
+
+static ErtsLiteralArea** lit_areas;
+static Uint num_lit_areas;
+
+static int compare_areas(const void * a, const void * b)
+{
+ ErtsLiteralArea** a_p = (ErtsLiteralArea **) a;
+ ErtsLiteralArea** b_p = (ErtsLiteralArea **) b;
+
+ if (*a_p < *b_p) {
+ return -1;
+ } else if (*b_p < *a_p) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+
+static void
+init_literal_areas(void)
+{
+ int i;
+ Module* modp;
+ ErtsCodeIndex code_ix;
+ ErtsLiteralArea** area_p;
+
+ code_ix = erts_active_code_ix();
+ erts_rlock_old_code(code_ix);
+
+ lit_areas = area_p = erts_dump_lit_areas;
+ num_lit_areas = 0;
+ for (i = 0; i < module_code_size(code_ix); i++) {
+ modp = module_code(i, code_ix);
+ if (modp == NULL) {
+ continue;
+ }
+ if (modp->curr.code_length > 0 &&
+ modp->curr.code_hdr->literal_area) {
+ *area_p++ = modp->curr.code_hdr->literal_area;
+ }
+ if (modp->old.code_length > 0 && modp->old.code_hdr->literal_area) {
+ *area_p++ = modp->old.code_hdr->literal_area;
+ }
+ }
+
+ num_lit_areas = area_p - lit_areas;
+ ASSERT(num_lit_areas <= erts_dump_num_lit_areas);
+ for (i = 0; i < num_lit_areas; i++) {
+ lit_areas[i]->off_heap = 0;
+ }
+
+ qsort(lit_areas, num_lit_areas, sizeof(ErtsLiteralArea *),
+ compare_areas);
+
+ erts_runlock_old_code(code_ix);
+}
+
+static int search_areas(const void * a, const void * b) {
+ Eterm* key = (Eterm *) a;
+ ErtsLiteralArea** b_p = (ErtsLiteralArea **) b;
+ if (key < b_p[0]->start) {
+ return -1;
+ } else if (b_p[0]->end <= key) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+static void mark_literal(Eterm* ptr)
+{
+ ErtsLiteralArea** ap;
+
+ ap = bsearch(ptr, lit_areas, num_lit_areas, sizeof(ErtsLiteralArea*),
+ search_areas);
+
+ /*
+ * If the literal was created by native code, this search will not
+ * find it and ap will be NULL.
+ */
+
+ if (ap) {
+ ap[0]->off_heap = (struct erl_off_heap_header *) 1;
+ }
+}
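
Literal areas are gathered into an array sorted by start address so each literal pointer can be classified with a range bsearch(): the comparator returns 0 when start <= ptr < end. The idiom in isolation, with a hypothetical range type:

    #include <stdlib.h>

    struct range { const char *start, *end; };   /* [start, end) */

    static int cmp_point_range(const void *key, const void *elem)
    {
        const char *p = (const char *) key;
        const struct range *r = (const struct range *) elem;
        if (p < r->start) return -1;
        if (p >= r->end)  return  1;
        return 0;                                /* p inside r */
    }

    /* ranges[] must be sorted by start and non-overlapping. */
    static struct range *find_range(const char *p,
                                    struct range *ranges, size_t n)
    {
        return bsearch(p, ranges, n, sizeof ranges[0], cmp_point_range);
    }
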
+
+
+static void
+dump_literals(fmtfn_t to, void *to_arg)
+{
+ ErtsCodeIndex code_ix;
+ int i;
+
+ code_ix = erts_active_code_ix();
+ erts_rlock_old_code(code_ix);
+
+ erts_print(to, to_arg, "=literals\n");
+ for (i = 0; i < num_lit_areas; i++) {
+ if (lit_areas[i]->off_heap) {
+ dump_module_literals(to, to_arg, lit_areas[i]);
+ }
+ }
+
+ erts_runlock_old_code(code_ix);
+}
+
+static void
+dump_module_literals(fmtfn_t to, void *to_arg, ErtsLiteralArea* lit_area)
+{
+ Eterm* htop;
+ Eterm* hend;
+
+ htop = lit_area->start;
+ hend = lit_area->end;
+ while (htop < hend) {
+ Eterm w = *htop;
+ Eterm term;
+ Uint size;
+
+ switch (primary_tag(w)) {
+ case TAG_PRIMARY_HEADER:
+ term = make_boxed(htop);
+ erts_print(to, to_arg, PTR_FMT ":", htop);
+ if (is_arity_value(w)) {
+ Uint i;
+ Uint arity = arityval(w);
+
+ erts_print(to, to_arg, "t" ETERM_FMT ":", arity);
+ for (i = 1; i <= arity; i++) {
+ dump_element(to, to_arg, htop[i]);
+ if (i < arity) {
+ erts_putc(to, to_arg, ',');
+ }
+ }
+ erts_putc(to, to_arg, '\n');
+ } else if (w == HEADER_FLONUM) {
+ FloatDef f;
+ char sbuf[31];
+ int i;
+
+ GET_DOUBLE_DATA((htop+1), f);
+ i = sys_double_to_chars(f.fd, sbuf, sizeof(sbuf));
+ sys_memset(sbuf+i, 0, 31-i);
+ erts_print(to, to_arg, "F%X:%s\n", i, sbuf);
+ } else if (_is_bignum_header(w)) {
+ erts_print(to, to_arg, "B%T\n", term);
+ } else if (is_binary_header(w)) {
+ Uint tag = thing_subtag(w);
+ Uint size = binary_size(term);
+ Uint i;
+
+ if (tag == HEAP_BINARY_SUBTAG) {
+ byte* p;
+
+ erts_print(to, to_arg, "Yh%X:", size);
+ p = binary_bytes(term);
+ for (i = 0; i < size; i++) {
+ erts_print(to, to_arg, "%02X", p[i]);
+ }
+ } else if (tag == REFC_BINARY_SUBTAG) {
+ ProcBin* pb = (ProcBin *) binary_val(term);
+ Binary* val = pb->val;
+
+ if (erts_atomic_xchg_nob(&val->intern.refc, 0) != 0) {
+ val->intern.flags = (UWord) all_binaries;
+ all_binaries = val;
+ }
+ erts_print(to, to_arg,
+ "Yc" PTR_FMT ":" PTR_FMT ":" PTR_FMT,
+ val,
+ pb->bytes - (byte *)val->orig_bytes,
+ size);
+ } else if (tag == SUB_BINARY_SUBTAG) {
+ ErlSubBin* Sb = (ErlSubBin *) binary_val(term);
+ Eterm* real_bin;
+ void* val;
+
+ real_bin = boxed_val(Sb->orig);
+ if (thing_subtag(*real_bin) == REFC_BINARY_SUBTAG) {
+ /*
+ * Unvisited REFC_BINARY: Point directly to
+ * the binary.
+ */
+ ProcBin* pb = (ProcBin *) real_bin;
+ val = pb->val;
+ } else {
+ /*
+ * Heap binary or visited REFC binary: Point
+ * to heap binary or ProcBin on the heap.
+ */
+ val = real_bin;
+ }
+ erts_print(to, to_arg,
+ "Ys" PTR_FMT ":" PTR_FMT ":" PTR_FMT,
+ val, Sb->offs, size);
+ }
+ erts_putc(to, to_arg, '\n');
+ } else if (is_map_header(w)) {
+ if (is_flatmap_header(w)) {
+ flatmap_t* fmp = (flatmap_t *) flatmap_val(term);
+ Eterm* values = htop + sizeof(flatmap_t) / sizeof(Eterm);
+ Uint map_size = fmp->size;
+ int i;
+
+ erts_print(to, to_arg, "Mf" ETERM_FMT ":", map_size);
+ dump_element(to, to_arg, fmp->keys);
+ erts_putc(to, to_arg, ':');
+ for (i = 0; i < map_size; i++) {
+ dump_element(to, to_arg, values[i]);
+ if (i < map_size-1) {
+ erts_putc(to, to_arg, ',');
+ }
+ }
+ erts_putc(to, to_arg, '\n');
+ } else {
+ Uint i;
+ Uint sz = 0;
+ Eterm* nodes = htop + 1;
+
+ switch (MAP_HEADER_TYPE(w)) {
+ case MAP_HEADER_TAG_HAMT_HEAD_ARRAY:
+ nodes++;
+ sz = 16;
+ erts_print(to, to_arg, "Mh" ETERM_FMT ":" ETERM_FMT ":",
+ hashmap_size(term), sz);
+ break;
+ case MAP_HEADER_TAG_HAMT_HEAD_BITMAP:
+ nodes++;
+ sz = hashmap_bitcount(MAP_HEADER_VAL(w));
+ erts_print(to, to_arg, "Mh" ETERM_FMT ":" ETERM_FMT ":",
+ hashmap_size(term), sz);
+ break;
+ case MAP_HEADER_TAG_HAMT_NODE_BITMAP:
+ sz = hashmap_bitcount(MAP_HEADER_VAL(w));
+ erts_print(to, to_arg, "Mn" ETERM_FMT ":", sz);
+ break;
+ }
+ for (i = 0; i < sz; i++) {
+ dump_element(to, to_arg, nodes[i]);
+ if (i < sz-1) {
+ erts_putc(to, to_arg, ',');
+ }
+ }
+ erts_putc(to, to_arg, '\n');
+ }
+ }
+ size = 1 + header_arity(w);
+ switch (w & _HEADER_SUBTAG_MASK) {
+ case MAP_SUBTAG:
+ if (is_flatmap_header(w)) {
+ size += 1 + flatmap_get_size(htop);
+ } else {
+ size += hashmap_bitcount(MAP_HEADER_VAL(w));
+ }
+ break;
+ case SUB_BINARY_SUBTAG:
+ size += 1;
+ break;
+ }
+ break;
+ default:
+ ASSERT(!is_header(htop[1]));
+ erts_print(to, to_arg, PTR_FMT ":l", htop);
+ dump_element(to, to_arg, htop[0]);
+ erts_putc(to, to_arg, '|');
+ dump_element(to, to_arg, htop[1]);
+ erts_putc(to, to_arg, '\n');
+ size = 2;
+ break;
+ }
+ htop += size;
+ }
+}
+
+void erts_dump_process_state(fmtfn_t to, void *to_arg, erts_aint32_t psflg)
+{
+ char *s;
+ switch (erts_process_state2status(psflg)) {
+ case am_free: s = "Non Existing"; break; /* Should never happen */
+ case am_exiting: s = "Exiting"; break;
+ case am_garbage_collecting: s = "Garbing"; break;
+ case am_suspended: s = "Suspended"; break;
+ case am_running: s = "Running"; break;
+ case am_runnable: s = "Scheduled"; break;
+ case am_waiting: s = "Waiting"; break;
+ default: s = "Undefined"; break; /* Should never happen */
+ }
+
+ erts_print(to, to_arg, "%s\n", s);
+}
+
+void
+erts_dump_extended_process_state(fmtfn_t to, void *to_arg, erts_aint32_t psflg) {
+
+ int i;
+
+ switch (ERTS_PSFLGS_GET_ACT_PRIO(psflg)) {
+ case PRIORITY_MAX: erts_print(to, to_arg, "ACT_PRIO_MAX | "); break;
+ case PRIORITY_HIGH: erts_print(to, to_arg, "ACT_PRIO_HIGH | "); break;
+ case PRIORITY_NORMAL: erts_print(to, to_arg, "ACT_PRIO_NORMAL | "); break;
+ case PRIORITY_LOW: erts_print(to, to_arg, "ACT_PRIO_LOW | "); break;
+ }
+ switch (ERTS_PSFLGS_GET_USR_PRIO(psflg)) {
+ case PRIORITY_MAX: erts_print(to, to_arg, "USR_PRIO_MAX | "); break;
+ case PRIORITY_HIGH: erts_print(to, to_arg, "USR_PRIO_HIGH | "); break;
+ case PRIORITY_NORMAL: erts_print(to, to_arg, "USR_PRIO_NORMAL | "); break;
+ case PRIORITY_LOW: erts_print(to, to_arg, "USR_PRIO_LOW | "); break;
+ }
+ switch (ERTS_PSFLGS_GET_PRQ_PRIO(psflg)) {
+ case PRIORITY_MAX: erts_print(to, to_arg, "PRQ_PRIO_MAX"); break;
+ case PRIORITY_HIGH: erts_print(to, to_arg, "PRQ_PRIO_HIGH"); break;
+ case PRIORITY_NORMAL: erts_print(to, to_arg, "PRQ_PRIO_NORMAL"); break;
+ case PRIORITY_LOW: erts_print(to, to_arg, "PRQ_PRIO_LOW"); break;
+ }
+
+ psflg &= ~(ERTS_PSFLGS_ACT_PRIO_MASK |
+ ERTS_PSFLGS_USR_PRIO_MASK |
+ ERTS_PSFLGS_PRQ_PRIO_MASK);
+
+ if (psflg)
+ erts_print(to, to_arg, " | ");
+
+ for (i = 0; i <= ERTS_PSFLG_MAX && psflg; i++) {
+ erts_aint32_t chk = (1 << i);
+ if (psflg & chk) {
+ switch (chk) {
+ case ERTS_PSFLG_IN_PRQ_MAX:
+ erts_print(to, to_arg, "IN_PRQ_MAX"); break;
+ case ERTS_PSFLG_IN_PRQ_HIGH:
+ erts_print(to, to_arg, "IN_PRQ_HIGH"); break;
+ case ERTS_PSFLG_IN_PRQ_NORMAL:
+ erts_print(to, to_arg, "IN_PRQ_NORMAL"); break;
+ case ERTS_PSFLG_IN_PRQ_LOW:
+ erts_print(to, to_arg, "IN_PRQ_LOW"); break;
+ case ERTS_PSFLG_FREE:
+ erts_print(to, to_arg, "FREE"); break;
+ case ERTS_PSFLG_EXITING:
+ erts_print(to, to_arg, "EXITING"); break;
+ case ERTS_PSFLG_PENDING_EXIT:
+ erts_print(to, to_arg, "PENDING_EXIT"); break;
+ case ERTS_PSFLG_ACTIVE:
+ erts_print(to, to_arg, "ACTIVE"); break;
+ case ERTS_PSFLG_IN_RUNQ:
+ erts_print(to, to_arg, "IN_RUNQ"); break;
+ case ERTS_PSFLG_RUNNING:
+ erts_print(to, to_arg, "RUNNING"); break;
+ case ERTS_PSFLG_SUSPENDED:
+ erts_print(to, to_arg, "SUSPENDED"); break;
+ case ERTS_PSFLG_GC:
+ erts_print(to, to_arg, "GC"); break;
+ case ERTS_PSFLG_BOUND:
+ erts_print(to, to_arg, "BOUND"); break;
+ case ERTS_PSFLG_TRAP_EXIT:
+ erts_print(to, to_arg, "TRAP_EXIT"); break;
+ case ERTS_PSFLG_ACTIVE_SYS:
+ erts_print(to, to_arg, "ACTIVE_SYS"); break;
+ case ERTS_PSFLG_RUNNING_SYS:
+ erts_print(to, to_arg, "RUNNING_SYS"); break;
+ case ERTS_PSFLG_PROXY:
+ erts_print(to, to_arg, "PROXY"); break;
+ case ERTS_PSFLG_DELAYED_SYS:
+ erts_print(to, to_arg, "DELAYED_SYS"); break;
+ case ERTS_PSFLG_OFF_HEAP_MSGQ:
+ erts_print(to, to_arg, "OFF_HEAP_MSGQ"); break;
+ case ERTS_PSFLG_ON_HEAP_MSGQ:
+ erts_print(to, to_arg, "ON_HEAP_MSGQ"); break;
+ case ERTS_PSFLG_DIRTY_CPU_PROC:
+ erts_print(to, to_arg, "DIRTY_CPU_PROC"); break;
+ case ERTS_PSFLG_DIRTY_IO_PROC:
+ erts_print(to, to_arg, "DIRTY_IO_PROC"); break;
+ case ERTS_PSFLG_DIRTY_ACTIVE_SYS:
+ erts_print(to, to_arg, "DIRTY_ACTIVE_SYS"); break;
+ case ERTS_PSFLG_DIRTY_RUNNING:
+ erts_print(to, to_arg, "DIRTY_RUNNING"); break;
+ case ERTS_PSFLG_DIRTY_RUNNING_SYS:
+ erts_print(to, to_arg, "DIRTY_RUNNING_SYS"); break;
+ default:
+ erts_print(to, to_arg, "UNKNOWN(%d)", chk); break;
+ }
+ if (psflg > chk)
+ erts_print(to, to_arg, " | ");
+ psflg -= chk;
+ }
+ }
+ erts_print(to, to_arg, "\n");
+}
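
The flag loop clears each bit from psflg as it prints it, so "psflg > chk" holds exactly while higher bits remain and the " | " separator is never emitted trailing. The decode-and-strip pattern in miniature (names[] is illustrative):

    #include <stdio.h>

    static const char *names[] = { "FREE", "EXITING", "ACTIVE", "SUSPENDED" };

    /* Print set bits by name, with " | " only between them. */
    static void print_flags(unsigned int flags)
    {
        unsigned int i;
        for (i = 0; i < 4 && flags; i++) {
            unsigned int chk = 1u << i;
            if (flags & chk) {
                printf("%s", names[i]);
                if (flags > chk)          /* higher bits still set */
                    printf(" | ");
                flags -= chk;
            }
        }
        printf("\n");
    }
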
diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c
index 82cc68222d..431867f27e 100644
--- a/erts/emulator/beam/erl_process_lock.c
+++ b/erts/emulator/beam/erl_process_lock.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2007-2012. All Rights Reserved.
+ * Copyright Ericsson AB 2007-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -55,9 +56,9 @@
* Note that wait flags may be read without the pix lock, but
* it is important that wait flags only are modified when the pix
* lock is held.
- * This implementation assumes that erts_smp_atomic_or_retold()
+ * This implementation assumes that erts_atomic_or_retold()
 * provides necessary memory barriers for a lock operation, and that
- * erts_smp_atomic_and_retold() provides necessary memorybarriers
+ * erts_atomic_and_retold() provides necessary memory barriers
* for an unlock operation.
*/
@@ -68,7 +69,6 @@
#include "erl_process.h"
#include "erl_thr_progress.h"
-#ifdef ERTS_SMP
#if ERTS_PROC_LOCK_OWN_IMPL
@@ -103,24 +103,21 @@ static struct {
Sint16 proc_lock_main;
Sint16 proc_lock_link;
Sint16 proc_lock_msgq;
+ Sint16 proc_lock_btm;
Sint16 proc_lock_status;
+ Sint16 proc_lock_trace;
} lc_id;
#endif
erts_pix_lock_t erts_pix_locks[ERTS_NO_OF_PIX_LOCKS];
-
void
erts_init_proc_lock(int cpus)
{
int i;
for (i = 0; i < ERTS_NO_OF_PIX_LOCKS; i++) {
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_mtx_init_x(&erts_pix_locks[i].u.mtx,
- "pix_lock", make_small(i), 1);
-#else
- erts_mtx_init(&erts_pix_locks[i].u.mtx, "pix_lock");
-#endif
+ erts_mtx_init(&erts_pix_locks[i].u.mtx, "pix_lock", make_small(i),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
}
#if ERTS_PROC_LOCK_OWN_IMPL
erts_thr_install_exit_handler(cleanup_tse);
@@ -145,7 +142,9 @@ erts_init_proc_lock(int cpus)
lc_id.proc_lock_main = erts_lc_get_lock_order_id("proc_main");
lc_id.proc_lock_link = erts_lc_get_lock_order_id("proc_link");
lc_id.proc_lock_msgq = erts_lc_get_lock_order_id("proc_msgq");
+ lc_id.proc_lock_btm = erts_lc_get_lock_order_id("proc_btm");
lc_id.proc_lock_status = erts_lc_get_lock_order_id("proc_status");
+ lc_id.proc_lock_trace = erts_lc_get_lock_order_id("proc_trace");
#endif
}
@@ -464,7 +463,7 @@ wait_for_locks(Process *p,
}
/*
- * erts_proc_lock_failed() is called when erts_smp_proc_lock()
+ * erts_proc_lock_failed() is called when erts_proc_lock()
* wasn't able to lock all locks. We may need to transfer locks
* to waiters and wait for our turn on locks.
*
@@ -543,7 +542,7 @@ erts_proc_lock_failed(Process *p,
}
/*
- * erts_proc_unlock_failed() is called when erts_smp_proc_unlock()
+ * erts_proc_unlock_failed() is called when erts_proc_unlock()
* wasn't able to unlock all locks. We may need to transfer locks
* to waiters.
*/
@@ -707,9 +706,9 @@ proc_safelock(int is_managed,
need_locks1 |= unlock_locks;
if (!is_managed && !have_locks1) {
refc1 = 1;
- erts_smp_proc_inc_refc(p1);
+ erts_proc_inc_refc(p1);
}
- erts_smp_proc_unlock(p1, unlock_locks);
+ erts_proc_unlock(p1, unlock_locks);
}
unlock_locks = unlock_mask & have_locks2;
if (unlock_locks) {
@@ -717,9 +716,9 @@ proc_safelock(int is_managed,
need_locks2 |= unlock_locks;
if (!is_managed && !have_locks2) {
refc2 = 1;
- erts_smp_proc_inc_refc(p2);
+ erts_proc_inc_refc(p2);
}
- erts_smp_proc_unlock(p2, unlock_locks);
+ erts_proc_unlock(p2, unlock_locks);
}
}
@@ -750,7 +749,7 @@ proc_safelock(int is_managed,
if (need_locks2 & lock)
lock_no--;
locks = need_locks1 & lock_mask;
- erts_smp_proc_lock(p1, locks);
+ erts_proc_lock(p1, locks);
have_locks1 |= locks;
need_locks1 &= ~locks;
}
@@ -761,7 +760,7 @@ proc_safelock(int is_managed,
lock = (1 << ++lock_no);
}
locks = need_locks2 & lock_mask;
- erts_smp_proc_lock(p2, locks);
+ erts_proc_lock(p2, locks);
have_locks2 |= locks;
need_locks2 &= ~locks;
}
@@ -798,9 +797,9 @@ proc_safelock(int is_managed,
if (!is_managed) {
if (refc1)
- erts_smp_proc_dec_refc(p1);
+ erts_proc_dec_refc(p1);
if (refc2)
- erts_smp_proc_dec_refc(p2);
+ erts_proc_dec_refc(p2);
}
}
@@ -861,8 +860,8 @@ erts_pid2proc_opt(Process *c_p,
return NULL;
need_locks &= ~c_p_have_locks;
if (!need_locks) {
- if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
- erts_smp_proc_inc_refc(c_p);
+ if (flags & ERTS_P2P_FLG_INC_REFC)
+ erts_proc_inc_refc(c_p);
return c_p;
}
}
@@ -875,8 +874,8 @@ erts_pid2proc_opt(Process *c_p,
if (proc->common.id != pid)
proc = NULL;
else if (!need_locks) {
- if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
- erts_smp_proc_inc_refc(proc);
+ if (flags & ERTS_P2P_FLG_INC_REFC)
+ erts_proc_inc_refc(proc);
}
else {
int busy;
@@ -898,7 +897,7 @@ erts_pid2proc_opt(Process *c_p,
#endif /* ERTS_PROC_LOCK_OWN_IMPL */
{
/* Try a quick trylock to grab all the locks we need. */
- busy = (int) erts_smp_proc_raw_trylock__(proc, need_locks);
+ busy = (int) erts_proc_raw_trylock__(proc, need_locks);
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_CHECK)
erts_proc_lc_trylock(proc, need_locks, !busy, __FILE__,__LINE__);
@@ -916,8 +915,8 @@ erts_pid2proc_opt(Process *c_p,
#endif
if (!busy) {
- if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
- erts_smp_proc_inc_refc(proc);
+ if (flags & ERTS_P2P_FLG_INC_REFC)
+ erts_proc_inc_refc(proc);
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
/* all is great */
@@ -932,16 +931,16 @@ erts_pid2proc_opt(Process *c_p,
proc = ERTS_PROC_LOCK_BUSY;
else {
int managed;
- if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
- erts_smp_proc_inc_refc(proc);
+ if (flags & ERTS_P2P_FLG_INC_REFC)
+ erts_proc_inc_refc(proc);
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
- erts_lcnt_proc_lock_unaquire(&proc->lock, lcnt_locks);
+ erts_lcnt_proc_lock_unacquire(&proc->lock, lcnt_locks);
#endif
managed = dhndl == ERTS_THR_PRGR_DHANDLE_MANAGED;
if (!managed) {
- erts_smp_proc_inc_refc(proc);
+ erts_proc_inc_refc(proc);
erts_thr_progress_unmanaged_continue(dhndl);
dec_refc_proc = proc;
@@ -976,16 +975,16 @@ erts_pid2proc_opt(Process *c_p,
: (proc
!= (Process *) erts_ptab_pix2intptr_nob(&erts_proc, pix)))) {
- erts_smp_proc_unlock(proc, need_locks);
+ erts_proc_unlock(proc, need_locks);
- if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
+ if (flags & ERTS_P2P_FLG_INC_REFC)
dec_refc_proc = proc;
proc = NULL;
}
if (dec_refc_proc)
- erts_smp_proc_dec_refc(dec_refc_proc);
+ erts_proc_dec_refc(dec_refc_proc);
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_PROC_LOCK_DEBUG)
ERTS_LC_ASSERT(!proc
@@ -998,14 +997,47 @@ erts_pid2proc_opt(Process *c_p,
return proc;
}
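/* Usage sketch (illustrative only, using the names introduced by this patch):
 * look up another process, take its main lock, and keep a counted reference
 * so it cannot be freed while in use:
 *
 *     Process *rp = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN, pid,
 *                                     ERTS_PROC_LOCK_MAIN,
 *                                     ERTS_P2P_FLG_INC_REFC);
 *     if (rp) {
 *         ... rp is locked here ...
 *         erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
 *         erts_proc_dec_refc(rp);
 *     }
 */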
+static ERTS_INLINE
+Process *proc_lookup_inc_refc(Eterm pid, int allow_exit)
+{
+ Process *proc;
+ ErtsThrPrgrDelayHandle dhndl;
+
+ dhndl = erts_thr_progress_unmanaged_delay();
+
+ proc = erts_proc_lookup_raw(pid);
+ if (proc) {
+ if (!allow_exit && ERTS_PROC_IS_EXITING(proc))
+ proc = NULL;
+ else
+ erts_proc_inc_refc(proc);
+ }
+
+ erts_thr_progress_unmanaged_continue(dhndl);
+
+ return proc;
+}
+
+Process *erts_proc_lookup_inc_refc(Eterm pid)
+{
+ return proc_lookup_inc_refc(pid, 0);
+}
+
+Process *erts_proc_lookup_raw_inc_refc(Eterm pid)
+{
+ return proc_lookup_inc_refc(pid, 1);
+}
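/* Usage sketch (illustrative): both wrappers return a process with its
 * reference count bumped, or NULL; the non-raw variant skips exiting
 * processes. The caller must drop the reference when done:
 *
 *     Process *proc = erts_proc_lookup_inc_refc(pid);
 *     if (proc) {
 *         ... read fields that are safe without locks ...
 *         erts_proc_dec_refc(proc);
 *     }
 */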
+
void
erts_proc_lock_init(Process *p)
{
+#if ERTS_PROC_LOCK_OWN_IMPL || defined(ERTS_PROC_LOCK_DEBUG)
int i;
+#endif
#if ERTS_PROC_LOCK_OWN_IMPL
/* We always start with all locks locked */
#if ERTS_PROC_LOCK_ATOMIC_IMPL
- erts_smp_atomic32_init_nob(&p->lock.flags,
+ erts_atomic32_init_nob(&p->lock.flags,
(erts_aint32_t) ERTS_PROC_LOCKS_ALL);
#else
p->lock.flags = ERTS_PROC_LOCKS_ALL;
@@ -1017,38 +1049,46 @@ erts_proc_lock_init(Process *p)
#endif
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
-#ifdef ERTS_ENABLE_LOCK_COUNT
- int do_lock_count = 1;
-#else
- int do_lock_count = 0;
-#endif
-
- erts_mtx_init_x(&p->lock.main, "proc_main", p->common.id, do_lock_count);
+ erts_mtx_init(&p->lock.main, "proc_main", p->common.id,
+ ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
ethr_mutex_lock(&p->lock.main.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.main.lc);
#endif
- erts_mtx_init_x(&p->lock.link, "proc_link", p->common.id, do_lock_count);
+ erts_mtx_init(&p->lock.link, "proc_link", p->common.id,
+ ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
ethr_mutex_lock(&p->lock.link.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.link.lc);
#endif
- erts_mtx_init_x(&p->lock.msgq, "proc_msgq", p->common.id, do_lock_count);
+ erts_mtx_init(&p->lock.msgq, "proc_msgq", p->common.id,
+ ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
ethr_mutex_lock(&p->lock.msgq.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.msgq.lc);
#endif
- erts_mtx_init_x(&p->lock.status, "proc_status", p->common.id,
- do_lock_count);
+ erts_mtx_init(&p->lock.btm, "proc_btm", p->common.id,
+ ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
+ ethr_mutex_lock(&p->lock.btm.mtx);
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_trylock(1, &p->lock.btm.lc);
+#endif
+ erts_mtx_init(&p->lock.status, "proc_status", p->common.id,
+ ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
ethr_mutex_lock(&p->lock.status.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.status.lc);
#endif
+ erts_mtx_init(&p->lock.trace, "proc_trace", p->common.id,
+ ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
+ ethr_mutex_lock(&p->lock.trace.mtx);
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_trylock(1, &p->lock.trace.lc);
+#endif
#endif
- erts_atomic32_init_nob(&p->lock.refc, 1);
#ifdef ERTS_PROC_LOCK_DEBUG
for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++)
- erts_smp_atomic32_init_nob(&p->lock.locked[i], (erts_aint32_t) 1);
+ erts_atomic32_init_nob(&p->lock.locked[i], (erts_aint32_t) 1);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_proc_lock_init(p);
@@ -1064,9 +1104,11 @@ erts_proc_lock_fin(Process *p)
erts_mtx_destroy(&p->lock.main);
erts_mtx_destroy(&p->lock.link);
erts_mtx_destroy(&p->lock.msgq);
+ erts_mtx_destroy(&p->lock.btm);
erts_mtx_destroy(&p->lock.status);
+ erts_mtx_destroy(&p->lock.trace);
#endif
-#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP)
+#if defined(ERTS_ENABLE_LOCK_COUNT)
erts_lcnt_proc_lock_destroy(p);
#endif
}
@@ -1074,141 +1116,72 @@ erts_proc_lock_fin(Process *p)
/* --- Process lock counting ----------------------------------------------- */
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
+
void erts_lcnt_proc_lock_init(Process *p) {
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK) {
- if (p->common.id != ERTS_INVALID_PID) {
- erts_lcnt_init_lock_x(&(p->lock.lcnt_main), "proc_main", ERTS_LCNT_LT_PROCLOCK, p->common.id);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_msgq), "proc_msgq", ERTS_LCNT_LT_PROCLOCK, p->common.id);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_link), "proc_link", ERTS_LCNT_LT_PROCLOCK, p->common.id);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_status), "proc_status", ERTS_LCNT_LT_PROCLOCK, p->common.id);
- } else {
- erts_lcnt_init_lock(&(p->lock.lcnt_main), "proc_main", ERTS_LCNT_LT_PROCLOCK);
- erts_lcnt_init_lock(&(p->lock.lcnt_msgq), "proc_msgq", ERTS_LCNT_LT_PROCLOCK);
- erts_lcnt_init_lock(&(p->lock.lcnt_link), "proc_link", ERTS_LCNT_LT_PROCLOCK);
- erts_lcnt_init_lock(&(p->lock.lcnt_status), "proc_status", ERTS_LCNT_LT_PROCLOCK);
- }
- } else {
- sys_memzero(&(p->lock.lcnt_main), sizeof(p->lock.lcnt_main));
- sys_memzero(&(p->lock.lcnt_msgq), sizeof(p->lock.lcnt_msgq));
- sys_memzero(&(p->lock.lcnt_link), sizeof(p->lock.lcnt_link));
- sys_memzero(&(p->lock.lcnt_status), sizeof(p->lock.lcnt_status));
+ erts_lcnt_init_ref(&p->lock.lcnt_carrier);
+
+ if(erts_lcnt_check_enabled(ERTS_LOCK_FLAGS_CATEGORY_PROCESS)) {
+ erts_lcnt_enable_proc_lock_count(p, 1);
}
-}
-
+}
void erts_lcnt_proc_lock_destroy(Process *p) {
- erts_lcnt_destroy_lock(&(p->lock.lcnt_main));
- erts_lcnt_destroy_lock(&(p->lock.lcnt_msgq));
- erts_lcnt_destroy_lock(&(p->lock.lcnt_link));
- erts_lcnt_destroy_lock(&(p->lock.lcnt_status));
+ erts_lcnt_uninstall(&p->lock.lcnt_carrier);
}
-void erts_lcnt_proc_lock(erts_proc_lock_t *lock, ErtsProcLocks locks) {
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK) {
- if (locks & ERTS_PROC_LOCK_MAIN) {
- erts_lcnt_lock(&(lock->lcnt_main));
- }
- if (locks & ERTS_PROC_LOCK_MSGQ) {
- erts_lcnt_lock(&(lock->lcnt_msgq));
- }
- if (locks & ERTS_PROC_LOCK_LINK) {
- erts_lcnt_lock(&(lock->lcnt_link));
- }
- if (locks & ERTS_PROC_LOCK_STATUS) {
- erts_lcnt_lock(&(lock->lcnt_status));
- }
+void erts_lcnt_enable_proc_lock_count(Process *proc, int enable) {
+ if(proc->common.id == ERTS_INVALID_PID) {
+ /* Locks without an id are more trouble than they're worth; there's no
+ * way to look them up and we can't track them with _STATIC since it's
+ * too early to tell whether we're a system process (proc->static_flags
+ * hasn't been set yet). */
+ } else if(!enable) {
+ erts_lcnt_proc_lock_destroy(proc);
+ } else if(!erts_lcnt_check_ref_installed(&proc->lock.lcnt_carrier)) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+
+ carrier = erts_lcnt_create_lock_info_carrier(ERTS_LCNT_PROCLOCK_COUNT);
+
+ erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN,
+ "proc_main", proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
+ erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_LINK,
+ "proc_link", proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
+ erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ,
+ "proc_msgq", proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
+ erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM,
+ "proc_btm", proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
+ erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS,
+ "proc_status",proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
+ erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE,
+ "proc_trace", proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
+
+ erts_lcnt_install(&proc->lock.lcnt_carrier, carrier);
}
}
-void erts_lcnt_proc_lock_post_x(erts_proc_lock_t *lock, ErtsProcLocks locks, char *file, unsigned int line) {
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK) {
- if (locks & ERTS_PROC_LOCK_MAIN) {
- erts_lcnt_lock_post_x(&(lock->lcnt_main), file, line);
- }
- if (locks & ERTS_PROC_LOCK_MSGQ) {
- erts_lcnt_lock_post_x(&(lock->lcnt_msgq), file, line);
- }
- if (locks & ERTS_PROC_LOCK_LINK) {
- erts_lcnt_lock_post_x(&(lock->lcnt_link), file, line);
- }
- if (locks & ERTS_PROC_LOCK_STATUS) {
- erts_lcnt_lock_post_x(&(lock->lcnt_status), file, line);
- }
- }
-}
+void erts_lcnt_update_process_locks(int enable) {
+ int i, max;
-void erts_lcnt_proc_lock_unaquire(erts_proc_lock_t *lock, ErtsProcLocks locks) {
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK) {
- if (locks & ERTS_PROC_LOCK_MAIN) {
- erts_lcnt_lock_unaquire(&(lock->lcnt_main));
- }
- if (locks & ERTS_PROC_LOCK_MSGQ) {
- erts_lcnt_lock_unaquire(&(lock->lcnt_msgq));
- }
- if (locks & ERTS_PROC_LOCK_LINK) {
- erts_lcnt_lock_unaquire(&(lock->lcnt_link));
- }
- if (locks & ERTS_PROC_LOCK_STATUS) {
- erts_lcnt_lock_unaquire(&(lock->lcnt_status));
- }
- }
-}
+ max = erts_ptab_max(&erts_proc);
-void erts_lcnt_proc_unlock(erts_proc_lock_t *lock, ErtsProcLocks locks) {
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK) {
- if (locks & ERTS_PROC_LOCK_MAIN) {
- erts_lcnt_unlock(&(lock->lcnt_main));
- }
- if (locks & ERTS_PROC_LOCK_MSGQ) {
- erts_lcnt_unlock(&(lock->lcnt_msgq));
- }
- if (locks & ERTS_PROC_LOCK_LINK) {
- erts_lcnt_unlock(&(lock->lcnt_link));
- }
- if (locks & ERTS_PROC_LOCK_STATUS) {
- erts_lcnt_unlock(&(lock->lcnt_status));
- }
- }
-}
-void erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res) {
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK) {
- if (locks & ERTS_PROC_LOCK_MAIN) {
- erts_lcnt_trylock(&(lock->lcnt_main), res);
- }
- if (locks & ERTS_PROC_LOCK_MSGQ) {
- erts_lcnt_trylock(&(lock->lcnt_msgq), res);
- }
- if (locks & ERTS_PROC_LOCK_LINK) {
- erts_lcnt_trylock(&(lock->lcnt_link), res);
- }
- if (locks & ERTS_PROC_LOCK_STATUS) {
- erts_lcnt_trylock(&(lock->lcnt_status), res);
- }
- }
-}
+ for(i = 0; i < max; i++) {
+ int delay_handle;
+ Process *proc;
+ delay_handle = erts_thr_progress_unmanaged_delay();
+ proc = erts_pix2proc(i);
-void erts_lcnt_enable_proc_lock_count(int enable)
-{
- int i, max = erts_ptab_max(&erts_proc);
-
- for (i = 0; i < max; ++i) {
- Process* p = erts_pix2proc(i);
- if (p) {
- if (enable) {
- if (!ERTS_LCNT_LOCK_TYPE(&(p->lock.lcnt_main))) {
- erts_lcnt_proc_lock_init(p);
- }
- } else {
- if (ERTS_LCNT_LOCK_TYPE(&(p->lock.lcnt_main))) {
- erts_lcnt_proc_lock_destroy(p);
- }
- }
- }
+ if(proc != NULL) {
+ erts_lcnt_enable_proc_lock_count(proc, enable);
+ }
+
+ if(delay_handle != ERTS_THR_PRGR_DHANDLE_MANAGED) {
+ erts_thr_progress_unmanaged_continue(delay_handle);
+ }
}
}
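/* Editorial note: the unmanaged-delay handle in the loop above blocks thread
 * progress so a process slot cannot be freed while it is inspected. A minimal
 * sketch of the same pairing, under the usual assumptions:
 *
 *     int dhndl = erts_thr_progress_unmanaged_delay();
 *     ... touch a Process that might otherwise be freed ...
 *     if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
 *         erts_thr_progress_unmanaged_continue(dhndl);
 */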
-#endif /* ifdef ERTS_ENABLE_LOCK_COUNT */
+#endif /* ERTS_ENABLE_LOCK_COUNT */
/* --- Process lock checking ----------------------------------------------- */
@@ -1222,7 +1195,7 @@ erts_proc_lc_lock(Process *p, ErtsProcLocks locks, char *file, unsigned int line
{
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
erts_lc_lock_x(&lck,file,line);
@@ -1235,10 +1208,18 @@ erts_proc_lc_lock(Process *p, ErtsProcLocks locks, char *file, unsigned int line
lck.id = lc_id.proc_lock_msgq;
erts_lc_lock_x(&lck,file,line);
}
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ lck.id = lc_id.proc_lock_btm;
+ erts_lc_lock_x(&lck,file,line);
+ }
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
erts_lc_lock_x(&lck,file,line);
}
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ lck.id = lc_id.proc_lock_trace;
+ erts_lc_lock_x(&lck,file,line);
+ }
}
void
@@ -1247,7 +1228,7 @@ erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked,
{
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
erts_lc_trylock_x(locked, &lck, file, line);
@@ -1260,10 +1241,18 @@ erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked,
lck.id = lc_id.proc_lock_msgq;
erts_lc_trylock_x(locked, &lck, file, line);
}
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ lck.id = lc_id.proc_lock_btm;
+ erts_lc_trylock_x(locked, &lck, file, line);
+ }
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
erts_lc_trylock_x(locked, &lck, file, line);
}
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ lck.id = lc_id.proc_lock_trace;
+ erts_lc_trylock_x(locked, &lck, file, line);
+ }
}
void
@@ -1271,11 +1260,19 @@ erts_proc_lc_unlock(Process *p, ErtsProcLocks locks)
{
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ lck.id = lc_id.proc_lock_trace;
+ erts_lc_unlock(&lck);
+ }
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
erts_lc_unlock(&lck);
}
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ lck.id = lc_id.proc_lock_btm;
+ erts_lc_unlock(&lck);
+ }
if (locks & ERTS_PROC_LOCK_MSGQ) {
lck.id = lc_id.proc_lock_msgq;
erts_lc_unlock(&lck);
@@ -1298,11 +1295,19 @@ erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks)
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ lck.id = lc_id.proc_lock_trace;
+ erts_lc_might_unlock(&lck);
+ }
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
erts_lc_might_unlock(&lck);
}
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ lck.id = lc_id.proc_lock_btm;
+ erts_lc_might_unlock(&lck);
+ }
if (locks & ERTS_PROC_LOCK_MSGQ) {
lck.id = lc_id.proc_lock_msgq;
erts_lc_might_unlock(&lck);
@@ -1322,8 +1327,12 @@ erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks)
erts_lc_might_unlock(&p->lock.link.lc);
if (locks & ERTS_PROC_LOCK_MSGQ)
erts_lc_might_unlock(&p->lock.msgq.lc);
+ if (locks & ERTS_PROC_LOCK_BTM)
+ erts_lc_might_unlock(&p->lock.btm.lc);
if (locks & ERTS_PROC_LOCK_STATUS)
erts_lc_might_unlock(&p->lock.status.lc);
+ if (locks & ERTS_PROC_LOCK_TRACE)
+ erts_lc_might_unlock(&p->lock.trace.lc);
#endif
}
@@ -1334,7 +1343,7 @@ erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks, char *file,
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
erts_lc_require_lock(&lck, file, line);
@@ -1347,10 +1356,18 @@ erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks, char *file,
lck.id = lc_id.proc_lock_msgq;
erts_lc_require_lock(&lck, file, line);
}
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ lck.id = lc_id.proc_lock_btm;
+ erts_lc_require_lock(&lck, file, line);
+ }
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
erts_lc_require_lock(&lck, file, line);
}
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ lck.id = lc_id.proc_lock_trace;
+ erts_lc_require_lock(&lck, file, line);
+ }
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
if (locks & ERTS_PROC_LOCK_MAIN)
erts_lc_require_lock(&p->lock.main.lc, file, line);
@@ -1358,8 +1375,12 @@ erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks, char *file,
erts_lc_require_lock(&p->lock.link.lc, file, line);
if (locks & ERTS_PROC_LOCK_MSGQ)
erts_lc_require_lock(&p->lock.msgq.lc, file, line);
+ if (locks & ERTS_PROC_LOCK_BTM)
+ erts_lc_require_lock(&p->lock.btm.lc, file, line);
if (locks & ERTS_PROC_LOCK_STATUS)
erts_lc_require_lock(&p->lock.status.lc, file, line);
+ if (locks & ERTS_PROC_LOCK_TRACE)
+ erts_lc_require_lock(&p->lock.trace.lc, file, line);
#endif
}
@@ -1369,11 +1390,19 @@ erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks)
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ lck.id = lc_id.proc_lock_trace;
+ erts_lc_unrequire_lock(&lck);
+ }
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
erts_lc_unrequire_lock(&lck);
}
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ lck.id = lc_id.proc_lock_btm;
+ erts_lc_unrequire_lock(&lck);
+ }
if (locks & ERTS_PROC_LOCK_MSGQ) {
lck.id = lc_id.proc_lock_msgq;
erts_lc_unrequire_lock(&lck);
@@ -1393,8 +1422,12 @@ erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks)
erts_lc_unrequire_lock(&p->lock.link.lc);
if (locks & ERTS_PROC_LOCK_MSGQ)
erts_lc_unrequire_lock(&p->lock.msgq.lc);
+ if (locks & ERTS_PROC_LOCK_BTM)
+ erts_lc_unrequire_lock(&p->lock.btm.lc);
if (locks & ERTS_PROC_LOCK_STATUS)
erts_lc_unrequire_lock(&p->lock.status.lc);
+ if (locks & ERTS_PROC_LOCK_TRACE)
+ erts_lc_unrequire_lock(&p->lock.trace.lc);
#endif
}
@@ -1406,7 +1439,7 @@ erts_proc_lc_trylock_force_busy(Process *p, ErtsProcLocks locks)
if (locks & ERTS_PROC_LOCKS_ALL) {
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN)
lck.id = lc_id.proc_lock_main;
@@ -1414,8 +1447,12 @@ erts_proc_lc_trylock_force_busy(Process *p, ErtsProcLocks locks)
lck.id = lc_id.proc_lock_link;
else if (locks & ERTS_PROC_LOCK_MSGQ)
lck.id = lc_id.proc_lock_msgq;
+ else if (locks & ERTS_PROC_LOCK_BTM)
+ lck.id = lc_id.proc_lock_btm;
else if (locks & ERTS_PROC_LOCK_STATUS)
lck.id = lc_id.proc_lock_status;
+ else if (locks & ERTS_PROC_LOCK_TRACE)
+ lck.id = lc_id.proc_lock_trace;
else
erts_lc_fail("Unknown proc lock found");
@@ -1428,29 +1465,76 @@ erts_proc_lc_trylock_force_busy(Process *p, ErtsProcLocks locks)
void erts_proc_lc_chk_only_proc_main(Process *p)
{
-#if ERTS_PROC_LOCK_OWN_IMPL
- erts_lc_lock_t proc_main = ERTS_LC_LOCK_INIT(lc_id.proc_lock_main,
- p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
- erts_lc_check_exact(&proc_main, 1);
-#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
- erts_lc_check_exact(&p->lock.main.lc, 1);
-#endif
+ erts_proc_lc_chk_only_proc(p, ERTS_PROC_LOCK_MAIN);
}
#if ERTS_PROC_LOCK_OWN_IMPL
#define ERTS_PROC_LC_EMPTY_LOCK_INIT \
- ERTS_LC_LOCK_INIT(-1, THE_NON_VALUE, ERTS_LC_FLG_LT_PROCLOCK)
+ ERTS_LC_LOCK_INIT(-1, THE_NON_VALUE, ERTS_LOCK_TYPE_PROCLOCK)
#endif /* ERTS_PROC_LOCK_OWN_IMPL */
+void erts_proc_lc_chk_only_proc(Process *p, ErtsProcLocks locks)
+{
+ int have_locks_len = 0;
+#if ERTS_PROC_LOCK_OWN_IMPL
+ erts_lc_lock_t have_locks[6] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT};
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_main;
+ have_locks[have_locks_len++].extra = p->common.id;
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_link;
+ have_locks[have_locks_len++].extra = p->common.id;
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_msgq;
+ have_locks[have_locks_len++].extra = p->common.id;
+ }
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_btm;
+ have_locks[have_locks_len++].extra = p->common.id;
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_status;
+ have_locks[have_locks_len++].extra = p->common.id;
+ }
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_trace;
+ have_locks[have_locks_len++].extra = p->common.id;
+ }
+#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
+ erts_lc_lock_t have_locks[6];
+ if (locks & ERTS_PROC_LOCK_MAIN)
+ have_locks[have_locks_len++] = p->lock.main.lc;
+ if (locks & ERTS_PROC_LOCK_LINK)
+ have_locks[have_locks_len++] = p->lock.link.lc;
+ if (locks & ERTS_PROC_LOCK_MSGQ)
+ have_locks[have_locks_len++] = p->lock.msgq.lc;
+ if (locks & ERTS_PROC_LOCK_BTM)
+ have_locks[have_locks_len++] = p->lock.btm.lc;
+ if (locks & ERTS_PROC_LOCK_STATUS)
+ have_locks[have_locks_len++] = p->lock.status.lc;
+ if (locks & ERTS_PROC_LOCK_TRACE)
+ have_locks[have_locks_len++] = p->lock.trace.lc;
+#endif
+ erts_lc_check_exact(have_locks, have_locks_len);
+}
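/* Usage sketch (illustrative): assert that the calling thread holds exactly
 * the given locks on p and no other checked locks:
 *
 *     erts_proc_lc_chk_only_proc(p, ERTS_PROC_LOCK_MAIN
 *                                   | ERTS_PROC_LOCK_MSGQ);
 */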
+
void
erts_proc_lc_chk_have_proc_locks(Process *p, ErtsProcLocks locks)
{
int have_locks_len = 0;
#if ERTS_PROC_LOCK_OWN_IMPL
- erts_lc_lock_t have_locks[4] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ erts_lc_lock_t have_locks[6] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT};
if (locks & ERTS_PROC_LOCK_MAIN) {
have_locks[have_locks_len].id = lc_id.proc_lock_main;
@@ -1464,20 +1548,32 @@ erts_proc_lc_chk_have_proc_locks(Process *p, ErtsProcLocks locks)
have_locks[have_locks_len].id = lc_id.proc_lock_msgq;
have_locks[have_locks_len++].extra = p->common.id;
}
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_btm;
+ have_locks[have_locks_len++].extra = p->common.id;
+ }
if (locks & ERTS_PROC_LOCK_STATUS) {
have_locks[have_locks_len].id = lc_id.proc_lock_status;
have_locks[have_locks_len++].extra = p->common.id;
}
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_trace;
+ have_locks[have_locks_len++].extra = p->common.id;
+ }
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
- erts_lc_lock_t have_locks[4];
+ erts_lc_lock_t have_locks[6];
if (locks & ERTS_PROC_LOCK_MAIN)
have_locks[have_locks_len++] = p->lock.main.lc;
if (locks & ERTS_PROC_LOCK_LINK)
have_locks[have_locks_len++] = p->lock.link.lc;
if (locks & ERTS_PROC_LOCK_MSGQ)
have_locks[have_locks_len++] = p->lock.msgq.lc;
+ if (locks & ERTS_PROC_LOCK_BTM)
+ have_locks[have_locks_len++] = p->lock.btm.lc;
if (locks & ERTS_PROC_LOCK_STATUS)
have_locks[have_locks_len++] = p->lock.status.lc;
+ if (locks & ERTS_PROC_LOCK_TRACE)
+ have_locks[have_locks_len++] = p->lock.trace.lc;
#endif
erts_lc_check(have_locks, have_locks_len, NULL, 0);
}
@@ -1488,12 +1584,14 @@ erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks)
int have_locks_len = 0;
int have_not_locks_len = 0;
#if ERTS_PROC_LOCK_OWN_IMPL
- erts_lc_lock_t have_locks[4] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ erts_lc_lock_t have_locks[6] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT};
- erts_lc_lock_t have_not_locks[4] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ erts_lc_lock_t have_not_locks[6] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT};
@@ -1521,6 +1619,14 @@ erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks)
have_not_locks[have_not_locks_len].id = lc_id.proc_lock_msgq;
have_not_locks[have_not_locks_len++].extra = p->common.id;
}
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_btm;
+ have_locks[have_locks_len++].extra = p->common.id;
+ }
+ else {
+ have_not_locks[have_not_locks_len].id = lc_id.proc_lock_btm;
+ have_not_locks[have_not_locks_len++].extra = p->common.id;
+ }
if (locks & ERTS_PROC_LOCK_STATUS) {
have_locks[have_locks_len].id = lc_id.proc_lock_status;
have_locks[have_locks_len++].extra = p->common.id;
@@ -1529,9 +1635,17 @@ erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks)
have_not_locks[have_not_locks_len].id = lc_id.proc_lock_status;
have_not_locks[have_not_locks_len++].extra = p->common.id;
}
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_trace;
+ have_locks[have_locks_len++].extra = p->common.id;
+ }
+ else {
+ have_not_locks[have_not_locks_len].id = lc_id.proc_lock_trace;
+ have_not_locks[have_not_locks_len++].extra = p->common.id;
+ }
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
- erts_lc_lock_t have_locks[4];
- erts_lc_lock_t have_not_locks[4];
+ erts_lc_lock_t have_locks[6];
+ erts_lc_lock_t have_not_locks[6];
if (locks & ERTS_PROC_LOCK_MAIN)
have_locks[have_locks_len++] = p->lock.main.lc;
@@ -1545,10 +1659,18 @@ erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks)
have_locks[have_locks_len++] = p->lock.msgq.lc;
else
have_not_locks[have_not_locks_len++] = p->lock.msgq.lc;
+ if (locks & ERTS_PROC_LOCK_BTM)
+ have_locks[have_locks_len++] = p->lock.btm.lc;
+ else
+ have_not_locks[have_not_locks_len++] = p->lock.btm.lc;
if (locks & ERTS_PROC_LOCK_STATUS)
have_locks[have_locks_len++] = p->lock.status.lc;
else
have_not_locks[have_not_locks_len++] = p->lock.status.lc;
+ if (locks & ERTS_PROC_LOCK_TRACE)
+ have_locks[have_locks_len++] = p->lock.trace.lc;
+ else
+ have_not_locks[have_not_locks_len++] = p->lock.trace.lc;
#endif
erts_lc_check(have_locks, have_locks_len,
@@ -1558,29 +1680,37 @@ erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks)
ErtsProcLocks
erts_proc_lc_my_proc_locks(Process *p)
{
- int resv[4];
+ int resv[6];
ErtsProcLocks res = 0;
#if ERTS_PROC_LOCK_OWN_IMPL
- erts_lc_lock_t locks[4] = {ERTS_LC_LOCK_INIT(lc_id.proc_lock_main,
+ erts_lc_lock_t locks[6] = {ERTS_LC_LOCK_INIT(lc_id.proc_lock_main,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK),
+ ERTS_LOCK_TYPE_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_link,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK),
+ ERTS_LOCK_TYPE_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_msgq,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK),
+ ERTS_LOCK_TYPE_PROCLOCK),
+ ERTS_LC_LOCK_INIT(lc_id.proc_lock_btm,
+ p->common.id,
+ ERTS_LOCK_TYPE_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_status,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK)};
+ ERTS_LOCK_TYPE_PROCLOCK),
+ ERTS_LC_LOCK_INIT(lc_id.proc_lock_trace,
+ p->common.id,
+ ERTS_LOCK_TYPE_PROCLOCK)};
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
- erts_lc_lock_t locks[4] = {p->lock.main.lc,
+ erts_lc_lock_t locks[6] = {p->lock.main.lc,
p->lock.link.lc,
p->lock.msgq.lc,
- p->lock.status.lc};
+ p->lock.btm.lc,
+ p->lock.status.lc,
+ p->lock.trace.lc};
#endif
- erts_lc_have_locks(resv, locks, 4);
+ erts_lc_have_locks(resv, locks, 6);
if (resv[0])
res |= ERTS_PROC_LOCK_MAIN;
if (resv[1])
@@ -1588,7 +1718,11 @@ erts_proc_lc_my_proc_locks(Process *p)
if (resv[2])
res |= ERTS_PROC_LOCK_MSGQ;
if (resv[3])
+ res |= ERTS_PROC_LOCK_BTM;
+ if (resv[4])
res |= ERTS_PROC_LOCK_STATUS;
+ if (resv[5])
+ res |= ERTS_PROC_LOCK_TRACE;
return res;
}
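/* Usage sketch (illustrative): the result is a bit mask, so callers can
 * assert a precise lock state:
 *
 *     ASSERT(erts_proc_lc_my_proc_locks(p) == ERTS_PROC_LOCK_MAIN);
 */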
@@ -1596,13 +1730,15 @@ erts_proc_lc_my_proc_locks(Process *p)
void
erts_proc_lc_chk_no_proc_locks(char *file, int line)
{
- int resv[4];
- int ids[4] = {lc_id.proc_lock_main,
+ int resv[6];
+ int ids[6] = {lc_id.proc_lock_main,
lc_id.proc_lock_link,
lc_id.proc_lock_msgq,
- lc_id.proc_lock_status};
- erts_lc_have_lock_ids(resv, ids, 4);
- if (!ERTS_IS_CRASH_DUMPING && (resv[0] || resv[1] || resv[2] || resv[3])) {
+ lc_id.proc_lock_btm,
+ lc_id.proc_lock_status,
+ lc_id.proc_lock_trace};
+ erts_lc_have_lock_ids(resv, ids, 6);
+ if (!ERTS_IS_CRASH_DUMPING && (resv[0] || resv[1] || resv[2] || resv[3] || resv[4] || resv[5])) {
erts_lc_fail("%s:%d: Thread has process locks locked when expected "
"not to have any process locks locked",
file, line);
@@ -1644,4 +1780,3 @@ check_queue(erts_proc_lock_t *lck)
}
#endif
-#endif /* ERTS_SMP */
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index 052d992d3f..9d5691d3c4 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2007-2012. All Rights Reserved.
+ * Copyright Ericsson AB 2007-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -35,7 +36,7 @@
#include "erl_lock_count.h"
#endif
-#include "erl_smp.h"
+#include "erl_threads.h"
#if defined(VALGRIND) || defined(ETHR_DISABLE_NATIVE_IMPLS)
# define ERTS_PROC_LOCK_OWN_IMPL 0
@@ -65,35 +66,44 @@
#endif
-#define ERTS_PROC_LOCK_MAX_BIT 3
+#define ERTS_PROC_LOCK_MAX_BIT 5
typedef erts_aint32_t ErtsProcLocks;
typedef struct erts_proc_lock_t_ {
#if ERTS_PROC_LOCK_OWN_IMPL
#if ERTS_PROC_LOCK_ATOMIC_IMPL
- erts_smp_atomic32_t flags;
+ erts_atomic32_t flags;
#else
ErtsProcLocks flags;
#endif
erts_tse_t *queue[ERTS_PROC_LOCK_MAX_BIT+1];
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_t lcnt_main;
- erts_lcnt_lock_t lcnt_link;
- erts_lcnt_lock_t lcnt_msgq;
- erts_lcnt_lock_t lcnt_status;
+#if defined(ERTS_ENABLE_LOCK_COUNT) && !ERTS_PROC_LOCK_RAW_MUTEX_IMPL
+    /* Excluded when ERTS_PROC_LOCK_RAW_MUTEX_IMPL is set: each erts_mtx_t
+     * then carries its own lock counter, so no carrier is needed. */
+
+ #define ERTS_LCNT_PROCLOCK_IDX_MAIN 0
+ #define ERTS_LCNT_PROCLOCK_IDX_LINK 1
+ #define ERTS_LCNT_PROCLOCK_IDX_MSGQ 2
+ #define ERTS_LCNT_PROCLOCK_IDX_BTM 3
+ #define ERTS_LCNT_PROCLOCK_IDX_STATUS 4
+ #define ERTS_LCNT_PROCLOCK_IDX_TRACE 5
+
+ #define ERTS_LCNT_PROCLOCK_COUNT 6
+
+ erts_lcnt_ref_t lcnt_carrier;
#endif
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
erts_mtx_t main;
erts_mtx_t link;
erts_mtx_t msgq;
+ erts_mtx_t btm;
erts_mtx_t status;
+ erts_mtx_t trace;
#else
# error "no implementation"
#endif
- erts_atomic32_t refc;
#ifdef ERTS_PROC_LOCK_DEBUG
- erts_smp_atomic32_t locked[ERTS_PROC_LOCK_MAX_BIT+1];
+ erts_atomic32_t locked[ERTS_PROC_LOCK_MAX_BIT+1];
#endif
} erts_proc_lock_t;
@@ -120,18 +130,33 @@ typedef struct erts_proc_lock_t_ {
* Message queue lock:
* Protects the following fields in the process structure:
* * msg_inq
- * * bif_timers
*/
#define ERTS_PROC_LOCK_MSGQ (((ErtsProcLocks) 1) << 2)
/*
+ * Bif timer lock:
+ * Protects the following fields in the process structure:
+ * * bif_timers
+ */
+#define ERTS_PROC_LOCK_BTM (((ErtsProcLocks) 1) << 3)
+
+/*
* Status lock:
* Protects the following fields in the process structure:
* * pending_suspenders
* * suspendee
+ * * sys_tasks
* * ...
*/
-#define ERTS_PROC_LOCK_STATUS (((ErtsProcLocks) 1) << ERTS_PROC_LOCK_MAX_BIT)
+#define ERTS_PROC_LOCK_STATUS (((ErtsProcLocks) 1) << 4)
+
+/*
+ * Trace message lock:
+ * Protects the order in which messages are sent
+ * from trace NIFs. This lock is taken inside enif_send.
+ *
+ */
+#define ERTS_PROC_LOCK_TRACE (((ErtsProcLocks) 1) << ERTS_PROC_LOCK_MAX_BIT)
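/* Resulting bit layout (editorial summary; the MAIN and LINK bits are not
 * shown in this hunk and are assumed from context):
 *
 *     ERTS_PROC_LOCK_MAIN   == 1 << 0
 *     ERTS_PROC_LOCK_LINK   == 1 << 1
 *     ERTS_PROC_LOCK_MSGQ   == 1 << 2
 *     ERTS_PROC_LOCK_BTM    == 1 << 3
 *     ERTS_PROC_LOCK_STATUS == 1 << 4
 *     ERTS_PROC_LOCK_TRACE  == 1 << 5   (the new ERTS_PROC_LOCK_MAX_BIT)
 */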
/*
* Special fields:
@@ -146,8 +171,8 @@ typedef struct erts_proc_lock_t_ {
* all process locks are held, and are allowed to be read if
* at least one process lock (whichever one doesn't matter)
* is held:
- * * tracer_proc
- * * tracer_flags
+ * * common.tracer
+ * * common.trace_flags
*
* The following fields are only allowed to be accessed if
* both the schedule queue lock and at least one process lock
@@ -190,17 +215,19 @@ typedef struct erts_proc_lock_t_ {
/* ERTS_PROC_LOCKS_* are combinations of process locks */
-#define ERTS_PROC_LOCKS_MSG_RECEIVE (ERTS_PROC_LOCK_MSGQ \
- | ERTS_PROC_LOCK_STATUS)
-#define ERTS_PROC_LOCKS_MSG_SEND (ERTS_PROC_LOCK_MSGQ \
- | ERTS_PROC_LOCK_STATUS)
+#define ERTS_PROC_LOCKS_MSG_RECEIVE ERTS_PROC_LOCK_MSGQ
+#define ERTS_PROC_LOCKS_MSG_SEND ERTS_PROC_LOCK_MSGQ
#define ERTS_PROC_LOCKS_XSIG_SEND ERTS_PROC_LOCK_STATUS
#define ERTS_PROC_LOCKS_ALL \
((((ErtsProcLocks) 1) << (ERTS_PROC_LOCK_MAX_BIT + 1)) - 1)
#define ERTS_PROC_LOCKS_ALL_MINOR (ERTS_PROC_LOCKS_ALL \
- & ~ERTS_PROC_LOCK_MAIN)
+ & ~ERTS_PROC_LOCK_MAIN)
+
+/* All locks we first must unlock to lock L */
+#define ERTS_PROC_LOCKS_HIGHER_THAN(L) \
+ (ERTS_PROC_LOCKS_ALL & (~(L) & ~((L)-1)))
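/* Worked example (illustrative): for L = ERTS_PROC_LOCK_BTM (1 << 3),
 * (L)-1 sets every lower bit, so ~(L) & ~((L)-1) keeps only the bits
 * above L:
 *
 *     ERTS_PROC_LOCKS_HIGHER_THAN(ERTS_PROC_LOCK_BTM)
 *         == ERTS_PROC_LOCK_STATUS | ERTS_PROC_LOCK_TRACE
 */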
#define ERTS_PIX_LOCKS_BITS 10
@@ -216,32 +243,188 @@ typedef struct erts_proc_lock_t_ {
/* Lock counter implementation */
#ifdef ERTS_ENABLE_LOCK_POSITION
-#define erts_smp_proc_lock__(P,I,L) erts_smp_proc_lock_x__(P,I,L,__FILE__,__LINE__)
-#define erts_smp_proc_lock(P,L) erts_smp_proc_lock_x(P,L,__FILE__,__LINE__)
+#define erts_proc_lock__(P,I,L) erts_proc_lock_x__(P,I,L,__FILE__,__LINE__)
+#define erts_proc_lock(P,L) erts_proc_lock_x(P,L,__FILE__,__LINE__)
#endif
-#if defined(ERTS_SMP) && defined (ERTS_ENABLE_LOCK_COUNT)
+#if defined (ERTS_ENABLE_LOCK_COUNT)
void erts_lcnt_proc_lock_init(Process *p);
void erts_lcnt_proc_lock_destroy(Process *p);
+
+ERTS_GLB_INLINE
void erts_lcnt_proc_lock(erts_proc_lock_t *lock, ErtsProcLocks locks);
+ERTS_GLB_INLINE
void erts_lcnt_proc_lock_post_x(erts_proc_lock_t *lock, ErtsProcLocks locks, char *file, unsigned int line);
-void erts_lcnt_proc_lock_unaquire(erts_proc_lock_t *lock, ErtsProcLocks locks);
+ERTS_GLB_INLINE
+void erts_lcnt_proc_lock_unacquire(erts_proc_lock_t *lock, ErtsProcLocks locks);
+ERTS_GLB_INLINE
void erts_lcnt_proc_unlock(erts_proc_lock_t *lock, ErtsProcLocks locks);
+ERTS_GLB_INLINE
void erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res);
-void erts_lcnt_enable_proc_lock_count(int enable);
+void erts_lcnt_enable_proc_lock_count(Process *proc, int enable);
+void erts_lcnt_update_process_locks(int enable);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE
+void erts_lcnt_proc_lock(erts_proc_lock_t *lock, ErtsProcLocks locks) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(&lock->lcnt_carrier, &handle, &carrier)) {
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_lcnt_lock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN);
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ erts_lcnt_lock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_LINK);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ erts_lcnt_lock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ);
+ }
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ erts_lcnt_lock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM);
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ erts_lcnt_lock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS);
+ }
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ erts_lcnt_lock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE);
+ }
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
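/* Editorial note: the inline counters in this block all share one shape; a
 * condensed sketch of the open/close protocol they rely on:
 *
 *     erts_lcnt_lock_info_carrier_t *carrier;
 *     int handle;
 *     if (erts_lcnt_open_ref(&lock->lcnt_carrier, &handle, &carrier)) {
 *         ... update the per-index counters for the requested locks ...
 *         erts_lcnt_close_ref(handle, carrier);
 *     }
 */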
+
+ERTS_GLB_INLINE
+void erts_lcnt_proc_lock_post_x(erts_proc_lock_t *lock, ErtsProcLocks locks,
+ char *file, unsigned int line) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(&lock->lcnt_carrier, &handle, &carrier)) {
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_lcnt_lock_post_x_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN, file, line);
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ erts_lcnt_lock_post_x_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_LINK, file, line);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ erts_lcnt_lock_post_x_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ, file, line);
+ }
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ erts_lcnt_lock_post_x_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM, file, line);
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ erts_lcnt_lock_post_x_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS, file, line);
+ }
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ erts_lcnt_lock_post_x_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE, file, line);
+ }
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_proc_lock_unacquire(erts_proc_lock_t *lock, ErtsProcLocks locks) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(&lock->lcnt_carrier, &handle, &carrier)) {
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_lcnt_lock_unacquire_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN);
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ erts_lcnt_lock_unacquire_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_LINK);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ erts_lcnt_lock_unacquire_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ);
+ }
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ erts_lcnt_lock_unacquire_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM);
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ erts_lcnt_lock_unacquire_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS);
+ }
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ erts_lcnt_lock_unacquire_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE);
+ }
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_proc_unlock(erts_proc_lock_t *lock, ErtsProcLocks locks) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(&lock->lcnt_carrier, &handle, &carrier)) {
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_lcnt_unlock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN);
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ erts_lcnt_unlock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_LINK);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ erts_lcnt_unlock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ);
+ }
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ erts_lcnt_unlock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM);
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ erts_lcnt_unlock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS);
+ }
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ erts_lcnt_unlock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE);
+ }
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(&lock->lcnt_carrier, &handle, &carrier)) {
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_lcnt_trylock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN, res);
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ erts_lcnt_trylock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_LINK, res);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ erts_lcnt_trylock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ, res);
+ }
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ erts_lcnt_trylock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM, res);
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ erts_lcnt_trylock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS, res);
+ }
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ erts_lcnt_trylock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE, res);
+ }
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
#endif /* ERTS_ENABLE_LOCK_COUNT */
/* --- Process lock checking ----------------------------------------------- */
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
-#define ERTS_SMP_CHK_NO_PROC_LOCKS \
+#if defined(ERTS_ENABLE_LOCK_CHECK)
+#define ERTS_CHK_NO_PROC_LOCKS \
erts_proc_lc_chk_no_proc_locks(__FILE__, __LINE__)
-#define ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(P) \
+#define ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(P) \
erts_proc_lc_chk_only_proc_main((P))
void erts_proc_lc_lock(Process *p, ErtsProcLocks locks,
char *file, unsigned int line);
@@ -252,6 +435,7 @@ void erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks);
void erts_proc_lc_chk_have_proc_locks(Process *p, ErtsProcLocks locks);
void erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks);
void erts_proc_lc_chk_only_proc_main(Process *p);
+void erts_proc_lc_chk_only_proc(Process *p, ErtsProcLocks locks);
void erts_proc_lc_chk_no_proc_locks(char *file, int line);
ErtsProcLocks erts_proc_lc_my_proc_locks(Process *p);
int erts_proc_lc_trylock_force_busy(Process *p, ErtsProcLocks locks);
@@ -259,8 +443,8 @@ void erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks,
char* file, unsigned int line);
void erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks);
#else
-#define ERTS_SMP_CHK_NO_PROC_LOCKS
-#define ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(P)
+#define ERTS_CHK_NO_PROC_LOCKS
+#define ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(P)
#endif
#endif /* #ifndef ERTS_PROC_LOCK_LOCK_CHECK__ */
@@ -271,7 +455,6 @@ void erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks);
#ifndef ERTS_PROCESS_LOCK_H__
#define ERTS_PROCESS_LOCK_H__
-#ifdef ERTS_SMP
typedef struct {
union {
@@ -288,21 +471,21 @@ typedef struct {
#if ERTS_PROC_LOCK_ATOMIC_IMPL
#define ERTS_PROC_LOCK_FLGS_BAND_(L, MSK) \
- ((ErtsProcLocks) erts_smp_atomic32_read_band_nob(&(L)->flags, \
+ ((ErtsProcLocks) erts_atomic32_read_band_nob(&(L)->flags, \
(erts_aint32_t) (MSK)))
#define ERTS_PROC_LOCK_FLGS_BOR_ACQB_(L, MSK) \
- ((ErtsProcLocks) erts_smp_atomic32_read_bor_acqb(&(L)->flags, \
+ ((ErtsProcLocks) erts_atomic32_read_bor_acqb(&(L)->flags, \
(erts_aint32_t) (MSK)))
#define ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(L, NEW, EXPECTED) \
- ((ErtsProcLocks) erts_smp_atomic32_cmpxchg_acqb(&(L)->flags, \
+ ((ErtsProcLocks) erts_atomic32_cmpxchg_acqb(&(L)->flags, \
(erts_aint32_t) (NEW), \
(erts_aint32_t) (EXPECTED)))
#define ERTS_PROC_LOCK_FLGS_CMPXCHG_RELB_(L, NEW, EXPECTED) \
- ((ErtsProcLocks) erts_smp_atomic32_cmpxchg_relb(&(L)->flags, \
+ ((ErtsProcLocks) erts_atomic32_cmpxchg_relb(&(L)->flags, \
(erts_aint32_t) (NEW), \
(erts_aint32_t) (EXPECTED)))
#define ERTS_PROC_LOCK_FLGS_READ_(L) \
- ((ErtsProcLocks) erts_smp_atomic32_read_nob(&(L)->flags))
+ ((ErtsProcLocks) erts_atomic32_read_nob(&(L)->flags))
#else /* no opt atomic ops */
@@ -373,22 +556,22 @@ ERTS_GLB_INLINE void erts_pix_lock(erts_pix_lock_t *);
ERTS_GLB_INLINE void erts_pix_unlock(erts_pix_lock_t *);
ERTS_GLB_INLINE int erts_lc_pix_lock_is_locked(erts_pix_lock_t *);
-ERTS_GLB_INLINE ErtsProcLocks erts_smp_proc_raw_trylock__(Process *p,
+ERTS_GLB_INLINE ErtsProcLocks erts_proc_raw_trylock__(Process *p,
ErtsProcLocks locks);
#ifdef ERTS_ENABLE_LOCK_POSITION
-ERTS_GLB_INLINE void erts_smp_proc_lock_x__(Process *,
+ERTS_GLB_INLINE void erts_proc_lock_x__(Process *,
erts_pix_lock_t *,
ErtsProcLocks,
char *file, unsigned int line);
#else
-ERTS_GLB_INLINE void erts_smp_proc_lock__(Process *,
+ERTS_GLB_INLINE void erts_proc_lock__(Process *,
erts_pix_lock_t *,
ErtsProcLocks);
#endif
-ERTS_GLB_INLINE void erts_smp_proc_unlock__(Process *,
+ERTS_GLB_INLINE void erts_proc_unlock__(Process *,
erts_pix_lock_t *,
ErtsProcLocks);
-ERTS_GLB_INLINE int erts_smp_proc_trylock__(Process *,
+ERTS_GLB_INLINE int erts_proc_trylock__(Process *,
erts_pix_lock_t *,
ErtsProcLocks);
@@ -416,7 +599,7 @@ ERTS_GLB_INLINE int erts_lc_pix_lock_is_locked(erts_pix_lock_t *pixlck)
}
/*
- * Helper function for erts_smp_proc_lock__ and erts_smp_proc_trylock__.
+ * Helper function for erts_proc_lock__ and erts_proc_trylock__.
*
* Attempts to grab all of 'locks' simultaneously.
*
@@ -429,7 +612,7 @@ ERTS_GLB_INLINE int erts_lc_pix_lock_is_locked(erts_pix_lock_t *pixlck)
* Does not release the pix lock.
*/
ERTS_GLB_INLINE ErtsProcLocks
-erts_smp_proc_raw_trylock__(Process *p, ErtsProcLocks locks)
+erts_proc_raw_trylock__(Process *p, ErtsProcLocks locks)
{
#if ERTS_PROC_LOCK_OWN_IMPL
ErtsProcLocks expct_lflgs = 0;
@@ -463,13 +646,25 @@ erts_smp_proc_raw_trylock__(Process *p, ErtsProcLocks locks)
if (locks & ERTS_PROC_LOCK_MSGQ)
if (erts_mtx_trylock(&p->lock.msgq) == EBUSY)
goto busy_msgq;
+ if (locks & ERTS_PROC_LOCK_BTM)
+ if (erts_mtx_trylock(&p->lock.btm) == EBUSY)
+ goto busy_btm;
if (locks & ERTS_PROC_LOCK_STATUS)
if (erts_mtx_trylock(&p->lock.status) == EBUSY)
goto busy_status;
+ if (locks & ERTS_PROC_LOCK_TRACE)
+ if (erts_mtx_trylock(&p->lock.trace) == EBUSY)
+ goto busy_trace;
return 0;
+busy_trace:
+ if (locks & ERTS_PROC_LOCK_TRACE)
+ erts_mtx_unlock(&p->lock.trace);
busy_status:
+ if (locks & ERTS_PROC_LOCK_BTM)
+ erts_mtx_unlock(&p->lock.btm);
+busy_btm:
if (locks & ERTS_PROC_LOCK_MSGQ)
erts_mtx_unlock(&p->lock.msgq);
busy_msgq:
@@ -486,12 +681,12 @@ busy_main:
ERTS_GLB_INLINE void
#ifdef ERTS_ENABLE_LOCK_POSITION
-erts_smp_proc_lock_x__(Process *p,
+erts_proc_lock_x__(Process *p,
erts_pix_lock_t *pix_lck,
ErtsProcLocks locks,
char *file, unsigned int line)
#else
-erts_smp_proc_lock__(Process *p,
+erts_proc_lock__(Process *p,
erts_pix_lock_t *pix_lck,
ErtsProcLocks locks)
#endif
@@ -509,7 +704,11 @@ erts_smp_proc_lock__(Process *p,
ERTS_LC_ASSERT((locks & ~ERTS_PROC_LOCKS_ALL) == 0);
- old_lflgs = erts_smp_proc_raw_trylock__(p, locks);
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_proc_lc_lock(p, locks, file, line);
+#endif
+
+ old_lflgs = erts_proc_raw_trylock__(p, locks);
if (old_lflgs != 0) {
/*
@@ -530,9 +729,6 @@ erts_smp_proc_lock__(Process *p,
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_proc_lock_post_x(&(p->lock), locks, file, line);
#endif
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_proc_lc_lock(p, locks, file, line);
-#endif
#ifdef ERTS_PROC_LOCK_DEBUG
erts_proc_lock_op_debug(p, locks, 1);
@@ -549,8 +745,12 @@ erts_smp_proc_lock__(Process *p,
erts_mtx_lock(&p->lock.link);
if (locks & ERTS_PROC_LOCK_MSGQ)
erts_mtx_lock(&p->lock.msgq);
+ if (locks & ERTS_PROC_LOCK_BTM)
+ erts_mtx_lock(&p->lock.btm);
if (locks & ERTS_PROC_LOCK_STATUS)
erts_mtx_lock(&p->lock.status);
+ if (locks & ERTS_PROC_LOCK_TRACE)
+ erts_mtx_lock(&p->lock.trace);
#ifdef ERTS_PROC_LOCK_DEBUG
erts_proc_lock_op_debug(p, locks, 1);
@@ -560,7 +760,7 @@ erts_smp_proc_lock__(Process *p,
}
ERTS_GLB_INLINE void
-erts_smp_proc_unlock__(Process *p,
+erts_proc_unlock__(Process *p,
erts_pix_lock_t *pix_lck,
ErtsProcLocks locks)
{
@@ -636,8 +836,12 @@ erts_smp_proc_unlock__(Process *p,
erts_proc_lock_op_debug(p, locks, 0);
#endif
+ if (locks & ERTS_PROC_LOCK_TRACE)
+ erts_mtx_unlock(&p->lock.trace);
if (locks & ERTS_PROC_LOCK_STATUS)
erts_mtx_unlock(&p->lock.status);
+ if (locks & ERTS_PROC_LOCK_BTM)
+ erts_mtx_unlock(&p->lock.btm);
if (locks & ERTS_PROC_LOCK_MSGQ)
erts_mtx_unlock(&p->lock.msgq);
if (locks & ERTS_PROC_LOCK_LINK)
@@ -649,7 +853,7 @@ erts_smp_proc_unlock__(Process *p,
}
ERTS_GLB_INLINE int
-erts_smp_proc_trylock__(Process *p,
+erts_proc_trylock__(Process *p,
erts_pix_lock_t *pix_lck,
ErtsProcLocks locks)
{
@@ -670,7 +874,7 @@ erts_smp_proc_trylock__(Process *p,
erts_pix_lock(pix_lck);
#endif
- if (erts_smp_proc_raw_trylock__(p, locks) != 0) {
+ if (erts_proc_raw_trylock__(p, locks) != 0) {
/* Didn't get all locks... */
res = EBUSY;
@@ -707,7 +911,7 @@ erts_smp_proc_trylock__(Process *p,
return res;
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
- if (erts_smp_proc_raw_trylock__(p, locks) != 0)
+ if (erts_proc_raw_trylock__(p, locks) != 0)
return EBUSY;
else {
#ifdef ERTS_PROC_LOCK_DEBUG
@@ -728,11 +932,11 @@ erts_proc_lock_op_debug(Process *p, ErtsProcLocks locks, int locked)
if (locks & lock) {
erts_aint32_t lock_count;
if (locked) {
- lock_count = erts_smp_atomic32_inc_read_nob(&p->lock.locked[i]);
+ lock_count = erts_atomic32_inc_read_nob(&p->lock.locked[i]);
ERTS_LC_ASSERT(lock_count == 1);
}
else {
- lock_count = erts_smp_atomic32_dec_read_nob(&p->lock.locked[i]);
+ lock_count = erts_atomic32_dec_read_nob(&p->lock.locked[i]);
ERTS_LC_ASSERT(lock_count == 0);
}
}
@@ -742,106 +946,108 @@ erts_proc_lock_op_debug(Process *p, ErtsProcLocks locks, int locked)
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-#endif /* ERTS_SMP */
#ifdef ERTS_ENABLE_LOCK_POSITION
-ERTS_GLB_INLINE void erts_smp_proc_lock_x(Process *, ErtsProcLocks, char *file, unsigned int line);
+ERTS_GLB_INLINE void erts_proc_lock_x(Process *, ErtsProcLocks, char *file, unsigned int line);
#else
-ERTS_GLB_INLINE void erts_smp_proc_lock(Process *, ErtsProcLocks);
+ERTS_GLB_INLINE void erts_proc_lock(Process *, ErtsProcLocks);
#endif
-ERTS_GLB_INLINE void erts_smp_proc_unlock(Process *, ErtsProcLocks);
-ERTS_GLB_INLINE int erts_smp_proc_trylock(Process *, ErtsProcLocks);
+ERTS_GLB_INLINE void erts_proc_unlock(Process *, ErtsProcLocks);
+ERTS_GLB_INLINE int erts_proc_trylock(Process *, ErtsProcLocks);
-ERTS_GLB_INLINE void erts_smp_proc_inc_refc(Process *);
-ERTS_GLB_INLINE void erts_smp_proc_dec_refc(Process *);
-ERTS_GLB_INLINE void erts_smp_proc_add_refc(Process *, Sint32);
+ERTS_GLB_INLINE void erts_proc_inc_refc(Process *);
+ERTS_GLB_INLINE void erts_proc_dec_refc(Process *);
+ERTS_GLB_INLINE void erts_proc_add_refc(Process *, Sint);
+ERTS_GLB_INLINE Sint erts_proc_read_refc(Process *);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
#ifdef ERTS_ENABLE_LOCK_POSITION
-erts_smp_proc_lock_x(Process *p, ErtsProcLocks locks, char *file, unsigned int line)
+erts_proc_lock_x(Process *p, ErtsProcLocks locks, char *file, unsigned int line)
#else
-erts_smp_proc_lock(Process *p, ErtsProcLocks locks)
+erts_proc_lock(Process *p, ErtsProcLocks locks)
#endif
{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
- erts_smp_proc_lock_x__(p,
+#if defined(ERTS_ENABLE_LOCK_POSITION)
+ erts_proc_lock_x__(p,
#if ERTS_PROC_LOCK_ATOMIC_IMPL
NULL,
#else
ERTS_PID2PIXLOCK(p->common.id),
#endif /*ERTS_PROC_LOCK_ATOMIC_IMPL*/
locks, file, line);
-#elif defined(ERTS_SMP)
- erts_smp_proc_lock__(p,
+#else
+ erts_proc_lock__(p,
#if ERTS_PROC_LOCK_ATOMIC_IMPL
NULL,
#else
ERTS_PID2PIXLOCK(p->common.id),
#endif /*ERTS_PROC_LOCK_ATOMIC_IMPL*/
locks);
-#endif /*ERTS_SMP*/
+#endif /*ERTS_ENABLE_LOCK_POSITION*/
}
ERTS_GLB_INLINE void
-erts_smp_proc_unlock(Process *p, ErtsProcLocks locks)
+erts_proc_unlock(Process *p, ErtsProcLocks locks)
{
-#ifdef ERTS_SMP
- erts_smp_proc_unlock__(p,
+ erts_proc_unlock__(p,
#if ERTS_PROC_LOCK_ATOMIC_IMPL
NULL,
#else
ERTS_PID2PIXLOCK(p->common.id),
#endif
locks);
-#endif
}
ERTS_GLB_INLINE int
-erts_smp_proc_trylock(Process *p, ErtsProcLocks locks)
+erts_proc_trylock(Process *p, ErtsProcLocks locks)
{
-#ifndef ERTS_SMP
- return 0;
-#else
- return erts_smp_proc_trylock__(p,
+ return erts_proc_trylock__(p,
#if ERTS_PROC_LOCK_ATOMIC_IMPL
NULL,
#else
ERTS_PID2PIXLOCK(p->common.id),
#endif
locks);
-#endif
}
-ERTS_GLB_INLINE void erts_smp_proc_inc_refc(Process *p)
+ERTS_GLB_INLINE void erts_proc_inc_refc(Process *p)
{
-#ifdef ERTS_SMP
- erts_ptab_inc_refc(&p->common);
-#endif
+ ASSERT(!(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY));
+ erts_ptab_atmc_inc_refc(&p->common);
}
-ERTS_GLB_INLINE void erts_smp_proc_dec_refc(Process *p)
+ERTS_GLB_INLINE void erts_proc_dec_refc(Process *p)
{
-#ifdef ERTS_SMP
- int referred = erts_ptab_dec_test_refc(&p->common);
- if (!referred)
+ Sint referred;
+ ASSERT(!(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY));
+ referred = erts_ptab_atmc_dec_test_refc(&p->common);
+ if (!referred) {
+ ASSERT(ERTS_PROC_IS_EXITING(p));
erts_free_proc(p);
-#endif
+ }
}
-ERTS_GLB_INLINE void erts_smp_proc_add_refc(Process *p, Sint32 add_refc)
+ERTS_GLB_INLINE void erts_proc_add_refc(Process *p, Sint add_refc)
{
-#ifdef ERTS_SMP
- int referred = erts_ptab_add_test_refc(&p->common, add_refc);
- if (!referred)
+ Sint referred;
+ ASSERT(!(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY));
+ referred = erts_ptab_atmc_add_test_refc(&p->common, add_refc);
+ if (!referred) {
+ ASSERT(ERTS_PROC_IS_EXITING(p));
erts_free_proc(p);
-#endif
+ }
+}
+
+ERTS_GLB_INLINE Sint erts_proc_read_refc(Process *p)
+{
+ ASSERT(!(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY));
+ return erts_ptab_atmc_read_refc(&p->common);
}
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-#ifdef ERTS_SMP
void erts_proc_lock_init(Process *);
void erts_proc_lock_fin(Process *);
void erts_proc_safelock(Process *a_proc,
@@ -850,7 +1056,6 @@ void erts_proc_safelock(Process *a_proc,
Process *b_proc,
ErtsProcLocks b_have_locks,
ErtsProcLocks b_need_locks);
-#endif
/*
* --- Process table lookup ------------------------------------------------
@@ -868,21 +1073,20 @@ void erts_proc_safelock(Process *a_proc,
#define ERTS_P2P_FLG_ALLOW_OTHER_X (1 << 0)
#define ERTS_P2P_FLG_TRY_LOCK (1 << 1)
-#define ERTS_P2P_FLG_SMP_INC_REFC (1 << 2)
+#define ERTS_P2P_FLG_INC_REFC (1 << 2)
#define ERTS_PROC_LOCK_BUSY ((Process *) &erts_invalid_process)
#define erts_pid2proc(PROC, HL, PID, NL) \
erts_pid2proc_opt((PROC), (HL), (PID), (NL), 0)
+Process *erts_proc_lookup_inc_refc(Eterm pid);
+Process *erts_proc_lookup_raw_inc_refc(Eterm pid);
ERTS_GLB_INLINE Process *erts_pix2proc(int ix);
ERTS_GLB_INLINE Process *erts_proc_lookup_raw(Eterm pid);
ERTS_GLB_INLINE Process *erts_proc_lookup(Eterm pid);
-#ifndef ERTS_SMP
-ERTS_GLB_INLINE
-#endif
Process *erts_pid2proc_opt(Process *, ErtsProcLocks, Eterm, ErtsProcLocks, int);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -899,7 +1103,7 @@ ERTS_GLB_INLINE Process *erts_proc_lookup_raw(Eterm pid)
{
Process *proc;
- ERTS_SMP_LC_ASSERT(erts_thr_progress_lc_is_delaying());
+ ERTS_LC_ASSERT(erts_thr_progress_lc_is_delaying());
if (is_not_internal_pid(pid))
return NULL;
@@ -919,22 +1123,6 @@ ERTS_GLB_INLINE Process *erts_proc_lookup(Eterm pid)
return proc;
}
-#ifndef ERTS_SMP
-ERTS_GLB_INLINE Process *
-erts_pid2proc_opt(Process *c_p_unused,
- ErtsProcLocks c_p_have_locks_unused,
- Eterm pid,
- ErtsProcLocks pid_need_locks_unused,
- int flags)
-{
- Process *proc = erts_proc_lookup_raw(pid);
- return ((!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
- && proc
- && ERTS_PROC_IS_EXITING(proc))
- ? NULL
- : proc);
-}
-#endif /* !ERTS_SMP */
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
diff --git a/erts/emulator/beam/erl_ptab.c b/erts/emulator/beam/erl_ptab.c
index eabf016081..38c095fb4a 100644
--- a/erts/emulator/beam/erl_ptab.c
+++ b/erts/emulator/beam/erl_ptab.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2012-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2012-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -280,124 +281,38 @@ struct ErtsPTabListBifData_ {
};
-#ifdef ARCH_32
-
-static ERTS_INLINE Uint64
-dw_aint_to_uint64(erts_dw_aint_t *dw)
-{
-#ifdef ETHR_SU_DW_NAINT_T__
- return (Uint64) dw->dw_sint;
-#else
- Uint64 res;
- res = (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_HIGH_WORD]);
- res <<= 32;
- res |= (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_LOW_WORD]);
- return res;
-#endif
-}
-
-static void
-unint64_to_dw_aint(erts_dw_aint_t *dw, Uint64 val)
-{
-#ifdef ETHR_SU_DW_NAINT_T__
- dw->dw_sint = (ETHR_SU_DW_NAINT_T__) val;
-#else
- dw->sint[ERTS_DW_AINT_LOW_WORD] = (erts_aint_t) (val & 0xffffffff);
- dw->sint[ERTS_DW_AINT_HIGH_WORD] = (erts_aint_t) ((val >> 32) & 0xffffffff);
-#endif
-}
-
static ERTS_INLINE void
last_data_init_nob(ErtsPTab *ptab, Uint64 val)
{
- erts_dw_aint_t dw;
- unint64_to_dw_aint(&dw, val);
- erts_smp_dw_atomic_init_nob(&ptab->vola.tile.last_data, &dw);
+ erts_atomic64_init_nob(&ptab->vola.tile.last_data, (erts_aint64_t) val);
}
static ERTS_INLINE void
last_data_set_relb(ErtsPTab *ptab, Uint64 val)
{
- erts_dw_aint_t dw;
- unint64_to_dw_aint(&dw, val);
- erts_smp_dw_atomic_set_relb(&ptab->vola.tile.last_data, &dw);
+ erts_atomic64_set_relb(&ptab->vola.tile.last_data, (erts_aint64_t) val);
}
static ERTS_INLINE Uint64
last_data_read_nob(ErtsPTab *ptab)
{
- erts_dw_aint_t dw;
- erts_smp_dw_atomic_read_nob(&ptab->vola.tile.last_data, &dw);
- return dw_aint_to_uint64(&dw);
+ return (Uint64) erts_atomic64_read_nob(&ptab->vola.tile.last_data);
}
static ERTS_INLINE Uint64
last_data_read_acqb(ErtsPTab *ptab)
{
- erts_dw_aint_t dw;
- erts_smp_dw_atomic_read_acqb(&ptab->vola.tile.last_data, &dw);
- return dw_aint_to_uint64(&dw);
+ return (Uint64) erts_atomic64_read_acqb(&ptab->vola.tile.last_data);
}
static ERTS_INLINE Uint64
last_data_cmpxchg_relb(ErtsPTab *ptab, Uint64 new, Uint64 exp)
{
- erts_dw_aint_t dw_new, dw_xchg;
-
- unint64_to_dw_aint(&dw_new, new);
- unint64_to_dw_aint(&dw_xchg, exp);
-
- if (erts_smp_dw_atomic_cmpxchg_relb(&ptab->vola.tile.last_data,
- &dw_new,
- &dw_xchg))
- return exp;
- else
- return dw_aint_to_uint64(&dw_xchg);
-}
-
-#elif defined(ARCH_64)
-
-union {
- erts_smp_atomic_t pid_data;
- char align[ERTS_CACHE_LINE_SIZE];
-} last erts_align_attribute(ERTS_CACHE_LINE_SIZE);
-
-static ERTS_INLINE void
-last_data_init_nob(ErtsPTab *ptab, Uint64 val)
-{
- erts_smp_atomic_init_nob(&ptab->vola.tile.last_data, (erts_aint_t) val);
-}
-
-static ERTS_INLINE void
-last_data_set_relb(ErtsPTab *ptab, Uint64 val)
-{
- erts_smp_atomic_set_relb(&ptab->vola.tile.last_data, (erts_aint_t) val);
+ return (Uint64) erts_atomic64_cmpxchg_relb(&ptab->vola.tile.last_data,
+ (erts_aint64_t) new,
+ (erts_aint64_t) exp);
}
-static ERTS_INLINE Uint64
-last_data_read_nob(ErtsPTab *ptab)
-{
- return (Uint64) erts_smp_atomic_read_nob(&ptab->vola.tile.last_data);
-}
-
-static ERTS_INLINE Uint64
-last_data_read_acqb(ErtsPTab *ptab)
-{
- return (Uint64) erts_smp_atomic_read_acqb(&ptab->vola.tile.last_data);
-}
-
-static ERTS_INLINE Uint64
-last_data_cmpxchg_relb(ErtsPTab *ptab, Uint64 new, Uint64 exp)
-{
- return (Uint64) erts_smp_atomic_cmpxchg_relb(&ptab->vola.tile.last_data,
- (erts_aint_t) new,
- (erts_aint_t) exp);
-}
-
-#else
-# error "Not 64-bit, nor 32-bit architecture..."
-#endif
-
static ERTS_INLINE int
last_data_cmp(Uint64 ld1, Uint64 ld2)
{
@@ -431,9 +346,9 @@ ix_to_free_id_data_ix(ErtsPTab *ptab, Uint32 ix)
UWord
erts_ptab_mem_size(ErtsPTab *ptab)
{
- UWord size = ptab->r.o.max*sizeof(erts_smp_atomic_t);
+ UWord size = ptab->r.o.max*sizeof(erts_atomic_t);
if (ptab->r.o.free_id_data)
- size += ptab->r.o.max*sizeof(erts_smp_atomic32_t);
+ size += ptab->r.o.max*sizeof(erts_atomic32_t);
return size;
}
@@ -446,18 +361,20 @@ erts_ptab_init_table(ErtsPTab *ptab,
int size,
UWord element_size,
char *name,
- int legacy)
+ int legacy,
+ int atomic_refc)
{
size_t tab_sz, alloc_sz;
Uint32 bits, cl, cli, ix, ix_per_cache_line, tab_cache_lines;
char *tab_end;
- erts_smp_atomic_t *tab_entry;
- erts_smp_rwmtx_opt_t rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
- rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
- rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED;
-
- erts_smp_rwmtx_init_opt(&ptab->list.data.rwmtx, &rwmtx_opts, name);
- erts_smp_atomic32_init_nob(&ptab->vola.tile.count, 0);
+ erts_atomic_t *tab_entry;
+ erts_rwmtx_opt_t rwmtx_opts = ERTS_RWMTX_OPT_DEFAULT_INITER;
+ rwmtx_opts.type = ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
+ rwmtx_opts.lived = ERTS_RWMTX_LONG_LIVED;
+
+ erts_rwmtx_init_opt(&ptab->list.data.rwmtx, &rwmtx_opts, name, NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+ erts_atomic32_init_nob(&ptab->vola.tile.count, 0);
last_data_init_nob(ptab, ~((Uint64) 0));
/* A size that is a power of 2 is to prefer performance wise */
@@ -471,20 +388,20 @@ erts_ptab_init_table(ErtsPTab *ptab,
ptab->r.o.element_size = element_size;
ptab->r.o.max = size;
- tab_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_smp_atomic_t));
+ tab_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_atomic_t));
alloc_sz = tab_sz;
if (!legacy)
- alloc_sz += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_smp_atomic32_t));
+ alloc_sz += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_atomic32_t));
ptab->r.o.tab = erts_alloc_permanent_cache_aligned(atype, alloc_sz);
tab_end = ((char *) ptab->r.o.tab) + tab_sz;
tab_entry = ptab->r.o.tab;
while (tab_end > ((char *) tab_entry)) {
- erts_smp_atomic_init_nob(tab_entry, ERTS_AINT_NULL);
+ erts_atomic_init_nob(tab_entry, ERTS_AINT_NULL);
tab_entry++;
}
tab_cache_lines = tab_sz/ERTS_CACHE_LINE_SIZE;
- ix_per_cache_line = (ERTS_CACHE_LINE_SIZE/sizeof(erts_smp_atomic_t));
+ ix_per_cache_line = (ERTS_CACHE_LINE_SIZE/sizeof(erts_atomic_t));
ASSERT((ptab->r.o.max & (ptab->r.o.max - 1)) == 0); /* power of 2 */
ASSERT((ix_per_cache_line & (ix_per_cache_line - 1)) == 0); /* power of 2 */
ASSERT((tab_cache_lines & (tab_cache_lines - 1)) == 0); /* power of 2 */
@@ -501,6 +418,8 @@ erts_ptab_init_table(ErtsPTab *ptab,
ptab->r.o.invalid_data = erts_ptab_id2data(ptab, invalid_element->id);
ptab->r.o.release_element = release_element;
+ ptab->r.o.atomic_refc = atomic_refc;
+
if (legacy) {
ptab->r.o.free_id_data = NULL;
ptab->r.o.dix_cl_mask = 0;
@@ -510,11 +429,11 @@ erts_ptab_init_table(ErtsPTab *ptab,
}
else {
- tab_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_smp_atomic32_t));
- ptab->r.o.free_id_data = (erts_smp_atomic32_t *) tab_end;
+ tab_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_atomic32_t));
+ ptab->r.o.free_id_data = (erts_atomic32_t *) tab_end;
tab_cache_lines = tab_sz/ERTS_CACHE_LINE_SIZE;
- ix_per_cache_line = (ERTS_CACHE_LINE_SIZE/sizeof(erts_smp_atomic32_t));
+ ix_per_cache_line = (ERTS_CACHE_LINE_SIZE/sizeof(erts_atomic32_t));
ptab->r.o.dix_cl_mask = tab_cache_lines-1;
ptab->r.o.dix_cl_shift = erts_fit_in_bits_int32(ix_per_cache_line-1);
@@ -529,19 +448,19 @@ erts_ptab_init_table(ErtsPTab *ptab,
ix = 0;
for (cl = 0; cl < tab_cache_lines; cl++) {
for (cli = 0; cli < ix_per_cache_line; cli++) {
- erts_smp_atomic32_init_nob(&ptab->r.o.free_id_data[ix],
+ erts_atomic32_init_nob(&ptab->r.o.free_id_data[ix],
cli*tab_cache_lines+cl);
- ASSERT(erts_smp_atomic32_read_nob(&ptab->r.o.free_id_data[ix]) != ptab->r.o.invalid_data);
+ ASSERT(erts_atomic32_read_nob(&ptab->r.o.free_id_data[ix]) != ptab->r.o.invalid_data);
ix++;
}
}
- erts_smp_atomic32_init_nob(&ptab->vola.tile.aid_ix, -1);
- erts_smp_atomic32_init_nob(&ptab->vola.tile.fid_ix, -1);
+ erts_atomic32_init_nob(&ptab->vola.tile.aid_ix, -1);
+ erts_atomic32_init_nob(&ptab->vola.tile.fid_ix, -1);
}
- erts_smp_interval_init(&ptab->list.data.interval);
+ erts_interval_init(&ptab->list.data.interval);
ptab->list.data.deleted.start = NULL;
ptab->list.data.deleted.end = NULL;
ptab->list.data.chunks = (((ptab->r.o.max - 1)
@@ -556,14 +475,14 @@ erts_ptab_init_table(ErtsPTab *ptab,
* we don't want to shrink the size to ERTS_PTAB_MAX_SIZE/2.
*
* In order to fix this, we insert a pointer from the table
- * to the invalid_element, wich will be interpreted as a
+ * to the invalid_element, which will be interpreted as a
* slot currently being modified. This way we will be able to
* have ERTS_PTAB_MAX_SIZE-1 valid elements in the table while
* still having a table size of the power of 2.
*/
- erts_smp_atomic32_inc_nob(&ptab->vola.tile.count);
+ erts_atomic32_inc_nob(&ptab->vola.tile.count);
pix = erts_ptab_data2pix(ptab, ptab->r.o.invalid_data);
- erts_smp_atomic_set_relb(&ptab->r.o.tab[pix],
+ erts_atomic_set_relb(&ptab->r.o.tab[pix],
(erts_aint_t) ptab->r.o.invalid_element);
}
@@ -587,12 +506,12 @@ erts_ptab_new_element(ErtsPTab *ptab,
erts_ptab_rlock(ptab);
- count = erts_smp_atomic32_inc_read_acqb(&ptab->vola.tile.count);
+ count = erts_atomic32_inc_read_acqb(&ptab->vola.tile.count);
if (count > ptab->r.o.max) {
while (1) {
erts_aint32_t act_count;
- act_count = erts_smp_atomic32_cmpxchg_relb(&ptab->vola.tile.count,
+ act_count = erts_atomic32_cmpxchg_relb(&ptab->vola.tile.count,
count-1,
count);
if (act_count == count) {
@@ -606,30 +525,31 @@ erts_ptab_new_element(ErtsPTab *ptab,
}
ptab_el->u.alive.started_interval
- = erts_smp_current_interval_nob(erts_ptab_interval(ptab));
+ = erts_current_interval_nob(erts_ptab_interval(ptab));
if (ptab->r.o.free_id_data) {
do {
- ix = (Uint32) erts_smp_atomic32_inc_read_acqb(&ptab->vola.tile.aid_ix);
+ ix = (Uint32) erts_atomic32_inc_read_acqb(&ptab->vola.tile.aid_ix);
ix = ix_to_free_id_data_ix(ptab, ix);
- data = erts_smp_atomic32_xchg_nob(&ptab->r.o.free_id_data[ix],
+ data = erts_atomic32_xchg_nob(&ptab->r.o.free_id_data[ix],
(erts_aint32_t)ptab->r.o.invalid_data);
}while ((Eterm)data == ptab->r.o.invalid_data);
init_ptab_el(init_arg, (Eterm) data);
-#ifdef ERTS_SMP
- erts_smp_atomic32_init_nob(&ptab_el->refc, 1);
-#endif
+ if (ptab->r.o.atomic_refc)
+ erts_atomic_init_nob(&ptab_el->refc.atmc, 1);
+ else
+ ptab_el->refc.sint = 1;
pix = erts_ptab_data2pix(ptab, (Eterm) data);
#ifdef DEBUG
- ASSERT(ERTS_AINT_NULL == erts_smp_atomic_xchg_relb(&ptab->r.o.tab[pix],
+ ASSERT(ERTS_AINT_NULL == erts_atomic_xchg_relb(&ptab->r.o.tab[pix],
(erts_aint_t) ptab_el));
#else
- erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el);
+ erts_atomic_set_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el);
#endif
erts_ptab_runlock(ptab);
@@ -643,7 +563,7 @@ erts_ptab_new_element(ErtsPTab *ptab,
restart:
ptab_el->u.alive.started_interval
- = erts_smp_current_interval_nob(erts_ptab_interval(ptab));
+ = erts_current_interval_nob(erts_ptab_interval(ptab));
ld = last_data_read_acqb(ptab);
@@ -651,10 +571,10 @@ erts_ptab_new_element(ErtsPTab *ptab,
while (1) {
ld++;
pix = erts_ptab_data2pix(ptab, ERTS_PTAB_LastData2EtermData(ld));
- if (erts_smp_atomic_read_nob(&ptab->r.o.tab[pix])
+ if (erts_atomic_read_nob(&ptab->r.o.tab[pix])
== ERTS_AINT_NULL) {
erts_aint_t val;
- val = erts_smp_atomic_cmpxchg_relb(&ptab->r.o.tab[pix],
+ val = erts_atomic_cmpxchg_relb(&ptab->r.o.tab[pix],
invalid,
ERTS_AINT_NULL);
@@ -694,16 +614,17 @@ erts_ptab_new_element(ErtsPTab *ptab,
init_ptab_el(init_arg, data);
-#ifdef ERTS_SMP
- erts_smp_atomic32_init_nob(&ptab_el->refc, 1);
-#endif
+ if (ptab->r.o.atomic_refc)
+ erts_atomic_init_nob(&ptab_el->refc.atmc, 1);
+ else
+ ptab_el->refc.sint = 1;
/* Move into slot reserved */
#ifdef DEBUG
- ASSERT(invalid == erts_smp_atomic_xchg_relb(&ptab->r.o.tab[pix],
+ ASSERT(invalid == erts_atomic_xchg_relb(&ptab->r.o.tab[pix],
(erts_aint_t) ptab_el));
#else
- erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el);
+ erts_atomic_set_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el);
#endif
if (rlocked)
@@ -723,7 +644,7 @@ save_deleted_element(ErtsPTab *ptab, ErtsPTabElementCommon *ptab_el)
sizeof(ErtsPTabDeletedElement));
ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.start
&& ptab->list.data.deleted.end);
- ERTS_SMP_LC_ASSERT(erts_smp_lc_ptab_is_rwlocked(ptab));
+ ERTS_LC_ASSERT(erts_lc_ptab_is_rwlocked(ptab));
ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab);
@@ -733,7 +654,7 @@ save_deleted_element(ErtsPTab *ptab, ErtsPTabElementCommon *ptab_el)
ptdep->u.element.id = ptab_el->id;
ptdep->u.element.inserted = ptab_el->u.alive.started_interval;
ptdep->u.element.deleted =
- erts_smp_current_interval_nob(erts_ptab_interval(ptab));
+ erts_current_interval_nob(erts_ptab_interval(ptab));
ptab->list.data.deleted.end->next = ptdep;
ptab->list.data.deleted.end = ptdep;
@@ -757,7 +678,7 @@ erts_ptab_delete_element(ErtsPTab *ptab,
pix = erts_ptab_id2pix(ptab, ptab_el->id);
/* *Need* to be an managed thread */
- ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
+ ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread());
erts_ptab_rlock(ptab);
maybe_save = ptab->list.data.deleted.end != NULL;
@@ -766,7 +687,7 @@ erts_ptab_delete_element(ErtsPTab *ptab,
erts_ptab_rwlock(ptab);
}
- erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], ERTS_AINT_NULL);
+ erts_atomic_set_relb(&ptab->r.o.tab[pix], ERTS_AINT_NULL);
if (ptab->r.o.free_id_data) {
Uint32 prev_data;
@@ -782,17 +703,17 @@ erts_ptab_delete_element(ErtsPTab *ptab,
ASSERT(pix == erts_ptab_data2pix(ptab, data));
do {
- ix = (Uint32) erts_smp_atomic32_inc_read_relb(&ptab->vola.tile.fid_ix);
+ ix = (Uint32) erts_atomic32_inc_read_relb(&ptab->vola.tile.fid_ix);
ix = ix_to_free_id_data_ix(ptab, ix);
- prev_data = erts_smp_atomic32_cmpxchg_nob(&ptab->r.o.free_id_data[ix],
+ prev_data = erts_atomic32_cmpxchg_nob(&ptab->r.o.free_id_data[ix],
data,
ptab->r.o.invalid_data);
}while ((Eterm)prev_data != ptab->r.o.invalid_data);
}
- ASSERT(erts_smp_atomic32_read_nob(&ptab->vola.tile.count) > 0);
- erts_smp_atomic32_dec_relb(&ptab->vola.tile.count);
+ ASSERT(erts_atomic32_read_nob(&ptab->vola.tile.count) > 0);
+ erts_atomic32_dec_relb(&ptab->vola.tile.count);
if (!maybe_save)
erts_ptab_runlock(ptab);
@@ -813,7 +734,7 @@ erts_ptab_delete_element(ErtsPTab *ptab,
* erts_ptab_list() implements BIFs listing the content of the table,
* e.g. erlang:processes/0.
*/
-static void cleanup_ptab_list_bif_data(Binary *bp);
+static int cleanup_ptab_list_bif_data(Binary *bp);
static int ptab_list_bif_engine(Process *c_p, Eterm *res_accp, Binary *mbp);
@@ -851,23 +772,23 @@ erts_ptab_list(Process *c_p, ErtsPTab *ptab)
}
else {
Eterm *hp;
- Eterm magic_bin;
+ Eterm magic_ref;
ERTS_PTAB_LIST_DBG_CHK_RESLIST(res_acc);
- hp = HAlloc(c_p, PROC_BIN_SIZE);
- ERTS_PTAB_LIST_DBG_SAVE_HEAP_ALLOC(ptlbdp, hp, PROC_BIN_SIZE);
- magic_bin = erts_mk_magic_binary_term(&hp, &MSO(c_p), mbp);
+ hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE);
+ ERTS_PTAB_LIST_DBG_SAVE_HEAP_ALLOC(ptlbdp, hp, ERTS_MAGIC_REF_THING_SIZE);
+ magic_ref = erts_mk_magic_ref(&hp, &MSO(c_p), mbp);
ERTS_PTAB_LIST_DBG_VERIFY_HEAP_ALLOC_USED(ptlbdp, hp);
ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, trap);
ERTS_BIF_PREP_YIELD2(ret_val,
&ptab_list_continue_export,
c_p,
res_acc,
- magic_bin);
+ magic_ref);
}
return ret_val;
}
-static void
+static int
cleanup_ptab_list_bif_data(Binary *bp)
{
ErtsPTabListBifData *ptlbdp = ERTS_MAGIC_BIN_DATA(bp);
@@ -955,6 +876,8 @@ cleanup_ptab_list_bif_data(Binary *bp)
ERTS_PTAB_LIST_DBG_TRACE(ptlbdp->debug.caller, return);
ERTS_PTAB_LIST_DBG_CLEANUP(ptlbdp);
+
+ return 1;
}
static int
@@ -1004,7 +927,7 @@ ptab_list_bif_engine(Process *c_p, Eterm *res_accp, Binary *mbp)
sizeof(ErtsPTabDeletedElement));
ptlbdp->bif_invocation->ix = -1;
ptlbdp->bif_invocation->u.bif_invocation.interval
- = erts_smp_step_interval_nob(erts_ptab_interval(ptab));
+ = erts_step_interval_nob(erts_ptab_interval(ptab));
ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab);
ptlbdp->bif_invocation->next = NULL;
@@ -1045,12 +968,12 @@ ptab_list_bif_engine(Process *c_p, Eterm *res_accp, Binary *mbp)
locked = 1;
}
- ERTS_SMP_LC_ASSERT(erts_smp_lc_ptab_is_rwlocked(ptab));
+ ERTS_LC_ASSERT(erts_lc_ptab_is_rwlocked(ptab));
ERTS_PTAB_LIST_DBG_TRACE(p->common.id, insp_table);
if (cix != 0)
ptlbdp->chunk[cix].interval
- = erts_smp_step_interval_nob(erts_ptab_interval(ptab));
+ = erts_step_interval_nob(erts_ptab_interval(ptab));
else if (ptlbdp->bif_invocation)
ptlbdp->chunk[0].interval = *invocation_interval_p;
/* else: interval is irrelevant */
@@ -1335,7 +1258,7 @@ ptab_list_bif_engine(Process *c_p, Eterm *res_accp, Binary *mbp)
return 1;
default:
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"%s:%d:ptab_list_bif_engine(): Invalid state: %d\n",
__FILE__, __LINE__, (int) ptlbdp->state);
}
@@ -1367,9 +1290,7 @@ static BIF_RETTYPE ptab_list_continue(BIF_ALIST_2)
res_acc = BIF_ARG_1;
- ERTS_PTAB_LIST_ASSERT(ERTS_TERM_IS_MAGIC_BINARY(BIF_ARG_2));
-
- mbp = ((ProcBin *) binary_val(BIF_ARG_2))->val;
+ mbp = erts_magic_ref2bin(BIF_ARG_2);
ERTS_PTAB_LIST_ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp)
== cleanup_ptab_list_bif_data);
@@ -1410,18 +1331,18 @@ static void assert_ptab_consistency(ErtsPTab *ptab)
int null_slots = 0;
for (ix=0; ix < ptab->r.o.max; ix++) {
- if (erts_smp_atomic32_read_nob(&ptab->r.o.free_id_data[ix]) != ptab->r.o.invalid_data) {
+ if (erts_atomic32_read_nob(&ptab->r.o.free_id_data[ix]) != ptab->r.o.invalid_data) {
++free_pids;
- data = erts_smp_atomic32_read_nob(&ptab->r.o.free_id_data[ix]);
+ data = erts_atomic32_read_nob(&ptab->r.o.free_id_data[ix]);
pix = erts_ptab_data2pix(ptab, (Eterm) data);
ASSERT(erts_ptab_pix2intptr_nob(ptab, pix) == ERTS_AINT_NULL);
}
- if (erts_smp_atomic_read_nob(&ptab->r.o.tab[ix]) == ERTS_AINT_NULL) {
+ if (erts_atomic_read_nob(&ptab->r.o.tab[ix]) == ERTS_AINT_NULL) {
++null_slots;
}
}
ASSERT(free_pids == null_slots);
- ASSERT(free_pids == ptab->r.o.max - erts_smp_atomic32_read_nob(&ptab->vola.tile.count));
+ ASSERT(free_pids == ptab->r.o.max - erts_atomic32_read_nob(&ptab->vola.tile.count));
}
#endif
}
@@ -1445,7 +1366,7 @@ erts_ptab_test_next_id(ErtsPTab *ptab, int set, Uint next)
Uint32 i, max_ix, num, stop_id_ix;
max_ix = ptab->r.o.max - 1;
num = next;
- id_ix = (Uint32) erts_smp_atomic32_read_nob(&ptab->vola.tile.aid_ix);
+ id_ix = (Uint32) erts_atomic32_read_nob(&ptab->vola.tile.aid_ix);
for (i=0; i <= max_ix; ++i) {
Uint32 pix;
@@ -1459,26 +1380,26 @@ erts_ptab_test_next_id(ErtsPTab *ptab, int set, Uint next)
if (ERTS_AINT_NULL == erts_ptab_pix2intptr_nob(ptab, pix)) {
++id_ix;
dix = ix_to_free_id_data_ix(ptab, id_ix);
- erts_smp_atomic32_set_nob(&ptab->r.o.free_id_data[dix], num);
+ erts_atomic32_set_nob(&ptab->r.o.free_id_data[dix], num);
ASSERT(pix == erts_ptab_data2pix(ptab, num));
}
}
- erts_smp_atomic32_set_nob(&ptab->vola.tile.fid_ix, id_ix);
+ erts_atomic32_set_nob(&ptab->vola.tile.fid_ix, id_ix);
/* Write invalid_data in rest of free_id_data[]: */
- stop_id_ix = (1 + erts_smp_atomic32_read_nob(&ptab->vola.tile.aid_ix)) & max_ix;
+ stop_id_ix = (1 + erts_atomic32_read_nob(&ptab->vola.tile.aid_ix)) & max_ix;
while (1) {
id_ix = (id_ix+1) & max_ix;
if (id_ix == stop_id_ix)
break;
dix = ix_to_free_id_data_ix(ptab, id_ix);
- erts_smp_atomic32_set_nob(&ptab->r.o.free_id_data[dix],
+ erts_atomic32_set_nob(&ptab->r.o.free_id_data[dix],
ptab->r.o.invalid_data);
}
}
- id_ix = (Uint32) erts_smp_atomic32_read_nob(&ptab->vola.tile.aid_ix) + 1;
+ id_ix = (Uint32) erts_atomic32_read_nob(&ptab->vola.tile.aid_ix) + 1;
dix = ix_to_free_id_data_ix(ptab, id_ix);
- res = (Sint) erts_smp_atomic32_read_nob(&ptab->r.o.free_id_data[dix]);
+ res = (Sint) erts_atomic32_read_nob(&ptab->r.o.free_id_data[dix]);
}
else {
/* Deprecated legacy algorithm... */
@@ -1695,11 +1616,11 @@ debug_ptab_list_verify_all_pids(ErtsPTabListBifData *ptlbdp)
static void
debug_ptab_list_check_del_list(ErtsPTab *ptab)
{
- ERTS_SMP_LC_ASSERT(erts_smp_lc_ptab_is_rwlocked(ptab));
+ ERTS_LC_ASSERT(erts_lc_ptab_is_rwlocked(ptab));
if (!ptab->list.data.deleted.start)
ERTS_PTAB_LIST_ASSERT(!ptab->list.data.deleted.end);
else {
- Uint64 curr_interval = erts_smp_current_interval_nob(erts_ptab_interval(ptab));
+ Uint64 curr_interval = erts_current_interval_nob(erts_ptab_interval(ptab));
Uint64 *prev_x_interval_p = NULL;
ErtsPTabDeletedElement *ptdep;
diff --git a/erts/emulator/beam/erl_ptab.h b/erts/emulator/beam/erl_ptab.h
index e3e05f14af..4858cc8ab8 100644
--- a/erts/emulator/beam/erl_ptab.h
+++ b/erts/emulator/beam/erl_ptab.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2012-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2012-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -36,14 +37,16 @@
#include "erl_alloc.h"
#include "erl_monitors.h"
-#define ERTS_TRACER_PROC(P) ((P)->common.tracer_proc)
+#define ERTS_TRACER(P) ((P)->common.tracer)
+#define ERTS_TRACER_MODULE(T) (CAR(list_val(T)))
+#define ERTS_TRACER_STATE(T) (CDR(list_val(T)))
#define ERTS_TRACE_FLAGS(P) ((P)->common.trace_flags)
#define ERTS_P_LINKS(P) ((P)->common.u.alive.links)
#define ERTS_P_MONITORS(P) ((P)->common.u.alive.monitors)
#define IS_TRACED(p) \
- (ERTS_TRACER_PROC((p)) != NIL)
+ (ERTS_TRACER(p) != NIL)
#define ARE_TRACE_FLAGS_ON(p,tf) \
((ERTS_TRACE_FLAGS((p)) & (tf|F_SENSITIVE)) == (tf))
#define IS_TRACED_FL(p,tf) \
@@ -51,11 +54,13 @@
typedef struct {
Eterm id;
-#ifdef ERTS_SMP
- erts_atomic32_t refc;
-#endif
- Eterm tracer_proc;
+ union {
+ erts_atomic_t atmc;
+ Sint sint;
+ } refc;
+ ErtsTracer tracer;
Uint trace_flags;
+ erts_atomic_t timer;
union {
/* --- While being alive --- */
struct {
@@ -63,11 +68,6 @@ typedef struct {
struct reg_proc *reg;
ErtsLink *links;
ErtsMonitor *monitors;
-#ifdef ERTS_SMP
- ErtsSmpPTimer *ptimer;
-#else
- ErlTimer tm;
-#endif
} alive;
/* --- While being released --- */
@@ -78,7 +78,7 @@ typedef struct {
typedef struct ErtsPTabDeletedElement_ ErtsPTabDeletedElement;
typedef struct {
- erts_smp_rwmtx_t rwmtx;
+ erts_rwmtx_t rwmtx;
erts_interval_t interval;
struct {
ErtsPTabDeletedElement *start;
@@ -88,19 +88,15 @@ typedef struct {
} ErtsPTabListData;
typedef struct {
-#ifdef ARCH_32
- erts_smp_dw_atomic_t last_data;
-#else
- erts_smp_atomic_t last_data;
-#endif
- erts_smp_atomic32_t count;
- erts_smp_atomic32_t aid_ix;
- erts_smp_atomic32_t fid_ix;
+ erts_atomic64_t last_data;
+ erts_atomic32_t count;
+ erts_atomic32_t aid_ix;
+ erts_atomic32_t fid_ix;
} ErtsPTabVolatileData;
typedef struct {
- erts_smp_atomic_t *tab;
- erts_smp_atomic32_t *free_id_data;
+ erts_atomic_t *tab;
+ erts_atomic32_t *free_id_data;
Uint32 max;
Uint32 pix_mask;
Uint32 pix_cl_mask;
@@ -115,6 +111,7 @@ typedef struct {
Eterm invalid_data;
void (*release_element)(void *);
UWord element_size;
+ int atomic_refc;
} ErtsPTabReadOnlyData;
typedef struct {
@@ -171,7 +168,7 @@ typedef struct {
#define ERTS_PTAB_INVALID_ID(TAG) \
((Eterm) \
- ((((1 << ERTS_PTAB_ID_DATA_SIZE) - 1) << ERTS_PTAB_ID_DATA_SHIFT) \
+ ((((1U << ERTS_PTAB_ID_DATA_SIZE) - 1) << ERTS_PTAB_ID_DATA_SHIFT) \
| (TAG)))
#define erts_ptab_is_valid_id(ID) \
@@ -185,7 +182,8 @@ void erts_ptab_init_table(ErtsPTab *ptab,
int size,
UWord element_size,
char *name,
- int legacy);
+ int legacy,
+ int atomic_refc);
int erts_ptab_new_element(ErtsPTab *ptab,
ErtsPTabElementCommon *ptab_el,
void *init_arg,
@@ -210,17 +208,23 @@ ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_ddrb(ErtsPTab *ptab, int ix);
ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_rb(ErtsPTab *ptab, int ix);
ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_acqb(ErtsPTab *ptab, int ix);
ERTS_GLB_INLINE void erts_ptab_inc_refc(ErtsPTabElementCommon *ptab_el);
-ERTS_GLB_INLINE int erts_ptab_dec_test_refc(ErtsPTabElementCommon *ptab_el);
-ERTS_GLB_INLINE int erts_ptab_add_test_refc(ErtsPTabElementCommon *ptab_el,
- Sint32 add_refc);
+ERTS_GLB_INLINE Sint erts_ptab_dec_test_refc(ErtsPTabElementCommon *ptab_el);
+ERTS_GLB_INLINE Sint erts_ptab_add_test_refc(ErtsPTabElementCommon *ptab_el,
+ Sint add_refc);
+ERTS_GLB_INLINE Sint erts_ptab_read_refc(ErtsPTabElementCommon *ptab_el);
+ERTS_GLB_INLINE void erts_ptab_atmc_inc_refc(ErtsPTabElementCommon *ptab_el);
+ERTS_GLB_INLINE Sint erts_ptab_atmc_dec_test_refc(ErtsPTabElementCommon *ptab_el);
+ERTS_GLB_INLINE Sint erts_ptab_atmc_add_test_refc(ErtsPTabElementCommon *ptab_el,
+ Sint add_refc);
+ERTS_GLB_INLINE Sint erts_ptab_atmc_read_refc(ErtsPTabElementCommon *ptab_el);
ERTS_GLB_INLINE void erts_ptab_rlock(ErtsPTab *ptab);
ERTS_GLB_INLINE int erts_ptab_tryrlock(ErtsPTab *ptab);
ERTS_GLB_INLINE void erts_ptab_runlock(ErtsPTab *ptab);
ERTS_GLB_INLINE void erts_ptab_rwlock(ErtsPTab *ptab);
ERTS_GLB_INLINE int erts_ptab_tryrwlock(ErtsPTab *ptab);
ERTS_GLB_INLINE void erts_ptab_rwunlock(ErtsPTab *ptab);
-ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rlocked(ErtsPTab *ptab);
-ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rwlocked(ErtsPTab *ptab);
+ERTS_GLB_INLINE int erts_lc_ptab_is_rlocked(ErtsPTab *ptab);
+ERTS_GLB_INLINE int erts_lc_ptab_is_rwlocked(ErtsPTab *ptab);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -241,7 +245,7 @@ ERTS_GLB_INLINE int
erts_ptab_count(ErtsPTab *ptab)
{
int max = ptab->r.o.max;
- erts_aint32_t res = erts_smp_atomic32_read_nob(&ptab->vola.tile.count);
+ erts_aint32_t res = erts_atomic32_read_nob(&ptab->vola.tile.count);
if (max == ERTS_PTAB_MAX_SIZE) {
max--;
res--;
@@ -348,111 +352,124 @@ erts_ptab_id2data(ErtsPTab *ptab, Eterm id)
ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_nob(ErtsPTab *ptab, int ix)
{
ASSERT(0 <= ix && ix < ptab->r.o.max);
- return erts_smp_atomic_read_nob(&ptab->r.o.tab[ix]);
+ return erts_atomic_read_nob(&ptab->r.o.tab[ix]);
}
ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_ddrb(ErtsPTab *ptab, int ix)
{
ASSERT(0 <= ix && ix < ptab->r.o.max);
- return erts_smp_atomic_read_ddrb(&ptab->r.o.tab[ix]);
+ return erts_atomic_read_ddrb(&ptab->r.o.tab[ix]);
}
ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_rb(ErtsPTab *ptab, int ix)
{
ASSERT(0 <= ix && ix < ptab->r.o.max);
- return erts_smp_atomic_read_rb(&ptab->r.o.tab[ix]);
+ return erts_atomic_read_rb(&ptab->r.o.tab[ix]);
}
ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_acqb(ErtsPTab *ptab, int ix)
{
ASSERT(0 <= ix && ix < ptab->r.o.max);
- return erts_smp_atomic_read_acqb(&ptab->r.o.tab[ix]);
+ return erts_atomic_read_acqb(&ptab->r.o.tab[ix]);
}
-ERTS_GLB_INLINE void erts_ptab_inc_refc(ErtsPTabElementCommon *ptab_el)
+ERTS_GLB_INLINE void erts_ptab_atmc_inc_refc(ErtsPTabElementCommon *ptab_el)
{
-#ifdef ERTS_SMP
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_aint32_t refc = erts_atomic32_inc_read_nob(&ptab_el->refc);
- ERTS_SMP_LC_ASSERT(refc > 1);
+ erts_aint_t refc = erts_atomic_inc_read_nob(&ptab_el->refc.atmc);
+ ERTS_LC_ASSERT(refc > 1);
#else
- erts_atomic32_inc_nob(&ptab_el->refc);
-#endif
+ erts_atomic_inc_nob(&ptab_el->refc.atmc);
#endif
}
-ERTS_GLB_INLINE int erts_ptab_dec_test_refc(ErtsPTabElementCommon *ptab_el)
+ERTS_GLB_INLINE Sint erts_ptab_atmc_dec_test_refc(ErtsPTabElementCommon *ptab_el)
{
-#ifdef ERTS_SMP
- erts_aint32_t refc = erts_atomic32_dec_read_nob(&ptab_el->refc);
- ERTS_SMP_LC_ASSERT(refc >= 0);
- return (int) refc;
-#else
- return 0;
-#endif
+ erts_aint_t refc = erts_atomic_dec_read_relb(&ptab_el->refc.atmc);
+ ERTS_LC_ASSERT(refc >= 0);
+ if (refc == 0)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+ return (Sint) refc;
}
-ERTS_GLB_INLINE int erts_ptab_add_test_refc(ErtsPTabElementCommon *ptab_el,
- Sint32 add_refc)
+ERTS_GLB_INLINE Sint erts_ptab_atmc_add_test_refc(ErtsPTabElementCommon *ptab_el,
+ Sint add_refc)
{
-#ifdef ERTS_SMP
- erts_aint32_t refc;
+ erts_aint_t refc = erts_atomic_add_read_mb(&ptab_el->refc.atmc,
+ (erts_aint_t) add_refc);
+ ERTS_LC_ASSERT(refc >= 0);
+ return (Sint) refc;
+}
-#ifndef ERTS_ENABLE_LOCK_CHECK
- if (add_refc >= 0) {
- erts_atomic32_add_nob(&ptab_el->refc,
- (erts_aint32_t) add_refc);
- return 1;
- }
-#endif
+ERTS_GLB_INLINE Sint erts_ptab_atmc_read_refc(ErtsPTabElementCommon *ptab_el)
+{
+ return (Sint) erts_atomic_read_nob(&ptab_el->refc.atmc);
+}
- refc = erts_atomic32_add_read_nob(&ptab_el->refc,
- (erts_aint32_t) add_refc);
- ERTS_SMP_LC_ASSERT(refc >= 0);
- return (int) refc;
-#else
- return 0;
-#endif
+ERTS_GLB_INLINE void erts_ptab_inc_refc(ErtsPTabElementCommon *ptab_el)
+{
+ ptab_el->refc.sint++;
+ ASSERT(ptab_el->refc.sint > 1);
+}
+
+ERTS_GLB_INLINE Sint erts_ptab_dec_test_refc(ErtsPTabElementCommon *ptab_el)
+{
+ Sint refc = --ptab_el->refc.sint;
+ ERTS_LC_ASSERT(refc >= 0);
+ return refc;
+}
+
+ERTS_GLB_INLINE Sint erts_ptab_add_test_refc(ErtsPTabElementCommon *ptab_el,
+ Sint add_refc)
+{
+ ptab_el->refc.sint += add_refc;
+ ERTS_LC_ASSERT(ptab_el->refc.sint >= 0);
+ return (Sint) ptab_el->refc.sint;
+}
+
+ERTS_GLB_INLINE Sint erts_ptab_read_refc(ErtsPTabElementCommon *ptab_el)
+{
+ return ptab_el->refc.sint;
}
ERTS_GLB_INLINE void erts_ptab_rlock(ErtsPTab *ptab)
{
- erts_smp_rwmtx_rlock(&ptab->list.data.rwmtx);
+ erts_rwmtx_rlock(&ptab->list.data.rwmtx);
}
ERTS_GLB_INLINE int erts_ptab_tryrlock(ErtsPTab *ptab)
{
- return erts_smp_rwmtx_tryrlock(&ptab->list.data.rwmtx);
+ return erts_rwmtx_tryrlock(&ptab->list.data.rwmtx);
}
ERTS_GLB_INLINE void erts_ptab_runlock(ErtsPTab *ptab)
{
- erts_smp_rwmtx_runlock(&ptab->list.data.rwmtx);
+ erts_rwmtx_runlock(&ptab->list.data.rwmtx);
}
ERTS_GLB_INLINE void erts_ptab_rwlock(ErtsPTab *ptab)
{
- erts_smp_rwmtx_rwlock(&ptab->list.data.rwmtx);
+ erts_rwmtx_rwlock(&ptab->list.data.rwmtx);
}
ERTS_GLB_INLINE int erts_ptab_tryrwlock(ErtsPTab *ptab)
{
- return erts_smp_rwmtx_tryrwlock(&ptab->list.data.rwmtx);
+ return erts_rwmtx_tryrwlock(&ptab->list.data.rwmtx);
}
ERTS_GLB_INLINE void erts_ptab_rwunlock(ErtsPTab *ptab)
{
- erts_smp_rwmtx_rwunlock(&ptab->list.data.rwmtx);
+ erts_rwmtx_rwunlock(&ptab->list.data.rwmtx);
}
-ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rlocked(ErtsPTab *ptab)
+ERTS_GLB_INLINE int erts_lc_ptab_is_rlocked(ErtsPTab *ptab)
{
- return erts_smp_lc_rwmtx_is_rlocked(&ptab->list.data.rwmtx);
+ return erts_lc_rwmtx_is_rlocked(&ptab->list.data.rwmtx);
}
-ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rwlocked(ErtsPTab *ptab)
+ERTS_GLB_INLINE int erts_lc_ptab_is_rwlocked(ErtsPTab *ptab)
{
- return erts_smp_lc_rwmtx_is_rwlocked(&ptab->list.data.rwmtx);
+ return erts_lc_rwmtx_is_rwlocked(&ptab->list.data.rwmtx);
}
#endif
diff --git a/erts/emulator/beam/erl_rbtree.h b/erts/emulator/beam/erl_rbtree.h
new file mode 100644
index 0000000000..e59d6900b0
--- /dev/null
+++ b/erts/emulator/beam/erl_rbtree.h
@@ -0,0 +1,1757 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2015-2017. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: A Red-Black (binary search) Tree implementation. The search,
+ * insert, and delete operations are all O(log n) operations
+ * on a Red-Black Tree. Red-Black Trees are described in
+ * "Introduction to Algorithms", by Thomas H. Cormen, Charles
+ * E. Leiserson, and Ronald L. Rivest.
+ *
+ * Use by defining the mandatory defines as well as defines for
+ * the API functions wanted, and then include this header (a
+ * minimal instantiation sketch follows this comment block).
+ *
+ * Author: Rickard Green
+ *
+ *
+ * Mandatory defines:
+ * - ERTS_RBT_PREFIX - Prefix to use on functions.
+ * - ERTS_RBT_T - Type of a tree node.
+ * - ERTS_RBT_KEY_T - Type of key for a tree node.
+ * - ERTS_RBT_FLAGS_T - Type of flags for a tree node.
+ * - ERTS_RBT_INIT_EMPTY_TNODE(T) - Initialize an empty tree node.
+ * - ERTS_RBT_IS_RED(T) - Is tree node red?
+ * - ERTS_RBT_SET_RED(T) - Set tree node red.
+ * - ERTS_RBT_IS_BLACK(T) - Is tree node black?
+ * - ERTS_RBT_SET_BLACK(T) - Set tree node black.
+ * - ERTS_RBT_GET_FLAGS(T) - Get flags of tree node (incl colors).
+ * - ERTS_RBT_SET_FLAGS(T, F) - Set flags of tree node.
+ * - ERTS_RBT_GET_PARENT(T) - Get parent node.
+ * - ERTS_RBT_SET_PARENT(T, P) - Set parent node.
+ * - ERTS_RBT_GET_RIGHT(T) - Get right child node.
+ * - ERTS_RBT_SET_RIGHT(T, R) - Set right child node.
+ * - ERTS_RBT_GET_LEFT(T) - Get left child node.
+ * - ERTS_RBT_SET_LEFT(T, L) - Set left child node.
+ * - ERTS_RBT_GET_KEY(T) - Get key of node.
+ * - ERTS_RBT_IS_LT(KX, KY) - Is key KX less than key KY?
+ * - ERTS_RBT_IS_EQ(KX, KY) - Is key KX equal to key KY?
+ *
+ * Optional defines:
+ *
+ * - ERTS_RBT_UNDEF - Undefine all user-defined ERTS_RBT_*
+ * defines after use.
+ *
+ * - ERTS_RBT_NO_API_INLINE - Do not inline API functions.
+ *
+ * Attached data management:
+ * - ERTS_RBT_UPDATE_ATTACHED_DATA_ROTATE(L, OP, NP) - Called
+ * when a rotate operation has been performed. If L (an int)
+ * is non-zero, a left rotation was performed; otherwise,
+ * a right rotation was performed. OP points to the old
+ * parent node and NP points to the new parent node.
+ * - ERTS_RBT_UPDATE_ATTACHED_DATA_DMOD(F, T) - Called when
+ * a delete operation modifies a tree node. A delete
+ * modification is either a removal or replacement of a
+ * node. F points to the parent of the tree node that was
+ * modified. T points to the next ancestor that will be
+ * modified. If T is NULL, no more removals and/or
+ * replacements will be made. One typically wants to update
+ * the attached data of each node between F and T, or, if T
+ * is NULL, all the way up to the root.
+ * - ERTS_RBT_UPDATE_ATTACHED_DATA_CHGROOT(OR, NR) - Called
+ * when the root node changes. OR points to the old
+ * root node and NR points to the new root node.
+ *
+ * Request implementation of API functions:
+ * - ERTS_RBT_WANT_DELETE
+ * - ERTS_RBT_WANT_INSERT
+ * - ERTS_RBT_WANT_LOOKUP_INSERT
+ * - ERTS_RBT_WANT_REPLACE
+ * - ERTS_RBT_WANT_LOOKUP
+ * - ERTS_RBT_WANT_SMALLEST
+ * - ERTS_RBT_WANT_LARGEST
+ * - ERTS_RBT_WANT_FOREACH
+ * - ERTS_RBT_WANT_FOREACH_DESTROY
+ * - ERTS_RBT_WANT_FOREACH_YIELDING
+ * - ERTS_RBT_WANT_FOREACH_DESTROY_YIELDING
+ * - ERTS_RBT_WANT_FOREACH_SMALL
+ * - ERTS_RBT_WANT_FOREACH_LARGE
+ * - ERTS_RBT_WANT_FOREACH_SMALL_DESTROY
+ * - ERTS_RBT_WANT_FOREACH_LARGE_DESTROY
+ * - ERTS_RBT_WANT_FOREACH_SMALL_YIELDING
+ * - ERTS_RBT_WANT_FOREACH_LARGE_YIELDING
+ * - ERTS_RBT_WANT_FOREACH_SMALL_DESTROY_YIELDING
+ * - ERTS_RBT_WANT_FOREACH_LARGE_DESTROY_YIELDING
+ * - ERTS_RBT_WANT_DEBUG_PRINT
+ *
+ * The yield state data type will equal
+ * <ERTS_RBT_PREFIX>_rbt_yield_state_t.
+ *
+ * The yield state should be statically initialized by
+ * ERTS_RBT_YIELD_STAT_INITER
+ *
+ * or dynamically initialized with
+ * ERTS_RBT_YIELD_STAT_INIT(<ERTS_RBT_PREFIX>_rbt_yield_state_t *ystate)
+ * (a usage sketch of the yielding protocol follows this comment block).
+ *
+ *
+ * The following API functions are implemented if corresponding
+ * ERTS_RBT_WANT_<OPERATION> is defined:
+ *
+ * - void <ERTS_RBT_PREFIX>_rbt_delete(
+ * ERTS_RBT_T **tree,
+ * ERTS_RBT_T *element);
+ * Delete element from tree.
+ *
+ * - void <ERTS_RBT_PREFIX>_rbt_insert(
+ * ERTS_RBT_T **tree,
+ * ERTS_RBT_T *element);
+ * Insert element into tree.
+ *
+ * - ERTS_RBT_T * <ERTS_RBT_PREFIX>_rbt_lookup_insert(
+ * ERTS_RBT_T **tree,
+ * ERTS_RBT_T *element);
+ * Look up an element in the tree that compares as equal to the
+ * element passed as argument, and return the looked up element.
+ * If no element compares as equal, insert the element passed as
+ * argument into the tree, and return NULL.
+ *
+ * - void <ERTS_RBT_PREFIX>_rbt_replace(
+ * ERTS_RBT_T **tree,
+ * ERTS_RBT_T *old_element,
+ * ERTS_RBT_T *new_element);
+ * Replace old_element in the tree with new_element. Both elements
+ * *should* compare as equal.
+ *
+ * - ERTS_RBT_T * <ERTS_RBT_PREFIX>_rbt_lookup(
+ * ERTS_RBT_T *tree,
+ * ERTS_RBT_KEY_T key);
+ * Look up an element with a key that compares as equal to
+ * the key passed as argument.
+ *
+ * - ERTS_RBT_T * <ERTS_RBT_PREFIX>_rbt_smallest(
+ * ERTS_RBT_T *tree);
+ * Look up the element with the smallest key.
+ *
+ * - ERTS_RBT_T * <ERTS_RBT_PREFIX>_rbt_largest(
+ * ERTS_RBT_T *tree);
+ * Look up the element with the largest key.
+ *
+ * - void <ERTS_RBT_PREFIX>_rbt_foreach(
+ * ERTS_RBT_T *tree,
+ * void (*op)(ERTS_RBT_T *, void *),
+ * void *arg);
+ * Operate by calling the operator 'op' on each element.
+ * Order is undefined.
+ *
+ * 'arg' is passed as argument to 'op'.
+ *
+ * - void <ERTS_RBT_PREFIX>_rbt_foreach_destroy(
+ * ERTS_RBT_T *tree,
+ * void (*op)(ERTS_RBT_T *, void *),
+ * void *arg);
+ * Operate by calling the operator 'op' on each element.
+ * Order is undefined. Each element should be destroyed
+ * by 'op'.
+ *
+ * 'arg' is passed as argument to 'op'.
+ *
+ * - int <ERTS_RBT_PREFIX>_rbt_foreach_yielding(
+ * ERTS_RBT_T *tree,
+ * void (*op)(ERTS_RBT_T *, void *),
+ * void *arg,
+ * <ERTS_RBT_PREFIX>_rbt_yield_state_t *ystate,
+ * Sint ylimit);
+ * Operate by calling the operator 'op' on each element.
+ * Order is undefined.
+ *
+ * Yield when 'ylimit' elements have been processed. True is
+ * returned when yielding, and false is returned when
+ * the whole tree has been processed. The tree should not be
+ * modified until all of it has been processed.
+ *
+ * 'arg' is passed as argument to 'op'.
+ *
+ * - int <ERTS_RBT_PREFIX>_rbt_foreach_destroy_yielding(
+ * ERTS_RBT_T *tree,
+ * void (*op)(ERTS_RBT_T *, void *),
+ * void *arg,
+ * <ERTS_RBT_PREFIX>_rbt_yield_state_t *ystate,
+ * Sint ylimit);
+ * Operate by calling the operator 'op' on each element.
+ * Order is undefined. Each element should be destroyed
+ * by 'op'.
+ *
+ * Yield when 'ylimit' elements have been processed. True is
+ * returned when yielding, and false is returned when
+ * the whole tree has been processed.
+ *
+ * 'arg' is passed as argument to 'op'.
+ *
+ * - void <ERTS_RBT_PREFIX>_rbt_foreach_small(
+ * ERTS_RBT_T *tree,
+ * void (*op)(ERTS_RBT_T *, void *),
+ * void *arg);
+ * Operate by calling the operator 'op' on each element from
+ * smallest towards larger elements.
+ *
+ * 'arg' is passed as argument to 'op'.
+ *
+ * - void <ERTS_RBT_PREFIX>_rbt_foreach_large(
+ * ERTS_RBT_T *tree,
+ * void (*op)(ERTS_RBT_T *, void *),
+ * void *arg);
+ * Operate by calling the operator 'op' on each element from
+ * largest towards smaller elements.
+ *
+ * 'arg' is passed as argument to 'op'.
+ *
+ * - int <ERTS_RBT_PREFIX>_rbt_foreach_small_yielding(
+ * ERTS_RBT_T *tree,
+ * void (*op)(ERTS_RBT_T *, void *),
+ * void *arg,
+ * <ERTS_RBT_PREFIX>_rbt_yield_state_t *ystate,
+ * Sint ylimit);
+ * Operate by calling the operator 'op' on each element from
+ * smallest towards larger elements.
+ *
+ * Yield when 'ylimit' elements have been processed. True is
+ * returned when yielding, and false is returned when
+ * the whole tree has been processed. The tree should not be
+ * modified until all of it has been processed.
+ *
+ * 'arg' is passed as argument to 'op'.
+ *
+ * - int <ERTS_RBT_PREFIX>_rbt_foreach_large_yielding(
+ * ERTS_RBT_T *tree,
+ * void (*op)(ERTS_RBT_T *, void *),
+ * void *arg,
+ * <ERTS_RBT_PREFIX>_rbt_yield_state_t *ystate,
+ * Sint ylimit);
+ * Operate by calling the operator 'op' on each element from
+ * largest towards smaller elements.
+ *
+ * Yield when 'ylimit' elements have been processed. True is
+ * returned when yielding, and false is returned when
+ * the whole tree has been processed. The tree should not be
+ * modified until all of it has been processed.
+ *
+ * 'arg' is passed as argument to 'op'.
+ *
+ * - void <ERTS_RBT_PREFIX>_rbt_foreach_small_destroy(
+ * ERTS_RBT_T **tree,
+ * void (*op)(ERTS_RBT_T *, void *),
+ * void (*destr)(ERTS_RBT_T *, void *),
+ * void *arg);
+ * Operate by calling the operator 'op' on each element from
+ * smallest towards larger elements.
+ *
+ * Destroy elements by calling the destructor 'destr'. Elements
+ * are destroyed when not needed by the tree structure anymore.
+ * Note that elements are often destroyed in a different order
+ * than the order in which the elements are operated on.
+ *
+ * 'arg' is passed as argument to 'op' and 'destr'.
+ *
+ * - void <ERTS_RBT_PREFIX>_rbt_foreach_large_destroy(
+ * ERTS_RBT_T **tree,
+ * void (*op)(ERTS_RBT_T *, void *),
+ * void (*destr)(ERTS_RBT_T *, void *),
+ * void *arg);
+ * Operate by calling the operator 'op' on each element from
+ * largest towards smaller elements.
+ *
+ * Destroy elements by calling the destructor 'destr'. Elements
+ * are destroyed when not needed by the tree structure anymore.
+ * Note that elements are often destroyed in a different order
+ * than the order in which the elements are operated on.
+ *
+ * 'arg' is passed as argument to 'op' and 'destr'.
+ *
+ * - int <ERTS_RBT_PREFIX>_rbt_foreach_small_destroy_yielding(
+ * ERTS_RBT_T **tree,
+ * void (*op)(ERTS_RBT_T *, void *),
+ * void (*destr)(ERTS_RBT_T *, void *),
+ * void *arg,
+ * <ERTS_RBT_PREFIX>_rbt_yield_state_t *ystate,
+ * Sint ylimit);
+ * Operate by calling the operator 'op' on each element from
+ * smallest towards larger elements.
+ *
+ * Destroy elements by calling the destructor 'destr'. Elements
+ * are destroyed when not needed by the tree structure anymore.
+ * Note that elements are often destroyed in a different order
+ * than the order in which the elements are operated on.
+ *
+ * Yield when 'ylimit' elements have been processed. True is
+ * returned when yielding, and false is returned when
+ * the whole tree has been processed. The tree should not be
+ * modified until all of it has been processed.
+ *
+ * 'arg' is passed as argument to 'op' and 'destr'.
+ *
+ * - int <ERTS_RBT_PREFIX>_rbt_foreach_large_destroy_yielding(
+ * ERTS_RBT_T **tree,
+ * void (*op)(ERTS_RBT_T *, void *),
+ * void (*destr)(ERTS_RBT_T *, void *),
+ * void *arg,
+ * <ERTS_RBT_PREFIX>_rbt_yield_state_t *ystate,
+ * Sint ylimit);
+ * Operate by calling the operator 'op' on each element from
+ * largest towards smaller elements.
+ *
+ * Destroy elements by calling the destructor 'destr'. Elements
+ * are destroyed when not needed by the tree structure anymore.
+ * Note that elements are often destroyed in a different order
+ * than the order in which the elements are operated on.
+ *
+ * Yield when 'ylimit' elements have been processed. True is
+ * returned when yielding, and false is returned when
+ * the whole tree has been processed. The tree should not be
+ * modified until all of it has been processed.
+ *
+ * 'arg' is passed as argument to 'op' and 'destr'.
+ *
+ * - void <ERTS_RBT_PREFIX>_rbt_debug_print(
+ * FILE *filep,
+ * ERTS_RBT_T *x,
+ * int indent,
+ * void (*print_node)(ERTS_RBT_T *));
+ * Prints the tree. Note that this function is recursive.
+ * Should only be used for debugging.
+ */
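For orientation, here is a minimal instantiation sketch of the template described above. This is an editorial illustration, not part of the patch; the names mytree and mytree_node, and the single 'red' color flag layout, are hypothetical:

    typedef struct mytree_node_ {
        struct mytree_node_ *parent, *left, *right;
        int red;                       /* non-zero when the node is red */
        long key;
    } mytree_node;

    #define ERTS_RBT_PREFIX mytree
    #define ERTS_RBT_T mytree_node
    #define ERTS_RBT_KEY_T long
    #define ERTS_RBT_FLAGS_T int
    #define ERTS_RBT_INIT_EMPTY_TNODE(T) \
        ((T)->parent = (T)->left = (T)->right = NULL, (T)->red = 0)
    #define ERTS_RBT_IS_RED(T) ((T)->red)
    #define ERTS_RBT_SET_RED(T) ((T)->red = 1)
    #define ERTS_RBT_IS_BLACK(T) (!(T)->red)
    #define ERTS_RBT_SET_BLACK(T) ((T)->red = 0)
    #define ERTS_RBT_GET_FLAGS(T) ((T)->red)
    #define ERTS_RBT_SET_FLAGS(T, F) ((T)->red = (F))
    #define ERTS_RBT_GET_PARENT(T) ((T)->parent)
    #define ERTS_RBT_SET_PARENT(T, P) ((T)->parent = (P))
    #define ERTS_RBT_GET_RIGHT(T) ((T)->right)
    #define ERTS_RBT_SET_RIGHT(T, R) ((T)->right = (R))
    #define ERTS_RBT_GET_LEFT(T) ((T)->left)
    #define ERTS_RBT_SET_LEFT(T, L) ((T)->left = (L))
    #define ERTS_RBT_GET_KEY(T) ((T)->key)
    #define ERTS_RBT_IS_LT(KX, KY) ((KX) < (KY))
    #define ERTS_RBT_IS_EQ(KX, KY) ((KX) == (KY))
    #define ERTS_RBT_WANT_INSERT
    #define ERTS_RBT_WANT_LOOKUP
    #define ERTS_RBT_WANT_DELETE
    #define ERTS_RBT_WANT_FOREACH_YIELDING
    #define ERTS_RBT_UNDEF     /* undefine ERTS_RBT_* parameters after use */
    #include "erl_rbtree.h"

Including the header with these defines in place generates static mytree_rbt_insert(), mytree_rbt_lookup(), mytree_rbt_delete() and mytree_rbt_foreach_yielding() functions; ERTS_RBT_UNDEF lets another tree type be instantiated later in the same file.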
+
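The yielding variants implement a resumable walk: the caller owns a yield state, calls the function repeatedly until it returns false, and must not modify the tree in between. A hedged usage sketch, assuming the hypothetical mytree instantiation above:

    static void count_op(mytree_node *n, void *arg)
    {
        (*(Sint *) arg)++;       /* visit one node; visit order undefined */
    }

    static Sint count_all_nodes(mytree_node *tree)
    {
        mytree_rbt_yield_state_t ys = ERTS_RBT_YIELD_STAT_INITER;
        Sint count = 0;
        /* A true return value means 'ylimit' (here 64) elements were
         * processed and the walk yielded; real code would return control
         * to the scheduler here and resume later instead of looping on. */
        while (mytree_rbt_foreach_yielding(tree, count_op, &count, &ys, 64))
            ;
        return count;
    }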
+
+/*
+ * Check that we have all mandatory defines
+ */
+#ifndef ERTS_RBT_PREFIX
+# error Missing definition of ERTS_RBT_PREFIX
+#endif
+#ifndef ERTS_RBT_T
+# error Missing definition of ERTS_RBT_T
+#endif
+#ifndef ERTS_RBT_KEY_T
+# error Missing definition of ERTS_RBT_KEY_T
+#endif
+#ifndef ERTS_RBT_FLAGS_T
+# error Missing definition of ERTS_RBT_FLAGS_T
+#endif
+#ifndef ERTS_RBT_INIT_EMPTY_TNODE
+# error Missing definition of ERTS_RBT_INIT_EMPTY_TNODE
+#endif
+#ifndef ERTS_RBT_IS_RED
+# error Missing definition of ERTS_RBT_IS_RED
+#endif
+#ifndef ERTS_RBT_SET_RED
+# error Missing definition of ERTS_RBT_SET_RED
+#endif
+#ifndef ERTS_RBT_IS_BLACK
+# error Missing definition of ERTS_RBT_IS_BLACK
+#endif
+#ifndef ERTS_RBT_SET_BLACK
+# error Missing definition of ERTS_RBT_SET_BLACK
+#endif
+#ifndef ERTS_RBT_GET_FLAGS
+# error Missing definition of ERTS_RBT_GET_FLAGS
+#endif
+#ifndef ERTS_RBT_SET_FLAGS
+# error Missing definition of ERTS_RBT_SET_FLAGS
+#endif
+#ifndef ERTS_RBT_GET_PARENT
+# error Missing definition of ERTS_RBT_GET_PARENT
+#endif
+#ifndef ERTS_RBT_SET_PARENT
+# error Missing definition of ERTS_RBT_SET_PARENT
+#endif
+#ifndef ERTS_RBT_GET_RIGHT
+# error Missing definition of ERTS_RBT_GET_RIGHT
+#endif
+#ifndef ERTS_RBT_GET_LEFT
+# error Missing definition of ERTS_RBT_GET_LEFT
+#endif
+#ifndef ERTS_RBT_IS_LT
+# error Missing definition of ERTS_RBT_IS_LT
+#endif
+#ifndef ERTS_RBT_GET_KEY
+# error Missing definition of ERTS_RBT_GET_KEY
+#endif
+#ifndef ERTS_RBT_IS_EQ
+# error Missing definition of ERTS_RBT_IS_EQ
+#endif
+
+#if defined(ERTS_RBT_HARD_DEBUG) || defined(DEBUG)
+# ifndef ERTS_RBT_DEBUG
+# define ERTS_RBT_DEBUG 1
+# endif
+#endif
+
+#if defined(ERTS_RBT_HARD_DEBUG) && defined(__GNUC__)
+#warning "* * * * * * * * * * * * * * * * * *"
+#warning "* ERTS_RBT_HARD_DEBUG IS ENABLED! *"
+#warning "* * * * * * * * * * * * * * * * * *"
+#endif
+
+#undef ERTS_RBT_ASSERT
+#if defined(ERTS_RBT_DEBUG)
+#define ERTS_RBT_ASSERT(E) ERTS_ASSERT(E)
+#else
+#define ERTS_RBT_ASSERT(E) ((void) 1)
+#endif
+
+#undef ERTS_RBT_API_INLINE__
+#if defined(ERTS_RBT_NO_API_INLINE) || defined(ERTS_RBT_DEBUG)
+# define ERTS_RBT_API_INLINE__
+#else
+# define ERTS_RBT_API_INLINE__ ERTS_INLINE
+#endif
+
+#ifndef ERTS_RBT_YIELD_STAT_INITER
+# define ERTS_RBT_YIELD_STAT_INITER {NULL, 0}
+#endif
+#ifndef ERTS_RBT_YIELD_STAT_INIT
+# define ERTS_RBT_YIELD_STAT_INIT(YS) \
+ do { \
+ (YS)->x = NULL; \
+ (YS)->up = 0; \
+ } while (0)
+#endif
+
+#define ERTS_RBT_CONCAT_MACRO_VALUES___(X, Y) \
+ X ## Y
+#define ERTS_RBT_CONCAT_MACRO_VALUES__(X, Y) \
+ ERTS_RBT_CONCAT_MACRO_VALUES___(X, Y)
+
+#undef ERTS_RBT_YIELD_STATE_T__
+#define ERTS_RBT_YIELD_STATE_T__ \
+ ERTS_RBT_CONCAT_MACRO_VALUES__(ERTS_RBT_PREFIX, _rbt_yield_state_t)
+
+typedef struct {
+ ERTS_RBT_T *x;
+ int up;
+} ERTS_RBT_YIELD_STATE_T__;
+
+#define ERTS_RBT_FUNC__(Name) \
+ ERTS_RBT_CONCAT_MACRO_VALUES__(ERTS_RBT_PREFIX, _rbt_ ## Name)
+
+#undef ERTS_RBT_NEED_REPLACE__
+#undef ERTS_RBT_NEED_INSERT__
+#undef ERTS_RBT_NEED_ROTATE__
+#undef ERTS_RBT_NEED_FOREACH_UNORDERED__
+#undef ERTS_RBT_NEED_FOREACH_ORDERED__
+#undef ERTS_RBT_NEED_HDBG_CHECK_TREE__
+#undef ERTS_RBT_HDBG_CHECK_TREE__
+
+#if defined(ERTS_RBT_WANT_REPLACE) || defined(ERTS_RBT_WANT_DELETE)
+# define ERTS_RBT_NEED_REPLACE__
+#endif
+#if defined(ERTS_RBT_WANT_INSERT) || defined(ERTS_RBT_WANT_LOOKUP_INSERT)
+# define ERTS_RBT_NEED_INSERT__
+#endif
+#if defined(ERTS_RBT_WANT_DELETE) || defined(ERTS_RBT_NEED_INSERT__)
+# define ERTS_RBT_NEED_ROTATE__
+#endif
+#if defined(ERTS_RBT_WANT_FOREACH) \
+ || defined(ERTS_RBT_WANT_FOREACH_YIELDING) \
+ || defined(ERTS_RBT_WANT_FOREACH_DESTROY) \
+ || defined(ERTS_RBT_WANT_FOREACH_DESTROY_YIELDING)
+# define ERTS_RBT_NEED_FOREACH_UNORDERED__
+#endif
+#if defined(ERTS_RBT_WANT_FOREACH_SMALL) \
+ || defined(ERTS_RBT_WANT_FOREACH_LARGE) \
+ || defined(ERTS_RBT_WANT_FOREACH_SMALL_YIELDING) \
+ || defined(ERTS_RBT_WANT_FOREACH_LARGE_YIELDING) \
+ || defined(ERTS_RBT_WANT_FOREACH_SMALL_DESTROY) \
+ || defined(ERTS_RBT_WANT_FOREACH_LARGE_DESTROY) \
+ || defined(ERTS_RBT_WANT_FOREACH_SMALL_DESTROY_YIELDING) \
+ || defined(ERTS_RBT_WANT_FOREACH_LARGE_DESTROY_YIELDING)
+# define ERTS_RBT_NEED_FOREACH_ORDERED__
+#endif
+#if defined(ERTS_RBT_HARD_DEBUG) \
+ && (defined(ERTS_RBT_WANT_DELETE) \
+ || defined(ERTS_RBT_NEED_INSERT__))
+static void ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root, ERTS_RBT_T *node);
+# define ERTS_RBT_NEED_HDBG_CHECK_TREE__
+# define ERTS_RBT_HDBG_CHECK_TREE__(R,N) \
+ ERTS_RBT_FUNC__(hdbg_check_tree)((R),(N))
+#else
+# define ERTS_RBT_HDBG_CHECK_TREE__(R,N) ((void) 1)
+#endif
+
+#ifdef ERTS_RBT_NEED_ROTATE__
+
+static ERTS_INLINE void
+ERTS_RBT_FUNC__(left_rotate__)(ERTS_RBT_T **root, ERTS_RBT_T *x)
+{
+ ERTS_RBT_T *y, *l, *p;
+
+ y = ERTS_RBT_GET_RIGHT(x);
+ l = ERTS_RBT_GET_LEFT(y);
+ ERTS_RBT_SET_RIGHT(x, l);
+
+ if (l)
+ ERTS_RBT_SET_PARENT(l, x);
+
+ p = ERTS_RBT_GET_PARENT(x);
+ ERTS_RBT_SET_PARENT(y, p);
+
+ if (!p) {
+ ERTS_RBT_ASSERT(*root == x);
+ *root = y;
+#ifdef ERTS_RBT_UPDATE_ATTACHED_DATA_CHGROOT
+ ERTS_RBT_UPDATE_ATTACHED_DATA_CHGROOT(x, y);
+#endif
+ }
+ else if (x == ERTS_RBT_GET_LEFT(p))
+ ERTS_RBT_SET_LEFT(p, y);
+ else {
+ ERTS_RBT_ASSERT(x == ERTS_RBT_GET_RIGHT(p));
+ ERTS_RBT_SET_RIGHT(p, y);
+ }
+ ERTS_RBT_SET_LEFT(y, x);
+ ERTS_RBT_SET_PARENT(x, y);
+
+#ifdef ERTS_RBT_UPDATE_ATTACHED_DATA_ROTATE
+ ERTS_RBT_UPDATE_ATTACHED_DATA_ROTATE(!0, x, y);
+#endif
+
+}
+
+static ERTS_INLINE void
+ERTS_RBT_FUNC__(right_rotate__)(ERTS_RBT_T **root, ERTS_RBT_T *x)
+{
+ ERTS_RBT_T *y, *r, *p;
+
+ y = ERTS_RBT_GET_LEFT(x);
+ r = ERTS_RBT_GET_RIGHT(y);
+ ERTS_RBT_SET_LEFT(x, r);
+
+ if (r)
+ ERTS_RBT_SET_PARENT(r, x);
+
+ p = ERTS_RBT_GET_PARENT(x);
+ ERTS_RBT_SET_PARENT(y, p);
+
+ if (!p) {
+ ERTS_RBT_ASSERT(*root == x);
+ *root = y;
+#ifdef ERTS_RBT_UPDATE_ATTACHED_DATA_CHGROOT
+ ERTS_RBT_UPDATE_ATTACHED_DATA_CHGROOT(x, y);
+#endif
+ }
+ else if (x == ERTS_RBT_GET_RIGHT(p))
+ ERTS_RBT_SET_RIGHT(p, y);
+ else {
+ ERTS_RBT_ASSERT(x == ERTS_RBT_GET_LEFT(p));
+ ERTS_RBT_SET_LEFT(p, y);
+ }
+
+ ERTS_RBT_SET_RIGHT(y, x);
+ ERTS_RBT_SET_PARENT(x, y);
+
+#ifdef ERTS_RBT_UPDATE_ATTACHED_DATA_ROTATE
+ ERTS_RBT_UPDATE_ATTACHED_DATA_ROTATE(0, x, y);
+#endif
+
+}
+
+#endif /* ERTS_RBT_NEED_ROTATE__ */
+
+#ifdef ERTS_RBT_NEED_REPLACE__
+
+/*
+ * Replace node x with node y
+ */
+static ERTS_INLINE void
+ERTS_RBT_FUNC__(replace__)(ERTS_RBT_T **root, ERTS_RBT_T *x, ERTS_RBT_T *y)
+{
+ ERTS_RBT_T *p, *r, *l;
+ ERTS_RBT_FLAGS_T f;
+
+ p = ERTS_RBT_GET_PARENT(x);
+ if (!p) {
+ ERTS_RBT_ASSERT(*root == x);
+ *root = y;
+#ifdef ERTS_RBT_UPDATE_ATTACHED_DATA_CHGROOT
+ ERTS_RBT_UPDATE_ATTACHED_DATA_CHGROOT(x, y);
+#endif
+ }
+ else if (x == ERTS_RBT_GET_LEFT(p))
+ ERTS_RBT_SET_LEFT(p, y);
+ else {
+ ERTS_RBT_ASSERT(x == ERTS_RBT_GET_RIGHT(p));
+ ERTS_RBT_SET_RIGHT(p, y);
+ }
+ l = ERTS_RBT_GET_LEFT(x);
+ if (l) {
+ ERTS_RBT_ASSERT(ERTS_RBT_GET_PARENT(l) == x);
+ ERTS_RBT_SET_PARENT(l, y);
+ }
+ r = ERTS_RBT_GET_RIGHT(x);
+ if (r) {
+ ERTS_RBT_ASSERT(ERTS_RBT_GET_PARENT(r) == x);
+ ERTS_RBT_SET_PARENT(r, y);
+ }
+
+ f = ERTS_RBT_GET_FLAGS(x);
+ ERTS_RBT_SET_FLAGS(y, f);
+ ERTS_RBT_SET_PARENT(y, p);
+ ERTS_RBT_SET_RIGHT(y, r);
+ ERTS_RBT_SET_LEFT(y, l);
+}
+
+#endif /* ERTS_RBT_NEED_REPLACE__ */
+
+#ifdef ERTS_RBT_WANT_REPLACE
+
+static ERTS_RBT_API_INLINE__ void
+ERTS_RBT_FUNC__(replace)(ERTS_RBT_T **root, ERTS_RBT_T *x, ERTS_RBT_T *y)
+{
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_EQ(ERTS_RBT_GET_KEY(x),
+ ERTS_RBT_GET_KEY(y)));
+
+ ERTS_RBT_FUNC__(replace__)(root, x, y);
+}
+
+#endif /* ERTS_RBT_WANT_REPLACE */
+
+#ifdef ERTS_RBT_WANT_DELETE
+
+/*
+ * Delete a node.
+ */
+static ERTS_RBT_API_INLINE__ void
+ERTS_RBT_FUNC__(delete)(ERTS_RBT_T **root, ERTS_RBT_T *n)
+{
+ int spliced_is_black;
+ ERTS_RBT_T *p, *x, *y, *z = n;
+ ERTS_RBT_T null_x; /* null_x is used to get the fixup started when we
+ splice out a node without children. */
+
+ ERTS_RBT_HDBG_CHECK_TREE__(*root, n);
+
+ ERTS_RBT_INIT_EMPTY_TNODE(&null_x);
+
+ /* Remove node from tree... */
+
+ /* Find node to splice out */
+ if (!ERTS_RBT_GET_LEFT(z) || !ERTS_RBT_GET_RIGHT(z))
+ y = z;
+ else {
+ /* Set y to z's successor */
+ y = ERTS_RBT_GET_RIGHT(z);
+ while (1) {
+ ERTS_RBT_T *t = ERTS_RBT_GET_LEFT(y);
+ if (!t)
+ break;
+ y = t;
+ }
+ }
+ /* splice out y */
+ x = ERTS_RBT_GET_LEFT(y);
+ if (!x)
+ x = ERTS_RBT_GET_RIGHT(y);
+ spliced_is_black = ERTS_RBT_IS_BLACK(y);
+ p = ERTS_RBT_GET_PARENT(y);
+ if (x)
+ ERTS_RBT_SET_PARENT(x, p);
+ else if (spliced_is_black) {
+ x = &null_x;
+ ERTS_RBT_SET_BLACK(x);
+ ERTS_RBT_SET_PARENT(x, p);
+ ERTS_RBT_SET_LEFT(y, x);
+ }
+
+ if (!p) {
+ ERTS_RBT_ASSERT(*root == y);
+ *root = x;
+#ifdef ERTS_RBT_UPDATE_ATTACHED_DATA_CHGROOT
+ ERTS_RBT_UPDATE_ATTACHED_DATA_CHGROOT(y, x);
+#endif
+ }
+ else {
+ if (y == ERTS_RBT_GET_LEFT(p))
+ ERTS_RBT_SET_LEFT(p, x);
+ else {
+ ERTS_RBT_ASSERT(y == ERTS_RBT_GET_RIGHT(p));
+ ERTS_RBT_SET_RIGHT(p, x);
+ }
+#ifdef ERTS_RBT_UPDATE_ATTACHED_DATA_DMOD
+ if (p != z)
+ ERTS_RBT_UPDATE_ATTACHED_DATA_DMOD(p, y == z ? NULL : z);
+#endif
+ }
+ if (y != z) {
+ /* We spliced out the successor of z; replace z by the successor */
+ ERTS_RBT_FUNC__(replace__)(root, z, y);
+#ifdef ERTS_RBT_UPDATE_ATTACHED_DATA_DMOD
+ ERTS_RBT_UPDATE_ATTACHED_DATA_DMOD(y, NULL);
+#endif
+ }
+
+ if (spliced_is_black) {
+ /* We removed a black node which makes the resulting tree
+ violate the Red-Black Tree properties. Fixup tree... */
+
+ p = ERTS_RBT_GET_PARENT(x);
+ while (ERTS_RBT_IS_BLACK(x) && p) {
+ ERTS_RBT_T *r, *l;
+
+ /*
+ * x has an "extra black" which we move up the tree
+ * until we reach the root or until we can get rid of it.
+ *
+ * y is the sibling of x, and p is their parent
+ */
+
+ if (x == ERTS_RBT_GET_LEFT(p)) {
+ y = ERTS_RBT_GET_RIGHT(p);
+
+ ERTS_RBT_ASSERT(y);
+
+ if (ERTS_RBT_IS_RED(y)) {
+ ERTS_RBT_ASSERT(ERTS_RBT_GET_RIGHT(y));
+ ERTS_RBT_ASSERT(ERTS_RBT_GET_LEFT(y));
+
+ ERTS_RBT_SET_BLACK(y);
+
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_BLACK(p));
+
+ ERTS_RBT_SET_RED(p);
+ ERTS_RBT_FUNC__(left_rotate__)(root, p);
+ p = ERTS_RBT_GET_PARENT(x);
+ y = ERTS_RBT_GET_RIGHT(p);
+ }
+
+ ERTS_RBT_ASSERT(y);
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_BLACK(y));
+
+ l = ERTS_RBT_GET_LEFT(y);
+ r = ERTS_RBT_GET_RIGHT(y);
+ if ((!l || ERTS_RBT_IS_BLACK(l))
+ && (!r || ERTS_RBT_IS_BLACK(r))) {
+ ERTS_RBT_SET_RED(y);
+ x = p;
+ p = ERTS_RBT_GET_PARENT(x);
+ }
+ else {
+ if (!r || ERTS_RBT_IS_BLACK(r)) {
+ ERTS_RBT_SET_BLACK(l);
+ ERTS_RBT_SET_RED(y);
+ ERTS_RBT_FUNC__(right_rotate__)(root, y);
+ p = ERTS_RBT_GET_PARENT(x);
+ y = ERTS_RBT_GET_RIGHT(p);
+ }
+
+ ERTS_RBT_ASSERT(y);
+
+ if (p && ERTS_RBT_IS_RED(p)) {
+
+ ERTS_RBT_SET_BLACK(p);
+ ERTS_RBT_SET_RED(y);
+ }
+
+ ERTS_RBT_ASSERT(ERTS_RBT_GET_RIGHT(y));
+
+ ERTS_RBT_SET_BLACK(ERTS_RBT_GET_RIGHT(y));
+ ERTS_RBT_FUNC__(left_rotate__)(root, p);
+ x = *root;
+ break;
+ }
+ }
+ else {
+ ERTS_RBT_ASSERT(x == ERTS_RBT_GET_RIGHT(p));
+
+ y = ERTS_RBT_GET_LEFT(p);
+
+ ERTS_RBT_ASSERT(y);
+
+ if (ERTS_RBT_IS_RED(y)) {
+ ERTS_RBT_ASSERT(ERTS_RBT_GET_RIGHT(y));
+ ERTS_RBT_ASSERT(ERTS_RBT_GET_LEFT(y));
+
+ ERTS_RBT_SET_BLACK(y);
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_BLACK(p));
+ ERTS_RBT_SET_RED(p);
+ ERTS_RBT_FUNC__(right_rotate__)(root, p);
+
+ p = ERTS_RBT_GET_PARENT(x);
+ y = ERTS_RBT_GET_LEFT(p);
+ }
+
+ ERTS_RBT_ASSERT(y);
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_BLACK(y));
+
+ l = ERTS_RBT_GET_LEFT(y);
+ r = ERTS_RBT_GET_RIGHT(y);
+
+ if ((!r || ERTS_RBT_IS_BLACK(r))
+ && (!l || ERTS_RBT_IS_BLACK(l))) {
+ ERTS_RBT_SET_RED(y);
+ x = p;
+ p = ERTS_RBT_GET_PARENT(x);
+ }
+ else {
+ if (!l || ERTS_RBT_IS_BLACK(l)) {
+ ERTS_RBT_SET_BLACK(r);
+ ERTS_RBT_SET_RED(y);
+ ERTS_RBT_FUNC__(left_rotate__)(root, y);
+
+ p = ERTS_RBT_GET_PARENT(x);
+ y = ERTS_RBT_GET_LEFT(p);
+ }
+
+ ERTS_RBT_ASSERT(y);
+
+ if (p && ERTS_RBT_IS_RED(p)) {
+ ERTS_RBT_SET_BLACK(p);
+ ERTS_RBT_SET_RED(y);
+ }
+
+ ERTS_RBT_ASSERT(ERTS_RBT_GET_LEFT(y));
+
+ ERTS_RBT_SET_BLACK(ERTS_RBT_GET_LEFT(y));
+ ERTS_RBT_FUNC__(right_rotate__)(root, p);
+ x = *root;
+ break;
+ }
+ }
+ }
+
+ ERTS_RBT_SET_BLACK(x);
+
+ x = &null_x;
+ p = ERTS_RBT_GET_PARENT(x);
+
+ if (p) {
+ if (ERTS_RBT_GET_LEFT(p) == x)
+ ERTS_RBT_SET_LEFT(p, NULL);
+ else {
+ ERTS_RBT_ASSERT(ERTS_RBT_GET_RIGHT(p) == x);
+ ERTS_RBT_SET_RIGHT(p, NULL);
+ }
+
+ ERTS_RBT_ASSERT(!ERTS_RBT_GET_LEFT(x));
+ ERTS_RBT_ASSERT(!ERTS_RBT_GET_RIGHT(x));
+ }
+ else if (*root == x) {
+ *root = NULL;
+
+#ifdef ERTS_RBT_UPDATE_ATTACHED_DATA_CHGROOT
+ ERTS_RBT_UPDATE_ATTACHED_DATA_CHGROOT(x, NULL);
+#endif
+
+ ERTS_RBT_ASSERT(!ERTS_RBT_GET_LEFT(x));
+ ERTS_RBT_ASSERT(!ERTS_RBT_GET_RIGHT(x));
+ }
+ }
+
+ ERTS_RBT_HDBG_CHECK_TREE__(*root, NULL);
+
+}
+
+#endif /* ERTS_RBT_WANT_DELETE */
+
+#ifdef ERTS_RBT_NEED_INSERT__
+
+static void
+ERTS_RBT_FUNC__(insert_fixup__)(ERTS_RBT_T **root, ERTS_RBT_T *n)
+{
+ ERTS_RBT_T *x, *y;
+
+ x = n;
+
+ /*
+ * Rearrange the tree so that it satisfies the Red-Black Tree properties
+ */
+
+ ERTS_RBT_ASSERT(x != *root && ERTS_RBT_IS_RED(ERTS_RBT_GET_PARENT(x)));
+ do {
+ ERTS_RBT_T *p, *pp;
+
+ /*
+ * x and its parent are both red. Move the red pair up the tree
+ * until we get to the root or until we can separate them.
+ */
+
+ p = ERTS_RBT_GET_PARENT(x);
+ pp = ERTS_RBT_GET_PARENT(p);
+
+ ERTS_RBT_ASSERT(p && pp);
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_RED(x));
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_BLACK(pp));
+
+ if (p == ERTS_RBT_GET_LEFT(pp)) {
+ y = ERTS_RBT_GET_RIGHT(pp);
+ if (y && ERTS_RBT_IS_RED(y)) {
+ ERTS_RBT_SET_BLACK(y);
+ ERTS_RBT_SET_BLACK(p);
+ ERTS_RBT_SET_RED(pp);
+ x = pp;
+ }
+ else {
+
+ if (x == ERTS_RBT_GET_RIGHT(p)) {
+ x = p;
+ ERTS_RBT_FUNC__(left_rotate__)(root, x);
+ p = ERTS_RBT_GET_PARENT(x);
+ pp = ERTS_RBT_GET_PARENT(p);
+
+ ERTS_RBT_ASSERT(p && pp);
+ }
+
+ ERTS_RBT_ASSERT(x == ERTS_RBT_GET_LEFT(ERTS_RBT_GET_LEFT(pp)));
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_RED(x));
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_RED(p));
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_BLACK(pp));
+ ERTS_RBT_ASSERT(!y || ERTS_RBT_IS_BLACK(y));
+
+
+ ERTS_RBT_SET_BLACK(p);
+ ERTS_RBT_SET_RED(pp);
+ ERTS_RBT_FUNC__(right_rotate__)(root, pp);
+
+
+ ERTS_RBT_ASSERT(ERTS_RBT_GET_LEFT(ERTS_RBT_GET_PARENT(x)) == x);
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_RED(x));
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_RED(
+ ERTS_RBT_GET_RIGHT(
+ ERTS_RBT_GET_PARENT(x))));
+ ERTS_RBT_ASSERT(!ERTS_RBT_GET_PARENT(x)
+ || ERTS_RBT_IS_BLACK(ERTS_RBT_GET_PARENT(x)));
+ break;
+ }
+ }
+ else {
+ ERTS_RBT_ASSERT(p == ERTS_RBT_GET_RIGHT(pp));
+
+ y = ERTS_RBT_GET_LEFT(pp);
+ if (y && ERTS_RBT_IS_RED(y)) {
+ ERTS_RBT_SET_BLACK(y);
+ ERTS_RBT_SET_BLACK(p);
+ ERTS_RBT_SET_RED(pp);
+ x = pp;
+ }
+ else {
+
+ if (x == ERTS_RBT_GET_LEFT(p)) {
+ x = p;
+ ERTS_RBT_FUNC__(right_rotate__)(root, x);
+ p = ERTS_RBT_GET_PARENT(x);
+ pp = ERTS_RBT_GET_PARENT(p);
+
+ ERTS_RBT_ASSERT(p && pp);
+ }
+
+ ERTS_RBT_ASSERT(x == ERTS_RBT_GET_RIGHT(ERTS_RBT_GET_RIGHT(pp)));
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_RED(x));
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_RED(p));
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_BLACK(pp));
+ ERTS_RBT_ASSERT(!y || ERTS_RBT_IS_BLACK(y));
+
+
+ ERTS_RBT_SET_BLACK(p);
+ ERTS_RBT_SET_RED(pp);
+ ERTS_RBT_FUNC__(left_rotate__)(root, pp);
+
+
+ ERTS_RBT_ASSERT(ERTS_RBT_GET_RIGHT(ERTS_RBT_GET_PARENT(x)) == x);
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_RED(x));
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_RED(
+ ERTS_RBT_GET_LEFT(
+ ERTS_RBT_GET_PARENT(x))));
+ ERTS_RBT_ASSERT(!ERTS_RBT_GET_PARENT(x)
+ || ERTS_RBT_IS_BLACK(ERTS_RBT_GET_PARENT(x)));
+ break;
+ }
+ }
+ } while (x != *root && ERTS_RBT_IS_RED(ERTS_RBT_GET_PARENT(x)));
+
+ ERTS_RBT_SET_BLACK(*root);
+
+}
+
+static ERTS_INLINE ERTS_RBT_T *
+ERTS_RBT_FUNC__(insert_aux__)(ERTS_RBT_T **root, ERTS_RBT_T *n, int lookup)
+{
+ ERTS_RBT_KEY_T kn = ERTS_RBT_GET_KEY(n);
+
+ ERTS_RBT_HDBG_CHECK_TREE__(*root, NULL);
+
+ ERTS_RBT_INIT_EMPTY_TNODE(n);
+
+ if (!*root) {
+ ERTS_RBT_SET_BLACK(n);
+ *root = n;
+#ifdef ERTS_RBT_UPDATE_ATTACHED_DATA_CHGROOT
+ ERTS_RBT_UPDATE_ATTACHED_DATA_CHGROOT(NULL, n);
+#endif
+ }
+ else {
+ ERTS_RBT_T *p, *x = *root;
+
+ while (1) {
+ ERTS_RBT_KEY_T kx;
+ ERTS_RBT_T *c;
+
+ kx = ERTS_RBT_GET_KEY(x);
+
+ if (lookup && ERTS_RBT_IS_EQ(kn, kx)) {
+
+ ERTS_RBT_HDBG_CHECK_TREE__(*root, NULL);
+
+ return x;
+ }
+
+ if (ERTS_RBT_IS_LT(kn, kx)) {
+ c = ERTS_RBT_GET_LEFT(x);
+ if (!c) {
+ ERTS_RBT_SET_PARENT(n, x);
+ ERTS_RBT_SET_LEFT(x, n);
+ p = x;
+ break;
+ }
+ }
+ else {
+ c = ERTS_RBT_GET_RIGHT(x);
+ if (!c) {
+ ERTS_RBT_SET_PARENT(n, x);
+ ERTS_RBT_SET_RIGHT(x, n);
+ p = x;
+ break;
+ }
+ }
+
+ x = c;
+ }
+
+ ERTS_RBT_ASSERT(p);
+
+ ERTS_RBT_SET_RED(n);
+ if (ERTS_RBT_IS_RED(p))
+ ERTS_RBT_FUNC__(insert_fixup__)(root, n);
+ }
+
+ ERTS_RBT_HDBG_CHECK_TREE__(*root, n);
+
+ return NULL;
+}
+
+#endif /* ERTS_RBT_NEED_INSERT__ */
+
+#ifdef ERTS_RBT_WANT_LOOKUP_INSERT
+
+static ERTS_RBT_API_INLINE__ ERTS_RBT_T *
+ERTS_RBT_FUNC__(lookup_insert)(ERTS_RBT_T **root, ERTS_RBT_T *n)
+{
+ return ERTS_RBT_FUNC__(insert_aux__)(root, n, !0);
+}
+
+#endif /* ERTS_RBT_WANT_LOOKUP_INSERT */
+
+#ifdef ERTS_RBT_WANT_INSERT
+
+static ERTS_RBT_API_INLINE__ void
+ERTS_RBT_FUNC__(insert)(ERTS_RBT_T **root, ERTS_RBT_T *n)
+{
+ (void) ERTS_RBT_FUNC__(insert_aux__)(root, n, 0);
+}
+
+#endif /* ERTS_RBT_WANT_INSERT */
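+
+/*
+ * Illustrative instantiation sketch; an assumed typical use with a
+ * hypothetical node type MyNode (the color and link accessor macros,
+ * e.g. ERTS_RBT_GET_LEFT, are omitted for brevity):
+ *
+ *   #define ERTS_RBT_PREFIX mytree
+ *   #define ERTS_RBT_T MyNode
+ *   #define ERTS_RBT_KEY_T Uint
+ *   #define ERTS_RBT_GET_KEY(N) ((N)->key)
+ *   #define ERTS_RBT_IS_LT(KX, KY) ((KX) < (KY))
+ *   #define ERTS_RBT_IS_EQ(KX, KY) ((KX) == (KY))
+ *   #define ERTS_RBT_WANT_INSERT
+ *   #define ERTS_RBT_WANT_LOOKUP
+ *   #include "erl_rbtree.h"
+ *
+ * after which the selected operations are generated with names built
+ * by ERTS_RBT_FUNC__(), typically mytree_rbt_insert() and
+ * mytree_rbt_lookup().
+ */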
+
+#ifdef ERTS_RBT_WANT_LOOKUP
+
+static ERTS_RBT_API_INLINE__ ERTS_RBT_T *
+ERTS_RBT_FUNC__(lookup)(ERTS_RBT_T *root, ERTS_RBT_KEY_T key)
+{
+ ERTS_RBT_T *x = root;
+
+ if (!x)
+ return NULL;
+
+ while (1) {
+ ERTS_RBT_KEY_T kx = ERTS_RBT_GET_KEY(x);
+ ERTS_RBT_T *c;
+
+ if (ERTS_RBT_IS_EQ(key, kx))
+ return x;
+
+ if (ERTS_RBT_IS_LT(key, kx)) {
+ c = ERTS_RBT_GET_LEFT(x);
+ if (!c)
+ return NULL;
+ }
+ else {
+ c = ERTS_RBT_GET_RIGHT(x);
+ if (!c)
+ return NULL;
+ }
+
+ x = c;
+ }
+}
+
+#endif /* ERTS_RBT_WANT_LOOKUP */
+
+#ifdef ERTS_RBT_WANT_SMALLEST
+
+static ERTS_RBT_API_INLINE__ ERTS_RBT_T *
+ERTS_RBT_FUNC__(smallest)(ERTS_RBT_T *root)
+{
+ ERTS_RBT_T *x = root;
+
+ if (!x)
+ return NULL;
+
+ while (1) {
+ ERTS_RBT_T *c = ERTS_RBT_GET_LEFT(x);
+ if (!c)
+ break;
+ x = c;
+ }
+
+ return x;
+}
+
+#endif /* ERTS_RBT_WANT_SMALLEST */
+
+#ifdef ERTS_RBT_WANT_LARGEST
+
+static ERTS_RBT_API_INLINE__ ERTS_RBT_T *
+ERTS_RBT_FUNC__(largest)(ERTS_RBT_T *root)
+{
+ ERTS_RBT_T *x = root;
+
+ if (!x)
+ return NULL;
+
+ while (1) {
+ ERTS_RBT_T *c = ERTS_RBT_GET_RIGHT(x);
+ if (!c)
+ break;
+ x = c;
+ }
+
+ return x;
+}
+
+#endif /* ERTS_RBT_WANT_LARGEST */
+
+#ifdef ERTS_RBT_NEED_FOREACH_UNORDERED__
+
+static ERTS_INLINE int
+ERTS_RBT_FUNC__(foreach_unordered__)(ERTS_RBT_T **root,
+ int destroying,
+ void (*op)(ERTS_RBT_T *, void *),
+ void *arg,
+ int yielding,
+ ERTS_RBT_YIELD_STATE_T__ *ystate,
+ Sint ylimit)
+{
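+ /*
+ * Non-recursive post-order traversal driven by the parent pointers
+ * instead of an explicit stack: descend to a leaf, apply op on the
+ * way back up, and switch to the right sibling subtree when one
+ * exists. Since op runs only after both subtrees of a node have
+ * been visited, the destroying variants may free the node in op.
+ * When yielding, the walk stops after roughly ylimit operations
+ * and later resumes from ystate.
+ */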
+ ERTS_RBT_T *c, *p, *x;
+
+ ERTS_RBT_ASSERT(!yielding || ystate);
+
+ if (yielding && ystate->x) {
+ x = ystate->x;
+ ERTS_RBT_ASSERT(ystate->up);
+ goto restart_up;
+ }
+ else {
+ x = *root;
+ if (!x)
+ return 0;
+ if (destroying)
+ *root = NULL;
+ }
+
+ while (1) {
+
+ while (1) {
+
+ while (1) {
+ c = ERTS_RBT_GET_LEFT(x);
+ if (!c)
+ break;
+ x = c;
+ }
+
+ c = ERTS_RBT_GET_RIGHT(x);
+ if (!c)
+ break;
+ x = c;
+ }
+
+ while (1) {
+#ifdef ERTS_RBT_DEBUG
+ int cdir;
+#endif
+ if (yielding && ylimit-- <= 0) {
+ ystate->x = x;
+ ystate->up = 1;
+ return 1;
+ }
+
+ restart_up:
+
+ p = ERTS_RBT_GET_PARENT(x);
+
+#ifdef ERTS_RBT_DEBUG
+ ERTS_RBT_ASSERT(!destroying || !ERTS_RBT_GET_LEFT(x));
+ ERTS_RBT_ASSERT(!destroying || !ERTS_RBT_GET_RIGHT(x));
+
+ if (p) {
+ if (x == ERTS_RBT_GET_LEFT(p)) {
+ cdir = -1;
+ if (destroying)
+ ERTS_RBT_SET_LEFT(p, NULL);
+ }
+ else {
+ ERTS_RBT_ASSERT(x == ERTS_RBT_GET_RIGHT(p));
+ cdir = 1;
+ if (destroying)
+ ERTS_RBT_SET_RIGHT(p, NULL);
+ }
+ }
+#endif
+
+ (*op)(x, arg);
+
+ if (!p) {
+ if (yielding) {
+ ystate->x = NULL;
+ ystate->up = 0;
+ }
+ return 0; /* Done */
+ }
+
+ c = ERTS_RBT_GET_RIGHT(p);
+ if (c && c != x) {
+ ERTS_RBT_ASSERT(cdir < 0);
+
+ /* Go down tree of x's sibling... */
+ x = c;
+ break;
+ }
+
+ x = p;
+ }
+ }
+}
+
+#endif /* ERTS_RBT_NEED_FOREACH_UNORDERED__ */
+
+#ifdef ERTS_RBT_NEED_FOREACH_ORDERED__
+
+static ERTS_INLINE int
+ERTS_RBT_FUNC__(foreach_ordered__)(ERTS_RBT_T **root,
+ int from_small,
+ int destroying,
+ void (*op)(ERTS_RBT_T *, void *),
+ void (*destroy)(ERTS_RBT_T *, void *),
+ void *arg,
+ int yielding,
+ ERTS_RBT_YIELD_STATE_T__ *ystate,
+ Sint ylimit)
+{
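+ /*
+ * Key-ordered traversal, ascending when from_small is set and
+ * descending otherwise, again driven by parent pointers rather
+ * than recursion. op is applied in key order, while the destroy
+ * callback for a node runs only once both of its subtrees have
+ * been visited, which is what makes the destroying variants safe.
+ */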
+ ERTS_RBT_T *c, *p, *x;
+
+ ERTS_RBT_ASSERT(!yielding || ystate);
+ ERTS_RBT_ASSERT(!destroying || destroy);
+
+ if (yielding && ystate->x) {
+ x = ystate->x;
+ if (ystate->up)
+ goto restart_up;
+ else
+ goto restart_down;
+ }
+ else {
+ x = *root;
+ if (!x)
+ return 0;
+ if (destroying)
+ *root = NULL;
+ }
+
+ while (1) {
+
+ while (1) {
+
+ while (1) {
+ c = from_small ? ERTS_RBT_GET_LEFT(x) : ERTS_RBT_GET_RIGHT(x);
+ if (!c)
+ break;
+ x = c;
+ }
+
+ (*op)(x, arg);
+
+ if (yielding && --ylimit <= 0) {
+ ystate->x = x;
+ ystate->up = 0;
+ return 1;
+ }
+
+ restart_down:
+
+ c = from_small ? ERTS_RBT_GET_RIGHT(x) : ERTS_RBT_GET_LEFT(x);
+ if (!c)
+ break;
+ x = c;
+ }
+
+ while (1) {
+ p = ERTS_RBT_GET_PARENT(x);
+
+ if (p) {
+
+ c = from_small ? ERTS_RBT_GET_RIGHT(p) : ERTS_RBT_GET_LEFT(p);
+ if (!c || c != x) {
+ ERTS_RBT_ASSERT((from_small
+ ? ERTS_RBT_GET_LEFT(p)
+ : ERTS_RBT_GET_RIGHT(p)) == x);
+
+ (*op)(p, arg);
+
+ if (yielding && --ylimit <= 0) {
+ ystate->x = x;
+ ystate->up = 1;
+ return 1;
+ restart_up:
+ p = ERTS_RBT_GET_PARENT(x);
+ }
+ }
+
+ if (c && c != x) {
+ ERTS_RBT_ASSERT((from_small
+ ? ERTS_RBT_GET_LEFT(p)
+ : ERTS_RBT_GET_RIGHT(p)) == x);
+
+ /* Go down tree of x's sibling... */
+ x = c;
+ break;
+ }
+ }
+
+ if (destroying) {
+
+#ifdef ERTS_RBT_DEBUG
+ ERTS_RBT_ASSERT(!ERTS_RBT_GET_LEFT(x)
+ && !ERTS_RBT_GET_RIGHT(x));
+
+ if (p) {
+ if (x == ERTS_RBT_GET_LEFT(p))
+ ERTS_RBT_SET_LEFT(p, NULL);
+ else {
+ ERTS_RBT_ASSERT(x == ERTS_RBT_GET_RIGHT(p));
+ ERTS_RBT_SET_RIGHT(p, NULL);
+ }
+ }
+#endif
+
+ (*destroy)(x, arg);
+ }
+
+ if (!p) {
+ if (yielding) {
+ ystate->x = NULL;
+ ystate->up = 0;
+ }
+ return 0; /* Done */
+ }
+ x = p;
+ }
+ }
+}
+
+#endif /* ERTS_RBT_NEED_FOREACH_ORDERED__ */
+
+#ifdef ERTS_RBT_WANT_FOREACH
+
+static ERTS_RBT_API_INLINE__ void
+ERTS_RBT_FUNC__(foreach)(ERTS_RBT_T *root,
+ void (*op)(ERTS_RBT_T *, void *),
+ void *arg)
+{
+ (void) ERTS_RBT_FUNC__(foreach_unordered__)(&root, 0, op, arg,
+ 0, NULL, 0);
+}
+
+#endif /* ERTS_RBT_WANT_FOREACH */
+
+#ifdef ERTS_RBT_WANT_FOREACH_SMALL
+
+static ERTS_RBT_API_INLINE__ void
+ERTS_RBT_FUNC__(foreach_small)(ERTS_RBT_T *root,
+ void (*op)(ERTS_RBT_T *, void *),
+ void *arg)
+{
+ (void) ERTS_RBT_FUNC__(foreach_ordered__)(&root, 1, 0,
+ op, NULL, arg,
+ 0, NULL, 0);
+}
+
+#endif /* ERTS_RBT_WANT_FOREACH_SMALL */
+
+#ifdef ERTS_RBT_WANT_FOREACH_LARGE
+
+static ERTS_RBT_API_INLINE__ void
+ERTS_RBT_FUNC__(foreach_large)(ERTS_RBT_T *root,
+ void (*op)(ERTS_RBT_T *, void *),
+ void *arg)
+{
+ (void) ERTS_RBT_FUNC__(foreach_ordered__)(&root, 0, 0,
+ op, NULL, arg,
+ 0, NULL, 0);
+}
+
+#endif /* ERTS_RBT_WANT_FOREACH_LARGE */
+
+#ifdef ERTS_RBT_WANT_FOREACH_YIELDING
+
+static ERTS_RBT_API_INLINE__ void
+ERTS_RBT_FUNC__(foreach_yielding)(ERTS_RBT_T *root,
+ void (*op)(ERTS_RBT_T *, void *),
+ void *arg,
+ ERTS_RBT_YIELD_STATE_T__ *ystate,
+ Sint ylimit)
+{
+ (void) ERTS_RBT_FUNC__(foreach_unordered__)(&root, 0, op, arg,
+ 1, ystate, ylimit);
+}
+
+#endif /* ERTS_RBT_WANT_FOREACH_YIELDING */
+
+#ifdef ERTS_RBT_WANT_FOREACH_SMALL_YIELDING
+
+static ERTS_RBT_API_INLINE__ int
+ERTS_RBT_FUNC__(foreach_small_yielding)(ERTS_RBT_T *root,
+ void (*op)(ERTS_RBT_T *, void *),
+ void *arg,
+ ERTS_RBT_YIELD_STATE_T__ *ystate,
+ Sint ylimit)
+{
+ return ERTS_RBT_FUNC__(foreach_ordered__)(&root, 1, 0,
+ op, NULL, arg,
+ 1, ystate, ylimit);
+}
+
+#endif /* ERTS_RBT_WANT_FOREACH_SMALL_YIELDING */
+
+#ifdef ERTS_RBT_WANT_FOREACH_LARGE_YIELDING
+
+static ERTS_RBT_API_INLINE__ int
+ERTS_RBT_FUNC__(foreach_large_yielding)(ERTS_RBT_T *root,
+ void (*op)(ERTS_RBT_T *, void *),
+ void *arg,
+ ERTS_RBT_YIELD_STATE_T__ *ystate,
+ Sint ylimit)
+{
+ return ERTS_RBT_FUNC__(foreach_ordered__)(&root, 0, 0,
+ op, NULL, arg,
+ 1, ystate, ylimit);
+}
+
+#endif /* ERTS_RBT_WANT_FOREACH_LARGE_YIELDING */
+
+#ifdef ERTS_RBT_WANT_FOREACH_DESTROY
+
+static ERTS_RBT_API_INLINE__ void
+ERTS_RBT_FUNC__(foreach_destroy)(ERTS_RBT_T **root,
+ void (*op)(ERTS_RBT_T *, void *),
+ void *arg)
+{
+ (void) ERTS_RBT_FUNC__(foreach_unordered__)(root, 1, op, arg,
+ 0, NULL, 0);
+}
+
+#endif /* ERTS_RBT_WANT_FOREACH_DESTROY */
+
+#ifdef ERTS_RBT_WANT_FOREACH_SMALL_DESTROY
+
+static ERTS_RBT_API_INLINE__ void
+ERTS_RBT_FUNC__(foreach_small_destroy)(ERTS_RBT_T **root,
+ void (*op)(ERTS_RBT_T *, void *),
+ void (*destr)(ERTS_RBT_T *, void *),
+ void *arg)
+{
+ (void) ERTS_RBT_FUNC__(foreach_ordered__)(root, 1, 1,
+ op, destr, arg,
+ 0, NULL, 0);
+}
+
+#endif /* ERTS_RBT_WANT_FOREACH_SMALL_DESTROY */
+
+#ifdef ERTS_RBT_WANT_FOREACH_LARGE_DESTROY
+
+static ERTS_RBT_API_INLINE__ void
+ERTS_RBT_FUNC__(foreach_large_destroy)(ERTS_RBT_T **root,
+ void (*op)(ERTS_RBT_T *, void *),
+ void (*destr)(ERTS_RBT_T *, void *),
+ void *arg)
+{
+ (void) ERTS_RBT_FUNC__(foreach_ordered__)(root, 0, 1,
+ op, destr, arg,
+ 0, NULL, 0);
+}
+
+#endif /* ERTS_RBT_WANT_FOREACH_LARGE_DESTROY */
+
+#ifdef ERTS_RBT_WANT_FOREACH_DESTROY_YIELDING
+
+static ERTS_RBT_API_INLINE__ int
+ERTS_RBT_FUNC__(foreach_destroy_yielding)(ERTS_RBT_T **root,
+ void (*op)(ERTS_RBT_T *, void *),
+ void *arg,
+ ERTS_RBT_YIELD_STATE_T__ *ystate,
+ Sint ylimit)
+{
+ return ERTS_RBT_FUNC__(foreach_unordered__)(root, 1, op, arg,
+ 1, ystate, ylimit);
+}
+
+#endif /* ERTS_RBT_WANT_FOREACH_DESTROY_YIELDING */
+
+#ifdef ERTS_RBT_WANT_FOREACH_SMALL_DESTROY_YIELDING
+
+static ERTS_RBT_API_INLINE__ int
+ERTS_RBT_FUNC__(foreach_small_destroy_yielding)(ERTS_RBT_T **root,
+ void (*op)(ERTS_RBT_T *, void *),
+ void (*destr)(ERTS_RBT_T *, void *),
+ void *arg,
+ ERTS_RBT_YIELD_STATE_T__ *ystate,
+ Sint ylimit)
+{
+ return ERTS_RBT_FUNC__(foreach_ordered__)(root, 1, 1,
+ op, destr, arg,
+ 1, ystate, ylimit);
+}
+
+#endif /* ERTS_RBT_WANT_FOREACH_SMALL_DESTROY_YIELDING */
+
+#ifdef ERTS_RBT_WANT_FOREACH_LARGE_DESTROY_YIELDING
+
+static ERTS_RBT_API_INLINE__ int
+ERTS_RBT_FUNC__(foreach_large_destroy_yielding)(ERTS_RBT_T **root,
+ void (*op)(ERTS_RBT_T *, void *),
+ void (*destr)(ERTS_RBT_T *, void *),
+ void *arg,
+ ERTS_RBT_YIELD_STATE_T__ *ystate,
+ Sint ylimit)
+{
+ return ERTS_RBT_FUNC__(foreach_ordered__)(root, 0, 1,
+ op, destr, arg,
+ 1, ystate, ylimit);
+}
+
+#endif /* ERTS_RBT_WANT_FOREACH_LARGE_DESTROY_YIELDING */
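+
+/*
+ * Yielding protocol, as implemented by foreach_unordered__() and
+ * foreach_ordered__() above: a fresh traversal starts with
+ * ystate->x == NULL. A yielding walk stops after roughly ylimit
+ * operations, returning 1 with its position saved in *ystate, and
+ * returns 0 once the whole tree has been visited, at which point
+ * ystate->x is reset to NULL again. Callers re-invoke with the
+ * same state until the traversal reports completion.
+ */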
+
+#ifdef ERTS_RBT_WANT_DEBUG_PRINT
+
+static void
+ERTS_RBT_FUNC__(debug_print)(FILE *filep, ERTS_RBT_T *x, int indent,
+ void (*print_node)(ERTS_RBT_T *))
+{
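+ /*
+ * Prints the tree rotated a quarter turn: the right subtree is
+ * printed first with deeper indentation, so the root ends up at
+ * the left margin and larger keys toward the top.
+ */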
+ if (x) {
+ ERTS_RBT_FUNC__(debug_print)(filep, ERTS_RBT_GET_RIGHT(x),
+ indent+2, print_node);
+ erts_fprintf(filep,
+ "%*s[%s:%p:",
+ indent, "",
+ ERTS_RBT_IS_BLACK(x) ? "Black" : "Red",
+ x);
+ (*print_node)(x);
+ erts_fprintf(filep, "]\n");
+ ERTS_RBT_FUNC__(debug_print)(filep, ERTS_RBT_GET_LEFT(x),
+ indent+2, print_node);
+ }
+}
+
+#endif /* ERTS_RBT_WANT_DEBUG_PRINT */
+
+#ifdef ERTS_RBT_NEED_HDBG_CHECK_TREE__
+
+static void
+ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root, ERTS_RBT_T *n)
+{
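+ /*
+ * Heavy-debug sanity check: walks the entire tree verifying the
+ * red-black invariants (no red node has a red child; every
+ * root-to-leaf path contains the same number of black nodes), key
+ * ordering, and parent-pointer consistency, and asserts that the
+ * node n, when given, is present in the tree.
+ */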
+ int black_depth = -1, no_black = 0;
+ ERTS_RBT_T *c, *p, *x = root;
+ ERTS_RBT_KEY_T kx;
+ ERTS_RBT_KEY_T kc;
+
+ if (!x) {
+ ERTS_RBT_ASSERT(!n);
+ return;
+ }
+
+ ERTS_RBT_ASSERT(!ERTS_RBT_GET_PARENT(x));
+
+ while (1) {
+
+ while (1) {
+
+ while (1) {
+
+ if (x == n)
+ n = NULL;
+
+ if (ERTS_RBT_IS_BLACK(x))
+ no_black++;
+ else {
+ c = ERTS_RBT_GET_RIGHT(x);
+ ERTS_RBT_ASSERT(!c || ERTS_RBT_IS_BLACK(c));
+ c = ERTS_RBT_GET_LEFT(x);
+ ERTS_RBT_ASSERT(!c || ERTS_RBT_IS_BLACK(c));
+ }
+
+ c = ERTS_RBT_GET_LEFT(x);
+ if (!c)
+ break;
+
+ ERTS_RBT_ASSERT(x == ERTS_RBT_GET_PARENT(c));
+
+ kx = ERTS_RBT_GET_KEY(x);
+ kc = ERTS_RBT_GET_KEY(c);
+
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_LT(kc, kx)
+ || ERTS_RBT_IS_EQ(kc, kx));
+
+ x = c;
+ }
+
+ c = ERTS_RBT_GET_RIGHT(x);
+ if (!c) {
+ if (black_depth < 0)
+ black_depth = no_black;
+ ERTS_RBT_ASSERT(black_depth == no_black);
+ break;
+ }
+
+ ERTS_RBT_ASSERT(x == ERTS_RBT_GET_PARENT(c));
+
+ kx = ERTS_RBT_GET_KEY(x);
+ kc = ERTS_RBT_GET_KEY(c);
+
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_LT(kx, kc)
+ || ERTS_RBT_IS_EQ(kx, kc));
+ x = c;
+ }
+
+ while (1) {
+ p = ERTS_RBT_GET_PARENT(x);
+
+ if (ERTS_RBT_IS_BLACK(x))
+ no_black--;
+
+ if (p) {
+
+ ERTS_RBT_ASSERT(x == ERTS_RBT_GET_LEFT(p)
+ || x == ERTS_RBT_GET_RIGHT(p));
+
+ c = ERTS_RBT_GET_RIGHT(p);
+ if (c && c != x) {
+ ERTS_RBT_ASSERT(ERTS_RBT_GET_LEFT(p) == x);
+
+ kx = ERTS_RBT_GET_KEY(x);
+ kc = ERTS_RBT_GET_KEY(c);
+
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_LT(kx, kc)
+ || ERTS_RBT_IS_EQ(kx, kc));
+ /* Go down tree of x's sibling... */
+ x = c;
+ break;
+ }
+ }
+
+ if (!p) {
+ ERTS_RBT_ASSERT(root == x);
+ ERTS_RBT_ASSERT(no_black == 0);
+ ERTS_RBT_ASSERT(!n);
+ return; /* Done */
+ }
+
+ x = p;
+ }
+ }
+}
+
+#undef ERTS_RBT_PRINT_TREE__
+
+#endif /* ERTS_RBT_NEED_HDBG_CHECK_TREE__ */
+
+#undef ERTS_RBT_ASSERT
+#undef ERTS_RBT_DEBUG
+#undef ERTS_RBT_API_INLINE__
+#undef ERTS_RBT_YIELD_STATE_T__
+#undef ERTS_RBT_NEED_REPLACE__
+#undef ERTS_RBT_NEED_INSERT__
+#undef ERTS_RBT_NEED_ROTATE__
+#undef ERTS_RBT_NEED_FOREACH_UNORDERED__
+#undef ERTS_RBT_NEED_FOREACH_ORDERED__
+#undef ERTS_RBT_NEED_HDBG_CHECK_TREE__
+#undef ERTS_RBT_HDBG_CHECK_TREE__
+
+#ifdef ERTS_RBT_UNDEF
+# undef ERTS_RBT_PREFIX
+# undef ERTS_RBT_T
+# undef ERTS_RBT_KEY_T
+# undef ERTS_RBT_FLAGS_T
+# undef ERTS_RBT_INIT_EMPTY_TNODE
+# undef ERTS_RBT_IS_RED
+# undef ERTS_RBT_SET_RED
+# undef ERTS_RBT_IS_BLACK
+# undef ERTS_RBT_SET_BLACK
+# undef ERTS_RBT_GET_FLAGS
+# undef ERTS_RBT_SET_FLAGS
+# undef ERTS_RBT_GET_PARENT
+# undef ERTS_RBT_SET_PARENT
+# undef ERTS_RBT_GET_RIGHT
+# undef ERTS_RBT_SET_RIGHT
+# undef ERTS_RBT_GET_LEFT
+# undef ERTS_RBT_SET_LEFT
+# undef ERTS_RBT_GET_KEY
+# undef ERTS_RBT_IS_LT
+# undef ERTS_RBT_IS_EQ
+# undef ERTS_RBT_UNDEF
+# undef ERTS_RBT_NO_API_INLINE
+# undef ERTS_RBT_UPDATE_ATTACHED_DATA_ROTATE
+# undef ERTS_RBT_UPDATE_ATTACHED_DATA_DMOD
+# undef ERTS_RBT_UPDATE_ATTACHED_DATA_CHGROOT
+# undef ERTS_RBT_WANT_DELETE
+# undef ERTS_RBT_WANT_INSERT
+# undef ERTS_RBT_WANT_LOOKUP_INSERT
+# undef ERTS_RBT_WANT_REPLACE
+# undef ERTS_RBT_WANT_LOOKUP
+# undef ERTS_RBT_WANT_SMALLEST
+# undef ERTS_RBT_WANT_LARGEST
+# undef ERTS_RBT_WANT_FOREACH
+# undef ERTS_RBT_WANT_FOREACH_DESTROY
+# undef ERTS_RBT_WANT_FOREACH_YIELDING
+# undef ERTS_RBT_WANT_FOREACH_DESTROY_YIELDING
+# undef ERTS_RBT_WANT_FOREACH_SMALL
+# undef ERTS_RBT_WANT_FOREACH_LARGE
+# undef ERTS_RBT_WANT_FOREACH_SMALL_DESTROY
+# undef ERTS_RBT_WANT_FOREACH_LARGE_DESTROY
+# undef ERTS_RBT_WANT_FOREACH_SMALL_YIELDING
+# undef ERTS_RBT_WANT_FOREACH_LARGE_YIELDING
+# undef ERTS_RBT_WANT_FOREACH_SMALL_DESTROY_YIELDING
+# undef ERTS_RBT_WANT_FOREACH_LARGE_DESTROY_YIELDING
+# undef ERTS_RBT_WANT_DEBUG_PRINT
+#endif
diff --git a/erts/emulator/beam/erl_sched_spec_pre_alloc.c b/erts/emulator/beam/erl_sched_spec_pre_alloc.c
index a490aec734..ab204303d7 100644
--- a/erts/emulator/beam/erl_sched_spec_pre_alloc.c
+++ b/erts/emulator/beam/erl_sched_spec_pre_alloc.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2011-2012. All Rights Reserved.
+ * Copyright Ericsson AB 2011-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -31,13 +32,12 @@
# include "config.h"
#endif
-#ifdef ERTS_SMP
#include "erl_process.h"
#include "erl_thr_progress.h"
erts_sspa_data_t *
-erts_sspa_create(size_t blk_sz, int pa_size)
+erts_sspa_create(size_t blk_sz, int pa_size, int nthreads, const char* name)
{
erts_sspa_data_t *data;
size_t tot_size;
@@ -48,22 +48,30 @@ erts_sspa_create(size_t blk_sz, int pa_size)
int no_blocks = pa_size;
int no_blocks_per_chunk;
- if (erts_no_schedulers == 1)
+ if (!name) { /* schedulers only variant */
+ ASSERT(!nthreads);
+ nthreads = erts_no_schedulers;
+ }
+ else {
+ ASSERT(nthreads > 0);
+ }
+
+ if (nthreads == 1)
no_blocks_per_chunk = no_blocks;
else {
int extra = (no_blocks - 1)/4 + 1;
if (extra == 0)
extra = 1;
no_blocks_per_chunk = no_blocks;
- no_blocks_per_chunk += extra*erts_no_schedulers;
- no_blocks_per_chunk /= erts_no_schedulers;
+ no_blocks_per_chunk += extra * nthreads;
+ no_blocks_per_chunk /= nthreads;
}
- no_blocks = no_blocks_per_chunk * erts_no_schedulers;
+ no_blocks = no_blocks_per_chunk * nthreads;
chunk_mem_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_chunk_header_t));
chunk_mem_size += blk_sz * no_blocks_per_chunk;
chunk_mem_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(chunk_mem_size);
tot_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_data_t));
- tot_size += chunk_mem_size*erts_no_schedulers;
+ tot_size += chunk_mem_size * nthreads;
p = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_PRE_ALLOC_DATA, tot_size);
data = (erts_sspa_data_t *) p;
@@ -72,10 +80,16 @@ erts_sspa_create(size_t blk_sz, int pa_size)
data->chunks_mem_size = chunk_mem_size;
data->start = chunk_start;
- data->end = chunk_start + chunk_mem_size*erts_no_schedulers;
+ data->end = chunk_start + chunk_mem_size * nthreads;
+ data->nthreads = nthreads;
+
+ if (name) { /* thread variant */
+ erts_tsd_key_create(&data->tsd_key, (char*)name);
+ erts_atomic_init_nob(&data->id_generator, 0);
+ }
/* Initialize all chunks */
- for (cix = 0; cix < erts_no_schedulers; cix++) {
+ for (cix = 0; cix < nthreads; cix++) {
erts_sspa_chunk_t *chnk = erts_sspa_cix2chunk(data, cix);
erts_sspa_chunk_header_t *chdr = &chnk->aligned.header;
erts_sspa_blk_t *blk;
@@ -160,7 +174,7 @@ enqueue_remote_managed_thread(erts_sspa_chunk_header_t *chdr,
if ((i & 1) == 0)
itmp = itmp2;
else {
- enq = (erts_sspa_blk_t *) itmp;
+ enq = (erts_sspa_blk_t *) itmp2;
itmp = erts_atomic_read_acqb(&enq->next_atmc);
ASSERT(itmp != ERTS_AINT_NULL);
}
@@ -324,4 +338,3 @@ erts_sspa_process_remote_frees(erts_sspa_chunk_header_t *chdr,
return res;
}
-#endif /* ERTS_SMP */
diff --git a/erts/emulator/beam/erl_sched_spec_pre_alloc.h b/erts/emulator/beam/erl_sched_spec_pre_alloc.h
index 9144c73acd..d232db0e69 100644
--- a/erts/emulator/beam/erl_sched_spec_pre_alloc.h
+++ b/erts/emulator/beam/erl_sched_spec_pre_alloc.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2011-2012. All Rights Reserved.
+ * Copyright Ericsson AB 2011-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -30,7 +31,6 @@
#ifndef ERTS_SCHED_SPEC_PRE_ALLOC_H__
#define ERTS_SCHED_SPEC_PRE_ALLOC_H__
-#ifdef ERTS_SMP
#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY
#define ERL_THR_PROGRESS_TSD_TYPE_ONLY
@@ -59,6 +59,11 @@ typedef struct {
char *start;
char *end;
int chunks_mem_size;
+ int nthreads;
+
+ /* Used only by thread variant: */
+ erts_tsd_key_t tsd_key;
+ erts_atomic_t id_generator;
} erts_sspa_data_t;
typedef union erts_sspa_blk_t_ erts_sspa_blk_t;
@@ -140,7 +145,9 @@ check_local_list(erts_sspa_chunk_header_t *chdr)
#endif
erts_sspa_data_t *erts_sspa_create(size_t blk_sz,
- int pa_size);
+ int pa_size,
+ int nthreads,
+ const char* name);
void erts_sspa_remote_free(erts_sspa_chunk_header_t *chdr,
erts_sspa_blk_t *blk,
int cinit);
@@ -158,7 +165,7 @@ ERTS_GLB_INLINE int erts_sspa_free(erts_sspa_data_t *data, int cix, char *blk);
ERTS_GLB_INLINE erts_sspa_chunk_t *
erts_sspa_cix2chunk(erts_sspa_data_t *data, int cix)
{
- ASSERT(0 <= cix && cix < erts_no_schedulers);
+ ASSERT(0 <= cix && cix < data->nthreads);
return (erts_sspa_chunk_t *) (data->start + cix*data->chunks_mem_size);
}
@@ -171,7 +178,7 @@ erts_sspa_ptr2cix(erts_sspa_data_t *data, void *ptr)
return -1;
diff = ((char *) ptr) - data->start;
cix = (int) diff / data->chunks_mem_size;
- ASSERT(0 <= cix && cix < erts_no_schedulers);
+ ASSERT(0 <= cix && cix < data->nthreads);
return cix;
}
@@ -235,6 +242,5 @@ erts_sspa_free(erts_sspa_data_t *data, int cix, char *cblk)
#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
-#endif /* ERTS_SMP */
#endif /* ERTS_SCHED_SPEC_PRE_ALLOC_H__ */
diff --git a/erts/emulator/beam/erl_smp.h b/erts/emulator/beam/erl_smp.h
deleted file mode 100644
index c38ef47d87..0000000000
--- a/erts/emulator/beam/erl_smp.h
+++ /dev/null
@@ -1,1417 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2013. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-/*
- * SMP interface to ethread library.
- * This is essentially "sed s/erts_/erts_smp_/g < erl_threads.h > erl_smp.h",
- * plus changes to NOP operations when ERTS_SMP is disabled.
- * Author: Mikael Pettersson
- */
-#ifndef ERL_SMP_H
-#define ERL_SMP_H
-#include "erl_threads.h"
-
-#ifdef ERTS_ENABLE_LOCK_POSITION
-#define erts_smp_mtx_lock(L) erts_smp_mtx_lock_x(L, __FILE__, __LINE__)
-#define erts_smp_mtx_trylock(L) erts_smp_mtx_trylock_x(L, __FILE__, __LINE__)
-#define erts_smp_spin_lock(L) erts_smp_spin_lock_x(L, __FILE__, __LINE__)
-#define erts_smp_rwmtx_tryrlock(L) erts_smp_rwmtx_tryrlock_x(L, __FILE__, __LINE__)
-#define erts_smp_rwmtx_rlock(L) erts_smp_rwmtx_rlock_x(L, __FILE__, __LINE__)
-#define erts_smp_rwmtx_tryrwlock(L) erts_smp_rwmtx_tryrwlock_x(L, __FILE__, __LINE__)
-#define erts_smp_rwmtx_rwlock(L) erts_smp_rwmtx_rwlock_x(L, __FILE__, __LINE__)
-#define erts_smp_read_lock(L) erts_smp_read_lock_x(L, __FILE__, __LINE__)
-#define erts_smp_write_lock(L) erts_smp_write_lock_x(L, __FILE__, __LINE__)
-#endif
-
-
-#ifdef ERTS_SMP
-#define ERTS_SMP_THR_OPTS_DEFAULT_INITER ERTS_THR_OPTS_DEFAULT_INITER
-typedef erts_thr_opts_t erts_smp_thr_opts_t;
-typedef erts_thr_init_data_t erts_smp_thr_init_data_t;
-typedef erts_tid_t erts_smp_tid_t;
-typedef erts_mtx_t erts_smp_mtx_t;
-typedef erts_cnd_t erts_smp_cnd_t;
-#define ERTS_SMP_RWMTX_OPT_DEFAULT_INITER ERTS_RWMTX_OPT_DEFAULT_INITER
-#define ERTS_SMP_RWMTX_TYPE_NORMAL ERTS_RWMTX_TYPE_NORMAL
-#define ERTS_SMP_RWMTX_TYPE_FREQUENT_READ ERTS_RWMTX_TYPE_FREQUENT_READ
-#define ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ \
- ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ
-#define ERTS_SMP_RWMTX_LONG_LIVED ERTS_RWMTX_LONG_LIVED
-#define ERTS_SMP_RWMTX_SHORT_LIVED ERTS_RWMTX_SHORT_LIVED
-#define ERTS_SMP_RWMTX_UNKNOWN_LIVED ERTS_RWMTX_UNKNOWN_LIVED
-typedef erts_rwmtx_opt_t erts_smp_rwmtx_opt_t;
-typedef erts_rwmtx_t erts_smp_rwmtx_t;
-typedef erts_tsd_key_t erts_smp_tsd_key_t;
-#define erts_smp_dw_atomic_t erts_dw_atomic_t
-#define erts_smp_atomic_t erts_atomic_t
-#define erts_smp_atomic32_t erts_atomic32_t
-typedef erts_spinlock_t erts_smp_spinlock_t;
-typedef erts_rwlock_t erts_smp_rwlock_t;
-void erts_thr_fatal_error(int, char *); /* implemented in erl_init.c */
-
-#define ERTS_SMP_MEMORY_BARRIER ERTS_THR_MEMORY_BARRIER
-#define ERTS_SMP_WRITE_MEMORY_BARRIER ERTS_THR_WRITE_MEMORY_BARRIER
-#define ERTS_SMP_READ_MEMORY_BARRIER ERTS_THR_READ_MEMORY_BARRIER
-#define ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER
-
-#else /* #ifdef ERTS_SMP */
-
-#define ERTS_SMP_THR_OPTS_DEFAULT_INITER {0}
-typedef int erts_smp_thr_opts_t;
-typedef int erts_smp_thr_init_data_t;
-typedef int erts_smp_tid_t;
-typedef int erts_smp_mtx_t;
-typedef int erts_smp_cnd_t;
-#define ERTS_SMP_RWMTX_OPT_DEFAULT_INITER {0}
-#define ERTS_SMP_RWMTX_TYPE_NORMAL 0
-#define ERTS_SMP_RWMTX_TYPE_FREQUENT_READ 0
-#define ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ 0
-#define ERTS_SMP_RWMTX_LONG_LIVED 0
-#define ERTS_SMP_RWMTX_SHORT_LIVED 0
-#define ERTS_SMP_RWMTX_UNKNOWN_LIVED 0
-typedef struct {
- char type;
- char lived;
- int main_spincount;
- int aux_spincount;
-} erts_smp_rwmtx_opt_t;
-typedef int erts_smp_rwmtx_t;
-typedef int erts_smp_tsd_key_t;
-#define erts_smp_dw_atomic_t erts_no_dw_atomic_t
-#define erts_smp_atomic_t erts_no_atomic_t
-#define erts_smp_atomic32_t erts_no_atomic32_t
-#if __GNUC__ > 2
-typedef struct { } erts_smp_spinlock_t;
-typedef struct { } erts_smp_rwlock_t;
-#else
-typedef struct { int gcc_is_buggy; } erts_smp_spinlock_t;
-typedef struct { int gcc_is_buggy; } erts_smp_rwlock_t;
-#endif
-
-#define ERTS_SMP_MEMORY_BARRIER
-#define ERTS_SMP_WRITE_MEMORY_BARRIER
-#define ERTS_SMP_READ_MEMORY_BARRIER
-#define ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER
-
-#endif /* #ifdef ERTS_SMP */
-
-ERTS_GLB_INLINE void erts_smp_thr_init(erts_smp_thr_init_data_t *id);
-ERTS_GLB_INLINE void erts_smp_thr_create(erts_smp_tid_t *tid,
- void * (*func)(void *),
- void *arg,
- erts_smp_thr_opts_t *opts);
-ERTS_GLB_INLINE void erts_smp_thr_join(erts_smp_tid_t tid, void **thr_res);
-ERTS_GLB_INLINE void erts_smp_thr_detach(erts_smp_tid_t tid);
-ERTS_GLB_INLINE void erts_smp_thr_exit(void *res);
-ERTS_GLB_INLINE void erts_smp_install_exit_handler(void (*exit_handler)(void));
-ERTS_GLB_INLINE erts_smp_tid_t erts_smp_thr_self(void);
-ERTS_GLB_INLINE int erts_smp_equal_tids(erts_smp_tid_t x, erts_smp_tid_t y);
-#ifdef ERTS_HAVE_REC_MTX_INIT
-#define ERTS_SMP_HAVE_REC_MTX_INIT 1
-ERTS_GLB_INLINE void erts_smp_rec_mtx_init(erts_smp_mtx_t *mtx);
-#endif
-ERTS_GLB_INLINE void erts_smp_mtx_init_x(erts_smp_mtx_t *mtx,
- char *name,
- Eterm extra);
-ERTS_GLB_INLINE void erts_smp_mtx_init_locked_x(erts_smp_mtx_t *mtx,
- char *name,
- Eterm extra);
-ERTS_GLB_INLINE void erts_smp_mtx_init(erts_smp_mtx_t *mtx, char *name);
-ERTS_GLB_INLINE void erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, char *name);
-ERTS_GLB_INLINE void erts_smp_mtx_destroy(erts_smp_mtx_t *mtx);
-#ifdef ERTS_ENABLE_LOCK_POSITION
-ERTS_GLB_INLINE int erts_smp_mtx_trylock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line);
-ERTS_GLB_INLINE void erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line);
-#else
-ERTS_GLB_INLINE int erts_smp_mtx_trylock(erts_smp_mtx_t *mtx);
-ERTS_GLB_INLINE void erts_smp_mtx_lock(erts_smp_mtx_t *mtx);
-#endif
-ERTS_GLB_INLINE void erts_smp_mtx_unlock(erts_smp_mtx_t *mtx);
-ERTS_GLB_INLINE int erts_smp_lc_mtx_is_locked(erts_smp_mtx_t *mtx);
-ERTS_GLB_INLINE void erts_smp_cnd_init(erts_smp_cnd_t *cnd);
-ERTS_GLB_INLINE void erts_smp_cnd_destroy(erts_smp_cnd_t *cnd);
-ERTS_GLB_INLINE void erts_smp_cnd_wait(erts_smp_cnd_t *cnd,
- erts_smp_mtx_t *mtx);
-ERTS_GLB_INLINE void erts_smp_cnd_signal(erts_smp_cnd_t *cnd);
-ERTS_GLB_INLINE void erts_smp_cnd_broadcast(erts_smp_cnd_t *cnd);
-ERTS_GLB_INLINE void erts_smp_rwmtx_set_reader_group(int no);
-ERTS_GLB_INLINE void erts_smp_rwmtx_init_opt_x(erts_smp_rwmtx_t *rwmtx,
- erts_smp_rwmtx_opt_t *opt,
- char *name,
- Eterm extra);
-ERTS_GLB_INLINE void erts_smp_rwmtx_init_x(erts_smp_rwmtx_t *rwmtx,
- char *name,
- Eterm extra);
-ERTS_GLB_INLINE void erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx,
- erts_smp_rwmtx_opt_t *opt,
- char *name);
-ERTS_GLB_INLINE void erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx,
- char *name);
-ERTS_GLB_INLINE void erts_smp_rwmtx_destroy(erts_smp_rwmtx_t *rwmtx);
-#ifdef ERTS_ENABLE_LOCK_POSITION
-ERTS_GLB_INLINE int erts_smp_rwmtx_tryrlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line);
-ERTS_GLB_INLINE void erts_smp_rwmtx_rlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line);
-ERTS_GLB_INLINE void erts_smp_rwmtx_rwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line);
-ERTS_GLB_INLINE int erts_smp_rwmtx_tryrwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line);
-#else
-ERTS_GLB_INLINE int erts_smp_rwmtx_tryrlock(erts_smp_rwmtx_t *rwmtx);
-ERTS_GLB_INLINE void erts_smp_rwmtx_rlock(erts_smp_rwmtx_t *rwmtx);
-ERTS_GLB_INLINE void erts_smp_rwmtx_rwlock(erts_smp_rwmtx_t *rwmtx);
-ERTS_GLB_INLINE int erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx);
-#endif
-ERTS_GLB_INLINE void erts_smp_rwmtx_runlock(erts_smp_rwmtx_t *rwmtx);
-ERTS_GLB_INLINE void erts_smp_rwmtx_rwunlock(erts_smp_rwmtx_t *rwmtx);
-ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rlocked(erts_smp_rwmtx_t *mtx);
-ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx);
-ERTS_GLB_INLINE void erts_smp_spinlock_init_x(erts_smp_spinlock_t *lock,
- char *name,
- Eterm extra);
-ERTS_GLB_INLINE void erts_smp_spinlock_init(erts_smp_spinlock_t *lock,
- char *name);
-ERTS_GLB_INLINE void erts_smp_spinlock_destroy(erts_smp_spinlock_t *lock);
-ERTS_GLB_INLINE void erts_smp_spin_unlock(erts_smp_spinlock_t *lock);
-#ifdef ERTS_ENABLE_LOCK_POSITION
-ERTS_GLB_INLINE void erts_smp_spin_lock_x(erts_smp_spinlock_t *lock, char *file, unsigned int line);
-#else
-ERTS_GLB_INLINE void erts_smp_spin_lock(erts_smp_spinlock_t *lock);
-#endif
-ERTS_GLB_INLINE int erts_smp_lc_spinlock_is_locked(erts_smp_spinlock_t *lock);
-ERTS_GLB_INLINE void erts_smp_rwlock_init_x(erts_smp_rwlock_t *lock,
- char *name,
- Eterm extra);
-ERTS_GLB_INLINE void erts_smp_rwlock_init(erts_smp_rwlock_t *lock,
- char *name);
-ERTS_GLB_INLINE void erts_smp_rwlock_destroy(erts_smp_rwlock_t *lock);
-ERTS_GLB_INLINE void erts_smp_read_unlock(erts_smp_rwlock_t *lock);
-#ifdef ERTS_ENABLE_LOCK_POSITION
-ERTS_GLB_INLINE void erts_smp_read_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line);
-ERTS_GLB_INLINE void erts_smp_write_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line);
-#else
-ERTS_GLB_INLINE void erts_smp_read_lock(erts_smp_rwlock_t *lock);
-ERTS_GLB_INLINE void erts_smp_write_lock(erts_smp_rwlock_t *lock);
-#endif
-ERTS_GLB_INLINE void erts_smp_write_unlock(erts_smp_rwlock_t *lock);
-ERTS_GLB_INLINE int erts_smp_lc_rwlock_is_rlocked(erts_smp_rwlock_t *lock);
-ERTS_GLB_INLINE int erts_smp_lc_rwlock_is_rwlocked(erts_smp_rwlock_t *lock);
-ERTS_GLB_INLINE void erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp,
- char *keyname);
-ERTS_GLB_INLINE void erts_smp_tsd_key_delete(erts_smp_tsd_key_t key);
-ERTS_GLB_INLINE void erts_smp_tsd_set(erts_smp_tsd_key_t key, void *value);
-ERTS_GLB_INLINE void * erts_smp_tsd_get(erts_smp_tsd_key_t key);
-
-#ifdef ERTS_THR_HAVE_SIG_FUNCS
-#define ERTS_SMP_THR_HAVE_SIG_FUNCS 1
-ERTS_GLB_INLINE void erts_smp_thr_sigmask(int how,
- const sigset_t *set,
- sigset_t *oset);
-ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
-#endif /* #ifdef ERTS_THR_HAVE_SIG_FUNCS */
-
-/*
- * See "Documentation of atomics and memory barriers" at the top
- * of erl_threads.h for info on atomics.
- */
-
-#ifdef ERTS_SMP
-
-/* Double word size atomics */
-
-#define erts_smp_dw_atomic_init_nob erts_dw_atomic_init_nob
-#define erts_smp_dw_atomic_set_nob erts_dw_atomic_set_nob
-#define erts_smp_dw_atomic_read_nob erts_dw_atomic_read_nob
-#define erts_smp_dw_atomic_cmpxchg_nob erts_dw_atomic_cmpxchg_nob
-
-#define erts_smp_dw_atomic_init_mb erts_dw_atomic_init_mb
-#define erts_smp_dw_atomic_set_mb erts_dw_atomic_set_mb
-#define erts_smp_dw_atomic_read_mb erts_dw_atomic_read_mb
-#define erts_smp_dw_atomic_cmpxchg_mb erts_dw_atomic_cmpxchg_mb
-
-#define erts_smp_dw_atomic_init_acqb erts_dw_atomic_init_acqb
-#define erts_smp_dw_atomic_set_acqb erts_dw_atomic_set_acqb
-#define erts_smp_dw_atomic_read_acqb erts_dw_atomic_read_acqb
-#define erts_smp_dw_atomic_cmpxchg_acqb erts_dw_atomic_cmpxchg_acqb
-
-#define erts_smp_dw_atomic_init_relb erts_dw_atomic_init_relb
-#define erts_smp_dw_atomic_set_relb erts_dw_atomic_set_relb
-#define erts_smp_dw_atomic_read_relb erts_dw_atomic_read_relb
-#define erts_smp_dw_atomic_cmpxchg_relb erts_dw_atomic_cmpxchg_relb
-
-#define erts_smp_dw_atomic_init_ddrb erts_dw_atomic_init_ddrb
-#define erts_smp_dw_atomic_set_ddrb erts_dw_atomic_set_ddrb
-#define erts_smp_dw_atomic_read_ddrb erts_dw_atomic_read_ddrb
-#define erts_smp_dw_atomic_cmpxchg_ddrb erts_dw_atomic_cmpxchg_ddrb
-
-#define erts_smp_dw_atomic_init_rb erts_dw_atomic_init_rb
-#define erts_smp_dw_atomic_set_rb erts_dw_atomic_set_rb
-#define erts_smp_dw_atomic_read_rb erts_dw_atomic_read_rb
-#define erts_smp_dw_atomic_cmpxchg_rb erts_dw_atomic_cmpxchg_rb
-
-#define erts_smp_dw_atomic_init_wb erts_dw_atomic_init_wb
-#define erts_smp_dw_atomic_set_wb erts_dw_atomic_set_wb
-#define erts_smp_dw_atomic_read_wb erts_dw_atomic_read_wb
-#define erts_smp_dw_atomic_cmpxchg_wb erts_dw_atomic_cmpxchg_wb
-
-#define erts_smp_dw_atomic_set_dirty erts_dw_atomic_set_dirty
-#define erts_smp_dw_atomic_read_dirty erts_dw_atomic_read_dirty
-
-/* Word size atomics */
-
-#define erts_smp_atomic_init_nob erts_atomic_init_nob
-#define erts_smp_atomic_set_nob erts_atomic_set_nob
-#define erts_smp_atomic_read_nob erts_atomic_read_nob
-#define erts_smp_atomic_inc_read_nob erts_atomic_inc_read_nob
-#define erts_smp_atomic_dec_read_nob erts_atomic_dec_read_nob
-#define erts_smp_atomic_inc_nob erts_atomic_inc_nob
-#define erts_smp_atomic_dec_nob erts_atomic_dec_nob
-#define erts_smp_atomic_add_read_nob erts_atomic_add_read_nob
-#define erts_smp_atomic_add_nob erts_atomic_add_nob
-#define erts_smp_atomic_read_bor_nob erts_atomic_read_bor_nob
-#define erts_smp_atomic_read_band_nob erts_atomic_read_band_nob
-#define erts_smp_atomic_xchg_nob erts_atomic_xchg_nob
-#define erts_smp_atomic_cmpxchg_nob erts_atomic_cmpxchg_nob
-#define erts_smp_atomic_read_bset_nob erts_atomic_read_bset_nob
-
-#define erts_smp_atomic_init_mb erts_atomic_init_mb
-#define erts_smp_atomic_set_mb erts_atomic_set_mb
-#define erts_smp_atomic_read_mb erts_atomic_read_mb
-#define erts_smp_atomic_inc_read_mb erts_atomic_inc_read_mb
-#define erts_smp_atomic_dec_read_mb erts_atomic_dec_read_mb
-#define erts_smp_atomic_inc_mb erts_atomic_inc_mb
-#define erts_smp_atomic_dec_mb erts_atomic_dec_mb
-#define erts_smp_atomic_add_read_mb erts_atomic_add_read_mb
-#define erts_smp_atomic_add_mb erts_atomic_add_mb
-#define erts_smp_atomic_read_bor_mb erts_atomic_read_bor_mb
-#define erts_smp_atomic_read_band_mb erts_atomic_read_band_mb
-#define erts_smp_atomic_xchg_mb erts_atomic_xchg_mb
-#define erts_smp_atomic_cmpxchg_mb erts_atomic_cmpxchg_mb
-#define erts_smp_atomic_read_bset_mb erts_atomic_read_bset_mb
-
-#define erts_smp_atomic_init_acqb erts_atomic_init_acqb
-#define erts_smp_atomic_set_acqb erts_atomic_set_acqb
-#define erts_smp_atomic_read_acqb erts_atomic_read_acqb
-#define erts_smp_atomic_inc_read_acqb erts_atomic_inc_read_acqb
-#define erts_smp_atomic_dec_read_acqb erts_atomic_dec_read_acqb
-#define erts_smp_atomic_inc_acqb erts_atomic_inc_acqb
-#define erts_smp_atomic_dec_acqb erts_atomic_dec_acqb
-#define erts_smp_atomic_add_read_acqb erts_atomic_add_read_acqb
-#define erts_smp_atomic_add_acqb erts_atomic_add_acqb
-#define erts_smp_atomic_read_bor_acqb erts_atomic_read_bor_acqb
-#define erts_smp_atomic_read_band_acqb erts_atomic_read_band_acqb
-#define erts_smp_atomic_xchg_acqb erts_atomic_xchg_acqb
-#define erts_smp_atomic_cmpxchg_acqb erts_atomic_cmpxchg_acqb
-#define erts_smp_atomic_read_bset_acqb erts_atomic_read_bset_acqb
-
-#define erts_smp_atomic_init_relb erts_atomic_init_relb
-#define erts_smp_atomic_set_relb erts_atomic_set_relb
-#define erts_smp_atomic_read_relb erts_atomic_read_relb
-#define erts_smp_atomic_inc_read_relb erts_atomic_inc_read_relb
-#define erts_smp_atomic_dec_read_relb erts_atomic_dec_read_relb
-#define erts_smp_atomic_inc_relb erts_atomic_inc_relb
-#define erts_smp_atomic_dec_relb erts_atomic_dec_relb
-#define erts_smp_atomic_add_read_relb erts_atomic_add_read_relb
-#define erts_smp_atomic_add_relb erts_atomic_add_relb
-#define erts_smp_atomic_read_bor_relb erts_atomic_read_bor_relb
-#define erts_smp_atomic_read_band_relb erts_atomic_read_band_relb
-#define erts_smp_atomic_xchg_relb erts_atomic_xchg_relb
-#define erts_smp_atomic_cmpxchg_relb erts_atomic_cmpxchg_relb
-#define erts_smp_atomic_read_bset_relb erts_atomic_read_bset_relb
-
-#define erts_smp_atomic_init_ddrb erts_atomic_init_ddrb
-#define erts_smp_atomic_set_ddrb erts_atomic_set_ddrb
-#define erts_smp_atomic_read_ddrb erts_atomic_read_ddrb
-#define erts_smp_atomic_inc_read_ddrb erts_atomic_inc_read_ddrb
-#define erts_smp_atomic_dec_read_ddrb erts_atomic_dec_read_ddrb
-#define erts_smp_atomic_inc_ddrb erts_atomic_inc_ddrb
-#define erts_smp_atomic_dec_ddrb erts_atomic_dec_ddrb
-#define erts_smp_atomic_add_read_ddrb erts_atomic_add_read_ddrb
-#define erts_smp_atomic_add_ddrb erts_atomic_add_ddrb
-#define erts_smp_atomic_read_bor_ddrb erts_atomic_read_bor_ddrb
-#define erts_smp_atomic_read_band_ddrb erts_atomic_read_band_ddrb
-#define erts_smp_atomic_xchg_ddrb erts_atomic_xchg_ddrb
-#define erts_smp_atomic_cmpxchg_ddrb erts_atomic_cmpxchg_ddrb
-#define erts_smp_atomic_read_bset_ddrb erts_atomic_read_bset_ddrb
-
-#define erts_smp_atomic_init_rb erts_atomic_init_rb
-#define erts_smp_atomic_set_rb erts_atomic_set_rb
-#define erts_smp_atomic_read_rb erts_atomic_read_rb
-#define erts_smp_atomic_inc_read_rb erts_atomic_inc_read_rb
-#define erts_smp_atomic_dec_read_rb erts_atomic_dec_read_rb
-#define erts_smp_atomic_inc_rb erts_atomic_inc_rb
-#define erts_smp_atomic_dec_rb erts_atomic_dec_rb
-#define erts_smp_atomic_add_read_rb erts_atomic_add_read_rb
-#define erts_smp_atomic_add_rb erts_atomic_add_rb
-#define erts_smp_atomic_read_bor_rb erts_atomic_read_bor_rb
-#define erts_smp_atomic_read_band_rb erts_atomic_read_band_rb
-#define erts_smp_atomic_xchg_rb erts_atomic_xchg_rb
-#define erts_smp_atomic_cmpxchg_rb erts_atomic_cmpxchg_rb
-#define erts_smp_atomic_read_bset_rb erts_atomic_read_bset_rb
-
-#define erts_smp_atomic_init_wb erts_atomic_init_wb
-#define erts_smp_atomic_set_wb erts_atomic_set_wb
-#define erts_smp_atomic_read_wb erts_atomic_read_wb
-#define erts_smp_atomic_inc_read_wb erts_atomic_inc_read_wb
-#define erts_smp_atomic_dec_read_wb erts_atomic_dec_read_wb
-#define erts_smp_atomic_inc_wb erts_atomic_inc_wb
-#define erts_smp_atomic_dec_wb erts_atomic_dec_wb
-#define erts_smp_atomic_add_read_wb erts_atomic_add_read_wb
-#define erts_smp_atomic_add_wb erts_atomic_add_wb
-#define erts_smp_atomic_read_bor_wb erts_atomic_read_bor_wb
-#define erts_smp_atomic_read_band_wb erts_atomic_read_band_wb
-#define erts_smp_atomic_xchg_wb erts_atomic_xchg_wb
-#define erts_smp_atomic_cmpxchg_wb erts_atomic_cmpxchg_wb
-#define erts_smp_atomic_read_bset_wb erts_atomic_read_bset_wb
-
-#define erts_smp_atomic_set_dirty erts_atomic_set_dirty
-#define erts_smp_atomic_read_dirty erts_atomic_read_dirty
-
-/* 32-bit atomics */
-
-#define erts_smp_atomic32_init_nob erts_atomic32_init_nob
-#define erts_smp_atomic32_set_nob erts_atomic32_set_nob
-#define erts_smp_atomic32_read_nob erts_atomic32_read_nob
-#define erts_smp_atomic32_inc_read_nob erts_atomic32_inc_read_nob
-#define erts_smp_atomic32_dec_read_nob erts_atomic32_dec_read_nob
-#define erts_smp_atomic32_inc_nob erts_atomic32_inc_nob
-#define erts_smp_atomic32_dec_nob erts_atomic32_dec_nob
-#define erts_smp_atomic32_add_read_nob erts_atomic32_add_read_nob
-#define erts_smp_atomic32_add_nob erts_atomic32_add_nob
-#define erts_smp_atomic32_read_bor_nob erts_atomic32_read_bor_nob
-#define erts_smp_atomic32_read_band_nob erts_atomic32_read_band_nob
-#define erts_smp_atomic32_xchg_nob erts_atomic32_xchg_nob
-#define erts_smp_atomic32_cmpxchg_nob erts_atomic32_cmpxchg_nob
-#define erts_smp_atomic32_read_bset_nob erts_atomic32_read_bset_nob
-
-#define erts_smp_atomic32_init_mb erts_atomic32_init_mb
-#define erts_smp_atomic32_set_mb erts_atomic32_set_mb
-#define erts_smp_atomic32_read_mb erts_atomic32_read_mb
-#define erts_smp_atomic32_inc_read_mb erts_atomic32_inc_read_mb
-#define erts_smp_atomic32_dec_read_mb erts_atomic32_dec_read_mb
-#define erts_smp_atomic32_inc_mb erts_atomic32_inc_mb
-#define erts_smp_atomic32_dec_mb erts_atomic32_dec_mb
-#define erts_smp_atomic32_add_read_mb erts_atomic32_add_read_mb
-#define erts_smp_atomic32_add_mb erts_atomic32_add_mb
-#define erts_smp_atomic32_read_bor_mb erts_atomic32_read_bor_mb
-#define erts_smp_atomic32_read_band_mb erts_atomic32_read_band_mb
-#define erts_smp_atomic32_xchg_mb erts_atomic32_xchg_mb
-#define erts_smp_atomic32_cmpxchg_mb erts_atomic32_cmpxchg_mb
-#define erts_smp_atomic32_read_bset_mb erts_atomic32_read_bset_mb
-
-#define erts_smp_atomic32_init_acqb erts_atomic32_init_acqb
-#define erts_smp_atomic32_set_acqb erts_atomic32_set_acqb
-#define erts_smp_atomic32_read_acqb erts_atomic32_read_acqb
-#define erts_smp_atomic32_inc_read_acqb erts_atomic32_inc_read_acqb
-#define erts_smp_atomic32_dec_read_acqb erts_atomic32_dec_read_acqb
-#define erts_smp_atomic32_inc_acqb erts_atomic32_inc_acqb
-#define erts_smp_atomic32_dec_acqb erts_atomic32_dec_acqb
-#define erts_smp_atomic32_add_read_acqb erts_atomic32_add_read_acqb
-#define erts_smp_atomic32_add_acqb erts_atomic32_add_acqb
-#define erts_smp_atomic32_read_bor_acqb erts_atomic32_read_bor_acqb
-#define erts_smp_atomic32_read_band_acqb erts_atomic32_read_band_acqb
-#define erts_smp_atomic32_xchg_acqb erts_atomic32_xchg_acqb
-#define erts_smp_atomic32_cmpxchg_acqb erts_atomic32_cmpxchg_acqb
-#define erts_smp_atomic32_read_bset_acqb erts_atomic32_read_bset_acqb
-
-#define erts_smp_atomic32_init_relb erts_atomic32_init_relb
-#define erts_smp_atomic32_set_relb erts_atomic32_set_relb
-#define erts_smp_atomic32_read_relb erts_atomic32_read_relb
-#define erts_smp_atomic32_inc_read_relb erts_atomic32_inc_read_relb
-#define erts_smp_atomic32_dec_read_relb erts_atomic32_dec_read_relb
-#define erts_smp_atomic32_inc_relb erts_atomic32_inc_relb
-#define erts_smp_atomic32_dec_relb erts_atomic32_dec_relb
-#define erts_smp_atomic32_add_read_relb erts_atomic32_add_read_relb
-#define erts_smp_atomic32_add_relb erts_atomic32_add_relb
-#define erts_smp_atomic32_read_bor_relb erts_atomic32_read_bor_relb
-#define erts_smp_atomic32_read_band_relb erts_atomic32_read_band_relb
-#define erts_smp_atomic32_xchg_relb erts_atomic32_xchg_relb
-#define erts_smp_atomic32_cmpxchg_relb erts_atomic32_cmpxchg_relb
-#define erts_smp_atomic32_read_bset_relb erts_atomic32_read_bset_relb
-
-#define erts_smp_atomic32_init_ddrb erts_atomic32_init_ddrb
-#define erts_smp_atomic32_set_ddrb erts_atomic32_set_ddrb
-#define erts_smp_atomic32_read_ddrb erts_atomic32_read_ddrb
-#define erts_smp_atomic32_inc_read_ddrb erts_atomic32_inc_read_ddrb
-#define erts_smp_atomic32_dec_read_ddrb erts_atomic32_dec_read_ddrb
-#define erts_smp_atomic32_inc_ddrb erts_atomic32_inc_ddrb
-#define erts_smp_atomic32_dec_ddrb erts_atomic32_dec_ddrb
-#define erts_smp_atomic32_add_read_ddrb erts_atomic32_add_read_ddrb
-#define erts_smp_atomic32_add_ddrb erts_atomic32_add_ddrb
-#define erts_smp_atomic32_read_bor_ddrb erts_atomic32_read_bor_ddrb
-#define erts_smp_atomic32_read_band_ddrb erts_atomic32_read_band_ddrb
-#define erts_smp_atomic32_xchg_ddrb erts_atomic32_xchg_ddrb
-#define erts_smp_atomic32_cmpxchg_ddrb erts_atomic32_cmpxchg_ddrb
-#define erts_smp_atomic32_read_bset_ddrb erts_atomic32_read_bset_ddrb
-
-#define erts_smp_atomic32_init_rb erts_atomic32_init_rb
-#define erts_smp_atomic32_set_rb erts_atomic32_set_rb
-#define erts_smp_atomic32_read_rb erts_atomic32_read_rb
-#define erts_smp_atomic32_inc_read_rb erts_atomic32_inc_read_rb
-#define erts_smp_atomic32_dec_read_rb erts_atomic32_dec_read_rb
-#define erts_smp_atomic32_inc_rb erts_atomic32_inc_rb
-#define erts_smp_atomic32_dec_rb erts_atomic32_dec_rb
-#define erts_smp_atomic32_add_read_rb erts_atomic32_add_read_rb
-#define erts_smp_atomic32_add_rb erts_atomic32_add_rb
-#define erts_smp_atomic32_read_bor_rb erts_atomic32_read_bor_rb
-#define erts_smp_atomic32_read_band_rb erts_atomic32_read_band_rb
-#define erts_smp_atomic32_xchg_rb erts_atomic32_xchg_rb
-#define erts_smp_atomic32_cmpxchg_rb erts_atomic32_cmpxchg_rb
-#define erts_smp_atomic32_read_bset_rb erts_atomic32_read_bset_rb
-
-#define erts_smp_atomic32_init_wb erts_atomic32_init_wb
-#define erts_smp_atomic32_set_wb erts_atomic32_set_wb
-#define erts_smp_atomic32_read_wb erts_atomic32_read_wb
-#define erts_smp_atomic32_inc_read_wb erts_atomic32_inc_read_wb
-#define erts_smp_atomic32_dec_read_wb erts_atomic32_dec_read_wb
-#define erts_smp_atomic32_inc_wb erts_atomic32_inc_wb
-#define erts_smp_atomic32_dec_wb erts_atomic32_dec_wb
-#define erts_smp_atomic32_add_read_wb erts_atomic32_add_read_wb
-#define erts_smp_atomic32_add_wb erts_atomic32_add_wb
-#define erts_smp_atomic32_read_bor_wb erts_atomic32_read_bor_wb
-#define erts_smp_atomic32_read_band_wb erts_atomic32_read_band_wb
-#define erts_smp_atomic32_xchg_wb erts_atomic32_xchg_wb
-#define erts_smp_atomic32_cmpxchg_wb erts_atomic32_cmpxchg_wb
-#define erts_smp_atomic32_read_bset_wb erts_atomic32_read_bset_wb
-
-#define erts_smp_atomic32_set_dirty erts_atomic32_set_dirty
-#define erts_smp_atomic32_read_dirty erts_atomic32_read_dirty
-
-#else /* !ERTS_SMP */
-
-/* Double word size atomics */
-
-#define erts_smp_dw_atomic_init_nob erts_no_dw_atomic_set
-#define erts_smp_dw_atomic_set_nob erts_no_dw_atomic_set
-#define erts_smp_dw_atomic_read_nob erts_no_dw_atomic_read
-#define erts_smp_dw_atomic_cmpxchg_nob erts_no_dw_atomic_cmpxchg
-
-#define erts_smp_dw_atomic_init_mb erts_no_dw_atomic_init
-#define erts_smp_dw_atomic_set_mb erts_no_dw_atomic_set
-#define erts_smp_dw_atomic_read_mb erts_no_dw_atomic_read
-#define erts_smp_dw_atomic_cmpxchg_mb erts_no_dw_atomic_cmpxchg
-
-#define erts_smp_dw_atomic_init_acqb erts_no_dw_atomic_init
-#define erts_smp_dw_atomic_set_acqb erts_no_dw_atomic_set
-#define erts_smp_dw_atomic_read_acqb erts_no_dw_atomic_read
-#define erts_smp_dw_atomic_cmpxchg_acqb erts_no_dw_atomic_cmpxchg
-
-#define erts_smp_dw_atomic_init_relb erts_no_dw_atomic_init
-#define erts_smp_dw_atomic_set_relb erts_no_dw_atomic_set
-#define erts_smp_dw_atomic_read_relb erts_no_dw_atomic_read
-#define erts_smp_dw_atomic_cmpxchg_relb erts_no_dw_atomic_cmpxchg
-
-#define erts_smp_dw_atomic_init_ddrb erts_no_dw_atomic_init
-#define erts_smp_dw_atomic_set_ddrb erts_no_dw_atomic_set
-#define erts_smp_dw_atomic_read_ddrb erts_no_dw_atomic_read
-#define erts_smp_dw_atomic_cmpxchg_ddrb erts_no_dw_atomic_cmpxchg
-
-#define erts_smp_dw_atomic_init_rb erts_no_dw_atomic_init
-#define erts_smp_dw_atomic_set_rb erts_no_dw_atomic_set
-#define erts_smp_dw_atomic_read_rb erts_no_dw_atomic_read
-#define erts_smp_dw_atomic_cmpxchg_rb erts_no_dw_atomic_cmpxchg
-
-#define erts_smp_dw_atomic_init_wb erts_no_dw_atomic_init
-#define erts_smp_dw_atomic_set_wb erts_no_dw_atomic_set
-#define erts_smp_dw_atomic_read_wb erts_no_dw_atomic_read
-#define erts_smp_dw_atomic_cmpxchg_wb erts_no_dw_atomic_cmpxchg
-
-#define erts_smp_dw_atomic_set_dirty erts_no_dw_atomic_set
-#define erts_smp_dw_atomic_read_dirty erts_no_dw_atomic_read
-
-/* Word size atomics */
-
-#define erts_smp_atomic_init_nob erts_no_atomic_set
-#define erts_smp_atomic_set_nob erts_no_atomic_set
-#define erts_smp_atomic_read_nob erts_no_atomic_read
-#define erts_smp_atomic_inc_read_nob erts_no_atomic_inc_read
-#define erts_smp_atomic_dec_read_nob erts_no_atomic_dec_read
-#define erts_smp_atomic_inc_nob erts_no_atomic_inc
-#define erts_smp_atomic_dec_nob erts_no_atomic_dec
-#define erts_smp_atomic_add_read_nob erts_no_atomic_add_read
-#define erts_smp_atomic_add_nob erts_no_atomic_add
-#define erts_smp_atomic_read_bor_nob erts_no_atomic_read_bor
-#define erts_smp_atomic_read_band_nob erts_no_atomic_read_band
-#define erts_smp_atomic_xchg_nob erts_no_atomic_xchg
-#define erts_smp_atomic_cmpxchg_nob erts_no_atomic_cmpxchg
-#define erts_smp_atomic_read_bset_nob erts_no_atomic_read_bset
-
-#define erts_smp_atomic_init_mb erts_no_atomic_set
-#define erts_smp_atomic_set_mb erts_no_atomic_set
-#define erts_smp_atomic_read_mb erts_no_atomic_read
-#define erts_smp_atomic_inc_read_mb erts_no_atomic_inc_read
-#define erts_smp_atomic_dec_read_mb erts_no_atomic_dec_read
-#define erts_smp_atomic_inc_mb erts_no_atomic_inc
-#define erts_smp_atomic_dec_mb erts_no_atomic_dec
-#define erts_smp_atomic_add_read_mb erts_no_atomic_add_read
-#define erts_smp_atomic_add_mb erts_no_atomic_add
-#define erts_smp_atomic_read_bor_mb erts_no_atomic_read_bor
-#define erts_smp_atomic_read_band_mb erts_no_atomic_read_band
-#define erts_smp_atomic_xchg_mb erts_no_atomic_xchg
-#define erts_smp_atomic_cmpxchg_mb erts_no_atomic_cmpxchg
-#define erts_smp_atomic_read_bset_mb erts_no_atomic_read_bset
-
-#define erts_smp_atomic_init_acqb erts_no_atomic_set
-#define erts_smp_atomic_set_acqb erts_no_atomic_set
-#define erts_smp_atomic_read_acqb erts_no_atomic_read
-#define erts_smp_atomic_inc_read_acqb erts_no_atomic_inc_read
-#define erts_smp_atomic_dec_read_acqb erts_no_atomic_dec_read
-#define erts_smp_atomic_inc_acqb erts_no_atomic_inc
-#define erts_smp_atomic_dec_acqb erts_no_atomic_dec
-#define erts_smp_atomic_add_read_acqb erts_no_atomic_add_read
-#define erts_smp_atomic_add_acqb erts_no_atomic_add
-#define erts_smp_atomic_read_bor_acqb erts_no_atomic_read_bor
-#define erts_smp_atomic_read_band_acqb erts_no_atomic_read_band
-#define erts_smp_atomic_xchg_acqb erts_no_atomic_xchg
-#define erts_smp_atomic_cmpxchg_acqb erts_no_atomic_cmpxchg
-#define erts_smp_atomic_read_bset_acqb erts_no_atomic_read_bset
-
-#define erts_smp_atomic_init_relb erts_no_atomic_set
-#define erts_smp_atomic_set_relb erts_no_atomic_set
-#define erts_smp_atomic_read_relb erts_no_atomic_read
-#define erts_smp_atomic_inc_read_relb erts_no_atomic_inc_read
-#define erts_smp_atomic_dec_read_relb erts_no_atomic_dec_read
-#define erts_smp_atomic_inc_relb erts_no_atomic_inc
-#define erts_smp_atomic_dec_relb erts_no_atomic_dec
-#define erts_smp_atomic_add_read_relb erts_no_atomic_add_read
-#define erts_smp_atomic_add_relb erts_no_atomic_add
-#define erts_smp_atomic_read_bor_relb erts_no_atomic_read_bor
-#define erts_smp_atomic_read_band_relb erts_no_atomic_read_band
-#define erts_smp_atomic_xchg_relb erts_no_atomic_xchg
-#define erts_smp_atomic_cmpxchg_relb erts_no_atomic_cmpxchg
-#define erts_smp_atomic_read_bset_relb erts_no_atomic_read_bset
-
-#define erts_smp_atomic_init_ddrb erts_no_atomic_set
-#define erts_smp_atomic_set_ddrb erts_no_atomic_set
-#define erts_smp_atomic_read_ddrb erts_no_atomic_read
-#define erts_smp_atomic_inc_read_ddrb erts_no_atomic_inc_read
-#define erts_smp_atomic_dec_read_ddrb erts_no_atomic_dec_read
-#define erts_smp_atomic_inc_ddrb erts_no_atomic_inc
-#define erts_smp_atomic_dec_ddrb erts_no_atomic_dec
-#define erts_smp_atomic_add_read_ddrb erts_no_atomic_add_read
-#define erts_smp_atomic_add_ddrb erts_no_atomic_add
-#define erts_smp_atomic_read_bor_ddrb erts_no_atomic_read_bor
-#define erts_smp_atomic_read_band_ddrb erts_no_atomic_read_band
-#define erts_smp_atomic_xchg_ddrb erts_no_atomic_xchg
-#define erts_smp_atomic_cmpxchg_ddrb erts_no_atomic_cmpxchg
-#define erts_smp_atomic_read_bset_ddrb erts_no_atomic_read_bset
-
-#define erts_smp_atomic_init_rb erts_no_atomic_set
-#define erts_smp_atomic_set_rb erts_no_atomic_set
-#define erts_smp_atomic_read_rb erts_no_atomic_read
-#define erts_smp_atomic_inc_read_rb erts_no_atomic_inc_read
-#define erts_smp_atomic_dec_read_rb erts_no_atomic_dec_read
-#define erts_smp_atomic_inc_rb erts_no_atomic_inc
-#define erts_smp_atomic_dec_rb erts_no_atomic_dec
-#define erts_smp_atomic_add_read_rb erts_no_atomic_add_read
-#define erts_smp_atomic_add_rb erts_no_atomic_add
-#define erts_smp_atomic_read_bor_rb erts_no_atomic_read_bor
-#define erts_smp_atomic_read_band_rb erts_no_atomic_read_band
-#define erts_smp_atomic_xchg_rb erts_no_atomic_xchg
-#define erts_smp_atomic_cmpxchg_rb erts_no_atomic_cmpxchg
-#define erts_smp_atomic_read_bset_rb erts_no_atomic_read_bset
-
-#define erts_smp_atomic_init_wb erts_no_atomic_set
-#define erts_smp_atomic_set_wb erts_no_atomic_set
-#define erts_smp_atomic_read_wb erts_no_atomic_read
-#define erts_smp_atomic_inc_read_wb erts_no_atomic_inc_read
-#define erts_smp_atomic_dec_read_wb erts_no_atomic_dec_read
-#define erts_smp_atomic_inc_wb erts_no_atomic_inc
-#define erts_smp_atomic_dec_wb erts_no_atomic_dec
-#define erts_smp_atomic_add_read_wb erts_no_atomic_add_read
-#define erts_smp_atomic_add_wb erts_no_atomic_add
-#define erts_smp_atomic_read_bor_wb erts_no_atomic_read_bor
-#define erts_smp_atomic_read_band_wb erts_no_atomic_read_band
-#define erts_smp_atomic_xchg_wb erts_no_atomic_xchg
-#define erts_smp_atomic_cmpxchg_wb erts_no_atomic_cmpxchg
-#define erts_smp_atomic_read_bset_wb erts_no_atomic_read_bset
-
-#define erts_smp_atomic_set_dirty erts_no_atomic_set
-#define erts_smp_atomic_read_dirty erts_no_atomic_read
-
-/* 32-bit atomics */
-
-#define erts_smp_atomic32_init_nob erts_no_atomic32_set
-#define erts_smp_atomic32_set_nob erts_no_atomic32_set
-#define erts_smp_atomic32_read_nob erts_no_atomic32_read
-#define erts_smp_atomic32_inc_read_nob erts_no_atomic32_inc_read
-#define erts_smp_atomic32_dec_read_nob erts_no_atomic32_dec_read
-#define erts_smp_atomic32_inc_nob erts_no_atomic32_inc
-#define erts_smp_atomic32_dec_nob erts_no_atomic32_dec
-#define erts_smp_atomic32_add_read_nob erts_no_atomic32_add_read
-#define erts_smp_atomic32_add_nob erts_no_atomic32_add
-#define erts_smp_atomic32_read_bor_nob erts_no_atomic32_read_bor
-#define erts_smp_atomic32_read_band_nob erts_no_atomic32_read_band
-#define erts_smp_atomic32_xchg_nob erts_no_atomic32_xchg
-#define erts_smp_atomic32_cmpxchg_nob erts_no_atomic32_cmpxchg
-#define erts_smp_atomic32_read_bset_nob erts_no_atomic32_read_bset
-
-#define erts_smp_atomic32_init_mb erts_no_atomic32_set
-#define erts_smp_atomic32_set_mb erts_no_atomic32_set
-#define erts_smp_atomic32_read_mb erts_no_atomic32_read
-#define erts_smp_atomic32_inc_read_mb erts_no_atomic32_inc_read
-#define erts_smp_atomic32_dec_read_mb erts_no_atomic32_dec_read
-#define erts_smp_atomic32_inc_mb erts_no_atomic32_inc
-#define erts_smp_atomic32_dec_mb erts_no_atomic32_dec
-#define erts_smp_atomic32_add_read_mb erts_no_atomic32_add_read
-#define erts_smp_atomic32_add_mb erts_no_atomic32_add
-#define erts_smp_atomic32_read_bor_mb erts_no_atomic32_read_bor
-#define erts_smp_atomic32_read_band_mb erts_no_atomic32_read_band
-#define erts_smp_atomic32_xchg_mb erts_no_atomic32_xchg
-#define erts_smp_atomic32_cmpxchg_mb erts_no_atomic32_cmpxchg
-#define erts_smp_atomic32_read_bset_mb erts_no_atomic32_read_bset
-
-#define erts_smp_atomic32_init_acqb erts_no_atomic32_set
-#define erts_smp_atomic32_set_acqb erts_no_atomic32_set
-#define erts_smp_atomic32_read_acqb erts_no_atomic32_read
-#define erts_smp_atomic32_inc_read_acqb erts_no_atomic32_inc_read
-#define erts_smp_atomic32_dec_read_acqb erts_no_atomic32_dec_read
-#define erts_smp_atomic32_inc_acqb erts_no_atomic32_inc
-#define erts_smp_atomic32_dec_acqb erts_no_atomic32_dec
-#define erts_smp_atomic32_add_read_acqb erts_no_atomic32_add_read
-#define erts_smp_atomic32_add_acqb erts_no_atomic32_add
-#define erts_smp_atomic32_read_bor_acqb erts_no_atomic32_read_bor
-#define erts_smp_atomic32_read_band_acqb erts_no_atomic32_read_band
-#define erts_smp_atomic32_xchg_acqb erts_no_atomic32_xchg
-#define erts_smp_atomic32_cmpxchg_acqb erts_no_atomic32_cmpxchg
-#define erts_smp_atomic32_read_bset_acqb erts_no_atomic32_read_bset
-
-#define erts_smp_atomic32_init_relb erts_no_atomic32_set
-#define erts_smp_atomic32_set_relb erts_no_atomic32_set
-#define erts_smp_atomic32_read_relb erts_no_atomic32_read
-#define erts_smp_atomic32_inc_read_relb erts_no_atomic32_inc_read
-#define erts_smp_atomic32_dec_read_relb erts_no_atomic32_dec_read
-#define erts_smp_atomic32_inc_relb erts_no_atomic32_inc
-#define erts_smp_atomic32_dec_relb erts_no_atomic32_dec
-#define erts_smp_atomic32_add_read_relb erts_no_atomic32_add_read
-#define erts_smp_atomic32_add_relb erts_no_atomic32_add
-#define erts_smp_atomic32_read_bor_relb erts_no_atomic32_read_bor
-#define erts_smp_atomic32_read_band_relb erts_no_atomic32_read_band
-#define erts_smp_atomic32_xchg_relb erts_no_atomic32_xchg
-#define erts_smp_atomic32_cmpxchg_relb erts_no_atomic32_cmpxchg
-#define erts_smp_atomic32_read_bset_relb erts_no_atomic32_read_bset
-
-#define erts_smp_atomic32_init_ddrb erts_no_atomic32_set
-#define erts_smp_atomic32_set_ddrb erts_no_atomic32_set
-#define erts_smp_atomic32_read_ddrb erts_no_atomic32_read
-#define erts_smp_atomic32_inc_read_ddrb erts_no_atomic32_inc_read
-#define erts_smp_atomic32_dec_read_ddrb erts_no_atomic32_dec_read
-#define erts_smp_atomic32_inc_ddrb erts_no_atomic32_inc
-#define erts_smp_atomic32_dec_ddrb erts_no_atomic32_dec
-#define erts_smp_atomic32_add_read_ddrb erts_no_atomic32_add_read
-#define erts_smp_atomic32_add_ddrb erts_no_atomic32_add
-#define erts_smp_atomic32_read_bor_ddrb erts_no_atomic32_read_bor
-#define erts_smp_atomic32_read_band_ddrb erts_no_atomic32_read_band
-#define erts_smp_atomic32_xchg_ddrb erts_no_atomic32_xchg
-#define erts_smp_atomic32_cmpxchg_ddrb erts_no_atomic32_cmpxchg
-#define erts_smp_atomic32_read_bset_ddrb erts_no_atomic32_read_bset
-
-#define erts_smp_atomic32_init_rb erts_no_atomic32_set
-#define erts_smp_atomic32_set_rb erts_no_atomic32_set
-#define erts_smp_atomic32_read_rb erts_no_atomic32_read
-#define erts_smp_atomic32_inc_read_rb erts_no_atomic32_inc_read
-#define erts_smp_atomic32_dec_read_rb erts_no_atomic32_dec_read
-#define erts_smp_atomic32_inc_rb erts_no_atomic32_inc
-#define erts_smp_atomic32_dec_rb erts_no_atomic32_dec
-#define erts_smp_atomic32_add_read_rb erts_no_atomic32_add_read
-#define erts_smp_atomic32_add_rb erts_no_atomic32_add
-#define erts_smp_atomic32_read_bor_rb erts_no_atomic32_read_bor
-#define erts_smp_atomic32_read_band_rb erts_no_atomic32_read_band
-#define erts_smp_atomic32_xchg_rb erts_no_atomic32_xchg
-#define erts_smp_atomic32_cmpxchg_rb erts_no_atomic32_cmpxchg
-#define erts_smp_atomic32_read_bset_rb erts_no_atomic32_read_bset
-
-#define erts_smp_atomic32_init_wb erts_no_atomic32_set
-#define erts_smp_atomic32_set_wb erts_no_atomic32_set
-#define erts_smp_atomic32_read_wb erts_no_atomic32_read
-#define erts_smp_atomic32_inc_read_wb erts_no_atomic32_inc_read
-#define erts_smp_atomic32_dec_read_wb erts_no_atomic32_dec_read
-#define erts_smp_atomic32_inc_wb erts_no_atomic32_inc
-#define erts_smp_atomic32_dec_wb erts_no_atomic32_dec
-#define erts_smp_atomic32_add_read_wb erts_no_atomic32_add_read
-#define erts_smp_atomic32_add_wb erts_no_atomic32_add
-#define erts_smp_atomic32_read_bor_wb erts_no_atomic32_read_bor
-#define erts_smp_atomic32_read_band_wb erts_no_atomic32_read_band
-#define erts_smp_atomic32_xchg_wb erts_no_atomic32_xchg
-#define erts_smp_atomic32_cmpxchg_wb erts_no_atomic32_cmpxchg
-#define erts_smp_atomic32_read_bset_wb erts_no_atomic32_read_bset
-
-#define erts_smp_atomic32_set_dirty erts_no_atomic32_set
-#define erts_smp_atomic32_read_dirty erts_no_atomic32_read
-
-#endif /* !ERTS_SMP */
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-ERTS_GLB_INLINE void
-erts_smp_thr_init(erts_smp_thr_init_data_t *id)
-{
-#ifdef ERTS_SMP
- erts_thr_init(id);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_thr_create(erts_smp_tid_t *tid, void * (*func)(void *), void *arg,
- erts_smp_thr_opts_t *opts)
-{
-#ifdef ERTS_SMP
- erts_thr_create(tid, func, arg, opts);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_thr_join(erts_smp_tid_t tid, void **thr_res)
-{
-#ifdef ERTS_SMP
- erts_thr_join(tid, thr_res);
-#endif
-}
-
-
-ERTS_GLB_INLINE void
-erts_smp_thr_detach(erts_smp_tid_t tid)
-{
-#ifdef ERTS_SMP
- erts_thr_detach(tid);
-#endif
-}
-
-
-ERTS_GLB_INLINE void
-erts_smp_thr_exit(void *res)
-{
-#ifdef ERTS_SMP
- erts_thr_exit(res);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_install_exit_handler(void (*exit_handler)(void))
-{
-#ifdef ERTS_SMP
- erts_thr_install_exit_handler(exit_handler);
-#endif
-}
-
-ERTS_GLB_INLINE erts_smp_tid_t
-erts_smp_thr_self(void)
-{
-#ifdef ERTS_SMP
- return erts_thr_self();
-#else
- return 0;
-#endif
-}
-
-
-ERTS_GLB_INLINE int
-erts_smp_equal_tids(erts_smp_tid_t x, erts_smp_tid_t y)
-{
-#ifdef ERTS_SMP
- return erts_equal_tids(x, y);
-#else
- return 1;
-#endif
-}
-
-
-#ifdef ERTS_HAVE_REC_MTX_INIT
-ERTS_GLB_INLINE void
-erts_smp_rec_mtx_init(erts_smp_mtx_t *mtx)
-{
-#ifdef ERTS_SMP
- erts_rec_mtx_init(mtx);
-#endif
-}
-#endif
-
-ERTS_GLB_INLINE void
-erts_smp_mtx_init_x(erts_smp_mtx_t *mtx, char *name, Eterm extra)
-{
-#ifdef ERTS_SMP
- erts_mtx_init_x(mtx, name, extra, 1);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_mtx_init_locked_x(erts_smp_mtx_t *mtx, char *name, Eterm extra)
-{
-#ifdef ERTS_SMP
- erts_mtx_init_locked_x(mtx, name, extra, 1);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_mtx_init(erts_smp_mtx_t *mtx, char *name)
-{
-#ifdef ERTS_SMP
- erts_mtx_init(mtx, name);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, char *name)
-{
-#ifdef ERTS_SMP
- erts_mtx_init_locked(mtx, name);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_mtx_destroy(erts_smp_mtx_t *mtx)
-{
-#ifdef ERTS_SMP
- erts_mtx_destroy(mtx);
-#endif
-}
-
-ERTS_GLB_INLINE int
-#ifdef ERTS_ENABLE_LOCK_POSITION
-erts_smp_mtx_trylock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line)
-#else
-erts_smp_mtx_trylock(erts_smp_mtx_t *mtx)
-#endif
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
- return erts_mtx_trylock_x(mtx,file,line);
-#elif defined(ERTS_SMP)
- return erts_mtx_trylock(mtx);
-#else
- return 0;
-#endif
-
-}
-
-
-ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_POSITION
-erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line)
-#else
-erts_smp_mtx_lock(erts_smp_mtx_t *mtx)
-#endif
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
- erts_mtx_lock_x(mtx, file, line);
-#elif defined(ERTS_SMP)
- erts_mtx_lock(mtx);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_mtx_unlock(erts_smp_mtx_t *mtx)
-{
-#ifdef ERTS_SMP
- erts_mtx_unlock(mtx);
-#endif
-}
-
-ERTS_GLB_INLINE int
-erts_smp_lc_mtx_is_locked(erts_smp_mtx_t *mtx)
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
- return erts_lc_mtx_is_locked(mtx);
-#else
- return 0;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_cnd_init(erts_smp_cnd_t *cnd)
-{
-#ifdef ERTS_SMP
- erts_cnd_init(cnd);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_cnd_destroy(erts_smp_cnd_t *cnd)
-{
-#ifdef ERTS_SMP
- erts_cnd_destroy(cnd);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_cnd_wait(erts_smp_cnd_t *cnd, erts_smp_mtx_t *mtx)
-{
-#ifdef ERTS_SMP
- erts_cnd_wait(cnd, mtx);
-#endif
-}
-
-/*
- * IMPORTANT note about erts_smp_cnd_signal() and erts_smp_cnd_broadcast()
- *
- * POSIX allows a call to `pthread_cond_signal' or `pthread_cond_broadcast'
- * even though the associated mutex/mutexes isn't/aren't locked by the
- * caller. Our implementation does not allow that, in order to avoid a
- * performance penalty. That is, all associated mutexes *need* to be
- * locked by the caller of erts_smp_cnd_signal()/erts_smp_cnd_broadcast()!
- * (A usage sketch follows the two wrappers below.)
- */
-
-ERTS_GLB_INLINE void
-erts_smp_cnd_signal(erts_smp_cnd_t *cnd)
-{
-#ifdef ERTS_SMP
- erts_cnd_signal(cnd);
-#endif
-}
-
-
-ERTS_GLB_INLINE void
-erts_smp_cnd_broadcast(erts_smp_cnd_t *cnd)
-{
-#ifdef ERTS_SMP
- erts_cnd_broadcast(cnd);
-#endif
-}
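/*
 * A minimal sketch of the contract described above (hypothetical
 * names; initialization of the mutex/condvar is omitted): the mutex
 * guarding the waited-on state must be held when signalling.
 */
static erts_smp_mtx_t q_mtx;
static erts_smp_cnd_t q_cnd;
static int q_ready = 0;

static void producer_sketch(void)
{
    erts_smp_mtx_lock(&q_mtx);
    q_ready = 1;
    erts_smp_cnd_signal(&q_cnd);    /* mutex *must* be held here */
    erts_smp_mtx_unlock(&q_mtx);
}

static void consumer_sketch(void)
{
    erts_smp_mtx_lock(&q_mtx);
    while (!q_ready)                /* guard against spurious wakeups */
        erts_smp_cnd_wait(&q_cnd, &q_mtx);
    erts_smp_mtx_unlock(&q_mtx);
}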
-
-ERTS_GLB_INLINE void
-erts_smp_rwmtx_set_reader_group(int no)
-{
-#ifdef ERTS_SMP
- erts_rwmtx_set_reader_group(no);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_rwmtx_init_opt_x(erts_smp_rwmtx_t *rwmtx,
- erts_smp_rwmtx_opt_t *opt,
- char *name,
- Eterm extra)
-{
-#ifdef ERTS_SMP
- erts_rwmtx_init_opt_x(rwmtx, opt, name, extra);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_rwmtx_init_x(erts_smp_rwmtx_t *rwmtx, char *name, Eterm extra)
-{
-#ifdef ERTS_SMP
- erts_rwmtx_init_x(rwmtx, name, extra);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx,
- erts_smp_rwmtx_opt_t *opt,
- char *name)
-{
-#ifdef ERTS_SMP
- erts_rwmtx_init_opt(rwmtx, opt, name);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx, char *name)
-{
-#ifdef ERTS_SMP
- erts_rwmtx_init(rwmtx, name);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_rwmtx_destroy(erts_smp_rwmtx_t *rwmtx)
-{
-#ifdef ERTS_SMP
- erts_rwmtx_destroy(rwmtx);
-#endif
-}
-
-ERTS_GLB_INLINE int
-#ifdef ERTS_ENABLE_LOCK_POSITION
-erts_smp_rwmtx_tryrlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line)
-#else
-erts_smp_rwmtx_tryrlock(erts_smp_rwmtx_t *rwmtx)
-#endif
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
- return erts_rwmtx_tryrlock_x(rwmtx, file, line);
-#elif defined(ERTS_SMP)
- return erts_rwmtx_tryrlock(rwmtx);
-#else
- return 0;
-#endif
-}
-
-ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_POSITION
-erts_smp_rwmtx_rlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line)
-#else
-erts_smp_rwmtx_rlock(erts_smp_rwmtx_t *rwmtx)
-#endif
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
- erts_rwmtx_rlock_x(rwmtx, file, line);
-#elif defined(ERTS_SMP)
- erts_rwmtx_rlock(rwmtx);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_rwmtx_runlock(erts_smp_rwmtx_t *rwmtx)
-{
-#ifdef ERTS_SMP
- erts_rwmtx_runlock(rwmtx);
-#endif
-}
-
-
-ERTS_GLB_INLINE int
-#ifdef ERTS_ENABLE_LOCK_POSITION
-erts_smp_rwmtx_tryrwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line)
-#else
-erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx)
-#endif
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
- return erts_rwmtx_tryrwlock_x(rwmtx, file, line);
-#elif defined(ERTS_SMP)
- return erts_rwmtx_tryrwlock(rwmtx);
-#else
- return 0;
-#endif
-}
-
-ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_POSITION
-erts_smp_rwmtx_rwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line)
-#else
-erts_smp_rwmtx_rwlock(erts_smp_rwmtx_t *rwmtx)
-#endif
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
- erts_rwmtx_rwlock_x(rwmtx, file, line);
-#elif defined(ERTS_SMP)
- erts_rwmtx_rwlock(rwmtx);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_rwmtx_rwunlock(erts_smp_rwmtx_t *rwmtx)
-{
-#ifdef ERTS_SMP
- erts_rwmtx_rwunlock(rwmtx);
-#endif
-}
-
-#if 0 /* The following rwmtx function names are
- reserved for potential future use. */
-
-/* Try upgrade from r-locked state to rw-locked state */
-ERTS_GLB_INLINE int
-erts_smp_rwmtx_trywlock(erts_smp_rwmtx_t *rwmtx)
-{
- return 0;
-}
-
-/* Upgrade from r-locked state to rw-locked state */
-ERTS_GLB_INLINE void
-erts_smp_rwmtx_wlock(erts_smp_rwmtx_t *rwmtx)
-{
-
-}
-
-/* Downgrade from rw-locked state to r-locked state */
-ERTS_GLB_INLINE void
-erts_smp_rwmtx_wunlock(erts_smp_rwmtx_t *rwmtx)
-{
-
-}
-
-#endif
-
-ERTS_GLB_INLINE int
-erts_smp_lc_rwmtx_is_rlocked(erts_smp_rwmtx_t *mtx)
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
- return erts_lc_rwmtx_is_rlocked(mtx);
-#else
- return 0;
-#endif
-}
-
-ERTS_GLB_INLINE int
-erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx)
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
- return erts_lc_rwmtx_is_rwlocked(mtx);
-#else
- return 0;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_spinlock_init_x(erts_smp_spinlock_t *lock, char *name, Eterm extra)
-{
-#ifdef ERTS_SMP
- erts_spinlock_init_x(lock, name, extra);
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_spinlock_init(erts_smp_spinlock_t *lock, char *name)
-{
-#ifdef ERTS_SMP
- erts_spinlock_init(lock, name);
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_spinlock_destroy(erts_smp_spinlock_t *lock)
-{
-#ifdef ERTS_SMP
- erts_spinlock_destroy(lock);
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_spin_unlock(erts_smp_spinlock_t *lock)
-{
-#ifdef ERTS_SMP
- erts_spin_unlock(lock);
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_POSITION
-erts_smp_spin_lock_x(erts_smp_spinlock_t *lock, char *file, unsigned int line)
-#else
-erts_smp_spin_lock(erts_smp_spinlock_t *lock)
-#endif
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
- erts_spin_lock_x(lock, file, line);
-#elif defined(ERTS_SMP)
- erts_spin_lock(lock);
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE int
-erts_smp_lc_spinlock_is_locked(erts_smp_spinlock_t *lock)
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
- return erts_lc_spinlock_is_locked(lock);
-#else
- return 0;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_rwlock_init_x(erts_smp_rwlock_t *lock, char *name, Eterm extra)
-{
-#ifdef ERTS_SMP
- erts_rwlock_init_x(lock, name, extra);
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_rwlock_init(erts_smp_rwlock_t *lock, char *name)
-{
-#ifdef ERTS_SMP
- erts_rwlock_init(lock, name);
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_rwlock_destroy(erts_smp_rwlock_t *lock)
-{
-#ifdef ERTS_SMP
- erts_rwlock_destroy(lock);
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_read_unlock(erts_smp_rwlock_t *lock)
-{
-#ifdef ERTS_SMP
- erts_read_unlock(lock);
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_POSITION
-erts_smp_read_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line)
-#else
-erts_smp_read_lock(erts_smp_rwlock_t *lock)
-#endif
-{
-#if defined(ERTS_ENABLE_LOCK_POSITION) && defined(ERTS_SMP)
- erts_read_lock_x(lock, file, line);
-#elif defined(ERTS_SMP)
- erts_read_lock(lock);
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_write_unlock(erts_smp_rwlock_t *lock)
-{
-#ifdef ERTS_SMP
- erts_write_unlock(lock);
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_POSITION
-erts_smp_write_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line)
-#else
-erts_smp_write_lock(erts_smp_rwlock_t *lock)
-#endif
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
- erts_write_lock_x(lock, file, line);
-#elif defined(ERTS_SMP)
- erts_write_lock(lock);
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE int
-erts_smp_lc_rwlock_is_rlocked(erts_smp_rwlock_t *lock)
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
- return erts_lc_rwlock_is_rlocked(lock);
-#else
- return 0;
-#endif
-}
-
-ERTS_GLB_INLINE int
-erts_smp_lc_rwlock_is_rwlocked(erts_smp_rwlock_t *lock)
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
- return erts_lc_rwlock_is_rwlocked(lock);
-#else
- return 0;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp, char* keyname)
-{
-#ifdef ERTS_SMP
- erts_tsd_key_create(keyp,keyname);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_tsd_key_delete(erts_smp_tsd_key_t key)
-{
-#ifdef ERTS_SMP
- erts_tsd_key_delete(key);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_tsd_set(erts_smp_tsd_key_t key, void *value)
-{
-#ifdef ERTS_SMP
- erts_tsd_set(key, value);
-#endif
-}
-
-ERTS_GLB_INLINE void *
-erts_smp_tsd_get(erts_smp_tsd_key_t key)
-{
-#ifdef ERTS_SMP
- return erts_tsd_get(key);
-#else
- return NULL;
-#endif
-}
-
-#ifdef ERTS_THR_HAVE_SIG_FUNCS
-#define ERTS_SMP_THR_HAVE_SIG_FUNCS 1
-
-ERTS_GLB_INLINE void
-erts_smp_thr_sigmask(int how, const sigset_t *set, sigset_t *oset)
-{
-#ifdef ERTS_SMP
- erts_thr_sigmask(how, set, oset);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_thr_sigwait(const sigset_t *set, int *sig)
-{
-#ifdef ERTS_SMP
- erts_thr_sigwait(set, sig);
-#endif
-}
-
-#endif /* #ifdef ERTS_THR_HAVE_SIG_FUNCS */
-
-#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-
-#endif /* ERL_SMP_H */
-
-#ifdef ERTS_UNDEF_DEPRECATED_ATOMICS
-
-/* Deprecated functions to replace */
-
-#undef erts_smp_atomic_init
-#undef erts_smp_atomic_set
-#undef erts_smp_atomic_read
-#undef erts_smp_atomic_inctest
-#undef erts_smp_atomic_dectest
-#undef erts_smp_atomic_inc
-#undef erts_smp_atomic_dec
-#undef erts_smp_atomic_addtest
-#undef erts_smp_atomic_add
-#undef erts_smp_atomic_xchg
-#undef erts_smp_atomic_cmpxchg
-#undef erts_smp_atomic_bor
-#undef erts_smp_atomic_band
-
-#undef erts_smp_atomic32_init
-#undef erts_smp_atomic32_set
-#undef erts_smp_atomic32_read
-#undef erts_smp_atomic32_inctest
-#undef erts_smp_atomic32_dectest
-#undef erts_smp_atomic32_inc
-#undef erts_smp_atomic32_dec
-#undef erts_smp_atomic32_addtest
-#undef erts_smp_atomic32_add
-#undef erts_smp_atomic32_xchg
-#undef erts_smp_atomic32_cmpxchg
-#undef erts_smp_atomic32_bor
-#undef erts_smp_atomic32_band
-
-#endif
diff --git a/erts/emulator/beam/erl_sock.h b/erts/emulator/beam/erl_sock.h
index 7ae6116dc5..3429a52d7e 100644
--- a/erts/emulator/beam/erl_sock.h
+++ b/erts/emulator/beam/erl_sock.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
diff --git a/erts/emulator/beam/erl_sys_driver.h b/erts/emulator/beam/erl_sys_driver.h
index dab4a94a9b..d46e88cb05 100644
--- a/erts/emulator/beam/erl_sys_driver.h
+++ b/erts/emulator/beam/erl_sys_driver.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2001-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2001-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
diff --git a/erts/emulator/beam/erl_term.c b/erts/emulator/beam/erl_term.c
index 28cbe7004f..d904e35e40 100644
--- a/erts/emulator/beam/erl_term.c
+++ b/erts/emulator/beam/erl_term.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -27,92 +28,71 @@
#include <stdlib.h>
#include <stdio.h>
-__decl_noreturn static void __noreturn
-et_abort(const char *expr, const char *file, unsigned line)
+void
+erts_set_literal_tag(Eterm *term, Eterm *hp_start, Eterm hsz)
{
-#ifdef EXIT_ON_ET_ABORT
- static int have_been_called = 0;
-
- if (have_been_called) {
- abort();
- } else {
- /*
- * Prevent infinite loop.
- */
- have_been_called = 1;
- erl_exit(1, "TYPE ASSERTION FAILED, file %s, line %u: %s\n", file, line, expr);
+#ifdef TAG_LITERAL_PTR
+ Eterm *hp_end, *hp;
+
+ hp_end = hp_start + hsz;
+ hp = hp_start;
+
+ while (hp < hp_end) {
+ switch (primary_tag(*hp)) {
+ case TAG_PRIMARY_BOXED:
+ case TAG_PRIMARY_LIST:
+ *hp |= TAG_LITERAL_PTR;
+ break;
+ case TAG_PRIMARY_HEADER:
+ if (header_is_thing(*hp)) {
+ hp += thing_arityval(*hp);
+ }
+ break;
+ default:
+ break;
+ }
+
+ hp++;
}
-#else
- erts_fprintf(stderr, "TYPE ASSERTION FAILED, file %s, line %u: %s\n", file, line, expr);
- abort();
+ if (is_boxed(*term) || is_list(*term))
+ *term |= TAG_LITERAL_PTR;
#endif
}
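/*
 * Hypothetical usage sketch (the copy_struct call and the lit_*
 * names are assumptions, not part of this patch): after copying a
 * term into a permanent literal area, retag every pointer in the
 * copied range, plus the root term itself, as a literal pointer.
 */
Eterm *lit_hp = lit_hp_start;
Eterm lit = copy_struct(src_term, hsz, &lit_hp, lit_off_heap);
erts_set_literal_tag(&lit, lit_hp_start, hsz);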
-#if ET_DEBUG
-#define ET_ASSERT(expr,file,line) \
-do { \
- if (!(expr)) \
- et_abort(#expr, file, line); \
-} while(0)
-#else
-#define ET_ASSERT(expr,file,line) do { } while(0)
-#endif
+void
+erts_term_init(void)
+{
+#ifdef ERTS_ORDINARY_REF_MARKER
+ /* Ordinary and magic references of same size... */
+
+ ErtsRefThing ref_thing;
+
+ ERTS_CT_ASSERT(ERTS_ORDINARY_REF_MARKER == ~((Uint32)0));
+ ref_thing.m.header = ERTS_REF_THING_HEADER;
+ ref_thing.m.mb = (ErtsMagicBinary *) ~((UWord) 3);
+ ref_thing.m.next = (struct erl_off_heap_header *) ~((UWord) 3);
+ if (ref_thing.o.marker == ERTS_ORDINARY_REF_MARKER)
+ ERTS_INTERNAL_ERROR("Cannot differentiate between magic and ordinary references");
+
+ ERTS_CT_ASSERT(offsetof(ErtsORefThing,marker) != 0);
+ ERTS_CT_ASSERT(sizeof(ErtsORefThing) == sizeof(ErtsMRefThing));
+# ifdef ERTS_MAGIC_REF_THING_HEADER
+# error Magic ref thing header should not have been defined...
+# endif
-#if ET_DEBUG
-unsigned tag_val_def_debug(Wterm x, const char *file, unsigned line)
#else
-unsigned tag_val_def(Wterm x)
-#define file __FILE__
-#define line __LINE__
+ /* Ordinary and magic references of different sizes... */
+
+# ifndef ERTS_MAGIC_REF_THING_HEADER
+# error Magic ref thing header should have been defined...
+# endif
+ ERTS_CT_ASSERT(sizeof(ErtsORefThing) != sizeof(ErtsMRefThing));
+
#endif
-{
- static char msg[32];
-
- switch (x & _TAG_PRIMARY_MASK) {
- case TAG_PRIMARY_LIST:
- ET_ASSERT(_list_precond(x),file,line);
- return LIST_DEF;
- case TAG_PRIMARY_BOXED: {
- Eterm hdr = *boxed_val(x);
- ET_ASSERT(is_header(hdr),file,line);
- switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_HEADER_ARITYVAL >> _TAG_PRIMARY_SIZE): return TUPLE_DEF;
- case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE): return BIG_DEF;
- case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE): return BIG_DEF;
- case (_TAG_HEADER_REF >> _TAG_PRIMARY_SIZE): return REF_DEF;
- case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE): return FLOAT_DEF;
- case (_TAG_HEADER_EXPORT >> _TAG_PRIMARY_SIZE): return EXPORT_DEF;
- case (_TAG_HEADER_FUN >> _TAG_PRIMARY_SIZE): return FUN_DEF;
- case (_TAG_HEADER_EXTERNAL_PID >> _TAG_PRIMARY_SIZE): return EXTERNAL_PID_DEF;
- case (_TAG_HEADER_EXTERNAL_PORT >> _TAG_PRIMARY_SIZE): return EXTERNAL_PORT_DEF;
- case (_TAG_HEADER_EXTERNAL_REF >> _TAG_PRIMARY_SIZE): return EXTERNAL_REF_DEF;
- case (_TAG_HEADER_REFC_BIN >> _TAG_PRIMARY_SIZE): return BINARY_DEF;
- case (_TAG_HEADER_HEAP_BIN >> _TAG_PRIMARY_SIZE): return BINARY_DEF;
- case (_TAG_HEADER_SUB_BIN >> _TAG_PRIMARY_SIZE): return BINARY_DEF;
- case (_TAG_HEADER_MAP >> _TAG_PRIMARY_SIZE): return MAP_DEF;
- }
- break;
- }
- case TAG_PRIMARY_IMMED1: {
- switch ((x & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_IMMED1_PID >> _TAG_PRIMARY_SIZE): return PID_DEF;
- case (_TAG_IMMED1_PORT >> _TAG_PRIMARY_SIZE): return PORT_DEF;
- case (_TAG_IMMED1_IMMED2 >> _TAG_PRIMARY_SIZE): {
- switch ((x & _TAG_IMMED2_MASK) >> _TAG_IMMED1_SIZE) {
- case (_TAG_IMMED2_ATOM >> _TAG_IMMED1_SIZE): return ATOM_DEF;
- case (_TAG_IMMED2_NIL >> _TAG_IMMED1_SIZE): return NIL_DEF;
- }
- break;
- }
- case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE): return SMALL_DEF;
- }
- break;
- }
- }
- erts_snprintf(msg, sizeof(msg), "tag_val_def: %#lx", (unsigned long) x);
- et_abort(msg, file, line);
-#undef file
-#undef line
+
+ ERTS_CT_ASSERT(ERTS_REF_THING_SIZE*sizeof(Eterm) == sizeof(ErtsORefThing));
+ ERTS_CT_ASSERT(ERTS_MAGIC_REF_THING_SIZE*sizeof(Eterm) == sizeof(ErtsMRefThing));
+
}
/*
@@ -127,10 +107,10 @@ FUNTY checked_##FUN(ARGTY x, const char *file, unsigned line) \
return _unchecked_##FUN(x); \
}
-ET_DEFINE_CHECKED(Eterm,make_boxed,Eterm*,_is_taggable_pointer);
+ET_DEFINE_CHECKED(Eterm,make_boxed,const Eterm*,_is_taggable_pointer);
ET_DEFINE_CHECKED(int,is_boxed,Eterm,!is_header);
ET_DEFINE_CHECKED(Eterm*,boxed_val,Wterm,_boxed_precond);
-ET_DEFINE_CHECKED(Eterm,make_list,Eterm*,_is_taggable_pointer);
+ET_DEFINE_CHECKED(Eterm,make_list,const Eterm*,_is_taggable_pointer);
ET_DEFINE_CHECKED(int,is_not_list,Eterm,!is_header);
ET_DEFINE_CHECKED(Eterm*,list_val,Wterm,_list_precond);
ET_DEFINE_CHECKED(Uint,unsigned_val,Eterm,is_small);
@@ -151,8 +131,8 @@ ET_DEFINE_CHECKED(Eterm*,tuple_val,Wterm,is_tuple);
ET_DEFINE_CHECKED(struct erl_node_*,internal_pid_node,Eterm,is_internal_pid);
ET_DEFINE_CHECKED(struct erl_node_*,internal_port_node,Eterm,is_internal_port);
ET_DEFINE_CHECKED(Eterm*,internal_ref_val,Wterm,is_internal_ref);
-ET_DEFINE_CHECKED(Uint,internal_ref_data_words,Wterm,is_internal_ref);
-ET_DEFINE_CHECKED(Uint32*,internal_ref_data,Wterm,is_internal_ref);
+ET_DEFINE_CHECKED(Uint32*,internal_magic_ref_numbers,Wterm,is_internal_magic_ref);
+ET_DEFINE_CHECKED(Uint32*,internal_ordinary_ref_numbers,Wterm,is_internal_ordinary_ref);
ET_DEFINE_CHECKED(struct erl_node_*,internal_ref_node,Eterm,is_internal_ref);
ET_DEFINE_CHECKED(Eterm*,external_val,Wterm,is_external);
ET_DEFINE_CHECKED(Uint,external_data_words,Wterm,is_external);
@@ -171,9 +151,7 @@ ET_DEFINE_CHECKED(Uint,external_thing_data_words,ExternalThing*,is_thing_ptr);
ET_DEFINE_CHECKED(Eterm,make_cp,UWord *,_is_taggable_pointer);
ET_DEFINE_CHECKED(UWord *,cp_val,Eterm,is_CP);
ET_DEFINE_CHECKED(Uint,catch_val,Eterm,is_catch);
-ET_DEFINE_CHECKED(Uint,x_reg_offset,Uint,_is_xreg);
-ET_DEFINE_CHECKED(Uint,y_reg_offset,Uint,_is_yreg);
-ET_DEFINE_CHECKED(Uint,x_reg_index,Uint,_is_xreg);
-ET_DEFINE_CHECKED(Uint,y_reg_index,Uint,_is_yreg);
+ET_DEFINE_CHECKED(Uint,loader_x_reg_index,Uint,_is_loader_x_reg);
+ET_DEFINE_CHECKED(Uint,loader_y_reg_index,Uint,_is_loader_y_reg);
#endif /* ET_DEBUG */
diff --git a/erts/emulator/beam/erl_term.h b/erts/emulator/beam/erl_term.h
index 37014ccf94..5ec6b6b44b 100644
--- a/erts/emulator/beam/erl_term.h
+++ b/erts/emulator/beam/erl_term.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2014. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -20,33 +21,11 @@
#ifndef __ERL_TERM_H
#define __ERL_TERM_H
-#include "sys.h" /* defines HALFWORD_HEAP */
+#include "erl_mmap.h"
-typedef UWord Wterm; /* Full word terms */
+void erts_term_init(void);
-#if HALFWORD_HEAP
-# define HEAP_ON_C_STACK 0
-# if HALFWORD_ASSERT
-# ifdef ET_DEBUG
-# undef ET_DEBUG
-# endif
-# define ET_DEBUG 1
-# endif
-# if 1
-# define CHECK_POINTER_MASK 0xFFFFFFFF00000000UL
-# define COMPRESS_POINTER(APointer) ((Eterm) (UWord) (APointer))
-# define EXPAND_POINTER(AnEterm) ((UWord) (AnEterm))
-# else
-# define CHECK_POINTER_MASK 0x0UL
-# define COMPRESS_POINTER(AnUint) (AnUint)
-# define EXPAND_POINTER(APointer) (APointer)
-# endif
-#else
-# define HEAP_ON_C_STACK 1
-# define CHECK_POINTER_MASK 0x0UL
-# define COMPRESS_POINTER(AnUint) (AnUint)
-# define EXPAND_POINTER(APointer) (APointer)
-#endif
+typedef UWord Wterm; /* Full word terms */
struct erl_node_; /* Declared in erl_node_tables.h */
@@ -73,6 +52,24 @@ struct erl_node_; /* Declared in erl_node_tables.h */
#define _ET_APPLY(F,X) _unchecked_##F(X)
#endif
+#if defined(ARCH_64)
+# define TAG_PTR_MASK__ 0x7
+# if !defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
+# ifdef HIPE
+# error Hipe on 64-bit needs a real mmap as it does not support the literal tag
+# endif
+# define TAG_LITERAL_PTR 0x4
+# else
+# undef TAG_LITERAL_PTR
+# endif
+#elif defined(ARCH_32)
+# define TAG_PTR_MASK__ 0x3
+# undef TAG_LITERAL_PTR
+#else
+# error Not supported arch
+#endif
+
+
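/*
 * Illustrative only (not part of this patch): on 64-bit, heap terms
 * are 8-byte aligned, so the three TAG_PTR_MASK__ bits of a pointer
 * are free. Two carry the primary tag; the remaining bit, 0x4, is
 * what TAG_LITERAL_PTR uses. Assumes the usual TAG_PRIMARY_BOXED
 * value of 0x2.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t heap_word;                 /* stands in for a heap term */

static void tag_bit_sketch(void)
{
    uintptr_t p = (uintptr_t) &heap_word;

    assert((p & 0x7) == 0);                /* TAG_PTR_MASK__ bits free  */
    uintptr_t boxed   = p | 0x2;           /* TAG_PRIMARY_BOXED         */
    uintptr_t literal = boxed | 0x4;       /* TAG_LITERAL_PTR           */
    assert((literal & ~(uintptr_t) 0x7) == p); /* pointer recoverable   */
}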
#define _TAG_PRIMARY_SIZE 2
#define _TAG_PRIMARY_MASK 0x3
#define TAG_PRIMARY_HEADER 0x0
@@ -147,21 +144,21 @@ struct erl_node_; /* Declared in erl_node_tables.h */
#define MAP_SUBTAG (0xF << _TAG_PRIMARY_SIZE) /* MAP */
-#define _TAG_HEADER_ARITYVAL (TAG_PRIMARY_HEADER|ARITYVAL_SUBTAG)
-#define _TAG_HEADER_FUN (TAG_PRIMARY_HEADER|FUN_SUBTAG)
-#define _TAG_HEADER_POS_BIG (TAG_PRIMARY_HEADER|POS_BIG_SUBTAG)
-#define _TAG_HEADER_NEG_BIG (TAG_PRIMARY_HEADER|NEG_BIG_SUBTAG)
-#define _TAG_HEADER_FLOAT (TAG_PRIMARY_HEADER|FLOAT_SUBTAG)
-#define _TAG_HEADER_EXPORT (TAG_PRIMARY_HEADER|EXPORT_SUBTAG)
-#define _TAG_HEADER_REF (TAG_PRIMARY_HEADER|REF_SUBTAG)
-#define _TAG_HEADER_REFC_BIN (TAG_PRIMARY_HEADER|REFC_BINARY_SUBTAG)
-#define _TAG_HEADER_HEAP_BIN (TAG_PRIMARY_HEADER|HEAP_BINARY_SUBTAG)
-#define _TAG_HEADER_SUB_BIN (TAG_PRIMARY_HEADER|SUB_BINARY_SUBTAG)
-#define _TAG_HEADER_EXTERNAL_PID (TAG_PRIMARY_HEADER|EXTERNAL_PID_SUBTAG)
-#define _TAG_HEADER_EXTERNAL_PORT (TAG_PRIMARY_HEADER|EXTERNAL_PORT_SUBTAG)
-#define _TAG_HEADER_EXTERNAL_REF (TAG_PRIMARY_HEADER|EXTERNAL_REF_SUBTAG)
+#define _TAG_HEADER_ARITYVAL (TAG_PRIMARY_HEADER|ARITYVAL_SUBTAG)
+#define _TAG_HEADER_FUN (TAG_PRIMARY_HEADER|FUN_SUBTAG)
+#define _TAG_HEADER_POS_BIG (TAG_PRIMARY_HEADER|POS_BIG_SUBTAG)
+#define _TAG_HEADER_NEG_BIG (TAG_PRIMARY_HEADER|NEG_BIG_SUBTAG)
+#define _TAG_HEADER_FLOAT (TAG_PRIMARY_HEADER|FLOAT_SUBTAG)
+#define _TAG_HEADER_EXPORT (TAG_PRIMARY_HEADER|EXPORT_SUBTAG)
+#define _TAG_HEADER_REF (TAG_PRIMARY_HEADER|REF_SUBTAG)
+#define _TAG_HEADER_REFC_BIN (TAG_PRIMARY_HEADER|REFC_BINARY_SUBTAG)
+#define _TAG_HEADER_HEAP_BIN (TAG_PRIMARY_HEADER|HEAP_BINARY_SUBTAG)
+#define _TAG_HEADER_SUB_BIN (TAG_PRIMARY_HEADER|SUB_BINARY_SUBTAG)
+#define _TAG_HEADER_EXTERNAL_PID (TAG_PRIMARY_HEADER|EXTERNAL_PID_SUBTAG)
+#define _TAG_HEADER_EXTERNAL_PORT (TAG_PRIMARY_HEADER|EXTERNAL_PORT_SUBTAG)
+#define _TAG_HEADER_EXTERNAL_REF (TAG_PRIMARY_HEADER|EXTERNAL_REF_SUBTAG)
#define _TAG_HEADER_BIN_MATCHSTATE (TAG_PRIMARY_HEADER|BIN_MATCHSTATE_SUBTAG)
-#define _TAG_HEADER_MAP (TAG_PRIMARY_HEADER|MAP_SUBTAG)
+#define _TAG_HEADER_MAP (TAG_PRIMARY_HEADER|MAP_SUBTAG)
#define _TAG_HEADER_MASK 0x3F
@@ -189,16 +186,14 @@ struct erl_node_; /* Declared in erl_node_tables.h */
/* boxed object access methods */
-#if HALFWORD_HEAP
-#define _is_taggable_pointer(x) (((UWord)(x) & (CHECK_POINTER_MASK | 0x3)) == 0)
-#define _boxed_precond(x) (is_boxed(x))
-#else
-#define _is_taggable_pointer(x) (((Uint)(x) & 0x3) == 0)
+
+#define _is_taggable_pointer(x) (((Uint)(x) & TAG_PTR_MASK__) == 0)
+
#define _boxed_precond(x) (is_boxed(x))
-#endif
-#define _is_aligned(x) (((Uint)(x) & 0x3) == 0)
-#define _unchecked_make_boxed(x) ((Uint) COMPRESS_POINTER(x) + TAG_PRIMARY_BOXED)
-_ET_DECLARE_CHECKED(Eterm,make_boxed,Eterm*)
+
+#define _is_aligned(x) (((Uint)(x) & TAG_PTR_MASK__) == 0)
+#define _unchecked_make_boxed(x) ((Uint)(x) + TAG_PRIMARY_BOXED)
+_ET_DECLARE_CHECKED(Eterm,make_boxed,const Eterm*)
#define make_boxed(x) _ET_APPLY(make_boxed,(x))
#if 1
#define _is_not_boxed(x) ((x) & (_TAG_PRIMARY_MASK-TAG_PRIMARY_BOXED))
@@ -208,13 +203,17 @@ _ET_DECLARE_CHECKED(int,is_boxed,Eterm)
#else
#define is_boxed(x) (((x) & _TAG_PRIMARY_MASK) == TAG_PRIMARY_BOXED)
#endif
-#define _unchecked_boxed_val(x) ((Eterm*) EXPAND_POINTER(((x) - TAG_PRIMARY_BOXED)))
+#ifdef TAG_LITERAL_PTR
+#define _unchecked_boxed_val(x) _unchecked_ptr_val(x)
+#else
+#define _unchecked_boxed_val(x) ((Eterm*) ((x) - TAG_PRIMARY_BOXED))
+#endif
_ET_DECLARE_CHECKED(Eterm*,boxed_val,Wterm)
#define boxed_val(x) _ET_APPLY(boxed_val,(x))
/* cons cell ("list") access methods */
-#define _unchecked_make_list(x) ((Uint) COMPRESS_POINTER(x) + TAG_PRIMARY_LIST)
-_ET_DECLARE_CHECKED(Eterm,make_list,Eterm*)
+#define _unchecked_make_list(x) ((Uint)(x) + TAG_PRIMARY_LIST)
+_ET_DECLARE_CHECKED(Eterm,make_list,const Eterm*)
#define make_list(x) _ET_APPLY(make_list,(x))
#if 1
#define _unchecked_is_not_list(x) ((x) & (_TAG_PRIMARY_MASK-TAG_PRIMARY_LIST))
@@ -225,12 +224,12 @@ _ET_DECLARE_CHECKED(int,is_not_list,Eterm)
#define is_list(x) (((x) & _TAG_PRIMARY_MASK) == TAG_PRIMARY_LIST)
#define is_not_list(x) (!is_list((x)))
#endif
-#if HALFWORD_HEAP
#define _list_precond(x) (is_list(x))
+#ifdef TAG_LITERAL_PTR
+#define _unchecked_list_val(x) _unchecked_ptr_val(x)
#else
-#define _list_precond(x) (is_list(x))
+#define _unchecked_list_val(x) ((Eterm*) ((x) - TAG_PRIMARY_LIST))
#endif
-#define _unchecked_list_val(x) ((Eterm*) EXPAND_POINTER((x) - TAG_PRIMARY_LIST))
_ET_DECLARE_CHECKED(Eterm*,list_val,Wterm)
#define list_val(x) _ET_APPLY(list_val,(x))
@@ -241,15 +240,22 @@ _ET_DECLARE_CHECKED(Eterm*,list_val,Wterm)
#define CDR(x) ((x)[1])
/* generic tagged pointer (boxed or list) access methods */
-#define _unchecked_ptr_val(x) ((Eterm*) EXPAND_POINTER((x) & ~((Uint) 0x3)))
+#define _unchecked_ptr_val(x) ((Eterm*) ((x) & ~((Uint) TAG_PTR_MASK__)))
#define ptr_val(x) _unchecked_ptr_val((x)) /*XXX*/
#define _unchecked_offset_ptr(x,offs) ((x)+((offs)*sizeof(Eterm)))
#define offset_ptr(x,offs) _unchecked_offset_ptr(x,offs) /*XXX*/
#define _unchecked_byte_offset_ptr(x,byte_offs) ((x)+(byte_offs))
#define byte_offset_ptr(x,offs) _unchecked_byte_offset_ptr(x,offs) /*XXX*/
+#ifdef TAG_LITERAL_PTR
+#define _unchecked_is_not_literal_ptr(x) (!((x) & TAG_LITERAL_PTR))
+#define is_not_literal_ptr(x) _unchecked_is_not_literal_ptr((x)) /*XXX*/
+#define is_literal_ptr(x) (!is_not_literal_ptr((x))) /*XXX*/
+#endif
+
+
/* fixnum ("small") access methods */
-#if defined(ARCH_64) && !HALFWORD_HEAP
+#if defined(ARCH_64)
#define SMALL_BITS (64-4)
#define SMALL_DIGITS (17)
#else
@@ -264,7 +270,6 @@ _ET_DECLARE_CHECKED(Eterm*,list_val,Wterm)
#define is_byte(x) (((x) & ((~(Uint)0 << (_TAG_IMMED1_SIZE+8)) + _TAG_IMMED1_MASK)) == _TAG_IMMED1_SMALL)
#define is_valid_bit_size(x) (((Sint)(x)) >= 0 && ((x) & 0x7F) == _TAG_IMMED1_SMALL)
#define is_not_valid_bit_size(x) (!is_valid_bit_size((x)))
-#define MY_IS_SSMALL(x) (((Uint) ((((x)) >> (SMALL_BITS-1)) + 1)) < 2)
#define _unchecked_unsigned_val(x) ((x) >> _TAG_IMMED1_SIZE)
_ET_DECLARE_CHECKED(Uint,unsigned_val,Eterm)
#define unsigned_val(x) _ET_APPLY(unsigned_val,(x))
@@ -296,9 +301,10 @@ _ET_DECLARE_CHECKED(Uint,atom_val,Eterm)
#define atom_val(x) _ET_APPLY(atom_val,(x))
/* header (arityval or thing) access methods */
-#define _make_header(sz,tag) ((Uint)(((sz) << _HEADER_ARITY_OFFS) + (tag)))
+#define _make_header(sz,tag) ((Uint)(((Uint)(sz) << _HEADER_ARITY_OFFS) + (tag)))
#define is_header(x) (((x) & _TAG_PRIMARY_MASK) == TAG_PRIMARY_HEADER)
-#define _unchecked_header_arity(x) ((x) >> _HEADER_ARITY_OFFS)
+#define _unchecked_header_arity(x) \
+ (is_map_header(x) ? MAP_HEADER_ARITY(x) : ((x) >> _HEADER_ARITY_OFFS))
_ET_DECLARE_CHECKED(Uint,header_arity,Eterm)
#define header_arity(x) _ET_APPLY(header_arity,(x))
@@ -361,6 +367,7 @@ _ET_DECLARE_CHECKED(Uint,thing_subtag,Eterm)
((((x) & (_TAG_HEADER_MASK)) == _TAG_HEADER_REFC_BIN) || \
(((x) & (_TAG_HEADER_MASK)) == _TAG_HEADER_HEAP_BIN) || \
(((x) & (_TAG_HEADER_MASK)) == _TAG_HEADER_SUB_BIN))
+
#define make_binary(x) make_boxed((Eterm*)(x))
#define is_binary(x) (is_boxed((x)) && is_binary_header(*boxed_val((x))))
#define is_not_binary(x) (!is_binary((x)))
@@ -393,11 +400,7 @@ _ET_DECLARE_CHECKED(Eterm*,fun_val,Wterm)
_ET_DECLARE_CHECKED(Eterm*,export_val,Wterm)
#define export_val(x) _ET_APPLY(export_val,(x))
#define is_export_header(x) ((x) == HEADER_EXPORT)
-#if HALFWORD_HEAP
-#define HEADER_EXPORT _make_header(2,_TAG_HEADER_EXPORT)
-#else
#define HEADER_EXPORT _make_header(1,_TAG_HEADER_EXPORT)
-#endif
/* bignum access methods */
#define make_pos_bignum_header(sz) _make_header((sz),_TAG_HEADER_POS_BIG)
@@ -421,7 +424,7 @@ _ET_DECLARE_CHECKED(Eterm*,big_val,Wterm)
#define big_val(x) _ET_APPLY(big_val,(x))
/* flonum ("float") access methods */
-#if defined(ARCH_64) && !HALFWORD_HEAP
+#if defined(ARCH_64)
#define HEADER_FLONUM _make_header(1,_TAG_HEADER_FLOAT)
#else
#define HEADER_FLONUM _make_header(2,_TAG_HEADER_FLOAT)
@@ -442,12 +445,12 @@ typedef union float_def
byte fb[sizeof(ieee754_8)];
Uint16 fs[sizeof(ieee754_8) / sizeof(Uint16)];
Uint32 fw[sizeof(ieee754_8) / sizeof(Uint32)];
-#if defined(ARCH_64) && !HALFWORD_HEAP
+#if defined(ARCH_64)
Uint fdw;
#endif
} FloatDef;
-#if defined(ARCH_64) && !HALFWORD_HEAP
+#if defined(ARCH_64)
#define FLOAT_VAL_GET_DOUBLE(fval, f) (f).fdw = *((fval)+1)
@@ -556,14 +559,6 @@ _ET_DECLARE_CHECKED(Eterm*,tuple_val,Wterm)
#define _GETBITS(X,Pos,Size) (((X) >> (Pos)) & ~(~((Uint) 0) << (Size)))
-/*
- * Creation in node specific data (pids, ports, refs)
- */
-
-#define _CRE_SIZE 2
-
-/* MAX value for the creation field in pid, port and reference */
-#define MAX_CREATION (1 << _CRE_SIZE)
/*
* PID layout (internal pids):
@@ -577,7 +572,7 @@ _ET_DECLARE_CHECKED(Eterm*,tuple_val,Wterm)
*
* n : number
*
- * Old pid layout:
+ * Very old pid layout:
*
* |3 3 2 2 2 2 2 2|2 2 2 2 1 1 1 1|1 1 1 1 1 1 | |
* |1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0|
@@ -724,73 +719,235 @@ _ET_DECLARE_CHECKED(struct erl_node_*,internal_port_node,Eterm)
#define ERTS_MAX_REF_NUMBERS 3
#define ERTS_REF_NUMBERS ERTS_MAX_REF_NUMBERS
-#if defined(ARCH_64) && !HALFWORD_HEAP
-# define ERTS_REF_WORDS (ERTS_REF_NUMBERS/2 + 1)
-# define ERTS_REF_32BIT_WORDS (ERTS_REF_NUMBERS+1)
-#else
-# define ERTS_REF_WORDS ERTS_REF_NUMBERS
-# define ERTS_REF_32BIT_WORDS ERTS_REF_NUMBERS
+#ifndef ERTS_ENDIANNESS
+# error ERTS_ENDIANNESS not defined...
#endif
-typedef struct {
- Eterm header;
- union {
- Uint32 ui32[ERTS_REF_32BIT_WORDS];
- Uint ui[ERTS_REF_WORDS];
- } data;
-} RefThing;
+#if ERTS_REF_NUMBERS != 3
+# error "A new reference layout for 64-bit needs to be implemented..."
+#endif
-#define REF_THING_SIZE (sizeof(RefThing)/sizeof(Uint))
-#define REF_THING_HEAD_SIZE (sizeof(Eterm)/sizeof(Uint))
+struct magic_binary;
-#define make_ref_thing_header(DW) \
- _make_header((DW)+REF_THING_HEAD_SIZE-1,_TAG_HEADER_REF)
+#if defined(ARCH_64)
-#if defined(ARCH_64) && !HALFWORD_HEAP
+# define ERTS_ORDINARY_REF_MARKER (~((Uint32) 0))
+
+typedef struct {
+ Eterm header;
+#if ERTS_ENDIANNESS <= 0
+ Uint32 marker;
+#endif
+ Uint32 num[ERTS_REF_NUMBERS];
+#if ERTS_ENDIANNESS > 0
+ Uint32 marker;
+#endif
+} ErtsORefThing;
+
+typedef struct {
+ Eterm header;
+ struct magic_binary *mb;
+ struct erl_off_heap_header* next;
+#if !ERTS_ENDIANNESS
+ Uint32 num[ERTS_REF_NUMBERS];
+ Uint32 marker;
+#endif
+} ErtsMRefThing;
/*
- * Ref layout on a 64-bit little endian machine:
+ * Ordinary ref layout on a 64-bit little endian machine:
*
* 63 31 0
* +--------------+--------------+
* | Thing word |
* +--------------+--------------+
- * | Data 0 | 32-bit arity |
+ * | Data 0 | 0xffffffff |
* +--------------+--------------+
* | Data 2 | Data 1 |
* +--------------+--------------+
*
- * Data is stored as an Uint32 array with 32-bit arity as first number.
+ * Ordinary ref layout on a 64-bit big endian machine:
+ *
+ * 63 31 0
+ * +--------------+--------------+
+ * | Thing word |
+ * +--------------+--------------+
+ * | Data 0 | Data 1 |
+ * +--------------+--------------+
+ * | Data 2 | 0xffffffff |
+ * +--------------+--------------+
+ *
+ * Magic Ref layout on a 64-bit machine:
+ *
+ * 63 31 0
+ * +--------------+--------------+
+ * | Thing word |
+ * +--------------+--------------+
+ * | Magic Binary Pointer |
+ * +--------------+--------------+
+ * | Next Off Heap Pointer |
+ * +--------------+--------------+
+ *
+ * Both pointers in the magic ref are 64-bit aligned, so their
+ * least significant bits are zero. The 32-bit marker word is
+ * placed over the least significant bits of one of the pointers,
+ * which means we can distinguish between a magic and an ordinary
+ * ref by looking at the marker field.
+ *
*/
#define write_ref_thing(Hp, R0, R1, R2) \
do { \
- ((RefThing *) (Hp))->header = make_ref_thing_header(ERTS_REF_WORDS); \
- ((RefThing *) (Hp))->data.ui32[0] = ERTS_REF_NUMBERS; \
- ((RefThing *) (Hp))->data.ui32[1] = (R0); \
- ((RefThing *) (Hp))->data.ui32[2] = (R1); \
- ((RefThing *) (Hp))->data.ui32[3] = (R2); \
+ ((ErtsORefThing *) (Hp))->header = ERTS_REF_THING_HEADER; \
+ ((ErtsORefThing *) (Hp))->marker = ERTS_ORDINARY_REF_MARKER; \
+ ((ErtsORefThing *) (Hp))->num[0] = (R0); \
+ ((ErtsORefThing *) (Hp))->num[1] = (R1); \
+ ((ErtsORefThing *) (Hp))->num[2] = (R2); \
} while (0)
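/*
 * Sketch of the disambiguation trick from the layout comment above:
 * in the position where an ordinary ref stores the all-ones marker,
 * a magic ref stores low bits of a 64-bit-aligned pointer, which
 * are always zero. This is essentially what is_ordinary_ref_thing()
 * further down expands to on such builds.
 */
static int ref_thing_is_ordinary_sketch(Eterm *thing)
{
    return ((ErtsORefThing *) thing)->marker == ERTS_ORDINARY_REF_MARKER;
}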
-#else
+#if ERTS_ENDIANNESS
+/* Known big or little endian */
+
+#define write_magic_ref_thing(Hp, Ohp, Binp) \
+do { \
+ ((ErtsMRefThing *) (Hp))->header = ERTS_REF_THING_HEADER; \
+ ((ErtsMRefThing *) (Hp))->mb = (Binp); \
+ ((ErtsMRefThing *) (Hp))->next = (Ohp)->first; \
+ (Ohp)->first = (struct erl_off_heap_header*) (Hp); \
+ ASSERT(erts_is_ref_numbers_magic((Binp)->refn)); \
+} while (0)
+
+#else /* !ERTS_ENDIANNESS */
+
+#define write_magic_ref_thing(Hp, Ohp, Binp) \
+do { \
+ ((ErtsMRefThing *) (Hp))->header = ERTS_MAGIC_REF_THING_HEADER; \
+ ((ErtsMRefThing *) (Hp))->mb = (Binp); \
+ ((ErtsMRefThing *) (Hp))->next = (Ohp)->first; \
+ (Ohp)->first = (struct erl_off_heap_header*) (Hp); \
+ ((ErtsMRefThing *) (Hp))->marker = 0; \
+ ((ErtsMRefThing *) (Hp))->num[0] = (Binp)->refn[0]; \
+ ((ErtsMRefThing *) (Hp))->num[1] = (Binp)->refn[1]; \
+ ((ErtsMRefThing *) (Hp))->num[2] = (Binp)->refn[2]; \
+ ASSERT(erts_is_ref_numbers_magic((Binp)->refn)); \
+} while (0)
+
+#endif /* !ERTS_ENDIANNESS */
+
+#else /* ARCH_32 */
+
+typedef struct {
+ Eterm header;
+ Uint32 num[ERTS_REF_NUMBERS];
+} ErtsORefThing;
+
+typedef struct {
+ Eterm header;
+ struct magic_binary *mb;
+ struct erl_off_heap_header* next;
+} ErtsMRefThing;
+
#define write_ref_thing(Hp, R0, R1, R2) \
do { \
- ((RefThing *) (Hp))->header = make_ref_thing_header(ERTS_REF_WORDS); \
- ((RefThing *) (Hp))->data.ui32[0] = (R0); \
- ((RefThing *) (Hp))->data.ui32[1] = (R1); \
- ((RefThing *) (Hp))->data.ui32[2] = (R2); \
+ ((ErtsORefThing *) (Hp))->header = ERTS_REF_THING_HEADER; \
+ ((ErtsORefThing *) (Hp))->num[0] = (R0); \
+ ((ErtsORefThing *) (Hp))->num[1] = (R1); \
+ ((ErtsORefThing *) (Hp))->num[2] = (R2); \
+} while (0)
+
+#define write_magic_ref_thing(Hp, Ohp, Binp) \
+do { \
+ ((ErtsMRefThing *) (Hp))->header = ERTS_MAGIC_REF_THING_HEADER; \
+ ((ErtsMRefThing *) (Hp))->mb = (Binp); \
+ ((ErtsMRefThing *) (Hp))->next = (Ohp)->first; \
+ (Ohp)->first = (struct erl_off_heap_header*) (Hp); \
+ ASSERT(erts_is_ref_numbers_magic((Binp)->refn)); \
} while (0)
+#endif /* ARCH_32 */
+
+typedef union {
+ ErtsMRefThing m;
+ ErtsORefThing o;
+} ErtsRefThing;
+
+/* for copy sharing */
+#define BOXED_VISITED_MASK ((Eterm) 3)
+#define BOXED_VISITED ((Eterm) 1)
+#define BOXED_SHARED_UNPROCESSED ((Eterm) 2)
+#define BOXED_SHARED_PROCESSED ((Eterm) 3)
+
+#define ERTS_REF_THING_SIZE (sizeof(ErtsORefThing)/sizeof(Uint))
+#define ERTS_MAGIC_REF_THING_SIZE (sizeof(ErtsMRefThing)/sizeof(Uint))
+#define ERTS_MAX_INTERNAL_REF_SIZE (sizeof(ErtsRefThing)/sizeof(Uint))
+
+#define make_ref_thing_header(Words) \
+ _make_header((Words)-1,_TAG_HEADER_REF)
+
+#define ERTS_REF_THING_HEADER _make_header(ERTS_REF_THING_SIZE-1,_TAG_HEADER_REF)
+
+#if defined(ARCH_64) && ERTS_ENDIANNESS /* All internal refs of same size... */
+
+# undef ERTS_MAGIC_REF_THING_HEADER
+
+# define is_ref_thing_header(x) ((x) == ERTS_REF_THING_HEADER)
+
+#ifdef SHCOPY
+#define is_ordinary_ref_thing(x) \
+ (((ErtsRefThing *) (x))->o.marker == ERTS_ORDINARY_REF_MARKER)
+#else
+#define is_ordinary_ref_thing(x) \
+ (ASSERT(is_ref_thing_header((*((Eterm *)(x))) & ~BOXED_VISITED_MASK)), \
+ ((ErtsRefThing *) (x))->o.marker == ERTS_ORDINARY_REF_MARKER)
+#endif
+
+#define is_magic_ref_thing(x) \
+ (!is_ordinary_ref_thing((x)))
+
+#define is_internal_magic_ref(x) \
+ ((_unchecked_is_boxed((x)) && *boxed_val((x)) == ERTS_REF_THING_HEADER) \
+ && is_magic_ref_thing(boxed_val((x))))
+
+#define is_internal_ordinary_ref(x) \
+ ((_unchecked_is_boxed((x)) && *boxed_val((x)) == ERTS_REF_THING_HEADER) \
+ && is_ordinary_ref_thing(boxed_val((x))))
+
+#else /* Ordinary and magic references of different sizes... */
+
+# define ERTS_MAGIC_REF_THING_HEADER \
+ _make_header(ERTS_MAGIC_REF_THING_SIZE-1,_TAG_HEADER_REF)
+
+# define is_ref_thing_header(x) \
+ (((x) & _TAG_HEADER_MASK) == _TAG_HEADER_REF)
+
+#define is_ordinary_ref_thing(x) \
+ (ASSERT(is_ref_thing_header(*((Eterm *)(x)))), \
+ *((Eterm *)(x)) == ERTS_REF_THING_HEADER)
+
+#define is_magic_ref_thing(x) \
+ (ASSERT(is_ref_thing_header(*((Eterm *)(x)))), \
+ *((Eterm *)(x)) == ERTS_MAGIC_REF_THING_HEADER)
+
+#define is_internal_magic_ref(x) \
+ (_unchecked_is_boxed((x)) && *boxed_val((x)) == ERTS_MAGIC_REF_THING_HEADER)
+
+#define is_internal_ordinary_ref(x) \
+ (_unchecked_is_boxed((x)) && *boxed_val((x)) == ERTS_REF_THING_HEADER)
+
#endif
-#define is_ref_thing_header(x) (((x) & _TAG_HEADER_MASK) == _TAG_HEADER_REF)
#define make_internal_ref(x) make_boxed((Eterm*)(x))
-#define _unchecked_ref_thing_ptr(x) \
- ((RefThing*) _unchecked_internal_ref_val(x))
-#define ref_thing_ptr(x) \
- ((RefThing*) internal_ref_val(x))
+#define _unchecked_ordinary_ref_thing_ptr(x) \
+ ((ErtsORefThing*) _unchecked_internal_ref_val(x))
+#define ordinary_ref_thing_ptr(x) \
+ ((ErtsORefThing*) internal_ref_val(x))
+
+#define _unchecked_magic_ref_thing_ptr(x) \
+ ((ErtsMRefThing*) _unchecked_internal_ref_val(x))
+#define magic_ref_thing_ptr(x) \
+ ((ErtsMRefThing*) internal_ref_val(x))
#define is_internal_ref(x) \
(_unchecked_is_boxed((x)) && is_ref_thing_header(*boxed_val((x))))
@@ -802,16 +959,21 @@ do { \
_ET_DECLARE_CHECKED(Eterm*,internal_ref_val,Wterm)
#define internal_ref_val(x) _ET_APPLY(internal_ref_val,(x))
-#define internal_thing_ref_data_words(t) (thing_arityval(*(Eterm*)(t)))
-#define _unchecked_internal_ref_data_words(x) \
- (_unchecked_thing_arityval(*_unchecked_internal_ref_val(x)))
-_ET_DECLARE_CHECKED(Uint,internal_ref_data_words,Wterm)
-#define internal_ref_data_words(x) _ET_APPLY(internal_ref_data_words,(x))
+#define internal_ordinary_thing_ref_numbers(ort) (((ErtsORefThing *)(ort))->num)
+#define _unchecked_internal_ordinary_ref_numbers(x) (internal_ordinary_thing_ref_numbers(_unchecked_ordinary_ref_thing_ptr(x)))
+_ET_DECLARE_CHECKED(Uint32*,internal_ordinary_ref_numbers,Wterm)
+#define internal_ordinary_ref_numbers(x) _ET_APPLY(internal_ordinary_ref_numbers,(x))
+
+#if defined(ARCH_64) && !ERTS_ENDIANNESS
+#define internal_magic_thing_ref_numbers(mrt) (((ErtsMRefThing *)(mrt))->num)
+#else
+#define internal_magic_thing_ref_numbers(mrt) (((ErtsMRefThing *)(mrt))->mb->refn)
+#endif
+
+#define _unchecked_internal_magic_ref_numbers(x) (internal_magic_thing_ref_numbers(_unchecked_magic_ref_thing_ptr(x)))
+_ET_DECLARE_CHECKED(Uint32*,internal_magic_ref_numbers,Wterm)
+#define internal_magic_ref_numbers(x) _ET_APPLY(internal_magic_ref_numbers,(x))
-#define internal_thing_ref_data(thing) ((thing)->data.ui32)
-#define _unchecked_internal_ref_data(x) (internal_thing_ref_data(_unchecked_ref_thing_ptr(x)))
-_ET_DECLARE_CHECKED(Uint32*,internal_ref_data,Wterm)
-#define internal_ref_data(x) _ET_APPLY(internal_ref_data,(x))
#define _unchecked_internal_ref_node(x) erts_this_node
_ET_DECLARE_CHECKED(struct erl_node_*,internal_ref_node,Eterm)
@@ -990,6 +1152,37 @@ _ET_DECLARE_CHECKED(Uint32*,external_ref_data,Wterm)
_ET_DECLARE_CHECKED(struct erl_node_*,external_ref_node,Eterm)
#define external_ref_node(x) _ET_APPLY(external_ref_node,(x))
+/* maps */
+
+#define MAP_HEADER_TAG_SZ (2)
+#define MAP_HEADER_ARITY_SZ (8)
+#define MAP_HEADER_VAL_SZ (16)
+
+#define MAP_HEADER_TAG_FLATMAP_HEAD (0x0)
+#define MAP_HEADER_TAG_HAMT_NODE_BITMAP (0x1)
+#define MAP_HEADER_TAG_HAMT_HEAD_ARRAY (0x2)
+#define MAP_HEADER_TAG_HAMT_HEAD_BITMAP (0x3)
+
+#define MAP_HEADER_TYPE(Hdr) (((Hdr) >> (_HEADER_ARITY_OFFS)) & (0x3))
+#define MAP_HEADER_ARITY(Hdr) (((Hdr) >> (_HEADER_ARITY_OFFS + MAP_HEADER_TAG_SZ)) & (0xff))
+#define MAP_HEADER_VAL(Hdr) (((Hdr) >> (_HEADER_ARITY_OFFS + MAP_HEADER_TAG_SZ + MAP_HEADER_ARITY_SZ)) & (0xffff))
+
+#define make_hashmap(x) make_boxed((Eterm*)(x))
+#define is_hashmap(x) (is_boxed((x)) && is_hashmap_header(*boxed_val((x))))
+#define is_not_hashmap(x) (!is_hashmap(x))
+#define is_hashmap_header(x) (((x) & (_HEADER_MAP_HASHMAP_HEAD_MASK)) == HAMT_SUBTAG_HEAD_ARRAY)
+#define hashmap_val(x) _unchecked_boxed_val((x))
+
+#define make_flatmap(x) make_boxed((Eterm*)(x))
+#define is_flatmap(x) (is_boxed((x)) && is_flatmap_header(*boxed_val((x))))
+#define is_not_flatmap(x) (!is_flatmap((x)))
+#define is_flatmap_header(x) (((x) & (_HEADER_MAP_SUBTAG_MASK)) == HAMT_SUBTAG_HEAD_FLATMAP)
+#define flatmap_val(x) (_unchecked_boxed_val((x)))
+
+#define is_map_header(x) (((x) & (_TAG_HEADER_MASK)) == _TAG_HEADER_MAP)
+#define is_map(x) (is_boxed((x)) && is_map_header(*boxed_val(x)))
+#define is_not_map(x) (!is_map(x))
+
/* number tests */
#define is_integer(x) (is_small(x) || is_big(x))
@@ -1007,14 +1200,14 @@ _ET_DECLARE_CHECKED(struct erl_node_*,external_ref_node,Eterm)
#error "fix yer arch, like"
#endif
-#define _unchecked_make_cp(x) ((Eterm) COMPRESS_POINTER(x))
+#define _unchecked_make_cp(x) ((Eterm)(x))
_ET_DECLARE_CHECKED(Eterm,make_cp,BeamInstr*)
#define make_cp(x) _ET_APPLY(make_cp,(x))
#define is_not_CP(x) ((x) & _CPMASK)
#define is_CP(x) (!is_not_CP(x))
-#define _unchecked_cp_val(x) ((BeamInstr*) EXPAND_POINTER(x))
+#define _unchecked_cp_val(x) ((BeamInstr*) (x))
_ET_DECLARE_CHECKED(BeamInstr*,cp_val,Eterm)
#define cp_val(x) _ET_APPLY(cp_val,(x))
@@ -1030,44 +1223,40 @@ _ET_DECLARE_CHECKED(Uint,catch_val,Eterm)
/*
* Overloaded tags.
*
- * SMALL = 15
- * ATOM/NIL=7
+ * In the loader, we want to tag a term in such a way that it
+ * can be any literal (atom/integer/float/tuple/list/binary) or
+ * a register.
*
- * Note that the two least significant bits in SMALL/ATOM/NIL always are 3;
- * thus, we can distinguish register from literals by looking at only these
- * two bits.
+ * We can achieve that by overloading the PID and PORT tags to
+ * mean X and Y registers. That works because there are no
+ * pid or port literals.
*/
-#define X_REG_DEF 0
-#define Y_REG_DEF 1
-#define R_REG_DEF 2
-
-#define beam_reg_tag(x) ((x) & 3)
+#define _LOADER_TAG_XREG _TAG_IMMED1_PID
+#define _LOADER_TAG_YREG _TAG_IMMED1_PORT
+#define _LOADER_TAG_SIZE _TAG_IMMED1_SIZE
+#define _LOADER_MASK _TAG_IMMED1_MASK
-#define make_rreg() R_REG_DEF
-#define make_xreg(ix) (((ix) * sizeof(Eterm)) | X_REG_DEF)
-#define make_yreg(ix) (((ix) * sizeof(Eterm)) | Y_REG_DEF)
+#define LOADER_X_REG _LOADER_TAG_XREG
+#define LOADER_Y_REG _LOADER_TAG_YREG
-#define _is_xreg(x) (beam_reg_tag(x) == X_REG_DEF)
-#define _is_yreg(x) (beam_reg_tag(x) == Y_REG_DEF)
+#define make_loader_x_reg(R) (((R) << _LOADER_TAG_SIZE) | _LOADER_TAG_XREG)
+#define make_loader_y_reg(R) (((R) << _LOADER_TAG_SIZE) | _LOADER_TAG_YREG)
-#define _unchecked_x_reg_offset(R) ((R) - X_REG_DEF)
-_ET_DECLARE_CHECKED(Uint,x_reg_offset,Uint)
-#define x_reg_offset(R) _ET_APPLY(x_reg_offset,(R))
+#define loader_reg_index(R) ((R) >> _LOADER_TAG_SIZE)
-#define _unchecked_y_reg_offset(R) ((R) - Y_REG_DEF)
-_ET_DECLARE_CHECKED(Uint,y_reg_offset,Uint)
-#define y_reg_offset(R) _ET_APPLY(y_reg_offset,(R))
+#define loader_tag(T) ((T) & _LOADER_MASK)
-#define reg_index(R) ((R) / sizeof(Eterm))
+#define _is_loader_x_reg(x) (loader_tag(x) == _LOADER_TAG_XREG)
+#define _is_loader_y_reg(x) (loader_tag(x) == _LOADER_TAG_YREG)
-#define _unchecked_x_reg_index(R) ((R) >> 2)
-_ET_DECLARE_CHECKED(Uint,x_reg_index,Uint)
-#define x_reg_index(R) _ET_APPLY(x_reg_index,(R))
+#define _unchecked_loader_x_reg_index(R) ((R) >> _LOADER_TAG_SIZE)
+_ET_DECLARE_CHECKED(Uint,loader_x_reg_index,Uint)
+#define loader_x_reg_index(R) _ET_APPLY(loader_x_reg_index,(R))
-#define _unchecked_y_reg_index(R) ((R) >> 2)
-_ET_DECLARE_CHECKED(Uint,y_reg_index,Uint)
-#define y_reg_index(R) _ET_APPLY(y_reg_index,(R))
+#define _unchecked_loader_y_reg_index(R) ((R) >> _LOADER_TAG_SIZE)
+_ET_DECLARE_CHECKED(Uint,loader_y_reg_index,Uint)
+#define loader_y_reg_index(R) _ET_APPLY(loader_y_reg_index,(R))
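/*
 * Round-trip sketch for the loader register encoding above: pack
 * X register 5, then recover the tag and the index (ASSERT as used
 * elsewhere in erts).
 */
static void loader_reg_sketch(void)
{
    Uint packed = make_loader_x_reg(5);
    ASSERT(loader_tag(packed) == LOADER_X_REG);
    ASSERT(loader_reg_index(packed) == 5);
}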
/*
* Backwards compatibility definitions:
@@ -1095,13 +1284,16 @@ _ET_DECLARE_CHECKED(Uint,y_reg_index,Uint)
#define FLOAT_DEF 0xe
#define BIG_DEF 0xf
#define SMALL_DEF 0x10
+#define MATCHSTATE_DEF 0x11 /* not a "real" term */
+
+#define FIRST_VACANT_TAG_DEF 0x12
#if ET_DEBUG
-extern unsigned tag_val_def_debug(Wterm, const char*, unsigned);
-#define tag_val_def(x) tag_val_def_debug((x),__FILE__,__LINE__)
+ERTS_GLB_INLINE unsigned tag_val_def(Wterm, const char*, unsigned);
#else
-extern unsigned tag_val_def(Wterm);
+ERTS_GLB_INLINE unsigned tag_val_def(Wterm);
#endif
+
#define not_eq_tags(X,Y) (tag_val_def((X)) ^ tag_val_def((Y)))
#define NUMBER_CODE(x,y) ((tag_val_def(x) << 5) | tag_val_def(y))
@@ -1116,81 +1308,83 @@ extern unsigned tag_val_def(Wterm);
#define FLOAT_BIG _NUMBER_CODE(FLOAT_DEF,BIG_DEF)
#define FLOAT_FLOAT _NUMBER_CODE(FLOAT_DEF,FLOAT_DEF)
-#if HALFWORD_HEAP
-#define ptr2rel(PTR,BASE) ((Eterm*)((char*)(PTR) - (char*)(BASE)))
-#define rterm2wterm(REL,BASE) ((Wterm)(REL) + (Wterm)(BASE))
-
-#else /* HALFWORD_HEAP */
-
-#define ptr2rel(PTR,BASE) (PTR)
-#define rterm2wterm(REL,BASE) (REL)
-
-#endif /* !HALFWORD_HEAP */
-
-#define make_list_rel(PTR, BASE) make_list(ptr2rel(PTR,BASE))
-#define make_boxed_rel(PTR, BASE) make_boxed(ptr2rel(PTR,BASE))
-#define make_fun_rel make_boxed_rel
-#define make_binary_rel make_boxed_rel
-#define make_tuple_rel make_boxed_rel
-#define make_external_rel make_boxed_rel
-#define make_internal_ref_rel make_boxed_rel
-#define make_big_rel make_boxed_rel
+#define is_same(A,B) ((A)==(B))
-#define binary_val_rel(RTERM, BASE) binary_val(rterm2wterm(RTERM, BASE))
-#define list_val_rel(RTERM, BASE) list_val(rterm2wterm(RTERM, BASE))
-#define boxed_val_rel(RTERM, BASE) boxed_val(rterm2wterm(RTERM, BASE))
-#define tuple_val_rel(RTERM, BASE) tuple_val(rterm2wterm(RTERM, BASE))
-#define export_val_rel(RTERM, BASE) export_val(rterm2wterm(RTERM, BASE))
-#define fun_val_rel(RTERM, BASE) fun_val(rterm2wterm(RTERM, BASE))
-#define big_val_rel(RTERM,BASE) big_val(rterm2wterm(RTERM,BASE))
-#define float_val_rel(RTERM,BASE) float_val(rterm2wterm(RTERM,BASE))
-#define internal_ref_val_rel(RTERM,BASE) internal_ref_val(rterm2wterm(RTERM,BASE))
+void erts_set_literal_tag(Eterm *term, Eterm *hp_start, Eterm hsz);
-#define external_thing_ptr_rel(RTERM, BASE) external_thing_ptr(rterm2wterm(RTERM, BASE))
-#define external_data_words_rel(RTERM,BASE) external_data_words(rterm2wterm(RTERM,BASE))
-
-#define external_port_node_rel(RTERM,BASE) external_port_node(rterm2wterm(RTERM,BASE))
-#define external_port_data_rel(RTERM,BASE) external_port_data(rterm2wterm(RTERM,BASE))
-
-#define is_external_pid_rel(RTERM,BASE) is_external_pid(rterm2wterm(RTERM,BASE))
-#define external_pid_node_rel(RTERM,BASE) external_pid_node(rterm2wterm(RTERM,BASE))
-#define external_pid_data_rel(RTERM,BASE) external_pid_data(rterm2wterm(RTERM,BASE))
-
-#define is_binary_rel(RTERM,BASE) is_binary(rterm2wterm(RTERM,BASE))
-#define is_float_rel(RTERM,BASE) is_float(rterm2wterm(RTERM,BASE))
-#define is_fun_rel(RTERM,BASE) is_fun(rterm2wterm(RTERM,BASE))
-#define is_big_rel(RTERM,BASE) is_big(rterm2wterm(RTERM,BASE))
-#define is_export_rel(RTERM,BASE) is_export(rterm2wterm(RTERM,BASE))
-#define is_tuple_rel(RTERM,BASE) is_tuple(rterm2wterm(RTERM,BASE))
-
-#define GET_DOUBLE_REL(RTERM, f, BASE) GET_DOUBLE(rterm2wterm(RTERM,BASE), f)
-
-#define ref_thing_ptr_rel(RTERM,BASE) ref_thing_ptr(rterm2wterm(RTERM,BASE))
-#define is_internal_ref_rel(RTERM,BASE) is_internal_ref(rterm2wterm(RTERM,BASE))
-#define is_external_rel(RTERM,BASE) is_external(rterm2wterm(RTERM,BASE))
-#define is_external_port_rel(RTERM,BASE) is_external_port(rterm2wterm(RTERM,BASE))
-#define is_external_ref_rel(RTERM,BASE) is_external_ref(rterm2wterm(RTERM,BASE))
-
-#define external_node_rel(RTERM,BASE) external_node(rterm2wterm(RTERM,BASE))
-
-
-#if HALFWORD_HEAP
-ERTS_GLB_INLINE int is_same(Eterm a, Eterm* a_base, Eterm b, Eterm* b_base);
+#if ET_DEBUG
+#define ET_ASSERT(expr,file,line) \
+do { \
+ if (!(expr)) \
+ erl_assert_error("TYPE ASSERTION: " #expr, __FUNCTION__, file, line); \
+} while(0)
+#else
+#define ET_ASSERT(expr,file,line) do { } while(0)
+#endif
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-ERTS_GLB_INLINE int is_same(Eterm a, Eterm* a_base, Eterm b, Eterm* b_base)
-{
- /* If bases differ, assume a and b are on different "heaps",
- ie can only be same if immed */
- ASSERT(a_base == b_base || is_immed(a) || is_immed(b)
- || rterm2wterm(a,a_base) != rterm2wterm(b,b_base));
- return a == b && (a_base == b_base || is_immed(a));
+#if ET_DEBUG
+ERTS_GLB_INLINE unsigned tag_val_def(Wterm x, const char *file, unsigned line)
+#else
+ERTS_GLB_INLINE unsigned tag_val_def(Wterm x)
+#define file __FILE__
+#define line __LINE__
+#endif
+{
+ static char *msg = "tag_val_def error";
+
+ switch (x & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_LIST:
+ ET_ASSERT(_list_precond(x),file,line);
+ return LIST_DEF;
+ case TAG_PRIMARY_BOXED: {
+ Eterm hdr = *boxed_val(x);
+ ET_ASSERT(is_header(hdr),file,line);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_ARITYVAL >> _TAG_PRIMARY_SIZE): return TUPLE_DEF;
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE): return BIG_DEF;
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE): return BIG_DEF;
+ case (_TAG_HEADER_REF >> _TAG_PRIMARY_SIZE): return REF_DEF;
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE): return FLOAT_DEF;
+ case (_TAG_HEADER_EXPORT >> _TAG_PRIMARY_SIZE): return EXPORT_DEF;
+ case (_TAG_HEADER_FUN >> _TAG_PRIMARY_SIZE): return FUN_DEF;
+ case (_TAG_HEADER_EXTERNAL_PID >> _TAG_PRIMARY_SIZE): return EXTERNAL_PID_DEF;
+ case (_TAG_HEADER_EXTERNAL_PORT >> _TAG_PRIMARY_SIZE): return EXTERNAL_PORT_DEF;
+ case (_TAG_HEADER_EXTERNAL_REF >> _TAG_PRIMARY_SIZE): return EXTERNAL_REF_DEF;
+ case (_TAG_HEADER_MAP >> _TAG_PRIMARY_SIZE): return MAP_DEF;
+ case (_TAG_HEADER_REFC_BIN >> _TAG_PRIMARY_SIZE): return BINARY_DEF;
+ case (_TAG_HEADER_HEAP_BIN >> _TAG_PRIMARY_SIZE): return BINARY_DEF;
+ case (_TAG_HEADER_SUB_BIN >> _TAG_PRIMARY_SIZE): return BINARY_DEF;
+ case (_TAG_HEADER_BIN_MATCHSTATE >> _TAG_PRIMARY_SIZE): return MATCHSTATE_DEF;
+ }
+
+ break;
+ }
+ case TAG_PRIMARY_IMMED1: {
+ switch ((x & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_PID >> _TAG_PRIMARY_SIZE): return PID_DEF;
+ case (_TAG_IMMED1_PORT >> _TAG_PRIMARY_SIZE): return PORT_DEF;
+ case (_TAG_IMMED1_IMMED2 >> _TAG_PRIMARY_SIZE): {
+ switch ((x & _TAG_IMMED2_MASK) >> _TAG_IMMED1_SIZE) {
+ case (_TAG_IMMED2_ATOM >> _TAG_IMMED1_SIZE): return ATOM_DEF;
+ case (_TAG_IMMED2_NIL >> _TAG_IMMED1_SIZE): return NIL_DEF;
+ }
+ break;
+ }
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE): return SMALL_DEF;
+ }
+ break;
+ }
+ }
+ erl_assert_error(msg, __FUNCTION__, file, line);
+#undef file
+#undef line
}
#endif
-#else /* !HALFWORD_HEAP */
-#define is_same(A,A_BASE,B,B_BASE) ((A)==(B))
+#if ET_DEBUG
+#define tag_val_def(X) tag_val_def(X, __FILE__, __LINE__)
#endif
#endif /* __ERL_TERM_H */
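NUMBER_CODE(x,y) above dispatches on a pair of type codes by shifting the first code five bits left; every *_DEF code fits in five bits since the highest, MATCHSTATE_DEF, is 0x11. A worked sketch of the packing arithmetic, with a demo macro mirroring the shift shown above (its constant-argument twin _NUMBER_CODE is assumed to expand the same way):

    #include <assert.h>

    /* Type codes copied from the header above. */
    #define BIG_DEF   0xf
    #define SMALL_DEF 0x10

    /* Mirrors NUMBER_CODE's packing: two 5-bit codes in one value. */
    #define PAIR_CODE(x, y) (((x) << 5) | (y))

    int main(void)
    {
        /* A small/big operand pair dispatches on (0x10 << 5) | 0xf. */
        assert(PAIR_CODE(SMALL_DEF, BIG_DEF) == 0x20f);
        return 0;
    }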
diff --git a/erts/emulator/beam/erl_thr_progress.c b/erts/emulator/beam/erl_thr_progress.c
index 545a0343d0..96824dc06e 100644
--- a/erts/emulator/beam/erl_thr_progress.c
+++ b/erts/emulator/beam/erl_thr_progress.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2011-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2011-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -79,7 +80,6 @@
#include "erl_thr_progress.h"
#include "global.h"
-#ifdef ERTS_SMP
#define ERTS_THR_PRGR_DBG_CHK_WAKEUP_REQUEST_VALUE 0
@@ -94,9 +94,9 @@
#define ERTS_THR_PRGR_FTL_ERR_BLCK_POLL_INTERVAL 100
-#define ERTS_THR_PRGR_LFLG_BLOCK (((erts_aint32_t) 1) << 31)
-#define ERTS_THR_PRGR_LFLG_NO_LEADER (((erts_aint32_t) 1) << 30)
-#define ERTS_THR_PRGR_LFLG_WAITING_UM (((erts_aint32_t) 1) << 29)
+#define ERTS_THR_PRGR_LFLG_BLOCK ((erts_aint32_t) (1U << 31))
+#define ERTS_THR_PRGR_LFLG_NO_LEADER ((erts_aint32_t) (1U << 30))
+#define ERTS_THR_PRGR_LFLG_WAITING_UM ((erts_aint32_t) (1U << 29))
#define ERTS_THR_PRGR_LFLG_ACTIVE_MASK (~(ERTS_THR_PRGR_LFLG_NO_LEADER \
| ERTS_THR_PRGR_LFLG_BLOCK \
| ERTS_THR_PRGR_LFLG_WAITING_UM))
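The rewritten flag constants replace ((erts_aint32_t) 1) << 31 with a cast of 1U << 31: left-shifting a signed 1 into the sign bit is undefined behavior in C, whereas the unsigned shift is well defined and only the final conversion to the signed atomic type is implementation-defined. A hypothetical standalone demonstration of the well-defined form (not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t bit31 = 1U << 31;       /* well defined: 0x80000000 */
        int32_t flag = (int32_t) bit31;  /* same shape as the macros above */
        printf("0x%08x\n", (unsigned) flag);
        /* By contrast, (1 << 31) on a 32-bit int is undefined behavior. */
        return 0;
    }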
@@ -115,70 +115,24 @@
#undef read_nob
#define read_nob erts_thr_prgr_read_nob__
-#ifdef ARCH_64
-
static ERTS_INLINE void
set_mb(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
{
- erts_atomic_set_mb(atmc, val);
+ erts_atomic64_set_mb(atmc, (erts_aint64_t) val);
}
static ERTS_INLINE void
set_nob(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
{
- erts_atomic_set_nob(atmc, val);
+ erts_atomic64_set_nob(atmc, (erts_aint64_t) val);
}
static ERTS_INLINE void
init_nob(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
{
- erts_atomic_init_nob(atmc, val);
-}
-
-#else
-
-#undef dw_aint_to_val
-#define dw_aint_to_val erts_thr_prgr_dw_aint_to_val__
-
-static void
-val_to_dw_aint(erts_dw_aint_t *dw_aint, ErtsThrPrgrVal val)
-{
-#ifdef ETHR_SU_DW_NAINT_T__
- dw_aint->dw_sint = (ETHR_SU_DW_NAINT_T__) val;
-#else
- dw_aint->sint[ERTS_DW_AINT_LOW_WORD]
- = (erts_aint_t) (val & 0xffffffff);
- dw_aint->sint[ERTS_DW_AINT_HIGH_WORD]
- = (erts_aint_t) ((val >> 32) & 0xffffffff);
-#endif
-}
-
-static ERTS_INLINE void
-set_mb(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
-{
- erts_dw_aint_t dw_aint;
- val_to_dw_aint(&dw_aint, val);
- erts_dw_atomic_set_mb(atmc, &dw_aint);
+ erts_atomic64_init_nob(atmc, (erts_aint64_t) val);
}
-static ERTS_INLINE void
-set_nob(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
-{
- erts_dw_aint_t dw_aint;
- val_to_dw_aint(&dw_aint, val);
- erts_dw_atomic_set_nob(atmc, &dw_aint);
-}
-
-static ERTS_INLINE void
-init_nob(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
-{
- erts_dw_aint_t dw_aint;
- val_to_dw_aint(&dw_aint, val);
- erts_dw_atomic_init_nob(atmc, &dw_aint);
-}
-
-#endif
-
/* #define ERTS_THR_PROGRESS_STATE_DEBUG */
#ifdef ERTS_THR_PROGRESS_STATE_DEBUG
@@ -187,8 +141,8 @@ init_nob(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
#warning "Thread progress state debug is on"
#endif
-#define ERTS_THR_PROGRESS_STATE_DEBUG_LEADER (((erts_aint32_t) 1) << 0)
-#define ERTS_THR_PROGRESS_STATE_DEBUG_ACTIVE (((erts_aint32_t) 1) << 1)
+#define ERTS_THR_PROGRESS_STATE_DEBUG_LEADER ((erts_aint32_t) (1U << 0))
+#define ERTS_THR_PROGRESS_STATE_DEBUG_ACTIVE ((erts_aint32_t) (1U << 1))
#define ERTS_THR_PROGRESS_STATE_DEBUG_INIT(ID) \
erts_atomic32_init_nob(&intrnl->thr[(ID)].data.state_debug, \
@@ -224,10 +178,10 @@ do { \
#endif /* ERTS_THR_PROGRESS_STATE_DEBUG */
-#define ERTS_THR_PRGR_BLCKR_INVALID (~((erts_aint32_t) 0))
-#define ERTS_THR_PRGR_BLCKR_UNMANAGED (((erts_aint32_t) 1) << 31)
+#define ERTS_THR_PRGR_BLCKR_INVALID ((erts_aint32_t) (~0U))
+#define ERTS_THR_PRGR_BLCKR_UNMANAGED ((erts_aint32_t) (1U << 31))
-#define ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING (((erts_aint32_t) 1) << 31)
+#define ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING ((erts_aint32_t) (1U << 31))
#define ERTS_THR_PRGR_BM_BITS 32
#define ERTS_THR_PRGR_BM_SHIFT 5
@@ -366,13 +320,23 @@ tmp_thr_prgr_data(ErtsSchedulerData *esdp)
ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(esdp);
if (!tpd) {
- /*
- * We only allocate the part up to the wakeup_request field
- * which is the first field only used by registered threads
- */
- tpd = erts_alloc(ERTS_ALC_T_T_THR_PRGR_DATA,
- offsetof(ErtsThrPrgrData, wakeup_request));
- init_tmp_thr_prgr_data(tpd);
+	    /*
+	     * We only allocate the part up to the wakeup_request field, which is
+	     * the first field used only by registered threads.
+	     */
+ size_t alloc_size = offsetof(ErtsThrPrgrData, wakeup_request);
+
+ /* We may land here as a result of unmanaged_delay being called from
+ * the lock counting module, which in turn might be called from within
+ * the allocator, so we use plain malloc to avoid deadlocks. */
+ tpd =
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ malloc(alloc_size);
+#else
+ erts_alloc(ERTS_ALC_T_T_THR_PRGR_DATA, alloc_size);
+#endif
+
+ init_tmp_thr_prgr_data(tpd);
}
return tpd;
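The hunk above allocates only the leading slice of ErtsThrPrgrData, up to its first registered-threads-only field, and falls back to plain malloc() under lock counting because erts_alloc() could re-enter the allocator. A minimal sketch of the offsetof()-based partial allocation, using a hypothetical struct in place of ErtsThrPrgrData:

    #include <stddef.h>
    #include <stdlib.h>

    struct tpd_demo {
        int is_temporary;       /* used by every thread */
        int id;                 /* used by every thread */
        void *wakeup_request;   /* first field used only by registered threads */
        /* ... more registered-only fields ... */
    };

    int main(void)
    {
        /* Allocate only the prefix that temporary users touch. */
        struct tpd_demo *tpd = malloc(offsetof(struct tpd_demo, wakeup_request));
        if (tpd) {
            tpd->is_temporary = 1;
            tpd->id = -1;
            free(tpd);  /* release with the same allocator that was used */
        }
        return 0;
    }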
@@ -382,8 +346,13 @@ static ERTS_INLINE void
return_tmp_thr_prgr_data(ErtsThrPrgrData *tpd)
{
if (tpd->is_temporary) {
- erts_tsd_set(erts_thr_prgr_data_key__, NULL);
- erts_free(ERTS_ALC_T_T_THR_PRGR_DATA, tpd);
+ erts_tsd_set(erts_thr_prgr_data_key__, NULL);
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ free(tpd);
+#else
+ erts_free(ERTS_ALC_T_T_THR_PRGR_DATA, tpd);
+#endif
}
}
@@ -547,7 +516,7 @@ erts_thr_progress_register_unmanaged_thread(ErtsThrPrgrCallbacks *callbacks)
if (tpd) {
if (!tpd->is_temporary)
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"%s:%d:%s(): Double register of thread\n",
__FILE__, __LINE__, __func__);
is_blocking = tpd->is_blocking;
@@ -569,7 +538,7 @@ erts_thr_progress_register_unmanaged_thread(ErtsThrPrgrCallbacks *callbacks)
#endif
ASSERT(tpd->id >= 0);
if (tpd->id >= intrnl->unmanaged.no)
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"%s:%d:%s(): Too many unmanaged registered threads\n",
__FILE__, __LINE__, __func__);
@@ -592,7 +561,7 @@ erts_thr_progress_register_managed_thread(ErtsSchedulerData *esdp,
if (tpd) {
if (!tpd->is_temporary)
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"%s:%d:%s(): Double register of thread\n",
__FILE__, __LINE__, __func__);
is_blocking = tpd->is_blocking;
@@ -613,7 +582,7 @@ erts_thr_progress_register_managed_thread(ErtsSchedulerData *esdp,
tpd->id = erts_atomic32_inc_read_nob(&intrnl->misc.data.managed_id);
ASSERT(tpd->id >= 0);
if (tpd->id >= intrnl->managed.no)
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"%s:%d:%s(): Too many managed registered threads\n",
__FILE__, __LINE__, __func__);
@@ -745,6 +714,7 @@ leader_update(ErtsThrPrgrData *tpd)
tpd->leader_state.chk_next_ix = no_managed;
erts_atomic32_set_nob(&intrnl->misc.data.umrefc_ix.current,
(erts_aint32_t) new_umrefc_ix);
+ tpd->leader_state.umrefc_ix.current = new_umrefc_ix;
ETHR_MEMBAR(ETHR_StoreLoad);
refc = erts_atomic_read_nob(&intrnl->umrefc[umrefc_ix].refc);
ASSERT(refc >= 0);
@@ -1014,8 +984,10 @@ erts_thr_progress_unmanaged_continue__(ErtsThrPrgrDelayHandle handle)
#ifdef ERTS_ENABLE_LOCK_CHECK
ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(NULL);
ERTS_LC_ASSERT(tpd && tpd->is_delaying);
- tpd->is_delaying = 0;
- return_tmp_thr_prgr_data(tpd);
+ tpd->is_delaying--;
+ ASSERT(tpd->is_delaying >= 0);
+ if (!tpd->is_delaying)
+ return_tmp_thr_prgr_data(tpd);
#endif
ASSERT(!erts_thr_progress_is_managed_thread());
@@ -1040,7 +1012,7 @@ erts_thr_progress_unmanaged_delay__(void)
#ifdef ERTS_ENABLE_LOCK_CHECK
{
ErtsThrPrgrData *tpd = tmp_thr_prgr_data(NULL);
- tpd->is_delaying = 1;
+ tpd->is_delaying++;
}
#endif
return (ErtsThrPrgrDelayHandle) umrefc_ix;
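Taken together, the two lock-check hunks above turn is_delaying from a flag into a nesting counter: every delay increments it, every continue decrements it, and the temporary progress data is returned only when the outermost delay ends. A compact sketch of the guard pattern under hypothetical names:

    static int is_delaying = 0;     /* nesting depth */

    static void delay_begin(void)
    {
        is_delaying++;              /* inner delays simply nest */
    }

    static void delay_end(void)
    {
        is_delaying--;
        if (is_delaying == 0) {
            /* release temporary per-thread data, outermost end only */
        }
    }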
@@ -1078,7 +1050,7 @@ has_reached_wakeup(ErtsThrPrgrVal wakeup)
limit += 1;
if (!erts_thr_progress_has_passed__(limit, wakeup))
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"Invalid wakeup request value found:"
" current=%b64u, wakeup=%b64u, limit=%b64u",
current, wakeup, limit);
@@ -1147,7 +1119,7 @@ request_wakeup_managed(ErtsThrPrgrData *tpd, ErtsThrPrgrVal value)
ix = erts_atomic32_inc_read_nob(&mwd->len) - 1;
#if ERTS_THR_PRGR_DBG_CHK_WAKEUP_REQUEST_VALUE
if (ix >= intrnl->managed.no)
- erl_exit(ERTS_ABORT_EXIT, "Internal error: Too many wakeup requests\n");
+ erts_exit(ERTS_ABORT_EXIT, "Internal error: Too many wakeup requests\n");
#endif
mwd->id[ix] = tpd->id;
@@ -1231,7 +1203,7 @@ wakeup_unmanaged_threads(ErtsThrPrgrUnmanagedWakeupData *umwd)
int hbase = hix << ERTS_THR_PRGR_BM_SHIFT;
int hbit;
for (hbit = 0; hbit < ERTS_THR_PRGR_BM_BITS; hbit++) {
- if (hmask & (1 << hbit)) {
+ if (hmask & (1U << hbit)) {
erts_aint_t lmask;
int lix = hbase + hbit;
ASSERT(0 <= lix && lix < umwd->low_sz);
@@ -1240,7 +1212,7 @@ wakeup_unmanaged_threads(ErtsThrPrgrUnmanagedWakeupData *umwd)
int lbase = lix << ERTS_THR_PRGR_BM_SHIFT;
int lbit;
for (lbit = 0; lbit < ERTS_THR_PRGR_BM_BITS; lbit++) {
- if (lmask & (1 << lbit)) {
+ if (lmask & (1U << lbit)) {
int id = lbase + lbit;
wakeup_unmanaged(id);
}
@@ -1381,25 +1353,10 @@ erts_thr_progress_block(void)
thr_progress_block(tmp_thr_prgr_data(NULL), 1);
}
-void
-erts_thr_progress_fatal_error_block(SWord timeout,
- ErtsThrPrgrData *tmp_tpd_bufp)
+int
+erts_thr_progress_fatal_error_block(ErtsThrPrgrData *tmp_tpd_bufp)
{
ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(NULL);
- erts_aint32_t bc;
- SWord time_left = timeout;
- SysTimeval to;
-
- /*
- * Counting poll intervals may give us a too long timeout
- * if cpu is busy. If we got tolerant time of day we use it
- * to prevent this.
- */
- if (!erts_disable_tolerant_timeofday) {
- erts_get_timeval(&to);
- to.tv_sec += timeout / 1000;
- to.tv_sec += timeout % 1000;
- }
if (!tpd) {
/*
@@ -1412,9 +1369,25 @@ erts_thr_progress_fatal_error_block(SWord timeout,
init_tmp_thr_prgr_data(tpd);
}
- bc = thr_progress_block(tpd, 0);
- if (bc == 0)
- return; /* Succefully blocked all managed threads */
+    /* Returns the number of threads that have not yet been blocked */
+ return thr_progress_block(tpd, 0);
+}
+
+void
+erts_thr_progress_fatal_error_wait(SWord timeout) {
+ erts_aint32_t bc;
+ SWord time_left = timeout;
+ ErtsMonotonicTime timeout_time;
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+
+    /*
+     * Counting poll intervals may give us too long a timeout
+     * if the CPU is busy. We use an absolute timeout time to
+     * try to prevent this. If we haven't got time correction
+     * this may, however, fail too...
+     */
+ timeout_time = erts_get_monotonic_time(esdp);
+ timeout_time += ERTS_MSEC_TO_MONOTONIC((ErtsMonotonicTime) timeout);
while (1) {
if (erts_milli_sleep(ERTS_THR_PRGR_FTL_ERR_BLCK_POLL_INTERVAL) == 0)
@@ -1424,14 +1397,8 @@ erts_thr_progress_fatal_error_block(SWord timeout,
	    break; /* Successfully blocked all managed threads */
if (time_left <= 0)
break; /* Timeout */
- if (!erts_disable_tolerant_timeofday) {
- SysTimeval now;
- erts_get_timeval(&now);
- if (now.tv_sec > to.tv_sec)
- break; /* Timeout */
- if (now.tv_sec == to.tv_sec && now.tv_usec >= to.tv_usec)
- break; /* Timeout */
- }
+ if (timeout_time <= erts_get_monotonic_time(esdp))
+ break; /* Timeout */
}
}
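The new wait loop computes an absolute monotonic deadline once and compares against the clock on every poll, so a busy CPU that stretches the poll intervals can no longer inflate the total wait. A sketch of the same pattern on POSIX, with clock_gettime(CLOCK_MONOTONIC) standing in for erts_get_monotonic_time() (an assumption, not the ERTS API):

    #include <time.h>

    static long long now_ms(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (long long) ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
    }

    static void wait_with_deadline(long long timeout_ms)
    {
        long long deadline = now_ms() + timeout_ms;   /* computed once */
        struct timespec poll = { 0, 100 * 1000000 };  /* 100 ms poll */
        while (now_ms() < deadline) {
            nanosleep(&poll, NULL);
            /* ... re-check the blocking condition here ... */
        }
    }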
@@ -1545,4 +1512,3 @@ void erts_thr_progress_dbg_print_state(void)
}
-#endif
diff --git a/erts/emulator/beam/erl_thr_progress.h b/erts/emulator/beam/erl_thr_progress.h
index 5f392944c2..fa936b5707 100644
--- a/erts/emulator/beam/erl_thr_progress.h
+++ b/erts/emulator/beam/erl_thr_progress.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2011-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2011-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -32,18 +33,6 @@
#include "sys.h"
-#ifndef ERTS_SMP
-
-#define erts_smp_thr_progress_block() ((void) 0)
-#define erts_smp_thr_progress_unblock() ((void) 0)
-#define erts_smp_thr_progress_is_blocking() 1
-
-#else /* ERTS_SMP */
-
-#define erts_smp_thr_progress_block erts_thr_progress_block
-#define erts_smp_thr_progress_unblock erts_thr_progress_unblock
-#define erts_smp_thr_progress_is_blocking erts_thr_progress_is_blocking
-
void erts_thr_progress_block(void);
void erts_thr_progress_unblock(void);
int erts_thr_progress_is_blocking(void);
@@ -83,16 +72,13 @@ typedef struct {
ErtsThrPrgrLeaderState leader_state;
} ErtsThrPrgrData;
-void erts_thr_progress_fatal_error_block(SWord timeout,
- ErtsThrPrgrData *tmp_tpd_bufp);
+int erts_thr_progress_fatal_error_block(ErtsThrPrgrData *tmp_tpd_bufp);
+void erts_thr_progress_fatal_error_wait(SWord timeout);
-#endif /* ERTS_SMP */
typedef struct ErtsThrPrgrLaterOp_ ErtsThrPrgrLaterOp;
struct ErtsThrPrgrLaterOp_ {
-#ifdef ERTS_SMP
ErtsThrPrgrVal later;
-#endif
void (*func)(void *);
void *data;
ErtsThrPrgrLaterOp *next;
@@ -106,7 +92,6 @@ struct ErtsThrPrgrLaterOp_ {
#include "erl_threads.h"
#include "erl_process.h"
-#ifdef ERTS_SMP
/* ERTS_THR_PRGR_VAL_FIRST should only be used when initializing... */
#define ERTS_THR_PRGR_VAL_FIRST ((ErtsThrPrgrVal) 0)
@@ -115,11 +100,7 @@ struct ErtsThrPrgrLaterOp_ {
extern erts_tsd_key_t erts_thr_prgr_data_key__;
-#ifdef ARCH_64
-# define ERTS_THR_PRGR_ATOMIC erts_atomic_t
-#else /* ARCH_32 */
-# define ERTS_THR_PRGR_ATOMIC erts_dw_atomic_t
-#endif
+#define ERTS_THR_PRGR_ATOMIC erts_atomic64_t
typedef struct {
void *arg;
@@ -158,10 +139,6 @@ void erts_thr_progress_unmanaged_continue__(int umrefc_ix);
void erts_thr_progress_dbg_print_state(void);
-#ifdef ARCH_32
-#define ERTS_THR_PRGR_ATOMIC erts_dw_atomic_t
-ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_dw_aint_to_val__(erts_dw_aint_t *dw_aint);
-#endif
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_read_nob__(ERTS_THR_PRGR_ATOMIC *atmc);
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_read_acqb__(ERTS_THR_PRGR_ATOMIC *atmc);
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_read_mb__(ERTS_THR_PRGR_ATOMIC *atmc);
@@ -184,68 +161,24 @@ ERTS_GLB_INLINE int erts_thr_progress_has_reached(ErtsThrPrgrVal val);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-#ifdef ARCH_64
-
-ERTS_GLB_INLINE ErtsThrPrgrVal
-erts_thr_prgr_read_nob__(ERTS_THR_PRGR_ATOMIC *atmc)
-{
- return (ErtsThrPrgrVal) erts_atomic_read_nob(atmc);
-}
-
-ERTS_GLB_INLINE ErtsThrPrgrVal
-erts_thr_prgr_read_acqb__(ERTS_THR_PRGR_ATOMIC *atmc)
-{
- return (ErtsThrPrgrVal) erts_atomic_read_acqb(atmc);
-}
-
-ERTS_GLB_INLINE ErtsThrPrgrVal
-erts_thr_prgr_read_mb__(ERTS_THR_PRGR_ATOMIC *atmc)
-{
- return (ErtsThrPrgrVal) erts_atomic_read_mb(atmc);
-}
-
-#else /* ARCH_32 */
-
-ERTS_GLB_INLINE ErtsThrPrgrVal
-erts_thr_prgr_dw_aint_to_val__(erts_dw_aint_t *dw_aint)
-{
-#ifdef ETHR_SU_DW_NAINT_T__
- return (ErtsThrPrgrVal) dw_aint->dw_sint;
-#else
- ErtsThrPrgrVal res;
- res = (ErtsThrPrgrVal) ((Uint32) dw_aint->sint[ERTS_DW_AINT_HIGH_WORD]);
- res <<= 32;
- res |= (ErtsThrPrgrVal) ((Uint32) dw_aint->sint[ERTS_DW_AINT_LOW_WORD]);
- return res;
-#endif
-}
-
ERTS_GLB_INLINE ErtsThrPrgrVal
erts_thr_prgr_read_nob__(ERTS_THR_PRGR_ATOMIC *atmc)
{
- erts_dw_aint_t dw_aint;
- erts_dw_atomic_read_nob(atmc, &dw_aint);
- return erts_thr_prgr_dw_aint_to_val__(&dw_aint);
+ return (ErtsThrPrgrVal) erts_atomic64_read_nob(atmc);
}
ERTS_GLB_INLINE ErtsThrPrgrVal
erts_thr_prgr_read_acqb__(ERTS_THR_PRGR_ATOMIC *atmc)
{
- erts_dw_aint_t dw_aint;
- erts_dw_atomic_read_acqb(atmc, &dw_aint);
- return erts_thr_prgr_dw_aint_to_val__(&dw_aint);
+ return (ErtsThrPrgrVal) erts_atomic64_read_acqb(atmc);
}
ERTS_GLB_INLINE ErtsThrPrgrVal
erts_thr_prgr_read_mb__(ERTS_THR_PRGR_ATOMIC *atmc)
{
- erts_dw_aint_t dw_aint;
- erts_dw_atomic_read_mb(atmc, &dw_aint);
- return erts_thr_prgr_dw_aint_to_val__(&dw_aint);
+ return (ErtsThrPrgrVal) erts_atomic64_read_mb(atmc);
}
-#endif
-
ERTS_GLB_INLINE int
erts_thr_progress_is_managed_thread(void)
{
@@ -375,6 +308,5 @@ erts_thr_progress_has_reached(ErtsThrPrgrVal val)
#endif
-#endif /* ERTS_SMP */
#endif
diff --git a/erts/emulator/beam/erl_thr_queue.c b/erts/emulator/beam/erl_thr_queue.c
index f8ca87ddcc..548c2768e5 100644
--- a/erts/emulator/beam/erl_thr_queue.c
+++ b/erts/emulator/beam/erl_thr_queue.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2011-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2011-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -86,32 +87,10 @@
#define ERTS_THR_Q_MAX_FINI_DEQ_OPS 50
-#ifdef ERTS_SMP
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(sl_element,
ErtsThrQElement_t,
1000,
ERTS_ALC_T_THR_Q_EL_SL)
-#else
-
-static void
-init_sl_element_alloc(void)
-{
-}
-
-static ErtsThrQElement_t *
-sl_element_alloc(void)
-{
- return erts_alloc(ERTS_ALC_T_THR_Q_EL_SL,
- sizeof(ErtsThrQElement_t));
-}
-
-static void
-sl_element_free(ErtsThrQElement_t *p)
-{
- erts_free(ERTS_ALC_T_THR_Q_EL_SL, p);
-}
-
-#endif
#define ErtsThrQDirtyReadEl(A) \
((ErtsThrQElement_t *) erts_atomic_read_dirty((A)))
@@ -134,14 +113,6 @@ static void noop_callback(void *arg) { }
void
erts_thr_q_initialize(ErtsThrQ_t *q, ErtsThrQInit_t *qi)
{
-#ifndef USE_THREADS
- q->init = *qi;
- if (!q->init.notify)
- q->init.notify = noop_callback;
- q->first = NULL;
- q->last = NULL;
- q->q.blk = NULL;
-#else
erts_atomic_init_nob(&q->tail.data.marker.next, ERTS_AINT_NULL);
q->tail.data.marker.data.ptr = NULL;
erts_atomic_init_nob(&q->tail.data.last,
@@ -163,10 +134,8 @@ erts_thr_q_initialize(ErtsThrQ_t *q, ErtsThrQInit_t *qi)
q->head.deq_fini.automatic = qi->auto_finalize_dequeue;
q->head.deq_fini.start = NULL;
q->head.deq_fini.end = NULL;
-#ifdef ERTS_SMP
q->head.next.thr_progress = erts_thr_progress_current();
q->head.next.thr_progress_reached = 1;
-#endif
q->head.next.um_refc_ix = 1;
q->head.next.unref_end = &q->tail.data.marker;
q->head.used_marker = 1;
@@ -175,15 +144,12 @@ erts_thr_q_initialize(ErtsThrQ_t *q, ErtsThrQInit_t *qi)
q->q.finalizing = 0;
q->q.live = qi->live.queue;
q->q.blk = NULL;
-#endif
}
ErtsThrQCleanState_t
erts_thr_q_finalize(ErtsThrQ_t *q)
{
-#ifdef USE_THREADS
q->q.finalizing = 1;
-#endif
while (erts_thr_q_dequeue(q));
return erts_thr_q_clean(q);
}
@@ -223,12 +189,11 @@ ErtsThrQCleanState_t
erts_thr_q_destroy(ErtsThrQ_t *q)
{
if (!q->q.blk)
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"Trying to destroy not created thread queue\n");
return erts_thr_q_finalize(q);
}
-#ifdef USE_THREADS
static void
destroy(ErtsThrQ_t *q)
@@ -248,7 +213,6 @@ destroy(ErtsThrQ_t *q)
erts_free(atype, q->q.blk);
}
-#endif
static ERTS_INLINE ErtsThrQElement_t *
element_live_alloc(ErtsThrQLive_t live)
@@ -266,11 +230,7 @@ static ERTS_INLINE ErtsThrQElement_t *
element_alloc(ErtsThrQ_t *q)
{
ErtsThrQLive_t live;
-#ifdef USE_THREADS
live = q->tail.data.live;
-#else
- live = q->init.live.objects;
-#endif
return element_live_alloc(live);
}
@@ -290,15 +250,10 @@ static ERTS_INLINE void
element_free(ErtsThrQ_t *q, ErtsThrQElement_t *el)
{
ErtsThrQLive_t live;
-#ifdef USE_THREADS
live = q->head.live;
-#else
- live = q->init.live.objects;
-#endif
element_live_free(live, el);
}
-#ifdef USE_THREADS
static ERTS_INLINE ErtsThrQElement_t *
enqueue_managed(ErtsThrQ_t *q, ErtsThrQElement_t *this)
@@ -422,11 +377,9 @@ clean(ErtsThrQ_t *q, int max_ops, int do_notify)
return ERTS_THR_Q_CLEAN;
}
-#ifdef ERTS_SMP
if (q->head.next.thr_progress_reached
|| erts_thr_progress_has_reached(q->head.next.thr_progress)) {
q->head.next.thr_progress_reached = 1;
-#endif
um_refc_ix = q->head.next.um_refc_ix;
if (erts_atomic_read_acqb(&q->tail.data.um_refc[um_refc_ix]) == 0) {
/* Move unreferenced end pointer forward... */
@@ -438,23 +391,17 @@ clean(ErtsThrQ_t *q, int max_ops, int do_notify)
ilast = (erts_aint_t) enqueue_marker(q, NULL);
if (q->head.unref_end == (ErtsThrQElement_t *) ilast)
- ERTS_SMP_MEMORY_BARRIER;
+ ERTS_THR_MEMORY_BARRIER;
else {
q->head.next.unref_end = (ErtsThrQElement_t *) ilast;
-#ifdef ERTS_SMP
q->head.next.thr_progress = erts_thr_progress_later(NULL);
-#endif
erts_atomic32_set_relb(&q->tail.data.um_refc_ix,
um_refc_ix);
q->head.next.um_refc_ix = um_refc_ix == 0 ? 1 : 0;
-#ifdef ERTS_SMP
q->head.next.thr_progress_reached = 0;
-#endif
}
}
-#ifdef ERTS_SMP
}
-#endif
head = ErtsThrQDirtyReadEl(&q->head.head);
if (q->head.first == head) {
@@ -488,9 +435,7 @@ clean(ErtsThrQ_t *q, int max_ops, int do_notify)
check_thr_progress:
-#ifdef ERTS_SMP
if (q->head.next.thr_progress_reached)
-#endif
{
int um_refc_ix = q->head.next.um_refc_ix;
if (erts_atomic_read_acqb(&q->tail.data.um_refc[um_refc_ix]) == 0) {
@@ -504,24 +449,16 @@ check_thr_progress:
return ERTS_THR_Q_NEED_THR_PRGR;
}
-#endif
ErtsThrQCleanState_t
erts_thr_q_clean(ErtsThrQ_t *q)
{
-#ifdef USE_THREADS
return clean(q, ERTS_THR_Q_MAX_SCHED_CLEAN_OPS, 0);
-#else
- return ERTS_THR_Q_CLEAN;
-#endif
}
ErtsThrQCleanState_t
erts_thr_q_inspect(ErtsThrQ_t *q, int ensure_empty)
{
-#ifndef USE_THREADS
- return ERTS_THR_Q_CLEAN;
-#else
ErtsThrQElement_t *head = ErtsThrQDirtyReadEl(&q->head.head);
if (ensure_empty) {
erts_aint_t inext;
@@ -552,53 +489,33 @@ erts_thr_q_inspect(ErtsThrQ_t *q, int ensure_empty)
if (q->head.first != q->head.unref_end)
return ERTS_THR_Q_DIRTY;
-#ifdef ERTS_SMP
if (q->head.next.thr_progress_reached)
-#endif
{
int um_refc_ix = q->head.next.um_refc_ix;
if (erts_atomic_read_acqb(&q->tail.data.um_refc[um_refc_ix]) == 0)
return ERTS_THR_Q_DIRTY;
}
return ERTS_THR_Q_NEED_THR_PRGR;
-#endif
}
static void
enqueue(ErtsThrQ_t *q, void *data, ErtsThrQElement_t *this)
{
-#ifndef USE_THREADS
- ASSERT(data);
-
- this->next = NULL;
- this->data.ptr = data;
-
- if (q->last)
- q->last->next = this;
- else {
- q->first = q->last = this;
- q->init.notify(q->init.arg);
- }
-#else
int notify;
int um_refc_ix = 0;
-#ifdef ERTS_SMP
int unmanaged_thread;
-#endif
#if ERTS_THR_Q_DBG_CHK_DATA
if (!data)
- erl_exit(ERTS_ABORT_EXIT, "Missing data in enqueue\n");
+ erts_exit(ERTS_ABORT_EXIT, "Missing data in enqueue\n");
#endif
ASSERT(!q->q.finalizing);
this->data.ptr = data;
-#ifdef ERTS_SMP
unmanaged_thread = !erts_thr_progress_is_managed_thread();
if (unmanaged_thread)
-#endif
{
um_refc_ix = erts_atomic32_read_acqb(&q->tail.data.um_refc_ix);
while (1) {
@@ -615,9 +532,7 @@ enqueue(ErtsThrQ_t *q, void *data, ErtsThrQElement_t *this)
notify = this == enqueue_managed(q, this);
-#ifdef ERTS_SMP
if (unmanaged_thread)
-#endif
{
if (notify)
erts_atomic_dec_relb(&q->tail.data.um_refc[um_refc_ix]);
@@ -626,7 +541,6 @@ enqueue(ErtsThrQ_t *q, void *data, ErtsThrQElement_t *this)
}
if (notify)
q->tail.data.notify(q->tail.data.arg);
-#endif
}
void
@@ -644,9 +558,6 @@ erts_thr_q_prepare_enqueue(ErtsThrQ_t *q)
int
erts_thr_q_get_finalize_dequeue_data(ErtsThrQ_t *q, ErtsThrQFinDeQ_t *fdp)
{
-#ifndef USE_THREADS
- return 0;
-#else
#ifdef DEBUG
if (!q->head.deq_fini.start) {
ASSERT(!q->head.deq_fini.end);
@@ -669,14 +580,12 @@ erts_thr_q_get_finalize_dequeue_data(ErtsThrQ_t *q, ErtsThrQFinDeQ_t *fdp)
q->head.deq_fini.start = NULL;
q->head.deq_fini.end = NULL;
return fdp->start != NULL;
-#endif
}
void
erts_thr_q_append_finalize_dequeue_data(ErtsThrQFinDeQ_t *fdp0,
ErtsThrQFinDeQ_t *fdp1)
{
-#ifdef USE_THREADS
if (fdp1->start) {
if (fdp0->end)
ErtsThrQDirtySetEl(&fdp0->end->next, fdp1->start);
@@ -684,13 +593,11 @@ erts_thr_q_append_finalize_dequeue_data(ErtsThrQFinDeQ_t *fdp0,
fdp0->start = fdp1->start;
fdp0->end = fdp1->end;
}
-#endif
}
int erts_thr_q_finalize_dequeue(ErtsThrQFinDeQ_t *state)
{
-#ifdef USE_THREADS
ErtsThrQElement_t *start = state->start;
if (start) {
ErtsThrQLive_t live;
@@ -709,17 +616,14 @@ int erts_thr_q_finalize_dequeue(ErtsThrQFinDeQ_t *state)
return 1; /* More to do */
state->end = NULL;
}
-#endif
return 0;
}
void
erts_thr_q_finalize_dequeue_state_init(ErtsThrQFinDeQ_t *state)
{
-#ifdef USE_THREADS
state->start = NULL;
state->end = NULL;
-#endif
}
@@ -733,22 +637,6 @@ erts_thr_q_enqueue_prepared(ErtsThrQ_t *q, void *data, ErtsThrQPrepEnQ_t *prep)
void *
erts_thr_q_dequeue(ErtsThrQ_t *q)
{
-#ifndef USE_THREADS
- void *res;
- ErtsThrQElement_t *tmp;
-
- if (!q->first)
- return NULL;
- tmp = q->first;
- res = tmp->data.ptr;
- q->first = tmp->next;
- if (!q->first)
- q->last = NULL;
-
- element_free(q, tmp);
-
- return res;
-#else
ErtsThrQElement_t *head;
erts_aint_t inext;
void *res;
@@ -770,12 +658,34 @@ erts_thr_q_dequeue(ErtsThrQ_t *q)
#if ERTS_THR_Q_DBG_CHK_DATA
head->data.ptr = NULL;
if (!res)
- erl_exit(ERTS_ABORT_EXIT, "Missing data in dequeue\n");
+ erts_exit(ERTS_ABORT_EXIT, "Missing data in dequeue\n");
#endif
clean(q,
(q->head.deq_fini.automatic
? ERTS_THR_Q_MAX_DEQUEUE_CLEAN_OPS
: ERTS_THR_Q_MAX_SCHED_CLEAN_OPS), 1);
return res;
-#endif
}
+
+#ifdef USE_LTTNG_VM_TRACEPOINTS
+int
+erts_thr_q_length_dirty(ErtsThrQ_t *q)
+{
+ int n = 0;
+ ErtsThrQElement_t *e;
+ erts_aint_t inext;
+
+ e = ErtsThrQDirtyReadEl(&q->head.head);
+ inext = erts_atomic_read_acqb(&e->next);
+
+ while (inext != ERTS_AINT_NULL) {
+ e = (ErtsThrQElement_t *) inext;
+ if (e != &q->tail.data.marker) {
+ /* don't count marker */
+ n++;
+ }
+ inext = erts_atomic_read_acqb(&e->next);
+ }
+ return n;
+}
+#endif
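erts_thr_q_length_dirty() above walks the queue through its atomic next pointers with acquire reads and skips the embedded marker element; the count is "dirty" because concurrent enqueues and dequeues may change it immediately. A generic C11 sketch of the same traversal over an atomically linked list (hypothetical node type, not the ErtsThrQ structures):

    #include <stdatomic.h>
    #include <stddef.h>

    struct node {
        _Atomic(struct node *) next;
        void *data;
    };

    /* Count the elements after head, skipping one sentinel marker. */
    static int length_dirty(struct node *head, const struct node *marker)
    {
        int n = 0;
        struct node *e = atomic_load_explicit(&head->next, memory_order_acquire);
        while (e != NULL) {
            if (e != marker)
                n++;  /* don't count the marker */
            e = atomic_load_explicit(&e->next, memory_order_acquire);
        }
        return n;
    }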
diff --git a/erts/emulator/beam/erl_thr_queue.h b/erts/emulator/beam/erl_thr_queue.h
index 13af758b3f..163a25318d 100644
--- a/erts/emulator/beam/erl_thr_queue.h
+++ b/erts/emulator/beam/erl_thr_queue.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2011-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2011-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -77,11 +78,7 @@ typedef struct ErtsThrQElement_t_ ErtsThrQElement_t;
typedef struct ErtsThrQElement_t ErtsThrQPrepEnQ_t;
struct ErtsThrQElement_t_ {
-#ifdef USE_THREADS
erts_atomic_t next;
-#else
- ErtsThrQElement_t *next;
-#endif
union {
erts_atomic_t atmc;
void *ptr;
@@ -99,7 +96,6 @@ typedef enum {
ERTS_THR_Q_DIRTY,
} ErtsThrQCleanState_t;
-#ifdef USE_THREADS
typedef struct {
ErtsThrQElement_t marker;
@@ -107,9 +103,7 @@ typedef struct {
erts_atomic_t um_refc[2];
erts_atomic32_t um_refc_ix;
ErtsThrQLive_t live;
-#ifdef ERTS_SMP
erts_atomic32_t thr_prgr_clean_scheduled;
-#endif
void *arg;
void (*notify)(void *);
} ErtsThrQTail_t;
@@ -140,10 +134,8 @@ struct ErtsThrQ_t_ {
ErtsThrQElement_t *end;
} deq_fini;
struct {
-#ifdef ERTS_SMP
ErtsThrPrgrVal thr_progress;
int thr_progress_reached;
-#endif
int um_refc_ix;
ErtsThrQElement_t *unref_end;
} next;
@@ -158,18 +150,6 @@ struct ErtsThrQ_t_ {
} q;
};
-#else /* !USE_THREADS */
-
-struct ErtsThrQ_t_ {
- ErtsThrQInit_t init;
- ErtsThrQElement_t *first;
- ErtsThrQElement_t *last;
- struct {
- void *blk;
- } q;
-};
-
-#endif
void erts_thr_q_init(void);
void erts_thr_q_initialize(ErtsThrQ_t *, ErtsThrQInit_t *);
@@ -189,19 +169,19 @@ void erts_thr_q_append_finalize_dequeue_data(ErtsThrQFinDeQ_t *,
int erts_thr_q_finalize_dequeue(ErtsThrQFinDeQ_t *);
void erts_thr_q_finalize_dequeue_state_init(ErtsThrQFinDeQ_t *);
-#ifdef ERTS_SMP
-ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_q_need_thr_progress(ErtsThrQ_t *q);
+#ifdef USE_LTTNG_VM_TRACEPOINTS
+int erts_thr_q_length_dirty(ErtsThrQ_t *);
#endif
+ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_q_need_thr_progress(ErtsThrQ_t *q);
+
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-#ifdef ERTS_SMP
ERTS_GLB_INLINE ErtsThrPrgrVal
erts_thr_q_need_thr_progress(ErtsThrQ_t *q)
{
return q->head.next.thr_progress;
}
-#endif
#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h
index 80026104db..aedceb6fc2 100644
--- a/erts/emulator/beam/erl_threads.h
+++ b/erts/emulator/beam/erl_threads.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2001-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2001-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -44,11 +45,6 @@
* Data dependency read barrier. Orders *only* loads
* according to data dependency across the barrier.
*
- * If thread support has been disabled, these barriers will become no-ops.
- *
- * If the prefix ERTS_THR_ is replaced with ERTS_SMP_, the barriers will
- * be enabled only in the SMP enabled runtime system.
- *
* --- Atomic operations ---
*
* Atomics operations exist for 32-bit, word size, and double word size
@@ -85,20 +81,6 @@
* barrier. Load in atomic operation is ordered
* before the barrier.
*
- * If thread support has been disabled, these functions are mapped to
- * functions that performs the same operation, but aren't atomic
- * and don't imply any memory barriers.
- *
- * If the atomic operations are prefixed with erts_smp_ instead of only
- * erts_ the atomic operations will only be atomic in the SMP enabled
- * runtime system, and will be mapped to non-atomic operations without
- * memory barriers in the runtime system without SMP support. Atomic
- * operations with erts_smp_ prefix should use the atomic types
- * erts_smp_atomic32_t, erts_smp_atomic_t, and erts_smp_dw_atomic_t
- * instead of erts_atomic32_t, erts_atomic_t, and erts_dw_atomic_t. The
- * integer data types erts_aint32_t, erts_aint_t, and erts_dw_atomic_t
- * are the same.
- *
* --- 32-bit atomic operations ---
*
* The following 32-bit atomic operations exist. <B> should be
@@ -258,13 +240,15 @@
#include "sys.h"
-#ifdef USE_THREADS
+#include "erl_lock_flags.h"
+#include "erl_term.h"
+
#define ETHR_TRY_INLINE_FUNCS
#include "ethread.h"
+
#include "erl_lock_check.h"
#include "erl_lock_count.h"
-#include "erl_term.h"
#if defined(__GLIBC__) && (__GLIBC__ << 16) + __GLIBC_MINOR__ < (2 << 16) + 4
/*
@@ -306,9 +290,11 @@ typedef struct {
erts_lc_lock_t lc;
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_t lcnt;
+ erts_lcnt_ref_t lcnt;
+#endif
+#ifdef DEBUG
+ erts_lock_flags_t flags;
#endif
-
} erts_mtx_t;
typedef ethr_cond erts_cnd_t;
@@ -319,7 +305,10 @@ typedef struct {
erts_lc_lock_t lc;
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_t lcnt;
+ erts_lcnt_ref_t lcnt;
+#endif
+#ifdef DEBUG
+ erts_lock_flags_t flags;
#endif
} erts_rwmtx_t;
@@ -344,6 +333,16 @@ typedef ethr_ts_event erts_tse_t;
#define erts_aint32_t ethr_sint32_t
#define erts_atomic32_t ethr_atomic32_t
+#if defined(ARCH_32)
+# define erts_atomic64_t ethr_dw_atomic_t
+# define erts_aint64_t ethr_sint64_t
+#elif defined(ARCH_64)
+# define erts_atomic64_t ethr_atomic_t
+# define erts_aint64_t ethr_sint_t
+#else
+# error "Not supported architecture"
+#endif
+
#define ERTS_DW_AINT_HIGH_WORD ETHR_DW_SINT_HIGH_WORD
#define ERTS_DW_AINT_LOW_WORD ETHR_DW_SINT_LOW_WORD
@@ -354,7 +353,10 @@ typedef struct {
erts_lc_lock_t lc;
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_t lcnt;
+ erts_lcnt_ref_t lcnt;
+#endif
+#ifdef DEBUG
+ erts_lock_flags_t flags;
#endif
} erts_spinlock_t;
@@ -365,7 +367,10 @@ typedef struct {
erts_lc_lock_t lc;
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_t lcnt;
+ erts_lcnt_ref_t lcnt;
+#endif
+#ifdef DEBUG
+ erts_lock_flags_t flags;
#endif
} erts_rwlock_t;
@@ -380,73 +385,6 @@ __decl_noreturn void __noreturn erts_thr_fatal_error(int, char *);
# define ERTS_HAVE_REC_MTX_INIT ETHR_HAVE_ETHR_REC_MUTEX_INIT
#endif
-#else /* #ifdef USE_THREADS */
-
-#define ERTS_THR_MEMORY_BARRIER
-#define ERTS_THR_WRITE_MEMORY_BARRIER
-#define ERTS_THR_READ_MEMORY_BARRIER
-#define ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER
-
-#define ERTS_THR_OPTS_DEFAULT_INITER 0
-typedef int erts_thr_opts_t;
-typedef int erts_thr_init_data_t;
-typedef int erts_thr_late_init_data_t;
-typedef int erts_tid_t;
-typedef int erts_mtx_t;
-typedef int erts_cnd_t;
-#define ERTS_RWMTX_OPT_DEFAULT_INITER {0}
-#define ERTS_RWMTX_TYPE_NORMAL 0
-#define ERTS_RWMTX_TYPE_FREQUENT_READ 0
-#define ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ 0
-#define ERTS_RWMTX_LONG_LIVED 0
-#define ERTS_RWMTX_SHORT_LIVED 0
-#define ERTS_RWMTX_UNKNOWN_LIVED 0
-typedef struct {
- char type;
- char lived;
- int main_spincount;
- int aux_spincount;
-} erts_rwmtx_opt_t;
-typedef int erts_rwmtx_t;
-typedef int erts_tsd_key_t;
-typedef int erts_tse_t;
-
-typedef struct { SWord sint[2]; } erts_dw_aint_t;
-typedef SWord erts_aint_t;
-typedef Sint32 erts_aint32_t;
-
-#define erts_dw_atomic_t erts_dw_aint_t
-#define erts_atomic_t erts_aint_t
-#define erts_atomic32_t erts_aint32_t
-
-#if __GNUC__ > 2
-typedef struct { } erts_spinlock_t;
-typedef struct { } erts_rwlock_t;
-#else
-typedef struct { int gcc_is_buggy; } erts_spinlock_t;
-typedef struct { int gcc_is_buggy; } erts_rwlock_t;
-#endif
-
-#ifdef WORDS_BIGENDIAN
-#define ERTS_DW_AINT_LOW_WORD 1
-#define ERTS_DW_AINT_HIGH_WORD 0
-#else
-#define ERTS_DW_AINT_LOW_WORD 0
-#define ERTS_DW_AINT_HIGH_WORD 1
-#endif
-
-#define ERTS_MTX_INITER 0
-#define ERTS_CND_INITER 0
-#define ERTS_THR_INIT_DATA_DEF_INITER 0
-
-#define ERTS_HAVE_REC_MTX_INIT 1
-
-#endif /* #ifdef USE_THREADS */
-
-#define erts_no_dw_atomic_t erts_dw_aint_t
-#define erts_no_atomic_t erts_aint_t
-#define erts_no_atomic32_t erts_aint32_t
-
#define ERTS_AINT_NULL ((erts_aint_t) NULL)
#define ERTS_AINT_T_MAX (~(((erts_aint_t) 1) << (sizeof(erts_aint_t)*8-1)))
@@ -463,17 +401,16 @@ ERTS_GLB_INLINE void erts_thr_detach(erts_tid_t tid);
ERTS_GLB_INLINE void erts_thr_exit(void *res);
ERTS_GLB_INLINE void erts_thr_install_exit_handler(void (*exit_handler)(void));
ERTS_GLB_INLINE erts_tid_t erts_thr_self(void);
+ERTS_GLB_INLINE int erts_thr_getname(erts_tid_t tid, char *buf, size_t len);
ERTS_GLB_INLINE int erts_equal_tids(erts_tid_t x, erts_tid_t y);
-ERTS_GLB_INLINE void erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra,
- int enable_lcnt);
-ERTS_GLB_INLINE void erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra,
- Uint16 opt, int enable_lcnt);
-ERTS_GLB_INLINE void erts_mtx_init_locked_x(erts_mtx_t *mtx,
- char *name,
- Eterm extra,
- int enable_lcnt);
-ERTS_GLB_INLINE void erts_mtx_init(erts_mtx_t *mtx, char *name);
-ERTS_GLB_INLINE void erts_mtx_init_locked(erts_mtx_t *mtx, char *name);
+ERTS_GLB_INLINE void erts_mtx_init(erts_mtx_t *mtx,
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
+ERTS_GLB_INLINE void erts_mtx_init_locked(erts_mtx_t *mtx,
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_mtx_destroy(erts_mtx_t *mtx);
#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE int erts_mtx_trylock_x(erts_mtx_t *mtx, char *file,
@@ -492,18 +429,15 @@ ERTS_GLB_INLINE void erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx);
ERTS_GLB_INLINE void erts_cnd_signal(erts_cnd_t *cnd);
ERTS_GLB_INLINE void erts_cnd_broadcast(erts_cnd_t *cnd);
ERTS_GLB_INLINE void erts_rwmtx_set_reader_group(int no);
-ERTS_GLB_INLINE void erts_rwmtx_init_opt_x(erts_rwmtx_t *rwmtx,
- erts_rwmtx_opt_t *opt,
- char *name,
- Eterm extra);
-ERTS_GLB_INLINE void erts_rwmtx_init_x(erts_rwmtx_t *rwmtx,
- char *name,
- Eterm extra);
ERTS_GLB_INLINE void erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx,
- erts_rwmtx_opt_t *opt,
- char *name);
+ erts_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_rwmtx_init(erts_rwmtx_t *rwmtx,
- char *name);
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_rwmtx_destroy(erts_rwmtx_t *rwmtx);
#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE int erts_rwmtx_tryrlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line);
@@ -520,66 +454,10 @@ ERTS_GLB_INLINE void erts_rwmtx_runlock(erts_rwmtx_t *rwmtx);
ERTS_GLB_INLINE void erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx);
ERTS_GLB_INLINE int erts_lc_rwmtx_is_rlocked(erts_rwmtx_t *mtx);
ERTS_GLB_INLINE int erts_lc_rwmtx_is_rwlocked(erts_rwmtx_t *mtx);
-
-ERTS_GLB_INLINE void erts_no_dw_atomic_set(erts_no_dw_atomic_t *var, erts_no_dw_atomic_t *val);
-ERTS_GLB_INLINE void erts_no_dw_atomic_read(erts_no_dw_atomic_t *var, erts_no_dw_atomic_t *val);
-ERTS_GLB_INLINE int erts_no_dw_atomic_cmpxchg(erts_no_dw_atomic_t *var,
- erts_no_dw_atomic_t *val,
- erts_no_dw_atomic_t *old_val);
-ERTS_GLB_INLINE void erts_no_atomic_set(erts_no_atomic_t *var, erts_aint_t i);
-ERTS_GLB_INLINE erts_aint_t erts_no_atomic_read(erts_no_atomic_t *var);
-ERTS_GLB_INLINE erts_aint_t erts_no_atomic_inc_read(erts_no_atomic_t *incp);
-ERTS_GLB_INLINE erts_aint_t erts_no_atomic_dec_read(erts_no_atomic_t *decp);
-ERTS_GLB_INLINE void erts_no_atomic_inc(erts_no_atomic_t *incp);
-ERTS_GLB_INLINE void erts_no_atomic_dec(erts_no_atomic_t *decp);
-ERTS_GLB_INLINE erts_aint_t erts_no_atomic_add_read(erts_no_atomic_t *addp,
- erts_aint_t i);
-ERTS_GLB_INLINE void erts_no_atomic_add(erts_no_atomic_t *addp, erts_aint_t i);
-ERTS_GLB_INLINE erts_aint_t erts_no_atomic_read_bor(erts_no_atomic_t *var,
- erts_aint_t mask);
-ERTS_GLB_INLINE erts_aint_t erts_no_atomic_read_band(erts_no_atomic_t *var,
- erts_aint_t mask);
-ERTS_GLB_INLINE erts_aint_t erts_no_atomic_xchg(erts_no_atomic_t *xchgp,
- erts_aint_t new);
-ERTS_GLB_INLINE erts_aint_t erts_no_atomic_cmpxchg(erts_no_atomic_t *xchgp,
- erts_aint_t new,
- erts_aint_t expected);
-ERTS_GLB_INLINE erts_aint_t erts_no_atomic_read_bset(erts_no_atomic_t *var,
- erts_aint_t mask,
- erts_aint_t set);
-ERTS_GLB_INLINE void erts_no_atomic32_set(erts_no_atomic32_t *var,
- erts_aint32_t i);
-ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_read(erts_no_atomic32_t *var);
-ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_inc_read(erts_no_atomic32_t *incp);
-ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_dec_read(erts_no_atomic32_t *decp);
-ERTS_GLB_INLINE void erts_no_atomic32_inc(erts_no_atomic32_t *incp);
-ERTS_GLB_INLINE void erts_no_atomic32_dec(erts_no_atomic32_t *decp);
-ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_add_read(erts_no_atomic32_t *addp,
- erts_aint32_t i);
-ERTS_GLB_INLINE void erts_no_atomic32_add(erts_no_atomic32_t *addp,
- erts_aint32_t i);
-ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_read_bor(erts_no_atomic32_t *var,
- erts_aint32_t mask);
-ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_read_band(erts_no_atomic32_t *var,
- erts_aint32_t mask);
-ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_xchg(erts_no_atomic32_t *xchgp,
- erts_aint32_t new);
-ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_cmpxchg(erts_no_atomic32_t *xchgp,
- erts_aint32_t new,
- erts_aint32_t expected);
-ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_read_bset(erts_no_atomic32_t *var,
- erts_aint32_t mask,
- erts_aint32_t set);
-
-ERTS_GLB_INLINE void erts_spinlock_init_x_opt(erts_spinlock_t *lock,
- char *name,
- Eterm extra,
- Uint16 opt);
-ERTS_GLB_INLINE void erts_spinlock_init_x(erts_spinlock_t *lock,
- char *name,
- Eterm extra);
ERTS_GLB_INLINE void erts_spinlock_init(erts_spinlock_t *lock,
- char *name);
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_spinlock_destroy(erts_spinlock_t *lock);
ERTS_GLB_INLINE void erts_spin_unlock(erts_spinlock_t *lock);
#ifdef ERTS_ENABLE_LOCK_POSITION
@@ -588,11 +466,10 @@ ERTS_GLB_INLINE void erts_spin_lock_x(erts_spinlock_t *lock, char *file, unsigne
ERTS_GLB_INLINE void erts_spin_lock(erts_spinlock_t *lock);
#endif
ERTS_GLB_INLINE int erts_lc_spinlock_is_locked(erts_spinlock_t *lock);
-ERTS_GLB_INLINE void erts_rwlock_init_x(erts_rwlock_t *lock,
- char *name,
- Eterm extra);
ERTS_GLB_INLINE void erts_rwlock_init(erts_rwlock_t *lock,
- char *name);
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_rwlock_destroy(erts_rwlock_t *lock);
ERTS_GLB_INLINE void erts_read_unlock(erts_rwlock_t *lock);
#ifdef ERTS_ENABLE_LOCK_POSITION
@@ -611,23 +488,29 @@ ERTS_GLB_INLINE void erts_tsd_set(erts_tsd_key_t key, void *value);
ERTS_GLB_INLINE void * erts_tsd_get(erts_tsd_key_t key);
ERTS_GLB_INLINE erts_tse_t *erts_tse_fetch(void);
ERTS_GLB_INLINE void erts_tse_return(erts_tse_t *ep);
+ERTS_GLB_INLINE void erts_tse_prepare_timed(erts_tse_t *ep);
ERTS_GLB_INLINE void erts_tse_set(erts_tse_t *ep);
ERTS_GLB_INLINE void erts_tse_reset(erts_tse_t *ep);
ERTS_GLB_INLINE int erts_tse_wait(erts_tse_t *ep);
ERTS_GLB_INLINE int erts_tse_swait(erts_tse_t *ep, int spincount);
+ERTS_GLB_INLINE int erts_tse_twait(erts_tse_t *ep, Sint64 tmo);
+ERTS_GLB_INLINE int erts_tse_stwait(erts_tse_t *ep, int spincount, Sint64 tmo);
ERTS_GLB_INLINE int erts_tse_is_tmp(erts_tse_t *ep);
ERTS_GLB_INLINE void erts_thr_set_main_status(int, int);
ERTS_GLB_INLINE int erts_thr_get_main_status(void);
ERTS_GLB_INLINE void erts_thr_yield(void);
+
#ifdef ETHR_HAVE_ETHR_SIG_FUNCS
#define ERTS_THR_HAVE_SIG_FUNCS 1
ERTS_GLB_INLINE void erts_thr_sigmask(int how, const sigset_t *set,
sigset_t *oset);
ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
+
+ERTS_GLB_INLINE void erts_thr_kill(erts_tid_t tid, int sig);
+
#endif /* #ifdef HAVE_ETHR_SIG_FUNCS */
-#ifdef USE_THREADS
ERTS_GLB_INLINE erts_aint_t
erts_atomic_read_bset_nob(erts_atomic_t *var,
@@ -1200,480 +1083,570 @@ erts_atomic32_read_dirty(erts_atomic32_t *var)
#endif
-#else /* !USE_THREADS */
+/* 64-bit atomics */
+
+#if defined(ARCH_64)
+
+#define erts_atomic64_init_nob ethr_atomic_init
+#define erts_atomic64_set_nob ethr_atomic_set
+#define erts_atomic64_read_nob ethr_atomic_read
+#define erts_atomic64_inc_read_nob ethr_atomic_inc_read
+#define erts_atomic64_dec_read_nob ethr_atomic_dec_read
+#define erts_atomic64_inc_nob ethr_atomic_inc
+#define erts_atomic64_dec_nob ethr_atomic_dec
+#define erts_atomic64_add_read_nob ethr_atomic_add_read
+#define erts_atomic64_add_nob ethr_atomic_add
+#define erts_atomic64_read_bor_nob ethr_atomic_read_bor
+#define erts_atomic64_read_band_nob ethr_atomic_read_band
+#define erts_atomic64_xchg_nob ethr_atomic_xchg
+#define erts_atomic64_cmpxchg_nob ethr_atomic_cmpxchg
+#define erts_atomic64_read_bset_nob erts_atomic_read_bset_nob
+
+#define erts_atomic64_init_mb ethr_atomic_init_mb
+#define erts_atomic64_set_mb ethr_atomic_set_mb
+#define erts_atomic64_read_mb ethr_atomic_read_mb
+#define erts_atomic64_inc_read_mb ethr_atomic_inc_read_mb
+#define erts_atomic64_dec_read_mb ethr_atomic_dec_read_mb
+#define erts_atomic64_inc_mb ethr_atomic_inc_mb
+#define erts_atomic64_dec_mb ethr_atomic_dec_mb
+#define erts_atomic64_add_read_mb ethr_atomic_add_read_mb
+#define erts_atomic64_add_mb ethr_atomic_add_mb
+#define erts_atomic64_read_bor_mb ethr_atomic_read_bor_mb
+#define erts_atomic64_read_band_mb ethr_atomic_read_band_mb
+#define erts_atomic64_xchg_mb ethr_atomic_xchg_mb
+#define erts_atomic64_cmpxchg_mb ethr_atomic_cmpxchg_mb
+#define erts_atomic64_read_bset_mb erts_atomic_read_bset_mb
+
+#define erts_atomic64_init_acqb ethr_atomic_init_acqb
+#define erts_atomic64_set_acqb ethr_atomic_set_acqb
+#define erts_atomic64_read_acqb ethr_atomic_read_acqb
+#define erts_atomic64_inc_read_acqb ethr_atomic_inc_read_acqb
+#define erts_atomic64_dec_read_acqb ethr_atomic_dec_read_acqb
+#define erts_atomic64_inc_acqb ethr_atomic_inc_acqb
+#define erts_atomic64_dec_acqb ethr_atomic_dec_acqb
+#define erts_atomic64_add_read_acqb ethr_atomic_add_read_acqb
+#define erts_atomic64_add_acqb ethr_atomic_add_acqb
+#define erts_atomic64_read_bor_acqb ethr_atomic_read_bor_acqb
+#define erts_atomic64_read_band_acqb ethr_atomic_read_band_acqb
+#define erts_atomic64_xchg_acqb ethr_atomic_xchg_acqb
+#define erts_atomic64_cmpxchg_acqb ethr_atomic_cmpxchg_acqb
+#define erts_atomic64_read_bset_acqb erts_atomic_read_bset_acqb
+
+#define erts_atomic64_init_relb ethr_atomic_init_relb
+#define erts_atomic64_set_relb ethr_atomic_set_relb
+#define erts_atomic64_read_relb ethr_atomic_read_relb
+#define erts_atomic64_inc_read_relb ethr_atomic_inc_read_relb
+#define erts_atomic64_dec_read_relb ethr_atomic_dec_read_relb
+#define erts_atomic64_inc_relb ethr_atomic_inc_relb
+#define erts_atomic64_dec_relb ethr_atomic_dec_relb
+#define erts_atomic64_add_read_relb ethr_atomic_add_read_relb
+#define erts_atomic64_add_relb ethr_atomic_add_relb
+#define erts_atomic64_read_bor_relb ethr_atomic_read_bor_relb
+#define erts_atomic64_read_band_relb ethr_atomic_read_band_relb
+#define erts_atomic64_xchg_relb ethr_atomic_xchg_relb
+#define erts_atomic64_cmpxchg_relb ethr_atomic_cmpxchg_relb
+#define erts_atomic64_read_bset_relb erts_atomic_read_bset_relb
+
+#define erts_atomic64_init_ddrb ethr_atomic_init_ddrb
+#define erts_atomic64_set_ddrb ethr_atomic_set_ddrb
+#define erts_atomic64_read_ddrb ethr_atomic_read_ddrb
+#define erts_atomic64_inc_read_ddrb ethr_atomic_inc_read_ddrb
+#define erts_atomic64_dec_read_ddrb ethr_atomic_dec_read_ddrb
+#define erts_atomic64_inc_ddrb ethr_atomic_inc_ddrb
+#define erts_atomic64_dec_ddrb ethr_atomic_dec_ddrb
+#define erts_atomic64_add_read_ddrb ethr_atomic_add_read_ddrb
+#define erts_atomic64_add_ddrb ethr_atomic_add_ddrb
+#define erts_atomic64_read_bor_ddrb ethr_atomic_read_bor_ddrb
+#define erts_atomic64_read_band_ddrb ethr_atomic_read_band_ddrb
+#define erts_atomic64_xchg_ddrb ethr_atomic_xchg_ddrb
+#define erts_atomic64_cmpxchg_ddrb ethr_atomic_cmpxchg_ddrb
+#define erts_atomic64_read_bset_ddrb erts_atomic_read_bset_ddrb
+
+#define erts_atomic64_init_rb ethr_atomic_init_rb
+#define erts_atomic64_set_rb ethr_atomic_set_rb
+#define erts_atomic64_read_rb ethr_atomic_read_rb
+#define erts_atomic64_inc_read_rb ethr_atomic_inc_read_rb
+#define erts_atomic64_dec_read_rb ethr_atomic_dec_read_rb
+#define erts_atomic64_inc_rb ethr_atomic_inc_rb
+#define erts_atomic64_dec_rb ethr_atomic_dec_rb
+#define erts_atomic64_add_read_rb ethr_atomic_add_read_rb
+#define erts_atomic64_add_rb ethr_atomic_add_rb
+#define erts_atomic64_read_bor_rb ethr_atomic_read_bor_rb
+#define erts_atomic64_read_band_rb ethr_atomic_read_band_rb
+#define erts_atomic64_xchg_rb ethr_atomic_xchg_rb
+#define erts_atomic64_cmpxchg_rb ethr_atomic_cmpxchg_rb
+#define erts_atomic64_read_bset_rb erts_atomic_read_bset_rb
+
+#define erts_atomic64_init_wb ethr_atomic_init_wb
+#define erts_atomic64_set_wb ethr_atomic_set_wb
+#define erts_atomic64_read_wb ethr_atomic_read_wb
+#define erts_atomic64_inc_read_wb ethr_atomic_inc_read_wb
+#define erts_atomic64_dec_read_wb ethr_atomic_dec_read_wb
+#define erts_atomic64_inc_wb ethr_atomic_inc_wb
+#define erts_atomic64_dec_wb ethr_atomic_dec_wb
+#define erts_atomic64_add_read_wb ethr_atomic_add_read_wb
+#define erts_atomic64_add_wb ethr_atomic_add_wb
+#define erts_atomic64_read_bor_wb ethr_atomic_read_bor_wb
+#define erts_atomic64_read_band_wb ethr_atomic_read_band_wb
+#define erts_atomic64_xchg_wb ethr_atomic_xchg_wb
+#define erts_atomic64_cmpxchg_wb ethr_atomic_cmpxchg_wb
+#define erts_atomic64_read_bset_wb erts_atomic_read_bset_wb
+
+#define erts_atomic64_set_dirty erts_atomic_set_dirty
+#define erts_atomic64_read_dirty erts_atomic_read_dirty
+
+#elif defined(ARCH_32)
+
+#undef ERTS_ATOMIC64_OPS_DECL__
+
+#define ERTS_ATOMIC64_OPS_DECL__(BARRIER) \
+ERTS_GLB_INLINE void \
+erts_atomic64_init_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val); \
+ERTS_GLB_INLINE void \
+erts_atomic64_set_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val); \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_read_ ## BARRIER(erts_atomic64_t *var); \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_inc_read_ ## BARRIER(erts_atomic64_t *var); \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_dec_read_ ## BARRIER(erts_atomic64_t *var); \
+ERTS_GLB_INLINE void \
+erts_atomic64_inc_ ## BARRIER(erts_atomic64_t *var); \
+ERTS_GLB_INLINE void \
+erts_atomic64_dec_ ## BARRIER(erts_atomic64_t *var); \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_add_read_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val); \
+ERTS_GLB_INLINE void \
+erts_atomic64_add_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val); \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_read_bor_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val); \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_read_band_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val); \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_xchg_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val); \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_cmpxchg_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t new, \
+ erts_aint64_t exp); \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_read_bset_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t mask, \
+ erts_aint64_t set)
+
+ERTS_ATOMIC64_OPS_DECL__(nob);
+ERTS_ATOMIC64_OPS_DECL__(mb);
+ERTS_ATOMIC64_OPS_DECL__(acqb);
+ERTS_ATOMIC64_OPS_DECL__(relb);
+ERTS_ATOMIC64_OPS_DECL__(ddrb);
+ERTS_ATOMIC64_OPS_DECL__(rb);
+ERTS_ATOMIC64_OPS_DECL__(wb);
+
+#undef ERTS_ATOMIC64_OPS_DECL__
+
+ERTS_GLB_INLINE void
+erts_atomic64_set_dirty(erts_atomic64_t *var, erts_aint64_t val);
+ERTS_GLB_INLINE erts_aint64_t
+erts_atomic64_read_dirty(erts_atomic64_t *var);
-/* Double word size atomics */
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-#define erts_dw_atomic_init_nob erts_no_dw_atomic_set
-#define erts_dw_atomic_set_nob erts_no_dw_atomic_set
-#define erts_dw_atomic_read_nob erts_no_dw_atomic_read
-#define erts_dw_atomic_cmpxchg_nob erts_no_dw_atomic_cmpxchg
+/*
+ * The ethr_dw_atomic_*_nob() wrappers below exist so that
+ * ERTS_ATOMIC64_OPS_IMPL__() can map every erts barrier suffix,
+ * including nob, onto an ethread operation by token pasting...
+ */
+static ERTS_INLINE void
+ethr_dw_atomic_init_nob(ethr_dw_atomic_t *var,
+ ethr_dw_sint_t *val)
+{
+ ethr_dw_atomic_init(var, val);
+}
-#define erts_dw_atomic_init_mb erts_no_dw_atomic_init
-#define erts_dw_atomic_set_mb erts_no_dw_atomic_set
-#define erts_dw_atomic_read_mb erts_no_dw_atomic_read
-#define erts_dw_atomic_cmpxchg_mb erts_no_dw_atomic_cmpxchg
+static ERTS_INLINE void
+ethr_dw_atomic_set_nob(ethr_dw_atomic_t *var,
+ ethr_dw_sint_t *val)
+{
+ ethr_dw_atomic_set(var, val);
+}
-#define erts_dw_atomic_init_acqb erts_no_dw_atomic_init
-#define erts_dw_atomic_set_acqb erts_no_dw_atomic_set
-#define erts_dw_atomic_read_acqb erts_no_dw_atomic_read
-#define erts_dw_atomic_cmpxchg_acqb erts_no_dw_atomic_cmpxchg
+static ERTS_INLINE void
+ethr_dw_atomic_read_nob(ethr_dw_atomic_t *var,
+ ethr_dw_sint_t *val)
+{
+ ethr_dw_atomic_read(var, val);
+}
-#define erts_dw_atomic_init_relb erts_no_dw_atomic_init
-#define erts_dw_atomic_set_relb erts_no_dw_atomic_set
-#define erts_dw_atomic_read_relb erts_no_dw_atomic_read
-#define erts_dw_atomic_cmpxchg_relb erts_no_dw_atomic_cmpxchg
+static ERTS_INLINE int
+ethr_dw_atomic_cmpxchg_nob(ethr_dw_atomic_t *var,
+ ethr_dw_sint_t *new,
+ ethr_dw_sint_t *xchg)
+{
+ return ethr_dw_atomic_cmpxchg(var, new, xchg);
+}
-#define erts_dw_atomic_init_ddrb erts_no_dw_atomic_init
-#define erts_dw_atomic_set_ddrb erts_no_dw_atomic_set
-#define erts_dw_atomic_read_ddrb erts_no_dw_atomic_read
-#define erts_dw_atomic_cmpxchg_ddrb erts_no_dw_atomic_cmpxchg
+#undef ERTS_ATOMIC64_OPS_IMPL__
+#undef ERTS_ATOMIC64_DW_CMPXCHG_IMPL__
+#undef ERTS_DW_SINT_TO_AINT64__
+#undef ERTS_AINT64_TO_DW_SINT__
-#define erts_dw_atomic_init_rb erts_no_dw_atomic_init
-#define erts_dw_atomic_set_rb erts_no_dw_atomic_set
-#define erts_dw_atomic_read_rb erts_no_dw_atomic_read
-#define erts_dw_atomic_cmpxchg_rb erts_no_dw_atomic_cmpxchg
+#ifdef ETHR_SU_DW_NAINT_T__
+#define ERTS_DW_SINT_TO_AINT64__(DW) \
+ ((erts_aint64_t) DW.dw_sint)
+#define ERTS_AINT64_TO_DW_SINT__(DW, AINT64) \
+ (DW.dw_sint = (ETHR_SU_DW_NAINT_T__) AINT64)
+#else /* !ETHR_SU_DW_NAINT_T__ */
+#define ERTS_DW_SINT_TO_AINT64__(DW) \
+ ((((erts_aint64_t) DW.sint[ETHR_DW_SINT_HIGH_WORD]) << 32) \
+ | (((erts_aint64_t) DW.sint[ETHR_DW_SINT_LOW_WORD]) \
+ & ((erts_aint64_t) 0xffffffff)))
+#define ERTS_AINT64_TO_DW_SINT__(DW, AINT64) \
+ do { \
+ DW.sint[ETHR_DW_SINT_LOW_WORD] = \
+ (ethr_sint_t) (AINT64 & 0xffffffff); \
+ DW.sint[ETHR_DW_SINT_HIGH_WORD] = \
+ (ethr_sint_t) ((AINT64 >> 32) & 0xffffffff); \
+ } while (0)
+#endif /* !ETHR_SU_DW_NAINT_T__ */
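+
+/*
+ * Packing sketch for the branch above (no native single-word 64-bit
+ * type): the value 0x0123456789abcdef is stored with
+ * sint[ETHR_DW_SINT_HIGH_WORD] = 0x01234567 and
+ * sint[ETHR_DW_SINT_LOW_WORD] = 0x89abcdef;
+ * ERTS_DW_SINT_TO_AINT64__() reassembles it by shifting the high word
+ * left 32 bits and or:ing in the masked low word.
+ */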
-#define erts_dw_atomic_init_wb erts_no_dw_atomic_init
-#define erts_dw_atomic_set_wb erts_no_dw_atomic_set
-#define erts_dw_atomic_read_wb erts_no_dw_atomic_read
-#define erts_dw_atomic_cmpxchg_wb erts_no_dw_atomic_cmpxchg
+#define ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(CmpXchgOp, \
+ AVarP, XchgVar, NewVar, \
+ ModificationCode) \
+do { \
+ ethr_dw_sint_t dw_xchg__, dw_new__; \
+ ethr_dw_atomic_read(AVarP, &dw_xchg__); \
+ do { \
+ XchgVar = ERTS_DW_SINT_TO_AINT64__(dw_xchg__); \
+ { \
+ ModificationCode; \
+ } \
+ ERTS_AINT64_TO_DW_SINT__(dw_new__, NewVar); \
+ } while (!CmpXchgOp((AVarP), &dw_new__, &dw_xchg__)); \
+} while (0)
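+
+/*
+ * The macro above is a classic compare-and-swap retry loop: the
+ * current 64-bit value is read once, the modification is computed
+ * from it, and the result is installed with cmpxchg; on failure
+ * dw_xchg__ is refreshed with the value actually observed and the
+ * modification is recomputed. For instance, inc_read below behaves
+ * like this sketch:
+ *
+ *     do {
+ *         xchg = <current value of *var>;
+ *         new = xchg + 1;
+ *     } while (!cmpxchg(var, new, xchg));
+ *     return new;
+ */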
-#define erts_dw_atomic_set_dirty erts_no_dw_atomic_set
-#define erts_dw_atomic_read_dirty erts_no_dw_atomic_read
+#define ERTS_ATOMIC64_OPS_IMPL__(BARRIER) \
+ \
+ERTS_GLB_INLINE void \
+erts_atomic64_init_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val) \
+{ \
+ ethr_dw_sint_t dw; \
+ ERTS_AINT64_TO_DW_SINT__(dw, val); \
+ ethr_dw_atomic_init_ ## BARRIER(var, &dw); \
+} \
+ \
+ERTS_GLB_INLINE void \
+erts_atomic64_set_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val) \
+{ \
+ ethr_dw_sint_t dw; \
+ ERTS_AINT64_TO_DW_SINT__(dw, val); \
+ ethr_dw_atomic_set_ ## BARRIER(var, &dw); \
+} \
+ \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_read_ ## BARRIER(erts_atomic64_t *var) \
+{ \
+ ethr_dw_sint_t dw; \
+ ethr_dw_atomic_read_ ## BARRIER(var, &dw); \
+ return ERTS_DW_SINT_TO_AINT64__(dw); \
+} \
+ \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_inc_read_ ## BARRIER(erts_atomic64_t *var) \
+{ \
+ erts_aint64_t xchg, new; \
+ ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(ethr_dw_atomic_cmpxchg_ ## BARRIER, \
+ var, xchg, new, \
+ new = xchg + 1); \
+ return new; \
+} \
+ \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_dec_read_ ## BARRIER(erts_atomic64_t *var) \
+{ \
+ erts_aint64_t xchg, new; \
+ ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(ethr_dw_atomic_cmpxchg_ ## BARRIER, \
+ var, xchg, new, \
+ new = xchg - 1); \
+ return new; \
+} \
+ \
+ERTS_GLB_INLINE void \
+erts_atomic64_inc_ ## BARRIER(erts_atomic64_t *var) \
+{ \
+ erts_aint64_t xchg, new; \
+ ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(ethr_dw_atomic_cmpxchg_ ## BARRIER, \
+ var, xchg, new, \
+ new = xchg + 1); \
+} \
+ \
+ERTS_GLB_INLINE void \
+erts_atomic64_dec_ ## BARRIER(erts_atomic64_t *var) \
+{ \
+ erts_aint64_t xchg, new; \
+ ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(ethr_dw_atomic_cmpxchg_ ## BARRIER, \
+ var, xchg, new, \
+ new = xchg - 1); \
+} \
+ \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_add_read_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val) \
+{ \
+ erts_aint64_t xchg, new; \
+ ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(ethr_dw_atomic_cmpxchg_ ## BARRIER, \
+ var, xchg, new, \
+ new = xchg + val); \
+ return new; \
+} \
+ \
+ERTS_GLB_INLINE void \
+erts_atomic64_add_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val) \
+{ \
+ erts_aint64_t xchg, new; \
+ ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(ethr_dw_atomic_cmpxchg_ ## BARRIER, \
+ var, xchg, new, \
+ new = xchg + val); \
+} \
+ \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_read_bor_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val) \
+{ \
+ erts_aint64_t xchg, new; \
+ ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(ethr_dw_atomic_cmpxchg_ ## BARRIER, \
+ var, xchg, new, \
+ new = xchg | val); \
+ return xchg; \
+} \
+ \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_read_band_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val) \
+{ \
+ erts_aint64_t xchg, new; \
+ ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(ethr_dw_atomic_cmpxchg_ ## BARRIER, \
+ var, xchg, new, \
+ new = xchg & val); \
+ return xchg; \
+} \
+ \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_xchg_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val) \
+{ \
+ erts_aint64_t xchg, new; \
+ ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(ethr_dw_atomic_cmpxchg_ ## BARRIER, \
+ var, xchg, new, \
+ new = val); \
+ return xchg; \
+} \
+ \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_cmpxchg_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t new, \
+ erts_aint64_t exp) \
+{ \
+ ethr_dw_sint_t dw_xchg, dw_new; \
+ ERTS_AINT64_TO_DW_SINT__(dw_xchg, exp); \
+ ERTS_AINT64_TO_DW_SINT__(dw_new, new); \
+ if (ethr_dw_atomic_cmpxchg_ ## BARRIER(var, &dw_new, &dw_xchg)) \
+ return exp; \
+ return ERTS_DW_SINT_TO_AINT64__(dw_xchg); \
+} \
+ \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_read_bset_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t mask, \
+ erts_aint64_t set) \
+{ \
+ erts_aint64_t xchg, new; \
+ ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(ethr_dw_atomic_cmpxchg_ ## BARRIER, \
+ var, xchg, new, \
+ { \
+ new = xchg & ~mask; \
+ new |= mask & set; \
+ }); \
+ return xchg; \
+}
+
+ERTS_ATOMIC64_OPS_IMPL__(nob)
+ERTS_ATOMIC64_OPS_IMPL__(mb)
+ERTS_ATOMIC64_OPS_IMPL__(acqb)
+ERTS_ATOMIC64_OPS_IMPL__(relb)
+ERTS_ATOMIC64_OPS_IMPL__(ddrb)
+ERTS_ATOMIC64_OPS_IMPL__(rb)
+ERTS_ATOMIC64_OPS_IMPL__(wb)
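+
+/*
+ * Each ERTS_ATOMIC64_OPS_IMPL__(<barrier>) line above expands to the
+ * complete 64-bit operation set for that barrier variant (init, set,
+ * read, inc/dec, add, bor/band, xchg, cmpxchg, read_bset); the memory
+ * ordering is delegated to the ethr_dw_atomic_* operation selected by
+ * token pasting.
+ */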
+
+#undef ERTS_ATOMIC64_OPS_IMPL__
+#undef ERTS_ATOMIC64_DW_CMPXCHG_IMPL__
+
+ERTS_GLB_INLINE void
+erts_atomic64_set_dirty(erts_atomic64_t *var, erts_aint64_t val)
+{
+ ethr_sint_t *sint = ethr_dw_atomic_addr(var);
+ ethr_dw_sint_t dw;
+ ERTS_AINT64_TO_DW_SINT__(dw, val);
+ sint[0] = dw.sint[0];
+ sint[1] = dw.sint[1];
+}
-/* Word size atomics */
+ERTS_GLB_INLINE erts_aint64_t
+erts_atomic64_read_dirty(erts_atomic64_t *var)
+{
+ ethr_sint_t *sint;
+ ethr_dw_sint_t dw;
+ sint = ethr_dw_atomic_addr(var);
+ dw.sint[0] = sint[0];
+ dw.sint[1] = sint[1];
+ return ERTS_DW_SINT_TO_AINT64__(dw);
+}
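+
+/*
+ * The _dirty variants above deliberately bypass the atomic machinery:
+ * the two words are copied one at a time with no barrier and no
+ * atomicity, so they are only safe when the caller knows no other
+ * thread can write the variable concurrently (e.g. during
+ * initialization or under an exclusive lock).
+ */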
-#define erts_atomic_init_nob erts_no_atomic_set
-#define erts_atomic_set_nob erts_no_atomic_set
-#define erts_atomic_read_nob erts_no_atomic_read
-#define erts_atomic_inc_read_nob erts_no_atomic_inc_read
-#define erts_atomic_dec_read_nob erts_no_atomic_dec_read
-#define erts_atomic_inc_nob erts_no_atomic_inc
-#define erts_atomic_dec_nob erts_no_atomic_dec
-#define erts_atomic_add_read_nob erts_no_atomic_add_read
-#define erts_atomic_add_nob erts_no_atomic_add
-#define erts_atomic_read_bor_nob erts_no_atomic_read_bor
-#define erts_atomic_read_band_nob erts_no_atomic_read_band
-#define erts_atomic_xchg_nob erts_no_atomic_xchg
-#define erts_atomic_cmpxchg_nob erts_no_atomic_cmpxchg
-#define erts_atomic_read_bset_nob erts_no_atomic_read_bset
-
-#define erts_atomic_init_mb erts_no_atomic_set
-#define erts_atomic_set_mb erts_no_atomic_set
-#define erts_atomic_read_mb erts_no_atomic_read
-#define erts_atomic_inc_read_mb erts_no_atomic_inc_read
-#define erts_atomic_dec_read_mb erts_no_atomic_dec_read
-#define erts_atomic_inc_mb erts_no_atomic_inc
-#define erts_atomic_dec_mb erts_no_atomic_dec
-#define erts_atomic_add_read_mb erts_no_atomic_add_read
-#define erts_atomic_add_mb erts_no_atomic_add
-#define erts_atomic_read_bor_mb erts_no_atomic_read_bor
-#define erts_atomic_read_band_mb erts_no_atomic_read_band
-#define erts_atomic_xchg_mb erts_no_atomic_xchg
-#define erts_atomic_cmpxchg_mb erts_no_atomic_cmpxchg
-#define erts_atomic_read_bset_mb erts_no_atomic_read_bset
-
-#define erts_atomic_init_acqb erts_no_atomic_set
-#define erts_atomic_set_acqb erts_no_atomic_set
-#define erts_atomic_read_acqb erts_no_atomic_read
-#define erts_atomic_inc_read_acqb erts_no_atomic_inc_read
-#define erts_atomic_dec_read_acqb erts_no_atomic_dec_read
-#define erts_atomic_inc_acqb erts_no_atomic_inc
-#define erts_atomic_dec_acqb erts_no_atomic_dec
-#define erts_atomic_add_read_acqb erts_no_atomic_add_read
-#define erts_atomic_add_acqb erts_no_atomic_add
-#define erts_atomic_read_bor_acqb erts_no_atomic_read_bor
-#define erts_atomic_read_band_acqb erts_no_atomic_read_band
-#define erts_atomic_xchg_acqb erts_no_atomic_xchg
-#define erts_atomic_cmpxchg_acqb erts_no_atomic_cmpxchg
-#define erts_atomic_read_bset_acqb erts_no_atomic_read_bset
-
-#define erts_atomic_init_relb erts_no_atomic_set
-#define erts_atomic_set_relb erts_no_atomic_set
-#define erts_atomic_read_relb erts_no_atomic_read
-#define erts_atomic_inc_read_relb erts_no_atomic_inc_read
-#define erts_atomic_dec_read_relb erts_no_atomic_dec_read
-#define erts_atomic_inc_relb erts_no_atomic_inc
-#define erts_atomic_dec_relb erts_no_atomic_dec
-#define erts_atomic_add_read_relb erts_no_atomic_add_read
-#define erts_atomic_add_relb erts_no_atomic_add
-#define erts_atomic_read_bor_relb erts_no_atomic_read_bor
-#define erts_atomic_read_band_relb erts_no_atomic_read_band
-#define erts_atomic_xchg_relb erts_no_atomic_xchg
-#define erts_atomic_cmpxchg_relb erts_no_atomic_cmpxchg
-#define erts_atomic_read_bset_relb erts_no_atomic_read_bset
-
-#define erts_atomic_init_ddrb erts_no_atomic_set
-#define erts_atomic_set_ddrb erts_no_atomic_set
-#define erts_atomic_read_ddrb erts_no_atomic_read
-#define erts_atomic_inc_read_ddrb erts_no_atomic_inc_read
-#define erts_atomic_dec_read_ddrb erts_no_atomic_dec_read
-#define erts_atomic_inc_ddrb erts_no_atomic_inc
-#define erts_atomic_dec_ddrb erts_no_atomic_dec
-#define erts_atomic_add_read_ddrb erts_no_atomic_add_read
-#define erts_atomic_add_ddrb erts_no_atomic_add
-#define erts_atomic_read_bor_ddrb erts_no_atomic_read_bor
-#define erts_atomic_read_band_ddrb erts_no_atomic_read_band
-#define erts_atomic_xchg_ddrb erts_no_atomic_xchg
-#define erts_atomic_cmpxchg_ddrb erts_no_atomic_cmpxchg
-#define erts_atomic_read_bset_ddrb erts_no_atomic_read_bset
-
-#define erts_atomic_init_rb erts_no_atomic_set
-#define erts_atomic_set_rb erts_no_atomic_set
-#define erts_atomic_read_rb erts_no_atomic_read
-#define erts_atomic_inc_read_rb erts_no_atomic_inc_read
-#define erts_atomic_dec_read_rb erts_no_atomic_dec_read
-#define erts_atomic_inc_rb erts_no_atomic_inc
-#define erts_atomic_dec_rb erts_no_atomic_dec
-#define erts_atomic_add_read_rb erts_no_atomic_add_read
-#define erts_atomic_add_rb erts_no_atomic_add
-#define erts_atomic_read_bor_rb erts_no_atomic_read_bor
-#define erts_atomic_read_band_rb erts_no_atomic_read_band
-#define erts_atomic_xchg_rb erts_no_atomic_xchg
-#define erts_atomic_cmpxchg_rb erts_no_atomic_cmpxchg
-#define erts_atomic_read_bset_rb erts_no_atomic_read_bset
-
-#define erts_atomic_init_wb erts_no_atomic_set
-#define erts_atomic_set_wb erts_no_atomic_set
-#define erts_atomic_read_wb erts_no_atomic_read
-#define erts_atomic_inc_read_wb erts_no_atomic_inc_read
-#define erts_atomic_dec_read_wb erts_no_atomic_dec_read
-#define erts_atomic_inc_wb erts_no_atomic_inc
-#define erts_atomic_dec_wb erts_no_atomic_dec
-#define erts_atomic_add_read_wb erts_no_atomic_add_read
-#define erts_atomic_add_wb erts_no_atomic_add
-#define erts_atomic_read_bor_wb erts_no_atomic_read_bor
-#define erts_atomic_read_band_wb erts_no_atomic_read_band
-#define erts_atomic_xchg_wb erts_no_atomic_xchg
-#define erts_atomic_cmpxchg_wb erts_no_atomic_cmpxchg
-#define erts_atomic_read_bset_wb erts_no_atomic_read_bset
-
-#define erts_atomic_set_dirty erts_no_atomic_set
-#define erts_atomic_read_dirty erts_no_atomic_read
+#undef ERTS_DW_SINT_TO_AINT64__
+#undef ERTS_AINT64_TO_DW_SINT__
-/* 32-bit atomics */
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#endif /* ARCH_32 */
-#define erts_atomic32_init_nob erts_no_atomic32_set
-#define erts_atomic32_set_nob erts_no_atomic32_set
-#define erts_atomic32_read_nob erts_no_atomic32_read
-#define erts_atomic32_inc_read_nob erts_no_atomic32_inc_read
-#define erts_atomic32_dec_read_nob erts_no_atomic32_dec_read
-#define erts_atomic32_inc_nob erts_no_atomic32_inc
-#define erts_atomic32_dec_nob erts_no_atomic32_dec
-#define erts_atomic32_add_read_nob erts_no_atomic32_add_read
-#define erts_atomic32_add_nob erts_no_atomic32_add
-#define erts_atomic32_read_bor_nob erts_no_atomic32_read_bor
-#define erts_atomic32_read_band_nob erts_no_atomic32_read_band
-#define erts_atomic32_xchg_nob erts_no_atomic32_xchg
-#define erts_atomic32_cmpxchg_nob erts_no_atomic32_cmpxchg
-#define erts_atomic32_read_bset_nob erts_no_atomic32_read_bset
-
-#define erts_atomic32_init_mb erts_no_atomic32_set
-#define erts_atomic32_set_mb erts_no_atomic32_set
-#define erts_atomic32_read_mb erts_no_atomic32_read
-#define erts_atomic32_inc_read_mb erts_no_atomic32_inc_read
-#define erts_atomic32_dec_read_mb erts_no_atomic32_dec_read
-#define erts_atomic32_inc_mb erts_no_atomic32_inc
-#define erts_atomic32_dec_mb erts_no_atomic32_dec
-#define erts_atomic32_add_read_mb erts_no_atomic32_add_read
-#define erts_atomic32_add_mb erts_no_atomic32_add
-#define erts_atomic32_read_bor_mb erts_no_atomic32_read_bor
-#define erts_atomic32_read_band_mb erts_no_atomic32_read_band
-#define erts_atomic32_xchg_mb erts_no_atomic32_xchg
-#define erts_atomic32_cmpxchg_mb erts_no_atomic32_cmpxchg
-#define erts_atomic32_read_bset_mb erts_no_atomic32_read_bset
-
-#define erts_atomic32_init_acqb erts_no_atomic32_set
-#define erts_atomic32_set_acqb erts_no_atomic32_set
-#define erts_atomic32_read_acqb erts_no_atomic32_read
-#define erts_atomic32_inc_read_acqb erts_no_atomic32_inc_read
-#define erts_atomic32_dec_read_acqb erts_no_atomic32_dec_read
-#define erts_atomic32_inc_acqb erts_no_atomic32_inc
-#define erts_atomic32_dec_acqb erts_no_atomic32_dec
-#define erts_atomic32_add_read_acqb erts_no_atomic32_add_read
-#define erts_atomic32_add_acqb erts_no_atomic32_add
-#define erts_atomic32_read_bor_acqb erts_no_atomic32_read_bor
-#define erts_atomic32_read_band_acqb erts_no_atomic32_read_band
-#define erts_atomic32_xchg_acqb erts_no_atomic32_xchg
-#define erts_atomic32_cmpxchg_acqb erts_no_atomic32_cmpxchg
-#define erts_atomic32_read_bset_acqb erts_no_atomic32_read_bset
-
-#define erts_atomic32_init_relb erts_no_atomic32_set
-#define erts_atomic32_set_relb erts_no_atomic32_set
-#define erts_atomic32_read_relb erts_no_atomic32_read
-#define erts_atomic32_inc_read_relb erts_no_atomic32_inc_read
-#define erts_atomic32_dec_read_relb erts_no_atomic32_dec_read
-#define erts_atomic32_inc_relb erts_no_atomic32_inc
-#define erts_atomic32_dec_relb erts_no_atomic32_dec
-#define erts_atomic32_add_read_relb erts_no_atomic32_add_read
-#define erts_atomic32_add_relb erts_no_atomic32_add
-#define erts_atomic32_read_bor_relb erts_no_atomic32_read_bor
-#define erts_atomic32_read_band_relb erts_no_atomic32_read_band
-#define erts_atomic32_xchg_relb erts_no_atomic32_xchg
-#define erts_atomic32_cmpxchg_relb erts_no_atomic32_cmpxchg
-#define erts_atomic32_read_bset_relb erts_no_atomic32_read_bset
-
-#define erts_atomic32_init_ddrb erts_no_atomic32_set
-#define erts_atomic32_set_ddrb erts_no_atomic32_set
-#define erts_atomic32_read_ddrb erts_no_atomic32_read
-#define erts_atomic32_inc_read_ddrb erts_no_atomic32_inc_read
-#define erts_atomic32_dec_read_ddrb erts_no_atomic32_dec_read
-#define erts_atomic32_inc_ddrb erts_no_atomic32_inc
-#define erts_atomic32_dec_ddrb erts_no_atomic32_dec
-#define erts_atomic32_add_read_ddrb erts_no_atomic32_add_read
-#define erts_atomic32_add_ddrb erts_no_atomic32_add
-#define erts_atomic32_read_bor_ddrb erts_no_atomic32_read_bor
-#define erts_atomic32_read_band_ddrb erts_no_atomic32_read_band
-#define erts_atomic32_xchg_ddrb erts_no_atomic32_xchg
-#define erts_atomic32_cmpxchg_ddrb erts_no_atomic32_cmpxchg
-#define erts_atomic32_read_bset_ddrb erts_no_atomic32_read_bset
-
-#define erts_atomic32_init_rb erts_no_atomic32_set
-#define erts_atomic32_set_rb erts_no_atomic32_set
-#define erts_atomic32_read_rb erts_no_atomic32_read
-#define erts_atomic32_inc_read_rb erts_no_atomic32_inc_read
-#define erts_atomic32_dec_read_rb erts_no_atomic32_dec_read
-#define erts_atomic32_inc_rb erts_no_atomic32_inc
-#define erts_atomic32_dec_rb erts_no_atomic32_dec
-#define erts_atomic32_add_read_rb erts_no_atomic32_add_read
-#define erts_atomic32_add_rb erts_no_atomic32_add
-#define erts_atomic32_read_bor_rb erts_no_atomic32_read_bor
-#define erts_atomic32_read_band_rb erts_no_atomic32_read_band
-#define erts_atomic32_xchg_rb erts_no_atomic32_xchg
-#define erts_atomic32_cmpxchg_rb erts_no_atomic32_cmpxchg
-#define erts_atomic32_read_bset_rb erts_no_atomic32_read_bset
-
-#define erts_atomic32_init_wb erts_no_atomic32_set
-#define erts_atomic32_set_wb erts_no_atomic32_set
-#define erts_atomic32_read_wb erts_no_atomic32_read
-#define erts_atomic32_inc_read_wb erts_no_atomic32_inc_read
-#define erts_atomic32_dec_read_wb erts_no_atomic32_dec_read
-#define erts_atomic32_inc_wb erts_no_atomic32_inc
-#define erts_atomic32_dec_wb erts_no_atomic32_dec
-#define erts_atomic32_add_read_wb erts_no_atomic32_add_read
-#define erts_atomic32_add_wb erts_no_atomic32_add
-#define erts_atomic32_read_bor_wb erts_no_atomic32_read_bor
-#define erts_atomic32_read_band_wb erts_no_atomic32_read_band
-#define erts_atomic32_xchg_wb erts_no_atomic32_xchg
-#define erts_atomic32_cmpxchg_wb erts_no_atomic32_cmpxchg
-#define erts_atomic32_read_bset_wb erts_no_atomic32_read_bset
-
-#define erts_atomic32_set_dirty erts_no_atomic32_set
-#define erts_atomic32_read_dirty erts_no_atomic32_read
-
-#endif /* !USE_THREADS */
+
+#include "erl_msacc.h"
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
erts_thr_init(erts_thr_init_data_t *id)
{
-#ifdef USE_THREADS
int res = ethr_init(id);
if (res)
erts_thr_fatal_error(res, "initialize thread library");
-#endif
}
ERTS_GLB_INLINE void
erts_thr_late_init(erts_thr_late_init_data_t *id)
{
-#ifdef USE_THREADS
int res = ethr_late_init(id);
if (res)
erts_thr_fatal_error(res, "complete initialization of thread library");
-#endif
}
ERTS_GLB_INLINE void
erts_thr_create(erts_tid_t *tid, void * (*func)(void *), void *arg,
erts_thr_opts_t *opts)
{
-#ifdef USE_THREADS
int res = ethr_thr_create(tid, func, arg, opts);
if (res)
erts_thr_fatal_error(res, "create thread");
-#endif
}
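+/*
+ * Usage sketch (thread body and argument hypothetical; opts assumed to
+ * be initialized elsewhere, e.g. from ERTS_THR_OPTS_DEFAULT_INITER):
+ *
+ *     erts_tid_t tid;
+ *     erts_thr_create(&tid, my_thread_func, my_arg, &opts);
+ *     ...
+ *     erts_thr_join(tid, NULL);
+ *
+ * Note that all failures in these wrappers are fatal by design; no
+ * error code is returned to the caller.
+ */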
ERTS_GLB_INLINE void
erts_thr_join(erts_tid_t tid, void **thr_res)
{
-#ifdef USE_THREADS
int res = ethr_thr_join(tid, thr_res);
if (res)
erts_thr_fatal_error(res, "join thread");
-#endif
}
ERTS_GLB_INLINE void
erts_thr_detach(erts_tid_t tid)
{
-#ifdef USE_THREADS
int res = ethr_thr_detach(tid);
if (res)
erts_thr_fatal_error(res, "detach thread");
-#endif
}
ERTS_GLB_INLINE void
erts_thr_exit(void *res)
{
-#ifdef USE_THREADS
ethr_thr_exit(res);
erts_thr_fatal_error(0, "terminate thread");
-#endif
}
ERTS_GLB_INLINE void
erts_thr_install_exit_handler(void (*exit_handler)(void))
{
-#ifdef USE_THREADS
int res = ethr_install_exit_handler(exit_handler);
if (res != 0)
erts_thr_fatal_error(res, "install thread exit handler");
-#endif
}
ERTS_GLB_INLINE erts_tid_t
erts_thr_self(void)
{
-#ifdef USE_THREADS
return ethr_self();
-#else
- return 0;
-#endif
+}
+
+ERTS_GLB_INLINE int
+erts_thr_getname(erts_tid_t tid, char *buf, size_t len)
+{
+ return ethr_getname(tid, buf, len);
}
ERTS_GLB_INLINE int
erts_equal_tids(erts_tid_t x, erts_tid_t y)
{
-#ifdef USE_THREADS
return ethr_equal_tids(x, y);
-#else
- return 1;
-#endif
}
ERTS_GLB_INLINE void
-erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra, int enable_lcnt)
+erts_mtx_init(erts_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags)
{
-#ifdef USE_THREADS
int res = ethr_mutex_init(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "initialize mutex");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- if (enable_lcnt)
- erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
- else
- erts_lcnt_init_lock_x(&mtx->lcnt, NULL, ERTS_LCNT_LT_MUTEX, extra);
-#endif
-#endif
-}
+ if (res) {
+ erts_thr_fatal_error(res, "initialize mutex");
+ }
-ERTS_GLB_INLINE void
-erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt,
- int enable_lcnt)
-{
-#ifdef USE_THREADS
- int res = ethr_mutex_init(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "initialize mutex");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- if (enable_lcnt)
- erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX | opt, extra);
- else
- erts_lcnt_init_lock_x(&mtx->lcnt, NULL, ERTS_LCNT_LT_MUTEX | opt, extra);
-#endif
+ flags |= ERTS_LOCK_TYPE_MUTEX;
+#ifdef DEBUG
+ mtx->flags = flags;
#endif
-}
-
-ERTS_GLB_INLINE void
-erts_mtx_init_locked_x(erts_mtx_t *mtx, char *name, Eterm extra, int enable_lcnt)
-{
-#ifdef USE_THREADS
- int res = ethr_mutex_init(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "initialize mutex");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- if (enable_lcnt)
- erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
- else
- erts_lcnt_init_lock_x(&mtx->lcnt, NULL, ERTS_LCNT_LT_MUTEX, extra);
-#endif
- ethr_mutex_lock(&mtx->mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_trylock(1, &mtx->lc);
+ erts_lc_init_lock_x(&mtx->lc, name, flags, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_trylock(&mtx->lcnt, 1);
-#endif
+ erts_lcnt_init_ref_x(&mtx->lcnt, name, extra, flags);
#endif
}
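+/*
+ * Usage sketch for the new signature (call site hypothetical): callers
+ * now pass lock flags at init time instead of a bare lcnt switch, e.g.
+ *
+ *     erts_mtx_init(&my_mtx, "my_mutex", NIL,
+ *                   ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+ *
+ * The type bit (ERTS_LOCK_TYPE_MUTEX) is or:ed in here, so callers
+ * supply only category/property flags. The rwmtx, spinlock and rwlock
+ * initializers below follow the same pattern.
+ */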
ERTS_GLB_INLINE void
-erts_mtx_init(erts_mtx_t *mtx, char *name)
+erts_mtx_init_locked(erts_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags)
{
-#ifdef USE_THREADS
- int res = ethr_mutex_init(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "initialize mutex");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX);
-#endif
-#endif
-}
+ erts_mtx_init(mtx, name, extra, flags);
-ERTS_GLB_INLINE void
-erts_mtx_init_locked(erts_mtx_t *mtx, char *name)
-{
-#ifdef USE_THREADS
- int res = ethr_mutex_init(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "initialize mutex");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX);
-#endif
ethr_mutex_lock(&mtx->mtx);
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_trylock(1, &mtx->lc);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_trylock(&mtx->lcnt, 1);
-#endif
-#endif
+#ifdef ERTS_ENABLE_LOCK_CHECK
+    erts_lc_trylock(1, &mtx->lc);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+    erts_lcnt_trylock(&mtx->lcnt, 1);
+#endif
}
ERTS_GLB_INLINE void
erts_mtx_destroy(erts_mtx_t *mtx)
{
-#ifdef USE_THREADS
int res;
+
+ ASSERT(!(mtx->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC));
+
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_destroy_lock(&mtx->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_destroy_lock(&mtx->lcnt);
+ erts_lcnt_uninstall(&mtx->lcnt);
#endif
res = ethr_mutex_destroy(&mtx->mtx);
if (res != 0) {
@@ -1687,7 +1660,6 @@ erts_mtx_destroy(erts_mtx_t *mtx)
#endif
erts_thr_fatal_error(res, "destroy mutex");
}
-#endif
}
ERTS_GLB_INLINE int
@@ -1697,7 +1669,6 @@ erts_mtx_trylock_x(erts_mtx_t *mtx, char *file, unsigned int line)
erts_mtx_trylock(erts_mtx_t *mtx)
#endif
{
-#ifdef USE_THREADS
int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -1719,9 +1690,6 @@ erts_mtx_trylock(erts_mtx_t *mtx)
erts_lcnt_trylock(&mtx->lcnt, res);
#endif
return res;
-#else
- return 0;
-#endif
}
@@ -1732,7 +1700,6 @@ erts_mtx_lock_x(erts_mtx_t *mtx, char *file, unsigned int line)
erts_mtx_lock(erts_mtx_t *mtx)
#endif
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
erts_lc_lock_x(&mtx->lc, file, line);
@@ -1747,13 +1714,11 @@ erts_mtx_lock(erts_mtx_t *mtx)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&mtx->lcnt, file, line);
#endif
-#endif
}
ERTS_GLB_INLINE void
erts_mtx_unlock(erts_mtx_t *mtx)
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock(&mtx->lc);
#endif
@@ -1761,16 +1726,16 @@ erts_mtx_unlock(erts_mtx_t *mtx)
erts_lcnt_unlock(&mtx->lcnt);
#endif
ethr_mutex_unlock(&mtx->mtx);
-#endif
}
ERTS_GLB_INLINE int
erts_lc_mtx_is_locked(erts_mtx_t *mtx)
{
-#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
int res;
erts_lc_lock_t lc = mtx->lc;
- lc.flags = 0;
+ lc.flags = ERTS_LOCK_FLAGS_TYPE_MUTEX;
+ lc.taken_options = 0;
erts_lc_have_locks(&res, &lc, 1);
return res;
#else
@@ -1781,17 +1746,14 @@ erts_lc_mtx_is_locked(erts_mtx_t *mtx)
ERTS_GLB_INLINE void
erts_cnd_init(erts_cnd_t *cnd)
{
-#ifdef USE_THREADS
int res = ethr_cond_init(cnd);
if (res)
erts_thr_fatal_error(res, "initialize condition variable");
-#endif
}
ERTS_GLB_INLINE void
erts_cnd_destroy(erts_cnd_t *cnd)
{
-#ifdef USE_THREADS
int res = ethr_cond_destroy(cnd);
if (res != 0) {
#ifdef ERTS_THR_HAVE_BUSY_DESTROY_BUG
@@ -1804,14 +1766,13 @@ erts_cnd_destroy(erts_cnd_t *cnd)
#endif
erts_thr_fatal_error(res, "destroy condition variable");
}
-#endif
}
ERTS_GLB_INLINE void
erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx)
{
-#ifdef USE_THREADS
int res;
+ ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock(&mtx->lc);
#endif
@@ -1830,7 +1791,7 @@ erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx)
#endif
if (res != 0 && res != EINTR)
erts_thr_fatal_error(res, "wait on condition variable");
-#endif
+ ERTS_MSACC_POP_STATE();
}
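+
+/*
+ * The ERTS_MSACC_PUSH_AND_SET_STATE()/ERTS_MSACC_POP_STATE() pair
+ * above brackets the blocking wait so that microstate accounting (see
+ * erl_msacc.h, included above) attributes the blocked time to the
+ * sleep state rather than to whatever state the thread was in when it
+ * called erts_cnd_wait().
+ */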
/*
@@ -1846,18 +1807,14 @@ erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx)
ERTS_GLB_INLINE void
erts_cnd_signal(erts_cnd_t *cnd)
{
-#ifdef USE_THREADS
ethr_cond_signal(cnd);
-#endif
}
ERTS_GLB_INLINE void
erts_cnd_broadcast(erts_cnd_t *cnd)
{
-#ifdef USE_THREADS
ethr_cond_broadcast(cnd);
-#endif
}
/* rwmutex */
@@ -1865,81 +1822,54 @@ erts_cnd_broadcast(erts_cnd_t *cnd)
ERTS_GLB_INLINE void
erts_rwmtx_set_reader_group(int no)
{
-#ifdef USE_THREADS
int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_check_no_locked_of_type(ERTS_LC_FLG_LT_RWMUTEX);
+ erts_lc_check_no_locked_of_type(ERTS_LOCK_TYPE_RWMUTEX);
#endif
res = ethr_rwmutex_set_reader_group(no);
if (res != 0)
erts_thr_fatal_error(res, "set reader group");
-#endif
}
ERTS_GLB_INLINE void
-erts_rwmtx_init_opt_x(erts_rwmtx_t *rwmtx,
- erts_rwmtx_opt_t *opt,
- char *name,
- Eterm extra)
-{
-#ifdef USE_THREADS
+erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx, erts_rwmtx_opt_t *opt,
+ char *name, Eterm extra, erts_lock_flags_t flags) {
int res = ethr_rwmutex_init_opt(&rwmtx->rwmtx, opt);
- if (res != 0)
- erts_thr_fatal_error(res, "initialize rwmutex");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&rwmtx->lc, name, ERTS_LC_FLG_LT_RWMUTEX, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- if (name && name[0] == '\0')
- erts_lcnt_init_lock_x(&rwmtx->lcnt, NULL, ERTS_LCNT_LT_RWMUTEX, extra);
- else
- erts_lcnt_init_lock_x(&rwmtx->lcnt, name, ERTS_LCNT_LT_RWMUTEX, extra);
-#endif
-#endif
-}
+ if (res != 0) {
+ erts_thr_fatal_error(res, "initialize rwmutex");
+ }
-ERTS_GLB_INLINE void
-erts_rwmtx_init_x(erts_rwmtx_t *rwmtx,
- char *name,
- Eterm extra)
-{
- erts_rwmtx_init_opt_x(rwmtx, NULL, name, extra);
-}
+ flags |= ERTS_LOCK_TYPE_RWMUTEX;
+#ifdef DEBUG
+ rwmtx->flags = flags;
+#endif
-ERTS_GLB_INLINE void
-erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx,
- erts_rwmtx_opt_t *opt,
- char *name)
-{
-#ifdef USE_THREADS
- int res = ethr_rwmutex_init_opt(&rwmtx->rwmtx, opt);
- if (res != 0)
- erts_thr_fatal_error(res, "initialize rwmutex");
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock(&rwmtx->lc, name, ERTS_LC_FLG_LT_RWMUTEX);
+ erts_lc_init_lock_x(&rwmtx->lc, name, flags, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock(&rwmtx->lcnt, name, ERTS_LCNT_LT_RWMUTEX);
-#endif
+ erts_lcnt_init_ref_x(&rwmtx->lcnt, name, extra, flags);
#endif
}
ERTS_GLB_INLINE void
-erts_rwmtx_init(erts_rwmtx_t *rwmtx, char *name)
-{
- erts_rwmtx_init_opt(rwmtx, NULL, name);
+erts_rwmtx_init(erts_rwmtx_t *rwmtx, char *name, Eterm extra,
+ erts_lock_flags_t flags) {
+ erts_rwmtx_init_opt(rwmtx, NULL, name, extra, flags);
}
ERTS_GLB_INLINE void
erts_rwmtx_destroy(erts_rwmtx_t *rwmtx)
{
-#ifdef USE_THREADS
int res;
+
+ ASSERT(!(rwmtx->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC));
+
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_destroy_lock(&rwmtx->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_destroy_lock(&rwmtx->lcnt);
+ erts_lcnt_uninstall(&rwmtx->lcnt);
#endif
res = ethr_rwmutex_destroy(&rwmtx->rwmtx);
if (res != 0) {
@@ -1953,7 +1883,6 @@ erts_rwmtx_destroy(erts_rwmtx_t *rwmtx)
#endif
erts_thr_fatal_error(res, "destroy rwmutex");
}
-#endif
}
ERTS_GLB_INLINE int
@@ -1963,11 +1892,10 @@ erts_rwmtx_tryrlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line)
erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx)
#endif
{
-#ifdef USE_THREADS
int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
- if (erts_lc_trylock_force_busy_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ))
+ if (erts_lc_trylock_force_busy_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_READ))
return EBUSY; /* Make sure caller can handle the situation without
causing a lock order violation */
#endif
@@ -1976,19 +1904,16 @@ erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx)
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
- erts_lc_trylock_flg_x(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ,file,line);
+ erts_lc_trylock_flg_x(res == 0, &rwmtx->lc, ERTS_LOCK_OPTIONS_READ,file,line);
#else
- erts_lc_trylock_flg(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ);
+ erts_lc_trylock_flg(res == 0, &rwmtx->lc, ERTS_LOCK_OPTIONS_READ);
#endif
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LCNT_LO_READ);
+ erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LOCK_OPTIONS_READ);
#endif
return res;
-#else
- return 0;
-#endif
}
ERTS_GLB_INLINE void
@@ -1998,36 +1923,32 @@ erts_rwmtx_rlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line)
erts_rwmtx_rlock(erts_rwmtx_t *rwmtx)
#endif
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
- erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LC_FLG_LO_READ,file,line);
+ erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LOCK_OPTIONS_READ,file,line);
#else
- erts_lc_lock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ);
+ erts_lc_lock_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_READ);
#endif
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ);
+ erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LOCK_OPTIONS_READ);
#endif
ethr_rwmutex_rlock(&rwmtx->rwmtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&rwmtx->lcnt, file, line);
#endif
-#endif
}
ERTS_GLB_INLINE void
erts_rwmtx_runlock(erts_rwmtx_t *rwmtx)
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_unlock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ);
+ erts_lc_unlock_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_READ);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ);
+ erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LOCK_OPTIONS_READ);
#endif
ethr_rwmutex_runlock(&rwmtx->rwmtx);
-#endif
}
@@ -2038,11 +1959,10 @@ erts_rwmtx_tryrwlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line)
erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx)
#endif
{
-#ifdef USE_THREADS
int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
- if (erts_lc_trylock_force_busy_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE))
+ if (erts_lc_trylock_force_busy_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR))
return EBUSY; /* Make sure caller can handle the situation without
causing a lock order violation */
#endif
@@ -2051,19 +1971,16 @@ erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx)
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
- erts_lc_trylock_flg_x(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE,file,line);
+ erts_lc_trylock_flg_x(res == 0, &rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR,file,line);
#else
- erts_lc_trylock_flg(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
+ erts_lc_trylock_flg(res == 0, &rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR);
#endif
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LOCK_OPTIONS_RDWR);
#endif
return res;
-#else
- return 0;
-#endif
}
ERTS_GLB_INLINE void
@@ -2073,36 +1990,32 @@ erts_rwmtx_rwlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line)
erts_rwmtx_rwlock(erts_rwmtx_t *rwmtx)
#endif
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
- erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE,file,line);
+ erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR,file,line);
#else
- erts_lc_lock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
+ erts_lc_lock_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR);
#endif
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LOCK_OPTIONS_RDWR);
#endif
ethr_rwmutex_rwlock(&rwmtx->rwmtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&rwmtx->lcnt, file, line);
#endif
-#endif
}
ERTS_GLB_INLINE void
erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx)
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_unlock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
+ erts_lc_unlock_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LOCK_OPTIONS_RDWR);
#endif
ethr_rwmutex_rwunlock(&rwmtx->rwmtx);
-#endif
}
#if 0 /* The following rwmtx function names are
@@ -2134,10 +2047,11 @@ erts_rwmtx_wunlock(erts_rwmtx_t *rwmtx)
ERTS_GLB_INLINE int
erts_lc_rwmtx_is_rlocked(erts_rwmtx_t *mtx)
{
-#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
int res;
erts_lc_lock_t lc = mtx->lc;
- lc.flags = ERTS_LC_FLG_LO_READ;
+ lc.flags = ERTS_LOCK_TYPE_RWMUTEX;
+ lc.taken_options = ERTS_LOCK_OPTIONS_READ;
erts_lc_have_locks(&res, &lc, 1);
return res;
#else
@@ -2148,10 +2062,11 @@ erts_lc_rwmtx_is_rlocked(erts_rwmtx_t *mtx)
ERTS_GLB_INLINE int
erts_lc_rwmtx_is_rwlocked(erts_rwmtx_t *mtx)
{
-#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
int res;
erts_lc_lock_t lc = mtx->lc;
- lc.flags = ERTS_LC_FLG_LO_READ|ERTS_LC_FLG_LO_WRITE;
+ lc.flags = ERTS_LOCK_TYPE_RWMUTEX;
+ lc.taken_options = ERTS_LOCK_OPTIONS_RDWR;
erts_lc_have_locks(&res, &lc, 1);
return res;
#else
@@ -2159,298 +2074,41 @@ erts_lc_rwmtx_is_rwlocked(erts_rwmtx_t *mtx)
#endif
}
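+
+/*
+ * Note on the lock-checker queries above: a lock is now described by
+ * its type (lc.flags) plus the options it was taken with
+ * (lc.taken_options), replacing the old combined ERTS_LC_FLG_* bits.
+ * An rwmutex held for reading is therefore matched with
+ * flags = ERTS_LOCK_TYPE_RWMUTEX and
+ * taken_options = ERTS_LOCK_OPTIONS_READ.
+ */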
-/* No atomic ops */
-
-ERTS_GLB_INLINE void
-erts_no_dw_atomic_set(erts_no_dw_atomic_t *var, erts_no_dw_atomic_t *val)
-{
- var->sint[0] = val->sint[0];
- var->sint[1] = val->sint[1];
-}
-
-ERTS_GLB_INLINE void
-erts_no_dw_atomic_read(erts_no_dw_atomic_t *var, erts_no_dw_atomic_t *val)
-{
- val->sint[0] = var->sint[0];
- val->sint[1] = var->sint[1];
-}
-
-ERTS_GLB_INLINE int erts_no_dw_atomic_cmpxchg(erts_no_dw_atomic_t *var,
- erts_no_dw_atomic_t *new_val,
- erts_no_dw_atomic_t *old_val)
-{
- if (var->sint[0] != old_val->sint[0] || var->sint[1] != old_val->sint[1]) {
- erts_no_dw_atomic_read(var, old_val);
- return 0;
- }
- else {
- erts_no_dw_atomic_set(var, new_val);
- return !0;
- }
-}
-
-ERTS_GLB_INLINE void
-erts_no_atomic_set(erts_no_atomic_t *var, erts_aint_t i)
-{
- *var = i;
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_no_atomic_read(erts_no_atomic_t *var)
-{
- return *var;
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_no_atomic_inc_read(erts_no_atomic_t *incp)
-{
- return ++(*incp);
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_no_atomic_dec_read(erts_no_atomic_t *decp)
-{
- return --(*decp);
-}
-
-ERTS_GLB_INLINE void
-erts_no_atomic_inc(erts_no_atomic_t *incp)
-{
- ++(*incp);
-}
-
-ERTS_GLB_INLINE void
-erts_no_atomic_dec(erts_no_atomic_t *decp)
-{
- --(*decp);
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_no_atomic_add_read(erts_no_atomic_t *addp, erts_aint_t i)
-{
- return *addp += i;
-}
-
-ERTS_GLB_INLINE void
-erts_no_atomic_add(erts_no_atomic_t *addp, erts_aint_t i)
-{
- *addp += i;
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_no_atomic_read_bor(erts_no_atomic_t *var, erts_aint_t mask)
-{
- erts_aint_t old;
- old = *var;
- *var |= mask;
- return old;
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_no_atomic_read_band(erts_no_atomic_t *var, erts_aint_t mask)
-{
- erts_aint_t old;
- old = *var;
- *var &= mask;
- return old;
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_no_atomic_xchg(erts_no_atomic_t *xchgp, erts_aint_t new)
-{
- erts_aint_t old = *xchgp;
- *xchgp = new;
- return old;
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_no_atomic_cmpxchg(erts_no_atomic_t *xchgp,
- erts_aint_t new,
- erts_aint_t expected)
-{
- erts_aint_t old = *xchgp;
- if (old == expected)
- *xchgp = new;
- return old;
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_no_atomic_read_bset(erts_no_atomic_t *var,
- erts_aint_t mask,
- erts_aint_t set)
-{
- erts_aint_t old = *var;
- *var &= ~mask;
- *var |= (mask & set);
- return old;
-}
-
-/* atomic32 */
-
-ERTS_GLB_INLINE void
-erts_no_atomic32_set(erts_no_atomic32_t *var, erts_aint32_t i)
-{
- *var = i;
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_no_atomic32_read(erts_no_atomic32_t *var)
-{
- return *var;
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_no_atomic32_inc_read(erts_no_atomic32_t *incp)
-{
- return ++(*incp);
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_no_atomic32_dec_read(erts_no_atomic32_t *decp)
-{
- return --(*decp);
-}
-
-ERTS_GLB_INLINE void
-erts_no_atomic32_inc(erts_no_atomic32_t *incp)
-{
- ++(*incp);
-}
-
-ERTS_GLB_INLINE void
-erts_no_atomic32_dec(erts_no_atomic32_t *decp)
-{
- --(*decp);
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_no_atomic32_add_read(erts_no_atomic32_t *addp, erts_aint32_t i)
-{
- return *addp += i;
-}
-
-ERTS_GLB_INLINE void
-erts_no_atomic32_add(erts_no_atomic32_t *addp, erts_aint32_t i)
-{
- *addp += i;
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_no_atomic32_read_bor(erts_no_atomic32_t *var, erts_aint32_t mask)
-{
- erts_aint32_t old;
- old = *var;
- *var |= mask;
- return old;
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_no_atomic32_read_band(erts_no_atomic32_t *var, erts_aint32_t mask)
-{
- erts_aint32_t old;
- old = *var;
- *var &= mask;
- return old;
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_no_atomic32_xchg(erts_no_atomic32_t *xchgp, erts_aint32_t new)
-{
- erts_aint32_t old = *xchgp;
- *xchgp = new;
- return old;
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_no_atomic32_cmpxchg(erts_no_atomic32_t *xchgp,
- erts_aint32_t new,
- erts_aint32_t expected)
-{
- erts_aint32_t old = *xchgp;
- if (old == expected)
- *xchgp = new;
- return old;
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_no_atomic32_read_bset(erts_no_atomic32_t *var,
- erts_aint32_t mask,
- erts_aint32_t set)
-{
- erts_aint32_t old = *var;
- *var &= ~mask;
- *var |= (mask & set);
- return old;
-}
-
/* spinlock */
ERTS_GLB_INLINE void
-erts_spinlock_init_x(erts_spinlock_t *lock, char *name, Eterm extra)
+erts_spinlock_init(erts_spinlock_t *lock, char *name, Eterm extra, erts_lock_flags_t flags)
{
-#ifdef USE_THREADS
int res = ethr_spinlock_init(&lock->slck);
- if (res)
- erts_thr_fatal_error(res, "init spinlock");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&lock->lc, name, ERTS_LC_FLG_LT_SPINLOCK, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&lock->lcnt, name, ERTS_LCNT_LT_SPINLOCK, extra);
-#endif
-#else
- (void)lock;
-#endif
-}
+ if (res) {
+ erts_thr_fatal_error(res, "init spinlock");
+ }
-ERTS_GLB_INLINE void
-erts_spinlock_init_x_opt(erts_spinlock_t *lock, char *name, Eterm extra,
- Uint16 opt)
-{
-#ifdef USE_THREADS
- int res = ethr_spinlock_init(&lock->slck);
- if (res)
- erts_thr_fatal_error(res, "init spinlock");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&lock->lc, name, ERTS_LC_FLG_LT_SPINLOCK, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&lock->lcnt, name, ERTS_LCNT_LT_SPINLOCK|opt, extra);
+ flags |= ERTS_LOCK_TYPE_SPINLOCK;
+#ifdef DEBUG
+ lock->flags = flags;
#endif
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_spinlock_init(erts_spinlock_t *lock, char *name)
-{
-#ifdef USE_THREADS
- int res = ethr_spinlock_init(&lock->slck);
- if (res)
- erts_thr_fatal_error(res, "init spinlock");
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock(&lock->lc, name, ERTS_LC_FLG_LT_SPINLOCK);
+ erts_lc_init_lock_x(&lock->lc, name, flags, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock(&lock->lcnt, name, ERTS_LCNT_LT_SPINLOCK);
-#endif
-#else
- (void)lock;
+ erts_lcnt_init_ref_x(&lock->lcnt, name, extra, flags);
#endif
}
ERTS_GLB_INLINE void
erts_spinlock_destroy(erts_spinlock_t *lock)
{
-#ifdef USE_THREADS
int res;
+
+ ASSERT(!(lock->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC));
+
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_destroy_lock(&lock->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_destroy_lock(&lock->lcnt);
+ erts_lcnt_uninstall(&lock->lcnt);
#endif
res = ethr_spinlock_destroy(&lock->slck);
if (res != 0) {
@@ -2464,15 +2122,11 @@ erts_spinlock_destroy(erts_spinlock_t *lock)
#endif
erts_thr_fatal_error(res, "destroy rwlock");
}
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE void
erts_spin_unlock(erts_spinlock_t *lock)
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock(&lock->lc);
#endif
@@ -2480,9 +2134,6 @@ erts_spin_unlock(erts_spinlock_t *lock)
erts_lcnt_unlock(&lock->lcnt);
#endif
ethr_spin_unlock(&lock->slck);
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE void
@@ -2492,7 +2143,6 @@ erts_spin_lock_x(erts_spinlock_t *lock, char *file, unsigned int line)
erts_spin_lock(erts_spinlock_t *lock)
#endif
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
erts_lc_lock_x(&lock->lc,file,line);
@@ -2507,18 +2157,16 @@ erts_spin_lock(erts_spinlock_t *lock)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&lock->lcnt, file, line);
#endif
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE int
erts_lc_spinlock_is_locked(erts_spinlock_t *lock)
{
-#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
int res;
erts_lc_lock_t lc = lock->lc;
- lc.flags = 0;
+ lc.flags = ERTS_LOCK_TYPE_SPINLOCK;
+ lc.taken_options = 0;
erts_lc_have_locks(&res, &lc, 1);
return res;
#else
@@ -2529,51 +2177,38 @@ erts_lc_spinlock_is_locked(erts_spinlock_t *lock)
/* rwspinlock */
ERTS_GLB_INLINE void
-erts_rwlock_init_x(erts_rwlock_t *lock, char *name, Eterm extra)
+erts_rwlock_init(erts_rwlock_t *lock, char *name, Eterm extra, erts_lock_flags_t flags)
{
-#ifdef USE_THREADS
int res = ethr_rwlock_init(&lock->rwlck);
- if (res)
- erts_thr_fatal_error(res, "init rwlock");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&lock->lc, name, ERTS_LC_FLG_LT_RWSPINLOCK, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&lock->lcnt, name, ERTS_LCNT_LT_RWSPINLOCK, extra);
-#endif
-#else
- (void)lock;
+ if (res) {
+ erts_thr_fatal_error(res, "init rwlock");
+ }
+
+ flags |= ERTS_LOCK_TYPE_RWSPINLOCK;
+#ifdef DEBUG
+ lock->flags = flags;
#endif
-}
-ERTS_GLB_INLINE void
-erts_rwlock_init(erts_rwlock_t *lock, char *name)
-{
-#ifdef USE_THREADS
- int res = ethr_rwlock_init(&lock->rwlck);
- if (res)
- erts_thr_fatal_error(res, "init rwlock");
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock(&lock->lc, name, ERTS_LC_FLG_LT_RWSPINLOCK);
+ erts_lc_init_lock_x(&lock->lc, name, flags, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock(&lock->lcnt, name, ERTS_LCNT_LT_RWSPINLOCK);
-#endif
-#else
- (void)lock;
+ erts_lcnt_init_ref_x(&lock->lcnt, name, extra, flags);
#endif
}
ERTS_GLB_INLINE void
erts_rwlock_destroy(erts_rwlock_t *lock)
{
-#ifdef USE_THREADS
int res;
+
+ ASSERT(!(lock->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC));
+
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_destroy_lock(&lock->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_destroy_lock(&lock->lcnt);
+ erts_lcnt_uninstall(&lock->lcnt);
#endif
res = ethr_rwlock_destroy(&lock->rwlck);
if (res != 0) {
@@ -2587,25 +2222,18 @@ erts_rwlock_destroy(erts_rwlock_t *lock)
#endif
erts_thr_fatal_error(res, "destroy rwlock");
}
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE void
erts_read_unlock(erts_rwlock_t *lock)
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_unlock_flg(&lock->lc, ERTS_LC_FLG_LO_READ);
+ erts_lc_unlock_flg(&lock->lc, ERTS_LOCK_OPTIONS_READ);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LCNT_LO_READ);
+ erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LOCK_OPTIONS_READ);
#endif
ethr_read_unlock(&lock->rwlck);
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE void
@@ -2615,40 +2243,32 @@ erts_read_lock_x(erts_rwlock_t *lock, char *file, unsigned int line)
erts_read_lock(erts_rwlock_t *lock)
#endif
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
- erts_lc_lock_flg_x(&lock->lc, ERTS_LC_FLG_LO_READ,file,line);
+ erts_lc_lock_flg_x(&lock->lc, ERTS_LOCK_OPTIONS_READ,file,line);
#else
- erts_lc_lock_flg(&lock->lc, ERTS_LC_FLG_LO_READ);
+ erts_lc_lock_flg(&lock->lc, ERTS_LOCK_OPTIONS_READ);
#endif
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_opt(&lock->lcnt, ERTS_LCNT_LO_READ);
+ erts_lcnt_lock_opt(&lock->lcnt, ERTS_LOCK_OPTIONS_READ);
#endif
ethr_read_lock(&lock->rwlck);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&lock->lcnt, file, line);
#endif
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE void
erts_write_unlock(erts_rwlock_t *lock)
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_unlock_flg(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE);
+ erts_lc_unlock_flg(&lock->lc, ERTS_LOCK_OPTIONS_RDWR);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LOCK_OPTIONS_RDWR);
#endif
ethr_write_unlock(&lock->rwlck);
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE void
@@ -2658,33 +2278,30 @@ erts_write_lock_x(erts_rwlock_t *lock, char *file, unsigned int line)
erts_write_lock(erts_rwlock_t *lock)
#endif
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
- erts_lc_lock_flg_x(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE,file,line);
+ erts_lc_lock_flg_x(&lock->lc, ERTS_LOCK_OPTIONS_RDWR,file,line);
#else
- erts_lc_lock_flg(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE);
+ erts_lc_lock_flg(&lock->lc, ERTS_LOCK_OPTIONS_RDWR);
#endif
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_opt(&lock->lcnt, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_lock_opt(&lock->lcnt, ERTS_LOCK_OPTIONS_RDWR);
#endif
ethr_write_lock(&lock->rwlck);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&lock->lcnt, file, line);
#endif
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE int
erts_lc_rwlock_is_rlocked(erts_rwlock_t *lock)
{
-#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
int res;
erts_lc_lock_t lc = lock->lc;
- lc.flags = ERTS_LC_FLG_LO_READ;
+ lc.flags = ERTS_LOCK_TYPE_RWSPINLOCK;
+ lc.taken_options = ERTS_LOCK_OPTIONS_READ;
erts_lc_have_locks(&res, &lc, 1);
return res;
#else
@@ -2695,10 +2312,11 @@ erts_lc_rwlock_is_rlocked(erts_rwlock_t *lock)
ERTS_GLB_INLINE int
erts_lc_rwlock_is_rwlocked(erts_rwlock_t *lock)
{
-#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
int res;
erts_lc_lock_t lc = lock->lc;
- lc.flags = ERTS_LC_FLG_LO_READ|ERTS_LC_FLG_LO_WRITE;
+ lc.flags = ERTS_LOCK_TYPE_RWSPINLOCK;
+ lc.taken_options = ERTS_LOCK_OPTIONS_RDWR;
erts_lc_have_locks(&res, &lc, 1);
return res;
#else
@@ -2709,155 +2327,154 @@ erts_lc_rwlock_is_rwlocked(erts_rwlock_t *lock)
ERTS_GLB_INLINE void
erts_tsd_key_create(erts_tsd_key_t *keyp, char *keyname)
{
-#ifdef USE_THREADS
int res = ethr_tsd_key_create(keyp, keyname);
if (res)
erts_thr_fatal_error(res, "create thread specific data key");
-#endif
}
ERTS_GLB_INLINE void
erts_tsd_key_delete(erts_tsd_key_t key)
{
-#ifdef USE_THREADS
int res = ethr_tsd_key_delete(key);
if (res)
erts_thr_fatal_error(res, "delete thread specific data key");
-#endif
}
ERTS_GLB_INLINE void
erts_tsd_set(erts_tsd_key_t key, void *value)
{
-#ifdef USE_THREADS
int res = ethr_tsd_set(key, value);
if (res)
erts_thr_fatal_error(res, "set thread specific data");
-#endif
}
ERTS_GLB_INLINE void *
erts_tsd_get(erts_tsd_key_t key)
{
-#ifdef USE_THREADS
return ethr_tsd_get(key);
-#else
- return NULL;
-#endif
}
ERTS_GLB_INLINE erts_tse_t *erts_tse_fetch(void)
{
-#ifdef USE_THREADS
return (erts_tse_t *) ethr_get_ts_event();
-#else
- return (erts_tse_t *) NULL;
-#endif
}
ERTS_GLB_INLINE void erts_tse_return(erts_tse_t *ep)
{
-#ifdef USE_THREADS
ethr_leave_ts_event(ep);
-#endif
+}
+
+ERTS_GLB_INLINE void erts_tse_prepare_timed(erts_tse_t *ep)
+{
+ int res = ethr_event_prepare_timed(&((ethr_ts_event *) ep)->event);
+ if (res != 0)
+ erts_thr_fatal_error(res, "prepare timed");
}
ERTS_GLB_INLINE void erts_tse_set(erts_tse_t *ep)
{
-#ifdef USE_THREADS
ethr_event_set(&((ethr_ts_event *) ep)->event);
-#endif
}
ERTS_GLB_INLINE void erts_tse_reset(erts_tse_t *ep)
{
-#ifdef USE_THREADS
ethr_event_reset(&((ethr_ts_event *) ep)->event);
-#endif
}
ERTS_GLB_INLINE int erts_tse_wait(erts_tse_t *ep)
{
-#ifdef USE_THREADS
- return ethr_event_wait(&((ethr_ts_event *) ep)->event);
-#else
- return ENOTSUP;
-#endif
+ int res;
+ ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP);
+ res = ethr_event_wait(&((ethr_ts_event *) ep)->event);
+ ERTS_MSACC_POP_STATE();
+ return res;
}
ERTS_GLB_INLINE int erts_tse_swait(erts_tse_t *ep, int spincount)
{
-#ifdef USE_THREADS
- return ethr_event_swait(&((ethr_ts_event *) ep)->event, spincount);
-#else
- return ENOTSUP;
-#endif
+ int res;
+ ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP);
+ res = ethr_event_swait(&((ethr_ts_event *) ep)->event, spincount);
+ ERTS_MSACC_POP_STATE();
+ return res;
+}
+
+ERTS_GLB_INLINE int erts_tse_twait(erts_tse_t *ep, Sint64 tmo)
+{
+ int res;
+ ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP);
+ res = ethr_event_twait(&((ethr_ts_event *) ep)->event,
+ (ethr_sint64_t) tmo);
+ ERTS_MSACC_POP_STATE();
+ return res;
+}
+
+ERTS_GLB_INLINE int erts_tse_stwait(erts_tse_t *ep, int spincount, Sint64 tmo)
+{
+ int res;
+ ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP);
+ res = ethr_event_stwait(&((ethr_ts_event *) ep)->event,
+ spincount,
+ (ethr_sint64_t) tmo);
+ ERTS_MSACC_POP_STATE();
+ return res;
}
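+
+/*
+ * Wait sketch (condition and values hypothetical): a waiter typically
+ * resets the event, re-checks its condition, and then blocks, possibly
+ * spinning first and/or bounding the wait with a timeout:
+ *
+ *     erts_tse_reset(ep);
+ *     if (!work_available())
+ *         res = erts_tse_stwait(ep, spincount, tmo);
+ *
+ * As with erts_tse_wait(), time spent blocked in the timed variants is
+ * accounted to the sleep microstate.
+ */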
ERTS_GLB_INLINE int erts_tse_is_tmp(erts_tse_t *ep)
{
-#ifdef USE_THREADS
return (ep->iflgs & ETHR_TS_EV_TMP) == ETHR_TS_EV_TMP;
-#else
- return 0;
-#endif
}
ERTS_GLB_INLINE void erts_thr_set_main_status(int on, int no)
{
-#ifdef USE_THREADS
int res = ethr_set_main_thr_status(on, no);
if (res != 0)
erts_thr_fatal_error(res, "set thread main status");
-#endif
}
ERTS_GLB_INLINE int erts_thr_get_main_status(void)
{
-#ifdef USE_THREADS
int main_status;
int res = ethr_get_main_thr_status(&main_status);
if (res != 0)
erts_thr_fatal_error(res, "get thread main status");
return main_status;
-#else
- return 1;
-#endif
}
ERTS_GLB_INLINE void erts_thr_yield(void)
{
-#ifdef USE_THREADS
int res = ETHR_YIELD();
if (res != 0)
erts_thr_fatal_error(res, "yield");
-#endif
}
#ifdef ETHR_HAVE_ETHR_SIG_FUNCS
ERTS_GLB_INLINE void
+erts_thr_kill(erts_tid_t tid, int sig) {
+ int res = ethr_kill((ethr_tid)tid, sig);
+ if (res)
+ erts_thr_fatal_error(res, "killing thread");
+}
+
+ERTS_GLB_INLINE void
erts_thr_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
-#ifdef USE_THREADS
int res = ethr_sigmask(how, set, oset);
if (res)
erts_thr_fatal_error(res, "get or set signal mask");
-#endif
}
ERTS_GLB_INLINE void
erts_thr_sigwait(const sigset_t *set, int *sig)
{
-#ifdef USE_THREADS
int res;
do {
res = ethr_sigwait(set, sig);
} while (res == EINTR);
if (res)
erts_thr_fatal_error(res, "to wait for signal");
-#endif
}
#endif /* #ifdef HAVE_ETHR_SIG_FUNCS */
@@ -2865,37 +2482,3 @@ erts_thr_sigwait(const sigset_t *set, int *sig)
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
#endif /* #ifndef ERL_THREAD_H__ */
-
-#ifdef ERTS_UNDEF_DEPRECATED_ATOMICS
-
-/* Deprecated functions to replace */
-
-#undef erts_atomic_init
-#undef erts_atomic_set
-#undef erts_atomic_read
-#undef erts_atomic_inctest
-#undef erts_atomic_dectest
-#undef erts_atomic_inc
-#undef erts_atomic_dec
-#undef erts_atomic_addtest
-#undef erts_atomic_add
-#undef erts_atomic_xchg
-#undef erts_atomic_cmpxchg
-#undef erts_atomic_bor
-#undef erts_atomic_band
-
-#undef erts_atomic32_init
-#undef erts_atomic32_set
-#undef erts_atomic32_read
-#undef erts_atomic32_inctest
-#undef erts_atomic32_dectest
-#undef erts_atomic32_inc
-#undef erts_atomic32_dec
-#undef erts_atomic32_addtest
-#undef erts_atomic32_add
-#undef erts_atomic32_xchg
-#undef erts_atomic32_cmpxchg
-#undef erts_atomic32_bor
-#undef erts_atomic32_band
-
-#endif
diff --git a/erts/emulator/beam/erl_time.h b/erts/emulator/beam/erl_time.h
index 4bbdcaa3e3..27164d50a0 100644
--- a/erts/emulator/beam/erl_time.h
+++ b/erts/emulator/beam/erl_time.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -20,94 +21,82 @@
#ifndef ERL_TIME_H__
#define ERL_TIME_H__
-#define ERTS_SHORT_TIME_T_MAX ERTS_AINT32_T_MAX
-#define ERTS_SHORT_TIME_T_MIN ERTS_AINT32_T_MIN
-typedef erts_aint32_t erts_short_time_t;
+#if 0
+# define ERTS_TW_DEBUG
+#endif
+#if defined(DEBUG) && !defined(ERTS_TW_DEBUG)
+# define ERTS_TW_DEBUG
+#endif
-extern erts_smp_atomic32_t do_time; /* set at clock interrupt */
-extern SysTimeval erts_first_emu_time;
+#if defined(ERTS_TW_DEBUG)
+#define ERTS_TIME_ASSERT(B) ERTS_ASSERT(B)
+#else
+#define ERTS_TIME_ASSERT(B) ((void) 1)
+#endif
+#ifdef ERTS_TW_DEBUG
/*
-** Timer entry:
-*/
-typedef struct erl_timer {
- struct erl_timer* next; /* next entry tiw slot or chain */
- struct erl_timer* prev; /* prev entry tiw slot or chain */
- Uint slot; /* slot in timer wheel */
- Uint count; /* number of loops remaining */
- int active; /* 1=activated, 0=deactivated */
- /* called when timeout */
- void (*timeout)(void*);
- /* called when cancel (may be NULL) */
- void (*cancel)(void*);
- void* arg; /* argument to timeout/cancel procs */
-} ErlTimer;
-
-typedef void (*ErlTimeoutProc)(void*);
-typedef void (*ErlCancelProc)(void*);
-
-#ifdef ERTS_SMP
+ * Soon wheel will handle about 1 second
+ * Later wheel will handle about 8 minutes
+ */
+# define ERTS_TW_SOON_WHEEL_BITS 10
+# define ERTS_TW_LATER_WHEEL_BITS 10
+#else
+# ifdef SMALL_MEMORY
/*
- * Process and port timer
+ * Soon wheel will handle about 4 seconds
+ * Later wheel will handle about 2 hours and 19 minutes
*/
-typedef union ErtsSmpPTimer_ ErtsSmpPTimer;
-union ErtsSmpPTimer_ {
- struct {
- ErlTimer tm;
- Eterm id;
- void (*timeout_func)(void*);
- ErtsSmpPTimer **timer_ref;
- Uint32 flags;
- } timer;
- ErtsSmpPTimer *next;
-};
-
-
-void erts_create_smp_ptimer(ErtsSmpPTimer **timer_ref,
- Eterm id,
- ErlTimeoutProc timeout_func,
- Uint timeout);
-void erts_cancel_smp_ptimer(ErtsSmpPTimer *ptimer);
+# define ERTS_TW_SOON_WHEEL_BITS 12
+# define ERTS_TW_LATER_WHEEL_BITS 12
+# else
+/*
+ * Soon wheel will handle about 16 seconds
+ * Later wheel will handle about 37 hours and 16 minutes
+ */
+# define ERTS_TW_SOON_WHEEL_BITS 14
+# define ERTS_TW_LATER_WHEEL_BITS 14
+# endif
#endif
-/* timer-wheel api */
+/*
+ * Number of slots in each timer wheel...
+ *
+ * These *need* to be powers of 2
+ */
+#define ERTS_TW_SOON_WHEEL_SIZE (1 << ERTS_TW_SOON_WHEEL_BITS)
+#define ERTS_TW_LATER_WHEEL_SIZE (1 << ERTS_TW_LATER_WHEEL_BITS)
-void erts_init_time(void);
-void erts_set_timer(ErlTimer*, ErlTimeoutProc, ErlCancelProc, void*, Uint);
-void erts_cancel_timer(ErlTimer*);
-void erts_bump_timer(erts_short_time_t);
-Uint erts_timer_wheel_memory_size(void);
-Uint erts_time_left(ErlTimer *);
-erts_short_time_t erts_next_time(void);
+typedef enum {
+ ERTS_NO_TIME_WARP_MODE,
+ ERTS_SINGLE_TIME_WARP_MODE,
+ ERTS_MULTI_TIME_WARP_MODE
+} ErtsTimeWarpMode;
-#ifdef DEBUG
-void erts_p_slpq(void);
-#endif
+typedef struct ErtsTimerWheel_ ErtsTimerWheel;
+typedef ErtsMonotonicTime * ErtsNextTimeoutRef;
-ERTS_GLB_INLINE erts_short_time_t erts_do_time_read_and_reset(void);
-ERTS_GLB_INLINE void erts_do_time_add(erts_short_time_t);
+extern SysTimeval erts_first_emu_time;
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-ERTS_GLB_INLINE erts_short_time_t erts_do_time_read_and_reset(void)
-{
- erts_short_time_t time = erts_smp_atomic32_xchg_acqb(&do_time, 0);
- if (time < 0)
- erl_exit(ERTS_ABORT_EXIT, "Internal time management error\n");
- return time;
-}
+void erts_monitor_time_offset(Eterm id, Eterm ref);
+int erts_demonitor_time_offset(Eterm ref);
-ERTS_GLB_INLINE void erts_do_time_add(erts_short_time_t elapsed)
-{
- erts_smp_atomic32_add_relb(&do_time, elapsed);
-}
+int erts_init_time_sup(int, ErtsTimeWarpMode);
+void erts_late_init_time_sup(void);
-#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+ErtsNextTimeoutRef erts_get_next_timeout_reference(ErtsTimerWheel *);
+void erts_init_time(int time_correction, ErtsTimeWarpMode time_warp_mode);
+void erts_bump_timers(ErtsTimerWheel *, ErtsMonotonicTime);
+Uint erts_timer_wheel_memory_size(void);
+#ifdef DEBUG
+void erts_p_slpq(void);
+#endif
/* time_sup */
-#if (defined(HAVE_GETHRVTIME) || defined(HAVE_CLOCK_GETTIME))
+#if (defined(HAVE_GETHRVTIME) || defined(HAVE_CLOCK_GETTIME_CPU_TIME))
# ifndef HAVE_ERTS_NOW_CPU
# define HAVE_ERTS_NOW_CPU
# ifdef HAVE_GETHRVTIME
@@ -121,25 +110,405 @@ void erts_get_now_cpu(Uint* megasec, Uint* sec, Uint* microsec);
typedef UWord erts_approx_time_t;
erts_approx_time_t erts_get_approx_time(void);
-void erts_get_timeval(SysTimeval *tv);
-erts_time_t erts_get_time(void);
+int erts_has_time_correction(void);
+int erts_check_time_adj_support(int time_correction,
+ ErtsTimeWarpMode time_warp_mode);
+
+ErtsTimeWarpMode erts_time_warp_mode(void);
-ERTS_GLB_INLINE int erts_cmp_timeval(SysTimeval *t1p, SysTimeval *t2p);
+typedef enum {
+ ERTS_TIME_OFFSET_PRELIMINARY,
+ ERTS_TIME_OFFSET_FINAL,
+ ERTS_TIME_OFFSET_VOLATILE
+} ErtsTimeOffsetState;
+
+ErtsTimeOffsetState erts_time_offset_state(void);
+ErtsTimeOffsetState erts_finalize_time_offset(void);
+struct process;
+Eterm erts_get_monotonic_start_time(struct process *c_p);
+Eterm erts_get_monotonic_end_time(struct process *c_p);
+Eterm erts_monotonic_time_source(struct process *c_p);
+Eterm erts_system_time_source(struct process *c_p);
+
+void erts_runtime_elapsed_both(ErtsMonotonicTime *ms_user,
+ ErtsMonotonicTime *ms_sys,
+ ErtsMonotonicTime *ms_user_diff,
+ ErtsMonotonicTime *ms_sys_diff);
+void erts_wall_clock_elapsed_both(ErtsMonotonicTime *total,
+ ErtsMonotonicTime *diff);
+
+#ifdef SYS_CLOCK_RESOLUTION
+#define ERTS_CLKTCK_RESOLUTION ((ErtsMonotonicTime) (SYS_CLOCK_RESOLUTION*1000))
+#else
+#define ERTS_CLKTCK_RESOLUTION (erts_time_sup__.r.o.clktck_resolution)
+#endif
+
+#define ERTS_TW_SOON_WHEEL_MSEC (ERTS_TW_SOON_WHEEL_SIZE/(ERTS_CLKTCK_RESOLUTION/1000))
+#define ERTS_TW_LATER_WHEEL_MSEC (ERTS_TW_LATER_WHEEL_SIZE*ERTS_TW_SOON_WHEEL_MSEC/2)
+
+#define ERTS_TIMER_WHEEL_MSEC ERTS_TW_LATER_WHEEL_MSEC
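+/*
+ * Worked example: with the common 1000 Hz clock tick
+ * (ERTS_CLKTCK_RESOLUTION/1000 == 1 tick per msec) and 14 wheel bits,
+ *
+ *   soon span  = 2^14 slots * 1 msec/slot         = 16384 msec ~= 16 sec
+ *   later span = 2^14 slots * (16384/2) msec/slot ~= 37 h 16 min
+ *
+ * which is where the "about 16 seconds" and "about 37 hours and
+ * 16 minutes" figures above come from.
+ */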
+
+struct erts_time_sup_read_only__ {
+ ErtsMonotonicTime monotonic_time_unit;
+#if !ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT
+ ErtsMonotonicTime start;
+ struct {
+ ErtsMonotonicTime native;
+ ErtsMonotonicTime nsec;
+ ErtsMonotonicTime usec;
+ ErtsMonotonicTime msec;
+ ErtsMonotonicTime sec;
+ } start_offset;
+#endif
+#ifndef SYS_CLOCK_RESOLUTION
+ ErtsMonotonicTime clktck_resolution;
+#endif
+};
+
+typedef struct {
+ union {
+ struct erts_time_sup_read_only__ o;
+ char align__[(((sizeof(struct erts_time_sup_read_only__) - 1)
+ / ASSUMED_CACHE_LINE_SIZE) + 1)
+ * ASSUMED_CACHE_LINE_SIZE];
+ } r;
+} ErtsTimeSupData;
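+/*
+ * The align__ member rounds the union size up to a whole number of
+ * cache lines: ((sizeof(o) - 1)/LINE + 1)*LINE is sizeof(o) rounded
+ * up to the next multiple of LINE. This keeps the read-only data on
+ * its own cache line(s), avoiding false sharing with mutable state.
+ */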
+
+extern ErtsTimeSupData erts_time_sup__;
+
+ErtsMonotonicTime erts_napi_monotonic_time(int time_unit);
+ErtsMonotonicTime erts_napi_time_offset(int time_unit);
+ErtsMonotonicTime erts_napi_convert_time_unit(ErtsMonotonicTime val, int from, int to);
+
+ERTS_GLB_INLINE Uint64
+erts_time_unit_conversion(Uint64 value,
+ Uint32 from_time_unit,
+ Uint32 to_time_unit);
+
+ErtsSysPerfCounter erts_perf_counter_unit(void);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-ERTS_GLB_INLINE int
-erts_cmp_timeval(SysTimeval *t1p, SysTimeval *t2p)
+ERTS_GLB_INLINE Uint64
+erts_time_unit_conversion(Uint64 value,
+ Uint32 from_time_unit,
+ Uint32 to_time_unit)
{
- if (t1p->tv_sec == t2p->tv_sec) {
- if (t1p->tv_usec < t2p->tv_usec)
- return -1;
- else if (t1p->tv_usec > t2p->tv_usec)
- return 1;
- return 0;
- }
- return t1p->tv_sec < t2p->tv_sec ? -1 : 1;
+ Uint64 high, low, result;
+ if (value <= ~((Uint64) 0)/to_time_unit)
+ return (value*to_time_unit)/from_time_unit;
+
+ low = value & ((Uint64) 0xffffffff);
+ high = (value >> 32) & ((Uint64) 0xffffffff);
+
+ low *= to_time_unit;
+ high *= to_time_unit;
+
+ high += (low >> 32) & ((Uint64) 0xffffffff);
+ low &= ((Uint64) 0xffffffff);
+
+ result = high % from_time_unit;
+ high /= from_time_unit;
+ high <<= 32;
+
+ result <<= 32;
+ result += low;
+ result /= from_time_unit;
+ result += high;
+
+ return result;
}
-#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
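
The fast path above fires only when value*to_time_unit fits in 64 bits;
otherwise the function splits value into 32-bit halves and computes
(value*to)/from through a 96-bit intermediate, folding the remainder of
the high half into the low half before the final division. A
stand-alone sketch of the same idea (plain stdint types and
illustrative values, not part of the patch):

#include <assert.h>
#include <stdint.h>

static uint64_t conv(uint64_t value, uint32_t from, uint32_t to)
{
    uint64_t high, low, result;

    if (value <= UINT64_MAX / to)
        return (value * to) / from;  /* product fits in 64 bits */

    low  = value & 0xffffffffu;      /* split into 32-bit halves */
    high = value >> 32;

    low  *= to;
    high *= to;
    high += low >> 32;               /* carry into the high half */
    low  &= 0xffffffffu;

    result  = high % from;           /* remainder of the high half... */
    high   /= from;
    result  = (result << 32) + low;  /* ...folded into the low half */
    result /= from;

    return result + (high << 32);
}

int main(void)
{
    /* 2*10^13 ticks of a 10^9 Hz unit to a 10^6 Hz unit: the direct
     * product 2*10^19 overflows 64 bits, yet the result is exact. */
    assert(conv(20000000000000ull, 1000000000u, 1000000u)
           == 20000000000ull);
    return 0;
}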
+
+/*
+ * Internal range of monotonic time
+ */
+
+#define ERTS_MONOTONIC_BEGIN \
+ ERTS_MONOTONIC_TIME_UNIT
+#define ERTS_MONOTONIC_END \
+ ((ERTS_MONOTONIC_TIME_MAX / ERTS_MONOTONIC_TIME_UNIT) \
+ * ERTS_MONOTONIC_TIME_UNIT)
+
+#if ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT
+
+/*
+ * If the monotonic time unit is a compile time constant,
+ * it is assumed (and needs) to be a power of 10.
+ */
+
+#if ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT < 1000*1000
+# error Compile-time time unit needs to be at least 1000000
+#endif
+
+#if ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT == 1000*1000*1000
+/* Nano-second time unit */
+
+#define ERTS_MONOTONIC_TO_SEC__(NSEC) ((NSEC) / (1000*1000*1000))
+#define ERTS_MONOTONIC_TO_MSEC__(NSEC) ((NSEC) / (1000*1000))
+#define ERTS_MONOTONIC_TO_USEC__(NSEC) ((NSEC) / 1000)
+#define ERTS_MONOTONIC_TO_NSEC__(NSEC) (NSEC)
+
+#define ERTS_SEC_TO_MONOTONIC__(SEC) (((ErtsMonotonicTime) (SEC))*(1000*1000*1000))
+#define ERTS_MSEC_TO_MONOTONIC__(MSEC) (((ErtsMonotonicTime) (MSEC))*(1000*1000))
+#define ERTS_USEC_TO_MONOTONIC__(USEC) (((ErtsMonotonicTime) (USEC))*1000)
+#define ERTS_NSEC_TO_MONOTONIC__(NSEC) ((ErtsMonotonicTime) (NSEC))
+
+#elif ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT == 1000*1000
+/* Micro-second time unit */
+
+#define ERTS_MONOTONIC_TO_SEC__(USEC) ((USEC) / (1000*1000))
+#define ERTS_MONOTONIC_TO_MSEC__(USEC) ((USEC) / 1000)
+#define ERTS_MONOTONIC_TO_USEC__(USEC) (USEC)
+#define ERTS_MONOTONIC_TO_NSEC__(USEC) ((USEC)*1000)
+
+#define ERTS_SEC_TO_MONOTONIC__(SEC) (((ErtsMonotonicTime) (SEC))*(1000*1000))
+#define ERTS_MSEC_TO_MONOTONIC__(MSEC) (((ErtsMonotonicTime) (MSEC))*1000)
+#define ERTS_USEC_TO_MONOTONIC__(USEC) ((ErtsMonotonicTime) (USEC))
+#define ERTS_NSEC_TO_MONOTONIC__(NSEC) (((ErtsMonotonicTime) (NSEC))/1000)
+
+#else
+#error Missing implementation for monotonic time unit
+#endif
+
+#define ERTS_MONOTONIC_TIME_UNIT \
+ ((ErtsMonotonicTime) ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT)
+
+/*
+ * NOTE! ERTS_MONOTONIC_TIME_START_EXTERNAL *needs* to be a multiple
+ * of ERTS_MONOTONIC_TIME_UNIT.
+ */
+
+#ifdef ARCH_32
+/*
+ * Want to use a big-num of arity 2 as long as possible (584 years
+ * in the nano-second time unit case).
+ */
+#define ERTS_MONOTONIC_TIME_START_EXTERNAL \
+ (((((((ErtsMonotonicTime) 1) << 32)-1) \
+ / ERTS_MONOTONIC_TIME_UNIT) \
+ * ERTS_MONOTONIC_TIME_UNIT) \
+ + ERTS_MONOTONIC_TIME_UNIT)
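+/*
+ * (Two 32-bit limbs give values up to 2^64 - 1, and 2^64 nsec is
+ * roughly 584 years, matching the figure above.)
+ */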
+
+#else /* ARCH_64 */
+
+#if ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT <= 10*1000*1000
+
+/*
+ * Using a time unit of at most 10^7 ticks per second. Start at
+ * zero since time will remain an immediate for a very long time
+ * anyway (1827 years in the 10^7 ticks per second case)...
+ */
+#define ERTS_MONOTONIC_TIME_START_EXTERNAL ((ErtsMonotonicTime) 0)
+
+#else /* ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT > 10*1000*1000 */
+
+/*
+ * Want to use an immediate as long as possible (36 years in the
+ * nano-second time unit case).
+ */
+#define ERTS_MONOTONIC_TIME_START_EXTERNAL \
+ ((((ErtsMonotonicTime) MIN_SMALL) \
+ / ERTS_MONOTONIC_TIME_UNIT) \
+ * ERTS_MONOTONIC_TIME_UNIT)
+
+#endif /* ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT > 10*1000*1000 */
+
+#endif /* ARCH_64 */
+
+/*
+ * Offsets from internal monotonic time to external monotonic time
+ */
+
+#define ERTS_MONOTONIC_OFFSET_NATIVE \
+ (ERTS_MONOTONIC_TIME_START_EXTERNAL - ERTS_MONOTONIC_BEGIN)
+#define ERTS_MONOTONIC_OFFSET_NSEC \
+ ERTS_MONOTONIC_TO_NSEC__(ERTS_MONOTONIC_OFFSET_NATIVE)
+#define ERTS_MONOTONIC_OFFSET_USEC \
+ ERTS_MONOTONIC_TO_USEC__(ERTS_MONOTONIC_OFFSET_NATIVE)
+#define ERTS_MONOTONIC_OFFSET_MSEC \
+ ERTS_MONOTONIC_TO_MSEC__(ERTS_MONOTONIC_OFFSET_NATIVE)
+#define ERTS_MONOTONIC_OFFSET_SEC \
+ ERTS_MONOTONIC_TO_SEC__(ERTS_MONOTONIC_OFFSET_NATIVE)
+
+#define ERTS_MONOTONIC_TO_CLKTCKS__(MON) \
+ ((MON) / (ERTS_MONOTONIC_TIME_UNIT/ERTS_CLKTCK_RESOLUTION))
+#define ERTS_CLKTCKS_TO_MONOTONIC__(TCKS) \
+ ((TCKS) * (ERTS_MONOTONIC_TIME_UNIT/ERTS_CLKTCK_RESOLUTION))
+
+#else /* !ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT */
+
+/*
+ * Initialized in erts_init_sys_time_sup()
+ */
+#define ERTS_MONOTONIC_TIME_UNIT (erts_time_sup__.r.o.monotonic_time_unit)
+
+/*
+ * Offsets from internal monotonic time to external monotonic time
+ *
+ * Initialized in erts_init_time_sup()...
+ */
+#define ERTS_MONOTONIC_TIME_START_EXTERNAL (erts_time_sup__.r.o.start)
+#define ERTS_MONOTONIC_OFFSET_NATIVE (erts_time_sup__.r.o.start_offset.native)
+#define ERTS_MONOTONIC_OFFSET_NSEC (erts_time_sup__.r.o.start_offset.nsec)
+#define ERTS_MONOTONIC_OFFSET_USEC (erts_time_sup__.r.o.start_offset.usec)
+#define ERTS_MONOTONIC_OFFSET_MSEC (erts_time_sup__.r.o.start_offset.msec)
+#define ERTS_MONOTONIC_OFFSET_SEC (erts_time_sup__.r.o.start_offset.sec)
+
+#define ERTS_CONV_FROM_MON_UNIT___(M, TO) \
+ ((ErtsMonotonicTime) \
+ erts_time_unit_conversion((Uint64) (M), \
+ (Uint32) ERTS_MONOTONIC_TIME_UNIT, \
+ (Uint32) (TO)))
+
+#define ERTS_CONV_TO_MON_UNIT___(M, FROM) \
+ ((ErtsMonotonicTime) \
+ erts_time_unit_conversion((Uint64) (M), \
+ (Uint32) (FROM), \
+ (Uint32) ERTS_MONOTONIC_TIME_UNIT))
+
+#define ERTS_MONOTONIC_TO_SEC__(M) \
+ ERTS_CONV_FROM_MON_UNIT___((M), 1)
+#define ERTS_MONOTONIC_TO_MSEC__(M) \
+ ERTS_CONV_FROM_MON_UNIT___((M), 1000)
+#define ERTS_MONOTONIC_TO_USEC__(M) \
+ ERTS_CONV_FROM_MON_UNIT___((M), 1000*1000)
+#define ERTS_MONOTONIC_TO_NSEC__(M) \
+ ERTS_CONV_FROM_MON_UNIT___((M), 1000*1000*1000)
+
+#define ERTS_SEC_TO_MONOTONIC__(SEC) \
+ ERTS_CONV_TO_MON_UNIT___((SEC), 1)
+#define ERTS_MSEC_TO_MONOTONIC__(MSEC) \
+ ERTS_CONV_TO_MON_UNIT___((MSEC), 1000)
+#define ERTS_USEC_TO_MONOTONIC__(USEC) \
+ ERTS_CONV_TO_MON_UNIT___((USEC), 1000*1000)
+#define ERTS_NSEC_TO_MONOTONIC__(NSEC) \
+ ERTS_CONV_TO_MON_UNIT___((NSEC), 1000*1000*1000)
+
+#define ERTS_MONOTONIC_TO_CLKTCKS__(MON) \
+ ERTS_CONV_FROM_MON_UNIT___((MON), ERTS_CLKTCK_RESOLUTION)
+#define ERTS_CLKTCKS_TO_MONOTONIC__(TCKS) \
+ ERTS_CONV_TO_MON_UNIT___((TCKS), ERTS_CLKTCK_RESOLUTION)
+
+#endif /* !ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT */
+
+#define ERTS_MONOTONIC_TIME_END_EXTERNAL \
+ (ERTS_MONOTONIC_TIME_START_EXTERNAL < 0 \
+ ? (ERTS_MONOTONIC_TIME_START_EXTERNAL \
+ + (ERTS_MONOTONIC_END - ERTS_MONOTONIC_BEGIN)) \
+ : (ERTS_MONOTONIC_END - ERTS_MONOTONIC_TIME_START_EXTERNAL))
+
+#define ERTS_MSEC_TO_CLKTCKS__(MON) \
+ ((MON) * (ERTS_CLKTCK_RESOLUTION/1000))
+#define ERTS_CLKTCKS_TO_MSEC__(TCKS) \
+ ((TCKS) / (ERTS_CLKTCK_RESOLUTION/1000))
+
+#define ERTS_MONOTONIC_TO_SEC(X) \
+ (ERTS_TIME_ASSERT((X) >= 0), \
+ ERTS_MONOTONIC_TO_SEC__((X)))
+#define ERTS_MONOTONIC_TO_MSEC(X) \
+ (ERTS_TIME_ASSERT((X) >= 0), \
+ ERTS_MONOTONIC_TO_MSEC__((X)))
+#define ERTS_MONOTONIC_TO_USEC(X) \
+ (ERTS_TIME_ASSERT((X) >= 0), \
+ ERTS_MONOTONIC_TO_USEC__((X)))
+#define ERTS_MONOTONIC_TO_NSEC(X) \
+ (ERTS_TIME_ASSERT((X) >= 0), \
+ ERTS_MONOTONIC_TO_NSEC__((X)))
+#define ERTS_SEC_TO_MONOTONIC(X) \
+ (ERTS_TIME_ASSERT((X) >= 0), \
+ ERTS_SEC_TO_MONOTONIC__((X)))
+#define ERTS_MSEC_TO_MONOTONIC(X) \
+ (ERTS_TIME_ASSERT((X) >= 0), \
+ ERTS_MSEC_TO_MONOTONIC__((X)))
+#define ERTS_USEC_TO_MONOTONIC(X) \
+ (ERTS_TIME_ASSERT((X) >= 0), \
+ ERTS_USEC_TO_MONOTONIC__((X)))
+#define ERTS_NSEC_TO_MONOTONIC(X) \
+ (ERTS_TIME_ASSERT((X) >= 0), \
+ ERTS_NSEC_TO_MONOTONIC__((X)))
+
+#define ERTS_MONOTONIC_TO_CLKTCKS(X) \
+ (ERTS_TIME_ASSERT((X) >= 0), \
+ ERTS_MONOTONIC_TO_CLKTCKS__((X)))
+#define ERTS_CLKTCKS_TO_MONOTONIC(X) \
+ (ERTS_TIME_ASSERT((X) >= 0), \
+ ERTS_CLKTCKS_TO_MONOTONIC__((X)))
+
+#define ERTS_MSEC_TO_CLKTCKS(X) \
+ (ERTS_TIME_ASSERT((X) >= 0), \
+ ERTS_MSEC_TO_CLKTCKS__((X)))
+#define ERTS_CLKTCKS_TO_MSEC(X) \
+ (ERTS_TIME_ASSERT((X) >= 0), \
+ ERTS_CLKTCKS_TO_MSEC__((X)))
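+
+/*
+ * Each checked variant uses the comma operator: ERTS_TIME_ASSERT() is
+ * evaluated for its side effect only, and the value of the whole
+ * expression is that of the conversion, so the macros remain usable
+ * in expression position.
+ */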
+
#endif /* ERL_TIME_H__ */
+
+/* timer-wheel api */
+#if defined(ERTS_WANT_TIMER_WHEEL_API) && !defined(ERTS_GOT_TIMER_WHEEL_API)
+#define ERTS_GOT_TIMER_WHEEL_API
+
+#include "erl_thr_progress.h"
+#include "erl_process.h"
+
+void erts_sched_init_time_sup(ErtsSchedulerData *esdp);
+
+
+#define ERTS_TW_SLOT_INACTIVE (-2)
+
+/*
+** Timer entry:
+*/
+typedef struct erl_timer {
+ ErtsMonotonicTime timeout_pos; /* Timeout in absolute clock ticks */
+ struct erl_timer* next; /* next entry tiw slot or chain */
+ struct erl_timer* prev; /* prev entry tiw slot or chain */
+ void (*timeout)(void*); /* called when timeout */
+ void* arg; /* argument to timeout/cancel procs */
+ int slot;
+} ErtsTWheelTimer;
+
+typedef void (*ErlTimeoutProc)(void*);
+
+void erts_twheel_set_timer(ErtsTimerWheel *tiw,
+ ErtsTWheelTimer *p, ErlTimeoutProc timeout,
+ void *arg, ErtsMonotonicTime timeout_pos);
+void erts_twheel_cancel_timer(ErtsTimerWheel *tiw, ErtsTWheelTimer *p);
+ErtsTimerWheel *erts_create_timer_wheel(ErtsSchedulerData *esdp);
+
+ErtsMonotonicTime erts_check_next_timeout_time(ErtsSchedulerData *);
+
+ERTS_GLB_INLINE void erts_twheel_init_timer(ErtsTWheelTimer *p);
+ERTS_GLB_INLINE ErtsMonotonicTime erts_next_timeout_time(ErtsNextTimeoutRef);
+ERTS_GLB_INLINE ErtsMonotonicTime erts_tweel_read_timeout(ErtsTWheelTimer *twt);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void erts_twheel_init_timer(ErtsTWheelTimer *p)
+{
+ p->slot = ERTS_TW_SLOT_INACTIVE;
+}
+
+ERTS_GLB_INLINE ErtsMonotonicTime erts_next_timeout_time(ErtsNextTimeoutRef nxt_tmo_ref)
+{
+ return *((ErtsMonotonicTime *) nxt_tmo_ref);
+}
+
+ERTS_GLB_INLINE ErtsMonotonicTime
+erts_tweel_read_timeout(ErtsTWheelTimer *twt)
+{
+ return twt->timeout_pos;
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+void
+erts_twheel_debug_foreach(ErtsTimerWheel *tiw,
+ void (*tclbk)(void *),
+ void (*func)(void *,
+ ErtsMonotonicTime,
+ void *),
+ void *arg);
+
+#endif /* timer wheel api */
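
A minimal sketch of how a caller drives the timer-wheel API above,
assuming a scheduler pointer is at hand (the callback, the static
timer and the 100 msec delay are illustrative and not taken from
this patch):

static ErtsTWheelTimer example_tmr;

static void example_timeout(void *arg)
{
    /* run by erts_bump_timers() once the timeout position is reached */
}

static void example_arm(ErtsSchedulerData *esdp)
{
    ErtsMonotonicTime now, tpos;

    erts_twheel_init_timer(&example_tmr); /* slot = ERTS_TW_SLOT_INACTIVE */
    now = erts_get_monotonic_time(esdp);

    /* absolute timeout position in clock ticks, ~100 msec ahead,
     * rounded up the same way as get_timeout_pos() in erl_time_sup.c */
    tpos = ERTS_MONOTONIC_TO_CLKTCKS(now - 1) + ERTS_MSEC_TO_CLKTCKS(100) + 1;

    erts_twheel_set_timer(esdp->timer_wheel, &example_tmr,
                          example_timeout, NULL, tpos);

    /* erts_twheel_cancel_timer(esdp->timer_wheel, &example_tmr)
     * would deactivate it again. */
}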
diff --git a/erts/emulator/beam/erl_time_sup.c b/erts/emulator/beam/erl_time_sup.c
index 3272a5326d..f2e0900fec 100644
--- a/erts/emulator/beam/erl_time_sup.c
+++ b/erts/emulator/beam/erl_time_sup.c
@@ -1,77 +1,28 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2012. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
/*
-** Support routines for the timer wheel
-**
-** This code contains two strategies for dealing with
-** date/time changes in the system.
-** If the system has some kind of high resolution timer (HAVE_GETHRTIME),
-** the high resolution timer is used to correct the time-of-day and the
-** timeouts, the base source is the hrtimer, but at certain intervals the
-** OS time-of-day is checked and if it is not within certain bounds, the
-** delivered time gets slowly adjusted for each call until
-** it corresponds to the system time (built-in adjtime...).
-** The call gethrtime() is detected by autoconf on Unix, but other
-** platforms may define it in erl_*_sys.h and implement
-** their own high resolution timer. The high resolution timer
-** strategy is (probably) best on all systems where the timer have
-** a resolution higher or equal to gettimeofday (or what's implemented
-** is sys_gettimeofday()). The actual resolution is the interesting thing,
-** not the unit's thats used (i.e. on VxWorks, nanoseconds can be
-** retrieved in terms of units, but the actual resolution is the same as
-** for the clock ticks).
-** If the systems best timer routine is kernel ticks returned from
-** sys_times(), and the actual resolution of sys_gettimeofday() is
-** better (like most unixes that does not have any realtime extensions),
-** another strategy is used. The tolerant gettimeofday() corrects
-** the value with respect to uptime (sys_times() return value) and checks
-** for correction both when delivering timeticks and delivering nowtime.
-** this strategy is slower, but accurate on systems without better timer
-** routines. The kernel tick resolution is not enough to implement
-** a gethrtime routine. On Linux and other non solaris unix-boxes the second
-** strategy is used, on all other platforms we use the first.
-**
-** The following is expected (from sys.[ch] and erl_*_sys.h):
-**
-** 64 bit integers. So it is, and so it will be.
-**
-** sys_init_time(), will return the clock resolution in MS and
-** that's about it. More could be added of course
-** If the clock-rate is constant (i.e. 1 ms) one can define
-** SYS_CLOCK_RESOLUTION (to 1),
-** which makes erts_deliver_time/erts_time_remaining a bit faster.
-**
-** if HAVE_GETHRTIME is defined:
-** sys_gethrtime() will return a SysHrTime (long long) representing
-** nanoseconds, sys_init_hrtime() will do any initialization.
-** else
-** a long (64bit) integer type called Sint64 should be defined.
-**
-** sys_times() will return clock_ticks since start and
-** fill in a SysTimes structure (struct tms). Instead of CLK_TCK,
-** SYS_CLK_TCK is used to determine the resolution of kernel ticks.
-**
-** sys_gettimeofday() will take a SysTimeval (a struct timeval) as parameter
-** and fill it in as gettimeofday(X,NULL).
-**
-*/
+ * Support routines for time
+ */
+
+/* #define ERTS_TIME_CORRECTION_PRINT */
#ifdef HAVE_CONFIG_H
# include "config.h"
@@ -80,443 +31,1374 @@
#include "sys.h"
#include "erl_vm.h"
#include "global.h"
+#define ERTS_WANT_TIMER_WHEEL_API
+#include "erl_time.h"
+#include "erl_driver.h"
+#include "erl_nif.h"
+
+static erts_mtx_t erts_get_time_mtx;
+
+/* Used by erts_runtime_elapsed_both() */
+typedef struct {
+ erts_mtx_t mtx;
+ ErtsMonotonicTime user;
+ ErtsMonotonicTime sys;
+} ErtsRunTimePrevData;
+
+static union {
+ ErtsRunTimePrevData data;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsRunTimePrevData))];
+} runtime_prev erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+
+static union {
+ erts_atomic64_t time;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_atomic64_t))];
+} wall_clock_prev erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+
+static union {
+ erts_atomic64_t time;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_atomic64_t))];
+} now_prev erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+
+static ErtsMonitor *time_offset_monitors = NULL;
+static Uint no_time_offset_monitors = 0;
+
+#ifdef DEBUG
+static int time_sup_initialized = 0;
+#endif
-static erts_smp_mtx_t erts_timeofday_mtx;
+#define ERTS_MONOTONIC_TIME_KILO \
+ ((ErtsMonotonicTime) 1000)
+#define ERTS_MONOTONIC_TIME_MEGA \
+ (ERTS_MONOTONIC_TIME_KILO*ERTS_MONOTONIC_TIME_KILO)
+#define ERTS_MONOTONIC_TIME_GIGA \
+ (ERTS_MONOTONIC_TIME_MEGA*ERTS_MONOTONIC_TIME_KILO)
+#define ERTS_MONOTONIC_TIME_TERA \
+ (ERTS_MONOTONIC_TIME_GIGA*ERTS_MONOTONIC_TIME_KILO)
-static SysTimeval inittv; /* Used everywhere, the initial time-of-day */
+static void init_time_napi(void);
+static void
+schedule_send_time_offset_changed_notifications(ErtsMonotonicTime new_offset);
+
+struct time_sup_read_only__ {
+ ErtsMonotonicTime (*get_time)(void);
+ int correction;
+ ErtsTimeWarpMode warp_mode;
+#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+ ErtsMonotonicTime moffset;
+ int os_corrected_monotonic_time;
+ int os_monotonic_time_disable;
+ char *os_monotonic_time_func;
+ char *os_monotonic_time_clock_id;
+ int os_monotonic_time_locked;
+ Uint64 os_monotonic_time_resolution;
+ Uint64 os_monotonic_time_extended;
+#endif
+ char *os_system_time_func;
+ char *os_system_time_clock_id;
+ int os_system_time_locked;
+ Uint64 os_system_time_resolution;
+ Uint64 os_system_time_extended;
+ struct {
+ ErtsMonotonicTime large_diff;
+ ErtsMonotonicTime small_diff;
+ } adj;
+ struct {
+ ErtsMonotonicTime error;
+ ErtsMonotonicTime resolution;
+ int intervals;
+ int use_avg;
+ } drift_adj;
+};
+
+typedef struct {
+ ErtsMonotonicTime drift; /* Correction for os monotonic drift */
+ ErtsMonotonicTime error; /* Correction for error between system times */
+} ErtsMonotonicCorrection;
+
+typedef struct {
+ ErtsMonotonicTime erl_mtime;
+ ErtsMonotonicTime os_mtime;
+ ErtsMonotonicCorrection correction;
+} ErtsMonotonicCorrectionInstance;
+
+#define ERTS_MAX_DRIFT_INTERVALS 50
+typedef struct {
+ struct {
+ struct {
+ ErtsMonotonicTime sys;
+ ErtsMonotonicTime mon;
+ } diff;
+ struct {
+ ErtsMonotonicTime sys;
+ ErtsMonotonicTime mon;
+ } time;
+ } intervals[ERTS_MAX_DRIFT_INTERVALS];
+ struct {
+ ErtsMonotonicTime sys;
+ ErtsMonotonicTime mon;
+ } acc;
+ int ix;
+ int dirty_counter;
+} ErtsMonotonicDriftData;
+
+typedef struct {
+ ErtsMonotonicCorrectionInstance prev;
+ ErtsMonotonicCorrectionInstance curr;
+} ErtsMonotonicCorrectionInstances;
+
+typedef struct {
+ ErtsMonotonicCorrectionInstances insts;
+ ErtsMonotonicDriftData drift;
+ ErtsMonotonicTime last_check;
+ int short_check_interval;
+} ErtsMonotonicCorrectionData;
+
+struct time_sup_infrequently_changed__ {
+#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+ struct {
+ erts_rwmtx_t rwmtx;
+ ErtsTWheelTimer timer;
+ ErtsMonotonicCorrectionData cdata;
+ } parmon;
+ ErtsMonotonicTime minit;
+#endif
+ ErtsSystemTime sinit;
+ ErtsMonotonicTime not_corrected_moffset;
+ erts_atomic64_t offset;
+ ErtsMonotonicTime shadow_offset;
+ erts_atomic32_t preliminary_offset;
+};
+
+struct time_sup_frequently_changed__ {
+ ErtsMonotonicTime last_not_corrected_time;
+};
+
+static struct {
+ union {
+ struct time_sup_read_only__ o;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(struct time_sup_read_only__))];
+ } r;
+ union {
+ struct time_sup_infrequently_changed__ c;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(struct time_sup_infrequently_changed__))];
+ } inf;
+ union {
+ struct time_sup_frequently_changed__ c;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(struct time_sup_frequently_changed__))];
+ } f;
+} time_sup erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+
+ErtsTimeSupData erts_time_sup__ erts_align_attribute(ERTS_CACHE_LINE_SIZE);
-static SysTimes t_start; /* Used in elapsed_time_both */
-static SysTimeval gtv; /* Used in wall_clock_elapsed_time_both */
-static SysTimeval then; /* Used in get_now */
-static SysTimeval last_emu_time; /* Used in erts_get_emu_time() */
-SysTimeval erts_first_emu_time; /* Used in erts_get_emu_time() */
+/*
+ * erts_get_approx_time() returns an *approximate* time
+ * in seconds. NOTE that this time may jump backwards!!!
+ */
+erts_approx_time_t
+erts_get_approx_time(void)
+{
+ ErtsSystemTime stime = erts_os_system_time();
+ return (erts_approx_time_t) ERTS_MONOTONIC_TO_SEC(stime);
+}
-union {
- erts_smp_atomic_t time;
- char align[ERTS_CACHE_LINE_SIZE];
-} approx erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+static ERTS_INLINE void
+init_time_offset(ErtsMonotonicTime offset)
+{
+ erts_atomic64_init_nob(&time_sup.inf.c.offset, (erts_aint64_t) offset);
+}
-static void
-init_approx_time(void)
+static ERTS_INLINE void
+set_time_offset(ErtsMonotonicTime offset)
{
- erts_smp_atomic_init_nob(&approx.time, 0);
+ erts_atomic64_set_relb(&time_sup.inf.c.offset, (erts_aint64_t) offset);
}
-static ERTS_INLINE erts_approx_time_t
-get_approx_time(void)
+static ERTS_INLINE ErtsMonotonicTime
+get_time_offset(void)
{
- return (erts_approx_time_t) erts_smp_atomic_read_nob(&approx.time);
+ return (ErtsMonotonicTime) erts_atomic64_read_acqb(&time_sup.inf.c.offset);
}
static ERTS_INLINE void
-update_approx_time(SysTimeval *tv)
+update_last_mtime(ErtsSchedulerData *esdp, ErtsMonotonicTime mtime)
{
- erts_approx_time_t new_secs = (erts_approx_time_t) tv->tv_sec;
- erts_approx_time_t old_secs = get_approx_time();
- if (old_secs != new_secs)
- erts_smp_atomic_set_nob(&approx.time, new_secs);
+ if (!esdp)
+ esdp = erts_get_scheduler_data();
+ if (esdp) {
+ ASSERT(mtime >= esdp->last_monotonic_time);
+ esdp->last_monotonic_time = mtime;
+ esdp->check_time_reds = 0;
+ }
}
+#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+
/*
- * erts_get_approx_time() returns an *approximate* time
- * in seconds. NOTE that this time may jump backwards!!!
+ * Time correction adjustments made due to
+ * error between Erlang system time and OS
+ * system time:
+ * - Large adjustment ~1%
+ * - Small adjustment ~0.05%
*/
-erts_approx_time_t
-erts_get_approx_time(void)
+#define ERTS_TCORR_ERR_UNIT 2048
+#define ERTS_TCORR_ERR_LARGE_ADJ 20
+#define ERTS_TCORR_ERR_SMALL_ADJ 1
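+/* 20/2048 ~= 0.98% and 1/2048 ~= 0.049%, i.e. the ~1% and ~0.05%
+ * adjustments quoted above. */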
+
+#define ERTS_INIT_SHORT_INTERVAL_COUNTER 10
+#define ERTS_LONG_TIME_CORRECTION_CHECK ERTS_SEC_TO_MONOTONIC(60)
+#define ERTS_SHORT_TIME_CORRECTION_CHECK ERTS_SEC_TO_MONOTONIC(15)
+
+#define ERTS_TIME_DRIFT_MAX_ADJ_DIFF ERTS_USEC_TO_MONOTONIC(50)
+#define ERTS_TIME_DRIFT_MIN_ADJ_DIFF ERTS_USEC_TO_MONOTONIC(5)
+
+/*
+ * Maximum drift of the OS monotonic clock expected.
+ *
+ * We use 1 milli second per second. If the monotonic
+ * clock drifts more than this we will fail to adjust for
+ * drift, and error correction will kick in instead.
+ * If it is larger than this, one could argue that the
+ * primitive is too poor to be used...
+ */
+#define ERTS_MAX_MONOTONIC_DRIFT ERTS_MSEC_TO_MONOTONIC(1)
+
+/*
+ * We assume that precision is 32 times worse than the
+ * resolution. This is a wild guess, but there is no
+ * practical way to determine the actual precision.
+ */
+#define ERTS_ASSUMED_PRECISION_DROP 32
+
+#define ERTS_MIN_MONOTONIC_DRIFT_MEASUREMENT \
+ (ERTS_SHORT_TIME_CORRECTION_CHECK - 2*ERTS_MAX_MONOTONIC_DRIFT)
+
+
+static ERTS_INLINE ErtsMonotonicTime
+calc_corrected_erl_mtime(ErtsMonotonicTime os_mtime,
+ ErtsMonotonicCorrectionInstance *cip,
+ ErtsMonotonicTime *os_mdiff_p,
+ int os_drift_corrected)
{
- return get_approx_time();
+ ErtsMonotonicTime erl_mtime, diff = os_mtime - cip->os_mtime;
+ ERTS_TIME_ASSERT(diff >= 0);
+ if (!os_drift_corrected)
+ diff += (cip->correction.drift*diff)/ERTS_MONOTONIC_TIME_UNIT;
+ erl_mtime = cip->erl_mtime;
+ erl_mtime += diff;
+ erl_mtime += cip->correction.error*(diff/ERTS_TCORR_ERR_UNIT);
+ if (os_mdiff_p)
+ *os_mdiff_p = diff;
+ return erl_mtime;
}
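+/*
+ * That is, for a raw OS monotonic sample M taken after the correction
+ * instance ci was installed:
+ *
+ *   D         = (M - ci.os_mtime), adjusted by drift/UNIT per unit
+ *               (skipped when the OS already corrects drift)
+ *   erl_mtime = ci.erl_mtime + D + error*(D / ERTS_TCORR_ERR_UNIT)
+ *
+ * so the error term slews Erlang monotonic time by error/2048 of a
+ * unit per elapsed unit until a new instance is installed.
+ */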
-#ifdef HAVE_GETHRTIME
+static ERTS_INLINE ErtsMonotonicTime
+read_corrected_time(int os_drift_corrected)
+{
+ ErtsMonotonicTime os_mtime;
+ ErtsMonotonicCorrectionInstance ci;
-int erts_disable_tolerant_timeofday;
+ erts_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx);
-static SysHrTime hr_init_time, hr_last_correction_check,
- hr_correction, hr_last_time;
+ os_mtime = erts_os_monotonic_time();
-static void init_tolerant_timeofday(void)
-{
- /* Should be in sys.c */
-#if defined(HAVE_SYSCONF) && defined(_SC_NPROCESSORS_CONF)
- if (sysconf(_SC_NPROCESSORS_CONF) > 1) {
- char b[1024];
- int maj,min,build;
- os_flavor(b,1024);
- os_version(&maj,&min,&build);
- if (!strcmp(b,"sunos") && maj <= 5 && min <= 7) {
- erts_disable_tolerant_timeofday = 1;
- }
+ if (os_mtime >= time_sup.inf.c.parmon.cdata.insts.curr.os_mtime)
+ ci = time_sup.inf.c.parmon.cdata.insts.curr;
+ else {
+ if (os_mtime < time_sup.inf.c.parmon.cdata.insts.prev.os_mtime)
+ erts_exit(ERTS_ABORT_EXIT,
+ "OS monotonic time stepped backwards\n");
+ ci = time_sup.inf.c.parmon.cdata.insts.prev;
}
+
+ erts_rwmtx_runlock(&time_sup.inf.c.parmon.rwmtx);
+
+ return calc_corrected_erl_mtime(os_mtime, &ci, NULL,
+ os_drift_corrected);
+}
+
+static ErtsMonotonicTime get_os_drift_corrected_time(void)
+{
+ return read_corrected_time(!0);
+}
+
+static ErtsMonotonicTime get_corrected_time(void)
+{
+ return read_corrected_time(0);
+}
+
+#ifdef ERTS_TIME_CORRECTION_PRINT
+
+static ERTS_INLINE void
+print_correction(int change,
+ ErtsMonotonicTime sdiff,
+ ErtsMonotonicTime old_ecorr,
+ ErtsMonotonicTime old_dcorr,
+ ErtsMonotonicTime new_ecorr,
+ ErtsMonotonicTime new_dcorr,
+ Uint tmo)
+{
+ ErtsMonotonicTime usec_sdiff;
+ if (sdiff < 0)
+ usec_sdiff = -1*ERTS_MONOTONIC_TO_USEC(-1*sdiff);
+ else
+ usec_sdiff = ERTS_MONOTONIC_TO_USEC(sdiff);
+
+ if (!change)
+ erts_fprintf(stderr,
+ "sdiff = %b64d usec : [ec=%b64d ppm, dc=%b64d ppb] : "
+ "tmo = %bpu msec\r\n",
+ usec_sdiff,
+ (1000000*old_ecorr) / ERTS_TCORR_ERR_UNIT,
+ (1000000000*old_dcorr) / ERTS_MONOTONIC_TIME_UNIT,
+ tmo);
+ else
+ erts_fprintf(stderr,
+ "sdiff = %b64d usec : [ec=%b64d ppm, dc=%b64d ppb] "
+ "-> [ec=%b64d ppm, dc=%b64d ppb] : tmo = %bpu msec\r\n",
+ usec_sdiff,
+ (1000000*old_ecorr) / ERTS_TCORR_ERR_UNIT,
+ (1000000000*old_dcorr) / ERTS_MONOTONIC_TIME_UNIT,
+ (1000000*new_ecorr) / ERTS_TCORR_ERR_UNIT,
+ (1000000000*new_dcorr) / ERTS_MONOTONIC_TIME_UNIT,
+ tmo);
+}
+
#endif
- hr_init_time = sys_gethrtime();
- hr_last_correction_check = hr_last_time = hr_init_time;
- hr_correction = 0;
+
+static ERTS_INLINE ErtsMonotonicTime
+get_timeout_pos(ErtsMonotonicTime now, ErtsMonotonicTime tmo)
+{
+ ErtsMonotonicTime tpos;
+ tpos = ERTS_MONOTONIC_TO_CLKTCKS(now - 1);
+ tpos += ERTS_MSEC_TO_CLKTCKS(tmo);
+ tpos += 1;
+ return tpos;
}
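+/*
+ * Note that ERTS_MONOTONIC_TO_CLKTCKS(now - 1) + 1 rounds now *up*
+ * to whole clock ticks, so at least tmo msec will pass before the
+ * returned position is reached.
+ */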
-static void get_tolerant_timeofday(SysTimeval *tv)
+static void
+check_time_correction(void *vesdp)
{
- SysHrTime diff_time, curr;
+ int init_drift_adj = !vesdp;
+ ErtsSchedulerData *esdp = (ErtsSchedulerData *) vesdp;
+ ErtsMonotonicCorrection new_correction;
+ ErtsMonotonicCorrectionInstance ci;
+ ErtsMonotonicTime mdiff, sdiff, os_mtime, erl_mtime, os_stime,
+ erl_stime, time_offset, timeout_pos;
+ Uint timeout;
+ int os_drift_corrected = time_sup.r.o.os_corrected_monotonic_time;
+ int set_new_correction = 0, begin_short_intervals = 0;
- if (erts_disable_tolerant_timeofday) {
- sys_gettimeofday(tv);
- return;
+ erts_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx);
+
+ erts_os_times(&os_mtime, &os_stime);
+
+ ci = time_sup.inf.c.parmon.cdata.insts.curr;
+
+ erts_rwmtx_runlock(&time_sup.inf.c.parmon.rwmtx);
+
+ if (os_mtime < ci.os_mtime)
+ erts_exit(ERTS_ABORT_EXIT,
+ "OS monotonic time stepped backwards\n");
+
+ erl_mtime = calc_corrected_erl_mtime(os_mtime, &ci, &mdiff,
+ os_drift_corrected);
+ time_offset = get_time_offset();
+ erl_stime = erl_mtime + time_offset;
+
+ sdiff = erl_stime - os_stime;
+
+ if (time_sup.inf.c.shadow_offset) {
+ ERTS_TIME_ASSERT(time_sup.r.o.warp_mode == ERTS_SINGLE_TIME_WARP_MODE);
+ if (erts_atomic32_read_nob(&time_sup.inf.c.preliminary_offset))
+ sdiff += time_sup.inf.c.shadow_offset;
+ else
+ time_sup.inf.c.shadow_offset = 0;
}
- *tv = inittv;
- diff_time = ((curr = sys_gethrtime()) + hr_correction - hr_init_time) / 1000;
- if (curr < hr_init_time) {
- erl_exit(1,"Unexpected behaviour from operating system high "
- "resolution timer");
+ new_correction = ci.correction;
+
+ if (time_sup.r.o.warp_mode == ERTS_MULTI_TIME_WARP_MODE
+ && (sdiff < -2*time_sup.r.o.adj.small_diff
+ || 2*time_sup.r.o.adj.small_diff < sdiff)) {
+ /* System time diff exceeded limits; change time offset... */
+ time_offset -= sdiff;
+ sdiff = 0;
+ set_time_offset(time_offset);
+ schedule_send_time_offset_changed_notifications(time_offset);
+ begin_short_intervals = 1;
+ if (ci.correction.error != 0) {
+ set_new_correction = 1;
+ new_correction.error = 0;
+ }
+ }
+ else if ((time_sup.r.o.warp_mode == ERTS_SINGLE_TIME_WARP_MODE
+ && erts_atomic32_read_nob(&time_sup.inf.c.preliminary_offset))
+ && (sdiff < -2*time_sup.r.o.adj.small_diff
+ || 2*time_sup.r.o.adj.small_diff < sdiff)) {
+ /*
+ * System time diff exceeded limits; change shadow offset
+ * and let OS system time leap away from Erlang system
+ * time.
+ */
+ time_sup.inf.c.shadow_offset -= sdiff;
+ sdiff = 0;
+ begin_short_intervals = 1;
+ if (ci.correction.error != 0) {
+ set_new_correction = 1;
+ new_correction.error = 0;
+ }
+ }
+ else if (ci.correction.error == 0) {
+ if (sdiff < -time_sup.r.o.adj.small_diff) {
+ set_new_correction = 1;
+ if (sdiff < -time_sup.r.o.adj.large_diff)
+ new_correction.error = ERTS_TCORR_ERR_LARGE_ADJ;
+ else
+ new_correction.error = ERTS_TCORR_ERR_SMALL_ADJ;
+ }
+ else if (sdiff > time_sup.r.o.adj.small_diff) {
+ set_new_correction = 1;
+ if (sdiff > time_sup.r.o.adj.large_diff)
+ new_correction.error = -ERTS_TCORR_ERR_LARGE_ADJ;
+ else
+ new_correction.error = -ERTS_TCORR_ERR_SMALL_ADJ;
+ }
+ }
+ else if (ci.correction.error > 0) {
+ if (sdiff < 0) {
+ if (ci.correction.error != ERTS_TCORR_ERR_LARGE_ADJ
+ && sdiff < -time_sup.r.o.adj.large_diff) {
+ new_correction.error = ERTS_TCORR_ERR_LARGE_ADJ;
+ set_new_correction = 1;
+ }
+ }
+ else if (sdiff > time_sup.r.o.adj.small_diff) {
+ set_new_correction = 1;
+ if (sdiff > time_sup.r.o.adj.large_diff)
+ new_correction.error = -ERTS_TCORR_ERR_LARGE_ADJ;
+ else
+ new_correction.error = -ERTS_TCORR_ERR_SMALL_ADJ;
+ }
+ else {
+ set_new_correction = 1;
+ new_correction.error = 0;
+ }
+ }
+ else /* if (ci.correction.error < 0) */ {
+ if (0 < sdiff) {
+ if (ci.correction.error != -ERTS_TCORR_ERR_LARGE_ADJ
+ && time_sup.r.o.adj.large_diff < sdiff) {
+ new_correction.error = -ERTS_TCORR_ERR_LARGE_ADJ;
+ set_new_correction = 1;
+ }
+ }
+ else if (sdiff < -time_sup.r.o.adj.small_diff) {
+ set_new_correction = 1;
+ if (sdiff < -time_sup.r.o.adj.large_diff)
+ new_correction.error = ERTS_TCORR_ERR_LARGE_ADJ;
+ else
+ new_correction.error = ERTS_TCORR_ERR_SMALL_ADJ;
+ }
+ else {
+ set_new_correction = 1;
+ new_correction.error = 0;
+ }
}
- if ((curr - hr_last_correction_check) / 1000 > 1000000) {
- /* Check the correction need */
- SysHrTime tv_diff, diffdiff;
- SysTimeval tmp;
- int done = 0;
-
- sys_gettimeofday(&tmp);
- tv_diff = ((SysHrTime) tmp.tv_sec) * 1000000 + tmp.tv_usec;
- tv_diff -= ((SysHrTime) inittv.tv_sec) * 1000000 + inittv.tv_usec;
- diffdiff = diff_time - tv_diff;
- if (diffdiff > 10000) {
- SysHrTime corr = (curr - hr_last_time) / 100;
- if (corr / 1000 >= diffdiff) {
- ++done;
- hr_correction -= ((SysHrTime)diffdiff) * 1000;
- } else {
- hr_correction -= corr;
+ if (!os_drift_corrected) {
+ ErtsMonotonicDriftData *ddp = &time_sup.inf.c.parmon.cdata.drift;
+ int ix = ddp->ix;
+ ErtsMonotonicTime mtime_diff, old_os_mtime;
+
+ old_os_mtime = ddp->intervals[ix].time.mon;
+ mtime_diff = os_mtime - old_os_mtime;
+
+ if ((mtime_diff >= ERTS_MIN_MONOTONIC_DRIFT_MEASUREMENT)
+ | init_drift_adj) {
+ ErtsMonotonicTime drift_adj, drift_adj_diff, old_os_stime,
+ smtime_diff, stime_diff, mtime_acc, stime_acc,
+ avg_drift_adj, max_drift;
+
+ old_os_stime = ddp->intervals[ix].time.sys;
+
+ mtime_acc = ddp->acc.mon;
+ stime_acc = ddp->acc.sys;
+
+ avg_drift_adj = (((stime_acc - mtime_acc)
+ * ERTS_MONOTONIC_TIME_UNIT)
+ / mtime_acc);
+
+ mtime_diff = os_mtime - old_os_mtime;
+ stime_diff = os_stime - old_os_stime;
+ smtime_diff = stime_diff - mtime_diff;
+ ix++;
+ if (ix >= time_sup.r.o.drift_adj.intervals)
+ ix = 0;
+ mtime_acc -= ddp->intervals[ix].diff.mon;
+ mtime_acc += mtime_diff;
+ stime_acc -= ddp->intervals[ix].diff.sys;
+ stime_acc += stime_diff;
+
+ ddp->intervals[ix].diff.mon = mtime_diff;
+ ddp->intervals[ix].diff.sys = stime_diff;
+ ddp->intervals[ix].time.mon = os_mtime;
+ ddp->intervals[ix].time.sys = os_stime;
+
+ ddp->ix = ix;
+ ddp->acc.mon = mtime_acc;
+ ddp->acc.sys = stime_acc;
+
+ max_drift = ERTS_MAX_MONOTONIC_DRIFT;
+ max_drift *= ERTS_MONOTONIC_TO_SEC(mtime_diff);
+
+ if (smtime_diff > time_sup.r.o.drift_adj.error + max_drift
+ || smtime_diff < -1*time_sup.r.o.drift_adj.error - max_drift) {
+ dirty_intervals:
+ /*
+ * We had a leap in system time. Mark array as
+ * dirty to ensure that dirty values are rotated
+ * out before we use it again...
+ */
+ ddp->dirty_counter = time_sup.r.o.drift_adj.intervals;
+ begin_short_intervals = 1;
}
- diff_time = (curr + hr_correction - hr_init_time) / 1000;
- } else if (diffdiff < -10000) {
- SysHrTime corr = (curr - hr_last_time) / 100;
- if (corr / 1000 >= -diffdiff) {
- ++done;
- hr_correction -= ((SysHrTime)diffdiff) * 1000;
- } else {
- hr_correction += corr;
+ else if (ddp->dirty_counter > 0) {
+ if (init_drift_adj) {
+ new_correction.drift = ((smtime_diff
+ * ERTS_MONOTONIC_TIME_UNIT)
+ / mtime_diff);
+ set_new_correction = 1;
+ }
+ ddp->dirty_counter--;
+ }
+ else {
+ if (ddp->dirty_counter == 0) {
+ /* Force set new drift correction... */
+ set_new_correction = 1;
+ ddp->dirty_counter--;
+ }
+
+ if (time_sup.r.o.drift_adj.use_avg)
+ drift_adj = (((stime_acc - mtime_acc)
+ * ERTS_MONOTONIC_TIME_UNIT)
+ / mtime_acc);
+ else
+ drift_adj = ((smtime_diff
+ * ERTS_MONOTONIC_TIME_UNIT)
+ / mtime_diff);
+
+ drift_adj_diff = avg_drift_adj - drift_adj;
+ if (drift_adj_diff < -ERTS_TIME_DRIFT_MAX_ADJ_DIFF
+ || ERTS_TIME_DRIFT_MAX_ADJ_DIFF < drift_adj_diff)
+ goto dirty_intervals;
+
+ drift_adj_diff = drift_adj - new_correction.drift;
+ if (drift_adj_diff) {
+ if (drift_adj_diff > ERTS_TIME_DRIFT_MAX_ADJ_DIFF)
+ drift_adj_diff = ERTS_TIME_DRIFT_MAX_ADJ_DIFF;
+ else if (drift_adj_diff < -ERTS_TIME_DRIFT_MAX_ADJ_DIFF)
+ drift_adj_diff = -ERTS_TIME_DRIFT_MAX_ADJ_DIFF;
+ new_correction.drift += drift_adj_diff;
+ if (drift_adj_diff < -ERTS_TIME_DRIFT_MIN_ADJ_DIFF
+ || ERTS_TIME_DRIFT_MIN_ADJ_DIFF < drift_adj_diff) {
+ set_new_correction = 1;
+ }
+ }
}
- diff_time = (curr + hr_correction - hr_init_time) / 1000;
- } else {
- ++done;
}
- if (done) {
- hr_last_correction_check = curr;
+ }
+
+ begin_short_intervals |= set_new_correction;
+
+ if (begin_short_intervals) {
+ time_sup.inf.c.parmon.cdata.short_check_interval
+ = ERTS_INIT_SHORT_INTERVAL_COUNTER;
+ }
+ else if ((os_mtime - time_sup.inf.c.parmon.cdata.last_check
+ >= ERTS_SHORT_TIME_CORRECTION_CHECK - ERTS_MONOTONIC_TIME_UNIT)
+ && time_sup.inf.c.parmon.cdata.short_check_interval > 0) {
+ time_sup.inf.c.parmon.cdata.short_check_interval--;
+ }
+ time_sup.inf.c.parmon.cdata.last_check = os_mtime;
+
+ if (new_correction.error == 0)
+ timeout = ERTS_MONOTONIC_TO_MSEC(ERTS_LONG_TIME_CORRECTION_CHECK);
+ else {
+ ErtsMonotonicTime ecorr = new_correction.error;
+ ErtsMonotonicTime abs_sdiff;
+ abs_sdiff = (sdiff < 0) ? -1*sdiff : sdiff;
+ if (ecorr < 0)
+ ecorr = -1*ecorr;
+ if (abs_sdiff > ecorr*(ERTS_LONG_TIME_CORRECTION_CHECK/ERTS_TCORR_ERR_UNIT))
+ timeout = ERTS_MONOTONIC_TO_MSEC(ERTS_LONG_TIME_CORRECTION_CHECK);
+ else {
+ timeout = ERTS_MONOTONIC_TO_MSEC((ERTS_TCORR_ERR_UNIT*abs_sdiff)/ecorr);
+ if (timeout < 10)
+ timeout = 10;
}
}
- tv->tv_sec += (int) (diff_time / ((SysHrTime) 1000000));
- tv->tv_usec += (int) (diff_time % ((SysHrTime) 1000000));
- if (tv->tv_usec >= 1000000) {
- tv->tv_usec -= 1000000;
- tv->tv_sec += 1;
+
+ if (timeout > ERTS_MONOTONIC_TO_MSEC(ERTS_SHORT_TIME_CORRECTION_CHECK)
+ && (time_sup.inf.c.parmon.cdata.short_check_interval
+ || time_sup.inf.c.parmon.cdata.drift.dirty_counter >= 0)) {
+ timeout = ERTS_MONOTONIC_TO_MSEC(ERTS_SHORT_TIME_CORRECTION_CHECK);
+ }
+
+ timeout_pos = get_timeout_pos(erl_mtime, timeout);
+
+#ifdef ERTS_TIME_CORRECTION_PRINT
+ print_correction(set_new_correction,
+ sdiff,
+ ci.correction.error,
+ ci.correction.drift,
+ new_correction.error,
+ new_correction.drift,
+ timeout);
+#endif
+
+ if (set_new_correction) {
+ erts_rwmtx_rwlock(&time_sup.inf.c.parmon.rwmtx);
+
+ os_mtime = erts_os_monotonic_time();
+
+ /* Save previous correction instance */
+ time_sup.inf.c.parmon.cdata.insts.prev = ci;
+
+ /*
+ * The current correction instance begins once
+ * OS monotonic time has increased by two units.
+ */
+ os_mtime += 2;
+
+ /*
+ * Compute the Erlang monotonic time corresponding
+ * to the next OS monotonic time, using the previous
+ * correction.
+ */
+ erl_mtime = calc_corrected_erl_mtime(os_mtime, &ci, NULL,
+ os_drift_corrected);
+
+ /*
+ * Save new current correction instance.
+ */
+ time_sup.inf.c.parmon.cdata.insts.curr.erl_mtime = erl_mtime;
+ time_sup.inf.c.parmon.cdata.insts.curr.os_mtime = os_mtime;
+ time_sup.inf.c.parmon.cdata.insts.curr.correction = new_correction;
+
+ erts_rwmtx_rwunlock(&time_sup.inf.c.parmon.rwmtx);
}
- hr_last_time = curr;
+
+ if (!esdp)
+ esdp = erts_get_scheduler_data();
+
+ erts_twheel_set_timer(esdp->timer_wheel,
+ &time_sup.inf.c.parmon.timer,
+ check_time_correction,
+ (void *) esdp,
+ timeout_pos);
}
-#define correction (hr_correction/1000000)
+static ErtsMonotonicTime get_os_corrected_time(void)
+{
+ ASSERT(time_sup.r.o.warp_mode == ERTS_MULTI_TIME_WARP_MODE);
+ return erts_os_monotonic_time() + time_sup.r.o.moffset;
+}
-#else /* !HAVE_GETHRTIME */
-#if !defined(CORRECT_USING_TIMES)
-#define init_tolerant_timeofday()
-#define get_tolerant_timeofday(tvp) sys_gettimeofday(tvp)
-#else
+static void
+check_time_offset(void *vesdp)
+{
+ ErtsSchedulerData *esdp = (ErtsSchedulerData *) vesdp;
+ ErtsMonotonicTime sdiff, os_mtime, erl_mtime, os_stime,
+ erl_stime, time_offset, timeout, timeout_pos;
-typedef Sint64 Milli;
-
-static clock_t init_ct;
-static Sint64 ct_wrap;
-static Milli init_tv_m;
-static Milli correction_supress;
-static Milli last_ct_diff;
-static Milli last_cc;
-static clock_t last_ct;
-
-/* sys_times() might need to be wrapped and the values shifted (right)
- a bit to cope with newer linux (2.5.*) kernels, this has to be taken care
- of dynamically to start with, a special version that uses
- the times() return value as a high resolution timer can be made
- to fully utilize the faster ticks, like on windows, but for now, we'll
- settle with this silly workaround */
-#ifdef ERTS_WRAP_SYS_TIMES
-#define KERNEL_TICKS() (sys_times_wrap() & \
- ((1UL << ((sizeof(clock_t) * 8) - 1)) - 1))
-#else
-SysTimes dummy_tms;
+ ASSERT(time_sup.r.o.warp_mode == ERTS_MULTI_TIME_WARP_MODE);
+
+ erts_os_times(&os_mtime, &os_stime);
-#define KERNEL_TICKS() (sys_times(&dummy_tms) & \
- ((1UL << ((sizeof(clock_t) * 8) - 1)) - 1))
+ erl_mtime = os_mtime + time_sup.r.o.moffset;
+ time_offset = get_time_offset();
+ erl_stime = erl_mtime + time_offset;
+ sdiff = erl_stime - os_stime;
+
+ if ((sdiff < -2*time_sup.r.o.adj.small_diff
+ || 2*time_sup.r.o.adj.small_diff < sdiff)) {
+ /* System time diff exceeded limits; change time offset... */
+#ifdef ERTS_TIME_CORRECTION_PRINT
+ erts_fprintf(stderr, "sdiff = %b64d nsec -> 0 nsec\n",
+ ERTS_MONOTONIC_TO_NSEC(sdiff));
+#endif
+ time_offset -= sdiff;
+ sdiff = 0;
+ set_time_offset(time_offset);
+ schedule_send_time_offset_changed_notifications(time_offset);
+ }
+#ifdef ERTS_TIME_CORRECTION_PRINT
+ else erts_fprintf(stderr, "sdiff = %b64d nsec\n",
+ ERTS_MONOTONIC_TO_NSEC(sdiff));
#endif
-static void init_tolerant_timeofday(void)
+ timeout = ERTS_MONOTONIC_TO_MSEC(ERTS_LONG_TIME_CORRECTION_CHECK);
+ timeout_pos = get_timeout_pos(erl_mtime, timeout);
+
+ erts_twheel_set_timer(esdp->timer_wheel,
+ &time_sup.inf.c.parmon.timer,
+ check_time_offset,
+ vesdp,
+ timeout_pos);
+}
+
+static void
+init_check_time_correction(void *vesdp)
{
- last_ct = init_ct = KERNEL_TICKS();
- last_cc = 0;
- init_tv_m = (((Milli) inittv.tv_sec) * 1000) +
- (inittv.tv_usec / 1000);
- ct_wrap = 0;
- correction_supress = 0;
+ ErtsMonotonicDriftData *ddp;
+ ErtsMonotonicTime old_mtime, old_stime, mtime, stime, mtime_diff,
+ stime_diff, smtime_diff, max_drift;
+ int ix;
+
+ ddp = &time_sup.inf.c.parmon.cdata.drift;
+ ix = ddp->ix;
+ old_mtime = ddp->intervals[0].time.mon;
+ old_stime = ddp->intervals[0].time.sys;
+
+ erts_os_times(&mtime, &stime);
+
+ mtime_diff = mtime - old_mtime;
+ stime_diff = stime - old_stime;
+ smtime_diff = stime_diff - mtime_diff;
+
+ max_drift = ERTS_MAX_MONOTONIC_DRIFT;
+ max_drift *= ERTS_MONOTONIC_TO_SEC(mtime_diff);
+
+ if (smtime_diff > time_sup.r.o.drift_adj.error + max_drift
+ || smtime_diff < -1*time_sup.r.o.drift_adj.error - max_drift) {
+ /* Had a system time leap... pretend no drift... */
+ stime_diff = mtime_diff;
+ }
+
+ /*
+ * We use old time values in order to trigger
+ * a drift adjustment, and repeat this interval
+ * in all slots...
+ */
+ for (ix = 0; ix < time_sup.r.o.drift_adj.intervals; ix++) {
+ ddp->intervals[ix].diff.mon = mtime_diff;
+ ddp->intervals[ix].diff.sys = stime_diff;
+ ddp->intervals[ix].time.mon = old_mtime;
+ ddp->intervals[ix].time.sys = old_stime;
+ }
+
+ ddp->acc.sys = stime_diff*time_sup.r.o.drift_adj.intervals;
+ ddp->acc.mon = mtime_diff*time_sup.r.o.drift_adj.intervals;
+ ddp->ix = 0;
+ ddp->dirty_counter = time_sup.r.o.drift_adj.intervals;
+
+ check_time_correction(vesdp);
}
+static ErtsMonotonicTime
+finalize_corrected_time_offset(ErtsSystemTime *stimep)
+{
+ ErtsMonotonicTime os_mtime;
+ ErtsMonotonicCorrectionInstance ci;
+ int os_drift_corrected = time_sup.r.o.os_corrected_monotonic_time;
+
+ erts_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx);
+
+ erts_os_times(&os_mtime, stimep);
+
+ ci = time_sup.inf.c.parmon.cdata.insts.curr;
+
+ erts_rwmtx_runlock(&time_sup.inf.c.parmon.rwmtx);
+
+ if (os_mtime < ci.os_mtime)
+ erts_exit(ERTS_ABORT_EXIT,
+ "OS monotonic time stepped backwards\n");
+
+ return calc_corrected_erl_mtime(os_mtime, &ci, NULL,
+ os_drift_corrected);
+}
-static void get_tolerant_timeofday(SysTimeval *tvp)
+static void
+late_init_time_correction(ErtsSchedulerData *esdp)
{
- clock_t current_ct;
- SysTimeval current_tv;
- Milli ct_diff;
- Milli tv_diff;
- Milli current_correction;
- Milli act_correction; /* long shown to be too small */
- Milli max_adjust;
+ int quick_init_drift_adj;
+ void (*check_func)(void *);
+ ErtsMonotonicTime timeout, timeout_pos;
+
+ quick_init_drift_adj =
+ ERTS_MONOTONIC_TO_USEC(time_sup.r.o.drift_adj.error) == 0;
+
+ if (quick_init_drift_adj)
+ timeout = ERTS_MONOTONIC_TO_MSEC(ERTS_SHORT_TIME_CORRECTION_CHECK/10);
+ else
+ timeout = ERTS_MONOTONIC_TO_MSEC(ERTS_SHORT_TIME_CORRECTION_CHECK);
+
+ if (!time_sup.r.o.os_corrected_monotonic_time)
+ check_func = init_check_time_correction;
+ else if (time_sup.r.o.get_time == get_os_corrected_time) {
+ quick_init_drift_adj = 0;
+ check_func = check_time_offset;
+ }
+ else
+ check_func = check_time_correction;
+
+ timeout_pos = get_timeout_pos(erts_get_monotonic_time(esdp),
+ timeout);
+
+ erts_twheel_init_timer(&time_sup.inf.c.parmon.timer);
+ erts_twheel_set_timer(esdp->timer_wheel,
+ &time_sup.inf.c.parmon.timer,
+ check_func,
+ (quick_init_drift_adj
+ ? NULL
+ : esdp),
+ timeout_pos);
+}
+
+#endif /* ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT */
+
+static ErtsMonotonicTime get_not_corrected_time(void)
+{
+ ErtsMonotonicTime stime, mtime;
+
+ erts_mtx_lock(&erts_get_time_mtx);
+
+ stime = erts_os_system_time();
+
+ mtime = stime - time_sup.inf.c.not_corrected_moffset;
+
+ if (mtime >= time_sup.f.c.last_not_corrected_time)
+ time_sup.f.c.last_not_corrected_time = mtime;
+ else {
+ mtime = time_sup.f.c.last_not_corrected_time;
+
+ if (time_sup.r.o.warp_mode == ERTS_MULTI_TIME_WARP_MODE) {
+ ErtsMonotonicTime new_offset = stime - mtime;
+ new_offset = ERTS_MONOTONIC_TO_USEC(new_offset);
+ new_offset = ERTS_USEC_TO_MONOTONIC(new_offset);
+ if (time_sup.inf.c.not_corrected_moffset != new_offset) {
+ time_sup.inf.c.not_corrected_moffset = new_offset;
+ set_time_offset(new_offset);
+ schedule_send_time_offset_changed_notifications(new_offset);
+ }
+ }
- if (erts_disable_tolerant_timeofday) {
- sys_gettimeofday(tvp);
- return;
}
-#ifdef ERTS_WRAP_SYS_TIMES
-#define TICK_MS (1000 / SYS_CLK_TCK_WRAP)
+ ASSERT(stime == mtime + time_sup.inf.c.not_corrected_moffset);
+
+ erts_mtx_unlock(&erts_get_time_mtx);
+
+ return mtime;
+}
+
+int erts_check_time_adj_support(int time_correction,
+ ErtsTimeWarpMode time_warp_mode)
+{
+ if (!time_correction)
+ return 1;
+
+ /* User wants time correction */
+
+#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+ return !time_sup.r.o.os_monotonic_time_disable;
#else
-#define TICK_MS (1000 / SYS_CLK_TCK)
+ return 0;
#endif
- current_ct = KERNEL_TICKS();
- sys_gettimeofday(&current_tv);
-
- /* I dont know if uptime can move some units backwards
- on some systems, but I allow for small backward
- jumps to avoid such problems if they exist...*/
- if (last_ct > 100 && current_ct < (last_ct - 100)) {
- ct_wrap += ((Sint64) 1) << ((sizeof(clock_t) * 8) - 1);
- }
- last_ct = current_ct;
- ct_diff = ((ct_wrap + current_ct) - init_ct) * TICK_MS;
+}
+
+int
+erts_has_time_correction(void)
+{
+ return time_sup.r.o.correction;
+}
+
+void erts_init_sys_time_sup(void)
+{
+ ErtsSysInitTimeResult sys_init_time_res
+ = ERTS_SYS_INIT_TIME_RESULT_INITER;
+
+ sys_init_time(&sys_init_time_res);
+
+ erts_time_sup__.r.o.monotonic_time_unit
+ = sys_init_time_res.os_monotonic_time_unit;
+
+#ifndef SYS_CLOCK_RESOLUTION
+ erts_time_sup__.r.o.clktck_resolution
+ = sys_init_time_res.sys_clock_resolution;
+ erts_time_sup__.r.o.clktck_resolution *= 1000;
+#endif
+
+#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+ time_sup.r.o.os_monotonic_time_disable
+ = !sys_init_time_res.have_os_monotonic_time;
+ time_sup.r.o.os_corrected_monotonic_time =
+ sys_init_time_res.have_corrected_os_monotonic_time;
+ time_sup.r.o.os_monotonic_time_func
+ = sys_init_time_res.os_monotonic_time_info.func;
+ time_sup.r.o.os_monotonic_time_clock_id
+ = sys_init_time_res.os_monotonic_time_info.clock_id;
+ time_sup.r.o.os_monotonic_time_locked
+ = sys_init_time_res.os_monotonic_time_info.locked_use;
+ time_sup.r.o.os_monotonic_time_resolution
+ = sys_init_time_res.os_monotonic_time_info.resolution;
+ time_sup.r.o.os_monotonic_time_extended
+ = sys_init_time_res.os_monotonic_time_info.extended;
+#endif
+ time_sup.r.o.os_system_time_func
+ = sys_init_time_res.os_system_time_info.func;
+ time_sup.r.o.os_system_time_clock_id
+ = sys_init_time_res.os_system_time_info.clock_id;
+ time_sup.r.o.os_system_time_locked
+ = sys_init_time_res.os_system_time_info.locked_use;
+ time_sup.r.o.os_system_time_resolution
+ = sys_init_time_res.os_system_time_info.resolution;
+}
+
+int
+erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode)
+{
+ ErtsMonotonicTime resolution, ilength, intervals, short_isecs;
+#if !ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT
+ ErtsMonotonicTime abs_native_offset, native_offset;
+#endif
+
+ init_time_napi();
+
+ erts_hl_timer_init();
+
+ ASSERT(ERTS_MONOTONIC_TIME_MIN < ERTS_MONOTONIC_TIME_MAX);
+
+ erts_mtx_init(&erts_get_time_mtx, "get_time", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+ erts_mtx_init(&runtime_prev.data.mtx, "runtime", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+ runtime_prev.data.user = 0;
+ runtime_prev.data.sys = 0;
+
+ time_sup.r.o.correction = time_correction;
+ time_sup.r.o.warp_mode = time_warp_mode;
+
+ if (time_warp_mode == ERTS_SINGLE_TIME_WARP_MODE)
+ erts_atomic32_init_nob(&time_sup.inf.c.preliminary_offset, 1);
+ else
+ erts_atomic32_init_nob(&time_sup.inf.c.preliminary_offset, 0);
+ time_sup.inf.c.shadow_offset = 0;
+
+#if !ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT
/*
- * We will adjust the time in milliseconds and we allow for 1%
- * adjustments, but if this function is called more often then every 100
- * millisecond (which is obviously possible), we will never adjust, so
- * we accumulate small times by setting last_ct_diff iff max_adjust > 0
+ * NOTE! erts_time_sup__.r.o.start *need* to be a multiple
+ * of ERTS_MONOTONIC_TIME_UNIT.
*/
- if ((max_adjust = (ct_diff - last_ct_diff)/100) > 0)
- last_ct_diff = ct_diff;
- tv_diff = ((((Milli) current_tv.tv_sec) * 1000) +
- (current_tv.tv_usec / 1000)) - init_tv_m;
+#ifdef ARCH_32
+ erts_time_sup__.r.o.start = ((((ErtsMonotonicTime) 1) << 32)-1);
+ erts_time_sup__.r.o.start /= ERTS_MONOTONIC_TIME_UNIT;
+ erts_time_sup__.r.o.start *= ERTS_MONOTONIC_TIME_UNIT;
+ erts_time_sup__.r.o.start += ERTS_MONOTONIC_TIME_UNIT;
+ native_offset = erts_time_sup__.r.o.start - ERTS_MONOTONIC_BEGIN;
+ abs_native_offset = native_offset;
+#else /* ARCH_64 */
+ if (ERTS_MONOTONIC_TIME_UNIT <= 10*1000*1000) {
+ erts_time_sup__.r.o.start = 0;
+ native_offset = -ERTS_MONOTONIC_BEGIN;
+ abs_native_offset = ERTS_MONOTONIC_BEGIN;
+ }
+ else {
+ erts_time_sup__.r.o.start = ((ErtsMonotonicTime) MIN_SMALL);
+ erts_time_sup__.r.o.start /= ERTS_MONOTONIC_TIME_UNIT;
+ erts_time_sup__.r.o.start *= ERTS_MONOTONIC_TIME_UNIT;
+ native_offset = erts_time_sup__.r.o.start - ERTS_MONOTONIC_BEGIN;
+ abs_native_offset = -1*native_offset;
+ }
+#endif
- current_correction = ((ct_diff - tv_diff) / TICK_MS) * TICK_MS; /* trunc */
+ erts_time_sup__.r.o.start_offset.native = native_offset;
+ erts_time_sup__.r.o.start_offset.nsec = (ErtsMonotonicTime)
+ erts_time_unit_conversion((Uint64) abs_native_offset,
+ (Uint32) ERTS_MONOTONIC_TIME_UNIT,
+ (Uint32) 1000*1000*1000);
+ erts_time_sup__.r.o.start_offset.usec = (ErtsMonotonicTime)
+ erts_time_unit_conversion((Uint64) abs_native_offset,
+ (Uint32) ERTS_MONOTONIC_TIME_UNIT,
+ (Uint32) 1000*1000);
+ erts_time_sup__.r.o.start_offset.msec = (ErtsMonotonicTime)
+ erts_time_unit_conversion((Uint64) abs_native_offset,
+ (Uint32) ERTS_MONOTONIC_TIME_UNIT,
+ (Uint32) 1000);
+ erts_time_sup__.r.o.start_offset.sec = (ErtsMonotonicTime)
+ erts_time_unit_conversion((Uint64) abs_native_offset,
+ (Uint32) ERTS_MONOTONIC_TIME_UNIT,
+ (Uint32) 1);
+ if (native_offset < 0) {
+ erts_time_sup__.r.o.start_offset.nsec *= -1;
+ erts_time_sup__.r.o.start_offset.usec *= -1;
+ erts_time_sup__.r.o.start_offset.msec *= -1;
+ erts_time_sup__.r.o.start_offset.sec *= -1;
+ }
- /*
- * We allow the current_correction value to wobble a little, as it
- * suffers from the low resolution of the kernel ticks.
- * if it hasn't changed more than one tick in either direction,
- * we will keep the old value.
- */
- if ((last_cc > current_correction + TICK_MS) ||
- (last_cc < current_correction - TICK_MS)) {
- last_cc = current_correction;
- } else {
- current_correction = last_cc;
+#endif
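
[Editor's note: the divide/multiply/add sequence above is the usual round-to-a-multiple idiom, applied here so that start is a multiple of the monotonic time unit. A minimal standalone sketch of the same arithmetic, assuming a hypothetical native unit of 10^9 ticks per second (the real ERTS unit is platform dependent):

/* Standalone sketch of the round-to-a-multiple idiom used above.
   The unit value is an assumption for illustration only. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const int64_t unit = 1000000000;             /* assumed native unit */
    int64_t start = (((int64_t) 1) << 32) - 1;   /* raw candidate value */

    start /= unit;      /* floor to a whole number of units */
    start *= unit;
    start += unit;      /* step up so start stays above the raw value */

    printf("start = %lld (multiple of unit: %d)\n",
           (long long) start, start % unit == 0);
    return 0;
}
]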
+
+ resolution = time_sup.r.o.os_system_time_resolution;
+#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+ if (resolution > time_sup.r.o.os_monotonic_time_resolution)
+ resolution = time_sup.r.o.os_monotonic_time_resolution;
+#endif
+
+ time_sup.r.o.adj.large_diff = erts_time_sup__.r.o.monotonic_time_unit;
+ time_sup.r.o.adj.large_diff *= 50;
+ time_sup.r.o.adj.large_diff /= resolution;
+ if (time_sup.r.o.adj.large_diff < ERTS_USEC_TO_MONOTONIC(500))
+ time_sup.r.o.adj.large_diff = ERTS_USEC_TO_MONOTONIC(500);
+ time_sup.r.o.adj.small_diff = time_sup.r.o.adj.large_diff/10;
+
+ time_sup.r.o.drift_adj.resolution = resolution;
+
+ if (time_sup.r.o.os_corrected_monotonic_time) {
+ time_sup.r.o.drift_adj.use_avg = 0;
+ time_sup.r.o.drift_adj.intervals = 0;
+ time_sup.r.o.drift_adj.error = 0;
+ time_sup.inf.c.parmon.cdata.drift.dirty_counter = -1;
}
-
- /*
- * As time goes, we try to get the actual correction to 0,
- * that is, make erlangs time correspond to the systems dito.
- * The act correction is what we seem to need (current_correction)
- * minus the correction suppression. The correction supression
- * will change slowly (max 1% of elapsed time) but in millisecond steps.
- */
- act_correction = current_correction - correction_supress;
- if (max_adjust > 0) {
+ else {
/*
- * Here we slowly adjust erlangs time to correspond with the
- * system time by changing the correction_supress variable.
- * It can change max_adjust milliseconds which is 1% of elapsed time
+ * Calculate the length of the interval in seconds needed
+ * in order to get an error of at most 1 microsecond.
+ * If this interval is longer than the short time correction
+ * check interval, we use the average of all values instead
+ * of the latest value.
*/
- if (act_correction > 0) {
- if (current_correction - correction_supress > max_adjust) {
- correction_supress += max_adjust;
- } else {
- correction_supress = current_correction;
- }
- act_correction = current_correction - correction_supress;
- } else if (act_correction < 0) {
- if (correction_supress - current_correction > max_adjust) {
- correction_supress -= max_adjust;
- } else {
- correction_supress = current_correction;
- }
- act_correction = current_correction - correction_supress;
+ short_isecs = ERTS_MONOTONIC_TO_SEC(ERTS_SHORT_TIME_CORRECTION_CHECK);
+ ilength = ERTS_ASSUMED_PRECISION_DROP * ERTS_MONOTONIC_TIME_UNIT;
+ ilength /= (resolution * ERTS_USEC_TO_MONOTONIC(1));
+ time_sup.r.o.drift_adj.use_avg = ilength > short_isecs;
+
+ if (ilength == 0)
+ intervals = 5;
+ else {
+ intervals = ilength / short_isecs;
+ if (intervals > ERTS_MAX_DRIFT_INTERVALS)
+ intervals = ERTS_MAX_DRIFT_INTERVALS;
+ else if (intervals < 5)
+ intervals = 5;
}
+ time_sup.r.o.drift_adj.intervals = (int) intervals;
+
+ /*
+ * drift_adj.error equals the maximum assumed error
+ * over a short time interval. We also use this value
+ * when examining a large interval, where the
+ * error will be smaller, but we do not want to
+ * recalculate this over and over again.
+ */
+
+ time_sup.r.o.drift_adj.error = ERTS_MONOTONIC_TIME_UNIT;
+ time_sup.r.o.drift_adj.error *= ERTS_ASSUMED_PRECISION_DROP;
+ time_sup.r.o.drift_adj.error /= resolution * short_isecs;
}
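
[Editor's note: a worked sketch of the interval-length calculation above, under assumed constants (a precision drop of 2, a 10^9 ticks/second native unit, a 100 Hz OS clock, and a 100 s short check interval; the real ERTS constants may differ):

/* Worked sketch of the drift-interval computation; all constants here
   are illustrative assumptions, not the actual ERTS values. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const int64_t precision_drop = 2;            /* assumed */
    const int64_t unit = 1000000000;             /* native ticks/second, assumed */
    const int64_t resolution = 100;              /* OS clock updates/second, assumed */
    const int64_t usec_native = unit / 1000000;  /* native ticks per microsecond */
    const int64_t short_isecs = 100;             /* short check interval (s), assumed */

    /* Interval length (s) needed for <= 1 us error at this resolution. */
    int64_t ilength = precision_drop * unit / (resolution * usec_native);
    int use_avg = ilength > short_isecs;

    printf("ilength = %lld s, use_avg = %d\n", (long long) ilength, use_avg);
    return 0;
}
]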
- /*
- * The actual correction will correct the timeval so that system
- * time warps gets smothed down.
- */
- current_tv.tv_sec += act_correction / 1000;
- current_tv.tv_usec += (act_correction % 1000) * 1000;
-
- if (current_tv.tv_usec >= 1000000) {
- ++current_tv.tv_sec ;
- current_tv.tv_usec -= 1000000;
- } else if (current_tv.tv_usec < 0) {
- --current_tv.tv_sec;
- current_tv.tv_usec += 1000000;
+#ifdef ERTS_TIME_CORRECTION_PRINT
+ erts_fprintf(stderr, "resolution = %b64d\n", resolution);
+ erts_fprintf(stderr, "adj large diff = %b64d usec\n",
+ ERTS_MONOTONIC_TO_USEC(time_sup.r.o.adj.large_diff));
+ erts_fprintf(stderr, "adj small diff = %b64d usec\n",
+ ERTS_MONOTONIC_TO_USEC(time_sup.r.o.adj.small_diff));
+ if (!time_sup.r.o.os_corrected_monotonic_time) {
+ erts_fprintf(stderr, "drift intervals = %d\n",
+ time_sup.r.o.drift_adj.intervals);
+ erts_fprintf(stderr, "drift adj error = %b64d usec\n",
+ ERTS_MONOTONIC_TO_USEC(time_sup.r.o.drift_adj.error));
+ erts_fprintf(stderr, "drift adj max diff = %b64d nsec\n",
+ ERTS_MONOTONIC_TO_NSEC(ERTS_TIME_DRIFT_MAX_ADJ_DIFF));
+ erts_fprintf(stderr, "drift adj min diff = %b64d nsec\n",
+ ERTS_MONOTONIC_TO_NSEC(ERTS_TIME_DRIFT_MIN_ADJ_DIFF));
}
- *tvp = current_tv;
-#undef TICK_MS
-}
+#endif
-#endif /* CORRECT_USING_TIMES */
-#endif /* !HAVE_GETHRTIME */
+ if (ERTS_MONOTONIC_TIME_UNIT < ERTS_CLKTCK_RESOLUTION)
+ ERTS_INTERNAL_ERROR("Too small monotonic time time unit");
-/*
-** Why this? Well, most platforms have a constant clock resolution of 1,
-** we dont want the deliver_time/time_remaining routines to waste
-** time dividing and multiplying by/with a variable that's always one.
-** so the return value of sys_init_time is ignored on those platforms.
-*/
-
-#ifndef SYS_CLOCK_RESOLUTION
-static int clock_resolution;
-#define CLOCK_RESOLUTION clock_resolution
+#ifndef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+ time_sup.r.o.correction = 0;
#else
-#define CLOCK_RESOLUTION SYS_CLOCK_RESOLUTION
+ if (time_sup.r.o.os_monotonic_time_disable)
+ time_sup.r.o.correction = 0;
+
+ if (time_sup.r.o.correction) {
+ ErtsMonotonicCorrectionData *cdatap;
+ erts_rwmtx_opt_t rwmtx_opts = ERTS_RWMTX_OPT_DEFAULT_INITER;
+ ErtsMonotonicTime offset;
+ erts_os_times(&time_sup.inf.c.minit,
+ &time_sup.inf.c.sinit);
+ time_sup.r.o.moffset = -1*time_sup.inf.c.minit;
+ time_sup.r.o.moffset += ERTS_MONOTONIC_BEGIN;
+ offset = time_sup.inf.c.sinit;
+ offset -= ERTS_MONOTONIC_BEGIN;
+ init_time_offset(offset);
+
+ rwmtx_opts.type = ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
+ rwmtx_opts.lived = ERTS_RWMTX_LONG_LIVED;
+
+ erts_rwmtx_init_opt(&time_sup.inf.c.parmon.rwmtx, &rwmtx_opts,
+ "get_corrected_time", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+
+ cdatap = &time_sup.inf.c.parmon.cdata;
+
+ cdatap->drift.intervals[0].time.sys = time_sup.inf.c.sinit;
+ cdatap->drift.intervals[0].time.mon = time_sup.inf.c.minit;
+ cdatap->insts.curr.correction.drift = 0;
+ cdatap->insts.curr.correction.error = 0;
+ cdatap->insts.curr.erl_mtime = ERTS_MONOTONIC_BEGIN;
+ cdatap->insts.curr.os_mtime = time_sup.inf.c.minit;
+ cdatap->last_check = time_sup.inf.c.minit;
+ cdatap->short_check_interval = ERTS_INIT_SHORT_INTERVAL_COUNTER;
+ cdatap->insts.prev = cdatap->insts.curr;
+
+ if (!time_sup.r.o.os_corrected_monotonic_time)
+ time_sup.r.o.get_time = get_corrected_time;
+ else if (time_sup.r.o.warp_mode == ERTS_MULTI_TIME_WARP_MODE)
+ time_sup.r.o.get_time = get_os_corrected_time;
+ else
+ time_sup.r.o.get_time = get_os_drift_corrected_time;
+ }
+ else
#endif
+ {
+ ErtsMonotonicTime stime, offset;
+ time_sup.r.o.get_time = get_not_corrected_time;
+ stime = time_sup.inf.c.sinit = erts_os_system_time();
+ offset = stime - ERTS_MONOTONIC_BEGIN;
+ time_sup.inf.c.not_corrected_moffset = offset;
+ init_time_offset(offset);
+ time_sup.f.c.last_not_corrected_time = 0;
+ }
-/*
-** The clock resolution should really be the resolution of the
-** time function in use, which on most platforms
-** is 1. On VxWorks the resolution should be
-** the number of ticks per second (or 1, which would work nicely to).
-**
-** Setting lower resolutions is mostly interesting when timers are used
-** instead of something like select.
-*/
+ erts_atomic64_init_nob(&wall_clock_prev.time,
+ (erts_aint64_t) 0);
+
+ erts_atomic64_init_nob(
+ &now_prev.time,
+ (erts_aint64_t) ERTS_MONOTONIC_TO_USEC(get_time_offset()));
+
+
+#ifdef DEBUG
+ time_sup_initialized = 1;
+#endif
-static SysTimeval last_delivered;
+ return ERTS_CLKTCK_RESOLUTION/1000;
+}
-static void init_erts_deliver_time(const SysTimeval *inittv)
+void
+erts_late_init_time_sup(void)
{
- /* We set the initial values for deliver_time here */
- last_delivered = *inittv;
- last_delivered.tv_usec = 1000 * (last_delivered.tv_usec / 1000);
- /* ms resolution */
+ erts_late_sys_init_time();
}
-static void do_erts_deliver_time(const SysTimeval *current)
+void
+erts_sched_init_time_sup(ErtsSchedulerData *esdp)
{
- SysTimeval cur_time;
- erts_time_t elapsed;
-
- /* calculate and deliver appropriate number of ticks */
- cur_time = *current;
- cur_time.tv_usec = 1000 * (cur_time.tv_usec / 1000); /* ms resolution */
- elapsed = (1000 * (cur_time.tv_sec - last_delivered.tv_sec) +
- (cur_time.tv_usec - last_delivered.tv_usec) / 1000) /
- CLOCK_RESOLUTION;
-
- /* Sometimes the time jump backwards,
- resulting in a negative elapsed time. We compensate for
- this by simply pretend as if the time stood still. :) */
-
- if (elapsed > 0) {
+ esdp->timer_wheel = erts_create_timer_wheel(esdp);
+ esdp->next_tmo_ref = erts_get_next_timeout_reference(esdp->timer_wheel);
+ esdp->timer_service = erts_create_timer_service();
+#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+ if (esdp->no == 1) {
+ /* A timer wheel to use must have been initialized */
+ if (time_sup.r.o.get_time != get_not_corrected_time)
+ late_init_time_correction(esdp);
+ }
+#endif
+}
- ASSERT(elapsed < ((erts_time_t) ERTS_SHORT_TIME_T_MAX));
+ErtsTimeWarpMode erts_time_warp_mode(void)
+{
+ return time_sup.r.o.warp_mode;
+}
- erts_do_time_add((erts_short_time_t) elapsed);
- last_delivered = cur_time;
+ErtsTimeOffsetState erts_time_offset_state(void)
+{
+ switch (time_sup.r.o.warp_mode) {
+ case ERTS_NO_TIME_WARP_MODE:
+ return ERTS_TIME_OFFSET_FINAL;
+ case ERTS_SINGLE_TIME_WARP_MODE:
+ if (erts_atomic32_read_nob(&time_sup.inf.c.preliminary_offset))
+ return ERTS_TIME_OFFSET_PRELIMINARY;
+ return ERTS_TIME_OFFSET_FINAL;
+ case ERTS_MULTI_TIME_WARP_MODE:
+ return ERTS_TIME_OFFSET_VOLATILE;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid time warp mode");
+ return ERTS_TIME_OFFSET_VOLATILE;
}
}
-int
-erts_init_time_sup(void)
+/*
+ * erts_finalize_time_offset() will only change time offset
+ * the first time it is called when the emulator has been
+ * started in "single time warp" mode. Returns previous
+ * state:
+ * * ERTS_TIME_OFFSET_PRELIMINARY - Finalization performed
+ * * ERTS_TIME_OFFSET_FINAL - Already finalized; nothing changed
+ * * ERTS_TIME_OFFSET_VOLATILE - Finalization not supported
+ * (multi time warp mode)
+ */
+
+ErtsTimeOffsetState
+erts_finalize_time_offset(void)
{
- erts_smp_mtx_init(&erts_timeofday_mtx, "timeofday");
+ switch (time_sup.r.o.warp_mode) {
+ case ERTS_NO_TIME_WARP_MODE:
+ return ERTS_TIME_OFFSET_FINAL;
+ case ERTS_MULTI_TIME_WARP_MODE:
+ return ERTS_TIME_OFFSET_VOLATILE;
+ case ERTS_SINGLE_TIME_WARP_MODE: {
+ ErtsTimeOffsetState res = ERTS_TIME_OFFSET_FINAL;
- init_approx_time();
+ erts_mtx_lock(&erts_get_time_mtx);
- last_emu_time.tv_sec = 0;
- last_emu_time.tv_usec = 0;
+ if (erts_atomic32_read_nob(&time_sup.inf.c.preliminary_offset)) {
+ ErtsMonotonicTime mtime, new_offset;
-#ifndef SYS_CLOCK_RESOLUTION
- clock_resolution = sys_init_time();
-#else
- (void) sys_init_time();
+#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+ if (!time_sup.r.o.correction)
#endif
- sys_gettimeofday(&inittv);
-
-#ifdef HAVE_GETHRTIME
- sys_init_hrtime();
+ {
+ ErtsMonotonicTime stime = erts_os_system_time();
+
+ mtime = stime - time_sup.inf.c.not_corrected_moffset;
+
+ if (mtime >= time_sup.f.c.last_not_corrected_time) {
+ time_sup.f.c.last_not_corrected_time = mtime;
+ new_offset = time_sup.inf.c.not_corrected_moffset;
+ }
+ else {
+ mtime = time_sup.f.c.last_not_corrected_time;
+
+ ASSERT(time_sup.inf.c.not_corrected_moffset != stime - mtime);
+ new_offset = stime - mtime;
+ time_sup.inf.c.not_corrected_moffset = new_offset;
+ }
+
+ }
+#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+ else {
+ ErtsSystemTime stime;
+ mtime = finalize_corrected_time_offset(&stime);
+ new_offset = stime - mtime;
+ }
#endif
- init_tolerant_timeofday();
+ new_offset = ERTS_MONOTONIC_TO_USEC(new_offset);
+ new_offset = ERTS_USEC_TO_MONOTONIC(new_offset);
+
+ set_time_offset(new_offset);
+ schedule_send_time_offset_changed_notifications(new_offset);
- init_erts_deliver_time(&inittv);
- gtv = inittv;
- then.tv_sec = then.tv_usec = 0;
+ erts_atomic32_set_nob(&time_sup.inf.c.preliminary_offset, 0);
+ res = ERTS_TIME_OFFSET_PRELIMINARY;
+ }
- erts_deliver_time();
+ erts_mtx_unlock(&erts_get_time_mtx);
+
+ return res;
+ }
+ default:
+ ERTS_INTERNAL_ERROR("Invalid time warp mode");
+ return ERTS_TIME_OFFSET_VOLATILE;
+ }
+}
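
[Editor's note: the two conversions near the end of the finalization above (native to microseconds and back) exist to truncate the new offset to a whole number of microseconds. A minimal sketch of that truncation, assuming a hypothetical 10^9 ticks/second unit:

/* Sketch of the truncation step above: converting to whole microseconds
   and back drops any sub-microsecond part. Unit is assumed. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    const int64_t unit = 1000000000;        /* native ticks/second, assumed */
    const int64_t per_usec = unit / 1000000;
    int64_t offset = 123456789;             /* arbitrary native-tick offset */

    int64_t usec = offset / per_usec;       /* like ERTS_MONOTONIC_TO_USEC */
    int64_t truncated = usec * per_usec;    /* like ERTS_USEC_TO_MONOTONIC */

    assert(truncated % per_usec == 0);
    assert(offset - truncated < per_usec);  /* at most one microsecond lost */
    return 0;
}
]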
- return CLOCK_RESOLUTION;
-}
/* info functions */
void
-elapsed_time_both(UWord *ms_user, UWord *ms_sys,
- UWord *ms_user_diff, UWord *ms_sys_diff)
+erts_runtime_elapsed_both(ErtsMonotonicTime *ms_user, ErtsMonotonicTime *ms_sys,
+ ErtsMonotonicTime *ms_user_diff, ErtsMonotonicTime *ms_sys_diff)
{
- UWord prev_total_user, prev_total_sys;
- UWord total_user, total_sys;
+ ErtsMonotonicTime prev_user, prev_sys, user, sys;
+
+#ifdef HAVE_GETRUSAGE
+
+ struct rusage now;
+
+ if (getrusage(RUSAGE_SELF, &now) != 0) {
+ erts_exit(ERTS_ABORT_EXIT, "getrusage(RUSAGE_SELF, _) failed: %d\n", errno);
+ return;
+ }
+
+ user = (ErtsMonotonicTime) now.ru_utime.tv_sec;
+ user *= (ErtsMonotonicTime) 1000000;
+ user += (ErtsMonotonicTime) now.ru_utime.tv_usec;
+ user /= (ErtsMonotonicTime) 1000;
+
+ sys = (ErtsMonotonicTime) now.ru_stime.tv_sec;
+ sys *= (ErtsMonotonicTime) 1000000;
+ sys += (ErtsMonotonicTime) now.ru_stime.tv_usec;
+ sys /= (ErtsMonotonicTime) 1000;
+
+#else
+
SysTimes now;
sys_times(&now);
- total_user = (now.tms_utime * 1000) / SYS_CLK_TCK;
- total_sys = (now.tms_stime * 1000) / SYS_CLK_TCK;
+ user = (ErtsMonotonicTime) now.tms_utime;
+ user *= (ErtsMonotonicTime) 1000;
+ user /= (ErtsMonotonicTime) SYS_CLK_TCK;
- if (ms_user != NULL)
- *ms_user = total_user;
- if (ms_sys != NULL)
- *ms_sys = total_sys;
+ sys = (ErtsMonotonicTime) now.tms_stime;
+ sys *= (ErtsMonotonicTime) 1000;
+ sys /= (ErtsMonotonicTime) SYS_CLK_TCK;
- erts_smp_mtx_lock(&erts_timeofday_mtx);
-
- prev_total_user = (t_start.tms_utime * 1000) / SYS_CLK_TCK;
- prev_total_sys = (t_start.tms_stime * 1000) / SYS_CLK_TCK;
- t_start = now;
-
- erts_smp_mtx_unlock(&erts_timeofday_mtx);
+#endif
+
+ if (ms_user)
+ *ms_user = user;
+ if (ms_sys)
+ *ms_sys = sys;
+
+ if (ms_user_diff || ms_sys_diff) {
+
+ erts_mtx_lock(&runtime_prev.data.mtx);
+
+ prev_user = runtime_prev.data.user;
+ prev_sys = runtime_prev.data.sys;
+ runtime_prev.data.user = user;
+ runtime_prev.data.sys = sys;
- if (ms_user_diff != NULL)
- *ms_user_diff = total_user - prev_total_user;
-
- if (ms_sys_diff != NULL)
- *ms_sys_diff = total_sys - prev_total_sys;
+ erts_mtx_unlock(&runtime_prev.data.mtx);
+
+ if (ms_user_diff)
+ *ms_user_diff = user - prev_user;
+ if (ms_sys_diff)
+ *ms_sys_diff = sys - prev_sys;
+ }
}
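
[Editor's note: for readers unfamiliar with getrusage(), the branch above boils down to the following self-contained POSIX sketch; milliseconds are computed via whole microseconds so the seconds and microseconds fields are not rounded separately:

/* Standalone sketch of the getrusage() branch above (POSIX only). */
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
    struct rusage ru;
    if (getrusage(RUSAGE_SELF, &ru) != 0)
        return 1;

    long long user_us = (long long) ru.ru_utime.tv_sec * 1000000
                        + ru.ru_utime.tv_usec;
    long long sys_us  = (long long) ru.ru_stime.tv_sec * 1000000
                        + ru.ru_stime.tv_usec;

    printf("user = %lld ms, sys = %lld ms\n",
           user_us / 1000, sys_us / 1000);
    return 0;
}
]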
/* wall clock routines */
void
-wall_clock_elapsed_time_both(UWord *ms_total, UWord *ms_diff)
+erts_wall_clock_elapsed_both(ErtsMonotonicTime *ms_total, ErtsMonotonicTime *ms_diff)
{
- UWord prev_total;
- SysTimeval tv;
+ ErtsMonotonicTime now, elapsed;
- erts_smp_mtx_lock(&erts_timeofday_mtx);
+ now = time_sup.r.o.get_time();
+ update_last_mtime(NULL, now);
- get_tolerant_timeofday(&tv);
+ elapsed = ERTS_MONOTONIC_TO_MSEC(now);
+ elapsed -= ERTS_MONOTONIC_TO_MSEC(ERTS_MONOTONIC_BEGIN);
- *ms_total = 1000 * (tv.tv_sec - inittv.tv_sec) +
- (tv.tv_usec - inittv.tv_usec) / 1000;
+ *ms_total = elapsed;
- prev_total = 1000 * (gtv.tv_sec - inittv.tv_sec) +
- (gtv.tv_usec - inittv.tv_usec) / 1000;
- *ms_diff = *ms_total - prev_total;
- gtv = tv;
+ if (ms_diff) {
+ ErtsMonotonicTime prev;
- /* must sync the machine's idea of time here */
- do_erts_deliver_time(&tv);
+ prev = ((ErtsMonotonicTime)
+ erts_atomic64_xchg_mb(&wall_clock_prev.time,
+ (erts_aint64_t) elapsed));
- erts_smp_mtx_unlock(&erts_timeofday_mtx);
+ *ms_diff = elapsed - prev;
+ }
}
/* get current time */
@@ -680,7 +1562,7 @@ static time_t gregday(int year, int month, int day)
pyear = gyear - 1;
ndays = (pyear/4) - (pyear/100) + (pyear/400) + pyear*365 + 366;
}
- /* number of days in all months preceeding month */
+ /* number of days in all months preceding month */
for (m = 1; m < month; m++)
ndays += mdays[m];
/* Extra day if leap year and March or later */
@@ -890,146 +1772,754 @@ univ_to_local(Sint *year, Sint *month, Sint *day,
return 0;
}
-
/* get a timestamp */
void
get_now(Uint* megasec, Uint* sec, Uint* microsec)
{
- SysTimeval now;
+ ErtsMonotonicTime now_megasec, now_sec, now, prev, mtime, time_offset;
- erts_smp_mtx_lock(&erts_timeofday_mtx);
-
- get_tolerant_timeofday(&now);
- do_erts_deliver_time(&now);
-
- /* Make sure time is later than last */
- if (then.tv_sec > now.tv_sec ||
- (then.tv_sec == now.tv_sec && then.tv_usec >= now.tv_usec)) {
- now = then;
- now.tv_usec++;
- }
- /* Check for carry from above + general reasonability */
- if (now.tv_usec >= 1000000) {
- now.tv_usec = 0;
- now.tv_sec++;
+ mtime = time_sup.r.o.get_time();
+ time_offset = get_time_offset();
+ update_last_mtime(NULL, mtime);
+ now = ERTS_MONOTONIC_TO_USEC(mtime + time_offset);
+
+ /* Make sure the time we return is later than the previous one */
+ prev = erts_atomic64_read_nob(&now_prev.time);
+ while (1) {
+ ErtsMonotonicTime act;
+ if (now <= prev)
+ now = prev + 1;
+ act = ((ErtsMonotonicTime)
+ erts_atomic64_cmpxchg_mb(&now_prev.time,
+ (erts_aint64_t) now,
+ (erts_aint64_t) prev));
+ if (act == prev)
+ break;
+ prev = act;
}
- then = now;
-
- erts_smp_mtx_unlock(&erts_timeofday_mtx);
-
- *megasec = (Uint) (now.tv_sec / 1000000);
- *sec = (Uint) (now.tv_sec % 1000000);
- *microsec = (Uint) (now.tv_usec);
- update_approx_time(&now);
+ now_megasec = now / ERTS_MONOTONIC_TIME_TERA;
+ now_sec = now / ERTS_MONOTONIC_TIME_MEGA;
+ *megasec = (Uint) now_megasec;
+ *sec = (Uint) (now_sec - now_megasec*ERTS_MONOTONIC_TIME_MEGA);
+ *microsec = (Uint) (now - now_sec*ERTS_MONOTONIC_TIME_MEGA);
+
+ ASSERT(((ErtsMonotonicTime) *megasec)*ERTS_MONOTONIC_TIME_TERA
+ + ((ErtsMonotonicTime) *sec)*ERTS_MONOTONIC_TIME_MEGA
+ + ((ErtsMonotonicTime) *microsec) == now);
+}
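
[Editor's note: the retry loop above is a standard compare-and-exchange idiom for handing out strictly increasing values. A minimal standalone sketch using C11 atomics instead of the erts_atomic64 API; unique_now() is a hypothetical stand-in:

/* Sketch of the strictly-increasing-now idiom, with C11 atomics. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic int64_t prev_now;

static int64_t unique_now(int64_t now)
{
    int64_t prev = atomic_load(&prev_now);
    for (;;) {
        if (now <= prev)
            now = prev + 1;   /* bump past the last value handed out */
        /* On failure, prev is reloaded with the current value. */
        if (atomic_compare_exchange_strong(&prev_now, &prev, now))
            return now;
    }
}

int main(void)
{
    printf("%lld\n", (long long) unique_now(100)); /* 100 */
    printf("%lld\n", (long long) unique_now(100)); /* bumped to 101 */
    return 0;
}
]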
+
+ErtsMonotonicTime
+erts_get_monotonic_time(ErtsSchedulerData *esdp)
+{
+ ErtsMonotonicTime mtime = time_sup.r.o.get_time();
+ update_last_mtime(esdp, mtime);
+ return mtime;
+}
+
+ErtsMonotonicTime
+erts_get_time_offset(void)
+{
+ return get_time_offset();
+}
+
+static ERTS_INLINE void
+make_timestamp_value(Uint* megasec, Uint* sec, Uint* microsec,
+ ErtsMonotonicTime mtime, ErtsMonotonicTime offset)
+{
+ ErtsMonotonicTime stime, as;
+ Uint ms;
+
+ stime = ERTS_MONOTONIC_TO_USEC(mtime + offset);
+
+ as = stime / ERTS_MONOTONIC_TIME_MEGA;
+ *megasec = ms = (Uint) (stime / ERTS_MONOTONIC_TIME_TERA);
+ *sec = (Uint) (as - (((ErtsMonotonicTime) ms)
+ * ERTS_MONOTONIC_TIME_MEGA));
+ *microsec = (Uint) (stime - as*ERTS_MONOTONIC_TIME_MEGA);
+
+ ASSERT(((ErtsMonotonicTime) ms)*ERTS_MONOTONIC_TIME_TERA
+ + ((ErtsMonotonicTime) *sec)*ERTS_MONOTONIC_TIME_MEGA
+ + *microsec == stime);
+}
+
+void
+erts_make_timestamp_value(Uint* megasec, Uint* sec, Uint* microsec,
+ ErtsMonotonicTime mtime, ErtsMonotonicTime offset)
+{
+ make_timestamp_value(megasec, sec, microsec, mtime, offset);
}
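
[Editor's note: a self-contained sketch of the decomposition performed by make_timestamp_value(): a microsecond wall-clock value split into {MegaSecs, Secs, MicroSecs}, with the same reconstruction check as the ASSERT above:

/* Splitting a microsecond timestamp, erlang:now/0 style. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    int64_t stime = 1500000123456789LL;       /* microseconds, arbitrary */

    int64_t as   = stime / 1000000;           /* whole seconds */
    int64_t mega = stime / 1000000000000LL;   /* whole megaseconds */
    int64_t sec  = as - mega * 1000000;
    int64_t usec = stime - as * 1000000;

    assert(mega * 1000000000000LL + sec * 1000000 + usec == stime);
    assert(mega == 1500 && sec == 123 && usec == 456789);
    return 0;
}
]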
void
get_sys_now(Uint* megasec, Uint* sec, Uint* microsec)
{
- SysTimeval now;
-
- sys_gettimeofday(&now);
-
- *megasec = (Uint) (now.tv_sec / 1000000);
- *sec = (Uint) (now.tv_sec % 1000000);
- *microsec = (Uint) (now.tv_usec);
+ ErtsSystemTime stime = erts_os_system_time();
+ ErtsSystemTime ms, s, us;
+
+ us = ERTS_MONOTONIC_TO_USEC(stime);
+ s = us / (1000*1000);
+ ms = s / (1000*1000);
- update_approx_time(&now);
+ *megasec = (Uint) ms;
+ *sec = (Uint) (s - ms*(1000*1000));
+ *microsec = (Uint) (us - s*(1000*1000));
}
+#ifdef HAVE_ERTS_NOW_CPU
+void erts_get_now_cpu(Uint* megasec, Uint* sec, Uint* microsec) {
+ SysCpuTime t;
+ SysTimespec tp;
-/* deliver elapsed *ticks* to the machine - takes a pointer
- to a struct timeval representing current time (to save
- a gettimeofday() where possible) or NULL */
+ sys_get_proc_cputime(t, tp);
+ *microsec = (Uint)(tp.tv_nsec / 1000);
+ t = (tp.tv_sec / 1000000);
+ *megasec = (Uint)(t % 1000000);
+ *sec = (Uint)(tp.tv_sec % 1000000);
+}
+#endif
-void erts_deliver_time(void) {
- SysTimeval now;
-
- erts_smp_mtx_lock(&erts_timeofday_mtx);
-
- get_tolerant_timeofday(&now);
- do_erts_deliver_time(&now);
-
- erts_smp_mtx_unlock(&erts_timeofday_mtx);
+#include "big.h"
- update_approx_time(&now);
+void
+erts_monitor_time_offset(Eterm id, Eterm ref)
+{
+ erts_mtx_lock(&erts_get_time_mtx);
+ erts_add_monitor(&time_offset_monitors, MON_TIME_OFFSET, ref, id, NIL);
+ no_time_offset_monitors++;
+ erts_mtx_unlock(&erts_get_time_mtx);
}
-/* get *real* time (not ticks) remaining until next timeout - if there
- isn't one, give a "long" time, that is guaranteed
- to not cause overflow when we report elapsed time later on */
+int
+erts_demonitor_time_offset(Eterm ref)
+{
+ int res;
+ ErtsMonitor *mon;
+ ASSERT(is_internal_ref(ref));
+ erts_mtx_lock(&erts_get_time_mtx);
+ if (is_internal_ordinary_ref(ref))
+ mon = erts_remove_monitor(&time_offset_monitors, ref);
+ else
+ mon = NULL;
+ if (!mon)
+ res = 0;
+ else {
+ ASSERT(no_time_offset_monitors > 0);
+ no_time_offset_monitors--;
+ res = 1;
+ }
+ erts_mtx_unlock(&erts_get_time_mtx);
+ if (res)
+ erts_destroy_monitor(mon);
+ return res;
+}
+
+typedef struct {
+ Eterm pid;
+ Eterm ref;
+ Eterm heap[ERTS_REF_THING_SIZE];
+} ErtsTimeOffsetMonitorInfo;
-void erts_time_remaining(SysTimeval *rem_time)
+typedef struct {
+ Uint ix;
+ ErtsTimeOffsetMonitorInfo *to_mon_info;
+} ErtsTimeOffsetMonitorContext;
+
+static void
+save_time_offset_monitor(ErtsMonitor *mon, void *vcntxt)
{
- erts_time_t ticks;
- SysTimeval cur_time;
- erts_time_t elapsed;
+ ErtsTimeOffsetMonitorContext *cntxt;
+ Eterm *from_hp, *to_hp;
+ Uint mix;
+ int hix;
- /* erts_next_time() returns no of ticks to next timeout or -1 if none */
+ cntxt = (ErtsTimeOffsetMonitorContext *) vcntxt;
+ mix = (cntxt->ix)++;
+ cntxt->to_mon_info[mix].pid = mon->u.pid;
+ to_hp = &cntxt->to_mon_info[mix].heap[0];
- ticks = (erts_time_t) erts_next_time();
- if (ticks == (erts_time_t) -1) {
- /* timer queue empty */
- /* this will cause at most 100000000 ticks */
- rem_time->tv_sec = 100000;
- rem_time->tv_usec = 0;
- } else {
- /* next timeout after ticks ticks */
- ticks *= CLOCK_RESOLUTION;
-
- erts_smp_mtx_lock(&erts_timeofday_mtx);
-
- get_tolerant_timeofday(&cur_time);
- cur_time.tv_usec = 1000 *
- (cur_time.tv_usec / 1000);/* ms resolution*/
- elapsed = 1000 * (cur_time.tv_sec - last_delivered.tv_sec) +
- (cur_time.tv_usec - last_delivered.tv_usec) / 1000;
-
- erts_smp_mtx_unlock(&erts_timeofday_mtx);
+ ASSERT(is_internal_ordinary_ref(mon->ref));
+ from_hp = internal_ref_val(mon->ref);
+ ASSERT(thing_arityval(*from_hp) + 1 == ERTS_REF_THING_SIZE);
+
+ for (hix = 0; hix < ERTS_REF_THING_SIZE; hix++)
+ to_hp[hix] = from_hp[hix];
+
+ cntxt->to_mon_info[mix].ref
+ = make_internal_ref(&cntxt->to_mon_info[mix].heap[0]);
+
+}
+
+static void
+send_time_offset_changed_notifications(void *new_offsetp)
+{
+ ErtsMonotonicTime new_offset;
+ ErtsTimeOffsetMonitorInfo *to_mon_info = NULL; /* Shut up faulty warning */
+ Uint no_monitors;
+ char *tmp = NULL;
+
+#ifdef ARCH_64
+ new_offset = (ErtsMonotonicTime) new_offsetp;
+#else
+ new_offset = *((ErtsMonotonicTime *) new_offsetp);
+ erts_free(ERTS_ALC_T_NEW_TIME_OFFSET, new_offsetp);
+#endif
+ new_offset -= ERTS_MONOTONIC_OFFSET_NATIVE;
+
+ erts_mtx_lock(&erts_get_time_mtx);
+
+ no_monitors = no_time_offset_monitors;
+ if (no_monitors) {
+ ErtsTimeOffsetMonitorContext cntxt;
+ Uint alloc_sz;
- if (ticks <= elapsed) { /* Ooops, better hurry */
- rem_time->tv_sec = rem_time->tv_usec = 0;
- return;
+ /* Monitor info array size */
+ alloc_sz = no_monitors*sizeof(ErtsTimeOffsetMonitorInfo);
+ /* + template max size */
+ alloc_sz += 6*sizeof(Eterm); /* 5-tuple */
+ alloc_sz += ERTS_MAX_SINT64_HEAP_SIZE*sizeof(Eterm); /* max offset size */
+ tmp = erts_alloc(ERTS_ALC_T_TMP, alloc_sz);
+
+ to_mon_info = (ErtsTimeOffsetMonitorInfo *) tmp;
+ cntxt.ix = 0;
+ cntxt.to_mon_info = to_mon_info;
+
+ erts_doforall_monitors(time_offset_monitors,
+ save_time_offset_monitor,
+ &cntxt);
+
+ ASSERT(cntxt.ix == no_monitors);
+ }
+
+ erts_mtx_unlock(&erts_get_time_mtx);
+
+ if (no_monitors) {
+ Eterm *hp, *patch_refp, new_offset_term, message_template;
+ Uint mix, hsz;
+
+ /* Make message template */
+
+ hp = (Eterm *) (tmp + no_monitors*sizeof(ErtsTimeOffsetMonitorInfo));
+
+ hsz = 6; /* 5-tuple */
+ hsz += ERTS_REF_THING_SIZE;
+ hsz += ERTS_SINT64_HEAP_SIZE(new_offset);
+
+ if (IS_SSMALL(new_offset))
+ new_offset_term = make_small(new_offset);
+ else
+ new_offset_term = erts_sint64_to_big(new_offset, &hp);
+ message_template = TUPLE5(hp,
+ am_CHANGE,
+ THE_NON_VALUE, /* Patch point for ref */
+ am_time_offset,
+ am_clock_service,
+ new_offset_term);
+ patch_refp = &hp[2];
+
+ ASSERT(*patch_refp == THE_NON_VALUE);
+
+ for (mix = 0; mix < no_monitors; mix++) {
+ Process *rp = erts_proc_lookup(to_mon_info[mix].pid);
+ if (rp) {
+ Eterm ref = to_mon_info[mix].ref;
+ ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK;
+ erts_proc_lock(rp, ERTS_PROC_LOCK_LINK);
+ if (erts_lookup_monitor(ERTS_P_MONITORS(rp), ref)) {
+ ErtsMessage *mp;
+ ErlOffHeap *ohp;
+ Eterm message;
+
+ mp = erts_alloc_message_heap(rp, &rp_locks,
+ hsz, &hp, &ohp);
+ *patch_refp = ref;
+ ASSERT(hsz == size_object(message_template));
+ message = copy_struct(message_template, hsz, &hp, ohp);
+ erts_queue_message(rp, rp_locks, mp, message, am_clock_service);
+ }
+ erts_proc_unlock(rp, rp_locks);
+ }
}
- rem_time->tv_sec = (ticks - elapsed) / 1000;
- rem_time->tv_usec = 1000 * ((ticks - elapsed) % 1000);
+
+ erts_free(ERTS_ALC_T_TMP, tmp);
}
}
-void erts_get_timeval(SysTimeval *tv)
+static void
+schedule_send_time_offset_changed_notifications(ErtsMonotonicTime new_offset)
{
- erts_smp_mtx_lock(&erts_timeofday_mtx);
- get_tolerant_timeofday(tv);
- erts_smp_mtx_unlock(&erts_timeofday_mtx);
- update_approx_time(tv);
+#ifdef ARCH_64
+ void *new_offsetp = (void *) new_offset;
+ ASSERT(sizeof(void *) == sizeof(ErtsMonotonicTime));
+#else
+ void *new_offsetp = erts_alloc(ERTS_ALC_T_NEW_TIME_OFFSET,
+ sizeof(ErtsMonotonicTime));
+ *((ErtsMonotonicTime *) new_offsetp) = new_offset;
+#endif
+ erts_schedule_misc_aux_work(1,
+ send_time_offset_changed_notifications,
+ new_offsetp);
}
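
[Editor's note: the ARCH_64 case above smuggles the 64-bit offset through the void* argument itself, avoiding a heap allocation. A minimal sketch of the trick; callback() is hypothetical:

/* Passing a 64-bit value through a void* callback argument. Only valid
   where pointers are at least 64 bits wide, hence the #ifdef above. */
#include <assert.h>
#include <stdint.h>

static void callback(void *arg)
{
    int64_t offset = (int64_t) (intptr_t) arg;
    assert(offset == -42);
}

int main(void)
{
    assert(sizeof(void *) >= sizeof(int64_t)); /* the ARCH_64 assumption */
    int64_t offset = -42;
    callback((void *) (intptr_t) offset);
    return 0;
}
]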
-erts_time_t
-erts_get_time(void)
+static ERTS_INLINE Eterm
+make_time_val(Process *c_p, ErtsMonotonicTime time_val)
{
- SysTimeval sys_tv;
-
- erts_smp_mtx_lock(&erts_timeofday_mtx);
-
- get_tolerant_timeofday(&sys_tv);
-
- erts_smp_mtx_unlock(&erts_timeofday_mtx);
+ Sint64 val = (Sint64) time_val;
+ Eterm *hp;
+ Uint sz;
- update_approx_time(&sys_tv);
+ if (IS_SSMALL(val))
+ return make_small(val);
- return sys_tv.tv_sec;
+ sz = ERTS_SINT64_HEAP_SIZE(val);
+ hp = HAlloc(c_p, sz);
+ return erts_sint64_to_big(val, &hp);
}
-#ifdef HAVE_ERTS_NOW_CPU
-void erts_get_now_cpu(Uint* megasec, Uint* sec, Uint* microsec) {
- SysCpuTime t;
- SysTimespec tp;
+Eterm
+erts_get_monotonic_start_time(struct process *c_p)
+{
+ return make_time_val(c_p, ERTS_MONOTONIC_TIME_START_EXTERNAL);
+}
- sys_get_proc_cputime(t, tp);
- *microsec = (Uint)(tp.tv_nsec / 1000);
- t = (tp.tv_sec / 1000000);
- *megasec = (Uint)(t % 1000000);
- *sec = (Uint)(tp.tv_sec % 1000000);
+Eterm
+erts_get_monotonic_end_time(struct process *c_p)
+{
+ return make_time_val(c_p, ERTS_MONOTONIC_TIME_END_EXTERNAL);
+}
+
+static Eterm
+bld_monotonic_time_source(Uint **hpp, Uint *szp, Sint64 os_mtime)
+{
+#ifndef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+ return NIL;
+#else
+ int i = 0;
+ Eterm k[6];
+ Eterm v[6];
+
+ if (time_sup.r.o.os_monotonic_time_disable)
+ return NIL;
+
+ k[i] = erts_bld_atom(hpp, szp, "function");
+ v[i++] = erts_bld_atom(hpp, szp,
+ time_sup.r.o.os_monotonic_time_func);
+
+ if (time_sup.r.o.os_monotonic_time_clock_id) {
+ k[i] = erts_bld_atom(hpp, szp, "clock_id");
+ v[i++] = erts_bld_atom(hpp, szp,
+ time_sup.r.o.os_monotonic_time_clock_id);
+ }
+
+ k[i] = erts_bld_atom(hpp, szp, "resolution");
+ v[i++] = erts_bld_uint64(hpp, szp,
+ time_sup.r.o.os_monotonic_time_resolution);
+
+ k[i] = erts_bld_atom(hpp, szp, "extended");
+ v[i++] = time_sup.r.o.os_monotonic_time_extended ? am_yes : am_no;
+
+ k[i] = erts_bld_atom(hpp, szp, "parallel");
+ v[i++] = time_sup.r.o.os_monotonic_time_locked ? am_no : am_yes;
+
+ k[i] = erts_bld_atom(hpp, szp, "time");
+ v[i++] = erts_bld_sint64(hpp, szp, os_mtime);
+
+ return erts_bld_2tup_list(hpp, szp, (Sint) i, k, v);
+#endif
}
+
+Eterm
+erts_monotonic_time_source(struct process *c_p)
+{
+ Uint hsz = 0;
+ Eterm *hp = NULL;
+ Sint64 os_mtime = 0;
+#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+ if (!time_sup.r.o.os_monotonic_time_disable)
+ os_mtime = (Sint64) erts_os_monotonic_time();
+#endif
+
+ bld_monotonic_time_source(NULL, &hsz, os_mtime);
+ if (hsz)
+ hp = HAlloc(c_p, hsz);
+ return bld_monotonic_time_source(&hp, NULL, os_mtime);
+}
+
+static Eterm
+bld_system_time_source(Uint **hpp, Uint *szp, Sint64 os_stime)
+{
+ int i = 0;
+ Eterm k[5];
+ Eterm v[5];
+
+ k[i] = erts_bld_atom(hpp, szp, "function");
+ v[i++] = erts_bld_atom(hpp, szp,
+ time_sup.r.o.os_system_time_func);
+
+ if (time_sup.r.o.os_system_time_clock_id) {
+ k[i] = erts_bld_atom(hpp, szp, "clock_id");
+ v[i++] = erts_bld_atom(hpp, szp,
+ time_sup.r.o.os_system_time_clock_id);
+ }
+
+ k[i] = erts_bld_atom(hpp, szp, "resolution");
+ v[i++] = erts_bld_uint64(hpp, szp,
+ time_sup.r.o.os_system_time_resolution);
+
+ k[i] = erts_bld_atom(hpp, szp, "parallel");
+ v[i++] = am_yes;
+
+ k[i] = erts_bld_atom(hpp, szp, "time");
+ v[i++] = erts_bld_sint64(hpp, szp, os_stime);
+
+ return erts_bld_2tup_list(hpp, szp, (Sint) i, k, v);
+}
+
+Eterm
+erts_system_time_source(struct process *c_p)
+{
+ Uint hsz = 0;
+ Eterm *hp = NULL;
+ Sint64 os_stime = (Sint64) erts_os_system_time();
+
+ bld_system_time_source(NULL, &hsz, os_stime);
+ if (hsz)
+ hp = HAlloc(c_p, hsz);
+ return bld_system_time_source(&hp, NULL, os_stime);
+}
+
+
+#include "bif.h"
+
+static ERTS_INLINE Eterm
+time_unit_conversion(Process *c_p, Eterm term, ErtsMonotonicTime val, ErtsMonotonicTime muloff)
+{
+ ErtsMonotonicTime result;
+ BIF_RETTYPE ret;
+
+ if (val < 0)
+ goto trap_to_erlang_code;
+
+ /* Convert to common user specified time units */
+ switch (term) {
+ case am_second:
+ case am_seconds:
+ case make_small(1):
+ result = ERTS_MONOTONIC_TO_SEC(val) + muloff*ERTS_MONOTONIC_OFFSET_SEC;
+ ERTS_BIF_PREP_RET(ret, make_time_val(c_p, result));
+ break;
+ case am_millisecond:
+ case am_milli_seconds:
+ case make_small(1000):
+ result = ERTS_MONOTONIC_TO_MSEC(val) + muloff*ERTS_MONOTONIC_OFFSET_MSEC;
+ ERTS_BIF_PREP_RET(ret, make_time_val(c_p, result));
+ break;
+ case am_microsecond:
+ case am_micro_seconds:
+ case make_small(1000*1000):
+ result = ERTS_MONOTONIC_TO_USEC(val) + muloff*ERTS_MONOTONIC_OFFSET_USEC;
+ ERTS_BIF_PREP_RET(ret, make_time_val(c_p, result));
+ break;
+#ifdef ARCH_64
+ case am_nanosecond:
+ case am_nano_seconds:
+ case make_small(1000*1000*1000):
+ result = ERTS_MONOTONIC_TO_NSEC(val) + muloff*ERTS_MONOTONIC_OFFSET_NSEC;
+ ERTS_BIF_PREP_RET(ret, make_time_val(c_p, result));
+ break;
+#endif
+ default: {
+ Eterm value, native_res;
+#ifndef ARCH_64
+ Sint user_res;
+ if (term == am_nanosecond || term == am_nano_seconds)
+ goto to_nano_seconds;
+ if (term_to_Sint(term, &user_res)) {
+ if (user_res == 1000*1000*1000) {
+ to_nano_seconds:
+ result = (ERTS_MONOTONIC_TO_NSEC(val)
+ + muloff*ERTS_MONOTONIC_OFFSET_NSEC);
+ ERTS_BIF_PREP_RET(ret, make_time_val(c_p, result));
+ break;
+ }
+ if (user_res <= 0)
+ goto badarg;
+ }
+#else
+ if (is_small(term)) {
+ if (signed_val(term) <= 0)
+ goto badarg;
+ }
#endif
+ else if (is_big(term)) {
+ if (big_sign(term))
+ goto badarg;
+ }
+ else {
+ badarg:
+ ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
+ break;
+ }
+
+ trap_to_erlang_code:
+ /* Do it in erlang code instead; pass along values to use... */
+ value = make_time_val(c_p, val + muloff*ERTS_MONOTONIC_OFFSET_NATIVE);
+ native_res = make_time_val(c_p, ERTS_MONOTONIC_TIME_UNIT);
+
+ ERTS_BIF_PREP_TRAP3(ret, erts_convert_time_unit_trap, c_p,
+ value, native_res, term);
+
+ break;
+ }
+ }
+
+ return ret;
+}
+
+
+/*
+ * Time Native API (drivers and NIFs)
+ */
+
+#define ERTS_NAPI_TIME_ERROR ((ErtsMonotonicTime) ERTS_NAPI_TIME_ERROR__)
+
+static void
+init_time_napi(void)
+{
+ /* Verify that time native api constants are as expected... */
+
+ ASSERT(sizeof(ErtsMonotonicTime) == sizeof(ErlDrvTime));
+ ASSERT(ERL_DRV_TIME_ERROR == (ErlDrvTime) ERTS_NAPI_TIME_ERROR);
+ ASSERT(ERL_DRV_TIME_ERROR < (ErlDrvTime) 0);
+ ASSERT(ERTS_NAPI_SEC__ == (int) ERL_DRV_SEC);
+ ASSERT(ERTS_NAPI_MSEC__ == (int) ERL_DRV_MSEC);
+ ASSERT(ERTS_NAPI_USEC__ == (int) ERL_DRV_USEC);
+ ASSERT(ERTS_NAPI_NSEC__ == (int) ERL_DRV_NSEC);
+
+ ASSERT(sizeof(ErtsMonotonicTime) == sizeof(ErlNifTime));
+ ASSERT(ERL_NIF_TIME_ERROR == (ErlNifTime) ERTS_NAPI_TIME_ERROR);
+ ASSERT(ERL_NIF_TIME_ERROR < (ErlNifTime) 0);
+ ASSERT(ERTS_NAPI_SEC__ == (int) ERL_NIF_SEC);
+ ASSERT(ERTS_NAPI_MSEC__ == (int) ERL_NIF_MSEC);
+ ASSERT(ERTS_NAPI_USEC__ == (int) ERL_NIF_USEC);
+ ASSERT(ERTS_NAPI_NSEC__ == (int) ERL_NIF_NSEC);
+}
+
+ErtsMonotonicTime
+erts_napi_monotonic_time(int time_unit)
+{
+ ErtsSchedulerData *esdp;
+ ErtsMonotonicTime mtime;
+
+ /* At least for now only allow schedulers to do this... */
+ esdp = erts_get_scheduler_data();
+ if (!esdp)
+ return ERTS_NAPI_TIME_ERROR;
+
+ mtime = time_sup.r.o.get_time();
+ update_last_mtime(esdp, mtime);
+
+ switch (time_unit) {
+ case ERTS_NAPI_SEC__:
+ mtime = ERTS_MONOTONIC_TO_SEC(mtime);
+ mtime += ERTS_MONOTONIC_OFFSET_SEC;
+ break;
+ case ERTS_NAPI_MSEC__:
+ mtime = ERTS_MONOTONIC_TO_MSEC(mtime);
+ mtime += ERTS_MONOTONIC_OFFSET_MSEC;
+ break;
+ case ERTS_NAPI_USEC__:
+ mtime = ERTS_MONOTONIC_TO_USEC(mtime);
+ mtime += ERTS_MONOTONIC_OFFSET_USEC;
+ break;
+ case ERTS_NAPI_NSEC__:
+ mtime = ERTS_MONOTONIC_TO_NSEC(mtime);
+ mtime += ERTS_MONOTONIC_OFFSET_NSEC;
+ break;
+ default:
+ return ERTS_NAPI_TIME_ERROR;
+ }
+
+ return mtime;
+}
+
+ErtsMonotonicTime
+erts_napi_time_offset(int time_unit)
+{
+ ErtsSchedulerData *esdp;
+ ErtsSystemTime offs;
+
+ /* At least for now only allow schedulers to do this... */
+ esdp = erts_get_scheduler_data();
+ if (!esdp)
+ return ERTS_NAPI_TIME_ERROR;
+
+ offs = get_time_offset();
+ switch (time_unit) {
+ case ERTS_NAPI_SEC__:
+ offs = ERTS_MONOTONIC_TO_SEC(offs);
+ offs -= ERTS_MONOTONIC_OFFSET_SEC;
+ break;
+ case ERTS_NAPI_MSEC__:
+ offs = ERTS_MONOTONIC_TO_MSEC(offs);
+ offs -= ERTS_MONOTONIC_OFFSET_MSEC;
+ break;
+ case ERTS_NAPI_USEC__:
+ offs = ERTS_MONOTONIC_TO_USEC(offs);
+ offs -= ERTS_MONOTONIC_OFFSET_USEC;
+ break;
+ case ERTS_NAPI_NSEC__:
+ offs = ERTS_MONOTONIC_TO_NSEC(offs);
+ offs -= ERTS_MONOTONIC_OFFSET_NSEC;
+ break;
+ default:
+ return ERTS_NAPI_TIME_ERROR;
+ }
+ return offs;
+}
+
+ErtsMonotonicTime
+erts_napi_convert_time_unit(ErtsMonotonicTime val, int from, int to)
+{
+ ErtsMonotonicTime ffreq, tfreq, denom;
+ /*
+ * Conversion between time units using the floor function.
+ *
+ * Note that this also needs to work for negative
+ * values. Ordinary integer division on a negative
+ * value gives the ceiling...
+ */
+
+ switch ((int) from) {
+ case ERTS_NAPI_SEC__: ffreq = 1; break;
+ case ERTS_NAPI_MSEC__: ffreq = 1000; break;
+ case ERTS_NAPI_USEC__: ffreq = 1000*1000; break;
+ case ERTS_NAPI_NSEC__: ffreq = 1000*1000*1000; break;
+ default: return ERTS_NAPI_TIME_ERROR;
+ }
+
+ switch ((int) to) {
+ case ERTS_NAPI_SEC__: tfreq = 1; break;
+ case ERTS_NAPI_MSEC__: tfreq = 1000; break;
+ case ERTS_NAPI_USEC__: tfreq = 1000*1000; break;
+ case ERTS_NAPI_NSEC__: tfreq = 1000*1000*1000; break;
+ default: return ERTS_NAPI_TIME_ERROR;
+ }
+
+ if (tfreq >= ffreq)
+ return val * (tfreq / ffreq);
+
+ denom = ffreq / tfreq;
+ if (val >= 0)
+ return val / denom;
+
+ return (val - (denom - 1)) / denom;
+}
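
[Editor's note: a small standalone check of the floor-division bias above; C integer division truncates toward zero, so a negative numerator must be biased down by denom - 1 first:

/* Floor division for negative values, as implemented above. */
#include <assert.h>

int main(void)
{
    long long denom = 1000;                 /* e.g. usec -> msec */
    long long val = -1500;

    long long truncated = val / denom;      /* ceiling for negative values */
    long long floored = (val - (denom - 1)) / denom;

    assert(truncated == -1);
    assert(floored == -2);                  /* true floor(-1.5) */
    return 0;
}
]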
+
+/* Built-in functions */
+
+BIF_RETTYPE monotonic_time_0(BIF_ALIST_0)
+{
+ ErtsMonotonicTime mtime = time_sup.r.o.get_time();
+ update_last_mtime(erts_proc_sched_data(BIF_P), mtime);
+ mtime += ERTS_MONOTONIC_OFFSET_NATIVE;
+ BIF_RET(make_time_val(BIF_P, mtime));
+}
+
+BIF_RETTYPE monotonic_time_1(BIF_ALIST_1)
+{
+ ErtsMonotonicTime mtime = time_sup.r.o.get_time();
+ update_last_mtime(erts_proc_sched_data(BIF_P), mtime);
+ BIF_RET(time_unit_conversion(BIF_P, BIF_ARG_1, mtime, 1));
+}
+
+BIF_RETTYPE system_time_0(BIF_ALIST_0)
+{
+ ErtsMonotonicTime mtime, offset;
+ mtime = time_sup.r.o.get_time();
+ offset = get_time_offset();
+ update_last_mtime(erts_proc_sched_data(BIF_P), mtime);
+ BIF_RET(make_time_val(BIF_P, mtime + offset));
+}
+
+BIF_RETTYPE system_time_1(BIF_ALIST_1)
+{
+ ErtsMonotonicTime mtime, offset;
+ mtime = time_sup.r.o.get_time();
+ offset = get_time_offset();
+ update_last_mtime(erts_proc_sched_data(BIF_P), mtime);
+ BIF_RET(time_unit_conversion(BIF_P, BIF_ARG_1, mtime + offset, 0));
+}
+
+BIF_RETTYPE erts_internal_time_unit_0(BIF_ALIST_0)
+{
+ BIF_RET(make_time_val(BIF_P, ERTS_MONOTONIC_TIME_UNIT));
+}
+
+BIF_RETTYPE time_offset_0(BIF_ALIST_0)
+{
+ ErtsMonotonicTime time_offset = get_time_offset();
+ time_offset -= ERTS_MONOTONIC_OFFSET_NATIVE;
+ BIF_RET(make_time_val(BIF_P, time_offset));
+}
+
+BIF_RETTYPE time_offset_1(BIF_ALIST_1)
+{
+ BIF_RET(time_unit_conversion(BIF_P, BIF_ARG_1, get_time_offset(), -1));
+}
+
+
+BIF_RETTYPE timestamp_0(BIF_ALIST_0)
+{
+ Eterm *hp, res;
+ ErtsMonotonicTime mtime, offset;
+ Uint mega_sec, sec, micro_sec;
+
+ mtime = time_sup.r.o.get_time();
+ offset = get_time_offset();
+ update_last_mtime(erts_proc_sched_data(BIF_P), mtime);
+
+ make_timestamp_value(&mega_sec, &sec, &micro_sec, mtime, offset);
+
+ /*
+ * Megaseconds is the only value that could potentially
+ * ever be a bignum. However, that won't happen
+ * for at least the next 4 million years...
+ *
+ * (System time will also have wrapped in the
+ * 64-bit integer before we get there...)
+ */
+
+ ASSERT(IS_USMALL(0, mega_sec));
+ ASSERT(IS_USMALL(0, sec));
+ ASSERT(IS_USMALL(0, micro_sec));
+
+ hp = HAlloc(BIF_P, 4);
+ res = TUPLE3(hp,
+ make_small(mega_sec),
+ make_small(sec),
+ make_small(micro_sec));
+ BIF_RET(res);
+}
+
+BIF_RETTYPE os_system_time_0(BIF_ALIST_0)
+{
+ ErtsSystemTime stime = erts_os_system_time();
+ BIF_RET(make_time_val(BIF_P, stime));
+}
+
+BIF_RETTYPE os_system_time_1(BIF_ALIST_1)
+{
+ ErtsSystemTime stime = erts_os_system_time();
+ BIF_RET(time_unit_conversion(BIF_P, BIF_ARG_1, stime, 0));
+}
+
+BIF_RETTYPE
+os_perf_counter_0(BIF_ALIST_0)
+{
+ BIF_RET(make_time_val(BIF_P, erts_sys_perf_counter()));
+}
+
+BIF_RETTYPE erts_internal_perf_counter_unit_0(BIF_ALIST_0)
+{
+ BIF_RET(make_time_val(BIF_P, erts_sys_perf_counter_unit()));
+}
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 305058ceff..4b996d8fc2 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -1,24 +1,38 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
/*
* Support functions for tracing.
+ *
+ * Ideas for future speed improvements in tracing framework:
+ * * Move ErtsTracerNif into ErtsTracer
+ * + Removes need for locking
+ * + Removes hash lookup overhead
+ * + Use a refc on the ErtsTracerNif to know when it can
+ * be freed. We don't want to allocate a separate
+ * ErtsTracerNif for each module used.
+ * * Optimize GenericBp for cache locality by reusing equivalent
+ * GenericBp and GenericBpData in multiple tracer points.
+ * + Possibly we want to use specialized instructions for different
+ * types of trace so that the knowledge of which struct is used
+ * can be in the instruction.
*/
#ifdef HAVE_CONFIG_H
@@ -37,6 +51,8 @@
#include "erl_binary.h"
#include "erl_bits.h"
#include "erl_thr_progress.h"
+#include "erl_bif_unique.h"
+#include "erl_map.h"
#if 0
#define DEBUG_PRINTOUTS
@@ -44,17 +60,15 @@
#undef DEBUG_PRINTOUTS
#endif
-extern BeamInstr beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
-extern BeamInstr beam_return_trace[1]; /* OpCode(i_return_trace) */
-extern BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
-
/* Pseudo export entries. Never filled in with data, only used to
yield unique pointers of the correct type. */
Export exp_send, exp_receive, exp_timeout;
-static Eterm system_seq_tracer;
-static Uint default_trace_flags;
-static Eterm default_tracer;
+static ErtsTracer system_seq_tracer;
+static Uint default_proc_trace_flags;
+static ErtsTracer default_proc_tracer;
+static Uint default_port_trace_flags;
+static ErtsTracer default_port_tracer;
static Eterm system_monitor;
static Eterm system_profile;
@@ -63,20 +77,230 @@ static Eterm system_profile;
int erts_cpu_timestamp;
#endif
-static erts_smp_mtx_t smq_mtx;
-static erts_smp_rwmtx_t sys_trace_rwmtx;
+static erts_mtx_t smq_mtx;
+static erts_rwmtx_t sys_trace_rwmtx;
enum ErtsSysMsgType {
SYS_MSG_TYPE_UNDEFINED,
- SYS_MSG_TYPE_TRACE,
- SYS_MSG_TYPE_SEQTRACE,
SYS_MSG_TYPE_SYSMON,
SYS_MSG_TYPE_ERRLGR,
SYS_MSG_TYPE_PROC_MSG,
SYS_MSG_TYPE_SYSPROF
};
-#ifdef ERTS_SMP
+#define ERTS_TRACE_TS_NOW_MAX_SIZE \
+ 4
+#define ERTS_TRACE_TS_MONOTONIC_MAX_SIZE \
+ ERTS_MAX_SINT64_HEAP_SIZE
+#define ERTS_TRACE_TS_STRICT_MONOTONIC_MAX_SIZE \
+ (3 + ERTS_MAX_SINT64_HEAP_SIZE \
+ + ERTS_MAX_UINT64_HEAP_SIZE)
+
+#define ERTS_TRACE_PATCH_TS_MAX_SIZE \
+ (1 + ((ERTS_TRACE_TS_NOW_MAX_SIZE \
+ > ERTS_TRACE_TS_MONOTONIC_MAX_SIZE) \
+ ? ((ERTS_TRACE_TS_NOW_MAX_SIZE \
+ > ERTS_TRACE_TS_STRICT_MONOTONIC_MAX_SIZE) \
+ ? ERTS_TRACE_TS_NOW_MAX_SIZE \
+ : ERTS_TRACE_TS_STRICT_MONOTONIC_MAX_SIZE) \
+ : ((ERTS_TRACE_TS_MONOTONIC_MAX_SIZE \
+ > ERTS_TRACE_TS_STRICT_MONOTONIC_MAX_SIZE) \
+ ? ERTS_TRACE_TS_MONOTONIC_MAX_SIZE \
+ : ERTS_TRACE_TS_STRICT_MONOTONIC_MAX_SIZE)))
+
+#define TFLGS_TS_TYPE(p) ERTS_TFLGS2TSTYPE(ERTS_TRACE_FLAGS((p)))
+
+/*
+ * FUTURE CHANGES:
+ *
+ * The timestamp functionality has intentionally been
+ * split in two parts for future use even though it
+ * is not used like this today. take_timestamp() takes
+ * the timestamp and calculates the heap need for it (which
+ * is not constant). write_timestamp() writes the
+ * timestamp to the allocated heap. That is, one typically
+ * wants to take the timestamp before allocating the heap
+ * and then write it to the heap.
+ *
+ * The trace output functionality now uses patch_ts_size(),
+ * write_ts(), and patch_ts(). write_ts() both takes the
+ * timestamp and writes it. Since we don't know the
+ * heap need when allocating the heap area, we need to
+ * over-allocate (maximum size from patch_ts_size()) and
+ * then potentially (often) shrink the heap area after the
+ * timestamp has been written. The only reason it is
+ * currently done this way is that we do not want to
+ * make major changes to the trace behavior in a patch.
+ * This is planned to be changed in the next major release.
+ */
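
[Editor's note: a self-contained analogue of the take/write split just described, outside the emulator: measure first, allocate once, then write. All names below are illustrative, not ERTS APIs:

/* Analogue of take_timestamp()/write_timestamp(): compute the value and
   its size first, allocate exactly once, then write into that space. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { int len; char buf[32]; } Stamp;

static size_t take_stamp(Stamp *s, long value)    /* compute + report size */
{
    s->len = snprintf(s->buf, sizeof s->buf, "%ld", value);
    return (size_t) s->len;
}

static char *write_stamp(const Stamp *s, char *p) /* write into prepared space */
{
    memcpy(p, s->buf, (size_t) s->len);
    return p + s->len;
}

int main(void)
{
    Stamp s;
    size_t need = take_stamp(&s, 1234567890L);    /* size known before allocating */
    char *heap = malloc(need + 1);
    if (!heap)
        return 1;
    *write_stamp(&s, heap) = '\0';
    puts(heap);
    free(heap);
    return 0;
}
]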
+
+typedef struct {
+ int ts_type_flag;
+ union {
+ struct {
+ Uint ms;
+ Uint s;
+ Uint us;
+ } now;
+ struct {
+ ErtsMonotonicTime time;
+ Sint64 raw_unique;
+ } monotonic;
+ } u;
+} ErtsTraceTimeStamp;
+
+static ERTS_INLINE Uint
+take_timestamp(ErtsTraceTimeStamp *tsp, int ts_type)
+{
+ int ts_type_flag = ts_type & -ts_type; /* least significant flag */
+
+ ASSERT(ts_type_flag == ERTS_TRACE_FLG_NOW_TIMESTAMP
+ || ts_type_flag == ERTS_TRACE_FLG_MONOTONIC_TIMESTAMP
+ || ts_type_flag == ERTS_TRACE_FLG_STRICT_MONOTONIC_TIMESTAMP
+ || ts_type_flag == 0);
+
+ tsp->ts_type_flag = ts_type_flag;
+ switch (ts_type_flag) {
+ case 0:
+ return (Uint) 0;
+ case ERTS_TRACE_FLG_NOW_TIMESTAMP:
+#ifdef HAVE_ERTS_NOW_CPU
+ if (erts_cpu_timestamp)
+ erts_get_now_cpu(&tsp->u.now.ms, &tsp->u.now.s, &tsp->u.now.us);
+ else
+#endif
+ get_now(&tsp->u.now.ms, &tsp->u.now.s, &tsp->u.now.us);
+ return (Uint) 4;
+ case ERTS_TRACE_FLG_MONOTONIC_TIMESTAMP:
+ case ERTS_TRACE_FLG_STRICT_MONOTONIC_TIMESTAMP: {
+ Uint hsz = 0;
+ ErtsMonotonicTime mtime = erts_get_monotonic_time(NULL);
+ mtime = ERTS_MONOTONIC_TO_NSEC(mtime);
+ mtime += ERTS_MONOTONIC_OFFSET_NSEC;
+ hsz = (IS_SSMALL(mtime) ?
+ (Uint) 0
+ : ERTS_SINT64_HEAP_SIZE((Sint64) mtime));
+ tsp->u.monotonic.time = mtime;
+ if (ts_type_flag == ERTS_TRACE_FLG_STRICT_MONOTONIC_TIMESTAMP) {
+ Sint64 raw_unique;
+ hsz += 3; /* 2-tuple */
+ raw_unique = erts_raw_get_unique_monotonic_integer();
+ tsp->u.monotonic.raw_unique = raw_unique;
+ hsz += erts_raw_unique_monotonic_integer_heap_size(raw_unique, 0);
+ }
+ return hsz;
+ }
+ default:
+ ERTS_INTERNAL_ERROR("invalid timestamp type");
+ return 0;
+ }
+}
+
+static ERTS_INLINE Eterm
+write_timestamp(ErtsTraceTimeStamp *tsp, Eterm **hpp)
+{
+ int ts_type_flag = tsp->ts_type_flag;
+ Eterm res;
+
+ switch (ts_type_flag) {
+ case 0:
+ return NIL;
+ case ERTS_TRACE_FLG_NOW_TIMESTAMP:
+ res = TUPLE3(*hpp,
+ make_small(tsp->u.now.ms),
+ make_small(tsp->u.now.s),
+ make_small(tsp->u.now.us));
+ *hpp += 4;
+ return res;
+ case ERTS_TRACE_FLG_MONOTONIC_TIMESTAMP:
+ case ERTS_TRACE_FLG_STRICT_MONOTONIC_TIMESTAMP: {
+ Sint64 mtime, raw;
+ Eterm unique, emtime;
+
+ mtime = (Sint64) tsp->u.monotonic.time;
+ emtime = (IS_SSMALL(mtime)
+ ? make_small((Sint64) mtime)
+ : erts_sint64_to_big((Sint64) mtime, hpp));
+
+ if (ts_type_flag == ERTS_TRACE_FLG_MONOTONIC_TIMESTAMP)
+ return emtime;
+
+ raw = tsp->u.monotonic.raw_unique;
+ unique = erts_raw_make_unique_monotonic_integer_value(hpp, raw, 0);
+ res = TUPLE2(*hpp, emtime, unique);
+ *hpp += 3;
+ return res;
+ }
+ default:
+ ERTS_INTERNAL_ERROR("invalid timestamp type");
+ return THE_NON_VALUE;
+ }
+}
+
+
+static ERTS_INLINE Uint
+patch_ts_size(int ts_type)
+{
+ int ts_type_flag = ts_type & -ts_type; /* least significant flag */
+ switch (ts_type_flag) {
+ case 0:
+ return 0;
+ case ERTS_TRACE_FLG_NOW_TIMESTAMP:
+ return 1 + ERTS_TRACE_TS_NOW_MAX_SIZE;
+ case ERTS_TRACE_FLG_MONOTONIC_TIMESTAMP:
+ return 1 + ERTS_TRACE_TS_MONOTONIC_MAX_SIZE;
+ case ERTS_TRACE_FLG_STRICT_MONOTONIC_TIMESTAMP:
+ return 1 + ERTS_TRACE_TS_STRICT_MONOTONIC_MAX_SIZE;
+ default:
+ ERTS_INTERNAL_ERROR("invalid timestamp type");
+ return 0;
+ }
+}
+
+/*
+ * Write a timestamp. The timestamp MUST be the last
+ * thing built on the heap, since write_ts() might
+ * shrink the size of the used area.
+ */
+static Eterm
+write_ts(int ts_type, Eterm *hp, ErlHeapFragment *bp, Process *tracer)
+{
+ ErtsTraceTimeStamp ts;
+ Sint shrink;
+ Eterm res, *ts_hp = hp;
+ Uint hsz;
+
+ ASSERT(ts_type);
+
+ hsz = take_timestamp(&ts, ts_type);
+
+ res = write_timestamp(&ts, &ts_hp);
+
+ ASSERT(ts_hp == hp + hsz);
+
+ switch (ts.ts_type_flag) {
+ case ERTS_TRACE_FLG_MONOTONIC_TIMESTAMP:
+ shrink = ERTS_TRACE_TS_MONOTONIC_MAX_SIZE;
+ break;
+ case ERTS_TRACE_FLG_STRICT_MONOTONIC_TIMESTAMP:
+ shrink = ERTS_TRACE_TS_STRICT_MONOTONIC_MAX_SIZE;
+ break;
+ default:
+ return res;
+ }
+
+ shrink -= hsz;
+
+ ASSERT(shrink >= 0);
+
+ if (shrink) {
+ if (bp)
+ bp->used_size -= shrink;
+ }
+
+ return res;
+}
+
static void enqueue_sys_msg_unlocked(enum ErtsSysMsgType type,
Eterm from,
Eterm to,
@@ -88,14 +312,22 @@ static void enqueue_sys_msg(enum ErtsSysMsgType type,
Eterm msg,
ErlHeapFragment *bp);
static void init_sys_msg_dispatcher(void);
-#endif
+
+static void init_tracer_nif(void);
+static int tracer_cmp_fun(void*, void*);
+static HashValue tracer_hash_fun(void*);
+static void *tracer_alloc_fun(void*);
+static void tracer_free_fun(void*);
+
+typedef struct ErtsTracerNif_ ErtsTracerNif;
void erts_init_trace(void) {
- erts_smp_rwmtx_opt_t rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
- rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
- rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+ erts_rwmtx_opt_t rwmtx_opts = ERTS_RWMTX_OPT_DEFAULT_INITER;
+ rwmtx_opts.type = ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
+ rwmtx_opts.lived = ERTS_RWMTX_LONG_LIVED;
- erts_smp_rwmtx_init_opt(&sys_trace_rwmtx, &rwmtx_opts, "sys_tracers");
+ erts_rwmtx_init_opt(&sys_trace_rwmtx, &rwmtx_opts, "sys_tracers", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
#ifdef HAVE_ERTS_NOW_CPU
erts_cpu_timestamp = 0;
@@ -103,269 +335,293 @@ void erts_init_trace(void) {
erts_bif_trace_init();
erts_system_monitor_clear(NULL);
erts_system_profile_clear(NULL);
- default_trace_flags = F_INITIAL_TRACE_FLAGS;
- default_tracer = NIL;
- system_seq_tracer = am_false;
-#ifdef ERTS_SMP
+ default_proc_trace_flags = F_INITIAL_TRACE_FLAGS;
+ default_proc_tracer = erts_tracer_nil;
+ default_port_trace_flags = F_INITIAL_TRACE_FLAGS;
+ default_port_tracer = erts_tracer_nil;
+ system_seq_tracer = erts_tracer_nil;
init_sys_msg_dispatcher();
-#endif
+ init_tracer_nif();
}
-static Eterm system_seq_tracer;
-
-#ifdef ERTS_SMP
#define ERTS_ALLOC_SYSMSG_HEAP(SZ, BPP, OHPP, UNUSED) \
(*(BPP) = new_message_buffer((SZ)), \
*(OHPP) = &(*(BPP))->off_heap, \
(*(BPP))->mem)
-#else
-#define ERTS_ALLOC_SYSMSG_HEAP(SZ, BPP, OHPP, RPP) \
- erts_alloc_message_heap((SZ), (BPP), (OHPP), (RPP), 0)
-#endif
-#ifdef ERTS_SMP
-#define ERTS_ENQ_TRACE_MSG(FPID, TPID, MSG, BP) \
-do { \
- ERTS_LC_ASSERT(erts_smp_lc_mtx_is_locked(&smq_mtx)); \
- enqueue_sys_msg_unlocked(SYS_MSG_TYPE_TRACE, (FPID), (TPID), (MSG), (BP)); \
-} while(0)
-#else
-#ifdef USE_VM_PROBES
-#define ERTS_ENQ_TRACE_MSG(FPID, TPROC, MSG, BP) \
- erts_queue_message((TPROC), NULL, (BP), (MSG), NIL, NIL)
-#else
-#define ERTS_ENQ_TRACE_MSG(FPID, TPROC, MSG, BP) \
- erts_queue_message((TPROC), NULL, (BP), (MSG), NIL)
-#endif
-#endif
+enum ErtsTracerOpt {
+ TRACE_FUN_DEFAULT = 0,
+ TRACE_FUN_ENABLED = 1,
+ TRACE_FUN_T_SEND = 2,
+ TRACE_FUN_T_RECEIVE = 3,
+ TRACE_FUN_T_CALL = 4,
+ TRACE_FUN_T_SCHED_PROC = 5,
+ TRACE_FUN_T_SCHED_PORT = 6,
+ TRACE_FUN_T_GC = 7,
+ TRACE_FUN_T_PROCS = 8,
+ TRACE_FUN_T_PORTS = 9,
+ TRACE_FUN_E_SEND = 10,
+ TRACE_FUN_E_RECEIVE = 11,
+ TRACE_FUN_E_CALL = 12,
+ TRACE_FUN_E_SCHED_PROC = 13,
+ TRACE_FUN_E_SCHED_PORT = 14,
+ TRACE_FUN_E_GC = 15,
+ TRACE_FUN_E_PROCS = 16,
+ TRACE_FUN_E_PORTS = 17
+};
-/*
- * NOTE that the ERTS_GET_TRACER_REF() returns from the function (!!!)
- * using it, and resets the parameters used if the tracer is invalid, i.e.,
- * use it with extreme care!
- */
-#ifdef ERTS_SMP
-#define ERTS_NULL_TRACER_REF NIL
-#define ERTS_TRACER_REF_TYPE Eterm
- /* In the smp case, we never find the tracer invalid here (the sys
- message dispatcher thread takes care of that). */
-#define ERTS_GET_TRACER_REF(RES, TPID, TRACEE_FLGS) \
-do { (RES) = (TPID); } while(0)
-int
-erts_is_tracer_proc_valid(Process* p)
-{
- return 1;
-}
-#else
-#define ERTS_NULL_TRACER_REF NULL
-#define ERTS_TRACER_REF_TYPE Process *
-#define ERTS_GET_TRACER_REF(RES, TPID, TRACEE_FLGS) \
-do { \
- (RES) = erts_proc_lookup((TPID)); \
- if (!(RES) || !(ERTS_TRACE_FLAGS((RES)) & F_TRACER)) { \
- (TPID) = NIL; \
- (TRACEE_FLGS) &= ~TRACEE_FLAGS; \
- return; \
- } \
-} while (0)
-int
-erts_is_tracer_proc_valid(Process* p)
-{
- Process* tracer;
+#define NIF_TRACER_TYPES (18)
- tracer = erts_proc_lookup(ERTS_TRACER_PROC(p));
- if (tracer && ERTS_TRACE_FLAGS(tracer) & F_TRACER) {
- return 1;
- } else {
- ERTS_TRACER_PROC(p) = NIL;
- ERTS_TRACE_FLAGS(p) = ~TRACEE_FLAGS;
- return 0;
- }
-}
-#endif
+
+static ERTS_INLINE int
+send_to_tracer_nif_raw(Process *c_p, Process *tracee, const ErtsTracer tracer,
+ Uint trace_flags, Eterm t_p_id, ErtsTracerNif *tnif,
+ enum ErtsTracerOpt topt,
+ Eterm tag, Eterm msg, Eterm extra, Eterm pam_result);
+static ERTS_INLINE int
+send_to_tracer_nif(Process *c_p, ErtsPTabElementCommon *t_p,
+ Eterm t_p_id, ErtsTracerNif *tnif,
+ enum ErtsTracerOpt topt,
+ Eterm tag, Eterm msg, Eterm extra,
+ Eterm pam_result);
+static ERTS_INLINE Eterm
+call_enabled_tracer(const ErtsTracer tracer,
+ ErtsTracerNif **tnif_ref,
+ enum ErtsTracerOpt topt,
+ Eterm tag, Eterm t_p_id);
+static int
+is_tracer_enabled(Process* c_p, ErtsProcLocks c_p_locks,
+ ErtsPTabElementCommon *t_p,
+ ErtsTracerNif **tnif_ret,
+ enum ErtsTracerOpt topt, Eterm tag);
static Uint active_sched;
void
erts_system_profile_setup_active_schedulers(void)
{
- ERTS_SMP_LC_ASSERT(erts_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
active_sched = erts_active_schedulers();
}
static void
exiting_reset(Eterm exiting)
{
- erts_smp_rwmtx_rwlock(&sys_trace_rwmtx);
- if (exiting == default_tracer) {
- default_tracer = NIL;
- default_trace_flags &= TRACEE_FLAGS;
-#ifdef DEBUG
- default_trace_flags |= F_INITIAL_TRACE_FLAGS;
-#endif
- }
- if (exiting == system_seq_tracer) {
-#ifdef DEBUG_PRINTOUTS
- erts_fprintf(stderr, "seq tracer %T exited\n", exiting);
-#endif
- system_seq_tracer = am_false;
- }
+ erts_rwmtx_rwlock(&sys_trace_rwmtx);
if (exiting == system_monitor) {
-#ifdef ERTS_SMP
system_monitor = NIL;
/* Let the trace message dispatcher clear flags, etc */
-#else
- erts_system_monitor_clear(NULL);
-#endif
}
if (exiting == system_profile) {
-#ifdef ERTS_SMP
system_profile = NIL;
/* Let the trace message dispatcher clear flags, etc */
-#else
- erts_system_profile_clear(NULL);
-#endif
}
- erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx);
+ erts_rwmtx_rwunlock(&sys_trace_rwmtx);
}
void
erts_trace_check_exiting(Eterm exiting)
{
int reset = 0;
- erts_smp_rwmtx_rlock(&sys_trace_rwmtx);
- if (exiting == default_tracer)
- reset = 1;
- else if (exiting == system_seq_tracer)
- reset = 1;
- else if (exiting == system_monitor)
+ erts_rwmtx_rlock(&sys_trace_rwmtx);
+ if (exiting == system_monitor)
reset = 1;
else if (exiting == system_profile)
reset = 1;
- erts_smp_rwmtx_runlock(&sys_trace_rwmtx);
+ erts_rwmtx_runlock(&sys_trace_rwmtx);
if (reset)
exiting_reset(exiting);
}
-static ERTS_INLINE int
-is_valid_tracer(Eterm tracer)
-{
- return erts_proc_lookup(tracer) || erts_is_valid_tracer_port(tracer);
-}
-
-Eterm
-erts_set_system_seq_tracer(Process *c_p, ErtsProcLocks c_p_locks, Eterm new)
+ErtsTracer
+erts_set_system_seq_tracer(Process *c_p, ErtsProcLocks c_p_locks, ErtsTracer new)
{
- Eterm old;
+ ErtsTracer old;
- if (new != am_false && !is_valid_tracer(new))
- return THE_NON_VALUE;
+ if (!ERTS_TRACER_IS_NIL(new)) {
+ Eterm nif_result = call_enabled_tracer(
+ new, NULL, TRACE_FUN_ENABLED, am_trace_status, am_undefined);
+ switch (nif_result) {
+ case am_trace: break;
+ default:
+ return THE_NON_VALUE;
+ }
+ }
- erts_smp_rwmtx_rwlock(&sys_trace_rwmtx);
+ erts_rwmtx_rwlock(&sys_trace_rwmtx);
old = system_seq_tracer;
- system_seq_tracer = new;
+ system_seq_tracer = erts_tracer_nil;
+ erts_tracer_update(&system_seq_tracer, new);
#ifdef DEBUG_PRINTOUTS
erts_fprintf(stderr, "set seq tracer new=%T old=%T\n", new, old);
#endif
- erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx);
+ erts_rwmtx_rwunlock(&sys_trace_rwmtx);
return old;
}
-Eterm
+ErtsTracer
erts_get_system_seq_tracer(void)
{
- Eterm st;
- erts_smp_rwmtx_rlock(&sys_trace_rwmtx);
+ ErtsTracer st;
+ erts_rwmtx_rlock(&sys_trace_rwmtx);
st = system_seq_tracer;
#ifdef DEBUG_PRINTOUTS
erts_fprintf(stderr, "get seq tracer %T\n", st);
#endif
- erts_smp_rwmtx_runlock(&sys_trace_rwmtx);
+ erts_rwmtx_runlock(&sys_trace_rwmtx);
+
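+ /* Lazy removal: if the tracer's 'enabled' callback answers
+ * am_remove, drop the system seq tracer and return nil instead. */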
+ if (st != erts_tracer_nil &&
+ call_enabled_tracer(st, NULL, TRACE_FUN_ENABLED,
+ am_trace_status, am_undefined) == am_remove) {
+ st = erts_set_system_seq_tracer(NULL, 0, erts_tracer_nil);
+ ERTS_TRACER_CLEAR(&st);
+ }
+
return st;
}
static ERTS_INLINE void
-get_default_tracing(Uint *flagsp, Eterm *tracerp)
-{
- if (!(default_trace_flags & TRACEE_FLAGS))
- default_tracer = NIL;
-
- if (is_nil(default_tracer)) {
- default_trace_flags &= ~TRACEE_FLAGS;
- } else if (is_internal_pid(default_tracer)) {
- if (!erts_proc_lookup(default_tracer)) {
- reset_tracer:
- default_trace_flags &= ~TRACEE_FLAGS;
- default_tracer = NIL;
- }
+get_default_tracing(Uint *flagsp, ErtsTracer *tracerp,
+ Uint *default_trace_flags,
+ ErtsTracer *default_tracer)
+{
+ if (!(*default_trace_flags & TRACEE_FLAGS))
+ ERTS_TRACER_CLEAR(default_tracer);
+
+ if (ERTS_TRACER_IS_NIL(*default_tracer)) {
+ *default_trace_flags &= ~TRACEE_FLAGS;
} else {
- ASSERT(is_internal_port(default_tracer));
- if (!erts_is_valid_tracer_port(default_tracer))
- goto reset_tracer;
+ Eterm nif_res;
+ nif_res = call_enabled_tracer(*default_tracer,
+ NULL, TRACE_FUN_ENABLED,
+ am_trace_status, am_undefined);
+ switch (nif_res) {
+ case am_trace: break;
+ default: {
+ ErtsTracer curr_default_tracer = *default_tracer;
+ if (tracerp) {
+ /* We only hold a read lock, so we must release it and take the write lock */
+ erts_rwmtx_runlock(&sys_trace_rwmtx);
+ erts_rwmtx_rwlock(&sys_trace_rwmtx);
+ }
+ /* Check whether someone else changed the default tracer
+ while we were taking the write lock; if so, do
+ nothing. */
+ if (curr_default_tracer == *default_tracer) {
+ *default_trace_flags &= ~TRACEE_FLAGS;
+ ERTS_TRACER_CLEAR(default_tracer);
+ }
+ if (tracerp) {
+ erts_rwmtx_rwunlock(&sys_trace_rwmtx);
+ erts_rwmtx_rlock(&sys_trace_rwmtx);
+ }
+ }
+ }
}
if (flagsp)
- *flagsp = default_trace_flags;
- if (tracerp)
- *tracerp = default_tracer;
+ *flagsp = *default_trace_flags;
+ if (tracerp) {
+ erts_tracer_update(tracerp,*default_tracer);
+ }
+}
+
+static ERTS_INLINE void
+erts_change_default_tracing(int setflags, Uint flags,
+ const ErtsTracer tracer,
+ Uint *default_trace_flags,
+ ErtsTracer *default_tracer)
+{
+ if (setflags)
+ *default_trace_flags |= flags;
+ else
+ *default_trace_flags &= ~flags;
+
+ erts_tracer_update(default_tracer, tracer);
+
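+ /* Re-run the consistency check so that an inert combination
+ * (no TRACEE_FLAGS left, or a tracer that is no longer enabled)
+ * is cleared immediately. */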
+ get_default_tracing(NULL, NULL, default_trace_flags, default_tracer);
}
void
-erts_change_default_tracing(int setflags, Uint *flagsp, Eterm *tracerp)
+erts_change_default_proc_tracing(int setflags, Uint flagsp,
+ const ErtsTracer tracer)
{
- erts_smp_rwmtx_rwlock(&sys_trace_rwmtx);
- if (flagsp) {
- if (setflags)
- default_trace_flags |= *flagsp;
- else
- default_trace_flags &= ~(*flagsp);
- }
- if (tracerp)
- default_tracer = *tracerp;
- get_default_tracing(flagsp, tracerp);
- erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx);
+ erts_rwmtx_rwlock(&sys_trace_rwmtx);
+ erts_change_default_tracing(
+ setflags, flagsp, tracer,
+ &default_proc_trace_flags,
+ &default_proc_tracer);
+ erts_rwmtx_rwunlock(&sys_trace_rwmtx);
+}
+
+void
+erts_change_default_port_tracing(int setflags, Uint flagsp,
+ const ErtsTracer tracer)
+{
+ erts_rwmtx_rwlock(&sys_trace_rwmtx);
+ erts_change_default_tracing(
+ setflags, flagsp, tracer,
+ &default_port_trace_flags,
+ &default_port_tracer);
+ erts_rwmtx_rwunlock(&sys_trace_rwmtx);
+}
+
+void
+erts_get_default_proc_tracing(Uint *flagsp, ErtsTracer *tracerp)
+{
+ erts_rwmtx_rlock(&sys_trace_rwmtx);
+ *tracerp = erts_tracer_nil; /* initialize */
+ get_default_tracing(
+ flagsp, tracerp,
+ &default_proc_trace_flags,
+ &default_proc_tracer);
+ erts_rwmtx_runlock(&sys_trace_rwmtx);
}
void
-erts_get_default_tracing(Uint *flagsp, Eterm *tracerp)
+erts_get_default_port_tracing(Uint *flagsp, ErtsTracer *tracerp)
{
- erts_smp_rwmtx_rlock(&sys_trace_rwmtx);
- get_default_tracing(flagsp, tracerp);
- erts_smp_rwmtx_runlock(&sys_trace_rwmtx);
+ erts_rwmtx_rlock(&sys_trace_rwmtx);
+ *tracerp = erts_tracer_nil; /* initialize */
+ get_default_tracing(
+ flagsp, tracerp,
+ &default_port_trace_flags,
+ &default_port_tracer);
+ erts_rwmtx_runlock(&sys_trace_rwmtx);
}
void
erts_set_system_monitor(Eterm monitor)
{
- erts_smp_rwmtx_rwlock(&sys_trace_rwmtx);
+ erts_rwmtx_rwlock(&sys_trace_rwmtx);
system_monitor = monitor;
- erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx);
+ erts_rwmtx_rwunlock(&sys_trace_rwmtx);
}
Eterm
erts_get_system_monitor(void)
{
Eterm monitor;
- erts_smp_rwmtx_rlock(&sys_trace_rwmtx);
+ erts_rwmtx_rlock(&sys_trace_rwmtx);
monitor = system_monitor;
- erts_smp_rwmtx_runlock(&sys_trace_rwmtx);
+ erts_rwmtx_runlock(&sys_trace_rwmtx);
return monitor;
}
/* Performance monitoring */
void erts_set_system_profile(Eterm profile) {
- erts_smp_rwmtx_rwlock(&sys_trace_rwmtx);
+ erts_rwmtx_rwlock(&sys_trace_rwmtx);
system_profile = profile;
- erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx);
+ erts_rwmtx_rwunlock(&sys_trace_rwmtx);
}
Eterm
erts_get_system_profile(void) {
Eterm profile;
- erts_smp_rwmtx_rlock(&sys_trace_rwmtx);
+ erts_rwmtx_rlock(&sys_trace_rwmtx);
profile = system_profile;
- erts_smp_rwmtx_runlock(&sys_trace_rwmtx);
+ erts_rwmtx_runlock(&sys_trace_rwmtx);
return profile;
}
@@ -383,32 +639,8 @@ do { \
#endif
-
-static Eterm* patch_ts(Eterm tuple4, Eterm* hp);
-
-#ifdef ERTS_SMP
-static void
-do_send_to_port(Eterm to,
- Port* unused_port,
- Eterm from,
- enum ErtsSysMsgType type,
- Eterm message)
-{
- Uint sz = size_object(message);
- ErlHeapFragment *bp = new_message_buffer(sz);
- Uint *hp = bp->mem;
- Eterm msg = copy_struct(message, sz, &hp, &bp->off_heap);
-
- enqueue_sys_msg_unlocked(type, from, to, msg, bp);
-}
-
-#define WRITE_SYS_MSG_TO_PORT write_sys_msg_to_port
-#else
-#define WRITE_SYS_MSG_TO_PORT do_send_to_port
-#endif
-
static void
-WRITE_SYS_MSG_TO_PORT(Eterm unused_to,
+write_sys_msg_to_port(Eterm unused_to,
Port* trace_port,
Eterm unused_from,
enum ErtsSysMsgType unused_type,
@@ -424,405 +656,29 @@ WRITE_SYS_MSG_TO_PORT(Eterm unused_to,
erts_encode_ext(message, &ptr);
if (!(ptr <= buffer+size)) {
- erl_exit(1, "Internal error in do_send_to_port: %d\n", ptr-buffer);
+ erts_exit(ERTS_ERROR_EXIT, "Internal error in write_sys_msg_to_port: %d\n", ptr-buffer);
}
-#ifndef ERTS_SMP
- if (!INVALID_TRACER_PORT(trace_port, trace_port->common.id))
-#endif
erts_raw_port_command(trace_port, buffer, ptr-buffer);
erts_free(ERTS_ALC_T_TMP, (void *) buffer);
}
-#ifndef ERTS_SMP
-/* Send {trace_ts, Pid, out, 0, Timestamp}
- * followed by {trace_ts, Pid, in, 0, NewTimestamp}
- *
- * 'NewTimestamp' is fetched from GET_NOW() through patch_ts().
- */
-static void
-do_send_schedfix_to_port(Port *trace_port, Eterm pid, Eterm timestamp) {
-#define LOCAL_HEAP_SIZE (4+5+5)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- Eterm message;
- Eterm *hp;
- Eterm mfarity;
-
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
- ASSERT(is_pid(pid));
- ASSERT(is_tuple(timestamp));
- ASSERT(*tuple_val(timestamp) == make_arityval(3));
-
- hp = local_heap;
- mfarity = make_small(0);
- message = TUPLE5(hp, am_trace_ts, pid, am_out, mfarity, timestamp);
- /* Note, hp is deliberately NOT incremented since it will be reused */
-
- do_send_to_port(trace_port->common.id,
- trace_port,
- pid,
- SYS_MSG_TYPE_UNDEFINED,
- message);
-
- message = TUPLE4(hp, am_trace_ts, pid, am_in, mfarity);
- hp += 5;
- hp = patch_ts(message, hp);
-
- do_send_to_port(trace_port->common.id,
- trace_port,
- pid,
- SYS_MSG_TYPE_UNDEFINED,
- message);
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
-}
-#endif
-
-/* If (c_p != NULL), a fake schedule out/in message pair will be sent,
- * if the driver so requests.
- * It is assumed that 'message' is not an 'out' message.
- *
- * 'c_p' is the currently executing process, "tracee" is the traced process
- * which 'message' concerns => if (*tracee_flags & F_TIMESTAMP),
- * 'message' must contain a timestamp.
- */
-static void
-send_to_port(Process *c_p, Eterm message,
- Eterm *tracer_pid, Uint *tracee_flags) {
- Port* trace_port;
-#ifndef ERTS_SMP
-#define LOCAL_HEAP_SIZE (4)
- Eterm ts, *hp;
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
-#endif
-
- ASSERT(is_internal_port(*tracer_pid));
-#ifdef ERTS_SMP
- if (is_not_internal_port(*tracer_pid))
- return;
-
- trace_port = NULL;
-#else
-
- trace_port = erts_id2port_sflgs(*tracer_pid,
- NULL,
- 0,
- ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
-
- if (!trace_port) {
- *tracee_flags &= ~TRACEE_FLAGS;
- *tracer_pid = NIL;
- return;
- }
-
- /*
- * Make a fake schedule only if the current process is traced
- * with 'running' and 'timestamp'.
- */
-
- if ( c_p == NULL ||
- (! IS_TRACED_FL(c_p, F_TRACE_SCHED | F_TIMESTAMP))) {
-#endif
- do_send_to_port(*tracer_pid,
- trace_port,
- c_p ? c_p->common.id : NIL,
- SYS_MSG_TYPE_TRACE,
- message);
-#ifndef ERTS_SMP
- erts_port_release(trace_port);
- return;
- }
-
- /*
- * Note that the process being traced for some type of trace messages
- * (e.g. getting_linked) need not be the current process. That other
- * process might not have timestamps enabled.
- */
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
- if (*tracee_flags & F_TIMESTAMP) {
- ASSERT(is_tuple(message));
- hp = tuple_val(message);
- ts = hp[arityval(hp[0])];
- } else {
- /* A fake schedule might be needed,
- * but this message does not contain a timestamp.
- * Create a dummy trace message with timestamp to be
- * passed to do_send_schedfix_to_port().
- */
- Uint ms,s,us;
- GET_NOW(&ms, &s, &us);
- hp = local_heap;
- ts = TUPLE3(hp, make_small(ms), make_small(s), make_small(us));
- hp += 4;
- }
-
- trace_port->control_flags &= ~PORT_CONTROL_FLAG_HEAVY;
- do_send_to_port(*tracer_pid,
- trace_port,
- c_p ? c_p->common.id : NIL,
- SYS_MSG_TYPE_TRACE,
- message);
-
- if (trace_port->control_flags & PORT_CONTROL_FLAG_HEAVY) {
- /* The driver has just informed us that the last write took a
- * non-negligible amount of time.
- *
- * We need to fake some trace messages to compensate for the time the
- * current process had to sacrifice for the writing of the previous
- * trace message. We pretend that the process got scheduled out
- * just after writing the real trace message, and now gets scheduled
- * in again.
- */
- do_send_schedfix_to_port(trace_port, c_p->common.id, ts);
- }
-
- erts_port_release(trace_port);
-
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
-#endif
-}
-
-#ifndef ERTS_SMP
-/* Profile send
- * Checks if profiler is port or process
- * Eterm msg is local, need copying.
- */
-
-static void
-profile_send(Eterm from, Eterm message) {
- Uint sz = 0;
- ErlHeapFragment *bp = NULL;
- Uint *hp = NULL;
- Eterm msg = NIL;
- Process *profile_p = NULL;
- ErlOffHeap *off_heap = NULL;
-
- Eterm profiler = erts_get_system_profile();
-
- /* do not profile profiler pid */
- if (from == profiler) return;
-
- if (is_internal_port(profiler)) {
- Port *profiler_port = NULL;
-
- /* not smp */
-
- profiler_port = erts_id2port_sflgs(profiler,
- NULL,
- 0,
- ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
- if (profiler_port) {
- do_send_to_port(profiler,
- profiler_port,
- NIL, /* or current process->common.id */
- SYS_MSG_TYPE_SYSPROF,
- message);
- erts_port_release(profiler_port);
- }
-
- } else {
- ASSERT(is_internal_pid(profiler));
-
- profile_p = erts_proc_lookup(profiler);
-
- if (!profile_p)
- return;
-
- sz = size_object(message);
- hp = erts_alloc_message_heap(sz, &bp, &off_heap, profile_p, 0);
- msg = copy_struct(message, sz, &hp, &bp->off_heap);
-
- erts_queue_message(profile_p, NULL, bp, msg, NIL
-#ifdef USE_VM_PROBES
- , NIL
-#endif
- );
- }
-}
-
-#endif
-
-
-/* A fake schedule out/in message pair will be sent,
- * if the driver so requests.
- * If (timestamp == NIL), one is fetched from GET_NOW().
- *
- * 'c_p' is the currently executing process, may be NULL.
- */
-static void
-seq_trace_send_to_port(Process *c_p,
- Eterm seq_tracer,
- Eterm message,
- Eterm timestamp)
-{
- Port* trace_port;
-#ifndef ERTS_SMP
- Eterm ts, *hp;
-#define LOCAL_HEAP_SIZE (4)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#endif
-
- ASSERT(is_internal_port(seq_tracer));
-#ifdef ERTS_SMP
- if (is_not_internal_port(seq_tracer))
- return;
-
- trace_port = NULL;
-#else
- trace_port = erts_id2port_sflgs(seq_tracer,
- NULL,
- 0,
- ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
- if (!trace_port) {
- system_seq_tracer = am_false;
-#ifndef ERTS_SMP
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#endif
- return;
- }
-
- if (c_p == NULL
- || (! IS_TRACED_FL(c_p, F_TRACE_SCHED | F_TIMESTAMP))) {
-#endif
- do_send_to_port(seq_tracer,
- trace_port,
- c_p ? c_p->common.id : NIL,
- SYS_MSG_TYPE_SEQTRACE,
- message);
-
-#ifndef ERTS_SMP
- erts_port_release(trace_port);
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
- return;
- }
- /* Make a fake schedule only if the current process is traced
- * with 'running' and 'timestamp'.
- */
-
- if (timestamp != NIL) {
- ts = timestamp;
- } else {
- /* A fake schedule might be needed,
- * but this message does not contain a timestamp.
- * Create a dummy trace message with timestamp to be
- * passed to do_send_schedfix_to_port().
- */
- Uint ms,s,us;
- GET_NOW(&ms, &s, &us);
- hp = local_heap;
- ts = TUPLE3(hp, make_small(ms), make_small(s), make_small(us));
- hp += 4;
- }
-
- trace_port->control_flags &= ~PORT_CONTROL_FLAG_HEAVY;
- do_send_to_port(seq_tracer,
- trace_port,
- c_p ? c_p->common.id : NIL,
- SYS_MSG_TYPE_SEQTRACE,
- message);
-
- if (trace_port->control_flags & PORT_CONTROL_FLAG_HEAVY) {
- /* The driver has just informed us that the last write took a
- * non-negligible amount of time.
- *
- * We need to fake some trace messages to compensate for the time the
- * current process had to sacrifice for the writing of the previous
- * trace message. We pretend that the process got scheduled out
- * just after writing the real trace message, and now gets scheduled
- * in again.
- */
- do_send_schedfix_to_port(trace_port, c_p->common.id, ts);
- }
-
- erts_port_release(trace_port);
-
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
-#endif
-}
-
-#define TS_HEAP_WORDS 5
-#define TS_SIZE(p) ((ERTS_TRACE_FLAGS((p)) & F_TIMESTAMP) \
- ? TS_HEAP_WORDS \
- : 0)
-
-/*
- * Patch a timestamp into a tuple. The tuple must be the last thing
- * built on the heap.
- *
- * Returns the new hp pointer.
-*/
-static Eterm*
-patch_ts(Eterm tuple, Eterm* hp)
-{
- Uint ms, s, us;
- Eterm* ptr = tuple_val(tuple);
- int arity = arityval(*ptr);
-
- ASSERT((ptr+arity+1) == hp);
- ptr[0] = make_arityval(arity+1);
- ptr[1] = am_trace_ts;
- GET_NOW(&ms, &s, &us);
- *hp = TUPLE3(hp+1, make_small(ms), make_small(s), make_small(us));
- return hp+5;
-}
-
-static ERTS_INLINE void
-send_to_tracer(Process *tracee,
- ERTS_TRACER_REF_TYPE tracer_ref,
- Eterm msg,
- Eterm **hpp,
- ErlHeapFragment *bp,
- int no_fake_sched)
-{
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(tracee));
-
- erts_smp_mtx_lock(&smq_mtx);
-
- if (ERTS_TRACE_FLAGS(tracee) & F_TIMESTAMP)
- *hpp = patch_ts(msg, *hpp);
-
- if (is_internal_pid(ERTS_TRACER_PROC(tracee)))
- ERTS_ENQ_TRACE_MSG(tracee->common.id, tracer_ref, msg, bp);
- else {
- ASSERT(is_internal_port(ERTS_TRACER_PROC(tracee)));
- send_to_port(no_fake_sched ? NULL : tracee,
- msg,
- &ERTS_TRACER_PROC(tracee),
- &ERTS_TRACE_FLAGS(tracee));
- }
-
- erts_smp_mtx_unlock(&smq_mtx);
-
-}
-
static void
-trace_sched_aux(Process *p, Eterm what, int never_fake_sched)
+trace_sched_aux(Process *p, ErtsProcLocks locks, Eterm what)
{
-#define LOCAL_HEAP_SIZE (5+4+1+TS_HEAP_WORDS)
- DeclareTmpHeap(local_heap,LOCAL_HEAP_SIZE,p);
- Eterm tmp, mess, *hp;
- ErlHeapFragment *bp = NULL;
- ErlOffHeap *off_heap;
- ERTS_TRACER_REF_TYPE tracer_ref = ERTS_NULL_TRACER_REF;
- int sched_no, curr_func, to_port, no_fake_sched;
+ Eterm tmp, *hp;
+ int curr_func;
+ ErtsTracerNif *tnif = NULL;
- if (is_nil(ERTS_TRACER_PROC(p)))
+ if (ERTS_TRACER_IS_NIL(ERTS_TRACER(p)))
return;
- no_fake_sched = never_fake_sched;
-
switch (what) {
case am_out:
case am_out_exiting:
case am_out_exited:
- no_fake_sched = 1;
- break;
case am_in:
case am_in_exiting:
break;
@@ -831,16 +687,8 @@ trace_sched_aux(Process *p, Eterm what, int never_fake_sched)
break;
}
- sched_no = IS_TRACED_FL(p, F_TRACE_SCHED_NO);
- to_port = is_internal_port(ERTS_TRACER_PROC(p));
-
- if (!to_port) {
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
-
- ERTS_GET_TRACER_REF(tracer_ref,
- ERTS_TRACER_PROC(p),
- ERTS_TRACE_FLAGS(p));
- }
+ if (!is_tracer_enabled(p, locks, &p->common, &tnif, TRACE_FUN_E_SCHED_PROC, what))
+ return;
if (ERTS_PROC_IS_EXITING(p))
curr_func = 0;
@@ -850,44 +698,17 @@ trace_sched_aux(Process *p, Eterm what, int never_fake_sched)
curr_func = p->current != NULL;
}
- UseTmpHeap(LOCAL_HEAP_SIZE,p);
-
- if (to_port)
- hp = local_heap;
- else {
- Uint size = 5;
- if (curr_func)
- size += 4;
- if (sched_no)
- size += 1;
- size += TS_SIZE(p);
- hp = ERTS_ALLOC_SYSMSG_HEAP(size, &bp, &off_heap, tracer_ref);
- }
-
if (!curr_func) {
tmp = make_small(0);
} else {
- tmp = TUPLE3(hp,p->current[0],p->current[1],make_small(p->current[2]));
+ hp = HAlloc(p, 4);
+ tmp = TUPLE3(hp,p->current->module,p->current->function,
+ make_small(p->current->arity));
hp += 4;
}
- if (!sched_no) {
- mess = TUPLE4(hp, am_trace, p->common.id, what, tmp);
- hp += 5;
- }
- else {
-#ifdef ERTS_SMP
- Eterm sched_id = make_small(p->scheduler_data->no);
-#else
- Eterm sched_id = make_small(1);
-#endif
- mess = TUPLE5(hp, am_trace, p->common.id, what, sched_id, tmp);
- hp += 6;
- }
-
- send_to_tracer(p, tracer_ref, mess, &hp, bp, no_fake_sched);
- UnUseTmpHeap(LOCAL_HEAP_SIZE,p);
-#undef LOCAL_HEAP_SIZE
+ send_to_tracer_nif(p, &p->common, p->common.id, tnif, TRACE_FUN_T_SCHED_PROC,
+ what, tmp, THE_NON_VALUE, am_true);
}
/* Send {trace_ts, Pid, What, {Mod, Func, Arity}, Timestamp}
@@ -897,9 +718,9 @@ trace_sched_aux(Process *p, Eterm what, int never_fake_sched)
* 'out_exiting', or 'out_exited'.
*/
void
-trace_sched(Process *p, Eterm what)
+trace_sched(Process *p, ErtsProcLocks locks, Eterm what)
{
- trace_sched_aux(p, what, 0);
+ trace_sched_aux(p, locks, what);
}
/* Send {trace_ts, Pid, Send, Msg, DestPid, Timestamp}
@@ -910,150 +731,120 @@ trace_sched(Process *p, Eterm what)
void
trace_send(Process *p, Eterm to, Eterm msg)
{
- Eterm operation;
- unsigned sz_msg;
- unsigned sz_to;
- Eterm* hp;
- Eterm mess;
-
- if (!ARE_TRACE_FLAGS_ON(p, F_TRACE_SEND)) {
+ Eterm operation = am_send;
+ ErtsTracerNif *tnif = NULL;
+ ErtsTracingEvent* te;
+ Eterm pam_result;
+ ErtsThrPrgrDelayHandle dhndl;
+
+ ASSERT(ARE_TRACE_FLAGS_ON(p, F_TRACE_SEND));
+
+ te = &erts_send_tracing[erts_active_bp_ix()];
+ if (!te->on) {
return;
}
+ if (te->match_spec) {
+ Eterm args[2];
+ Uint32 return_flags;
+ args[0] = to;
+ args[1] = msg;
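+ /* For send tracing the match spec head binds [Receiver, Message];
+ * e.g. (illustrative only) [{['$1','_'],[{'is_pid','$1'}],[]}]
+ * would match only sends to pids. */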
+ pam_result = erts_match_set_run_trace(p, p,
+ te->match_spec, args, 2,
+ ERTS_PAM_TMP_RESULT, &return_flags);
+ if (pam_result == am_false)
+ return;
+ if (ERTS_TRACE_FLAGS(p) & F_TRACE_SILENT) {
+ erts_match_set_release_result_trace(p, pam_result);
+ return;
+ }
+ } else
+ pam_result = am_true;
+
+ dhndl = erts_thr_progress_unmanaged_delay();
- operation = am_send;
if (is_internal_pid(to)) {
if (!erts_proc_lookup(to))
goto send_to_non_existing_process;
}
else if(is_external_pid(to)
&& external_pid_dist_entry(to) == erts_this_dist_entry) {
- char *s;
send_to_non_existing_process:
- s = "send_to_non_existing_process";
- operation = am_atom_put(s, sys_strlen(s));
+ operation = am_send_to_non_existing_process;
}
- if (is_internal_port(ERTS_TRACER_PROC(p))) {
-#define LOCAL_HEAP_SIZE (11)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
- hp = local_heap;
- mess = TUPLE5(hp, am_trace, p->common.id, operation, msg, to);
- hp += 6;
- erts_smp_mtx_lock(&smq_mtx);
- if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
- send_to_port(p, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
- } else {
- Uint need;
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- ERTS_TRACER_REF_TYPE tracer_ref;
-
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
-
- ERTS_GET_TRACER_REF(tracer_ref,
- ERTS_TRACER_PROC(p),
- ERTS_TRACE_FLAGS(p));
+ if (is_tracer_enabled(p, ERTS_PROC_LOCK_MAIN, &p->common, &tnif,
+ TRACE_FUN_E_SEND, operation)) {
+ send_to_tracer_nif(p, &p->common, p->common.id, tnif, TRACE_FUN_T_SEND,
+ operation, msg, to, pam_result);
+ }
- sz_msg = size_object(msg);
- sz_to = size_object(to);
- need = sz_msg + sz_to + 6 + TS_SIZE(p);
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(need, &bp, &off_heap, tracer_ref);
-
- to = copy_struct(to,
- sz_to,
- &hp,
- off_heap);
- msg = copy_struct(msg,
- sz_msg,
- &hp,
- off_heap);
- mess = TUPLE5(hp, am_trace, p->common.id, operation, msg, to);
- hp += 6;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
- patch_ts(mess, hp);
- }
+ erts_thr_progress_unmanaged_continue(dhndl);
- ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
- }
+ erts_match_set_release_result_trace(p, pam_result);
}
/* Send {trace_ts, Pid, receive, Msg, Timestamp}
* or {trace, Pid, receive, Msg}
*/
void
-trace_receive(Process *rp, Eterm msg)
+trace_receive(Process* receiver,
+ Eterm from,
+ Eterm msg, ErtsTracingEvent* te)
{
- Eterm mess;
- size_t sz_msg;
- Eterm* hp;
-
- if (is_internal_port(ERTS_TRACER_PROC(rp))) {
-#define LOCAL_HEAP_SIZE (10)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
- hp = local_heap;
- mess = TUPLE4(hp, am_trace, rp->common.id, am_receive, msg);
- hp += 5;
- erts_smp_mtx_lock(&smq_mtx);
- if (ERTS_TRACE_FLAGS(rp) & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
- send_to_port(rp, mess, &ERTS_TRACER_PROC(rp), &ERTS_TRACE_FLAGS(rp));
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
- } else {
- Uint hsz;
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- ERTS_TRACER_REF_TYPE tracer_ref;
-
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(rp)));
-
- ERTS_GET_TRACER_REF(tracer_ref,
- ERTS_TRACER_PROC(rp),
- ERTS_TRACE_FLAGS(rp));
-
- sz_msg = size_object(msg);
-
- hsz = sz_msg + 5 + TS_SIZE(rp);
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(hsz, &bp, &off_heap, tracer_ref);
-
- msg = copy_struct(msg, sz_msg, &hp, off_heap);
- mess = TUPLE4(hp, am_trace, rp->common.id, am_receive, msg);
- hp += 5;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- if (ERTS_TRACE_FLAGS(rp) & F_TIMESTAMP) {
- patch_ts(mess, hp);
- }
-
- ERTS_ENQ_TRACE_MSG(rp->common.id, tracer_ref, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
- }
+ ErtsTracerNif *tnif = NULL;
+ Eterm pam_result;
+
+ if (!te) {
+ te = &erts_receive_tracing[erts_active_bp_ix()];
+ if (!te->on)
+ return;
+ }
+ else ASSERT(te->on);
+
+ if (te->match_spec) {
+ Eterm args[3];
+ Uint32 return_flags;
+ if (is_pid(from)) {
+ args[0] = pid_node_name(from);
+ args[1] = from;
+ }
+ else {
+ ASSERT(is_atom(from));
+ args[0] = from; /* node name or other atom (e.g. 'system') */
+ args[1] = am_undefined;
+ }
+ args[2] = msg;
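+ /* Receive tracing match specs see [Node, Sender, Message] as
+ * the head, hence the three arguments bound above. */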
+ pam_result = erts_match_set_run_trace(NULL, receiver,
+ te->match_spec, args, 3,
+ ERTS_PAM_TMP_RESULT, &return_flags);
+ if (pam_result == am_false)
+ return;
+ if (ERTS_TRACE_FLAGS(receiver) & F_TRACE_SILENT) {
+ erts_match_set_release_result_trace(NULL, pam_result);
+ return;
+ }
+ } else
+ pam_result = am_true;
+
+ if (is_tracer_enabled(NULL, 0, &receiver->common, &tnif,
+ TRACE_FUN_E_RECEIVE, am_receive)) {
+ send_to_tracer_nif(NULL, &receiver->common, receiver->common.id,
+ tnif, TRACE_FUN_T_RECEIVE,
+ am_receive, msg, THE_NON_VALUE, pam_result);
+ }
+ erts_match_set_release_result_trace(NULL, pam_result);
}
int
seq_trace_update_send(Process *p)
{
- Eterm seq_tracer = erts_get_system_seq_tracer();
+ ErtsTracer seq_tracer = erts_get_system_seq_tracer();
ASSERT((is_tuple(SEQ_TRACE_TOKEN(p)) || is_nil(SEQ_TRACE_TOKEN(p))));
- if ( (p->common.id == seq_tracer) || (SEQ_TRACE_TOKEN(p) == NIL)
+ if (have_no_seqtrace(SEQ_TRACE_TOKEN(p)) ||
+ (seq_tracer != NIL &&
+ call_enabled_tracer(seq_tracer, NULL,
+ TRACE_FUN_ENABLED, am_seq_trace,
+ p ? p->common.id : am_undefined) != am_trace)
#ifdef USE_VM_PROBES
|| (SEQ_TRACE_TOKEN(p) == am_have_dt_utag)
#endif
@@ -1085,19 +876,29 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
Eterm receiver, Process *process, Eterm exitfrom)
{
Eterm mess;
- ErlHeapFragment* bp;
Eterm* hp;
Eterm label;
Eterm lastcnt_serial;
Eterm type_atom;
- int sz_exit;
- Eterm seq_tracer;
+ ErtsTracer seq_tracer;
+ int seq_tracer_flags = 0;
+#define LOCAL_HEAP_SIZE (64)
+ DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
seq_tracer = erts_get_system_seq_tracer();
ASSERT(is_tuple(token) || is_nil(token));
- if (SEQ_TRACE_T_SENDER(token) == seq_tracer || token == NIL ||
- (process && ERTS_TRACE_FLAGS(process) & F_SENSITIVE)) {
+ if (token == NIL || (process && ERTS_TRACE_FLAGS(process) & F_SENSITIVE) ||
+ ERTS_TRACER_IS_NIL(seq_tracer) ||
+ call_enabled_tracer(seq_tracer,
+ NULL, TRACE_FUN_ENABLED,
+ am_seq_trace,
+ process ? process->common.id : am_undefined) != am_trace) {
+ return;
+ }
+
+ if ((unsigned_val(SEQ_TRACE_T_FLAGS(token)) & type) == 0) {
+ /* No flags set, nothing to do */
return;
}
@@ -1106,147 +907,32 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
case SEQ_TRACE_PRINT: type_atom = am_print; break;
case SEQ_TRACE_RECEIVE: type_atom = am_receive; break;
default:
- erl_exit(1, "invalid type in seq_trace_output_generic: %d:\n", type);
+ erts_exit(ERTS_ERROR_EXIT, "invalid type in seq_trace_output_generic: %d:\n", type);
return; /* To avoid warning */
}
- if ((unsigned_val(SEQ_TRACE_T_FLAGS(token)) & type) == 0) {
- /* No flags set, nothing to do */
- return;
- }
+ UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
- if (seq_tracer == am_false) {
- return; /* no need to send anything */
+ hp = local_heap;
+ label = SEQ_TRACE_T_LABEL(token);
+ lastcnt_serial = TUPLE2(hp, SEQ_TRACE_T_LASTCNT(token),
+ SEQ_TRACE_T_SERIAL(token));
+ hp += 3;
+ if (exitfrom != NIL) {
+ msg = TUPLE3(hp, am_EXIT, exitfrom, msg);
+ hp += 4;
}
+ mess = TUPLE5(hp, type_atom, lastcnt_serial, SEQ_TRACE_T_SENDER(token), receiver, msg);
+ hp += 6;
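+ /* 'mess' now has the seq_trace info shape
+ * {Type, {LastCnt,Serial}, Sender, Receiver, Message}; the tracer
+ * is expected to wrap it as {seq_trace, Label, Info[, Timestamp]}. */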
- if (is_internal_port(seq_tracer)) {
-#define LOCAL_HEAP_SIZE (64)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
- hp = local_heap;
- label = SEQ_TRACE_T_LABEL(token);
- lastcnt_serial = TUPLE2(hp, SEQ_TRACE_T_LASTCNT(token),
- SEQ_TRACE_T_SERIAL(token));
- hp += 3;
- if (exitfrom != NIL) {
- msg = TUPLE3(hp, am_EXIT, exitfrom, msg);
- hp += 4;
- }
- mess = TUPLE5(hp, type_atom, lastcnt_serial, SEQ_TRACE_T_SENDER(token),
- receiver, msg);
- hp += 6;
- erts_smp_mtx_lock(&smq_mtx);
- if ((unsigned_val(SEQ_TRACE_T_FLAGS(token)) & SEQ_TRACE_TIMESTAMP) == 0) {
- mess = TUPLE3(hp, am_seq_trace, label, mess);
- seq_trace_send_to_port(NULL, seq_tracer, mess, NIL);
- } else {
- Uint ms,s,us,ts;
- GET_NOW(&ms, &s, &us);
- ts = TUPLE3(hp, make_small(ms),make_small(s), make_small(us));
- hp += 4;
- mess = TUPLE4(hp, am_seq_trace, label, mess, ts);
- seq_trace_send_to_port(process, seq_tracer, mess, ts);
- }
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
- } else {
-#ifndef ERTS_SMP
- Process* tracer;
-#endif
- Eterm sender_copy;
- Eterm receiver_copy;
- Eterm m2;
- Uint sz_label, sz_lastcnt_serial, sz_msg, sz_ts, sz_sender,
- sz_exitfrom, sz_receiver;
-
- ASSERT(is_internal_pid(seq_tracer));
+ seq_tracer_flags |= ERTS_SEQTFLGS2TFLGS(unsigned_val(SEQ_TRACE_T_FLAGS(token)));
-#ifndef ERTS_SMP
+ send_to_tracer_nif_raw(NULL, process, seq_tracer, seq_tracer_flags,
+ label, NULL, TRACE_FUN_DEFAULT, am_seq_trace, mess,
+ THE_NON_VALUE, am_true);
- tracer = erts_proc_lookup(seq_tracer);
- if (!tracer) {
- system_seq_tracer = am_false;
- return; /* no need to send anything */
- }
-#endif
- if (receiver == seq_tracer) {
- return; /* no need to send anything */
- }
-
- sz_label = size_object(SEQ_TRACE_T_LABEL(token));
- sz_sender = size_object(SEQ_TRACE_T_SENDER(token));
- sz_receiver = size_object(receiver);
- sz_lastcnt_serial = 3; /* TUPLE2 */
- sz_msg = size_object(msg);
-
- sz_ts = ((unsigned_val(SEQ_TRACE_T_FLAGS(token)) & SEQ_TRACE_TIMESTAMP) ?
- 5 : 0);
- if (exitfrom != NIL) {
- sz_exit = 4; /* create {'EXIT',exitfrom,msg} */
- sz_exitfrom = size_object(exitfrom);
- }
- else {
- sz_exit = 0;
- sz_exitfrom = 0;
- }
- bp = new_message_buffer(4 /* TUPLE3 */ + sz_ts + 6 /* TUPLE5 */
- + sz_lastcnt_serial + sz_label + sz_msg
- + sz_exit + sz_exitfrom
- + sz_sender + sz_receiver);
- hp = bp->mem;
- label = copy_struct(SEQ_TRACE_T_LABEL(token), sz_label, &hp, &bp->off_heap);
- lastcnt_serial = TUPLE2(hp,SEQ_TRACE_T_LASTCNT(token),SEQ_TRACE_T_SERIAL(token));
- hp += 3;
- m2 = copy_struct(msg, sz_msg, &hp, &bp->off_heap);
- if (sz_exit) {
- Eterm exitfrom_copy = copy_struct(exitfrom,
- sz_exitfrom,
- &hp,
- &bp->off_heap);
- m2 = TUPLE3(hp, am_EXIT, exitfrom_copy, m2);
- hp += 4;
- }
- sender_copy = copy_struct(SEQ_TRACE_T_SENDER(token),
- sz_sender,
- &hp,
- &bp->off_heap);
- receiver_copy = copy_struct(receiver,
- sz_receiver,
- &hp,
- &bp->off_heap);
- mess = TUPLE5(hp,
- type_atom,
- lastcnt_serial,
- sender_copy,
- receiver_copy,
- m2);
- hp += 6;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- if (sz_ts) {/* timestamp should be included */
- Uint ms,s,us,ts;
- GET_NOW(&ms, &s, &us);
- ts = TUPLE3(hp, make_small(ms),make_small(s), make_small(us));
- hp += 4;
- mess = TUPLE4(hp, am_seq_trace, label, mess, ts);
- } else {
- mess = TUPLE3(hp, am_seq_trace, label, mess);
- }
-
-#ifdef ERTS_SMP
- enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SEQTRACE, NIL, NIL, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
-#else
- erts_queue_message(tracer, NULL, bp, mess, NIL
-#ifdef USE_VM_PROBES
- , NIL
-#endif
- ); /* trace_token must be NIL here */
-#endif
- }
+ UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+#undef LOCAL_HEAP_SIZE
}
/* Send {trace_ts, Pid, return_to, {Mod, Func, Arity}, Timestamp}
@@ -1255,65 +941,20 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
void
erts_trace_return_to(Process *p, BeamInstr *pc)
{
-#define LOCAL_HEAP_SIZE (4+5+5)
- Eterm* hp;
Eterm mfa;
- Eterm mess;
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
-
- BeamInstr *code_ptr = find_function_from_pc(pc);
+ ErtsCodeMFA *cmfa = find_function_from_pc(pc);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
- hp = local_heap;
-
- if (!code_ptr) {
+ if (!cmfa) {
mfa = am_undefined;
} else {
- mfa = TUPLE3(hp, code_ptr[0], code_ptr[1], make_small(code_ptr[2]));
- hp += 4;
+ Eterm *hp = HAlloc(p, 4);
+ mfa = TUPLE3(hp, cmfa->module, cmfa->function,
+ make_small(cmfa->arity));
}
-
- mess = TUPLE4(hp, am_trace, p->common.id, am_return_to, mfa);
- hp += 5;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
-
- if (is_internal_port(ERTS_TRACER_PROC(p))) {
- send_to_port(p, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
- } else {
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- ERTS_TRACER_REF_TYPE tracer_ref;
- unsigned size;
-
- /*
- * Find the tracer.
- */
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
-
- ERTS_GET_TRACER_REF(tracer_ref,
- ERTS_TRACER_PROC(p),
- ERTS_TRACE_FLAGS(p));
- size = size_object(mess);
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(size, &bp, &off_heap, tracer_ref);
-
- /*
- * Copy the trace message into the buffer and enqueue it.
- */
- mess = copy_struct(mess, size, &hp, off_heap);
- ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp);
- }
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
+ send_to_tracer_nif(p, &p->common, p->common.id, NULL, TRACE_FUN_T_CALL,
+ am_return_to, mfa, THE_NON_VALUE, am_true);
}
@@ -1321,126 +962,51 @@ erts_trace_return_to(Process *p, BeamInstr *pc)
* or {trace, Pid, return_from, {Mod, Name, Arity}, Retval}
*/
void
-erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid)
+erts_trace_return(Process* p, ErtsCodeMFA *mfa,
+ Eterm retval, ErtsTracer *tracer)
{
Eterm* hp;
- Eterm mfa;
- Eterm mess;
- Eterm mod, name;
- int arity;
+ Eterm mfa_tuple;
Uint meta_flags, *tracee_flags;
-#ifdef ERTS_SMP
- Eterm tracee;
-#endif
-
- ASSERT(tracer_pid);
- if (*tracer_pid == am_true) {
+
+ ASSERT(tracer);
+ if (ERTS_TRACER_COMPARE(*tracer, erts_tracer_true)) {
/* Breakpoint trace enabled without specifying tracer =>
* use process tracer and flags
*/
- tracer_pid = &ERTS_TRACER_PROC(p);
+ tracer = &ERTS_TRACER(p);
}
- if (is_nil(*tracer_pid)) {
+ if (ERTS_TRACER_IS_NIL(*tracer)) {
/* Trace disabled */
return;
}
- ASSERT(is_internal_pid(*tracer_pid) || is_internal_port(*tracer_pid));
- if (*tracer_pid == p->common.id) {
- /* Do not generate trace messages to oneself */
- return;
- }
- if (tracer_pid == &ERTS_TRACER_PROC(p)) {
+ ASSERT(IS_TRACER_VALID(*tracer));
+ if (tracer == &ERTS_TRACER(p)) {
/* Tracer specified in process structure =>
* non-breakpoint trace =>
* use process flags
*/
tracee_flags = &ERTS_TRACE_FLAGS(p);
-#ifdef ERTS_SMP
- tracee = p->common.id;
-#endif
+ if (! (*tracee_flags & F_TRACE_CALLS)) {
+ return;
+ }
} else {
/* Tracer not specified in process structure =>
* tracer specified in breakpoint =>
* meta trace =>
* use fixed flag set instead of process flags
- */
- meta_flags = F_TRACE_CALLS | F_TIMESTAMP;
+ */
+ meta_flags = F_TRACE_CALLS | F_NOW_TS;
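+ /* F_NOW_TS requests erlang:now/0-style timestamps, matching what
+ * the old F_TIMESTAMP flag produced. */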
tracee_flags = &meta_flags;
-#ifdef ERTS_SMP
- tracee = NIL;
-#endif
- }
- if (! (*tracee_flags & F_TRACE_CALLS)) {
- return;
}
-
- mod = fi[0];
- name = fi[1];
- arity = fi[2];
-
- if (is_internal_port(*tracer_pid)) {
-#define LOCAL_HEAP_SIZE (4+6+5)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
- hp = local_heap;
- mfa = TUPLE3(hp, mod, name, make_small(arity));
- hp += 4;
- mess = TUPLE5(hp, am_trace, p->common.id, am_return_from, mfa, retval);
- hp += 6;
- erts_smp_mtx_lock(&smq_mtx);
- if (*tracee_flags & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
- send_to_port(p, mess, tracer_pid, tracee_flags);
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
- } else {
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- ERTS_TRACER_REF_TYPE tracer_ref;
- unsigned size;
- unsigned retval_size;
-#ifdef DEBUG
- Eterm* limit;
-#endif
- ASSERT(is_internal_pid(*tracer_pid));
-
- ERTS_GET_TRACER_REF(tracer_ref, *tracer_pid, *tracee_flags);
-
- retval_size = size_object(retval);
- size = 6 + 4 + retval_size;
- if (*tracee_flags & F_TIMESTAMP) {
- size += 1+4;
- }
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(size, &bp, &off_heap, tracer_ref);
-#ifdef DEBUG
- limit = hp + size;
-#endif
-
- /*
- * Build the trace tuple and put it into receive queue of the tracer process.
- */
-
- mfa = TUPLE3(hp, mod, name, make_small(arity));
- hp += 4;
- retval = copy_struct(retval, retval_size, &hp, off_heap);
- mess = TUPLE5(hp, am_trace, p->common.id, am_return_from, mfa, retval);
- hp += 6;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- if (*tracee_flags & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
-
- ASSERT(hp == limit);
-
- ERTS_ENQ_TRACE_MSG(tracee, tracer_ref, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
- }
+ hp = HAlloc(p, 4);
+ mfa_tuple = TUPLE3(hp, mfa->module, mfa->function,
+ make_small(mfa->arity));
+ hp += 4;
+ send_to_tracer_nif_raw(p, NULL, *tracer, *tracee_flags, p->common.id,
+ NULL, TRACE_FUN_T_CALL, am_return_from, mfa_tuple,
+ retval, am_true);
}
/* Send {trace_ts, Pid, exception_from, {Mod, Name, Arity}, {Class,Value},
@@ -1451,130 +1017,51 @@ erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid)
* Where Class is atomic but Value is any term.
*/
void
-erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
- Eterm *tracer_pid)
+erts_trace_exception(Process* p, ErtsCodeMFA *mfa, Eterm class, Eterm value,
+ ErtsTracer *tracer)
{
Eterm* hp;
- Eterm mfa_tuple;
- Eterm cv;
- Eterm mess;
+ Eterm mfa_tuple, cv;
Uint meta_flags, *tracee_flags;
-#ifdef ERTS_SMP
- Eterm tracee;
-#endif
-
- ASSERT(tracer_pid);
- if (*tracer_pid == am_true) {
+
+ ASSERT(tracer);
+ if (ERTS_TRACER_COMPARE(*tracer, erts_tracer_true)) {
/* Breakpoint trace enabled without specifying tracer =>
* use process tracer and flags
*/
- tracer_pid = &ERTS_TRACER_PROC(p);
+ tracer = &ERTS_TRACER(p);
}
- if (is_nil(*tracer_pid)) {
+ if (ERTS_TRACER_IS_NIL(*tracer)) {
/* Trace disabled */
return;
}
- ASSERT(is_internal_pid(*tracer_pid) || is_internal_port(*tracer_pid));
- if (*tracer_pid == p->common.id) {
- /* Do not generate trace messages to oneself */
- return;
- }
- if (tracer_pid == &ERTS_TRACER_PROC(p)) {
+ ASSERT(IS_TRACER_VALID(*tracer));
+ if (tracer == &ERTS_TRACER(p)) {
/* Tracer specified in process structure =>
* non-breakpoint trace =>
* use process flags
*/
tracee_flags = &ERTS_TRACE_FLAGS(p);
-#ifdef ERTS_SMP
- tracee = p->common.id;
-#endif
- if (! (*tracee_flags & F_TRACE_CALLS)) {
- return;
- }
+ if (! (*tracee_flags & F_TRACE_CALLS)) {
+ return;
+ }
} else {
/* Tracer not specified in process structure =>
* tracer specified in breakpoint =>
* meta trace =>
* use fixed flag set instead of process flags
- */
- meta_flags = F_TRACE_CALLS | F_TIMESTAMP;
+ */
+ meta_flags = F_TRACE_CALLS | F_NOW_TS;
tracee_flags = &meta_flags;
-#ifdef ERTS_SMP
- tracee = NIL;
-#endif
}
-
- if (is_internal_port(*tracer_pid)) {
-#define LOCAL_HEAP_SIZE (4+3+6+5)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
- hp = local_heap;
- mfa_tuple = TUPLE3(hp, (Eterm) mfa[0], (Eterm) mfa[1], make_small((Eterm)mfa[2]));
- hp += 4;
- cv = TUPLE2(hp, class, value);
- hp += 3;
- mess = TUPLE5(hp, am_trace, p->common.id, am_exception_from, mfa_tuple, cv);
- hp += 6;
- ASSERT((hp - local_heap) <= LOCAL_HEAP_SIZE);
- erts_smp_mtx_lock(&smq_mtx);
- if (*tracee_flags & F_TIMESTAMP) {
- hp = patch_ts(mess, hp); /* hp += 5 */
- ASSERT((hp - local_heap) == LOCAL_HEAP_SIZE);
- }
- send_to_port(p, mess, tracer_pid, tracee_flags);
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
- } else {
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- ERTS_TRACER_REF_TYPE tracer_ref;
- unsigned size;
- unsigned value_size;
-#ifdef DEBUG
- Eterm* limit;
-#endif
-
- ASSERT(is_internal_pid(*tracer_pid));
-
- ERTS_GET_TRACER_REF(tracer_ref, *tracer_pid, *tracee_flags);
-
- value_size = size_object(value);
- size = 6 + 4 + 3 + value_size;
- if (*tracee_flags & F_TIMESTAMP) {
- size += 1+4;
- }
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(size, &bp, &off_heap, tracer_ref);
-#ifdef DEBUG
- limit = hp + size;
-#endif
-
- /*
- * Build the trace tuple and put it into receive queue of the tracer process.
- */
-
- mfa_tuple = TUPLE3(hp, (Eterm) mfa[0], (Eterm) mfa[1], make_small((Eterm) mfa[2]));
- hp += 4;
- value = copy_struct(value, value_size, &hp, off_heap);
- cv = TUPLE2(hp, class, value);
- hp += 3;
- mess = TUPLE5(hp, am_trace, p->common.id,
- am_exception_from, mfa_tuple, cv);
- hp += 6;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- if (*tracee_flags & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
-
- ASSERT(hp == limit);
-
- ERTS_ENQ_TRACE_MSG(tracee, tracer_ref, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
- }
+ hp = HAlloc(p, 7);
+ mfa_tuple = TUPLE3(hp, mfa->module, mfa->function, make_small(mfa->arity));
+ hp += 4;
+ cv = TUPLE2(hp, class, value);
+ hp += 3;
+ send_to_tracer_nif_raw(p, NULL, *tracer, *tracee_flags, p->common.id,
+ NULL, TRACE_FUN_T_CALL, am_exception_from, mfa_tuple, cv, am_true);
}
/*
@@ -1592,8 +1079,8 @@ erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
* if it is a pid or port we do a meta trace.
*/
Uint32
-erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
- Eterm* args, int local, Eterm *tracer_pid)
+erts_call_trace(Process* p, ErtsCodeInfo *info, Binary *match_spec,
+ Eterm* args, int local, ErtsTracer *tracer)
{
Eterm* hp;
Eterm mfa_tuple;
@@ -1601,54 +1088,67 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
int i;
Uint32 return_flags;
Eterm pam_result = am_true;
- Eterm mess;
Uint meta_flags, *tracee_flags;
-#ifdef ERTS_SMP
- Eterm tracee;
-#endif
+ ErtsTracerNif *tnif = NULL;
Eterm transformed_args[MAX_ARG];
- DeclareTypedTmpHeap(ErlSubBin,sub_bin_heap,p);
+ ErtsTracer pre_ms_tracer = erts_tracer_nil;
- ASSERT(tracer_pid);
- if (*tracer_pid == am_true) {
- /* Breakpoint trace enabled without specifying tracer =>
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN);
+
+ ASSERT(tracer);
+ if (ERTS_TRACER_COMPARE(*tracer, erts_tracer_true)) {
+ /* Breakpoint trace enabled without specifying tracer =>
* use process tracer and flags
*/
- tracer_pid = &ERTS_TRACER_PROC(p);
- }
- if (is_nil(*tracer_pid)) {
- /* Trace disabled */
- return 0;
+ tracer = &ERTS_TRACER(p);
}
- ASSERT(is_internal_pid(*tracer_pid) || is_internal_port(*tracer_pid));
- if (*tracer_pid == p->common.id) {
- /* Do not generate trace messages to oneself */
+ if (ERTS_TRACER_IS_NIL(*tracer)) {
+ /* Trace disabled */
return 0;
}
- if (tracer_pid == &ERTS_TRACER_PROC(p)) {
+ ASSERT(IS_TRACER_VALID(*tracer));
+ if (tracer == &ERTS_TRACER(p)) {
/* Tracer specified in process structure =>
* non-breakpoint trace =>
* use process flags
*/
tracee_flags = &ERTS_TRACE_FLAGS(p);
-#ifdef ERTS_SMP
- tracee = p->common.id;
-#endif
+ /* It is not ideal to call this check twice;
+ it should be optimized so that only one call is made. */
+ if (!is_tracer_enabled(p, ERTS_PROC_LOCK_MAIN, &p->common, &tnif,
+ TRACE_FUN_ENABLED, am_trace_status)
+ || !is_tracer_enabled(p, ERTS_PROC_LOCK_MAIN, &p->common, &tnif,
+ TRACE_FUN_E_CALL, am_call)) {
+ return 0;
+ }
} else {
/* Tracer not specified in process structure =>
* tracer specified in breakpoint =>
* meta trace =>
* use fixed flag set instead of process flags
- */
- if (ERTS_TRACE_FLAGS(p) & F_SENSITIVE) {
- /* No trace messages for sensitive processes. */
- return 0;
- }
- meta_flags = F_TRACE_CALLS | F_TIMESTAMP;
+ */
+ if (ERTS_TRACE_FLAGS(p) & F_SENSITIVE) {
+ /* No trace messages for sensitive processes. */
+ return 0;
+ }
+ meta_flags = F_TRACE_CALLS | F_NOW_TS;
tracee_flags = &meta_flags;
-#ifdef ERTS_SMP
- tracee = NIL;
-#endif
+ switch (call_enabled_tracer(*tracer,
+ &tnif, TRACE_FUN_ENABLED,
+ am_trace_status, p->common.id)) {
+ default:
+ case am_remove: *tracer = erts_tracer_nil;
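+ /* fall through */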
+ case am_discard: return 0;
+ case am_trace:
+ switch (call_enabled_tracer(*tracer,
+ &tnif, TRACE_FUN_T_CALL,
+ am_call, p->common.id)) {
+ default:
+ case am_discard: return 0;
+ case am_trace: break;
+ }
+ break;
+ }
}
/*
@@ -1658,19 +1158,14 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
* such as size_object() and copy_struct(), we must make sure that we
* temporarily convert any match contexts to sub binaries.
*/
- arity = (Eterm) mfa[2];
- UseTmpHeap(ERL_SUB_BIN_SIZE,p);
-#ifdef DEBUG
- sub_bin_heap->thing_word = 0;
-#endif
+ arity = info->mfa.arity;
for (i = 0; i < arity; i++) {
Eterm arg = args[i];
if (is_boxed(arg) && header_is_bin_matchstate(*boxed_val(arg))) {
ErlBinMatchState* ms = (ErlBinMatchState *) boxed_val(arg);
ErlBinMatchBuffer* mb = &ms->mb;
Uint bit_size;
-
- ASSERT(sub_bin_heap->thing_word == 0); /* At most one of match context */
+ ErlSubBin *sub_bin_heap = (ErlSubBin *)HAlloc(p, ERL_SUB_BIN_SIZE);
bit_size = mb->size - mb->offset;
sub_bin_heap->thing_word = HEADER_SUB_BIN;
@@ -1687,311 +1182,94 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
}
args = transformed_args;
- if (is_internal_port(*tracer_pid)) {
-#if HEAP_ON_C_STACK
- Eterm local_heap[64+MAX_ARG];
-#else
- Eterm *local_heap = erts_alloc(ERTS_ALC_T_TEMP_TERM,
- sizeof(Eterm)*(64+MAX_ARG));
-#endif
- hp = local_heap;
-
- if (!erts_is_valid_tracer_port(*tracer_pid)) {
-#ifdef ERTS_SMP
- ASSERT(is_nil(tracee) || tracer_pid == &ERTS_TRACER_PROC(p));
- if (is_not_nil(tracee))
- erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
-#endif
- *tracee_flags &= ~TRACEE_FLAGS;
- *tracer_pid = NIL;
-#ifdef ERTS_SMP
- if (is_not_nil(tracee))
- erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
-#endif
-#if !HEAP_ON_C_STACK
- erts_free(ERTS_ALC_T_TEMP_TERM,local_heap);
-#endif
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return 0;
- }
-
- /*
- * If there is a PAM program, run it. Return if it fails.
- *
- * Some precedence rules:
- *
- * - No proc flags, e.g 'silent' or 'return_to'
- * has any effect on meta trace.
- * - The 'silent' process trace flag silences all call
- * related messages, e.g 'call', 'return_to' and 'return_from'.
- * - The {message,_} PAM function does not affect {return_trace}.
- * - The {message,false} PAM function shall give the same
- * 'call' trace message as no PAM match.
- * - The {message,true} PAM function shall give the same
- * 'call' trace message as a nonexistent PAM program.
- */
-
- /* BEGIN this code should be the same for port and pid trace */
- return_flags = 0;
- if (match_spec) {
- pam_result = erts_match_set_run(p, match_spec, args, arity,
- ERTS_PAM_TMP_RESULT, &return_flags);
- if (is_non_value(pam_result)) {
- erts_match_set_release_result(p);
-#if !HEAP_ON_C_STACK
- erts_free(ERTS_ALC_T_TEMP_TERM,local_heap);
-#endif
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return 0;
- }
- }
- if (tracee_flags == &meta_flags) {
- /* Meta trace */
- if (pam_result == am_false) {
- erts_match_set_release_result(p);
-#if !HEAP_ON_C_STACK
- erts_free(ERTS_ALC_T_TEMP_TERM,local_heap);
-#endif
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return return_flags;
- }
- } else {
- /* Non-meta trace */
- if (*tracee_flags & F_TRACE_SILENT) {
- erts_match_set_release_result(p);
-#if !HEAP_ON_C_STACK
- erts_free(ERTS_ALC_T_TEMP_TERM,local_heap);
-#endif
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return 0;
- }
- if (pam_result == am_false) {
- erts_match_set_release_result(p);
-#if !HEAP_ON_C_STACK
- erts_free(ERTS_ALC_T_TEMP_TERM,local_heap);
-#endif
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return return_flags;
- }
- if (local && (*tracee_flags & F_TRACE_RETURN_TO)) {
- return_flags |= MATCH_SET_RETURN_TO_TRACE;
- }
- }
- /* END this code should be the same for port and pid trace */
-
- /*
- * Build the {M,F,A} tuple in the local heap.
- * (A is arguments or arity.)
- */
-
- if (*tracee_flags & F_TRACE_ARITY_ONLY) {
- mfa_tuple = make_small(arity);
- } else {
- mfa_tuple = NIL;
- for (i = arity-1; i >= 0; i--) {
- mfa_tuple = CONS(hp, args[i], mfa_tuple);
- hp += 2;
- }
- }
- mfa_tuple = TUPLE3(hp, (Eterm) mfa[0], (Eterm) mfa[1], mfa_tuple);
- hp += 4;
-
- /*
- * Build the trace tuple and send it to the port.
- */
-
- mess = TUPLE4(hp, am_trace, p->common.id, am_call, mfa_tuple);
- hp += 5;
- if (pam_result != am_true) {
- hp[-5] = make_arityval(5);
- *hp++ = pam_result;
- }
- erts_smp_mtx_lock(&smq_mtx);
- if (*tracee_flags & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
- send_to_port(p, mess, tracer_pid, tracee_flags);
- erts_smp_mtx_unlock(&smq_mtx);
- erts_match_set_release_result(p);
-#if !HEAP_ON_C_STACK
- erts_free(ERTS_ALC_T_TEMP_TERM,local_heap);
-#endif
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return *tracer_pid == NIL ? 0 : return_flags;
+ /*
+ * If there is a PAM program, run it. Return if it fails.
+ *
+ * Some precedence rules:
+ *
+ * - No proc flags, e.g. 'silent' or 'return_to',
+ * have any effect on meta trace.
+ * - The 'silent' process trace flag silences all call
+ * related messages, e.g. 'call', 'return_to' and 'return_from'.
+ * - The {message,_} PAM function does not affect {return_trace}.
+ * - The {message,false} PAM function shall give the same
+ * 'call' trace message as no PAM match.
+ * - The {message,true} PAM function shall give the same
+ * 'call' trace message as a nonexistent PAM program.
+ */
+ return_flags = 0;
+ if (match_spec) {
+ /* we have to make a copy of the tracer here as the match spec
+ may remove it, and we still want to generate a trace message */
+ erts_tracer_update(&pre_ms_tracer, *tracer);
+ tracer = &pre_ms_tracer;
+ pam_result = erts_match_set_run_trace(p, p,
+ match_spec, args, arity,
+ ERTS_PAM_TMP_RESULT, &return_flags);
+ }
+
+ if (tracee_flags == &meta_flags) {
+ /* Meta trace */
+ if (pam_result == am_false) {
+ UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
+ ERTS_TRACER_CLEAR(&pre_ms_tracer);
+ return return_flags;
+ }
} else {
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- Process *tracer;
- ERTS_TRACER_REF_TYPE tracer_ref;
-#ifdef ERTS_SMP
- Eterm tpid;
-#endif
- unsigned size;
- unsigned sizes[MAX_ARG];
- unsigned pam_result_size = 0;
- int invalid_tracer;
-#ifdef DEBUG
- Eterm* limit;
-#endif
-
- ASSERT(is_internal_pid(*tracer_pid));
-
- tracer = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
- *tracer_pid, ERTS_PROC_LOCK_STATUS);
- if (!tracer)
- invalid_tracer = 1;
- else {
- invalid_tracer = !(ERTS_TRACE_FLAGS(tracer) & F_TRACER);
- erts_smp_proc_unlock(tracer, ERTS_PROC_LOCK_STATUS);
- }
+ /* Non-meta trace */
+ if (*tracee_flags & F_TRACE_SILENT) {
+ erts_match_set_release_result_trace(p, pam_result);
+ UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
+ ERTS_TRACER_CLEAR(&pre_ms_tracer);
+ return 0;
+ }
+ if (pam_result == am_false) {
+ UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
+ ERTS_TRACER_CLEAR(&pre_ms_tracer);
+ return return_flags;
+ }
+ if (local && (*tracee_flags & F_TRACE_RETURN_TO)) {
+ return_flags |= MATCH_SET_RETURN_TO_TRACE;
+ }
+ }
+
+ ASSERT(!ERTS_TRACER_IS_NIL(*tracer));
- if (invalid_tracer) {
-#ifdef ERTS_SMP
- ASSERT(is_nil(tracee)
- || tracer_pid == &ERTS_TRACER_PROC(p));
- if (is_not_nil(tracee))
- erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
-#endif
- *tracee_flags &= ~TRACEE_FLAGS;
- *tracer_pid = NIL;
-#ifdef ERTS_SMP
- if (is_not_nil(tracee))
- erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
-#endif
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return 0;
- }
-
-#ifdef ERTS_SMP
- tpid = *tracer_pid; /* Need to save tracer pid,
- since *tracer_pid might
- be reset by erts_match_set_run() */
- tracer_ref = tpid;
-#else
- tracer_ref = tracer;
-#endif
-
- /*
- * If there is a PAM program, run it. Return if it fails.
- *
- * See the rules above in the port trace code.
- */
-
- /* BEGIN this code should be the same for port and pid trace */
- return_flags = 0;
- if (match_spec) {
- pam_result = erts_match_set_run(p, match_spec, args, arity,
- ERTS_PAM_TMP_RESULT, &return_flags);
- if (is_non_value(pam_result)) {
- erts_match_set_release_result(p);
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return 0;
- }
- }
- if (tracee_flags == &meta_flags) {
- /* Meta trace */
- if (pam_result == am_false) {
- erts_match_set_release_result(p);
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return return_flags;
- }
- } else {
- /* Non-meta trace */
- if (*tracee_flags & F_TRACE_SILENT) {
- erts_match_set_release_result(p);
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return 0;
- }
- if (pam_result == am_false) {
- erts_match_set_release_result(p);
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return return_flags;
- }
- if (local && (*tracee_flags & F_TRACE_RETURN_TO)) {
- return_flags |= MATCH_SET_RETURN_TO_TRACE;
- }
- }
- /* END this code should be the same for port and pid trace */
-
- /*
- * Calculate number of words needed on heap.
- */
-
- size = 4 + 5; /* Trace tuple + MFA tuple. */
- if (! (*tracee_flags & F_TRACE_ARITY_ONLY)) {
- size += 2*arity;
- for (i = arity-1; i >= 0; i--) {
- sizes[i] = size_object(args[i]);
- size += sizes[i];
- }
- }
- if (*tracee_flags & F_TIMESTAMP) {
- size += 1 + 4;
- /* One element in trace tuple + timestamp tuple. */
- }
- if (pam_result != am_true) {
- pam_result_size = size_object(pam_result);
- size += 1 + pam_result_size;
- /* One element in trace tuple + term size. */
- }
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(size, &bp, &off_heap, tracer_ref);
-#ifdef DEBUG
- limit = hp + size;
-#endif
-
- /*
- * Build the {M,F,A} tuple in the message buffer.
- * (A is arguments or arity.)
- */
-
- if (*tracee_flags & F_TRACE_ARITY_ONLY) {
- mfa_tuple = make_small(arity);
- } else {
- mfa_tuple = NIL;
- for (i = arity-1; i >= 0; i--) {
- Eterm term = copy_struct(args[i], sizes[i], &hp, off_heap);
- mfa_tuple = CONS(hp, term, mfa_tuple);
- hp += 2;
- }
- }
- mfa_tuple = TUPLE3(hp, (Eterm) mfa[0], (Eterm) mfa[1], mfa_tuple);
- hp += 4;
-
- /*
- * Copy the PAM result (if any) onto the heap.
- */
-
- if (pam_result != am_true) {
- pam_result = copy_struct(pam_result, pam_result_size, &hp, off_heap);
- }
+ /*
+ * Build the {M,F,A} tuple on the process heap.
+ * (A is arguments or arity.)
+ */
- erts_match_set_release_result(p);
- /*
- * Build the trace tuple and enqueue it.
- */
-
- mess = TUPLE4(hp, am_trace, p->common.id, am_call, mfa_tuple);
- hp += 5;
- if (pam_result != am_true) {
- hp[-5] = make_arityval(5);
- *hp++ = pam_result;
- }
-
- erts_smp_mtx_lock(&smq_mtx);
+ if (*tracee_flags & F_TRACE_ARITY_ONLY) {
+ hp = HAlloc(p, 4);
+ mfa_tuple = make_small(arity);
+ } else {
+ hp = HAlloc(p, 4 + arity * 2);
+ mfa_tuple = NIL;
+ for (i = arity-1; i >= 0; i--) {
+ mfa_tuple = CONS(hp, args[i], mfa_tuple);
+ hp += 2;
+ }
+ }
+ mfa_tuple = TUPLE3(hp, info->mfa.module, info->mfa.function, mfa_tuple);
+ hp += 4;
- if (*tracee_flags & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
+ /*
+ * Build the trace tuple and hand it to the tracer.
+ */
+ send_to_tracer_nif_raw(p, NULL, *tracer, *tracee_flags, p->common.id,
+ tnif, TRACE_FUN_T_CALL, am_call, mfa_tuple,
+ THE_NON_VALUE, pam_result);
- ASSERT(hp == limit);
- ERTS_ENQ_TRACE_MSG(tracee, tracer_ref, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return return_flags;
+ if (match_spec) {
+ erts_match_set_release_result_trace(p, pam_result);
+ if (tracer == &pre_ms_tracer)
+ ERTS_TRACER_CLEAR(&pre_ms_tracer);
}
+
+ return return_flags;
}
/* Sends trace message:
@@ -2003,73 +1281,14 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
* 't_p' is the traced process.
*/
void
-trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data)
+trace_proc(Process *c_p, ErtsProcLocks c_p_locks,
+ Process *t_p, Eterm what, Eterm data)
{
- Eterm mess;
- Eterm* hp;
- int need;
-
- ERTS_SMP_LC_ASSERT((erts_proc_lc_my_proc_locks(t_p) != 0)
- || erts_thr_progress_is_blocking());
- if (is_internal_port(ERTS_TRACER_PROC(t_p))) {
-#define LOCAL_HEAP_SIZE (5+5)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
-
- hp = local_heap;
- mess = TUPLE4(hp, am_trace, t_p->common.id, what, data);
- hp += 5;
- erts_smp_mtx_lock(&smq_mtx);
- if (ERTS_TRACE_FLAGS(t_p) & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
- send_to_port(
-#ifndef ERTS_SMP
- /* No fake schedule out and in again after an exit */
- what == am_exit ? NULL : c_p,
-#else
- /* Fake schedule out and in are never sent when smp enabled */
- c_p,
-#endif
- mess,
- &ERTS_TRACER_PROC(t_p),
- &ERTS_TRACE_FLAGS(t_p));
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
- } else {
- Eterm tmp;
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- ERTS_TRACER_REF_TYPE tracer_ref;
- size_t sz_data;
-
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(t_p)));
-
- ERTS_GET_TRACER_REF(tracer_ref,
- ERTS_TRACER_PROC(t_p),
- ERTS_TRACE_FLAGS(t_p));
-
- sz_data = size_object(data);
-
- need = sz_data + 5 + TS_SIZE(t_p);
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(need, &bp, &off_heap, tracer_ref);
-
- tmp = copy_struct(data, sz_data, &hp, off_heap);
- mess = TUPLE4(hp, am_trace, t_p->common.id, what, tmp);
- hp += 5;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- if (ERTS_TRACE_FLAGS(t_p) & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
-
- ERTS_ENQ_TRACE_MSG(t_p->common.id, tracer_ref, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
- }
+ ErtsTracerNif *tnif = NULL;
+ if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif,
+ TRACE_FUN_E_PROCS, what))
+ send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_PROCS,
+ what, data, THE_NON_VALUE, am_true);
}
@@ -2081,81 +1300,37 @@ trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data)
* and 'args' may be a deep term.
*/
void
-trace_proc_spawn(Process *p, Eterm pid,
+trace_proc_spawn(Process *p, Eterm what, Eterm pid,
Eterm mod, Eterm func, Eterm args)
{
- Eterm mfa;
- Eterm mess;
- Eterm* hp;
-
- if (is_internal_port(ERTS_TRACER_PROC(p))) {
-#define LOCAL_HEAP_SIZE (4+6+5)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
- hp = local_heap;
- mfa = TUPLE3(hp, mod, func, args);
- hp += 4;
- mess = TUPLE5(hp, am_trace, p->common.id, am_spawn, pid, mfa);
- hp += 6;
- erts_smp_mtx_lock(&smq_mtx);
- if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
- send_to_port(p, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
- } else {
- Eterm tmp;
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- ERTS_TRACER_REF_TYPE tracer_ref;
- size_t sz_args, sz_pid;
- Uint need;
-
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
+ ErtsTracerNif *tnif = NULL;
+ if (is_tracer_enabled(NULL, 0,
+ &p->common, &tnif, TRACE_FUN_E_PROCS, what)) {
+ Eterm mfa;
+ Eterm* hp;
- ERTS_GET_TRACER_REF(tracer_ref,
- ERTS_TRACER_PROC(p),
- ERTS_TRACE_FLAGS(p));
-
- sz_args = size_object(args);
- sz_pid = size_object(pid);
- need = sz_args + 4 + 6 + TS_SIZE(p);
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(need, &bp, &off_heap, tracer_ref);
-
- tmp = copy_struct(args, sz_args, &hp, off_heap);
- mfa = TUPLE3(hp, mod, func, tmp);
- hp += 4;
- tmp = copy_struct(pid, sz_pid, &hp, off_heap);
- mess = TUPLE5(hp, am_trace, p->common.id, am_spawn, tmp, mfa);
- hp += 6;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
-
- ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
+ hp = HAlloc(p, 4);
+ mfa = TUPLE3(hp, mod, func, args);
+ hp += 4;
+ send_to_tracer_nif(NULL, &p->common, p->common.id, tnif, TRACE_FUN_T_PROCS,
+ what, pid, mfa, am_true);
}
}
void save_calls(Process *p, Export *e)
{
- struct saved_calls *scb = ERTS_PROC_GET_SAVED_CALLS_BUF(p);
- if (scb) {
- Export **ct = &scb->ct[0];
- int len = scb->len;
-
- ct[scb->cur] = e;
- if (++scb->cur >= len)
- scb->cur = 0;
- if (scb->n < len)
- scb->n++;
+ if ((ERTS_TRACE_FLAGS(p) & F_SENSITIVE) == 0) {
+ struct saved_calls *scb = ERTS_PROC_GET_SAVED_CALLS_BUF(p);
+ if (scb) {
+ Export **ct = &scb->ct[0];
+ int len = scb->len;
+
+ ct[scb->cur] = e;
+ if (++scb->cur >= len)
+ scb->cur = 0;
+ if (scb->n < len)
+ scb->n++;
+ }
}
}
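
The saved-calls buffer above is a fixed-size ring: cur is the next write slot
and wraps to zero, while n saturates at the buffer length so a reader knows how
many slots are valid. A standalone sketch of the same bookkeeping (the struct,
names and demo payload are illustrative, not ERTS APIs):

    #include <stdio.h>

    #define SAVED_LEN 4

    struct saved { const char *ct[SAVED_LEN]; int cur, n, len; };

    static void save_call(struct saved *scb, const char *e)
    {
        scb->ct[scb->cur] = e;          /* overwrite the oldest slot */
        if (++scb->cur >= scb->len)
            scb->cur = 0;               /* wrap around */
        if (scb->n < scb->len)
            scb->n++;                   /* saturate at capacity */
    }

    int main(void)
    {
        struct saved scb = { {0}, 0, 0, SAVED_LEN };
        const char *calls[] = { "a", "b", "c", "d", "e", "f" };
        int i;
        for (i = 0; i < 6; i++)
            save_call(&scb, calls[i]);
        /* the oldest entry now sits at scb.cur; walk forward for order */
        for (i = 0; i < scb.n; i++)
            printf("%s ", scb.ct[(scb.cur + i) % scb.len]);
        putchar('\n');                  /* prints: c d e f */
        return 0;
    }
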
@@ -2172,141 +1347,45 @@ void save_calls(Process *p, Export *e)
* are all small (atomic) integers.
*/
void
-trace_gc(Process *p, Eterm what)
+trace_gc(Process *p, Eterm what, Uint size, Eterm msg)
{
- ERTS_DECL_AM(bin_vheap_size);
- ERTS_DECL_AM(bin_vheap_block_size);
- ERTS_DECL_AM(bin_old_vheap_size);
- ERTS_DECL_AM(bin_old_vheap_block_size);
-
- ErlHeapFragment *bp = NULL;
- ErlOffHeap *off_heap;
- ERTS_TRACER_REF_TYPE tracer_ref = ERTS_NULL_TRACER_REF; /* Initialized
- to eliminate
- compiler
- warning */
+ ErtsTracerNif *tnif = NULL;
+ Eterm* o_hp = NULL;
Eterm* hp;
- Eterm msg = NIL;
- Uint size;
-
- Eterm tags[] = {
- am_old_heap_block_size,
- am_heap_block_size,
- am_mbuf_size,
- am_recent_size,
- am_stack_size,
- am_old_heap_size,
- am_heap_size,
- AM_bin_vheap_size,
- AM_bin_vheap_block_size,
- AM_bin_old_vheap_size,
- AM_bin_old_vheap_block_size
- };
-
- UWord values[] = {
- OLD_HEAP(p) ? OLD_HEND(p) - OLD_HEAP(p) : 0,
- HEAP_SIZE(p),
- MBUF_SIZE(p),
- HIGH_WATER(p) - HEAP_START(p),
- STACK_START(p) - p->stop,
- OLD_HEAP(p) ? OLD_HTOP(p) - OLD_HEAP(p) : 0,
- HEAP_TOP(p) - HEAP_START(p),
- MSO(p).overhead,
- BIN_VHEAP_SZ(p),
- BIN_OLD_VHEAP(p),
- BIN_OLD_VHEAP_SZ(p)
- };
-#define LOCAL_HEAP_SIZE \
- (sizeof(values)/sizeof(*values)) * \
- (2/*cons*/ + 3/*2-tuple*/ + BIG_UINT_HEAP_SIZE) + \
- 5/*4-tuple */ + TS_HEAP_WORDS
- DeclareTmpHeap(local_heap,LOCAL_HEAP_SIZE,p);
-#ifdef DEBUG
- Eterm* limit;
-#endif
-
- ASSERT(sizeof(values)/sizeof(*values) == sizeof(tags)/sizeof(Eterm));
-
- UseTmpHeap(LOCAL_HEAP_SIZE,p);
-
- if (is_internal_port(ERTS_TRACER_PROC(p))) {
- hp = local_heap;
-#ifdef DEBUG
- size = 0;
- (void) erts_bld_atom_uword_2tup_list(NULL,
- &size,
- sizeof(values)/sizeof(*values),
- tags,
- values);
- size += 5/*4-tuple*/ + TS_SIZE(p);
-#endif
- } else {
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
-
- ERTS_GET_TRACER_REF(tracer_ref,
- ERTS_TRACER_PROC(p),
- ERTS_TRACE_FLAGS(p));
-
- size = 0;
- (void) erts_bld_atom_uword_2tup_list(NULL,
- &size,
- sizeof(values)/sizeof(*values),
- tags,
- values);
- size += 5/*4-tuple*/ + TS_SIZE(p);
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(size, &bp, &off_heap, tracer_ref);
- }
+ Uint sz = 0;
+ Eterm tup;
-#ifdef DEBUG
- limit = hp + size;
- ASSERT(size <= LOCAL_HEAP_SIZE);
-#endif
+ if (is_tracer_enabled(p, ERTS_PROC_LOCK_MAIN, &p->common, &tnif,
+ TRACE_FUN_E_GC, what)) {
- msg = erts_bld_atom_uword_2tup_list(&hp,
- NULL,
- sizeof(values)/sizeof(*values),
- tags,
- values);
+ if (is_non_value(msg)) {
- msg = TUPLE4(hp, am_trace, p->common.id, what, msg);
- hp += 5;
+ (void) erts_process_gc_info(p, &sz, NULL, 0, 0);
+ o_hp = hp = erts_alloc(ERTS_ALC_T_TMP, (sz + 3 + 2) * sizeof(Eterm));
- erts_smp_mtx_lock(&smq_mtx);
+ msg = erts_process_gc_info(p, NULL, &hp, 0, 0);
+ tup = TUPLE2(hp, am_wordsize, make_small(size)); hp += 3;
+ msg = CONS(hp, tup, msg); hp += 2;
+ }
- if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
- hp = patch_ts(msg, hp);
+ send_to_tracer_nif(p, &p->common, p->common.id, tnif, TRACE_FUN_T_GC,
+ what, msg, THE_NON_VALUE, am_true);
+ if (o_hp)
+ erts_free(ERTS_ALC_T_TMP, o_hp);
}
- ASSERT(hp == limit);
- if (is_internal_port(ERTS_TRACER_PROC(p)))
- send_to_port(p, msg, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
- else
- ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, msg, bp);
- erts_smp_mtx_unlock(&smq_mtx);
- UnUseTmpHeap(LOCAL_HEAP_SIZE,p);
-#undef LOCAL_HEAP_SIZE
}
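
The sz + 3 + 2 in the temporary allocation above is ordinary heap-word
arithmetic: a tuple of arity N occupies N+1 words (header plus elements) and a
cons cell occupies 2, so prepending the {wordsize, Size} 2-tuple to the GC info
list costs exactly five extra words. A worked restatement of that count (plain
C, compiles on its own):

    /* Heap cost, in Eterm words, of the terms built after
       erts_process_gc_info() above. */
    #define TUPLE_WORDS(arity) ((arity) + 1)   /* header + elements */
    #define CONS_WORDS 2                       /* head + tail */

    /* {wordsize, Size} is a 2-tuple, then one cons cell links it in: */
    enum { EXTRA_WORDS = TUPLE_WORDS(2) + CONS_WORDS };  /* = 3 + 2 = 5 */
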
void
-monitor_long_schedule_proc(Process *p, BeamInstr *in_fp, BeamInstr *out_fp, Uint time)
+monitor_long_schedule_proc(Process *p, ErtsCodeMFA *in_fp,
+ ErtsCodeMFA *out_fp, Uint time)
{
ErlHeapFragment *bp;
ErlOffHeap *off_heap;
-#ifndef ERTS_SMP
- Process *monitor_p;
-#endif
Uint hsz;
Eterm *hp, list, in_mfa = am_undefined, out_mfa = am_undefined;
Eterm in_tpl, out_tpl, tmo_tpl, tmo, msg;
-#ifndef ERTS_SMP
- ASSERT(is_internal_pid(system_monitor));
- monitor_p = erts_proc_lookup(system_monitor);
- if (!monitor_p || p == monitor_p) {
- return;
- }
-#endif
/*
* Size: {monitor, pid, long_schedule, [{timeout, T}, {in, {M,F,A}},{out,{M,F,A}}]} ->
* 5 (top tuple of 4), (3 (elements) * 2 (cons)) + 3 (timeout tuple of 2) + size of Timeout +
@@ -2319,11 +1398,13 @@ monitor_long_schedule_proc(Process *p, BeamInstr *in_fp, BeamInstr *out_fp, Uint
hp = ERTS_ALLOC_SYSMSG_HEAP(hsz, &bp, &off_heap, monitor_p);
tmo = erts_bld_uint(&hp, NULL, time);
if (in_fp != NULL) {
- in_mfa = TUPLE3(hp,(Eterm) in_fp[0], (Eterm) in_fp[1], make_small(in_fp[2]));
+ in_mfa = TUPLE3(hp, in_fp->module, in_fp->function,
+ make_small(in_fp->arity));
hp +=4;
}
if (out_fp != NULL) {
- out_mfa = TUPLE3(hp,(Eterm) out_fp[0], (Eterm) out_fp[1], make_small(out_fp[2]));
+ out_mfa = TUPLE3(hp, out_fp->module, out_fp->function,
+ make_small(out_fp->arity));
hp +=4;
}
tmo_tpl = TUPLE2(hp,am_timeout, tmo);
@@ -2340,36 +1421,18 @@ monitor_long_schedule_proc(Process *p, BeamInstr *in_fp, BeamInstr *out_fp, Uint
hp += 2;
msg = TUPLE4(hp, am_monitor, p->common.id, am_long_schedule, list);
hp += 5;
-#ifdef ERTS_SMP
enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp);
-#else
- erts_queue_message(monitor_p, NULL, bp, msg, NIL
-#ifdef USE_VM_PROBES
- , NIL
-#endif
- );
-#endif
}
void
monitor_long_schedule_port(Port *pp, ErtsPortTaskType type, Uint time)
{
ErlHeapFragment *bp;
ErlOffHeap *off_heap;
-#ifndef ERTS_SMP
- Process *monitor_p;
-#endif
Uint hsz;
Eterm *hp, list, op;
Eterm op_tpl, tmo_tpl, tmo, msg;
-#ifndef ERTS_SMP
- ASSERT(is_internal_pid(system_monitor));
- monitor_p = erts_proc_lookup(system_monitor);
- if (!monitor_p) {
- return;
- }
-#endif
/*
* Size: {monitor, port, long_schedule, [{timeout, T}, {op, Operation}]} ->
* 5 (top tuple of 4), (2 (elements) * 2 (cons)) + 3 (timeout tuple of 2)
@@ -2386,7 +1449,6 @@ monitor_long_schedule_port(Port *pp, ErtsPortTaskType type, Uint time)
case ERTS_PORT_TASK_TIMEOUT: op = am_timeout; break;
case ERTS_PORT_TASK_INPUT: op = am_input; break;
case ERTS_PORT_TASK_OUTPUT: op = am_output; break;
- case ERTS_PORT_TASK_EVENT: op = am_event; break;
case ERTS_PORT_TASK_DIST_CMD: op = am_dist_cmd; break;
default: op = am_undefined; break;
}
@@ -2405,24 +1467,13 @@ monitor_long_schedule_port(Port *pp, ErtsPortTaskType type, Uint time)
hp += 2;
msg = TUPLE4(hp, am_monitor, pp->common.id, am_long_schedule, list);
hp += 5;
-#ifdef ERTS_SMP
enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, pp->common.id, NIL, msg, bp);
-#else
- erts_queue_message(monitor_p, NULL, bp, msg, NIL
-#ifdef USE_VM_PROBES
- , NIL
-#endif
- );
-#endif
}
void
monitor_long_gc(Process *p, Uint time) {
ErlHeapFragment *bp;
ErlOffHeap *off_heap;
-#ifndef ERTS_SMP
- Process *monitor_p;
-#endif
Uint hsz;
Eterm *hp, list, msg;
Eterm tags[] = {
@@ -2447,12 +1498,6 @@ monitor_long_gc(Process *p, Uint time) {
Eterm *hp_end;
#endif
-#ifndef ERTS_SMP
- ASSERT(is_internal_pid(system_monitor));
- monitor_p = erts_proc_lookup(system_monitor);
- if (!monitor_p || p == monitor_p)
- return;
-#endif
hsz = 0;
(void) erts_bld_atom_uword_2tup_list(NULL,
@@ -2480,24 +1525,13 @@ monitor_long_gc(Process *p, Uint time) {
ASSERT(hp == hp_end);
#endif
-#ifdef ERTS_SMP
enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp);
-#else
- erts_queue_message(monitor_p, NULL, bp, msg, NIL
-#ifdef USE_VM_PROBES
- , NIL
-#endif
- );
-#endif
}
void
monitor_large_heap(Process *p) {
ErlHeapFragment *bp;
ErlOffHeap *off_heap;
-#ifndef ERTS_SMP
- Process *monitor_p;
-#endif
Uint hsz;
Eterm *hp, list, msg;
Eterm tags[] = {
@@ -2521,13 +1555,6 @@ monitor_large_heap(Process *p) {
#endif
-#ifndef ERTS_SMP
- ASSERT(is_internal_pid(system_monitor));
- monitor_p = erts_proc_lookup(system_monitor);
- if (monitor_p || p == monitor_p) {
- return;
- }
-#endif
hsz = 0;
(void) erts_bld_atom_uword_2tup_list(NULL,
@@ -2555,47 +1582,22 @@ monitor_large_heap(Process *p) {
ASSERT(hp == hp_end);
#endif
-#ifdef ERTS_SMP
enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp);
-#else
- erts_queue_message(monitor_p, NULL, bp, msg, NIL
-#ifdef USE_VM_PROBES
- , NIL
-#endif
- );
-#endif
}
void
monitor_generic(Process *p, Eterm type, Eterm spec) {
ErlHeapFragment *bp;
ErlOffHeap *off_heap;
-#ifndef ERTS_SMP
- Process *monitor_p;
-#endif
Eterm *hp, msg;
-#ifndef ERTS_SMP
- ASSERT(is_internal_pid(system_monitor));
- monitor_p = erts_proc_lookup(system_monitor);
- if (!monitor_p || p == monitor_p)
- return;
-#endif
hp = ERTS_ALLOC_SYSMSG_HEAP(5, &bp, &off_heap, monitor_p);
msg = TUPLE4(hp, am_monitor, p->common.id, type, spec);
hp += 5;
-#ifdef ERTS_SMP
enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp);
-#else
- erts_queue_message(monitor_p, NULL, bp, msg, NIL
-#ifdef USE_VM_PROBES
- , NIL
-#endif
- );
-#endif
}
@@ -2605,25 +1607,17 @@ monitor_generic(Process *p, Eterm type, Eterm spec) {
void
profile_scheduler(Eterm scheduler_id, Eterm state) {
- Eterm *hp, msg, timestamp;
- Uint Ms, s, us;
+ Eterm *hp, msg;
+ ErlHeapFragment *bp = NULL;
-#ifndef ERTS_SMP
-#define LOCAL_HEAP_SIZE (4 + 7)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
- hp = local_heap;
-#else
- ErlHeapFragment *bp;
Uint hsz;
- hsz = 4 + 7;
+ hsz = 7 + patch_ts_size(erts_system_profile_ts_type)-1;
bp = new_message_buffer(hsz);
hp = bp->mem;
-#endif
- erts_smp_mtx_lock(&smq_mtx);
+ erts_mtx_lock(&smq_mtx);
switch (state) {
case am_active:
@@ -2637,126 +1631,28 @@ profile_scheduler(Eterm scheduler_id, Eterm state) {
break;
}
- GET_NOW(&Ms, &s, &us);
- timestamp = TUPLE3(hp, make_small(Ms), make_small(s), make_small(us)); hp += 4;
- msg = TUPLE6(hp, am_profile, am_scheduler, scheduler_id, state,
- make_small(active_sched), timestamp); hp += 7;
+ msg = TUPLE6(hp, am_profile, am_scheduler, scheduler_id,
+ state, make_small(active_sched),
+ NIL /* Will be overwritten by timestamp */);
+ hp += 7;
-#ifndef ERTS_SMP
- profile_send(NIL, msg);
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
-#else
- enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, NIL, NIL, msg, bp);
-#endif
- erts_smp_mtx_unlock(&smq_mtx);
-
-}
-
-void
-profile_scheduler_q(Eterm scheduler_id, Eterm state, Eterm no_schedulers, Uint Ms, Uint s, Uint us) {
- Eterm *hp, msg, timestamp;
-
-#ifndef ERTS_SMP
-#define LOCAL_HEAP_SIZE (4 + 7)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
- hp = local_heap;
-#else
- ErlHeapFragment *bp;
- Uint hsz;
+ /* Write timestamp in element 6 of the 'msg' tuple */
+ hp[-1] = write_ts(erts_system_profile_ts_type, hp, bp, NULL);
- hsz = 4 + 7;
-
- bp = new_message_buffer(hsz);
- hp = bp->mem;
-#endif
-
- erts_smp_mtx_lock(&smq_mtx);
-
- timestamp = TUPLE3(hp, make_small(Ms), make_small(s), make_small(us)); hp += 4;
- msg = TUPLE6(hp, am_profile, am_scheduler, scheduler_id, state, no_schedulers, timestamp); hp += 7;
-#ifndef ERTS_SMP
- profile_send(NIL, msg);
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
-#else
enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, NIL, NIL, msg, bp);
-#endif
- erts_smp_mtx_unlock(&smq_mtx);
-
-}
-
-
-/* Send {trace_ts, Pid, What, {Mod, Func, Arity}, Timestamp}
- * or {trace, Pid, What, {Mod, Func, Arity}}
- *
- * where 'What' is supposed to be 'in' or 'out'.
- *
- * Virtual scheduling do not fake scheduling for ports.
- */
-
+ erts_mtx_unlock(&smq_mtx);
-void trace_virtual_sched(Process *p, Eterm what)
-{
- trace_sched_aux(p, what, 1);
}
/* Port profiling */
void
trace_port_open(Port *p, Eterm calling_pid, Eterm drv_name) {
- Eterm mess;
- Eterm* hp;
-
- if (is_internal_port(ERTS_TRACER_PROC(p))) {
-#define LOCAL_HEAP_SIZE (5+6)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
- hp = local_heap;
-
- mess = TUPLE5(hp, am_trace, calling_pid, am_open, p->common.id, drv_name);
- hp += 6;
- erts_smp_mtx_lock(&smq_mtx);
- if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
- /* No fake schedule */
- send_to_port(NULL, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
- } else {
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- size_t sz_data;
- ERTS_TRACER_REF_TYPE tracer_ref;
-
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
-
- sz_data = 6 + TS_SIZE(p);
-
- ERTS_GET_TRACER_REF(tracer_ref,
- ERTS_TRACER_PROC(p),
- ERTS_TRACE_FLAGS(p));
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(sz_data, &bp, &off_heap, tracer_ref);
-
- mess = TUPLE5(hp, am_trace, calling_pid, am_open, p->common.id, drv_name);
- hp += 6;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
-
- ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
- }
-
+ ErtsTracerNif *tnif = NULL;
+ ERTS_CHK_NO_PROC_LOCKS;
+ if (is_tracer_enabled(NULL, 0, &p->common, &tnif, TRACE_FUN_E_PORTS, am_open))
+ send_to_tracer_nif(NULL, &p->common, p->common.id, tnif, TRACE_FUN_T_PORTS,
+ am_open, calling_pid, drv_name, am_true);
}
/* Sends trace message:
@@ -2768,56 +1664,210 @@ trace_port_open(Port *p, Eterm calling_pid, Eterm drv_name) {
*/
void
trace_port(Port *t_p, Eterm what, Eterm data) {
- Eterm mess;
- Eterm* hp;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p)
+ ErtsTracerNif *tnif = NULL;
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(t_p)
|| erts_thr_progress_is_blocking());
+ ERTS_CHK_NO_PROC_LOCKS;
+ if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_PORTS, what))
+ send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_PORTS,
+ what, data, THE_NON_VALUE, am_true);
+}
- if (is_internal_port(ERTS_TRACER_PROC(t_p))) {
-#define LOCAL_HEAP_SIZE (5+5)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
- hp = local_heap;
- mess = TUPLE4(hp, am_trace, t_p->common.id, what, data);
- hp += 5;
- erts_smp_mtx_lock(&smq_mtx);
- if (ERTS_TRACE_FLAGS(t_p) & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
- /* No fake schedule */
- send_to_port(NULL,mess,&ERTS_TRACER_PROC(t_p),&ERTS_TRACE_FLAGS(t_p));
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
+
+static Eterm
+trace_port_tmp_binary(char *bin, Sint sz, Binary **bptrp, Eterm **hp)
+{
+ if (sz <= ERL_ONHEAP_BIN_LIMIT) {
+ ErlHeapBin *hb = (ErlHeapBin *)*hp;
+ hb->thing_word = header_heap_bin(sz);
+ hb->size = sz;
+ sys_memcpy(hb->data, bin, sz);
+ *hp += heap_bin_size(sz);
+ return make_binary(hb);
} else {
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- size_t sz_data;
- ERTS_TRACER_REF_TYPE tracer_ref;
+ ProcBin* pb = (ProcBin *)*hp;
+ Binary *bptr = erts_bin_nrml_alloc(sz);
+ sys_memcpy(bptr->orig_bytes, bin, sz);
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = sz;
+ pb->next = NULL;
+ pb->val = bptr;
+ pb->bytes = (byte*) bptr->orig_bytes;
+ pb->flags = 0;
+ *bptrp = bptr;
+ *hp += PROC_BIN_SIZE;
+ return make_binary(pb);
+ }
+}
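
trace_port_tmp_binary mirrors the emulator's two binary representations:
payloads up to ERL_ONHEAP_BIN_LIMIT are copied into the caller's stack heap,
larger ones get a reference-counted allocation that the caller must release.
A simplified standalone sketch of the same split; the 64-byte limit and
malloc/free stand in for ERL_ONHEAP_BIN_LIMIT and
erts_bin_nrml_alloc/erts_bin_release:

    #include <stdlib.h>
    #include <string.h>

    #define ONHEAP_LIMIT 64   /* stand-in for ERL_ONHEAP_BIN_LIMIT */

    /* Small payloads are copied into the caller's arena ("heap binary");
       large ones get their own allocation that the caller must release,
       like the Binary handed back through *bptrp above. */
    static unsigned char *
    tmp_binary(const unsigned char *data, size_t sz,
               unsigned char *arena, unsigned char **owned)
    {
        if (sz <= ONHEAP_LIMIT) {
            memcpy(arena, data, sz);
            *owned = NULL;               /* nothing to free */
            return arena;
        } else {
            unsigned char *p = malloc(sz);
            if (!p) abort();
            memcpy(p, data, sz);
            *owned = p;                  /* caller frees, cf. erts_bin_release */
            return p;
        }
    }

    int main(void)
    {
        unsigned char arena[ONHEAP_LIMIT], *owned;
        unsigned char small[8] = "payload";
        (void)tmp_binary(small, sizeof(small), arena, &owned);
        free(owned);                     /* free(NULL) is a no-op here */
        return 0;
    }
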
+
+/* Sends trace message:
+ * {trace, PortPid, 'receive', {pid(), {command, iolist()}}}
+ * {trace, PortPid, 'receive', {pid(), {control, pid()}}}
+ * {trace, PortPid, 'receive', {pid(), exit}}
+ *
+ */
+void
+trace_port_receive(Port *t_p, Eterm caller, Eterm what, ...)
+{
+ ErtsTracerNif *tnif = NULL;
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(t_p)
+ || erts_thr_progress_is_blocking());
+ ERTS_CHK_NO_PROC_LOCKS;
+ if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_RECEIVE, am_receive)) {
+ /* We can use a stack-allocated heap here, as the NIF is
+ called in the context of a port */
+#define LOCAL_HEAP_SIZE (3 + 3 + heap_bin_size(ERL_ONHEAP_BIN_LIMIT) + 3)
+ DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
+
+ Eterm *hp, data, *orig_hp = NULL;
+ Binary *bptr = NULL;
+ va_list args;
+ UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+ hp = local_heap;
+
+ if (what == am_close) {
+ data = what;
+ } else {
+ Eterm arg;
+ va_start(args, what);
+ if (what == am_command) {
+ char *bin = va_arg(args, char *);
+ Sint sz = va_arg(args, Sint);
+ va_end(args);
+ arg = trace_port_tmp_binary(bin, sz, &bptr, &hp);
+ } else if (what == am_call || what == am_control) {
+ unsigned int command = va_arg(args, unsigned int);
+ char *bin = va_arg(args, char *);
+ Sint sz = va_arg(args, Sint);
+ Eterm cmd;
+ va_end(args);
+ arg = trace_port_tmp_binary(bin, sz, &bptr, &hp);
+#if defined(ARCH_32)
+ if (!IS_USMALL(0, command)) {
+ *hp = make_pos_bignum_header(1);
+ BIG_DIGIT(hp, 0) = (Uint)command;
+ cmd = make_big(hp);
+ hp += 2;
+ } else
+#endif
+ {
+ cmd = make_small((Sint)command);
+ }
+ arg = TUPLE2(hp, cmd, arg);
+ hp += 3;
+ } else if (what == am_commandv) {
+ ErlIOVec *evp = va_arg(args, ErlIOVec*);
+ int i;
+ va_end(args);
+ if ((6 + evp->vsize * (2+PROC_BIN_SIZE+ERL_SUB_BIN_SIZE)) > LOCAL_HEAP_SIZE) {
+ hp = erts_alloc(ERTS_ALC_T_TMP,
+ (6 + evp->vsize * (2+PROC_BIN_SIZE+ERL_SUB_BIN_SIZE)) * sizeof(Eterm));
+ orig_hp = hp;
+ }
+ arg = NIL;
+ /* Convert each element in the ErlIOVec to a sub bin that points
+ to a procbin. We don't have to increment the proc bin refc as
+ the port task keeps the reference alive. */
+ for (i = evp->vsize-1; i >= 0; i--) {
+ if (evp->iov[i].iov_len) {
+ ProcBin* pb = (ProcBin*)hp;
+ ErlSubBin *sb;
+ ASSERT(evp->binv[i]);
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->val = ErlDrvBinary2Binary(evp->binv[i]);
+ pb->size = pb->val->orig_size;
+ pb->next = NULL;
+ pb->bytes = (byte*) pb->val->orig_bytes;
+ pb->flags = 0;
+ hp += PROC_BIN_SIZE;
+
+ sb = (ErlSubBin*) hp;
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->size = evp->iov[i].iov_len;
+ sb->offs = (byte*)(evp->iov[i].iov_base) - pb->bytes;
+ sb->orig = make_binary(pb);
+ sb->bitoffs = 0;
+ sb->bitsize = 0;
+ sb->is_writable = 0;
+ hp += ERL_SUB_BIN_SIZE;
+
+ arg = CONS(hp, make_binary(sb), arg);
+ hp += 2;
+ }
+ }
+ what = am_command;
+ } else {
+ arg = va_arg(args, Eterm);
+ va_end(args);
+ }
+ data = TUPLE2(hp, what, arg);
+ hp += 3;
+ }
+
+ data = TUPLE2(hp, caller, data);
+ hp += 3;
+ ASSERT(hp <= (local_heap + LOCAL_HEAP_SIZE) || orig_hp);
+ send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif,
+ TRACE_FUN_T_RECEIVE,
+ am_receive, data, THE_NON_VALUE, am_true);
+
+ if (bptr)
+ erts_bin_release(bptr);
+
+ if (orig_hp)
+ erts_free(ERTS_ALC_T_TMP, orig_hp);
+
+ UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+ }
+#undef LOCAL_HEAP_SIZE
+}
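
trace_port_receive leans on C varargs so that one entry point can accept a
different payload shape per trace tag: the tag read first decides how many
trailing arguments exist and what their types are. A minimal standalone model
of that dispatch (the tags and payloads are illustrative):

    #include <stdarg.h>
    #include <stdio.h>

    enum tag { T_COMMAND, T_CONTROL, T_EXIT };

    /* One entry point, different trailing arguments per tag --
       the same pattern trace_port_receive uses with Eterm tags. */
    static void trace_event(enum tag what, ...)
    {
        va_list args;
        va_start(args, what);
        switch (what) {
        case T_COMMAND: {               /* payload: buffer + length */
            const char *bin = va_arg(args, const char *);
            long sz = va_arg(args, long);
            printf("command: %ld bytes at %p\n", sz, (const void *)bin);
            break;
        }
        case T_CONTROL: {               /* payload: an operation code */
            unsigned op = va_arg(args, unsigned);
            printf("control: op %u\n", op);
            break;
        }
        default:                        /* no payload */
            printf("exit\n");
            break;
        }
        va_end(args);
    }

    int main(void)
    {
        trace_event(T_COMMAND, "abc", 3L);
        trace_event(T_CONTROL, 17u);
        trace_event(T_EXIT);
        return 0;
    }
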
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(t_p)));
+void
+trace_port_send(Port *t_p, Eterm receiver, Eterm msg, int exists)
+{
+ ErtsTracerNif *tnif = NULL;
+ Eterm op = exists ? am_send : am_send_to_non_existing_process;
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(t_p)
+ || erts_thr_progress_is_blocking());
+ ERTS_CHK_NO_PROC_LOCKS;
+ if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_SEND, op))
+ send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_SEND,
+ op, msg, receiver, am_true);
+}
- sz_data = 5 + TS_SIZE(t_p);
+void trace_port_send_binary(Port *t_p, Eterm to, Eterm what, char *bin, Sint sz)
+{
+ ErtsTracerNif *tnif = NULL;
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(t_p)
+ || erts_thr_progress_is_blocking());
+ ERTS_CHK_NO_PROC_LOCKS;
+ if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_SEND, am_send)) {
+ Eterm msg;
+ Binary* bptr = NULL;
+#define LOCAL_HEAP_SIZE (3 + 3 + heap_bin_size(ERL_ONHEAP_BIN_LIMIT))
+ DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- ERTS_GET_TRACER_REF(tracer_ref,
- ERTS_TRACER_PROC(t_p),
- ERTS_TRACE_FLAGS(t_p));
+ Eterm *hp;
- hp = ERTS_ALLOC_SYSMSG_HEAP(sz_data, &bp, &off_heap, tracer_ref);
+ ERTS_CT_ASSERT(heap_bin_size(ERL_ONHEAP_BIN_LIMIT) >= PROC_BIN_SIZE);
+ UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+ hp = local_heap;
- mess = TUPLE4(hp, am_trace, t_p->common.id, what, data);
- hp += 5;
+ msg = trace_port_tmp_binary(bin, sz, &bptr, &hp);
- erts_smp_mtx_lock(&smq_mtx);
+ msg = TUPLE2(hp, what, msg);
+ hp += 3;
+ msg = TUPLE2(hp, t_p->common.id, msg);
+ hp += 3;
- if (ERTS_TRACE_FLAGS(t_p) & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
+ send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_SEND,
+ am_send, msg, to, am_true);
+ if (bptr)
+ erts_bin_release(bptr);
- ERTS_ENQ_TRACE_MSG(t_p->common.id, tracer_ref, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
+ UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+#undef LOCAL_HEAP_SIZE
}
}
@@ -2831,211 +1881,106 @@ trace_port(Port *t_p, Eterm what, Eterm data) {
void
trace_sched_ports(Port *p, Eterm what) {
- trace_sched_ports_where(p,what, make_small(0));
+ trace_sched_ports_where(p, what, make_small(0));
}
-void
-trace_sched_ports_where(Port *p, Eterm what, Eterm where) {
- Eterm mess;
- Eterm* hp;
- int ws = 5;
- Eterm sched_id = am_undefined;
-
- if (is_internal_port(ERTS_TRACER_PROC(p))) {
-#define LOCAL_HEAP_SIZE (5+6)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
- hp = local_heap;
-
- if (IS_TRACED_FL(p, F_TRACE_SCHED_NO)) {
-#ifdef ERTS_SMP
- ErtsSchedulerData *esd = erts_get_scheduler_data();
- if (esd) sched_id = make_small(esd->no);
- else sched_id = am_undefined;
-#else
- sched_id = make_small(1);
-#endif
- mess = TUPLE5(hp, am_trace, p->common.id, what, sched_id, where);
- ws = 6;
- } else {
- mess = TUPLE4(hp, am_trace, p->common.id, what, where);
- ws = 5;
- }
- hp += ws;
-
- erts_smp_mtx_lock(&smq_mtx);
- if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
-
- /* No fake scheduling */
- send_to_port(NULL, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
- } else {
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- ERTS_TRACER_REF_TYPE tracer_ref;
-
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
-
- if (IS_TRACED_FL(p, F_TRACE_SCHED_NO)) ws = 6; /* Make place for scheduler id */
-
- ERTS_GET_TRACER_REF(tracer_ref,
- ERTS_TRACER_PROC(p),
- ERTS_TRACE_FLAGS(p));
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(ws+TS_SIZE(p), &bp, &off_heap, tracer_ref);
-
- if (IS_TRACED_FL(p, F_TRACE_SCHED_NO)) {
-#ifdef ERTS_SMP
- ErtsSchedulerData *esd = erts_get_scheduler_data();
- if (esd) sched_id = make_small(esd->no);
- else sched_id = am_undefined;
-#else
- sched_id = make_small(1);
-#endif
- mess = TUPLE5(hp, am_trace, p->common.id, what, sched_id, where);
- } else {
- mess = TUPLE4(hp, am_trace, p->common.id, what, where);
- }
- hp += ws;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
-
- ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
- }
+void
+trace_sched_ports_where(Port *t_p, Eterm what, Eterm where) {
+ ErtsTracerNif *tnif = NULL;
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(t_p)
+ || erts_thr_progress_is_blocking());
+ ERTS_CHK_NO_PROC_LOCKS;
+ if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_SCHED_PORT, what))
+ send_to_tracer_nif(NULL, &t_p->common, t_p->common.id,
+ tnif, TRACE_FUN_T_SCHED_PORT,
+ what, where, THE_NON_VALUE, am_true);
}
/* Port profiling */
void
profile_runnable_port(Port *p, Eterm status) {
- Uint Ms, s, us;
- Eterm *hp, msg, timestamp;
-
+ Eterm *hp, msg;
+ ErlHeapFragment *bp = NULL;
Eterm count = make_small(0);
-#ifndef ERTS_SMP
-#define LOCAL_HEAP_SIZE (4 + 6)
-
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
- hp = local_heap;
-
-#else
- ErlHeapFragment *bp;
Uint hsz;
- hsz = 4 + 6;
+ hsz = 6 + patch_ts_size(erts_system_profile_ts_type)-1;
bp = new_message_buffer(hsz);
hp = bp->mem;
-#endif
- erts_smp_mtx_lock(&smq_mtx);
+ erts_mtx_lock(&smq_mtx);
- GET_NOW(&Ms, &s, &us);
- timestamp = TUPLE3(hp, make_small(Ms), make_small(s), make_small(us)); hp += 4;
- msg = TUPLE5(hp, am_profile, p->common.id, status, count, timestamp); hp += 6;
+ msg = TUPLE5(hp, am_profile, p->common.id, status, count,
+ NIL /* Will be overwritten by timestamp */);
+ hp += 6;
+
+ /* Write timestamp in element 5 of the 'msg' tuple */
+ hp[-1] = write_ts(erts_system_profile_ts_type, hp, bp, NULL);
-#ifndef ERTS_SMP
- profile_send(p->common.id, msg);
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
-#else
enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, p->common.id, NIL, msg, bp);
-#endif
- erts_smp_mtx_unlock(&smq_mtx);
+ erts_mtx_unlock(&smq_mtx);
}
/* Process profiling */
void
profile_runnable_proc(Process *p, Eterm status){
- Uint Ms, s, us;
- Eterm *hp, msg, timestamp;
+ Eterm *hp, msg;
Eterm where = am_undefined;
+ ErlHeapFragment *bp = NULL;
+ ErtsCodeMFA *cmfa = NULL;
-#ifndef ERTS_SMP
-#define LOCAL_HEAP_SIZE (4 + 6 + 4)
-
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+ ErtsThrPrgrDelayHandle dhndl;
+ Uint hsz = 4 + 6 + patch_ts_size(erts_system_profile_ts_type)-1;
+ /* Assumptions:
+ * We possibly don't have the MAIN_LOCK for the process p here.
+ * We assume that we can read from p->current and p->i atomically
+ */
+ dhndl = erts_thr_progress_unmanaged_delay(); /* suspend purge operations */
- hp = local_heap;
-#else
- ErlHeapFragment *bp;
- Uint hsz = 4 + 6 + 4;
-#endif
-
- if (!p->current) {
- p->current = find_function_from_pc(p->i);
+ if (!ERTS_PROC_IS_EXITING(p)) {
+ if (p->current) {
+ cmfa = p->current;
+ } else {
+ cmfa = find_function_from_pc(p->i);
+ }
}
-#ifdef ERTS_SMP
- if (!p->current) {
- hsz = 4 + 6;
+ if (!cmfa) {
+ hsz -= 4;
}
bp = new_message_buffer(hsz);
hp = bp->mem;
-#endif
- if (p->current) {
- where = TUPLE3(hp, p->current[0], p->current[1], make_small(p->current[2])); hp += 4;
+ if (cmfa) {
+ where = TUPLE3(hp, cmfa->module, cmfa->function,
+ make_small(cmfa->arity));
+ hp += 4;
} else {
where = make_small(0);
}
+
+ erts_thr_progress_unmanaged_continue(dhndl);
- erts_smp_mtx_lock(&smq_mtx);
+ erts_mtx_lock(&smq_mtx);
+
+ msg = TUPLE5(hp, am_profile, p->common.id, status, where,
+ NIL /* Will be overwritten by timestamp */);
+ hp += 6;
+
+ /* Write timestamp in element 5 of the 'msg' tuple */
+ hp[-1] = write_ts(erts_system_profile_ts_type, hp, bp, NULL);
- GET_NOW(&Ms, &s, &us);
- timestamp = TUPLE3(hp, make_small(Ms), make_small(s), make_small(us)); hp += 4;
- msg = TUPLE5(hp, am_profile, p->common.id, status, where, timestamp); hp += 6;
-#ifndef ERTS_SMP
- profile_send(p->common.id, msg);
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
-#else
enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, p->common.id, NIL, msg, bp);
-#endif
- erts_smp_mtx_unlock(&smq_mtx);
+ erts_mtx_unlock(&smq_mtx);
}
/* End system_profile tracing */
-#ifdef ERTS_SMP
-
-void
-erts_check_my_tracer_proc(Process *p)
-{
- if (is_internal_pid(ERTS_TRACER_PROC(p))) {
- Process *tracer = erts_pid2proc(p,
- ERTS_PROC_LOCK_MAIN,
- ERTS_TRACER_PROC(p),
- ERTS_PROC_LOCK_STATUS);
- int invalid_tracer = (!tracer
- || !(ERTS_TRACE_FLAGS(tracer) & F_TRACER));
- if (tracer)
- erts_smp_proc_unlock(tracer, ERTS_PROC_LOCK_STATUS);
- if (invalid_tracer) {
- erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- ERTS_TRACE_FLAGS(p) &= ~TRACEE_FLAGS;
- ERTS_TRACER_PROC(p) = NIL;
- erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- }
- }
-}
-
typedef struct ErtsSysMsgQ_ ErtsSysMsgQ;
struct ErtsSysMsgQ_ {
@@ -3081,7 +2026,7 @@ enqueue_sys_msg_unlocked(enum ErtsSysMsgType type,
sys_message_queue = smqp;
}
sys_message_queue_end = smqp;
- erts_smp_cnd_signal(&smq_cnd);
+ erts_cnd_signal(&smq_cnd);
}
static void
@@ -3091,9 +2036,9 @@ enqueue_sys_msg(enum ErtsSysMsgType type,
Eterm msg,
ErlHeapFragment *bp)
{
- erts_smp_mtx_lock(&smq_mtx);
+ erts_mtx_lock(&smq_mtx);
enqueue_sys_msg_unlocked(type, from, to, msg, bp);
- erts_smp_mtx_unlock(&smq_mtx);
+ erts_mtx_unlock(&smq_mtx);
}
void
@@ -3114,12 +2059,6 @@ static void
print_msg_type(ErtsSysMsgQ *smqp)
{
switch (smqp->type) {
- case SYS_MSG_TYPE_TRACE:
- erts_fprintf(stderr, "TRACE ");
- break;
- case SYS_MSG_TYPE_SEQTRACE:
- erts_fprintf(stderr, "SEQTRACE ");
- break;
case SYS_MSG_TYPE_SYSMON:
erts_fprintf(stderr, "SYSMON ");
break;
@@ -3130,8 +2069,8 @@ print_msg_type(ErtsSysMsgQ *smqp)
erts_fprintf(stderr, "ERRLGR ");
break;
case SYS_MSG_TYPE_PROC_MSG:
- erts_fprintf(stderr, "PROC_MSG ");
- break;
+ erts_fprintf(stderr, "PROC_MSG ");
+ break;
default:
erts_fprintf(stderr, "??? ");
break;
@@ -3143,17 +2082,6 @@ static void
sys_msg_disp_failure(ErtsSysMsgQ *smqp, Eterm receiver)
{
switch (smqp->type) {
- case SYS_MSG_TYPE_TRACE:
- /* Invalid tracer_proc's are removed when processes
- are scheduled in. */
- break;
- case SYS_MSG_TYPE_SEQTRACE:
- /* Reset seq_tracer if it hasn't changed */
- erts_smp_rwmtx_rwlock(&sys_trace_rwmtx);
- if (system_seq_tracer == receiver)
- system_seq_tracer = am_false;
- erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx);
- break;
case SYS_MSG_TYPE_SYSMON:
if (receiver == NIL
&& !erts_system_monitor_long_gc
@@ -3162,10 +2090,10 @@ sys_msg_disp_failure(ErtsSysMsgQ *smqp, Eterm receiver)
&& !erts_system_monitor_flags.busy_port
&& !erts_system_monitor_flags.busy_dist_port)
break; /* Everything is disabled */
- erts_smp_thr_progress_block();
+ erts_thr_progress_block();
if (system_monitor == receiver || receiver == NIL)
erts_system_monitor_clear(NULL);
- erts_smp_thr_progress_unblock();
+ erts_thr_progress_unblock();
break;
case SYS_MSG_TYPE_SYSPROF:
if (receiver == NIL
@@ -3175,11 +2103,11 @@ sys_msg_disp_failure(ErtsSysMsgQ *smqp, Eterm receiver)
&& !erts_system_profile_flags.scheduler)
break;
/* Block system to clear flags */
- erts_smp_thr_progress_block();
+ erts_thr_progress_block();
if (system_profile == receiver || receiver == NIL) {
erts_system_profile_clear(NULL);
}
- erts_smp_thr_progress_unblock();
+ erts_thr_progress_unblock();
break;
case SYS_MSG_TYPE_ERRLGR: {
char *no_elgger = "(no error logger present)";
@@ -3214,7 +2142,7 @@ sys_msg_disp_failure(ErtsSysMsgQ *smqp, Eterm receiver)
break;
}
case SYS_MSG_TYPE_PROC_MSG:
- break;
+ break;
default:
ASSERT(0);
}
@@ -3224,38 +2152,38 @@ static void
sys_msg_dispatcher_wakeup(void *vwait_p)
{
int *wait_p = (int *) vwait_p;
- erts_smp_mtx_lock(&smq_mtx);
+ erts_mtx_lock(&smq_mtx);
*wait_p = 0;
- erts_smp_cnd_signal(&smq_cnd);
- erts_smp_mtx_unlock(&smq_mtx);
+ erts_cnd_signal(&smq_cnd);
+ erts_mtx_unlock(&smq_mtx);
}
static void
sys_msg_dispatcher_prep_wait(void *vwait_p)
{
int *wait_p = (int *) vwait_p;
- erts_smp_mtx_lock(&smq_mtx);
+ erts_mtx_lock(&smq_mtx);
*wait_p = 1;
- erts_smp_mtx_unlock(&smq_mtx);
+ erts_mtx_unlock(&smq_mtx);
}
static void
sys_msg_dispatcher_fin_wait(void *vwait_p)
{
int *wait_p = (int *) vwait_p;
- erts_smp_mtx_lock(&smq_mtx);
+ erts_mtx_lock(&smq_mtx);
*wait_p = 0;
- erts_smp_mtx_unlock(&smq_mtx);
+ erts_mtx_unlock(&smq_mtx);
}
static void
sys_msg_dispatcher_wait(void *vwait_p)
{
int *wait_p = (int *) vwait_p;
- erts_smp_mtx_lock(&smq_mtx);
+ erts_mtx_lock(&smq_mtx);
while (*wait_p)
- erts_smp_cnd_wait(&smq_cnd, &smq_mtx);
- erts_smp_mtx_unlock(&smq_mtx);
+ erts_cnd_wait(&smq_cnd, &smq_mtx);
+ erts_mtx_unlock(&smq_mtx);
}
static void *
@@ -3281,9 +2209,9 @@ sys_msg_dispatcher_func(void *unused)
int end_wait = 0;
ErtsSysMsgQ *smqp;
- ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(!erts_thr_progress_is_blocking());
- erts_smp_mtx_lock(&smq_mtx);
+ erts_mtx_lock(&smq_mtx);
/* Free previously used queue ... */
while (local_sys_message_queue) {
@@ -3294,21 +2222,21 @@ sys_msg_dispatcher_func(void *unused)
/* Fetch current trace message queue ... */
if (!sys_message_queue) {
- erts_smp_mtx_unlock(&smq_mtx);
+ erts_mtx_unlock(&smq_mtx);
end_wait = 1;
erts_thr_progress_active(NULL, 0);
erts_thr_progress_prepare_wait(NULL);
- erts_smp_mtx_lock(&smq_mtx);
+ erts_mtx_lock(&smq_mtx);
}
while (!sys_message_queue)
- erts_smp_cnd_wait(&smq_cnd, &smq_mtx);
+ erts_cnd_wait(&smq_cnd, &smq_mtx);
local_sys_message_queue = sys_message_queue;
sys_message_queue = NULL;
sys_message_queue_end = NULL;
- erts_smp_mtx_unlock(&smq_mtx);
+ erts_mtx_unlock(&smq_mtx);
if (end_wait) {
erts_thr_progress_finalize_wait(NULL);
@@ -3328,19 +2256,13 @@ sys_msg_dispatcher_func(void *unused)
if (erts_thr_progress_update(NULL))
erts_thr_progress_leader_update(NULL);
- ERTS_SCHED_FAIR_YIELD();
-
#ifdef DEBUG_PRINTOUTS
print_msg_type(smqp);
#endif
switch (smqp->type) {
- case SYS_MSG_TYPE_TRACE:
- case SYS_MSG_TYPE_PROC_MSG:
- receiver = smqp->to;
- break;
- case SYS_MSG_TYPE_SEQTRACE:
- receiver = erts_get_system_seq_tracer();
- break;
+ case SYS_MSG_TYPE_PROC_MSG:
+ receiver = smqp->to;
+ break;
case SYS_MSG_TYPE_SYSMON:
receiver = erts_get_system_monitor();
if (smqp->from == receiver) {
@@ -3375,29 +2297,20 @@ sys_msg_dispatcher_func(void *unused)
if (is_internal_pid(receiver)) {
proc = erts_pid2proc(NULL, 0, receiver, proc_locks);
- if (!proc
- || (smqp->type == SYS_MSG_TYPE_TRACE
- && !(ERTS_TRACE_FLAGS(proc) & F_TRACER))) {
+ if (!proc) {
/* Bad tracer */
-#ifdef DEBUG_PRINTOUTS
- if (smqp->type == SYS_MSG_TYPE_TRACE && proc)
- erts_fprintf(stderr,
- "<tracer alive but missing "
- "F_TRACER flag> ");
-#endif
goto failure;
}
else {
+ ErtsMessage *mp;
queue_proc_msg:
- erts_queue_message(proc,&proc_locks,smqp->bp,smqp->msg,NIL
-#ifdef USE_VM_PROBES
- , NIL
-#endif
- );
+ mp = erts_alloc_message(0, NULL);
+ mp->data.heap_frag = smqp->bp;
+ erts_queue_message(proc,proc_locks,mp,smqp->msg,am_system);
#ifdef DEBUG_PRINTOUTS
erts_fprintf(stderr, "delivered\n");
#endif
- erts_smp_proc_unlock(proc, proc_locks);
+ erts_proc_unlock(proc, proc_locks);
}
}
else if (receiver == am_error_logger) {
@@ -3435,7 +2348,7 @@ sys_msg_dispatcher_func(void *unused)
sys_msg_disp_failure(smqp, receiver);
drop_sys_msg:
if (proc)
- erts_smp_proc_unlock(proc, proc_locks);
+ erts_proc_unlock(proc, proc_locks);
if (smqp->bp)
free_message_buffer(smqp->bp);
#ifdef DEBUG_PRINTOUTS
@@ -3455,16 +2368,10 @@ erts_foreach_sys_msg_in_q(void (*func)(Eterm,
ErlHeapFragment *))
{
ErtsSysMsgQ *sm;
- erts_smp_mtx_lock(&smq_mtx);
+ erts_mtx_lock(&smq_mtx);
for (sm = sys_message_queue; sm; sm = sm->next) {
Eterm to;
switch (sm->type) {
- case SYS_MSG_TYPE_TRACE:
- to = sm->to;
- break;
- case SYS_MSG_TYPE_SEQTRACE:
- to = erts_get_system_seq_tracer();
- break;
case SYS_MSG_TYPE_SYSMON:
to = erts_get_system_monitor();
break;
@@ -3480,32 +2387,648 @@ erts_foreach_sys_msg_in_q(void (*func)(Eterm,
}
(*func)(sm->from, to, sm->msg, sm->bp);
}
- erts_smp_mtx_unlock(&smq_mtx);
+ erts_mtx_unlock(&smq_mtx);
}
static void
init_sys_msg_dispatcher(void)
{
- erts_smp_thr_opts_t thr_opts = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
-#ifdef __OSE__
- thr_opts.coreNo = 0;
-#endif
+ erts_thr_opts_t thr_opts = ERTS_THR_OPTS_DEFAULT_INITER;
thr_opts.detached = 1;
+ thr_opts.name = "sys_msg_dispatcher";
init_smq_element_alloc();
sys_message_queue = NULL;
sys_message_queue_end = NULL;
- erts_smp_cnd_init(&smq_cnd);
- erts_smp_mtx_init(&smq_mtx, "sys_msg_q");
-
-#ifdef ETHR_HAVE_THREAD_NAMES
- thr_opts.name = "sys_msg_dispatcher";
-#endif
-
- erts_smp_thr_create(&sys_msg_dispatcher_tid,
+ erts_cnd_init(&smq_cnd);
+ erts_mtx_init(&smq_mtx, "sys_msg_q", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
+ erts_thr_create(&sys_msg_dispatcher_tid,
sys_msg_dispatcher_func,
NULL,
&thr_opts);
}
+
+#include "erl_nif.h"
+
+typedef struct {
+ char *name;
+ Uint arity;
+ ErlNifFunc *cb;
+} ErtsTracerType;
+
+struct ErtsTracerNif_ {
+ HashBucket hb;
+ Eterm module;
+ struct erl_module_nif* nif_mod;
+ ErtsTracerType tracers[NIF_TRACER_TYPES];
+};
+
+static void init_tracer_template(ErtsTracerNif *tnif) {
+
+ /* default tracer functions */
+ tnif->tracers[TRACE_FUN_DEFAULT].name = "trace";
+ tnif->tracers[TRACE_FUN_DEFAULT].arity = 5;
+ tnif->tracers[TRACE_FUN_DEFAULT].cb = NULL;
+
+ tnif->tracers[TRACE_FUN_ENABLED].name = "enabled";
+ tnif->tracers[TRACE_FUN_ENABLED].arity = 3;
+ tnif->tracers[TRACE_FUN_ENABLED].cb = NULL;
+
+ /* specific tracer functions */
+ tnif->tracers[TRACE_FUN_T_SEND].name = "trace_send";
+ tnif->tracers[TRACE_FUN_T_SEND].arity = 5;
+ tnif->tracers[TRACE_FUN_T_SEND].cb = NULL;
+
+ tnif->tracers[TRACE_FUN_T_RECEIVE].name = "trace_receive";
+ tnif->tracers[TRACE_FUN_T_RECEIVE].arity = 5;
+ tnif->tracers[TRACE_FUN_T_RECEIVE].cb = NULL;
+
+ tnif->tracers[TRACE_FUN_T_CALL].name = "trace_call";
+ tnif->tracers[TRACE_FUN_T_CALL].arity = 5;
+ tnif->tracers[TRACE_FUN_T_CALL].cb = NULL;
+
+ tnif->tracers[TRACE_FUN_T_SCHED_PROC].name = "trace_running_procs";
+ tnif->tracers[TRACE_FUN_T_SCHED_PROC].arity = 5;
+ tnif->tracers[TRACE_FUN_T_SCHED_PROC].cb = NULL;
+
+ tnif->tracers[TRACE_FUN_T_SCHED_PORT].name = "trace_running_ports";
+ tnif->tracers[TRACE_FUN_T_SCHED_PORT].arity = 5;
+ tnif->tracers[TRACE_FUN_T_SCHED_PORT].cb = NULL;
+
+ tnif->tracers[TRACE_FUN_T_GC].name = "trace_garbage_collection";
+ tnif->tracers[TRACE_FUN_T_GC].arity = 5;
+ tnif->tracers[TRACE_FUN_T_GC].cb = NULL;
+
+ tnif->tracers[TRACE_FUN_T_PROCS].name = "trace_procs";
+ tnif->tracers[TRACE_FUN_T_PROCS].arity = 5;
+ tnif->tracers[TRACE_FUN_T_PROCS].cb = NULL;
+
+ tnif->tracers[TRACE_FUN_T_PORTS].name = "trace_ports";
+ tnif->tracers[TRACE_FUN_T_PORTS].arity = 5;
+ tnif->tracers[TRACE_FUN_T_PORTS].cb = NULL;
+
+ /* specific enabled functions */
+ tnif->tracers[TRACE_FUN_E_SEND].name = "enabled_send";
+ tnif->tracers[TRACE_FUN_E_SEND].arity = 3;
+ tnif->tracers[TRACE_FUN_E_SEND].cb = NULL;
+
+ tnif->tracers[TRACE_FUN_E_RECEIVE].name = "enabled_receive";
+ tnif->tracers[TRACE_FUN_E_RECEIVE].arity = 3;
+ tnif->tracers[TRACE_FUN_E_RECEIVE].cb = NULL;
+
+ tnif->tracers[TRACE_FUN_E_CALL].name = "enabled_call";
+ tnif->tracers[TRACE_FUN_E_CALL].arity = 3;
+ tnif->tracers[TRACE_FUN_E_CALL].cb = NULL;
+
+ tnif->tracers[TRACE_FUN_E_SCHED_PROC].name = "enabled_running_procs";
+ tnif->tracers[TRACE_FUN_E_SCHED_PROC].arity = 3;
+ tnif->tracers[TRACE_FUN_E_SCHED_PROC].cb = NULL;
+
+ tnif->tracers[TRACE_FUN_E_SCHED_PORT].name = "enabled_running_ports";
+ tnif->tracers[TRACE_FUN_E_SCHED_PORT].arity = 3;
+ tnif->tracers[TRACE_FUN_E_SCHED_PORT].cb = NULL;
+
+ tnif->tracers[TRACE_FUN_E_GC].name = "enabled_garbage_collection";
+ tnif->tracers[TRACE_FUN_E_GC].arity = 3;
+ tnif->tracers[TRACE_FUN_E_GC].cb = NULL;
+
+ tnif->tracers[TRACE_FUN_E_PROCS].name = "enabled_procs";
+ tnif->tracers[TRACE_FUN_E_PROCS].arity = 3;
+ tnif->tracers[TRACE_FUN_E_PROCS].cb = NULL;
+
+ tnif->tracers[TRACE_FUN_E_PORTS].name = "enabled_ports";
+ tnif->tracers[TRACE_FUN_E_PORTS].arity = 3;
+ tnif->tracers[TRACE_FUN_E_PORTS].cb = NULL;
+}
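
init_tracer_template fixes the function names and arities the emulator will
look up in a tracer module's NIF; load_tracer_nif below rejects a module unless
at least the two defaults, "enabled"/3 and "trace"/5, are present. A minimal
sketch of such a module's C side using the public erl_nif API; the module name
my_tracer and the always-trace behaviour are illustrative, not part of ERTS:

    #include "erl_nif.h"

    /* Always allow the trace; a real tracer would inspect argv[0]
       (the trace tag), argv[1] (tracer state) and argv[2] (tracee). */
    static ERL_NIF_TERM
    enabled(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
    {
        (void)argc; (void)argv;
        return enif_make_atom(env, "trace");
    }

    /* Receives tag, tracer state, tracee, message and the metadata
       map built by send_to_tracer_nif_raw() further down. */
    static ERL_NIF_TERM
    trace(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
    {
        (void)argc; (void)argv;
        return enif_make_atom(env, "ok");
    }

    static ErlNifFunc tracer_funcs[] = {
        {"enabled", 3, enabled},
        {"trace",   5, trace}
    };

    ERL_NIF_INIT(my_tracer, tracer_funcs, NULL, NULL, NULL, NULL)
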
+
+static Hash *tracer_hash = NULL;
+static erts_rwmtx_t tracer_mtx;
+
+static ErtsTracerNif *
+load_tracer_nif(const ErtsTracer tracer)
+{
+ Module* mod = erts_get_module(ERTS_TRACER_MODULE(tracer),
+ erts_active_code_ix());
+ struct erl_module_instance *instance;
+ ErlNifFunc *funcs;
+ int num_of_funcs;
+ ErtsTracerNif tnif_tmpl, *tnif;
+ ErtsTracerType *tracers;
+ int i,j;
+
+ if (!mod || !mod->curr.nif) {
+ return NULL;
+ }
+
+ instance = &mod->curr;
+
+ init_tracer_template(&tnif_tmpl);
+ tnif_tmpl.nif_mod = instance->nif;
+ tnif_tmpl.module = ERTS_TRACER_MODULE(tracer);
+ tracers = tnif_tmpl.tracers;
+
+ num_of_funcs = erts_nif_get_funcs(instance->nif, &funcs);
+
+ for(i = 0; i < num_of_funcs; i++) {
+ for (j = 0; j < NIF_TRACER_TYPES; j++) {
+ if (strcmp(tracers[j].name, funcs[i].name) == 0 && tracers[j].arity == funcs[i].arity) {
+ tracers[j].cb = &(funcs[i]);
+ break;
+ }
+ }
+ }
+
+ if (tracers[TRACE_FUN_DEFAULT].cb == NULL || tracers[TRACE_FUN_ENABLED].cb == NULL ) {
+ return NULL;
+ }
+
+ erts_rwmtx_rwlock(&tracer_mtx);
+ tnif = hash_put(tracer_hash, &tnif_tmpl);
+ erts_rwmtx_rwunlock(&tracer_mtx);
+
+ return tnif;
+}
+
+static ERTS_INLINE ErtsTracerNif *
+lookup_tracer_nif(const ErtsTracer tracer)
+{
+ ErtsTracerNif tnif_tmpl;
+ ErtsTracerNif *tnif;
+ tnif_tmpl.module = ERTS_TRACER_MODULE(tracer);
+ erts_rwmtx_rlock(&tracer_mtx);
+ if ((tnif = hash_get(tracer_hash, &tnif_tmpl)) == NULL) {
+ erts_rwmtx_runlock(&tracer_mtx);
+ tnif = load_tracer_nif(tracer);
+ ASSERT(!tnif || tnif->nif_mod);
+ return tnif;
+ }
+ erts_rwmtx_runlock(&tracer_mtx);
+ ASSERT(tnif->nif_mod);
+ return tnif;
+}
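
lookup_tracer_nif is a read-mostly cache: the common hit path holds only the
read lock, and a miss releases it before load_tracer_nif re-acquires the lock
for writing, since rwlocks cannot be upgraded in place. A standalone sketch of
the same pattern, translated to POSIX rwlocks for illustration (ERTS itself
uses its erts_rwmtx API, and the table below models the hash):

    #include <pthread.h>
    #include <string.h>

    #define N 64
    static pthread_rwlock_t cache_lock = PTHREAD_RWLOCK_INITIALIZER;
    static struct { const char *key; int val; } cache[N];
    static int cache_len;

    static int *find_locked(const char *key)
    {
        int i;
        for (i = 0; i < cache_len; i++)
            if (strcmp(cache[i].key, key) == 0)
                return &cache[i].val;
        return NULL;
    }

    /* Take the read lock for the common hit path, drop it on a miss,
       then take the write lock to fill the entry. Another thread may
       have filled it meanwhile, so re-check under the write lock. */
    static int lookup(const char *key, int fill)
    {
        int *v, r;
        pthread_rwlock_rdlock(&cache_lock);
        if ((v = find_locked(key)) != NULL) {
            r = *v;
            pthread_rwlock_unlock(&cache_lock);
            return r;
        }
        pthread_rwlock_unlock(&cache_lock);

        pthread_rwlock_wrlock(&cache_lock);
        if ((v = find_locked(key)) == NULL && cache_len < N) {
            cache[cache_len].key = key;
            cache[cache_len].val = fill;
            v = &cache[cache_len++].val;
        }
        r = v ? *v : fill;
        pthread_rwlock_unlock(&cache_lock);
        return r;
    }

    int main(void)
    {
        lookup("erl_tracer", 1);
        return lookup("erl_tracer", 2) == 1 ? 0 : 1; /* cached value wins */
    }
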
+
+/* This function converts an Erlang tracer term to ErtsTracer.
+ It returns THE_NON_VALUE if an invalid tracer term was given.
+ Accepted input is:
+ pid() || port() || {prefix, pid()} || {prefix, port()} ||
+ {prefix, atom(), term()} || {atom(), term()}
+ */
+ErtsTracer
+erts_term_to_tracer(Eterm prefix, Eterm t)
+{
+ ErtsTracer tracer = erts_tracer_nil;
+ ASSERT(is_atom(prefix) || prefix == THE_NON_VALUE);
+ if (!is_nil(t)) {
+ Eterm module = am_erl_tracer, state = THE_NON_VALUE;
+ Eterm hp[2];
+ if (is_tuple(t)) {
+ Eterm *tp = tuple_val(t);
+ if (prefix != THE_NON_VALUE) {
+ if (arityval(tp[0]) == 2 && tp[1] == prefix)
+ t = tp[2];
+ else if (arityval(tp[0]) == 3 && tp[1] == prefix && is_atom(tp[2])) {
+ module = tp[2];
+ state = tp[3];
+ }
+ } else {
+ if (arityval(tp[0]) == 2 && is_atom(tp[2])) {
+ module = tp[1];
+ state = tp[2];
+ }
+ }
+ }
+ if (state == THE_NON_VALUE && (is_internal_pid(t) || is_internal_port(t)))
+ state = t;
+ if (state == THE_NON_VALUE)
+ return THE_NON_VALUE;
+ erts_tracer_update(&tracer, CONS(hp, module, state));
+ }
+ if (!lookup_tracer_nif(tracer)) {
+ ASSERT(ERTS_TRACER_MODULE(tracer) != am_erl_tracer);
+ ERTS_TRACER_CLEAR(&tracer);
+ return THE_NON_VALUE;
+ }
+ return tracer;
+}
+
+Eterm
+erts_tracer_to_term(Process *p, ErtsTracer tracer)
+{
+ if (ERTS_TRACER_IS_NIL(tracer))
+ return am_false;
+ if (ERTS_TRACER_MODULE(tracer) == am_erl_tracer)
+ /* Have to manage these specifically in order to be
+ backwards compatible */
+ return ERTS_TRACER_STATE(tracer);
+ else {
+ Eterm *hp = HAlloc(p, 3);
+ return TUPLE2(hp, ERTS_TRACER_MODULE(tracer),
+ copy_object(ERTS_TRACER_STATE(tracer), p));
+ }
+}
+
+
+static ERTS_INLINE int
+send_to_tracer_nif_raw(Process *c_p, Process *tracee,
+ const ErtsTracer tracer, Uint tracee_flags,
+ Eterm t_p_id, ErtsTracerNif *tnif,
+ enum ErtsTracerOpt topt,
+ Eterm tag, Eterm msg, Eterm extra, Eterm pam_result)
+{
+ if (tnif || (tnif = lookup_tracer_nif(tracer)) != NULL) {
+#define MAP_SIZE 4
+ Eterm argv[5], local_heap[3+MAP_SIZE /* values */ + (MAP_SIZE+1 /* keys */)];
+ flatmap_t *map = (flatmap_t*)(local_heap+(MAP_SIZE+1));
+ Eterm *map_values = flatmap_get_values(map);
+ Eterm *map_keys = local_heap + 1;
+ Uint map_elem_count = 0;
+
+ topt = (tnif->tracers[topt].cb) ? topt : TRACE_FUN_DEFAULT;
+ ASSERT(topt < NIF_TRACER_TYPES);
+
+ argv[0] = tag;
+ argv[1] = ERTS_TRACER_STATE(tracer);
+ argv[2] = t_p_id;
+ argv[3] = msg;
+ argv[4] = make_flatmap(map);
+
+ map->thing_word = MAP_HEADER_FLATMAP;
+
+ if (extra != THE_NON_VALUE) {
+ map_keys[map_elem_count] = am_extra;
+ map_values[map_elem_count++] = extra;
+ }
+
+ if (pam_result != am_true) {
+ map_keys[map_elem_count] = am_match_spec_result;
+ map_values[map_elem_count++] = pam_result;
+ }
+
+ if (tracee_flags & F_TRACE_SCHED_NO) {
+ map_keys[map_elem_count] = am_scheduler_id;
+ map_values[map_elem_count++] = make_small(erts_get_scheduler_id());
+ }
+ map_keys[map_elem_count] = am_timestamp;
+ if (tracee_flags & F_NOW_TS)
+#ifdef HAVE_ERTS_NOW_CPU
+ if (erts_cpu_timestamp)
+ map_values[map_elem_count++] = am_cpu_timestamp;
+ else
+#endif
+ map_values[map_elem_count++] = am_timestamp;
+ else if (tracee_flags & F_STRICT_MON_TS)
+ map_values[map_elem_count++] = am_strict_monotonic;
+ else if (tracee_flags & F_MON_TS)
+ map_values[map_elem_count++] = am_monotonic;
+
+ map->size = map_elem_count;
+ map->keys = make_tuple(local_heap);
+ local_heap[0] = make_arityval(map_elem_count);
+
+#undef MAP_SIZE
+ erts_nif_call_function(c_p, tracee ? tracee : c_p,
+ tnif->nif_mod,
+ tnif->tracers[topt].cb,
+ tnif->tracers[topt].arity,
+ argv);
+ }
+ return 1;
+}
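
The map handed to the tracer callback is a flatmap: a keys tuple plus a
parallel values array, both carved out of one stack buffer, with optional
entries appended only when present. A plain-C model of that shape, without any
ERTS term encoding (the key strings and values are illustrative):

    #include <stdio.h>

    #define MAX_KEYS 4

    /* Parallel-array "flatmap": keys[i] pairs with values[i]. */
    struct flatmap {
        int size;
        const char *keys[MAX_KEYS];
        long values[MAX_KEYS];
    };

    static void put(struct flatmap *m, const char *k, long v)
    {
        m->keys[m->size] = k;
        m->values[m->size++] = v;
    }

    int main(void)
    {
        struct flatmap m = { 0, {0}, {0} };
        int have_extra = 0, pam_true = 0;

        /* Optional entries are appended only when present, exactly as
           the map_elem_count logic above skips absent keys. */
        if (have_extra) put(&m, "extra", 1);
        if (!pam_true)  put(&m, "match_spec_result", 42);
        put(&m, "timestamp", 7);

        for (int i = 0; i < m.size; i++)
            printf("%s => %ld\n", m.keys[i], m.values[i]);
        return 0;
    }
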
+
+static ERTS_INLINE int
+send_to_tracer_nif(Process *c_p, ErtsPTabElementCommon *t_p,
+ Eterm t_p_id, ErtsTracerNif *tnif, enum ErtsTracerOpt topt,
+ Eterm tag, Eterm msg, Eterm extra, Eterm pam_result)
+{
+#if defined(ERTS_ENABLE_LOCK_CHECK)
+ if (c_p) {
+ /* We have to hold the main lock of the currently executing process */
+ erts_proc_lc_chk_have_proc_locks(c_p, ERTS_PROC_LOCK_MAIN);
+ }
+ if (is_internal_pid(t_p->id)) {
+ /* We have to have at least one lock */
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks((Process*)t_p) & ERTS_PROC_LOCKS_ALL);
+ } else {
+ ASSERT(is_internal_port(t_p->id));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked((Port*)t_p));
+ }
+#endif
+
+ return send_to_tracer_nif_raw(c_p,
+ is_internal_pid(t_p->id) ? (Process*)t_p : NULL,
+ t_p->tracer, t_p->trace_flags,
+ t_p_id, tnif, topt, tag, msg, extra,
+ pam_result);
+}
+
+static ERTS_INLINE Eterm
+call_enabled_tracer(const ErtsTracer tracer,
+ ErtsTracerNif **tnif_ret,
+ enum ErtsTracerOpt topt,
+ Eterm tag, Eterm t_p_id) {
+ ErtsTracerNif *tnif = lookup_tracer_nif(tracer);
+ if (tnif) {
+ Eterm argv[] = {tag, ERTS_TRACER_STATE(tracer), t_p_id},
+ ret;
+ topt = (tnif->tracers[topt].cb) ? topt : TRACE_FUN_ENABLED;
+ ASSERT(topt < NIF_TRACER_TYPES);
+ ASSERT(tnif->tracers[topt].cb != NULL);
+ if (tnif_ret) *tnif_ret = tnif;
+ ret = erts_nif_call_function(NULL, NULL, tnif->nif_mod,
+ tnif->tracers[topt].cb,
+ tnif->tracers[topt].arity,
+ argv);
+ if (tag == am_trace_status && ret != am_remove)
+ return am_trace;
+ ASSERT(tag == am_trace_status || ret != am_remove);
+ return ret;
+ }
+ return tag == am_trace_status ? am_remove : am_discard;
+}
+
+static int
+is_tracer_enabled(Process* c_p, ErtsProcLocks c_p_locks,
+ ErtsPTabElementCommon *t_p,
+ ErtsTracerNif **tnif_ret,
+ enum ErtsTracerOpt topt, Eterm tag) {
+ Eterm nif_result;
+
+#if defined(ERTS_ENABLE_LOCK_CHECK)
+ if (c_p)
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == c_p_locks
+ || erts_thr_progress_is_blocking());
+ if (is_internal_pid(t_p->id)) {
+ /* We have to have at least one lock */
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks((Process*)t_p) & ERTS_PROC_LOCKS_ALL
+ || erts_thr_progress_is_blocking());
+ } else {
+ ASSERT(is_internal_port(t_p->id));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked((Port*)t_p)
+ || erts_thr_progress_is_blocking());
+ }
#endif
+
+ nif_result = call_enabled_tracer(t_p->tracer, tnif_ret, topt, tag, t_p->id);
+ switch (nif_result) {
+ case am_discard: return 0;
+ case am_trace: return 1;
+ case THE_NON_VALUE:
+ case am_remove: ASSERT(tag == am_trace_status); break;
+ default:
+ /* Only am_remove should be returned, but if
+ something else is returned we fall through
+ and remove the tracer. */
+ ASSERT(0);
+ }
+
+ /* Only remove tracer on self() and ports */
+ if (is_internal_port(t_p->id) || (c_p && c_p->common.id == t_p->id)) {
+ ErtsProcLocks c_p_xlocks = 0;
+ if (is_internal_pid(t_p->id)) {
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN);
+ if (c_p_locks != ERTS_PROC_LOCKS_ALL) {
+ c_p_xlocks = ~c_p_locks & ERTS_PROC_LOCKS_ALL;
+ if (erts_proc_trylock(c_p, c_p_xlocks) == EBUSY) {
+ erts_proc_unlock(c_p, c_p_locks & ~ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ }
+ }
+ }
+ erts_tracer_replace(t_p, erts_tracer_nil);
+ t_p->trace_flags &= ~TRACEE_FLAGS;
+
+ if (c_p_xlocks)
+ erts_proc_unlock(c_p, c_p_xlocks);
+ }
+
+ return 0;
+}
+
+int erts_is_tracer_enabled(const ErtsTracer tracer, ErtsPTabElementCommon *t_p)
+{
+ ErtsTracerNif *tnif = lookup_tracer_nif(tracer);
+ if (tnif) {
+ Eterm nif_result = call_enabled_tracer(tracer, &tnif,
+ TRACE_FUN_ENABLED,
+ am_trace_status,
+ t_p->id);
+ switch (nif_result) {
+ case am_discard:
+ case am_trace: return 1;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+int erts_is_tracer_proc_enabled(Process* c_p, ErtsProcLocks c_p_locks,
+ ErtsPTabElementCommon *t_p)
+{
+ return is_tracer_enabled(c_p, c_p_locks, t_p, NULL, TRACE_FUN_ENABLED,
+ am_trace_status);
+}
+
+int erts_is_tracer_proc_enabled_send(Process* c_p, ErtsProcLocks c_p_locks,
+ ErtsPTabElementCommon *t_p)
+{
+ return is_tracer_enabled(c_p, c_p_locks, t_p, NULL, TRACE_FUN_T_SEND, am_send);
+}
+
+
+void erts_tracer_replace(ErtsPTabElementCommon *t_p, const ErtsTracer tracer)
+{
+#if defined(ERTS_ENABLE_LOCK_CHECK)
+ if (is_internal_pid(t_p->id) && !erts_thr_progress_is_blocking()) {
+ erts_proc_lc_chk_have_proc_locks((Process*)t_p, ERTS_PROC_LOCKS_ALL);
+ } else if (is_internal_port(t_p->id)) {
+ ERTS_LC_ASSERT(erts_lc_is_port_locked((Port*)t_p)
+ || erts_thr_progress_is_blocking());
+ }
+#endif
+ if (ERTS_TRACER_COMPARE(t_p->tracer, tracer))
+ return;
+
+ erts_tracer_update(&t_p->tracer, tracer);
+}
+
+static void free_tracer(void *p)
+{
+ ErtsTracer tracer = (ErtsTracer)p;
+
+ if (is_immed(ERTS_TRACER_STATE(tracer))) {
+ erts_free(ERTS_ALC_T_HEAP_FRAG, ptr_val(tracer));
+ } else {
+ ErlHeapFragment *hf = (void*)((char*)(ptr_val(tracer)) - offsetof(ErlHeapFragment, mem));
+ free_message_buffer(hf);
+ }
+}
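
The pointer arithmetic in free_tracer is the classic container_of idiom: step
back from an interior pointer (the cons cell stored in mem) to the enclosing
allocation using offsetof. A standalone demonstration with a stand-in struct:

    #include <stddef.h>
    #include <stdio.h>

    struct fragment {
        unsigned alloc_size;
        unsigned used_size;
        long mem[8];           /* term storage; the tracer points in here */
    };

    int main(void)
    {
        struct fragment f = { 8, 2, {0} };
        long *tracer = f.mem;  /* interior pointer, like ptr_val(tracer) */

        /* Step back from the interior pointer to the enclosing struct,
           exactly as free_tracer() recovers its ErlHeapFragment. */
        struct fragment *frag =
            (struct fragment *)((char *)tracer - offsetof(struct fragment, mem));

        printf("alloc_size = %u\n", frag->alloc_size);  /* prints 8 */
        return 0;
    }
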
+
+/* un-define erts_tracer_update before implementation */
+#ifdef erts_tracer_update
+#undef erts_tracer_update
+#endif
+
+/*
+ * ErtsTracer is either NIL, 'true' or [Mod | State]
+ *
+ * - If State is immediate, then the memory for
+ * the cons cell is just two words + sizeof(ErtsThrPrgrLaterOp).
+ * - If State is a complex term then the cons cell
+ * is allocated in an ErlHeapFragment where the cons
+ * ptr points to the mem field. So in order to get the
+ * ptr to the fragment you do this:
+ * (char*)(ptr_val(tracer)) - offsetof(ErlHeapFragment, mem)
+ * Normally you shouldn't have to care about this though
+ * as erts_tracer_update takes care of it for you.
+ *
+ * When ErtsTracer is stored in the stack as part of a
+ * return trace, the cons cell is stored on the heap of
+ * the process.
+ *
+ * The cons cell is not always stored on the heap as:
+ * 1) for port/meta tracing there is no heap
+ * 2) we would need the main lock in order to
+ * read the tracer which is undesirable.
+ *
+ * One way to optimize this (memory-wise) is to keep a refc and only bump
+ * the refc when *tracer is NIL.
+ */
+void
+erts_tracer_update(ErtsTracer *tracer, const ErtsTracer new_tracer)
+{
+ ErlHeapFragment *hf;
+
+ if (is_not_nil(*tracer)) {
+ Uint offs = 2;
+ UWord size = 2 * sizeof(Eterm) + sizeof(ErtsThrPrgrLaterOp);
+ ErtsThrPrgrLaterOp *lop;
+ ASSERT(is_list(*tracer));
+ if (is_not_immed(ERTS_TRACER_STATE(*tracer))) {
+ hf = (void*)(((char*)(ptr_val(*tracer)) - offsetof(ErlHeapFragment, mem)));
+ offs = hf->used_size;
+ size = hf->alloc_size * sizeof(Eterm) + sizeof(ErlHeapFragment);
+ ASSERT(offs == size_object(*tracer));
+ }
+
+ /* SPARC requires that all structs be double-word aligned, so we
+ have to align the ErtsThrPrgrLaterOp struct; otherwise it may
+ segfault. */
+ if ((UWord)(ptr_val(*tracer) + offs) % (sizeof(UWord)*2) == sizeof(UWord))
+ offs += 1;
+
+ lop = (ErtsThrPrgrLaterOp*)(ptr_val(*tracer) + offs);
+ ASSERT((UWord)lop % (sizeof(UWord)*2) == 0);
+
+ /* We defer the freeing of the tracer until thread progress
+ has been made, so that we know that no schedulers have any references
+ to it. Because of this, it is possible to release all locks of a
+ process/port and still use the ErtsTracer of that port/process
+ without having to worry about it being freed.
+ */
+ erts_schedule_thr_prgr_later_cleanup_op(
+ free_tracer, (void*)(*tracer), lop, size);
+ }
+
+ if (is_nil(new_tracer)) {
+ *tracer = new_tracer;
+ } else if (is_immed(ERTS_TRACER_STATE(new_tracer))) {
+ /* If the tracer state is an immediate, we only allocate a 2 Eterm heap.
+ Not sure if it is worth it; we save 4 words (sizeof(ErlHeapFragment))
+ per tracer. */
+ Eterm *hp = erts_alloc(ERTS_ALC_T_HEAP_FRAG,
+ 3*sizeof(Eterm) + sizeof(ErtsThrPrgrLaterOp));
+ *tracer = CONS(hp, ERTS_TRACER_MODULE(new_tracer),
+ ERTS_TRACER_STATE(new_tracer));
+ } else {
+ Eterm *hp, tracer_state = ERTS_TRACER_STATE(new_tracer),
+ tracer_module = ERTS_TRACER_MODULE(new_tracer);
+ Uint sz = size_object(tracer_state);
+ hf = new_message_buffer(sz + 2 /* cons cell */ +
+ (sizeof(ErtsThrPrgrLaterOp)+sizeof(Eterm)-1)/sizeof(Eterm) + 1);
+ hp = hf->mem + 2;
+ hf->used_size -= (sizeof(ErtsThrPrgrLaterOp)+sizeof(Eterm)-1)/sizeof(Eterm) + 1;
+ *tracer = copy_struct(tracer_state, sz, &hp, &hf->off_heap);
+ *tracer = CONS(hf->mem, tracer_module, *tracer);
+ ASSERT((void*)(((char*)(ptr_val(*tracer)) - offsetof(ErlHeapFragment, mem))) == hf);
+ }
+}
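
The alignment fix-up above adds one word whenever the candidate address ends up
on an odd word boundary, so the ErtsThrPrgrLaterOp always lands double-word
aligned. A standalone sketch of that check; offsets are counted in word-sized
units, as in the function above:

    #include <stdint.h>
    #include <stdio.h>

    /* Round a word offset up so that base + offs words lands on a
       double-word boundary -- the same check erts_tracer_update()
       performs before placing the ErtsThrPrgrLaterOp. */
    static size_t align_offs(uintptr_t base, size_t offs, size_t word)
    {
        if ((base + offs * word) % (2 * word) == word)
            offs += 1;   /* odd word boundary: pad one word */
        return offs;
    }

    int main(void)
    {
        size_t w = sizeof(void *);
        printf("%zu\n", align_offs(0, 2, w)); /* 2: already aligned */
        printf("%zu\n", align_offs(0, 3, w)); /* 4: padded up from odd */
        return 0;
    }
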
+
+static void init_tracer_nif()
+{
+ erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED;
+
+ erts_rwmtx_init_opt(&tracer_mtx, &rwmtx_opt, "tracer_mtx", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
+
+ erts_tracer_nif_clear();
+
+}
+
+int erts_tracer_nif_clear()
+{
+
+ erts_rwmtx_rlock(&tracer_mtx);
+ if (!tracer_hash || tracer_hash->nobjs) {
+
+ HashFunctions hf;
+ hf.hash = tracer_hash_fun;
+ hf.cmp = tracer_cmp_fun;
+ hf.alloc = tracer_alloc_fun;
+ hf.free = tracer_free_fun;
+ hf.meta_alloc = (HMALLOC_FUN) erts_alloc;
+ hf.meta_free = (HMFREE_FUN) erts_free;
+ hf.meta_print = (HMPRINT_FUN) erts_print;
+
+ erts_rwmtx_runlock(&tracer_mtx);
+ erts_rwmtx_rwlock(&tracer_mtx);
+
+ if (tracer_hash)
+ hash_delete(tracer_hash);
+
+ tracer_hash = hash_new(ERTS_ALC_T_TRACER_NIF, "tracer_hash", 10, hf);
+
+ erts_rwmtx_rwunlock(&tracer_mtx);
+ return 1;
+ }
+
+ erts_rwmtx_runlock(&tracer_mtx);
+ return 0;
+}
+
+static int tracer_cmp_fun(void* a, void* b)
+{
+ return ((ErtsTracerNif*)a)->module != ((ErtsTracerNif*)b)->module;
+}
+
+static HashValue tracer_hash_fun(void* obj)
+{
+ return make_internal_hash(((ErtsTracerNif*)obj)->module, 0);
+}
+
+static void *tracer_alloc_fun(void* tmpl)
+{
+ ErtsTracerNif *obj = erts_alloc(ERTS_ALC_T_TRACER_NIF,
+ sizeof(ErtsTracerNif) +
+ sizeof(ErtsThrPrgrLaterOp));
+ memcpy(obj, tmpl, sizeof(*obj));
+ return obj;
+}
+
+static void tracer_free_fun_cb(void* obj)
+{
+ erts_free(ERTS_ALC_T_TRACER_NIF, obj);
+}
+
+static void tracer_free_fun(void* obj)
+{
+ ErtsTracerNif *tnif = obj;
+ erts_schedule_thr_prgr_later_op(
+ tracer_free_fun_cb, obj,
+ (ErtsThrPrgrLaterOp*)(tnif + 1));
+
+}
diff --git a/erts/emulator/beam/erl_trace.h b/erts/emulator/beam/erl_trace.h
index 4f2c70d6e7..dbf7ebd2a1 100644
--- a/erts/emulator/beam/erl_trace.h
+++ b/erts/emulator/beam/erl_trace.h
@@ -1,28 +1,70 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2012-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2012-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
+#ifndef ERL_TRACE_H__FLAGS__
+#define ERL_TRACE_H__FLAGS__
+/*
+ * NOTE! The bits used for these flags matter. The flag with
+ * the least significant bit will take precedence!
+ *
+ * The "now timestamp" has highest precedence due to
+ * compatibility reasons.
+ */
+#define ERTS_TRACE_FLG_NOW_TIMESTAMP (1 << 0)
+#define ERTS_TRACE_FLG_STRICT_MONOTONIC_TIMESTAMP (1 << 1)
+#define ERTS_TRACE_FLG_MONOTONIC_TIMESTAMP (1 << 2)
-#ifndef ERL_TRACE_H__
+/*
+ * The bits used affect trace flags (of processes and ports)
+ * as well as sequential trace flags. If changed, make sure
+ * these aren't messed up...
+ */
+#define ERTS_TRACE_TS_TYPE_BITS 3
+#define ERTS_TRACE_TS_TYPE_MASK \
+ ((1 << ERTS_TRACE_TS_TYPE_BITS) - 1)
+
+#define ERTS_TFLGS2TSTYPE(TFLGS) \
+ ((int) (((TFLGS) >> ERTS_TRACE_FLAGS_TS_TYPE_SHIFT) \
+ & ERTS_TRACE_TS_TYPE_MASK))
+#define ERTS_SEQTFLGS2TSTYPE(SEQTFLGS) \
+ ((int) (((SEQTFLGS) >> ERTS_SEQ_TRACE_FLAGS_TS_TYPE_SHIFT) \
+ & ERTS_TRACE_TS_TYPE_MASK))
+#define ERTS_SEQTFLGS2TFLGS(SEQTFLGS) \
+ (ERTS_SEQTFLGS2TSTYPE(SEQTFLGS) << ERTS_TRACE_FLAGS_TS_TYPE_SHIFT)
+
+#endif /* ERL_TRACE_H__FLAGS__ */
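To make the precedence rule above concrete, here is a small standalone sketch (ts_type_name is a hypothetical helper, not an ERTS function): isolating the lowest set bit with x & -x picks the winning flag, so ERTS_TRACE_FLG_NOW_TIMESTAMP (bit 0) beats the others when several are set.

    #include <stdio.h>

    static const char *ts_type_name(unsigned flags)
    {
        unsigned ts = flags & 0x7u;      /* the three timestamp bits */
        switch (ts & -ts) {              /* lowest set bit takes precedence */
        case 1u << 0: return "now";
        case 1u << 1: return "strict_monotonic";
        case 1u << 2: return "monotonic";
        default:      return "none";
        }
    }

    int main(void)
    {
        /* "now" and "monotonic" both set: "now" wins */
        printf("%s\n", ts_type_name((1u << 0) | (1u << 2)));
        return 0;
    }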
+
+#if !defined(ERL_TRACE_H__) && !defined(ERTS_ONLY_INCLUDE_TRACE_FLAGS)
#define ERL_TRACE_H__
struct binary;
+typedef struct
+{
+ int on;
+ struct binary* match_spec;
+} ErtsTracingEvent;
+
+extern ErtsTracingEvent erts_send_tracing[];
+extern ErtsTracingEvent erts_receive_tracing[];
+
/* erl_bif_trace.c */
Eterm erl_seq_trace_info(Process *p, Eterm arg1);
void erts_system_monitor_clear(Process *c_p);
@@ -31,17 +73,20 @@ void erts_system_profile_clear(Process *c_p);
/* erl_trace.c */
void erts_init_trace(void);
void erts_trace_check_exiting(Eterm exiting);
-Eterm erts_set_system_seq_tracer(Process *c_p,
- ErtsProcLocks c_p_locks,
- Eterm new);
-Eterm erts_get_system_seq_tracer(void);
-void erts_change_default_tracing(int setflags, Uint *flagsp, Eterm *tracerp);
-void erts_get_default_tracing(Uint *flagsp, Eterm *tracerp);
+ErtsTracer erts_set_system_seq_tracer(Process *c_p,
+ ErtsProcLocks c_p_locks,
+ ErtsTracer new);
+ErtsTracer erts_get_system_seq_tracer(void);
+void erts_change_default_proc_tracing(int setflags, Uint flagsp,
+ const ErtsTracer tracerp);
+void erts_get_default_proc_tracing(Uint *flagsp, ErtsTracer *tracerp);
+void erts_change_default_port_tracing(int setflags, Uint flagsp,
+ const ErtsTracer tracerp);
+void erts_get_default_port_tracing(Uint *flagsp, ErtsTracer *tracerp);
void erts_set_system_monitor(Eterm monitor);
Eterm erts_get_system_monitor(void);
-int erts_is_tracer_proc_valid(Process* p);
+int erts_is_tracer_valid(Process* p);
-#ifdef ERTS_SMP
void erts_check_my_tracer_proc(Process *);
void erts_block_sys_msg_dispatcher(void);
void erts_release_sys_msg_dispatcher(void);
@@ -50,28 +95,31 @@ void erts_foreach_sys_msg_in_q(void (*func)(Eterm,
Eterm,
ErlHeapFragment *));
void erts_queue_error_logger_message(Eterm, Eterm, ErlHeapFragment *);
-#endif
-
void erts_send_sys_msg_proc(Eterm, Eterm, Eterm, ErlHeapFragment *);
+
void trace_send(Process*, Eterm, Eterm);
-void trace_receive(Process*, Eterm);
-Uint32 erts_call_trace(Process *p, BeamInstr mfa[], struct binary *match_spec, Eterm* args,
- int local, Eterm *tracer_pid);
-void erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid);
-void erts_trace_exception(Process* p, BeamInstr mfa[], Eterm class, Eterm value,
- Eterm *tracer);
+void trace_receive(Process*, Eterm, Eterm, ErtsTracingEvent*);
+Uint32 erts_call_trace(Process *p, ErtsCodeInfo *info, struct binary *match_spec,
+ Eterm* args, int local, ErtsTracer *tracer);
+void erts_trace_return(Process* p, ErtsCodeMFA *mfa, Eterm retval,
+ ErtsTracer *tracer);
+void erts_trace_exception(Process* p, ErtsCodeMFA *mfa, Eterm class, Eterm value,
+ ErtsTracer *tracer);
void erts_trace_return_to(Process *p, BeamInstr *pc);
-void trace_sched(Process*, Eterm);
-void trace_proc(Process*, Process*, Eterm, Eterm);
-void trace_proc_spawn(Process*, Eterm pid, Eterm mod, Eterm func, Eterm args);
+void trace_sched(Process*, ErtsProcLocks, Eterm);
+void trace_proc(Process*, ErtsProcLocks, Process*, Eterm, Eterm);
+void trace_proc_spawn(Process*, Eterm what, Eterm pid, Eterm mod, Eterm func, Eterm args);
void save_calls(Process *p, Export *);
-void trace_gc(Process *p, Eterm what);
+void trace_gc(Process *p, Eterm what, Uint size, Eterm msg);
/* port tracing */
-void trace_virtual_sched(Process*, Eterm);
+void trace_virtual_sched(Process*, ErtsProcLocks, Eterm);
void trace_sched_ports(Port *pp, Eterm);
void trace_sched_ports_where(Port *pp, Eterm, Eterm);
void trace_port(Port *, Eterm what, Eterm data);
void trace_port_open(Port *, Eterm calling_pid, Eterm drv_name);
+void trace_port_receive(Port *, Eterm calling_pid, Eterm tag, ...);
+void trace_port_send(Port *, Eterm to, Eterm msg, int exists);
+void trace_port_send_binary(Port *, Eterm to, Eterm what, char *bin, Sint sz);
/* system_profile */
void erts_set_system_profile(Eterm profile);
@@ -84,25 +132,27 @@ void erts_system_profile_setup_active_schedulers(void);
/* system_monitor */
void monitor_long_gc(Process *p, Uint time);
-void monitor_long_schedule_proc(Process *p, BeamInstr *in_i, BeamInstr *out_i, Uint time);
+void monitor_long_schedule_proc(Process *p, ErtsCodeMFA *in_i,
+ ErtsCodeMFA *out_i, Uint time);
void monitor_long_schedule_port(Port *pp, ErtsPortTaskType type, Uint time);
void monitor_large_heap(Process *p);
void monitor_generic(Process *p, Eterm type, Eterm spec);
Uint erts_trace_flag2bit(Eterm flag);
int erts_trace_flags(Eterm List,
- Uint *pMask, Eterm *pTracer, int *pCpuTimestamp);
+ Uint *pMask, ErtsTracer *pTracer, int *pCpuTimestamp);
Eterm erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr *I);
+Eterm
+erts_bif_trace_epilogue(Process *p, Eterm result, int applying,
+ Export* ep, BeamInstr *cp, Uint32 flags,
+ Uint32 flags_meta, BeamInstr* I,
+ ErtsTracer meta_tracer);
-#ifdef ERTS_SMP
void erts_send_pending_trace_msgs(ErtsSchedulerData *esdp);
-#define ERTS_SMP_CHK_PEND_TRACE_MSGS(ESDP) \
+#define ERTS_CHK_PEND_TRACE_MSGS(ESDP) \
do { \
if ((ESDP)->pending_trace_msgs) \
erts_send_pending_trace_msgs((ESDP)); \
} while (0)
-#else
-#define ERTS_SMP_CHK_PEND_TRACE_MSGS(ESDP)
-#endif
#define seq_trace_output(token, msg, type, receiver, process) \
seq_trace_output_generic((token), (msg), (type), (receiver), (process), NIL)
@@ -126,19 +176,54 @@ struct trace_pattern_flags {
};
extern const struct trace_pattern_flags erts_trace_pattern_flags_off;
extern int erts_call_time_breakpoint_tracing;
-int erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
+int erts_set_trace_pattern(Process*p, ErtsCodeMFA *mfa, int specified,
struct binary* match_prog_set,
struct binary *meta_match_prog_set,
int on, struct trace_pattern_flags,
- Eterm meta_tracer_pid, int is_blocking);
+ ErtsTracer meta_tracer, int is_blocking);
void
erts_get_default_trace_pattern(int *trace_pattern_is_on,
struct binary **match_spec,
struct binary **meta_match_spec,
struct trace_pattern_flags *trace_pattern_flags,
- Eterm *meta_tracer_pid);
+ ErtsTracer *meta_tracer);
int erts_is_default_trace_enabled(void);
void erts_bif_trace_init(void);
int erts_finish_breakpointing(void);
+/* Nif tracer functions */
+int erts_is_tracer_proc_enabled(Process *c_p, ErtsProcLocks c_p_locks,
+ ErtsPTabElementCommon *t_p);
+int erts_is_tracer_proc_enabled_send(Process* c_p, ErtsProcLocks c_p_locks,
+ ErtsPTabElementCommon *t_p);
+int erts_is_tracer_enabled(const ErtsTracer tracer, ErtsPTabElementCommon *t_p);
+Eterm erts_tracer_to_term(Process *p, ErtsTracer tracer);
+ErtsTracer erts_term_to_tracer(Eterm prefix, Eterm term);
+void erts_tracer_replace(ErtsPTabElementCommon *t_p,
+ const ErtsTracer new_tracer);
+void erts_tracer_update(ErtsTracer *tracer, const ErtsTracer new_tracer);
+int erts_tracer_nif_clear(void);
+
+#define erts_tracer_update(t,n) do { if (*(t) != (n)) erts_tracer_update(t,n); } while(0)
+#define ERTS_TRACER_CLEAR(t) erts_tracer_update(t, erts_tracer_nil)
+
+static const ErtsTracer
+ERTS_DECLARE_DUMMY(erts_tracer_true) = am_true;
+
+static const ErtsTracer
+ERTS_DECLARE_DUMMY(erts_tracer_nil) = NIL;
+
+#define ERTS_TRACER_COMPARE(t1, t2) \
+ (EQ((t1), (t2)))
+
+#define ERTS_TRACER_IS_NIL(t1) ERTS_TRACER_COMPARE(t1, erts_tracer_nil)
+
+#define IS_TRACER_VALID(tracer) \
+ (ERTS_TRACER_COMPARE(tracer,erts_tracer_true) \
+ || ERTS_TRACER_IS_NIL(tracer) \
+ || (is_list(tracer) && is_atom(CAR(list_val(tracer)))))
+
+#define ERTS_TRACER_FROM_ETERM(termp) \
+ ((ErtsTracer*)(termp))
+
#endif /* ERL_TRACE_H__ */
diff --git a/erts/emulator/beam/erl_unicode.c b/erts/emulator/beam/erl_unicode.c
index 3a968594f3..b7a5c45fea 100644
--- a/erts/emulator/beam/erl_unicode.c
+++ b/erts/emulator/beam/erl_unicode.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2008-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2008-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -54,7 +55,7 @@ static BIF_RETTYPE finalize_list_to_list(Process *p,
Uint num_processed_bytes,
Uint num_bytes_to_process,
Uint num_resulting_chars,
- int state, int left,
+ int state, Sint left,
Eterm tail);
static BIF_RETTYPE characters_to_utf8_trap(BIF_ALIST_3);
static BIF_RETTYPE characters_to_list_trap_1(BIF_ALIST_3);
@@ -122,19 +123,16 @@ static void cleanup_restart_context(RestartContext *rc)
}
}
-static void cleanup_restart_context_bin(Binary *bp)
+static int cleanup_restart_context_bin(Binary *bp)
{
RestartContext *rc = ERTS_MAGIC_BIN_DATA(bp);
cleanup_restart_context(rc);
+ return 1;
}
-static RestartContext *get_rc_from_bin(Eterm bin)
+static RestartContext *get_rc_from_bin(Eterm mref)
{
- Binary *mbp;
- ASSERT(ERTS_TERM_IS_MAGIC_BINARY(bin));
-
- mbp = ((ProcBin *) binary_val(bin))->val;
-
+ Binary *mbp = erts_magic_ref2bin(mref);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp)
== cleanup_restart_context_bin);
return (RestartContext *) ERTS_MAGIC_BIN_DATA(mbp);
@@ -147,8 +145,8 @@ static Eterm make_magic_bin_for_restart(Process *p, RestartContext *rc)
RestartContext *restartp = ERTS_MAGIC_BIN_DATA(mbp);
Eterm *hp;
memcpy(restartp,rc,sizeof(RestartContext));
- hp = HAlloc(p, PROC_BIN_SIZE);
- return erts_mk_magic_binary_term(&hp, &MSO(p), mbp);
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+ return erts_mk_magic_ref(&hp, &MSO(p), mbp);
}
@@ -172,12 +170,13 @@ static ERTS_INLINE int allowed_iterations(Process *p)
else
return tmp;
}
-static ERTS_INLINE int cost_to_proc(Process *p, int cost)
+
+static ERTS_INLINE void cost_to_proc(Process *p, Sint cost)
{
- int x = (cost / LOOP_FACTOR);
+ Sint x = (cost / LOOP_FACTOR);
BUMP_REDS(p,x);
- return x;
}
+
static ERTS_INLINE int simple_loops_to_common(int cost)
{
int factor = (LOOP_FACTOR_SIMPLE / LOOP_FACTOR);
@@ -242,14 +241,15 @@ static int utf8_len(byte first)
return -1;
}
-static int copy_utf8_bin(byte *target, byte *source, Uint size,
- byte *leftover, int *num_leftovers,
- byte **err_pos, Uint *characters) {
- int copied = 0;
+static Uint copy_utf8_bin(byte *target, byte *source, Uint size,
+ byte *leftover, int *num_leftovers,
+ byte **err_pos, Uint *characters)
+{
+ Uint copied = 0;
if (leftover != NULL && *num_leftovers) {
int need = utf8_len(leftover[0]);
int from_source = need - (*num_leftovers);
- int c;
+ Uint c;
byte *tmp_err_pos = NULL;
ASSERT(need > 0);
ASSERT(from_source > 0);
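The leftover handling above exists because a chunk boundary can split a multi-byte codepoint: at most three bytes of an incomplete sequence are stashed and completed from the next chunk. For reference, the lead-byte length rule that utf8_len() (shown earlier in this file) encodes is, restated as a standalone sketch with plain C types:

    static int utf8_len(unsigned char first)
    {
        if (first < 0x80)           return 1;  /* 0xxxxxxx: ASCII */
        if ((first & 0xE0) == 0xC0) return 2;  /* 110xxxxx */
        if ((first & 0xF0) == 0xE0) return 3;  /* 1110xxxx */
        if ((first & 0xF8) == 0xF0) return 4;  /* 11110xxx */
        return -1;                             /* continuation or invalid lead */
    }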
@@ -501,8 +501,8 @@ L_Again: /* Restart with sublist, old listend was pushed on stack */
}
-static Eterm do_build_utf8(Process *p, Eterm ioterm, int *left, int latin1,
- byte *target, int *pos, Uint *characters, int *err,
+static Eterm do_build_utf8(Process *p, Eterm ioterm, Sint *left, int latin1,
+ byte *target, Uint *pos, Uint *characters, int *err,
byte *leftover, int *num_leftovers)
{
int c;
@@ -572,7 +572,7 @@ static Eterm do_build_utf8(Process *p, Eterm ioterm, int *left, int latin1,
}
if (!latin1) {
- int num;
+ Uint num;
byte *err_pos = NULL;
num = copy_utf8_bin(target + (*pos), bytes,
size, leftover, num_leftovers,&err_pos,characters);
@@ -803,7 +803,7 @@ static int check_leftovers(byte *source, int size)
-static BIF_RETTYPE build_utf8_return(Process *p,Eterm bin,int pos,
+static BIF_RETTYPE build_utf8_return(Process *p,Eterm bin,Uint pos,
Eterm rest_term,int err,
byte *leftover,int num_leftovers,Eterm latin1)
{
@@ -858,8 +858,8 @@ static BIF_RETTYPE characters_to_utf8_trap(BIF_ALIST_3)
#endif
byte* bytes;
Eterm rest_term;
- int left, sleft;
- int pos;
+ Sint left, sleft;
+ Uint pos;
int err;
byte leftover[4]; /* used for temp buffer too,
otherwise 3 bytes would have been enough */
@@ -873,7 +873,7 @@ static BIF_RETTYPE characters_to_utf8_trap(BIF_ALIST_3)
real_bin = binary_val(BIF_ARG_1);
ASSERT(*real_bin == HEADER_PROC_BIN);
#endif
- pos = (int) binary_size(BIF_ARG_1);
+ pos = binary_size(BIF_ARG_1);
bytes = binary_bytes(BIF_ARG_1);
sleft = left = allowed_iterations(BIF_P);
err = 0;
@@ -933,9 +933,9 @@ BIF_RETTYPE unicode_characters_to_binary_2(BIF_ALIST_2)
int latin1;
Eterm bin;
byte *bytes;
- int pos;
+ Uint pos;
int err;
- int left, sleft;
+ Sint left, sleft;
Eterm rest_term, subject;
     byte leftover[4]; /* used for temp buffer too,
                          otherwise 3 bytes would have been enough */
@@ -998,7 +998,7 @@ BIF_RETTYPE unicode_characters_to_binary_2(BIF_ALIST_2)
byte *t = NULL;
Uint sz = binary_size(bin);
byte *by = erts_get_aligned_binary_bytes(bin,&t);
- int i;
+ Uint i;
erts_printf("<<");
for (i = 0;i < sz; ++i) {
erts_printf((i == sz -1) ? "0x%X" : "0x%X, ", (unsigned) by[i]);
@@ -1006,7 +1006,7 @@ BIF_RETTYPE unicode_characters_to_binary_2(BIF_ALIST_2)
erts_printf(">>: ");
erts_free_aligned_binary_bytes(t);
}
- erts_printf("%d - %d = %d\n",sleft,left,sleft - left);
+ erts_printf("%ld - %ld = %ld\n", sleft, left, sleft - left);
}
#endif
cost_to_proc(BIF_P, sleft - left);
@@ -1014,10 +1014,10 @@ BIF_RETTYPE unicode_characters_to_binary_2(BIF_ALIST_2)
leftover,num_leftovers,BIF_ARG_2);
}
-static BIF_RETTYPE build_list_return(Process *p, byte *bytes, int pos, Uint characters,
+static BIF_RETTYPE build_list_return(Process *p, byte *bytes, Uint pos, Uint characters,
Eterm rest_term, int err,
byte *leftover, int num_leftovers,
- Eterm latin1, int left)
+ Eterm latin1, Sint left)
{
Eterm *hp;
@@ -1069,11 +1069,11 @@ static BIF_RETTYPE characters_to_list_trap_1(BIF_ALIST_3)
{
RestartContext *rc;
byte* bytes;
- int pos;
+ Uint pos;
Uint characters;
int err;
Eterm rest_term;
- int left, sleft;
+ Sint left, sleft;
int latin1 = 0;
byte leftover[4]; /* used for temp buffer too,
@@ -1106,9 +1106,9 @@ BIF_RETTYPE unicode_characters_to_list_2(BIF_ALIST_2)
int latin1;
Uint characters = 0;
byte *bytes;
- int pos;
+ Uint pos;
int err;
- int left, sleft;
+ Sint left, sleft;
Eterm rest_term;
     byte leftover[4]; /* used for temp buffer too,
                          otherwise 3 bytes would have been enough */
@@ -1540,7 +1540,7 @@ static BIF_RETTYPE finalize_list_to_list(Process *p,
Uint num_processed_bytes,
Uint num_bytes_to_process,
Uint num_resulting_chars,
- int state, int left,
+ int state, Sint left,
Eterm tail)
{
Uint num_built; /* characters */
@@ -1887,74 +1887,57 @@ binary_to_atom(Process* proc, Eterm bin, Eterm enc, int must_exist)
byte* bytes;
byte *temp_alloc = NULL;
Uint bin_size;
+ Eterm a;
if ((bytes = erts_get_aligned_binary_bytes(bin, &temp_alloc)) == 0) {
BIF_ERROR(proc, BADARG);
}
bin_size = binary_size(bin);
+
if (enc == am_latin1) {
- Eterm a;
- if (bin_size > MAX_ATOM_CHARACTERS) {
- system_limit:
- erts_free_aligned_binary_bytes(temp_alloc);
- BIF_ERROR(proc, SYSTEM_LIMIT);
- }
if (!must_exist) {
- a = erts_atom_put((byte *) bytes,
- bin_size,
- ERTS_ATOM_ENC_LATIN1,
- 0);
- erts_free_aligned_binary_bytes(temp_alloc);
- if (is_non_value(a))
- goto badarg;
- BIF_RET(a);
- } else if (erts_atom_get((char *)bytes, bin_size, &a, ERTS_ATOM_ENC_LATIN1)) {
- erts_free_aligned_binary_bytes(temp_alloc);
- BIF_RET(a);
- } else {
+ int lix = erts_atom_put_index((byte *) bytes,
+ bin_size,
+ ERTS_ATOM_ENC_LATIN1,
+ 0);
+ if (lix == ATOM_BAD_ENCODING_ERROR) {
+ badarg:
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_ERROR(proc, BADARG);
+ } else if (lix == ATOM_MAX_CHARS_ERROR) {
+ system_limit:
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_ERROR(proc, SYSTEM_LIMIT);
+ }
+
+ a = make_atom(lix);
+ } else if (!erts_atom_get((char *)bytes, bin_size, &a, ERTS_ATOM_ENC_LATIN1)) {
goto badarg;
}
- } else if (enc == am_utf8 || enc == am_unicode) {
- Eterm res;
- Uint num_chars = 0;
- const byte* p = bytes;
- Uint left = bin_size;
- while (left) {
- if (++num_chars > MAX_ATOM_CHARACTERS) {
+ } else if (enc == am_utf8 || enc == am_unicode) {
+ if (!must_exist) {
+ int uix = erts_atom_put_index((byte *) bytes,
+ bin_size,
+ ERTS_ATOM_ENC_UTF8,
+ 0);
+ if (uix == ATOM_BAD_ENCODING_ERROR) {
+ goto badarg;
+ } else if (uix == ATOM_MAX_CHARS_ERROR) {
goto system_limit;
}
- if ((p[0] & 0x80) == 0) {
- ++p;
- --left;
- }
- else if (left >= 2
- && (p[0] & 0xFE) == 0xC2 /* only allow latin1 subset */
- && (p[1] & 0xC0) == 0x80) {
- p += 2;
- left -= 2;
- }
- else goto badarg;
- }
- if (!must_exist) {
- res = erts_atom_put((byte *) bytes,
- bin_size,
- ERTS_ATOM_ENC_UTF8,
- 0);
+ a = make_atom(uix);
}
- else if (!erts_atom_get((char*)bytes, bin_size, &res, ERTS_ATOM_ENC_UTF8)) {
+ else if (!erts_atom_get((char*)bytes, bin_size, &a, ERTS_ATOM_ENC_UTF8)) {
goto badarg;
}
- erts_free_aligned_binary_bytes(temp_alloc);
- if (is_non_value(res))
- goto badarg;
- BIF_RET(res);
} else {
- badarg:
- erts_free_aligned_binary_bytes(temp_alloc);
- BIF_ERROR(proc, BADARG);
+ goto badarg;
}
+
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_RET(a);
}
BIF_RETTYPE binary_to_atom_2(BIF_ALIST_2)
@@ -2005,7 +1988,7 @@ char *erts_convert_filename_to_encoding(Eterm name, char *statbuf, size_t statbu
is_list(name) ||
(allow_empty && is_nil(name))) {
Sint need;
- if ((need = erts_native_filename_need(name,encoding)) < 0) {
+ if ((need = erts_native_filename_need(name, encoding)) < 0) {
return NULL;
}
if (encoding == ERL_FILENAME_WIN_WCHAR) {
@@ -2015,7 +1998,7 @@ char *erts_convert_filename_to_encoding(Eterm name, char *statbuf, size_t statbu
++need;
}
if (used)
- *used = (Sint) need;
+ *used = need;
if (need+extra > statbuf_size) {
name_buf = (char *) erts_alloc(alloc_type, need+extra);
} else {
@@ -2126,6 +2109,8 @@ Eterm erts_convert_native_to_filename(Process *p, byte *bytes)
mac = 1;
case ERL_FILENAME_UTF8:
size = strlen((char *) bytes);
+ if (size == 0)
+ return NIL;
if (erts_analyze_utf8(bytes,size,&err_pos,&num_chars,NULL) != ERTS_UTF8_OK) {
goto noconvert;
}
@@ -2167,12 +2152,13 @@ Eterm erts_convert_native_to_filename(Process *p, byte *bytes)
}
-Sint erts_native_filename_need(Eterm ioterm, int encoding)
+Sint erts_native_filename_need(Eterm ioterm, int encoding)
{
Eterm *objp;
Eterm obj;
DECLARE_ESTACK(stack);
Sint need = 0;
+ int seen_null = 0;
if (is_atom(ioterm)) {
Atom* ap;
@@ -2209,6 +2195,22 @@ Sint erts_native_filename_need(Eterm ioterm, int encoding)
default:
need = -1;
}
+ /*
+ * Do not allow null in
+ * the middle of filenames
+ */
+ if (need > 0) {
+ byte *name = ap->name;
+ int len = ap->len;
+ for (i = 0; i < len; i++) {
+ if (name[i] == 0)
+ seen_null = 1;
+ else if (seen_null) {
+ need = -1;
+ break;
+ }
+ }
+ }
DESTROY_ESTACK(stack);
return need;
}
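The seen_null logic added above implements a simple rule: trailing NUL bytes (explicit zero termination) are tolerated, but any non-NUL byte after a NUL makes the name invalid. A standalone sketch of the same scan (filename_nul_ok is a hypothetical helper, not an ERTS API):

    #include <stddef.h>

    static int filename_nul_ok(const unsigned char *name, size_t len)
    {
        int seen_null = 0;
        size_t i;
        for (i = 0; i < len; i++) {
            if (name[i] == 0)
                seen_null = 1;
            else if (seen_null)
                return 0;   /* NUL in the middle: rejected (need = -1 above) */
        }
        return 1;
    }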
@@ -2239,6 +2241,16 @@ L_Again: /* Restart with sublist, old listend was pushed on stack */
if (is_small(obj)) { /* Always small */
for(;;) {
Uint x = unsigned_val(obj);
+ /*
+ * Do not allow null in
+ * the middle of filenames
+ */
+ if (x == 0)
+ seen_null = 1;
+ else if (seen_null) {
+ DESTROY_ESTACK(stack);
+ return ((Sint) -1);
+ }
switch (encoding) {
case ERL_FILENAME_LATIN1:
if (x > 255) {
@@ -2504,7 +2516,7 @@ void erts_copy_utf8_to_utf16_little(byte *target, byte *bytes, int num_chars)
((Uint) (bytes[3] & ((byte) 0x3F)));
bytes += 4;
} else {
- erl_exit(1,"Internal unicode error in prim_file:internal_name2native/1");
+ erts_exit(ERTS_ERROR_EXIT,"Internal unicode error in prim_file:internal_name2native/1");
}
*target++ = (byte) (unipoint & 0xFF);
*target++ = (byte) ((unipoint >> 8) & 0xFF);
@@ -2512,6 +2524,38 @@ void erts_copy_utf8_to_utf16_little(byte *target, byte *bytes, int num_chars)
}
/*
+ * *** Requirements on Raw Filename Format ***
+ *
+ * These requirements are due to the 'filename' module
+ * in stdlib, since it is documented to operate on raw
+ * filenames as well as ordinary filenames.
+ *
+ * A raw filename *must* be a byte sequence where:
+ * 1. Codepoints 0-127 (7-bit ascii) *must* be encoded
+ * as a byte with the corresponding value. That is,
+ * the most significant bit in the byte encoding the
+ * codepoint is never set.
+ * 2. Codepoints greater than 127 *must* be encoded
+ * with the most significant bit set in *every* byte
+ * encoding it.
+ *
+ * Latin1 and UTF-8 meet these requirements while
+ * UTF-16 and UTF-32 don't.
+ *
+ * On Windows, filenames are natively stored as malformed
+ * UTF-16LE (lonely surrogates may appear); a more accurate
+ * description than UTF-16 would be an array of 16-bit
+ * words. In order to meet the requirements of the raw
+ * filename format, we convert the malformed UTF-16LE to
+ * malformed UTF-8, which meets the requirements.
+ *
+ * Note that these requirements are today only OTP
+ * internal (erts-stdlib internal) requirements that
+ * could be changed.
+ */
+
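One practical consequence of the two requirements above: a byte with the most significant bit clear is always a real 7-bit character and never part of the encoding of a larger codepoint, so code can scan a raw filename for ASCII separators without decoding it. A sketch under that assumption (raw_filename_last_sep is a hypothetical helper):

    #include <stddef.h>

    static const unsigned char *
    raw_filename_last_sep(const unsigned char *name, size_t len)
    {
        const unsigned char *last = NULL;
        size_t i;
        for (i = 0; i < len; i++) {
            /* '/' is below 0x80, and by rule 2 every byte of a larger
             * codepoint has its msb set, so this match is unambiguous */
            if (name[i] == '/')
                last = &name[i];
        }
        return last;    /* NULL when no separator is present */
    }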
+/*
* This internal bif converts a filename to whatever format is suitable for the file driver
* It also adds zero termination so that prim_file needn't bother with the character encoding
* of the file driver
@@ -2522,6 +2566,12 @@ BIF_RETTYPE prim_file_internal_name2native_1(BIF_ALIST_1)
Sint need;
Eterm bin_term;
byte* bin_p;
+
+ /*
+ * See comment on "Requirements on Raw Filename Format"
+ * above.
+ */
+
/* Prim file explicitly does not allow atoms, although we could
very well cope with it. Instead of letting 'file' handle them,
it would probably be more efficient to handle them here. Subject to
@@ -2530,6 +2580,7 @@ BIF_RETTYPE prim_file_internal_name2native_1(BIF_ALIST_1)
BIF_ERROR(BIF_P,BADARG);
}
if (is_binary(BIF_ARG_1)) {
+ int seen_null = 0;
byte *temp_alloc = NULL;
byte *bytes;
byte *err_pos;
@@ -2539,10 +2590,18 @@ BIF_RETTYPE prim_file_internal_name2native_1(BIF_ALIST_1)
size = binary_size(BIF_ARG_1);
bytes = erts_get_aligned_binary_bytes(BIF_ARG_1, &temp_alloc);
if (encoding != ERL_FILENAME_WIN_WCHAR) {
+ Uint i;
/*Add 0 termination only*/
bin_term = new_binary(BIF_P, NULL, size+1);
bin_p = binary_bytes(bin_term);
- memcpy(bin_p,bytes,size);
+ for (i = 0; i < size; i++) {
+ /* Don't allow null in the middle of filenames... */
+ if (bytes[i] == 0)
+ seen_null = 1;
+ else if (seen_null)
+ goto bin_name_error;
+ bin_p[i] = bytes[i];
+ }
bin_p[size]=0;
erts_free_aligned_binary_bytes(temp_alloc);
BIF_RET(bin_term);
@@ -2556,6 +2615,11 @@ BIF_RETTYPE prim_file_internal_name2native_1(BIF_ALIST_1)
bin_term = new_binary(BIF_P, 0, (size+1)*2);
bin_p = binary_bytes(bin_term);
while (size--) {
+ /* Don't allow null in the middle of filenames... */
+ if (*bytes == 0)
+ seen_null = 1;
+ else if (seen_null)
+ goto bin_name_error;
*bin_p++ = *bytes++;
*bin_p++ = 0;
}
@@ -2573,11 +2637,14 @@ BIF_RETTYPE prim_file_internal_name2native_1(BIF_ALIST_1)
bin_p[num_chars*2+1] = 0;
erts_free_aligned_binary_bytes(temp_alloc);
BIF_RET(bin_term);
+ bin_name_error:
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_ERROR(BIF_P,BADARG);
} /* binary */
- if ((need = erts_native_filename_need(BIF_ARG_1,encoding)) < 0) {
- BIF_ERROR(BIF_P,BADARG);
+ if ((need = erts_native_filename_need(BIF_ARG_1, encoding)) < 0) {
+ BIF_ERROR(BIF_P,BADARG);
}
if (encoding == ERL_FILENAME_WIN_WCHAR) {
need += 2;
@@ -2611,6 +2678,11 @@ BIF_RETTYPE prim_file_internal_native2name_1(BIF_ALIST_1)
Eterm ret;
int mac = 0;
+ /*
+ * See comment on "Requirements on Raw Filename Format"
+ * above.
+ */
+
if (is_not_binary(BIF_ARG_1)) {
BIF_ERROR(BIF_P,BADARG);
}
diff --git a/erts/emulator/beam/erl_unicode.h b/erts/emulator/beam/erl_unicode.h
index 1b63b797c2..e01eaa787e 100644
--- a/erts/emulator/beam/erl_unicode.h
+++ b/erts/emulator/beam/erl_unicode.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2008-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2008-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
diff --git a/erts/emulator/beam/erl_unicode_normalize.h b/erts/emulator/beam/erl_unicode_normalize.h
index fb0a111ca2..21e2a52544 100644
--- a/erts/emulator/beam/erl_unicode_normalize.h
+++ b/erts/emulator/beam/erl_unicode_normalize.h
@@ -1,21 +1,22 @@
/*
-* %CopyrightBegin%
-*
-* Copyright Ericsson AB 1999-2010. All Rights Reserved.
-*
-* The contents of this file are subject to the Erlang Public License,
-* Version 1.1, (the "License"); you may not use this file except in
-* compliance with the License. You should have received a copy of the
-* Erlang Public License along with this software. If not, it can be
-* retrieved online at http://www.erlang.org/.
-*
-* Software distributed under the License is distributed on an "AS IS"
-* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-* the License for the specific language governing rights and limitations
-* under the License.
-*
-* %CopyrightEnd%
-*/
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1999-2016. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
/*
* This file is automatically generated by dec.erl, do not edit manually
*/
diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h
index 0807649ea1..44d8c85867 100644
--- a/erts/emulator/beam/erl_utils.h
+++ b/erts/emulator/beam/erl_utils.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2012-2014. All Rights Reserved.
+ * Copyright Ericsson AB 2012-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -21,123 +22,37 @@
#define ERL_UTILS_H__
#include "sys.h"
-#include "erl_smp.h"
#include "erl_printf.h"
struct process;
typedef struct {
-#ifdef DEBUG
- int smp_api;
-#endif
union {
Uint64 not_atomic;
-#ifdef ARCH_64
- erts_atomic_t atomic;
-#else
- erts_dw_atomic_t atomic;
-#endif
+ erts_atomic64_t atomic;
} counter;
} erts_interval_t;
void erts_interval_init(erts_interval_t *);
-void erts_smp_interval_init(erts_interval_t *);
Uint64 erts_step_interval_nob(erts_interval_t *);
Uint64 erts_step_interval_relb(erts_interval_t *);
-Uint64 erts_smp_step_interval_nob(erts_interval_t *);
-Uint64 erts_smp_step_interval_relb(erts_interval_t *);
Uint64 erts_ensure_later_interval_nob(erts_interval_t *, Uint64);
Uint64 erts_ensure_later_interval_acqb(erts_interval_t *, Uint64);
-Uint64 erts_smp_ensure_later_interval_nob(erts_interval_t *, Uint64);
-Uint64 erts_smp_ensure_later_interval_acqb(erts_interval_t *, Uint64);
-#ifdef ARCH_32
-ERTS_GLB_INLINE Uint64 erts_interval_dw_aint_to_val__(erts_dw_aint_t *);
-#endif
-ERTS_GLB_INLINE Uint64 erts_current_interval_nob__(erts_interval_t *);
-ERTS_GLB_INLINE Uint64 erts_current_interval_acqb__(erts_interval_t *);
ERTS_GLB_INLINE Uint64 erts_current_interval_nob(erts_interval_t *);
ERTS_GLB_INLINE Uint64 erts_current_interval_acqb(erts_interval_t *);
-ERTS_GLB_INLINE Uint64 erts_smp_current_interval_nob(erts_interval_t *);
-ERTS_GLB_INLINE Uint64 erts_smp_current_interval_acqb(erts_interval_t *);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-#ifdef ARCH_32
-
-ERTS_GLB_INLINE Uint64
-erts_interval_dw_aint_to_val__(erts_dw_aint_t *dw)
-{
-#ifdef ETHR_SU_DW_NAINT_T__
- return (Uint64) dw->dw_sint;
-#else
- Uint64 res;
- res = (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_HIGH_WORD]);
- res <<= 32;
- res |= (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_LOW_WORD]);
- return res;
-#endif
-}
-
-#endif
-
-ERTS_GLB_INLINE Uint64
-erts_current_interval_nob__(erts_interval_t *icp)
-{
-#ifdef ARCH_64
- return (Uint64) erts_atomic_read_nob(&icp->counter.atomic);
-#else
- erts_dw_aint_t dw;
- erts_dw_atomic_read_nob(&icp->counter.atomic, &dw);
- return erts_interval_dw_aint_to_val__(&dw);
-#endif
-}
-
-ERTS_GLB_INLINE Uint64
-erts_current_interval_acqb__(erts_interval_t *icp)
-{
-#ifdef ARCH_64
- return (Uint64) erts_atomic_read_acqb(&icp->counter.atomic);
-#else
- erts_dw_aint_t dw;
- erts_dw_atomic_read_acqb(&icp->counter.atomic, &dw);
- return erts_interval_dw_aint_to_val__(&dw);
-#endif
-}
-
ERTS_GLB_INLINE Uint64
erts_current_interval_nob(erts_interval_t *icp)
{
- ASSERT(!icp->smp_api);
- return erts_current_interval_nob__(icp);
+ return (Uint64) erts_atomic64_read_nob(&icp->counter.atomic);
}
ERTS_GLB_INLINE Uint64
erts_current_interval_acqb(erts_interval_t *icp)
{
- ASSERT(!icp->smp_api);
- return erts_current_interval_acqb__(icp);
-}
-
-ERTS_GLB_INLINE Uint64
-erts_smp_current_interval_nob(erts_interval_t *icp)
-{
- ASSERT(icp->smp_api);
-#ifdef ERTS_SMP
- return erts_current_interval_nob__(icp);
-#else
- return icp->counter.not_atomic;
-#endif
-}
-
-ERTS_GLB_INLINE Uint64
-erts_smp_current_interval_acqb(erts_interval_t *icp)
-{
- ASSERT(icp->smp_api);
-#ifdef ERTS_SMP
- return erts_current_interval_acqb__(icp);
-#else
- return icp->counter.not_atomic;
-#endif
+ return (Uint64) erts_atomic64_read_acqb(&icp->counter.atomic);
}
#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
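With the SMP/non-SMP split removed, erts_interval_t above boils down to a single 64-bit atomic. A standalone analogue in C11 atomics, with the suffix conventions mirrored from the names above ("nob" = no barrier, "acqb" = acquire barrier); illustrative only, not the ERTS implementation:

    #include <stdatomic.h>
    #include <stdint.h>

    typedef struct { _Atomic uint64_t counter; } interval_t;

    static uint64_t step_interval_nob(interval_t *ic)
    {
        /* relaxed: only atomicity is required, no ordering */
        return atomic_fetch_add_explicit(&ic->counter, 1,
                                         memory_order_relaxed) + 1;
    }

    static uint64_t current_interval_acqb(interval_t *ic)
    {
        /* acquire: later reads cannot be reordered before this load */
        return atomic_load_explicit(&ic->counter, memory_order_acquire);
    }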
@@ -150,12 +65,13 @@ void erts_silence_warn_unused_result(long unused);
int erts_fit_in_bits_int64(Sint64);
int erts_fit_in_bits_int32(Sint32);
-int erts_list_length(Eterm);
+int erts_fit_in_bits_uint(Uint);
+Sint erts_list_length(Eterm);
int erts_is_builtin(Eterm, Eterm, int);
-Uint32 make_broken_hash(Eterm);
Uint32 block_hash(byte *, unsigned, Uint32);
Uint32 make_hash2(Eterm);
Uint32 make_hash(Eterm);
+Uint32 make_internal_hash(Eterm, Uint32 salt);
void erts_save_emu_args(int argc, char **argv);
Eterm erts_get_emu_args(struct process *c_p);
@@ -166,6 +82,7 @@ Eterm erts_bld_uint(Uint **hpp, Uint *szp, Uint ui);
Eterm erts_bld_uword(Uint **hpp, Uint *szp, UWord uw);
Eterm erts_bld_uint64(Uint **hpp, Uint *szp, Uint64 ui64);
Eterm erts_bld_sint64(Uint **hpp, Uint *szp, Sint64 si64);
+#define erts_bld_monotonic_time erts_bld_sint64
Eterm erts_bld_cons(Uint **hpp, Uint *szp, Eterm car, Eterm cdr);
Eterm erts_bld_tuple(Uint **hpp, Uint *szp, Uint arity, ...);
#define erts_bld_tuple2(H,S,E1,E2) erts_bld_tuple(H,S,2,E1,E2)
@@ -191,49 +108,46 @@ void erts_init_utils_mem(void);
erts_dsprintf_buf_t *erts_create_tmp_dsbuf(Uint);
void erts_destroy_tmp_dsbuf(erts_dsprintf_buf_t *);
-#if HALFWORD_HEAP
-int eq_rel(Eterm a, Eterm* a_base, Eterm b, Eterm* b_base);
-# define eq(A,B) eq_rel(A,NULL,B,NULL)
-#else
int eq(Eterm, Eterm);
-# define eq_rel(A,A_BASE,B,B_BASE) eq(A,B)
-#endif
#define EQ(x,y) (((x) == (y)) || (is_not_both_immed((x),(y)) && eq((x),(y))))
-#if HALFWORD_HEAP
-Sint erts_cmp_rel_opt(Eterm, Eterm*, Eterm, Eterm*, int);
-#define cmp_rel(A,A_BASE,B,B_BASE) erts_cmp_rel_opt(A,A_BASE,B,B_BASE,0)
-#define cmp_rel_term(A,A_BASE,B,B_BASE) erts_cmp_rel_opt(A,A_BASE,B,B_BASE,1)
-#define CMP(A,B) erts_cmp_rel_opt(A,NULL,B,NULL,0)
-#define CMP_TERM(A,B) erts_cmp_rel_opt(A,NULL,B,NULL,1)
-#else
-Sint cmp(Eterm, Eterm);
-Sint erts_cmp(Eterm, Eterm, int);
-#define cmp_rel(A,A_BASE,B,B_BASE) erts_cmp(A,B,0)
-#define cmp_rel_term(A,A_BASE,B,B_BASE) erts_cmp(A,B,1)
-#define CMP(A,B) erts_cmp(A,B,0)
-#define CMP_TERM(A,B) erts_cmp(A,B,1)
-#endif
-
-#define cmp_lt(a,b) (CMP((a),(b)) < 0)
-#define cmp_le(a,b) (CMP((a),(b)) <= 0)
-#define cmp_eq(a,b) (CMP((a),(b)) == 0)
-#define cmp_ne(a,b) (CMP((a),(b)) != 0)
-#define cmp_ge(a,b) (CMP((a),(b)) >= 0)
-#define cmp_gt(a,b) (CMP((a),(b)) > 0)
-
-#define cmp_lt_term(a,b) (CMP_TERM((a),(b)) < 0)
-#define cmp_le_term(a,b) (CMP_TERM((a),(b)) <= 0)
-#define cmp_ge_term(a,b) (CMP_TERM((a),(b)) >= 0)
-#define cmp_gt_term(a,b) (CMP_TERM((a),(b)) > 0)
-
-#define CMP_LT(a,b) ((a) != (b) && cmp_lt((a),(b)))
-#define CMP_GE(a,b) ((a) == (b) || cmp_ge((a),(b)))
-#define CMP_EQ(a,b) ((a) == (b) || cmp_eq((a),(b)))
-#define CMP_NE(a,b) ((a) != (b) && cmp_ne((a),(b)))
-
-#define CMP_LT_TERM(a,b) ((a) != (b) && cmp_lt_term((a),(b)))
-#define CMP_GE_TERM(a,b) ((a) == (b) || cmp_ge_term((a),(b)))
+int erts_cmp_atoms(Eterm a, Eterm b);
+Sint erts_cmp(Eterm, Eterm, int, int);
+Sint erts_cmp_compound(Eterm, Eterm, int, int);
+Sint cmp(Eterm a, Eterm b);
+#define CMP(A,B) erts_cmp(A,B,0,0)
+#define CMP_TERM(A,B) erts_cmp(A,B,1,0)
+#define CMP_EQ_ONLY(A,B) erts_cmp(A,B,0,1)
+
+#define CMP_LT(a,b) ((a) != (b) && CMP((a),(b)) < 0)
+#define CMP_LE(a,b) ((a) == (b) || CMP((a),(b)) <= 0)
+#define CMP_EQ(a,b) ((a) == (b) || CMP_EQ_ONLY((a),(b)) == 0)
+#define CMP_NE(a,b) ((a) != (b) && CMP_EQ_ONLY((a),(b)) != 0)
+#define CMP_GE(a,b) ((a) == (b) || CMP((a),(b)) >= 0)
+#define CMP_GT(a,b) ((a) != (b) && CMP((a),(b)) > 0)
+
+#define CMP_EQ_ACTION(X,Y,Action) \
+ if ((X) != (Y)) { CMP_SPEC((X),(Y),!=,Action,1); }
+#define CMP_NE_ACTION(X,Y,Action) \
+ if ((X) == (Y)) { Action; } else { CMP_SPEC((X),(Y),==,Action,1); }
+#define CMP_GE_ACTION(X,Y,Action) \
+ if ((X) != (Y)) { CMP_SPEC((X),(Y),<,Action,0); }
+#define CMP_LT_ACTION(X,Y,Action) \
+ if ((X) == (Y)) { Action; } else { CMP_SPEC((X),(Y),>=,Action,0); }
+
+#define CMP_SPEC(X,Y,Op,Action,EqOnly) \
+ if (is_atom(X) && is_atom(Y)) { \
+ if (erts_cmp_atoms(X, Y) Op 0) { Action; }; \
+ } else if (is_both_small(X, Y)) { \
+ if (signed_val(X) Op signed_val(Y)) { Action; }; \
+ } else if (is_float(X) && is_float(Y)) { \
+ FloatDef af, bf; \
+ GET_DOUBLE(X, af); \
+ GET_DOUBLE(Y, bf); \
+ if (af.fd Op bf.fd) { Action; }; \
+ } else { \
+ if (erts_cmp_compound(X,Y,0,EqOnly) Op 0) { Action; }; \
+ }
#endif
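The CMP_SPEC dispatch above inlines the cheap cases (atoms, small integers, floats) and only falls back to erts_cmp_compound() for general terms. A self-contained analogue of that shape, with made-up types and names (term_t, deep_cmp) standing in for the ERTS ones:

    #include <stdio.h>
    #include <string.h>

    typedef struct { int is_small; long small; const char *big; } term_t;

    /* stand-in for the expensive general comparator (erts_cmp_compound) */
    static int deep_cmp(const term_t *a, const term_t *b)
    {
        return strcmp(a->big, b->big);
    }

    #define MY_CMP_LT_ACTION(X, Y, Action)                    \
        do {                                                  \
            if ((X)->is_small && (Y)->is_small) {             \
                if ((X)->small < (Y)->small) { Action; }      \
            } else if (deep_cmp((X), (Y)) < 0) { Action; }    \
        } while (0)

    int main(void)
    {
        term_t a = {1, 3, NULL}, b = {1, 7, NULL};
        MY_CMP_LT_ACTION(&a, &b, puts("a < b via the small fast path"));
        return 0;
    }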
diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h
index b7de8208ad..76980b5871 100644
--- a/erts/emulator/beam/erl_vm.h
+++ b/erts/emulator/beam/erl_vm.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2014. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -20,8 +21,6 @@
#ifndef __ERL_VM_H__
#define __ERL_VM_H__
-/* #define ERTS_OPCODE_COUNTER_SUPPORT */
-
/* FORCE_HEAP_FRAGS:
* Debug provocation to make HAlloc always create heap fragments (if allowed)
* even if there is room on heap.
@@ -37,33 +36,24 @@
#define EMULATOR "BEAM"
#define SEQ_TRACE 1
-#define CONTEXT_REDS 2000 /* Swap process out after this number */
+#define CONTEXT_REDS 4000 /* Swap process out after this number */
#define MAX_ARG 255 /* Max number of arguments allowed */
#define MAX_REG 1024 /* Max number of x(N) registers used */
-/* Scheduler stores data for temporary heaps if
- !HEAP_ON_C_STACK. Macros (*TmpHeap*) in global.h selects if we put temporary
- heap data on the C stack or if we use the buffers in the scheduler data. */
-#define TMP_HEAP_SIZE 128 /* Number of Eterm in the schedulers
- small heap for transient heap data */
-#define ERL_ARITH_TMP_HEAP_SIZE 4 /* as does erl_arith... */
-#define BEAM_EMU_TMP_HEAP_SIZE 2 /* and beam_emu... */
-
/*
* The new arithmetic operations need some extra X registers in the register array.
* so does the gc_bif's (i_gc_bif3 need 3 extra).
*/
#define ERTS_X_REGS_ALLOCATED (MAX_REG+3)
-#define INPUT_REDUCTIONS (2 * CONTEXT_REDS)
-
#define H_DEFAULT_SIZE 233 /* default (heap + stack) min size */
#define VH_DEFAULT_SIZE 32768 /* default virtual (bin) heap min size (words) */
+#define H_DEFAULT_MAX_SIZE 0 /* default max heap size is off */
#define CP_SIZE 1
#define ErtsHAllocLockCheck(P) \
- ERTS_SMP_LC_ASSERT(erts_dbg_check_halloc_lock((P)))
+ ERTS_LC_ASSERT(erts_dbg_check_halloc_lock((P)))
#ifdef DEBUG
@@ -110,19 +100,30 @@
if ((ptr) == (endp)) { \
; \
} else if (HEAP_START(p) <= (ptr) && (ptr) < HEAP_TOP(p)) { \
+ ASSERT(HEAP_TOP(p) == (endp)); \
HEAP_TOP(p) = (ptr); \
} else { \
- erts_heap_frag_shrink(p, ptr); \
+ ASSERT(MBUF(p)->mem + MBUF(p)->used_size == (endp)); \
+ erts_heap_frag_shrink(p, ptr); \
}
#define HeapWordsLeft(p) (HEAP_LIMIT(p) - HEAP_TOP(p))
#if defined(DEBUG) || defined(CHECK_FOR_HOLES)
-#if HALFWORD_HEAP
-# define ERTS_HOLE_MARKER (0xaf5e78ccU)
-#else
-# define ERTS_HOLE_MARKER (((0xaf5e78ccUL << 24) << 8) | 0xaf5e78ccUL)
-#endif
+
+/*
+ * ERTS_HOLE_MARKER must *not* be mistaken for a valid term
+ * on the heap...
+ */
+# ifdef ARCH_64
+# define ERTS_HOLE_MARKER \
+ make_catch(UWORD_CONSTANT(0xdeadbeaf00000000) >> _TAG_IMMED2_SIZE)
+/* Will (at the time of writing) appear as 0xdeadbeaf0000001b */
+# else
+# define ERTS_HOLE_MARKER \
+ make_catch(UWORD_CONSTANT(0xdead0000) >> _TAG_IMMED2_SIZE)
+/* Will (at the time of writing) appear as 0xdead001b */
+# endif
#endif
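The marker above is chosen so it can never be mistaken for a live term: debug builds pre-fill freshly allocated heap with it, and any marker word still present below the heap top later indicates a "hole", a word that was allocated but never written. A rough sketch of such a scan (the value and helper name are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define HOLE_MARKER ((uintptr_t)0xdeadbeaf0000001bULL)

    static void check_for_holes(const uintptr_t *start, const uintptr_t *htop)
    {
        const uintptr_t *p;
        for (p = start; p < htop; p++)
            if (*p == HOLE_MARKER)
                fprintf(stderr, "hole: uninitialized heap word at %p\n",
                        (void *)p);
    }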
/*
@@ -154,14 +155,17 @@
typedef struct op_entry {
char* name; /* Name of instruction. */
Uint32 mask[3]; /* Signature mask. */
+ unsigned involves_r; /* Needs special attention when matching. */
int sz; /* Number of loaded words. */
+ int adjust; /* Adjustment for start of instruction. */
char* pack; /* Instructions for packing engine. */
char* sign; /* Signature string. */
- unsigned count; /* Number of times executed. */
} OpEntry;
-extern OpEntry opc[]; /* Description of all instructions. */
-extern int num_instructions; /* Number of instruction in opc[]. */
+extern const OpEntry opc[]; /* Description of all instructions. */
+extern const int num_instructions; /* Number of instructions in opc[]. */
+
+extern Uint erts_instr_count[];
/* some constants for various table sizes etc */
@@ -172,11 +176,13 @@ extern int num_instructions; /* Number of instruction in opc[]. */
extern int H_MIN_SIZE; /* minimum (heap + stack) */
extern int BIN_VH_MIN_SIZE; /* minimum virtual (bin) heap */
+extern int H_MAX_SIZE; /* maximum (heap + stack) */
+extern int H_MAX_FLAGS; /* maximum heap flags */
extern int erts_atom_table_size;/* Atom table size */
+extern int erts_pd_initial_size;/* Initial Process dictionary table size */
#define ORIG_CREATION 0
-#define INTERNAL_CREATION 255
/* macros for extracting bytes from uint16's */
@@ -194,4 +200,24 @@ extern int erts_atom_table_size;/* Atom table size */
#include "erl_term.h"
+#if defined(NO_JUMP_TABLE)
+# define BeamOpsAreInitialized() (1)
+# define BeamOpCodeAddr(OpCode) ((BeamInstr)(OpCode))
+#else
+extern void** beam_ops;
+# define BeamOpsAreInitialized() (beam_ops != 0)
+# define BeamOpCodeAddr(OpCode) ((BeamInstr)beam_ops[(OpCode)])
+#endif
+
+#if defined(ARCH_64) && defined(CODE_MODEL_SMALL)
+# define BeamCodeAddr(InstrWord) ((BeamInstr)(Uint32)(InstrWord))
+# define BeamSetCodeAddr(InstrWord, Addr) (((InstrWord) & ~((1ull << 32)-1)) | (Addr))
+# define BeamExtraData(InstrWord) ((InstrWord) >> 32)
+#else
+# define BeamCodeAddr(InstrWord) ((BeamInstr)(InstrWord))
+# define BeamSetCodeAddr(InstrWord, Addr) (Addr)
+#endif
+
+#define BeamIsOpCode(InstrWord, OpCode) (BeamCodeAddr(InstrWord) == BeamOpCodeAddr(OpCode))
+
#endif /* __ERL_VM_H__ */
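The BeamOpCodeAddr()/BeamIsOpCode() pair above reflects threaded dispatch: when computed gotos are available, each loaded instruction word stores the address of its handler, so an opcode test compares against the handler table entry. A toy sketch using the GCC/Clang labels-as-values extension (not portable C, and not the BEAM loop itself):

    #include <stdio.h>

    int main(void)
    {
        static void *ops[] = { &&op_print, &&op_halt }; /* cf. beam_ops */
        void *code[] = { ops[0], ops[1] };              /* "loaded" code */
        void **pc = code;

        if (code[0] == ops[0])          /* cf. BeamIsOpCode(code[0], ...) */
            puts("first instruction is op_print");

        goto *(*pc);                    /* threaded dispatch */
    op_print:
        puts("op_print");
        pc++;
        goto *(*pc);
    op_halt:
        return 0;
    }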
diff --git a/erts/emulator/beam/erl_zlib.c b/erts/emulator/beam/erl_zlib.c
index 8e33144f96..944ff2e35f 100644
--- a/erts/emulator/beam/erl_zlib.c
+++ b/erts/emulator/beam/erl_zlib.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2009-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2009-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
diff --git a/erts/emulator/beam/erl_zlib.h b/erts/emulator/beam/erl_zlib.h
index 160166c66b..c83c6f291f 100644
--- a/erts/emulator/beam/erl_zlib.h
+++ b/erts/emulator/beam/erl_zlib.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2009-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2009-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
diff --git a/erts/emulator/beam/erlang_dtrace.d b/erts/emulator/beam/erlang_dtrace.d
index e3ebbb84f4..237889e0f5 100644
--- a/erts/emulator/beam/erlang_dtrace.d
+++ b/erts/emulator/beam/erlang_dtrace.d
@@ -1,19 +1,20 @@
/*
* %CopyrightBegin%
*
- * Copyright Dustin Sallings, Michal Ptaszek, Scott Lystig Fritchie 2011-2012.
+ * Copyright Dustin Sallings, Michal Ptaszek, Scott Lystig Fritchie 2011-2016.
* All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -699,6 +700,35 @@ provider erlang {
*/
probe efile_drv__return(int, int, char *, int, int, int);
+
+/*
+ * The set of probes called by the erlang tracer nif backend. To
+ * receive events from these probes you have to enable tracing both
+ * in erlang, using the trace bifs, and from dtrace/systemtap.
+ */
+
+ /**
+ * A trace message of type `event` was triggered by process `p`.
+ *
+ * @param p the PID (string form) of the process
+ * @param event the event that was triggered (e.g. call or spawn)
+ * @param state the state of the tracer nif as a string
+ * @param arg1 first argument to the trace event
+ * @param arg2 second argument to the trace event
+ */
+ probe trace(char *p, char *event, char *state, char *arg1, char *arg2);
+
+ /**
+ * A sequence trace message of type `label` was triggered.
+ *
+ * @param state the state of the tracer nif as a string
+ * @param label the seq trace label
+ * @param seq_info the seq trace info tuple as a string
+ */
+ probe trace_seq(char *state, char *label, char *seq_info);
+
/*
* NOTE:
* For formatting int64_t arguments within a D script, see:
diff --git a/erts/emulator/beam/erlang_lttng.c b/erts/emulator/beam/erlang_lttng.c
new file mode 100644
index 0000000000..fce40eedc1
--- /dev/null
+++ b/erts/emulator/beam/erlang_lttng.c
@@ -0,0 +1,32 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#ifdef USE_LTTNG
+#define TRACEPOINT_CREATE_PROBES
+/*
+ * The header containing our TRACEPOINT_EVENTs.
+ */
+#define TRACEPOINT_DEFINE
+#include "erlang_lttng.h"
+#endif /* USE_LTTNG */
diff --git a/erts/emulator/beam/erlang_lttng.h b/erts/emulator/beam/erlang_lttng.h
new file mode 100644
index 0000000000..feb05f4f4c
--- /dev/null
+++ b/erts/emulator/beam/erlang_lttng.h
@@ -0,0 +1,409 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef USE_LTTNG
+#undef TRACEPOINT_PROVIDER
+#define TRACEPOINT_PROVIDER org_erlang_otp
+
+#undef TRACEPOINT_INCLUDE
+#define TRACEPOINT_INCLUDE "erlang_lttng.h"
+
+#if !defined(__ERLANG_LTTNG_H__) || defined(TRACEPOINT_HEADER_MULTI_READ)
+#define __ERLANG_LTTNG_H__
+
+#include <lttng/tracepoint.h>
+
+/* Schedulers */
+
+TRACEPOINT_EVENT(
+ org_erlang_otp,
+ scheduler_poll,
+ TP_ARGS(
+ int, id,
+ int, runnable
+ ),
+ TP_FIELDS(
+ ctf_integer(int, scheduler, id)
+ ctf_integer(int, runnable, runnable)
+ )
+)
+
+#ifndef LTTNG_CARRIER_STATS
+#define LTTNG_CARRIER_STATS
+typedef struct {
+ unsigned long no;
+ unsigned long size;
+} lttng_stat_values_t;
+
+typedef struct {
+ lttng_stat_values_t carriers;
+ lttng_stat_values_t blocks;
+} lttng_carrier_stats_t;
+#endif
+
+
+/* Port and Driver Scheduling */
+
+TRACEPOINT_EVENT(
+ org_erlang_otp,
+ driver_start,
+ TP_ARGS(
+ char*, pid,
+ char*, driver,
+ char*, port
+ ),
+ TP_FIELDS(
+ ctf_string(pid, pid)
+ ctf_string(driver, driver)
+ ctf_string(port, port)
+ )
+)
+
+TRACEPOINT_EVENT(
+ org_erlang_otp,
+ driver_init,
+ TP_ARGS(
+ char*, driver,
+ int, major,
+ int, minor,
+ int, flags
+ ),
+ TP_FIELDS(
+ ctf_string(driver, driver)
+ ctf_integer(int, major, major)
+ ctf_integer(int, minor, minor)
+ ctf_integer(int, flags, flags)
+ )
+)
+
+TRACEPOINT_EVENT(
+ org_erlang_otp,
+ driver_outputv,
+ TP_ARGS(
+ char*, pid,
+ char*, port,
+ char*, driver,
+ size_t, bytes
+ ),
+ TP_FIELDS(
+ ctf_string(pid, pid)
+ ctf_string(port, port)
+ ctf_string(driver, driver)
+ ctf_integer(size_t, bytes, bytes)
+ )
+)
+
+TRACEPOINT_EVENT(
+ org_erlang_otp,
+ driver_output,
+ TP_ARGS(
+ char*, pid,
+ char*, port,
+ char*, driver,
+ size_t, bytes
+ ),
+ TP_FIELDS(
+ ctf_string(pid, pid)
+ ctf_string(port, port)
+ ctf_string(driver, driver)
+ ctf_integer(size_t, bytes, bytes)
+ )
+)
+
+TRACEPOINT_EVENT(
+ org_erlang_otp,
+ driver_ready_input,
+ TP_ARGS(
+ char*, pid,
+ char*, port,
+ char*, driver
+ ),
+ TP_FIELDS(
+ ctf_string(pid, pid)
+ ctf_string(port, port)
+ ctf_string(driver, driver)
+ )
+)
+
+TRACEPOINT_EVENT(
+ org_erlang_otp,
+ driver_ready_output,
+ TP_ARGS(
+ char*, pid,
+ char*, port,
+ char*, driver
+ ),
+ TP_FIELDS(
+ ctf_string(pid, pid)
+ ctf_string(port, port)
+ ctf_string(driver, driver)
+ )
+)
+
+TRACEPOINT_EVENT(
+ org_erlang_otp,
+ driver_timeout,
+ TP_ARGS(
+ char*, pid,
+ char*, port,
+ char*, driver
+ ),
+ TP_FIELDS(
+ ctf_string(pid, pid)
+ ctf_string(port, port)
+ ctf_string(driver, driver)
+ )
+)
+
+TRACEPOINT_EVENT(
+ org_erlang_otp,
+ driver_stop_select,
+ TP_ARGS(
+ char*, driver
+ ),
+ TP_FIELDS(
+ ctf_string(driver, driver)
+ )
+)
+
+TRACEPOINT_EVENT(
+ org_erlang_otp,
+ driver_flush,
+ TP_ARGS(
+ char*, pid,
+ char*, port,
+ char*, driver
+ ),
+ TP_FIELDS(
+ ctf_string(pid, pid)
+ ctf_string(port, port)
+ ctf_string(driver, driver)
+ )
+)
+
+TRACEPOINT_EVENT(
+ org_erlang_otp,
+ driver_stop,
+ TP_ARGS(
+ char*, pid,
+ char*, port,
+ char*, driver
+ ),
+ TP_FIELDS(
+ ctf_string(pid, pid)
+ ctf_string(port, port)
+ ctf_string(driver, driver)
+ )
+)
+
+TRACEPOINT_EVENT(
+ org_erlang_otp,
+ driver_process_exit,
+ TP_ARGS(
+ char*, pid,
+ char*, port,
+ char*, driver
+ ),
+ TP_FIELDS(
+ ctf_string(pid, pid)
+ ctf_string(port, port)
+ ctf_string(driver, driver)
+ )
+)
+
+TRACEPOINT_EVENT(
+ org_erlang_otp,
+ driver_ready_async,
+ TP_ARGS(
+ char*, pid,
+ char*, port,
+ char*, driver
+ ),
+ TP_FIELDS(
+ ctf_string(pid, pid)
+ ctf_string(port, port)
+ ctf_string(driver, driver)
+ )
+)
+
+TRACEPOINT_EVENT(
+ org_erlang_otp,
+ driver_finish,
+ TP_ARGS(
+ char*, driver
+ ),
+ TP_FIELDS(
+ ctf_string(driver, driver)
+ )
+)
+
+TRACEPOINT_EVENT(
+ org_erlang_otp,
+ driver_call,
+ TP_ARGS(
+ char*, pid,
+ char*, port,
+ char*, driver,
+ unsigned int, command,
+ size_t, bytes
+ ),
+ TP_FIELDS(
+ ctf_string(pid, pid)
+ ctf_string(port, port)
+ ctf_string(driver, driver)
+ ctf_integer(unsigned int, command, command)
+ ctf_integer(size_t, bytes, bytes)
+ )
+)
+
+TRACEPOINT_EVENT(
+ org_erlang_otp,
+ driver_control,
+ TP_ARGS(
+ char*, pid,
+ char*, port,
+ char*, driver,
+ unsigned int, command,
+ size_t, bytes
+ ),
+ TP_FIELDS(
+ ctf_string(pid, pid)
+ ctf_string(port, port)
+ ctf_string(driver, driver)
+ ctf_integer(unsigned int, command, command)
+ ctf_integer(size_t, bytes, bytes)
+ )
+)
+
+/* Async pool */
+
+TRACEPOINT_EVENT(
+ org_erlang_otp,
+ aio_pool_get,
+ TP_ARGS(
+ char*, port,
+ int, length
+ ),
+ TP_FIELDS(
+ ctf_string(port, port)
+ ctf_integer(int, length, length)
+ )
+)
+
+TRACEPOINT_EVENT(
+ org_erlang_otp,
+ aio_pool_put,
+ TP_ARGS(
+ char*, port,
+ int, length
+ ),
+ TP_FIELDS(
+ ctf_string(port, port)
+ ctf_integer(int, length, length)
+ )
+)
+
+
+/* Memory Allocator */
+
+TRACEPOINT_EVENT(
+ org_erlang_otp,
+ carrier_create,
+ TP_ARGS(
+ const char*, type,
+ int, instance,
+ unsigned long, size,
+ lttng_carrier_stats_t *, mbcs,
+ lttng_carrier_stats_t *, sbcs
+ ),
+ TP_FIELDS(
+ ctf_string(type, type)
+ ctf_integer(int, instance, instance)
+ ctf_integer(unsigned long, size, size)
+ ctf_integer(unsigned long, mbc_carriers, mbcs->carriers.no)
+ ctf_integer(unsigned long, mbc_carriers_size, mbcs->carriers.size)
+ ctf_integer(unsigned long, mbc_blocks, mbcs->blocks.no)
+ ctf_integer(unsigned long, mbc_blocks_size, mbcs->blocks.size)
+ ctf_integer(unsigned long, sbc_carriers, sbcs->carriers.no)
+ ctf_integer(unsigned long, sbc_carriers_size, sbcs->carriers.size)
+ ctf_integer(unsigned long, sbc_blocks, sbcs->blocks.no)
+ ctf_integer(unsigned long, sbc_blocks_size, sbcs->blocks.size)
+ )
+)
+
+
+TRACEPOINT_EVENT(
+ org_erlang_otp,
+ carrier_destroy,
+ TP_ARGS(
+ const char*, type,
+ int, instance,
+ unsigned long, size,
+ lttng_carrier_stats_t *, mbcs,
+ lttng_carrier_stats_t *, sbcs
+ ),
+ TP_FIELDS(
+ ctf_string(type, type)
+ ctf_integer(int, instance, instance)
+ ctf_integer(unsigned long, size, size)
+ ctf_integer(unsigned long, mbc_carriers, mbcs->carriers.no)
+ ctf_integer(unsigned long, mbc_carriers_size, mbcs->carriers.size)
+ ctf_integer(unsigned long, mbc_blocks, mbcs->blocks.no)
+ ctf_integer(unsigned long, mbc_blocks_size, mbcs->blocks.size)
+ ctf_integer(unsigned long, sbc_carriers, sbcs->carriers.no)
+ ctf_integer(unsigned long, sbc_carriers_size, sbcs->carriers.size)
+ ctf_integer(unsigned long, sbc_blocks, sbcs->blocks.no)
+ ctf_integer(unsigned long, sbc_blocks_size, sbcs->blocks.size)
+ )
+)
+
+TRACEPOINT_EVENT(
+ org_erlang_otp,
+ carrier_pool_put,
+ TP_ARGS(
+ const char*, name,
+ int, instance,
+ unsigned long, size
+ ),
+ TP_FIELDS(
+ ctf_string(type, name)
+ ctf_integer(int, instance, instance)
+ ctf_integer(unsigned long, size, size)
+ )
+)
+
+TRACEPOINT_EVENT(
+ org_erlang_otp,
+ carrier_pool_get,
+ TP_ARGS(
+ const char*, name,
+ int, instance,
+ unsigned long, size
+ ),
+ TP_FIELDS(
+ ctf_string(type, name)
+ ctf_integer(int, instance, instance)
+ ctf_integer(unsigned long, size, size)
+ )
+)
+
+#endif /* __ERLANG_LTTNG_H__ */
+#include <lttng/tracepoint-event.h>
+#endif /* USE_LTTNG */
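
The events above only declare the org_erlang_otp provider; a probe fires when the emulator calls the matching lttng-ust tracepoint() macro at run time. A minimal sketch of such a call site, assuming the standard lttng-ust API; the pid/port/driver strings here are illustrative only (the real call sites format them from emulator state):

    #include "erlang_lttng.h"   /* the provider header from this diff */

    static void example_probe_site(void)
    {
        /* hypothetical values; real sites derive these from the port task */
        char *pid = "<0.42.0>";
        char *port = "#Port<0.5>";
        char *driver = "tcp_inet";

        /* compiles to a cheap no-op unless a tracing session has
         * enabled the org_erlang_otp:driver_ready_input event */
        tracepoint(org_erlang_otp, driver_ready_input, pid, port, driver);
    }
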
diff --git a/erts/emulator/beam/error.h b/erts/emulator/beam/error.h
index ddc2c1396d..64c08b1570 100644
--- a/erts/emulator/beam/error.h
+++ b/erts/emulator/beam/error.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -20,6 +21,8 @@
#ifndef __ERROR_H__
#define __ERROR_H__
+#include "code_ix.h"
+
/*
* There are three primary exception classes:
*
@@ -36,14 +39,11 @@
*/
/*
- * Bits 0-1 index the 'exception class tag' table.
- */
-#define EXC_CLASSBITS 3
-#define GET_EXC_CLASS(x) ((x) & EXC_CLASSBITS)
-
-/*
* Exception class tags (indices into the 'exception_tag' array)
*/
+#define EXTAG_OFFSET 0
+#define EXTAG_BITS 2
+
#define EXTAG_ERROR 0
#define EXTAG_EXIT 1
#define EXTAG_THROWN 2
@@ -51,20 +51,31 @@
#define NUMBER_EXC_TAGS 3 /* The number of exception class tags */
/*
- * Exit code flags (bits 2-7)
+ * Index to the 'exception class tag' table.
+ */
+#define EXC_CLASSBITS ((1<<EXTAG_BITS)-1)
+#define GET_EXC_CLASS(x) ((x) & EXC_CLASSBITS)
+
+/*
+ * Exit code flags
*
 * These flags make it easier and quicker to decide what to do with the
 * exception in the early stages, before a handler is found, and also
 * maintain some separation between the class tag and the actions.
*/
-#define EXF_PANIC (1<<2) /* ignore catches */
-#define EXF_THROWN (1<<3) /* nonlocal return */
-#define EXF_LOG (1<<4) /* write to logger on termination */
-#define EXF_NATIVE (1<<5) /* occurred in native code */
-#define EXF_SAVETRACE (1<<6) /* save stack trace in internal form */
-#define EXF_ARGLIST (1<<7) /* has arglist for top of trace */
+#define EXF_OFFSET EXTAG_BITS
+#define EXF_BITS 7
+
+#define EXF_PANIC (1<<(0+EXF_OFFSET)) /* ignore catches */
+#define EXF_THROWN (1<<(1+EXF_OFFSET)) /* nonlocal return */
+#define EXF_LOG (1<<(2+EXF_OFFSET)) /* write to logger on termination */
+#define EXF_NATIVE (1<<(3+EXF_OFFSET)) /* occurred in native code */
+#define EXF_SAVETRACE (1<<(4+EXF_OFFSET)) /* save stack trace in internal form */
+#define EXF_ARGLIST (1<<(5+EXF_OFFSET)) /* has arglist for top of trace */
+#define EXF_RESTORE_NIF (1<<(6+EXF_OFFSET)) /* restore original bif/nif */
-#define EXC_FLAGBITS 0x00fc
+#define EXC_FLAGBITS (((1<<(EXF_BITS+EXF_OFFSET))-1) \
+ & ~((1<<(EXF_OFFSET))-1))
/*
* The primary fields of an exception code
@@ -74,11 +85,16 @@
#define NATIVE_EXCEPTION(x) ((x) | EXF_NATIVE)
/*
- * Bits 8-12 of the error code are used for indexing into
+ * Error code used for indexing into
* the short-hand error descriptor table.
*/
-#define EXC_INDEXBITS 0x1f00
-#define GET_EXC_INDEX(x) (((x) & EXC_INDEXBITS) >> 8)
+#define EXC_OFFSET (EXF_OFFSET+EXF_BITS)
+#define EXC_BITS 5
+
+#define EXC_INDEXBITS (((1<<(EXC_BITS+EXC_OFFSET))-1) \
+ & ~((1<<(EXC_OFFSET))-1))
+
+#define GET_EXC_INDEX(x) (((x) & EXC_INDEXBITS) >> EXC_OFFSET)
/*
* Exit codes used for raising a fresh exception. The primary exceptions
@@ -104,54 +120,62 @@
/* Error with given arglist term
* (exit reason in p->fvalue) */
-#define EXC_NORMAL ((1 << 8) | EXC_EXIT)
+#define EXC_NORMAL ((1 << EXC_OFFSET) | EXC_EXIT)
/* Normal exit (reason 'normal') */
-#define EXC_INTERNAL_ERROR ((2 << 8) | EXC_ERROR | EXF_PANIC)
+#define EXC_INTERNAL_ERROR ((2 << EXC_OFFSET) | EXC_ERROR | EXF_PANIC)
/* Things that shouldn't happen */
-#define EXC_BADARG ((3 << 8) | EXC_ERROR)
+#define EXC_BADARG ((3 << EXC_OFFSET) | EXC_ERROR)
/* Bad argument to a BIF */
-#define EXC_BADARITH ((4 << 8) | EXC_ERROR)
+#define EXC_BADARITH ((4 << EXC_OFFSET) | EXC_ERROR)
/* Bad arithmetic */
-#define EXC_BADMATCH ((5 << 8) | EXC_ERROR)
+#define EXC_BADMATCH ((5 << EXC_OFFSET) | EXC_ERROR)
/* Bad match in function body */
-#define EXC_FUNCTION_CLAUSE ((6 << 8) | EXC_ERROR)
+#define EXC_FUNCTION_CLAUSE ((6 << EXC_OFFSET) | EXC_ERROR)
/* No matching function head */
-#define EXC_CASE_CLAUSE ((7 << 8) | EXC_ERROR)
+#define EXC_CASE_CLAUSE ((7 << EXC_OFFSET) | EXC_ERROR)
/* No matching case clause */
-#define EXC_IF_CLAUSE ((8 << 8) | EXC_ERROR)
+#define EXC_IF_CLAUSE ((8 << EXC_OFFSET) | EXC_ERROR)
/* No matching if clause */
-#define EXC_UNDEF ((9 << 8) | EXC_ERROR)
+#define EXC_UNDEF ((9 << EXC_OFFSET) | EXC_ERROR)
 /* No function of that arity matches */
-#define EXC_BADFUN ((10 << 8) | EXC_ERROR)
+#define EXC_BADFUN ((10 << EXC_OFFSET) | EXC_ERROR)
/* Not an existing fun */
-#define EXC_BADARITY ((11 << 8) | EXC_ERROR)
+#define EXC_BADARITY ((11 << EXC_OFFSET) | EXC_ERROR)
/* Attempt to call fun with
* wrong number of arguments. */
-#define EXC_TIMEOUT_VALUE ((12 << 8) | EXC_ERROR)
+#define EXC_TIMEOUT_VALUE ((12 << EXC_OFFSET) | EXC_ERROR)
/* Bad time out value */
-#define EXC_NOPROC ((13 << 8) | EXC_ERROR)
+#define EXC_NOPROC ((13 << EXC_OFFSET) | EXC_ERROR)
/* No process or port */
-#define EXC_NOTALIVE ((14 << 8) | EXC_ERROR)
+#define EXC_NOTALIVE ((14 << EXC_OFFSET) | EXC_ERROR)
/* Not distributed */
-#define EXC_SYSTEM_LIMIT ((15 << 8) | EXC_ERROR)
+#define EXC_SYSTEM_LIMIT ((15 << EXC_OFFSET) | EXC_ERROR)
/* Ran out of something */
-#define EXC_TRY_CLAUSE ((16 << 8) | EXC_ERROR)
+#define EXC_TRY_CLAUSE ((16 << EXC_OFFSET) | EXC_ERROR)
/* No matching try clause */
-#define EXC_NOTSUP ((17 << 8) | EXC_ERROR)
+#define EXC_NOTSUP ((17 << EXC_OFFSET) | EXC_ERROR)
/* Not supported */
-#define NUMBER_EXIT_CODES 18 /* The number of exit code indices */
+#define EXC_BADMAP ((18 << EXC_OFFSET) | EXC_ERROR)
+ /* Bad map */
+
+#define EXC_BADKEY ((19 << EXC_OFFSET) | EXC_ERROR)
+ /* Bad key in map */
+
+#define NUMBER_EXIT_CODES 20 /* The number of exit code indices */
/*
* Internal pseudo-error codes.
*/
-#define TRAP (1 << 8) /* BIF Trap to erlang code */
+#define TRAP (1 << EXC_OFFSET) /* BIF Trap to erlang code */
/*
* Aliases for some common exit codes.
*/
#define BADARG EXC_BADARG
#define BADARITH EXC_BADARITH
+#define BADKEY EXC_BADKEY
+#define BADMAP EXC_BADMAP
#define BADMATCH EXC_BADMATCH
#define SYSTEM_LIMIT EXC_SYSTEM_LIMIT
@@ -188,7 +212,7 @@ struct StackTrace {
Eterm header; /* bignum header - must be first in struct */
Eterm freason; /* original exception reason is saved in the struct */
BeamInstr* pc;
- BeamInstr* current;
+ ErtsCodeMFA* current;
int depth; /* number of saved pointers in trace[] */
BeamInstr *trace[1]; /* varying size - must be last in struct */
};
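
To see how the new offset-based macros decompose an exit code, here is a hedged sketch. It assumes, consistent with the rest of this header, that EXC_ERROR carries EXTAG_ERROR in its low class bits, so with the values above bits [0,2) hold the class tag, bits [2,9) the EXF_* flags, and bits [9,14) the descriptor-table index:

    #include <assert.h>
    #include "error.h"

    static void decompose_exc_badarg(void)
    {
        /* EXC_BADARG == (3 << EXC_OFFSET) | EXC_ERROR, with EXC_OFFSET == 9 */
        assert(GET_EXC_CLASS(EXC_BADARG) == EXTAG_ERROR); /* class tag   */
        assert(GET_EXC_INDEX(EXC_BADARG) == 3);           /* table index */
    }
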
diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c
index b0f08d8245..946ffeffb8 100644
--- a/erts/emulator/beam/export.c
+++ b/erts/emulator/beam/export.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -30,7 +31,7 @@
#define EXPORT_INITIAL_SIZE 4000
#define EXPORT_LIMIT (512*1024)
-#define EXPORT_HASH(m,f,a) ((m)*(f)+(a))
+#define EXPORT_HASH(m,f,a) ((atom_val(m) * atom_val(f)) ^ (a))
#ifdef DEBUG
# define IF_DEBUG(x) x
@@ -40,16 +41,13 @@
static IndexTable export_tables[ERTS_NUM_CODE_IX]; /* Active not locked */
-static erts_smp_atomic_t total_entries_bytes;
-
-#include "erl_smp.h"
+static erts_atomic_t total_entries_bytes;
/* This lock protects the staging export table from concurrent access
* AND it protects the staging table from becoming active.
*/
-erts_smp_mtx_t export_staging_lock;
+erts_mtx_t export_staging_lock;
-extern BeamInstr* em_call_error_handler;
extern BeamInstr* em_call_traced_function;
struct export_entry
@@ -78,24 +76,19 @@ struct export_templ
static struct export_blob* entry_to_blob(struct export_entry* ee)
{
- return (struct export_blob*)
- ((char*)ee->ep - offsetof(struct export_blob,exp));
+ return ErtsContainerStruct(ee->ep, struct export_blob, exp);
}
void
-export_info(int to, void *to_arg)
+export_info(fmtfn_t to, void *to_arg)
{
-#ifdef ERTS_SMP
int lock = !ERTS_IS_CRASH_DUMPING;
if (lock)
export_staging_lock();
-#endif
index_info(to, to_arg, &export_tables[erts_active_code_ix()]);
hash_info(to, to_arg, &export_tables[erts_staging_code_ix()].htable);
-#ifdef ERTS_SMP
if (lock)
export_staging_unlock();
-#endif
}
@@ -103,7 +96,8 @@ static HashValue
export_hash(struct export_entry* ee)
{
Export* x = ee->ep;
- return EXPORT_HASH(x->code[0], x->code[1], x->code[2]);
+ return EXPORT_HASH(x->info.mfa.module, x->info.mfa.function,
+ x->info.mfa.arity);
}
static int
@@ -111,9 +105,9 @@ export_cmp(struct export_entry* tmpl_e, struct export_entry* obj_e)
{
Export* tmpl = tmpl_e->ep;
Export* obj = obj_e->ep;
- return !(tmpl->code[0] == obj->code[0] &&
- tmpl->code[1] == obj->code[1] &&
- tmpl->code[2] == obj->code[2]);
+ return !(tmpl->info.mfa.module == obj->info.mfa.module &&
+ tmpl->info.mfa.function == obj->info.mfa.function &&
+ tmpl->info.mfa.arity == obj->info.mfa.arity);
}
@@ -128,23 +122,28 @@ export_alloc(struct export_entry* tmpl_e)
Export* obj;
blob = (struct export_blob*) erts_alloc(ERTS_ALC_T_EXPORT, sizeof(*blob));
- erts_smp_atomic_add_nob(&total_entries_bytes, sizeof(*blob));
+ erts_atomic_add_nob(&total_entries_bytes, sizeof(*blob));
obj = &blob->exp;
- obj->fake_op_func_info_for_hipe[0] = 0;
- obj->fake_op_func_info_for_hipe[1] = 0;
- obj->code[0] = tmpl->code[0];
- obj->code[1] = tmpl->code[1];
- obj->code[2] = tmpl->code[2];
- obj->code[3] = (BeamInstr) em_call_error_handler;
- obj->code[4] = 0;
+ obj->info.op = 0;
+ obj->info.u.gen_bp = NULL;
+ obj->info.mfa.module = tmpl->info.mfa.module;
+ obj->info.mfa.function = tmpl->info.mfa.function;
+ obj->info.mfa.arity = tmpl->info.mfa.arity;
+ obj->beam[0] = 0;
+ if (BeamOpsAreInitialized()) {
+ obj->beam[0] = BeamOpCodeAddr(op_call_error_handler);
+ }
+ obj->beam[1] = 0;
for (ix=0; ix<ERTS_NUM_CODE_IX; ix++) {
- obj->addressv[ix] = obj->code+3;
+ obj->addressv[ix] = obj->beam;
blob->entryv[ix].slot.index = -1;
blob->entryv[ix].ep = &blob->exp;
}
ix = 0;
+
+ DBG_TRACE_MFA_P(&obj->info.mfa, "export allocation at %p", obj);
}
else { /* Existing entry in another table, use free entry in blob */
blob = entry_to_blob(tmpl_e);
@@ -163,11 +162,14 @@ export_free(struct export_entry* obj)
obj->slot.index = -1;
for (i=0; i < ERTS_NUM_CODE_IX; i++) {
if (blob->entryv[i].slot.index >= 0) {
+ DBG_TRACE_MFA_P(&blob->exp.info.mfa, "export entry slot %u freed for %p",
+ (obj - blob->entryv), &blob->exp);
return;
}
}
+ DBG_TRACE_MFA_P(&blob->exp.info.mfa, "export blob deallocation at %p", &blob->exp);
erts_free(ERTS_ALC_T_EXPORT, blob);
- erts_smp_atomic_add_nob(&total_entries_bytes, -sizeof(*blob));
+ erts_atomic_add_nob(&total_entries_bytes, -sizeof(*blob));
}
void
@@ -176,13 +178,17 @@ init_export_table(void)
HashFunctions f;
int i;
- erts_smp_mtx_init(&export_staging_lock, "export_tab");
- erts_smp_atomic_init_nob(&total_entries_bytes, 0);
+ erts_mtx_init(&export_staging_lock, "export_tab", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+ erts_atomic_init_nob(&total_entries_bytes, 0);
f.hash = (H_FUN) export_hash;
f.cmp = (HCMP_FUN) export_cmp;
f.alloc = (HALLOC_FUN) export_alloc;
f.free = (HFREE_FUN) export_free;
+ f.meta_alloc = (HMALLOC_FUN) erts_alloc;
+ f.meta_free = (HMFREE_FUN) erts_free;
+ f.meta_print = (HMPRINT_FUN) erts_print;
for (i=0; i<ERTS_NUM_CODE_IX; i++) {
erts_index_init(ERTS_ALC_T_EXPORT_TABLE, &export_tables[i], "export_list",
@@ -221,7 +227,9 @@ erts_find_export_entry(Eterm m, Eterm f, unsigned int a, ErtsCodeIndex code_ix)
while (b != (HashBucket*) 0) {
Export* ep = ((struct export_entry*) b)->ep;
- if (ep->code[0] == m && ep->code[1] == f && ep->code[2] == a) {
+ if (ep->info.mfa.module == m &&
+ ep->info.mfa.function == f &&
+ ep->info.mfa.arity == a) {
return ep;
}
b = b->next;
@@ -234,9 +242,9 @@ static struct export_entry* init_template(struct export_templ* templ,
{
templ->entry.ep = &templ->exp;
templ->entry.slot.index = -1;
- templ->exp.code[0] = m;
- templ->exp.code[1] = f;
- templ->exp.code[2] = a;
+ templ->exp.info.mfa.module = m;
+ templ->exp.info.mfa.function = f;
+ templ->exp.info.mfa.arity = a;
return &templ->entry;
}
@@ -260,8 +268,8 @@ erts_find_function(Eterm m, Eterm f, unsigned int a, ErtsCodeIndex code_ix)
ee = hash_get(&export_tables[code_ix].htable, init_template(&templ, m, f, a));
if (ee == NULL ||
- (ee->ep->addressv[code_ix] == ee->ep->code+3 &&
- ee->ep->code[3] != (BeamInstr) BeamOp(op_i_generic_breakpoint))) {
+ (ee->ep->addressv[code_ix] == ee->ep->beam &&
+ ! BeamIsOpCode(ee->ep->beam[0], op_i_generic_breakpoint))) {
return NULL;
}
return ee->ep;
@@ -345,7 +353,7 @@ Export *export_list(int i, ErtsCodeIndex code_ix)
int export_list_size(ErtsCodeIndex code_ix)
{
- return export_tables[code_ix].entries;
+ return erts_index_num_entries(&export_tables[code_ix]);
}
int export_table_sz(void)
@@ -361,7 +369,7 @@ int export_table_sz(void)
}
int export_entries_sz(void)
{
- return erts_smp_atomic_read_nob(&total_entries_bytes);
+ return erts_atomic_read_nob(&total_entries_bytes);
}
Export *export_get(Export *e)
{
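
The entry_to_blob() change above swaps hand-rolled offsetof arithmetic for ErtsContainerStruct, the usual container_of idiom. A generic sketch of that idiom follows; this is not the actual ERTS definition (which lives in the emulator headers), just the shape of it:

    #include <stddef.h>

    /* Recover the struct that embeds `member` from a pointer to the member. */
    #define CONTAINER_OF(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* e.g., given Export *ep embedded as `exp` in struct export_blob:
     *   struct export_blob *blob = CONTAINER_OF(ep, struct export_blob, exp);
     */
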
diff --git a/erts/emulator/beam/export.h b/erts/emulator/beam/export.h
index 61a54de59f..194e514b12 100644
--- a/erts/emulator/beam/export.h
+++ b/erts/emulator/beam/export.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -20,14 +21,8 @@
#ifndef __EXPORT_H__
#define __EXPORT_H__
-#ifndef __SYS_H__
#include "sys.h"
-#endif
-
-#ifndef __INDEX_H__
#include "index.h"
-#endif
-
#include "code_ix.h"
/*
@@ -38,27 +33,25 @@ typedef struct export
{
void* addressv[ERTS_NUM_CODE_IX]; /* Pointer to code for function. */
- BeamInstr fake_op_func_info_for_hipe[2]; /* MUST be just before code[] */
+ ErtsCodeInfo info; /* MUST be just before beam[] */
+
/*
- * code[0]: Tagged atom for module.
- * code[1]: Tagged atom for function.
- * code[2]: Arity (untagged integer).
- * code[3]: This entry is 0 unless the 'address' field points to it.
+ * beam[0]: This entry is 0 unless the 'addressv' field points to it.
* Threaded code instruction to load function
* (em_call_error_handler), execute BIF (em_apply_bif),
* or a breakpoint instruction (op_i_generic_breakpoint).
- * code[4]: Function pointer to BIF function (for BIFs only),
+ * beam[1]: Function pointer to BIF function (for BIFs only),
* or pointer to threaded code if the module has an
* on_load function that has not been run yet, or pointer
- * to code for function code[3] is a breakpont instruction.
+ * to code if function beam[0] is a breakpoint instruction.
* Otherwise: 0.
*/
- BeamInstr code[5];
+ BeamInstr beam[2];
} Export;
void init_export_table(void);
-void export_info(int, void *);
+void export_info(fmtfn_t, void *);
ERTS_GLB_INLINE Export* erts_active_export_entry(Eterm m, Eterm f, unsigned a);
Export* erts_export_put(Eterm mod, Eterm func, unsigned int arity);
@@ -73,14 +66,14 @@ Export *export_get(Export*);
void export_start_staging(void);
void export_end_staging(int commit);
-extern erts_smp_mtx_t export_staging_lock;
-#define export_staging_lock() erts_smp_mtx_lock(&export_staging_lock)
-#define export_staging_unlock() erts_smp_mtx_unlock(&export_staging_lock)
+extern erts_mtx_t export_staging_lock;
+#define export_staging_lock() erts_mtx_lock(&export_staging_lock)
+#define export_staging_unlock() erts_mtx_unlock(&export_staging_lock)
#include "beam_load.h" /* For em_* extern declarations */
#define ExportIsBuiltIn(EntryPtr) \
-(((EntryPtr)->addressv[erts_active_code_ix()] == (EntryPtr)->code + 3) && \
- ((EntryPtr)->code[3] == (BeamInstr) em_apply_bif))
+(((EntryPtr)->addressv[erts_active_code_ix()] == (EntryPtr)->beam) && \
+ (BeamIsOpCode((EntryPtr)->beam[0], op_apply_bif)))
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
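
As the rewritten comment explains, addressv[] either points at loaded code or falls back to the entry's own beam[] trampoline, whose beam[0] opcode then selects the error handler, a BIF stub, or a breakpoint. A hedged sketch of that test, using only names declared in this header (ExportIsBuiltIn above is the BIF special case of the same check):

    /* True when no real code is currently reachable through this entry:
     * the active address points back into the entry itself. */
    static int export_points_at_trampoline(Export *ep)
    {
        return ep->addressv[erts_active_code_ix()] == (void *)ep->beam;
    }
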
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index 656de7c49a..970158933f 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2014. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -36,7 +37,9 @@
#include "erl_process.h"
#include "error.h"
#include "external.h"
+#define ERL_WANT_HIPE_BIF_WRAPPER__
#include "bif.h"
+#undef ERL_WANT_HIPE_BIF_WRAPPER__
#include "big.h"
#include "dist.h"
#include "erl_binary.h"
@@ -44,14 +47,22 @@
#include "erl_zlib.h"
#include "erl_map.h"
-#ifdef HIPE
-#include "hipe_mode_switch.h"
-#endif
#define in_area(ptr,start,nbytes) ((UWord)((char*)(ptr) - (char*)(start)) < (nbytes))
#define MAX_STRING_LEN 0xffff
-#define is_valid_creation(Cre) ((unsigned)(Cre) < MAX_CREATION || (Cre) == INTERNAL_CREATION)
+/* MAX value for the creation field in pids, ports and references
+   for the local node and for the current external format.
+
+   Larger creation values than this are allowed in external pids, ports
+   and refs encoded with NEW_PID_EXT, NEW_PORT_EXT and NEWER_REFERENCE_EXT.
+   The point here is to prepare for a future upgrade to 32-bit creation.
+   OTP-19 (erts-8.0) can handle big creation values from other (newer) nodes,
+   but does not use big creation values for the local node yet, as we may
+   still have to communicate with older nodes.
+*/
+#define ERTS_MAX_LOCAL_CREATION (3)
+#define is_valid_creation(Cre) ((unsigned)(Cre) <= ERTS_MAX_LOCAL_CREATION)
#undef ERTS_DEBUG_USE_DIST_SEP
#ifdef DEBUG
@@ -95,9 +106,9 @@ static Uint is_external_string(Eterm obj, int* p_is_string);
static byte* enc_atom(ErtsAtomCacheMap *, Eterm, byte*, Uint32);
static byte* enc_pid(ErtsAtomCacheMap *, Eterm, byte*, Uint32);
struct B2TContext_t;
-static byte* dec_term(ErtsDistExternal *, Eterm**, byte*, ErlOffHeap*, Eterm*, struct B2TContext_t*);
+static byte* dec_term(ErtsDistExternal*, ErtsHeapFactory*, byte*, Eterm*, struct B2TContext_t*);
static byte* dec_atom(ErtsDistExternal *, byte*, Eterm*);
-static byte* dec_pid(ErtsDistExternal *, Eterm**, byte*, ErlOffHeap*, Eterm*);
+static byte* dec_pid(ErtsDistExternal *, ErtsHeapFactory*, byte*, Eterm*, byte tag);
static Sint decoded_size(byte *ep, byte* endp, int internal_tags, struct B2TContext_t*);
static BIF_RETTYPE term_to_binary_trap_1(BIF_ALIST_1);
@@ -111,26 +122,17 @@ static int encode_size_struct_int(struct TTBSizeContext_*, ErtsAtomCacheMap *acm
static Export binary_to_term_trap_export;
static BIF_RETTYPE binary_to_term_trap_1(BIF_ALIST_1);
-static Eterm binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* context_b);
+static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* context_b,
+ Export *bif, Eterm arg0, Eterm arg1);
void erts_init_external(void) {
-#if 1 /* In R16 */
erts_init_trap_export(&term_to_binary_trap_export,
- am_erlang, am_term_to_binary_trap, 1,
+ am_erts_internal, am_term_to_binary_trap, 1,
&term_to_binary_trap_1);
erts_init_trap_export(&binary_to_term_trap_export,
- am_erlang, am_binary_to_term_trap, 1,
+ am_erts_internal, am_binary_to_term_trap, 1,
&binary_to_term_trap_1);
-#else
- sys_memset((void *) &term_to_binary_trap_export, 0, sizeof(Export));
- term_to_binary_trap_export.address = &term_to_binary_trap_export.code[3];
- term_to_binary_trap_export.code[0] = am_erlang;
- term_to_binary_trap_export.code[1] = am_term_to_binary_trap;
- term_to_binary_trap_export.code[2] = 1;
- term_to_binary_trap_export.code[3] = (BeamInstr) em_apply_bif;
- term_to_binary_trap_export.code[4] = (BeamInstr) &term_to_binary_trap_1;
-#endif
return;
}
@@ -510,15 +512,37 @@ byte *erts_encode_ext_dist_header_finalize(byte *ext, ErtsAtomCache *cache, Uint
return ep;
}
-Uint erts_encode_dist_ext_size(Eterm term, Uint32 flags, ErtsAtomCacheMap *acmp)
+int erts_encode_dist_ext_size(Eterm term, Uint32 flags, ErtsAtomCacheMap *acmp,
+ Uint* szp)
{
- Uint sz = 0;
+ Uint sz;
+ if (encode_size_struct_int(NULL, acmp, term, flags, NULL, &sz)) {
+ return -1;
+ } else {
#ifndef ERTS_DEBUG_USE_DIST_SEP
- if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE))
+ if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE))
#endif
- sz++ /* VERSION_MAGIC */;
- sz += encode_size_struct2(acmp, term, flags);
- return sz;
+ sz++ /* VERSION_MAGIC */;
+
+ *szp += sz;
+ return 0;
+ }
+}
+
+int erts_encode_dist_ext_size_int(Eterm term, struct erts_dsig_send_context* ctx, Uint* szp)
+{
+ Uint sz;
+ if (encode_size_struct_int(&ctx->u.sc, ctx->acmp, term, ctx->flags, &ctx->reds, &sz)) {
+ return -1;
+ } else {
+#ifndef ERTS_DEBUG_USE_DIST_SEP
+ if (!(ctx->flags & DFLAG_DIST_HDR_ATOM_CACHE))
+#endif
+ sz++ /* VERSION_MAGIC */;
+
+ *szp += sz;
+ return 0;
+ }
}
Uint erts_encode_ext_size(Eterm term)
@@ -539,19 +563,16 @@ Uint erts_encode_ext_size_ets(Eterm term)
}
-void erts_encode_dist_ext(Eterm term, byte **ext, Uint32 flags, ErtsAtomCacheMap *acmp)
+int erts_encode_dist_ext(Eterm term, byte **ext, Uint32 flags, ErtsAtomCacheMap *acmp,
+ TTBEncodeContext* ctx, Sint* reds)
{
- byte *ep = *ext;
-#ifndef ERTS_DEBUG_USE_DIST_SEP
- if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE))
-#endif
- *ep++ = VERSION_MAGIC;
- ep = enc_term(acmp, term, ep, flags, NULL);
- if (!ep)
- erl_exit(ERTS_ABORT_EXIT,
- "%s:%d:erts_encode_dist_ext(): Internal data structure error\n",
- __FILE__, __LINE__);
- *ext = ep;
+ if (!ctx || !ctx->wstack.wstart) {
+ #ifndef ERTS_DEBUG_USE_DIST_SEP
+ if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE))
+ #endif
+ *(*ext)++ = VERSION_MAGIC;
+ }
+ return enc_term_int(ctx, acmp, term, *ext, flags, NULL, reds, ext);
}
void erts_encode_ext(Eterm term, byte **ext)
@@ -560,7 +581,7 @@ void erts_encode_ext(Eterm term, byte **ext)
*ep++ = VERSION_MAGIC;
ep = enc_term(NULL, term, ep, TERM_TO_BINARY_DFLAGS, NULL);
if (!ep)
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"%s:%d:erts_encode_ext(): Internal data structure error\n",
__FILE__, __LINE__);
*ext = ep;
@@ -595,7 +616,7 @@ erts_make_dist_ext_copy(ErtsDistExternal *edep, Uint xsize)
sys_memcpy((void *) ep, (void *) edep, dist_ext_sz);
ep += dist_ext_sz;
if (new_edep->dep)
- erts_refc_inc(&new_edep->dep->refc, 1);
+ erts_ref_dist_entry(new_edep->dep);
new_edep->extp = ep;
new_edep->ext_endp = ep + ext_sz;
new_edep->heap_size = -1;
@@ -608,7 +629,8 @@ erts_prepare_dist_ext(ErtsDistExternal *edep,
byte *ext,
Uint size,
DistEntry *dep,
- ErtsAtomCache *cache)
+ ErtsAtomCache *cache,
+ Uint32 *connection_id)
{
#undef ERTS_EXT_FAIL
#undef ERTS_EXT_HDR_FAIL
@@ -629,33 +651,36 @@ erts_prepare_dist_ext(ErtsDistExternal *edep,
if (size < 2)
ERTS_EXT_FAIL;
+ if (!dep)
+ ERTS_INTERNAL_ERROR("Invalid use");
+
if (ep[0] != VERSION_MAGIC) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- if (dep)
- erts_dsprintf(dsbufp,
- "** Got message from incompatible erlang on "
- "channel %d\n",
- dist_entry_channel_no(dep));
- else
- erts_dsprintf(dsbufp,
- "** Attempt to convert old incompatible "
- "binary %d\n",
- *ep);
+ erts_dsprintf(dsbufp,
+ "** Got message from incompatible erlang on "
+ "channel %d\n",
+ dist_entry_channel_no(dep));
erts_send_error_to_logger_nogl(dsbufp);
ERTS_EXT_FAIL;
}
edep->flags = 0;
edep->dep = dep;
- if (dep) {
- erts_smp_de_rlock(dep);
- if (dep->flags & DFLAG_DIST_HDR_ATOM_CACHE)
- edep->flags |= ERTS_DIST_EXT_DFLAG_HDR;
-
- edep->flags |= (dep->connection_id & ERTS_DIST_EXT_CON_ID_MASK);
- erts_smp_de_runlock(dep);
+
+ erts_de_rlock(dep);
+
+ if ((dep->status & (ERTS_DE_SFLG_EXITING|ERTS_DE_SFLG_CONNECTED))
+ != ERTS_DE_SFLG_CONNECTED) {
+ erts_de_runlock(dep);
+ return ERTS_PREP_DIST_EXT_CLOSED;
}
+ if (dep->flags & DFLAG_DIST_HDR_ATOM_CACHE)
+ edep->flags |= ERTS_DIST_EXT_DFLAG_HDR;
+
+ *connection_id = dep->connection_id;
+ edep->flags |= (dep->connection_id & ERTS_DIST_EXT_CON_ID_MASK);
+
if (ep[1] != DIST_HEADER) {
if (edep->flags & ERTS_DIST_EXT_DFLAG_HDR)
ERTS_EXT_HDR_FAIL;
@@ -814,14 +839,15 @@ erts_prepare_dist_ext(ErtsDistExternal *edep,
ERTS_EXT_FAIL;
#endif
- return 0;
+ erts_de_runlock(dep);
+
+ return ERTS_PREP_DIST_EXT_SUCCESS;
#undef CHKSIZE
#undef ERTS_EXT_FAIL
#undef ERTS_EXT_HDR_FAIL
- bad_hdr:
- if (dep) {
+ bad_hdr: {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp,
"%T got a corrupted distribution header from %T "
@@ -834,10 +860,11 @@ erts_prepare_dist_ext(ErtsDistExternal *edep,
erts_dsprintf(dsbufp, ">>");
erts_send_warning_to_logger_nogl(dsbufp);
}
- fail:
- if (dep)
- erts_kill_dist_connection(dep, dep->connection_id);
- return -1;
+ fail: {
+ erts_de_runlock(dep);
+ erts_kill_dist_connection(dep, *connection_id);
+ }
+ return ERTS_PREP_DIST_EXT_FAILED;
}
static void
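
erts_prepare_dist_ext() now reports three outcomes instead of 0/-1, so a caller can tell a connection that closed under it (drop the message silently) from a corrupt header (the connection has already been killed using the connection id written through the new out-parameter). A hedged caller sketch, assuming only the enum values and signature visible in this hunk; `ede`, `ext`, `size`, `dep` and `cache` are assumed set up as in the real caller in dist.c:

    Uint32 conn_id;
    switch (erts_prepare_dist_ext(&ede, ext, size, dep, cache, &conn_id)) {
    case ERTS_PREP_DIST_EXT_SUCCESS:
        /* header accepted; continue with erts_decode_dist_ext() */
        break;
    case ERTS_PREP_DIST_EXT_CLOSED:
        /* connection no longer ERTS_DE_SFLG_CONNECTED; drop silently */
        break;
    case ERTS_PREP_DIST_EXT_FAILED:
        /* corrupt data; the connection was already killed via conn_id */
        break;
    }
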
@@ -921,8 +948,7 @@ Sint erts_decode_ext_size_ets(byte *ext, Uint size)
** on return hpp is updated to point after allocated data
*/
Eterm
-erts_decode_dist_ext(Eterm** hpp,
- ErlOffHeap* off_heap,
+erts_decode_dist_ext(ErtsHeapFactory* factory,
ErtsDistExternal *edep)
{
Eterm obj;
@@ -942,7 +968,7 @@ erts_decode_dist_ext(Eterm** hpp,
goto error;
ep++;
}
- ep = dec_term(edep, hpp, ep, off_heap, &obj, NULL);
+ ep = dec_term(edep, factory, ep, &obj, NULL);
if (!ep)
goto error;
@@ -951,33 +977,41 @@ erts_decode_dist_ext(Eterm** hpp,
return obj;
error:
+ erts_factory_undo(factory);
bad_dist_ext(edep);
return THE_NON_VALUE;
}
-Eterm erts_decode_ext(Eterm **hpp, ErlOffHeap *off_heap, byte **ext)
+Eterm erts_decode_ext(ErtsHeapFactory* factory, byte **ext, Uint32 flags)
{
+ ErtsDistExternal ede, *edep;
Eterm obj;
byte *ep = *ext;
- if (*ep++ != VERSION_MAGIC)
+ if (*ep++ != VERSION_MAGIC) {
+ erts_factory_undo(factory);
return THE_NON_VALUE;
- ep = dec_term(NULL, hpp, ep, off_heap, &obj, NULL);
+ }
+ if (flags) {
+ ASSERT(flags == ERTS_DIST_EXT_BTT_SAFE);
+ ede.flags = flags; /* a dummy struct just for the flags */
+ edep = &ede;
+ } else {
+ edep = NULL;
+ }
+ ep = dec_term(edep, factory, ep, &obj, NULL);
if (!ep) {
-#ifdef DEBUG
- bin_write(ERTS_PRINT_STDERR,NULL,*ext,500);
-#endif
return THE_NON_VALUE;
}
*ext = ep;
return obj;
}
-Eterm erts_decode_ext_ets(Eterm **hpp, ErlOffHeap *off_heap, byte *ext)
+Eterm erts_decode_ext_ets(ErtsHeapFactory* factory, byte *ext)
{
Eterm obj;
- ext = dec_term(NULL, hpp, ext, off_heap, &obj, NULL);
+ ext = dec_term(NULL, factory, ext, &obj, NULL);
ASSERT(ext);
return obj;
}
@@ -986,9 +1020,8 @@ Eterm erts_decode_ext_ets(Eterm **hpp, ErlOffHeap *off_heap, byte *ext)
BIF_RETTYPE erts_debug_dist_ext_to_term_2(BIF_ALIST_2)
{
+ ErtsHeapFactory factory;
Eterm res;
- Eterm *hp;
- Eterm *hendp;
Sint hsz;
ErtsDistExternal ede;
Eterm *tp;
@@ -1035,12 +1068,9 @@ BIF_RETTYPE erts_debug_dist_ext_to_term_2(BIF_ALIST_2)
if (hsz < 0)
goto badarg;
- hp = HAlloc(BIF_P, (Uint) hsz);
- hendp = hp + hsz;
-
- res = erts_decode_dist_ext(&hp, &MSO(BIF_P), &ede);
-
- HRelease(BIF_P, hendp, hp);
+ erts_factory_proc_prealloc_init(&factory, BIF_P, hsz);
+ res = erts_decode_dist_ext(&factory, &ede);
+ erts_factory_close(&factory);
if (is_value(res))
BIF_RET(res);
@@ -1055,7 +1085,7 @@ static BIF_RETTYPE term_to_binary_trap_1(BIF_ALIST_1)
Eterm *tp = tuple_val(BIF_ARG_1);
Eterm Term = tp[1];
Eterm bt = tp[2];
- Binary *bin = ((ProcBin *) binary_val(bt))->val;
+ Binary *bin = erts_magic_ref2bin(bt);
Eterm res = erts_term_to_binary_int(BIF_P, Term, 0, 0,bin);
if (is_tuple(res)) {
ASSERT(BIF_P->flags & F_DISABLE_GC);
@@ -1069,6 +1099,8 @@ static BIF_RETTYPE term_to_binary_trap_1(BIF_ALIST_1)
}
}
+HIPE_WRAPPER_BIF_DISABLE_GC(term_to_binary, 1)
+
BIF_RETTYPE term_to_binary_1(BIF_ALIST_1)
{
Eterm res = erts_term_to_binary_int(BIF_P, BIF_ARG_1, 0, TERM_TO_BINARY_DFLAGS, NULL);
@@ -1081,6 +1113,8 @@ BIF_RETTYPE term_to_binary_1(BIF_ALIST_1)
}
}
+HIPE_WRAPPER_BIF_DISABLE_GC(term_to_binary, 2)
+
BIF_RETTYPE term_to_binary_2(BIF_ALIST_2)
{
Process* p = BIF_P;
@@ -1101,8 +1135,11 @@ BIF_RETTYPE term_to_binary_2(BIF_ALIST_2)
case 0:
flags = TERM_TO_BINARY_DFLAGS & ~DFLAG_NEW_FLOATS;
break;
- case 1:
+ case 1: /* Current default... */
flags = TERM_TO_BINARY_DFLAGS;
+ break;
+ case 2:
+ flags = TERM_TO_BINARY_DFLAGS | DFLAG_UTF8_ATOMS;
break;
default:
goto error;
@@ -1164,12 +1201,11 @@ typedef struct {
byte* ep;
Eterm res;
Eterm* next;
- Eterm* hp_start;
- Eterm* hp;
- Eterm* hp_end;
+ ErtsHeapFactory factory;
int remaining_n;
char* remaining_bytes;
- Eterm* maps_head;
+ ErtsWStack flat_maps;
+ ErtsPStack hamt_array;
} B2TDecodeContext;
typedef struct {
@@ -1185,6 +1221,8 @@ typedef struct B2TContext_t {
Uint32 flags;
SWord reds;
Eterm trap_bin;
+ Export *bif;
+ Eterm arg[2];
enum B2TState state;
union {
B2TSizeContext sc;
@@ -1193,6 +1231,7 @@ typedef struct B2TContext_t {
} u;
} B2TContext;
+static B2TContext* b2t_export_context(Process*, B2TContext* src);
static uLongf binary2term_uncomp_size(byte* data, Sint size)
{
@@ -1225,7 +1264,7 @@ static uLongf binary2term_uncomp_size(byte* data, Sint size)
static ERTS_INLINE int
binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size,
- B2TContext* ctx)
+ B2TContext** ctxp, Process* p)
{
byte *bytes = data;
Sint size = data_size;
@@ -1239,8 +1278,8 @@ binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size,
size--;
if (size < 5 || *bytes != COMPRESSED) {
state->extp = bytes;
- if (ctx)
- ctx->state = B2TSizeInit;
+ if (ctxp)
+ (*ctxp)->state = B2TSizeInit;
}
else {
uLongf dest_len = (Uint32) get_int32(bytes+1);
@@ -1257,16 +1296,26 @@ binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size,
return -1;
}
state->extp = erts_alloc(ERTS_ALC_T_EXT_TERM_DATA, dest_len);
- ctx->reds -= dest_len;
+ if (ctxp)
+ (*ctxp)->reds -= dest_len;
}
state->exttmp = 1;
- if (ctx) {
+ if (ctxp) {
+ /*
+ * Start decompression by exporting trap context
+ * so we don't have to deal with deep-copying z_stream.
+ */
+ B2TContext* ctx = b2t_export_context(p, *ctxp);
+        ASSERT(state == &(*ctxp)->b2ts);
+ state = &ctx->b2ts;
+
if (erl_zlib_inflate_start(&ctx->u.uc.stream, bytes, size) != Z_OK)
return -1;
ctx->u.uc.dbytes = state->extp;
ctx->u.uc.dleft = dest_len;
ctx->state = B2TUncompressChunk;
+ *ctxp = ctx;
}
else {
uLongf dlen = dest_len;
@@ -1291,10 +1340,12 @@ binary2term_abort(ErtsBinary2TermState *state)
}
static ERTS_INLINE Eterm
-binary2term_create(ErtsDistExternal *edep, ErtsBinary2TermState *state, Eterm **hpp, ErlOffHeap *ohp)
+binary2term_create(ErtsDistExternal *edep, ErtsBinary2TermState *state,
+ ErtsHeapFactory* factory)
{
Eterm res;
- if (!dec_term(edep, hpp, state->extp, ohp, &res, NULL))
+
+ if (!dec_term(edep, factory, state->extp, &res, NULL))
res = THE_NON_VALUE;
if (state->exttmp) {
state->exttmp = 0;
@@ -1308,7 +1359,7 @@ erts_binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size
{
Sint res;
- if (binary2term_prepare(state, data, data_size, NULL) < 0 ||
+ if (binary2term_prepare(state, data, data_size, NULL, NULL) < 0 ||
(res=decoded_size(state->extp, state->extp + state->extsize, 0, NULL)) < 0) {
if (state->exttmp)
@@ -1327,9 +1378,9 @@ erts_binary2term_abort(ErtsBinary2TermState *state)
}
Eterm
-erts_binary2term_create(ErtsBinary2TermState *state, Eterm **hpp, ErlOffHeap *ohp)
+erts_binary2term_create(ErtsBinary2TermState *state, ErtsHeapFactory* factory)
{
- return binary2term_create(NULL,state, hpp, ohp);
+ return binary2term_create(NULL,state, factory);
}
static void b2t_destroy_context(B2TContext* context)
@@ -1338,25 +1389,40 @@ static void b2t_destroy_context(B2TContext* context)
ERTS_ALC_T_EXT_TERM_DATA);
context->aligned_alloc = NULL;
binary2term_abort(&context->b2ts);
- if (context->state == B2TUncompressChunk) {
+ switch (context->state) {
+ case B2TUncompressChunk:
erl_zlib_inflate_finish(&context->u.uc.stream);
+ break;
+ case B2TDecode:
+ case B2TDecodeList:
+ case B2TDecodeTuple:
+ case B2TDecodeString:
+ case B2TDecodeBinary:
+ if (context->u.dc.hamt_array.pstart) {
+ erts_free(context->u.dc.hamt_array.alloc_type,
+ context->u.dc.hamt_array.pstart);
+ }
+ break;
+ default:;
}
}
-static void b2t_context_destructor(Binary *context_bin)
+static int b2t_context_destructor(Binary *context_bin)
{
B2TContext* ctx = (B2TContext*) ERTS_MAGIC_BIN_DATA(context_bin);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(context_bin) == b2t_context_destructor);
b2t_destroy_context(ctx);
+ return 1;
}
static BIF_RETTYPE binary_to_term_trap_1(BIF_ALIST_1)
{
- Binary *context_bin = ((ProcBin *) binary_val(BIF_ARG_1))->val;
+ Binary *context_bin = erts_magic_ref2bin(BIF_ARG_1);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(context_bin) == b2t_context_destructor);
- return binary_to_term_int(BIF_P, 0, THE_NON_VALUE, context_bin);
+ return binary_to_term_int(BIF_P, 0, THE_NON_VALUE, context_bin, NULL,
+ THE_NON_VALUE, THE_NON_VALUE);
}
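
The trap machinery here moves from ProcBin-wrapped magic binaries to magic references. The round trip, in sketch form using only calls that appear in this diff: the context is stored in a magic binary, handed to the scheduler as a magic ref term, and recovered on re-entry:

    /* suspend: wrap the context binary and trap (as in b2t_export_context) */
    hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
    ctx->trap_bin = erts_mk_magic_ref(&hp, &MSO(p), context_b);
    /* ... BIF_TRAP1(&binary_to_term_trap_export, p, ctx->trap_bin); */

    /* resume: unwrap it again (as in binary_to_term_trap_1) */
    Binary *context_bin = erts_magic_ref2bin(BIF_ARG_1);
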
@@ -1386,13 +1452,15 @@ static B2TContext* b2t_export_context(Process* p, B2TContext* src)
if (ctx->state >= B2TDecode && ctx->u.dc.next == &src->u.dc.res) {
ctx->u.dc.next = &ctx->u.dc.res;
}
- hp = HAlloc(p, PROC_BIN_SIZE);
- ctx->trap_bin = erts_mk_magic_binary_term(&hp, &MSO(p), context_b);
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+ ctx->trap_bin = erts_mk_magic_ref(&hp, &MSO(p), context_b);
return ctx;
}
-static Eterm binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* context_b)
+static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* context_b,
+ Export *bif_init, Eterm arg0, Eterm arg1)
{
+ BIF_RETTYPE ret_val;
#ifdef EXTREME_B2T_TRAPPING
SWord initial_reds = 1 + b2t_rand() % 4;
#else
@@ -1409,6 +1477,9 @@ static Eterm binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* con
ctx->state = B2TPrepare;
ctx->aligned_alloc = NULL;
ctx->flags = flags;
+ ctx->bif = bif_init;
+ ctx->arg[0] = arg0;
+ ctx->arg[1] = arg1;
IF_DEBUG(ctx->trap_bin = THE_NON_VALUE;)
} else {
is_first_call = 0;
@@ -1435,7 +1506,7 @@ static Eterm binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* con
if (ctx->aligned_alloc) {
ctx->reds -= bin_size / 8;
}
- if (binary2term_prepare(&ctx->b2ts, bytes, bin_size, ctx) < 0) {
+ if (binary2term_prepare(&ctx->b2ts, bytes, bin_size, &ctx, p) < 0) {
ctx->state = B2TBadArg;
}
break;
@@ -1484,10 +1555,9 @@ static Eterm binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* con
ctx->u.dc.ep = ctx->b2ts.extp;
ctx->u.dc.res = (Eterm) (UWord) NULL;
ctx->u.dc.next = &ctx->u.dc.res;
- ctx->u.dc.hp_start = HAlloc(p, ctx->heap_size);
- ctx->u.dc.hp = ctx->u.dc.hp_start;
- ctx->u.dc.hp_end = ctx->u.dc.hp_start + ctx->heap_size;
- ctx->u.dc.maps_head = NULL;
+ erts_factory_proc_prealloc_init(&ctx->u.dc.factory, p, ctx->heap_size);
+ ctx->u.dc.flat_maps.wstart = NULL;
+ ctx->u.dc.hamt_array.pstart = NULL;
ctx->state = B2TDecode;
/*fall through*/
case B2TDecode:
@@ -1497,34 +1567,46 @@ static Eterm binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* con
case B2TDecodeBinary: {
ErtsDistExternal fakedep;
fakedep.flags = ctx->flags;
- dec_term(&fakedep, NULL, NULL, &MSO(p), NULL, ctx);
+ dec_term(&fakedep, NULL, NULL, NULL, ctx);
break;
}
case B2TDecodeFail:
- HRelease(p, ctx->u.dc.hp_end, ctx->u.dc.hp_start);
/*fall through*/
case B2TBadArg:
- b2t_destroy_context(ctx);
- if (!is_first_call) {
- erts_set_gc_state(p, 1);
- }
BUMP_REDS(p, (initial_reds - ctx->reds) / B2T_BYTES_PER_REDUCTION);
- BIF_ERROR(p, BADARG & ~EXF_SAVETRACE);
+
+ ASSERT(ctx->bif == bif_export[BIF_binary_to_term_1]
+ || ctx->bif == bif_export[BIF_binary_to_term_2]);
+
+ if (is_first_call)
+ ERTS_BIF_PREP_ERROR(ret_val, p, BADARG);
+ else {
+ erts_set_gc_state(p, 1);
+ if (is_non_value(ctx->arg[1]))
+ ERTS_BIF_PREP_ERROR_TRAPPED1(ret_val, p, BADARG, ctx->bif,
+ ctx->arg[0]);
+ else
+ ERTS_BIF_PREP_ERROR_TRAPPED2(ret_val, p, BADARG, ctx->bif,
+ ctx->arg[0], ctx->arg[1]);
+ }
+ b2t_destroy_context(ctx);
+ return ret_val;
case B2TDone:
b2t_destroy_context(ctx);
- if (ctx->u.dc.hp > ctx->u.dc.hp_end) {
- erl_exit(1, ":%s, line %d: heap overrun by %d words(s)\n",
- __FILE__, __LINE__, ctx->u.dc.hp - ctx->u.dc.hp_end);
+ if (ctx->u.dc.factory.hp > ctx->u.dc.factory.hp_end) {
+            erts_exit(ERTS_ERROR_EXIT, ":%s, line %d: heap overrun by %d word(s)\n",
+ __FILE__, __LINE__, ctx->u.dc.factory.hp - ctx->u.dc.factory.hp_end);
}
- HRelease(p, ctx->u.dc.hp_end, ctx->u.dc.hp);
+ erts_factory_close(&ctx->u.dc.factory);
if (!is_first_call) {
erts_set_gc_state(p, 1);
}
BUMP_REDS(p, (initial_reds - ctx->reds) / B2T_BYTES_PER_REDUCTION);
- return ctx->u.dc.res;
+ ERTS_BIF_PREP_RET(ret_val, ctx->u.dc.res);
+ return ret_val;
default:
ASSERT(!"Unknown state in binary_to_term");
@@ -1541,15 +1623,24 @@ static Eterm binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* con
erts_set_gc_state(p, 0);
}
BUMP_ALL_REDS(p);
- BIF_TRAP1(&binary_to_term_trap_export, p, ctx->trap_bin);
+
+ ERTS_BIF_PREP_TRAP1(ret_val, &binary_to_term_trap_export,
+ p, ctx->trap_bin);
+
+ return ret_val;
}
-BIF_RETTYPE erts_internal_binary_to_term_1(BIF_ALIST_1)
+HIPE_WRAPPER_BIF_DISABLE_GC(binary_to_term, 1)
+
+BIF_RETTYPE binary_to_term_1(BIF_ALIST_1)
{
- return binary_to_term_int(BIF_P, 0, BIF_ARG_1, NULL);
+ return binary_to_term_int(BIF_P, 0, BIF_ARG_1, NULL, bif_export[BIF_binary_to_term_1],
+ BIF_ARG_1, THE_NON_VALUE);
}
-BIF_RETTYPE erts_internal_binary_to_term_2(BIF_ALIST_2)
+HIPE_WRAPPER_BIF_DISABLE_GC(binary_to_term, 2)
+
+BIF_RETTYPE binary_to_term_2(BIF_ALIST_2)
{
Eterm opts;
Eterm opt;
@@ -1570,7 +1661,8 @@ BIF_RETTYPE erts_internal_binary_to_term_2(BIF_ALIST_2)
if (is_not_nil(opts))
goto error;
- return binary_to_term_int(BIF_P, flags, BIF_ARG_1, NULL);
+ return binary_to_term_int(BIF_P, flags, BIF_ARG_1, NULL, bif_export[BIF_binary_to_term_2],
+ BIF_ARG_1, BIF_ARG_2);
error:
BIF_ERROR(BIF_P, BADARG);
@@ -1653,12 +1745,12 @@ erts_term_to_binary_simple(Process* p, Eterm Term, Uint size, int level, Uint fl
if ((endp = enc_term(NULL, Term, bytes, flags, NULL))
== NULL) {
- erl_exit(1, "%s, line %d: bad term: %x\n",
+ erts_exit(ERTS_ERROR_EXIT, "%s, line %d: bad term: %x\n",
__FILE__, __LINE__, Term);
}
real_size = endp - bytes;
if (real_size > size) {
- erl_exit(1, "%s, line %d: buffer overflow: %d word(s)\n",
+ erts_exit(ERTS_ERROR_EXIT, "%s, line %d: buffer overflow: %d word(s)\n",
__FILE__, __LINE__, real_size - size);
}
@@ -1698,12 +1790,12 @@ erts_term_to_binary_simple(Process* p, Eterm Term, Uint size, int level, Uint fl
bytes[0] = VERSION_MAGIC;
if ((endp = enc_term(NULL, Term, bytes+1, flags, NULL))
== NULL) {
- erl_exit(1, "%s, line %d: bad term: %x\n",
+ erts_exit(ERTS_ERROR_EXIT, "%s, line %d: bad term: %x\n",
__FILE__, __LINE__, Term);
}
real_size = endp - bytes;
if (real_size > size) {
- erl_exit(1, "%s, line %d: buffer overflow: %d word(s)\n",
+ erts_exit(ERTS_ERROR_EXIT, "%s, line %d: buffer overflow: %d word(s)\n",
__FILE__, __LINE__, endp - (bytes + size));
}
return erts_realloc_binary(bin, real_size);
@@ -1717,68 +1809,28 @@ erts_term_to_binary(Process* p, Eterm Term, int level, Uint flags) {
return erts_term_to_binary_simple(p, Term, size, level, flags);
}
-/* Define for testing */
-/* #define EXTREME_TTB_TRAPPING 1 */
+/* To test trapping, define EXTREME_TTB_TRAPPING in dist.h */
#ifndef EXTREME_TTB_TRAPPING
-#define TERM_TO_BINARY_LOOP_FACTOR 32
#define TERM_TO_BINARY_COMPRESS_CHUNK (1 << 18)
#else
-#define TERM_TO_BINARY_LOOP_FACTOR 1
#define TERM_TO_BINARY_COMPRESS_CHUNK 10
#endif
+#define TERM_TO_BINARY_MEMCPY_FACTOR 8
-
-typedef enum { TTBSize, TTBEncode, TTBCompress } TTBState;
-typedef struct TTBSizeContext_ {
- Uint flags;
- int level;
- Uint result;
- Eterm obj;
- ErtsEStack estack;
-} TTBSizeContext;
-
-typedef struct TTBEncodeContext_ {
- Uint flags;
- int level;
- byte* ep;
- Eterm obj;
- ErtsWStack wstack;
- Binary *result_bin;
-} TTBEncodeContext;
-
-typedef struct {
- Uint real_size;
- Uint dest_len;
- byte *dbytes;
- Binary *result_bin;
- Binary *destination_bin;
- z_stream stream;
-} TTBCompressContext;
-
-typedef struct {
- int alive;
- TTBState state;
- union {
- TTBSizeContext sc;
- TTBEncodeContext ec;
- TTBCompressContext cc;
- } s;
-} TTBContext;
-
-static void ttb_context_destructor(Binary *context_bin)
+static int ttb_context_destructor(Binary *context_bin)
{
TTBContext *context = ERTS_MAGIC_BIN_DATA(context_bin);
if (context->alive) {
context->alive = 0;
switch (context->state) {
case TTBSize:
- DESTROY_SAVED_ESTACK(&context->s.sc.estack);
+ DESTROY_SAVED_WSTACK(&context->s.sc.wstack);
break;
case TTBEncode:
DESTROY_SAVED_WSTACK(&context->s.ec.wstack);
if (context->s.ec.result_bin != NULL) { /* Set to NULL if ever made alive! */
- ASSERT(erts_refc_read(&(context->s.ec.result_bin->refc),0) == 0);
+ ASSERT(erts_refc_read(&(context->s.ec.result_bin->intern.refc),1));
erts_bin_free(context->s.ec.result_bin);
context->s.ec.result_bin = NULL;
}
@@ -1787,19 +1839,20 @@ static void ttb_context_destructor(Binary *context_bin)
erl_zlib_deflate_finish(&(context->s.cc.stream));
if (context->s.cc.destination_bin != NULL) { /* Set to NULL if ever made alive! */
- ASSERT(erts_refc_read(&(context->s.cc.destination_bin->refc),0) == 0);
+ ASSERT(erts_refc_read(&(context->s.cc.destination_bin->intern.refc),1));
erts_bin_free(context->s.cc.destination_bin);
context->s.cc.destination_bin = NULL;
}
if (context->s.cc.result_bin != NULL) { /* Set to NULL if ever made alive! */
- ASSERT(erts_refc_read(&(context->s.cc.result_bin->refc),0) == 0);
+ ASSERT(erts_refc_read(&(context->s.cc.result_bin->intern.refc),1));
erts_bin_free(context->s.cc.result_bin);
context->s.cc.result_bin = NULL;
}
break;
}
}
+ return 1;
}
static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint flags,
@@ -1829,8 +1882,8 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
#define RETURN_STATE() \
do { \
- hp = HAlloc(p, PROC_BIN_SIZE+3); \
- c_term = erts_mk_magic_binary_term(&hp, &MSO(p), context_b); \
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE+3); \
+ c_term = erts_mk_magic_ref(&hp, &MSO(p), context_b); \
res = TUPLE2(hp, Term, c_term); \
BUMP_ALL_REDS(p); \
return res; \
@@ -1841,7 +1894,7 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
/* Setup enough to get started */
context->state = TTBSize;
context->alive = 1;
- context->s.sc.estack.start = NULL;
+ context->s.sc.wstack.wstart = NULL;
context->s.sc.flags = flags;
context->s.sc.level = level;
} else {
@@ -1876,10 +1929,7 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
}
result_bin = erts_bin_nrml_alloc(size);
- result_bin->flags = 0;
- result_bin->orig_size = size;
- erts_refc_init(&result_bin->refc, 0);
- result_bin->orig_bytes[0] = VERSION_MAGIC;
+ result_bin->orig_bytes[0] = (byte)VERSION_MAGIC;
/* Next state immediately, no need to export context */
context->state = TTBEncode;
context->s.ec.flags = flags;
@@ -1918,8 +1968,7 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
pb->bytes = (byte*) result_bin->orig_bytes;
pb->flags = 0;
OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm));
- erts_refc_inc(&result_bin->refc, 1);
- if (context_b && erts_refc_read(&context_b->refc,0) == 0) {
+ if (context_b && erts_refc_read(&context_b->intern.refc,0) == 0) {
erts_bin_free(context_b);
}
return make_binary(pb);
@@ -1938,10 +1987,7 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
context->s.cc.result_bin = result_bin;
result_bin = erts_bin_nrml_alloc(real_size);
- result_bin->flags = 0;
- result_bin->orig_size = real_size;
- erts_refc_init(&result_bin->refc, 0);
- result_bin->orig_bytes[0] = VERSION_MAGIC;
+ result_bin->orig_bytes[0] = (byte) VERSION_MAGIC;
context->s.cc.destination_bin = result_bin;
context->s.cc.dest_len = 0;
@@ -1988,15 +2034,15 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
pb->next = MSO(p).first;
MSO(p).first = (struct erl_off_heap_header*)pb;
pb->val = result_bin;
+ ASSERT(erts_refc_read(&result_bin->intern.refc, 1));
pb->bytes = (byte*) result_bin->orig_bytes;
pb->flags = 0;
OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm));
- erts_refc_inc(&result_bin->refc, 1);
erts_bin_free(context->s.cc.result_bin);
context->s.cc.result_bin = NULL;
context->alive = 0;
BUMP_REDS(p, (this_time * CONTEXT_REDS) / TERM_TO_BINARY_COMPRESS_CHUNK);
- if (context_b && erts_refc_read(&context_b->refc,0) == 0) {
+ if (context_b && erts_refc_read(&context_b->intern.refc,0) == 0) {
erts_bin_free(context_b);
}
return make_binary(pb);
@@ -2015,13 +2061,13 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
pb->bytes = (byte*) result_bin->orig_bytes;
pb->flags = 0;
OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm));
- erts_refc_inc(&result_bin->refc, 1);
+ ASSERT(erts_refc_read(&result_bin->intern.refc, 1));
erl_zlib_deflate_finish(&(context->s.cc.stream));
erts_bin_free(context->s.cc.destination_bin);
context->s.cc.destination_bin = NULL;
context->alive = 0;
BUMP_REDS(p, (this_time * CONTEXT_REDS) / TERM_TO_BINARY_COMPRESS_CHUNK);
- if (context_b && erts_refc_read(&context_b->refc,0) == 0) {
+ if (context_b && erts_refc_read(&context_b->intern.refc,0) == 0) {
erts_bin_free(context_b);
}
return make_binary(pb);
@@ -2132,16 +2178,24 @@ enc_atom(ErtsAtomCacheMap *acmp, Eterm atom, byte *ep, Uint32 dflags)
return ep;
}
+/*
+ * We use this atom as sysname in local pid/port/refs
+ * for the ETS compressed format (DFLAG_INTERNAL_TAGS).
+ *
+ */
+#define INTERNAL_LOCAL_SYSNAME am_ErtsSecretAtom
+
static byte*
enc_pid(ErtsAtomCacheMap *acmp, Eterm pid, byte* ep, Uint32 dflags)
{
Uint on, os;
+ Eterm sysname = ((is_internal_pid(pid) && (dflags & DFLAG_INTERNAL_TAGS))
+ ? INTERNAL_LOCAL_SYSNAME : pid_node_name(pid));
+ Uint32 creation = pid_creation(pid);
+ byte* tagp = ep++;
- *ep++ = PID_EXT;
/* insert atom here containing host and sysname */
- ep = enc_atom(acmp, pid_node_name(pid), ep, dflags);
-
- /* two bytes for each number and serial */
+ ep = enc_atom(acmp, sysname, ep, dflags);
on = pid_number(pid);
os = pid_serial(pid);
@@ -2150,8 +2204,15 @@ enc_pid(ErtsAtomCacheMap *acmp, Eterm pid, byte* ep, Uint32 dflags)
ep += 4;
put_int32(os, ep);
ep += 4;
- *ep++ = (is_internal_pid(pid) && (dflags & DFLAG_INTERNAL_TAGS)) ?
- INTERNAL_CREATION : pid_creation(pid);
+ if (creation <= ERTS_MAX_LOCAL_CREATION) {
+ *tagp = PID_EXT;
+ *ep++ = creation;
+ } else {
+ ASSERT(is_external_pid(pid));
+ *tagp = NEW_PID_EXT;
+ put_int32(creation, ep);
+ ep += 4;
+ }
return ep;
}
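
A worked example of the tag choice above, mirroring the branch in enc_pid() (a sketch, not emulator code): creations up to ERTS_MAX_LOCAL_CREATION stay on the old one-byte wire format so older nodes keep understanding us, while larger creations, which can only come from external pids, switch to the four-byte NEW_PID_EXT layout:

    #include <stddef.h>
    #include <stdint.h>

    static size_t pid_creation_field_size(uint32_t creation)
    {
        /* PID_EXT carries a 1-byte creation; NEW_PID_EXT a 4-byte one */
        return (creation <= 3 /* ERTS_MAX_LOCAL_CREATION */) ? 1 : 4;
    }
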
@@ -2231,27 +2292,27 @@ dec_atom(ErtsDistExternal *edep, byte* ep, Eterm* objp)
return ep;
}
-static ERTS_INLINE ErlNode* dec_get_node(Eterm sysname, Uint creation)
+static ERTS_INLINE ErlNode* dec_get_node(Eterm sysname, Uint32 creation)
{
- switch (creation) {
- case INTERNAL_CREATION:
+ if (sysname == INTERNAL_LOCAL_SYSNAME) /* && DFLAG_INTERNAL_TAGS */
return erts_this_node;
- case ORIG_CREATION:
- if (sysname == erts_this_node->sysname) {
- creation = erts_this_node->creation;
- }
- }
+
+ if (sysname == erts_this_node->sysname
+ && (creation == erts_this_node->creation || creation == ORIG_CREATION))
+ return erts_this_node;
+
return erts_find_or_insert_node(sysname,creation);
}
static byte*
-dec_pid(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap, Eterm* objp)
+dec_pid(ErtsDistExternal *edep, ErtsHeapFactory* factory, byte* ep,
+ Eterm* objp, byte tag)
{
Eterm sysname;
Uint data;
Uint num;
Uint ser;
- Uint cre;
+ Uint32 cre;
ErlNode *node;
*objp = NIL; /* In case we fail, don't leave a hole in the heap */
@@ -2267,12 +2328,19 @@ dec_pid(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap, Ete
ep += 4;
if (ser > ERTS_MAX_PID_SERIAL)
return NULL;
- cre = get_int8(ep);
- ep += 1;
- if (!is_valid_creation(cre)) {
- return NULL;
+ if (tag == PID_EXT) {
+ cre = get_int8(ep);
+ ep += 1;
+ if (!is_valid_creation(cre)) {
+ return NULL;
+ }
+ } else {
+ ASSERT(tag == NEW_PID_EXT);
+ cre = get_int32(ep);
+ ep += 4;
}
+
data = make_pid_data(ser, num);
/*
@@ -2284,15 +2352,15 @@ dec_pid(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap, Ete
if(node == erts_this_node) {
*objp = make_internal_pid(data);
} else {
- ExternalThing *etp = (ExternalThing *) *hpp;
- *hpp += EXTERNAL_THING_HEAD_SIZE + 1;
+ ExternalThing *etp = (ExternalThing *) factory->hp;
+ factory->hp += EXTERNAL_THING_HEAD_SIZE + 1;
etp->header = make_external_pid_header(1);
- etp->next = off_heap->first;
+ etp->next = factory->off_heap->first;
etp->node = node;
etp->data.ui[0] = data;
- off_heap->first = (struct erl_off_heap_header*) etp;
+ factory->off_heap->first = (struct erl_off_heap_header*) etp;
*objp = make_external_pid(etp);
}
return ep;
@@ -2302,8 +2370,10 @@ dec_pid(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap, Ete
#define ENC_TERM ((Eterm) 0)
#define ENC_ONE_CONS ((Eterm) 1)
#define ENC_PATCH_FUN_SIZE ((Eterm) 2)
-#define ENC_LAST_ARRAY_ELEMENT ((Eterm) 3)
-
+#define ENC_BIN_COPY ((Eterm) 3)
+#define ENC_MAP_PAIR ((Eterm) 4)
+#define ENC_HASHMAP_NODE ((Eterm) 5)
+#define ENC_LAST_ARRAY_ELEMENT ((Eterm) 6)
static byte*
enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags,
@@ -2326,10 +2396,6 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
Eterm val;
FloatDef f;
Sint r = 0;
-#if HALFWORD_HEAP
- UWord wobj;
-#endif
-
if (ctx) {
WSTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
@@ -2339,6 +2405,9 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
WSTACK_RESTORE(s, &ctx->wstack);
ep = ctx->ep;
obj = ctx->obj;
+ if (is_non_value(obj)) {
+ goto outer_loop;
+ }
}
}
@@ -2346,11 +2415,8 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
outer_loop:
while (!WSTACK_ISEMPTY(s)) {
-#if HALFWORD_HEAP
- obj = (Eterm) (wobj = WSTACK_POP(s));
-#else
obj = WSTACK_POP(s);
-#endif
+
switch (val = WSTACK_POP(s)) {
case ENC_TERM:
break;
@@ -2362,49 +2428,76 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
obj = CAR(cons);
tl = CDR(cons);
- WSTACK_PUSH(s, is_list(tl) ? ENC_ONE_CONS : ENC_TERM);
- WSTACK_PUSH(s, tl);
+ WSTACK_PUSH2(s, (is_list(tl) ? ENC_ONE_CONS : ENC_TERM),
+ tl);
}
break;
case ENC_PATCH_FUN_SIZE:
{
-#if HALFWORD_HEAP
- byte* size_p = (byte *) wobj;
-#else
byte* size_p = (byte *) obj;
-#endif
put_int32(ep - size_p, size_p);
}
goto outer_loop;
+ case ENC_BIN_COPY: {
+ Uint bits = (Uint)obj;
+ Uint bitoffs = WSTACK_POP(s);
+ byte* bytes = (byte*) WSTACK_POP(s);
+ byte* dst = (byte*) WSTACK_POP(s);
+ if (bits > r * (TERM_TO_BINARY_MEMCPY_FACTOR * 8)) {
+ Uint n = r * TERM_TO_BINARY_MEMCPY_FACTOR;
+ WSTACK_PUSH5(s, (UWord)(dst + n), (UWord)(bytes + n), bitoffs,
+ ENC_BIN_COPY, bits - 8*n);
+ bits = 8*n;
+ copy_binary_to_buffer(dst, 0, bytes, bitoffs, bits);
+ obj = THE_NON_VALUE;
+ r = 0; /* yield */
+ break;
+ } else {
+ copy_binary_to_buffer(dst, 0, bytes, bitoffs, bits);
+ r -= bits / (TERM_TO_BINARY_MEMCPY_FACTOR * 8);
+ goto outer_loop;
+ }
+ }
+ case ENC_MAP_PAIR: {
+ Uint pairs_left = obj;
+ Eterm *vptr = (Eterm*) WSTACK_POP(s);
+ Eterm *kptr = (Eterm*) WSTACK_POP(s);
+
+ obj = *kptr;
+ if (--pairs_left > 0) {
+ WSTACK_PUSH4(s, (UWord)(kptr+1), (UWord)(vptr+1),
+ ENC_MAP_PAIR, pairs_left);
+ }
+ WSTACK_PUSH2(s, ENC_TERM, *vptr);
+ break;
+ }
+ case ENC_HASHMAP_NODE:
+ if (is_list(obj)) { /* leaf node [K|V] */
+ ptr = list_val(obj);
+ WSTACK_PUSH2(s, ENC_TERM, CDR(ptr));
+ obj = CAR(ptr);
+ }
+ break;
case ENC_LAST_ARRAY_ELEMENT:
/* obj is the tuple */
{
-#if HALFWORD_HEAP
- Eterm* ptr = (Eterm *) wobj;
-#else
Eterm* ptr = (Eterm *) obj;
-#endif
obj = *ptr;
}
break;
default: /* ENC_LAST_ARRAY_ELEMENT+1 and upwards */
{
-#if HALFWORD_HEAP
- Eterm* ptr = (Eterm *) wobj;
-#else
Eterm* ptr = (Eterm *) obj;
-#endif
- WSTACK_PUSH(s, val-1);
obj = *ptr++;
- WSTACK_PUSH(s, (UWord)ptr);
+ WSTACK_PUSH2(s, val-1, (UWord)ptr);
}
break;
}
L_jump_start:
- if (ctx && --r == 0) {
- *reds = r;
+ if (ctx && --r <= 0) {
+ *reds = 0;
ctx->obj = obj;
ctx->ep = ep;
WSTACK_SAVE(s, &ctx->wstack);
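Two things changed in this yield check: the test is now `--r <= 0` because the ENC_BIN_COPY case above subtracts whole chunks from the budget and can drive it negative, and a yielding encoder always reports zero reductions left. A sketch of the budgeted-copy idea (FACTOR stands in for TERM_TO_BINARY_MEMCPY_FACTOR; the value is illustrative):

    #include <string.h>
    #include <stddef.h>

    #define FACTOR 8   /* illustrative: bytes of memcpy charged per reduction */

    /* Copy at most budget*FACTOR bytes per slice; if work remains,
     * report how much so the caller can reschedule the rest. */
    static size_t copy_slice(unsigned char *dst, const unsigned char *src,
                             size_t len, long *budget)
    {
        size_t chunk = (size_t)(*budget > 0 ? *budget : 0) * FACTOR;
        if (chunk >= len) {            /* fits: charge proportionally */
            memcpy(dst, src, len);
            *budget -= (long)(len / FACTOR);
            return 0;                  /* done */
        }
        memcpy(dst, src, chunk);       /* partial copy, then yield */
        *budget = 0;
        return len - chunk;            /* bytes left for the next slice */
    }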
@@ -2487,16 +2580,28 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
case REF_DEF:
case EXTERNAL_REF_DEF: {
Uint32 *ref_num;
+ Eterm sysname = (((dflags & DFLAG_INTERNAL_TAGS) && is_internal_ref(obj))
+ ? INTERNAL_LOCAL_SYSNAME : ref_node_name(obj));
+ Uint32 creation = ref_creation(obj);
+ byte* tagp = ep++;
ASSERT(dflags & DFLAG_EXTENDED_REFERENCES);
- *ep++ = NEW_REFERENCE_EXT;
- i = ref_no_of_numbers(obj);
+ erts_magic_ref_save_bin(obj);
+
+ i = ref_no_numbers(obj);
put_int16(i, ep);
ep += 2;
- ep = enc_atom(acmp,ref_node_name(obj),ep,dflags);
- *ep++ = ((dflags & DFLAG_INTERNAL_TAGS) && is_internal_ref(obj)) ?
- INTERNAL_CREATION : ref_creation(obj);
+ ep = enc_atom(acmp, sysname, ep, dflags);
+ if (creation <= ERTS_MAX_LOCAL_CREATION) {
+ *tagp = NEW_REFERENCE_EXT;
+ *ep++ = creation;
+ } else {
+ ASSERT(is_external_ref(obj));
+ *tagp = NEWER_REFERENCE_EXT;
+ put_int32(creation, ep);
+ ep += 4;
+ }
ref_num = ref_numbers(obj);
for (j = 0; j < i; j++) {
put_int32(ref_num[j], ep);
@@ -2505,17 +2610,27 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
break;
}
case PORT_DEF:
- case EXTERNAL_PORT_DEF:
+ case EXTERNAL_PORT_DEF: {
+ Eterm sysname = (((dflags & DFLAG_INTERNAL_TAGS) && is_internal_port(obj))
+ ? INTERNAL_LOCAL_SYSNAME : port_node_name(obj));
+ Uint32 creation = port_creation(obj);
+ byte* tagp = ep++;
- *ep++ = PORT_EXT;
- ep = enc_atom(acmp,port_node_name(obj),ep,dflags);
+ ep = enc_atom(acmp, sysname, ep, dflags);
j = port_number(obj);
put_int32(j, ep);
ep += 4;
- *ep++ = ((dflags & DFLAG_INTERNAL_TAGS) && is_internal_port(obj)) ?
- INTERNAL_CREATION : port_creation(obj);
+ if (creation <= ERTS_MAX_LOCAL_CREATION) {
+ *tagp = PORT_EXT;
+ *ep++ = creation;
+ } else {
+ ASSERT(is_external_port(obj));
+ *tagp = NEW_PORT_EXT;
+ put_int32(creation, ep);
+ ep += 4;
+ }
break;
-
+ }
case LIST_DEF:
{
int is_str;
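Both the reference and the port case above now reserve the tag byte first (`byte* tagp = ep++;`) and backpatch it once the creation value is known: a small creation keeps the old external tag with a one-byte field, a bigger one selects the new tag with a 32-bit field. A sketch of the reserve-then-backpatch pattern (tag values and the threshold are illustrative):

    #include <stdint.h>

    static uint8_t *emit_creation(uint8_t *ep, uint32_t creation,
                                  uint8_t old_tag, uint8_t new_tag)
    {
        uint8_t *tagp = ep++;          /* reserve the tag byte */
        /* ... emit the fields common to both formats here ... */
        if (creation <= 3) {           /* fits the legacy 1-byte field */
            *tagp = old_tag;
            *ep++ = (uint8_t)creation;
        } else {                       /* needs the new 4-byte field */
            *tagp = new_tag;
            ep[0] = (uint8_t)(creation >> 24);
            ep[1] = (uint8_t)(creation >> 16);
            ep[2] = (uint8_t)(creation >> 8);
            ep[3] = (uint8_t)creation;
            ep += 4;
        }
        return ep;
    }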
@@ -2553,39 +2668,59 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
ep += 4;
}
if (i > 0) {
- WSTACK_PUSH(s, ENC_LAST_ARRAY_ELEMENT+i-1);
- WSTACK_PUSH(s, (UWord)ptr);
+ WSTACK_PUSH2(s, ENC_LAST_ARRAY_ELEMENT+i-1, (UWord)ptr);
}
break;
case MAP_DEF:
- {
- map_t *mp = (map_t*)map_val(obj);
- Uint size = map_get_size(mp);
+ if (is_flatmap(obj)) {
+ flatmap_t *mp = (flatmap_t*)flatmap_val(obj);
+ Uint size = flatmap_get_size(mp);
*ep++ = MAP_EXT;
put_int32(size, ep); ep += 4;
if (size > 0) {
- Eterm *kptr = map_get_keys(mp);
- Eterm *vptr = map_get_values(mp);
-
- for (i = size-1; i >= 1; i--) {
- WSTACK_PUSH(s, ENC_TERM);
- WSTACK_PUSH(s, (UWord) vptr[i]);
- WSTACK_PUSH(s, ENC_TERM);
- WSTACK_PUSH(s, (UWord) kptr[i]);
- }
+ Eterm *kptr = flatmap_get_keys(mp);
+ Eterm *vptr = flatmap_get_values(mp);
- WSTACK_PUSH(s, ENC_TERM);
- WSTACK_PUSH(s, (UWord) vptr[0]);
+ WSTACK_PUSH4(s, (UWord)kptr, (UWord)vptr,
+ ENC_MAP_PAIR, size);
+ }
+ } else {
+ Eterm hdr;
+ Uint node_sz;
+ ptr = boxed_val(obj);
+ hdr = *ptr;
+ ASSERT(is_header(hdr));
+ switch(hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ *ep++ = MAP_EXT;
+ ptr++;
+ put_int32(*ptr, ep); ep += 4;
+ node_sz = 16;
+ break;
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ *ep++ = MAP_EXT;
+ ptr++;
+ put_int32(*ptr, ep); ep += 4;
+ /*fall through*/
+ case HAMT_SUBTAG_NODE_BITMAP:
+ node_sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ ASSERT(node_sz < 17);
+ break;
+ default:
+ erts_exit(ERTS_ERROR_EXIT, "bad header\r\n");
+ }
- obj = kptr[0];
- goto L_jump_start;
+ ptr++;
+ WSTACK_RESERVE(s, node_sz*2);
+ while(node_sz--) {
+ WSTACK_FAST_PUSH(s, ENC_HASHMAP_NODE);
+ WSTACK_FAST_PUSH(s, *ptr++);
}
}
break;
-
case FLOAT_DEF:
GET_DOUBLE(obj, f);
if (dflags & DFLAG_NEW_FLOATS) {
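For hashmaps the encoder emits MAP_EXT plus the pair count at the head node, then pushes every child slot with ENC_HASHMAP_NODE; a leaf is a [K|V] cons whose key is encoded at once and whose value is queued as an ordinary term. A recursive sketch of that traversal (the real code stays iterative on the WSTACK; the types are illustrative):

    /* Sketch of the HAMT walk behind ENC_HASHMAP_NODE. */
    typedef struct node {
        int is_leaf;
        struct node *kids[16];         /* up to 16 slots per node */
        void *key, *val;               /* set when is_leaf */
    } Node;

    static void walk(Node *n, void (*emit)(void *term))
    {
        if (n->is_leaf) {
            emit(n->key);              /* key first ...            */
            emit(n->val);              /* ... then value (ENC_TERM) */
        } else {
            for (int i = 0; i < 16; i++)
                if (n->kids[i])
                    walk(n->kids[i], emit);
        }
    }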
@@ -2619,6 +2754,7 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
Uint bitoffs;
Uint bitsize;
byte* bytes;
+ byte* data_dst;
ERTS_GET_BINARY_BYTES(obj, bytes, bitoffs, bitsize);
if (dflags & DFLAG_INTERNAL_TAGS) {
@@ -2646,7 +2782,7 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
erts_emasculate_writable_binary(pb);
bytes += (pb->val->orig_bytes - before_realloc);
}
- erts_refc_inc(&pb->val->refc, 2);
+ erts_refc_inc(&pb->val->intern.refc, 2);
sys_memcpy(&tmp, pb, sizeof(ProcBin));
tmp.next = *off_heap;
@@ -2664,7 +2800,7 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
j = binary_size(obj);
put_int32(j, ep);
ep += 4;
- copy_binary_to_buffer(ep, 0, bytes, bitoffs, 8*j);
+ data_dst = ep;
ep += j;
} else if (dflags & DFLAG_BIT_BINARIES) {
/* Bit-level binary. */
@@ -2674,7 +2810,7 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
ep += 4;
*ep++ = bitsize;
ep[j] = 0; /* Zero unused bits at end of binary */
- copy_binary_to_buffer(ep, 0, bytes, bitoffs, 8*j+bitsize);
+ data_dst = ep;
ep += j + 1;
} else {
/*
@@ -2688,11 +2824,18 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
put_int32((j+1), ep);
ep += 4;
ep[j] = 0; /* Zero unused bits at end of binary */
- copy_binary_to_buffer(ep, 0, bytes, bitoffs, 8*j+bitsize);
+ data_dst = ep;
ep += j+1;
*ep++ = SMALL_INTEGER_EXT;
*ep++ = bitsize;
}
+ if (ctx && j > r * TERM_TO_BINARY_MEMCPY_FACTOR) {
+ WSTACK_PUSH5(s, (UWord)data_dst, (UWord)bytes, bitoffs,
+ ENC_BIN_COPY, 8*j + bitsize);
+ } else {
+ copy_binary_to_buffer(data_dst, 0, bytes, bitoffs,
+ 8 * j + bitsize);
+ }
}
break;
case EXPORT_DEF:
@@ -2700,9 +2843,10 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
Export* exp = *((Export **) (export_val(obj) + 1));
if ((dflags & DFLAG_EXPORT_PTR_TAG) != 0) {
*ep++ = EXPORT_EXT;
- ep = enc_atom(acmp, exp->code[0], ep, dflags);
- ep = enc_atom(acmp, exp->code[1], ep, dflags);
- ep = enc_term(acmp, make_small(exp->code[2]), ep, dflags, off_heap);
+ ep = enc_atom(acmp, exp->info.mfa.module, ep, dflags);
+ ep = enc_atom(acmp, exp->info.mfa.function, ep, dflags);
+ ep = enc_term(acmp, make_small(exp->info.mfa.arity),
+ ep, dflags, off_heap);
} else {
/* Tag, arity */
*ep++ = SMALL_TUPLE_EXT;
@@ -2710,10 +2854,10 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
ep += 1;
/* Module name */
- ep = enc_atom(acmp, exp->code[0], ep, dflags);
+ ep = enc_atom(acmp, exp->info.mfa.module, ep, dflags);
/* Function name */
- ep = enc_atom(acmp, exp->code[1], ep, dflags);
+ ep = enc_atom(acmp, exp->info.mfa.function, ep, dflags);
}
break;
}
@@ -2721,13 +2865,12 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
case FUN_DEF:
{
ErlFunThing* funp = (ErlFunThing *) fun_val(obj);
+ int ei;
if ((dflags & DFLAG_NEW_FUN_TAGS) != 0) {
- int ei;
-
*ep++ = NEW_FUN_EXT;
- WSTACK_PUSH(s, ENC_PATCH_FUN_SIZE);
- WSTACK_PUSH(s, (UWord) ep); /* Position for patching in size */
+ WSTACK_PUSH2(s, ENC_PATCH_FUN_SIZE,
+ (UWord) ep); /* Position for patching in size */
ep += 4;
*ep = funp->arity;
ep += 1;
@@ -2741,16 +2884,6 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
ep = enc_term(acmp, make_small(funp->fe->old_index), ep, dflags, off_heap);
ep = enc_term(acmp, make_small(funp->fe->old_uniq), ep, dflags, off_heap);
ep = enc_pid(acmp, funp->creator, ep, dflags);
-
- fun_env:
- for (ei = funp->num_free-1; ei > 0; ei--) {
- WSTACK_PUSH(s, ENC_TERM);
- WSTACK_PUSH(s, (UWord) funp->env[ei]);
- }
- if (funp->num_free != 0) {
- obj = funp->env[0];
- goto L_jump_start;
- }
} else {
/*
* Communicating with an obsolete erl_interface or
@@ -2782,7 +2915,13 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
*ep++ = SMALL_TUPLE_EXT;
put_int8(funp->num_free, ep);
ep += 1;
- goto fun_env;
+ }
+ for (ei = funp->num_free-1; ei > 0; ei--) {
+ WSTACK_PUSH2(s, ENC_TERM, (UWord) funp->env[ei]);
+ }
+ if (funp->num_free != 0) {
+ obj = funp->env[0];
+ goto L_jump_start;
}
}
break;
@@ -2836,57 +2975,42 @@ is_external_string(Eterm list, int* p_is_string)
return len;
}
-/* Assumes that the ones to undo are preluding the list. */
-static void
-undo_offheap_in_area(ErlOffHeap* off_heap, Eterm* start, Eterm* end)
-{
- const Uint area_sz = (end - start) * sizeof(Eterm);
- struct erl_off_heap_header* hdr;
- struct erl_off_heap_header** hdr_nextp = NULL;
-
- for (hdr = off_heap->first; ; hdr=hdr->next) {
- if (!in_area(hdr, start, area_sz)) {
- if (hdr_nextp != NULL) {
- *hdr_nextp = NULL;
- erts_cleanup_offheap(off_heap);
- off_heap->first = hdr;
- }
- break;
- }
- hdr_nextp = &hdr->next;
- }
- /* Assert that the ones to undo were indeed preluding the list. */
-#ifdef DEBUG
- for (hdr = off_heap->first; hdr != NULL; hdr = hdr->next) {
- ASSERT(!in_area(hdr, start, area_sz));
- }
-#endif /* DEBUG */
-}
+struct dec_term_hamt
+{
+ Eterm* objp; /* write result here */
+ Uint size; /* nr of leafs */
+ Eterm* leaf_array;
+};
/* Decode term from external format into *objp.
-** On failure return NULL and (R13B04) *hpp will be unchanged.
+** On failure, calls erts_factory_undo() and returns NULL.
*/
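dec_term now allocates exclusively through the heap factory, which is what makes the one-call rollback in the new error path possible. A generic sketch of the mark/undo idea behind erts_factory_undo() (the Arena type is illustrative, not the ERTS factory):

    #include <stddef.h>

    typedef struct { char *base, *top; } Arena;
    typedef struct { char *saved_top; } Mark;

    static Mark arena_mark(Arena *a)         { Mark m = { a->top }; return m; }
    static void arena_undo(Arena *a, Mark m) { a->top = m.saved_top; }

    /* Allocate; on decode failure, arena_undo() drops everything built
     * since the mark so no partial term escapes. */
    static void *arena_alloc(Arena *a, size_t n)
    {
        void *p = a->top;
        a->top += n;                   /* no bounds check in this sketch */
        return p;
    }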
static byte*
-dec_term(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap,
- Eterm* objp, B2TContext* ctx)
+dec_term(ErtsDistExternal *edep,
+ ErtsHeapFactory* factory,
+ byte* ep,
+ Eterm* objp,
+ B2TContext* ctx)
{
- Eterm* hp_saved;
+#define PSTACK_TYPE struct dec_term_hamt
+ PSTACK_DECLARE(hamt_array, 5);
int n;
ErtsAtomEncoding char_enc;
register Eterm* hp; /* Please don't take the address of hp */
- Eterm *maps_head; /* for validation of maps */
+ DECLARE_WSTACK(flat_maps); /* for preprocessing of small maps */
Eterm* next;
SWord reds;
+#ifdef DEBUG
+ Eterm* dbg_resultp = ctx ? &ctx->u.dc.res : objp;
+#endif
if (ctx) {
- hp_saved = ctx->u.dc.hp_start;
reds = ctx->reds;
next = ctx->u.dc.next;
ep = ctx->u.dc.ep;
- hpp = &ctx->u.dc.hp;
- maps_head = ctx->u.dc.maps_head;
+ factory = &ctx->u.dc.factory;
if (ctx->state != B2TDecode) {
int n_limit = reds;
@@ -2913,7 +3037,7 @@ dec_term(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap,
case B2TDecodeList:
objp = next - 2;
while (n > 0) {
- objp[0] = (Eterm) COMPRESS_POINTER(next);
+ objp[0] = (Eterm) next;
objp[1] = make_list(next);
next = objp;
objp -= 2;
@@ -2924,14 +3048,14 @@ dec_term(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap,
case B2TDecodeTuple:
objp = next - 1;
while (n-- > 0) {
- objp[0] = (Eterm) COMPRESS_POINTER(next);
+ objp[0] = (Eterm) next;
next = objp;
objp--;
}
break;
case B2TDecodeString:
- hp = *hpp;
+ hp = factory->hp;
hp[-1] = make_list(hp); /* overwrite the premature NIL */
while (n-- > 0) {
hp[0] = make_small(*ep++);
@@ -2939,7 +3063,7 @@ dec_term(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap,
hp += 2;
}
hp[-1] = NIL;
- *hpp = hp;
+ factory->hp = hp;
break;
case B2TDecodeBinary:
@@ -2961,20 +3085,26 @@ dec_term(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap,
return NULL;
}
}
+ PSTACK_CHANGE_ALLOCATOR(hamt_array, ERTS_ALC_T_SAVED_ESTACK);
+ WSTACK_CHANGE_ALLOCATOR(flat_maps, ERTS_ALC_T_SAVED_ESTACK);
+ if (ctx->u.dc.hamt_array.pstart) {
+ PSTACK_RESTORE(hamt_array, &ctx->u.dc.hamt_array);
+ }
+ if (ctx->u.dc.flat_maps.wstart) {
+ WSTACK_RESTORE(flat_maps, &ctx->u.dc.flat_maps);
+ }
}
else {
- hp_saved = *hpp;
reds = ERTS_SWORD_MAX;
next = objp;
*next = (Eterm) (UWord) NULL;
- maps_head = NULL;
}
- hp = *hpp;
+ hp = factory->hp;
while (next != NULL) {
objp = next;
- next = (Eterm *) EXPAND_POINTER(*objp);
+ next = (Eterm *) *objp;
switch (*ep++) {
case INTEGER_EXT:
@@ -2982,10 +3112,10 @@ dec_term(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap,
Sint sn = get_int32(ep);
ep += 4;
-#if defined(ARCH_64) && !HALFWORD_HEAP
+#if defined(ARCH_64)
*objp = make_small(sn);
#else
- if (MY_IS_SSMALL(sn)) {
+ if (IS_SSMALL(sn)) {
*objp = make_small(sn);
} else {
*objp = small_to_big(sn, hp);
@@ -3031,6 +3161,8 @@ dec_term(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap,
big = make_small(0);
} else {
big = bytes_to_big(first, n, neg, hp);
+ if (is_nil(big))
+ goto error;
if (is_big(big)) {
hp += big_arity(big) + 1;
}
@@ -3103,7 +3235,7 @@ dec_term_atom_common:
reds -= n;
}
while (n-- > 0) {
- objp[0] = (Eterm) COMPRESS_POINTER(next);
+ objp[0] = (Eterm) next;
next = objp;
objp--;
}
@@ -3121,8 +3253,8 @@ dec_term_atom_common:
*objp = make_list(hp);
hp += 2 * n;
objp = hp - 2;
- objp[0] = (Eterm) COMPRESS_POINTER((objp+1));
- objp[1] = (Eterm) COMPRESS_POINTER(next);
+ objp[0] = (Eterm) (objp+1);
+ objp[1] = (Eterm) next;
next = objp;
objp -= 2;
n--;
@@ -3135,7 +3267,7 @@ dec_term_atom_common:
reds -= n;
}
while (n > 0) {
- objp[0] = (Eterm) COMPRESS_POINTER(next);
+ objp[0] = (Eterm) next;
objp[1] = make_list(next);
next = objp;
objp -= 2;
@@ -3203,20 +3335,23 @@ dec_term_atom_common:
hp += FLOAT_SIZE_OBJECT;
break;
}
- case PID_EXT:
- *hpp = hp;
- ep = dec_pid(edep, hpp, ep, off_heap, objp);
- hp = *hpp;
+ case PID_EXT:
+ case NEW_PID_EXT:
+ factory->hp = hp;
+ ep = dec_pid(edep, factory, ep, objp, ep[-1]);
+ hp = factory->hp;
if (ep == NULL) {
goto error;
}
break;
- case PORT_EXT:
+ case PORT_EXT:
+ case NEW_PORT_EXT:
{
Eterm sysname;
ErlNode *node;
Uint num;
- Uint cre;
+ Uint32 cre;
+ byte tag = ep[-1];
if ((ep = dec_atom(edep, ep, &sysname)) == NULL) {
goto error;
@@ -3225,12 +3360,17 @@ dec_term_atom_common:
goto error;
}
ep += 4;
- cre = get_int8(ep);
- ep++;
- if (!is_valid_creation(cre)) {
- goto error;
- }
-
+ if (tag == PORT_EXT) {
+ cre = get_int8(ep);
+ ep++;
+ if (!is_valid_creation(cre)) {
+ goto error;
+ }
+ }
+ else {
+ cre = get_int32(ep);
+ ep += 4;
+ }
node = dec_get_node(sysname, cre);
if(node == erts_this_node) {
*objp = make_internal_port(num);
@@ -3240,11 +3380,11 @@ dec_term_atom_common:
hp += EXTERNAL_THING_HEAD_SIZE + 1;
etp->header = make_external_port_header(1);
- etp->next = off_heap->first;
+ etp->next = factory->off_heap->first;
etp->node = node;
etp->data.ui[0] = num;
- off_heap->first = (struct erl_off_heap_header*)etp;
+ factory->off_heap->first = (struct erl_off_heap_header*)etp;
*objp = make_external_port(etp);
}
@@ -3255,7 +3395,7 @@ dec_term_atom_common:
Eterm sysname;
ErlNode *node;
int i;
- Uint cre;
+ Uint32 cre;
Uint32 *ref_num;
Uint32 r0;
Uint ref_words;
@@ -3279,9 +3419,6 @@ dec_term_atom_common:
ref_words = get_int16(ep);
ep += 2;
- if (ref_words > ERTS_MAX_REF_NUMBERS)
- goto error;
-
if ((ep = dec_atom(edep, ep, &sysname)) == NULL)
goto error;
@@ -3294,58 +3431,101 @@ dec_term_atom_common:
ep += 4;
if (r0 >= MAX_REFERENCE)
goto error;
+ goto ref_ext_common;
+
+ case NEWER_REFERENCE_EXT:
+ ref_words = get_int16(ep);
+ ep += 2;
- ref_ext_common:
+ if ((ep = dec_atom(edep, ep, &sysname)) == NULL)
+ goto error;
+
+ cre = get_int32(ep);
+ ep += 4;
+ r0 = get_int32(ep); /* allow full word */
+ ep += 4;
+
+ ref_ext_common: {
+ ErtsORefThing *rtp;
+
+ if (ref_words > ERTS_MAX_REF_NUMBERS)
+ goto error;
node = dec_get_node(sysname, cre);
if(node == erts_this_node) {
- RefThing *rtp = (RefThing *) hp;
- ref_num = (Uint32 *) (hp + REF_THING_HEAD_SIZE);
+
+ rtp = (ErtsORefThing *) hp;
+ ref_num = &rtp->num[0];
+ if (ref_words != ERTS_REF_NUMBERS) {
+ int i;
+ if (ref_words > ERTS_REF_NUMBERS)
+ goto error; /* Not a ref that we created... */
+ for (i = ref_words; i < ERTS_REF_NUMBERS; i++)
+ ref_num[i] = 0;
+ }
-#if defined(ARCH_64) && !HALFWORD_HEAP
- hp += REF_THING_HEAD_SIZE + ref_words/2 + 1;
- rtp->header = make_ref_thing_header(ref_words/2 + 1);
-#else
- hp += REF_THING_HEAD_SIZE + ref_words;
- rtp->header = make_ref_thing_header(ref_words);
+#ifdef ERTS_ORDINARY_REF_MARKER
+ rtp->marker = ERTS_ORDINARY_REF_MARKER;
#endif
+ hp += ERTS_REF_THING_SIZE;
+ rtp->header = ERTS_REF_THING_HEADER;
*objp = make_internal_ref(rtp);
}
else {
ExternalThing *etp = (ExternalThing *) hp;
-#if defined(ARCH_64) && !HALFWORD_HEAP
+ rtp = NULL;
+#if defined(ARCH_64)
hp += EXTERNAL_THING_HEAD_SIZE + ref_words/2 + 1;
#else
hp += EXTERNAL_THING_HEAD_SIZE + ref_words;
#endif
-#if defined(ARCH_64) && !HALFWORD_HEAP
+#if defined(ARCH_64)
etp->header = make_external_ref_header(ref_words/2 + 1);
#else
etp->header = make_external_ref_header(ref_words);
#endif
- etp->next = off_heap->first;
+ etp->next = factory->off_heap->first;
etp->node = node;
- off_heap->first = (struct erl_off_heap_header*)etp;
+ factory->off_heap->first = (struct erl_off_heap_header*)etp;
*objp = make_external_ref(etp);
ref_num = &(etp->data.ui32[0]);
+#if defined(ARCH_64)
+ *(ref_num++) = ref_words /* 32-bit arity */;
+#endif
}
-#if defined(ARCH_64) && !HALFWORD_HEAP
- *(ref_num++) = ref_words /* 32-bit arity */;
-#endif
ref_num[0] = r0;
+
for(i = 1; i < ref_words; i++) {
ref_num[i] = get_int32(ep);
ep += 4;
}
-#if defined(ARCH_64) && !HALFWORD_HEAP
+#if defined(ARCH_64)
if ((1 + ref_words) % 2)
ref_num[ref_words] = 0;
#endif
+ if (node == erts_this_node) {
+ /* Check if it was a magic reference... */
+ ErtsMagicBinary *mb = erts_magic_ref_lookup_bin(ref_num);
+ if (mb) {
+ /*
+ * Was a magic ref; adjust it...
+ *
+ * Refc on binary was increased by lookup above...
+ */
+ ASSERT(rtp);
+ hp = (Eterm *) rtp;
+ write_magic_ref_thing(hp, factory->off_heap, mb);
+ OH_OVERHEAD(factory->off_heap,
+ mb->orig_size / sizeof(Eterm));
+ hp += ERTS_MAGIC_REF_THING_SIZE;
+ }
+ }
break;
}
+ }
case BINARY_EXT:
{
n = get_int32(ep);
@@ -3362,15 +3542,13 @@ dec_term_atom_common:
} else {
Binary* dbin = erts_bin_nrml_alloc(n);
ProcBin* pb;
- dbin->flags = 0;
- dbin->orig_size = n;
- erts_refc_init(&dbin->refc, 1);
pb = (ProcBin *) hp;
hp += PROC_BIN_SIZE;
pb->thing_word = HEADER_PROC_BIN;
pb->size = n;
- pb->next = off_heap->first;
- off_heap->first = (struct erl_off_heap_header*)pb;
+ pb->next = factory->off_heap->first;
+ factory->off_heap->first = (struct erl_off_heap_header*)pb;
+ OH_OVERHEAD(factory->off_heap, pb->size / sizeof(Eterm));
pb->val = dbin;
pb->bytes = (byte*) dbin->orig_bytes;
pb->flags = 0;
@@ -3416,14 +3594,12 @@ dec_term_atom_common:
Binary* dbin = erts_bin_nrml_alloc(n);
ProcBin* pb;
- dbin->flags = 0;
- dbin->orig_size = n;
- erts_refc_init(&dbin->refc, 1);
pb = (ProcBin *) hp;
pb->thing_word = HEADER_PROC_BIN;
pb->size = n;
- pb->next = off_heap->first;
- off_heap->first = (struct erl_off_heap_header*)pb;
+ pb->next = factory->off_heap->first;
+ factory->off_heap->first = (struct erl_off_heap_header*)pb;
+ OH_OVERHEAD(factory->off_heap, pb->size / sizeof(Eterm));
pb->val = dbin;
pb->bytes = (byte*) dbin->orig_bytes;
pb->flags = 0;
@@ -3475,9 +3651,9 @@ dec_term_atom_common:
if ((ep = dec_atom(edep, ep, &name)) == NULL) {
goto error;
}
- *hpp = hp;
- ep = dec_term(edep, hpp, ep, off_heap, &temp, NULL);
- hp = *hpp;
+ factory->hp = hp;
+ ep = dec_term(edep, factory, ep, &temp, NULL);
+ hp = factory->hp;
if (ep == NULL) {
goto error;
}
@@ -3494,57 +3670,63 @@ dec_term_atom_common:
}
*objp = make_export(hp);
*hp++ = HEADER_EXPORT;
-#if HALFWORD_HEAP
- *((UWord *) (UWord) hp) = (UWord) erts_export_get_or_make_stub(mod, name, arity);
- hp += 2;
-#else
*hp++ = (Eterm) erts_export_get_or_make_stub(mod, name, arity);
-#endif
break;
}
break;
case MAP_EXT:
{
- map_t *mp;
Uint32 size,n;
Eterm *kptr,*vptr;
Eterm keys;
size = get_int32(ep); ep += 4;
- keys = make_tuple(hp);
- *hp++ = make_arityval(size);
- hp += size;
- kptr = hp - 1;
-
- mp = (map_t*)hp;
- hp += MAP_HEADER_SIZE;
- hp += size;
- vptr = hp - 1;
-
- /* kptr, last word for keys
- * vptr, last word for values
- */
-
- /*
- * Use thing_word to link through decoded maps.
- * The list of maps is for later validation.
- */
-
- mp->thing_word = (Eterm) COMPRESS_POINTER(maps_head);
- maps_head = (Eterm *) mp;
-
- mp->size = size;
- mp->keys = keys;
- *objp = make_map(mp);
-
- for (n = size; n; n--) {
- *vptr = (Eterm) COMPRESS_POINTER(next);
- *kptr = (Eterm) COMPRESS_POINTER(vptr);
- next = kptr;
- vptr--;
- kptr--;
- }
+ if (size <= MAP_SMALL_MAP_LIMIT) {
+ flatmap_t *mp;
+
+ keys = make_tuple(hp);
+ *hp++ = make_arityval(size);
+ hp += size;
+ kptr = hp - 1;
+
+ mp = (flatmap_t*)hp;
+ hp += MAP_HEADER_FLATMAP_SZ;
+ hp += size;
+ vptr = hp - 1;
+
+ /* kptr, last word for keys
+ * vptr, last word for values
+ */
+
+ WSTACK_PUSH(flat_maps, (UWord)mp);
+ mp->thing_word = MAP_HEADER_FLATMAP;
+ mp->size = size;
+ mp->keys = keys;
+ *objp = make_flatmap(mp);
+
+ for (n = size; n; n--) {
+ *vptr = (Eterm) next;
+ *kptr = (Eterm) vptr;
+ next = kptr;
+ vptr--;
+ kptr--;
+ }
+ }
+ else { /* Make hamt */
+ struct dec_term_hamt* hamt = PSTACK_PUSH(hamt_array);
+
+ hamt->objp = objp;
+ hamt->size = size;
+ hamt->leaf_array = hp;
+
+ for (n = size; n; n--) {
+ CDR(hp) = (Eterm) next;
+ CAR(hp) = (Eterm) &CDR(hp);
+ next = &CAR(hp);
+ hp += 2;
+ }
+ }
}
break;
case NEW_FUN_EXT:
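The map hunk above shows dec_term's central trick: a slot that has not been decoded yet temporarily stores a plain pointer to the next pending slot (`next = (Eterm *) *objp;` in the main loop), so one linear pass over the external data fills an arbitrarily nested term without recursion. A self-contained sketch of that slot threading:

    #include <stdint.h>
    #include <stddef.h>

    typedef uintptr_t Term;

    /* Fill n slots in input order by keeping an intrusive list of
     * pending slots inside the slots themselves. */
    static void thread_demo(Term *slots, size_t n, const Term *input)
    {
        Term *next = NULL;
        for (size_t i = n; i-- > 0; ) {  /* chain the slots, last to first */
            slots[i] = (Term)next;
            next = &slots[i];
        }
        while (next) {                   /* decode loop: pop head, store value */
            Term *objp = next;
            next = (Term *)*objp;
            *objp = *input++;
        }
    }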
@@ -3578,9 +3760,9 @@ dec_term_atom_common:
if ((ep = dec_atom(edep, ep, &module)) == NULL) {
goto error;
}
- *hpp = hp;
+ factory->hp = hp;
/* Index */
- if ((ep = dec_term(edep, hpp, ep, off_heap, &temp, NULL)) == NULL) {
+ if ((ep = dec_term(edep, factory, ep, &temp, NULL)) == NULL) {
goto error;
}
if (!is_small(temp)) {
@@ -3589,7 +3771,7 @@ dec_term_atom_common:
old_index = unsigned_val(temp);
/* Uniq */
- if ((ep = dec_term(edep, hpp, ep, off_heap, &temp, NULL)) == NULL) {
+ if ((ep = dec_term(edep, factory, ep, &temp, NULL)) == NULL) {
goto error;
}
if (!is_small(temp)) {
@@ -3601,27 +3783,26 @@ dec_term_atom_common:
* It is safe to link the fun into the fun list only when
* no more validity tests can fail.
*/
- funp->next = off_heap->first;
- off_heap->first = (struct erl_off_heap_header*)funp;
+ funp->next = factory->off_heap->first;
+ factory->off_heap->first = (struct erl_off_heap_header*)funp;
funp->fe = erts_put_fun_entry2(module, old_uniq, old_index,
uniq, index, arity);
funp->arity = arity;
#ifdef HIPE
if (funp->fe->native_address == NULL) {
- hipe_set_closure_stub(funp->fe, num_free);
+ hipe_set_closure_stub(funp->fe);
}
- funp->native_address = funp->fe->native_address;
#endif
- hp = *hpp;
+ hp = factory->hp;
/* Environment */
for (i = num_free-1; i >= 0; i--) {
- funp->env[i] = (Eterm) COMPRESS_POINTER(next);
+ funp->env[i] = (Eterm) next;
next = funp->env + i;
}
/* Creator */
- funp->creator = (Eterm) COMPRESS_POINTER(next);
+ funp->creator = (Eterm) next;
next = &(funp->creator);
break;
}
@@ -3639,15 +3820,15 @@ dec_term_atom_common:
ep += 4;
hp += ERL_FUN_SIZE;
hp += num_free;
- *hpp = hp;
+ factory->hp = hp;
funp->thing_word = HEADER_FUN;
funp->num_free = num_free;
*objp = make_fun(funp);
/* Creator pid */
- if (*ep != PID_EXT
- || (ep = dec_pid(edep, hpp, ++ep, off_heap,
- &funp->creator))==NULL) {
+ if ((*ep != PID_EXT && *ep != NEW_PID_EXT)
+ || (ep = dec_pid(edep, factory, ep+1,
+ &funp->creator, *ep))==NULL) {
goto error;
}
@@ -3657,7 +3838,7 @@ dec_term_atom_common:
}
/* Index */
- if ((ep = dec_term(edep, hpp, ep, off_heap, &temp, NULL)) == NULL) {
+ if ((ep = dec_term(edep, factory, ep, &temp, NULL)) == NULL) {
goto error;
}
if (!is_small(temp)) {
@@ -3666,7 +3847,7 @@ dec_term_atom_common:
old_index = unsigned_val(temp);
/* Uniq */
- if ((ep = dec_term(edep, hpp, ep, off_heap, &temp, NULL)) == NULL) {
+ if ((ep = dec_term(edep, factory, ep, &temp, NULL)) == NULL) {
goto error;
}
if (!is_small(temp)) {
@@ -3677,20 +3858,17 @@ dec_term_atom_common:
* It is safe to link the fun into the fun list only when
* no more validity tests can fail.
*/
- funp->next = off_heap->first;
- off_heap->first = (struct erl_off_heap_header*)funp;
+ funp->next = factory->off_heap->first;
+ factory->off_heap->first = (struct erl_off_heap_header*)funp;
old_uniq = unsigned_val(temp);
funp->fe = erts_put_fun_entry(module, old_uniq, old_index);
funp->arity = funp->fe->address[-1] - num_free;
-#ifdef HIPE
- funp->native_address = funp->fe->native_address;
-#endif
- hp = *hpp;
+ hp = factory->hp;
/* Environment */
for (i = num_free-1; i >= 0; i--) {
- funp->env[i] = (Eterm) COMPRESS_POINTER(next);
+ funp->env[i] = (Eterm) next;
next = funp->env + i;
}
break;
@@ -3718,10 +3896,11 @@ dec_term_atom_common:
sys_memcpy(pb, ep, sizeof(ProcBin));
ep += sizeof(ProcBin);
- erts_refc_inc(&pb->val->refc, 1);
+ erts_refc_inc(&pb->val->intern.refc, 1);
hp += PROC_BIN_SIZE;
- pb->next = off_heap->first;
- off_heap->first = (struct erl_off_heap_header*)pb;
+ pb->next = factory->off_heap->first;
+ factory->off_heap->first = (struct erl_off_heap_header*)pb;
+ OH_OVERHEAD(factory->off_heap, pb->size / sizeof(Eterm));
pb->flags = 0;
*objp = make_binary(pb);
break;
@@ -3735,10 +3914,11 @@ dec_term_atom_common:
sys_memcpy(pb, ep, sizeof(ProcBin));
ep += sizeof(ProcBin);
- erts_refc_inc(&pb->val->refc, 1);
+ erts_refc_inc(&pb->val->intern.refc, 1);
hp += PROC_BIN_SIZE;
- pb->next = off_heap->first;
- off_heap->first = (struct erl_off_heap_header*)pb;
+ pb->next = factory->off_heap->first;
+ factory->off_heap->first = (struct erl_off_heap_header*)pb;
+ OH_OVERHEAD(factory->off_heap, pb->size / sizeof(Eterm));
pb->flags = 0;
sub = (ErlSubBin*)hp;
@@ -3764,8 +3944,13 @@ dec_term_atom_common:
if (next || ctx->state != B2TDecode) {
ctx->u.dc.ep = ep;
ctx->u.dc.next = next;
- ctx->u.dc.hp = hp;
- ctx->u.dc.maps_head = maps_head;
+ ctx->u.dc.factory.hp = hp;
+ if (!WSTACK_ISEMPTY(flat_maps)) {
+ WSTACK_SAVE(flat_maps, &ctx->u.dc.flat_maps);
+ }
+ if (!PSTACK_IS_EMPTY(hamt_array)) {
+ PSTACK_SAVE(hamt_array, &ctx->u.dc.hamt_array);
+ }
ctx->reds = 0;
return NULL;
}
@@ -3776,24 +3961,47 @@ dec_term_atom_common:
}
}
- /* Iterate through all the maps and check for validity and sort keys
+ ASSERT(hp <= factory->hp_end
+ || (factory->mode == FACTORY_CLOSED && is_immed(*dbg_resultp)));
+ factory->hp = hp;
+ /*
+ * From here on, the factory may produce (more) heap fragments.
+ */
+
+ if (!PSTACK_IS_EMPTY(hamt_array)) {
+ do {
+ struct dec_term_hamt* hamt = PSTACK_TOP(hamt_array);
+
+ *hamt->objp = erts_hashmap_from_array(factory,
+ hamt->leaf_array,
+ hamt->size,
+ 1);
+ if (is_non_value(*hamt->objp))
+ goto error_hamt;
+
+ (void) PSTACK_POP(hamt_array);
+ } while (!PSTACK_IS_EMPTY(hamt_array));
+ PSTACK_DESTROY(hamt_array);
+ }
+
+ /* Iterate through all the (flat)maps, validate them and sort their keys
 * - done here, once we know the whole term decoded completely.
*/
- while (maps_head) {
- next = (Eterm *)(EXPAND_POINTER(*maps_head));
- *maps_head = MAP_HEADER;
- if (!erts_validate_and_sort_map((map_t*)maps_head))
- goto error;
- maps_head = next;
+ while(!WSTACK_ISEMPTY(flat_maps)) {
+ next = (Eterm *)WSTACK_POP(flat_maps);
+ if (!erts_validate_and_sort_flatmap((flatmap_t*)next))
+ goto error;
}
+ WSTACK_DESTROY(flat_maps);
+
+ ASSERT((Eterm*)*dbg_resultp != NULL);
if (ctx) {
ctx->state = B2TDone;
ctx->reds = reds;
}
- *hpp = hp;
return ep;
error:
@@ -3801,15 +4009,21 @@ error:
* Must unlink all off-heap objects that may have been
* linked into the process.
*/
- if (hp < *hpp) { /* Sometimes we used hp and sometimes *hpp */
- hp = *hpp; /* the largest must be the freshest */
+ if (factory->mode != FACTORY_CLOSED) {
+ if (factory->hp < hp) { /* Sometimes we used hp and sometimes factory->hp */
+ factory->hp = hp; /* the largest must be the freshest */
+ }
}
- undo_offheap_in_area(off_heap, hp_saved, hp);
- *hpp = hp_saved;
+ else ASSERT(!factory->hp || factory->hp == hp);
+
+error_hamt:
+ erts_factory_undo(factory);
+ PSTACK_DESTROY(hamt_array);
if (ctx) {
ctx->state = B2TDecodeFail;
ctx->reds = reds;
}
+ WSTACK_DESTROY(flat_maps);
return NULL;
}
@@ -3828,51 +4042,35 @@ static int
encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj,
unsigned dflags, Sint *reds, Uint *res)
{
- DECLARE_ESTACK(s);
+ DECLARE_WSTACK(s);
Uint m, i, arity;
Uint result = 0;
Sint r = 0;
if (ctx) {
- ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
+ WSTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
r = *reds;
- if (ctx->estack.start) { /* restore saved stack */
- ESTACK_RESTORE(s, &ctx->estack);
+ if (ctx->wstack.wstart) { /* restore saved stack */
+ WSTACK_RESTORE(s, &ctx->wstack);
result = ctx->result;
obj = ctx->obj;
}
}
- goto L_jump_start;
+#define LIST_TAIL_OP ((0 << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER)
+#define TERM_ARRAY_OP(N) (((N) << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER)
+#define TERM_ARRAY_OP_DEC(OP) ((OP) - (1 << _TAG_PRIMARY_SIZE))
+
+
+ for (;;) {
+ ASSERT(!is_header(obj));
- outer_loop:
- while (!ESTACK_ISEMPTY(s)) {
- obj = ESTACK_POP(s);
- handle_popped_obj:
- if (is_list(obj)) {
- Eterm* cons = list_val(obj);
- Eterm tl;
-
- tl = CDR(cons);
- obj = CAR(cons);
- ESTACK_PUSH(s, tl);
- } else if (is_nil(obj)) {
- result++;
- goto outer_loop;
- } else {
- /*
- * Other term (in the tail of a non-proper list or
- * in a fun's environment).
- */
- }
-
- L_jump_start:
if (ctx && --r == 0) {
*reds = r;
ctx->obj = obj;
ctx->result = result;
- ESTACK_SAVE(s, &ctx->estack);
+ WSTACK_SAVE(s, &ctx->wstack);
return -1;
}
switch (tag_val_def(obj)) {
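Because the size pass now keeps raw words on a WSTACK instead of tagged Eterms on an ESTACK, it needs operation words that no stacked term can collide with: LIST_TAIL_OP and TERM_ARRAY_OP(N) carry TAG_PRIMARY_HEADER, which is why the loop can `ASSERT(!is_header(obj))` for real terms, and TERM_ARRAY_OP_DEC counts an array operation down in place. A sketch with an illustrative tag layout (not the real Eterm encoding):

    #include <stdint.h>
    #include <assert.h>

    typedef uintptr_t UWord;

    /* Low 2 bits = 0 marks an opcode; the count rides in the upper bits. */
    #define OPC(n)        (((UWord)(n) << 2) | 0u)
    #define IS_OPC(w)     (((w) & 3u) == 0u)
    #define OPC_COUNT(w)  ((w) >> 2)
    #define OPC_DEC(w)    ((w) - ((UWord)1 << 2))   /* cf. TERM_ARRAY_OP_DEC */

    static void demo(void)
    {
        UWord op = OPC(3);                 /* "3 array elements left" */
        assert(IS_OPC(op) && OPC_COUNT(op) == 3);
        op = OPC_DEC(op);                  /* consume one element */
        assert(OPC_COUNT(op) == 2);
    }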
@@ -3933,20 +4131,29 @@ encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj,
else
result += 1 + 4 + 1 + i; /* tag,size,sign,digits */
break;
+ case EXTERNAL_PID_DEF:
+ if (external_pid_creation(obj) > ERTS_MAX_LOCAL_CREATION)
+ result += 3;
+ /*fall through*/
case PID_DEF:
- case EXTERNAL_PID_DEF:
result += (1 + encode_size_struct2(acmp, pid_node_name(obj), dflags) +
4 + 4 + 1);
break;
+ case EXTERNAL_REF_DEF:
+ if (external_ref_creation(obj) > ERTS_MAX_LOCAL_CREATION)
+ result += 3;
+ /*fall through*/
case REF_DEF:
- case EXTERNAL_REF_DEF:
ASSERT(dflags & DFLAG_EXTENDED_REFERENCES);
- i = ref_no_of_numbers(obj);
+ i = ref_no_numbers(obj);
result += (1 + 2 + encode_size_struct2(acmp, ref_node_name(obj), dflags) +
1 + 4*i);
break;
- case PORT_DEF:
- case EXTERNAL_PORT_DEF:
+ case EXTERNAL_PORT_DEF:
+ if (external_port_creation(obj) > ERTS_MAX_LOCAL_CREATION)
+ result += 3;
+ /*fall through*/
+ case PORT_DEF:
result += (1 + encode_size_struct2(acmp, port_node_name(obj), dflags) +
4 + 1);
break;
@@ -3955,70 +4162,79 @@ encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj,
result += m + 2 + 1;
} else {
result += 5;
- goto handle_popped_obj;
+ WSTACK_PUSH2(s, (UWord)CDR(list_val(obj)), (UWord)LIST_TAIL_OP);
+ obj = CAR(list_val(obj));
+ continue; /* big loop */
}
break;
case TUPLE_DEF:
{
Eterm* ptr = tuple_val(obj);
- Uint i;
arity = arityval(*ptr);
if (arity <= 0xff) {
result += 1 + 1;
} else {
result += 1 + 4;
}
- for (i = 1; i <= arity; ++i) {
- if (is_list(ptr[i])) {
- if ((m = is_string(obj)) && (m < MAX_STRING_LEN)) {
- result += m + 2 + 1;
- } else {
- result += 5;
- }
- }
- ESTACK_PUSH(s,ptr[i]);
+ if (arity > 1) {
+ WSTACK_PUSH2(s, (UWord) (ptr + 2),
+ (UWord) TERM_ARRAY_OP(arity-1));
}
- goto outer_loop;
+ else if (arity == 0) {
+ break;
+ }
+ obj = ptr[1];
+ continue; /* big loop */
}
- break;
case MAP_DEF:
- {
- map_t *mp = (map_t*)map_val(obj);
- Uint size = map_get_size(mp);
- Uint i;
- Eterm *ptr;
+ if (is_flatmap(obj)) {
+ flatmap_t *mp = (flatmap_t*)flatmap_val(obj);
+ Uint size = flatmap_get_size(mp);
result += 1 + 4; /* tag + 4 bytes size */
- /* push values first */
- ptr = map_get_values(mp);
- i = size;
- while(i--) {
- if (is_list(*ptr)) {
- if ((m = is_string(*ptr)) && (m < MAX_STRING_LEN)) {
- result += m + 2 + 1;
- } else {
- result += 5;
- }
- }
- ESTACK_PUSH(s,*ptr);
- ++ptr;
+ if (size) {
+ WSTACK_PUSH4(s, (UWord) flatmap_get_values(mp),
+ (UWord) TERM_ARRAY_OP(size),
+ (UWord) flatmap_get_keys(mp),
+ (UWord) TERM_ARRAY_OP(size));
+ }
+ } else {
+ Eterm *ptr;
+ Eterm hdr;
+ Uint node_sz;
+ ptr = boxed_val(obj);
+ hdr = *ptr;
+ ASSERT(is_header(hdr));
+ switch(hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ ptr++;
+ node_sz = 16;
+ result += 1 + 4; /* tag + 4 bytes size */
+ break;
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ ptr++;
+ result += 1 + 4; /* tag + 4 bytes size */
+ /*fall through*/
+ case HAMT_SUBTAG_NODE_BITMAP:
+ node_sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ ASSERT(node_sz < 17);
+ break;
+ default:
+ erts_exit(ERTS_ERROR_EXIT, "bad header\r\n");
}
- ptr = map_get_keys(mp);
- i = size;
- while(i--) {
- if (is_list(*ptr)) {
- if ((m = is_string(*ptr)) && (m < MAX_STRING_LEN)) {
- result += m + 2 + 1;
- } else {
- result += 5;
- }
+ ptr++;
+ WSTACK_RESERVE(s, node_sz*2);
+ while(node_sz--) {
+ if (is_list(*ptr)) {
+ WSTACK_FAST_PUSH(s, CAR(list_val(*ptr)));
+ WSTACK_FAST_PUSH(s, CDR(list_val(*ptr)));
+ } else {
+ WSTACK_FAST_PUSH(s, *ptr);
}
- ESTACK_PUSH(s,*ptr);
- ++ptr;
+ ptr++;
}
- goto outer_loop;
}
break;
case FLOAT_DEF:
@@ -4071,25 +4287,13 @@ encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj,
result += 2 * (1 + 4); /* Index + Uniq */
result += 1 + (funp->num_free < 0x100 ? 1 : 4);
}
- for (i = 1; i < funp->num_free; i++) {
- obj = funp->env[i];
-
- if (is_not_list(obj)) {
- /* Push any non-list terms on the stack */
- ESTACK_PUSH(s, obj);
- } else {
- /* Lists must be handled specially. */
- if ((m = is_string(obj)) && (m < MAX_STRING_LEN)) {
- result += m + 2 + 1;
- } else {
- result += 5;
- ESTACK_PUSH(s, obj);
- }
- }
+ if (funp->num_free > 1) {
+ WSTACK_PUSH2(s, (UWord) (funp->env + 1),
+ (UWord) TERM_ARRAY_OP(funp->num_free-1));
}
if (funp->num_free != 0) {
obj = funp->env[0];
- goto L_jump_start;
+ continue; /* big loop */
}
break;
}
@@ -4097,32 +4301,59 @@ encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj,
case EXPORT_DEF:
{
Export* ep = *((Export **) (export_val(obj) + 1));
-#if HALFWORD_HEAP
- result += 2;
-#else
result += 1;
-#endif
- result += encode_size_struct2(acmp, ep->code[0], dflags);
- result += encode_size_struct2(acmp, ep->code[1], dflags);
- result += encode_size_struct2(acmp, make_small(ep->code[2]), dflags);
+ result += encode_size_struct2(acmp, ep->info.mfa.module, dflags);
+ result += encode_size_struct2(acmp, ep->info.mfa.function, dflags);
+ result += encode_size_struct2(acmp, make_small(ep->info.mfa.arity), dflags);
}
break;
default:
- erl_exit(1,"Internal data structure error (in encode_size_struct2)%x\n",
+ erts_exit(ERTS_ERROR_EXIT,"Internal data structure error (in encode_size_struct2)%x\n",
obj);
}
+
+ if (WSTACK_ISEMPTY(s)) {
+ break;
+ }
+ obj = (Eterm) WSTACK_POP(s);
+
+ if (is_header(obj)) {
+ switch (obj) {
+ case LIST_TAIL_OP:
+ obj = (Eterm) WSTACK_POP(s);
+ if (is_list(obj)) {
+ Eterm* cons = list_val(obj);
+
+ WSTACK_PUSH2(s, (UWord)CDR(cons), (UWord)LIST_TAIL_OP);
+ obj = CAR(cons);
+ }
+ break;
+
+ case TERM_ARRAY_OP(1):
+ obj = *(Eterm*)WSTACK_POP(s);
+ break;
+ default: { /* TERM_ARRAY_OP(N) when N > 1 */
+ Eterm* ptr = (Eterm*) WSTACK_POP(s);
+ WSTACK_PUSH2(s, (UWord) (ptr+1),
+ (UWord) TERM_ARRAY_OP_DEC(obj));
+ obj = *ptr;
+ }
+ }
+ }
}
- DESTROY_ESTACK(s);
+ WSTACK_DESTROY(s);
if (ctx) {
- ASSERT(ctx->estack.start == NULL);
+ ASSERT(ctx->wstack.wstart == NULL);
*reds = r;
}
*res = result;
return 0;
}
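The size pass and the encode pass share one yielding convention: charge roughly one reduction per node, and when the budget runs out, save the stack and cursor into the context and return a not-done code so the scheduler can resume the call later. A simplified sketch of that trap/resume shape (the real code saves a WSTACK and byte pointer into TTBSizeContext/TTBEncodeContext):

    #include <stddef.h>

    typedef struct { long reds; size_t pos; int in_use; } Ctx;

    static int sum_step(const int *v, size_t n, long *acc, Ctx *ctx)
    {
        size_t i = ctx->in_use ? ctx->pos : 0;  /* resume where we left off */
        for (; i < n; i++) {
            if (--ctx->reds <= 0) {             /* budget exhausted: yield */
                ctx->pos = i;
                ctx->in_use = 1;
                return -1;                      /* caller reschedules us */
            }
            *acc += v[i];
        }
        ctx->in_use = 0;
        return 0;                               /* done */
    }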
+
+
static Sint
decoded_size(byte *ep, byte* endp, int internal_tags, B2TContext* ctx)
{
@@ -4185,7 +4416,7 @@ init_done:
switch (tag) {
case INTEGER_EXT:
SKIP(4);
-#if !defined(ARCH_64) || HALFWORD_HEAP
+#if !defined(ARCH_64)
heap_size += BIG_UINT_HEAP_SIZE;
#endif
break;
@@ -4249,19 +4480,32 @@ init_done:
SKIP(1+atom_extra_skip);
atom_extra_skip = 0;
break;
- case PID_EXT:
+ case NEW_PID_EXT:
+ atom_extra_skip = 12;
+ goto case_PID;
+ case PID_EXT:
atom_extra_skip = 9;
+ case_PID:
/* In case it is an external pid */
heap_size += EXTERNAL_THING_HEAD_SIZE + 1;
terms++;
break;
- case PORT_EXT:
+ case NEW_PORT_EXT:
+ atom_extra_skip = 8;
+ goto case_PORT;
+ case PORT_EXT:
atom_extra_skip = 5;
+ case_PORT:
/* In case it is an external port */
heap_size += EXTERNAL_THING_HEAD_SIZE + 1;
terms++;
break;
- case NEW_REFERENCE_EXT:
+ case NEWER_REFERENCE_EXT:
+ atom_extra_skip = 4;
+ goto case_NEW_REFERENCE;
+ case NEW_REFERENCE_EXT:
+ atom_extra_skip = 1;
+ case_NEW_REFERENCE:
{
int id_words;
@@ -4272,9 +4516,9 @@ init_done:
goto error;
ep += 2;
- atom_extra_skip = 1 + 4*id_words;
+ atom_extra_skip += 4*id_words;
/* In case it is an external ref */
-#if defined(ARCH_64) && !HALFWORD_HEAP
+#if defined(ARCH_64)
heap_size += EXTERNAL_THING_HEAD_SIZE + id_words/2 + 1;
#else
heap_size += EXTERNAL_THING_HEAD_SIZE + id_words;
@@ -4316,7 +4560,11 @@ init_done:
n = get_int32(ep);
ep += 4;
ADDTERMS(2*n);
- heap_size += 3 + n + 1 + n;
+ if (n <= MAP_SMALL_MAP_LIMIT) {
+ heap_size += 3 + n + 1 + n;
+ } else {
+ heap_size += HASHMAP_ESTIMATED_HEAP_SIZE(n);
+ }
break;
case STRING_EXT:
CHKSIZE(2);
@@ -4356,11 +4604,7 @@ init_done:
break;
case EXPORT_EXT:
terms += 3;
-#if HALFWORD_HEAP
- heap_size += 3;
-#else
heap_size += 2;
-#endif
break;
case NEW_FUN_EXT:
{
@@ -4440,66 +4684,3 @@ error:
#undef SKIP2
#undef CHKSIZE
}
-
-
-#ifdef HIPE
-BIF_RETTYPE hipe_wrapper_term_to_binary_1(BIF_ALIST_1);
-BIF_RETTYPE hipe_wrapper_term_to_binary_2(BIF_ALIST_2);
-BIF_RETTYPE hipe_wrapper_erts_internal_binary_to_term_1(BIF_ALIST_1);
-BIF_RETTYPE hipe_wrapper_erts_internal_binary_to_term_2(BIF_ALIST_2);
-
-/* Hipe wrappers used by native code for BIFs that disable GC while trapping.
- *
- * Problem:
- * When native code calls a BIF that traps, hipe_mode_switch will push a
- * "trap frame" on the Erlang stack in order to find its way back from beam_emu
- * back to native caller when finally done. If GC is disabled and stack/heap
- * is full there is no place to push the "trap frame".
- *
- * Solution:
- * We reserve space on stack for the "trap frame" here before the BIF is called.
- * If the BIF does not trap, the space is reclaimed here before returning.
- * If the BIF traps, hipe_push_beam_trap_frame() will detect that a "trap frame"
- * already is reserved and use it.
- */
-BIF_RETTYPE hipe_wrapper_term_to_binary_1(BIF_ALIST_1)
-{
- Eterm res;
- hipe_reserve_beam_trap_frame(BIF_P, BIF__ARGS, 1);
- res = term_to_binary_1(BIF_P, BIF__ARGS);
- if (is_value(res) || BIF_P->freason != TRAP) {
- hipe_unreserve_beam_trap_frame(BIF_P);
- }
- return res;
-}
-BIF_RETTYPE hipe_wrapper_term_to_binary_2(BIF_ALIST_2)
-{
- Eterm res;
- hipe_reserve_beam_trap_frame(BIF_P, BIF__ARGS, 2);
- res = term_to_binary_2(BIF_P, BIF__ARGS);
- if (is_value(res) || BIF_P->freason != TRAP) {
- hipe_unreserve_beam_trap_frame(BIF_P);
- }
- return res;
-}
-BIF_RETTYPE hipe_wrapper_erts_internal_binary_to_term_1(BIF_ALIST_1)
-{
- Eterm res;
- hipe_reserve_beam_trap_frame(BIF_P, BIF__ARGS, 1);
- res = erts_internal_binary_to_term_1(BIF_P, BIF__ARGS);
- if (is_value(res) || BIF_P->freason != TRAP) {
- hipe_unreserve_beam_trap_frame(BIF_P);
- }
- return res;
-}
-BIF_RETTYPE hipe_wrapper_erts_internal_binary_to_term_2(BIF_ALIST_2)
-{
- Eterm res;
- hipe_reserve_beam_trap_frame(BIF_P, BIF__ARGS, 2);
- res = erts_internal_binary_to_term_2(BIF_P, BIF__ARGS);
- if (is_value(res) || BIF_P->freason != TRAP) {
- hipe_unreserve_beam_trap_frame(BIF_P);
- }
- return res;
-}
-#endif /*HIPE*/
diff --git a/erts/emulator/beam/external.h b/erts/emulator/beam/external.h
index bf00958eb1..3c61d013da 100644
--- a/erts/emulator/beam/external.h
+++ b/erts/emulator/beam/external.h
@@ -1,24 +1,23 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
-/* Same order as the ordering of terms in erlang */
-
/* Since there are 255 different External tag values to choose from,
there is no reason not to be extravagant.
Hence the different tags for large/small tuples, etc.
@@ -36,9 +35,12 @@
#define SMALL_ATOM_EXT 's'
#define REFERENCE_EXT 'e'
#define NEW_REFERENCE_EXT 'r'
+#define NEWER_REFERENCE_EXT 'Z'
#define PORT_EXT 'f'
+#define NEW_PORT_EXT 'Y'
#define NEW_FLOAT_EXT 'F'
#define PID_EXT 'g'
+#define NEW_PID_EXT 'X'
#define SMALL_TUPLE_EXT 'h'
#define LARGE_TUPLE_EXT 'i'
#define NIL_EXT 'j'
@@ -148,21 +150,26 @@ typedef struct {
byte *extp;
int exttmp;
Uint extsize;
+ Uint heap_size;
} ErtsBinary2TermState;
+
/* -------------------------------------------------------------------------- */
void erts_init_atom_cache_map(ErtsAtomCacheMap *);
void erts_reset_atom_cache_map(ErtsAtomCacheMap *);
void erts_destroy_atom_cache_map(ErtsAtomCacheMap *);
void erts_finalize_atom_cache_map(ErtsAtomCacheMap *, Uint32);
-Uint erts_encode_ext_dist_header_size(ErtsAtomCacheMap *);
Uint erts_encode_ext_dist_header_size(ErtsAtomCacheMap *);
byte *erts_encode_ext_dist_header_setup(byte *, ErtsAtomCacheMap *);
byte *erts_encode_ext_dist_header_finalize(byte *, ErtsAtomCache *, Uint32);
-Uint erts_encode_dist_ext_size(Eterm, Uint32, ErtsAtomCacheMap *);
-void erts_encode_dist_ext(Eterm, byte **, Uint32, ErtsAtomCacheMap *);
+struct erts_dsig_send_context;
+int erts_encode_dist_ext_size(Eterm, Uint32, ErtsAtomCacheMap*, Uint* szp);
+int erts_encode_dist_ext_size_int(Eterm term, struct erts_dsig_send_context* ctx, Uint* szp);
+struct TTBEncodeContext_;
+int erts_encode_dist_ext(Eterm, byte **, Uint32, ErtsAtomCacheMap *,
+ struct TTBEncodeContext_ *, Sint* reds);
Uint erts_encode_ext_size(Eterm);
Uint erts_encode_ext_size_2(Eterm, unsigned);
@@ -178,21 +185,26 @@ ERTS_GLB_INLINE void *erts_dist_ext_trailer(ErtsDistExternal *);
ErtsDistExternal *erts_make_dist_ext_copy(ErtsDistExternal *, Uint);
void *erts_dist_ext_trailer(ErtsDistExternal *);
void erts_destroy_dist_ext_copy(ErtsDistExternal *);
+
+#define ERTS_PREP_DIST_EXT_FAILED (-1)
+#define ERTS_PREP_DIST_EXT_SUCCESS (0)
+#define ERTS_PREP_DIST_EXT_CLOSED (1)
+
int erts_prepare_dist_ext(ErtsDistExternal *, byte *, Uint,
- DistEntry *, ErtsAtomCache *);
+ DistEntry *, ErtsAtomCache *, Uint32 *);
Sint erts_decode_dist_ext_size(ErtsDistExternal *);
-Eterm erts_decode_dist_ext(Eterm **, ErlOffHeap *, ErtsDistExternal *);
+Eterm erts_decode_dist_ext(ErtsHeapFactory* factory, ErtsDistExternal *);
Sint erts_decode_ext_size(byte*, Uint);
Sint erts_decode_ext_size_ets(byte*, Uint);
-Eterm erts_decode_ext(Eterm **, ErlOffHeap *, byte**);
-Eterm erts_decode_ext_ets(Eterm **, ErlOffHeap *, byte*);
+Eterm erts_decode_ext(ErtsHeapFactory*, byte**, Uint32 flags);
+Eterm erts_decode_ext_ets(ErtsHeapFactory*, byte*);
Eterm erts_term_to_binary(Process* p, Eterm Term, int level, Uint flags);
Sint erts_binary2term_prepare(ErtsBinary2TermState *, byte *, Sint);
void erts_binary2term_abort(ErtsBinary2TermState *);
-Eterm erts_binary2term_create(ErtsBinary2TermState *, Eterm **hpp, ErlOffHeap *);
+Eterm erts_binary2term_create(ErtsBinary2TermState *, ErtsHeapFactory*);
int erts_debug_max_atom_out_cache_index(void);
int erts_debug_atom_to_out_cache_index(Eterm);
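The decode entry points above now take an ErtsHeapFactory instead of separate Eterm** and ErlOffHeap* parameters, so one handle travels through the decoder and one call can undo a failed decode. A generic sketch of what such a factory bundles (illustrative types, not the ERTS definitions):

    #include <stdint.h>
    #include <stddef.h>

    typedef uintptr_t Term;

    typedef struct offh { struct offh *next; } OffHeap;

    /* One object owning the heap cursor and the off-heap chain head,
     * replacing the old (Eterm**, ErlOffHeap*) parameter pair. */
    typedef struct {
        Term    *hp;       /* next free heap word */
        OffHeap *first;    /* off-heap list head  */
    } Factory;

    static Term *factory_alloc(Factory *f, size_t words)
    {
        Term *res = f->hp;
        f->hp += words;    /* real code also tracks hp_end / heap frags */
        return res;
    }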
diff --git a/erts/emulator/beam/float_instrs.tab b/erts/emulator/beam/float_instrs.tab
new file mode 100644
index 0000000000..3d4db77892
--- /dev/null
+++ b/erts/emulator/beam/float_instrs.tab
@@ -0,0 +1,88 @@
+// -*- c -*-
+//
+// %CopyrightBegin%
+//
+// Copyright Ericsson AB 2017. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// %CopyrightEnd%
+//
+
+LOAD_DOUBLE(Src, Dst) {
+ GET_DOUBLE($Src, *(FloatDef *) &$Dst);
+}
+
+fload(Reg, Dst) {
+ $LOAD_DOUBLE($Reg, $Dst);
+}
+
+fstore(Float, Dst) {
+ PUT_DOUBLE(*((FloatDef *) &$Float), HTOP);
+ $Dst = make_float(HTOP);
+ HTOP += FLOAT_SIZE_OBJECT;
+}
+
+fconv(Src, Dst) {
+ Eterm src = $Src;
+
+ if (is_small(src)) {
+ $Dst = (double) signed_val(src);
+ } else if (is_big(src)) {
+ if (big_to_double(src, &$Dst) < 0) {
+ $BADARITH0();
+ }
+ } else if (is_float(src)) {
+ $LOAD_DOUBLE(src, $Dst);
+ } else {
+ $BADARITH0();
+ }
+}
+
+FLOAT_OP(Src1, OP, Src2, Dst) {
+ ERTS_NO_FPE_CHECK_INIT(c_p);
+ $Dst = $Src1 $OP $Src2;
+ ERTS_NO_FPE_ERROR(c_p, $Dst, $BADARITH0());
+}
+
+i_fadd(Src1, Src2, Dst) {
+ $FLOAT_OP($Src1, +, $Src2, $Dst);
+}
+
+i_fsub(Src1, Src2, Dst) {
+ $FLOAT_OP($Src1, -, $Src2, $Dst);
+}
+
+i_fmul(Src1, Src2, Dst) {
+ $FLOAT_OP($Src1, *, $Src2, $Dst);
+}
+
+i_fdiv(Src1, Src2, Dst) {
+ $FLOAT_OP($Src1, /, $Src2, $Dst);
+}
+
+i_fnegate(Src, Dst) {
+ ERTS_NO_FPE_CHECK_INIT(c_p);
+ $Dst = -$Src;
+ ERTS_NO_FPE_ERROR(c_p, $Dst, $BADARITH0());
+}
+
+%unless NO_FPE_SIGNALS
+fclearerror() {
+ ERTS_FP_CHECK_INIT(c_p);
+}
+
+i_fcheckerror() {
+ ERTS_FP_ERROR(c_p, freg[0].fd, $BADARITH0());
+}
+%endif
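FLOAT_OP is a parameterized macro-instruction: i_fadd through i_fdiv instantiate it with a C operator, and beam_makeops splices the body into the emulator loop. Roughly, assuming the no-FPE-signals configuration, the instantiated arithmetic reduces to the shape below, with isfinite() standing in for the check ERTS_NO_FPE_ERROR performs:

    #include <math.h>

    /* Sketch of FLOAT_OP($Src1, +, $Src2, $Dst) as used by i_fadd:
     * do the raw double op, then funnel non-finite results to badarith. */
    static int i_fadd_sketch(double src1, double src2, double *dst)
    {
        double res = src1 + src2;     /* $Dst = $Src1 $OP $Src2 */
        if (!isfinite(res))           /* stand-in for ERTS_NO_FPE_ERROR */
            return -1;                /* => badarith */
        *dst = res;
        return 0;
    }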
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 8fcb95d0e2..3dd3a60939 100755..100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2014. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -34,14 +35,23 @@
#include "register.h"
#include "erl_fun.h"
#include "erl_node_tables.h"
-#include "benchmark.h"
#include "erl_process.h"
#include "erl_sys_driver.h"
#include "erl_debug.h"
#include "error.h"
#include "erl_utils.h"
#include "erl_port.h"
+#include "erl_gc.h"
+#include "erl_nif.h"
+#define ERTS_BINARY_TYPES_ONLY__
+#include "erl_binary.h"
+#undef ERTS_BINARY_TYPES_ONLY__
+
+struct enif_func_t;
+#ifdef DEBUG
+# define ERTS_NIF_ASSERT_IN_ENV
+#endif
struct enif_environment_t /* ErlNifEnv */
{
struct erl_module_nif* mod_nif;
@@ -51,14 +61,76 @@ struct enif_environment_t /* ErlNifEnv */
ErlHeapFragment* heap_frag;
int fpe_was_unmasked;
struct enif_tmp_obj_t* tmp_obj_list;
+ int exception_thrown; /* boolean */
+ Process *tracee;
+ int exiting; /* boolean (dirty nifs might return in exiting state) */
+
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ int dbg_disable_assert_in_env;
+#endif
+};
+struct enif_resource_type_t
+{
+ struct enif_resource_type_t* next; /* list of all resource types */
+ struct enif_resource_type_t* prev;
+ struct erl_module_nif* owner; /* that created this type and thus implements the destructor */
+ ErlNifResourceDtor* dtor; /* user destructor function */
+ ErlNifResourceStop* stop;
+ ErlNifResourceDown* down;
+ erts_refc_t refc; /* num of resources of this type (HOTSPOT warning)
+ +1 for active erl_module_nif */
+ Eterm module;
+ Eterm name;
};
+
+typedef struct
+{
+ erts_mtx_t lock;
+ ErtsMonitor* root;
+ int pending_failed_fire;
+ int is_dying;
+
+ size_t user_data_sz;
+} ErtsResourceMonitors;
+
+typedef struct ErtsResource_
+{
+ struct enif_resource_type_t* type;
+ ErtsResourceMonitors* monitors;
+#ifdef DEBUG
+ erts_refc_t nif_refc;
+#else
+# ifdef ARCH_32
+ byte align__[4];
+# endif
+#endif
+ char data[1];
+}ErtsResource;
+
+#define DATA_TO_RESOURCE(PTR) ErtsContainerStruct(PTR, ErtsResource, data)
+#define erts_resource_ref_size(P) ERTS_MAGIC_REF_THING_SIZE
+
+extern Eterm erts_bld_resource_ref(Eterm** hp, ErlOffHeap*, ErtsResource*);
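DATA_TO_RESOURCE recovers the ErtsResource header from the payload pointer that NIF callbacks receive; ErtsContainerStruct is the usual container-of computation, and the align__ padding above keeps data[] 8-byte aligned on 32-bit builds. A generic sketch of the same computation:

    #include <stddef.h>

    /* Generic container-of, mirroring
     * #define DATA_TO_RESOURCE(PTR) ErtsContainerStruct(PTR, ErtsResource, data) */
    #define CONTAINER_OF(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct resource {
        void *type_info;   /* header fields before the payload */
        char  data[1];     /* payload handed out to user code  */
    };

    static struct resource *from_data(void *data)
    {
        return CONTAINER_OF(data, struct resource, data);
    }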
+
extern void erts_pre_nif(struct enif_environment_t*, Process*,
- struct erl_module_nif*);
+ struct erl_module_nif*, Process* tracee);
extern void erts_post_nif(struct enif_environment_t* env);
+extern void erts_resource_stop(ErtsResource*, ErlNifEvent, int is_direct_call);
+void erts_fire_nif_monitor(ErtsResource*, Eterm pid, Eterm ref);
extern Eterm erts_nif_taints(Process* p);
-extern void erts_print_nif_taints(int to, void* to_arg);
+extern void erts_print_nif_taints(fmtfn_t to, void* to_arg);
void erts_unload_nif(struct erl_module_nif* nif);
extern void erl_nif_init(void);
+extern int erts_nif_get_funcs(struct erl_module_nif*,
+ struct enif_func_t **funcs);
+extern Eterm erts_nif_call_function(Process *p, Process *tracee,
+ struct erl_module_nif*,
+ struct enif_func_t *,
+ int argc, Eterm *argv);
+
+int erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p,
+ BeamInstr *I, Eterm *reg);
+
/* Driver handle (wrapper for old plain handle) */
#define ERL_DE_OK 0
@@ -101,7 +173,7 @@ typedef struct de_proc_entry {
PROC_AWAIT_LOAD == Wants to be notified when we
reloaded the driver (old was locked) */
Uint flags; /* ERL_FL_DE_DEREFERENCED when reload in progress */
- Eterm heap[REF_THING_SIZE]; /* "ref heap" */
+ Eterm heap[ERTS_REF_THING_SIZE]; /* "ref heap" */
struct de_proc_entry *next;
} DE_ProcEntry;
@@ -111,7 +183,7 @@ typedef struct {
or that wait for it to change state */
erts_refc_t refc; /* Number of ports/processes having
references to the driver */
- erts_smp_atomic32_t port_count; /* Number of ports using the driver */
+ erts_atomic32_t port_count; /* Number of ports using the driver */
Uint flags; /* ERL_DE_FL_KILL_PORTS */
int status; /* ERL_DE_xxx */
char *full_path; /* Full path of the driver */
@@ -135,9 +207,7 @@ struct erts_driver_t_ {
} version;
int flags;
DE_Handle *handle;
-#ifdef ERTS_SMP
- erts_smp_mtx_t *lock;
-#endif
+ erts_mtx_t *lock;
ErlDrvEntry *entry;
ErlDrvData (*start)(ErlDrvPort port, char *command, SysDriverOpts* opts);
void (*stop)(ErlDrvData drv_data);
@@ -152,18 +222,17 @@ struct erts_driver_t_ {
char *buf, ErlDrvSizeT len,
char **rbuf, ErlDrvSizeT rlen, /* Might be NULL */
unsigned int *flags);
- void (*event)(ErlDrvData drv_data, ErlDrvEvent event,
- ErlDrvEventData event_data);
void (*ready_input)(ErlDrvData drv_data, ErlDrvEvent event);
void (*ready_output)(ErlDrvData drv_data, ErlDrvEvent event);
void (*timeout)(ErlDrvData drv_data);
void (*ready_async)(ErlDrvData drv_data, ErlDrvThreadData thread_data); /* Might be NULL */
void (*process_exit)(ErlDrvData drv_data, ErlDrvMonitor *monitor);
void (*stop_select)(ErlDrvEvent event, void*); /* Might be NULL */
+ void (*emergency_close)(ErlDrvData drv_data); /* Might be NULL */
};
extern erts_driver_t *driver_list;
-extern erts_smp_rwmtx_t erts_driver_list_lock;
+extern erts_rwmtx_t erts_driver_list_lock;
extern void erts_ddll_init(void);
extern void erts_ddll_lock_driver(DE_Handle *dh, char *name);
@@ -188,98 +257,12 @@ extern Eterm erts_ddll_monitor_driver(Process *p,
ErtsProcLocks plocks);
/*
-** Just like the driver binary but with initial flags
-** Note that the two structures Binary and ErlDrvBinary HAVE to
-** be equal except for extra fields in the beginning of the struct.
-** ErlDrvBinary is defined in erl_driver.h.
-** When driver_alloc_binary is called, a Binary is allocated, but
-** the pointer returned is to the address of the first element that
-** also occurs in the ErlDrvBinary struct (driver.*binary takes care if this).
-** The driver need never know about additions to the internal Binary of the
-** emulator. One should however NEVER be sloppy when mixing ErlDrvBinary
-** and Binary, the macros below can convert one type to the other, as they both
-** in reality are equal.
-*/
-
-#ifdef ARCH_32
- /* *DO NOT USE* only for alignment. */
-#define ERTS_BINARY_STRUCT_ALIGNMENT Uint32 align__;
-#else
-#define ERTS_BINARY_STRUCT_ALIGNMENT
-#endif
-
-/* Add fields in ERTS_INTERNAL_BINARY_FIELDS, otherwise the drivers crash */
-#define ERTS_INTERNAL_BINARY_FIELDS \
- UWord flags; \
- erts_refc_t refc; \
- ERTS_BINARY_STRUCT_ALIGNMENT
-
-typedef struct binary {
- ERTS_INTERNAL_BINARY_FIELDS
- SWord orig_size;
- char orig_bytes[1]; /* to be continued */
-} Binary;
-
-#define ERTS_SIZEOF_Binary(Sz) \
- (offsetof(Binary,orig_bytes) + (Sz))
-
-typedef struct {
- ERTS_INTERNAL_BINARY_FIELDS
- SWord orig_size;
- void (*destructor)(Binary *);
- char magic_bin_data[1];
-} ErtsMagicBinary;
-
-typedef union {
- Binary binary;
- ErtsMagicBinary magic_binary;
- struct {
- ERTS_INTERNAL_BINARY_FIELDS
- ErlDrvBinary binary;
- } driver;
-} ErtsBinary;
-
-/*
- * 'Binary' alignment:
- * Address of orig_bytes[0] of a Binary should always be 8-byte aligned.
- * It is assumed that the flags, refc, and orig_size fields are 4 bytes on
- * 32-bits architectures and 8 bytes on 64-bits architectures.
- */
-
-#define ERTS_MAGIC_BIN_DESTRUCTOR(BP) \
- ((ErtsBinary *) (BP))->magic_binary.destructor
-#define ERTS_MAGIC_BIN_DATA(BP) \
- ((void *) ((ErtsBinary *) (BP))->magic_binary.magic_bin_data)
-#define ERTS_MAGIC_BIN_DATA_SIZE(BP) \
- ((BP)->orig_size - sizeof(void (*)(Binary *)))
-#define ERTS_MAGIC_BIN_ORIG_SIZE(Sz) \
- (sizeof(void (*)(Binary *)) + (Sz))
-#define ERTS_MAGIC_BIN_SIZE(Sz) \
- (offsetof(ErtsMagicBinary,magic_bin_data) + (Sz))
-#define ERTS_MAGIC_BIN_FROM_DATA(DATA) \
- ((ErtsBinary*)((char*)(DATA) - offsetof(ErtsMagicBinary,magic_bin_data)))
-
-#define Binary2ErlDrvBinary(B) (&((ErtsBinary *) (B))->driver.binary)
-#define ErlDrvBinary2Binary(D) ((Binary *) \
- (((char *) (D)) \
- - offsetof(ErtsBinary, driver.binary)))
-
-/* A "magic" binary flag */
-#define BIN_FLAG_MAGIC 1
-#define BIN_FLAG_USR1 2 /* Reserved for use by different modules too mark */
-#define BIN_FLAG_USR2 4 /* certain binaries as special (used by ets) */
-#define BIN_FLAG_DRV 8
-
-/*
* This structure represents one type of a binary in a process.
*/
typedef struct proc_bin {
Eterm thing_word; /* Subtag REFC_BINARY_SUBTAG. */
Uint size; /* Binary size in bytes. */
-#if HALFWORD_HEAP
- void* dummy_ptr_padding__;
-#endif
struct erl_off_heap_header *next;
Binary *val; /* Pointer to Binary structure. */
byte *bytes; /* Pointer to the actual data bytes. */
@@ -294,46 +277,12 @@ typedef struct proc_bin {
*/
#define PROC_BIN_SIZE (sizeof(ProcBin)/sizeof(Eterm))
-ERTS_GLB_INLINE Eterm erts_mk_magic_binary_term(Eterm **hpp,
- ErlOffHeap *ohp,
- Binary *mbp);
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-ERTS_GLB_INLINE Eterm
-erts_mk_magic_binary_term(Eterm **hpp, ErlOffHeap *ohp, Binary *mbp)
-{
- ProcBin *pb = (ProcBin *) *hpp;
- *hpp += PROC_BIN_SIZE;
-
- ASSERT(mbp->flags & BIN_FLAG_MAGIC);
-
- pb->thing_word = HEADER_PROC_BIN;
- pb->size = 0;
- pb->next = ohp->first;
- ohp->first = (struct erl_off_heap_header*) pb;
- pb->val = mbp;
- pb->bytes = (byte *) mbp->orig_bytes;
- pb->flags = 0;
-
- erts_refc_inc(&mbp->refc, 1);
-
- return make_binary(pb);
-}
-
-#endif
-
-#define ERTS_TERM_IS_MAGIC_BINARY(T) \
- (is_binary((T)) \
- && (thing_subtag(*binary_val((T))) == REFC_BINARY_SUBTAG) \
- && (((ProcBin *) binary_val((T)))->val->flags & BIN_FLAG_MAGIC))
-
-
union erl_off_heap_ptr {
struct erl_off_heap_header* hdr;
ProcBin *pb;
struct erl_fun_thing* fun;
struct external_thing_* ext;
+ ErtsMRefThing *mref;
Eterm* ep;
void* voidp;
};
@@ -344,15 +293,13 @@ extern Eterm node_cookie;
extern Uint display_items; /* no of items to display in traces etc */
extern int erts_backtrace_depth;
-extern erts_smp_atomic32_t erts_max_gen_gcs;
-
-extern int erts_disable_tolerant_timeofday;
+extern erts_atomic32_t erts_max_gen_gcs;
extern int bif_reductions; /* reductions + fcalls (when doing call_bif) */
extern int stackdump_on_exit;
/*
- * Here is an implementation of a lightweiht stack.
+ * Here is an implementation of a lightweight stack.
*
* Use it like this:
*
@@ -370,16 +317,17 @@ extern int stackdump_on_exit;
* DESTROY_ESTACK(Stack)
*/
-typedef struct {
+typedef struct ErtsEStack_ {
Eterm* start;
Eterm* sp;
Eterm* end;
+ Eterm* edefault;
ErtsAlcType_t alloc_type;
}ErtsEStack;
#define DEF_ESTACK_SIZE (16)
-void erl_grow_estack(ErtsEStack*, Eterm* def_stack);
+void erl_grow_estack(ErtsEStack*, Uint need);
#define ESTK_CONCAT(a,b) a##b
#define ESTK_DEF_STACK(s) ESTK_CONCAT(s,_default_estack)
@@ -389,22 +337,23 @@ void erl_grow_estack(ErtsEStack*, Eterm* def_stack);
ESTK_DEF_STACK(s), /* start */ \
ESTK_DEF_STACK(s), /* sp */ \
ESTK_DEF_STACK(s) + DEF_ESTACK_SIZE, /* end */ \
+ ESTK_DEF_STACK(s), /* default */ \
ERTS_ALC_T_ESTACK /* alloc_type */ \
}
#define ESTACK_CHANGE_ALLOCATOR(s,t) \
do { \
- if (s.start != ESTK_DEF_STACK(s)) { \
- erl_exit(1, "Internal error - trying to change allocator " \
+ if ((s).start != ESTK_DEF_STACK(s)) { \
+ erts_exit(ERTS_ERROR_EXIT, "Internal error - trying to change allocator " \
"type of active estack\n"); \
} \
- s.alloc_type = (t); \
+ (s).alloc_type = (t); \
} while (0)
#define DESTROY_ESTACK(s) \
do { \
- if (s.start != ESTK_DEF_STACK(s)) { \
- erts_free(s.alloc_type, s.start); \
+ if ((s).start != ESTK_DEF_STACK(s)) { \
+ erts_free((s).alloc_type, (s).start); \
} \
} while(0)
@@ -415,16 +364,17 @@ do { \
*/
#define ESTACK_SAVE(s,dst)\
do {\
- if (s.start == ESTK_DEF_STACK(s)) {\
+ if ((s).start == ESTK_DEF_STACK(s)) {\
UWord _wsz = ESTACK_COUNT(s);\
- (dst)->start = erts_alloc(s.alloc_type,\
+ (dst)->start = erts_alloc((s).alloc_type,\
DEF_ESTACK_SIZE * sizeof(Eterm));\
- memcpy((dst)->start, s.start,_wsz*sizeof(Eterm));\
+ memcpy((dst)->start, (s).start,_wsz*sizeof(Eterm));\
(dst)->sp = (dst)->start + _wsz;\
(dst)->end = (dst)->start + DEF_ESTACK_SIZE;\
- (dst)->alloc_type = s.alloc_type;\
+ (dst)->edefault = NULL;\
+ (dst)->alloc_type = (s).alloc_type;\
} else\
- *(dst) = s;\
+ *(dst) = (s);\
} while (0)
#define DESTROY_SAVED_ESTACK(estack)\
@@ -435,95 +385,146 @@ do {\
}\
} while(0)
+#define CLEAR_SAVED_ESTACK(estack) ((void) ((estack)->start = NULL))
+
/*
* Use on empty stack, only the allocator can be changed before this.
* The src stack is reset to NULL.
*/
#define ESTACK_RESTORE(s, src) \
do { \
- ASSERT(s.start == ESTK_DEF_STACK(s)); \
- s = *(src); /* struct copy */ \
+ ASSERT((s).start == ESTK_DEF_STACK(s)); \
+ (s) = *(src); /* struct copy */ \
(src)->start = NULL; \
- ASSERT(s.sp >= s.start); \
- ASSERT(s.sp <= s.end); \
+ ASSERT((s).sp >= (s).start); \
+ ASSERT((s).sp <= (s).end); \
} while (0)
-#define ESTACK_IS_STATIC(s) (s.start == ESTK_DEF_STACK(s)))
+#define ESTACK_IS_STATIC(s) ((s).start == ESTK_DEF_STACK(s))
-#define ESTACK_PUSH(s, x) \
-do { \
- if (s.sp == s.end) { \
- erl_grow_estack(&s, ESTK_DEF_STACK(s)); \
- } \
- *s.sp++ = (x); \
+#define ESTACK_PUSH(s, x) \
+do { \
+ if ((s).sp == (s).end) { \
+ erl_grow_estack(&(s), 1); \
+ } \
+ *(s).sp++ = (x); \
} while(0)
#define ESTACK_PUSH2(s, x, y) \
do { \
- if (s.sp > s.end - 2) { \
- erl_grow_estack(&s, ESTK_DEF_STACK(s)); \
+ if ((s).sp > (s).end - 2) { \
+ erl_grow_estack(&(s), 2); \
} \
- *s.sp++ = (x); \
- *s.sp++ = (y); \
+ *(s).sp++ = (x); \
+ *(s).sp++ = (y); \
} while(0)
#define ESTACK_PUSH3(s, x, y, z) \
do { \
- if (s.sp > s.end - 3) { \
- erl_grow_estack(&s, ESTK_DEF_STACK(s)); \
+ if ((s).sp > (s).end - 3) { \
+ erl_grow_estack(&(s), 3); \
} \
- *s.sp++ = (x); \
- *s.sp++ = (y); \
- *s.sp++ = (z); \
+ *(s).sp++ = (x); \
+ *(s).sp++ = (y); \
+ *(s).sp++ = (z); \
} while(0)
-#define ESTACK_COUNT(s) (s.sp - s.start)
-#define ESTACK_ISEMPTY(s) (s.sp == s.start)
-#define ESTACK_POP(s) (*(--s.sp))
+#define ESTACK_PUSH4(s, E1, E2, E3, E4) \
+do { \
+ if ((s).sp > (s).end - 4) { \
+ erl_grow_estack(&(s), 4); \
+ } \
+ *(s).sp++ = (E1); \
+ *(s).sp++ = (E2); \
+ *(s).sp++ = (E3); \
+ *(s).sp++ = (E4); \
+} while(0)
+
+#define ESTACK_RESERVE(s, push_cnt) \
+do { \
+ if ((s).sp > (s).end - (push_cnt)) { \
+ erl_grow_estack(&(s), (push_cnt)); \
+ } \
+} while(0)
+
+/* Must be preceded by ESTACK_RESERVE */
+#define ESTACK_FAST_PUSH(s, x) \
+do { \
+ ASSERT((s).sp < (s).end); \
+ *(s).sp++ = (x); \
+} while(0)
+
+#define ESTACK_COUNT(s) ((s).sp - (s).start)
+#define ESTACK_ISEMPTY(s) ((s).sp == (s).start)
+#define ESTACK_POP(s) (*(--(s).sp))
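For reference, a minimal sketch of the usage pattern these ESTACK macros support; the term-walking logic is illustrative only and assumes the usual ERTS term macros (is_list, list_val, CAR, CDR) and DECLARE_ESTACK from the surrounding header:

/* Illustrative depth-first walk: starts on the small C-stack buffer
 * declared by DECLARE_ESTACK and grows to heap storage on demand. */
static void walk_term(Eterm root)
{
    DECLARE_ESTACK(s);
    ESTACK_PUSH(s, root);
    while (!ESTACK_ISEMPTY(s)) {
        Eterm t = ESTACK_POP(s);
        if (is_list(t)) {
            Eterm* cons = list_val(t);
            /* one capacity check covers both pushes */
            ESTACK_PUSH2(s, CAR(cons), CDR(cons));
        }
        /* ... handle boxed terms, immediates, etc. ... */
    }
    DESTROY_ESTACK(s); /* frees heap storage only if the stack ever grew */
}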
/*
* WSTACK: same as ESTACK but with UWord instead of Eterm
*/
-typedef struct {
+typedef struct ErtsWStack_ {
UWord* wstart;
UWord* wsp;
UWord* wend;
+ UWord* wdefault;
ErtsAlcType_t alloc_type;
}ErtsWStack;
#define DEF_WSTACK_SIZE (16)
-void erl_grow_wstack(ErtsWStack*, UWord* def_stack);
+void erl_grow_wstack(ErtsWStack*, Uint need);
#define WSTK_CONCAT(a,b) a##b
#define WSTK_DEF_STACK(s) WSTK_CONCAT(s,_default_wstack)
-#define DECLARE_WSTACK(s) \
+#define WSTACK_DECLARE(s) \
UWord WSTK_DEF_STACK(s)[DEF_WSTACK_SIZE]; \
ErtsWStack s = { \
WSTK_DEF_STACK(s), /* wstart */ \
WSTK_DEF_STACK(s), /* wsp */ \
WSTK_DEF_STACK(s) + DEF_WSTACK_SIZE, /* wend */ \
+ WSTK_DEF_STACK(s), /* wdflt */ \
ERTS_ALC_T_ESTACK /* alloc_type */ \
}
+#define DECLARE_WSTACK WSTACK_DECLARE
+
+typedef struct ErtsDynamicWStack_ {
+ UWord default_stack[DEF_WSTACK_SIZE];
+ ErtsWStack ws;
+}ErtsDynamicWStack;
+
+#define WSTACK_INIT(dwsp, ALC_TYPE) \
+do { \
+ (dwsp)->ws.wstart = (dwsp)->default_stack; \
+ (dwsp)->ws.wsp = (dwsp)->default_stack; \
+ (dwsp)->ws.wend = (dwsp)->default_stack + DEF_WSTACK_SIZE;\
+ (dwsp)->ws.wdefault = (dwsp)->default_stack; \
+ (dwsp)->ws.alloc_type = ALC_TYPE; \
+} while (0)
#define WSTACK_CHANGE_ALLOCATOR(s,t) \
do { \
if (s.wstart != WSTK_DEF_STACK(s)) { \
- erl_exit(1, "Internal error - trying to change allocator " \
+ erts_exit(ERTS_ERROR_EXIT, "Internal error - trying to change allocator " \
"type of active wstack\n"); \
} \
s.alloc_type = (t); \
} while (0)
-#define DESTROY_WSTACK(s) \
+#define WSTACK_DESTROY(s) \
do { \
- if (s.wstart != WSTK_DEF_STACK(s)) { \
+ if (s.wstart != s.wdefault) { \
erts_free(s.alloc_type, s.wstart); \
} \
} while(0)
+#define DESTROY_WSTACK WSTACK_DESTROY
+#define WSTACK_DEBUG(s) \
+ do { \
+ fprintf(stderr, "wstack size = %ld\r\n", s.wsp - s.wstart); \
+ fprintf(stderr, "wstack wstart = %p\r\n", s.wstart); \
+ fprintf(stderr, "wstack wsp = %p\r\n", s.wsp); \
+ } while(0)
/*
* Do not free the stack after this, it may have pointers into what
@@ -538,6 +539,7 @@ do {\
memcpy((dst)->wstart, s.wstart,_wsz*sizeof(UWord));\
(dst)->wsp = (dst)->wstart + _wsz;\
(dst)->wend = (dst)->wstart + DEF_WSTACK_SIZE;\
+ (dst)->wdefault = NULL;\
(dst)->alloc_type = s.alloc_type;\
} else\
*(dst) = s;\
@@ -551,6 +553,8 @@ do {\
}\
} while(0)
+#define CLEAR_SAVED_WSTACK(wstack) ((void) ((wstack)->wstart = NULL))
+
/*
* Use on empty stack, only the allocator can be changed before this.
* The src stack is reset to NULL.
@@ -564,12 +568,12 @@ do { \
ASSERT(s.wsp <= s.wend); \
} while (0)
-#define WSTACK_IS_STATIC(s) (s.wstart == WSTK_DEF_STACK(s)))
+#define WSTACK_IS_STATIC(s) (s.wstart == WSTK_DEF_STACK(s))
#define WSTACK_PUSH(s, x) \
do { \
if (s.wsp == s.wend) { \
- erl_grow_wstack(&s, WSTK_DEF_STACK(s)); \
+ erl_grow_wstack(&s, 1); \
} \
*s.wsp++ = (x); \
} while(0)
@@ -577,7 +581,7 @@ do { \
#define WSTACK_PUSH2(s, x, y) \
do { \
if (s.wsp > s.wend - 2) { \
- erl_grow_wstack(&s, WSTK_DEF_STACK(s)); \
+ erl_grow_wstack(&s, 2); \
} \
*s.wsp++ = (x); \
*s.wsp++ = (y); \
@@ -585,24 +589,276 @@ do { \
#define WSTACK_PUSH3(s, x, y, z) \
do { \
- if (s.wsp > s.wend - 3) { \
- erl_grow_wstack(&s, WSTK_DEF_STACK(s)); \
+ if (s.wsp > s.wend - 3) { \
+ erl_grow_wstack(&s, 3); \
} \
*s.wsp++ = (x); \
*s.wsp++ = (y); \
*s.wsp++ = (z); \
} while(0)
+#define WSTACK_PUSH4(s, A1, A2, A3, A4) \
+do { \
+ if (s.wsp > s.wend - 4) { \
+ erl_grow_wstack(&s, 4); \
+ } \
+ *s.wsp++ = (A1); \
+ *s.wsp++ = (A2); \
+ *s.wsp++ = (A3); \
+ *s.wsp++ = (A4); \
+} while(0)
+
+#define WSTACK_PUSH5(s, A1, A2, A3, A4, A5) \
+do { \
+ if (s.wsp > s.wend - 5) { \
+ erl_grow_wstack(&s, 5); \
+ } \
+ *s.wsp++ = (A1); \
+ *s.wsp++ = (A2); \
+ *s.wsp++ = (A3); \
+ *s.wsp++ = (A4); \
+ *s.wsp++ = (A5); \
+} while(0)
+
+#define WSTACK_PUSH6(s, A1, A2, A3, A4, A5, A6) \
+do { \
+ if (s.wsp > s.wend - 6) { \
+ erl_grow_wstack(&s, 6); \
+ } \
+ *s.wsp++ = (A1); \
+ *s.wsp++ = (A2); \
+ *s.wsp++ = (A3); \
+ *s.wsp++ = (A4); \
+ *s.wsp++ = (A5); \
+ *s.wsp++ = (A6); \
+} while(0)
+
+#define WSTACK_RESERVE(s, push_cnt) \
+do { \
+ if (s.wsp > s.wend - (push_cnt)) { \
+ erl_grow_wstack(&s, (push_cnt)); \
+ } \
+} while(0)
+
+/* Must be preceded by WSTACK_RESERVE */
+#define WSTACK_FAST_PUSH(s, x) \
+do { \
+ ASSERT(s.wsp < s.wend); \
+ *s.wsp++ = (x); \
+} while(0)
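The reserve-then-fast-push pair above checks capacity once for a whole batch of pushes; a hedged sketch (tag, ptr and len are hypothetical locals):

WSTACK_RESERVE(s, 3);              /* single growth check */
WSTACK_FAST_PUSH(s, (UWord) tag);  /* no per-push capacity test */
WSTACK_FAST_PUSH(s, (UWord) ptr);
WSTACK_FAST_PUSH(s, (UWord) len);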
+
#define WSTACK_COUNT(s) (s.wsp - s.wstart)
#define WSTACK_ISEMPTY(s) (s.wsp == s.wstart)
-#define WSTACK_POP(s) (*(--s.wsp))
+#define WSTACK_POP(s) ((ASSERT(s.wsp > s.wstart)),*(--s.wsp))
+
+#define WSTACK_ROLLBACK(s, count) (ASSERT(WSTACK_COUNT(s) >= (count)), \
+ s.wsp = s.wstart + (count))
+
+/* PSTACK - Stack of any type.
+ * Usage:
+ * {
+ * #define PSTACK_TYPE MyType
+ * PSTACK_DECLARE(s,16);
+ * MyType *sp = PSTACK_PUSH(s);
+ *
+ * sp->x = ....
+ * sp->y = ....
+ * sp = PSTACK_PUSH(s);
+ * ...
+ * sp = PSTACK_POP(s);
+ * if (PSTACK_IS_EMPTY(s)) {
+ * // sp is invalid when stack is empty after pop
+ * }
+ *
+ * PSTACK_DESTROY(s);
+ * }
+ */
+
+
+typedef struct ErtsPStack_ {
+ byte* pstart;
+ int offs; /* "stack pointer" as byte offset from pstart */
+ int size; /* allocated size in bytes */
+ ErtsAlcType_t alloc_type;
+}ErtsPStack;
+
+void erl_grow_pstack(ErtsPStack* s, void* default_pstack, unsigned need_bytes);
+#define PSTK_CONCAT(a,b) a##b
+#define PSTK_DEF_STACK(s) PSTK_CONCAT(s,_default_pstack)
+
+#define PSTACK_DECLARE(s, DEF_PSTACK_SIZE) \
+PSTACK_TYPE PSTK_DEF_STACK(s)[DEF_PSTACK_SIZE]; \
+ErtsPStack s = { (byte*)PSTK_DEF_STACK(s), /* pstart */ \
+ -(int)sizeof(PSTACK_TYPE), /* offs */ \
+ DEF_PSTACK_SIZE*sizeof(PSTACK_TYPE), /* size */ \
+ ERTS_ALC_T_ESTACK /* alloc_type */ \
+}
+
+#define PSTACK_CHANGE_ALLOCATOR(s,t) \
+do { \
+ if (s.pstart != (byte*)PSTK_DEF_STACK(s)) { \
+ erts_exit(ERTS_ERROR_EXIT, "Internal error - trying to change allocator " \
+ "type of active pstack\n"); \
+ } \
+ s.alloc_type = (t); \
+ } while (0)
+
+#define PSTACK_DESTROY(s) \
+do { \
+ if (s.pstart != (byte*)PSTK_DEF_STACK(s)) { \
+ erts_free(s.alloc_type, s.pstart); \
+ } \
+} while(0)
+
+#define PSTACK_IS_EMPTY(s) (s.offs < 0)
+
+#define PSTACK_COUNT(s) ((s.offs + sizeof(PSTACK_TYPE)) / sizeof(PSTACK_TYPE))
+
+#define PSTACK_TOP(s) (ASSERT(!PSTACK_IS_EMPTY(s)), \
+ (PSTACK_TYPE*)(s.pstart + s.offs))
+
+#define PSTACK_PUSH(s) \
+ (s.offs += sizeof(PSTACK_TYPE), \
+ ((s.offs == s.size) ? erl_grow_pstack(&s, PSTK_DEF_STACK(s), \
+ sizeof(PSTACK_TYPE)) : (void)0), \
+ ((PSTACK_TYPE*) (s.pstart + s.offs)))
+
+#define PSTACK_POP(s) ((s.offs -= sizeof(PSTACK_TYPE)), \
+ (PSTACK_TYPE*)(s.pstart + s.offs))
+
+/*
+ * Do not free the stack after this, it may have pointers into what
+ * was saved in 'dst'.
+ */
+#define PSTACK_SAVE(s,dst)\
+do {\
+ if (s.pstart == (byte*)PSTK_DEF_STACK(s)) {\
+ UWord _pbytes = PSTACK_COUNT(s) * sizeof(PSTACK_TYPE);\
+ (dst)->pstart = erts_alloc(s.alloc_type,\
+ sizeof(PSTK_DEF_STACK(s)));\
+ sys_memcpy((dst)->pstart, s.pstart, _pbytes);\
+ (dst)->offs = s.offs;\
+ (dst)->size = s.size;\
+ (dst)->alloc_type = s.alloc_type;\
+ } else\
+ *(dst) = s;\
+ } while (0)
+
+/*
+ * Use on empty stack, only the allocator can be changed before this.
+ * The src stack is reset to NULL.
+ */
+#define PSTACK_RESTORE(s, src) \
+do { \
+ ASSERT(s.pstart == (byte*)PSTK_DEF_STACK(s)); \
+ s = *(src); /* struct copy */ \
+ (src)->pstart = NULL; \
+ ASSERT(s.offs >= -(int)sizeof(PSTACK_TYPE)); \
+ ASSERT(s.offs < s.size); \
+} while (0)
+
+#define PSTACK_DESTROY_SAVED(pstack)\
+do {\
+ if ((pstack)->pstart) {\
+ erts_free((pstack)->alloc_type, (pstack)->pstart);\
+ (pstack)->pstart = NULL;\
+ }\
+} while(0)
+
+
+/*
+ * An implementation of lightweight unbounded queues,
+ * using a circular dynamic array.
+ * It does not include support for change_allocator.
+ *
+ * Use it like this:
+ *
+ * DECLARE_EQUEUE(Queue) (At the start of a block)
+ * ...
+ * EQUEUE_PUT(Queue, Term)
+ * ...
+ * if (EQUEUE_ISEMPTY(Queue)) {
+ * Queue is empty
+ * } else {
+ * Term = EQUEUE_GET(Queue);
+ * Process popped Term here
+ * }
+ * ...
+ * DESTROY_EQUEUE(Queue)
+ */
+
+typedef struct {
+ Eterm* start;
+ Eterm* front;
+ Eterm* back;
+ int possibly_empty;
+ Eterm* end;
+ ErtsAlcType_t alloc_type;
+} ErtsEQueue;
+
+#define DEF_EQUEUE_SIZE (16)
+
+void erl_grow_equeue(ErtsEQueue*, Eterm* def_queue);
+#define EQUE_CONCAT(a,b) a##b
+#define EQUE_DEF_QUEUE(q) EQUE_CONCAT(q,_default_equeue)
+
+#define DECLARE_EQUEUE(q) \
+ UWord EQUE_DEF_QUEUE(q)[DEF_EQUEUE_SIZE]; \
+ ErtsEQueue q = { \
+ EQUE_DEF_QUEUE(q), /* start */ \
+ EQUE_DEF_QUEUE(q), /* front */ \
+ EQUE_DEF_QUEUE(q), /* back */ \
+ 1, /* possibly_empty */ \
+ EQUE_DEF_QUEUE(q) + DEF_EQUEUE_SIZE, /* end */ \
+ ERTS_ALC_T_ESTACK /* alloc_type */ \
+ }
+
+#define DESTROY_EQUEUE(q) \
+do { \
+ if (q.start != EQUE_DEF_QUEUE(q)) { \
+ erts_free(q.alloc_type, q.start); \
+ } \
+} while(0)
+
+#define EQUEUE_PUT_UNCHECKED(q, x) \
+do { \
+ q.possibly_empty = 0; \
+ *(q.back) = (x); \
+ if (++(q.back) == q.end) { \
+ q.back = q.start; \
+ } \
+} while(0)
+
+#define EQUEUE_PUT(q, x) \
+do { \
+ if (q.back == q.front && !q.possibly_empty) { \
+ erl_grow_equeue(&q, EQUE_DEF_QUEUE(q)); \
+ } \
+ EQUEUE_PUT_UNCHECKED(q, x); \
+} while(0)
+
+#define EQUEUE_ISEMPTY(q) (q.back == q.front && q.possibly_empty)
+ERTS_GLB_INLINE Eterm erts_equeue_get(ErtsEQueue *q);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE Eterm erts_equeue_get(ErtsEQueue *q) {
+ Eterm x;
+ q->possibly_empty = 1;
+ x = *(q->front);
+ if (++(q->front) == q->end) {
+ q->front = q->start;
+ }
+ return x;
+}
+#endif
+#define EQUEUE_GET(q) erts_equeue_get(&(q))
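A minimal breadth-first sketch of the queue API above; the visit step is illustrative and root is an assumed term variable:

DECLARE_EQUEUE(q);
EQUEUE_PUT(q, root);
while (!EQUEUE_ISEMPTY(q)) {
    Eterm t = EQUEUE_GET(q);
    /* visit t; EQUEUE_PUT(q, child) for each child term */
}
DESTROY_EQUEUE(q);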
/* binary.c */
void erts_emasculate_writable_binary(ProcBin* pb);
Eterm erts_new_heap_binary(Process *p, byte *buf, int len, byte** datap);
-Eterm erts_new_mso_binary(Process*, byte*, int);
+Eterm erts_new_mso_binary(Process*, byte*, Uint);
Eterm new_binary(Process*, byte*, Uint);
Eterm erts_realloc_binary(Eterm bin, size_t size);
@@ -618,24 +874,6 @@ erts_bld_port_info(Eterm **hpp,
void erts_bif_info_init(void);
/* bif.c */
-Eterm erts_make_ref(Process *);
-Eterm erts_make_ref_in_buffer(Eterm buffer[REF_THING_SIZE]);
-void erts_make_ref_in_array(Uint32 ref[ERTS_MAX_REF_NUMBERS]);
-
-ERTS_GLB_INLINE Eterm
-erts_proc_store_ref(Process *c_p, Uint32 ref[ERTS_MAX_REF_NUMBERS]);
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-ERTS_GLB_INLINE Eterm
-erts_proc_store_ref(Process *c_p, Uint32 ref[ERTS_MAX_REF_NUMBERS])
-{
- Eterm *hp = HAlloc(c_p, REF_THING_SIZE);
- write_ref_thing(hp, ref[0], ref[1], ref[2]);
- return make_internal_ref(hp);
-}
-
-#endif
void erts_queue_monitor_message(Process *,
ErtsProcLocks*,
@@ -644,7 +882,7 @@ void erts_queue_monitor_message(Process *,
Eterm,
Eterm);
void erts_init_trap_export(Export* ep, Eterm m, Eterm f, Uint a,
- Eterm (*bif)(Process*,Eterm*));
+ Eterm (*bif)(Process*, Eterm*, BeamInstr*));
void erts_init_bif(void);
Eterm erl_send(Process *p, Eterm to, Eterm msg);
@@ -653,12 +891,29 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg);
Eterm erl_is_function(Process* p, Eterm arg1, Eterm arg2);
/* beam_bif_load.c */
-Eterm erts_check_process_code(Process *c_p, Eterm module, int allow_gc, int *redsp);
+Eterm erts_check_process_code(Process *c_p, Eterm module, int *redsp, int fcalls);
+Eterm erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed);
+
+typedef struct ErtsLiteralArea_ {
+ struct erl_off_heap_header *off_heap;
+ Eterm *end;
+ Eterm start[1]; /* beginning of area */
+} ErtsLiteralArea;
+
+#define ERTS_LITERAL_AREA_ALLOC_SIZE(N) \
+ (sizeof(ErtsLiteralArea) + sizeof(Eterm)*((N) - 1))
+
+extern erts_atomic_t erts_copy_literal_area__;
+#define ERTS_COPY_LITERAL_AREA() \
+ ((ErtsLiteralArea *) erts_atomic_read_nob(&erts_copy_literal_area__))
+extern Process *erts_literal_area_collector;
+extern Process *erts_dirty_process_code_checker;
+extern Process *erts_code_purger;
/* beam_load.c */
typedef struct {
- BeamInstr* current; /* Pointer to: Mod, Name, Arity */
+ ErtsCodeMFA* mfa; /* Pointer to: Mod, Name, Arity */
Uint needed; /* Heap space needed for entire tuple */
Uint32 loc; /* Location in source code */
Eterm* fname_ptr; /* Pointer to fname table */
@@ -666,6 +921,7 @@ typedef struct {
Binary* erts_alloc_loader_state(void);
Eterm erts_module_for_prepared_code(Binary* magic);
+Eterm erts_has_code_on_load(Binary* magic);
Eterm erts_prepare_loading(Binary* loader_state, Process *c_p,
Eterm group_leader, Eterm* modp,
byte* code, Uint size);
@@ -674,74 +930,151 @@ Eterm erts_finish_loading(Binary* loader_state, Process* c_p,
Eterm erts_preload_module(Process *c_p, ErtsProcLocks c_p_locks,
Eterm group_leader, Eterm* mod, byte* code, Uint size);
void init_load(void);
-BeamInstr* find_function_from_pc(BeamInstr* pc);
+ErtsCodeMFA* find_function_from_pc(BeamInstr* pc);
Eterm* erts_build_mfa_item(FunctionInfo* fi, Eterm* hp,
Eterm args, Eterm* mfa_p);
-void erts_set_current_function(FunctionInfo* fi, BeamInstr* current);
+void erts_set_current_function(FunctionInfo* fi, ErtsCodeMFA* mfa);
Eterm erts_module_info_0(Process* p, Eterm module);
Eterm erts_module_info_1(Process* p, Eterm module, Eterm what);
Eterm erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info);
+int erts_commit_hipe_patch_load(Eterm hipe_magic_bin);
/* beam_ranges.c */
void erts_init_ranges(void);
-void erts_start_staging_ranges(void);
+void erts_start_staging_ranges(int num_new);
void erts_end_staging_ranges(int commit);
void erts_update_ranges(BeamInstr* code, Uint size);
void erts_remove_from_ranges(BeamInstr* code);
UWord erts_ranges_sz(void);
void erts_lookup_function_info(FunctionInfo* fi, BeamInstr* pc, int full_info);
+extern ErtsLiteralArea** erts_dump_lit_areas;
+extern Uint erts_dump_num_lit_areas;
/* break.c */
void init_break_handler(void);
void erts_set_ignore_break(void);
void erts_replace_intr(void);
-void process_info(int, void *);
-void print_process_info(int, void *, Process*);
-void info(int, void *);
-void loaded(int, void *);
+void process_info(fmtfn_t, void *);
+void print_process_info(fmtfn_t, void *, Process*);
+void info(fmtfn_t, void *);
+void loaded(fmtfn_t, void *);
-/* config.c */
+/* sighandler sys.c */
+int erts_set_signal(Eterm signal, Eterm type);
-__decl_noreturn void __noreturn erl_exit(int n, char*, ...);
-__decl_noreturn void __noreturn erl_exit_flush_async(int n, char*, ...);
-void erl_error(char*, va_list);
+/* erl_arith.c */
+double erts_get_positive_zero_float(void);
-/* copy.c */
-Eterm copy_object(Eterm, Process*);
+/* config.c */
-#if HALFWORD_HEAP
-Uint size_object_rel(Eterm, Eterm*);
-# define size_object(A) size_object_rel(A,NULL)
+__decl_noreturn void __noreturn erts_exit_epilogue(void);
+__decl_noreturn void __noreturn erts_exit(int n, char*, ...);
+__decl_noreturn void __noreturn erts_flush_async_exit(int n, char*, ...);
+void erl_error(char*, va_list);
-Eterm copy_struct_rel(Eterm, Uint, Eterm**, ErlOffHeap*, Eterm* src_base, Eterm* dst_base);
-# define copy_struct(OBJ,SZ,HPP,OH) copy_struct_rel(OBJ,SZ,HPP,OH, NULL,NULL)
+/* This controls whether sharing-preserving copy is used by Erlang */
-Eterm copy_shallow_rel(Eterm*, Uint, Eterm**, ErlOffHeap*, Eterm* src_base);
-# define copy_shallow(A,B,C,D) copy_shallow_rel(A,B,C,D,NULL)
+#ifdef SHCOPY
+#define SHCOPY_SEND
+#define SHCOPY_SPAWN
+#endif
-#else /* !HALFWORD_HEAP */
+/* The persistent state while the sharing-preserving copier works */
-Uint size_object(Eterm);
-# define size_object_rel(A,B) size_object(A)
+typedef struct {
+ Eterm queue_default[DEF_EQUEUE_SIZE];
+ Eterm* queue_start;
+ Eterm* queue_end;
+ ErtsAlcType_t queue_alloc_type;
+ UWord bitstore_default[DEF_WSTACK_SIZE];
+ UWord* bitstore_start;
+ ErtsAlcType_t bitstore_alloc_type;
+ Eterm shtable_default[DEF_ESTACK_SIZE];
+ Eterm* shtable_start;
+ ErtsAlcType_t shtable_alloc_type;
+ Uint literal_size;
+ Eterm *lit_purge_ptr;
+ Uint lit_purge_sz;
+} erts_shcopy_t;
+
+#define INITIALIZE_SHCOPY(info) \
+ do { \
+ ErtsLiteralArea *larea__ = ERTS_COPY_LITERAL_AREA(); \
+ info.queue_start = info.queue_default; \
+ info.bitstore_start = info.bitstore_default; \
+ info.shtable_start = info.shtable_default; \
+ info.literal_size = 0; \
+ if (larea__) { \
+ info.lit_purge_ptr = &larea__->start[0]; \
+ info.lit_purge_sz = larea__->end - info.lit_purge_ptr; \
+ } \
+ else { \
+ info.lit_purge_ptr = NULL; \
+ info.lit_purge_sz = 0; \
+ } \
+ } while(0)
+
+#define DESTROY_SHCOPY(info) \
+do { \
+ if (info.queue_start != info.queue_default) { \
+ erts_free(info.queue_alloc_type, info.queue_start); \
+ } \
+ if (info.bitstore_start != info.bitstore_default) { \
+ erts_free(info.bitstore_alloc_type, info.bitstore_start); \
+ } \
+ if (info.shtable_start != info.shtable_default) { \
+ erts_free(info.shtable_alloc_type, info.shtable_start); \
+ } \
+} while(0)
-Eterm copy_struct(Eterm, Uint, Eterm**, ErlOffHeap*);
-# define copy_struct_rel(OBJ,SZ,HPP,OH, SB,DB) copy_struct(OBJ,SZ,HPP,OH)
+/* copy.c */
+typedef struct {
+ Eterm *lit_purge_ptr;
+ Uint lit_purge_sz;
+} erts_literal_area_t;
+
+#define INITIALIZE_LITERAL_PURGE_AREA(Area) \
+ do { \
+ ErtsLiteralArea *larea__ = ERTS_COPY_LITERAL_AREA(); \
+ if (larea__) { \
+ (Area).lit_purge_ptr = &larea__->start[0]; \
+ (Area).lit_purge_sz = larea__->end - (Area).lit_purge_ptr; \
+ } \
+ else { \
+ (Area).lit_purge_ptr = NULL; \
+ (Area).lit_purge_sz = 0; \
+ } \
+ } while(0)
+
+Eterm copy_object_x(Eterm, Process*, Uint);
+#define copy_object(Term, Proc) copy_object_x(Term,Proc,0)
+
+Uint size_object_x(Eterm, erts_literal_area_t*);
+#define size_object(Term) size_object_x(Term,NULL)
+#define size_object_litopt(Term,LitArea) size_object_x(Term,LitArea)
+
+Uint copy_shared_calculate(Eterm, erts_shcopy_t*);
+Eterm copy_shared_perform(Eterm, Uint, erts_shcopy_t*, Eterm**, ErlOffHeap*);
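The sharing-preserving copy is a two-pass protocol: copy_shared_calculate sizes the term while recording shared subterms, then copy_shared_perform builds the copy. A hedged sketch of a typical call sequence, where p (a Process*) and msg (an Eterm) are assumed and HAlloc/MSO are the standard process-heap helpers:

erts_shcopy_t info;
INITIALIZE_SHCOPY(info);
Uint sz = copy_shared_calculate(msg, &info);  /* pass 1: size + sharing */
Eterm* hp = HAlloc(p, sz);                    /* reserve heap on p */
Eterm copy = copy_shared_perform(msg, sz, &info, &hp, &MSO(p));
DESTROY_SHCOPY(info);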
+
+Uint size_shared(Eterm);
+
+Eterm copy_struct_x(Eterm, Uint, Eterm**, ErlOffHeap*, Uint*, erts_literal_area_t*);
+#define copy_struct(Obj,Sz,HPP,OH) \
+ copy_struct_x(Obj,Sz,HPP,OH,NULL,NULL)
+#define copy_struct_litopt(Obj,Sz,HPP,OH,LitArea) \
+ copy_struct_x(Obj,Sz,HPP,OH,NULL,LitArea)
Eterm copy_shallow(Eterm*, Uint, Eterm**, ErlOffHeap*);
-# define copy_shallow_rel(A,B,C,D, BASE) copy_shallow(A,B,C,D)
-
-#endif
-
-void move_multi_frags(Eterm** hpp, ErlOffHeap*, ErlHeapFragment* first,
- Eterm* refs, unsigned nrefs);
+void erts_move_multi_frags(Eterm** hpp, ErlOffHeap*, ErlHeapFragment* first,
+ Eterm* refs, unsigned nrefs, int literals);
/* Utilities */
extern void erts_delete_nodes_monitors(Process *, ErtsProcLocks);
extern Eterm erts_monitor_nodes(Process *, Eterm, Eterm);
extern Eterm erts_processes_monitoring_nodes(Process *);
extern int erts_do_net_exits(DistEntry*, Eterm);
-extern int distribution_info(int, void *);
+extern int distribution_info(fmtfn_t, void *);
extern int is_node_name_atom(Eterm a);
extern int erts_net_message(Port *, DistEntry *,
@@ -759,7 +1092,8 @@ void print_pass_through(int, byte*, int);
/* beam_emu.c */
int catchlevel(Process*);
void init_emulator(void);
-void process_main(void);
+void process_main(Eterm* x_reg_array, FloatDef* f_reg_array);
+void erts_dirty_process_main(ErtsSchedulerData *);
Eterm build_stacktrace(Process* c_p, Eterm exc);
Eterm expand_error_value(Process* c_p, Uint freason, Eterm Value);
void erts_save_stacktrace(Process* p, struct StackTrace* s, int depth);
@@ -769,7 +1103,6 @@ void erts_save_stacktrace(Process* p, struct StackTrace* s, int depth);
typedef struct {
Eterm delay_time;
int context_reds;
- int input_reds;
} ErtsModifiedTimings;
extern Export *erts_delay_trap;
@@ -789,10 +1122,11 @@ extern Eterm erts_error_logger_warnings;
extern int erts_initialized;
extern int erts_compat_rel;
extern int erts_use_sender_punish;
-void erts_short_init(void);
void erl_start(int, char**);
void erts_usage(void);
Eterm erts_preloaded(Process* p);
+
+
/* erl_md5.c */
typedef struct {
@@ -805,23 +1139,6 @@ void MD5Init(MD5_CTX *);
void MD5Update(MD5_CTX *, unsigned char *, unsigned int);
void MD5Final(unsigned char [16], MD5_CTX *);
-/* ggc.c */
-
-void erts_gc_info(ErtsGCInfo *gcip);
-void erts_init_gc(void);
-int erts_garbage_collect(Process*, int, Eterm*, int);
-void erts_garbage_collect_hibernate(Process* p);
-Eterm erts_gc_after_bif_call(Process* p, Eterm result, Eterm* regs, Uint arity);
-void erts_garbage_collect_literals(Process* p, Eterm* literals,
- Uint lit_size,
- struct erl_off_heap_header* oh);
-Uint erts_next_heap_size(Uint, Uint);
-Eterm erts_heap_sizes(Process* p);
-
-void erts_offset_off_heap(ErlOffHeap *, Sint, Eterm*, Eterm*);
-void erts_offset_heap_ptr(Eterm*, Uint, Sint, Eterm*, Eterm*);
-void erts_offset_heap(Eterm*, Uint, Sint, Eterm*, Eterm*);
-void erts_free_heap_frags(Process* p);
/* io.c */
@@ -848,9 +1165,13 @@ Uint erts_port_ioq_size(Port *pp);
void erts_stale_drv_select(Eterm, ErlDrvPort, ErlDrvEvent, int, int);
Port *erts_get_heart_port(void);
+void erts_emergency_close_ports(void);
+void erts_ref_to_driver_monitor(Eterm ref, ErlDrvMonitor *mon);
+Eterm erts_driver_monitor_to_ref(Eterm* hp, const ErlDrvMonitor *mon);
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
-void erts_lcnt_enable_io_lock_count(int enable);
+#if defined(ERTS_ENABLE_LOCK_COUNT)
+void erts_lcnt_update_driver_locks(int enable);
+void erts_lcnt_update_port_locks(int enable);
#endif
/* driver_tab.c */
@@ -869,6 +1190,11 @@ Uint64 erts_timestamp_millis(void);
Export* erts_find_function(Eterm, Eterm, unsigned int, ErtsCodeIndex);
+void *erts_calc_stacklimit(char *prev_c, UWord stacksize);
+int erts_check_below_limit(char *ptr, char *limit);
+int erts_check_above_limit(char *ptr, char *limit);
+void *erts_ptr_id(void *ptr);
+
Eterm store_external_or_ref_in_proc_(Process *, Eterm);
Eterm store_external_or_ref_(Uint **, ErlOffHeap*, Eterm);
@@ -901,6 +1227,14 @@ Sint erts_binary_set_loop_limit(Sint limit);
/* external.c */
void erts_init_external(void);
+/* erl_map.c */
+void erts_init_map(void);
+
+/* beam_debug.c */
+UWord erts_check_stack_recursion_downwards(char *start_c);
+UWord erts_check_stack_recursion_upwards(char *start_c);
+int erts_is_above_stack_limit(char *ptr);
+
/* erl_unicode.c */
void erts_init_unicode(void);
Sint erts_unicode_set_loop_limit(Sint limit);
@@ -939,11 +1273,13 @@ int erts_utf8_to_latin1(byte* dest, const byte* source, int slen);
#define ERTS_UTF8_ANALYZE_MORE 3
#define ERTS_UTF8_OK_MAX_CHARS 4
-void bin_write(int, void*, byte*, size_t);
-int intlist_to_buf(Eterm, char*, int); /* most callers pass plain char*'s */
+void bin_write(fmtfn_t, void*, byte*, size_t);
+Sint intlist_to_buf(Eterm, char*, Sint); /* most callers pass plain char*'s */
+int erts_unicode_list_to_buf(Eterm list, byte *buf, Sint len, Sint* written);
+Sint erts_unicode_list_to_buf_len(Eterm list);
struct Sint_buf {
-#if defined(ARCH_64) && !HALFWORD_HEAP
+#if defined(ARCH_64)
char s[22];
#else
char s[12];
@@ -951,22 +1287,69 @@ struct Sint_buf {
};
char* Sint_to_buf(Sint, struct Sint_buf*);
+#define ERTS_IOLIST_STATE_INITER(C_P, OBJ) \
+ {(C_P), 0, 0, (OBJ), {NULL, NULL, NULL, NULL, ERTS_ALC_T_INVALID}, 0, 0}
+
+#define ERTS_IOLIST_STATE_MOVE(TO, FROM) \
+ sys_memcpy((void *) (TO), (void *) (FROM), sizeof(ErtsIOListState))
+
+#define ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED 8
+
+typedef struct {
+ Process *c_p;
+ ErlDrvSizeT size;
+ Uint offs;
+ Eterm obj;
+ ErtsEStack estack;
+ int reds_left;
+ int have_size;
+} ErtsIOListState;
+
+#define ERTS_IOLIST2BUF_STATE_INITER(C_P, OBJ) \
+ {ERTS_IOLIST_STATE_INITER((C_P), (OBJ)), {NULL, 0, 0, 0}, NULL, 0, NULL, 0}
+
+#define ERTS_IOLIST2BUF_STATE_MOVE(TO, FROM) \
+ sys_memcpy((void *) (TO), (void *) (FROM), sizeof(ErtsIOList2BufState))
+
+#define ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT 32
+#define ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED 8
+#define ERTS_IOLIST_TO_BUF_BYTES_PER_RED \
+ (ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED*ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT)
+
+typedef struct {
+ ErtsIOListState iolist;
+ struct {
+ byte *bptr;
+ size_t size;
+ Uint bitoffs;
+ Uint bitsize;
+ } bcopy;
+ char *buf;
+ ErlDrvSizeT len;
+ Eterm *objp;
+ int offset;
+} ErtsIOList2BufState;
+
#define ERTS_IOLIST_OK 0
#define ERTS_IOLIST_OVERFLOW 1
#define ERTS_IOLIST_TYPE 2
+#define ERTS_IOLIST_YIELD 3
Eterm buf_to_intlist(Eterm**, const char*, size_t, Eterm); /* most callers pass plain char*'s */
#define ERTS_IOLIST_TO_BUF_OVERFLOW (~((ErlDrvSizeT) 0))
#define ERTS_IOLIST_TO_BUF_TYPE_ERROR (~((ErlDrvSizeT) 1))
+#define ERTS_IOLIST_TO_BUF_YIELD (~((ErlDrvSizeT) 2))
#define ERTS_IOLIST_TO_BUF_FAILED(R) \
- (((R) & (~((ErlDrvSizeT) 1))) == (~((ErlDrvSizeT) 1)))
+ (((R) & (~((ErlDrvSizeT) 3))) == (~((ErlDrvSizeT) 3)))
#define ERTS_IOLIST_TO_BUF_SUCCEEDED(R) \
(!ERTS_IOLIST_TO_BUF_FAILED((R)))
ErlDrvSizeT erts_iolist_to_buf(Eterm, char*, ErlDrvSizeT);
+ErlDrvSizeT erts_iolist_to_buf_yielding(ErtsIOList2BufState *);
+int erts_iolist_size_yielding(ErtsIOListState *state);
int erts_iolist_size(Eterm, ErlDrvSizeT *);
-int is_string(Eterm);
+Sint is_string(Eterm);
void erl_at_exit(void (*) (void*), void*);
Eterm collect_memory(Process *);
void dump_memory_to_fd(int);
@@ -994,23 +1377,34 @@ Eterm erts_gc_bor(Process* p, Eterm* reg, Uint live);
Eterm erts_gc_bxor(Process* p, Eterm* reg, Uint live);
Eterm erts_gc_bnot(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_length_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_size_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_bit_size_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_byte_size_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_map_size_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_abs_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_float_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_round_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_trunc_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_binary_part_3(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_binary_part_2(Process* p, Eterm* reg, Uint live);
-
Uint erts_current_reductions(Process* current, Process *p);
-int erts_print_system_version(int to, void *arg, Process *c_p);
+int erts_print_system_version(fmtfn_t to, void *arg, Process *c_p);
+
+int erts_hibernate(Process* c_p, Eterm* reg);
+
+ERTS_GLB_FORCE_INLINE int erts_is_literal(Eterm tptr, Eterm *ptr);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_FORCE_INLINE int erts_is_literal(Eterm tptr, Eterm *ptr)
+{
+ ASSERT(is_boxed(tptr) || is_list(tptr));
+ ASSERT(ptr == ptr_val(tptr));
+
+#if defined(ERTS_HAVE_IS_IN_LITERAL_RANGE)
+ return erts_is_in_literal_range(ptr);
+#elif defined(TAG_LITERAL_PTR)
+ return is_literal_ptr(tptr);
+#else
+# error Not able to detect literals...
+#endif
+
+}
-int erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* reg);
+#endif
+
+Eterm erts_msacc_request(Process *c_p, int action, Eterm *threads);
/*
** Call_trace uses this API for the parameter matching functions
@@ -1019,22 +1413,31 @@ int erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm
#define MatchSetRef(MPSP) \
do { \
if ((MPSP) != NULL) { \
- erts_refc_inc(&(MPSP)->refc, 1); \
+ erts_refc_inc(&(MPSP)->intern.refc, 1); \
} \
} while (0)
#define MatchSetUnref(MPSP) \
do { \
- if (((MPSP) != NULL) && erts_refc_dectest(&(MPSP)->refc, 0) <= 0) { \
- erts_bin_free(MPSP); \
+ if (((MPSP) != NULL)) { \
+ erts_bin_release(MPSP); \
} \
} while(0)
#define MatchSetGetSource(MPSP) erts_match_set_get_source(MPSP)
-extern Binary *erts_match_set_compile(Process *p, Eterm matchexpr);
-Eterm erts_match_set_lint(Process *p, Eterm matchexpr);
+extern Binary *erts_match_set_compile(Process *p, Eterm matchexpr, Eterm MFA);
extern void erts_match_set_release_result(Process* p);
+ERTS_GLB_INLINE void erts_match_set_release_result_trace(Process* p, Eterm);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE
+void erts_match_set_release_result_trace(Process* p, Eterm pam_result)
+{
+ if (is_not_immed(pam_result))
+ erts_match_set_release_result(p);
+}
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
enum erts_pam_run_flags {
ERTS_PAM_TMP_RESULT=1,
@@ -1042,10 +1445,12 @@ enum erts_pam_run_flags {
ERTS_PAM_CONTIGUOUS_TUPLE=4,
ERTS_PAM_IGNORE_TRACE_SILENT=8
};
-extern Eterm erts_match_set_run(Process *p, Binary *mpsp,
- Eterm *args, int num_args,
- enum erts_pam_run_flags in_flags,
- Uint32 *return_flags);
+extern Eterm erts_match_set_run_trace(Process *p,
+ Process *self,
+ Binary *mpsp,
+ Eterm *args, int num_args,
+ enum erts_pam_run_flags in_flags,
+ Uint32 *return_flags);
extern Eterm erts_match_set_get_source(Binary *mpsp);
extern void erts_match_prog_foreach_offheap(Binary *b,
void (*)(ErlOffHeap *, void *),
@@ -1060,195 +1465,26 @@ extern void erts_match_prog_foreach_offheap(Binary *b,
extern erts_driver_t vanilla_driver;
extern erts_driver_t spawn_driver;
+extern erts_driver_t forker_driver;
extern erts_driver_t fd_driver;
int erts_beam_jump_table(void);
-/* Should maybe be placed in erl_message.h, but then we get an include mess. */
-ERTS_GLB_INLINE Eterm *
-erts_alloc_message_heap_state(Uint size,
- ErlHeapFragment **bpp,
- ErlOffHeap **ohpp,
- Process *receiver,
- ErtsProcLocks *receiver_locks,
- erts_aint32_t *statep);
-
-ERTS_GLB_INLINE Eterm *
-erts_alloc_message_heap(Uint size,
- ErlHeapFragment **bpp,
- ErlOffHeap **ohpp,
- Process *receiver,
- ErtsProcLocks *receiver_locks);
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-/*
- * NOTE: erts_alloc_message_heap() releases msg q and status
- * lock on receiver without ensuring that other locks are
- * held. User is responsible to ensure that the receiver
- * pointer cannot become invalid until after message has
- * been passed. This is normal done either by increasing
- * reference count on process (preferred) or by holding
- * main or link lock over the whole message passing
- * operation.
- */
-
-ERTS_GLB_INLINE Eterm *
-erts_alloc_message_heap_state(Uint size,
- ErlHeapFragment **bpp,
- ErlOffHeap **ohpp,
- Process *receiver,
- ErtsProcLocks *receiver_locks,
- erts_aint32_t *statep)
-{
- Eterm *hp;
- erts_aint32_t state;
-#ifdef ERTS_SMP
- int locked_main = 0;
- state = erts_smp_atomic32_read_acqb(&receiver->state);
- if (statep)
- *statep = state;
- if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))
- goto allocate_in_mbuf;
-#endif
-
- if (size > (Uint) INT_MAX)
- erl_exit(ERTS_ABORT_EXIT, "HUGE size (%beu)\n", size);
-
- if (
-#if defined(ERTS_SMP)
- *receiver_locks & ERTS_PROC_LOCK_MAIN
-#else
- 1
-#endif
- ) {
-#ifdef ERTS_SMP
- try_allocate_on_heap:
-#endif
- state = erts_smp_atomic32_read_nob(&receiver->state);
- if (statep)
- *statep = state;
- if ((state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))
- || (receiver->flags & F_DISABLE_GC)
- || HEAP_LIMIT(receiver) - HEAP_TOP(receiver) <= size) {
- /*
- * The heap is either potentially in an inconsistent
- * state, or not large enough.
- */
-#ifdef ERTS_SMP
- if (locked_main) {
- *receiver_locks &= ~ERTS_PROC_LOCK_MAIN;
- erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MAIN);
- }
-#endif
- goto allocate_in_mbuf;
- }
- hp = HEAP_TOP(receiver);
- HEAP_TOP(receiver) = hp + size;
- *bpp = NULL;
- *ohpp = &MSO(receiver);
- }
-#ifdef ERTS_SMP
- else if (erts_smp_proc_trylock(receiver, ERTS_PROC_LOCK_MAIN) == 0) {
- locked_main = 1;
- *receiver_locks |= ERTS_PROC_LOCK_MAIN;
- goto try_allocate_on_heap;
- }
-#endif
- else {
- ErlHeapFragment *bp;
- allocate_in_mbuf:
- bp = new_message_buffer(size);
- hp = bp->mem;
- *bpp = bp;
- *ohpp = &bp->off_heap;
- }
-
- return hp;
-}
-
-ERTS_GLB_INLINE Eterm *
-erts_alloc_message_heap(Uint size,
- ErlHeapFragment **bpp,
- ErlOffHeap **ohpp,
- Process *receiver,
- ErtsProcLocks *receiver_locks)
-{
- return erts_alloc_message_heap_state(size, bpp, ohpp, receiver,
- receiver_locks, NULL);
-}
-
-#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-
-#if !HEAP_ON_C_STACK
-# if defined(DEBUG)
-# define DeclareTmpHeap(VariableName,Size,Process) \
- Eterm *VariableName = erts_debug_allocate_tmp_heap(Size,Process)
-# define DeclareTypedTmpHeap(Type,VariableName,Process) \
- Type *VariableName = (Type *) erts_debug_allocate_tmp_heap(sizeof(Type)/sizeof(Eterm),Process)
-# define DeclareTmpHeapNoproc(VariableName,Size) \
- Eterm *VariableName = erts_debug_allocate_tmp_heap(Size,NULL)
-# define UseTmpHeap(Size,Proc) \
- do { \
- erts_debug_use_tmp_heap((Size),(Proc)); \
- } while (0)
-# define UnUseTmpHeap(Size,Proc) \
- do { \
- erts_debug_unuse_tmp_heap((Size),(Proc)); \
- } while (0)
-# define UseTmpHeapNoproc(Size) \
- do { \
- erts_debug_use_tmp_heap(Size,NULL); \
- } while (0)
-# define UnUseTmpHeapNoproc(Size) \
- do { \
- erts_debug_unuse_tmp_heap(Size,NULL); \
- } while (0)
-# else
-# define DeclareTmpHeap(VariableName,Size,Process) \
- Eterm *VariableName = (ERTS_PROC_GET_SCHDATA(Process)->tmp_heap)+(ERTS_PROC_GET_SCHDATA(Process)->num_tmp_heap_used)
-# define DeclareTypedTmpHeap(Type,VariableName,Process) \
- Type *VariableName = (Type *) (ERTS_PROC_GET_SCHDATA(Process)->tmp_heap)+(ERTS_PROC_GET_SCHDATA(Process)->num_tmp_heap_used)
-# define DeclareTmpHeapNoproc(VariableName,Size) \
- Eterm *VariableName = (erts_get_scheduler_data()->tmp_heap)+(erts_get_scheduler_data()->num_tmp_heap_used)
-# define UseTmpHeap(Size,Proc) \
- do { \
- ERTS_PROC_GET_SCHDATA(Proc)->num_tmp_heap_used += (Size); \
- } while (0)
-# define UnUseTmpHeap(Size,Proc) \
- do { \
- ERTS_PROC_GET_SCHDATA(Proc)->num_tmp_heap_used -= (Size); \
- } while (0)
-# define UseTmpHeapNoproc(Size) \
- do { \
- erts_get_scheduler_data()->num_tmp_heap_used += (Size); \
- } while (0)
-# define UnUseTmpHeapNoproc(Size) \
- do { \
- erts_get_scheduler_data()->num_tmp_heap_used -= (Size); \
- } while (0)
-
-
-# endif
-
-#else
-# define DeclareTmpHeap(VariableName,Size,Process) \
+#define DeclareTmpHeap(VariableName,Size,Process) \
Eterm VariableName[Size]
-# define DeclareTypedTmpHeap(Type,VariableName,Process) \
+#define DeclareTypedTmpHeap(Type,VariableName,Process) \
Type VariableName[1]
-# define DeclareTmpHeapNoproc(VariableName,Size) \
+#define DeclareTmpHeapNoproc(VariableName,Size) \
Eterm VariableName[Size]
-# define UseTmpHeap(Size,Proc) /* Nothing */
-# define UnUseTmpHeap(Size,Proc) /* Nothing */
-# define UseTmpHeapNoproc(Size) /* Nothing */
-# define UnUseTmpHeapNoproc(Size) /* Nothing */
-#endif /* HEAP_ON_C_STACK */
+#define UseTmpHeap(Size,Proc) /* Nothing */
+#define UnUseTmpHeap(Size,Proc) /* Nothing */
+#define UseTmpHeapNoproc(Size) /* Nothing */
+#define UnUseTmpHeapNoproc(Size) /* Nothing */
ERTS_GLB_INLINE void dtrace_pid_str(Eterm pid, char *process_buf);
ERTS_GLB_INLINE void dtrace_proc_str(Process *process, char *process_buf);
ERTS_GLB_INLINE void dtrace_port_str(Port *port, char *port_buf);
-ERTS_GLB_INLINE void dtrace_fun_decode(Process *process,
- Eterm module, Eterm function, int arity,
+ERTS_GLB_INLINE void dtrace_fun_decode(Process *process, ErtsCodeMFA *mfa,
char *process_buf, char *mfa_buf);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -1258,10 +1494,15 @@ ERTS_GLB_INLINE void dtrace_fun_decode(Process *process,
ERTS_GLB_INLINE void
dtrace_pid_str(Eterm pid, char *process_buf)
{
- erts_snprintf(process_buf, DTRACE_TERM_BUF_SIZE, "<%lu.%lu.%lu>",
- pid_channel_no(pid),
- pid_number(pid),
- pid_serial(pid));
+ if (is_pid(pid))
+ erts_snprintf(process_buf, DTRACE_TERM_BUF_SIZE, "<%lu.%lu.%lu>",
+ pid_channel_no(pid),
+ pid_number(pid),
+ pid_serial(pid));
+ else if (is_port(pid))
+ erts_snprintf(process_buf, DTRACE_TERM_BUF_SIZE, "#Port<%lu.%lu>",
+ port_channel_no(pid),
+ port_number(pid));
}
ERTS_GLB_INLINE void
@@ -1273,14 +1514,11 @@ dtrace_proc_str(Process *process, char *process_buf)
ERTS_GLB_INLINE void
dtrace_port_str(Port *port, char *port_buf)
{
- erts_snprintf(port_buf, DTRACE_TERM_BUF_SIZE, "#Port<%lu.%lu>",
- port_channel_no(port->common.id),
- port_number(port->common.id));
+ dtrace_pid_str(port->common.id, port_buf);
}
ERTS_GLB_INLINE void
-dtrace_fun_decode(Process *process,
- Eterm module, Eterm function, int arity,
+dtrace_fun_decode(Process *process, ErtsCodeMFA *mfa,
char *process_buf, char *mfa_buf)
{
if (process_buf) {
@@ -1288,8 +1526,9 @@ dtrace_fun_decode(Process *process,
}
erts_snprintf(mfa_buf, DTRACE_TERM_BUF_SIZE, "%T:%T/%d",
- module, function, arity);
+ mfa->module, mfa->function, mfa->arity);
}
+
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
#endif /* !__GLOBAL_H__ */
diff --git a/erts/emulator/beam/hash.c b/erts/emulator/beam/hash.c
index afaf32f8ce..8548e30e8b 100644
--- a/erts/emulator/beam/hash.c
+++ b/erts/emulator/beam/hash.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -26,8 +27,6 @@
#endif
#include "sys.h"
-#include "erl_vm.h"
-#include "global.h"
#include "hash.h"
/*
@@ -36,9 +35,9 @@
static const int h_size_table[] = {
2, 5, 11, 23, 47, 97, 197, 397, 797, /* double upto here */
1201, 1597,
- 2411, 3203,
+ 2411, 3203,
4813, 6421,
- 9643, 12853,
+ 9643, 12853,
19289, 25717,
51437,
102877,
@@ -50,8 +49,8 @@ static const int h_size_table[] = {
6584983,
13169977,
26339969,
- 52679969,
- -1
+ 52679969,
+ -1
};
/*
@@ -65,24 +64,29 @@ void hash_get_info(HashInfo *hi, Hash *h)
int i;
int max_depth = 0;
int objects = 0;
+ int used = 0;
for (i = 0; i < size; i++) {
int depth = 0;
HashBucket* b = h->bucket[i];
-
+
while (b != (HashBucket*) 0) {
objects++;
depth++;
b = b->next;
}
- if (depth > max_depth)
- max_depth = depth;
+ if (depth) {
+ used++;
+ if (depth > max_depth)
+ max_depth = depth;
+ }
}
+ ASSERT(objects == h->nobjs);
hi->name = h->name;
hi->size = h->size;
- hi->used = h->used;
- hi->objs = objects;
+ hi->used = used;
+ hi->objs = h->nobjs;
hi->depth = max_depth;
}
@@ -91,24 +95,24 @@ void hash_get_info(HashInfo *hi, Hash *h)
**
*/
-void hash_info(int to, void *arg, Hash* h)
+void hash_info(fmtfn_t to, void *arg, Hash* h)
{
HashInfo hi;
hash_get_info(&hi, h);
- erts_print(to, arg, "=hash_table:%s\n", hi.name);
- erts_print(to, arg, "size: %d\n", hi.size);
- erts_print(to, arg, "used: %d\n", hi.used);
- erts_print(to, arg, "objs: %d\n", hi.objs);
- erts_print(to, arg, "depth: %d\n", hi.depth);
+ h->fun.meta_print(to, arg, "=hash_table:%s\n", hi.name);
+ h->fun.meta_print(to, arg, "size: %d\n", hi.size);
+ h->fun.meta_print(to, arg, "used: %d\n", hi.used);
+ h->fun.meta_print(to, arg, "objs: %d\n", hi.objs);
+ h->fun.meta_print(to, arg, "depth: %d\n", hi.depth);
}
/*
* Returns size of table in bytes. Stored objects not included.
*/
-int
+int
hash_table_sz(Hash *h)
{
int i;
@@ -118,47 +122,56 @@ hash_table_sz(Hash *h)
}
+static ERTS_INLINE void set_thresholds(Hash* h)
+{
+ h->grow_threshold = (8*h->size)/5; /* grow at 160% load */
+ if (h->size_ix > h->min_size_ix)
+ h->shrink_threshold = h->size / 5; /* shrink at 20% load */
+ else
+ h->shrink_threshold = -1; /* never shrink below initial size */
+}
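With size 97, for example, these thresholds make the table grow once it holds more than 155 objects (160% load) and, provided it has grown beyond its initial size, shrink when it drops below 19 objects (20% load).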
+
/*
** init a pre allocated or static hash structure
** and allocate buckets.
*/
-Hash* hash_init(ErtsAlcType_t type, Hash* h, char* name, int size, HashFunctions fun)
+Hash* hash_init(int type, Hash* h, char* name, int size, HashFunctions fun)
{
int sz;
int ix = 0;
- h->type = type;
+ h->meta_alloc_type = type;
while (h_size_table[ix] != -1 && h_size_table[ix] < size)
ix++;
if (h_size_table[ix] == -1)
- erl_exit(1, "panic: too large hash table size (%d)\n", size);
+ return NULL;
size = h_size_table[ix];
sz = size*sizeof(HashBucket*);
- h->bucket = (HashBucket**) erts_alloc(h->type, sz);
+ h->bucket = (HashBucket**) fun.meta_alloc(h->meta_alloc_type, sz);
sys_memzero(h->bucket, sz);
h->is_allocated = 0;
h->name = name;
h->fun = fun;
h->size = size;
- h->size20percent = h->size/5;
- h->size80percent = (4*h->size)/5;
- h->ix = ix;
- h->used = 0;
+ h->size_ix = ix;
+ h->min_size_ix = ix;
+ h->nobjs = 0;
+ set_thresholds(h);
return h;
}
/*
** Create a new hash table
*/
-Hash* hash_new(ErtsAlcType_t type, char* name, int size, HashFunctions fun)
+Hash* hash_new(int type, char* name, int size, HashFunctions fun)
{
Hash* h;
- h = erts_alloc(type, sizeof(Hash));
+ h = fun.meta_alloc(type, sizeof(Hash));
h = hash_init(type, h, name, size, fun);
h->is_allocated = 1;
@@ -177,14 +190,14 @@ void hash_delete(Hash* h)
HashBucket* b = h->bucket[i];
while (b != (HashBucket*) 0) {
HashBucket* b_next = b->next;
-
+
h->fun.free((void*) b);
b = b_next;
}
}
- erts_free(h->type, h->bucket);
+ h->fun.meta_free(h->meta_alloc_type, h->bucket);
if (h->is_allocated)
- erts_free(h->type, (void*) h);
+ h->fun.meta_free(h->meta_alloc_type, (void*) h);
}
/*
@@ -198,39 +211,34 @@ static void rehash(Hash* h, int grow)
int i;
if (grow) {
- if ((h_size_table[h->ix+1]) == -1)
+ if ((h_size_table[h->size_ix+1]) == -1)
return;
- h->ix++;
+ h->size_ix++;
}
else {
- if (h->ix == 0)
+ if (h->size_ix == 0)
return;
- h->ix--;
+ h->size_ix--;
}
- h->size = h_size_table[h->ix];
- h->size20percent = h->size/5;
- h->size80percent = (4*h->size)/5;
+ h->size = h_size_table[h->size_ix];
sz = h->size*sizeof(HashBucket*);
- new_bucket = (HashBucket **) erts_alloc(h->type, sz);
+ new_bucket = (HashBucket **) h->fun.meta_alloc(h->meta_alloc_type, sz);
sys_memzero(new_bucket, sz);
- h->used = 0;
-
for (i = 0; i < old_size; i++) {
HashBucket* b = h->bucket[i];
while (b != (HashBucket*) 0) {
HashBucket* b_next = b->next;
int ix = b->hvalue % h->size;
- if (new_bucket[ix] == NULL)
- h->used++;
b->next = new_bucket[ix];
new_bucket[ix] = b;
b = b_next;
}
}
- erts_free(h->type, (void *) h->bucket);
+ h->fun.meta_free(h->meta_alloc_type, (void *) h->bucket);
h->bucket = new_bucket;
+ set_thresholds(h);
}
/*
@@ -242,7 +250,7 @@ void* hash_get(Hash* h, void* tmpl)
HashValue hval = h->fun.hash(tmpl);
int ix = hval % h->size;
HashBucket* b = h->bucket[ix];
-
+
while(b != (HashBucket*) 0) {
if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0))
return (void*) b;
@@ -267,68 +275,15 @@ void* hash_put(Hash* h, void* tmpl)
}
b = (HashBucket*) h->fun.alloc(tmpl);
- if (h->bucket[ix] == NULL)
- h->used++;
-
b->hvalue = hval;
b->next = h->bucket[ix];
h->bucket[ix] = b;
- if (h->used > h->size80percent) /* rehash at 80% */
+ if (++h->nobjs > h->grow_threshold)
rehash(h, 1);
return (void*) b;
}
-static void
-hash_insert_entry(Hash* h, HashBucket* entry)
-{
- HashValue hval = entry->hvalue;
- int ix = hval % h->size;
- HashBucket* b = h->bucket[ix];
-
- while (b != (HashBucket*) 0) {
- if ((b->hvalue == hval) && (h->fun.cmp((void*)entry, (void*)b) == 0)) {
- abort(); /* Should not happen */
- }
- b = b->next;
- }
-
- if (h->bucket[ix] == NULL)
- h->used++;
-
- entry->next = h->bucket[ix];
- h->bucket[ix] = entry;
-
- if (h->used > h->size80percent) /* rehash at 80% */
- rehash(h, 1);
-}
-
-
-/*
- * Move all entries in src into dst; empty src.
- * Entries in src must not exist in dst.
- */
-void
-erts_hash_merge(Hash* src, Hash* dst)
-{
- int limit = src->size;
- HashBucket** bucket = src->bucket;
- int i;
-
- src->used = 0;
- for (i = 0; i < limit; i++) {
- HashBucket* b = bucket[i];
- HashBucket* next;
-
- bucket[i] = NULL;
- while (b) {
- next = b->next;
- hash_insert_entry(dst, b);
- b = next;
- }
- }
-}
-
/*
** Erase hash entry return template if erased
** return 0 if not erased
@@ -339,7 +294,7 @@ void* hash_erase(Hash* h, void* tmpl)
int ix = hval % h->size;
HashBucket* b = h->bucket[ix];
HashBucket* prev = 0;
-
+
while(b != 0) {
if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0)) {
if (prev != 0)
@@ -347,9 +302,7 @@ void* hash_erase(Hash* h, void* tmpl)
else
h->bucket[ix] = b->next;
h->fun.free((void*)b);
- if (h->bucket[ix] == NULL)
- h->used--;
- if (h->used < h->size20percent) /* rehash at 20% */
+ if (--h->nobjs < h->shrink_threshold)
rehash(h, 0);
return tmpl;
}
@@ -373,16 +326,14 @@ hash_remove(Hash *h, void *tmpl)
int ix = hval % h->size;
HashBucket *b = h->bucket[ix];
HashBucket *prev = NULL;
-
+
while (b) {
if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0)) {
if (prev)
prev->next = b->next;
else
h->bucket[ix] = b->next;
- if (h->bucket[ix] == NULL)
- h->used--;
- if (h->used < h->size20percent) /* rehash at 20% */
+ if (--h->nobjs < h->shrink_threshold)
rehash(h, 0);
return (void *) b;
}
@@ -404,4 +355,3 @@ void hash_foreach(Hash* h, void (*func)(void *, void *), void *func_arg2)
}
}
}
-
diff --git a/erts/emulator/beam/hash.h b/erts/emulator/beam/hash.h
index 6dd66fc9b3..d319aaca83 100644
--- a/erts/emulator/beam/hash.h
+++ b/erts/emulator/beam/hash.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -24,18 +25,19 @@
#ifndef __HASH_H__
#define __HASH_H__
-#ifndef __SYS_H__
#include "sys.h"
-#endif
-
-#include "erl_alloc.h"
typedef unsigned long HashValue;
+typedef struct hash Hash;
typedef int (*HCMP_FUN)(void*, void*);
typedef HashValue (*H_FUN)(void*);
typedef void* (*HALLOC_FUN)(void*);
typedef void (*HFREE_FUN)(void*);
+/* Meta functions */
+typedef void* (*HMALLOC_FUN)(int,size_t);
+typedef void (*HMFREE_FUN)(int,void*);
+typedef int (*HMPRINT_FUN)(fmtfn_t,void*,char*, ...);
/*
** This bucket must be placed in top of
@@ -54,6 +56,9 @@ typedef struct hash_functions
HCMP_FUN cmp;
HALLOC_FUN alloc;
HFREE_FUN free;
+ HMALLOC_FUN meta_alloc;
+ HMFREE_FUN meta_free;
+ HMPRINT_FUN meta_print;
} HashFunctions;
typedef struct {
@@ -64,26 +69,27 @@ typedef struct {
int depth;
} HashInfo;
-typedef struct hash
+struct hash
{
HashFunctions fun; /* Function block */
int is_allocated; /* 0 iff hash structure is on stack or is static */
- ErtsAlcType_t type;
+ int meta_alloc_type; /* argument to pass to meta_alloc and meta_free */
char* name; /* Table name (static string, for debugging) */
int size; /* Number of slots */
- int size20percent; /* 20 percent of number of slots */
- int size80percent; /* 80 percent of number of slots */
- int ix; /* Size index in size table */
- int used; /* Number of slots used */
+ int shrink_threshold;
+ int grow_threshold;
+ int size_ix; /* Size index in size table */
+ int min_size_ix; /* Never shrink table smaller than this */
+ int nobjs; /* Number of objects in table */
HashBucket** bucket; /* Vector of bucket pointers (objects) */
-} Hash;
+};
-Hash* hash_new(ErtsAlcType_t, char*, int, HashFunctions);
-Hash* hash_init(ErtsAlcType_t, Hash*, char*, int, HashFunctions);
+Hash* hash_new(int, char*, int, HashFunctions);
+Hash* hash_init(int, Hash*, char*, int, HashFunctions);
void hash_delete(Hash*);
void hash_get_info(HashInfo*, Hash*);
-void hash_info(int, void *, Hash*);
+void hash_info(fmtfn_t, void *, Hash*);
int hash_table_sz(Hash *);
void* hash_get(Hash*, void*);
@@ -92,6 +98,4 @@ void* hash_erase(Hash*, void*);
void* hash_remove(Hash*, void*);
void hash_foreach(Hash*, void (*func)(void *, void *), void *);
-void erts_hash_merge(Hash* src, Hash* dst);
-
#endif
diff --git a/erts/emulator/beam/index.c b/erts/emulator/beam/index.c
index 79c3ecf1b3..93d1111904 100644
--- a/erts/emulator/beam/index.c
+++ b/erts/emulator/beam/index.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2012. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -26,7 +27,7 @@
#include "global.h"
#include "index.h"
-void index_info(int to, void *arg, IndexTable *t)
+void index_info(fmtfn_t to, void *arg, IndexTable *t)
{
hash_info(to, arg, &t->htable);
erts_print(to, arg, "=index_table:%s\n", t->htable.name);
@@ -83,16 +84,23 @@ index_put_entry(IndexTable* t, void* tmpl)
Uint sz;
if (ix >= t->limit) {
/* A core dump is unnecessary */
- erl_exit(ERTS_DUMP_EXIT, "no more index entries in %s (max=%d)\n",
+ erts_exit(ERTS_DUMP_EXIT, "no more index entries in %s (max=%d)\n",
t->htable.name, t->limit);
}
sz = INDEX_PAGE_SIZE*sizeof(IndexSlot*);
t->seg_table[ix>>INDEX_PAGE_SHIFT] = erts_alloc(t->type, sz);
t->size += INDEX_PAGE_SIZE;
}
- t->entries++;
p->index = ix;
t->seg_table[ix>>INDEX_PAGE_SHIFT][ix&INDEX_PAGE_MASK] = p;
+
+ /*
+ * Do a write barrier here to allow readers to do lock free iteration.
+ * erts_index_num_entries() does matching read barrier.
+ */
+ ERTS_THR_WRITE_MEMORY_BARRIER;
+ t->entries++;
+
return p;
}
@@ -122,7 +130,7 @@ void erts_index_merge(Hash* src, IndexTable* dst)
ix = dst->entries++;
if (ix >= dst->size) {
if (ix >= dst->limit) {
- erl_exit(1, "no more index entries in %s (max=%d)\n",
+ erts_exit(ERTS_ERROR_EXIT, "no more index entries in %s (max=%d)\n",
dst->htable.name, dst->limit);
}
sz = INDEX_PAGE_SIZE*sizeof(IndexSlot*);
diff --git a/erts/emulator/beam/index.h b/erts/emulator/beam/index.h
index 537bc11056..30bc6a1121 100644
--- a/erts/emulator/beam/index.h
+++ b/erts/emulator/beam/index.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -25,9 +26,8 @@
#ifndef __INDEX_H__
#define __INDEX_H__
-#ifndef __HASH_H__
#include "hash.h"
-#endif
+#include "erl_alloc.h"
typedef struct index_slot
{
@@ -51,7 +51,7 @@ typedef struct index_table
#define INDEX_PAGE_MASK ((1 << INDEX_PAGE_SHIFT)-1)
IndexTable *erts_index_init(ErtsAlcType_t,IndexTable*,char*,int,int,HashFunctions);
-void index_info(int, void *, IndexTable*);
+void index_info(fmtfn_t, void *, IndexTable*);
int index_table_sz(IndexTable *);
int index_get(IndexTable*, void*);
@@ -65,6 +65,7 @@ void index_erase_latest_from(IndexTable*, Uint ix);
ERTS_GLB_INLINE int index_put(IndexTable*, void*);
ERTS_GLB_INLINE IndexSlot* erts_index_lookup(IndexTable*, Uint);
+ERTS_GLB_INLINE int erts_index_num_entries(IndexTable* t);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -78,6 +79,19 @@ erts_index_lookup(IndexTable* t, Uint ix)
{
return t->seg_table[ix>>INDEX_PAGE_SHIFT][ix&INDEX_PAGE_MASK];
}
+
+ERTS_GLB_INLINE int erts_index_num_entries(IndexTable* t)
+{
+ int ret = t->entries;
+ /*
+ * Do a read barrier here to allow lock free iteration
+ * on tables where entries are never erased.
+ * index_put_entry() does matching write barrier.
+ */
+ ERTS_THR_READ_MEMORY_BARRIER;
+ return ret;
+}
+
#endif
#endif
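
The pairing introduced in index.c and index.h above is the classic publish/count idiom: index_put_entry() makes the slot visible before it bumps t->entries behind a write barrier, and erts_index_num_entries() reads the counter behind a read barrier, so a reader that sees count n can safely dereference slots 0..n-1 without a lock (on tables where entries are never erased). A self-contained sketch of the same idiom, using C11 release/acquire in place of the standalone ERTS barrier macros; the names and the fixed-size table are illustrative:

    #include <stdatomic.h>

    typedef struct {
        void *slots[1024];
        atomic_int entries;
    } Table;

    /* writer: publish the slot, then the count (release) */
    static void publish(Table *t, int ix, void *p)
    {
        t->slots[ix] = p;
        atomic_store_explicit(&t->entries, ix + 1, memory_order_release);
    }

    /* reader: load the count first (acquire); every slot below the
       returned value is then guaranteed to be visible */
    static int num_entries(Table *t)
    {
        return atomic_load_explicit(&t->entries, memory_order_acquire);
    }
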
diff --git a/erts/emulator/beam/instrs.tab b/erts/emulator/beam/instrs.tab
new file mode 100644
index 0000000000..c17d1a8f69
--- /dev/null
+++ b/erts/emulator/beam/instrs.tab
@@ -0,0 +1,926 @@
+// -*- c -*-
+//
+// %CopyrightBegin%
+//
+// Copyright Ericsson AB 2017. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// %CopyrightEnd%
+//
+
+// Stack manipulation instructions
+
+allocate(NeedStack, Live) {
+ $AH($NeedStack, 0, $Live);
+}
+
+allocate_heap(NeedStack, NeedHeap, Live) {
+ $AH($NeedStack, $NeedHeap, $Live);
+}
+
+allocate_init(NeedStack, Live, Y) {
+ $AH($NeedStack, 0, $Live);
+ make_blank($Y);
+}
+
+allocate_zero(NeedStack, Live) {
+ Eterm* ptr;
+ int i = $NeedStack;
+ $AH(i, 0, $Live);
+ for (ptr = E + i; ptr > E; ptr--) {
+ make_blank(*ptr);
+ }
+}
+
+allocate_heap_zero(NeedStack, NeedHeap, Live) {
+ Eterm* ptr;
+ int i = $NeedStack;
+ $AH(i, $NeedHeap, $Live);
+ for (ptr = E + i; ptr > E; ptr--) {
+ make_blank(*ptr);
+ }
+}
+
+// This instruction is probably never used (because it is combined with
+// a return). However, a future compiler might for some reason emit a
+// deallocate not followed by a return, and that should work.
+
+deallocate(Deallocate) {
+ //| -no_prefetch
+ SET_CP(c_p, (BeamInstr *) cp_val(*E));
+ E = ADD_BYTE_OFFSET(E, $Deallocate);
+}
+
+deallocate_return(Deallocate) {
+ //| -no_next
+ int words_to_pop = $Deallocate;
+ SET_I((BeamInstr *) cp_val(*E));
+ E = ADD_BYTE_OFFSET(E, words_to_pop);
+ CHECK_TERM(x(0));
+ DispatchReturn;
+}
+
+move_deallocate_return(Src, Deallocate) {
+ x(0) = $Src;
+ $deallocate_return($Deallocate);
+}
+
+// Call instructions
+
+DISPATCH_REL(CallDest) {
+ //| -no_next
+ $SET_I_REL($CallDest);
+ DTRACE_LOCAL_CALL(c_p, erts_code_to_codemfa(I));
+ Dispatch();
+}
+
+DISPATCH_ABS(CallDest) {
+ //| -no_next
+ SET_I((BeamInstr *) $CallDest);
+ DTRACE_LOCAL_CALL(c_p, erts_code_to_codemfa(I));
+ Dispatch();
+}
+
+i_call(CallDest) {
+ SET_CP(c_p, $NEXT_INSTRUCTION);
+ $DISPATCH_REL($CallDest);
+}
+
+move_call(Src, CallDest) {
+ x(0) = $Src;
+ SET_CP(c_p, $NEXT_INSTRUCTION);
+ $DISPATCH_REL($CallDest);
+}
+
+i_call_last(CallDest, Deallocate) {
+ $deallocate($Deallocate);
+ $DISPATCH_REL($CallDest);
+}
+
+move_call_last(Src, CallDest, Deallocate) {
+ x(0) = $Src;
+ $i_call_last($CallDest, $Deallocate);
+}
+
+i_call_only(CallDest) {
+ $DISPATCH_REL($CallDest);
+}
+
+move_call_only(Src, CallDest) {
+ x(0) = $Src;
+ $i_call_only($CallDest);
+}
+
+DISPATCHX(Dest) {
+ //| -no_next
+ DTRACE_GLOBAL_CALL_FROM_EXPORT(c_p, $Dest);
+ // Dispatchx assumes the Export* is in Arg(0)
+ I = (&$Dest) - 1;
+ Dispatchx();
+}
+
+i_call_ext(Dest) {
+ SET_CP(c_p, $NEXT_INSTRUCTION);
+ $DISPATCHX($Dest);
+}
+
+i_move_call_ext(Src, Dest) {
+ x(0) = $Src;
+ $i_call_ext($Dest);
+}
+
+i_call_ext_only(Dest) {
+ $DISPATCHX($Dest);
+}
+
+i_move_call_ext_only(Dest, Src) {
+ x(0) = $Src;
+ $i_call_ext_only($Dest);
+}
+
+i_call_ext_last(Dest, Deallocate) {
+ $deallocate($Deallocate);
+ $DISPATCHX($Dest);
+}
+
+i_move_call_ext_last(Dest, StackOffset, Src) {
+ x(0) = $Src;
+ $i_call_ext_last($Dest, $StackOffset);
+}
+
+APPLY(I, Deallocate, Next) {
+ //| -no_next
+ HEAVY_SWAPOUT;
+ $Next = apply(c_p, reg, $I, $Deallocate);
+ HEAVY_SWAPIN;
+}
+
+HANDLE_APPLY_ERROR() {
+ I = handle_error(c_p, I, reg, &bif_export[BIF_apply_3]->info.mfa);
+ goto post_error_handling;
+}
+
+i_apply() {
+ BeamInstr *next;
+ $APPLY(NULL, 0, next);
+ if (ERTS_LIKELY(next != NULL)) {
+ SET_CP(c_p, $NEXT_INSTRUCTION);
+ $DISPATCH_ABS(next);
+ }
+ $HANDLE_APPLY_ERROR();
+}
+
+i_apply_last(Deallocate) {
+ BeamInstr *next;
+ $APPLY(I, $Deallocate, next);
+ if (ERTS_LIKELY(next != NULL)) {
+ $deallocate($Deallocate);
+ $DISPATCH_ABS(next);
+ }
+ $HANDLE_APPLY_ERROR();
+}
+
+i_apply_only() {
+ BeamInstr *next;
+ $APPLY(I, 0, next);
+ if (ERTS_LIKELY(next != NULL)) {
+ $DISPATCH_ABS(next);
+ }
+ $HANDLE_APPLY_ERROR();
+}
+
+FIXED_APPLY(Arity, I, Deallocate, Next) {
+ //| -no_next
+ HEAVY_SWAPOUT;
+ $Next = fixed_apply(c_p, reg, $Arity, $I, $Deallocate);
+ HEAVY_SWAPIN;
+}
+
+apply(Arity) {
+ BeamInstr *next;
+ $FIXED_APPLY($Arity, NULL, 0, next);
+ if (ERTS_LIKELY(next != NULL)) {
+ SET_CP(c_p, $NEXT_INSTRUCTION);
+ $DISPATCH_ABS(next);
+ }
+ $HANDLE_APPLY_ERROR();
+}
+
+apply_last(Arity, Deallocate) {
+ BeamInstr *next;
+ $FIXED_APPLY($Arity, I, $Deallocate, next);
+ if (ERTS_LIKELY(next != NULL)) {
+ $deallocate($Deallocate);
+ $DISPATCH_ABS(next);
+ }
+ $HANDLE_APPLY_ERROR();
+}
+
+APPLY_FUN(Next) {
+ HEAVY_SWAPOUT;
+ $Next = apply_fun(c_p, r(0), x(1), reg);
+ HEAVY_SWAPIN;
+}
+
+HANDLE_APPLY_FUN_ERROR() {
+ goto find_func_info;
+}
+
+DISPATCH_FUN(I) {
+ SET_I($I);
+ Dispatchfun();
+}
+
+i_apply_fun() {
+ BeamInstr *next;
+ $APPLY_FUN(next);
+ if (ERTS_LIKELY(next != NULL)) {
+ SET_CP(c_p, $NEXT_INSTRUCTION);
+ $DISPATCH_FUN(next);
+ }
+ $HANDLE_APPLY_FUN_ERROR();
+}
+
+i_apply_fun_last(Deallocate) {
+ BeamInstr *next;
+ $APPLY_FUN(next);
+ if (ERTS_LIKELY(next != NULL)) {
+ $deallocate($Deallocate);
+ $DISPATCH_FUN(next);
+ }
+ $HANDLE_APPLY_FUN_ERROR();
+}
+
+i_apply_fun_only() {
+ BeamInstr *next;
+ $APPLY_FUN(next);
+ if (ERTS_LIKELY(next != NULL)) {
+ $DISPATCH_FUN(next);
+ }
+ $HANDLE_APPLY_FUN_ERROR();
+}
+
+CALL_FUN(Fun, Next) {
+ //| -no_next
+ HEAVY_SWAPOUT;
+ $Next = call_fun(c_p, $Fun, reg, THE_NON_VALUE);
+ HEAVY_SWAPIN;
+}
+
+i_call_fun(Fun) {
+ BeamInstr *next;
+ $CALL_FUN($Fun, next);
+ if (ERTS_LIKELY(next != NULL)) {
+ SET_CP(c_p, $NEXT_INSTRUCTION);
+ $DISPATCH_FUN(next);
+ }
+ $HANDLE_APPLY_FUN_ERROR();
+}
+
+i_call_fun_last(Fun, Deallocate) {
+ BeamInstr *next;
+ $CALL_FUN($Fun, next);
+ if (ERTS_LIKELY(next != NULL)) {
+ $deallocate($Deallocate);
+ $DISPATCH_FUN(next);
+ }
+ $HANDLE_APPLY_FUN_ERROR();
+}
+
+return() {
+ SET_I(c_p->cp);
+ DTRACE_RETURN_FROM_PC(c_p);
+
+ /*
+ * We must clear the CP to make sure that a stale value does not
+ * create a false module dependency preventing code upgrading.
+ * It also means that we can use the CP in stack backtraces.
+ */
+ c_p->cp = 0;
+ CHECK_TERM(r(0));
+ HEAP_SPACE_VERIFIED(0);
+ DispatchReturn;
+}
+
+get_list(Src, Hd, Tl) {
+ Eterm* tmp_ptr = list_val($Src);
+ Eterm hd, tl;
+ hd = CAR(tmp_ptr);
+ tl = CDR(tmp_ptr);
+ $Hd = hd;
+ $Tl = tl;
+}
+
+i_get(Src, Dst) {
+ $Dst = erts_pd_hash_get(c_p, $Src);
+}
+
+i_get_hash(Src, Hash, Dst) {
+ $Dst = erts_pd_hash_get_with_hx(c_p, $Hash, $Src);
+}
+
+i_get_tuple_element(Src, Element, Dst) {
+ Eterm* src = ADD_BYTE_OFFSET(tuple_val($Src), $Element);
+ $Dst = *src;
+}
+
+i_get_tuple_element2(Src, Element, Dst) {
+ Eterm* src;
+ Eterm* dst;
+ Eterm E1, E2;
+ src = ADD_BYTE_OFFSET(tuple_val($Src), $Element);
+ dst = &($Dst);
+ E1 = src[0];
+ E2 = src[1];
+ dst[0] = E1;
+ dst[1] = E2;
+}
+
+i_get_tuple_element2y(Src, Element, D1, D2) {
+ Eterm* src;
+ Eterm E1, E2;
+ src = ADD_BYTE_OFFSET(tuple_val($Src), $Element);
+ E1 = src[0];
+ E2 = src[1];
+ $D1 = E1;
+ $D2 = E2;
+}
+
+i_get_tuple_element3(Src, Element, Dst) {
+ Eterm* src;
+ Eterm* dst;
+ Eterm E1, E2, E3;
+ src = ADD_BYTE_OFFSET(tuple_val($Src), $Element);
+ dst = &($Dst);
+ E1 = src[0];
+ E2 = src[1];
+ E3 = src[2];
+ dst[0] = E1;
+ dst[1] = E2;
+ dst[2] = E3;
+}
+
+i_element := element_group.fetch.execute;
+
+
+element_group.head() {
+ Eterm element_tuple;
+}
+
+element_group.fetch(Src) {
+ element_tuple = $Src;
+}
+
+element_group.execute(Fail, Index, Dst) {
+ Eterm element_index = $Index;
+ if (ERTS_LIKELY(is_small(element_index) && is_tuple(element_tuple))) {
+ Eterm* tp = tuple_val(element_tuple);
+
+ if ((signed_val(element_index) >= 1) &&
+ (signed_val(element_index) <= arityval(*tp))) {
+ $Dst = tp[signed_val(element_index)];
+ $NEXT0();
+ }
+ }
+ c_p->freason = BADARG;
+ $BIF_ERROR_ARITY_2($Fail, BIF_element_2, element_index, element_tuple);
+}
+
+i_fast_element := fast_element_group.fetch.execute;
+
+fast_element_group.head() {
+ Eterm fast_element_tuple;
+}
+
+fast_element_group.fetch(Src) {
+ fast_element_tuple = $Src;
+}
+
+fast_element_group.execute(Fail, Index, Dst) {
+ if (ERTS_LIKELY(is_tuple(fast_element_tuple))) {
+ Eterm* tp = tuple_val(fast_element_tuple);
+ Eterm pos = $Index; /* Untagged integer >= 1 */
+ if (pos <= arityval(*tp)) {
+ $Dst = tp[pos];
+ $NEXT0();
+ }
+ }
+ c_p->freason = BADARG;
+ $BIF_ERROR_ARITY_2($Fail, BIF_element_2, make_small($Index), fast_element_tuple);
+}
+
+init(Y) {
+ make_blank($Y);
+}
+
+init2(Y1, Y2) {
+ make_blank($Y1);
+ make_blank($Y2);
+}
+
+init3(Y1, Y2, Y3) {
+ make_blank($Y1);
+ make_blank($Y2);
+ make_blank($Y3);
+}
+
+i_make_fun(FunP, NumFree) {
+ HEAVY_SWAPOUT;
+ x(0) = new_fun(c_p, reg, (ErlFunEntry *) $FunP, $NumFree);
+ HEAVY_SWAPIN;
+}
+
+i_trim(Words) {
+ Uint cp = E[0];
+ E += $Words;
+ E[0] = cp;
+}
+
+move(Src, Dst) {
+ $Dst = $Src;
+}
+
+move3(S1, D1, S2, D2, S3, D3) {
+ $D1 = $S1;
+ $D2 = $S2;
+ $D3 = $S3;
+}
+
+move_dup(Src, D1, D2) {
+ $D1 = $D2 = $Src;
+}
+
+move2_par(S1, D1, S2, D2) {
+ Eterm V1, V2;
+ V1 = $S1;
+ V2 = $S2;
+ $D1 = V1;
+ $D2 = V2;
+}
+
+move_shift(Src, SD, D) {
+ Eterm V;
+ V = $Src;
+ $D = $SD;
+ $SD = V;
+}
+
+move_window3(S1, S2, S3, D) {
+ Eterm xt0, xt1, xt2;
+ Eterm* y = &$D;
+ xt0 = $S1;
+ xt1 = $S2;
+ xt2 = $S3;
+ y[0] = xt0;
+ y[1] = xt1;
+ y[2] = xt2;
+}
+
+move_window4(S1, S2, S3, S4, D) {
+ Eterm xt0, xt1, xt2, xt3;
+ Eterm* y = &$D;
+ xt0 = $S1;
+ xt1 = $S2;
+ xt2 = $S3;
+ xt3 = $S4;
+ y[0] = xt0;
+ y[1] = xt1;
+ y[2] = xt2;
+ y[3] = xt3;
+}
+
+move_window5(S1, S2, S3, S4, S5, D) {
+ Eterm xt0, xt1, xt2, xt3, xt4;
+ Eterm *y = &$D;
+ xt0 = $S1;
+ xt1 = $S2;
+ xt2 = $S3;
+ xt3 = $S4;
+ xt4 = $S5;
+ y[0] = xt0;
+ y[1] = xt1;
+ y[2] = xt2;
+ y[3] = xt3;
+ y[4] = xt4;
+}
+
+move_return(Src) {
+ //| -no_next
+ x(0) = $Src;
+ SET_I(c_p->cp);
+ c_p->cp = 0;
+ DispatchReturn;
+}
+
+move_x1(Src) {
+ x(1) = $Src;
+}
+
+move_x2(Src) {
+ x(2) = $Src;
+}
+
+node(Dst) {
+ $Dst = erts_this_node->sysname;
+}
+
+put_list(Hd, Tl, Dst) {
+ HTOP[0] = $Hd;
+ HTOP[1] = $Tl;
+ $Dst = make_list(HTOP);
+ HTOP += 2;
+}
+
+i_put_tuple := i_put_tuple.make.fill;
+
+i_put_tuple.make(Dst) {
+ $Dst = make_tuple(HTOP);
+}
+
+i_put_tuple.fill(Arity) {
+ Eterm* hp = HTOP;
+ Eterm arity = $Arity;
+
+ //| -no_next
+ *hp++ = make_arityval(arity);
+ I = $NEXT_INSTRUCTION;
+ do {
+ Eterm term = *I++;
+ switch (loader_tag(term)) {
+ case LOADER_X_REG:
+ *hp++ = x(loader_x_reg_index(term));
+ break;
+ case LOADER_Y_REG:
+ *hp++ = y(loader_y_reg_index(term));
+ break;
+ default:
+ *hp++ = term;
+ break;
+ }
+ } while (--arity != 0);
+ HTOP = hp;
+ ASSERT(VALID_INSTR(* (Eterm *)I));
+ Goto(*I);
+}
+
+self(Dst) {
+ $Dst = c_p->common.id;
+}
+
+set_tuple_element(Element, Tuple, Offset) {
+ Eterm* p;
+
+ ASSERT(is_tuple($Tuple));
+ p = (Eterm *) ((unsigned char *) tuple_val($Tuple) + $Offset);
+ *p = $Element;
+}
+
+swap(R1, R2) {
+ Eterm V = $R1;
+ $R1 = $R2;
+ $R2 = V;
+}
+
+swap_temp(R1, R2, Tmp) {
+ Eterm V = $R1;
+ $R1 = $R2;
+ $R2 = $Tmp = V;
+}
+
+test_heap(Nh, Live) {
+ $GC_TEST(0, $Nh, $Live);
+}
+
+test_heap_1_put_list(Nh, Reg) {
+ $test_heap($Nh, 1);
+ $put_list($Reg, x(0), x(0));
+}
+
+is_integer_allocate(Fail, Src, NeedStack, Live) {
+ //| -no_prefetch
+ $is_integer($Fail, $Src);
+ $AH($NeedStack, 0, $Live);
+}
+
+is_nonempty_list(Fail, Src) {
+ //| -no_prefetch
+ if (is_not_list($Src)) {
+ $FAIL($Fail);
+ }
+}
+
+is_nonempty_list_test_heap(Fail, Need, Live) {
+ //| -no_prefetch
+ $is_nonempty_list($Fail, x(0));
+ $test_heap($Need, $Live);
+}
+
+is_nonempty_list_allocate(Fail, Src, Need, Live) {
+ //| -no_prefetch
+ $is_nonempty_list($Fail, $Src);
+ $AH($Need, 0, $Live);
+}
+
+is_nonempty_list_get_list(Fail, Src, Hd, Tl) {
+ //| -no_prefetch
+ $is_nonempty_list($Fail, $Src);
+ $get_list($Src, $Hd, $Tl);
+}
+
+jump(Fail) {
+ $JUMP($Fail);
+}
+
+move_jump(Fail, Src) {
+ x(0) = $Src;
+ $jump($Fail);
+}
+
+//
+// Test instructions.
+//
+
+is_atom(Fail, Src) {
+ if (is_not_atom($Src)) {
+ $FAIL($Fail);
+ }
+}
+
+is_boolean(Fail, Src) {
+ if (($Src) != am_true && ($Src) != am_false) {
+ $FAIL($Fail);
+ }
+}
+
+is_binary(Fail, Src) {
+ if (is_not_binary($Src) || binary_bitsize($Src) != 0) {
+ $FAIL($Fail);
+ }
+}
+
+is_bitstring(Fail, Src) {
+ if (is_not_binary($Src)) {
+ $FAIL($Fail);
+ }
+}
+
+is_float(Fail, Src) {
+ if (is_not_float($Src)) {
+ $FAIL($Fail);
+ }
+}
+
+is_function(Fail, Src) {
+ if ( !(is_any_fun($Src)) ) {
+ $FAIL($Fail);
+ }
+}
+
+is_function2(Fail, Fun, Arity) {
+ if (erl_is_function(c_p, $Fun, $Arity) != am_true ) {
+ $FAIL($Fail);
+ }
+}
+
+is_integer(Fail, Src) {
+ if (is_not_integer($Src)) {
+ $FAIL($Fail);
+ }
+}
+
+is_list(Fail, Src) {
+ if (is_not_list($Src) && is_not_nil($Src)) {
+ $FAIL($Fail);
+ }
+}
+
+is_map(Fail, Src) {
+ if (is_not_map($Src)) {
+ $FAIL($Fail);
+ }
+}
+
+is_nil(Fail, Src) {
+ if (is_not_nil($Src)) {
+ $FAIL($Fail);
+ }
+}
+
+is_number(Fail, Src) {
+ if (is_not_integer($Src) && is_not_float($Src)) {
+ $FAIL($Fail);
+ }
+}
+
+is_pid(Fail, Src) {
+ if (is_not_pid($Src)) {
+ $FAIL($Fail);
+ }
+}
+
+is_port(Fail, Src) {
+ if (is_not_port($Src)) {
+ $FAIL($Fail);
+ }
+}
+
+is_reference(Fail, Src) {
+ if (is_not_ref($Src)) {
+ $FAIL($Fail);
+ }
+}
+
+is_tagged_tuple(Fail, Src, Arityval, Tag) {
+ Eterm term = $Src;
+ if (!(BEAM_IS_TUPLE(term) &&
+ (tuple_val(term))[0] == $Arityval &&
+ (tuple_val(term))[1] == $Tag)) {
+ $FAIL($Fail);
+ }
+}
+
+is_tuple(Fail, Src) {
+ if (is_not_tuple($Src)) {
+ $FAIL($Fail);
+ }
+}
+
+is_tuple_of_arity(Fail, Src, Arityval) {
+ Eterm term = $Src;
+ if (!(BEAM_IS_TUPLE(term) && *tuple_val(term) == $Arityval)) {
+ $FAIL($Fail);
+ }
+}
+
+test_arity(Fail, Pointer, Arity) {
+ if (*tuple_val($Pointer) != $Arity) {
+ $FAIL($Fail);
+ }
+}
+
+i_is_eq_exact_immed(Fail, X, Y) {
+ if ($X != $Y) {
+ $FAIL($Fail);
+ }
+}
+
+i_is_ne_exact_immed(Fail, X, Y) {
+ if ($X == $Y) {
+ $FAIL($Fail);
+ }
+}
+
+is_eq_exact(Fail, X, Y) {
+ if (!EQ($X, $Y)) {
+ $FAIL($Fail);
+ }
+}
+
+i_is_eq_exact_literal(Fail, Src, Literal) {
+ Eterm src = $Src;
+ if (is_immed(src) || !eq(src, $Literal)) {
+ $FAIL($Fail);
+ }
+}
+
+is_ne_exact(Fail, X, Y) {
+ if (EQ($X, $Y)) {
+ $FAIL($Fail);
+ }
+}
+
+i_is_ne_exact_literal(Fail, Src, Literal) {
+ Eterm src = $Src;
+ if (!is_immed(src) && eq(src, $Literal)) {
+ $FAIL($Fail);
+ }
+}
+
+is_eq(Fail, X, Y) {
+ CMP_EQ_ACTION($X, $Y, $FAIL($Fail));
+}
+
+is_ne(Fail, X, Y) {
+ CMP_NE_ACTION($X, $Y, $FAIL($Fail));
+}
+
+is_lt(Fail, X, Y) {
+ CMP_LT_ACTION($X, $Y, $FAIL($Fail));
+}
+
+is_ge(Fail, X, Y) {
+ CMP_GE_ACTION($X, $Y, $FAIL($Fail));
+}
+
+badarg(Fail) {
+ $BADARG($Fail);
+ //| -no_next;
+}
+
+badmatch(Src) {
+ c_p->fvalue = $Src;
+ c_p->freason = BADMATCH;
+ goto find_func_info;
+ //| -no_next;
+}
+
+case_end(Src) {
+ c_p->fvalue = $Src;
+ c_p->freason = EXC_CASE_CLAUSE;
+ goto find_func_info;
+ //| -no_next;
+}
+
+if_end() {
+ c_p->freason = EXC_IF_CLAUSE;
+ goto find_func_info;
+ //| -no_next;
+}
+
+system_limit(Fail) {
+ $SYSTEM_LIMIT($Fail);
+ //| -no_next;
+}
+
+catch(Y, Fail) {
+ c_p->catches++;
+ $Y = $Fail;
+}
+
+catch_end(Y) {
+ $try_end($Y);
+ if (is_non_value(r(0))) {
+ c_p->fvalue = NIL;
+ if (x(1) == am_throw) {
+ r(0) = x(2);
+ } else {
+ if (x(1) == am_error) {
+ SWAPOUT;
+ x(2) = add_stacktrace(c_p, x(2), x(3));
+ SWAPIN;
+ }
+ /* only x(2) is included in the rootset here */
+ if (E - HTOP < 3) {
+ SWAPOUT;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ FCALLS -= erts_garbage_collect_nobump(c_p, 3, reg+2, 1, FCALLS);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ SWAPIN;
+ }
+ r(0) = TUPLE2(HTOP, am_EXIT, x(2));
+ HTOP += 3;
+ }
+ }
+ CHECK_TERM(r(0));
+}
+
+try_end(Y) {
+ c_p->catches--;
+ make_blank($Y);
+}
+
+try_case(Y) {
+ $try_end($Y);
+ ASSERT(is_non_value(r(0)));
+ c_p->fvalue = NIL;
+ r(0) = x(1);
+ x(1) = x(2);
+ x(2) = x(3);
+}
+
+try_case_end(Src) {
+ c_p->fvalue = $Src;
+ c_p->freason = EXC_TRY_CLAUSE;
+ goto find_func_info;
+ //| -no_next;
+}
+
+i_raise() {
+ Eterm raise_trace = x(2);
+ Eterm raise_value = x(1);
+ struct StackTrace *s;
+
+ c_p->fvalue = raise_value;
+ c_p->ftrace = raise_trace;
+ s = get_trace_from_exc(raise_trace);
+ if (s == NULL) {
+ c_p->freason = EXC_ERROR;
+ } else {
+ c_p->freason = PRIMARY_EXCEPTION(s->freason);
+ }
+ goto find_func_info;
+ //| -no_next
+}
+
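
instrs.tab is the first of the new instruction-definition files: each block is a template for one BEAM instruction, $Name substitutes an operand, and a definition can expand another one inline; move_deallocate_return above, for instance, just sets x(0) and then expands $deallocate_return, fusing two instructions into a single dispatch. A self-contained toy illustration of why such combined instructions pay off; none of this is ERTS code, and the opcodes and switch dispatch are invented for the example:

    #include <stdio.h>

    enum { OP_MOVE, OP_RETURN, OP_MOVE_RETURN, OP_HALT };

    int main(void)
    {
        int x0 = 0;
        /* the fused opcode does in one dispatch what OP_MOVE
           followed by OP_RETURN would need two for */
        int prog[] = { OP_MOVE_RETURN, 42, OP_HALT };
        int *ip = prog;

        for (;;) {
            switch (*ip++) {
            case OP_MOVE:
                x0 = *ip++;
                break;
            case OP_RETURN:
                /* a real VM would pop a stack frame here */
                break;
            case OP_MOVE_RETURN:        /* the combined instruction */
                x0 = *ip++;
                /* ...frame pop would follow, as in deallocate_return */
                break;
            case OP_HALT:
                printf("x0 = %d\n", x0);
                return 0;
            }
        }
    }
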
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index edf4a28784..85013af3ad 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2014. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -46,25 +47,29 @@
#define ERTS_WANT_EXTERNAL_TAGS
#include "external.h"
#include "dtrace-wrapper.h"
+#include "lttng-wrapper.h"
#include "erl_map.h"
+#include "erl_bif_unique.h"
+#include "erl_hl_timer.h"
+#include "erl_time.h"
+#include "erl_io_queue.h"
extern ErlDrvEntry fd_driver_entry;
-#ifndef __OSE__
extern ErlDrvEntry vanilla_driver_entry;
-#endif
extern ErlDrvEntry spawn_driver_entry;
+#ifndef __WIN32__
+extern ErlDrvEntry forker_driver_entry;
+#endif
extern ErlDrvEntry *driver_tab[]; /* table of static drivers, only used during initialization */
erts_driver_t *driver_list; /* List of all drivers, static and dynamic. */
-erts_smp_rwmtx_t erts_driver_list_lock; /* Mutex for driver list */
-static erts_smp_tsd_key_t driver_list_lock_status_key; /*stop recursive locks when calling
+erts_rwmtx_t erts_driver_list_lock; /* Mutex for driver list */
+static erts_tsd_key_t driver_list_lock_status_key; /*stop recursive locks when calling
driver init */
-static erts_smp_tsd_key_t driver_list_last_error_key; /* Save last DDLL error on a
+static erts_tsd_key_t driver_list_last_error_key; /* Save last DDLL error on a
per thread basis (for BC interfaces) */
ErtsPTab erts_port erts_align_attribute(ERTS_CACHE_LINE_SIZE); /* The port table */
-erts_smp_atomic_t erts_bytes_out; /* No bytes sent out of the system */
-erts_smp_atomic_t erts_bytes_in; /* No bytes gotten into the system */
const ErlDrvTermData driver_term_nil = (ErlDrvTermData)NIL;
@@ -72,32 +77,33 @@ const Port erts_invalid_port = {{ERTS_INVALID_PORT}};
erts_driver_t vanilla_driver;
erts_driver_t spawn_driver;
+#ifndef __WIN32__
+erts_driver_t forker_driver;
+#endif
erts_driver_t fd_driver;
int erts_port_synchronous_ops = 0;
int erts_port_schedule_all_ops = 0;
int erts_port_parallelism = 0;
-static void deliver_result(Eterm sender, Eterm pid, Eterm res);
+static erts_atomic64_t bytes_in;
+static erts_atomic64_t bytes_out;
+
+static void deliver_result(Port *p, Eterm sender, Eterm pid, Eterm res);
static int init_driver(erts_driver_t *, ErlDrvEntry *, DE_Handle *);
static void terminate_port(Port *p);
static void pdl_init(void);
-#ifdef ERTS_SMP
+static int driver_failure_term(ErlDrvPort ix, Eterm term, int eof);
static void driver_monitor_lock_pdl(Port *p);
static void driver_monitor_unlock_pdl(Port *p);
#define DRV_MONITOR_LOOKUP_PORT_LOCK_PDL(Port) erts_thr_drvport2port((Port), 1)
#define DRV_MONITOR_LOCK_PDL(Port) driver_monitor_lock_pdl(Port)
#define DRV_MONITOR_UNLOCK_PDL(Port) driver_monitor_unlock_pdl(Port)
-#else
-#define DRV_MONITOR_LOOKUP_PORT_LOCK_PDL(Port) erts_thr_drvport2port((Port), 0)
-#define DRV_MONITOR_LOCK_PDL(Port) /* nothing */
-#define DRV_MONITOR_UNLOCK_PDL(Port) /* nothing */
-#endif
#define ERL_SMALL_IO_BIN_LIMIT (4*ERL_ONHEAP_BIN_LIMIT)
#define SMALL_WRITE_VEC 16
-static ERTS_INLINE ErlIOQueue*
+static ERTS_INLINE ErlPortIOQueue*
drvport2ioq(ErlDrvPort drvport)
{
Port *prt = erts_thr_drvport2port(drvport, 0);
@@ -110,13 +116,13 @@ static ERTS_INLINE int
is_port_ioq_empty(Port *pp)
{
int res;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(pp));
if (!pp->port_data_lock)
- res = (pp->ioq.size == 0);
+ res = (erts_ioq_size(&pp->ioq) == 0);
else {
ErlDrvPDL pdl = pp->port_data_lock;
erts_mtx_lock(&pdl->mtx);
- res = (pp->ioq.size == 0);
+ res = (erts_ioq_size(&pp->ioq) == 0);
erts_mtx_unlock(&pdl->mtx);
}
return res;
@@ -131,14 +137,14 @@ erts_is_port_ioq_empty(Port *pp)
Uint
erts_port_ioq_size(Port *pp)
{
- int res;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+ ErlDrvSizeT res;
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(pp));
if (!pp->port_data_lock)
- res = pp->ioq.size;
+ res = erts_ioq_size(&pp->ioq);
else {
ErlDrvPDL pdl = pp->port_data_lock;
erts_mtx_lock(&pdl->mtx);
- res = pp->ioq.size;
+ res = erts_ioq_size(&pp->ioq);
erts_mtx_unlock(&pdl->mtx);
}
return (Uint) res;
@@ -200,13 +206,13 @@ dtrace_drvport_str(ErlDrvPort drvport, char *port_buf)
static ERTS_INLINE void
kill_port(Port *pp)
{
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(pp));
+ ERTS_TRACER_CLEAR(&ERTS_TRACER(pp));
erts_ptab_delete_element(&erts_port, &pp->common); /* Time of death */
erts_port_task_free_port(pp);
/* In non-smp case the port structure may have been deallocated now */
}
-#ifdef ERTS_SMP
#ifdef ERTS_ENABLE_LOCK_CHECK
int
@@ -214,12 +220,11 @@ erts_lc_is_port_locked(Port *prt)
{
if (!prt)
return 0;
- ERTS_SMP_LC_ASSERT(prt->lock);
- return erts_smp_lc_mtx_is_locked(prt->lock);
+ ERTS_LC_ASSERT(prt->lock);
+ return erts_lc_mtx_is_locked(prt->lock);
}
#endif
-#endif /* #ifdef ERTS_SMP */
static void initq(Port* prt);
@@ -243,32 +248,21 @@ static ERTS_INLINE void port_init_instr(Port *prt
* Stuff that need to be initialized with the port id
* in the instrumented case, but not in the normal case.
*/
-#ifdef ERTS_SMP
ASSERT(prt->drv_ptr && prt->lock);
if (!prt->drv_ptr->lock) {
- char *lock_str = "port_lock";
- erts_mtx_init_locked_x(prt->lock, lock_str, id,
-#ifdef ERTS_ENABLE_LOCK_COUNT
- (erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK)
-#else
- 0
-#endif
- );
+ erts_mtx_init_locked(prt->lock, "port_lock", id, ERTS_LOCK_FLAGS_CATEGORY_IO);
}
-#endif
erts_port_task_init_sched(&prt->sched, id);
}
#if !ERTS_PORT_INIT_INSTR_NEED_ID
static ERTS_INLINE void port_init_instr_abort(Port *prt)
{
-#ifdef ERTS_SMP
ASSERT(prt->drv_ptr && prt->lock);
if (!prt->drv_ptr->lock) {
erts_mtx_unlock(prt->lock);
erts_mtx_destroy(prt->lock);
}
-#endif
erts_port_task_fini_sched(&prt->sched);
}
#endif
@@ -303,21 +297,22 @@ static Port *create_port(char *name,
size_t port_size, busy_port_queue_size, size;
erts_aint32_t state = ERTS_PORT_SFLG_CONNECTED;
erts_aint32_t x_pts_flgs = 0;
-#ifdef DEBUG
- /* Make sure the debug flags survives until port is freed */
- state |= ERTS_PORT_SFLG_PORT_DEBUG;
-#endif
-#ifdef ERTS_SMP
+ ErtsRunQueue *runq;
if (!driver_lock) {
/* Align size for mutex following port struct */
port_size = size = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(Port));
size += sizeof(erts_mtx_t);
}
else
-#endif
port_size = size = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(Port));
+#ifdef DEBUG
+ /* Make sure the debug flags survives until port is freed */
+ state |= ERTS_PORT_SFLG_PORT_DEBUG;
+#endif
+
+
busy_port_queue_size
= ((driver->flags & ERL_DRV_FLAG_NO_BUSY_MSGQ)
? 0
@@ -343,7 +338,6 @@ static Port *create_port(char *name,
p += busy_port_queue_size;
}
-#ifdef ERTS_SMP
if (driver_lock) {
prt->lock = driver_lock;
erts_mtx_lock(driver_lock);
@@ -353,13 +347,13 @@ static Port *create_port(char *name,
p += sizeof(erts_mtx_t);
state |= ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK;
}
- erts_smp_atomic_set_nob(&prt->run_queue,
- (erts_aint_t) erts_get_runq_current(NULL));
+ if (erts_get_scheduler_data())
+ runq = erts_get_runq_current(NULL);
+ else
+ runq = ERTS_RUNQ_IX(0);
+ erts_atomic_set_nob(&prt->run_queue, (erts_aint_t) runq);
+
prt->xports = NULL;
-#else
- erts_atomic32_init_nob(&prt->refc, 1);
- prt->cleanup = 0;
-#endif
erts_port_task_pre_init_sched(&prt->sched, busy_port_queue);
@@ -378,20 +372,17 @@ static Port *create_port(char *name,
prt->dist_entry = NULL;
ERTS_PORT_INIT_CONNECTED(prt, pid);
prt->common.u.alive.reg = NULL;
-#ifdef ERTS_SMP
- prt->common.u.alive.ptimer = NULL;
-#else
- sys_memset(&prt->common.u.alive.tm, 0, sizeof(ErlTimer));
-#endif
+ ERTS_PTMR_INIT(prt);
erts_port_task_handle_init(&prt->timeout_task);
- prt->psd = NULL;
+ erts_atomic_init_nob(&prt->psd, (erts_aint_t) NULL);
+ prt->async_open_port = NULL;
prt->drv_data = (SWord) 0;
prt->os_pid = -1;
/* Set default tracing */
- erts_get_default_tracing(&ERTS_TRACE_FLAGS(prt), &ERTS_TRACER_PROC(prt));
+ erts_get_default_port_tracing(&ERTS_TRACE_FLAGS(prt), &ERTS_TRACER(prt));
- ASSERT(((char *) prt) == ((char *) &prt->common));
+ ERTS_CT_ASSERT(offsetof(Port,common) == 0);
#if !ERTS_PORT_INIT_INSTR_NEED_ID
/*
@@ -410,10 +401,8 @@ static Port *create_port(char *name,
#if !ERTS_PORT_INIT_INSTR_NEED_ID
port_init_instr_abort(prt);
#endif
-#ifdef ERTS_SMP
if (driver_lock)
erts_mtx_unlock(driver_lock);
-#endif
if (enop)
*enop = 0;
erts_free(ERTS_ALC_T_PORT, prt);
@@ -426,7 +415,7 @@ static Port *create_port(char *name,
initq(prt);
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
if (erts_port_schedule_all_ops)
x_pts_flgs |= ERTS_PTS_FLG_FORCE_SCHED;
@@ -435,42 +424,30 @@ static Port *create_port(char *name,
x_pts_flgs |= ERTS_PTS_FLG_PARALLELISM;
if (x_pts_flgs)
- erts_smp_atomic32_read_bor_nob(&prt->sched.flags, x_pts_flgs);
+ erts_atomic32_read_bor_nob(&prt->sched.flags, x_pts_flgs);
erts_atomic32_set_relb(&prt->state, state);
return prt;
}
-#ifndef ERTS_SMP
-void
-erts_port_cleanup(Port *prt)
-{
- if (prt->drv_ptr && prt->drv_ptr->handle)
- erts_ddll_dereference_driver(prt->drv_ptr->handle);
- prt->drv_ptr = NULL;
- erts_port_dec_refc(prt);
-}
-#endif
void
erts_port_free(Port *prt)
{
-#if defined(ERTS_SMP) || defined(DEBUG) || defined(ERTS_ENABLE_LOCK_CHECK)
erts_aint32_t state = erts_atomic32_read_nob(&prt->state);
-#endif
ERTS_LC_ASSERT(state & (ERTS_PORT_SFLG_INITIALIZING
| ERTS_PORT_SFLG_FREE));
ASSERT(state & ERTS_PORT_SFLG_PORT_DEBUG);
-#ifdef ERTS_SMP
- ERTS_LC_ASSERT(erts_atomic32_read_nob(&prt->common.refc) == 0);
-#else
- ERTS_LC_ASSERT(erts_atomic32_read_nob(&prt->refc) == 0);
-#endif
+ ERTS_LC_ASSERT(erts_atomic_read_nob(&prt->common.refc.atmc) == 0);
erts_port_task_fini_sched(&prt->sched);
-#ifdef ERTS_SMP
+ if (prt->async_open_port) {
+ erts_free(ERTS_ALC_T_PRTSD, prt->async_open_port);
+ prt->async_open_port = NULL;
+ }
+
ASSERT(prt->lock);
if (state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK)
erts_mtx_destroy(prt->lock);
@@ -487,7 +464,6 @@ erts_port_free(Port *prt)
*/
if (prt->drv_ptr->handle)
erts_ddll_dereference_driver(prt->drv_ptr->handle);
-#endif
erts_free(ERTS_ALC_T_PORT, prt);
}
@@ -498,41 +474,17 @@ erts_port_free(Port *prt)
*/
static void initq(Port* prt)
{
- ErlIOQueue* q = &prt->ioq;
-
ERTS_LC_ASSERT(!prt->port_data_lock);
-
- q->size = 0;
- q->v_head = q->v_tail = q->v_start = q->v_small;
- q->v_end = q->v_small + SMALL_IO_QUEUE;
- q->b_head = q->b_tail = q->b_start = q->b_small;
- q->b_end = q->b_small + SMALL_IO_QUEUE;
+ erts_ioq_init(&prt->ioq, ERTS_ALC_T_IOQ, 1);
}
static void stopq(Port* prt)
{
- ErlIOQueue* q;
- ErlDrvBinary** binp;
if (prt->port_data_lock)
driver_pdl_lock(prt->port_data_lock);
- q = &prt->ioq;
- binp = q->b_head;
-
- if (q->v_start != q->v_small)
- erts_free(ERTS_ALC_T_IOQ, (void *) q->v_start);
-
- while(binp < q->b_tail) {
- if (*binp != NULL)
- driver_free_binary(*binp);
- binp++;
- }
- if (q->b_start != q->b_small)
- erts_free(ERTS_ALC_T_IOQ, (void *) q->b_start);
- q->v_start = q->v_end = q->v_head = q->v_tail = NULL;
- q->b_start = q->b_end = q->b_head = q->b_tail = NULL;
- q->size = 0;
+ erts_ioq_clear(&prt->ioq);
if (prt->port_data_lock) {
driver_pdl_unlock(prt->port_data_lock);
@@ -546,7 +498,7 @@ erts_save_suspend_process_on_port(Port *prt, Process *process)
int saved;
erts_aint32_t flags;
erts_port_task_sched_lock(&prt->sched);
- flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ flags = erts_atomic32_read_nob(&prt->sched.flags);
saved = (flags & ERTS_PTS_FLGS_BUSY) && !(flags & ERTS_PTS_FLG_EXIT);
if (saved)
erts_proclist_store_last(&prt->suspended, erts_proclist_create(process));
@@ -590,16 +542,16 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
erts_mtx_t *driver_lock = NULL;
int cprt_flgs = 0;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
- erts_smp_rwmtx_rlock(&erts_driver_list_lock);
+ erts_rwmtx_rlock(&erts_driver_list_lock);
if (!driver) {
for (driver = driver_list; driver; driver = driver->next) {
if (sys_strcmp(driver->name, name) == 0)
break;
}
if (!driver) {
- erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ erts_rwmtx_runlock(&erts_driver_list_lock);
ERTS_OPEN_DRIVER_RET(NULL, -3, BADARG);
}
}
@@ -644,19 +596,17 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
}
if (driver == NULL || (driver != &spawn_driver && opts->exit_status)) {
- erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ erts_rwmtx_runlock(&erts_driver_list_lock);
ERTS_OPEN_DRIVER_RET(NULL, -3, BADARG);
}
-#ifdef ERTS_SMP
driver_lock = driver->lock;
-#endif
if (driver->handle != NULL) {
erts_ddll_increment_port_count(driver->handle);
erts_ddll_reference_driver(driver->handle);
}
- erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ erts_rwmtx_runlock(&erts_driver_list_lock);
/*
* We'll set up the port before calling the start function,
@@ -669,9 +619,9 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
port = create_port(name, driver, driver_lock, cprt_flgs, pid, &port_errno);
if (!port) {
if (driver->handle) {
- erts_smp_rwmtx_rlock(&erts_driver_list_lock);
+ erts_rwmtx_rlock(&erts_driver_list_lock);
erts_ddll_decrement_port_count(driver->handle);
- erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ erts_rwmtx_runlock(&erts_driver_list_lock);
erts_ddll_dereference_driver(driver->handle);
}
if (port_errno)
@@ -691,8 +641,9 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
error_number = error_type = 0;
if (driver->start) {
+ ERTS_MSACC_PUSH_STATE_M();
if (IS_TRACED_FL(port, F_TRACE_SCHED_PORTS)) {
- trace_sched_ports_where(port, am_in, am_start);
+ trace_sched_ports_where(port, am_in, am_open);
}
port->caller = pid;
#ifdef USE_VM_PROBES
@@ -701,6 +652,19 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
DTRACE3(driver_start, process_str, driver->name, port_str);
}
#endif
+
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_PORT);
+
+#ifdef USE_LTTNG_VM_TRACEPOINTS
+ if (LTTNG_ENABLED(driver_start)) {
+ lttng_decl_portbuf(port_str);
+ lttng_decl_procbuf(proc_str);
+ lttng_pid_to_str(pid, proc_str);
+ lttng_port_to_str(port, port_str);
+ LTTNG3(driver_start, proc_str, driver->name, port_str);
+ }
+#endif
+
fpe_was_unmasked = erts_block_fpe();
drv_data = (*driver->start)(ERTS_Port2ErlDrvPort(port), name, opts);
if (((SWord) drv_data) == -1)
@@ -720,26 +684,21 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
}
erts_unblock_fpe(fpe_was_unmasked);
+ ERTS_MSACC_POP_STATE_M();
port->caller = NIL;
if (IS_TRACED_FL(port, F_TRACE_SCHED_PORTS)) {
- trace_sched_ports_where(port, am_out, am_start);
+ trace_sched_ports_where(port, am_out, am_open);
}
-#ifdef ERTS_SMP
if (port->xports)
erts_port_handle_xports(port);
ASSERT(!port->xports);
-#endif
}
if (error_type) {
/*
* Must clean up the port.
*/
-#ifdef ERTS_SMP
- erts_cancel_smp_ptimer(port->common.u.alive.ptimer);
-#else
- erts_cancel_timer(&(port->common.u.alive.tm));
-#endif
+ erts_cancel_port_timer(port);
stopq(port);
if (port->linebuf != NULL) {
erts_free(ERTS_ALC_T_LINEBUF,
@@ -747,9 +706,9 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
port->linebuf = NULL;
}
if (driver->handle != NULL) {
- erts_smp_rwmtx_rlock(&erts_driver_list_lock);
+ erts_rwmtx_rlock(&erts_driver_list_lock);
erts_ddll_decrement_port_count(driver->handle);
- erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ erts_rwmtx_runlock(&erts_driver_list_lock);
}
kill_port(port);
erts_port_release(port);
@@ -761,7 +720,6 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
#undef ERTS_OPEN_DRIVER_RET
}
-#ifdef ERTS_SMP
struct ErtsXPortsList_ {
ErtsXPortsList *next;
@@ -770,7 +728,6 @@ struct ErtsXPortsList_ {
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(xports_list, ErtsXPortsList, 50, ERTS_ALC_T_XPORTS_LIST)
-#endif
/*
* Driver function to create new instances of a driver
@@ -790,7 +747,7 @@ driver_create_port(ErlDrvPort creator_port_ix, /* Creating port */
Process *rp;
erts_mtx_t *driver_lock = NULL;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
/* Need to be called from a scheduler thread */
if (!erts_get_scheduler_id())
@@ -804,12 +761,12 @@ driver_create_port(ErlDrvPort creator_port_ix, /* Creating port */
if (!rp)
return ERTS_INVALID_ERL_DRV_PORT;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(creator_port));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(creator_port));
driver = creator_port->drv_ptr;
- erts_smp_rwmtx_rlock(&erts_driver_list_lock);
+ erts_rwmtx_rlock(&erts_driver_list_lock);
if (!erts_ddll_driver_ok(driver->handle)) {
- erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ erts_rwmtx_runlock(&erts_driver_list_lock);
return ERTS_INVALID_ERL_DRV_PORT;
}
@@ -818,35 +775,33 @@ driver_create_port(ErlDrvPort creator_port_ix, /* Creating port */
erts_ddll_reference_referenced_driver(driver->handle);
}
-#ifdef ERTS_SMP
driver_lock = driver->lock;
-#endif
- erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ erts_rwmtx_runlock(&erts_driver_list_lock);
/* Inherit parallelism flag from parent */
if (ERTS_PTS_FLG_PARALLELISM &
- erts_smp_atomic32_read_nob(&creator_port->sched.flags))
+ erts_atomic32_read_nob(&creator_port->sched.flags))
cprt_flgs |= ERTS_CREATE_PORT_FLAG_PARALLELISM;
port = create_port(name, driver, driver_lock, cprt_flgs, pid, NULL);
if (!port) {
if (driver->handle) {
- erts_smp_rwmtx_rlock(&erts_driver_list_lock);
+ erts_rwmtx_rlock(&erts_driver_list_lock);
erts_ddll_decrement_port_count(driver->handle);
- erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ erts_rwmtx_runlock(&erts_driver_list_lock);
erts_ddll_dereference_driver(driver->handle);
}
return ERTS_INVALID_ERL_DRV_PORT;
}
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(port));
- erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK);
+ erts_proc_lock(rp, ERTS_PROC_LOCK_LINK);
if (ERTS_PROC_IS_EXITING(rp)) {
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
if (driver->handle) {
- erts_smp_rwmtx_rlock(&erts_driver_list_lock);
+ erts_rwmtx_rlock(&erts_driver_list_lock);
erts_ddll_decrement_port_count(driver->handle);
- erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ erts_rwmtx_runlock(&erts_driver_list_lock);
}
kill_port(port);
erts_port_release(port);
@@ -855,23 +810,20 @@ driver_create_port(ErlDrvPort creator_port_ix, /* Creating port */
erts_add_link(&ERTS_P_LINKS(port), LINK_PID, pid);
erts_add_link(&ERTS_P_LINKS(rp), LINK_PID, port->common.id);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
-#ifdef ERTS_SMP
if (!driver_lock) {
ErtsXPortsList *xplp = xports_list_alloc();
xplp->port = port;
xplp->next = creator_port->xports;
creator_port->xports = xplp;
}
-#endif
port->drv_data = (UWord) drv_data;
return ERTS_Port2ErlDrvPort(port);
}
-#ifdef ERTS_SMP
int erts_port_handle_xports(Port *prt)
{
int reds = 0;
@@ -900,293 +852,6 @@ int erts_port_handle_xports(Port *prt)
prt->xports = NULL;
return reds;
}
-#endif
-
-/* Fills a possibly deep list of chars and binaries into vec
-** Small characters are first stored in the buffer buf of length ln
-** binaries found are copied and linked into msoh
-** Return vector length on success,
-** -1 on overflow
-** -2 on type error
-*/
-
-#define SET_VEC(iov, bv, bin, ptr, len, vlen) do { \
- (iov)->iov_base = (ptr); \
- (iov)->iov_len = (len); \
- if (sizeof((iov)->iov_len) < sizeof(len) \
- /* Check if (len) overflowed (iov)->iov_len */ \
- && (iov)->iov_len != (len)) { \
- goto L_overflow; \
- } \
- *(bv)++ = (bin); \
- (iov)++; \
- (vlen)++; \
-} while(0)
-
-static int
-io_list_to_vec(Eterm obj, /* io-list */
- SysIOVec* iov, /* io vector */
- ErlDrvBinary** binv, /* binary reference vector */
- ErlDrvBinary* cbin, /* binary to store characters */
- ErlDrvSizeT bin_limit) /* small binaries limit */
-{
- DECLARE_ESTACK(s);
- Eterm* objp;
- char *buf = cbin->orig_bytes;
- Uint len = cbin->orig_size;
- Uint csize = 0;
- int vlen = 0;
- char* cptr = buf;
-
- goto L_jump_start; /* avoid push */
-
- while (!ESTACK_ISEMPTY(s)) {
- obj = ESTACK_POP(s);
- L_jump_start:
- if (is_list(obj)) {
- L_iter_list:
- objp = list_val(obj);
- obj = CAR(objp);
- if (is_byte(obj)) {
- if (len == 0)
- goto L_overflow;
- *buf++ = unsigned_val(obj);
- csize++;
- len--;
- } else if (is_binary(obj)) {
- ESTACK_PUSH(s, CDR(objp));
- goto handle_binary;
- } else if (is_list(obj)) {
- ESTACK_PUSH(s, CDR(objp));
- goto L_iter_list; /* on head */
- } else if (!is_nil(obj)) {
- goto L_type_error;
- }
- obj = CDR(objp);
- if (is_list(obj))
- goto L_iter_list; /* on tail */
- else if (is_binary(obj)) {
- goto handle_binary;
- } else if (!is_nil(obj)) {
- goto L_type_error;
- }
- } else if (is_binary(obj)) {
- Eterm real_bin;
- Uint offset;
- Eterm* bptr;
- ErlDrvSizeT size;
- int bitoffs;
- int bitsize;
-
- handle_binary:
- size = binary_size(obj);
- ERTS_GET_REAL_BIN(obj, real_bin, offset, bitoffs, bitsize);
- ASSERT(bitsize == 0);
- bptr = binary_val(real_bin);
- if (*bptr == HEADER_PROC_BIN) {
- ProcBin* pb = (ProcBin *) bptr;
- if (bitoffs != 0) {
- if (len < size) {
- goto L_overflow;
- }
- erts_copy_bits(pb->bytes+offset, bitoffs, 1,
- (byte *) buf, 0, 1, size*8);
- csize += size;
- buf += size;
- len -= size;
- } else if (bin_limit && size < bin_limit) {
- if (len < size) {
- goto L_overflow;
- }
- sys_memcpy(buf, pb->bytes+offset, size);
- csize += size;
- buf += size;
- len -= size;
- } else {
- if (csize != 0) {
- SET_VEC(iov, binv, cbin, cptr, csize, vlen);
- cptr = buf;
- csize = 0;
- }
- if (pb->flags) {
- erts_emasculate_writable_binary(pb);
- }
- SET_VEC(iov, binv, Binary2ErlDrvBinary(pb->val),
- pb->bytes+offset, size, vlen);
- }
- } else {
- ErlHeapBin* hb = (ErlHeapBin *) bptr;
- if (len < size) {
- goto L_overflow;
- }
- copy_binary_to_buffer(buf, 0,
- ((byte *) hb->data)+offset, bitoffs,
- 8*size);
- csize += size;
- buf += size;
- len -= size;
- }
- } else if (!is_nil(obj)) {
- goto L_type_error;
- }
- }
-
- if (csize != 0) {
- SET_VEC(iov, binv, cbin, cptr, csize, vlen);
- }
-
- DESTROY_ESTACK(s);
- return vlen;
-
- L_type_error:
- DESTROY_ESTACK(s);
- return -2;
-
- L_overflow:
- DESTROY_ESTACK(s);
- return -1;
-}
-
-#define IO_LIST_VEC_COUNT(obj) \
-do { \
- Uint _size = binary_size(obj); \
- Eterm _real; \
- ERTS_DECLARE_DUMMY(Uint _offset); \
- int _bitoffs; \
- int _bitsize; \
- ERTS_GET_REAL_BIN(obj, _real, _offset, _bitoffs, _bitsize); \
- if (_bitsize != 0) goto L_type_error; \
- if (thing_subtag(*binary_val(_real)) == REFC_BINARY_SUBTAG && \
- _bitoffs == 0) { \
- b_size += _size; \
- if (b_size < _size) goto L_overflow_error; \
- in_clist = 0; \
- v_size++; \
- if (_size >= ERL_SMALL_IO_BIN_LIMIT) { \
- p_in_clist = 0; \
- p_v_size++; \
- } else { \
- p_c_size += _size; \
- if (!p_in_clist) { \
- p_in_clist = 1; \
- p_v_size++; \
- } \
- } \
- } else { \
- c_size += _size; \
- if (c_size < _size) goto L_overflow_error; \
- if (!in_clist) { \
- in_clist = 1; \
- v_size++; \
- } \
- p_c_size += _size; \
- if (!p_in_clist) { \
- p_in_clist = 1; \
- p_v_size++; \
- } \
- } \
-} while (0)
-
-
-/*
- * Returns 0 if successful and a non-zero value otherwise.
- *
- * Return values through pointers:
- * *vsize - SysIOVec size needed for a writev
- * *csize - Number of bytes not in binary (in the common binary)
- * *pvsize - SysIOVec size needed if packing small binaries
- * *pcsize - Number of bytes in the common binary if packing
- * *total_size - Total size of iolist in bytes
- */
-
-static int
-io_list_vec_len(Eterm obj, int* vsize, Uint* csize,
- Uint* pvsize, Uint* pcsize,
- ErlDrvSizeT* total_size)
-{
- DECLARE_ESTACK(s);
- Eterm* objp;
- Uint v_size = 0;
- Uint c_size = 0;
- Uint b_size = 0;
- Uint in_clist = 0;
- Uint p_v_size = 0;
- Uint p_c_size = 0;
- Uint p_in_clist = 0;
- Uint total; /* Uint due to halfword emulator */
-
- goto L_jump_start; /* avoid a push */
-
- while (!ESTACK_ISEMPTY(s)) {
- obj = ESTACK_POP(s);
- L_jump_start:
- if (is_list(obj)) {
- L_iter_list:
- objp = list_val(obj);
- obj = CAR(objp);
-
- if (is_byte(obj)) {
- c_size++;
- if (c_size == 0) {
- goto L_overflow_error;
- }
- if (!in_clist) {
- in_clist = 1;
- v_size++;
- }
- p_c_size++;
- if (!p_in_clist) {
- p_in_clist = 1;
- p_v_size++;
- }
- }
- else if (is_binary(obj)) {
- IO_LIST_VEC_COUNT(obj);
- }
- else if (is_list(obj)) {
- ESTACK_PUSH(s, CDR(objp));
- goto L_iter_list; /* on head */
- }
- else if (!is_nil(obj)) {
- goto L_type_error;
- }
-
- obj = CDR(objp);
- if (is_list(obj))
- goto L_iter_list; /* on tail */
- else if (is_binary(obj)) { /* binary tail is OK */
- IO_LIST_VEC_COUNT(obj);
- }
- else if (!is_nil(obj)) {
- goto L_type_error;
- }
- }
- else if (is_binary(obj)) {
- IO_LIST_VEC_COUNT(obj);
- }
- else if (!is_nil(obj)) {
- goto L_type_error;
- }
- }
-
- total = c_size + b_size;
- if (total < c_size) {
- goto L_overflow_error;
- }
- *total_size = (ErlDrvSizeT) total;
-
- DESTROY_ESTACK(s);
- *vsize = v_size;
- *csize = c_size;
- *pvsize = p_v_size;
- *pcsize = p_c_size;
- return 0;
-
- L_type_error:
- L_overflow_error:
- DESTROY_ESTACK(s);
- return 1;
-}
typedef enum {
ERTS_TRY_IMM_DRV_CALL_OK,
@@ -1213,14 +878,15 @@ typedef struct {
/*
* Try doing an immediate driver callback call from a process. If
* this fails, the operation should be scheduled in the normal case...
- *
+ * Returns: ok to do the call, or error (lock busy, does not exist, etc)
*/
static ERTS_INLINE ErtsTryImmDrvCallResult
try_imm_drv_call(ErtsTryImmDrvCallState *sp)
{
+ unsigned int prof_runnable_ports;
ErtsTryImmDrvCallResult res;
int reds_left_in;
- erts_aint32_t invalid_state, invalid_sched_flags;
+ erts_aint32_t act, exp, invalid_state, invalid_sched_flags;
Port *prt = sp->port;
Process *c_p = sp->c_p;
@@ -1232,12 +898,12 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp)
invalid_sched_flags |= ERTS_PTS_FLG_PARALLELISM;
if (sp->pre_chk_sched_flags) {
- sp->sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ sp->sched_flags = erts_atomic32_read_nob(&prt->sched.flags);
if (sp->sched_flags & invalid_sched_flags)
return ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS;
}
- if (erts_smp_port_trylock(prt) == EBUSY)
+ if (erts_port_trylock(prt) == EBUSY)
return ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK;
invalid_state = sp->state;
@@ -1247,37 +913,61 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp)
goto locked_fail;
}
- sp->sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
- if (sp->sched_flags & invalid_sched_flags) {
- res = ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS;
- goto locked_fail;
- }
+ prof_runnable_ports = erts_system_profile_flags.runnable_ports;
+ if (prof_runnable_ports)
+ erts_port_task_sched_lock(&prt->sched);
+ act = erts_atomic32_read_nob(&prt->sched.flags);
+
+ do {
+ erts_aint32_t new;
+
+ if (act & invalid_sched_flags) {
+ res = ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS;
+ sp->sched_flags = act;
+ goto locked_fail;
+ }
+ exp = act;
+ new = act | ERTS_PTS_FLG_EXEC_IMM;
+ act = erts_atomic32_cmpxchg_mb(&prt->sched.flags, new, exp);
+ } while (act != exp);
+
+ sp->sched_flags = act;
if (!c_p)
reds_left_in = CONTEXT_REDS/10;
else {
if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS))
- trace_virtual_sched(c_p, am_out);
+ trace_sched(c_p, ERTS_PROC_LOCK_MAIN, am_out);
+ /*
+ * No status lock held while sending runnable
+ * proc trace messages. It is however not needed
+ * in this case, since only this thread can send
+ * such messages for this process until the process
+ * has been scheduled out.
+ */
if (erts_system_profile_flags.runnable_procs
&& erts_system_profile_flags.exclusive)
profile_runnable_proc(c_p, am_inactive);
reds_left_in = ERTS_BIF_REDS_LEFT(c_p);
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
}
ASSERT(0 <= reds_left_in && reds_left_in <= CONTEXT_REDS);
sp->reds_left_in = reds_left_in;
prt->reds = CONTEXT_REDS - reds_left_in;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
- if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS))
- trace_sched_ports_where(prt, am_in, sp->port_op);
- if (erts_system_profile_flags.runnable_ports
- && !erts_port_is_scheduled(prt))
- profile_runnable_port(prt, am_active);
+ if (prof_runnable_ports | IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) {
+ if (prof_runnable_ports && !(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)))
+ profile_runnable_port(prt, am_active);
+ if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS))
+ trace_sched_ports_where(prt, am_in, sp->port_op);
+ if (prof_runnable_ports)
+ erts_port_task_sched_unlock(&prt->sched);
+ }
sp->fpe_was_unmasked = erts_block_fpe();
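
The loop added to try_imm_drv_call() above is the standard compare-and-swap retry idiom: read the flag word, give up if an invalid flag is set, otherwise try to set ERTS_PTS_FLG_EXEC_IMM atomically; a failed cmpxchg leaves the freshly read value in act and the loop retries. The same shape in portable C11, for illustration only; ERTS uses its own erts_atomic32_cmpxchg_mb() wrapper and its real flag values:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define FLG_EXEC_IMM (1u << 0)
    #define FLG_INVALID  (1u << 1)   /* stand-in for invalid_sched_flags */

    static bool try_set_exec_imm(_Atomic unsigned *flags)
    {
        unsigned act = atomic_load(flags);
        for (;;) {
            if (act & FLG_INVALID)
                return false;             /* caller schedules instead */
            /* on failure, act is refreshed with the current value */
            if (atomic_compare_exchange_strong(flags, &act,
                                               act | FLG_EXEC_IMM))
                return true;
        }
    }
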
@@ -1294,22 +984,36 @@ finalize_imm_drv_call(ErtsTryImmDrvCallState *sp)
int reds;
Port *prt = sp->port;
Process *c_p = sp->c_p;
+ erts_aint32_t act;
+ unsigned int prof_runnable_ports;
reds = prt->reds;
reds += erts_port_driver_callback_epilogue(prt, NULL);
erts_unblock_fpe(sp->fpe_was_unmasked);
- if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS))
- trace_sched_ports_where(prt, am_out, sp->port_op);
- if (erts_system_profile_flags.runnable_ports
- && !erts_port_is_scheduled(prt))
- profile_runnable_port(prt, am_inactive);
+ prof_runnable_ports = erts_system_profile_flags.runnable_ports;
+ if (prof_runnable_ports)
+ erts_port_task_sched_lock(&prt->sched);
+
+ act = erts_atomic32_read_band_mb(&prt->sched.flags,
+ ~ERTS_PTS_FLG_EXEC_IMM);
+ ERTS_LC_ASSERT(act & ERTS_PTS_FLG_EXEC_IMM);
+
+ if (prof_runnable_ports | IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) {
+ if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS))
+ trace_sched_ports_where(prt, am_out, sp->port_op);
+ if (prof_runnable_ports) {
+ if (!(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)))
+ profile_runnable_port(prt, am_inactive);
+ erts_port_task_sched_unlock(&prt->sched);
+ }
+ }
erts_port_release(prt);
if (c_p) {
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
if (reds != (CONTEXT_REDS - sp->reds_left_in)) {
int bump_reds = reds - (CONTEXT_REDS - sp->reds_left_in);
@@ -1318,7 +1022,14 @@ finalize_imm_drv_call(ErtsTryImmDrvCallState *sp)
}
if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS))
- trace_virtual_sched(c_p, am_in);
+ trace_sched(c_p, ERTS_PROC_LOCK_MAIN, am_in);
+ /*
+ * No status lock held while sending runnable
+ * proc trace messages. It is however not needed
+ * in this case, since only this thread can send
+ * such messages for this process until the process
+ * has been scheduled out.
+ */
if (erts_system_profile_flags.runnable_procs
&& erts_system_profile_flags.exclusive)
profile_runnable_proc(c_p, am_active);
@@ -1354,56 +1065,39 @@ finalize_force_imm_drv_call(ErtsTryImmDrvCallState *sp)
erts_unblock_fpe(sp->fpe_was_unmasked);
}
-#define ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE (REF_THING_SIZE + 3)
+#define ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE (ERTS_REF_THING_SIZE + 3)
static ERTS_INLINE void
queue_port_sched_op_reply(Process *rp,
- ErtsProcLocks *rp_locksp,
- Eterm *hp_start,
- Eterm *hp,
- Uint h_size,
- ErlHeapFragment* bp,
+ ErtsProcLocks rp_locks,
+ ErtsHeapFactory* factory,
Uint32 *ref_num,
- Eterm msg)
+ Eterm msg,
+ Port* prt)
{
- Eterm ref = make_internal_ref(hp);
+ Eterm* hp = erts_produce_heap(factory, ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE, 0);
+ Eterm ref;
+
+ ref = make_internal_ref(hp);
write_ref_thing(hp, ref_num[0], ref_num[1], ref_num[2]);
- hp += REF_THING_SIZE;
+ hp += ERTS_REF_THING_SIZE;
msg = TUPLE2(hp, ref, msg);
- hp += 3;
- if (!bp) {
- HRelease(rp, hp_start + h_size, hp);
- }
- else {
- Uint used_h_size = hp - hp_start;
- ASSERT(h_size >= used_h_size);
- if (h_size > used_h_size)
- bp = erts_resize_message_buffer(bp, used_h_size, &msg, 1);
- }
+ erts_factory_trim_and_close(factory, &msg, 1);
- erts_queue_message(rp,
- rp_locksp,
- bp,
- msg,
- NIL
-#ifdef USE_VM_PROBES
- , NIL
-#endif
- );
+ erts_queue_message(rp, rp_locks, factory->message, msg,
+ prt ? prt->common.id : am_undefined);
}
static void
-port_sched_op_reply(Eterm to, Uint32 *ref_num, Eterm msg)
+port_sched_op_reply(Eterm to, Uint32 *ref_num, Eterm msg, Port* prt)
{
Process *rp = erts_proc_lookup_raw(to);
if (rp) {
- ErlOffHeap *ohp;
- ErlHeapFragment* bp;
+ ErtsHeapFactory factory;
Eterm msg_copy;
Uint hsz, msg_sz;
- Eterm *hp, *hp_start;
ErtsProcLocks rp_locks = 0;
hsz = ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE;
@@ -1414,32 +1108,28 @@ port_sched_op_reply(Eterm to, Uint32 *ref_num, Eterm msg)
hsz += msg_sz;
}
- hp_start = hp = erts_alloc_message_heap(hsz,
- &bp,
- &ohp,
- rp,
- &rp_locks);
- if (is_immed(msg))
- msg_copy = msg;
- else
- msg_copy = copy_struct(msg, msg_sz, &hp, ohp);
+ (void) erts_factory_message_create(&factory, rp,
+ &rp_locks, hsz);
+ msg_copy = (is_immed(msg)
+ ? msg
+ : copy_struct(msg, msg_sz,
+ &factory.hp,
+ factory.off_heap));
queue_port_sched_op_reply(rp,
- &rp_locks,
- hp_start,
- hp,
- hsz,
- bp,
+ rp_locks,
+ &factory,
ref_num,
- msg_copy);
+ msg_copy,
+ prt);
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
}
}
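
The rewrite of the two reply functions above replaces the hand-rolled heap-fragment bookkeeping (alloc, build, trim, resize) with the ErtsHeapFactory protocol. Condensed, with the calls and argument shapes as they appear in this diff, and with locking plus the immediate-term shortcut elided, the path is roughly:

    /* sketch only; not compilable outside ERTS */
    ErtsHeapFactory factory;
    erts_factory_message_create(&factory, rp, &rp_locks, hsz);   /* open  */
    hp = erts_produce_heap(&factory,
                           ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE, 0);
    /* ... write the ref and build the {Ref, Msg} tuple on hp ... */
    erts_factory_trim_and_close(&factory, &msg, 1);              /* close */
    erts_queue_message(rp, rp_locks, factory.message, msg, sender);

where sender is the port id or am_undefined, as in queue_port_sched_op_reply().
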
-ErtsPortOpResult
+static ErtsPortOpResult
erts_schedule_proc2port_signal(Process *c_p,
Port *prt,
Eterm caller,
@@ -1452,7 +1142,7 @@ erts_schedule_proc2port_signal(Process *c_p,
int sched_res;
if (!refp) {
if (c_p)
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
}
else {
ASSERT(c_p);
@@ -1473,22 +1163,21 @@ erts_schedule_proc2port_signal(Process *c_p,
* otherwise, next receive will *not* work
* as expected!
*/
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
if (ERTS_PROC_PENDING_EXIT(c_p)) {
/* need to exit caller instead */
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
KILL_CATCHES(c_p);
c_p->freason = EXC_EXIT;
return ERTS_PORT_OP_CALLER_EXIT;
}
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
+ ERTS_MSGQ_MV_INQ2PRIVQ(c_p);
c_p->msg.save = c_p->msg.last;
- erts_smp_proc_unlock(c_p,
- (ERTS_PROC_LOCK_MAIN
- | ERTS_PROC_LOCKS_MSG_RECEIVE));
+ erts_proc_unlock(c_p, (ERTS_PROC_LOCKS_MSG_RECEIVE
+ | ERTS_PROC_LOCK_MAIN));
}
@@ -1503,31 +1192,60 @@ erts_schedule_proc2port_signal(Process *c_p,
task_flags);
if (c_p)
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
if (sched_res != 0) {
- if (refp)
+ if (refp) {
+ /*
+ * We need to restore the message queue save
+ * pointer to the beginning of the message queue
+ * since the caller now won't wait for a message
+ * containing the reference created above...
+ */
+ ASSERT(c_p);
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ JOIN_MESSAGE(c_p);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
*refp = NIL;
+ }
return ERTS_PORT_OP_DROPPED;
}
return ERTS_PORT_OP_SCHEDULED;
}
-static ERTS_INLINE void
-send_badsig(Port *prt)
+static int
+erts_schedule_port2port_signal(Eterm port_num, ErtsProc2PortSigData *sigdp,
+ int task_flags,
+ ErtsProc2PortSigCallback callback)
{
+ Port *prt = erts_port_lookup_raw(port_num);
+
+ if (!prt)
+ return -1;
+
+ sigdp->caller = ERTS_INVALID_PID;
+
+ return erts_port_task_schedule(prt->common.id,
+ NULL,
+ ERTS_PORT_TASK_PROC_SIG,
+ sigdp,
+ callback,
+ task_flags);
+}
+
+static ERTS_INLINE void
+send_badsig(Port *prt) {
ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
Process* rp;
Eterm connected = ERTS_PORT_GET_CONNECTED(prt);
-
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
ERTS_LC_ASSERT(erts_get_scheduler_id());
ASSERT(is_internal_pid(connected));
rp = erts_proc_lookup_raw(connected);
if (rp) {
- erts_smp_proc_lock(rp, rp_locks);
+ erts_proc_lock(rp, rp_locks);
if (!ERTS_PROC_IS_EXITING(rp))
(void) erts_send_exit_signal(NULL,
prt->common.id,
@@ -1538,16 +1256,14 @@ send_badsig(Port *prt)
NULL,
0);
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
- }
-}
+ erts_proc_unlock(rp, rp_locks);
+ } /* exit sent */
+} /* send_badsig */
static void
-badsig_received(int bang_op,
- Port *prt,
+badsig_received(int bang_op, Port *prt,
erts_aint32_t state,
- int bad_output_value)
-{
+ int bad_output_value) {
/*
* if (bang_op)
* we are part of a "Prt ! Something" operation
@@ -1563,30 +1279,28 @@ badsig_received(int bang_op,
}
if (bang_op)
send_badsig(prt);
- }
-}
+ } /* not invalid */
+} /* behaved accordingly */
static int
-port_badsig(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *sigdp)
-{
+port_badsig(Port *prt, erts_aint32_t state, int op,
+ ErtsProc2PortSigData *sigdp) {
if (op == ERTS_PROC2PORT_SIG_EXEC)
badsig_received(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_BANG_OP,
prt,
state,
sigdp->flags & ERTS_P2P_SIG_DATA_FLG_BAD_OUTPUT);
if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
- port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg);
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg, prt);
return ERTS_PORT_REDS_BADSIG;
}


/*
 * bad_port_signal() will
* - preserve signal order of signals.
* - send a 'badsig' exit signal to connected process if 'from' is an
* internal pid and the port is alive when the bad signal reaches
* it.
*/
+
static ErtsPortOpResult
bad_port_signal(Process *c_p,
int flags,
@@ -1661,24 +1375,44 @@ call_driver_outputv(int bang_op,
if (bang_op && from != ERTS_PORT_GET_CONNECTED(prt))
send_badsig(prt);
else {
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
ErlDrvSizeT size = evp->size;
+ ERTS_MSACC_PUSH_AND_SET_STATE_M(ERTS_MSACC_STATE_PORT);
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)
|| ERTS_IS_CRASH_DUMPING);
+
+ if (IS_TRACED_FL(prt, F_TRACE_RECEIVE))
+ trace_port_receive(prt, caller, am_commandv, evp);
+
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(driver_outputv)) {
DTRACE_FORMAT_COMMON_PID_AND_PORT(caller, prt);
DTRACE4(driver_outputv, process_str, port_str, prt->name, size);
}
#endif
+#ifdef USE_LTTNG_VM_TRACEPOINTS
+ if (LTTNG_ENABLED(driver_outputv)) {
+ lttng_decl_portbuf(port_str);
+ lttng_decl_procbuf(proc_str);
+ lttng_pid_to_str(caller, proc_str);
+ lttng_port_to_str(prt, port_str);
+ LTTNG4(driver_outputv, proc_str, port_str, prt->name, size);
+ }
+#endif
prt->caller = caller;
(*drv->outputv)((ErlDrvData) prt->drv_data, evp);
prt->caller = NIL;
prt->bytes_out += size;
- erts_smp_atomic_add_nob(&erts_bytes_out, size);
+ if (esdp)
+ esdp->io.out += (Uint64) size;
+ else
+ erts_atomic64_add_nob(&bytes_out, (erts_aint64_t) size);
+
+ ERTS_MSACC_POP_STATE_M();
}
}
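/*
 * Editor's sketch (hedged): the byte-accounting pattern above. A thread
 * bound to a scheduler bumps its private counter without synchronization;
 * a thread with no scheduler data falls back to one shared atomic. C11
 * atomics stand in for the erts_atomic64 API here.
 */
#include <stdatomic.h>
#include <stdint.h>

struct sched_io { uint64_t out; };

static _Atomic uint64_t fallback_bytes_out;

static void account_bytes_out(struct sched_io *esdp_io, uint64_t size)
{
    if (esdp_io)
        esdp_io->out += size;  /* scheduler-private, no contention */
    else
        atomic_fetch_add_explicit(&fallback_bytes_out, size,
                                  memory_order_relaxed);
}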
@@ -1688,11 +1422,9 @@ cleanup_scheduled_outputv(ErlIOVec *ev, ErlDrvBinary *cbinp)
int i;
/* Need to free all binaries */
for (i = 1; i < ev->vsize; i++)
	if (ev->binv[i])
	    driver_free_binary(ev->binv[i]);
if (cbinp)
driver_free_binary(cbinp);
- erts_free(ERTS_ALC_T_DRV_CMD_DATA, ev);
}
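/*
 * Editor's sketch (hedged): the ownership rule behind the cleanup above,
 * over a toy refcounted binary. Slot 0 of the vector is reserved for a
 * driver-provided header and never owns a reference, which is why the
 * loop starts at index 1. Mock types; not the ErlDrvBinary API.
 */
#include <stdlib.h>

struct toy_bin { long refc; };

static void toy_bin_release(struct toy_bin *b)
{
    if (b && --b->refc == 0)
        free(b);
}

static void release_output_vec(struct toy_bin **binv, int vsize,
                               struct toy_bin *common_bin)
{
    int i;
    for (i = 1; i < vsize; i++)  /* slot 0: header slot, no reference */
        toy_bin_release(binv[i]);
    toy_bin_release(common_bin); /* the packed "common" binary, if any */
}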
static int
@@ -1704,7 +1436,7 @@ port_sig_outputv(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *s
case ERTS_PROC2PORT_SIG_EXEC:
/* Execution of a scheduled outputv() call */
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP)
reply = am_badarg;
@@ -1727,7 +1459,7 @@ port_sig_outputv(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *s
}
if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
- port_sched_op_reply(sigdp->caller, sigdp->ref, reply);
+ port_sched_op_reply(sigdp->caller, sigdp->ref, reply, prt);
cleanup_scheduled_outputv(sigdp->u.outputv.evp,
sigdp->u.outputv.cbinp);
@@ -1758,8 +1490,9 @@ call_driver_output(int bang_op,
if (bang_op && from != ERTS_PORT_GET_CONNECTED(prt))
send_badsig(prt);
else {
-
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ERTS_MSACC_PUSH_AND_SET_STATE_M(ERTS_MSACC_STATE_PORT);
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)
|| ERTS_IS_CRASH_DUMPING);
#ifdef USE_VM_PROBES
@@ -1768,13 +1501,30 @@ call_driver_output(int bang_op,
DTRACE4(driver_output, process_str, port_str, prt->name, size);
}
#endif
+#ifdef USE_LTTNG_VM_TRACEPOINTS
+ if (LTTNG_ENABLED(driver_output)) {
+ lttng_decl_portbuf(port_str);
+ lttng_decl_procbuf(proc_str);
+ lttng_pid_to_str(caller, proc_str);
+ lttng_port_to_str(prt, port_str);
+ LTTNG4(driver_output, proc_str, port_str, prt->name, size);
+ }
+#endif
+
+ if (IS_TRACED_FL(prt, F_TRACE_RECEIVE))
+ trace_port_receive(prt, caller, am_command, bufp, size);
prt->caller = caller;
(*drv->output)((ErlDrvData) prt->drv_data, bufp, size);
prt->caller = NIL;
prt->bytes_out += size;
- erts_smp_atomic_add_nob(&erts_bytes_out, size);
+ if (esdp)
+ esdp->io.out += (Uint64) size;
+ else
+ erts_atomic64_add_nob(&bytes_out, (erts_aint64_t) size);
+
+ ERTS_MSACC_POP_STATE_M();
}
}
@@ -1793,7 +1543,7 @@ port_sig_output(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *si
case ERTS_PROC2PORT_SIG_EXEC:
/* Execution of a scheduled output() call */
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP)
reply = am_badarg;
@@ -1817,13 +1567,157 @@ port_sig_output(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *si
}
if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
- port_sched_op_reply(sigdp->caller, sigdp->ref, reply);
+ port_sched_op_reply(sigdp->caller, sigdp->ref, reply, prt);
cleanup_scheduled_output(sigdp->u.output.bufp);
return ERTS_PORT_REDS_CMD_OUTPUT;
}
+
+/*
+ * This erts_port_output will always create a port task.
+ * The call is treated as a port_command call, i.e. no
+ * badsig is generated if the input is invalid. However,
+ * an error_logger message is generated.
+ */
+int
+erts_port_output_async(Port *prt, Eterm from, Eterm list)
+{
+
+ ErtsProc2PortSigData *sigdp;
+ erts_driver_t *drv = prt->drv_ptr;
+ size_t size;
+ int task_flags;
+ ErtsProc2PortSigCallback port_sig_callback;
+ ErtsIOQBinary *cbin = NULL;
+ ErtsIOVec *evp = NULL;
+ char *buf = NULL;
+ ErtsPortTaskHandle *ns_pthp;
+
+ if (drv->outputv) {
+ SysIOVec* ivp;
+ ErtsIOQBinary** bvp;
+ int vsize;
+ Uint csize;
+ Uint pvsize;
+ Uint pcsize;
+ size_t iov_offset, binv_offset, alloc_size;
+ Uint blimit = 0;
+ char *ptr;
+ int i;
+
+ if (erts_ioq_iodata_vec_len(list, &vsize, &csize, &pvsize, &pcsize,
+ &size, ERL_SMALL_IO_BIN_LIMIT))
+ goto bad_value;
+
+ /* To pack or not to pack (small binaries) ...? */
+ if (vsize >= SMALL_WRITE_VEC) {
+ /* Do pack */
+ vsize = pvsize + 1;
+ csize = pcsize;
+ blimit = ERL_SMALL_IO_BIN_LIMIT;
+ }
+ if (csize) {
+ cbin = (ErtsIOQBinary *)driver_alloc_binary(csize);
+ if (!cbin)
+ erts_alloc_enomem(ERTS_ALC_T_DRV_BINARY, ERTS_SIZEOF_Binary(csize));
+ }
+
+ iov_offset = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(ErlIOVec));
+ binv_offset = iov_offset;
+ binv_offset += ERTS_ALC_DATA_ALIGN_SIZE((vsize+1)*sizeof(SysIOVec));
+ alloc_size = binv_offset;
+ alloc_size += (vsize+1)*sizeof(ErtsIOQBinary *);
+
+ sigdp = erts_port_task_alloc_p2p_sig_data_extra(alloc_size, (void**)&ptr);
+
+ evp = (ErtsIOVec *) ptr;
+ ivp = evp->driver.iov = (SysIOVec *) (ptr + iov_offset);
+ bvp = evp->common.binv = (ErtsIOQBinary **) (ptr + binv_offset);
+
+ ivp[0].iov_base = NULL;
+ ivp[0].iov_len = 0;
+ bvp[0] = NULL;
+
+ evp->driver.vsize = erts_ioq_iodata_to_vec(list, ivp+1, bvp+1, cbin,
+ blimit, 1);
+ if (evp->driver.vsize < 0) {
+ erts_free(ERTS_ALC_T_DRV_CMD_DATA, evp);
+ driver_free_binary(&cbin->driver);
+ goto bad_value;
+ }
+#if 0
+ /* This assertion may say something useful, but it can
+ be falsified during the emulator test suites. */
+ ASSERT(evp->vsize == vsize);
+#endif
+ evp->driver.vsize++;
+ evp->driver.size = size; /* total size */
+
+ /* Need to increase refc on all binaries */
+ for (i = 1; i < evp->driver.vsize; i++)
+ if (bvp[i])
+ driver_binary_inc_refc(&bvp[i]->driver);
+
+ sigdp->flags = ERTS_P2P_SIG_TYPE_OUTPUTV;
+ sigdp->u.outputv.from = from;
+ sigdp->u.outputv.evp = &evp->driver;
+ sigdp->u.outputv.cbinp = &cbin->driver;
+ port_sig_callback = port_sig_outputv;
+ } else {
+ ErlDrvSizeT ERTS_DECLARE_DUMMY(r);
+
+ /*
+	 * Apparently there exists code that writes 1 byte too
+	 * much to the buffer. Where it resides we don't know, but
+ * we can live with one byte extra allocated...
+ */
+
+ if (erts_iolist_size(list, &size))
+ goto bad_value;
+
+ buf = erts_alloc(ERTS_ALC_T_DRV_CMD_DATA, size + 1);
+
+ r = erts_iolist_to_buf(list, buf, size);
+ ASSERT(ERTS_IOLIST_TO_BUF_SUCCEEDED(r));
+
+ sigdp = erts_port_task_alloc_p2p_sig_data();
+ sigdp->flags = ERTS_P2P_SIG_TYPE_OUTPUT;
+ sigdp->u.output.from = from;
+ sigdp->u.output.bufp = buf;
+ sigdp->u.output.size = size;
+ port_sig_callback = port_sig_output;
+ }
+ ns_pthp = NULL;
+ task_flags = 0;
+
+ erts_schedule_proc2port_signal(NULL,
+ prt,
+ ERTS_INVALID_PID,
+ NULL,
+ sigdp,
+ task_flags,
+ ns_pthp,
+ port_sig_callback);
+
+ return 1;
+
+bad_value:
+
+ /*
+ * We call badsig directly here as this function is called with
+ * the main lock of the calling process still held.
+     * At the moment this operation is never a bang_op, so
+ * only an error_logger message should be generated, no badsig.
+ */
+
+ badsig_received(0, prt, erts_atomic32_read_nob(&prt->state), 1);
+
+ return 0;
+
+}
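/*
 * Editor's sketch (hedged): the "to pack or not to pack" choice made in
 * both erts_port_output_async() and erts_port_output(). When the raw
 * vector would be long, chunks smaller than a limit are copied into one
 * common binary so the driver sees a short vector. The constants are mock
 * values; ERTS uses SMALL_WRITE_VEC and ERL_SMALL_IO_BIN_LIMIT.
 */
#define MOCK_SMALL_WRITE_VEC 16
#define MOCK_SMALL_BIN_LIMIT 64

struct vec_plan {
    int vsize;           /* vector entries (excluding the header slot) */
    unsigned int csize;  /* bytes to copy into the common binary */
    unsigned int blimit; /* chunks below this get packed; 0 = no packing */
};

static struct vec_plan
plan_output_vec(int raw_vsize, unsigned int raw_csize,
                int packed_vsize, unsigned int packed_csize)
{
    struct vec_plan p = { raw_vsize, raw_csize, 0 };
    if (raw_vsize >= MOCK_SMALL_WRITE_VEC) {
        p.vsize = packed_vsize + 1;  /* +1 as in the hunks above */
        p.csize = packed_csize;
        p.blimit = MOCK_SMALL_BIN_LIMIT;
    }
    return p;
}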
+
ErtsPortOpResult
erts_port_output(Process *c_p,
int flags,
@@ -1833,15 +1727,15 @@ erts_port_output(Process *c_p,
Eterm *refp)
{
ErtsPortOpResult res;
- ErtsProc2PortSigData *sigdp;
+ ErtsProc2PortSigData *sigdp = NULL;
erts_driver_t *drv = prt->drv_ptr;
size_t size;
int try_call;
erts_aint32_t sched_flags, busy_flgs, invalid_flags;
int task_flags;
ErtsProc2PortSigCallback port_sig_callback;
- ErlDrvBinary *cbin = NULL;
- ErlIOVec *evp = NULL;
+ ErtsIOQBinary *cbin = NULL;
+ ErtsIOVec *evp = NULL;
char *buf = NULL;
int force_immediate_call = (flags & ERTS_PORT_SIG_FLG_FORCE_IMM_CALL);
int async_nosuspend;
@@ -1864,7 +1758,7 @@ erts_port_output(Process *c_p,
* Assumes caller have checked that port is valid...
*/
- sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ sched_flags = erts_atomic32_read_nob(&prt->sched.flags);
if (sched_flags & (busy_flgs|ERTS_PTS_FLG_EXIT))
return ((sched_flags & ERTS_PTS_FLG_EXIT)
? ERTS_PORT_OP_DROPPED
@@ -1886,13 +1780,12 @@ erts_port_output(Process *c_p,
DTRACE4(port_command, process_str, port_str, prt->name, "command");
}
#endif
-
if (drv->outputv) {
- ErlIOVec ev;
+ ErtsIOVec ev;
SysIOVec iv[SMALL_WRITE_VEC];
- ErlDrvBinary* bv[SMALL_WRITE_VEC];
+ ErtsIOQBinary* bv[SMALL_WRITE_VEC];
SysIOVec* ivp;
- ErlDrvBinary** bvp;
+ ErtsIOQBinary** bvp;
int vsize;
Uint csize;
Uint pvsize;
@@ -1900,28 +1793,32 @@ erts_port_output(Process *c_p,
Uint blimit;
size_t iov_offset, binv_offset, alloc_size;
- if (io_list_vec_len(list, &vsize, &csize, &pvsize, &pcsize, &size))
+ if (erts_ioq_iodata_vec_len(list, &vsize, &csize, &pvsize, &pcsize,
+ &size, ERL_SMALL_IO_BIN_LIMIT))
goto bad_value;
iov_offset = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(ErlIOVec));
binv_offset = iov_offset;
binv_offset += ERTS_ALC_DATA_ALIGN_SIZE((vsize+1)*sizeof(SysIOVec));
alloc_size = binv_offset;
- alloc_size += (vsize+1)*sizeof(ErlDrvBinary *);
+ alloc_size += (vsize+1)*sizeof(ErtsIOQBinary *);
if (try_call && vsize < SMALL_WRITE_VEC) {
- ivp = ev.iov = iv;
- bvp = ev.binv = bv;
+ ivp = ev.common.iov = iv;
+ bvp = ev.common.binv = bv;
evp = &ev;
}
else {
- char *ptr = erts_alloc((try_call
- ? ERTS_ALC_T_TMP
- : ERTS_ALC_T_DRV_CMD_DATA), alloc_size);
-
- evp = (ErlIOVec *) ptr;
- ivp = evp->iov = (SysIOVec *) (ptr + iov_offset);
- bvp = evp->binv = (ErlDrvBinary **) (ptr + binv_offset);
+ char *ptr;
+ if (try_call) {
+ ptr = erts_alloc(ERTS_ALC_T_TMP, alloc_size);
+ } else {
+ sigdp = erts_port_task_alloc_p2p_sig_data_extra(
+ alloc_size, (void**)&ptr);
+ }
+ evp = (ErtsIOVec *) ptr;
+ ivp = evp->driver.iov = (SysIOVec *) (ptr + iov_offset);
+ bvp = evp->common.binv = (ErtsIOQBinary **) (ptr + binv_offset);
}
/* To pack or not to pack (small binaries) ...? */
@@ -1937,20 +1834,26 @@ erts_port_output(Process *c_p,
}
/* Use vsize and csize from now on */
- cbin = driver_alloc_binary(csize);
- if (!cbin)
- erts_alloc_enomem(ERTS_ALC_T_DRV_BINARY, ERTS_SIZEOF_Binary(csize));
+ if (csize) {
+ cbin = (ErtsIOQBinary *)driver_alloc_binary(csize);
+ if (!cbin)
+ erts_alloc_enomem(ERTS_ALC_T_DRV_BINARY, ERTS_SIZEOF_Binary(csize));
+ }
/* Element 0 is for driver usage to add header block */
ivp[0].iov_base = NULL;
ivp[0].iov_len = 0;
bvp[0] = NULL;
- evp->vsize = io_list_to_vec(list, ivp+1, bvp+1, cbin, blimit);
- if (evp->vsize < 0) {
- if (evp != &ev)
- erts_free(try_call ? ERTS_ALC_T_TMP : ERTS_ALC_T_DRV_CMD_DATA,
- evp);
- driver_free_binary(cbin);
+ evp->driver.vsize = erts_ioq_iodata_to_vec(list, ivp+1, bvp+1,
+ cbin, blimit, 1);
+ if (evp->driver.vsize < 0) {
+ if (evp != &ev) {
+ if (try_call)
+ erts_free(ERTS_ALC_T_TMP, evp);
+ else
+ erts_port_task_free_p2p_sig_data(sigdp);
+ }
+ driver_free_binary(&cbin->driver);
goto bad_value;
}
#if 0
@@ -1958,19 +1861,19 @@ erts_port_output(Process *c_p,
be falsified during the emulator test suites. */
ASSERT(evp->vsize == vsize);
#endif
- evp->vsize++;
- evp->size = size; /* total size */
+ evp->driver.vsize++;
+ evp->driver.size = size; /* total size */
if (!try_call) {
int i;
/* Need to increase refc on all binaries */
- for (i = 1; i < evp->vsize; i++)
- if (bvp[i])
- driver_binary_inc_refc(bvp[i]);
+ for (i = 1; i < evp->driver.vsize; i++)
+ if (bvp[i])
+ driver_binary_inc_refc(&bvp[i]->driver);
}
else {
int i;
- ErlIOVec *new_evp;
+ ErtsIOVec *new_evp;
ErtsTryImmDrvCallResult try_call_res;
ErtsTryImmDrvCallState try_call_state
= ERTS_INIT_TRY_IMM_DRV_CALL_STATE(
@@ -1993,16 +1896,18 @@ erts_port_output(Process *c_p,
from,
prt,
drv,
- evp);
+ &evp->driver);
if (force_immediate_call)
finalize_force_imm_drv_call(&try_call_state);
else
finalize_imm_drv_call(&try_call_state);
/* Fall through... */
case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
- driver_free_binary(cbin);
- if (evp != &ev)
+ driver_free_binary(&cbin->driver);
+ if (evp != &ev) {
+ ASSERT(!sigdp);
erts_free(ERTS_ALC_T_TMP, evp);
+ }
if (try_call_res != ERTS_TRY_IMM_DRV_CALL_OK)
return ERTS_PORT_OP_DROPPED;
if (c_p)
@@ -2012,9 +1917,11 @@ erts_port_output(Process *c_p,
sched_flags = try_call_state.sched_flags;
if (async_nosuspend
&& (sched_flags & (busy_flgs|ERTS_PTS_FLG_EXIT))) {
- driver_free_binary(cbin);
- if (evp != &ev)
+ driver_free_binary(&cbin->driver);
+ if (evp != &ev) {
+ ASSERT(!sigdp);
erts_free(ERTS_ALC_T_TMP, evp);
+ }
return ((sched_flags & ERTS_PTS_FLG_EXIT)
? ERTS_PORT_OP_DROPPED
: ERTS_PORT_OP_BUSY);
@@ -2025,26 +1932,33 @@ erts_port_output(Process *c_p,
}
/* Need to increase refc on all binaries */
- for (i = 1; i < evp->vsize; i++)
+ for (i = 1; i < evp->driver.vsize; i++)
if (bvp[i])
- driver_binary_inc_refc(bvp[i]);
+ driver_binary_inc_refc(&bvp[i]->driver);
- new_evp = erts_alloc(ERTS_ALC_T_DRV_CMD_DATA, alloc_size);
+	    /* The port task and iovec are allocated in the
+	       same structure as an optimization. This
+	       is especially important in erts_port_output_async
+	       when !try_call */
+ ASSERT(sigdp == NULL);
+ sigdp = erts_port_task_alloc_p2p_sig_data_extra(
+ alloc_size, (void**)&new_evp);
if (evp != &ev) {
+ /* Copy from TMP alloc to port task */
sys_memcpy((void *) new_evp, (void *) evp, alloc_size);
- new_evp->iov = (SysIOVec *) (((char *) new_evp)
- + iov_offset);
- bvp = new_evp->binv = (ErlDrvBinary **) (((char *) new_evp)
- + binv_offset);
+ new_evp->driver.iov = (SysIOVec *) (((char *) new_evp)
+ + iov_offset);
+ bvp = new_evp->common.binv = (ErtsIOQBinary **) (((char *) new_evp)
+ + binv_offset);
#ifdef DEBUG
- ASSERT(new_evp->vsize == evp->vsize);
- ASSERT(new_evp->size == evp->size);
- for (i = 0; i < evp->vsize; i++) {
- ASSERT(new_evp->iov[i].iov_len == evp->iov[i].iov_len);
- ASSERT(new_evp->iov[i].iov_base == evp->iov[i].iov_base);
- ASSERT(new_evp->binv[i] == evp->binv[i]);
+ ASSERT(new_evp->driver.vsize == evp->driver.vsize);
+ ASSERT(new_evp->driver.size == evp->driver.size);
+ for (i = 0; i < evp->driver.vsize; i++) {
+ ASSERT(new_evp->driver.iov[i].iov_len == evp->driver.iov[i].iov_len);
+ ASSERT(new_evp->driver.iov[i].iov_base == evp->driver.iov[i].iov_base);
+ ASSERT(new_evp->driver.binv[i] == evp->driver.binv[i]);
}
#endif
@@ -2053,24 +1967,24 @@ erts_port_output(Process *c_p,
else { /* from stack allocated structure; offsets may differ */
sys_memcpy((void *) new_evp, (void *) evp, sizeof(ErlIOVec));
- new_evp->iov = (SysIOVec *) (((char *) new_evp)
- + iov_offset);
- sys_memcpy((void *) new_evp->iov,
- (void *) evp->iov,
- evp->vsize * sizeof(SysIOVec));
- new_evp->binv = (ErlDrvBinary **) (((char *) new_evp)
- + binv_offset);
- sys_memcpy((void *) new_evp->binv,
- (void *) evp->binv,
- evp->vsize * sizeof(ErlDrvBinary *));
+ new_evp->driver.iov = (SysIOVec *) (((char *) new_evp)
+ + iov_offset);
+ sys_memcpy((void *) new_evp->driver.iov,
+ (void *) evp->driver.iov,
+ evp->driver.vsize * sizeof(SysIOVec));
+ new_evp->common.binv = (ErtsIOQBinary **) (((char *) new_evp)
+ + binv_offset);
+ sys_memcpy((void *) new_evp->common.binv,
+ (void *) evp->common.binv,
+ evp->driver.vsize * sizeof(ErtsIOQBinary *));
#ifdef DEBUG
- ASSERT(new_evp->vsize == evp->vsize);
- ASSERT(new_evp->size == evp->size);
- for (i = 0; i < evp->vsize; i++) {
- ASSERT(new_evp->iov[i].iov_len == evp->iov[i].iov_len);
- ASSERT(new_evp->iov[i].iov_base == evp->iov[i].iov_base);
- ASSERT(new_evp->binv[i] == evp->binv[i]);
+ ASSERT(new_evp->driver.vsize == evp->driver.vsize);
+ ASSERT(new_evp->driver.size == evp->driver.size);
+ for (i = 0; i < evp->driver.vsize; i++) {
+ ASSERT(new_evp->driver.iov[i].iov_len == evp->driver.iov[i].iov_len);
+ ASSERT(new_evp->driver.iov[i].iov_base == evp->driver.iov[i].iov_base);
+ ASSERT(new_evp->driver.binv[i] == evp->driver.binv[i]);
}
#endif
@@ -2079,11 +1993,10 @@ erts_port_output(Process *c_p,
evp = new_evp;
}
- sigdp = erts_port_task_alloc_p2p_sig_data();
sigdp->flags = ERTS_P2P_SIG_TYPE_OUTPUTV;
sigdp->u.outputv.from = from;
- sigdp->u.outputv.evp = evp;
- sigdp->u.outputv.cbinp = cbin;
+ sigdp->u.outputv.evp = &evp->driver;
+ sigdp->u.outputv.cbinp = &cbin->driver;
port_sig_callback = port_sig_outputv;
}
else {
@@ -2201,7 +2114,7 @@ erts_port_output(Process *c_p,
sigdp->flags &= ~ERTS_P2P_SIG_DATA_FLG_NOSUSPEND;
else if (async_nosuspend) {
ErtsSchedulerData *esdp = (c_p
- ? ERTS_PROC_GET_SCHDATA(c_p)
+ ? erts_proc_sched_data(c_p)
: erts_get_scheduler_data());
ASSERT(esdp);
ns_pthp = &esdp->nosuspend_port_task_handle;
@@ -2224,15 +2137,11 @@ erts_port_output(Process *c_p,
port_sig_callback);
if (res != ERTS_PORT_OP_SCHEDULED) {
- if (drv->outputv)
- cleanup_scheduled_outputv(evp, cbin);
- else
- cleanup_scheduled_output(buf);
return res;
}
if (!(flags & ERTS_PORT_SIG_FLG_FORCE)) {
- sched_flags = erts_smp_atomic32_read_acqb(&prt->sched.flags);
+ sched_flags = erts_atomic32_read_acqb(&prt->sched.flags);
if (!(sched_flags & ERTS_PTS_FLG_BUSY_PORT)) {
if (async_nosuspend)
erts_port_task_tmp_handle_detach(ns_pthp);
@@ -2288,7 +2197,10 @@ call_deliver_port_exit(int bang_op,
return ERTS_PORT_OP_DROPPED;
}
- if (!erts_deliver_port_exit(prt, from, reason, bang_op))
+ if (IS_TRACED_FL(prt, F_TRACE_RECEIVE))
+ trace_port_receive(prt, from, am_close);
+
+ if (!erts_deliver_port_exit(prt, from, reason, bang_op, broken_link))
return ERTS_PORT_OP_DROPPED;
#ifdef USE_VM_PROBES
@@ -2325,7 +2237,7 @@ port_sig_exit(Port *prt,
if (sigdp->u.exit.bp)
free_message_buffer(sigdp->u.exit.bp);
if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
- port_sched_op_reply(sigdp->caller, sigdp->ref, msg);
+ port_sched_op_reply(sigdp->caller, sigdp->ref, msg, prt);
return ERTS_PORT_REDS_EXIT;
}
@@ -2347,6 +2259,11 @@ erts_port_exit(Process *c_p,
| ERTS_PORT_SIG_FLG_BROKEN_LINK
| ERTS_PORT_SIG_FLG_FORCE_SCHED)) == 0);
+#ifndef __WIN32__
+ if (prt->drv_ptr == &forker_driver)
+ return ERTS_PORT_OP_DROPPED;
+#endif
+
if (!(flags & ERTS_PORT_SIG_FLG_FORCE_SCHED)) {
ErtsTryImmDrvCallState try_call_state
= ERTS_INIT_TRY_IMM_DRV_CALL_STATE(c_p,
@@ -2354,13 +2271,13 @@ erts_port_exit(Process *c_p,
ERTS_PORT_SFLGS_INVALID_LOOKUP,
0,
!refp,
- am_exit);
+ am_close);
switch (try_imm_drv_call(&try_call_state)) {
case ERTS_TRY_IMM_DRV_CALL_OK: {
res = call_deliver_port_exit(flags & ERTS_PORT_SIG_FLG_BANG_OP,
- from,
+ c_p ? c_p->common.id : from,
prt,
try_call_state.state,
reason,
@@ -2398,21 +2315,14 @@ erts_port_exit(Process *c_p,
&bp->off_heap);
}
- res = erts_schedule_proc2port_signal(c_p,
- prt,
- c_p ? c_p->common.id : from,
- refp,
- sigdp,
- 0,
- NULL,
- port_sig_exit);
-
- if (res == ERTS_PORT_OP_DROPPED) {
- if (bp)
- free_message_buffer(bp);
- }
-
- return res;
+ return erts_schedule_proc2port_signal(c_p,
+ prt,
+ c_p ? c_p->common.id : from,
+ refp,
+ sigdp,
+ 0,
+ NULL,
+ port_sig_exit);
}
static ErtsPortOpResult
@@ -2439,8 +2349,11 @@ set_port_connected(int bang_op,
return ERTS_PORT_OP_DROPPED;
}
+ if (IS_TRACED_FL(prt, F_TRACE_RECEIVE))
+ trace_port_receive(prt, from, am_connect, connect);
+
ERTS_PORT_SET_CONNECTED(prt, connect);
- deliver_result(prt->common.id, from, am_connected);
+ deliver_result(prt, prt->common.id, from, am_connected);
#ifdef USE_VM_PROBES
if(DTRACE_ENABLED(port_command)) {
@@ -2453,18 +2366,30 @@ set_port_connected(int bang_op,
Process *rp = erts_proc_lookup_raw(connect);
if (!rp)
return ERTS_PORT_OP_DROPPED;
- erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK);
+ erts_proc_lock(rp, ERTS_PROC_LOCK_LINK);
if (ERTS_PROC_IS_EXITING(rp)) {
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
return ERTS_PORT_OP_DROPPED;
}
erts_add_link(&ERTS_P_LINKS(rp), LINK_PID, prt->common.id);
erts_add_link(&ERTS_P_LINKS(prt), LINK_PID, connect);
+ if (IS_TRACED_FL(rp, F_TRACE_PROCS))
+ trace_proc(NULL, 0, rp, am_getting_linked, prt->common.id);
+
ERTS_PORT_SET_CONNECTED(prt, connect);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+
+ if (IS_TRACED_FL(prt, F_TRACE_PORTS))
+ trace_port(prt, am_getting_linked, connect);
+ if (IS_TRACED_FL(prt, F_TRACE_RECEIVE))
+ trace_port_receive(prt, from, am_connect, connect);
+ if (IS_TRACED_FL(prt, F_TRACE_SEND)) {
+ Eterm hp[3];
+ trace_port_send(prt, from, TUPLE2(hp, prt->common.id, am_connected), 1);
+ }
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(port_connect)) {
@@ -2498,7 +2423,7 @@ port_sig_connect(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *s
msg = am_true;
}
if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
- port_sched_op_reply(sigdp->caller, sigdp->ref, msg);
+ port_sched_op_reply(sigdp->caller, sigdp->ref, msg, prt);
return ERTS_PORT_REDS_CONNECT;
}
@@ -2568,8 +2493,11 @@ static void
port_unlink(Port *prt, Eterm from)
{
ErtsLink *lnk = erts_remove_link(&ERTS_P_LINKS(prt), from);
- if (lnk)
+ if (lnk) {
+ if (IS_TRACED_FL(prt, F_TRACE_PORTS))
+ trace_port(prt, am_getting_unlinked, from);
erts_destroy_link(lnk);
+ }
}
static int
@@ -2578,7 +2506,7 @@ port_sig_unlink(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *si
if (op == ERTS_PROC2PORT_SIG_EXEC)
port_unlink(prt, sigdp->u.unlink.from);
if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
- port_sched_op_reply(sigdp->caller, sigdp->ref, am_true);
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_true, prt);
return ERTS_PORT_REDS_UNLINK;
}
@@ -2639,13 +2567,13 @@ port_link_failure(Eterm port_id, Eterm linker)
NIL,
NULL,
0);
- if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) {
+ if (xres >= 0) {
/* We didn't exit the process and it is traced */
if (IS_TRACED_FL(rp, F_TRACE_PROCS))
- trace_proc(NULL, rp, am_getting_unlinked, port_id);
+ trace_proc(NULL, 0, rp, am_getting_unlinked, port_id);
}
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
}
}
}
@@ -2653,10 +2581,15 @@ port_link_failure(Eterm port_id, Eterm linker)
static void
port_link(Port *prt, erts_aint32_t state, Eterm to)
{
- if (!(state & ERTS_PORT_SFLGS_INVALID_LOOKUP))
+ if (IS_TRACED_FL(prt, F_TRACE_PORTS))
+ trace_port(prt, am_getting_linked, to);
+ if (!(state & ERTS_PORT_SFLGS_INVALID_LOOKUP)) {
erts_add_link(&ERTS_P_LINKS(prt), LINK_PID, to);
- else
+ } else {
port_link_failure(prt->common.id, to);
+ if (IS_TRACED_FL(prt, F_TRACE_PORTS))
+ trace_port(prt, am_unlink, to);
+ }
}
static int
@@ -2664,10 +2597,11 @@ port_sig_link(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *sigd
{
if (op == ERTS_PROC2PORT_SIG_EXEC)
port_link(prt, state, sigdp->u.link.to);
- else
+ else {
port_link_failure(sigdp->u.link.port, sigdp->u.link.to);
+ }
if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
- port_sched_op_reply(sigdp->caller, sigdp->ref, am_true);
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_true, prt);
return ERTS_PORT_REDS_LINK;
}
@@ -2711,24 +2645,336 @@ erts_port_link(Process *c_p, Port *prt, Eterm to, Eterm *refp)
port_sig_link);
}
+static void
+port_monitor_failure(Eterm port_id, Eterm origin, Eterm ref_DOWN)
+{
+ Process *origin_p;
+ ErtsProcLocks p_locks = ERTS_PROC_LOCK_LINK;
+ ASSERT(is_internal_pid(origin));
+
+ origin_p = erts_pid2proc(NULL, 0, origin, p_locks);
+ if (! origin_p) { return; }
+
+ /* Send the DOWN message immediately. Ref is made on the fly because
+ * caller has never seen it yet. */
+ erts_queue_monitor_message(origin_p, &p_locks, ref_DOWN,
+ am_port, port_id, am_noproc);
+ erts_proc_unlock(origin_p, p_locks);
+}
+
+/* Origin wants to monitor port Prt. State contains a possible error, which
+ * has happened just before. Name is either NIL or an atom, if the user
+ * monitors a port by name. Ref is a premade reference that will be returned
+ * to the user */
+static void
+port_monitor(Port *prt, erts_aint32_t state, Eterm origin,
+ Eterm name, Eterm ref)
+{
+ Eterm name_or_nil = is_atom(name) ? name : NIL;
+
+ ASSERT(is_pid(origin));
+ ASSERT(is_atom(name) || is_port(name) || name == NIL);
+ ASSERT(is_internal_ordinary_ref(ref));
+
+ if (!(state & ERTS_PORT_SFLGS_INVALID_LOOKUP)) {
+ ErtsProcLocks p_locks = ERTS_PROC_LOCK_LINK;
+
+ Process *origin_p = erts_pid2proc(NULL, 0, origin, p_locks);
+ if (! origin_p) {
+ goto failure;
+ }
+ erts_add_monitor(&ERTS_P_MONITORS(origin_p), MON_ORIGIN, ref,
+ prt->common.id, name_or_nil);
+ erts_add_monitor(&ERTS_P_MONITORS(prt), MON_TARGET, ref,
+ origin, name_or_nil);
+
+ erts_proc_unlock(origin_p, p_locks);
+ } else {
+failure:
+ port_monitor_failure(prt->common.id, origin, ref);
+ }
+}
+
+static int
+port_sig_monitor(Port *prt, erts_aint32_t state, int op,
+ ErtsProc2PortSigData *sigdp)
+{
+ Eterm hp[ERTS_REF_THING_SIZE];
+ Eterm ref = make_internal_ref(&hp);
+ write_ref_thing(hp, sigdp->ref[0], sigdp->ref[1], sigdp->ref[2]);
+
+ if (op == ERTS_PROC2PORT_SIG_EXEC) {
+ /* erts_add_monitor call inside port_monitor will copy ref from hp */
+ port_monitor(prt, state,
+ sigdp->u.monitor.origin,
+ sigdp->u.monitor.name,
+ ref);
+ } else {
+ port_monitor_failure(sigdp->u.monitor.name,
+ sigdp->u.monitor.origin,
+ ref);
+ }
+ if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY) {
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_true, prt);
+ }
+ return ERTS_PORT_REDS_MONITOR;
+}
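/*
 * Editor's sketch (hedged): why port_sig_monitor()/port_sig_demonitor()
 * rebuild the reference on a small stack heap. The scheduled signal can
 * only carry three raw 32-bit words, so the ref is flattened when the
 * signal is created and reconstructed (write_ref_thing) when it runs.
 * The layout below is a mock, not the ERTS ref thing.
 */
#include <stdint.h>

struct flat_ref { uint32_t w[3]; };

static struct flat_ref flatten_ref(const uint32_t *nums)
{
    struct flat_ref r = { { nums[0], nums[1], nums[2] } };
    return r;  /* stored in the signal; rebuilt on a stack heap later */
}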
+
+/* Creates a monitor between Origin and Target. Ref must be initialized to
+ * a reference (ref may be rewritten so that it additionally serves as a
+ * signal id). Name is an atom if the user monitors the port by name, or NIL */
+ErtsPortOpResult
+erts_port_monitor(Process *origin, Port *port, Eterm name, Eterm *refp)
+{
+ ErtsProc2PortSigData *sigdp;
+ ErtsTryImmDrvCallState try_call_state
+ = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(
+ origin, port, ERTS_PORT_SFLGS_INVALID_LOOKUP,
+ 0,
+          0, /* trap_ref is always set so !trap_ref is always false */
+ am_monitor);
+
+ ASSERT(origin);
+ ASSERT(port);
+ ASSERT(is_atom(name) || is_port(name));
+ ASSERT(refp);
+
+ switch (try_imm_drv_call(&try_call_state)) {
+ case ERTS_TRY_IMM_DRV_CALL_OK:
+ port_monitor(port, try_call_state.state, origin->common.id, name, *refp);
+ finalize_imm_drv_call(&try_call_state);
+ BUMP_REDS(origin, ERTS_PORT_REDS_MONITOR);
+ return ERTS_PORT_OP_DONE;
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
+ return ERTS_PORT_OP_BADARG;
+ default:
+ break; /* Schedule call instead... */
+ }
+
+ sigdp = erts_port_task_alloc_p2p_sig_data();
+ sigdp->flags = ERTS_P2P_SIG_TYPE_MONITOR;
+ sigdp->u.monitor.origin = origin->common.id;
+ sigdp->u.monitor.name = name; /* either named monitor, or port id */
+
+ /* Ref contents will be initialized here */
+ return erts_schedule_proc2port_signal(origin, port, origin->common.id,
+ refp, sigdp, 0, NULL,
+ port_sig_monitor);
+}
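/*
 * Editor's sketch (hedged): the try-immediate-else-schedule pattern that
 * erts_port_monitor() and most other signal senders in this file share.
 * The enum values and callback types are mock; the control flow mirrors
 * the switch above.
 */
enum try_imm { TRY_IMM_OK, TRY_IMM_INVALID, TRY_IMM_BUSY };

typedef int (*sig_exec)(void *port, void *sigdp);

static int send_port_signal(void *port, void *sigdp,
                            enum try_imm (*try_lock)(void *),
                            sig_exec run_now)
{
    switch (try_lock(port)) {
    case TRY_IMM_OK:       /* got the port lock: execute inline */
        return run_now(port, sigdp);
    case TRY_IMM_INVALID:  /* dead or closing port */
        return -1;
    default:               /* busy: enqueue sigdp as a port task */
        return 0;          /* ... and reply asynchronously later */
    }
}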
+
+static void
+port_demonitor_failure(Eterm port_id, Eterm origin, Eterm ref)
+{
+ Process *origin_p;
+ ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK;
+ ErtsMonitor *mon1;
+ ASSERT(is_internal_pid(origin));
+
+ origin_p = erts_pid2proc(NULL, 0, origin, rp_locks);
+ if (! origin_p) { return; }
+
+ /* do not send any DOWN messages, drop monitors on process */
+ mon1 = erts_remove_monitor(&ERTS_P_MONITORS(origin_p), ref);
+ if (mon1 != NULL) {
+ erts_destroy_monitor(mon1);
+ }
+
+ erts_proc_unlock(origin_p, rp_locks);
+}
+
+/* Origin wants to demonitor port Prt. State contains a possible error, which
+ * has happened just before. Ref is the reference to the monitor */
+static void
+port_demonitor(Port *port, erts_aint32_t state, Eterm origin, Eterm ref)
+{
+ ASSERT(port);
+ ASSERT(is_pid(origin));
+ ASSERT(is_internal_ref(ref));
+
+ if (!(state & ERTS_PORT_SFLGS_INVALID_LOOKUP)) {
+ ErtsProcLocks p_locks = ERTS_PROC_LOCK_LINK;
+ Process *origin_p = erts_pid2proc(NULL, 0, origin, p_locks);
+ if (origin_p) {
+ ErtsMonitor *mon1 = erts_remove_monitor(&ERTS_P_MONITORS(origin_p),
+ ref);
+ if (mon1 != NULL) {
+ erts_destroy_monitor(mon1);
+ }
+ }
+ if (1) {
+ ErtsMonitor *mon2 = erts_remove_monitor(&ERTS_P_MONITORS(port),
+ ref);
+ if (mon2 != NULL) {
+ erts_destroy_monitor(mon2);
+ }
+ }
+ if (origin_p) { /* when origin is dying, it won't be found */
+ erts_proc_unlock(origin_p, p_locks);
+ }
+ } else {
+ port_demonitor_failure(port->common.id, origin, ref);
+ }
+}
+
+static int
+port_sig_demonitor(Port *prt, erts_aint32_t state, int op,
+ ErtsProc2PortSigData *sigdp)
+{
+ Eterm hp[ERTS_REF_THING_SIZE];
+ Eterm ref = make_internal_ref(&hp);
+ write_ref_thing(hp, sigdp->u.demonitor.ref[0],
+ sigdp->u.demonitor.ref[1],
+ sigdp->u.demonitor.ref[2]);
+ if (op == ERTS_PROC2PORT_SIG_EXEC) {
+ port_demonitor(prt, state, sigdp->u.demonitor.origin, ref);
+ } else {
+ port_demonitor_failure(sigdp->u.demonitor.name,
+ sigdp->u.demonitor.origin,
+ ref);
+ }
+ if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY) {
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_true, prt);
+ }
+ return ERTS_PORT_REDS_DEMONITOR;
+}
+
+/* Removes the monitor between origin and target, identified by ref.
+ * Mode selects normal or relaxed demonitor rules (the latter when the
+ * process is dying) */
+ErtsPortOpResult erts_port_demonitor(Process *origin, ErtsDemonitorMode mode,
+ Port *target, Eterm ref,
+ Eterm *trap_ref)
+{
+ Process *c_p = mode == ERTS_PORT_DEMONITOR_NORMAL ? origin : NULL;
+ ErtsProc2PortSigData *sigdp;
+ ErtsTryImmDrvCallState try_call_state
+ = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(
+ c_p,
+ target, ERTS_PORT_SFLGS_INVALID_LOOKUP,
+ 0,
+ !trap_ref,
+ am_demonitor);
+
+ ASSERT(origin);
+ ASSERT(target);
+ ASSERT(is_internal_ref(ref));
+
+ switch (try_imm_drv_call(&try_call_state)) {
+ case ERTS_TRY_IMM_DRV_CALL_OK:
+ port_demonitor(target, try_call_state.state, origin->common.id, ref);
+ finalize_imm_drv_call(&try_call_state);
+ if (mode == ERTS_PORT_DEMONITOR_NORMAL) {
+ BUMP_REDS(origin, ERTS_PORT_REDS_DEMONITOR);
+ }
+ return ERTS_PORT_OP_DONE;
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
+ return ERTS_PORT_OP_BADARG;
+ default:
+ break; /* Schedule call instead... */
+ }
+
+ sigdp = erts_port_task_alloc_p2p_sig_data();
+ sigdp->flags = ERTS_P2P_SIG_TYPE_DEMONITOR;
+ sigdp->u.demonitor.origin = origin->common.id;
+ sigdp->u.demonitor.name = target->common.id;
+ {
+ Uint32 *nums = internal_ref_numbers(ref);
+        /* Start from 1 to skip the ref arity */
+ sys_memcpy(sigdp->u.demonitor.ref,
+ nums,
+ sizeof(sigdp->u.demonitor.ref));
+ }
+
+ /* Ref contents will be initialized here */
+ return erts_schedule_proc2port_signal(c_p, target, origin->common.id,
+ trap_ref, sigdp, 0, NULL,
+ port_sig_demonitor);
+}
+
+static void
+init_ack_send_reply(Port *port, Eterm resp)
+{
+
+ if (!is_internal_port(resp)) {
+ Process *rp = erts_proc_lookup_raw(port->async_open_port->to);
+ erts_proc_lock(rp, ERTS_PROC_LOCK_LINK);
+ erts_remove_link(&ERTS_P_LINKS(port), port->async_open_port->to);
+ erts_remove_link(&ERTS_P_LINKS(rp), port->common.id);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ }
+ port_sched_op_reply(port->async_open_port->to,
+ port->async_open_port->ref,
+ resp,
+ port);
+
+ erts_free(ERTS_ALC_T_PRTSD, port->async_open_port);
+ port->async_open_port = NULL;
+}
+
+void
+erl_drv_init_ack(ErlDrvPort ix, ErlDrvData res) {
+ Port *port = erts_drvport2port(ix);
+ SWord err_type = (SWord)res;
+ Eterm resp;
+
+    if (port == ERTS_INVALID_ERL_DRV_PORT)
+ return;
+
+ if (port->async_open_port) {
+ switch(err_type) {
+ case -3:
+ resp = am_badarg;
+ break;
+ case -2: {
+ char *str = erl_errno_id(errno);
+ resp = erts_atom_put((byte *) str, strlen(str),
+ ERTS_ATOM_ENC_LATIN1, 1);
+ break;
+ }
+ case -1:
+ resp = am_einval;
+ break;
+ default:
+ resp = port->common.id;
+ break;
+ }
+
+ init_ack_send_reply(port, resp);
+
+ if (err_type == -1 || err_type == -2 || err_type == -3)
+ driver_failure_term(ix, am_normal, 0);
+ port->drv_data = err_type;
+ }
+}
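/*
 * Editor's sketch (hedged): how a driver might acknowledge an asynchronous
 * open from its own code, following the error mapping in the switch above:
 * -3 => badarg, -2 => {error, erl_errno_id(errno)}, -1 => einval, anything
 * else is taken as the driver data and the port id is replied.
 * finish_async_open() is a hypothetical helper, not an OTP function.
 */
#include <errno.h>
#include <erl_driver.h>

static void finish_async_open(ErlDrvPort port, ErlDrvData state,
                              int failed_errno)
{
    if (failed_errno) {
        errno = failed_errno;  /* consumed when the errno atom is built */
        erl_drv_init_ack(port, (ErlDrvData) (ErlDrvSInt) -2);
    } else {
        erl_drv_init_ack(port, state);  /* success: port id is replied */
    }
}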
+
+void
+erl_drv_set_os_pid(ErlDrvPort ix, ErlDrvSInt pid) {
+ Port *port = erts_drvport2port(ix);
+
+ if (port == ERTS_INVALID_ERL_DRV_PORT)
+ return;
+
+ port->os_pid = (SWord)pid;
+}
+
void erts_init_io(int port_tab_size,
int port_tab_size_ignore_files,
int legacy_port_tab)
{
ErlDrvEntry** dp;
UWord common_element_size;
- erts_smp_rwmtx_opt_t drv_list_rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
- drv_list_rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
- drv_list_rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+ erts_rwmtx_opt_t drv_list_rwmtx_opts = ERTS_RWMTX_OPT_DEFAULT_INITER;
+ drv_list_rwmtx_opts.type = ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
+ drv_list_rwmtx_opts.lived = ERTS_RWMTX_LONG_LIVED;
+
+ erts_atomic64_init_nob(&bytes_in, 0);
+ erts_atomic64_init_nob(&bytes_out, 0);
common_element_size = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(Port));
common_element_size += ERTS_ALC_DATA_ALIGN_SIZE(sizeof(ErtsPortTaskBusyPortQ));
common_element_size += 10; /* name */
-#ifdef ERTS_SMP
common_element_size += sizeof(erts_mtx_t);
init_xports_list_alloc();
-#endif
pdl_init();
@@ -2743,13 +2989,12 @@ void erts_init_io(int port_tab_size,
else if (port_tab_size < ERTS_MIN_PORTS)
port_tab_size = ERTS_MIN_PORTS;
- erts_smp_rwmtx_init_opt(&erts_driver_list_lock,
- &drv_list_rwmtx_opts,
- "driver_list");
+ erts_rwmtx_init_opt(&erts_driver_list_lock, &drv_list_rwmtx_opts, "driver_list", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO);
driver_list = NULL;
- erts_smp_tsd_key_create(&driver_list_lock_status_key,
+ erts_tsd_key_create(&driver_list_lock_status_key,
"erts_driver_list_lock_status_key");
- erts_smp_tsd_key_create(&driver_list_last_error_key,
+ erts_tsd_key_create(&driver_list_last_error_key,
"erts_driver_list_last_error_key");
erts_ptab_init_table(&erts_port,
@@ -2757,90 +3002,120 @@ void erts_init_io(int port_tab_size,
NULL,
(ErtsPTabElementCommon *) &erts_invalid_port.common,
port_tab_size,
- common_element_size, /* Doesn't need to be excact */
+ common_element_size, /* Doesn't need to be exact */
"port_table",
- legacy_port_tab);
-
- erts_smp_atomic_init_nob(&erts_bytes_out, 0);
- erts_smp_atomic_init_nob(&erts_bytes_in, 0);
+ legacy_port_tab,
+ 1);
sys_init_io();
- erts_smp_tsd_set(driver_list_lock_status_key, (void *) 1);
- erts_smp_rwmtx_rwlock(&erts_driver_list_lock);
+ erts_tsd_set(driver_list_lock_status_key, (void *) 1);
+ erts_rwmtx_rwlock(&erts_driver_list_lock);
init_driver(&fd_driver, &fd_driver_entry, NULL);
-#ifndef __OSE__
init_driver(&vanilla_driver, &vanilla_driver_entry, NULL);
-#endif
init_driver(&spawn_driver, &spawn_driver_entry, NULL);
+#ifndef __WIN32__
+ init_driver(&forker_driver, &forker_driver_entry, NULL);
+#endif
erts_init_static_drivers();
for (dp = driver_tab; *dp != NULL; dp++)
erts_add_driver_entry(*dp, NULL, 1);
- erts_smp_tsd_set(driver_list_lock_status_key, NULL);
- erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
+ erts_tsd_set(driver_list_lock_status_key, NULL);
+ erts_rwmtx_rwunlock(&erts_driver_list_lock);
}
-#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP)
-
-static ERTS_INLINE void lcnt_enable_drv_lock_count(erts_driver_t *dp, int enable)
+#if defined(ERTS_ENABLE_LOCK_COUNT)
+static void lcnt_enable_driver_lock_count(erts_driver_t *dp, int enable)
{
if (dp->lock) {
- if (enable)
- erts_lcnt_init_lock_x(&dp->lock->lcnt,
- "driver_lock",
- ERTS_LCNT_LT_MUTEX,
- erts_atom_put((byte*)dp->name,
- sys_strlen(dp->name),
- ERTS_ATOM_ENC_LATIN1,
- 1));
-
- else
- erts_lcnt_destroy_lock(&dp->lock->lcnt);
-
+ if (enable) {
+ Eterm name_as_atom = erts_atom_put((byte*)dp->name, sys_strlen(dp->name),
+ ERTS_ATOM_ENC_LATIN1, 1);
+
+ erts_lcnt_install_new_lock_info(&dp->lock->lcnt, "driver_lock", name_as_atom,
+ ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
+ } else {
+ erts_lcnt_uninstall(&dp->lock->lcnt);
+ }
}
}
-static ERTS_INLINE void lcnt_enable_port_lock_count(Port *prt, int enable)
+static void lcnt_enable_port_lock_count(Port *prt, int enable)
{
erts_aint32_t state = erts_atomic32_read_nob(&prt->state);
- if (!enable) {
- erts_lcnt_destroy_lock(&prt->sched.mtx.lcnt);
- if (state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK)
- erts_lcnt_destroy_lock(&prt->lock->lcnt);
- }
- else {
- erts_lcnt_init_lock_x(&prt->sched.mtx.lcnt,
- "port_sched_lock",
- ERTS_LCNT_LT_MUTEX,
- prt->common.id);
- if (state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK)
- erts_lcnt_init_lock_x(&prt->lock->lcnt,
- "port_lock",
- ERTS_LCNT_LT_MUTEX,
- prt->common.id);
+
+ if(enable) {
+ ErlDrvPDL pdl = prt->port_data_lock;
+
+ erts_lcnt_install_new_lock_info(&prt->sched.mtx.lcnt, "port_sched_lock",
+ prt->common.id, ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
+
+ if(pdl) {
+ erts_lcnt_install_new_lock_info(&pdl->mtx.lcnt, "port_data_lock",
+ prt->common.id, ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
+ }
+
+ if(state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) {
+ erts_lcnt_install_new_lock_info(&prt->lock->lcnt, "port_lock",
+ prt->common.id, ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
+ }
+ } else {
+ erts_lcnt_uninstall(&prt->sched.mtx.lcnt);
+
+ if(prt->port_data_lock) {
+ erts_lcnt_uninstall(&prt->port_data_lock->mtx.lcnt);
+ }
+
+ if(state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) {
+ erts_lcnt_uninstall(&prt->lock->lcnt);
+ }
}
}
-void erts_lcnt_enable_io_lock_count(int enable)
-{
- erts_driver_t *dp;
- int i, max = erts_ptab_max(&erts_port);
+void erts_lcnt_update_driver_locks(int enable) {
+ erts_driver_t *driver;
- for (i = 0; i < max; i++) {
- Port *prt = erts_pix2port(i);
- if (prt)
- lcnt_enable_port_lock_count(prt, enable);
+ lcnt_enable_driver_lock_count(&vanilla_driver, enable);
+ lcnt_enable_driver_lock_count(&spawn_driver, enable);
+#ifndef __WIN32__
+ lcnt_enable_driver_lock_count(&forker_driver, enable);
+#endif
+ lcnt_enable_driver_lock_count(&fd_driver, enable);
+
+ erts_rwmtx_rlock(&erts_driver_list_lock);
+
+ for (driver = driver_list; driver; driver = driver->next) {
+ lcnt_enable_driver_lock_count(driver, enable);
}
- lcnt_enable_drv_lock_count(&vanilla_driver, enable);
- lcnt_enable_drv_lock_count(&spawn_driver, enable);
- lcnt_enable_drv_lock_count(&fd_driver, enable);
- for (dp = driver_list; dp; dp = dp->next)
- lcnt_enable_drv_lock_count(dp, enable);
+ erts_rwmtx_runlock(&erts_driver_list_lock);
}
-#endif
+
+void erts_lcnt_update_port_locks(int enable) {
+ int i, max;
+
+ max = erts_ptab_max(&erts_port);
+
+ for(i = 0; i < max; i++) {
+ int delay_handle;
+ Port *port;
+
+ delay_handle = erts_thr_progress_unmanaged_delay();
+ port = erts_pix2port(i);
+
+ if(port != NULL) {
+ lcnt_enable_port_lock_count(port, enable);
+ }
+
+ if(delay_handle != ERTS_THR_PRGR_DHANDLE_MANAGED) {
+ erts_thr_progress_unmanaged_continue(delay_handle);
+ }
+ }
+}
+
+#endif /* defined(ERTS_ENABLE_LOCK_COUNT) */
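/*
 * Editor's sketch (hedged): the iteration pattern used by
 * erts_lcnt_update_port_locks() above. A possibly unmanaged thread must
 * hold off thread progress around each table probe; on a managed thread
 * the delay call hands back a no-op handle. All names below are mock
 * stand-ins for the erts_thr_progress_* calls in the hunk.
 */
#define MOCK_DHANDLE_MANAGED (-1)

static void *mock_table[128];

static int  mock_unmanaged_delay(void) { return MOCK_DHANDLE_MANAGED; }
static void mock_unmanaged_continue(int h) { (void) h; }

static void for_each_slot(int max, void (*fn)(void *))
{
    int i;
    for (i = 0; i < max; i++) {
        int h = mock_unmanaged_delay();  /* block progress while probing */
        void *obj = i < 128 ? mock_table[i] : 0;
        if (obj)
            fn(obj);
        if (h != MOCK_DHANDLE_MANAGED)   /* managed threads: no release */
            mock_unmanaged_continue(h);
    }
}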
/*
* Buffering of data when using line oriented I/O on ports
@@ -3014,41 +3289,46 @@ static int read_linebuf(LineBufContext *bp)
}
static void
-deliver_result(Eterm sender, Eterm pid, Eterm res)
+deliver_result(Port *prt, Eterm sender, Eterm pid, Eterm res)
{
Process *rp;
ErtsProcLocks rp_locks = 0;
int scheduler = erts_get_scheduler_id() != 0;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
+
+ ASSERT(!prt || prt->common.id == sender);
+ ERTS_LC_ASSERT(!prt || erts_lc_is_port_locked(prt));
ASSERT(is_internal_port(sender) && is_internal_pid(pid));
rp = (scheduler
? erts_proc_lookup(pid)
- : erts_pid2proc_opt(NULL, 0, pid, 0, ERTS_P2P_FLG_SMP_INC_REFC));
+ : erts_pid2proc_opt(NULL, 0, pid, 0, ERTS_P2P_FLG_INC_REFC));
+
+ if (prt && IS_TRACED_FL(prt, F_TRACE_SEND)) {
+ Eterm hp[3];
+ trace_port_send(prt, pid, TUPLE2(hp, sender, res), !!rp);
+ }
if (rp) {
Eterm tuple;
- ErlHeapFragment *bp;
+ ErtsMessage *mp;
ErlOffHeap *ohp;
Eterm* hp;
Uint sz_res;
sz_res = size_object(res);
- hp = erts_alloc_message_heap(sz_res + 3, &bp, &ohp, rp, &rp_locks);
+ mp = erts_alloc_message_heap(rp, &rp_locks,
+ sz_res + 3, &hp, &ohp);
res = copy_struct(res, sz_res, &hp, ohp);
tuple = TUPLE2(hp, sender, res);
- erts_queue_message(rp, &rp_locks, bp, tuple, NIL
-#ifdef USE_VM_PROBES
- , NIL
-#endif
- );
+ erts_queue_message(rp, rp_locks, mp, tuple, sender);
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
if (!scheduler)
- erts_smp_proc_dec_refc(rp);
+ erts_proc_dec_refc(rp);
}
}
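/*
 * Editor's sketch (hedged): the measure/allocate/copy/wrap sequence in
 * deliver_result() above, over a toy flat term. ERTS does the same with
 * size_object(), erts_alloc_message_heap(), copy_struct() and TUPLE2(),
 * where the 2-tuple costs three heap words (header plus two elements).
 */
#include <stdlib.h>

struct toy_term { size_t words; long *data; };

static struct toy_term wrap_result(long sender, struct toy_term res)
{
    struct toy_term msg;
    size_t i;
    msg.words = res.words + 3;       /* +3 ~ the TUPLE2 sizing above */
    msg.data = malloc(msg.words * sizeof(long));
    msg.data[0] = 2;                 /* toy tuple header: arity 2 */
    msg.data[1] = sender;
    msg.data[2] = (long) res.words;  /* toy tag: inline payload size */
    for (i = 0; i < res.words; i++)  /* toy copy_struct() */
        msg.data[3 + i] = res.data[i];
    return msg;                      /* enqueued on the receiver next */
}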
@@ -3071,13 +3351,14 @@ static void deliver_read_message(Port* prt, erts_aint32_t state, Eterm to,
Eterm tuple;
Process* rp;
Eterm* hp;
- ErlHeapFragment *bp;
+ ErtsMessage *mp;
ErlOffHeap *ohp;
ErtsProcLocks rp_locks = 0;
int scheduler = erts_get_scheduler_id() != 0;
+ int trace_send = IS_TRACED_FL(prt, F_TRACE_SEND);
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_CHK_NO_PROC_LOCKS;
need = 3 + 3 + 2*hlen;
@@ -3092,12 +3373,12 @@ static void deliver_read_message(Port* prt, erts_aint32_t state, Eterm to,
rp = (scheduler
? erts_proc_lookup(to)
- : erts_pid2proc_opt(NULL, 0, to, 0, ERTS_P2P_FLG_SMP_INC_REFC));
+ : erts_pid2proc_opt(NULL, 0, to, 0, ERTS_P2P_FLG_INC_REFC));
if (!rp)
return;
- hp = erts_alloc_message_heap(need, &bp, &ohp, rp, &rp_locks);
+ mp = erts_alloc_message_heap(trace_send ? NULL : rp, &rp_locks, need, &hp, &ohp);
listp = NIL;
if ((state & ERTS_PORT_SFLG_BINARY_IO) == 0) {
@@ -3107,9 +3388,6 @@ static void deliver_read_message(Port* prt, erts_aint32_t state, Eterm to,
Binary* bptr;
bptr = erts_bin_nrml_alloc(len);
- bptr->flags = 0;
- bptr->orig_size = len;
- erts_refc_init(&bptr->refc, 1);
sys_memcpy(bptr->orig_bytes, buf, len);
pb = (ProcBin *) hp;
@@ -3141,15 +3419,15 @@ static void deliver_read_message(Port* prt, erts_aint32_t state, Eterm to,
tuple = TUPLE2(hp, prt->common.id, tuple);
hp += 3;
- erts_queue_message(rp, &rp_locks, bp, tuple, am_undefined
-#ifdef USE_VM_PROBES
- , NIL
-#endif
- );
+ if (trace_send)
+ trace_port_send(prt, to, tuple, 1);
+
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
+ erts_queue_message(rp, rp_locks, mp, tuple, prt->common.id);
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
if (!scheduler)
- erts_smp_proc_dec_refc(rp);
+ erts_proc_dec_refc(rp);
}
/*
@@ -3182,7 +3460,7 @@ static void flush_linebuf_messages(Port *prt, erts_aint32_t state)
LineBufContext lc;
int ret;
- ERTS_SMP_LC_ASSERT(!prt || erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(!prt || erts_lc_is_port_locked(prt));
if (!prt)
return;
@@ -3219,14 +3497,15 @@ deliver_vec_message(Port* prt, /* Port */
Eterm tuple;
Process* rp;
Eterm* hp;
- ErlHeapFragment *bp;
+ ErtsMessage *mp;
ErlOffHeap *ohp;
ErtsProcLocks rp_locks = 0;
int scheduler = erts_get_scheduler_id() != 0;
erts_aint32_t state;
+ int trace_send = IS_TRACED_FL(prt, F_TRACE_SEND);
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_CHK_NO_PROC_LOCKS;
/*
* Check arguments for validity.
@@ -3235,7 +3514,7 @@ deliver_vec_message(Port* prt, /* Port */
rp = (scheduler
? erts_proc_lookup(to)
- : erts_pid2proc_opt(NULL, 0, to, 0, ERTS_P2P_FLG_SMP_INC_REFC));
+ : erts_pid2proc_opt(NULL, 0, to, 0, ERTS_P2P_FLG_INC_REFC));
if (!rp)
return;
@@ -3251,7 +3530,7 @@ deliver_vec_message(Port* prt, /* Port */
need += (hlen+csize)*2;
}
- hp = erts_alloc_message_heap(need, &bp, &ohp, rp, &rp_locks);
+ mp = erts_alloc_message_heap(trace_send ? NULL : rp, &rp_locks, need, &hp, &ohp);
listp = NIL;
iov += vsize;
@@ -3312,14 +3591,14 @@ deliver_vec_message(Port* prt, /* Port */
tuple = TUPLE2(hp, prt->common.id, tuple);
hp += 3;
- erts_queue_message(rp, &rp_locks, bp, tuple, am_undefined
-#ifdef USE_VM_PROBES
- , NIL
-#endif
- );
- erts_smp_proc_unlock(rp, rp_locks);
+ if (IS_TRACED_FL(prt, F_TRACE_SEND))
+ trace_port_send(prt, to, tuple, 1);
+
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
+ erts_queue_message(rp, rp_locks, mp, tuple, prt->common.id);
+ erts_proc_unlock(rp, rp_locks);
if (!scheduler)
- erts_smp_proc_dec_refc(rp);
+ erts_proc_dec_refc(rp);
}
@@ -3352,30 +3631,42 @@ static void flush_port(Port *p)
{
int fpe_was_unmasked;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p));
+ ERTS_CHK_NO_PROC_LOCKS;
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(p));
if (p->drv_ptr->flush != NULL) {
+ ERTS_MSACC_PUSH_STATE_M();
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(driver_flush)) {
DTRACE_FORMAT_COMMON_PID_AND_PORT(ERTS_PORT_GET_CONNECTED(p), p)
DTRACE3(driver_flush, process_str, port_str, p->name);
}
#endif
+#ifdef USE_LTTNG_VM_TRACEPOINTS
+ if (LTTNG_ENABLED(driver_flush)) {
+ lttng_decl_portbuf(port_str);
+ lttng_decl_procbuf(proc_str);
+ lttng_pid_to_str(ERTS_PORT_GET_CONNECTED(p), proc_str);
+ lttng_port_to_str(p, port_str);
+ LTTNG3(driver_flush, proc_str, port_str, p->name);
+ }
+#endif
+
if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
trace_sched_ports_where(p, am_in, am_flush);
}
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_PORT);
fpe_was_unmasked = erts_block_fpe();
(*p->drv_ptr->flush)((ErlDrvData)p->drv_data);
erts_unblock_fpe(fpe_was_unmasked);
+ ERTS_MSACC_POP_STATE_M();
if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
trace_sched_ports_where(p, am_out, am_flush);
}
-#ifdef ERTS_SMP
if (p->xports)
erts_port_handle_xports(p);
ASSERT(!p->xports);
-#endif
}
if ((erts_atomic32_read_nob(&p->state) & ERTS_PORT_SFLGS_DEAD) == 0
&& is_port_ioq_empty(p)) {
@@ -3391,9 +3682,10 @@ terminate_port(Port *prt)
Eterm connected_id = NIL /* Initialize to silence compiler */;
erts_driver_t *drv;
erts_aint32_t state;
+ ErtsPrtSD *psd;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_CHK_NO_PROC_LOCKS;
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
ASSERT(!ERTS_P_LINKS(prt));
ASSERT(!ERTS_P_MONITORS(prt));
@@ -3409,33 +3701,45 @@ terminate_port(Port *prt)
send_closed_port_id = NIL;
}
-#ifdef ERTS_SMP
- erts_cancel_smp_ptimer(prt->common.u.alive.ptimer);
-#else
- erts_cancel_timer(&prt->common.u.alive.tm);
-#endif
+ if (ERTS_PTMR_IS_SET(prt))
+ erts_cancel_port_timer(prt);
drv = prt->drv_ptr;
if ((drv != NULL) && (drv->stop != NULL)) {
int fpe_was_unmasked = erts_block_fpe();
+ ERTS_MSACC_PUSH_AND_SET_STATE_M(ERTS_MSACC_STATE_PORT);
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(driver_stop)) {
DTRACE_FORMAT_COMMON_PID_AND_PORT(connected_id, prt)
DTRACE3(driver_stop, process_str, drv->name, port_str);
}
#endif
+#ifdef USE_LTTNG_VM_TRACEPOINTS
+ if (LTTNG_ENABLED(driver_stop)) {
+ lttng_decl_portbuf(port_str);
+ lttng_decl_procbuf(proc_str);
+ lttng_pid_to_str(connected_id, proc_str);
+ lttng_port_to_str(prt, port_str);
+ LTTNG3(driver_stop, proc_str, port_str, drv->name);
+ }
+#endif
+
(*drv->stop)((ErlDrvData)prt->drv_data);
erts_unblock_fpe(fpe_was_unmasked);
-#ifdef ERTS_SMP
+ ERTS_MSACC_POP_STATE_M();
if (prt->xports)
erts_port_handle_xports(prt);
ASSERT(!prt->xports);
-#endif
}
+
+ if (is_internal_port(send_closed_port_id)
+ && IS_TRACED_FL(prt, F_TRACE_SEND))
+ trace_port_send(prt, connected_id, am_closed, 1);
+
if(drv->handle != NULL) {
- erts_smp_rwmtx_rlock(&erts_driver_list_lock);
+ erts_rwmtx_rlock(&erts_driver_list_lock);
erts_ddll_decrement_port_count(drv->handle);
- erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ erts_rwmtx_runlock(&erts_driver_list_lock);
}
stopq(prt); /* clear queue memory */
if(prt->linebuf != NULL){
@@ -3445,8 +3749,9 @@ terminate_port(Port *prt)
erts_cleanup_port_data(prt);
- if (prt->psd)
- erts_free(ERTS_ALC_T_PRTSD, prt->psd);
+ psd = (ErtsPrtSD *) erts_atomic_read_nob(&prt->psd);
+ if (psd)
+ erts_free(ERTS_ALC_T_PRTSD, psd);
ASSERT(prt->dist_entry == NULL);
@@ -3457,12 +3762,12 @@ terminate_port(Port *prt)
* port has been removed from the port table (in kill_port()).
*/
if ((state & ERTS_PORT_SFLG_HALT)
- && (erts_smp_atomic32_dec_read_nob(&erts_halt_progress) == 0)) {
+ && (erts_atomic32_dec_read_nob(&erts_halt_progress) == 0)) {
erts_port_release(prt); /* We will exit and never return */
- erl_exit_flush_async(erts_halt_code, "");
+ erts_flush_async_exit(erts_halt_code, "");
}
if (is_internal_port(send_closed_port_id))
- deliver_result(send_closed_port_id, connected_id, am_closed);
+ deliver_result(NULL, send_closed_port_id, connected_id, am_closed);
}
void
@@ -3471,23 +3776,30 @@ erts_terminate_port(Port *pp)
terminate_port(pp);
}
+static void port_fire_one_monitor(ErtsMonitor *mon, void *ctx0);
static void sweep_one_monitor(ErtsMonitor *mon, void *vpsc)
{
- ErtsMonitor *rmon;
- Process *rp;
+ switch (mon->type) {
+ case MON_ORIGIN: {
+ ErtsMonitor *rmon;
+ Process *rp;
- ASSERT(mon->type == MON_ORIGIN);
- ASSERT(is_internal_pid(mon->pid));
- rp = erts_pid2proc(NULL, 0, mon->pid, ERTS_PROC_LOCK_LINK);
- if (!rp) {
- goto done;
- }
- rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
- if (rmon == NULL) {
- goto done;
+ ASSERT(is_internal_pid(mon->u.pid));
+ rp = erts_pid2proc(NULL, 0, mon->u.pid, ERTS_PROC_LOCK_LINK);
+ if (!rp) {
+ goto done;
+ }
+ rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ if (rmon == NULL) {
+ goto done;
+ }
+ erts_destroy_monitor(rmon);
+ } break;
+ case MON_TARGET: {
+ port_fire_one_monitor(mon, vpsc); /* forward call */
+ } break;
}
- erts_destroy_monitor(rmon);
done:
erts_destroy_monitor(mon);
}
@@ -3495,7 +3807,7 @@ static void sweep_one_monitor(ErtsMonitor *mon, void *vpsc)
typedef struct {
- Eterm port;
+ Port *port;
Eterm reason;
} SweepContext;
@@ -3504,10 +3816,13 @@ static void sweep_one_link(ErtsLink *lnk, void *vpsc)
SweepContext *psc = vpsc;
DistEntry *dep;
Process *rp;
-
+ Eterm port_id = psc->port->common.id;
ASSERT(lnk->type == LINK_PID);
-
+
+ if (IS_TRACED_FL(psc->port, F_TRACE_PORTS))
+ trace_port(psc->port, am_unlink, lnk->pid);
+
if (is_external_pid(lnk->pid)) {
dep = external_pid_dist_entry(lnk->pid);
if(dep != erts_this_dist_entry) {
@@ -3520,9 +3835,9 @@ static void sweep_one_link(ErtsLink *lnk, void *vpsc)
case ERTS_DSIG_PREP_NOT_CONNECTED:
break;
case ERTS_DSIG_PREP_CONNECTED:
- erts_remove_dist_link(&dld, psc->port, lnk->pid, dep);
+ erts_remove_dist_link(&dld, port_id, lnk->pid, dep);
erts_destroy_dist_link(&dld);
- code = erts_dsig_send_exit(&dsd, psc->port, lnk->pid,
+ code = erts_dsig_send_exit(&dsd, port_id, lnk->pid,
psc->reason);
ASSERT(code == ERTS_DSIG_SEND_OK);
break;
@@ -3536,33 +3851,72 @@ static void sweep_one_link(ErtsLink *lnk, void *vpsc)
ASSERT(is_internal_pid(lnk->pid));
rp = erts_pid2proc(NULL, 0, lnk->pid, rp_locks);
if (rp) {
- ErtsLink *rlnk = erts_remove_link(&ERTS_P_LINKS(rp), psc->port);
+ ErtsLink *rlnk = erts_remove_link(&ERTS_P_LINKS(rp), port_id);
if (rlnk) {
int xres = erts_send_exit_signal(NULL,
- psc->port,
+ port_id,
rp,
&rp_locks,
psc->reason,
NIL,
NULL,
0);
- if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) {
+ if (xres >= 0) {
+ if (rp_locks & ERTS_PROC_LOCKS_XSIG_SEND) {
+ erts_proc_unlock(rp, ERTS_PROC_LOCKS_XSIG_SEND);
+ rp_locks &= ~ERTS_PROC_LOCKS_XSIG_SEND;
+ }
/* We didn't exit the process and it is traced */
- if (IS_TRACED_FL(rp, F_TRACE_PROCS)) {
- trace_proc(NULL, rp, am_getting_unlinked,
- psc->port);
- }
+ if (IS_TRACED_FL(rp, F_TRACE_PROCS))
+ trace_proc(NULL, 0, rp, am_getting_unlinked, port_id);
}
erts_destroy_link(rlnk);
}
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
}
}
erts_destroy_link(lnk);
}
+static void
+port_fire_one_monitor(ErtsMonitor *mon, void *ctx0)
+{
+ Process *origin;
+ ErtsProcLocks origin_locks;
+
+ if (mon->type != MON_TARGET || ! is_pid(mon->u.pid)) {
+ return;
+ }
+ /*
+ * Proceed here if someone monitors us, we (port) are the target and
+ * origin is some process
+ */
+ origin_locks = ERTS_PROC_LOCKS_MSG_SEND | ERTS_PROC_LOCK_LINK;
+
+ origin = erts_pid2proc(NULL, 0, mon->u.pid, origin_locks);
+ if (origin) {
+ DeclareTmpHeapNoproc(lhp,3);
+ SweepContext *ctx = (SweepContext *)ctx0;
+ ErtsMonitor *rmon;
+ Eterm watched = (is_atom(mon->name)
+ ? TUPLE2(lhp, mon->name, erts_this_dist_entry->sysname)
+ : ctx->port->common.id);
+
+ erts_queue_monitor_message(origin, &origin_locks, mon->ref, am_port,
+ watched, ctx->reason);
+ UnUseTmpHeapNoproc(3);
+
+ rmon = erts_remove_monitor(&ERTS_P_MONITORS(origin), mon->ref);
+ erts_proc_unlock(origin, origin_locks);
+
+ if (rmon) {
+ erts_destroy_monitor(rmon);
+ }
+ }
+}
+
/* 'from' is sending 'this_port' an exit signal, (this_port must be internal).
* If reason is normal we don't do anything, *unless* from is our connected
* process in which case we close the port. Any other reason kills the port.
@@ -3574,91 +3928,95 @@ static void sweep_one_link(ErtsLink *lnk, void *vpsc)
*/
int
-erts_deliver_port_exit(Port *p, Eterm from, Eterm reason, int send_closed)
+erts_deliver_port_exit(Port *prt, Eterm from, Eterm reason, int send_closed,
+ int drop_normal)
{
ErtsLink *lnk;
- Eterm rreason;
+ Eterm modified_reason;
erts_aint32_t state, set_state_flags;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p));
+ ERTS_CHK_NO_PROC_LOCKS;
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
- rreason = (reason == am_kill) ? am_killed : reason;
+ modified_reason = (reason == am_kill) ? am_killed : reason;
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(port_exit)) {
DTRACE_CHARBUF(from_str, DTRACE_TERM_BUF_SIZE);
DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);
- DTRACE_CHARBUF(rreason_str, 64);
+ DTRACE_CHARBUF(reason_str, 64);
erts_snprintf(from_str, sizeof(DTRACE_CHARBUF_NAME(from_str)), "%T", from);
- dtrace_port_str(p, port_str);
- erts_snprintf(rreason_str, sizeof(DTRACE_CHARBUF_NAME(rreason_str)), "%T", rreason);
- DTRACE4(port_exit, from_str, port_str, p->name, rreason_str);
+ dtrace_port_str(prt, port_str);
+ erts_snprintf(reason_str, sizeof(DTRACE_CHARBUF_NAME(reason_str)), "%T",
+ modified_reason);
+ DTRACE4(port_exit, from_str, port_str, prt->name, reason_str);
}
#endif
- state = erts_atomic32_read_nob(&p->state);
+ state = erts_atomic32_read_nob(&prt->state);
if (state & (ERTS_PORT_SFLGS_DEAD
| ERTS_PORT_SFLG_EXITING
| ERTS_PORT_SFLG_CLOSING))
return 0;
- if (reason == am_normal && from != ERTS_PORT_GET_CONNECTED(p) && from != p->common.id)
+ if (reason == am_normal && from != ERTS_PORT_GET_CONNECTED(prt)
+ && from != prt->common.id && drop_normal) {
return 0;
+ }
set_state_flags = ERTS_PORT_SFLG_EXITING;
if (send_closed)
set_state_flags |= ERTS_PORT_SFLG_SEND_CLOSED;
- erts_port_task_sched_enter_exiting_state(&p->sched);
+ erts_port_task_sched_enter_exiting_state(&prt->sched);
- state = erts_atomic32_read_bor_mb(&p->state, set_state_flags);
+ state = erts_atomic32_read_bor_mb(&prt->state, set_state_flags);
state |= set_state_flags;
- if (IS_TRACED_FL(p, F_TRACE_PORTS)) {
- trace_port(p, am_closed, reason);
- }
+ if (IS_TRACED_FL(prt, F_TRACE_PORTS))
+ trace_port(prt, am_closed, reason);
- erts_trace_check_exiting(p->common.id);
+ erts_trace_check_exiting(prt->common.id);
- set_busy_port(ERTS_Port2ErlDrvPort(p), 0);
+ set_busy_port(ERTS_Port2ErlDrvPort(prt), 0);
- if (p->common.u.alive.reg != NULL)
- (void) erts_unregister_name(NULL, 0, p, p->common.u.alive.reg->name);
+ if (prt->common.u.alive.reg != NULL)
+ (void) erts_unregister_name(NULL, 0, prt, prt->common.u.alive.reg->name);
{
- SweepContext sc = {p->common.id, rreason};
- lnk = ERTS_P_LINKS(p);
- ERTS_P_LINKS(p) = NULL;
+ SweepContext sc = {prt, modified_reason};
+ lnk = ERTS_P_LINKS(prt);
+ ERTS_P_LINKS(prt) = NULL;
erts_sweep_links(lnk, &sweep_one_link, &sc);
}
- DRV_MONITOR_LOCK_PDL(p);
+ DRV_MONITOR_LOCK_PDL(prt);
{
- ErtsMonitor *moni = ERTS_P_MONITORS(p);
- ERTS_P_MONITORS(p) = NULL;
- erts_sweep_monitors(moni, &sweep_one_monitor, NULL);
+ SweepContext ctx = {prt, modified_reason};
+ ErtsMonitor *moni = ERTS_P_MONITORS(prt);
+ ERTS_P_MONITORS(prt) = NULL;
+ erts_sweep_monitors(moni, &sweep_one_monitor, &ctx);
}
- DRV_MONITOR_UNLOCK_PDL(p);
+ DRV_MONITOR_UNLOCK_PDL(prt);
- if ((state & ERTS_PORT_SFLG_DISTRIBUTION) && p->dist_entry) {
- erts_do_net_exits(p->dist_entry, rreason);
- erts_deref_dist_entry(p->dist_entry);
- p->dist_entry = NULL;
- erts_atomic32_read_band_relb(&p->state,
+ if ((state & ERTS_PORT_SFLG_DISTRIBUTION) && prt->dist_entry) {
+ erts_do_net_exits(prt->dist_entry, modified_reason);
+ erts_deref_dist_entry(prt->dist_entry);
+ prt->dist_entry = NULL;
+ erts_atomic32_read_band_relb(&prt->state,
~ERTS_PORT_SFLG_DISTRIBUTION);
}
- if ((reason != am_kill) && !is_port_ioq_empty(p)) {
+ if ((reason != am_kill) && !is_port_ioq_empty(prt)) {
/* must turn exiting flag off */
- erts_atomic32_read_bset_relb(&p->state,
+ erts_atomic32_read_bset_relb(&prt->state,
(ERTS_PORT_SFLG_EXITING
| ERTS_PORT_SFLG_CLOSING),
ERTS_PORT_SFLG_CLOSING);
- flush_port(p);
+ flush_port(prt);
}
else {
- terminate_port(p);
+ terminate_port(prt);
}
return 1;
@@ -3736,6 +4094,7 @@ call_driver_control(Eterm caller,
ErlDrvSizeT *from_size)
{
ErlDrvSSizeT cres;
+ ERTS_MSACC_PUSH_STATE_M();
if (!prt->drv_ptr->control)
return ERTS_PORT_OP_BADARG;
@@ -3750,6 +4109,21 @@ call_driver_control(Eterm caller,
}
#endif
+ if (IS_TRACED_FL(prt, F_TRACE_RECEIVE))
+ trace_port_receive(prt, caller, am_control, command, bufp, size);
+
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_PORT);
+
+#ifdef USE_LTTNG_VM_TRACEPOINTS
+ if (LTTNG_ENABLED(driver_control)) {
+ lttng_decl_procbuf(proc_str);
+ lttng_decl_portbuf(port_str);
+ lttng_pid_to_str(caller, proc_str);
+ lttng_port_to_str(prt, port_str);
+ LTTNG5(driver_control, proc_str, port_str, prt->name, command, size);
+ }
+#endif
+
prt->caller = caller;
cres = prt->drv_ptr->control((ErlDrvData) prt->drv_data,
command,
@@ -3759,9 +4133,14 @@ call_driver_control(Eterm caller,
*from_size);
prt->caller = NIL;
+ ERTS_MSACC_POP_STATE_M();
+
if (cres < 0)
return ERTS_PORT_OP_BADARG;
+ if (IS_TRACED_FL(prt, F_TRACE_SEND))
+ trace_port_send_binary(prt, caller, am_control, *resp_bufp, cres);
+
*from_size = (ErlDrvSizeT) cres;
return ERTS_PORT_OP_DONE;
@@ -3771,8 +4150,7 @@ static void
cleanup_scheduled_control(Binary *binp, char *bufp)
{
if (binp) {
- if (erts_refc_dectest(&binp->refc, 0) == 0)
- erts_bin_free(binp);
+ erts_bin_release(binp);
}
else {
if (bufp)
@@ -3810,7 +4188,6 @@ write_port_control_result(int control_flags,
ErlDrvSizeT resp_size,
char *pre_alloc_buf,
Eterm **hpp,
- ErlHeapFragment *bp,
ErlOffHeap *ohp)
{
Eterm res;
@@ -3884,58 +4261,51 @@ port_sig_control(Port *prt,
if (res == ERTS_PORT_OP_DONE) {
Eterm msg;
- Eterm *hp, *hp_start;
- ErlHeapFragment *bp;
- ErlOffHeap *ohp;
+ ErtsHeapFactory factory;
Process *rp;
ErtsProcLocks rp_locks = 0;
- Uint hsz;
+ Uint hsz, rsz;
int control_flags;
- rp = erts_proc_lookup_raw(sigdp->caller);
+ rp = sigdp->caller == ERTS_INVALID_PID ? NULL : erts_proc_lookup_raw(sigdp->caller);
if (!rp)
goto done;
control_flags = prt->control_flags;
- hsz = ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE;
- hsz += port_control_result_size(control_flags,
+ rsz = port_control_result_size(control_flags,
resp_bufp,
&resp_size,
&resp_buf[0]);
+ hsz = rsz + ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE;
+
- hp_start = hp = erts_alloc_message_heap(hsz,
- &bp,
- &ohp,
- rp,
- &rp_locks);
+ (void) erts_factory_message_create(&factory, rp,
+ &rp_locks, hsz);
msg = write_port_control_result(control_flags,
resp_bufp,
resp_size,
&resp_buf[0],
- &hp,
- bp,
- ohp);
-
+ &factory.hp,
+ factory.off_heap);
queue_port_sched_op_reply(rp,
- &rp_locks,
- hp_start,
- hp,
- hsz,
- bp,
+ rp_locks,
+ &factory,
sigdp->ref,
- msg);
+ msg,
+ prt);
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
goto done;
}
}
/* failure */
- port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg);
+ if (sigdp->caller != ERTS_INVALID_PID)
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg, prt);
done:
@@ -3945,6 +4315,23 @@ done:
return ERTS_PORT_REDS_CONTROL;
}
+/*
+ * This is an asynchronous control call. I.e. it will not return anything
+ * to the caller.
+ */
+int
+erl_drv_port_control(Eterm port_num, char cmd, char* buff, ErlDrvSizeT size)
+{
+ ErtsProc2PortSigData *sigdp = erts_port_task_alloc_p2p_sig_data();
+
+ sigdp->flags = ERTS_P2P_SIG_TYPE_CONTROL | ERTS_P2P_SIG_DATA_FLG_REPLY;
+ sigdp->u.control.binp = NULL;
+ sigdp->u.control.command = cmd;
+ sigdp->u.control.bufp = buff;
+ sigdp->u.control.size = size;
+
+ return erts_schedule_port2port_signal(port_num, sigdp, 0, port_sig_control);
+}
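/*
 * Illustrative sketch (editorial, not part of this change): a
 * fire-and-forget use of the asynchronous call above. The target port
 * id, command byte and buffer are hypothetical; any reply from the
 * target driver's control callback is discarded.
 */
#if 0 /* example only */
static void async_control_example(Eterm target_port, char *buf, ErlDrvSizeT sz)
{
    (void) erl_drv_port_control(target_port, 'c', buf, sz);
}
#endif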
ErtsPortOpResult
erts_port_control(Process* c_p,
@@ -3963,7 +4350,7 @@ erts_port_control(Process* c_p,
int copy;
ErtsProc2PortSigData *sigdp;
- sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ sched_flags = erts_atomic32_read_nob(&prt->sched.flags);
if (sched_flags & ERTS_PTS_FLG_EXIT)
return ERTS_PORT_OP_BADARG;
@@ -4041,6 +4428,9 @@ erts_port_control(Process* c_p,
size,
&resp_bufp,
&resp_size);
+
+ control_flags = prt->control_flags;
+
finalize_imm_drv_call(&try_call_state);
if (tmp_alloced)
erts_free(ERTS_ALC_T_TMP, bufp);
@@ -4048,8 +4438,6 @@ erts_port_control(Process* c_p,
return ERTS_PORT_OP_BADARG;
}
- control_flags = prt->control_flags;
-
hsz = port_control_result_size(control_flags,
resp_bufp,
&resp_size,
@@ -4060,7 +4448,6 @@ erts_port_control(Process* c_p,
resp_size,
&resp_buf[0],
&hp,
- NULL,
&c_p->off_heap);
BUMP_REDS(c_p, ERTS_PORT_REDS_CONTROL);
return ERTS_PORT_OP_DONE;
@@ -4082,18 +4469,32 @@ erts_port_control(Process* c_p,
binp = NULL;
if (is_binary(data) && binary_bitoffset(data) == 0) {
- Eterm *ebinp = binary_val_rel(data, NULL);
+ Eterm *ebinp = binary_val(data);
ASSERT(!tmp_alloced);
if (*ebinp == HEADER_SUB_BIN)
- ebinp = binary_val_rel(((ErlSubBin *) ebinp)->orig, NULL);
+ ebinp = binary_val(((ErlSubBin *) ebinp)->orig);
+
if (*ebinp != HEADER_PROC_BIN)
copy = 1;
else {
- binp = ((ProcBin *) ebinp)->val;
+ ProcBin *pb = (ProcBin *) ebinp;
+ int offset = bufp - pb->val->orig_bytes;
+
+ ASSERT(pb->val->orig_bytes <= bufp
+ && bufp + size <= pb->val->orig_bytes + pb->val->orig_size);
+
+ if (pb->flags) {
+ erts_emasculate_writable_binary(pb);
+
+ /* The procbin may have been reallocated, so update bufp */
+ bufp = pb->val->orig_bytes + offset;
+ }
+
+ binp = pb->val;
ASSERT(bufp <= bufp + size);
ASSERT(binp->orig_bytes <= bufp
&& bufp + size <= binp->orig_bytes + binp->orig_size);
- erts_refc_inc(&binp->refc, 1);
+ erts_refc_inc(&binp->intern.refc, 1);
}
}
@@ -4120,10 +4521,9 @@ erts_port_control(Process* c_p,
0,
NULL,
port_sig_control);
- if (res != ERTS_PORT_OP_SCHEDULED) {
- cleanup_scheduled_control(binp, bufp);
+ if (res != ERTS_PORT_OP_SCHEDULED)
return ERTS_PORT_OP_BADARG;
- }
+
return res;
}
@@ -4138,6 +4538,7 @@ call_driver_call(Eterm caller,
unsigned *ret_flagsp)
{
ErlDrvSSizeT cres;
+ ERTS_MSACC_PUSH_STATE_M();
if (!prt->drv_ptr->call)
return ERTS_PORT_OP_BADARG;
@@ -4152,6 +4553,20 @@ call_driver_call(Eterm caller,
DTRACE5(driver_call, process_str, port_str, prt->name, command, size);
}
#endif
+#ifdef USE_LTTNG_VM_TRACEPOINTS
+ if (LTTNG_ENABLED(driver_call)) {
+ lttng_decl_procbuf(proc_str);
+ lttng_decl_portbuf(port_str);
+ lttng_pid_to_str(caller,proc_str);
+ lttng_port_to_str(prt, port_str);
+ LTTNG5(driver_call, proc_str, port_str, prt->name, command, size);
+ }
+#endif
+
+ if (IS_TRACED_FL(prt, F_TRACE_RECEIVE))
+ trace_port_receive(prt, caller, am_call, command, bufp, size);
+
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_PORT);
prt->caller = caller;
cres = prt->drv_ptr->call((ErlDrvData) prt->drv_data,
@@ -4163,10 +4578,15 @@ call_driver_call(Eterm caller,
ret_flagsp);
prt->caller = NIL;
+ ERTS_MSACC_POP_STATE_M();
+
if (cres <= 0
|| ((byte) (*resp_bufp)[0]) != VERSION_MAGIC)
return ERTS_PORT_OP_BADARG;
+ if (IS_TRACED_FL(prt, F_TRACE_SEND))
+ trace_port_send_binary(prt, caller, am_call, *resp_bufp, cres);
+
*from_size = (ErlDrvSizeT) cres;
return ERTS_PORT_OP_DONE;
@@ -4209,8 +4629,6 @@ port_sig_call(Port *prt,
if (res == ERTS_PORT_OP_DONE) {
Eterm msg;
Eterm *hp;
- ErlHeapFragment *bp;
- ErlOffHeap *ohp;
Process *rp;
ErtsProcLocks rp_locks = 0;
Sint hsz;
@@ -4221,45 +4639,39 @@ port_sig_call(Port *prt,
hsz = erts_decode_ext_size((byte *) resp_bufp, resp_size);
if (hsz >= 0) {
- Eterm *hp_start;
+ ErtsHeapFactory factory;
byte *endp;
hsz += 3; /* ok tuple */
hsz += ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE;
- hp_start = hp = erts_alloc_message_heap(hsz,
- &bp,
- &ohp,
- rp,
- &rp_locks);
+ (void) erts_factory_message_create(&factory, rp, &rp_locks, hsz);
endp = (byte *) resp_bufp;
- msg = erts_decode_ext(&hp, ohp, &endp);
+ msg = erts_decode_ext(&factory, &endp, 0);
if (is_value(msg)) {
+ hp = erts_produce_heap(&factory,
+ 3,
+ ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE);
msg = TUPLE2(hp, am_ok, msg);
- hp += 3;
queue_port_sched_op_reply(rp,
- &rp_locks,
- hp_start,
- hp,
- hsz,
- bp,
+ rp_locks,
+ &factory,
sigdp->ref,
- msg);
+ msg,
+ prt);
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
goto done;
}
- if (bp)
- free_message_buffer(bp);
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
}
}
}
- port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg);
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg, prt);
done:
@@ -4288,7 +4700,7 @@ erts_port_call(Process* c_p,
erts_aint32_t sched_flags;
ErtsProc2PortSigData *sigdp;
- sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ sched_flags = erts_atomic32_read_nob(&prt->sched.flags);
if (sched_flags & ERTS_PTS_FLG_EXIT) {
return ERTS_PORT_OP_BADARG;
}
@@ -4329,10 +4741,11 @@ erts_port_call(Process* c_p,
try_call_res = try_imm_drv_call(&try_call_state);
switch (try_call_res) {
case ERTS_TRY_IMM_DRV_CALL_OK: {
- Eterm *hp, *hp_end;
+ ErtsHeapFactory factory;
Sint hsz;
unsigned ret_flags = 0U;
Eterm term;
+ Eterm* hp;
res = call_driver_call(c_p->common.id,
prt,
@@ -4352,15 +4765,14 @@ erts_port_call(Process* c_p,
if (hsz < 0)
return ERTS_PORT_OP_BADARG;
hsz += 3;
- hp = HAlloc(c_p, hsz);
- hp_end = hp + hsz;
+ erts_factory_proc_prealloc_init(&factory, c_p, hsz);
endp = (byte *) resp_bufp;
- term = erts_decode_ext(&hp, &MSO(c_p), &endp);
+ term = erts_decode_ext(&factory, &endp, 0);
if (term == THE_NON_VALUE)
return ERTS_PORT_OP_BADARG;
+ hp = erts_produce_heap(&factory,3,0);
*retvalp = TUPLE2(hp, am_ok, term);
- hp += 3;
- HRelease(c_p, hp_end, hp);
+ erts_factory_close(&factory);
if (resp_bufp != &resp_buf[0]
&& !(ret_flags & DRIVER_CALL_KEEP_BUFFER))
driver_free(resp_bufp);
@@ -4401,10 +4813,9 @@ erts_port_call(Process* c_p,
0,
NULL,
port_sig_call);
- if (res != ERTS_PORT_OP_SCHEDULED) {
- cleanup_scheduled_call(bufp);
+ if (res != ERTS_PORT_OP_SCHEDULED)
return ERTS_PORT_OP_BADARG;
- }
+
return res;
}
@@ -4438,7 +4849,7 @@ make_port_info_term(Eterm **hpp_start,
int len;
int start;
static Eterm item[] = ERTS_PORT_INFO_1_ITEMS;
- static Eterm value[sizeof(item)/sizeof(item[0])];
+ Eterm value[sizeof(item)/sizeof(item[0])];
start = 0;
len = sizeof(item)/sizeof(item[0]);
@@ -4474,7 +4885,7 @@ port_sig_info(Port *prt,
{
ASSERT(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY);
if (op != ERTS_PROC2PORT_SIG_EXEC)
- port_sched_op_reply(sigdp->caller, sigdp->ref, am_undefined);
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_undefined, prt);
else {
Eterm *hp, *hp_start;
Uint hsz;
@@ -4495,17 +4906,19 @@ port_sig_info(Port *prt,
prt,
sigdp->u.info.item);
if (is_value(value)) {
+ ErtsHeapFactory factory;
+ ErtsMessage *mp = erts_alloc_message(0, NULL);
+ mp->data.heap_frag = bp;
+ erts_factory_selfcontained_message_init(&factory, mp, hp);
queue_port_sched_op_reply(rp,
- &rp_locks,
- hp_start,
- hp,
- hsz,
- bp,
+ rp_locks,
+ &factory,
sigdp->ref,
- value);
+ value,
+ prt);
}
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
}
return ERTS_PORT_REDS_INFO;
}
@@ -4571,14 +4984,109 @@ erts_port_info(Process* c_p,
}
typedef struct {
- int to;
+ Uint sched_id;
+ Eterm pid;
+ Uint32 refn[ERTS_REF_NUMBERS];
+ erts_atomic32_t refc;
+} ErtsIOBytesReq;
+
+static void
+reply_io_bytes(void *vreq)
+{
+ ErtsIOBytesReq *req = (ErtsIOBytesReq *) vreq;
+ Process *rp;
+
+ rp = erts_proc_lookup(req->pid);
+ if (rp) {
+ ErlOffHeap *ohp;
+ ErtsMessage *mp;
+ ErtsProcLocks rp_locks;
+ Eterm ref, msg, ein, eout, *hp;
+ Uint64 in, out;
+ Uint hsz;
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ Uint sched_id = esdp->no;
+ in = esdp->io.in;
+ out = esdp->io.out;
+ if (req->sched_id != sched_id)
+ rp_locks = 0;
+ else {
+ in += (Uint64) erts_atomic64_read_nob(&bytes_in);
+ out += (Uint64) erts_atomic64_read_nob(&bytes_out);
+ rp_locks = ERTS_PROC_LOCK_MAIN;
+ }
+
+ hsz = 5 /* 4-tuple */ + ERTS_REF_THING_SIZE;
+
+ erts_bld_uint64(NULL, &hsz, in);
+ erts_bld_uint64(NULL, &hsz, out);
+
+ mp = erts_alloc_message_heap(rp, &rp_locks, hsz, &hp, &ohp);
+
+ ref = make_internal_ref(hp);
+ write_ref_thing(hp, req->refn[0], req->refn[1], req->refn[2]);
+ hp += ERTS_REF_THING_SIZE;
+
+ ein = erts_bld_uint64(&hp, NULL, in);
+ eout = erts_bld_uint64(&hp, NULL, out);
+
+ msg = TUPLE4(hp, ref, make_small(sched_id), ein, eout);
+
+ erts_queue_message(rp, rp_locks, mp, msg, am_system);
+
+ if (req->sched_id == sched_id)
+ rp_locks &= ~ERTS_PROC_LOCK_MAIN;
+ if (rp_locks)
+ erts_proc_unlock(rp, rp_locks);
+ }
+
+ if (erts_atomic32_dec_read_nob(&req->refc) == 0)
+ erts_free(ERTS_ALC_T_IOB_REQ, req);
+}
+
+Eterm
+erts_request_io_bytes(Process *c_p)
+{
+ Uint *hp;
+ Eterm ref;
+ Uint32 *refn;
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
+ ErtsIOBytesReq *req = erts_alloc(ERTS_ALC_T_IOB_REQ,
+ sizeof(ErtsIOBytesReq));
+
+ hp = HAlloc(c_p, ERTS_REF_THING_SIZE);
+ ref = erts_sched_make_ref_in_buffer(esdp, hp);
+ refn = internal_ref_numbers(ref);
+
+ req->sched_id = esdp->no;
+ req->pid = c_p->common.id;
+ req->refn[0] = refn[0];
+ req->refn[1] = refn[1];
+ req->refn[2] = refn[2];
+ erts_atomic32_init_nob(&req->refc,
+ (erts_aint32_t) erts_no_schedulers);
+
+ if (erts_no_schedulers > 1)
+ erts_schedule_multi_misc_aux_work(1,
+ erts_no_schedulers,
+ reply_io_bytes,
+ (void *) req);
+
+ reply_io_bytes((void *) req);
+
+ return ref;
+}
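/*
 * Reply protocol, summarizing the code above: every scheduler runs
 * reply_io_bytes() and sends one {Ref, SchedulerId, BytesIn, BytesOut}
 * tuple to the requesting process, which is expected to collect one
 * reply per scheduler and sum the counters. The atomic refc on the
 * request struct frees it after the last scheduler has replied.
 */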
+
+
+typedef struct {
+ fmtfn_t to;
void *arg;
} prt_one_lnk_data;
static void prt_one_monitor(ErtsMonitor *mon, void *vprtd)
{
prt_one_lnk_data *prtd = (prt_one_lnk_data *) vprtd;
- erts_print(prtd->to, prtd->arg, "(%T,%T)", mon->pid,mon->ref);
+ erts_print(prtd->to, prtd->arg, "(%T,%T)", mon->u.pid, mon->ref);
}
static void prt_one_lnk(ErtsLink *lnk, void *vprtd)
@@ -4588,7 +5096,7 @@ static void prt_one_lnk(ErtsLink *lnk, void *vprtd)
}
void
-print_port_info(Port *p, int to, void *arg)
+print_port_info(Port *p, fmtfn_t to, void *arg)
{
erts_aint32_t state = erts_atomic32_read_nob(&p->state);
@@ -4628,6 +5136,10 @@ print_port_info(Port *p, int to, void *arg)
erts_print(to, arg, "Port is a file: %s\n",p->name);
} else if (p->drv_ptr == &spawn_driver) {
erts_print(to, arg, "Port controls external process: %s\n",p->name);
+#ifndef __WIN32__
+ } else if (p->drv_ptr == &forker_driver) {
+ erts_print(to, arg, "Port controls forker process: %s\n",p->name);
+#endif
} else {
erts_print(to, arg, "Port controls linked-in driver: %s\n",p->name);
}
@@ -4643,14 +5155,14 @@ set_busy_port(ErlDrvPort dprt, int on)
DTRACE_CHARBUF(port_str, 16);
#endif
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
prt = erts_drvport2port(dprt);
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return;
if (on) {
- flags = erts_smp_atomic32_read_bor_acqb(&prt->sched.flags,
+ flags = erts_atomic32_read_bor_acqb(&prt->sched.flags,
ERTS_PTS_FLG_BUSY_PORT);
if (flags & ERTS_PTS_FLG_BUSY_PORT)
return; /* Already busy */
@@ -4666,7 +5178,7 @@ set_busy_port(ErlDrvPort dprt, int on)
}
#endif
} else {
- flags = erts_smp_atomic32_read_band_acqb(&prt->sched.flags,
+ flags = erts_atomic32_read_band_acqb(&prt->sched.flags,
~ERTS_PTS_FLG_BUSY_PORT);
if (!(flags & ERTS_PTS_FLG_BUSY_PORT))
return; /* Already non-busy */
@@ -4760,7 +5272,7 @@ int get_port_flags(ErlDrvPort ix)
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return 0;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
flags = 0;
if (state & ERTS_PORT_SFLG_BINARY_IO)
@@ -4774,12 +5286,13 @@ int get_port_flags(ErlDrvPort ix)
void erts_raw_port_command(Port* p, byte* buf, Uint len)
{
int fpe_was_unmasked;
+ ERTS_MSACC_PUSH_STATE_M();
- ERTS_SMP_CHK_NO_PROC_LOCKS;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p));
+ ERTS_CHK_NO_PROC_LOCKS;
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(p));
if (len > (Uint) INT_MAX)
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"Absurdly large data buffer (%beu bytes) passed to"
"output callback of %s driver.\n",
len,
@@ -4794,28 +5307,41 @@ void erts_raw_port_command(Port* p, byte* buf, Uint len)
DTRACE4(driver_output, "-raw-", port_str, p->name, len);
}
#endif
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_PORT);
fpe_was_unmasked = erts_block_fpe();
(*p->drv_ptr->output)((ErlDrvData)p->drv_data, (char*) buf, (int) len);
erts_unblock_fpe(fpe_was_unmasked);
+ ERTS_MSACC_POP_STATE_M();
}
int async_ready(Port *p, void* data)
{
int need_free = 1;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
if (p) {
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(p));
if (p->drv_ptr->ready_async != NULL) {
+ ERTS_MSACC_PUSH_AND_SET_STATE_M(ERTS_MSACC_STATE_PORT);
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(driver_ready_async)) {
DTRACE_FORMAT_COMMON_PID_AND_PORT(ERTS_PORT_GET_CONNECTED(p), p)
DTRACE3(driver_ready_async, process_str, port_str, p->name);
}
#endif
+#ifdef USE_LTTNG_VM_TRACEPOINTS
+ if (LTTNG_ENABLED(driver_ready_async)) {
+ lttng_decl_portbuf(port_str);
+ lttng_decl_procbuf(proc_str);
+ lttng_pid_to_str(ERTS_PORT_GET_CONNECTED(p), proc_str);
+ lttng_port_to_str(p, port_str);
+ LTTNG3(driver_ready_async, proc_str, port_str, p->name);
+ }
+#endif
(*p->drv_ptr->ready_async)((ErlDrvData)p->drv_data, data);
need_free = 0;
+ ERTS_MSACC_POP_STATE_M();
}
erts_port_driver_callback_epilogue(p, NULL);
@@ -4864,24 +5390,17 @@ erts_stale_drv_select(Eterm port,
switch (mode) {
case ERL_DRV_READ | ERL_DRV_WRITE:
type = "Input/Output";
- goto deselect;
case ERL_DRV_WRITE:
type = "Output";
- goto deselect;
case ERL_DRV_READ:
type = "Input";
- deselect:
- if (deselect) {
- driver_select(drv_port, hndl,
- mode | ERL_DRV_USE_NO_CALLBACK,
- 0);
- }
- break;
default:
- type = "Event";
- if (deselect)
- driver_event(drv_port, hndl, NULL);
- break;
+ type = "";
+ }
+ if (deselect) {
+ driver_select(drv_port, hndl,
+ mode | ERL_DRV_USE_NO_CALLBACK,
+ 0);
}
dsbufp = erts_create_logger_dsbuf();
@@ -4959,24 +5478,6 @@ erts_free_port_names(ErtsPortNames *pnp)
erts_free(ERTS_ALC_T_PORT_NAMES, pnp);
}
-static void schedule_port_timeout(Port *p)
-{
- /*
- * Scheduling of port timeouts can be done without port locking, but
- * since the task handle is stored in the port structure and the ptimer
- * structure is protected by the port lock we require the port to be
- * locked for now...
- *
- * TODO: Implement scheduling of port timeouts without locking
- * the port.
- * /Rickard
- */
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p));
- erts_port_task_schedule(p->common.id,
- &p->timeout_task,
- ERTS_PORT_TASK_TIMEOUT);
-}
-
ErlDrvTermData driver_mk_term_nil(void)
{
return driver_term_nil;
@@ -4985,45 +5486,46 @@ ErlDrvTermData driver_mk_term_nil(void)
void driver_report_exit(ErlDrvPort ix, int status)
{
Eterm* hp;
+ ErlOffHeap *ohp;
Eterm tuple;
Process *rp;
Eterm pid;
- ErlHeapFragment *bp = NULL;
- ErlOffHeap *ohp;
+ ErtsMessage *mp;
ErtsProcLocks rp_locks = 0;
int scheduler = erts_get_scheduler_id() != 0;
Port* prt = erts_drvport2port(ix);
+ int trace_send = IS_TRACED_FL(prt, F_TRACE_SEND);
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_CHK_NO_PROC_LOCKS;
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
pid = ERTS_PORT_GET_CONNECTED(prt);
ASSERT(is_internal_pid(pid));
rp = (scheduler
? erts_proc_lookup(pid)
- : erts_pid2proc_opt(NULL, 0, pid, 0, ERTS_P2P_FLG_SMP_INC_REFC));
+ : erts_pid2proc_opt(NULL, 0, pid, 0, ERTS_P2P_FLG_INC_REFC));
if (!rp)
return;
- hp = erts_alloc_message_heap(3+3, &bp, &ohp, rp, &rp_locks);
+ mp = erts_alloc_message_heap(trace_send ? NULL : rp, &rp_locks, 3+3, &hp, &ohp);
tuple = TUPLE2(hp, am_exit_status, make_small(status));
hp += 3;
tuple = TUPLE2(hp, prt->common.id, tuple);
- erts_queue_message(rp, &rp_locks, bp, tuple, am_undefined
-#ifdef USE_VM_PROBES
- , NIL
-#endif
- );
+ if (IS_TRACED_FL(prt, F_TRACE_SEND))
+ trace_port_send(prt, pid, tuple, 1);
+
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
+ erts_queue_message(rp, rp_locks, mp, tuple, prt->common.id);
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
if (!scheduler)
- erts_smp_proc_dec_refc(rp);
+ erts_proc_dec_refc(rp);
}
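/*
 * Shape of the message built above: the connected process receives
 * {Port, {exit_status, Status}}, where Status is the integer passed
 * in by the driver.
 */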
#define ERTS_B2T_STATES_DEF_STATES_SZ 5
@@ -5113,24 +5615,28 @@ cleanup_b2t_states(struct b2t_states__ *b2tsp)
*/
static int
-driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
+driver_deliver_term(Port *prt, Eterm to, ErlDrvTermData* data, int len)
{
+#define HEAP_EXTRA 200
#define ERTS_DDT_FAIL do { res = -1; goto done; } while (0)
Uint need = 0;
int depth = 0;
- int res;
- Eterm *hp = NULL, *hp_start = NULL, *hp_end = NULL;
+ int res = 0;
ErlDrvTermData* ptr;
ErlDrvTermData* ptr_end;
DECLARE_ESTACK(stack);
- Eterm mess = NIL; /* keeps compiler happy */
+ Eterm mess;
Process* rp = NULL;
- ErlHeapFragment *bp = NULL;
- ErlOffHeap *ohp;
+ ErtsHeapFactory factory;
ErtsProcLocks rp_locks = 0;
struct b2t_states__ b2t;
- int scheduler = 1; /* Silence erroneous warning... */
+ int scheduler;
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+
+ ERTS_UNDEF(mess,NIL);
+ ERTS_UNDEF(scheduler,1);
+ factory.mode = FACTORY_CLOSED;
init_b2t_states(&b2t);
/*
@@ -5163,25 +5669,17 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
break;
case ERL_DRV_INT: /* signed int argument */
ERTS_DDT_CHK_ENOUGH_ARGS(1);
-#if HALFWORD_HEAP
- erts_bld_sint64(NULL, &need, (Sint64)ptr[0]);
-#else
/* check for bignum */
if (!IS_SSMALL((Sint)ptr[0]))
need += BIG_UINT_HEAP_SIZE; /* use small_to_big */
-#endif
ptr++;
depth++;
break;
case ERL_DRV_UINT: /* unsigned int argument */
ERTS_DDT_CHK_ENOUGH_ARGS(1);
-#if HALFWORD_HEAP
- erts_bld_uint64(NULL, &need, (Uint64)ptr[0]);
-#else
/* check for bignum */
if (!IS_USMALL(0, (Uint)ptr[0]))
need += BIG_UINT_HEAP_SIZE; /* use small_to_big */
-#endif
ptr++;
depth++;
break;
@@ -5294,9 +5792,10 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
#ifdef DEBUG
b2t.org_ext[b2t.ix] = ext;
#endif
- hsz = erts_binary2term_prepare(&b2t.state[b2t.ix++], ext, size);
+ hsz = erts_binary2term_prepare(&b2t.state[b2t.ix], ext, size);
if (hsz < 0)
ERTS_DDT_FAIL; /* Invalid data */
+ b2t.state[b2t.ix++].heap_size = hsz;
need += hsz;
ptr += 2;
depth++;
@@ -5305,7 +5804,11 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
case ERL_DRV_MAP: { /* int */
ERTS_DDT_CHK_ENOUGH_ARGS(1);
if ((int) ptr[0] < 0) ERTS_DDT_FAIL;
- need += MAP_HEADER_SIZE + 1 + 2*ptr[0];
+ if (ptr[0] > MAP_SMALL_MAP_LIMIT) {
+ need += HASHMAP_ESTIMATED_HEAP_SIZE(ptr[0]);
+ } else {
+ need += MAP_HEADER_FLATMAP_SZ + 1 + 2*ptr[0];
+ }
depth -= 2*ptr[0];
if (depth < 0) ERTS_DDT_FAIL;
ptr++;
@@ -5335,14 +5838,28 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
scheduler = erts_get_scheduler_id() != 0;
rp = (scheduler
? erts_proc_lookup(to)
- : erts_pid2proc_opt(NULL, 0, to, 0, ERTS_P2P_FLG_SMP_INC_REFC));
+ : erts_pid2proc_opt(NULL, 0, to, 0, ERTS_P2P_FLG_INC_REFC));
if (!rp) {
- res = 0;
- goto done;
- }
+ if (!prt || !IS_TRACED_FL(prt, F_TRACE_SEND))
+ goto done;
+ if (!erts_is_tracer_proc_enabled_send(NULL, 0, &prt->common))
+ goto done;
- hp_start = hp = erts_alloc_message_heap(need, &bp, &ohp, rp, &rp_locks);
- hp_end = hp + need;
+ res = -2;
+
+	/* We allocate a temporary heap on which to build the message
+	   that is only used for send tracing */
+ erts_factory_tmp_init(&factory, erts_alloc(ERTS_ALC_T_DRIVER, need*sizeof(Eterm)),
+ need, ERTS_ALC_T_DRIVER);
+
+ } else {
+	/* When send tracing, we force the creation of a heap fragment
+	   (by passing a NULL process) so that we do not hold the main
+	   lock of the process while tracing */
+ Process *trace_rp = prt && IS_TRACED_FL(prt, F_TRACE_SEND) ? NULL : rp;
+ (void) erts_factory_message_create(&factory, trace_rp, &rp_locks, need);
+ res = 1;
+ }
/*
* Interpret the instructions and build the term.
@@ -5362,40 +5879,36 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
break;
case ERL_DRV_INT: /* signed int argument */
-#if HALFWORD_HEAP
- mess = erts_bld_sint64(&hp, NULL, (Sint64)ptr[0]);
-#else
+ erts_reserve_heap(&factory, BIG_UINT_HEAP_SIZE);
if (IS_SSMALL((Sint)ptr[0]))
mess = make_small((Sint)ptr[0]);
else {
- mess = small_to_big((Sint)ptr[0], hp);
- hp += BIG_UINT_HEAP_SIZE;
+ mess = small_to_big((Sint)ptr[0], factory.hp);
+ factory.hp += BIG_UINT_HEAP_SIZE;
}
-#endif
ptr++;
break;
case ERL_DRV_UINT: /* unsigned int argument */
-#if HALFWORD_HEAP
- mess = erts_bld_uint64(&hp, NULL, (Uint64)ptr[0]);
-#else
+ erts_reserve_heap(&factory, BIG_UINT_HEAP_SIZE);
if (IS_USMALL(0, (Uint)ptr[0]))
mess = make_small((Uint)ptr[0]);
else {
- mess = uint_to_big((Uint)ptr[0], hp);
- hp += BIG_UINT_HEAP_SIZE;
+ mess = uint_to_big((Uint)ptr[0], factory.hp);
+ factory.hp += BIG_UINT_HEAP_SIZE;
}
-#endif
ptr++;
break;
case ERL_DRV_INT64: /* pointer to unsigned 64-bit int argument */
- mess = erts_bld_sint64(&hp, NULL, *((Sint64 *) ptr[0]));
+ erts_reserve_heap(&factory, BIG_NEED_FOR_BITS(64));
+ mess = erts_bld_sint64(&factory.hp, NULL, *((Sint64 *) ptr[0]));
ptr++;
break;
case ERL_DRV_UINT64: /* pointer to unsigned 64-bit int argument */
- mess = erts_bld_uint64(&hp, NULL, *((Uint64 *) ptr[0]));
+ erts_reserve_heap(&factory, BIG_NEED_FOR_BITS(64));
+ mess = erts_bld_uint64(&factory.hp, NULL, *((Uint64 *) ptr[0]));
ptr++;
break;
@@ -5409,11 +5922,14 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
Uint size = ptr[1];
Uint offset = ptr[2];
- erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) size);
+ if (esdp)
+ esdp->io.in += (Uint64) size;
+ else
+ erts_atomic64_add_nob(&bytes_in, (erts_aint64_t) size);
if (size <= ERL_ONHEAP_BIN_LIMIT) {
- ErlHeapBin* hbp = (ErlHeapBin *) hp;
- hp += heap_bin_size(size);
+ ErlHeapBin* hbp = (ErlHeapBin *) erts_produce_heap(&factory,
+ heap_bin_size(size), HEAP_EXTRA);
hbp->thing_word = header_heap_bin(size);
hbp->size = size;
if (size > 0) {
@@ -5422,18 +5938,18 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
mess = make_binary(hbp);
}
else {
- ProcBin* pb = (ProcBin *) hp;
+ ProcBin* pb = (ProcBin *) erts_produce_heap(&factory,
+ PROC_BIN_SIZE, HEAP_EXTRA);
driver_binary_inc_refc(b); /* caller will free binary */
pb->thing_word = HEADER_PROC_BIN;
pb->size = size;
- pb->next = ohp->first;
- ohp->first = (struct erl_off_heap_header*)pb;
+ pb->next = factory.off_heap->first;
+ factory.off_heap->first = (struct erl_off_heap_header*)pb;
pb->val = ErlDrvBinary2Binary(b);
pb->bytes = ((byte*) b->orig_bytes) + offset;
pb->flags = 0;
mess = make_binary(pb);
- hp += PROC_BIN_SIZE;
- OH_OVERHEAD(ohp, pb->size / sizeof(Eterm));
+ OH_OVERHEAD(factory.off_heap, pb->size / sizeof(Eterm));
}
ptr += 3;
break;
@@ -5443,11 +5959,15 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
byte *bufp = (byte *) ptr[0];
Uint size = (Uint) ptr[1];
- erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) size);
+ if (esdp)
+ esdp->io.in += (Uint64) size;
+ else
+ erts_atomic64_add_nob(&bytes_in, (erts_aint64_t) size);
if (size <= ERL_ONHEAP_BIN_LIMIT) {
- ErlHeapBin* hbp = (ErlHeapBin *) hp;
- hp += heap_bin_size(size);
+ ErlHeapBin* hbp = (ErlHeapBin *) erts_produce_heap(&factory,
+ heap_bin_size(size),
+ HEAP_EXTRA);
hbp->thing_word = header_heap_bin(size);
hbp->size = size;
if (size > 0) {
@@ -5460,20 +5980,17 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
ProcBin* pbp;
Binary* bp = erts_bin_nrml_alloc(size);
ASSERT(bufp);
- bp->flags = 0;
- bp->orig_size = (SWord) size;
- erts_refc_init(&bp->refc, 1);
sys_memcpy((void *) bp->orig_bytes, (void *) bufp, size);
- pbp = (ProcBin *) hp;
- hp += PROC_BIN_SIZE;
+ pbp = (ProcBin *) erts_produce_heap(&factory,
+ PROC_BIN_SIZE, HEAP_EXTRA);
pbp->thing_word = HEADER_PROC_BIN;
pbp->size = size;
- pbp->next = ohp->first;
- ohp->first = (struct erl_off_heap_header*)pbp;
+ pbp->next = factory.off_heap->first;
+ factory.off_heap->first = (struct erl_off_heap_header*)pbp;
pbp->val = bp;
pbp->bytes = (byte*) bp->orig_bytes;
pbp->flags = 0;
- OH_OVERHEAD(ohp, pbp->size / sizeof(Eterm));
+ OH_OVERHEAD(factory.off_heap, pbp->size / sizeof(Eterm));
mess = make_binary(pbp);
}
ptr += 2;
@@ -5481,14 +5998,19 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
}
case ERL_DRV_STRING: /* char*, length */
- erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) ptr[1]);
- mess = buf_to_intlist(&hp, (char*)ptr[0], ptr[1], NIL);
+ if (esdp)
+ esdp->io.in += (Uint64) ptr[1];
+ else
+ erts_atomic64_add_nob(&bytes_in, (erts_aint64_t) ptr[1]);
+ erts_reserve_heap(&factory, 2*ptr[1]);
+ mess = buf_to_intlist(&factory.hp, (char*)ptr[0], ptr[1], NIL);
ptr += 2;
break;
case ERL_DRV_STRING_CONS: /* char*, length */
mess = ESTACK_POP(stack);
- mess = buf_to_intlist(&hp, (char*)ptr[0], ptr[1], mess);
+ erts_reserve_heap(&factory, 2*ptr[1]);
+ mess = buf_to_intlist(&factory.hp, (char*)ptr[0], ptr[1], mess);
ptr += 2;
break;
@@ -5497,11 +6019,12 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
mess = ESTACK_POP(stack);
i--;
+ erts_reserve_heap(&factory, 2*i);
while(i > 0) {
Eterm hd = ESTACK_POP(stack);
- mess = CONS(hp, hd, mess);
- hp += 2;
+ mess = CONS(factory.hp, hd, mess);
+ factory.hp += 2;
i--;
}
ptr++;
@@ -5510,13 +6033,12 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
case ERL_DRV_TUPLE: { /* int */
int size = (int)ptr[0];
- Eterm* tp = hp;
+ Eterm* tp = erts_produce_heap(&factory, size+1, HEAP_EXTRA);
*tp = make_arityval(size);
mess = make_tuple(tp);
tp += size; /* point at last element */
- hp = tp+1; /* advance "heap" pointer */
while(size--) {
*tp-- = ESTACK_POP(stack);
@@ -5532,18 +6054,22 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
case ERL_DRV_FLOAT: { /* double * */
FloatDef f;
+ Eterm* fp = erts_produce_heap(&factory, FLOAT_SIZE_OBJECT, HEAP_EXTRA);
- mess = make_float(hp);
+ mess = make_float(fp);
f.fd = *((double *) ptr[0]);
- PUT_DOUBLE(f, hp);
- hp += FLOAT_SIZE_OBJECT;
+ if (!erts_isfinite(f.fd))
+ ERTS_DDT_FAIL;
+ PUT_DOUBLE(f, fp);
ptr++;
break;
}
case ERL_DRV_EXT2TERM: /* char *ext, int size */
ASSERT(b2t.org_ext[b2t.ix] == (byte *) ptr[0]);
- mess = erts_binary2term_create(&b2t.state[b2t.ix++], &hp, ohp);
+
+ erts_reserve_heap(&factory, b2t.state[b2t.ix].heap_size);
+ mess = erts_binary2term_create(&b2t.state[b2t.ix++], &factory);
if (mess == THE_NON_VALUE)
ERTS_DDT_FAIL;
ptr += 2;
@@ -5551,31 +6077,43 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
case ERL_DRV_MAP: { /* int */
int size = (int)ptr[0];
- Eterm* tp = hp;
- Eterm* vp;
- map_t *mp;
-
- *tp = make_arityval(size);
-
- hp += 1 + size;
- mp = (map_t*)hp;
- mp->thing_word = MAP_HEADER;
- mp->size = size;
- mp->keys = make_tuple(tp);
- mess = make_map(mp);
-
- hp += MAP_HEADER_SIZE + size; /* advance "heap" pointer */
-
- tp += size; /* point at last key */
- vp = hp - 1; /* point at last value */
-
- while(size--) {
- *vp-- = ESTACK_POP(stack);
- *tp-- = ESTACK_POP(stack);
- }
- if (!erts_validate_and_sort_map(mp))
- ERTS_DDT_FAIL;
- ptr++;
+ if (size > MAP_SMALL_MAP_LIMIT) {
+ int ix = 2*size;
+ Eterm* leafs;
+
+ erts_produce_heap(&factory, ix, HEAP_EXTRA);
+ leafs = factory.hp;
+ while(ix--) { *--leafs = ESTACK_POP(stack); }
+
+ mess = erts_hashmap_from_array(&factory, leafs, size, 1);
+ if (is_non_value(mess))
+ ERTS_DDT_FAIL;
+ } else {
+ Eterm* vp;
+ flatmap_t *mp;
+ Eterm* tp = erts_produce_heap(&factory,
+ 2*size + 1 + MAP_HEADER_FLATMAP_SZ,
+ HEAP_EXTRA);
+
+ *tp = make_arityval(size);
+
+ mp = (flatmap_t*) (tp + 1 + size);
+ mp->thing_word = MAP_HEADER_FLATMAP;
+ mp->size = size;
+ mp->keys = make_tuple(tp);
+ mess = make_flatmap(mp);
+
+ tp += size; /* point at last key */
+ vp = factory.hp - 1; /* point at last value */
+
+ while(size--) {
+ *vp-- = ESTACK_POP(stack);
+ *tp-- = ESTACK_POP(stack);
+ }
+ if (!erts_validate_and_sort_flatmap(mp))
+ ERTS_DDT_FAIL;
+ }
+ ptr++;
break;
}
@@ -5583,95 +6121,109 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
ESTACK_PUSH(stack, mess);
}
- res = 1;
-
done:
if (res > 0) {
+ Eterm from = am_undefined;
mess = ESTACK_POP(stack); /* get resulting value */
- if (bp)
- bp = erts_resize_message_buffer(bp, hp - hp_start, &mess, 1);
- else {
- ASSERT(hp);
- HRelease(rp, hp_end, hp);
+ erts_factory_trim_and_close(&factory, &mess, 1);
+
+ if (prt) {
+ if (IS_TRACED_FL(prt, F_TRACE_SEND)) {
+ trace_port_send(prt, to, mess, 1);
+ }
+ from = prt->common.id;
}
+
/* send message */
- erts_queue_message(rp, &rp_locks, bp, mess, am_undefined
-#ifdef USE_VM_PROBES
- , NIL
-#endif
- );
+ ERL_MESSAGE_TOKEN(factory.message) = am_undefined;
+ erts_queue_message(rp, rp_locks, factory.message, mess, from);
+ }
+ else if (res == -2) {
+	/* this clause is only reached when we were requested to
+	   generate a send trace, but the process to send to no
+	   longer existed */
+ mess = ESTACK_POP(stack); /* get resulting value */
+
+ trace_port_send(prt, to, mess, 0);
+
+ erts_factory_trim_and_close(&factory, &mess, 1);
+ erts_free(ERTS_ALC_T_DRIVER, factory.hp_start);
+ res = 0;
}
else {
if (b2t.ix > b2t.used)
b2t.used = b2t.ix;
for (b2t.ix = 0; b2t.ix < b2t.used; b2t.ix++)
erts_binary2term_abort(&b2t.state[b2t.ix]);
- if (bp)
- free_message_buffer(bp);
- else if (hp) {
- HRelease(rp, hp_end, hp);
- }
+ if (factory.mode != FACTORY_CLOSED) {
+ ERL_MESSAGE_TERM(factory.message) = am_undefined;
+ erts_factory_undo(&factory);
+ }
}
-#ifdef ERTS_SMP
if (rp) {
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
if (!scheduler)
- erts_smp_proc_dec_refc(rp);
+ erts_proc_dec_refc(rp);
}
-#endif
cleanup_b2t_states(&b2t);
DESTROY_ESTACK(stack);
return res;
#undef ERTS_DDT_FAIL
+#undef HEAP_EXTRA
}
static ERTS_INLINE int
-deliver_term_check_port(ErlDrvTermData port_id, Eterm *connected_p)
+deliver_term_check_port(ErlDrvTermData port_id, Eterm *connected_p,
+ Port **trace_prt)
{
-#ifdef ERTS_SMP
ErtsThrPrgrDelayHandle dhndl = erts_thr_progress_unmanaged_delay();
-#endif
erts_aint32_t state;
+ int res = 1;
Port *prt = erts_port_lookup_raw((Eterm) port_id);
- if (!prt)
- return -1;
+ if (!prt) {
+ res = -1;
+ goto done;
+ }
state = erts_atomic32_read_nob(&prt->state);
if (state & (ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
| ERTS_PORT_SFLG_CLOSING)) {
if (state & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
- return -1;
+ res = -1;
else
- return 0;
+ res = 0;
+ goto done;
}
if (connected_p) {
-#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
ETHR_MEMBAR(ETHR_LoadLoad);
-#endif
*connected_p = ERTS_PORT_GET_CONNECTED(prt);
}
-#ifdef ERTS_SMP
+
+done:
+
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) {
+ ERTS_LC_ASSERT(!prt || !erts_lc_is_port_locked(prt));
erts_thr_progress_unmanaged_continue(dhndl);
ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+ } else
+ if (res == 1) {
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
+ *trace_prt = prt;
}
-#endif
- ERTS_SMP_LC_ASSERT(dhndl == ERTS_THR_PRGR_DHANDLE_MANAGED
- ? erts_lc_is_port_locked(prt)
- : !erts_lc_is_port_locked(prt));
- return 1;
+ return res;
}
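/*
 * Return values, as implemented above: 1 = port alive and usable,
 * with *trace_prt set when running on a managed (scheduler) thread
 * holding the port lock; 0 = port is closing; -1 = invalid or dead
 * port.
 */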
int erl_drv_output_term(ErlDrvTermData port_id, ErlDrvTermData* data, int len)
{
/* May be called from arbitrary thread */
- Eterm connected;
- int res = deliver_term_check_port(port_id, &connected);
+ Eterm connected = NIL; /* Shut up faulty warning... */
+ Port *prt = NULL;
+ int res = deliver_term_check_port(port_id, &connected, &prt);
if (res <= 0)
return res;
- return driver_deliver_term(connected, data, len);
+ return driver_deliver_term(prt, connected, data, len);
}
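/*
 * Usage sketch for the documented term-output API (editorial; the
 * atom and payload are made up). This sends {example_tag, "hello"}
 * to the process currently connected to the port:
 */
#if 0 /* example only */
static void output_example(ErlDrvPort port)
{
    ErlDrvTermData spec[] = {
        ERL_DRV_ATOM, driver_mk_atom("example_tag"),
        ERL_DRV_STRING, (ErlDrvTermData) "hello", 5,
        ERL_DRV_TUPLE, 2
    };
    erl_drv_output_term(driver_mk_port(port),
                        spec, sizeof(spec) / sizeof(spec[0]));
}
#endif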
/*
@@ -5685,17 +6237,17 @@ driver_output_term(ErlDrvPort drvport, ErlDrvTermData* data, int len)
erts_aint32_t state;
Port* prt;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
    /* NOTE! It is *not* safe to access 'drvport' from unmanaged threads. */
prt = erts_drvport2port_state(drvport, &state);
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return -1; /* invalid (dead) */
- ERTS_SMP_CHK_NO_PROC_LOCKS;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_CHK_NO_PROC_LOCKS;
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
if (state & ERTS_PORT_SFLG_CLOSING)
return 0;
- return driver_deliver_term(ERTS_PORT_GET_CONNECTED(prt), data, len);
+ return driver_deliver_term(prt, ERTS_PORT_GET_CONNECTED(prt), data, len);
}
int erl_drv_send_term(ErlDrvTermData port_id,
@@ -5704,10 +6256,11 @@ int erl_drv_send_term(ErlDrvTermData port_id,
int len)
{
/* May be called from arbitrary thread */
- int res = deliver_term_check_port(port_id, NULL);
+ Port *prt = NULL;
+ int res = deliver_term_check_port(port_id, NULL, &prt);
if (res <= 0)
return res;
- return driver_deliver_term(to, data, len);
+ return driver_deliver_term(prt, to, data, len);
}
/*
@@ -5726,20 +6279,19 @@ driver_send_term(ErlDrvPort drvport,
* to make this access safe without using a less efficient
* internal data representation for ErlDrvPort.
*/
- ERTS_SMP_CHK_NO_PROC_LOCKS;
-#ifdef ERTS_SMP
+ Port* prt = NULL;
+ ERTS_CHK_NO_PROC_LOCKS;
if (erts_thr_progress_is_managed_thread())
-#endif
{
erts_aint32_t state;
- Port* prt = erts_drvport2port_state(drvport, &state);
+ prt = erts_drvport2port_state(drvport, &state);
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return -1; /* invalid (dead) */
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
if (state & ERTS_PORT_SFLG_CLOSING)
return 0;
}
- return driver_deliver_term(to, data, len);
+ return driver_deliver_term(prt, to, data, len);
}
@@ -5753,18 +6305,23 @@ int driver_output_binary(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
{
erts_aint32_t state;
Port* prt = erts_drvport2port_state(ix, &state);
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return -1;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
if (state & ERTS_PORT_SFLG_CLOSING)
return 0;
prt->bytes_in += (hlen + len);
- erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) (hlen + len));
+ if (esdp)
+ esdp->io.in += (Uint64) (hlen + len);
+ else
+ erts_atomic64_add_nob(&bytes_in, (erts_aint64_t) (hlen + len));
if (state & ERTS_PORT_SFLG_DISTRIBUTION) {
+ erts_atomic64_inc_nob(&prt->dist_entry->in);
return erts_net_message(prt,
prt->dist_entry,
(byte*) hbuf, hlen,
@@ -5788,19 +6345,24 @@ int driver_output2(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
{
erts_aint32_t state;
Port* prt = erts_drvport2port_state(ix, &state);
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return -1;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
if (state & ERTS_PORT_SFLG_CLOSING)
return 0;
prt->bytes_in += (hlen + len);
- erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) (hlen + len));
+ if (esdp)
+ esdp->io.in += (Uint64) (hlen + len);
+ else
+ erts_atomic64_add_nob(&bytes_in, (erts_aint64_t) (hlen + len));
if (state & ERTS_PORT_SFLG_DISTRIBUTION) {
+ erts_atomic64_inc_nob(&prt->dist_entry->in);
if (len == 0)
return erts_net_message(prt,
prt->dist_entry,
@@ -5825,7 +6387,7 @@ int driver_output2(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
int driver_output(ErlDrvPort ix, char* buf, ErlDrvSizeT len)
{
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
return driver_output2(ix, NULL, 0, buf, len);
}
@@ -5839,8 +6401,9 @@ int driver_outputv(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
ErlDrvBinary** binv;
Port* prt;
erts_aint32_t state;
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
ASSERT(vec->size >= skip);
if (vec->size <= skip)
@@ -5851,7 +6414,7 @@ int driver_outputv(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return -1;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
if (state & ERTS_PORT_SFLG_CLOSING)
return 0;
@@ -5877,7 +6440,10 @@ int driver_outputv(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
/* XXX handle distribution !!! */
prt->bytes_in += (hlen + size);
- erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) (hlen + size));
+ if (esdp)
+ esdp->io.in += (Uint64) (hlen + size);
+ else
+ erts_atomic64_add_nob(&bytes_in, (erts_aint64_t) (hlen + size));
deliver_vec_message(prt, ERTS_PORT_GET_CONNECTED(prt), hbuf, hlen,
binv, iov, n, size);
return 0;
@@ -5909,12 +6475,6 @@ ErlDrvSizeT driver_vec_to_buf(ErlIOVec *vec, char *buf, ErlDrvSizeT len)
return (orig_len - len);
}
-
-/*
- * - driver_alloc_binary() is thread safe (efile driver depend on it).
- * - driver_realloc_binary(), and driver_free_binary() are *not* thread safe.
- */
-
/*
* reference count on driver binaries...
*/
@@ -5923,21 +6483,21 @@ ErlDrvSInt
driver_binary_get_refc(ErlDrvBinary *dbp)
{
Binary* bp = ErlDrvBinary2Binary(dbp);
- return (ErlDrvSInt) erts_refc_read(&bp->refc, 1);
+ return (ErlDrvSInt) erts_refc_read(&bp->intern.refc, 1);
}
ErlDrvSInt
driver_binary_inc_refc(ErlDrvBinary *dbp)
{
Binary* bp = ErlDrvBinary2Binary(dbp);
- return (ErlDrvSInt) erts_refc_inctest(&bp->refc, 2);
+ return (ErlDrvSInt) erts_refc_inctest(&bp->intern.refc, 2);
}
ErlDrvSInt
driver_binary_dec_refc(ErlDrvBinary *dbp)
{
Binary* bp = ErlDrvBinary2Binary(dbp);
- return (ErlDrvSInt) erts_refc_dectest(&bp->refc, 1);
+ return (ErlDrvSInt) erts_refc_dectest(&bp->intern.refc, 1);
}
@@ -5953,39 +6513,24 @@ driver_alloc_binary(ErlDrvSizeT size)
bin = erts_bin_drv_alloc_fnf((Uint) size);
if (!bin)
return NULL; /* The driver write must take action */
- bin->flags = BIN_FLAG_DRV;
- erts_refc_init(&bin->refc, 1);
- bin->orig_size = (SWord) size;
return Binary2ErlDrvBinary(bin);
}
-/* Reallocate space hold by binary */
+/* Reallocate space held by binary */
ErlDrvBinary* driver_realloc_binary(ErlDrvBinary* bin, ErlDrvSizeT size)
{
Binary* oldbin;
Binary* newbin;
- if (!bin) {
- erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp,
- "Bad use of driver_realloc_binary(%p, %lu): "
- "called with ",
- bin, (unsigned long)size);
- if (!bin) {
- erts_dsprintf(dsbufp, "NULL pointer as first argument");
- }
- erts_send_warning_to_logger_nogl(dsbufp);
- if (!bin)
- return driver_alloc_binary(size);
- }
+ if (!bin)
+ return driver_alloc_binary(size);
oldbin = ErlDrvBinary2Binary(bin);
newbin = (Binary *) erts_bin_realloc_fnf(oldbin, size);
if (!newbin)
return NULL;
- newbin->orig_size = size;
return Binary2ErlDrvBinary(newbin);
}
@@ -5993,18 +6538,11 @@ ErlDrvBinary* driver_realloc_binary(ErlDrvBinary* bin, ErlDrvSizeT size)
void driver_free_binary(ErlDrvBinary* dbin)
{
Binary *bin;
- if (!dbin) {
- erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp,
- "Bad use of driver_free_binary(%p): called with "
- "NULL pointer as argument", dbin);
- erts_send_warning_to_logger_nogl(dsbufp);
+ if (!dbin)
return;
- }
bin = ErlDrvBinary2Binary(dbin);
- if (erts_refc_dectest(&bin->refc, 0) == 0)
- erts_bin_free(bin);
+ erts_bin_release(bin);
}
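/*
 * Lifecycle sketch for the documented binary API (editorial; the
 * helper name is made up). A fresh driver binary starts with refc 1;
 * driver_free_binary() drops one reference and deallocates at zero.
 */
#if 0 /* example only */
static ErlDrvBinary *copy_to_drv_binary(const char *buf, ErlDrvSizeT len)
{
    ErlDrvBinary *bin = driver_alloc_binary(len);
    if (!bin)
        return NULL;                /* out of memory; caller must handle */
    sys_memcpy(bin->orig_bytes, buf, len);
    return bin;                     /* caller owns the single reference */
}
#endif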
@@ -6090,7 +6628,6 @@ static ERTS_INLINE void pdl_destroy(ErlDrvPDL pdl)
erts_free(ERTS_ALC_T_PORT_DATA_LOCK, pdl);
}
-#ifdef ERTS_SMP
static void driver_monitor_lock_pdl(Port *p) {
if (p->port_data_lock) {
@@ -6099,7 +6636,7 @@ static void driver_monitor_lock_pdl(Port *p) {
/* Now we either have the port lock or the port_data_lock */
ERTS_LC_ASSERT(!p->port_data_lock
|| erts_lc_mtx_is_locked(&(p->port_data_lock->mtx)));
- ERTS_SMP_LC_ASSERT(p->port_data_lock
+ ERTS_LC_ASSERT(p->port_data_lock
|| erts_lc_is_port_locked(p));
}
@@ -6107,14 +6644,13 @@ static void driver_monitor_unlock_pdl(Port *p) {
/* We should either have the port lock or the port_data_lock */
ERTS_LC_ASSERT(!p->port_data_lock
|| erts_lc_mtx_is_locked(&(p->port_data_lock->mtx)));
- ERTS_SMP_LC_ASSERT(p->port_data_lock
+ ERTS_LC_ASSERT(p->port_data_lock
|| erts_lc_is_port_locked(p));
if (p->port_data_lock) {
driver_pdl_unlock(p->port_data_lock);
}
}
-#endif
/*
* exported driver_pdl_* functions ...
@@ -6129,7 +6665,7 @@ driver_pdl_create(ErlDrvPort dp)
return NULL;
pdl = erts_alloc(ERTS_ALC_T_PORT_DATA_LOCK,
sizeof(struct erl_drv_port_data_lock));
- erts_mtx_init(&pdl->mtx, "port_data_lock");
+ erts_mtx_init(&pdl->mtx, "port_data_lock", pp->common.id, ERTS_LOCK_FLAGS_CATEGORY_IO);
pdl_init_refc(pdl);
erts_port_inc_refc(pp);
pdl->prt = pp;
@@ -6193,307 +6729,51 @@ driver_pdl_dec_refc(ErlDrvPDL pdl)
return refc;
}
-/* expand queue to hold n elements in tail or head */
-static int expandq(ErlIOQueue* q, int n, int tail)
-/* tail: 0 if make room in head, make room in tail otherwise */
-{
- int h_sz; /* room before header */
- int t_sz; /* room after tail */
- int q_sz; /* occupied */
- int nvsz;
- SysIOVec* niov;
- ErlDrvBinary** nbinv;
-
- h_sz = q->v_head - q->v_start;
- t_sz = q->v_end - q->v_tail;
- q_sz = q->v_tail - q->v_head;
-
- if (tail && (n <= t_sz)) /* do we need to expand tail? */
- return 0;
- else if (!tail && (n <= h_sz)) /* do we need to expand head? */
- return 0;
- else if (n > (h_sz + t_sz)) { /* need to allocate */
- /* we may get little extra but it ok */
- nvsz = (q->v_end - q->v_start) + n;
-
- niov = erts_alloc_fnf(ERTS_ALC_T_IOQ, nvsz * sizeof(SysIOVec));
- if (!niov)
- return -1;
- nbinv = erts_alloc_fnf(ERTS_ALC_T_IOQ, nvsz * sizeof(ErlDrvBinary**));
- if (!nbinv) {
- erts_free(ERTS_ALC_T_IOQ, (void *) niov);
- return -1;
- }
- if (tail) {
- sys_memcpy(niov, q->v_head, q_sz*sizeof(SysIOVec));
- if (q->v_start != q->v_small)
- erts_free(ERTS_ALC_T_IOQ, (void *) q->v_start);
- q->v_start = niov;
- q->v_end = niov + nvsz;
- q->v_head = q->v_start;
- q->v_tail = q->v_head + q_sz;
-
- sys_memcpy(nbinv, q->b_head, q_sz*sizeof(ErlDrvBinary*));
- if (q->b_start != q->b_small)
- erts_free(ERTS_ALC_T_IOQ, (void *) q->b_start);
- q->b_start = nbinv;
- q->b_end = nbinv + nvsz;
- q->b_head = q->b_start;
- q->b_tail = q->b_head + q_sz;
- }
- else {
- sys_memcpy(niov+nvsz-q_sz, q->v_head, q_sz*sizeof(SysIOVec));
- if (q->v_start != q->v_small)
- erts_free(ERTS_ALC_T_IOQ, (void *) q->v_start);
- q->v_start = niov;
- q->v_end = niov + nvsz;
- q->v_tail = q->v_end;
- q->v_head = q->v_tail - q_sz;
-
- sys_memcpy(nbinv+nvsz-q_sz, q->b_head, q_sz*sizeof(ErlDrvBinary*));
- if (q->b_start != q->b_small)
- erts_free(ERTS_ALC_T_IOQ, (void *) q->b_start);
- q->b_start = nbinv;
- q->b_end = nbinv + nvsz;
- q->b_tail = q->b_end;
- q->b_head = q->b_tail - q_sz;
- }
- }
- else if (tail) { /* move to beginning to make room in tail */
- sys_memmove(q->v_start, q->v_head, q_sz*sizeof(SysIOVec));
- q->v_head = q->v_start;
- q->v_tail = q->v_head + q_sz;
- sys_memmove(q->b_start, q->b_head, q_sz*sizeof(ErlDrvBinary*));
- q->b_head = q->b_start;
- q->b_tail = q->b_head + q_sz;
- }
- else { /* move to end to make room */
- sys_memmove(q->v_end-q_sz, q->v_head, q_sz*sizeof(SysIOVec));
- q->v_tail = q->v_end;
- q->v_head = q->v_tail-q_sz;
- sys_memmove(q->b_end-q_sz, q->b_head, q_sz*sizeof(ErlDrvBinary*));
- q->b_tail = q->b_end;
- q->b_head = q->b_tail-q_sz;
- }
-
- return 0;
-}
-
-
-
/* Put elements from vec at q tail */
int driver_enqv(ErlDrvPort ix, ErlIOVec* vec, ErlDrvSizeT skip)
{
- int n;
- size_t len;
- ErlDrvSizeT size;
- SysIOVec* iov;
- ErlDrvBinary** binv;
- ErlDrvBinary* b;
- ErlIOQueue* q = drvport2ioq(ix);
-
- if (q == NULL)
- return -1;
-
- ASSERT(vec->size >= skip); /* debug only */
- if (vec->size <= skip)
- return 0;
- size = vec->size - skip;
-
- iov = vec->iov;
- binv = vec->binv;
- n = vec->vsize;
-
- /* we use do here to strip iov_len=0 from beginning */
- do {
- len = iov->iov_len;
- if (len <= skip) {
- skip -= len;
- iov++;
- binv++;
- n--;
- }
- else {
- iov->iov_base = ((char *)(iov->iov_base)) + skip;
- iov->iov_len -= skip;
- skip = 0;
- }
- } while(skip > 0);
-
- if (q->v_tail + n >= q->v_end)
- expandq(q, n, 1);
-
- /* Queue and reference all binaries (remove zero length items) */
- while(n--) {
- if ((len = iov->iov_len) > 0) {
- if ((b = *binv) == NULL) { /* speical case create binary ! */
- b = driver_alloc_binary(len);
- sys_memcpy(b->orig_bytes, iov->iov_base, len);
- *q->b_tail++ = b;
- q->v_tail->iov_len = len;
- q->v_tail->iov_base = b->orig_bytes;
- q->v_tail++;
- }
- else {
- driver_binary_inc_refc(b);
- *q->b_tail++ = b;
- *q->v_tail++ = *iov;
- }
- }
- iov++;
- binv++;
- }
- q->size += size; /* update total size in queue */
- return 0;
+ ASSERT(vec->size >= skip);
+ return erts_ioq_enqv(drvport2ioq(ix), (ErtsIOVec*)vec, skip);
}
/* Put elements from vec at q head */
int driver_pushqv(ErlDrvPort ix, ErlIOVec* vec, ErlDrvSizeT skip)
{
- int n;
- size_t len;
- ErlDrvSizeT size;
- SysIOVec* iov;
- ErlDrvBinary** binv;
- ErlDrvBinary* b;
- ErlIOQueue* q = drvport2ioq(ix);
-
- if (q == NULL)
- return -1;
-
- if (vec->size <= skip)
- return 0;
- size = vec->size - skip;
-
- iov = vec->iov;
- binv = vec->binv;
- n = vec->vsize;
-
- /* we use do here to strip iov_len=0 from beginning */
- do {
- len = iov->iov_len;
- if (len <= skip) {
- skip -= len;
- iov++;
- binv++;
- n--;
- }
- else {
- iov->iov_base = ((char *)(iov->iov_base)) + skip;
- iov->iov_len -= skip;
- skip = 0;
- }
- } while(skip > 0);
-
- if (q->v_head - n < q->v_start)
- expandq(q, n, 0);
-
- /* Queue and reference all binaries (remove zero length items) */
- iov += (n-1); /* move to end */
- binv += (n-1); /* move to end */
- while(n--) {
- if ((len = iov->iov_len) > 0) {
- if ((b = *binv) == NULL) { /* speical case create binary ! */
- b = driver_alloc_binary(len);
- sys_memcpy(b->orig_bytes, iov->iov_base, len);
- *--q->b_head = b;
- q->v_head--;
- q->v_head->iov_len = len;
- q->v_head->iov_base = b->orig_bytes;
- }
- else {
- driver_binary_inc_refc(b);
- *--q->b_head = b;
- *--q->v_head = *iov;
- }
- }
- iov--;
- binv--;
- }
- q->size += size; /* update total size in queue */
- return 0;
+ ASSERT(vec->size >= skip);
+ return erts_ioq_pushqv(drvport2ioq(ix), (ErtsIOVec*)vec, skip);
}
-
/*
** Remove size bytes from queue head
** Return number of bytes that remain in queue
*/
ErlDrvSizeT driver_deq(ErlDrvPort ix, ErlDrvSizeT size)
{
- ErlIOQueue* q = drvport2ioq(ix);
- ErlDrvSizeT len;
-
- if ((q == NULL) || (q->size < size))
- return -1;
- q->size -= size;
- while (size > 0) {
- ASSERT(q->v_head != q->v_tail);
-
- len = q->v_head->iov_len;
- if (len <= size) {
- size -= len;
- driver_free_binary(*q->b_head);
- *q->b_head++ = NULL;
- q->v_head++;
- }
- else {
- q->v_head->iov_base = ((char *)(q->v_head->iov_base)) + size;
- q->v_head->iov_len -= size;
- size = 0;
- }
- }
-
- /* restart pointers (optimised for enq) */
- if (q->v_head == q->v_tail) {
- q->v_head = q->v_tail = q->v_start;
- q->b_head = q->b_tail = q->b_start;
- }
- return q->size;
+ ErlPortIOQueue *q = drvport2ioq(ix);
+ if (erts_ioq_deq(q, size) == -1)
+ return -1;
+ return erts_ioq_size(q);
}
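/*
 * Queue usage sketch with the documented API (editorial; the scenario
 * is made up): after a partial write, dequeue what was written and
 * unbusy the port once the queue is drained.
 */
#if 0 /* example only */
static void after_write(ErlDrvPort port, ErlDrvSizeT written)
{
    driver_deq(port, written);          /* drop bytes already written */
    if (driver_sizeq(port) == 0)
        set_busy_port(port, 0);         /* queue drained; accept more data */
}
#endif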
-ErlDrvSizeT driver_peekqv(ErlDrvPort ix, ErlIOVec *ev) {
- ErlIOQueue *q = drvport2ioq(ix);
- ASSERT(ev);
-
- if (! q) {
- return (ErlDrvSizeT) -1;
- } else {
- if ((ev->vsize = q->v_tail - q->v_head) == 0) {
- ev->size = 0;
- ev->iov = NULL;
- ev->binv = NULL;
- } else {
- ev->size = q->size;
- ev->iov = q->v_head;
- ev->binv = q->b_head;
- }
- return q->size;
- }
+ErlDrvSizeT driver_peekqv(ErlDrvPort ix, ErlIOVec *ev)
+{
+ return erts_ioq_peekqv(drvport2ioq(ix), (ErtsIOVec*)ev);
}
SysIOVec* driver_peekq(ErlDrvPort ix, int* vlenp) /* length of io-vector */
{
- ErlIOQueue* q = drvport2ioq(ix);
-
- if (q == NULL) {
- *vlenp = -1;
- return NULL;
- }
- if ((*vlenp = (q->v_tail - q->v_head)) == 0)
- return NULL;
- return q->v_head;
+ return erts_ioq_peekq(drvport2ioq(ix), vlenp);
}
ErlDrvSizeT driver_sizeq(ErlDrvPort ix)
{
- ErlIOQueue* q = drvport2ioq(ix);
+ ErlPortIOQueue *q = drvport2ioq(ix);
if (q == NULL)
- return (size_t) -1;
- return q->size;
+ return (ErlDrvSizeT) -1;
+ return erts_ioq_size(q);
}
@@ -6569,42 +6849,19 @@ int driver_pushq(ErlDrvPort ix, char* buffer, ErlDrvSizeT len)
return code;
}
-static ERTS_INLINE void
-drv_cancel_timer(Port *prt)
-{
-#ifdef ERTS_SMP
- erts_cancel_smp_ptimer(prt->common.u.alive.ptimer);
-#else
- erts_cancel_timer(&prt->common.u.alive.tm);
-#endif
- if (erts_port_task_is_scheduled(&prt->timeout_task))
- erts_port_task_abort(&prt->timeout_task);
-}
-
int driver_set_timer(ErlDrvPort ix, unsigned long t)
{
Port* prt = erts_drvport2port(ix);
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return -1;
if (prt->drv_ptr->timeout == NULL)
return -1;
- drv_cancel_timer(prt);
-#ifdef ERTS_SMP
- erts_create_smp_ptimer(&prt->common.u.alive.ptimer,
- prt->common.id,
- (ErlTimeoutProc) schedule_port_timeout,
- t);
-#else
- erts_set_timer(&prt->common.u.alive.tm,
- (ErlTimeoutProc) schedule_port_timeout,
- NULL,
- prt,
- t);
-#endif
+
+ erts_set_port_timer(prt, (Sint64) t);
return 0;
}
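/*
 * Timer usage sketch (documented API; editorial): arm a 100 ms
 * timeout, after which the driver's timeout callback is invoked;
 * driver_cancel_timer() disarms a pending timer.
 */
#if 0 /* example only */
static void arm_timeout(ErlDrvPort port)
{
    driver_set_timer(port, 100);    /* milliseconds; resets any pending timer */
}
#endif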
@@ -6613,29 +6870,29 @@ int driver_cancel_timer(ErlDrvPort ix)
Port* prt = erts_drvport2port(ix);
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return -1;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- drv_cancel_timer(prt);
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
+ erts_cancel_port_timer(prt);
return 0;
}
-
int
driver_read_timer(ErlDrvPort ix, unsigned long* t)
{
Port* prt = erts_drvport2port(ix);
+ Sint64 left;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return -1;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
-#ifdef ERTS_SMP
- *t = (prt->common.u.alive.ptimer
- ? erts_time_left(&prt->common.u.alive.ptimer->timer.tm)
- : 0);
-#else
- *t = erts_time_left(&prt->common.u.alive.tm);
-#endif
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
+
+ left = erts_read_port_timer(prt);
+ if (left < 0)
+ left = 0;
+
+ *t = (unsigned long) left;
+
return 0;
}
@@ -6643,7 +6900,7 @@ int
driver_get_now(ErlDrvNowData *now_data)
{
Uint mega,secs,micro;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
if (now_data == NULL) {
return -1;
@@ -6655,22 +6912,51 @@ driver_get_now(ErlDrvNowData *now_data)
return 0;
}
-static void ref_to_driver_monitor(Eterm ref, ErlDrvMonitor *mon)
+ErlDrvTime
+erl_drv_monotonic_time(ErlDrvTimeUnit time_unit)
{
- RefThing *refp;
- ASSERT(is_internal_ref(ref));
- ASSERT(sizeof(RefThing) <= sizeof(ErlDrvMonitor));
- refp = ref_thing_ptr(ref);
- memset(mon,0,sizeof(ErlDrvMonitor));
- memcpy(mon,refp,sizeof(RefThing));
+ return (ErlDrvTime) erts_napi_monotonic_time((int) time_unit);
+}
+
+ErlDrvTime
+erl_drv_time_offset(ErlDrvTimeUnit time_unit)
+{
+ return (ErlDrvTime) erts_napi_time_offset((int) time_unit);
}
+ErlDrvTime
+erl_drv_convert_time_unit(ErlDrvTime val,
+ ErlDrvTimeUnit from,
+ ErlDrvTimeUnit to)
+{
+ return (ErlDrvTime) erts_napi_convert_time_unit((ErtsMonotonicTime) val,
+ (int) from,
+ (int) to);
+}
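/*
 * Time API sketch (editorial): measure an elapsed interval with the
 * monotonic clock defined above and convert it to milliseconds.
 */
#if 0 /* example only */
static ErlDrvTime elapsed_msec(ErlDrvTime start_usec)
{
    ErlDrvTime now = erl_drv_monotonic_time(ERL_DRV_USEC);
    return erl_drv_convert_time_unit(now - start_usec,
                                     ERL_DRV_USEC, ERL_DRV_MSEC);
}
#endif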
+
+void erts_ref_to_driver_monitor(Eterm ref, ErlDrvMonitor *mon)
+{
+ ERTS_CT_ASSERT(ERTS_REF_THING_SIZE*sizeof(Uint) <= sizeof(ErlDrvMonitor));
+ ASSERT(is_internal_ordinary_ref(ref));
+ sys_memcpy((void *) mon, (void *) internal_ref_val(ref),
+ ERTS_REF_THING_SIZE*sizeof(Uint));
+}
+
+Eterm erts_driver_monitor_to_ref(Eterm *hp, const ErlDrvMonitor *mon)
+{
+ Eterm ref;
+ ERTS_CT_ASSERT(ERTS_REF_THING_SIZE*sizeof(Uint) <= sizeof(ErlDrvMonitor));
+ sys_memcpy((void *) hp, (void *) mon, ERTS_REF_THING_SIZE*sizeof(Uint));
+ ref = make_internal_ref(hp);
+ ASSERT(is_internal_ordinary_ref(ref));
+ return ref;
+}
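/*
 * The two helpers above are inverses: a reference is flattened by
 * copying its ERTS_REF_THING_SIZE heap words into the opaque
 * ErlDrvMonitor, and rebuilt by copying them back onto a caller
 * supplied heap buffer, e.g.:
 *
 *     Eterm heap[ERTS_REF_THING_SIZE];
 *     Eterm ref = erts_driver_monitor_to_ref(heap, monitor);
 */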
static int do_driver_monitor_process(Port *prt,
- Eterm *buf,
ErlDrvTermData process,
ErlDrvMonitor *monitor)
{
+ Eterm buf[ERTS_REF_THING_SIZE];
Process *rp;
Eterm ref;
@@ -6688,8 +6974,8 @@ static int do_driver_monitor_process(Port *prt,
erts_add_monitor(&ERTS_P_MONITORS(prt), MON_ORIGIN, ref, rp->common.id, NIL);
erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, ref, prt->common.id, NIL);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
- ref_to_driver_monitor(ref,monitor);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ erts_ref_to_driver_monitor(ref,monitor);
return 0;
}
@@ -6702,7 +6988,7 @@ int driver_monitor_process(ErlDrvPort drvport,
{
Port *prt;
int ret;
-#if !HEAP_ON_C_STACK || (defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK))
+#if defined(ERTS_ENABLE_LOCK_CHECK)
ErtsSchedulerData *sched = erts_get_scheduler_data();
#endif
@@ -6712,43 +6998,28 @@ int driver_monitor_process(ErlDrvPort drvport,
/* Now (in SMP) we should have either the port lock (if we have a scheduler) or the port data lock
(if we're a driver thread) */
- ERTS_SMP_LC_ASSERT((sched != NULL || prt->port_data_lock));
-
-#if !HEAP_ON_C_STACK
- if (!sched) {
- /* Need a separate allocation for the ref :( */
- Eterm *buf = erts_alloc(ERTS_ALC_T_TEMP_TERM,
- sizeof(Eterm)*REF_THING_SIZE);
- ret = do_driver_monitor_process(prt,buf,process,monitor);
- erts_free(ERTS_ALC_T_TEMP_TERM,buf);
- } else
-#endif
- {
- DeclareTmpHeapNoproc(buf,REF_THING_SIZE);
- UseTmpHeapNoproc(REF_THING_SIZE);
- ret = do_driver_monitor_process(prt,buf,process,monitor);
- UnUseTmpHeapNoproc(REF_THING_SIZE);
- }
+ ERTS_LC_ASSERT((sched != NULL || prt->port_data_lock));
+ ret = do_driver_monitor_process(prt,process,monitor);
DRV_MONITOR_UNLOCK_PDL(prt);
return ret;
}
-static int do_driver_demonitor_process(Port *prt, Eterm *buf,
- const ErlDrvMonitor *monitor)
+static int do_driver_demonitor_process(Port *prt, const ErlDrvMonitor *monitor)
{
+ Eterm heap[ERTS_REF_THING_SIZE];
Process *rp;
Eterm ref;
ErtsMonitor *mon;
Eterm to;
- memcpy(buf,monitor,sizeof(Eterm)*REF_THING_SIZE);
- ref = make_internal_ref(buf);
+ ref = erts_driver_monitor_to_ref(heap, monitor);
+
mon = erts_lookup_monitor(ERTS_P_MONITORS(prt), ref);
if (mon == NULL) {
return 1;
}
ASSERT(mon->type == MON_ORIGIN);
- to = mon->pid;
+ to = mon->u.pid;
ASSERT(is_internal_pid(to));
rp = erts_pid2proc_opt(NULL,
0,
@@ -6762,7 +7033,7 @@ static int do_driver_demonitor_process(Port *prt, Eterm *buf,
if (rp) {
ErtsMonitor *rmon;
rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
if (rmon != NULL) {
erts_destroy_monitor(rmon);
}
@@ -6775,7 +7046,7 @@ int driver_demonitor_process(ErlDrvPort drvport,
{
Port *prt;
int ret;
-#if !HEAP_ON_C_STACK || (defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK))
+#if defined(ERTS_ENABLE_LOCK_CHECK)
ErtsSchedulerData *sched = erts_get_scheduler_data();
#endif
@@ -6785,41 +7056,27 @@ int driver_demonitor_process(ErlDrvPort drvport,
/* Now we should have either the port lock (if we have a scheduler) or the port data lock
(if we're a driver thread) */
- ERTS_SMP_LC_ASSERT((sched != NULL || prt->port_data_lock));
-#if !HEAP_ON_C_STACK
- if (!sched) {
- /* Need a separate allocation for the ref :( */
- Eterm *buf = erts_alloc(ERTS_ALC_T_TEMP_TERM,
- sizeof(Eterm)*REF_THING_SIZE);
- ret = do_driver_demonitor_process(prt,buf,monitor);
- erts_free(ERTS_ALC_T_TEMP_TERM,buf);
- } else
-#endif
- {
- DeclareTmpHeapNoproc(buf,REF_THING_SIZE);
- UseTmpHeapNoproc(REF_THING_SIZE);
- ret = do_driver_demonitor_process(prt,buf,monitor);
- UnUseTmpHeapNoproc(REF_THING_SIZE);
- }
+ ERTS_LC_ASSERT((sched != NULL || prt->port_data_lock));
+ ret = do_driver_demonitor_process(prt,monitor);
DRV_MONITOR_UNLOCK_PDL(prt);
return ret;
}
-static ErlDrvTermData do_driver_get_monitored_process(Port *prt, Eterm *buf,
- const ErlDrvMonitor *monitor)
+static ErlDrvTermData do_driver_get_monitored_process(Port *prt,const ErlDrvMonitor *monitor)
{
Eterm ref;
ErtsMonitor *mon;
Eterm to;
+ Eterm heap[ERTS_REF_THING_SIZE];
+
+ ref = erts_driver_monitor_to_ref(heap, monitor);
- memcpy(buf,monitor,sizeof(Eterm)*REF_THING_SIZE);
- ref = make_internal_ref(buf);
mon = erts_lookup_monitor(ERTS_P_MONITORS(prt), ref);
if (mon == NULL) {
return driver_term_nil;
}
ASSERT(mon->type == MON_ORIGIN);
- to = mon->pid;
+ to = mon->u.pid;
ASSERT(is_internal_pid(to));
return (ErlDrvTermData) to;
}
@@ -6830,7 +7087,7 @@ ErlDrvTermData driver_get_monitored_process(ErlDrvPort drvport,
{
Port *prt;
ErlDrvTermData ret;
-#if !HEAP_ON_C_STACK || (defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK))
+#if defined(ERTS_ENABLE_LOCK_CHECK)
ErtsSchedulerData *sched = erts_get_scheduler_data();
#endif
@@ -6840,32 +7097,17 @@ ErlDrvTermData driver_get_monitored_process(ErlDrvPort drvport,
/* Now we should have either the port lock (if we have a scheduler) or the port data lock
(if we're a driver thread) */
- ERTS_SMP_LC_ASSERT((sched != NULL || prt->port_data_lock));
-
-#if !HEAP_ON_C_STACK
- if (!sched) {
- /* Need a separate allocation for the ref :( */
- Eterm *buf = erts_alloc(ERTS_ALC_T_TEMP_TERM,
- sizeof(Eterm)*REF_THING_SIZE);
- ret = do_driver_get_monitored_process(prt,buf,monitor);
- erts_free(ERTS_ALC_T_TEMP_TERM,buf);
- } else
-#endif
- {
- DeclareTmpHeapNoproc(buf,REF_THING_SIZE);
- UseTmpHeapNoproc(REF_THING_SIZE);
- ret = do_driver_get_monitored_process(prt,buf,monitor);
- UnUseTmpHeapNoproc(REF_THING_SIZE);
- }
+ ERTS_LC_ASSERT((sched != NULL || prt->port_data_lock));
+ ret = do_driver_get_monitored_process(prt,monitor);
DRV_MONITOR_UNLOCK_PDL(prt);
return ret;
}
-
int driver_compare_monitors(const ErlDrvMonitor *monitor1,
const ErlDrvMonitor *monitor2)
{
- return memcmp(monitor1,monitor2,sizeof(ErlDrvMonitor));
+ return sys_memcmp((void *) monitor1, (void *) monitor2,
+ ERTS_REF_THING_SIZE*sizeof(Eterm));
}
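/*
 * Driver-side sketch (assumed code, not from this patch) of the monitor
 * round trip these helpers support: monitor the calling process, then
 * match the monitor handed to the process_exit callback -- which
 * erts_fire_port_monitor() below builds with erts_ref_to_driver_monitor()
 * -- against the one saved at monitor time.
 */
#include "erl_driver.h"

static ErlDrvMonitor saved_mon;

static void monitor_caller(ErlDrvPort port)
{
    ErlDrvTermData caller = driver_caller(port);
    if (driver_monitor_process(port, caller, &saved_mon) != 0) {
        /* process not alive; nothing to monitor */
    }
}

static void my_process_exit(ErlDrvData data, ErlDrvMonitor *mon)
{
    if (driver_compare_monitors(mon, &saved_mon) == 0) {
        /* the process monitored above has exited */
    }
}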
void erts_fire_port_monitor(Port *prt, Eterm ref)
@@ -6874,8 +7116,9 @@ void erts_fire_port_monitor(Port *prt, Eterm ref)
void (*callback)(ErlDrvData drv_data, ErlDrvMonitor *monitor);
ErlDrvMonitor drv_monitor;
int fpe_was_unmasked;
+ ERTS_MSACC_PUSH_STATE_M();
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
ASSERT(prt->drv_ptr != NULL);
DRV_MONITOR_LOCK_PDL(prt);
if (erts_lookup_monitor(ERTS_P_MONITORS(prt), ref) == NULL) {
@@ -6884,7 +7127,8 @@ void erts_fire_port_monitor(Port *prt, Eterm ref)
}
callback = prt->drv_ptr->process_exit;
ASSERT(callback != NULL);
- ref_to_driver_monitor(ref,&drv_monitor);
+ erts_ref_to_driver_monitor(ref,&drv_monitor);
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_PORT);
DRV_MONITOR_UNLOCK_PDL(prt);
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(driver_process_exit)) {
@@ -6892,10 +7136,20 @@ void erts_fire_port_monitor(Port *prt, Eterm ref)
DTRACE3(driver_process_exit, process_str, port_str, prt->name);
}
#endif
+#ifdef USE_LTTNG_VM_TRACEPOINTS
+ if (LTTNG_ENABLED(driver_process_exit)) {
+ lttng_decl_portbuf(port_str);
+ lttng_decl_procbuf(proc_str);
+ lttng_pid_to_str(ERTS_PORT_GET_CONNECTED(prt), proc_str);
+ lttng_port_to_str(prt, port_str);
+ LTTNG3(driver_process_exit, proc_str, port_str, prt->name);
+ }
+#endif
fpe_was_unmasked = erts_block_fpe();
(*callback)((ErlDrvData) (prt->drv_data), &drv_monitor);
erts_unblock_fpe(fpe_was_unmasked);
DRV_MONITOR_LOCK_PDL(prt);
+ ERTS_MSACC_POP_STATE_M();
/* remove monitor *after* callback */
rmon = erts_remove_monitor(&ERTS_P_MONITORS(prt), ref);
DRV_MONITOR_UNLOCK_PDL(prt);
@@ -6911,17 +7165,20 @@ driver_failure_term(ErlDrvPort ix, Eterm term, int eof)
erts_aint32_t state;
Port* prt = erts_drvport2port_state(ix, &state);
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return -1;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
+
+ if (prt->async_open_port)
+ init_ack_send_reply(prt, prt->common.id);
if (eof)
flush_linebuf_messages(prt, state);
if (state & ERTS_PORT_SFLG_CLOSING) {
terminate_port(prt);
} else if (eof && (state & ERTS_PORT_SFLG_SOFT_EOF)) {
- deliver_result(prt->common.id, ERTS_PORT_GET_CONNECTED(prt), am_eof);
+ deliver_result(prt, prt->common.id, ERTS_PORT_GET_CONNECTED(prt), am_eof);
} else {
/* XXX UGLY WORK AROUND, Let erts_deliver_port_exit() terminate the port */
if (prt->port_data_lock)
@@ -6929,7 +7186,7 @@ driver_failure_term(ErlDrvPort ix, Eterm term, int eof)
prt->ioq.size = 0;
if (prt->port_data_lock)
driver_pdl_unlock(prt->port_data_lock);
- erts_deliver_port_exit(prt, prt->common.id, eof ? am_normal : term, 0);
+ erts_deliver_port_exit(prt, prt->common.id, eof ? am_normal : term, 0, 0);
}
return 0;
}
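/*
 * Sketch of the public entry points that end up in driver_failure_term()
 * above (the surrounding driver code is an assumption;
 * driver_failure_atom() and driver_failure_eof() are the documented
 * erl_driver.h wrappers).
 */
#include "erl_driver.h"

static void fail_port(ErlDrvPort port, int fatal)
{
    if (fatal)
        driver_failure_atom(port, "device_lost"); /* port exits with this reason */
    else
        driver_failure_eof(port); /* soft-eof path guarded by ERTS_PORT_SFLG_SOFT_EOF */
}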
@@ -6947,7 +7204,7 @@ int driver_exit(ErlDrvPort ix, int err)
ErtsLink *lnk, *rlnk = NULL;
Eterm connected;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return -1;
@@ -6960,10 +7217,8 @@ int driver_exit(ErlDrvPort ix, int err)
lnk = erts_remove_link(&ERTS_P_LINKS(prt), connected);
-#ifdef ERTS_SMP
if (rp)
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
-#endif
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
if (rlnk != NULL) {
erts_destroy_link(rlnk);
@@ -7017,7 +7272,7 @@ ErlDrvTermData driver_mk_atom(char* string)
sys_strlen(string),
ERTS_ATOM_ENC_LATIN1,
1);
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
return (ErlDrvTermData) am;
}
@@ -7026,27 +7281,27 @@ ErlDrvTermData driver_mk_port(ErlDrvPort ix)
Port* prt = erts_drvport2port(ix);
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return (ErlDrvTermData) NIL;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
return (ErlDrvTermData) prt->common.id;
}
ErlDrvTermData driver_connected(ErlDrvPort ix)
{
Port* prt = erts_drvport2port(ix);
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return NIL;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
return ERTS_PORT_GET_CONNECTED(prt);
}
ErlDrvTermData driver_caller(ErlDrvPort ix)
{
Port* prt = erts_drvport2port(ix);
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return NIL;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
return prt->caller;
}
@@ -7055,20 +7310,20 @@ int driver_lock_driver(ErlDrvPort ix)
Port* prt = erts_drvport2port(ix);
DE_Handle* dh;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return -1;
- erts_smp_rwmtx_rwlock(&erts_driver_list_lock);
+ erts_rwmtx_rwlock(&erts_driver_list_lock);
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
if ((dh = (DE_Handle*)prt->drv_ptr->handle ) == NULL) {
- erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
+ erts_rwmtx_rwunlock(&erts_driver_list_lock);
return -1;
}
erts_ddll_lock_driver(dh, prt->drv_ptr->name);
- erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
+ erts_rwmtx_rwunlock(&erts_driver_list_lock);
return 0;
}
@@ -7076,9 +7331,9 @@ int driver_lock_driver(ErlDrvPort ix)
static int maybe_lock_driver_list(void)
{
void *rec_lock;
- rec_lock = erts_smp_tsd_get(driver_list_lock_status_key);
+ rec_lock = erts_tsd_get(driver_list_lock_status_key);
if (rec_lock == 0) {
- erts_smp_rwmtx_rwlock(&erts_driver_list_lock);
+ erts_rwmtx_rwlock(&erts_driver_list_lock);
return 1;
}
return 0;
@@ -7086,7 +7341,7 @@ static int maybe_lock_driver_list(void)
static void maybe_unlock_driver_list(int doit)
{
if (doit) {
- erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
+ erts_rwmtx_rwunlock(&erts_driver_list_lock);
}
}
/*
@@ -7109,7 +7364,7 @@ void *driver_dl_open(char * path)
{
void *ptr;
int res;
- int *last_error_p = erts_smp_tsd_get(driver_list_last_error_key);
+ int *last_error_p = erts_tsd_get(driver_list_last_error_key);
int locked = maybe_lock_driver_list();
if ((res = erts_sys_ddll_open(path, &ptr, NULL)) == 0) {
maybe_unlock_driver_list(locked);
@@ -7117,7 +7372,7 @@ void *driver_dl_open(char * path)
} else {
if (!last_error_p) {
last_error_p = erts_alloc(ERTS_ALC_T_DDLL_ERRCODES, sizeof(int));
- erts_smp_tsd_set(driver_list_last_error_key,last_error_p);
+ erts_tsd_set(driver_list_last_error_key,last_error_p);
}
*last_error_p = res;
maybe_unlock_driver_list(locked);
@@ -7129,7 +7384,7 @@ void *driver_dl_sym(void * handle, char *func_name)
{
void *ptr;
int res;
- int *last_error_p = erts_smp_tsd_get(driver_list_lock_status_key);
+ int *last_error_p = erts_tsd_get(driver_list_lock_status_key);
int locked = maybe_lock_driver_list();
if ((res = erts_sys_ddll_sym(handle, func_name, &ptr)) == 0) {
maybe_unlock_driver_list(locked);
@@ -7137,7 +7392,7 @@ void *driver_dl_sym(void * handle, char *func_name)
} else {
if (!last_error_p) {
last_error_p = erts_alloc(ERTS_ALC_T_DDLL_ERRCODES, sizeof(int));
- erts_smp_tsd_set(driver_list_lock_status_key,last_error_p);
+ erts_tsd_set(driver_list_lock_status_key,last_error_p);
}
*last_error_p = res;
maybe_unlock_driver_list(locked);
@@ -7157,7 +7412,7 @@ int driver_dl_close(void *handle)
char *driver_dl_error(void)
{
char *res;
- int *last_error_p = erts_smp_tsd_get(driver_list_lock_status_key);
+ int *last_error_p = erts_tsd_get(driver_list_lock_status_key);
int locked = maybe_lock_driver_list();
res = erts_ddll_error((last_error_p != NULL) ? (*last_error_p) : ERL_DE_ERROR_UNSPECIFIED);
maybe_unlock_driver_list(locked);
@@ -7166,7 +7421,7 @@ char *driver_dl_error(void)
#define ERL_DRV_SYS_INFO_SIZE(LAST_FIELD) \
- (((size_t) &((ErlDrvSysInfo *) 0)->LAST_FIELD) \
+ (offsetof(ErlDrvSysInfo, LAST_FIELD) \
+ sizeof(((ErlDrvSysInfo *) 0)->LAST_FIELD))
void
@@ -7182,7 +7437,7 @@ driver_system_info(ErlDrvSysInfo *sip, size_t si_size)
* of ErlDrvSysInfo (introduced in driver version 1.0).
*/
if (!sip || si_size < ERL_DRV_SYS_INFO_SIZE(smp_support))
- erl_exit(1,
+ erts_exit(ERTS_ERROR_EXIT,
"driver_system_info(%p, %ld) called with invalid arguments\n",
sip, si_size);
@@ -7195,20 +7450,8 @@ driver_system_info(ErlDrvSysInfo *sip, size_t si_size)
sip->driver_minor_version = ERL_DRV_EXTENDED_MINOR_VERSION;
sip->erts_version = ERLANG_VERSION;
sip->otp_release = ERLANG_OTP_RELEASE;
- sip->thread_support =
-#ifdef USE_THREADS
- 1
-#else
- 0
-#endif
- ;
- sip->smp_support =
-#ifdef ERTS_SMP
- 1
-#else
- 0
-#endif
- ;
+ sip->thread_support = 1;
+ sip->smp_support = 1;
}
@@ -7228,6 +7471,16 @@ driver_system_info(ErlDrvSysInfo *sip, size_t si_size)
sip->nif_major_version = ERL_NIF_MAJOR_VERSION;
sip->nif_minor_version = ERL_NIF_MINOR_VERSION;
}
+ /*
+ * 'dirty_scheduler_support' is the last field in the 4th version
+ * (driver version 3.1, NIF version 2.7)
+ */
+ if (si_size >= ERL_DRV_SYS_INFO_SIZE(dirty_scheduler_support)) {
+	sip->dirty_scheduler_support = 1;
+ }
+
}
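/*
 * Illustrative caller (an assumption, not part of this patch): the
 * si_size prefix checks above are what let a driver compiled against a
 * newer header receive only the fields its runtime actually knows about.
 */
#include "erl_driver.h"

static int have_dirty_schedulers(void)
{
    ErlDrvSysInfo info;
    driver_system_info(&info, sizeof(ErlDrvSysInfo));
    return info.dirty_scheduler_support;
}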
@@ -7251,14 +7504,6 @@ no_output_callback(ErlDrvData drv_data, char *buf, ErlDrvSizeT len)
}
static void
-no_event_callback(ErlDrvData drv_data, ErlDrvEvent event, ErlDrvEventData event_data)
-{
- Port *prt = get_current_port();
- report_missing_drv_callback(prt, "Event", "event()");
- driver_event(ERTS_Port2ErlDrvPort(prt), event, NULL);
-}
-
-static void
no_ready_input_callback(ErlDrvData drv_data, ErlDrvEvent event)
{
Port *prt = get_current_port();
@@ -7291,6 +7536,8 @@ no_stop_select_callback(ErlDrvEvent event, void* private)
erts_send_error_to_logger_nogl(dsbufp);
}
+#define IS_DRIVER_VERSION_GE(DE,MAJOR,MINOR) \
+ ((DE)->major_version >= (MAJOR) && (DE)->minor_version >= (MINOR))
static int
init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
@@ -7302,26 +7549,17 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
drv->version.minor = de->minor_version;
drv->flags = de->driver_flags;
drv->handle = handle;
-#ifdef ERTS_SMP
- if (drv->flags & ERL_DRV_FLAG_USE_PORT_LOCKING)
- drv->lock = NULL;
- else {
- drv->lock = erts_alloc(ERTS_ALC_T_DRIVER_LOCK,
- sizeof(erts_mtx_t));
- erts_mtx_init_x(drv->lock,
- "driver_lock",
-#if defined(ERTS_ENABLE_LOCK_CHECK) || defined(ERTS_ENABLE_LOCK_COUNT)
- erts_atom_put((byte *) drv->name,
- sys_strlen(drv->name),
- ERTS_ATOM_ENC_LATIN1,
- 1),
-#else
- NIL,
-#endif
- 1
- );
+ if (drv->flags & ERL_DRV_FLAG_USE_PORT_LOCKING) {
+ drv->lock = NULL;
+ } else {
+ Eterm driver_id = erts_atom_put((byte *) drv->name,
+ sys_strlen(drv->name),
+ ERTS_ATOM_ENC_LATIN1, 1);
+
+ drv->lock = erts_alloc(ERTS_ALC_T_DRIVER_LOCK, sizeof(erts_mtx_t));
+
+ erts_mtx_init(drv->lock, "driver_lock", driver_id, ERTS_LOCK_FLAGS_CATEGORY_IO);
}
-#endif
drv->entry = de;
drv->start = de->start;
@@ -7332,12 +7570,12 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
drv->outputv = de->outputv;
drv->control = de->control;
drv->call = de->call;
- drv->event = de->event ? de->event : no_event_callback;
drv->ready_input = de->ready_input ? de->ready_input : no_ready_input_callback;
drv->ready_output = de->ready_output ? de->ready_output : no_ready_output_callback;
drv->timeout = de->timeout ? de->timeout : no_timeout_callback;
drv->ready_async = de->ready_async;
drv->process_exit = de->process_exit;
+ drv->emergency_close = IS_DRIVER_VERSION_GE(de,3,2) ? de->emergency_close : NULL;
if (de->stop_select)
drv->stop_select = de->stop_select;
else
@@ -7350,21 +7588,23 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
int fpe_was_unmasked = erts_block_fpe();
DTRACE4(driver_init, drv->name, drv->version.major, drv->version.minor,
drv->flags);
+ LTTNG4(driver_init, drv->name, drv->version.major, drv->version.minor,
+ drv->flags);
res = (*de->init)();
erts_unblock_fpe(fpe_was_unmasked);
return res;
}
}
+#undef IS_DRIVER_VERSION_GE
+
void
erts_destroy_driver(erts_driver_t *drv)
{
-#ifdef ERTS_SMP
if (drv->lock) {
- erts_smp_mtx_destroy(drv->lock);
+ erts_mtx_destroy(drv->lock);
erts_free(ERTS_ALC_T_DRIVER_LOCK, drv->lock);
}
-#endif
erts_free(ERTS_ALC_T_DRIVER, drv);
}
@@ -7375,7 +7615,7 @@ erts_destroy_driver(erts_driver_t *drv)
void add_driver_entry(ErlDrvEntry *drv){
void *rec_lock;
- rec_lock = erts_smp_tsd_get(driver_list_lock_status_key);
+ rec_lock = erts_tsd_get(driver_list_lock_status_key);
/*
* Ignore result of erts_add_driver_entry, the init is not
* allowed to fail when drivers are added by drivers.
@@ -7389,7 +7629,7 @@ int erts_add_driver_entry(ErlDrvEntry *de, DE_Handle *handle, int driver_list_lo
int res;
if (!driver_list_locked) {
- erts_smp_rwmtx_rwlock(&erts_driver_list_lock);
+ erts_rwmtx_rwlock(&erts_driver_list_lock);
}
dp->next = driver_list;
@@ -7400,7 +7640,7 @@ int erts_add_driver_entry(ErlDrvEntry *de, DE_Handle *handle, int driver_list_lo
driver_list = dp;
if (!driver_list_locked) {
- erts_smp_tsd_set(driver_list_lock_status_key, (void *) 1);
+ erts_tsd_set(driver_list_lock_status_key, (void *) 1);
}
res = init_driver(dp, de, handle);
@@ -7417,8 +7657,8 @@ int erts_add_driver_entry(ErlDrvEntry *de, DE_Handle *handle, int driver_list_lo
}
if (!driver_list_locked) {
- erts_smp_tsd_set(driver_list_lock_status_key, NULL);
- erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
+ erts_tsd_set(driver_list_lock_status_key, NULL);
+ erts_rwmtx_rwunlock(&erts_driver_list_lock);
}
return res;
}
@@ -7429,9 +7669,9 @@ int remove_driver_entry(ErlDrvEntry *drv)
erts_driver_t *dp;
void *rec_lock;
- rec_lock = erts_smp_tsd_get(driver_list_lock_status_key);
+ rec_lock = erts_tsd_get(driver_list_lock_status_key);
if (rec_lock == NULL) {
- erts_smp_rwmtx_rwlock(&erts_driver_list_lock);
+ erts_rwmtx_rwlock(&erts_driver_list_lock);
}
dp = driver_list;
while (dp && dp->entry != drv)
@@ -7439,7 +7679,7 @@ int remove_driver_entry(ErlDrvEntry *drv)
if (dp) {
if (dp->handle) {
if (rec_lock == NULL) {
- erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
+ erts_rwmtx_rwunlock(&erts_driver_list_lock);
}
return -1;
}
@@ -7453,12 +7693,12 @@ int remove_driver_entry(ErlDrvEntry *drv)
}
erts_destroy_driver(dp);
if (rec_lock == NULL) {
- erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
+ erts_rwmtx_rwunlock(&erts_driver_list_lock);
}
return 1;
}
if (rec_lock == NULL) {
- erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
+ erts_rwmtx_rwunlock(&erts_driver_list_lock);
}
return 0;
}
@@ -7472,15 +7712,15 @@ int null_func(void)
}
int
-erl_drv_putenv(char *key, char *value)
+erl_drv_putenv(const char *key, char *value)
{
- return erts_sys_putenv_raw(key, value);
+ return erts_sys_putenv_raw((char*)key, value);
}
int
-erl_drv_getenv(char *key, char *value, size_t *value_size)
+erl_drv_getenv(const char *key, char *value, size_t *value_size)
{
- return erts_sys_getenv_raw(key, value, value_size);
+ return erts_sys_getenv_raw((char*)key, value, value_size);
}
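/*
 * Sketch of the erl_drv_getenv() sizing convention (the caller code is
 * an assumption): per the documented convention, 0 means success, a
 * value > 0 means the buffer was too small and *value_size now holds the
 * needed size, and a value < 0 means the variable does not exist.
 */
#include "erl_driver.h"
#include <stdlib.h>

static char *getenv_copy(const char *key)
{
    size_t sz = 64;
    char *buf = malloc(sz);
    int res;

    if (!buf)
        return NULL;
    res = erl_drv_getenv(key, buf, &sz);
    if (res > 0) {                       /* retry with the reported size */
        char *bigger = realloc(buf, sz);
        if (!bigger) { free(buf); return NULL; }
        buf = bigger;
        res = erl_drv_getenv(key, buf, &sz);
    }
    if (res != 0) { free(buf); return NULL; }
    return buf;
}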
/* get heart_port
@@ -7499,7 +7739,7 @@ Port *erts_get_heart_port(void)
if (!port)
continue;
/* only examine undead or alive ports */
- if (erts_atomic32_read_nob(&port->state) & ERTS_PORT_SFLGS_DEAD)
+ if (erts_atomic32_read_nob(&port->state) & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
continue;
/* immediate atom compare */
reg = port->common.u.alive.reg;
@@ -7510,3 +7750,23 @@ Port *erts_get_heart_port(void)
return NULL;
}
+
+void erts_emergency_close_ports(void)
+{
+ int ix, max = erts_ptab_max(&erts_port);
+
+ for (ix = 0; ix < max; ix++) {
+ Port *port = erts_pix2port(ix);
+
+ if (!port)
+ continue;
+ /* only examine undead or alive ports */
+ if (erts_atomic32_read_nob(&port->state) & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
+ continue;
+
+ /* emergency close socket */
+ if (port->drv_ptr->emergency_close) {
+ port->drv_ptr->emergency_close((ErlDrvData) port->drv_data);
+ }
+ }
+}
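/*
 * Sketch of the emergency_close callback that erts_emergency_close_ports()
 * above invokes; init_driver() later in this patch only picks it up from
 * drivers declaring version 3.2 or later. The driver-data layout and the
 * callback name are assumptions for illustration.
 */
#include "erl_driver.h"
#include <unistd.h>

typedef struct { int fd; } my_data;

static void my_emergency_close(ErlDrvData drv_data)
{
    my_data *d = (my_data *) drv_data;
    /* Called on abrupt emulator exit: just release the OS resource;
     * no port lock should be assumed here, so keep it minimal. */
    if (d->fd >= 0)
        close(d->fd);
}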
diff --git a/erts/emulator/beam/lttng-wrapper.h b/erts/emulator/beam/lttng-wrapper.h
new file mode 100644
index 0000000000..0bc75c1552
--- /dev/null
+++ b/erts/emulator/beam/lttng-wrapper.h
@@ -0,0 +1,107 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef __LTTNG_WRAPPER_H__
+#define __LTTNG_WRAPPER_H__
+
+#ifdef USE_LTTNG
+
+#include "erlang_lttng.h"
+#define USE_LTTNG_VM_TRACEPOINTS
+
+#define LTTNG_BUFFER_SZ (256)
+#define LTTNG_PROC_BUFFER_SZ (16)
+#define LTTNG_PORT_BUFFER_SZ (20)
+#define LTTNG_MFA_BUFFER_SZ (256)
+
+#define lttng_decl_procbuf(Name) \
+ char Name[LTTNG_PROC_BUFFER_SZ]
+
+#define lttng_decl_portbuf(Name) \
+ char Name[LTTNG_PORT_BUFFER_SZ]
+
+#define lttng_decl_mfabuf(Name) \
+ char Name[LTTNG_MFA_BUFFER_SZ]
+
+#define lttng_decl_carrier_stats(Name) \
+ lttng_carrier_stats_t Name##_STATSTRUCT, *Name = &Name##_STATSTRUCT
+
+#define lttng_pid_to_str(pid, name) \
+ erts_snprintf(name, LTTNG_PROC_BUFFER_SZ, "%T", (pid))
+
+#define lttng_portid_to_str(pid, name) \
+ erts_snprintf(name, LTTNG_PORT_BUFFER_SZ, "%T", (pid))
+
+#define lttng_proc_to_str(p, name) \
+ lttng_pid_to_str(((p) ? (p)->common.id : ERTS_INVALID_PID), name)
+
+#define lttng_port_to_str(p, name) \
+ lttng_portid_to_str(((p) ? (p)->common.id : ERTS_INVALID_PORT), name)
+
+#define lttng_mfa_to_str(m,f,a, Name) \
+ erts_snprintf(Name, LTTNG_MFA_BUFFER_SZ, "%T:%T/%lu", (Eterm)(m), (Eterm)(f), (Uint)(a))
+
+#define lttng_proc_to_mfa_str(p, Name) \
+ do { \
+ if (ERTS_PROC_IS_EXITING((p))) { \
+ strcpy(Name, "<exiting>"); \
+ } else { \
+ BeamInstr *_fptr = find_function_from_pc((p)->i); \
+ if (_fptr) { \
+ lttng_mfa_to_str(_fptr[0],_fptr[1],_fptr[2], Name); \
+ } else { \
+ strcpy(Name, "<unknown>"); \
+ } \
+ } \
+ } while(0)
+
+/* ErtsRunQueue->ErtsSchedulerData->Uint */
+#define lttng_rq_to_id(RQ) \
+ (RQ)->scheduler->no
+
+#define LTTNG_ENABLED(Name) \
+ tracepoint_enabled(org_erlang_otp, Name)
+
+/* include a special LTTNG_DO for do_tracepoint ? */
+#define LTTNG1(Name, Arg1) \
+ tracepoint(org_erlang_otp, Name, (Arg1))
+
+#define LTTNG2(Name, Arg1, Arg2) \
+ tracepoint(org_erlang_otp, Name, (Arg1), (Arg2))
+
+#define LTTNG3(Name, Arg1, Arg2, Arg3) \
+ tracepoint(org_erlang_otp, Name, (Arg1), (Arg2), (Arg3))
+
+#define LTTNG4(Name, Arg1, Arg2, Arg3, Arg4) \
+ tracepoint(org_erlang_otp, Name, (Arg1), (Arg2), (Arg3), (Arg4))
+
+#define LTTNG5(Name, Arg1, Arg2, Arg3, Arg4, Arg5) \
+ tracepoint(org_erlang_otp, Name, (Arg1), (Arg2), (Arg3), (Arg4), (Arg5))
+
+#else /* USE_LTTNG */
+
+#define LTTNG1(Name, Arg1) do {} while(0)
+#define LTTNG2(Name, Arg1, Arg2) do {} while(0)
+#define LTTNG3(Name, Arg1, Arg2, Arg3) do {} while(0)
+#define LTTNG4(Name, Arg1, Arg2, Arg3, Arg4) do {} while(0)
+#define LTTNG5(Name, Arg1, Arg2, Arg3, Arg4, Arg5) do {} while(0)
+
+#endif /* USE_LTTNG */
+#endif /* __LTTNG_WRAPPER_H__ */
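/*
 * Hypothetical call site ("my_port_event" would have to be declared in
 * erlang_lttng.h; it is not a real tracepoint). The LTTNG_ENABLED()
 * guard keeps the erts_snprintf() work inside the helper macros off the
 * fast path whenever the tracepoint is inactive.
 */
static void trace_port_event(Port *prt)
{
#ifdef USE_LTTNG_VM_TRACEPOINTS
    if (LTTNG_ENABLED(my_port_event)) {
        lttng_decl_portbuf(port_str);
        lttng_port_to_str(prt, port_str);
        LTTNG2(my_port_event, port_str, prt->name);
    }
#endif
    (void) prt;
}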
diff --git a/erts/emulator/beam/macros.tab b/erts/emulator/beam/macros.tab
new file mode 100644
index 0000000000..e0b5f56b53
--- /dev/null
+++ b/erts/emulator/beam/macros.tab
@@ -0,0 +1,174 @@
+// -*- c -*-
+//
+// %CopyrightBegin%
+//
+// Copyright Ericsson AB 2017. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// %CopyrightEnd%
+//
+
+//
+// Use when a garbage collection may have occurred before storing
+// to a general destination (either an X or a Y register).
+//
+
+REFRESH_GEN_DEST() {
+ dst_ptr = REG_TARGET_PTR(dst);
+}
+
+// $Offset is relative to the start of the instruction (not to the
+// location of the failure label reference). Since combined
+// instructions may increment the instruction pointer (e.g. in
+// 'increment') for some of the instructions in the group, we actually
+// use a virtual start position common to all instructions in the
+// group. To calculate the correct virtual position, we will need to
+// add $IP_ADJUSTMENT to the offset. ($IP_ADJUSTMENT will usually be
+// zero, except in a few bit syntax instructions.)
+
+SET_I_REL(Offset) {
+ ASSERT(VALID_INSTR(*(I + ($Offset) + $IP_ADJUSTMENT)));
+ I += $Offset + $IP_ADJUSTMENT;
+}
+
+SET_CP_I_ABS(Target) {
+ c_p->i = $Target;
+ ASSERT(VALID_INSTR(*c_p->i));
+}
+
+SET_REL_I(Dst, Offset) {
+ $Dst = I + ($Offset);
+ ASSERT(VALID_INSTR(*$Dst));
+}
+
+FAIL(Fail) {
+ //| -no_prefetch
+ $SET_I_REL($Fail);
+ Goto(*I);
+}
+
+JUMP(Fail) {
+ //| -no_next
+ $SET_I_REL($Fail);
+ Goto(*I);
+}
+
+GC_TEST(Ns, Nh, Live) {
+ Uint need = $Nh + $Ns;
+ if (ERTS_UNLIKELY(E - HTOP < need)) {
+ SWAPOUT;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ FCALLS -= erts_garbage_collect_nobump(c_p, need, reg, $Live, FCALLS);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ SWAPIN;
+ }
+ HEAP_SPACE_VERIFIED($Nh);
+}
+
+GC_TEST_PRESERVE(NeedHeap, Live, PreserveTerm) {
+ Uint need = $NeedHeap;
+ if (ERTS_UNLIKELY(E - HTOP < need)) {
+ SWAPOUT;
+ reg[$Live] = $PreserveTerm;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ FCALLS -= erts_garbage_collect_nobump(c_p, need, reg, $Live+1, FCALLS);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ $PreserveTerm = reg[$Live];
+ SWAPIN;
+ }
+ HEAP_SPACE_VERIFIED($NeedHeap);
+}
+
+
+// Make sure that there are NeedStack + NeedHeap + 1 words available
+// on the combined heap/stack segment, then allocate NeedStack + 1
+// words on the stack and save CP.
+AH(NeedStack, NeedHeap, Live) {
+ unsigned needed = $NeedStack + 1;
+ $GC_TEST(needed, $NeedHeap, $Live);
+ E -= needed;
+ *E = make_cp(c_p->cp);
+ c_p->cp = 0;
+}
+
+NEXT0() {
+ //| -no_next
+ SET_I((BeamInstr *) $NEXT_INSTRUCTION);
+ Goto(*I);
+}
+
+NEXT(Addr) {
+ //| -no_next
+ SET_I((BeamInstr *) $Addr);
+ Goto(*I);
+}
+
+FAIL_BODY() {
+ //| -no_prefetch
+ goto find_func_info;
+}
+
+FAIL_HEAD_OR_BODY(Fail) {
+ //| -no_prefetch
+
+ /*
+ * In a correctly working program, we expect failures in
+ * guards to be more likely than failures in bodies.
+ */
+
+ if (ERTS_LIKELY($Fail)) {
+ $FAIL($Fail);
+ }
+ goto find_func_info;
+}
+
+BADARG(Fail) {
+ c_p->freason = BADARG;
+ $FAIL_HEAD_OR_BODY($Fail);
+}
+
+BADARITH0() {
+ c_p->freason = BADARITH;
+ goto find_func_info;
+}
+
+SYSTEM_LIMIT(Fail) {
+ c_p->freason = SYSTEM_LIMIT;
+ $FAIL_HEAD_OR_BODY($Fail);
+}
+
+BIF_ERROR_ARITY_1(Fail, BIF, Op1) {
+ //| -no_prefetch
+ if (ERTS_LIKELY($Fail)) {
+ $FAIL($Fail);
+ }
+ reg[0] = $Op1;
+ SWAPOUT;
+ I = handle_error(c_p, I, reg, &bif_export[$BIF]->info.mfa);
+ goto post_error_handling;
+}
+
+BIF_ERROR_ARITY_2(Fail, BIF, Op1, Op2) {
+ //| -no_prefetch
+ if (ERTS_LIKELY($Fail)) {
+ $FAIL($Fail);
+ }
+ reg[0] = $Op1;
+ reg[1] = $Op2;
+ SWAPOUT;
+ I = handle_error(c_p, I, reg, &bif_export[$BIF]->info.mfa);
+ goto post_error_handling;
+}
diff --git a/erts/emulator/beam/map_instrs.tab b/erts/emulator/beam/map_instrs.tab
new file mode 100644
index 0000000000..bbb2f49b66
--- /dev/null
+++ b/erts/emulator/beam/map_instrs.tab
@@ -0,0 +1,159 @@
+// -*- c -*-
+//
+// %CopyrightBegin%
+//
+// Copyright Ericsson AB 2017. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// %CopyrightEnd%
+//
+
+ensure_map(Map) {
+ if (is_not_map($Map)) {
+ c_p->freason = BADMAP;
+ c_p->fvalue = $Map;
+ $FAIL_BODY();
+ }
+}
+
+new_map(Dst, Live, N) {
+ Eterm res;
+
+ HEAVY_SWAPOUT;
+ res = new_map(c_p, reg, $Live, $N, $NEXT_INSTRUCTION);
+ HEAVY_SWAPIN;
+ $REFRESH_GEN_DEST();
+ $Dst = res;
+ $NEXT($NEXT_INSTRUCTION+$N);
+}
+
+i_new_small_map_lit(Dst, Live, Keys) {
+ Eterm res;
+ Uint n;
+ Eterm keys = $Keys;
+
+ HEAVY_SWAPOUT;
+ res = new_small_map_lit(c_p, reg, keys, $Live, $NEXT_INSTRUCTION);
+ HEAVY_SWAPIN;
+ $REFRESH_GEN_DEST();
+ $Dst = res;
+ n = arityval(*tuple_val(keys));
+ $NEXT($NEXT_INSTRUCTION+n);
+}
+
+i_get_map_element(Fail, Src, Key, Dst) {
+ Eterm res = get_map_element($Src, $Key);
+ if (is_non_value(res)) {
+ $FAIL($Fail);
+ }
+ $Dst = res;
+}
+
+i_get_map_element_hash(Fail, Src, Key, Hx, Dst) {
+ Eterm res = get_map_element_hash($Src, $Key, $Hx);
+ if (is_non_value(res)) {
+ $FAIL($Fail);
+ }
+ $Dst = res;
+}
+
+i_get_map_elements(Fail, Src, N) {
+ Eterm map;
+ BeamInstr *fs;
+ Uint sz, n;
+
+ map = $Src;
+
+    /* This instruction assumes that Arg1 is a map, i.e. that it is
+     * preceded by an is_map test when one is needed.
+     */
+
+ n = (Uint)$N / 3;
+ fs = $NEXT_INSTRUCTION;
+
+ if (is_flatmap(map)) {
+ flatmap_t *mp;
+ Eterm *ks;
+ Eterm *vs;
+
+ mp = (flatmap_t *)flatmap_val(map);
+ sz = flatmap_get_size(mp);
+
+ if (sz == 0) {
+ $FAIL($Fail);
+ }
+
+ ks = flatmap_get_keys(mp);
+ vs = flatmap_get_values(mp);
+
+ while(sz) {
+ if (EQ((Eterm) fs[0], *ks)) {
+ PUT_TERM_REG(*vs, fs[1]);
+ n--;
+ fs += 3;
+ /* no more values to fetch, we are done */
+ if (n == 0) {
+ $NEXT(fs);
+ }
+ }
+ ks++, sz--, vs++;
+ }
+ $FAIL($Fail);
+ } else {
+ const Eterm *v;
+ Uint32 hx;
+ ASSERT(is_hashmap(map));
+ while(n--) {
+ hx = fs[2];
+ ASSERT(hx == hashmap_make_hash((Eterm)fs[0]));
+ if ((v = erts_hashmap_get(hx, (Eterm)fs[0], map)) == NULL) {
+ $FAIL($Fail);
+ }
+ PUT_TERM_REG(*v, fs[1]);
+ fs += 3;
+ }
+ $NEXT(fs);
+ }
+}
+
+update_map_assoc(Src, Dst, Live, N) {
+ Eterm res;
+ Uint live = $Live;
+
+ reg[live] = $Src;
+ HEAVY_SWAPOUT;
+ res = update_map_assoc(c_p, reg, live, $N, $NEXT_INSTRUCTION);
+ HEAVY_SWAPIN;
+ ASSERT(is_value(res));
+ $REFRESH_GEN_DEST();
+ $Dst = res;
+ $NEXT($NEXT_INSTRUCTION+$N);
+}
+
+update_map_exact(Fail, Src, Dst, Live, N) {
+ Eterm res;
+ Uint live = $Live;
+
+ reg[live] = $Src;
+ HEAVY_SWAPOUT;
+ res = update_map_exact(c_p, reg, live, $N, $NEXT_INSTRUCTION);
+ HEAVY_SWAPIN;
+ if (is_value(res)) {
+ $REFRESH_GEN_DEST();
+ $Dst = res;
+ $NEXT($NEXT_INSTRUCTION+$N);
+ } else {
+ $FAIL_HEAD_OR_BODY($Fail);
+ }
+}
diff --git a/erts/emulator/beam/module.c b/erts/emulator/beam/module.c
index daa6e136c5..1712dc803c 100644
--- a/erts/emulator/beam/module.c
+++ b/erts/emulator/beam/module.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -25,6 +26,7 @@
#include "erl_vm.h"
#include "global.h"
#include "module.h"
+#include "beam_catches.h"
#ifdef DEBUG
# define IF_DEBUG(x) x
@@ -37,9 +39,9 @@
static IndexTable module_tables[ERTS_NUM_CODE_IX];
-erts_smp_rwmtx_t the_old_code_rwlocks[ERTS_NUM_CODE_IX];
+erts_rwmtx_t the_old_code_rwlocks[ERTS_NUM_CODE_IX];
-static erts_smp_atomic_t tot_module_bytes;
+static erts_atomic_t tot_module_bytes;
/* SMP note: Active module table lookup and current module instance can be
* read without any locks. Old module instances are protected by
@@ -47,9 +49,7 @@ static erts_smp_atomic_t tot_module_bytes;
* Staging table is protected by the "code_ix lock".
*/
-#include "erl_smp.h"
-
-void module_info(int to, void *to_arg)
+void module_info(fmtfn_t to, void *to_arg)
{
index_info(to, to_arg, &module_tables[erts_active_code_ix()]);
}
@@ -66,31 +66,37 @@ static int module_cmp(Module* tmpl, Module* obj)
return tmpl->module != obj->module;
}
+void erts_module_instance_init(struct erl_module_instance* modi)
+{
+ modi->code_hdr = 0;
+ modi->code_length = 0;
+ modi->catches = BEAM_CATCHES_NIL;
+ modi->nif = NULL;
+ modi->num_breakpoints = 0;
+ modi->num_traced_exports = 0;
+#ifdef HIPE
+ modi->hipe_code = NULL;
+#endif
+}
static Module* module_alloc(Module* tmpl)
{
Module* obj = (Module*) erts_alloc(ERTS_ALC_T_MODULE, sizeof(Module));
- erts_smp_atomic_add_nob(&tot_module_bytes, sizeof(Module));
+ erts_atomic_add_nob(&tot_module_bytes, sizeof(Module));
obj->module = tmpl->module;
- obj->curr.code = 0;
- obj->old.code = 0;
- obj->curr.code_length = 0;
- obj->old.code_length = 0;
obj->slot.index = -1;
- obj->curr.nif = NULL;
- obj->old.nif = NULL;
- obj->curr.num_breakpoints = 0;
- obj->old.num_breakpoints = 0;
- obj->curr.num_traced_exports = 0;
- obj->old.num_traced_exports = 0;
+ erts_module_instance_init(&obj->curr);
+ erts_module_instance_init(&obj->old);
+ obj->on_load = 0;
+ DBG_TRACE_MFA(make_atom(obj->module), 0, 0, "module_alloc");
return obj;
}
static void module_free(Module* mod)
{
erts_free(ERTS_ALC_T_MODULE, mod);
- erts_smp_atomic_add_nob(&tot_module_bytes, -sizeof(Module));
+ erts_atomic_add_nob(&tot_module_bytes, -sizeof(Module));
}
void init_module_table(void)
@@ -102,6 +108,9 @@ void init_module_table(void)
f.cmp = (HCMP_FUN) module_cmp;
f.alloc = (HALLOC_FUN) module_alloc;
f.free = (HFREE_FUN) module_free;
+ f.meta_alloc = (HMALLOC_FUN) erts_alloc;
+ f.meta_free = (HMFREE_FUN) erts_free;
+ f.meta_print = (HMPRINT_FUN) erts_print;
for (i = 0; i < ERTS_NUM_CODE_IX; i++) {
erts_index_init(ERTS_ALC_T_MODULE_TABLE, &module_tables[i], "module_code",
@@ -109,11 +118,13 @@ void init_module_table(void)
}
for (i=0; i<ERTS_NUM_CODE_IX; i++) {
- erts_smp_rwmtx_init_x(&the_old_code_rwlocks[i], "old_code", make_small(i));
+ erts_rwmtx_init(&the_old_code_rwlocks[i], "old_code", make_small(i),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
}
- erts_smp_atomic_init_nob(&tot_module_bytes, 0);
+ erts_atomic_init_nob(&tot_module_bytes, 0);
}
+
Module*
erts_get_module(Eterm mod, ErtsCodeIndex code_ix)
{
@@ -134,27 +145,31 @@ erts_get_module(Eterm mod, ErtsCodeIndex code_ix)
}
}
-Module*
-erts_put_module(Eterm mod)
+
+static Module* put_module(Eterm mod, IndexTable* mod_tab)
{
Module e;
- IndexTable* mod_tab;
int oldsz, newsz;
Module* res;
ASSERT(is_atom(mod));
- ERTS_SMP_LC_ASSERT(erts_initialized == 0
- || erts_has_code_write_permission());
-
- mod_tab = &module_tables[erts_staging_code_ix()];
e.module = atom_val(mod);
oldsz = index_table_sz(mod_tab);
res = (Module*) index_put_entry(mod_tab, (void*) &e);
newsz = index_table_sz(mod_tab);
- erts_smp_atomic_add_nob(&tot_module_bytes, (newsz - oldsz));
+ erts_atomic_add_nob(&tot_module_bytes, (newsz - oldsz));
return res;
}
+Module*
+erts_put_module(Eterm mod)
+{
+ ERTS_LC_ASSERT(erts_initialized == 0
+ || erts_has_code_write_permission());
+
+ return put_module(mod, &module_tables[erts_staging_code_ix()]);
+}
+
Module *module_code(int i, ErtsCodeIndex code_ix)
{
return (Module*) erts_index_lookup(&module_tables[code_ix], i);
@@ -167,7 +182,7 @@ int module_code_size(ErtsCodeIndex code_ix)
int module_table_sz(void)
{
- return erts_smp_atomic_read_nob(&tot_module_bytes);
+ return erts_atomic_read_nob(&tot_module_bytes);
}
#ifdef DEBUG
@@ -176,6 +191,13 @@ static ErtsCodeIndex dbg_load_code_ix = 0;
static int entries_at_start_staging = 0;
+static ERTS_INLINE void copy_module(Module* dst_mod, Module* src_mod)
+{
+ dst_mod->curr = src_mod->curr;
+ dst_mod->old = src_mod->old;
+ dst_mod->on_load = src_mod->on_load;
+}
+
void module_start_staging(void)
{
IndexTable* src = &module_tables[erts_active_code_ix()];
@@ -194,9 +216,7 @@ void module_start_staging(void)
src_mod = (Module*) erts_index_lookup(src, i);
dst_mod = (Module*) erts_index_lookup(dst, i);
ASSERT(src_mod->module == dst_mod->module);
-
- dst_mod->curr = src_mod->curr;
- dst_mod->old = src_mod->old;
+ copy_module(dst_mod, src_mod);
}
/*
@@ -208,11 +228,10 @@ void module_start_staging(void)
dst_mod = (Module*) index_put_entry(dst, src_mod);
ASSERT(dst_mod != src_mod);
- dst_mod->curr = src_mod->curr;
- dst_mod->old = src_mod->old;
+ copy_module(dst_mod, src_mod);
}
newsz = index_table_sz(dst);
- erts_smp_atomic_add_nob(&tot_module_bytes, (newsz - oldsz));
+ erts_atomic_add_nob(&tot_module_bytes, (newsz - oldsz));
entries_at_start_staging = dst->entries;
IF_DEBUG(dbg_load_code_ix = erts_staging_code_ix());
@@ -230,9 +249,8 @@ void module_end_staging(int commit)
oldsz = index_table_sz(tab);
index_erase_latest_from(tab, entries_at_start_staging);
newsz = index_table_sz(tab);
- erts_smp_atomic_add_nob(&tot_module_bytes, (newsz - oldsz));
+ erts_atomic_add_nob(&tot_module_bytes, (newsz - oldsz));
}
IF_DEBUG(dbg_load_code_ix = -1);
}
-
diff --git a/erts/emulator/beam/module.h b/erts/emulator/beam/module.h
index 5235528e98..a3f1ce1705 100644
--- a/erts/emulator/beam/module.h
+++ b/erts/emulator/beam/module.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -20,34 +21,42 @@
#ifndef __MODULE_H__
#define __MODULE_H__
-#ifndef __INDEX_H__
#include "index.h"
+
+#ifdef HIPE
+#include "hipe_module.h"
#endif
struct erl_module_instance {
- BeamInstr* code;
+ BeamCodeHeader* code_hdr;
int code_length; /* Length of loaded code in bytes. */
unsigned catches;
struct erl_module_nif* nif;
int num_breakpoints;
int num_traced_exports;
+#ifdef HIPE
+ HipeModule *hipe_code;
+#endif
};
typedef struct erl_module {
IndexSlot slot; /* Must be located at top of struct! */
int module; /* Atom index for module (not tagged). */
+ int seen; /* Used by finish_loading() */
struct erl_module_instance curr;
- struct erl_module_instance old; /* protected by "old_code" rwlock */
+    struct erl_module_instance old;  /* protected by "old_code" rwlock while active */
+ struct erl_module_instance* on_load;
} Module;
+void erts_module_instance_init(struct erl_module_instance* modi);
Module* erts_get_module(Eterm mod, ErtsCodeIndex code_ix);
Module* erts_put_module(Eterm mod);
void init_module_table(void);
void module_start_staging(void);
void module_end_staging(int commit);
-void module_info(int, void *);
+void module_info(fmtfn_t, void *);
Module *module_code(int, ErtsCodeIndex);
int module_code_size(ErtsCodeIndex);
@@ -63,29 +72,29 @@ int erts_is_old_code_rlocked(ErtsCodeIndex);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-extern erts_smp_rwmtx_t the_old_code_rwlocks[ERTS_NUM_CODE_IX];
+extern erts_rwmtx_t the_old_code_rwlocks[ERTS_NUM_CODE_IX];
ERTS_GLB_INLINE void erts_rwlock_old_code(ErtsCodeIndex code_ix)
{
- erts_smp_rwmtx_rwlock(&the_old_code_rwlocks[code_ix]);
+ erts_rwmtx_rwlock(&the_old_code_rwlocks[code_ix]);
}
ERTS_GLB_INLINE void erts_rwunlock_old_code(ErtsCodeIndex code_ix)
{
- erts_smp_rwmtx_rwunlock(&the_old_code_rwlocks[code_ix]);
+ erts_rwmtx_rwunlock(&the_old_code_rwlocks[code_ix]);
}
ERTS_GLB_INLINE void erts_rlock_old_code(ErtsCodeIndex code_ix)
{
- erts_smp_rwmtx_rlock(&the_old_code_rwlocks[code_ix]);
+ erts_rwmtx_rlock(&the_old_code_rwlocks[code_ix]);
}
ERTS_GLB_INLINE void erts_runlock_old_code(ErtsCodeIndex code_ix)
{
- erts_smp_rwmtx_runlock(&the_old_code_rwlocks[code_ix]);
+ erts_rwmtx_runlock(&the_old_code_rwlocks[code_ix]);
}
#ifdef ERTS_ENABLE_LOCK_CHECK
ERTS_GLB_INLINE int erts_is_old_code_rlocked(ErtsCodeIndex code_ix)
{
- return erts_smp_lc_rwmtx_is_rlocked(&the_old_code_rwlocks[code_ix]);
+ return erts_lc_rwmtx_is_rlocked(&the_old_code_rwlocks[code_ix]);
}
#endif
diff --git a/erts/emulator/beam/msg_instrs.tab b/erts/emulator/beam/msg_instrs.tab
new file mode 100644
index 0000000000..8055a8616f
--- /dev/null
+++ b/erts/emulator/beam/msg_instrs.tab
@@ -0,0 +1,390 @@
+// -*- c -*-
+//
+// %CopyrightBegin%
+//
+// Copyright Ericsson AB 2017. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// %CopyrightEnd%
+//
+
+// /*
+// * Skeleton for receive statement:
+// *
+// * recv_mark L1 Optional
+// * call make_ref/monitor Optional
+// * ...
+// * recv_set L1 Optional
+// * L1: <-------------------+
+// * <-----------+ |
+// * | |
+// * loop_rec L2 ------+---+ |
+// * ... | | |
+// * remove_message | | |
+// * jump L3 | | |
+// * ... | | |
+// * loop_rec_end L1 --+ | |
+// * L2: <---------------+ |
+// * wait L1 -------------------+ or wait_timeout
+// * timeout
+// *
+// * L3: Code after receive...
+// *
+// */
+
+recv_mark(Dest) {
+ /*
+     * Save the current position in the message buffer and the
+     * label of the loop_rec/2 instruction for the receive
+     * statement.
+ */
+ $SET_REL_I(c_p->msg.mark, $Dest);
+ c_p->msg.saved_last = c_p->msg.last;
+}
+
+i_recv_set() {
+ /*
+ * If the mark is valid (points to the loop_rec/2
+ * instruction that follows), we know that the saved
+ * position points to the first message that could
+ * possibly be matched out.
+ *
+ * If the mark is invalid, we do nothing, meaning that
+ * we will look through all messages in the message queue.
+ */
+ if (c_p->msg.mark == (BeamInstr *) ($NEXT_INSTRUCTION)) {
+ c_p->msg.save = c_p->msg.saved_last;
+ }
+ SET_I($NEXT_INSTRUCTION);
+ goto loop_rec_top__;
+ //| -no_next
+}
+
+i_loop_rec(Dest) {
+ //| -no_prefetch
+
+ /*
+ * Pick up the next message and place it in x(0).
+ * If no message, jump to a wait or wait_timeout instruction.
+ */
+
+ ErtsMessage* msgp;
+
+ /* Entry point from recv_set */
+ loop_rec_top__:
+ ;
+
+ /*
+ * We need to disable GC while matching messages
+ * in the queue. This since messages with data outside
+ * the heap will be corrupted by a GC.
+ */
+ ASSERT(!(c_p->flags & F_DELAY_GC));
+ c_p->flags |= F_DELAY_GC;
+
+ /* Entry point from loop_rec_end */
+ loop_rec__:
+
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+
+ msgp = PEEK_MESSAGE(c_p);
+
+ if (!msgp) {
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+	/* Make sure messages won't pass exit signals... */
+ if (ERTS_PROC_PENDING_EXIT(c_p)) {
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ SWAPOUT;
+ c_p->flags &= ~F_DELAY_GC;
+ c_p->arity = 0;
+ goto do_schedule; /* Will be rescheduled for exit */
+ }
+ ERTS_MSGQ_MV_INQ2PRIVQ(c_p);
+ msgp = PEEK_MESSAGE(c_p);
+ if (msgp) {
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ } else {
+ c_p->flags &= ~F_DELAY_GC;
+ $SET_I_REL($Dest);
+ Goto(*I); /* Jump to a wait or wait_timeout instruction */
+ }
+ }
+ if (is_non_value(ERL_MESSAGE_TERM(msgp))) {
+ SWAPOUT; /* erts_decode_dist_message() may write to heap... */
+ if (!erts_decode_dist_message(c_p, ERTS_PROC_LOCK_MAIN, msgp, 0)) {
+ /*
+ * A corrupt distribution message that we weren't able to decode;
+ * remove it...
+ */
+ /* No swapin should be needed */
+ ASSERT(HTOP == c_p->htop && E == c_p->stop);
+ /* TODO: Add DTrace probe for this bad message situation? */
+ UNLINK_MESSAGE(c_p, msgp);
+ msgp->next = NULL;
+ erts_cleanup_messages(msgp);
+ goto loop_rec__;
+ }
+ SWAPIN;
+ }
+ r(0) = ERL_MESSAGE_TERM(msgp);
+}
+
+remove_message() {
+ //| -no_prefetch
+
+ /*
+ * Remove a (matched) message from the message queue.
+ */
+
+ ErtsMessage* msgp;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+
+ ERTS_CHK_MBUF_SZ(c_p);
+
+ msgp = PEEK_MESSAGE(c_p);
+
+ if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) {
+ save_calls(c_p, &exp_receive);
+ }
+ if (ERL_MESSAGE_TOKEN(msgp) == NIL) {
+#ifdef USE_VM_PROBES
+ if (DT_UTAG(c_p) != NIL) {
+ if (DT_UTAG_FLAGS(c_p) & DT_UTAG_PERMANENT) {
+ SEQ_TRACE_TOKEN(c_p) = am_have_dt_utag;
+ } else {
+ DT_UTAG(c_p) = NIL;
+ SEQ_TRACE_TOKEN(c_p) = NIL;
+ }
+ } else {
+#endif
+ SEQ_TRACE_TOKEN(c_p) = NIL;
+#ifdef USE_VM_PROBES
+ }
+ DT_UTAG_FLAGS(c_p) &= ~DT_UTAG_SPREADING;
+#endif
+ } else if (ERL_MESSAGE_TOKEN(msgp) != am_undefined) {
+ Eterm msg;
+ SEQ_TRACE_TOKEN(c_p) = ERL_MESSAGE_TOKEN(msgp);
+#ifdef USE_VM_PROBES
+ if (ERL_MESSAGE_TOKEN(msgp) == am_have_dt_utag) {
+ if (DT_UTAG(c_p) == NIL) {
+ DT_UTAG(c_p) = ERL_MESSAGE_DT_UTAG(msgp);
+ }
+ DT_UTAG_FLAGS(c_p) |= DT_UTAG_SPREADING;
+ } else {
+#endif
+ ASSERT(is_tuple(SEQ_TRACE_TOKEN(c_p)));
+ ASSERT(SEQ_TRACE_TOKEN_ARITY(c_p) == 5);
+ ASSERT(is_small(SEQ_TRACE_TOKEN_SERIAL(c_p)));
+ ASSERT(is_small(SEQ_TRACE_TOKEN_LASTCNT(c_p)));
+ ASSERT(is_small(SEQ_TRACE_TOKEN_FLAGS(c_p)));
+ ASSERT(is_pid(SEQ_TRACE_TOKEN_SENDER(c_p)));
+ c_p->seq_trace_lastcnt = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p));
+ if (c_p->seq_trace_clock < unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p))) {
+ c_p->seq_trace_clock = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p));
+ }
+ msg = ERL_MESSAGE_TERM(msgp);
+ seq_trace_output(SEQ_TRACE_TOKEN(c_p), msg, SEQ_TRACE_RECEIVE,
+ c_p->common.id, c_p);
+#ifdef USE_VM_PROBES
+ }
+#endif
+ }
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(message_receive)) {
+ Eterm token2 = NIL;
+ DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
+ Sint tok_label = 0;
+ Sint tok_lastcnt = 0;
+ Sint tok_serial = 0;
+
+ dtrace_proc_str(c_p, receiver_name);
+ token2 = SEQ_TRACE_TOKEN(c_p);
+ if (have_seqtrace(token2)) {
+ tok_label = signed_val(SEQ_TRACE_T_LABEL(token2));
+ tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token2));
+ tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token2));
+ }
+ DTRACE6(message_receive,
+ receiver_name, size_object(ERL_MESSAGE_TERM(msgp)),
+ c_p->msg.len - 1, tok_label, tok_lastcnt, tok_serial);
+ }
+#endif
+ UNLINK_MESSAGE(c_p, msgp);
+ JOIN_MESSAGE(c_p);
+ CANCEL_TIMER(c_p);
+
+ erts_save_message_in_proc(c_p, msgp);
+ c_p->flags &= ~F_DELAY_GC;
+
+ if (ERTS_IS_GC_DESIRED_INTERNAL(c_p, HTOP, E)) {
+ /*
+ * We want to GC soon but we leave a few
+ * reductions giving the message some time
+ * to turn into garbage.
+ */
+ ERTS_VBUMP_LEAVE_REDS_INTERNAL(c_p, 5, FCALLS);
+ }
+
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
+ ERTS_CHK_MBUF_SZ(c_p);
+
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+}
+
+loop_rec_end(Dest) {
+ //| -no_next
+ /*
+ * Advance the save pointer to the next message (the current
+ * message didn't match), then jump to the loop_rec instruction.
+ */
+
+ ASSERT(c_p->flags & F_DELAY_GC);
+
+ $SET_I_REL($Dest);
+ SAVE_MESSAGE(c_p);
+ if (FCALLS > 0 || FCALLS > neg_o_reds) {
+ FCALLS--;
+ goto loop_rec__;
+ }
+
+ c_p->flags &= ~F_DELAY_GC;
+ $SET_CP_I_ABS(I);
+ SWAPOUT;
+ c_p->arity = 0;
+ c_p->current = NULL;
+ goto do_schedule;
+}
+
+timeout_locked() {
+ /*
+ * A timeout has occurred. Reset the save pointer so that the next
+ * receive statement will examine the first message first.
+ */
+
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ $timeout();
+}
+
+timeout() {
+ if (IS_TRACED_FL(c_p, F_TRACE_RECEIVE)) {
+ trace_receive(c_p, am_clock_service, am_timeout, NULL);
+ }
+ if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) {
+ save_calls(c_p, &exp_timeout);
+ }
+ c_p->flags &= ~F_TIMO;
+ JOIN_MESSAGE(c_p);
+}
+
+TIMEOUT_VALUE() {
+ c_p->freason = EXC_TIMEOUT_VALUE;
+ goto find_func_info;
+ //| -no_next
+}
+
+i_wait_error_locked() {
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ $TIMEOUT_VALUE();
+}
+
+i_wait_error() {
+ $TIMEOUT_VALUE();
+}
+
+wait_timeout_unlocked_int := wait.lock.int.execute;
+wait_timeout_locked_int := wait.int.execute;
+
+wait_timeout_unlocked := wait.lock.src.execute;
+wait_timeout_locked := wait.src.execute;
+
+wait_unlocked := wait.lock.execute;
+wait_locked := wait.unlocked.execute;
+
+wait.lock() {
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+}
+
+wait.unlocked() {
+}
+
+wait.int(Int) {
+ /*
+ * If we have already set the timer, we must NOT set it again. Therefore,
+ * we must test the F_INSLPQUEUE flag as well as the F_TIMO flag.
+ */
+ if ((c_p->flags & (F_INSLPQUEUE | F_TIMO)) == 0) {
+ BeamInstr** pi = (BeamInstr **) c_p->def_arg_reg;
+ *pi = $NEXT_INSTRUCTION;
+ erts_set_proc_timer_uword(c_p, $Int);
+ }
+}
+
+wait.src(Src) {
+ /*
+ * If we have already set the timer, we must NOT set it again. Therefore,
+ * we must test the F_INSLPQUEUE flag as well as the F_TIMO flag.
+ */
+ if ((c_p->flags & (F_INSLPQUEUE | F_TIMO)) == 0) {
+ Eterm timeout_value = $Src;
+ if (timeout_value == make_small(0)) {
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ $NEXT0();
+ } else if (timeout_value == am_infinity) {
+ c_p->flags |= F_TIMO;
+ } else {
+ int tres = erts_set_proc_timer_term(c_p, timeout_value);
+ if (tres == 0) {
+ /*
+                 * The timer routine will set c_p->i to the value in
+ * c_p->def_arg_reg[0]. Note that it is safe to use this
+ * location because there are no living x registers in
+ * a receive statement.
+ */
+ BeamInstr** pi = (BeamInstr**) c_p->def_arg_reg;
+ *pi = $NEXT_INSTRUCTION;
+ } else { /* Wrong time */
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ c_p->freason = EXC_TIMEOUT_VALUE;
+ goto find_func_info;
+ }
+ }
+ }
+}
+
+//
+// Prepare to wait indefinitely for a new message to arrive
+// (or until the timeout set above expires, when falling through
+// from a wait_timeout). When a new message arrives, control is
+// transferred to the loop_rec instruction (at label L1). On
+// timeout, control is transferred to the timeout instruction
+// following the wait_timeout instruction.
+//
+
+wait.execute(JumpTarget) {
+ $SET_REL_I(c_p->i, $JumpTarget); /* L1 */
+ SWAPOUT;
+ c_p->arity = 0;
+
+ if (!ERTS_PTMR_IS_TIMED_OUT(c_p)) {
+ erts_atomic32_read_band_relb(&c_p->state,
+ ~ERTS_PSFLG_ACTIVE);
+ }
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ c_p->current = NULL;
+ goto do_schedule;
+ //| -no_next
+}
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index 68fcc177ae..7a2c39b3a8 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -1,18 +1,19 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1997-2013. All Rights Reserved.
+# Copyright Ericsson AB 1997-2017. All Rights Reserved.
#
-# The contents of this file are subject to the Erlang Public License,
-# Version 1.1, (the "License"); you may not use this file except in
-# compliance with the License. You should have received a copy of the
-# Erlang Public License along with this software. If not, it can be
-# retrieved online at http://www.erlang.org/.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
#
-# Software distributed under the License is distributed on an "AS IS"
-# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-# the License for the specific language governing rights and limitations
-# under the License.
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
#
# %CopyrightEnd%
#
@@ -38,8 +39,8 @@ too_old_compiler | never() =>
# necessary.) Since the instructions don't work correctly in R12B, simply
# refuse to load the module.
-func_info M=a a==am_module_info A=u==0 | label L | move n r => too_old_compiler
-func_info M=a a==am_module_info A=u==1 | label L | move n r => too_old_compiler
+func_info M=a a==am_module_info A=u==0 | label L | move n x==0 => too_old_compiler
+func_info M=a a==am_module_info A=u==1 | label L | move n x==0 => too_old_compiler
# The undocumented and unsupported guard BIF is_constant/1 was removed
# in R13. The is_constant/2 operation is marked as obsolete in genop.tab,
@@ -58,6 +59,7 @@ put_tuple u==0 d => too_old_compiler
# All the other instructions.
#
+%cold
label L
i_func_info I a a I
int_code_end
@@ -67,6 +69,8 @@ i_debug_breakpoint
i_return_time_trace
i_return_to_trace
i_yield
+trace_jump W
+%hot
return
@@ -75,17 +79,6 @@ return
# with the following call instruction, we need to make sure that
# there is no line/1 instruction between the move and the call.
#
-
-move S r | line Loc | call_ext Ar Func => \
- line Loc | move S r | call_ext Ar Func
-move S r | line Loc | call_ext_last Ar Func=u$is_bif D => \
- line Loc | move S r | call_ext_last Ar Func D
-move S r | line Loc | call_ext_only Ar Func=u$is_bif => \
- line Loc | move S r | call_ext_only Ar Func
-move S r | line Loc | call Ar Func => \
- line Loc | move S r | call Ar Func
-
-#
# A tail-recursive call to an external function (non-BIF) will
# never be saved on the stack, so there is no reason to keep
# the line instruction. (The compiler did not remove the line
@@ -93,33 +86,34 @@ move S r | line Loc | call Ar Func => \
# BIFs and ordinary Erlang functions.)
#
-line Loc | call_ext_last Ar Func=u$is_not_bif D => \
- call_ext_last Ar Func D
-line Loc | call_ext_only Ar Func=u$is_not_bif => \
- call_ext_only Ar Func
+move S X0=x==0 | line Loc | call_ext Ar Func => \
+ line Loc | move S X0 | call_ext Ar Func
+move S X0=x==0 | line Loc | call_ext_last Ar Func=u$is_not_bif D => \
+ move S X0 | call_ext_last Ar Func D
+move S X0=x==0 | line Loc | call_ext_only Ar Func=u$is_not_bif => \
+ move S X0 | call_ext_only Ar Func
+move S X0=x==0 | line Loc | call Ar Func => \
+ line Loc | move S X0 | call Ar Func
line Loc | func_info M F A => func_info M F A | line Loc
line I
+allocate t t?
+allocate_heap t I t?
-%macro: allocate Allocate -pack
-%macro: allocate_zero AllocateZero -pack
-%macro: allocate_heap AllocateHeap -pack
-%macro: allocate_heap_zero AllocateHeapZero -pack
-%macro: test_heap TestHeap -pack
+%cold
+deallocate Q
+%hot
-allocate t t
-allocate_heap t I t
-deallocate I
init y
-allocate_zero t t
-allocate_heap_zero t I t
+allocate_zero t t?
+allocate_heap_zero t I t?
trim N Remaining => i_trim N
-i_trim I
+i_trim t
-test_heap I t
+test_heap I t?
allocate_heap S u==0 R => allocate S R
allocate_heap_zero S u==0 R => allocate_zero S R
@@ -128,8 +122,6 @@ init2 y y
init3 y y y
init Y1 | init Y2 | init Y3 => init3 Y1 Y2 Y3
init Y1 | init Y2 => init2 Y1 Y2
-%macro: init2 Init2 -pack
-%macro: init3 Init3 -pack
# Selecting values
@@ -166,62 +158,29 @@ is_tuple Fail=f S | select_tuple_arity S=d Fail=f Size=u Rest=* => \
select_tuple_arity S=d Fail=f Size=u Rest=* => \
gen_select_tuple_arity(S, Fail, Size, Rest)
-i_select_val r f I
-i_select_val x f I
-i_select_val y f I
-
-i_select_val2 r f c f c f
-i_select_val2 x f c f c f
-i_select_val2 y f c f c f
-
-i_select_tuple_arity2 r f A f A f
-i_select_tuple_arity2 x f A f A f
-i_select_tuple_arity2 y f A f A f
+i_select_val_bins xy f? I
-i_select_tuple_arity r f I
-i_select_tuple_arity x f I
-i_select_tuple_arity y f I
+i_select_val_lins xy f? I
-i_jump_on_val_zero r f I
-i_jump_on_val_zero x f I
-i_jump_on_val_zero y f I
+i_select_val2 xy f? c c
-i_jump_on_val r f I I
-i_jump_on_val x f I I
-i_jump_on_val y f I I
+i_select_tuple_arity xy f? I
-jump Target | label Lbl | same_label(Target, Lbl) => label Lbl
+i_select_tuple_arity2 xy f? A A
-is_ne_exact L1 S1 S2 | jump Fail | label L2 | same_label(L1, L2) => \
- is_eq_exact Fail S1 S2 | label L2
+i_jump_on_val_zero xy f? I
-%macro: get_list GetList -pack
-get_list x x x
-get_list x x y
-get_list x x r
-get_list x y x
-get_list x y y
-get_list x y r
-get_list x r x
-get_list x r y
+i_jump_on_val xy f? I W
-get_list y x x
-get_list y x y
-get_list y x r
-get_list y y x
-get_list y y y
-get_list y y r
-get_list y r x
-get_list y r y
+get_list xy xy xy
+# The following get_list instructions using x(0) are frequently used.
get_list r x x
+get_list r r y
+get_list x r x
get_list r x y
-get_list r x r
-get_list r y x
-get_list r y y
get_list r y r
-get_list r r x
-get_list r r y
+get_list r x r
# Old-style catch.
catch y f
@@ -229,74 +188,160 @@ catch_end y
# Try/catch.
try Y F => catch Y F
-try_case Y => try_end Y
+
+try_case y
try_end y
-try_case_end Literal=q => move Literal x | try_case_end x
+%cold
try_case_end s
+%hot
# Destructive set tuple element
-set_tuple_element Lit=q Tuple Pos => move Lit x | set_tuple_element x Tuple Pos
-set_tuple_element s d P
+set_tuple_element s S P
# Get tuple element
-%macro: i_get_tuple_element GetTupleElement -pack
-i_get_tuple_element x P x
-i_get_tuple_element r P x
-i_get_tuple_element y P x
-i_get_tuple_element x P r
-i_get_tuple_element y P r
+i_get_tuple_element xy P x
%cold
-i_get_tuple_element r P r
-i_get_tuple_element x P y
-i_get_tuple_element r P y
-i_get_tuple_element y P y
+i_get_tuple_element xy P y
%hot
-%macro: is_number IsNumber -fail_action
+i_get_tuple_element2 x P x
+i_get_tuple_element2y x P y y
+
+i_get_tuple_element3 x P x
+
%cold
-is_number f r
-is_number f x
-is_number f y
+is_number f? xy
%hot
+
is_number Fail=f i =>
is_number Fail=f na => jump Fail
is_number Fail Literal=q => move Literal x | is_number Fail x
jump f
-case_end Literal=cq => move Literal x | case_end x
-badmatch Literal=cq => move Literal x | badmatch x
+#
+# Exception raising instructions. Infrequently executed.
+#
+
+%cold
+case_end NotInX=cy => move NotInX x | case_end x
+badmatch NotInX=cy => move NotInX x | badmatch x
-case_end r
case_end x
-case_end y
-badmatch r
badmatch x
-badmatch y
if_end
-raise s s
+
+# Operands for raise/2 are almost always in x(2) and x(1).
+# Optimize for that case.
+raise x==2 x==1 => i_raise
+raise Trace=y Value=y => move Trace x=2 | move Value x=1 | i_raise
+raise Trace Value => move Trace x=3 | move Value x=1 | move x=3 x=2 | i_raise
+
+i_raise
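+#
+# Sketch with invented registers: 'raise y(0) y(1)' matches the second
+# rule above and becomes
+#     move y(0) x(2) | move y(1) x(1) | i_raise
+# placing the stacktrace in x(2) and the exception value in x(1), the
+# layout that i_raise expects.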
# Internal now, but could be useful to make known to the compiler.
badarg j
system_limit j
-move C=cxy r | jump Lbl => move_jump Lbl C
+%hot
+
+#
+# Move instructions.
+#
+
+move C=cxy x==0 | jump Lbl => move_jump Lbl C
+
+move_jump f ncxy
+
+# Movement to and from the stack is common.
+# Try to pack as much as we can into one instruction.
+
+# Window move
+move_window/5
+move_window/6
+
+# x -> y
+
+move X1=x Y1=y | move X2=x Y2=y | move X3=x Y3=y | succ(Y1,Y2) | succ(Y2,Y3) => \
+ move_window X1 X2 X3 Y1 Y3
+
+move_window X1=x X2=x X3=x Y1=y Y3=y | move X4=x Y4=y | succ(Y3,Y4) => \
+ move_window X1 X2 X3 X4 Y1 Y4
+
+move_window X1=x X2=x X3=x X4=x Y1=y Y4=y | move X5=x Y5=y | succ(Y4,Y5) => \
+ move_window5 X1 X2 X3 X4 X5 Y1
+
+move_window X1=x X2=x X3=x Y1=y Y3=y => move_window3 X1 X2 X3 Y1
+move_window X1=x X2=x X3=x X4=x Y1=y Y4=y => move_window4 X1 X2 X3 X4 Y1
+
+move_window3 x x x y
+move_window4 x x x x y
+move_window5 x x x x x y
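+#
+# A made-up example of the window packing above: three stores to
+# consecutive y registers,
+#     move x(0) y(0) | move x(1) y(1) | move x(2) y(2)
+# first match the succ() chain to form a move_window, which is then
+# finalized as
+#     move_window3 x(0) x(1) x(2) y(0)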
+
+# Swap registers.
+move R1=x Tmp=x | move R2=xy R1 | move Tmp R2 => swap_temp R1 R2 Tmp
+
+swap_temp R1 R2 Tmp | line Loc | apply Live | is_killed_apply(Tmp, Live) => \
+ swap R1 R2 | line Loc | apply Live
+
+swap_temp R1 R2 Tmp | line Loc | call Live Addr | is_killed(Tmp, Live) => \
+ swap R1 R2 | line Loc | call Live Addr
+swap_temp R1 R2 Tmp | call_only Live Addr | \
+ is_killed(Tmp, Live) => swap R1 R2 | call_only Live Addr
+swap_temp R1 R2 Tmp | call_last Live Addr D | \
+ is_killed(Tmp, Live) => swap R1 R2 | call_last Live Addr D
+
+swap_temp R1 R2 Tmp | line Loc | call_ext Live Addr | is_killed(Tmp, Live) => \
+ swap R1 R2 | line Loc | call_ext Live Addr
+swap_temp R1 R2 Tmp | line Loc | call_ext_only Live Addr | \
+ is_killed(Tmp, Live) => swap R1 R2 | line Loc | call_ext_only Live Addr
+swap_temp R1 R2 Tmp | line Loc | call_ext_last Live Addr D | \
+ is_killed(Tmp, Live) => swap R1 R2 | line Loc | call_ext_last Live Addr D
+
+swap_temp x xy x
+
+swap x xy
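+#
+# Illustration (register numbers invented): a three-move swap of x(0)
+# and x(1) through the temporary x(2),
+#     move x(0) x(2) | move x(1) x(0) | move x(2) x(1)
+# becomes 'swap_temp x(0) x(1) x(2)'; if a following call shows that
+# x(2) is dead, it is further reduced to 'swap x(0) x(1)'.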
+
+move Src=x D1=x | move Src=x D2=x => move_dup Src D1 D2
+move Src=x SD=x | move SD=x D=x => move_dup Src SD D
+move Src=x D1=x | move Src=x D2=y => move_dup Src D1 D2
+move Src=y SD=x | move SD=x D=y => move_dup Src SD D
+move Src=x SD=x | move SD=x D=y => move_dup Src SD D
+move Src=y SD=x | move SD=x D=x => move_dup Src SD D
-%macro: move_jump MoveJump -nonext
-move_jump f n
-move_jump f c
-move_jump f x
-move_jump f y
+move SD=x D=x | move Src=xy SD=x => move_shift Src SD D
+move SD=y D=x | move Src=x SD=y => move_shift Src SD D
+move SD=x D=y | move Src=x SD=x => move_shift Src SD D
-move X1=x Y1=y | move X2=x Y2=y => move2 X1 Y1 X2 Y2
-move Y1=y X1=x | move Y2=y X2=x => move2 Y1 X1 Y2 X2
-move X1=x X2=x | move X3=x X4=x => move2 X1 X2 X3 X4
+# The transformations above guarantee that the source for
+# the second move is not the same as the destination for
+# the first move. That means that we can do the moves in
+# parallel (fetch both values, then store them), which could
+# be faster.
+
+move X1=x Y1=y | move X2=x Y2=y => move2_par X1 Y1 X2 Y2
+move Y1=y X1=x | move Y2=y X2=x => move2_par Y1 X1 Y2 X2
+
+move X1=x X2=x | move X3=x X4=x => move2_par X1 X2 X3 X4
+
+move X1=x X2=x | move X3=x Y1=y => move2_par X1 X2 X3 Y1
+
+move S1=x S2=x | move X1=x Y1=y => move2_par S1 S2 X1 Y1
+
+move S1=y S2=x | move X1=x Y1=y => move2_par S1 S2 X1 Y1
+
+move Y1=y X1=x | move S1=x D1=x => move2_par Y1 X1 S1 D1
+move S1=x D1=x | move Y1=y X1=x => move2_par S1 D1 Y1 X1
+
+move2_par X1=x Y1=y X2=x Y2=y | move X3=x Y3=y => move3 X1 Y1 X2 Y2 X3 Y3
+move2_par Y1=y X1=x Y2=y X2=x | move Y3=y X3=x => move3 Y1 X1 Y2 X2 Y3 X3
+move2_par X1=x X2=x X3=x X4=x | move X5=x X6=x => move3 X1 X2 X3 X4 X5 X6
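+#
+# For instance (hypothetical operands), the pair
+#     move x(1) y(0) | move x(2) y(1)
+# becomes 'move2_par x(1) y(0) x(2) y(1)': both sources are fetched
+# before either destination is stored, which the guarantee above
+# makes safe.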
move C=aiq X=x==1 => move_x1 C
move C=aiq X=x==2 => move_x2 C
@@ -304,58 +349,82 @@ move C=aiq X=x==2 => move_x2 C
move_x1 c
move_x2 c
-%macro: move2 Move2 -pack
-move2 x y x y
-move2 y x y x
-move2 x x x x
+move_shift x x x
+move_shift y x x
+move_shift x y x
+move_shift x x y
+
+move_dup xy x xy
+
+move2_par x y x y
+move2_par y x y x
+move2_par x x x x
+
+move2_par x x x y
+
+move2_par y x x y
+
+move2_par x x y x
+move2_par y x x x
+
+move3 x y x y x y
+move3 y x y x y x
+move3 x x x x x x
# The compiler almost never generates a "move Literal y(Y)" instruction,
# so let's cheat if we encounter one.
move S=n D=y => init D
move S=c D=y => move S x | move x D
-%macro:move Move -pack -gen_dest
move x x
move x y
-move x r
move y x
-move y r
-move r x
-move r y
-move c r
move c x
move n x
-move n r
move y y
+# The following move instructions using x(0) are frequently used.
+
+move x r
+move r x
+move y r
+move c r
+move r y
+
# Receive operations.
-loop_rec Fail Src | smp_mark_target_label(Fail) => i_loop_rec Fail Src
+loop_rec Fail x==0 | smp_mark_target_label(Fail) => i_loop_rec Fail
-label L | wait_timeout Fail Src | smp_already_locked(L) => label L | i_wait_timeout_locked Fail Src
-wait_timeout Fail Src => i_wait_timeout Fail Src
-i_wait_timeout Fail Src=aiq => gen_literal_timeout(Fail, Src)
-i_wait_timeout_locked Fail Src=aiq => gen_literal_timeout_locked(Fail, Src)
+label L | wait_timeout Fail Src | smp_already_locked(L) => \
+ label L | wait_timeout_locked Src Fail
+wait_timeout Fail Src => wait_timeout_unlocked Src Fail
+
+wait_timeout_unlocked Src=aiq Fail => gen_literal_timeout(Fail, Src)
+wait_timeout_locked Src=aiq Fail => gen_literal_timeout_locked(Fail, Src)
label L | wait Fail | smp_already_locked(L) => label L | wait_locked Fail
-wait Fail | smp() => wait_unlocked Fail
+wait Fail => wait_unlocked Fail
label L | timeout | smp_already_locked(L) => label L | timeout_locked
remove_message
timeout
timeout_locked
-i_loop_rec f r
+i_loop_rec f
loop_rec_end f
-wait f
wait_locked f
wait_unlocked f
-i_wait_timeout f I
-i_wait_timeout f s
-i_wait_timeout_locked f I
-i_wait_timeout_locked f s
+
+# Note that a timeout value must fit in 32 bits.
+wait_timeout_unlocked_int I f
+wait_timeout_unlocked s f
+wait_timeout_locked_int I f
+wait_timeout_locked s f
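+#
+# For example, a literal 'receive ... after 1000' reaches the loader as
+# wait_timeout_unlocked with an immediate operand; gen_literal_timeout()
+# should then emit
+#     wait_timeout_unlocked_int 1000 Fail
+# since 1000 fits in 32 bits.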
+
+%cold
i_wait_error
i_wait_error_locked
+%hot
send
@@ -363,48 +432,52 @@ send
# Optimized comparisons with one immediate/literal operand.
#
-is_eq_exact Lbl R=rxy C=ian => i_is_eq_exact_immed Lbl R C
-is_eq_exact Lbl R=rxy C=q => i_is_eq_exact_literal R Lbl C
+is_eq_exact Lbl S S =>
+is_eq_exact Lbl C1=c C2=c => move C1 x | is_eq_exact Lbl x C2
+is_eq_exact Lbl C=c R=xy => is_eq_exact Lbl R C
-is_ne_exact Lbl R=rxy C=ian => i_is_ne_exact_immed Lbl R C
-is_ne_exact Lbl R=rxy C=q => i_is_ne_exact_literal R Lbl C
+is_eq_exact Lbl R=xy n => is_nil Lbl R
+is_eq_exact Lbl R=xy C=ia => i_is_eq_exact_immed Lbl R C
+is_eq_exact Lbl R=xy C=q => i_is_eq_exact_literal Lbl R C
-%macro: i_is_eq_exact_immed EqualImmed -fail_action
-i_is_eq_exact_immed f r c
-i_is_eq_exact_immed f x c
-i_is_eq_exact_immed f y c
+is_ne_exact Lbl S S => jump Lbl
+is_ne_exact Lbl C1=c C2=c => move C1 x | is_ne_exact Lbl x C2
+is_ne_exact Lbl C=c R=xy => is_ne_exact Lbl R C
-i_is_eq_exact_literal r f c
-i_is_eq_exact_literal x f c
-i_is_eq_exact_literal y f c
+is_ne_exact Lbl R=xy C=ian => i_is_ne_exact_immed Lbl R C
+is_ne_exact Lbl R=xy C=q => i_is_ne_exact_literal Lbl R C
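+#
+# Hypothetical instances of the rewrites above:
+#     is_eq_exact Lbl x(0) x(0)    vanishes (trivially true)
+#     is_eq_exact Lbl x(0) []      becomes is_nil Lbl x(0)
+#     is_eq_exact Lbl x(0) an_atom becomes i_is_eq_exact_immed Lbl x(0) an_atom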
-%macro: i_is_ne_exact_immed NotEqualImmed -fail_action
-i_is_ne_exact_immed f r c
-i_is_ne_exact_immed f x c
-i_is_ne_exact_immed f y c
+i_is_eq_exact_immed f? rxy c
-i_is_ne_exact_literal r f c
-i_is_ne_exact_literal x f c
-i_is_ne_exact_literal y f c
+i_is_eq_exact_literal f? xy c
-#
-# All other comparisons.
-#
+i_is_ne_exact_immed f? xy c
+
+i_is_ne_exact_literal f? xy c
+
+is_eq_exact Lbl Y=y X=x => is_eq_exact Lbl X Y
+is_eq_exact f? x xy
+is_eq_exact f? y y
+
+is_ne_exact f? S S
+
+is_lt f? x x
+is_lt f? x c
+is_lt f? c x
+%cold
+is_lt f? s s
+%hot
-is_eq_exact Lbl S1 S2 => i_fetch S1 S2 | i_is_eq_exact Lbl
-is_ne_exact Lbl S1 S2 => i_fetch S1 S2 | i_is_ne_exact Lbl
+is_ge f? x x
+is_ge f? x c
+is_ge f? c x
+%cold
+is_ge f? s s
+%hot
-is_ge Lbl S1 S2 => i_fetch S1 S2 | i_is_ge Lbl
-is_lt Lbl S1 S2 => i_fetch S1 S2 | i_is_lt Lbl
-is_eq Lbl S1 S2 => i_fetch S1 S2 | i_is_eq Lbl
-is_ne Lbl S1 S2 => i_fetch S1 S2 | i_is_ne Lbl
+is_eq f? s s
-i_is_eq_exact f
-i_is_ne_exact f
-i_is_lt f
-i_is_ge f
-i_is_eq f
-i_is_ne f
+is_ne f? s s
#
# Putting things.
@@ -421,98 +494,56 @@ i_put_tuple Dst Arity Puts=* | put S => \
i_put_tuple/2
-%macro:i_put_tuple PutTuple -pack -goto:do_put_tuple
-i_put_tuple r I
-i_put_tuple x I
-i_put_tuple y I
+i_put_tuple xy I
#
-# The instruction "put_list Const [] Dst" will not be generated by
-# the current BEAM compiler. But until R15A, play it safe by handling
-# that instruction with the following transformation.
+# The instruction "put_list Const [] Dst" were generated in rare
+# circumstances up to and including OTP 18. Starting with OTP 19,
+# AFAIK, it should never be generated.
#
put_list Const=c n Dst => move Const x | put_list x n Dst
-%macro:put_list PutList -pack -gen_dest
-
put_list x n x
put_list y n x
put_list x x x
put_list y x x
-put_list x x r
-put_list y r r
put_list y y x
put_list x y x
-put_list r x x
-put_list r y x
-put_list r x r
-put_list y y r
-put_list y r x
-put_list r n x
-put_list x r x
-put_list x y r
-put_list y x r
put_list y x x
-put_list x r r
-
# put_list SrcReg Constant Dst
-put_list r c r
-put_list r c x
-put_list r c y
-put_list x c r
put_list x c x
put_list x c y
-put_list y c r
put_list y c x
-put_list y c y
# put_list Constant SrcReg Dst
-put_list c r r
-put_list c r x
-put_list c r y
-put_list c x r
put_list c x x
-put_list c x y
-
-put_list c y r
put_list c y x
-put_list c y y
-%cold
-put_list s s d
-%hot
+# The following put_list instructions using x(0) are frequently used.
-%macro: i_fetch FetchArgs -pack
-i_fetch c c
-i_fetch c r
-i_fetch c x
-i_fetch c y
-i_fetch r c
-i_fetch r x
-i_fetch r y
-i_fetch x c
-i_fetch x r
-i_fetch x x
-i_fetch x y
-i_fetch y c
-i_fetch y r
-i_fetch y x
-i_fetch y y
+put_list y r r
+put_list x r r
+put_list r n r
+put_list r n x
+put_list r x x
+put_list r x r
+put_list x x r
%cold
-i_fetch s s
+put_list s s d
%hot
#
# Some more only used by the emulator
#
+%cold
normal_exit
continue_exit
apply_bif
@@ -520,6 +551,7 @@ call_nif
call_error_handler
error_action_code
return_trace
+%hot
#
# Instruction transformations & folded instructions.
@@ -528,154 +560,100 @@ return_trace
# Note: There is no 'move_return y r', since there never are any y registers
# when we do move_return (if we have y registers, we must do move_deallocate_return).
-move S r | return => move_return S r
+move S x==0 | return => move_return S
-%macro: move_return MoveReturn -nonext
-move_return x r
-move_return c r
-move_return n r
+move_return xcn
-move S r | deallocate D | return => move_deallocate_return S r D
+move S x==0 | deallocate D | return => move_deallocate_return S D
-%macro: move_deallocate_return MoveDeallocateReturn -pack -nonext
-move_deallocate_return x r Q
-move_deallocate_return y r Q
-move_deallocate_return c r Q
-move_deallocate_return n r Q
+move_deallocate_return xycn Q
deallocate D | return => deallocate_return D
-%macro: deallocate_return DeallocateReturn -nonext
deallocate_return Q
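+#
+# A sketch of how these rules combine (operands invented):
+#     move x(1) x(0) | return                  => move_return x(1)
+#     move x(1) x(0) | deallocate 2 | return   => move_deallocate_return x(1) 2
+#     deallocate 2 | return                    => deallocate_return 2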
-test_heap Need u==1 | put_list Y=y r r => test_heap_1_put_list Need Y
+test_heap Need u==1 | put_list Y=y x==0 x==0 => test_heap_1_put_list Need Y
-%macro: test_heap_1_put_list TestHeapPutList -pack
test_heap_1_put_list I y
+#
+# is_tagged_tuple Fail=f Src=rxy Arity Atom=a
+#
+
+is_tagged_tuple Fail Literal=q Arity Atom => \
+ move Literal x | is_tagged_tuple Fail x Arity Atom
+is_tagged_tuple Fail=f c Arity Atom => jump Fail
+
+is_tagged_tuple f? rxy A a
+
# Test tuple & arity (head)
is_tuple Fail Literal=q => move Literal x | is_tuple Fail x
is_tuple Fail=f c => jump Fail
-is_tuple Fail=f S=rxy | test_arity Fail=f S=rxy Arity => is_tuple_of_arity Fail S Arity
-
-%macro:is_tuple_of_arity IsTupleOfArity -fail_action
+is_tuple Fail=f S=xy | test_arity Fail=f S=xy Arity => is_tuple_of_arity Fail S Arity
-is_tuple_of_arity f x A
-is_tuple_of_arity f y A
-is_tuple_of_arity f r A
+is_tuple_of_arity f? rxy A
-%macro: is_tuple IsTuple -fail_action
-is_tuple f x
-is_tuple f y
-is_tuple f r
+is_tuple f? rxy
test_arity Fail Literal=q Arity => move Literal x | test_arity Fail x Arity
test_arity Fail=f c Arity => jump Fail
-%macro: test_arity IsArity -fail_action
-test_arity f x A
-test_arity f y A
-test_arity f r A
-
-is_tuple_of_arity Fail=f Reg Arity | get_tuple_element Reg P=u==0 Dst=xy => \
- is_tuple_of_arity Fail Reg Arity | extract_next_element Dst | original_reg Reg P
-
-test_arity Fail Reg Arity | get_tuple_element Reg P=u==0 Dst=xy => \
- test_arity Fail Reg Arity | extract_next_element Dst | original_reg Reg P
-
-original_reg Reg P1 | get_tuple_element Reg P2 Dst=xy | succ(P1, P2) => \
- extract_next_element Dst | original_reg Reg P2
-
-get_tuple_element Reg P Dst => i_get_tuple_element Reg P Dst | original_reg Reg P
-
-original_reg Reg Pos =>
-
-original_reg/2
-
-extract_next_element D1=xy | original_reg Reg P1 | get_tuple_element Reg P2 D2=xy | \
-succ(P1, P2) | succ(D1, D2) => \
- extract_next_element2 D1 | original_reg Reg P2
+test_arity f? xy A
-extract_next_element2 D1=xy | original_reg Reg P1 | get_tuple_element Reg P2 D2=xy | \
-succ(P1, P2) | succ2(D1, D2) => \
- extract_next_element3 D1 | original_reg Reg P2
+get_tuple_element Reg=x P1 D1=x | get_tuple_element Reg=x P2 D2=x | \
+ get_tuple_element Reg=x P3 D3=x | \
+ succ(P1, P2) | succ(P2, P3) | \
+ succ(D1, D2) | succ(D2, D3) => i_get_tuple_element3 Reg P1 D1
-#extract_next_element3 D1=xy | original_reg Reg P1 | get_tuple_element Reg P2 D2=xy | \
-#succ(P1, P2) | succ3(D1, D2) => \
-# extract_next_element4 D1 | original_reg Reg P2
+get_tuple_element Reg=x P1 D1=x | get_tuple_element Reg=x P2 D2=x | \
+ succ(P1, P2) | succ(D1, D2) => i_get_tuple_element2 Reg P1 D1
-%macro: extract_next_element ExtractNextElement -pack
-extract_next_element x
-extract_next_element y
+get_tuple_element Reg=x P1 D1=y | get_tuple_element Reg=x P2 D2=y | \
+ succ(P1, P2) => i_get_tuple_element2y Reg P1 D1 D2
-%macro: extract_next_element2 ExtractNextElement2 -pack
-extract_next_element2 x
-extract_next_element2 y
-
-%macro: extract_next_element3 ExtractNextElement3 -pack
-extract_next_element3 x
-extract_next_element3 y
-
-#%macro: extract_next_element4 ExtractNextElement4 -pack
-#extract_next_element4 x
-#extract_next_element4 y
+get_tuple_element Reg P Dst => i_get_tuple_element Reg P Dst
is_integer Fail=f i =>
is_integer Fail=f an => jump Fail
is_integer Fail Literal=q => move Literal x | is_integer Fail x
-is_integer Fail=f S=rx | allocate Need Regs => is_integer_allocate Fail S Need Regs
+is_integer Fail=f S=x | allocate Need Regs => is_integer_allocate Fail S Need Regs
-%macro: is_integer_allocate IsIntegerAllocate -fail_action
-is_integer_allocate f x I I
-is_integer_allocate f r I I
+is_integer_allocate f? x t t
-%macro: is_integer IsInteger -fail_action
-is_integer f x
-is_integer f y
-is_integer f r
+is_integer f? xy
is_list Fail=f n =>
is_list Fail Literal=q => move Literal x | is_list Fail x
is_list Fail=f c => jump Fail
-%macro: is_list IsList -fail_action
-is_list f r
-is_list f x
+is_list f? x
%cold
-is_list f y
+is_list f? y
%hot
-is_nonempty_list Fail=f S=rx | allocate Need Rs => is_nonempty_list_allocate Fail S Need Rs
-
-%macro:is_nonempty_list_allocate IsNonemptyListAllocate -fail_action -pack
-is_nonempty_list_allocate f x I t
-is_nonempty_list_allocate f r I t
+is_nonempty_list Fail=f S=x | allocate Need Rs => is_nonempty_list_allocate Fail S Need Rs
-is_nonempty_list F=f r | test_heap I1 I2 => is_non_empty_list_test_heap F r I1 I2
+is_nonempty_list F=f x==0 | test_heap I1 I2 => is_nonempty_list_test_heap F I1 I2
-%macro: is_non_empty_list_test_heap IsNonemptyListTestHeap -fail_action -pack
-is_non_empty_list_test_heap f r I t
+is_nonempty_list Fail=f S=x | get_list S D1=x D2=x => \
+ is_nonempty_list_get_list Fail S D1 D2
-%macro: is_nonempty_list IsNonemptyList -fail_action
-is_nonempty_list f x
-is_nonempty_list f y
-is_nonempty_list f r
+is_nonempty_list_allocate f? rx t t
+is_nonempty_list_test_heap f? I t
+is_nonempty_list_get_list f? rx x x
+is_nonempty_list f? xy
-%macro: is_atom IsAtom -fail_action
-is_atom f x
-is_atom f r
+is_atom f? x
%cold
-is_atom f y
+is_atom f? y
%hot
is_atom Fail=f a =>
is_atom Fail=f niq => jump Fail
-%macro: is_float IsFloat -fail_action
-is_float f r
-is_float f x
+is_float f? x
%cold
-is_float f y
+is_float f? y
%hot
is_float Fail=f nai => jump Fail
is_float Fail Literal=q => move Literal x | is_float Fail x
@@ -683,18 +661,13 @@ is_float Fail Literal=q => move Literal x | is_float Fail x
is_nil Fail=f n =>
is_nil Fail=f qia => jump Fail
-%macro: is_nil IsNil -fail_action
-is_nil f x
-is_nil f y
-is_nil f r
+is_nil f? xy
is_binary Fail Literal=q => move Literal x | is_binary Fail x
is_binary Fail=f c => jump Fail
-%macro: is_binary IsBinary -fail_action
-is_binary f r
-is_binary f x
+is_binary f? x
%cold
-is_binary f y
+is_binary f? y
%hot
# XXX Deprecated.
@@ -702,35 +675,27 @@ is_bitstr Fail Term => is_bitstring Fail Term
is_bitstring Fail Literal=q => move Literal x | is_bitstring Fail x
is_bitstring Fail=f c => jump Fail
-%macro: is_bitstring IsBitstring -fail_action
-is_bitstring f r
-is_bitstring f x
+is_bitstring f? x
%cold
-is_bitstring f y
+is_bitstring f? y
%hot
is_reference Fail=f cq => jump Fail
-%macro: is_reference IsRef -fail_action
-is_reference f r
-is_reference f x
+is_reference f? x
%cold
-is_reference f y
+is_reference f? y
%hot
is_pid Fail=f cq => jump Fail
-%macro: is_pid IsPid -fail_action
-is_pid f r
-is_pid f x
+is_pid f? x
%cold
-is_pid f y
+is_pid f? y
%hot
is_port Fail=f cq => jump Fail
-%macro: is_port IsPort -fail_action
-is_port f r
-is_port f x
+is_port f? x
%cold
-is_port f y
+is_port f? y
%hot
is_boolean Fail=f a==am_true =>
@@ -738,49 +703,42 @@ is_boolean Fail=f a==am_false =>
is_boolean Fail=f ac => jump Fail
%cold
-%macro: is_boolean IsBoolean -fail_action
-is_boolean f r
-is_boolean f x
-is_boolean f y
+is_boolean f? xy
%hot
is_function2 Fail=f acq Arity => jump Fail
is_function2 Fail=f Fun a => jump Fail
-is_function2 Fail Fun Literal=q => move Literal x | is_function2 Fail Fun x
-is_function2 f s s
-%macro: is_function2 IsFunction2 -fail_action
+is_function2 f? S s
# Allocating & initializing.
allocate Need Regs | init Y => allocate_init Need Regs Y
init Y1 | init Y2 => init2 Y1 Y2
-%macro: allocate_init AllocateInit -pack
-allocate_init t I y
+allocate_init t t? y
#################################################################
# External function and bif calls.
#################################################################
#
-# The BIFs erts_internal:check_process_code/2 must be called like a function,
+# The BIF erts_internal:check_process_code/1 must be called like a function,
# to ensure that c_p->i (program counter) is set correctly (an ordinary
# BIF call doesn't set it).
#
-call_ext u==2 Bif=u$bif:erts_internal:check_process_code/2 => i_call_ext Bif
-call_ext_last u==2 Bif=u$bif:erts_internal:check_process_code/2 D => i_call_ext_last Bif D
-call_ext_only u==2 Bif=u$bif:erts_internal:check_process_code/2 => i_call_ext_only Bif
+call_ext u==1 Bif=u$bif:erts_internal:check_process_code/1 => i_call_ext Bif
+call_ext_last u==1 Bif=u$bif:erts_internal:check_process_code/1 D => i_call_ext_last Bif D
+call_ext_only u==1 Bif=u$bif:erts_internal:check_process_code/1 => i_call_ext_only Bif
#
-# The BIFs erlang:garbage_collect/0 must be called like a function,
+# The BIF erts_internal:garbage_collect/1 must be called like a function,
# to allow it to invoke the garbage collector. (The stack pointer must
# be saved and p->arity must be zeroed, which is not done on ordinary BIF calls.)
#
-
-call_ext u==0 Bif=u$bif:erlang:garbage_collect/0 => i_call_ext Bif
-call_ext_last u==0 Bif=u$bif:erlang:garbage_collect/0 D => i_call_ext_last Bif D
-call_ext_only u==0 Bif=u$bif:erlang:garbage_collect/0 => i_call_ext_only Bif
+call_ext u==1 Bif=u$bif:erts_internal:garbage_collect/1 => i_call_ext Bif
+call_ext_last u==1 Bif=u$bif:erts_internal:garbage_collect/1 D => i_call_ext_last Bif D
+call_ext_only u==1 Bif=u$bif:erts_internal:garbage_collect/1 => i_call_ext_only Bif
#
# put/2 and erase/1 must be able to do garbage collection, so we must call
@@ -886,76 +844,76 @@ call_ext_only u==3 u$func:erlang:hibernate/3 => i_hibernate
%unless USE_VM_PROBES
call_ext Arity u$func:erlang:dt_get_tag/0 => \
- move a=am_undefined r
+ move a=am_undefined x=0
call_ext_last Arity u$func:erlang:dt_get_tag/0 D => \
- move a=am_undefined r | deallocate D | return
+ move a=am_undefined x=0 | deallocate D | return
call_ext_only Arity u$func:erlang:dt_get_tag/0 => \
- move a=am_undefined r | return
-
-move Any r | call_ext Arity u$func:erlang:dt_put_tag/1 => \
- move a=am_undefined r
-move Any r | call_ext_last Arity u$func:erlang:dt_put_tag/1 D => \
- move a=am_undefined r | deallocate D | return
-move Any r | call_ext_only Arity u$func:erlang:dt_put_tag/1 => \
- move a=am_undefined r | return
+ move a=am_undefined x=0 | return
+
+move Any x==0 | call_ext Arity u$func:erlang:dt_put_tag/1 => \
+ move a=am_undefined x=0
+move Any x==0 | call_ext_last Arity u$func:erlang:dt_put_tag/1 D => \
+ move a=am_undefined x=0 | deallocate D | return
+move Any x==0 | call_ext_only Arity u$func:erlang:dt_put_tag/1 => \
+ move a=am_undefined x=0 | return
call_ext Arity u$func:erlang:dt_put_tag/1 => \
- move a=am_undefined r
+ move a=am_undefined x=0
call_ext_last Arity u$func:erlang:dt_put_tag/1 D => \
- move a=am_undefined r | deallocate D | return
+ move a=am_undefined x=0 | deallocate D | return
call_ext_only Arity u$func:erlang:dt_put_tag/1 => \
- move a=am_undefined r | return
+ move a=am_undefined x=0 | return
call_ext Arity u$func:erlang:dt_get_tag_data/0 => \
- move a=am_undefined r
+ move a=am_undefined x=0
call_ext_last Arity u$func:erlang:dt_get_tag_data/0 D => \
- move a=am_undefined r | deallocate D | return
+ move a=am_undefined x=0 | deallocate D | return
call_ext_only Arity u$func:erlang:dt_get_tag_data/0 => \
- move a=am_undefined r | return
-
-move Any r | call_ext Arity u$func:erlang:dt_spread_tag/1 => \
- move a=am_true r
-move Any r | call_ext_last Arity u$func:erlang:dt_spread_tag/1 D => \
- move a=am_true r | deallocate D | return
-move Any r | call_ext_only Arity u$func:erlang:dt_spread_tag/1 => \
- move a=am_true r | return
+ move a=am_undefined x=0 | return
+
+move Any x==0 | call_ext Arity u$func:erlang:dt_spread_tag/1 => \
+ move a=am_true x=0
+move Any x==0 | call_ext_last Arity u$func:erlang:dt_spread_tag/1 D => \
+ move a=am_true x=0 | deallocate D | return
+move Any x==0 | call_ext_only Arity u$func:erlang:dt_spread_tag/1 => \
+ move a=am_true x=0 | return
call_ext Arity u$func:erlang:dt_spread_tag/1 => \
- move a=am_true r
+ move a=am_true x=0
call_ext_last Arity u$func:erlang:dt_spread_tag/1 D => \
- move a=am_true r | deallocate D | return
+ move a=am_true x=0 | deallocate D | return
call_ext_only Arity u$func:erlang:dt_spread_tag/1 => \
- move a=am_true r | return
-
-move Any r | call_ext Arity u$func:erlang:dt_restore_tag/1 => \
- move a=am_true r
-move Any r | call_ext_last Arity u$func:erlang:dt_restore_tag/1 D => \
- move a=am_true r | deallocate D | return
-move Any r | call_ext_only Arity u$func:erlang:dt_restore_tag/1 => \
- move a=am_true r | return
+ move a=am_true x=0 | return
+
+move Any x==0 | call_ext Arity u$func:erlang:dt_restore_tag/1 => \
+ move a=am_true x=0
+move Any x==0 | call_ext_last Arity u$func:erlang:dt_restore_tag/1 D => \
+ move a=am_true x=0 | deallocate D | return
+move Any x==0 | call_ext_only Arity u$func:erlang:dt_restore_tag/1 => \
+ move a=am_true x=0 | return
call_ext Arity u$func:erlang:dt_restore_tag/1 => \
- move a=am_true r
+ move a=am_true x=0
call_ext_last Arity u$func:erlang:dt_restore_tag/1 D => \
- move a=am_true r | deallocate D | return
+ move a=am_true x=0 | deallocate D | return
call_ext_only Arity u$func:erlang:dt_restore_tag/1 => \
- move a=am_true r | return
-
-move Any r | call_ext Arity u$func:erlang:dt_prepend_vm_tag_data/1 => \
- move Any r
-move Any r | call_ext_last Arity u$func:erlang:dt_prepend_vm_tag_data/1 D => \
- move Any r | deallocate D | return
-move Any r | call_ext_only Arity u$func:erlang:dt_prepend_vm_tag_data/1 => \
- move Any r | return
+ move a=am_true x=0 | return
+
+move Any x==0 | call_ext Arity u$func:erlang:dt_prepend_vm_tag_data/1 => \
+ move Any x=0
+move Any x==0 | call_ext_last Arity u$func:erlang:dt_prepend_vm_tag_data/1 D => \
+ move Any x=0 | deallocate D | return
+move Any x==0 | call_ext_only Arity u$func:erlang:dt_prepend_vm_tag_data/1 => \
+ move Any x=0 | return
call_ext Arity u$func:erlang:dt_prepend_vm_tag_data/1 =>
call_ext_last Arity u$func:erlang:dt_prepend_vm_tag_data/1 D => \
deallocate D | return
call_ext_only Arity u$func:erlang:dt_prepend_vm_tag_data/1 => \
return
-move Any r | call_ext Arity u$func:erlang:dt_append_vm_tag_data/1 => \
- move Any r
-move Any r | call_ext_last Arity u$func:erlang:dt_append_vm_tag_data/1 D => \
- move Any r | deallocate D | return
-move Any r | call_ext_only Arity u$func:erlang:dt_append_vm_tag_data/1 => \
- move Any r | return
+move Any x==0 | call_ext Arity u$func:erlang:dt_append_vm_tag_data/1 => \
+ move Any x=0
+move Any x==0 | call_ext_last Arity u$func:erlang:dt_append_vm_tag_data/1 D => \
+ move Any x=0 | deallocate D | return
+move Any x==0 | call_ext_only Arity u$func:erlang:dt_append_vm_tag_data/1 => \
+ move Any x=0 | return
call_ext Arity u$func:erlang:dt_append_vm_tag_data/1 =>
call_ext_last Arity u$func:erlang:dt_append_vm_tag_data/1 D => \
deallocate D | return
@@ -963,10 +921,17 @@ call_ext_only Arity u$func:erlang:dt_append_vm_tag_data/1 => \
return
# Can happen after one of the transformations above.
-move Discarded r | move Something r => move Something r
+move Discarded x==0 | move Something x==0 => move Something x=0
%endif
+call_ext u==0 u$func:os:perf_counter/0 => \
+ i_perf_counter
+call_ext_last u==0 u$func:os:perf_counter/0 D => \
+ i_perf_counter | deallocate_return D
+call_ext_only u==0 u$func:os:perf_counter/0 => \
+ i_perf_counter | return
+
#
# The general case for BIFs that have no special instructions.
# A BIF used in the tail must be followed by a return instruction.
@@ -988,24 +953,28 @@ call_ext_only Ar=u Bif=u$is_bif => \
# with call instructions.
#
-move S=c r | call_ext Ar=u Func=u$is_not_bif => i_move_call_ext S r Func
-move S=c r | call_ext_last Ar=u Func=u$is_not_bif D => i_move_call_ext_last Func D S r
-move S=c r | call_ext_only Ar=u Func=u$is_not_bif => i_move_call_ext_only Func S r
+move S=c x==0 | call_ext Ar=u Func=u$is_not_bif => i_move_call_ext S Func
+move S=c x==0 | call_ext_last Ar=u Func=u$is_not_bif D => i_move_call_ext_last Func D S
+move S=c x==0 | call_ext_only Ar=u Func=u$is_not_bif => i_move_call_ext_only Func S
call_ext Ar Func => i_call_ext Func
call_ext_last Ar Func D => i_call_ext_last Func D
call_ext_only Ar Func => i_call_ext_only Func
i_apply
-i_apply_last P
+i_apply_last Q
i_apply_only
i_apply_fun
-i_apply_fun_last P
+i_apply_fun_last Q
i_apply_fun_only
+%cold
i_hibernate
+i_perf_counter
+%hot
+
call_bif e
#
@@ -1015,116 +984,86 @@ call_bif e
bif0 u$bif:erlang:self/0 Dst=d => self Dst
bif0 u$bif:erlang:node/0 Dst=d => node Dst
-bif1 Fail Bif=u$bif:erlang:get/1 Src=s Dst=d => i_get Src Dst
+bif1 Fail Bif=u$bif:erlang:get/1 Src=s Dst=d => gen_get(Src, Dst)
-bif2 Jump=j u$bif:erlang:element/2 S1=s S2=rxy Dst=d => gen_element(Jump, S1, S2, Dst)
+bif2 Jump=j u$bif:erlang:element/2 S1=s S2=xy Dst=d => gen_element(Jump, S1, S2, Dst)
-bif1 Fail Bif Literal=q Dst => move Literal x | bif1 Fail Bif x Dst
bif1 p Bif S1 Dst => bif1_body Bif S1 Dst
-bif1_body Bif Literal=q Dst => move Literal x | bif1_body Bif x Dst
-
-bif2 p Bif S1 S2 Dst => i_fetch S1 S2 | i_bif2_body Bif Dst
-bif2 Fail Bif S1 S2 Dst => i_fetch S1 S2 | i_bif2 Fail Bif Dst
+bif2 p Bif S1 S2 Dst => i_bif2_body Bif S1 S2 Dst
+bif2 Fail Bif S1 S2 Dst => i_bif2 Fail Bif S1 S2 Dst
+i_get_hash c I d
i_get s d
-%macro: self Self
-self r
-self x
-self y
+self xy
-%macro: node Node
-node r
node x
%cold
node y
%hot
-i_fast_element r j I d
-i_fast_element x j I d
-i_fast_element y j I d
+# Note: 'I' is sufficient because this instruction will only be used
+# if the arity fits in 24 bits.
+i_fast_element xy j? I d
-i_element r j s d
-i_element x j s d
-i_element y j s d
+i_element xy j? s d
-bif1 f b s d
+bif1 f? b s d
bif1_body b s d
-i_bif2 f b d
-i_bif2_body b d
+i_bif2 f? b s s d
+i_bif2_body b s s d
#
# Internal calls.
#
-move S=c r | call Ar P=f => i_move_call S r P
-move S=s r | call Ar P=f => move_call S r P
-
-i_move_call c r f
-
-%macro:move_call MoveCall -arg_f -size -nonext
-move_call/3
-
-move_call x r f
-move_call y r f
+move S=cxy x==0 | call Ar P=f => move_call S P
-move S=c r | call_last Ar P=f D => i_move_call_last P D S r
-move S r | call_last Ar P=f D => move_call_last S r P D
+move_call/2
+move_call cxy f
-i_move_call_last f P c r
+move S x==0 | call_last Ar P=f D => move_call_last S P D
-%macro:move_call_last MoveCallLast -arg_f -nonext -pack
+move_call_last/3
+move_call_last cxy f Q
-move_call_last/4
-move_call_last x r f Q
-move_call_last y r f Q
+move S=cx x==0 | call_only Ar P=f => move_call_only S P
-move S=c r | call_only Ar P=f => i_move_call_only P S r
-move S=x r | call_only Ar P=f => move_call_only S r P
-
-i_move_call_only f c r
-
-%macro:move_call_only MoveCallOnly -arg_f -nonext
-move_call_only/3
-
-move_call_only x r f
+move_call_only/2
+move_call_only cx f
call Ar Func => i_call Func
call_last Ar Func D => i_call_last Func D
call_only Ar Func => i_call_only Func
i_call f
-i_call_last f P
+i_call_last f Q
i_call_only f
i_call_ext e
-i_call_ext_last e P
+i_call_ext_last e Q
i_call_ext_only e
-i_move_call_ext c r e
-i_move_call_ext_last e P c r
-i_move_call_ext_only e c r
+i_move_call_ext c e
+i_move_call_ext_last e Q c
+i_move_call_ext_only e c
# Fun calls.
call_fun Arity | deallocate D | return => i_call_fun_last Arity D
call_fun Arity => i_call_fun Arity
-i_call_fun I
-i_call_fun_last I P
+i_call_fun t
+i_call_fun_last t Q
make_fun2 OldIndex=u => gen_make_fun2(OldIndex)
-%macro: i_make_fun MakeFun -pack
%cold
-i_make_fun I t
+i_make_fun W t
%hot
-%macro: is_function IsFunction -fail_action
-is_function f x
-is_function f y
-is_function f r
+is_function f? xy
is_function Fail=f c => jump Fail
func_info M F A => i_func_info u M F A
@@ -1133,168 +1072,128 @@ func_info M F A => i_func_info u M F A
# New bit syntax matching (R11B).
# ================================================================
-%cold
+%warm
bs_start_match2 Fail=f ica X Y D => jump Fail
bs_start_match2 Fail Bin X Y D => i_bs_start_match2 Bin Fail X Y D
-i_bs_start_match2 r f I I d
-i_bs_start_match2 x f I I d
-i_bs_start_match2 y f I I d
+i_bs_start_match2 xy f t t x
bs_save2 Reg Index => gen_bs_save(Reg, Index)
-i_bs_save2 r I
-i_bs_save2 x I
+i_bs_save2 x t
bs_restore2 Reg Index => gen_bs_restore(Reg, Index)
-i_bs_restore2 r I
-i_bs_restore2 x I
+i_bs_restore2 x t
# Matching integers
bs_match_string Fail Ms Bits Val => i_bs_match_string Ms Fail Bits Val
-i_bs_match_string r f I I
-i_bs_match_string x f I I
+i_bs_match_string x f W W
# Fetching integers from binaries.
-bs_get_integer2 Fail=f Ms=rx Live=u Sz=sq Unit=u Flags=u Dst=d => \
+bs_get_integer2 Fail=f Ms=x Live=u Sz=sq Unit=u Flags=u Dst=d => \
gen_get_integer2(Fail, Ms, Live, Sz, Unit, Flags, Dst)
-i_bs_get_integer_small_imm r I f I d
-i_bs_get_integer_small_imm x I f I d
-i_bs_get_integer_imm r I I f I d
-i_bs_get_integer_imm x I I f I d
-i_bs_get_integer f I I d
-i_bs_get_integer_8 r f d
-i_bs_get_integer_8 x f d
-i_bs_get_integer_16 r f d
-i_bs_get_integer_16 x f d
-i_bs_get_integer_32 r f I d
-i_bs_get_integer_32 x f I d
+i_bs_get_integer_small_imm x W f? t x
+i_bs_get_integer_imm x W t f? t x
+i_bs_get_integer f? t t x s x
+i_bs_get_integer_8 x f? x
+i_bs_get_integer_16 x f? x
+
+%if ARCH_64
+i_bs_get_integer_32 x f? x
+%endif
# Fetching binaries from binaries.
-bs_get_binary2 Fail=f Ms=rx Live=u Sz=sq Unit=u Flags=u Dst=d => \
+bs_get_binary2 Fail=f Ms=x Live=u Sz=sq Unit=u Flags=u Dst=d => \
gen_get_binary2(Fail, Ms, Live, Sz, Unit, Flags, Dst)
-%macro: i_bs_get_binary_imm2 BsGetBinaryImm_2 -fail_action -gen_dest
-%macro: i_bs_get_binary2 BsGetBinary_2 -fail_action -gen_dest
-%macro: i_bs_get_binary_all2 BsGetBinaryAll_2 -fail_action -gen_dest
-
-i_bs_get_binary_imm2 f r I I I d
-i_bs_get_binary_imm2 f x I I I d
-i_bs_get_binary2 f r I s I d
-i_bs_get_binary2 f x I s I d
-i_bs_get_binary_all2 f r I I d
-i_bs_get_binary_all2 f x I I d
-i_bs_get_binary_all_reuse r f I
-i_bs_get_binary_all_reuse x f I
+i_bs_get_binary_imm2 f? x t W t x
+i_bs_get_binary2 f x t? s t x
+i_bs_get_binary_all2 f? x t t x
+i_bs_get_binary_all_reuse x f? t
# Fetching float from binaries.
-bs_get_float2 Fail=f Ms=rx Live=u Sz=s Unit=u Flags=u Dst=d => \
+bs_get_float2 Fail=f Ms=x Live=u Sz=s Unit=u Flags=u Dst=d => \
gen_get_float2(Fail, Ms, Live, Sz, Unit, Flags, Dst)
-bs_get_float2 Fail=f Ms=rx Live=u Sz=q Unit=u Flags=u Dst=d => jump Fail
+bs_get_float2 Fail=f Ms=x Live=u Sz=q Unit=u Flags=u Dst=d => jump Fail
-%macro: i_bs_get_float2 BsGetFloat2 -fail_action -gen_dest
-i_bs_get_float2 f r I s I d
-i_bs_get_float2 f x I s I d
+i_bs_get_float2 f? x t s t x
# Miscellaneous
-bs_skip_bits2 Fail=f Ms=rx Sz=s Unit=u Flags=u => \
- gen_skip_bits2(Fail, Ms, Sz, Unit, Flags)
-bs_skip_bits2 Fail=f Ms=rx Sz=q Unit=u Flags=u => \
+bs_skip_bits2 Fail=f Ms=x Sz=sq Unit=u Flags=u => \
gen_skip_bits2(Fail, Ms, Sz, Unit, Flags)
-%macro: i_bs_skip_bits_imm2 BsSkipBitsImm2 -fail_action
-i_bs_skip_bits_imm2 f r I
-i_bs_skip_bits_imm2 f x I
+i_bs_skip_bits_imm2 f? x W
+i_bs_skip_bits2 f? x xy t
+i_bs_skip_bits_all2 f? x t
+
+bs_test_tail2 Fail=f Ms=x Bits=u==0 => bs_test_zero_tail2 Fail Ms
+bs_test_tail2 Fail=f Ms=x Bits=u => bs_test_tail_imm2 Fail Ms Bits
+bs_test_zero_tail2 f? x
+bs_test_tail_imm2 f? x W
-%macro: i_bs_skip_bits2 BsSkipBits2 -fail_action
-i_bs_skip_bits2 f r x I
-i_bs_skip_bits2 f r y I
-i_bs_skip_bits2 f x x I
-i_bs_skip_bits2 f x r I
-i_bs_skip_bits2 f x y I
+bs_test_unit F Ms Unit=u==8 => bs_test_unit8 F Ms
+bs_test_unit f? x t
+bs_test_unit8 f? x
-%macro: i_bs_skip_bits_all2 BsSkipBitsAll2 -fail_action
-i_bs_skip_bits_all2 f r I
-i_bs_skip_bits_all2 f x I
+# A y register operand for bs_context_to_binary is rare,
+# but can happen because of inlining.
-bs_test_tail2 Fail=f Ms=rx Bits=u==0 => bs_test_zero_tail2 Fail Ms
-bs_test_tail2 Fail=f Ms=rx Bits=u => bs_test_tail_imm2 Fail Ms Bits
-bs_test_zero_tail2 f r
-bs_test_zero_tail2 f x
-bs_test_tail_imm2 f r I
-bs_test_tail_imm2 f x I
+bs_context_to_binary Y=y | line L | badmatch Y => \
+ move Y x | bs_context_to_binary x | line L | badmatch x
-bs_test_unit F Ms Unit=u==8 => bs_test_unit8 F Ms
-bs_test_unit f r I
-bs_test_unit f x I
-bs_test_unit8 f r
-bs_test_unit8 f x
+bs_context_to_binary Y=y => move Y x | bs_context_to_binary x
-bs_context_to_binary r
bs_context_to_binary x
-bs_context_to_binary y
#
# Utf8/utf16/utf32 support. (R12B-5)
#
-bs_get_utf8 Fail=f Ms=rx u u Dst=d => i_bs_get_utf8 Ms Fail Dst
-i_bs_get_utf8 r f d
-i_bs_get_utf8 x f d
+bs_get_utf8 Fail=f Ms=x u u Dst=d => i_bs_get_utf8 Ms Fail Dst
+i_bs_get_utf8 x f? x
-bs_skip_utf8 Fail=f Ms=rx u u => i_bs_get_utf8 Ms Fail x
+bs_skip_utf8 Fail=f Ms=x u u => i_bs_get_utf8 Ms Fail x
-bs_get_utf16 Fail=f Ms=rx u Flags=u Dst=d => i_bs_get_utf16 Ms Fail Flags Dst
-bs_skip_utf16 Fail=f Ms=rx u Flags=u => i_bs_get_utf16 Ms Fail Flags x
+bs_get_utf16 Fail=f Ms=x u Flags=u Dst=d => i_bs_get_utf16 Ms Fail Flags Dst
+bs_skip_utf16 Fail=f Ms=x u Flags=u => i_bs_get_utf16 Ms Fail Flags x
-i_bs_get_utf16 r f I d
-i_bs_get_utf16 x f I d
+i_bs_get_utf16 x f? t x
-bs_get_utf32 Fail=f Ms=rx Live=u Flags=u Dst=d => \
+bs_get_utf32 Fail=f Ms=x Live=u Flags=u Dst=d => \
bs_get_integer2 Fail Ms Live i=32 u=1 Flags Dst | \
- i_fetch Dst Ms | \
- i_bs_validate_unicode_retract Fail
-bs_skip_utf32 Fail=f Ms=rx Live=u Flags=u => \
+ i_bs_validate_unicode_retract Fail Dst Ms
+bs_skip_utf32 Fail=f Ms=x Live=u Flags=u => \
bs_get_integer2 Fail Ms Live i=32 u=1 Flags x | \
- i_fetch x Ms | \
- i_bs_validate_unicode_retract Fail
+ i_bs_validate_unicode_retract Fail x Ms
-i_bs_validate_unicode_retract j
+i_bs_validate_unicode_retract j s S
%hot
#
# Constructing binaries
#
-%cold
+%warm
bs_init2 Fail Sz Words Regs Flags Dst | binary_too_big(Sz) => system_limit Fail
-bs_init2 Fail Sz=u Words=u==0 Regs Flags Dst | should_gen_heap_bin(Sz) => \
- i_bs_init_heap_bin Sz Regs Dst
bs_init2 Fail Sz=u Words=u==0 Regs Flags Dst => i_bs_init Sz Regs Dst
-bs_init2 Fail Sz=u Words Regs Flags Dst | should_gen_heap_bin(Sz) => \
- i_bs_init_heap_bin_heap Sz Words Regs Dst
bs_init2 Fail Sz=u Words Regs Flags Dst => \
i_bs_init_heap Sz Words Regs Dst
bs_init2 Fail Sz Words=u==0 Regs Flags Dst => \
i_bs_init_fail Sz Fail Regs Dst
bs_init2 Fail Sz Words Regs Flags Dst => \
- i_fetch Sz r | i_bs_init_fail_heap Words Fail Regs Dst
+ i_bs_init_fail_heap Sz Words Fail Regs Dst
-i_bs_init_fail r j I d
-i_bs_init_fail x j I d
-i_bs_init_fail y j I d
+i_bs_init_fail xy j? t? x
-i_bs_init_fail_heap I j I d
+i_bs_init_fail_heap s I j? t? x
-i_bs_init I I d
-i_bs_init_heap_bin I I d
+i_bs_init W t? x
-i_bs_init_heap I I I d
-i_bs_init_heap_bin_heap I I I d
+i_bs_init_heap W I t? x
bs_init_bits Fail Sz=o Words Regs Flags Dst => system_limit Fail
@@ -1305,117 +1204,84 @@ bs_init_bits Fail Sz=u Words Regs Flags Dst => i_bs_init_bits_heap Sz Words Reg
bs_init_bits Fail Sz Words=u==0 Regs Flags Dst => \
i_bs_init_bits_fail Sz Fail Regs Dst
bs_init_bits Fail Sz Words Regs Flags Dst => \
- i_fetch Sz r | i_bs_init_bits_fail_heap Words Fail Regs Dst
+ i_bs_init_bits_fail_heap Sz Words Fail Regs Dst
-i_bs_init_bits_fail r j I d
-i_bs_init_bits_fail x j I d
-i_bs_init_bits_fail y j I d
+i_bs_init_bits_fail xy j? t? x
-i_bs_init_bits_fail_heap I j I d
+i_bs_init_bits_fail_heap s I j? t? x
-i_bs_init_bits I I d
-i_bs_init_bits_heap I I I d
+i_bs_init_bits W t? x
+i_bs_init_bits_heap W I t? x
bs_add Fail S1=i==0 S2 Unit=u==1 D => move S2 D
-bs_add Fail S1 S2 Unit D => i_fetch S1 S2 | i_bs_add Fail Unit D
-i_bs_add j I d
+bs_add j? s s t? x
bs_append Fail Size Extra Live Unit Bin Flags Dst => \
- i_fetch Size Bin | i_bs_append Fail Extra Live Unit Dst
+ move Bin x | i_bs_append Fail Extra Live Unit Size Dst
bs_private_append Fail Size Unit Bin Flags Dst => \
- i_fetch Size Bin | i_bs_private_append Fail Unit Dst
+ i_bs_private_append Fail Unit Size Bin Dst
bs_init_writable
-i_bs_append j I I I d
-i_bs_private_append j I d
+i_bs_append j? I t? t s x
+i_bs_private_append j? t s S x
#
# Storing integers into binaries.
#
-bs_put_integer Fail=j Sz=s Unit=u Flags=u Literal=q => \
- move Literal x | bs_put_integer Fail Sz Unit Flags x
bs_put_integer Fail=j Sz=sq Unit=u Flags=u Src=s => \
gen_put_integer(Fail, Sz, Unit, Flags, Src)
-%macro: i_new_bs_put_integer NewBsPutInteger
-%macro: i_new_bs_put_integer_imm NewBsPutIntegerImm
-
-i_new_bs_put_integer j s I s
-i_new_bs_put_integer_imm j I I s
+i_new_bs_put_integer j? s t s
+i_new_bs_put_integer_imm j? W t s
#
# Utf8/utf16/utf32 support. (R12B-5)
#
-bs_utf8_size Fail Literal=q Dst=d => \
- move Literal x | bs_utf8_size Fail x Dst
bs_utf8_size j Src=s Dst=d => i_bs_utf8_size Src Dst
-i_bs_utf8_size s d
+i_bs_utf8_size s x
-bs_utf16_size Fail Literal=q Dst=d => \
- move Literal x | bs_utf16_size Fail x Dst
bs_utf16_size j Src=s Dst=d => i_bs_utf16_size Src Dst
-i_bs_utf16_size s d
+i_bs_utf16_size s x
-bs_put_utf8 Fail=j Flags=u Literal=q => \
- move Literal x | bs_put_utf8 Fail Flags x
bs_put_utf8 Fail u Src=s => i_bs_put_utf8 Fail Src
-i_bs_put_utf8 j s
-
-bs_put_utf16 Fail=j Flags=u Literal=q => \
- move Literal x | bs_put_utf16 Fail Flags x
-bs_put_utf16 Fail Flags=u Src=s => i_bs_put_utf16 Fail Flags Src
+i_bs_put_utf8 j? s
-i_bs_put_utf16 j I s
+bs_put_utf16 j? t s
-bs_put_utf32 Fail=j Flags=u Literal=q => \
- move Literal x | bs_put_utf32 Fail Flags x
bs_put_utf32 Fail=j Flags=u Src=s => \
i_bs_validate_unicode Fail Src | bs_put_integer Fail i=32 u=1 Flags Src
-i_bs_validate_unicode j s
+i_bs_validate_unicode j? s
#
# Storing floats into binaries.
#
bs_put_float Fail Sz=q Unit Flags Val => badarg Fail
-bs_put_float Fail=j Sz Unit=u Flags=u Literal=q => \
- move Literal x | bs_put_float Fail Sz Unit Flags x
-
bs_put_float Fail=j Sz=s Unit=u Flags=u Src=s => \
gen_put_float(Fail, Sz, Unit, Flags, Src)
-%macro: i_new_bs_put_float NewBsPutFloat
-%macro: i_new_bs_put_float_imm NewBsPutFloatImm
-
-i_new_bs_put_float j s I s
-i_new_bs_put_float_imm j I I s
+i_new_bs_put_float j? s t s
+i_new_bs_put_float_imm j? W t s
#
# Storing binaries into binaries.
#
-bs_put_binary Fail Sz Unit Flags Literal=q => \
- move Literal x | bs_put_binary Fail Sz Unit Flags x
bs_put_binary Fail=j Sz=s Unit=u Flags=u Src=s => \
gen_put_binary(Fail, Sz, Unit, Flags, Src)
-%macro: i_new_bs_put_binary NewBsPutBinary
-i_new_bs_put_binary j s I s
-
-%macro: i_new_bs_put_binary_imm NewBsPutBinaryImm
-i_new_bs_put_binary_imm j I s
-
-%macro: i_new_bs_put_binary_all NewBsPutBinaryAll
-i_new_bs_put_binary_all j s I
+i_new_bs_put_binary j? s t s
+i_new_bs_put_binary_imm j? W s
+i_new_bs_put_binary_all j? s t
#
# Warning: The i_bs_put_string and i_new_bs_put_string instructions
@@ -1423,9 +1289,7 @@ i_new_bs_put_binary_all j s I
# Don't change the instruction format unless you change the loader too.
#
-bs_put_string I I
-
-%hot
+bs_put_string W W
#
# New floating point instructions (R8).
@@ -1439,11 +1303,13 @@ fnegate p FR1 FR2 => i_fnegate FR1 FR2
fconv Arg=iqan Dst=l => move Arg x | fconv x Dst
-fmove q l
-fmove d l
-fmove l d
+fmove Arg=l Dst=d => fstore Arg Dst
+fmove Arg=dq Dst=l => fload Arg Dst
+
+fstore l d
+fload Sq l
-fconv d l
+fconv S l
i_fadd l l l
i_fsub l l l
@@ -1453,162 +1319,202 @@ i_fnegate l l
fclearerror | no_fpe_signals() =>
fcheckerror p | no_fpe_signals() =>
+
+%unless NO_FPE_SIGNALS
fcheckerror p => i_fcheckerror
i_fcheckerror
fclearerror
+%endif
+
+%hot
#
# New apply instructions in R10B.
#
-apply I
-apply_last I P
+apply t
+apply_last t Q
+
+#
+# Handle compatibility with OTP 17 here.
+#
+
+i_put_map_assoc/4
+
+# We KNOW that in OTP 20 (actually OTP 18 and higher), a put_map_assoc instruction
+# is always preceded by an is_map test. That means that put_map_assoc can never
+# fail and does not need any failure label.
+
+put_map_assoc Fail Map Dst Live Size Rest=* | compiled_with_otp_20_or_higher() => \
+ i_put_map_assoc Map Dst Live Size Rest
+
+# Translate the put_map_assoc instruction if the module was compiled by a
+# compiler older than OTP 20. This is only necessary if the OTP 17 compiler
+# was used, but we have no safe and relatively easy way to know whether
+# OTP 18/19 was used.
+
+put_map_assoc Fail=p Map Dst Live Size Rest=* => \
+ ensure_map Map | i_put_map_assoc Map Dst Live Size Rest
+put_map_assoc Fail=f Map Dst Live Size Rest=* => \
+ is_map Fail Map | i_put_map_assoc Map Dst Live Size Rest
+
+ensure_map Lit=q | literal_is_map(Lit) =>
+ensure_map Src=cqy => move Src x | ensure_map x
+
+%cold
+ensure_map x
+%hot
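+#
+# Illustration (operands invented): a pre-OTP-20 module with a real
+# failure label gets an explicit test,
+#     put_map_assoc Fail Map Dst Live Size ... =>
+#         is_map Fail Map | i_put_map_assoc Map Dst Live Size ...
+# while a module compiled with OTP 20 goes straight to i_put_map_assoc.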
#
-# Map instructions in R17.
+# Map instructions. First introduced in R17.
#
-put_map_assoc F n Dst Live Size Rest=* => new_map F Dst Live Size Rest
-put_map_assoc F Src=s Dst Live Size Rest=* => \
- update_map_assoc F Src Dst Live Size Rest
-put_map_assoc F Src Dst Live Size Rest=* => \
- move Src x | update_map_assoc F x Dst Live Size Rest
-put_map_exact F n Dst Live Size Rest=* => new_map F Dst Live Size Rest
-put_map_exact F Src=s Dst Live Size Rest=* => \
+sorted_put_map_assoc/4
+i_put_map_assoc Map Dst Live Size Rest=* | map_key_sort(Size, Rest) => \
+ sorted_put_map_assoc Map Dst Live Size Rest
+
+sorted_put_map_exact/5
+put_map_exact F Map Dst Live Size Rest=* | map_key_sort(Size, Rest) => \
+ sorted_put_map_exact F Map Dst Live Size Rest
+
+sorted_put_map_assoc Map Dst Live Size Rest=* | is_empty_map(Map) => \
+ new_map Dst Live Size Rest
+sorted_put_map_assoc Src=s Dst Live Size Rest=* => \
+ update_map_assoc Src Dst Live Size Rest
+sorted_put_map_assoc Src Dst Live Size Rest=* => \
+ move Src x | update_map_assoc x Dst Live Size Rest
+
+sorted_put_map_exact F Src=s Dst Live Size Rest=* => \
update_map_exact F Src Dst Live Size Rest
-put_map_exact F Src Dst Live Size Rest=* => \
+sorted_put_map_exact F Src Dst Live Size Rest=* => \
move Src x | update_map_exact F x Dst Live Size Rest
-new_map j d I I
-update_map_assoc j s d I I
-update_map_exact j s d I I
+new_map Dst Live Size Rest=* | is_small_map_literal_keys(Size, Rest) => \
+ gen_new_small_map_lit(Dst, Live, Size, Rest)
-is_map Fail Literal=q => move Literal x | is_map Fail x
-is_map Fail c => jump Fail
+new_map d t I
+i_new_small_map_lit d t q
+update_map_assoc s d t I
+update_map_exact j? s d t I
-%macro: is_map IsMap -fail_action
-is_map f r
-is_map f x
-is_map f y
+is_map Fail Lit=q | literal_is_map(Lit) =>
+is_map Fail cq => jump Fail
-## Transform has_map_field(s) #{ K1 := _, K2 := _ }
+is_map f? xy
-has_map_field/3
+## Transform has_map_fields #{ K1 := _, K2 := _ } to has_map_elements
-has_map_fields Fail Src Size=u==1 Rest=* => gen_has_map_field(Fail,Src,Size,Rest)
-has_map_fields Fail Src Size Rest=* => i_has_map_fields Fail Src Size Rest
+has_map_fields Fail Src Size Rest=* => \
+ gen_has_map_fields(Fail, Src, Size, Rest)
-i_has_map_fields f s I
+## Transform get_map_elements(s) #{ K1 := V1, K2 := V2 }
-has_map_field Fail Src=rxy Key=arxy => i_has_map_field Fail Src Key
-has_map_field Fail Src Key => move Key x | i_has_map_field Fail Src x
+get_map_elements Fail Src=xy Size=u==2 Rest=* => \
+ gen_get_map_element(Fail, Src, Size, Rest)
+get_map_elements Fail Src Size Rest=* | map_key_sort(Size, Rest) => \
+ gen_get_map_elements(Fail, Src, Size, Rest)
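+#
+# E.g. (hypothetical): a single-pair '#{Key := V} = Map' arrives with
+# Size=u==2 and goes through gen_get_map_element(), which presumably
+# selects i_get_map_element_hash with a precomputed key hash when the
+# key is a literal.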
-%macro: i_has_map_field HasMapField -fail_action
-i_has_map_field f r a
-i_has_map_field f x a
-i_has_map_field f y a
-i_has_map_field f r r
-i_has_map_field f x r
-i_has_map_field f y r
-i_has_map_field f r x
-i_has_map_field f x x
-i_has_map_field f y x
-i_has_map_field f r y
-i_has_map_field f x y
-i_has_map_field f y y
+i_get_map_elements f? s I
-## Transform get_map_elements(s) #{ K1 := V1, K2 := V2 }
+i_get_map_element Fail Src=xy Key=y Dst => \
+ move Key x | i_get_map_element Fail Src x Dst
+
+i_get_map_element_hash f? xy c I xy
-get_map_element/4
+i_get_map_element f? xy x xy
-get_map_elements Fail Src=rxy Size=u==2 Rest=* => gen_get_map_element(Fail,Src,Size,Rest)
-get_map_elements Fail Src Size Rest=* => i_get_map_elements Fail Src Size Rest
+#
+# Convert the plus operations to a generic plus instruction.
+#
+gen_plus/5
+gen_minus/5
-i_get_map_elements f s I
+gc_bif1 Fail Live u$bif:erlang:splus/1 Src Dst => \
+ gen_plus Fail Live Src i Dst
+gc_bif2 Fail Live u$bif:erlang:splus/2 S1 S2 Dst => \
+ gen_plus Fail Live S1 S2 Dst
-get_map_element Fail Src=rxy Key=ax Dst => i_get_map_element Fail Src Key Dst
-get_map_element Fail Src=rxy Key=rycq Dst => \
- move Key x | i_get_map_element Fail Src x Dst
-get_map_element Fail Src Key Dst => jump Fail
-
-%macro: i_get_map_element GetMapElement -fail_action
-i_get_map_element f r a r
-i_get_map_element f x a r
-i_get_map_element f y a r
-i_get_map_element f r a x
-i_get_map_element f x a x
-i_get_map_element f y a x
-i_get_map_element f r a y
-i_get_map_element f x a y
-i_get_map_element f y a y
-i_get_map_element f r x r
-i_get_map_element f x x r
-i_get_map_element f y x r
-i_get_map_element f r x x
-i_get_map_element f x x x
-i_get_map_element f y x x
-i_get_map_element f r x y
-i_get_map_element f x x y
-i_get_map_element f y x y
+gc_bif1 Fail Live u$bif:erlang:sminus/1 Src Dst => \
+ gen_minus Fail Live i Src Dst
+gc_bif2 Fail Live u$bif:erlang:sminus/2 S1 S2 Dst => \
+ gen_minus Fail Live S1 S2 Dst
#
# Optimize addition and subtraction of small literals using
# the i_increment/4 instruction (in bodies, not in guards).
#
-gc_bif2 p Live u$bif:erlang:splus/2 Int=i Reg=d Dst => \
+gen_plus p Live Int=i Reg=d Dst => \
gen_increment(Reg, Int, Live, Dst)
-gc_bif2 p Live u$bif:erlang:splus/2 Reg=d Int=i Dst => \
+gen_plus p Live Reg=d Int=i Dst => \
gen_increment(Reg, Int, Live, Dst)
-gc_bif2 p Live u$bif:erlang:sminus/2 Reg=d Int=i Dst | \
- negation_is_small(Int) => \
+gen_minus p Live Reg=d Int=i Dst | negation_is_small(Int) => \
gen_increment_from_minus(Reg, Int, Live, Dst)
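+#
+# A made-up instance: in a body, 'X + 1' with X in x(1) arrives as
+#     gen_plus p Live x(1) i=1 Dst
+# and gen_increment() should emit a single i_increment; 'X - 1' takes
+# the gen_minus route, since negation_is_small(1) holds (-1 is a small).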
#
# GCing arithmetic instructions.
#
-gc_bif2 Fail I u$bif:erlang:splus/2 S1 S2 Dst=d => i_fetch S1 S2 | i_plus Fail I Dst
-gc_bif2 Fail I u$bif:erlang:sminus/2 S1 S2 Dst=d => i_fetch S1 S2 | i_minus Fail I Dst
-gc_bif2 Fail I u$bif:erlang:stimes/2 S1 S2 Dst=d => i_fetch S1 S2 | i_times Fail I Dst
-gc_bif2 Fail I u$bif:erlang:div/2 S1 S2 Dst=d => i_fetch S1 S2 | i_m_div Fail I Dst
+gen_plus Fail Live S1 S2 Dst => i_plus S1 S2 Fail Live Dst
+
+gen_minus Fail Live S1 S2 Dst => i_minus S1 S2 Fail Live Dst
-gc_bif2 Fail I u$bif:erlang:intdiv/2 S1 S2 Dst=d => i_fetch S1 S2 | i_int_div Fail I Dst
-gc_bif2 Fail I u$bif:erlang:rem/2 S1 S2 Dst=d => i_fetch S1 S2 | i_rem Fail I Dst
+gc_bif2 Fail Live u$bif:erlang:stimes/2 S1 S2 Dst => \
+ i_times Fail Live S1 S2 Dst
-gc_bif2 Fail I u$bif:erlang:bsl/2 S1 S2 Dst=d => i_fetch S1 S2 | i_bsl Fail I Dst
-gc_bif2 Fail I u$bif:erlang:bsr/2 S1 S2 Dst=d => i_fetch S1 S2 | i_bsr Fail I Dst
+gc_bif2 Fail Live u$bif:erlang:div/2 S1 S2 Dst => \
+ i_m_div Fail Live S1 S2 Dst
+gc_bif2 Fail Live u$bif:erlang:intdiv/2 S1 S2 Dst => \
+ i_int_div Fail Live S1 S2 Dst
-gc_bif2 Fail I u$bif:erlang:band/2 S1 S2 Dst=d => i_fetch S1 S2 | i_band Fail I Dst
-gc_bif2 Fail I u$bif:erlang:bor/2 S1 S2 Dst=d => i_fetch S1 S2 | i_bor Fail I Dst
-gc_bif2 Fail I u$bif:erlang:bxor/2 S1 S2 Dst=d => i_fetch S1 S2 | i_bxor Fail I Dst
+gc_bif2 Fail Live u$bif:erlang:rem/2 S1 S2 Dst => \
+ i_rem S1 S2 Fail Live Dst
+
+gc_bif2 Fail Live u$bif:erlang:bsl/2 S1 S2 Dst => \
+ i_bsl S1 S2 Fail Live Dst
+gc_bif2 Fail Live u$bif:erlang:bsr/2 S1 S2 Dst => \
+ i_bsr S1 S2 Fail Live Dst
+
+gc_bif2 Fail Live u$bif:erlang:band/2 S1 S2 Dst => \
+ i_band S1 S2 Fail Live Dst
+
+gc_bif2 Fail Live u$bif:erlang:bor/2 S1 S2 Dst => \
+ i_bor Fail Live S1 S2 Dst
+
+gc_bif2 Fail Live u$bif:erlang:bxor/2 S1 S2 Dst => \
+ i_bxor Fail Live S1 S2 Dst
gc_bif1 Fail I u$bif:erlang:bnot/1 Src Dst=d => i_int_bnot Fail Src I Dst
-gc_bif1 Fail I u$bif:erlang:sminus/1 Src Dst=d => i_fetch i Src | i_minus Fail I Dst
-gc_bif1 Fail I u$bif:erlang:splus/1 Src Dst=d => i_fetch i Src | i_plus Fail I Dst
+i_increment rxy W t d
+
+i_plus x xy j? t d
+i_plus s s j? t d
+
+i_minus x x j? t d
+i_minus s s j? t d
-i_increment r I I d
-i_increment x I I d
-i_increment y I I d
+i_times j? t s s d
-i_plus j I d
-i_minus j I d
-i_times j I d
-i_m_div j I d
-i_int_div j I d
-i_rem j I d
+i_m_div j? t s s d
+i_int_div j? t s s d
-i_bsl j I d
-i_bsr j I d
+i_rem x x j? t d
+i_rem s s j? t d
-i_band j I d
-i_bor j I d
-i_bxor j I d
+i_bsl s s j? t d
+i_bsr s s j? t d
-i_int_bnot j s I d
+i_band x c j? t d
+i_band s s j? t d
+
+i_bor j? I s s d
+i_bxor j? I s s d
+
+i_int_bnot Fail Src=c Live Dst => move Src x | i_int_bnot Fail x Live Dst
+
+i_int_bnot j? S t d
#
# Old guard BIFs that creates heap fragments are no longer allowed.
@@ -1632,21 +1538,18 @@ gc_bif2 Fail I Bif S1 S2 Dst => \
gc_bif3 Fail I Bif S1 S2 S3 Dst => \
gen_guard_bif3(Fail, I, Bif, S1, S2, S3, Dst)
-i_gc_bif1 Fail Bif V=q Live D => move V x | i_gc_bif1 Fail Bif x Live D
-
-i_gc_bif1 j I s I d
-
-ii_gc_bif2/6
-
-ii_gc_bif2 Fail Bif S1 S2 Live D => i_fetch S1 S2 | i_gc_bif2 Fail Bif Live D
+i_gc_bif1 j? W s t? d
-i_gc_bif2 j I I d
+i_gc_bif2 j? W t? s s d
ii_gc_bif3/7
-ii_gc_bif3 Fail Bif S1 S2 S3 Live D => move S1 x | i_fetch S2 S3 | i_gc_bif3 Fail Bif x Live D
+# A specific instruction can only have 6 operands, so we must
+# pass one of the arguments in an x register.
+ii_gc_bif3 Fail Bif Live S1 S2 S3 Dst => \
+ move S1 x | i_gc_bif3 Fail Bif Live S2 S3 Dst
-i_gc_bif3 j I s I d
+i_gc_bif3 j? W t? s s d
#
# The following instruction is specially handled in beam_load.c
diff --git a/erts/emulator/beam/packet_parser.c b/erts/emulator/beam/packet_parser.c
index db0e78b1a7..f14910bc72 100644
--- a/erts/emulator/beam/packet_parser.c
+++ b/erts/emulator/beam/packet_parser.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2008-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2008-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -255,6 +256,7 @@ int packet_get_length(enum PacketParseType htype,
const char* ptr, unsigned n, /* Bytes read so far */
unsigned max_plen, /* Max packet length, 0=no limit */
unsigned trunc_len, /* Truncate (lines) if longer, 0=no limit */
+ char delimiter, /* Line delimiting character */
int* statep) /* Protocol specific state */
{
unsigned hlen, plen;
@@ -298,9 +300,9 @@ int packet_get_length(enum PacketParseType htype,
goto remain;
case TCP_PB_LINE_LF: {
- /* TCP_PB_LINE_LF: [Data ... \n] */
+ /* TCP_PB_LINE_LF: [Data ... Delimiter] */
const char* ptr2;
- if ((ptr2 = memchr(ptr, '\n', n)) == NULL) {
+ if ((ptr2 = memchr(ptr, delimiter, n)) == NULL) {
if (n > max_plen && max_plen != 0) { /* packet full */
DEBUGF((" => packet full (no NL)=%d\r\n", n));
goto error;
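
The packet_parser.c change above generalizes TCP_PB_LINE_LF framing from a hard-coded '\n' to a caller-supplied delimiter, still located with a single memchr() over the bytes read so far. A standalone sketch of that framing rule (an illustration, not the ERTS function itself):

    #include <stdio.h>
    #include <string.h>

    /* Returns the packet length including the delimiter,
     * or 0 if more bytes must be read first. */
    static size_t line_packet_length(const char *buf, size_t n, char delimiter)
    {
        const char *end = memchr(buf, delimiter, n);
        return end ? (size_t)(end - buf) + 1 : 0;
    }

    int main(void)
    {
        const char data[] = "GET /;rest";
        /* prints 6, the length of "GET /;" */
        printf("%zu\n", line_packet_length(data, sizeof(data) - 1, ';'));
        return 0;
    }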
diff --git a/erts/emulator/beam/packet_parser.h b/erts/emulator/beam/packet_parser.h
index 1c3a9aa3da..358d650804 100644
--- a/erts/emulator/beam/packet_parser.h
+++ b/erts/emulator/beam/packet_parser.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2008-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2008-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -104,7 +105,8 @@ int packet_get_length(enum PacketParseType htype,
const char* ptr, unsigned n, /* Bytes read so far */
unsigned max_plen, /* Packet max length, 0=no limit */
unsigned trunc_len, /* Truncate (lines) if longer, 0=no limit */
- int* statep); /* Internal protocol state */
+ char delimiter, /* Line delimiting character */
+ int* statep); /* Internal protocol state */
ERTS_GLB_INLINE
void packet_get_body(enum PacketParseType htype,
diff --git a/erts/emulator/beam/register.c b/erts/emulator/beam/register.c
index c626cb2780..92a0854ad3 100644
--- a/erts/emulator/beam/register.c
+++ b/erts/emulator/beam/register.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -37,16 +38,15 @@ static Hash process_reg;
#define REG_HASH(term) ((HashValue) atom_val(term))
-static erts_smp_rwmtx_t regtab_rwmtx;
+static erts_rwmtx_t regtab_rwmtx;

-#define reg_try_read_lock() erts_smp_rwmtx_tryrlock(&regtab_rwmtx)
-#define reg_try_write_lock() erts_smp_rwmtx_tryrwlock(&regtab_rwmtx)
-#define reg_read_lock() erts_smp_rwmtx_rlock(&regtab_rwmtx)
-#define reg_write_lock() erts_smp_rwmtx_rwlock(&regtab_rwmtx)
-#define reg_read_unlock() erts_smp_rwmtx_runlock(&regtab_rwmtx)
-#define reg_write_unlock() erts_smp_rwmtx_rwunlock(&regtab_rwmtx)
+#define reg_try_read_lock() erts_rwmtx_tryrlock(&regtab_rwmtx)
+#define reg_try_write_lock() erts_rwmtx_tryrwlock(&regtab_rwmtx)
+#define reg_read_lock() erts_rwmtx_rlock(&regtab_rwmtx)
+#define reg_write_lock() erts_rwmtx_rwlock(&regtab_rwmtx)
+#define reg_read_unlock() erts_rwmtx_runlock(&regtab_rwmtx)
+#define reg_write_unlock() erts_rwmtx_rwunlock(&regtab_rwmtx)

-#ifdef ERTS_SMP
static ERTS_INLINE void
reg_safe_read_lock(Process *c_p, ErtsProcLocks *c_p_locks)
{
@@ -63,7 +63,7 @@ reg_safe_read_lock(Process *c_p, ErtsProcLocks *c_p_locks)
}
/* Release process locks in order to avoid deadlock */
- erts_smp_proc_unlock(c_p, *c_p_locks);
+ erts_proc_unlock(c_p, *c_p_locks);
*c_p_locks = 0;
}
@@ -86,14 +86,13 @@ reg_safe_write_lock(Process *c_p, ErtsProcLocks *c_p_locks)
}
/* Release process locks in order to avoid deadlock */
- erts_smp_proc_unlock(c_p, *c_p_locks);
+ erts_proc_unlock(c_p, *c_p_locks);
*c_p_locks = 0;
}
reg_write_lock();
}
-#endif
static ERTS_INLINE int
is_proc_alive(Process *p)
@@ -101,7 +100,7 @@ is_proc_alive(Process *p)
return !ERTS_PROC_IS_EXITING(p);
}
-void register_info(int to, void *to_arg)
+void register_info(fmtfn_t to, void *to_arg)
{
int lock = !ERTS_IS_CRASH_DUMPING;
if (lock)
@@ -124,7 +123,7 @@ static RegProc* reg_alloc(RegProc *tmpl)
{
RegProc* obj = (RegProc*) erts_alloc(ERTS_ALC_T_REG_PROC, sizeof(RegProc));
if (!obj) {
- erl_exit(1, "Can't allocate %d bytes of memory\n", sizeof(RegProc));
+ erts_exit(ERTS_ERROR_EXIT, "Can't allocate %d bytes of memory\n", sizeof(RegProc));
}
obj->name = tmpl->name;
obj->p = tmpl->p;
@@ -140,16 +139,20 @@ static void reg_free(RegProc *obj)
void init_register_table(void)
{
HashFunctions f;
- erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
- rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
- rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+ erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED;
- erts_smp_rwmtx_init_opt(&regtab_rwmtx, &rwmtx_opt, "reg_tab");
+ erts_rwmtx_init_opt(&regtab_rwmtx, &rwmtx_opt, "reg_tab", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
f.hash = (H_FUN) reg_hash;
f.cmp = (HCMP_FUN) reg_cmp;
f.alloc = (HALLOC_FUN) reg_alloc;
f.free = (HFREE_FUN) reg_free;
+ f.meta_alloc = (HMALLOC_FUN) erts_alloc;
+ f.meta_free = (HMFREE_FUN) erts_free;
+ f.meta_print = (HMPRINT_FUN) erts_print;
hash_init(ERTS_ALC_T_REG_TABLE, &process_reg, "process_reg",
PREG_HASH_SIZE, f);
@@ -170,7 +173,7 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id)
Process *proc = NULL;
Port *port = NULL;
RegProc r, *rp;
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);
+ ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);
if (is_not_atom(name) || name == am_undefined)
return res;
@@ -180,7 +183,7 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id)
else {
if (is_not_internal_pid(id) && is_not_internal_port(id))
return res;
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
if (is_internal_port(id)) {
port = erts_id2port(id);
if (!port)
@@ -188,15 +191,13 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id)
}
}
-#ifdef ERTS_SMP
{
ErtsProcLocks proc_locks = proc ? ERTS_PROC_LOCK_MAIN : 0;
reg_safe_write_lock(proc, &proc_locks);
if (proc && !proc_locks)
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
-#endif
if (is_internal_pid(id)) {
if (!proc)
@@ -210,7 +211,7 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id)
}
else {
ASSERT(!INVALID_PORT(port, id));
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(port));
r.pt = port;
if (r.pt->common.u.alive.reg)
goto done;
@@ -222,7 +223,8 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id)
rp = (RegProc*) hash_put(&process_reg, (void*) &r);
if (proc && rp->p == proc) {
if (IS_TRACED_FL(proc, F_TRACE_PROCS)) {
- trace_proc(c_p, proc, am_register, name);
+ trace_proc(proc, ERTS_PROC_LOCK_MAIN,
+ proc, am_register, name);
}
proc->common.u.alive.reg = rp;
}
@@ -244,8 +246,8 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id)
erts_port_release(port);
if (c_p != proc) {
if (proc)
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
return res;
}
@@ -266,14 +268,15 @@ erts_whereis_name_to_id(Process *c_p, Eterm name)
HashValue hval;
int ix;
HashBucket* b;
-#ifdef ERTS_SMP
- ErtsProcLocks c_p_locks = c_p ? ERTS_PROC_LOCK_MAIN : 0;
-
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);
+ ErtsProcLocks c_p_locks = 0;
+ if (c_p) {
+ c_p_locks = ERTS_PROC_LOCK_MAIN;
+ ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);
+ }
reg_safe_read_lock(c_p, &c_p_locks);
+
if (c_p && !c_p_locks)
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
-#endif
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
hval = REG_HASH(name);
ix = hval % process_reg.size;
@@ -315,13 +318,13 @@ erts_whereis_name(Process *c_p,
Process** proc,
ErtsProcLocks need_locks,
int flags,
- Port** port)
+ Port** port,
+ int lock_port)
{
RegProc* rp = NULL;
HashValue hval;
int ix;
HashBucket* b;
-#ifdef ERTS_SMP
ErtsProcLocks current_c_p_locks;
Port *pending_port = NULL;
@@ -338,7 +341,6 @@ erts_whereis_name(Process *c_p,
* - read reg lock
* - current_c_p_locks (either c_p_locks or 0) on c_p
*/
-#endif
hval = REG_HASH(name);
ix = hval % process_reg.size;
@@ -360,7 +362,6 @@ erts_whereis_name(Process *c_p,
if (!rp)
*proc = NULL;
else {
-#ifdef ERTS_SMP
if (!rp->p)
*proc = NULL;
else {
@@ -377,19 +378,12 @@ erts_whereis_name(Process *c_p,
*proc = rp->p;
else {
if (need_locks)
- erts_smp_proc_unlock(rp->p, need_locks);
+ erts_proc_unlock(rp->p, need_locks);
*proc = NULL;
}
- if (*proc && (flags & ERTS_P2P_FLG_SMP_INC_REFC))
- erts_smp_proc_inc_refc(rp->p);
}
-#else
- if (rp->p
- && ((flags & ERTS_P2P_FLG_ALLOW_OTHER_X) || is_proc_alive(rp->p)))
- *proc = rp->p;
- else
- *proc = NULL;
-#endif
+ if (*proc && (flags & ERTS_P2P_FLG_INC_REFC))
+ erts_proc_inc_refc(*proc);
}
}
@@ -397,41 +391,39 @@ erts_whereis_name(Process *c_p,
if (!rp || !rp->pt)
*port = NULL;
else {
-#ifdef ERTS_SMP
- if (pending_port == rp->pt)
- pending_port = NULL;
- else {
- if (pending_port) {
- /* Ahh! Registered port changed while reg lock
- was unlocked... */
- erts_port_release(pending_port);
- pending_port = NULL;
- }
+ if (lock_port) {
+ if (pending_port == rp->pt)
+ pending_port = NULL;
+ else {
+ if (pending_port) {
+ /* Ahh! Registered port changed while reg lock
+ was unlocked... */
+ erts_port_release(pending_port);
+ pending_port = NULL;
+ }

- if (erts_smp_port_trylock(rp->pt) == EBUSY) {
- Eterm id = rp->pt->common.id; /* id read only... */
- /* Unlock all locks, acquire port lock, and restart... */
- if (current_c_p_locks) {
- erts_smp_proc_unlock(c_p, current_c_p_locks);
- current_c_p_locks = 0;
- }
- reg_read_unlock();
- pending_port = erts_id2port(id);
- goto restart;
- }
- }
-#endif
+ if (erts_port_trylock(rp->pt) == EBUSY) {
+ Eterm id = rp->pt->common.id; /* id read only... */
+ /* Unlock all locks, acquire port lock, and restart... */
+ if (current_c_p_locks) {
+ erts_proc_unlock(c_p, current_c_p_locks);
+ current_c_p_locks = 0;
+ }
+ reg_read_unlock();
+ pending_port = erts_id2port(id);
+ goto restart;
+ }
+ }
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(rp->pt));
+ }
*port = rp->pt;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(*port));
}
}
-#ifdef ERTS_SMP
if (c_p && !current_c_p_locks)
- erts_smp_proc_lock(c_p, c_p_locks);
+ erts_proc_lock(c_p, c_p_locks);
if (pending_port)
erts_port_release(pending_port);
-#endif
reg_read_unlock();
}
@@ -444,7 +436,7 @@ erts_whereis_process(Process *c_p,
int flags)
{
Process *proc;
- erts_whereis_name(c_p, c_p_locks, name, &proc, need_locks, flags, NULL);
+ erts_whereis_name(c_p, c_p_locks, name, &proc, need_locks, flags, NULL, 0);
return proc;
}
@@ -463,8 +455,7 @@ int erts_unregister_name(Process *c_p,
int res = 0;
RegProc r, *rp;
Port *port = c_prt;
-#ifdef ERTS_SMP
- ErtsProcLocks current_c_p_locks;
+ ErtsProcLocks current_c_p_locks = 0;
/*
* SMP note: If 'c_prt != NULL' and 'c_prt->reg->name == name',
@@ -480,18 +471,15 @@ int erts_unregister_name(Process *c_p,
restart:
reg_safe_write_lock(c_p, &current_c_p_locks);
-#endif
r.name = name;
if (is_non_value(name)) {
/* Unregister current process name */
ASSERT(c_p);
-#ifdef ERTS_SMP
if (current_c_p_locks != c_p_locks) {
- erts_smp_proc_lock(c_p, c_p_locks);
+ erts_proc_lock(c_p, c_p_locks);
current_c_p_locks = c_p_locks;
}
-#endif
if (c_p->common.u.alive.reg) {
r.name = c_p->common.u.alive.reg->name;
} else {
@@ -504,40 +492,41 @@ int erts_unregister_name(Process *c_p,
if ((rp = (RegProc*) hash_get(&process_reg, (void*) &r)) != NULL) {
if (rp->pt) {
if (port != rp->pt) {
-#ifdef ERTS_SMP
if (port) {
ASSERT(port != c_prt);
erts_port_release(port);
port = NULL;
}
- if (erts_smp_port_trylock(rp->pt) == EBUSY) {
+ if (erts_port_trylock(rp->pt) == EBUSY) {
Eterm id = rp->pt->common.id; /* id read only... */
/* Unlock all locks, acquire port lock, and restart... */
if (current_c_p_locks) {
- erts_smp_proc_unlock(c_p, current_c_p_locks);
+ erts_proc_unlock(c_p, current_c_p_locks);
current_c_p_locks = 0;
}
reg_write_unlock();
port = erts_id2port(id);
goto restart;
}
-#endif
port = rp->pt;
}
ASSERT(rp->pt == port);
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(port));
rp->pt->common.u.alive.reg = NULL;
-
+
if (IS_TRACED_FL(port, F_TRACE_PORTS)) {
+ if (current_c_p_locks) {
+ erts_proc_unlock(c_p, current_c_p_locks);
+ current_c_p_locks = 0;
+ }
trace_port(port, am_unregister, r.name);
}
} else if (rp->p) {
-#ifdef ERTS_SMP
erts_proc_safelock(c_p,
current_c_p_locks,
c_p_locks,
@@ -545,16 +534,14 @@ int erts_unregister_name(Process *c_p,
(c_p == rp->p) ? current_c_p_locks : 0,
ERTS_PROC_LOCK_MAIN);
current_c_p_locks = c_p_locks;
-#endif
rp->p->common.u.alive.reg = NULL;
if (IS_TRACED_FL(rp->p, F_TRACE_PROCS)) {
- trace_proc(c_p, rp->p, am_unregister, r.name);
+ trace_proc(rp->p, (c_p == rp->p) ? c_p_locks : ERTS_PROC_LOCK_MAIN,
+ rp->p, am_unregister, r.name);
}
-#ifdef ERTS_SMP
if (rp->p != c_p) {
- erts_smp_proc_unlock(rp->p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(rp->p, ERTS_PROC_LOCK_MAIN);
}
-#endif
}
hash_erase(&process_reg, (void*) &r);
res = 1;
@@ -568,14 +555,12 @@ int erts_unregister_name(Process *c_p,
erts_port_release(port);
}
if (c_prt) {
- erts_smp_port_lock(c_prt);
+ erts_port_lock(c_prt);
}
}
-#ifdef ERTS_SMP
if (c_p && !current_c_p_locks) {
- erts_smp_proc_lock(c_p, c_p_locks);
+ erts_proc_lock(c_p, c_p_locks);
}
-#endif
return res;
}
@@ -616,14 +601,12 @@ BIF_RETTYPE registered_0(BIF_ALIST_0)
Uint need;
Eterm* hp;
HashBucket **bucket;
-#ifdef ERTS_SMP
ErtsProcLocks proc_locks = ERTS_PROC_LOCK_MAIN;
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(BIF_P);
+ ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(BIF_P);
reg_safe_read_lock(BIF_P, &proc_locks);
if (!proc_locks)
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
-#endif
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
bucket = process_reg.bucket;
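
A pattern worth noting in register.c above: whenever erts_port_trylock() reports EBUSY, the code releases every lock it already holds, acquires the contended port lock the blocking way, and restarts from the top instead of waiting while still holding the registry lock. A cut-down pthread rendition of that try-lock/restart idiom (the two locks are hypothetical stand-ins for the ERTS registry and port locks):

    #include <errno.h>
    #include <pthread.h>

    static pthread_mutex_t reg_lock  = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;

    static void with_locked_port(void (*fn)(void))
    {
    restart:
        pthread_mutex_lock(&reg_lock);
        if (pthread_mutex_trylock(&port_lock) == EBUSY) {
            /* Blocking here would invert the lock order, so back out,
             * wait for the port lock alone, then retry from scratch. */
            pthread_mutex_unlock(&reg_lock);
            pthread_mutex_lock(&port_lock);
            pthread_mutex_unlock(&port_lock);
            goto restart;
        }
        fn();                            /* both locks held, in order */
        pthread_mutex_unlock(&port_lock);
        pthread_mutex_unlock(&reg_lock);
    }

    static void work(void) { /* the "port" is safely locked here */ }

    int main(void) { with_locked_port(work); return 0; }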
diff --git a/erts/emulator/beam/register.h b/erts/emulator/beam/register.h
index 7170463375..27a314ca78 100644
--- a/erts/emulator/beam/register.h
+++ b/erts/emulator/beam/register.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2012. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -43,12 +44,12 @@ typedef struct reg_proc
int process_reg_size(void);
int process_reg_sz(void);
void init_register_table(void);
-void register_info(int, void *);
+void register_info(fmtfn_t, void *);
int erts_register_name(Process *, Eterm, Eterm);
Eterm erts_whereis_name_to_id(Process *, Eterm);
void erts_whereis_name(Process *, ErtsProcLocks,
Eterm, Process**, ErtsProcLocks, int,
- Port**);
+ Port**, int);
Process *erts_whereis_process(Process *,
ErtsProcLocks,
Eterm,
diff --git a/erts/emulator/beam/safe_hash.c b/erts/emulator/beam/safe_hash.c
index 3326e5cc2a..73306030ae 100644
--- a/erts/emulator/beam/safe_hash.c
+++ b/erts/emulator/beam/safe_hash.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2008-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2008-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -61,7 +62,7 @@ static ERTS_INLINE int align_up_pow2(int val)
*/
static void rehash(SafeHash* h, int grow_limit)
{
- if (erts_smp_atomic_xchg_acqb(&h->is_rehashing, 1) != 0) {
+ if (erts_atomic_xchg_acqb(&h->is_rehashing, 1) != 0) {
return; /* already in progress */
}
if (h->grow_limit == grow_limit) {
@@ -76,7 +77,7 @@ static void rehash(SafeHash* h, int grow_limit)
sys_memzero(new_tab, bytes);
for (i=0; i<SAFE_HASH_LOCK_CNT; i++) { /* stop all traffic */
- erts_smp_mtx_lock(&h->lock_vec[i].mtx);
+ erts_mtx_lock(&h->lock_vec[i].mtx);
}
h->tab = new_tab;
@@ -94,12 +95,12 @@ static void rehash(SafeHash* h, int grow_limit)
}
for (i=0; i<SAFE_HASH_LOCK_CNT; i++) {
- erts_smp_mtx_unlock(&h->lock_vec[i].mtx);
+ erts_mtx_unlock(&h->lock_vec[i].mtx);
}
erts_free(h->type, (void *) old_tab);
}
/*else already done */
- erts_smp_atomic_set_relb(&h->is_rehashing, 0);
+ erts_atomic_set_relb(&h->is_rehashing, 0);
}
@@ -114,7 +115,7 @@ void safe_hash_get_info(SafeHashInfo *hi, SafeHash *h)
int objects = 0;
for (lock_ix=0; lock_ix<SAFE_HASH_LOCK_CNT; lock_ix++) {
- erts_smp_mtx_lock(&h->lock_vec[lock_ix].mtx);
+ erts_mtx_lock(&h->lock_vec[lock_ix].mtx);
size = h->size_mask + 1;
for (i = lock_ix; i < size; i += SAFE_HASH_LOCK_CNT) {
int depth = 0;
@@ -127,7 +128,7 @@ void safe_hash_get_info(SafeHashInfo *hi, SafeHash *h)
if (depth > max_depth)
max_depth = depth;
}
- erts_smp_mtx_unlock(&h->lock_vec[lock_ix].mtx);
+ erts_mtx_unlock(&h->lock_vec[lock_ix].mtx);
}
hi->name = h->name;
@@ -144,9 +145,9 @@ int safe_hash_table_sz(SafeHash *h)
int i, size;
for(i=0; h->name[i]; i++);
i++;
- erts_smp_mtx_lock(&h->lock_vec[0].mtx); /* any lock will do to read size */
+ erts_mtx_lock(&h->lock_vec[0].mtx); /* any lock will do to read size */
size = h->size_mask + 1;
- erts_smp_mtx_unlock(&h->lock_vec[0].mtx);
+ erts_mtx_unlock(&h->lock_vec[0].mtx);
return sizeof(SafeHash) + size*sizeof(SafeHashBucket*) + i;
}
@@ -154,7 +155,8 @@ int safe_hash_table_sz(SafeHash *h)
** Init a pre allocated or static hash structure
** and allocate buckets. NOT SAFE
*/
-SafeHash* safe_hash_init(ErtsAlcType_t type, SafeHash* h, char* name, int size, SafeHashFunctions fun)
+SafeHash* safe_hash_init(ErtsAlcType_t type, SafeHash* h, char* name, erts_lock_flags_t flags,
+ int size, SafeHashFunctions fun)
{
int i, bytes;
@@ -166,10 +168,11 @@ SafeHash* safe_hash_init(ErtsAlcType_t type, SafeHash* h, char* name, int size,
h->name = name;
h->fun = fun;
set_size(h,size);
- erts_smp_atomic_init_nob(&h->is_rehashing, 0);
- erts_smp_atomic_init_nob(&h->nitems, 0);
+ erts_atomic_init_nob(&h->is_rehashing, 0);
+ erts_atomic_init_nob(&h->nitems, 0);
for (i=0; i<SAFE_HASH_LOCK_CNT; i++) {
- erts_smp_mtx_init(&h->lock_vec[i].mtx,"safe_hash");
+ erts_mtx_init(&h->lock_vec[i].mtx, "safe_hash", NIL,
+ flags);
}
return h;
}
@@ -182,8 +185,8 @@ void* safe_hash_get(SafeHash* h, void* tmpl)
{
SafeHashValue hval = h->fun.hash(tmpl);
SafeHashBucket* b;
- erts_smp_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx;
- erts_smp_mtx_lock(lock);
+ erts_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx;
+ erts_mtx_lock(lock);
b = h->tab[hval & h->size_mask];
while(b != NULL) {
@@ -191,7 +194,7 @@ void* safe_hash_get(SafeHash* h, void* tmpl)
break;
b = b->next;
}
- erts_smp_mtx_unlock(lock);
+ erts_mtx_unlock(lock);
return (void*) b;
}
@@ -204,13 +207,13 @@ void* safe_hash_put(SafeHash* h, void* tmpl)
SafeHashValue hval = h->fun.hash(tmpl);
SafeHashBucket* b;
SafeHashBucket** head;
- erts_smp_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx;
- erts_smp_mtx_lock(lock);
+ erts_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx;
+ erts_mtx_lock(lock);
head = &h->tab[hval & h->size_mask];
b = *head;
while(b != NULL) {
if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0)) {
- erts_smp_mtx_unlock(lock);
+ erts_mtx_unlock(lock);
return b;
}
b = b->next;
@@ -221,8 +224,8 @@ void* safe_hash_put(SafeHash* h, void* tmpl)
b->next = *head;
*head = b;
grow_limit = h->grow_limit;
- erts_smp_mtx_unlock(lock);
- if (erts_smp_atomic_inc_read_nob(&h->nitems) > grow_limit) {
+ erts_mtx_unlock(lock);
+ if (erts_atomic_inc_read_nob(&h->nitems) > grow_limit) {
rehash(h, grow_limit);
}
return (void*) b;
@@ -237,40 +240,58 @@ void* safe_hash_erase(SafeHash* h, void* tmpl)
SafeHashValue hval = h->fun.hash(tmpl);
SafeHashBucket* b;
SafeHashBucket** prevp;
- erts_smp_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx;
- erts_smp_mtx_lock(lock);
+ erts_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx;
+ erts_mtx_lock(lock);
prevp = &h->tab[hval & h->size_mask];
b = *prevp;
while(b != NULL) {
if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0)) {
*prevp = b->next;
- erts_smp_mtx_unlock(lock);
- erts_smp_atomic_dec_nob(&h->nitems);
+ erts_mtx_unlock(lock);
+ erts_atomic_dec_nob(&h->nitems);
h->fun.free((void*)b);
return tmpl;
}
prevp = &b->next;
b = b->next;
}
- erts_smp_mtx_unlock(lock);
+ erts_mtx_unlock(lock);
return NULL;
}
/*
-** Call 'func(obj,func_arg2)' for all objects in table. NOT SAFE!!!
+** Call 'func(obj,func_arg2,func_arg3)' for all objects in table. NOT SAFE!!!
*/
-void safe_hash_for_each(SafeHash* h, void (*func)(void *, void *), void *func_arg2)
+void safe_hash_for_each(SafeHash* h, void (*func)(void *, void *, void *),
+ void *func_arg2, void *func_arg3)
{
int i;
for (i = 0; i <= h->size_mask; i++) {
SafeHashBucket* b = h->tab[i];
while (b != NULL) {
- (*func)((void *) b, func_arg2);
+ (*func)((void *) b, func_arg2, func_arg3);
b = b->next;
}
}
}
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_enable_hash_lock_count(SafeHash *h, erts_lock_flags_t flags, int enable) {
+ int i;
+
+ for(i = 0; i < SAFE_HASH_LOCK_CNT; i++) {
+ erts_mtx_t *lock = &h->lock_vec[i].mtx;
+
+ if(enable) {
+ erts_lcnt_install_new_lock_info(&lock->lcnt, "safe_hash", NIL,
+ ERTS_LOCK_TYPE_MUTEX | flags);
+ } else {
+ erts_lcnt_uninstall(&lock->lcnt);
+ }
+ }
+}
+#endif /* ERTS_ENABLE_LOCK_COUNT */
+
#endif /* !ERTS_SYS_CONTINOUS_FD_NUMBERS */
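
rehash() above elects exactly one resizing thread by atomically exchanging the is_rehashing flag: whoever reads back 0 owns the rehash, everyone else returns at once, and the acquire/release pairing is what publishes the new table safely. The same guard in portable C11 atomics (a sketch, not the ERTS implementation):

    #include <stdatomic.h>

    static atomic_int is_rehashing;

    static void maybe_rehash(void (*do_rehash)(void))
    {
        /* acquire: observe everything the previous resizer published */
        if (atomic_exchange_explicit(&is_rehashing, 1,
                                     memory_order_acquire) != 0)
            return;                 /* a resize is already in progress */

        do_rehash();

        /* release: publish the new table before dropping the flag */
        atomic_store_explicit(&is_rehashing, 0, memory_order_release);
    }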
diff --git a/erts/emulator/beam/safe_hash.h b/erts/emulator/beam/safe_hash.h
index c691126ef9..af97b4cb4d 100644
--- a/erts/emulator/beam/safe_hash.h
+++ b/erts/emulator/beam/safe_hash.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2008-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2008-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -25,13 +26,9 @@
#ifndef __SAFE_HASH_H__
#define __SAFE_HASH_H__
-
-#ifndef __SYS_H__
#include "sys.h"
-#endif
-
#include "erl_alloc.h"
-
+#include "erl_lock_flags.h"
typedef unsigned long SafeHashValue;
@@ -76,11 +73,11 @@ typedef struct
int size_mask; /* (RW) Number of slots - 1 */
SafeHashBucket** tab; /* (RW) Vector of bucket pointers (objects) */
int grow_limit; /* (RW) Threshold for growing table */
- erts_smp_atomic_t nitems; /* (A) Number of items in table */
- erts_smp_atomic_t is_rehashing; /* (A) Table rehashing in progress */
+ erts_atomic_t nitems; /* (A) Number of items in table */
+ erts_atomic_t is_rehashing; /* (A) Table rehashing in progress */
union {
- erts_smp_mtx_t mtx;
+ erts_mtx_t mtx;
byte __cache_line__[64];
}lock_vec[SAFE_HASH_LOCK_CNT];
@@ -89,7 +86,7 @@ typedef struct
/* A: Lockless atomics */
} SafeHash;
-SafeHash* safe_hash_init(ErtsAlcType_t, SafeHash*, char*, int, SafeHashFunctions);
+SafeHash* safe_hash_init(ErtsAlcType_t, SafeHash*, char*, erts_lock_flags_t, int, SafeHashFunctions);
void safe_hash_get_info(SafeHashInfo*, SafeHash*);
int safe_hash_table_sz(SafeHash *);
@@ -98,7 +95,11 @@ void* safe_hash_get(SafeHash*, void*);
void* safe_hash_put(SafeHash*, void*);
void* safe_hash_erase(SafeHash*, void*);
-void safe_hash_for_each(SafeHash*, void (*func)(void *, void *), void *);
+void safe_hash_for_each(SafeHash*, void (*func)(void *, void *, void *), void *, void *);
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_enable_hash_lock_count(SafeHash*, erts_lock_flags_t, int);
+#endif
#endif /* __SAFE_HASH_H__ */
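
The SafeHash layout above stripes SAFE_HASH_LOCK_CNT mutexes across the table and pads each to a cache line so the locks never share one; bucket i is guarded by the same lock as buckets i+N, i+2N, and so on. A stripped-down illustration of that striping (pthread mutexes standing in for erts_mtx_t, with a hypothetical stripe count):

    #include <pthread.h>

    #define LOCK_CNT 16                 /* hypothetical stripe count */

    static union {
        pthread_mutex_t mtx;
        char pad[64];                   /* one lock per cache line */
    } lock_vec[LOCK_CNT];

    static void locks_init(void)
    {
        int i;
        for (i = 0; i < LOCK_CNT; i++)
            pthread_mutex_init(&lock_vec[i].mtx, NULL);
    }

    /* hval % LOCK_CNT picks the stripe, hval & size_mask picks the
     * bucket, so each stripe guards every LOCK_CNT-th bucket. */
    static pthread_mutex_t *lock_for(unsigned long hval)
    {
        return &lock_vec[hval % LOCK_CNT].mtx;
    }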
diff --git a/erts/emulator/beam/select_instrs.tab b/erts/emulator/beam/select_instrs.tab
new file mode 100644
index 0000000000..2951949d38
--- /dev/null
+++ b/erts/emulator/beam/select_instrs.tab
@@ -0,0 +1,190 @@
+// -*- c -*-
+//
+// %CopyrightBegin%
+//
+// Copyright Ericsson AB 2017. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// %CopyrightEnd%
+//
+
+i_select_val_bins := select_val_bins.fetch.select;
+
+select_val_bins.head() {
+ Eterm select_val;
+}
+
+select_val_bins.fetch(Src) {
+ select_val = $Src;
+}
+
+select_val_bins.select(Fail, NumElements) {
+ struct Singleton {
+ BeamInstr val;
+ };
+ struct Singleton* low;
+ struct Singleton* high;
+ struct Singleton* mid;
+ int bdiff; /* int not long because the arrays aren't that large */
+
+ low = (struct Singleton *) ($NEXT_INSTRUCTION);
+ high = low + $NumElements;
+
+ /* The pointer subtraction (high-low) below must produce
+ * a signed result, because high could be < low. That
+ * requires the compiler to insert quite a bit of code.
+ *
+ * However, high will be > low so the result will be
+ * positive. We can use that knowledge to optimise the
+ * entire sequence, from the initial comparison to the
+ * computation of mid.
+ *
+ * -- Mikael Pettersson, Acumem AB
+ *
+ * Original loop control code:
+ *
+ * while (low < high) {
+ * mid = low + (high-low) / 2;
+ *
+ */
+ while ((bdiff = (int)((char*)high - (char*)low)) > 0) {
+ unsigned int boffset = ((unsigned int)bdiff >> 1) & ~(sizeof(struct Singleton)-1);
+
+ mid = (struct Singleton*)((char*)low + boffset);
+ if (select_val < mid->val) {
+ high = mid;
+ } else if (select_val > mid->val) {
+ low = mid + 1;
+ } else {
+ Sint32* jump_tab = (Sint32 *) ($NEXT_INSTRUCTION + $NumElements);
+ Sint32 offset = jump_tab[mid - (struct Singleton *)($NEXT_INSTRUCTION)];
+ $JUMP(offset);
+ }
+ }
+ $JUMP($Fail);
+}
+
+i_select_tuple_arity2 := select_val2.src.get_arity.execute;
+i_select_val2 := select_val2.src.execute;
+
+select_val2.head() {
+ Eterm select_val2;
+}
+
+select_val2.src(Src) {
+ select_val2 = $Src;
+}
+
+select_val2.get_arity() {
+ if (ERTS_LIKELY(is_tuple(select_val2))) {
+ select_val2 = *tuple_val(select_val2);
+ } else {
+ select_val2 = NIL;
+ }
+}
+
+select_val2.execute(Fail, T1, T2) {
+ Sint32* jump_tab = (Sint32 *) ($NEXT_INSTRUCTION);
+
+ if (select_val2 == $T1) {
+ $JUMP(jump_tab[0]);
+ } else if (select_val2 == $T2) {
+ $JUMP(jump_tab[1]);
+ } else {
+ $FAIL($Fail);
+ }
+}
+
+i_select_tuple_arity := select_val_lin.fetch.get_arity.execute;
+i_select_val_lins := select_val_lin.fetch.execute;
+
+select_val_lin.head() {
+ Eterm select_val;
+}
+
+select_val_lin.fetch(Src) {
+ select_val = $Src;
+}
+
+select_val_lin.get_arity() {
+ if (ERTS_LIKELY(is_tuple(select_val))) {
+ select_val = *tuple_val(select_val);
+ } else {
+ select_val = NIL;
+ }
+}
+
+select_val_lin.execute(Fail, N) {
+ BeamInstr* vs = $NEXT_INSTRUCTION;
+ int ix = 0;
+
+ for (;;) {
+ if (vs[ix+0] >= select_val) {
+ ix += 0;
+ break;
+ }
+ if (vs[ix+1] >= select_val) {
+ ix += 1;
+ break;
+ }
+ ix += 2;
+ }
+
+ if (vs[ix] == select_val) {
+ Sint32* jump_tab = (Sint32 *) ($NEXT_INSTRUCTION + $N);
+ Eterm offset = jump_tab[ix];
+ $JUMP(offset);
+ } else {
+ $JUMP($Fail);
+ }
+}
+
+JUMP_ON_VAL(Fail, Index, N, Base) {
+ if (is_small($Index)) {
+ $Index = (Uint) (signed_val($Index) - $Base);
+ if ($Index < $N) {
+ Sint32* jump_tab = (Sint32 *) ($NEXT_INSTRUCTION);
+ $JUMP(jump_tab[$Index]);
+ }
+ }
+ $FAIL($Fail);
+}
+
+i_jump_on_val_zero := jump_on_val_zero.fetch.execute;
+
+jump_on_val_zero.head() {
+ Eterm index;
+}
+
+jump_on_val_zero.fetch(Src) {
+ index = $Src;
+}
+
+jump_on_val_zero.execute(Fail, N) {
+ $JUMP_ON_VAL($Fail, index, $N, 0);
+}
+
+i_jump_on_val := jump_on_val.fetch.execute;
+
+jump_on_val.head() {
+ Eterm index;
+}
+
+jump_on_val.fetch(Src) {
+ index = $Src;
+}
+
+jump_on_val.execute(Fail, N, Base) {
+ $JUMP_ON_VAL($Fail, index, $N, $Base);
+}
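
The search loop in select_val_bins.select above is the interesting part of this new file: by measuring the remaining range once, as a byte difference known to be non-negative, the midpoint computation becomes a shift and a mask instead of the signed-division fix-up that the naive low + (high-low)/2 forces the compiler to emit. The same loop over a plain sorted array (a standalone sketch, not the emulator code):

    #include <stdio.h>

    static int find(const unsigned long *vals, int n, unsigned long key)
    {
        const unsigned long *low = vals, *high = vals + n, *mid;
        int bdiff;

        /* same effect as: while (low < high) mid = low + (high-low)/2; */
        while ((bdiff = (int)((const char *)high - (const char *)low)) > 0) {
            size_t boffset =
                ((unsigned)bdiff >> 1) & ~(sizeof(unsigned long) - 1);

            mid = (const unsigned long *)((const char *)low + boffset);
            if (key < *mid)
                high = mid;
            else if (key > *mid)
                low = mid + 1;
            else
                return (int)(mid - vals);   /* jump-table index */
        }
        return -1;                          /* take the Fail label */
    }

    int main(void)
    {
        unsigned long v[] = {2, 3, 5, 7, 11};
        printf("%d\n", find(v, 5, 7));      /* prints 3 */
        return 0;
    }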
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index 05f07e57b2..bf7d310568 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -20,27 +21,90 @@
#ifndef __SYS_H__
#define __SYS_H__
+#if !defined(__GNUC__) || defined(__e2k__)
+# define ERTS_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) 0
+#elif !defined(__GNUC_MINOR__)
+# define ERTS_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) \
+ ((__GNUC__ << 24) >= (((MAJ) << 24) | ((MIN) << 12) | (PL)))
+#elif !defined(__GNUC_PATCHLEVEL__)
+# define ERTS_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) \
+ (((__GNUC__ << 24) | (__GNUC_MINOR__ << 12)) >= (((MAJ) << 24) | ((MIN) << 12) | (PL)))
+#else
+# define ERTS_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) \
+ (((__GNUC__ << 24) | (__GNUC_MINOR__ << 12) | __GNUC_PATCHLEVEL__) >= (((MAJ) << 24) | ((MIN) << 12) | (PL)))
+#endif

-#if defined(VALGRIND) && !defined(NO_FPE_SIGNALS)
-# define NO_FPE_SIGNALS
+
+#ifdef ERTS_INLINE
+# ifndef ERTS_CAN_INLINE
+# define ERTS_CAN_INLINE 1
+# endif
+#else
+# if defined(__GNUC__)
+# define ERTS_CAN_INLINE 1
+# define ERTS_INLINE __inline__
+# elif defined(__WIN32__)
+# define ERTS_CAN_INLINE 1
+# define ERTS_INLINE __inline
+# else
+# define ERTS_CAN_INLINE 0
+# define ERTS_INLINE
+# endif
#endif
-#ifdef DISABLE_CHILD_WAITER_THREAD
-#undef ENABLE_CHILD_WAITER_THREAD
+#ifndef ERTS_FORCE_INLINE
+# if ERTS_AT_LEAST_GCC_VSN__(3,1,1)
+# define ERTS_FORCE_INLINE __inline__ __attribute__((__always_inline__))
+# elif defined(__WIN32__)
+# define ERTS_FORCE_INLINE __forceinline
+# endif
+# ifndef ERTS_FORCE_INLINE
+# define ERTS_FORCE_INLINE ERTS_INLINE
+# endif
#endif
-#if defined(ERTS_SMP) && !defined(DISABLE_CHILD_WAITER_THREAD)
-#undef ENABLE_CHILD_WAITER_THREAD
-#define ENABLE_CHILD_WAITER_THREAD 1
+#if defined(DEBUG) || defined(ERTS_ENABLE_LOCK_CHECK)
+# undef ERTS_CAN_INLINE
+# define ERTS_CAN_INLINE 0
+# undef ERTS_INLINE
+# define ERTS_INLINE
+#endif
+
+#if ERTS_CAN_INLINE
+#define ERTS_GLB_FORCE_INLINE static ERTS_FORCE_INLINE
+#define ERTS_GLB_INLINE static ERTS_INLINE
+#else
+#define ERTS_GLB_FORCE_INLINE
+#define ERTS_GLB_INLINE
+#endif
+
+#if ERTS_CAN_INLINE || defined(ERTS_DO_INCL_GLB_INLINE_FUNC_DEF)
+# define ERTS_GLB_INLINE_INCL_FUNC_DEF 1
+#else
+# define ERTS_GLB_INLINE_INCL_FUNC_DEF 0
+#endif
+
+#if defined(VALGRIND) && !defined(NO_FPE_SIGNALS)
+# define NO_FPE_SIGNALS
#endif
#define ERTS_I64_LITERAL(X) X##LL
+#define ErtsInArea(ptr,start,nbytes) \
+ ((UWord)((char*)(ptr) - (char*)(start)) < (nbytes))
+
+#define ErtsContainerStruct(ptr, type, member) \
+ ((type *)((char *)(1 ? (ptr) : &((type *)0)->member) - offsetof(type, member)))
+
+/* Use this variant when the member is an array */
+#define ErtsContainerStruct_(ptr, type, memberv) \
+ ((type *)((char *)(1 ? (ptr) : ((type *)0)->memberv) - offsetof(type, memberv)))
+
+#define ErtsSizeofMember(type, member) sizeof(((type *)0)->member)
+
#if defined (__WIN32__)
# include "erl_win_sys.h"
-#elif defined (__OSE__)
-# include "erl_ose_sys.h"
-#else
+#else
# include "erl_unix_sys.h"
#ifndef UNIX
# define UNIX 1
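
Two helper macros added in the hunk above repay a close look: ErtsInArea() folds a two-sided bounds check into one unsigned comparison (a below-range pointer wraps to a huge unsigned difference), and ErtsContainerStruct() is the classic offsetof() walk from a member pointer back to its enclosing struct. A standalone demonstration of both tricks:

    #include <stdio.h>
    #include <stddef.h>

    #define IN_AREA(ptr, start, nbytes) \
        ((size_t)((char *)(ptr) - (char *)(start)) < (nbytes))
    #define CONTAINER_OF(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct proc { int id; int flags; };

    int main(void)
    {
        struct proc p = {17, 0};
        char buf[64];

        printf("%d\n", IN_AREA(buf + 8, buf, sizeof(buf)));              /* 1 */
        printf("%d\n", CONTAINER_OF(&p.flags, struct proc, flags)->id); /* 17 */
        return 0;
    }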
@@ -66,39 +130,13 @@
*/
#ifndef ERTS_SYS_FD_TYPE
+#define ERTS_SYS_FD_INVALID ((ErtsSysFdType) -1)
typedef int ErtsSysFdType;
#else
-typedef ERTS_SYS_FD_TYPE ErtsSysFdType;
+#ifndef ERTS_SYS_FD_INVALID
+# error missing ERTS_SYS_FD_INVALID
#endif
-
-#ifdef ERTS_INLINE
-# ifndef ERTS_CAN_INLINE
-# define ERTS_CAN_INLINE 1
-# endif
-#else
-# if defined(__GNUC__)
-# define ERTS_CAN_INLINE 1
-# define ERTS_INLINE __inline__
-# elif defined(__WIN32__)
-# define ERTS_CAN_INLINE 1
-# define ERTS_INLINE __inline
-# else
-# define ERTS_CAN_INLINE 0
-# define ERTS_INLINE
-# endif
-#endif
-
-#if !defined(__GNUC__)
-# define ERTS_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) 0
-#elif !defined(__GNUC_MINOR__)
-# define ERTS_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) \
- ((__GNUC__ << 24) >= (((MAJ) << 24) | ((MIN) << 12) | (PL)))
-#elif !defined(__GNUC_PATCHLEVEL__)
-# define ERTS_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) \
- (((__GNUC__ << 24) | (__GNUC_MINOR__ << 12)) >= (((MAJ) << 24) | ((MIN) << 12) | (PL)))
-#else
-# define ERTS_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) \
- (((__GNUC__ << 24) | (__GNUC_MINOR__ << 12) | __GNUC_PATCHLEVEL__) >= (((MAJ) << 24) | ((MIN) << 12) | (PL)))
+typedef ERTS_SYS_FD_TYPE ErtsSysFdType;
#endif
#if ERTS_AT_LEAST_GCC_VSN__(2, 96, 0)
@@ -108,8 +146,20 @@ typedef ERTS_SYS_FD_TYPE ErtsSysFdType;
# define ERTS_LIKELY(BOOL) (BOOL)
# define ERTS_UNLIKELY(BOOL) (BOOL)
#endif
+
+#if ERTS_AT_LEAST_GCC_VSN__(2, 96, 0)
+#if (defined(__APPLE__) && defined(__MACH__)) || defined(__DARWIN__)
+# define ERTS_WRITE_UNLIKELY(X) X __attribute__ ((section ("__DATA,ERTS_LOW_WRITE") ))
+#else
+# define ERTS_WRITE_UNLIKELY(X) X __attribute__ ((section ("ERTS_LOW_WRITE") ))
+#endif
+#else
+# define ERTS_WRITE_UNLIKELY(X) X
+#endif
+
+/* clang may have too low __GNUC__ versions but can handle it */
#ifdef __GNUC__
-# if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 5)
+# if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 5) || defined(__clang__)
# define ERTS_DECLARE_DUMMY(X) X __attribute__ ((unused))
# else
# define ERTS_DECLARE_DUMMY(X) X
@@ -128,24 +178,8 @@ typedef ERTS_SYS_FD_TYPE ErtsSysFdType;
# endif
#endif
-#if defined(DEBUG) || defined(ERTS_ENABLE_LOCK_CHECK)
-# undef ERTS_CAN_INLINE
-# define ERTS_CAN_INLINE 0
-# undef ERTS_INLINE
-# define ERTS_INLINE
-#endif
-
-#if ERTS_CAN_INLINE
-#define ERTS_GLB_INLINE static ERTS_INLINE
-#else
-#define ERTS_GLB_INLINE
-#endif
-
-#if ERTS_CAN_INLINE || defined(ERTS_DO_INCL_GLB_INLINE_FUNC_DEF)
-# define ERTS_GLB_INLINE_INCL_FUNC_DEF 1
-#else
-# define ERTS_GLB_INLINE_INCL_FUNC_DEF 0
-#endif
+#define ERTS_MK_VSN_INT(Major, Minor, Build) \
+ ((((Major) & 0x3ff) << 20) | (((Minor) & 0x3ff) << 10) | ((Build) & 0x3ff))
#ifndef ERTS_EXIT_AFTER_DUMP
# define ERTS_EXIT_AFTER_DUMP exit
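
ERTS_MK_VSN_INT above, like ERTS_AT_LEAST_GCC_VSN__ near the top of this file, packs a version triple into disjoint bit fields of a single integer so that "at least version X.Y.Z" is one >= comparison. The trick reduced to its essentials:

    #include <stdio.h>

    #define VSN_INT(Major, Minor, Build) \
        ((((Major) & 0x3ff) << 20) | (((Minor) & 0x3ff) << 10) | ((Build) & 0x3ff))

    int main(void)
    {
        /* is version 7.3.0 at least 3.1.1? prints 1 */
        printf("%d\n", VSN_INT(7, 3, 0) >= VSN_INT(3, 1, 1));
        return 0;
    }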
@@ -184,6 +218,32 @@ __decl_noreturn void __noreturn erl_assert_error(const char* expr, const char *f
# define ASSERT(e) ((void) 1)
#endif
+/* ERTS_UNDEF can be used to silence false warnings about
+ * "variable may be used uninitialized" while keeping the variable
+ * marked as undefined by valgrind.
+ */
+#ifdef VALGRIND
+# define ERTS_UNDEF(V,I)
+#else
+# define ERTS_UNDEF(V,I) V = I
+#endif
+
+/*
+ * Compile time assert
+ * (the actual compiler error msg can be a bit confusing)
+ */
+#if ERTS_AT_LEAST_GCC_VSN__(3,1,1)
+# define ERTS_CT_ASSERT(e) \
+ do { \
+ enum { compile_time_assert__ = __builtin_choose_expr((e),0,(void)0) }; \
+ } while(0)
+#else
+# define ERTS_CT_ASSERT(e) \
+ do { \
+ enum { compile_time_assert__ = 1/(e) }; \
+ } while (0)
+#endif
+
/*
* Microsoft C/C++: We certainly want to use stdarg.h and prototypes.
* But MSC doesn't define __STDC__, unless we compile with the -Za
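
On ERTS_CT_ASSERT above: a false condition becomes a division by zero (or a __builtin_choose_expr type error) inside an enum initializer, so a violated invariant stops the build instead of surfacing at run time. A standalone version of the portable fallback, usable once per scope:

    #define CT_ASSERT(e) \
        do { enum { compile_time_assert__ = 1/(e) }; } while (0)

    int main(void)
    {
        CT_ASSERT(sizeof(void *) == sizeof(char *));   /* compiles */
        /* CT_ASSERT(sizeof(int) == 1); -- division by zero in a
         * constant expression: the build fails, the program never runs */
        return 0;
    }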
@@ -252,65 +312,15 @@ __decl_noreturn void __noreturn erl_assert_error(const char* expr, const char *f
#else
#error Neither 32 nor 64 bit architecture
#endif
-#if defined(ARCH_64) && defined(HALFWORD_HEAP_EMULATOR)
-# define HALFWORD_HEAP 1
-# define HALFWORD_ASSERT 0
-# define ASSERT_HALFWORD(COND) ASSERT(COND)
-# undef ERTS_SIZEOF_TERM
-# define ERTS_SIZEOF_TERM 4
-#else
-# define HALFWORD_HEAP 0
-# define HALFWORD_ASSERT 0
-# define ASSERT_HALFWORD(COND)
-#endif
#if SIZEOF_VOID_P != SIZEOF_SIZE_T
#error sizeof(void*) != sizeof(size_t)
#endif
-#if HALFWORD_HEAP
-
-#if SIZEOF_INT == 4
-typedef unsigned int Eterm;
-typedef unsigned int Uint;
-typedef int Sint;
-#define ERTS_SIZEOF_ETERM SIZEOF_INT
-#define ErtsStrToSint strtol
-#else
-#error Found no appropriate type to use for 'Eterm', 'Uint' and 'Sint'
-#endif
-
-#if SIZEOF_VOID_P == SIZEOF_LONG
-typedef unsigned long UWord;
-typedef long SWord;
-#define SWORD_CONSTANT(Const) Const##L
-#define UWORD_CONSTANT(Const) Const##UL
-#define ERTS_UWORD_MAX ULONG_MAX
-#define ERTS_SWORD_MAX LONG_MAX
-#elif SIZEOF_VOID_P == SIZEOF_INT
-typedef unsigned int UWord;
-typedef int SWord;
-#define SWORD_CONSTANT(Const) Const
-#define UWORD_CONSTANT(Const) Const##U
-#define ERTS_UWORD_MAX UINT_MAX
-#define ERTS_SWORD_MAX INT_MAX
-#elif SIZEOF_VOID_P == SIZEOF_LONG_LONG
-typedef unsigned long long UWord;
-typedef long long SWord;
-#define SWORD_CONSTANT(Const) Const##LL
-#define UWORD_CONSTANT(Const) Const##ULL
-#define ERTS_UWORD_MAX ULLONG_MAX
-#define ERTS_SWORD_MAX LLONG_MAX
-#else
-#error Found no appropriate type to use for 'Eterm', 'Uint' and 'Sint'
-#endif
-
-#else /* !HALFWORD_HEAP */
-
#if SIZEOF_VOID_P == SIZEOF_LONG
-typedef unsigned long Eterm;
-typedef unsigned long Uint;
-typedef long Sint;
+typedef unsigned long Eterm erts_align_attribute(sizeof(long));
+typedef unsigned long Uint erts_align_attribute(sizeof(long));
+typedef long Sint erts_align_attribute(sizeof(long));
#define SWORD_CONSTANT(Const) Const##L
#define UWORD_CONSTANT(Const) Const##UL
#define ERTS_UWORD_MAX ULONG_MAX
@@ -318,9 +328,9 @@ typedef long Sint;
#define ERTS_SIZEOF_ETERM SIZEOF_LONG
#define ErtsStrToSint strtol
#elif SIZEOF_VOID_P == SIZEOF_INT
-typedef unsigned int Eterm;
-typedef unsigned int Uint;
-typedef int Sint;
+typedef unsigned int Eterm erts_align_attribute(sizeof(int));
+typedef unsigned int Uint erts_align_attribute(sizeof(int));
+typedef int Sint erts_align_attribute(sizeof(int));
#define SWORD_CONSTANT(Const) Const
#define UWORD_CONSTANT(Const) Const##U
#define ERTS_UWORD_MAX UINT_MAX
@@ -328,9 +338,9 @@ typedef int Sint;
#define ERTS_SIZEOF_ETERM SIZEOF_INT
#define ErtsStrToSint strtol
#elif SIZEOF_VOID_P == SIZEOF_LONG_LONG
-typedef unsigned long long Eterm;
-typedef unsigned long long Uint;
-typedef long long Sint;
+typedef unsigned long long Eterm erts_align_attribute(sizeof(long long));
+typedef unsigned long long Uint erts_align_attribute(sizeof(long long));
+typedef long long Sint erts_align_attribute(sizeof(long long));
#define SWORD_CONSTANT(Const) Const##LL
#define UWORD_CONSTANT(Const) Const##ULL
#define ERTS_UWORD_MAX ULLONG_MAX
@@ -347,23 +357,52 @@ typedef long long Sint;
typedef Uint UWord;
typedef Sint SWord;
-
-#endif /* HALFWORD_HEAP */
+#define ERTS_UINT_MAX ERTS_UWORD_MAX
typedef UWord BeamInstr;
#ifndef HAVE_INT64
-#if SIZEOF_LONG == 8
-#define HAVE_INT64 1
+# if SIZEOF_LONG == 8
+# define HAVE_INT64 1
typedef unsigned long Uint64;
typedef long Sint64;
-#elif SIZEOF_LONG_LONG == 8
-#define HAVE_INT64 1
+# ifdef ULONG_MAX
+# define ERTS_UINT64_MAX ULONG_MAX
+# endif
+# ifdef LONG_MAX
+# define ERTS_SINT64_MAX LONG_MAX
+# endif
+# ifdef LONG_MIN
+# define ERTS_SINT64_MIN LONG_MIN
+# endif
+# define ErtsStrToSint64 strtol
+# elif SIZEOF_LONG_LONG == 8
+# define HAVE_INT64 1
typedef unsigned long long Uint64;
typedef long long Sint64;
-#else
-#define HAVE_INT64 0
+# ifdef ULLONG_MAX
+# define ERTS_UINT64_MAX ULLONG_MAX
+# endif
+# ifdef LLONG_MAX
+# define ERTS_SINT64_MAX LLONG_MAX
+# endif
+# ifdef LLONG_MIN
+# define ERTS_SINT64_MIN LLONG_MIN
+# endif
+# define ErtsStrToSint64 strtoll
+# else
+# error "No 64-bit integer type found"
+# endif
+#endif
+
+#ifndef ERTS_UINT64_MAX
+# define ERTS_UINT64_MAX (~((Uint64) 0))
+#endif
+#ifndef ERTS_SINT64_MAX
+# define ERTS_SINT64_MAX ((Sint64) ((((Uint64) 1) << 63)-1))
#endif
+#ifndef ERTS_SINT64_MIN
+# define ERTS_SINT64_MIN (-1*(((Sint64) 1) << 63))
#endif
#if SIZEOF_LONG == 4
@@ -422,49 +461,25 @@ typedef union {
#include "erl_lock_check.h"
-/* needed by erl_smp.h */
+/* needed by erl_threads.h */
int erts_send_warning_to_logger_str_nogl(char *);
-#include "erl_smp.h"
+#include "erl_threads.h"
#ifdef ERTS_WANT_BREAK_HANDLING
-# ifdef ERTS_SMP
-extern erts_smp_atomic32_t erts_break_requested;
+extern erts_atomic32_t erts_break_requested;
# define ERTS_BREAK_REQUESTED \
- ((int) erts_smp_atomic32_read_nob(&erts_break_requested))
-# else
-extern volatile int erts_break_requested;
-# define ERTS_BREAK_REQUESTED erts_break_requested
-# endif
+ ((int) erts_atomic32_read_nob(&erts_break_requested))
void erts_do_break_handling(void);
#endif
-#ifdef ERTS_WANT_GOT_SIGUSR1
-# ifndef UNIX
-# define ERTS_GOT_SIGUSR1 0
-# else
-# ifdef ERTS_SMP
-extern erts_smp_atomic32_t erts_got_sigusr1;
-# define ERTS_GOT_SIGUSR1 ((int) erts_smp_atomic32_read_mb(&erts_got_sigusr1))
-# else
-extern volatile int erts_got_sigusr1;
-# define ERTS_GOT_SIGUSR1 erts_got_sigusr1
-# endif
-# endif
-#endif
-#ifdef ERTS_SMP
-extern erts_smp_atomic32_t erts_writing_erl_crash_dump;
+extern erts_atomic32_t erts_writing_erl_crash_dump;
extern erts_tsd_key_t erts_is_crash_dumping_key;
#define ERTS_SOMEONE_IS_CRASH_DUMPING \
- ((int) erts_smp_atomic32_read_mb(&erts_writing_erl_crash_dump))
+ ((int) erts_atomic32_read_mb(&erts_writing_erl_crash_dump))
#define ERTS_IS_CRASH_DUMPING \
((int) (SWord) erts_tsd_get(erts_is_crash_dumping_key))
-#else
-extern volatile int erts_writing_erl_crash_dump;
-#define ERTS_SOMEONE_IS_CRASH_DUMPING erts_writing_erl_crash_dump
-#define ERTS_IS_CRASH_DUMPING erts_writing_erl_crash_dump
-#endif
/* Deal with memcpy() vs bcopy() etc. We want to use the mem*() functions,
but be able to fall back on bcopy() etc on systems that don't have
@@ -499,7 +514,7 @@ extern volatile int erts_writing_erl_crash_dump;
# define NO_ERF
# define NO_ERFC
/* This definition doesn't take NaN into account, but matherr() gets those */
-# define finite(x) (fabs(x) != HUGE_VAL)
+# define isfinite(x) (fabs(x) != HUGE_VAL)
# define USE_MATHERR
# define HAVE_FINITE
#endif
@@ -548,18 +563,19 @@ static unsigned long zero_value = 0, one_value = 1;
# endif /* !__WIN32__ */
#endif /* WANT_NONBLOCKING */
-__decl_noreturn void __noreturn erl_exit(int n, char*, ...);
+__decl_noreturn void __noreturn erts_exit(int n, char*, ...);
-/* Some special erl_exit() codes: */
-#define ERTS_INTR_EXIT INT_MIN /* called from signal handler */
-#define ERTS_ABORT_EXIT (INT_MIN + 1) /* no crash dump; only abort() */
-#define ERTS_DUMP_EXIT (INT_MIN + 2) /* crash dump; then exit() */
+/* Some special erts_exit() codes: */
+#define ERTS_INTR_EXIT -1 /* called from signal handler */
+#define ERTS_ABORT_EXIT -2 /* no crash dump; only abort() */
+#define ERTS_DUMP_EXIT -3 /* crash dump; then exit() */
+#define ERTS_ERROR_EXIT -4 /* crash dump; then abort() */
#define ERTS_INTERNAL_ERROR(What) \
- erl_exit(ERTS_ABORT_EXIT, "%s:%d:%s(): Internal error: %s\n", \
+ erts_exit(ERTS_ABORT_EXIT, "%s:%d:%s(): Internal error: %s\n", \
__FILE__, __LINE__, __func__, What)
-Eterm erts_check_io_info(void *p);
+UWord erts_sys_get_page_size(void);
/* Size of misc memory allocated from system dependent code */
Uint erts_sys_misc_mem_sz(void);
@@ -569,23 +585,21 @@ Uint erts_sys_misc_mem_sz(void);
#include "erl_printf.h"
/* Io constants to erts_print and erts_putc */
-#define ERTS_PRINT_STDERR (2)
-#define ERTS_PRINT_STDOUT (1)
-#define ERTS_PRINT_INVALID (0) /* Don't want to use 0 since CBUF was 0 */
-#define ERTS_PRINT_FILE (-1)
-#define ERTS_PRINT_SBUF (-2)
-#define ERTS_PRINT_SNBUF (-3)
-#define ERTS_PRINT_DSBUF (-4)
-
-#define ERTS_PRINT_MIN ERTS_PRINT_DSBUF
+#define ERTS_PRINT_STDERR ((fmtfn_t)0)
+#define ERTS_PRINT_STDOUT ((fmtfn_t)1)
+#define ERTS_PRINT_FILE ((fmtfn_t)2)
+#define ERTS_PRINT_SBUF ((fmtfn_t)3)
+#define ERTS_PRINT_SNBUF ((fmtfn_t)4)
+#define ERTS_PRINT_DSBUF ((fmtfn_t)5)
+#define ERTS_PRINT_FD ((fmtfn_t)6)
typedef struct {
char *buf;
size_t size;
} erts_print_sn_buf;
-int erts_print(int to, void *arg, char *format, ...); /* in utils.c */
-int erts_putc(int to, void *arg, char); /* in utils.c */
+int erts_print(fmtfn_t to, void *arg, char *format, ...); /* in utils.c */
+int erts_putc(fmtfn_t to, void *arg, char); /* in utils.c */
/* logger stuff is declared here instead of in global.h, so sys files
won't have to include global.h */
@@ -594,6 +608,7 @@ erts_dsprintf_buf_t *erts_create_logger_dsbuf(void);
int erts_send_info_to_logger(Eterm, erts_dsprintf_buf_t *);
int erts_send_warning_to_logger(Eterm, erts_dsprintf_buf_t *);
int erts_send_error_to_logger(Eterm, erts_dsprintf_buf_t *);
+int erts_send_error_term_to_logger(Eterm, erts_dsprintf_buf_t *, Eterm);
int erts_send_info_to_logger_str(Eterm, char *);
int erts_send_warning_to_logger_str(Eterm, char *);
int erts_send_error_to_logger_str(Eterm, char *);
@@ -601,7 +616,7 @@ int erts_send_info_to_logger_nogl(erts_dsprintf_buf_t *);
int erts_send_warning_to_logger_nogl(erts_dsprintf_buf_t *);
int erts_send_error_to_logger_nogl(erts_dsprintf_buf_t *);
int erts_send_info_to_logger_str_nogl(char *);
-/* needed by erl_smp.h (declared above)
+/* needed by erl_threads.h (declared above)
int erts_send_warning_to_logger_str_nogl(char *); */
int erts_send_error_to_logger_str_nogl(char *);
@@ -611,6 +626,15 @@ typedef struct preload {
unsigned char* code; /* Code pointer */
} Preload;
+/*
+ * ErtsTracer is either NIL, 'true' or [Mod | State]
+ *
+ * If set to NIL, it means no tracer.
+ * If set to 'true' it means the current process' tracer.
+ * If set to [Mod | State], there is a tracer.
+ * See erts_tracer_update for more details
+ */
+typedef Eterm ErtsTracer;
/*
* This structure contains options to all built in drivers.
@@ -640,14 +664,37 @@ extern char *erts_default_arg0;
extern char os_type[];
-extern int sys_init_time(void);
+typedef struct {
+ int have_os_monotonic_time;
+ int have_corrected_os_monotonic_time;
+ ErtsMonotonicTime os_monotonic_time_unit;
+ ErtsMonotonicTime sys_clock_resolution;
+ struct {
+ Uint64 resolution;
+ char *func;
+ char *clock_id;
+ int locked_use;
+ int extended;
+ } os_monotonic_time_info;
+ struct {
+ Uint64 resolution;
+ char *func;
+ char *clock_id;
+ int locked_use;
+ } os_system_time_info;
+} ErtsSysInitTimeResult;
+
+#define ERTS_SYS_INIT_TIME_RESULT_INITER \
+ {0, 0, (ErtsMonotonicTime) -1, (ErtsMonotonicTime) 1}
+
+extern void erts_init_sys_time_sup(void);
+extern void sys_init_time(ErtsSysInitTimeResult *);
+extern void erts_late_sys_init_time(void);
extern void erts_deliver_time(void);
extern void erts_time_remaining(SysTimeval *);
-extern int erts_init_time_sup(void);
extern void erts_sys_init_float(void);
extern void erts_thread_init_float(void);
extern void erts_thread_disable_fpe(void);
-
ERTS_GLB_INLINE int erts_block_fpe(void);
ERTS_GLB_INLINE void erts_unblock_fpe(int);
@@ -690,17 +737,12 @@ extern char *erts_sys_ddll_error(int code);
/*
* System interfaces for startup.
*/
-#include "erl_time.h"
-
-void erts_sys_schedule_interrupt(int set);
-#ifdef ERTS_SMP
-void erts_sys_schedule_interrupt_timed(int set, erts_short_time_t msec);
void erts_sys_main_thread(void);
-#endif
extern int erts_sys_prepare_crash_dump(int secs);
extern void erts_sys_pre_init(void);
extern void erl_sys_init(void);
+extern void erl_sys_late_init(void);
extern void erl_sys_args(int *argc, char **argv);
extern void erl_sys_schedule(int);
void sys_tty_reset(int);
@@ -711,10 +753,6 @@ Preload* sys_preloaded(void);
unsigned char* sys_preload_begin(Preload*);
void sys_preload_end(Preload*);
int sys_get_key(int);
-void elapsed_time_both(UWord *ms_user, UWord *ms_sys,
- UWord *ms_user_diff, UWord *ms_sys_diff);
-void wall_clock_elapsed_time_both(UWord *ms_total,
- UWord *ms_diff);
void get_time(int *hour, int *minute, int *second);
void get_date(int *year, int *month, int *day);
void get_localtime(int *year, int *month, int *day,
@@ -733,6 +771,12 @@ int univ_to_local(
int local_to_univ(Sint *year, Sint *month, Sint *day,
Sint *hour, Sint *minute, Sint *second, int isdst);
void get_now(Uint*, Uint*, Uint*);
+struct ErtsSchedulerData_;
+ErtsMonotonicTime erts_get_monotonic_time(struct ErtsSchedulerData_ *);
+ErtsMonotonicTime erts_get_time_offset(void);
+void
+erts_make_timestamp_value(Uint* megasec, Uint* sec, Uint* microsec,
+ ErtsMonotonicTime mtime, ErtsMonotonicTime offset);
void get_sys_now(Uint*, Uint*, Uint*);
void set_break_quit(void (*)(void), void (*)(void));
@@ -742,6 +786,16 @@ void init_getenv_state(GETENV_STATE *);
char * getenv_string(GETENV_STATE *);
void fini_getenv_state(GETENV_STATE *);
+#define HAVE_ERTS_CHECK_IO_DEBUG
+typedef struct {
+ int no_used_fds;
+ int no_driver_select_structs;
+ int no_enif_select_structs;
+} ErtsCheckIoDebugInfo;
+int erts_check_io_debug(ErtsCheckIoDebugInfo *ip);
+
+int erts_sys_is_area_readable(char *start, char *stop);
+
/* xxxP */
#define SYS_DEFAULT_FLOAT_DECIMALS 20
void init_sys_float(void);
@@ -770,6 +824,17 @@ int erts_sys_unsetenv(char *key);
char *erts_read_env(char *key);
void erts_free_read_env(void *value);
+#if defined(ERTS_THR_HAVE_SIG_FUNCS) && \
+ (!defined(ETHR_UNUSABLE_SIGUSRX) || defined(SIGRTMIN))
+extern void sys_thr_resume(erts_tid_t tid);
+extern void sys_thr_suspend(erts_tid_t tid);
+#ifdef SIGRTMIN
+#define ERTS_SYS_SUSPEND_SIGNAL (SIGRTMIN+1)
+#else
+#define ERTS_SYS_SUSPEND_SIGNAL (SIGUSR2)
+#endif /* SIGRTMIN */
+#endif /* HAVE_SIG_FUNCS */
+
/* utils.c */
/* Options to sys_alloc_opt */
@@ -800,10 +865,13 @@ void sys_alloc_stat(SysAllocStat *);
#define ERTS_REFC_DEBUG
#endif
-typedef erts_smp_atomic_t erts_refc_t;
+typedef erts_atomic_t erts_refc_t;
ERTS_GLB_INLINE void erts_refc_init(erts_refc_t *refcp, erts_aint_t val);
ERTS_GLB_INLINE void erts_refc_inc(erts_refc_t *refcp, erts_aint_t min_val);
+ERTS_GLB_INLINE erts_aint_t erts_refc_inc_unless(erts_refc_t *refcp,
+ erts_aint_t unless_val,
+ erts_aint_t min_val);
ERTS_GLB_INLINE erts_aint_t erts_refc_inctest(erts_refc_t *refcp,
erts_aint_t min_val);
ERTS_GLB_INLINE void erts_refc_dec(erts_refc_t *refcp, erts_aint_t min_val);
@@ -819,30 +887,54 @@ ERTS_GLB_INLINE erts_aint_t erts_refc_read(erts_refc_t *refcp,
ERTS_GLB_INLINE void
erts_refc_init(erts_refc_t *refcp, erts_aint_t val)
{
- erts_smp_atomic_init_nob((erts_smp_atomic_t *) refcp, val);
+ erts_atomic_init_nob((erts_atomic_t *) refcp, val);
}
ERTS_GLB_INLINE void
erts_refc_inc(erts_refc_t *refcp, erts_aint_t min_val)
{
#ifdef ERTS_REFC_DEBUG
- erts_aint_t val = erts_smp_atomic_inc_read_nob((erts_smp_atomic_t *) refcp);
+ erts_aint_t val = erts_atomic_inc_read_nob((erts_atomic_t *) refcp);
if (val < min_val)
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"erts_refc_inc(): Bad refc found (refc=%ld < %ld)!\n",
val, min_val);
#else
- erts_smp_atomic_inc_nob((erts_smp_atomic_t *) refcp);
+ erts_atomic_inc_nob((erts_atomic_t *) refcp);
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint_t
+erts_refc_inc_unless(erts_refc_t *refcp,
+ erts_aint_t unless_val,
+ erts_aint_t min_val)
+{
+ erts_aint_t val = erts_atomic_read_nob((erts_atomic_t *) refcp);
+ while (1) {
+ erts_aint_t exp, new;
+#ifdef ERTS_REFC_DEBUG
+ if (val < 0)
+ erts_exit(ERTS_ABORT_EXIT,
+ "erts_refc_inc_unless(): Bad refc found (refc=%ld < %ld)!\n",
+ val, min_val);
#endif
+ if (val == unless_val)
+ return val;
+ new = val + 1;
+ exp = val;
+ val = erts_atomic_cmpxchg_nob((erts_atomic_t *) refcp, new, exp);
+ if (val == exp)
+ return new;
+ }
}
ERTS_GLB_INLINE erts_aint_t
erts_refc_inctest(erts_refc_t *refcp, erts_aint_t min_val)
{
- erts_aint_t val = erts_smp_atomic_inc_read_nob((erts_smp_atomic_t *) refcp);
+ erts_aint_t val = erts_atomic_inc_read_nob((erts_atomic_t *) refcp);
#ifdef ERTS_REFC_DEBUG
if (val < min_val)
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"erts_refc_inctest(): Bad refc found (refc=%ld < %ld)!\n",
val, min_val);
#endif
@@ -853,23 +945,23 @@ ERTS_GLB_INLINE void
erts_refc_dec(erts_refc_t *refcp, erts_aint_t min_val)
{
#ifdef ERTS_REFC_DEBUG
- erts_aint_t val = erts_smp_atomic_dec_read_nob((erts_smp_atomic_t *) refcp);
+ erts_aint_t val = erts_atomic_dec_read_nob((erts_atomic_t *) refcp);
if (val < min_val)
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"erts_refc_dec(): Bad refc found (refc=%ld < %ld)!\n",
val, min_val);
#else
- erts_smp_atomic_dec_nob((erts_smp_atomic_t *) refcp);
+ erts_atomic_dec_nob((erts_atomic_t *) refcp);
#endif
}
ERTS_GLB_INLINE erts_aint_t
erts_refc_dectest(erts_refc_t *refcp, erts_aint_t min_val)
{
- erts_aint_t val = erts_smp_atomic_dec_read_nob((erts_smp_atomic_t *) refcp);
+ erts_aint_t val = erts_atomic_dec_read_nob((erts_atomic_t *) refcp);
#ifdef ERTS_REFC_DEBUG
if (val < min_val)
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"erts_refc_dectest(): Bad refc found (refc=%ld < %ld)!\n",
val, min_val);
#endif
@@ -880,23 +972,23 @@ ERTS_GLB_INLINE void
erts_refc_add(erts_refc_t *refcp, erts_aint_t diff, erts_aint_t min_val)
{
#ifdef ERTS_REFC_DEBUG
- erts_aint_t val = erts_smp_atomic_add_read_nob((erts_smp_atomic_t *) refcp, diff);
+ erts_aint_t val = erts_atomic_add_read_nob((erts_atomic_t *) refcp, diff);
if (val < min_val)
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"erts_refc_add(%ld): Bad refc found (refc=%ld < %ld)!\n",
diff, val, min_val);
#else
- erts_smp_atomic_add_nob((erts_smp_atomic_t *) refcp, diff);
+ erts_atomic_add_nob((erts_atomic_t *) refcp, diff);
#endif
}
ERTS_GLB_INLINE erts_aint_t
erts_refc_read(erts_refc_t *refcp, erts_aint_t min_val)
{
- erts_aint_t val = erts_smp_atomic_read_nob((erts_smp_atomic_t *) refcp);
+ erts_aint_t val = erts_atomic_read_nob((erts_atomic_t *) refcp);
#ifdef ERTS_REFC_DEBUG
if (val < min_val)
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"erts_refc_read(): Bad refc found (refc=%ld < %ld)!\n",
val, min_val);
#endif
@@ -905,10 +997,6 @@ erts_refc_read(erts_refc_t *refcp, erts_aint_t min_val)
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-#ifdef ERTS_ENABLE_KERNEL_POLL
-extern int erts_use_kernel_poll;
-#endif
-
#define sys_memcpy(s1,s2,n) memcpy(s1,s2,n)
#define sys_memmove(s1,s2,n) memmove(s1,s2,n)
#define sys_memcmp(s1,s2,n) memcmp(s1,s2,n)
@@ -997,7 +1085,6 @@ extern int erts_use_kernel_poll;
#define put_int8(i, s) do {((unsigned char*)(s))[0] = (i) & 0xff;} while (0)
-
/*
* Use DEBUGF as you would use printf, but use double parentheses:
*
@@ -1015,6 +1102,14 @@ void erl_bin_write(unsigned char *, int, int);
# define DEBUGF(x)
#endif
+#ifndef MAX
+#define MAX(A, B) ((A) > (B) ? (A) : (B))
+#endif
+
+#ifndef MIN
+#define MIN(A, B) ((A) < (B) ? (A) : (B))
+#endif
+
#ifdef __WIN32__
#ifdef ARCH_64
#define ERTS_ALLOC_ALIGN_BYTES 16
@@ -1080,4 +1175,52 @@ int erts_get_printable_characters(void);
void erts_init_sys_common_misc(void);
+ERTS_GLB_INLINE Sint erts_raw_env_7bit_ascii_char_need(int encoding);
+ERTS_GLB_INLINE byte *erts_raw_env_7bit_ascii_char_put(byte c, byte *p,
+ int encoding);
+ERTS_GLB_INLINE int erts_raw_env_char_is_7bit_ascii_char(byte c, byte *p,
+ int encoding);
+ERTS_GLB_INLINE byte *erts_raw_env_next_char(byte *p, int encoding);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE Sint
+erts_raw_env_7bit_ascii_char_need(int encoding)
+{
+ return (encoding == ERL_FILENAME_WIN_WCHAR) ? 2 : 1;
+}
+
+ERTS_GLB_INLINE byte *
+erts_raw_env_7bit_ascii_char_put(byte c,
+ byte *p,
+ int encoding)
+{
+ *(p++) = c;
+ if (encoding == ERL_FILENAME_WIN_WCHAR)
+ *(p++) = 0;
+ return p;
+}
+
+ERTS_GLB_INLINE int
+erts_raw_env_char_is_7bit_ascii_char(byte c,
+ byte *p,
+ int encoding)
+{
+ if (encoding == ERL_FILENAME_WIN_WCHAR)
+ return (p[0] == c) & (p[1] == 0);
+ else
+ return p[0] == c;
+}
+
+ERTS_GLB_INLINE byte *
+erts_raw_env_next_char(byte *p, int encoding)
+{
+ if (encoding == ERL_FILENAME_WIN_WCHAR)
+ return p + 2;
+ else
+ return p + 1;
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
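
A usage sketch of the raw-environment helpers above, assuming only what the inline functions themselves show: writing the 7-bit ASCII prefix "PATH=" works both for byte-oriented encodings and for ERL_FILENAME_WIN_WCHAR, where each character occupies two bytes. The put_path_prefix() helper is hypothetical.

static byte *
put_path_prefix(byte *p, int encoding)
{
    const char *key = "PATH=";
    /* Each character is widened to two bytes for WIN_WCHAR,
     * or written as a single byte otherwise. */
    while (*key)
        p = erts_raw_env_7bit_ascii_char_put((byte) *key++, p, encoding);
    return p; /* points just past the written prefix */
}
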
+
#endif
diff --git a/erts/emulator/beam/time.c b/erts/emulator/beam/time.c
index 2fd8e0cf00..a3069e419a 100644
--- a/erts/emulator/beam/time.c
+++ b/erts/emulator/beam/time.c
@@ -1,72 +1,173 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
-
+
/*
- * TIMING WHEEL
+ * TIMER WHEEL
+ *
+ *
+ * The time scale used for timers is Erlang monotonic time. The
+ * time unit used is ERTS-specific clock ticks. A clock tick is
+ * currently defined as 1 millisecond. That is, the resolution of
+ * timers triggered by the runtime system is 1 millisecond.
*
- * Timeouts kept in an wheel. A timeout is measured relative to the
- * current slot (tiw_pos) in the wheel, and inserted at slot
- * (tiw_pos + timeout) % TIW_SIZE. Each timeout also has a count
- * equal to timeout/TIW_SIZE, which is needed since the time axis
- * is wrapped arount the wheel.
+ * When a timer is set, it is determined at what Erlang monotonic
+ * time, in clock ticks, it should be triggered.
*
- * Several slots may be processed in one operation. If the number of
- * slots is greater that the wheel size, the wheel is only traversed
- * once,
+ * The 'pos' field of the wheel corresponds to the current time of
+ * the wheel. That is, it corresponds to Erlang monotonic time in
+ * the clock tick time unit. The 'pos' field of the wheel is
+ * monotonically increased when erts_bump_timers() is called. All
+ * timers in the wheel that have a time less than or equal to
+ * 'pos' are triggered by the bump operation. The bump operation
+ * may however be spread over multiple calls to erts_bump_timers()
+ * if there are a lot of timers to trigger.
*
- * The following example shows a time axis where there is one timeout
- * at each "tick", and where 1, 2, 3 ... wheel slots are released in
- * one operation. The notation "<x" means "release all items with
- * counts less than x".
+ * Each scheduler thread maintains its own timer wheel. The timer
+ * wheel of a scheduler, however, actually consists of two wheels:
+ * a soon wheel and a later wheel.
+ *
+ *
+ * -- The Soon Wheel --
+ *
+ * The soon wheel contains timers that are soon to be triggered.
+ * Each slot in the soon wheel is 1 clock tick wide. The number
+ * of slots in the soon wheel is currently 2¹⁴. That is, it
+ * contains timers in the range ('pos', 'pos' + 2¹⁴], which
+ * corresponds to a bit more than 16 seconds.
+ *
+ * When the bump operation is started, 'pos' is moved forward to a
+ * position that corresponds to the current Erlang monotonic time. Then
+ * all timers that are in the range (old 'pos', new 'pos'] are
+ * triggered. During a bump operation, the soon wheel may contain
+ * timers in the two, possibly overlapping, ranges (old 'pos',
+ * old 'pos' + 2¹⁴], and (new 'pos', new 'pos' + 2¹⁴]. This may
+ * occur even if the bump operation doesn't yield, due to timeout
+ * callbacks inserting new timers.
+ *
+ *
+ * -- The Later Wheel --
+ *
+ * The later wheel contains timers that are further away from
+ * 'pos' than the width of the soon wheel. That is, currently,
+ * timers further away from 'pos' than 2¹⁴ clock ticks. Each
+ * slot in the later wheel is half as wide as the entire soon
+ * wheel. That is, each slot is currently 2¹³ clock ticks wide,
+ * which corresponds to about 8 seconds. If three timers with
+ * the timeout times 'pos' + 17000, 'pos' + 18000, and
+ * 'pos' + 19000 are inserted, they will all end up in the same
+ * slot in the later wheel.
+ *
+ * The number of slots in the later wheel is currently the same as
+ * in the soon wheel, i.e. 2¹⁴. That is, one revolution of the later
+ * wheel currently corresponds to 2¹⁴×2¹³ clock ticks, which is
+ * almost 37½ hours. Timers even further away than that are put in
+ * the later slot identified by their time modulo the size of the later
+ * wheel. Such timers are, however, very uncommon. Most timers used
+ * by the runtime system will utilize the high level timer API.
+ * The high level timer implementation will not insert timers
+ * further away than one revolution into the later wheel. It will
+ * instead keep such timers in a tree of very long timers. The
+ * high level timer implementation utilizes one timer wheel timer
+ * for the management of this tree of timers. This timer is set to
+ * the closest timeout in the tree. It may, however, be further
+ * away than one revolution in the later wheel.
+ *
+ * The 'later.pos' field identifies the next position in the later wheel.
+ * 'later.pos' is always increased by the width of a later wheel slot.
+ * That is, currently 2¹³ clock ticks. When 'pos' is moved (during
+ * a bump operation) closer to 'later.pos' than the width of a later
+ * wheel slot, i.e. currently when 'pos' + 2¹³ ≥ 'later.pos', we
+ * inspect the slot identified by 'later.pos' and then move 'later.pos'
+ * forward. When inspecting the later slot, we move all timers in
+ * the slot that are in the soon wheel range from the later wheel to
+ * the soon wheel. Timers one or more revolutions of the later wheel
+ * away are kept in the slot.
+ *
+ * During normal operation, timers originally located in the later
+ * wheel will currently be moved into the soon wheel about 8 to
+ * 16 seconds before they should be triggered. During extremely
+ * heavy load, the scheduler might, however, be heavily delayed, so
+ * the code must be prepared for situations where the time for
+ * triggering the timer has already passed when we inspect the later
+ * wheel slot, and then trigger the timer immediately. We must also be
+ * prepared to inspect multiple later wheel slots at once due to the
+ * delay.
+ *
+ *
+ * -- Slot Management --
+ *
+ * All timers of a slot are placed in a circular doubly linked
+ * list. This makes insertion and removal of a timer O(1).
+ *
+ * While bumping timers in a slot, we move the circular list
+ * away from the slot, and refer to it from the 'sentinel'
+ * field. The list will stay there until we are done with it
+ * even if the bump operation should yield. The cancel operation
+ * can remove the timer from this position as well as from the
+ * slot position by just removing it from the circular doubly
+ * linked list that it is in.
+ *
+ * -- At Once Slot --
+ *
+ * If a timer is set with a time earlier than or equal to 'pos',
+ * it is not inserted into the wheel. It is instead inserted
+ * into a circular doubly linked list referred to by the "at
+ * once" slot. When the bump operation is performed, these timers
+ * will be triggered at once. The circular list of the slot will
+ * be moved to the 'sentinel' field while bumping these timers,
+ * as when bumping an ordinary wheel slot. A yielding bump
+ * operation and cancellation of timers are handled the same way
+ * as if the timer were in a wheel slot.
+ *
+ * -- Searching for Next Timeout --
+ *
+ * In order to limit the amount of work needed to find the next
+ * timeout, we keep track of the total number of timers in the
+ * wheels, the number of timers in the later wheel, the number
+ * of timers in the soon wheel, and the number of timers in
+ * each range of slots. Each slot range currently contains 512
+ * slots.
+ *
+ * When the next timeout is less than the soon wheel width away,
+ * we determine the exact timeout. Thanks to the timer counts of
+ * the slot ranges, we currently need to search at most 1024
+ * slots in the soon wheel. This is besides inspecting slot range
+ * counts and two slots in the later wheel, which potentially
+ * might trigger timeouts for moving timers from the later wheel
+ * to the soon wheel earlier than the timeouts in the soon wheel.
+ * We also keep track of the latest known minimum timeout position
+ * in each wheel, which makes it possible to avoid scanning from
+ * the current position each time.
+ *
+ * When the next timeout is further away than the soon wheel width,
+ * we settle for the earliest possible timeout in the first
+ * non-empty slot range. The further away the next timeout is, the
+ * more likely it is that it changes before we actually get there;
+ * that is, another timer is set to an earlier time and/or the
+ * timer is cancelled. There is therefore no point in determining
+ * the next timeout exactly in this case. If the state does not
+ * change, we will wake up a bit early, recalculate the next
+ * timeout, and eventually be so close to it that we determine it
+ * exactly. (A sketch of the slot arithmetic used by the wheels
+ * follows this comment.)
*
- * Size of wheel: 4
- *
- * --|----|----|----|----|----|----|----|----|----|----|----|----|----
- * 0.0 0.1 0.2 0.3 1.0 1.1 1.2 1.3 2.0 2.1 2.2 2.3 3.0
- *
- * 1 [ )
- * <1 0.1 0.2 0.3 0.0 1.1 1.2 1.3 1.0 2.1 2.2 2.3 2.0
- *
- * 2 [ )
- * <1 <1 0.2 0.3 0.0 0.1 1.2 1.3 1.0 1.1 2.2 2.3 2.0
- *
- * 3 [ )
- * <1 <1 <1 0.3 0.0 0.1 0.2 1.3 1.0 1.1 1.2 2.3 2.0
- *
- * 4 [ )
- * <1 <1 <1 <1 0.0 0.1 0.2 0.3 1.0 1.1 1.2 1.3 2.0
- *
- * 5 [ )
- * <2 <1 <1 <1. 0.1 0.2 0.3 0.0 1.1 1.2 1.3 1.0
- *
- * 6 [ )
- * <2 <2 <1 <1. 0.2 0.3 0.0 0.1 1.2 1.3 1.0
- *
- * 7 [ )
- * <2 <2 <2 <1. 0.3 0.0 0.1 0.2 1.3 1.0
- *
- * 8 [ )
- * <2 <2 <2 <2. 0.0 0.1 0.2 0.3 1.0
- *
- * 9 [ )
- * <3 <2 <2 <2. 0.1 0.2 0.3 0.0
- *
*/
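
The slot arithmetic described above is compact enough to check in isolation. Below is a standalone sketch (illustrative, not part of the patch) using the sizes given in the comment: a 2¹⁴-slot soon wheel with 1-tick slots and a 2¹⁴-slot later wheel with 2¹³-tick slots. It reproduces the claim that timers at 'pos' + 17000, 'pos' + 18000, and 'pos' + 19000 all share one later wheel slot.

#include <stdio.h>
#include <stdint.h>

#define SOON_BITS   14
#define SOON_SIZE   (1 << SOON_BITS)   /* 16384 slots, 1 tick each    */
#define LATER_SHIFT (SOON_BITS - 1)    /* later slots are 2^13 ticks  */
#define LATER_SIZE  (1 << 14)          /* later wheel slot count      */

int main(void)
{
    int64_t pos = 1000000; /* current wheel time, in clock ticks */
    int64_t t;

    /* The three example timeouts from the comment all map to the
     * same later wheel slot (124 for this 'pos'): */
    for (t = 17000; t <= 19000; t += 1000)
        printf("pos+%lld -> later slot %lld\n", (long long) t,
               (long long) (((pos + t) >> LATER_SHIFT) & (LATER_SIZE - 1)));

    /* A timeout closer than 2^14 ticks lands in the soon wheel: */
    printf("pos+100 -> soon slot %lld\n",
           (long long) ((pos + 100) & (SOON_SIZE - 1)));
    return 0;
}
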
#ifdef HAVE_CONFIG_H
@@ -76,6 +177,14 @@
#include "sys.h"
#include "erl_vm.h"
#include "global.h"
+#define ERTS_WANT_TIMER_WHEEL_API
+#include "erl_time.h"
+
+#define ERTS_MAX_CLKTCKS \
+ ERTS_MONOTONIC_TO_CLKTCKS(ERTS_MONOTONIC_TIME_MAX)
+
+#define ERTS_CLKTCKS_WEEK \
+ ERTS_MONOTONIC_TO_CLKTCKS(ERTS_SEC_TO_MONOTONIC(7*60*60*24))
#ifdef ERTS_ENABLE_LOCK_CHECK
#define ASSERT_NO_LOCKED_LOCKS erts_lc_check_exact(NULL, 0)
@@ -83,26 +192,75 @@
#define ASSERT_NO_LOCKED_LOCKS
#endif
-static erts_smp_mtx_t tiw_lock;
+#if 0
+# define ERTS_TW_HARD_DEBUG
+#endif
+#if defined(ERTS_TW_HARD_DEBUG) && !defined(ERTS_TW_DEBUG)
+# define ERTS_TW_DEBUG
+#endif
+#if defined(DEBUG) && !defined(ERTS_TW_DEBUG)
+# define ERTS_TW_DEBUG
+#endif
-/* BEGIN tiw_lock protected variables
-**
-** The individual timer cells in tiw are also protected by the same mutex.
-*/
+#undef ERTS_TW_ASSERT
+#if defined(ERTS_TW_DEBUG)
+# define ERTS_TW_ASSERT(E) ERTS_ASSERT(E)
+#else
+# define ERTS_TW_ASSERT(E) ((void) 1)
+#endif
-#ifdef SMALL_MEMORY
-#define TIW_SIZE 8192
+#ifdef ERTS_TW_DEBUG
+# define ERTS_TWHEEL_BUMP_YIELD_LIMIT 500
#else
-#define TIW_SIZE 65536 /* timing wheel size (should be a power of 2) */
+# define ERTS_TWHEEL_BUMP_YIELD_LIMIT 10000
#endif
-static ErlTimer** tiw; /* the timing wheel, allocated in init_time() */
-static Uint tiw_pos; /* current position in wheel */
-static Uint tiw_nto; /* number of timeouts in wheel */
-static Uint tiw_min;
-static ErlTimer *tiw_min_ptr;
+#define ERTS_TW_COST_SLOT 1
+#define ERTS_TW_COST_SLOT_MOVE 5
+#define ERTS_TW_COST_TIMEOUT 100
+
+/*
+ * Every slot in the soon wheel is a clock tick (as defined
+ * by ERTS) wide. A clock tick is currently 1 millisecond.
+ */
-/* END tiw_lock protected variables */
+#define ERTS_TW_SOON_WHEEL_FIRST_SLOT 0
+#define ERTS_TW_SOON_WHEEL_END_SLOT \
+ (ERTS_TW_SOON_WHEEL_FIRST_SLOT + ERTS_TW_SOON_WHEEL_SIZE)
+
+#define ERTS_TW_SOON_WHEEL_MASK (ERTS_TW_SOON_WHEEL_SIZE-1)
+
+/*
+ * Every slot in the later wheel is half as wide as the entire
+ * soon wheel.
+ */
+
+#define ERTS_TW_LATER_WHEEL_SHIFT (ERTS_TW_SOON_WHEEL_BITS - 1)
+#define ERTS_TW_LATER_WHEEL_SLOT_SIZE \
+ ((ErtsMonotonicTime) (1 << ERTS_TW_LATER_WHEEL_SHIFT))
+#define ERTS_TW_LATER_WHEEL_POS_MASK \
+ (~((ErtsMonotonicTime) (1 << ERTS_TW_LATER_WHEEL_SHIFT)-1))
+
+#define ERTS_TW_LATER_WHEEL_FIRST_SLOT ERTS_TW_SOON_WHEEL_SIZE
+#define ERTS_TW_LATER_WHEEL_END_SLOT \
+ (ERTS_TW_LATER_WHEEL_FIRST_SLOT + ERTS_TW_LATER_WHEEL_SIZE)
+
+#define ERTS_TW_LATER_WHEEL_MASK (ERTS_TW_LATER_WHEEL_SIZE-1)
+
+#define ERTS_TW_SCNT_BITS 9
+#define ERTS_TW_SCNT_SHIFT
+#define ERTS_TW_SCNT_SIZE \
+ ((ERTS_TW_SOON_WHEEL_SIZE + ERTS_TW_LATER_WHEEL_SIZE) \
+ >> ERTS_TW_SCNT_BITS)
+
+#ifdef __GNUC__
+#if ERTS_TW_SOON_WHEEL_BITS < ERTS_TW_SCNT_BITS
+# warning Consider larger soon timer wheel
+#endif
+#if ERTS_TW_LATER_WHEEL_BITS < ERTS_TW_SCNT_BITS
+# warning Consider larger later timer wheel
+#endif
+#endif
/* Actual interval time chosen by sys_init_time() */
@@ -114,392 +272,1260 @@ static int tiw_itime; /* Constant after init */
# define TIW_ITIME tiw_itime
#endif
-erts_smp_atomic32_t do_time; /* set at clock interrupt */
-static ERTS_INLINE erts_short_time_t do_time_read(void)
+const int etp_tw_soon_wheel_size = ERTS_TW_SOON_WHEEL_SIZE;
+const ErtsMonotonicTime etp_tw_soon_wheel_mask = ERTS_TW_SOON_WHEEL_MASK;
+const int etp_tw_soon_wheel_first_slot = ERTS_TW_SOON_WHEEL_FIRST_SLOT;
+
+const int etp_tw_later_wheel_size = ERTS_TW_LATER_WHEEL_SIZE;
+const ErtsMonotonicTime etp_tw_later_wheel_slot_size = ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+const int etp_tw_later_wheel_shift = ERTS_TW_LATER_WHEEL_SHIFT;
+const ErtsMonotonicTime etp_tw_later_wheel_mask = ERTS_TW_LATER_WHEEL_MASK;
+const ErtsMonotonicTime etp_tw_later_wheel_pos_mask = ERTS_TW_LATER_WHEEL_POS_MASK;
+const int etp_tw_later_wheel_first_slot = ERTS_TW_LATER_WHEEL_FIRST_SLOT;
+
+struct ErtsTimerWheel_ {
+ ErtsTWheelTimer *slots[1 /* At Once Slot */
+ + ERTS_TW_SOON_WHEEL_SIZE /* Soon Wheel Slots */
+ + ERTS_TW_LATER_WHEEL_SIZE]; /* Later Wheel Slots */
+ ErtsTWheelTimer **w;
+ Sint scnt[ERTS_TW_SCNT_SIZE];
+ Sint bump_scnt[ERTS_TW_SCNT_SIZE];
+ ErtsMonotonicTime pos;
+ Uint nto;
+ struct {
+ Uint nto;
+ } at_once;
+ struct {
+ ErtsMonotonicTime min_tpos;
+ Uint nto;
+ } soon;
+ struct {
+ ErtsMonotonicTime min_tpos;
+ int min_tpos_slot;
+ ErtsMonotonicTime pos;
+ Uint nto;
+ } later;
+ int yield_slot;
+ int yield_slots_left;
+ ErtsTWheelTimer sentinel;
+ int true_next_timeout_time;
+ ErtsMonotonicTime next_timeout_pos;
+ ErtsMonotonicTime next_timeout_time;
+};
+
+#define ERTS_TW_SLOT_AT_ONCE (-1)
+
+#define ERTS_TW_BUMP_LATER_WHEEL(TIW) \
+    ((TIW)->pos + ERTS_TW_LATER_WHEEL_SLOT_SIZE >= (TIW)->later.pos)
+
+static int bump_later_wheel(ErtsTimerWheel *tiw, int *yield_count_p);
+
+#ifdef ERTS_TW_DEBUG
+#define ERTS_TW_DBG_VERIFY_EMPTY_SOON_SLOTS(TIW, TO_POS) \
+ dbg_verify_empty_soon_slots((TIW), (TO_POS))
+#define ERTS_TW_DBG_VERIFY_EMPTY_LATER_SLOTS(TIW, TO_POS) \
+ dbg_verify_empty_later_slots((TIW), (TO_POS))
+void dbg_verify_empty_soon_slots(ErtsTimerWheel *, ErtsMonotonicTime);
+void dbg_verify_empty_later_slots(ErtsTimerWheel *, ErtsMonotonicTime);
+#else
+#define ERTS_TW_DBG_VERIFY_EMPTY_SOON_SLOTS(TIW, TO_POS)
+#define ERTS_TW_DBG_VERIFY_EMPTY_LATER_SLOTS(TIW, TO_POS)
+#endif
+
+static ERTS_INLINE int
+scnt_get_ix(int slot)
{
- return erts_smp_atomic32_read_acqb(&do_time);
+ return slot >> ERTS_TW_SCNT_BITS;
}
-static ERTS_INLINE erts_short_time_t do_time_update(void)
+static ERTS_INLINE void
+scnt_inc(Sint *scnt, int slot)
{
- return do_time_read();
+ scnt[slot >> ERTS_TW_SCNT_BITS]++;
}
-static ERTS_INLINE void do_time_init(void)
+#ifdef ERTS_TW_HARD_DEBUG
+
+static ERTS_INLINE void
+scnt_ix_inc(Sint *scnt, int six)
+{
+ scnt[six]++;
+}
+
+#endif
+
+static ERTS_INLINE void
+scnt_dec(Sint *scnt, int slot)
{
- erts_smp_atomic32_init_nob(&do_time, 0);
+ scnt[slot >> ERTS_TW_SCNT_BITS]--;
+ ERTS_TW_ASSERT(scnt[slot >> ERTS_TW_SCNT_BITS] >= 0);
}
-/* get the time (in units of TIW_ITIME) to the next timeout,
- or -1 if there are no timeouts */
+static ERTS_INLINE void
+scnt_ix_dec(Sint *scnt, int six)
+{
+ scnt[six]--;
+ ERTS_TW_ASSERT(scnt[six] >= 0);
+}
-static erts_short_time_t next_time_internal(void) /* PRE: tiw_lock taken by caller */
+static ERTS_INLINE void
+scnt_wheel_next(int *slotp, int *leftp, ErtsMonotonicTime *posp,
+ int *sixp, Sint *scnt, int first_slot,
+ int end_slot, ErtsMonotonicTime slot_sz)
{
- int i, tm, nto;
- Uint32 min;
- ErlTimer* p;
- erts_short_time_t dt;
-
- if (tiw_nto == 0)
- return -1; /* no timeouts in wheel */
-
- if (tiw_min_ptr) {
- min = tiw_min;
- dt = do_time_read();
- return ((min >= dt) ? (min - dt) : 0);
+ int slot = *slotp;
+ int left = *leftp;
+ int ix;
+
+ ERTS_TW_ASSERT(*leftp >= 0);
+
+ left--;
+ slot++;
+ if (slot == end_slot)
+ slot = first_slot;
+ ix = slot >> ERTS_TW_SCNT_BITS;
+
+ while (!scnt[ix] && left > 0) {
+ int diff, old_slot = slot;
+ ix++;
+ slot = (ix << ERTS_TW_SCNT_BITS);
+ diff = slot - old_slot;
+ if (left < diff) {
+ slot = old_slot + left;
+ diff = left;
+ }
+ if (slot < end_slot)
+ left -= diff;
+ else {
+ left -= end_slot - old_slot;
+ slot = first_slot;
+ ix = slot >> ERTS_TW_SCNT_BITS;
+ }
}
-
- /* start going through wheel to find next timeout */
- tm = nto = 0;
- min = (Uint32) -1; /* max Uint32 */
- i = tiw_pos;
- do {
- p = tiw[i];
- while (p != NULL) {
- nto++;
- if (p->count == 0) {
- /* found next timeout */
- dt = do_time_read();
- /* p->count is zero */
- tiw_min_ptr = p;
- tiw_min = tm;
- return ((tm >= dt) ? (tm - dt) : 0);
- } else {
- /* keep shortest time in 'min' */
- if (tm + p->count*TIW_SIZE < min) {
- min = tm + p->count*TIW_SIZE;
- tiw_min_ptr = p;
- tiw_min = min;
- }
- }
- p = p->next;
- }
- /* when we have found all timeouts the shortest time will be in min */
- if (nto == tiw_nto) break;
- tm++;
- i = (i + 1) % TIW_SIZE;
- } while (i != tiw_pos);
- dt = do_time_read();
- if (min <= (Uint32) dt)
- return 0;
- if ((min - (Uint32) dt) > (Uint32) ERTS_SHORT_TIME_T_MAX)
- return ERTS_SHORT_TIME_T_MAX;
- return (erts_short_time_t) (min - (Uint32) dt);
+
+ ERTS_TW_ASSERT(left >= -1);
+
+ if (posp)
+ *posp += slot_sz * ((ErtsMonotonicTime) (*leftp - left));
+ if (sixp)
+ *sixp = slot >> ERTS_TW_SCNT_BITS;
+ *leftp = left;
+ *slotp = slot;
+}
+
+
+static ERTS_INLINE void
+scnt_soon_wheel_next(int *slotp, int *leftp, ErtsMonotonicTime *posp,
+ int *sixp, Sint *scnt)
+{
+ scnt_wheel_next(slotp, leftp, posp, sixp, scnt,
+ ERTS_TW_SOON_WHEEL_FIRST_SLOT,
+ ERTS_TW_SOON_WHEEL_END_SLOT, 1);
}
-static void remove_timer(ErlTimer *p) {
- /* first */
- if (!p->prev) {
- tiw[p->slot] = p->next;
- if(p->next)
- p->next->prev = NULL;
- } else {
- p->prev->next = p->next;
+static ERTS_INLINE void
+scnt_later_wheel_next(int *slotp, int *leftp, ErtsMonotonicTime *posp,
+ int *sixp, Sint *scnt)
+{
+ scnt_wheel_next(slotp, leftp, posp, sixp, scnt,
+ ERTS_TW_LATER_WHEEL_FIRST_SLOT,
+ ERTS_TW_LATER_WHEEL_END_SLOT,
+ ERTS_TW_LATER_WHEEL_SLOT_SIZE);
+}
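
A minimal sketch (illustrative, not part of the patch) of the skip performed by scnt_wheel_next() above: with 2⁹ = 512 slots per counted range, a range whose count is zero lets the search jump straight to the next range boundary instead of visiting every slot.

static int
next_range_start(int slot, int scnt_bits)
{
    /* First slot of the range following the one that contains 'slot'. */
    return ((slot >> scnt_bits) + 1) << scnt_bits;
}

/* With scnt_bits = 9: next_range_start(700, 9) == 1024, so slots
 * 701..1023 are never inspected when range 1 (slots 512..1023)
 * has a zero count. */
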
+
+
+static ERTS_INLINE int
+soon_slot(ErtsMonotonicTime soon_pos)
+{
+ ErtsMonotonicTime slot = soon_pos;
+ slot &= ERTS_TW_SOON_WHEEL_MASK;
+
+ ERTS_TW_ASSERT(ERTS_TW_SOON_WHEEL_FIRST_SLOT <= slot);
+ ERTS_TW_ASSERT(slot < ERTS_TW_SOON_WHEEL_END_SLOT);
+
+ return (int) slot;
+}
+
+static ERTS_INLINE int
+later_slot(ErtsMonotonicTime later_pos)
+{
+ ErtsMonotonicTime slot = later_pos;
+ slot >>= ERTS_TW_LATER_WHEEL_SHIFT;
+ slot &= ERTS_TW_LATER_WHEEL_MASK;
+ slot += ERTS_TW_LATER_WHEEL_FIRST_SLOT;
+
+ ERTS_TW_ASSERT(ERTS_TW_LATER_WHEEL_FIRST_SLOT <= slot);
+ ERTS_TW_ASSERT(slot < ERTS_TW_LATER_WHEEL_END_SLOT);
+
+ return (int) slot;
+}
+
+#ifdef ERTS_TW_HARD_DEBUG
+#define ERTS_HARD_DBG_CHK_WHEELS(TIW, CHK_MIN_TPOS) \
+ hrd_dbg_check_wheels((TIW), (CHK_MIN_TPOS))
+static void hrd_dbg_check_wheels(ErtsTimerWheel *tiw, int check_min_tpos);
+#else
+#define ERTS_HARD_DBG_CHK_WHEELS(TIW, CHK_MIN_TPOS)
+#endif
+
+static ErtsMonotonicTime
+find_next_timeout(ErtsSchedulerData *esdp, ErtsTimerWheel *tiw)
+{
+ int slot, slots;
+ int true_min_timeout = 0;
+ ErtsMonotonicTime min_timeout_pos;
+
+ ERTS_TW_ASSERT(tiw->pos + ERTS_TW_LATER_WHEEL_SLOT_SIZE < tiw->later.pos
+ && tiw->later.pos <= tiw->pos + ERTS_TW_SOON_WHEEL_SIZE);
+
+ ERTS_HARD_DBG_CHK_WHEELS(tiw, 0);
+
+ ERTS_TW_ASSERT(tiw->yield_slot == ERTS_TW_SLOT_INACTIVE);
+
+ if (tiw->nto == 0) { /* no timeouts in wheel */
+ ErtsMonotonicTime curr_time = erts_get_monotonic_time(esdp);
+ tiw->pos = min_timeout_pos = ERTS_MONOTONIC_TO_CLKTCKS(curr_time);
+ tiw->later.pos = min_timeout_pos + ERTS_TW_SOON_WHEEL_SIZE;
+ tiw->later.pos &= ERTS_TW_LATER_WHEEL_POS_MASK;
+ min_timeout_pos += ERTS_CLKTCKS_WEEK;
+ goto done;
}
- /* last */
- if (!p->next) {
- if (p->prev)
- p->prev->next = NULL;
- } else {
- p->next->prev = p->prev;
+ ERTS_TW_ASSERT(tiw->soon.nto || tiw->later.nto);
+
+ if (!tiw->soon.nto) {
+ ErtsMonotonicTime tpos, min_tpos;
+
+ /* Search later wheel... */
+
+ min_tpos = tiw->later.min_tpos & ERTS_TW_LATER_WHEEL_POS_MASK;
+
+ if (min_tpos <= tiw->later.pos) {
+ tpos = tiw->later.pos;
+ slots = ERTS_TW_LATER_WHEEL_SIZE;
+ }
+ else {
+ ErtsMonotonicTime tmp;
+ /* Don't inspect slots we know are empty... */
+ tmp = min_tpos - tiw->later.pos;
+ tmp /= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ if (tmp >= ERTS_TW_LATER_WHEEL_SIZE) {
+ /* Timeout more than one revolution ahead... */
+
+ /* Pre-timeout for move from later to soon wheel... */
+ min_timeout_pos = min_tpos - ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ goto done;
+ }
+ tpos = min_tpos;
+ ERTS_TW_DBG_VERIFY_EMPTY_LATER_SLOTS(tiw, min_tpos);
+ slots = ERTS_TW_LATER_WHEEL_SIZE - ((int) tmp);
+ }
+
+ slot = later_slot(tpos);
+
+ /*
+ * We never search for an exact timeout in the
+ * later wheel, but instead settle for the first
+ * scnt range used.
+ */
+ if (tiw->w[slot])
+ true_min_timeout = 1;
+ else
+ scnt_later_wheel_next(&slot, &slots, &tpos, NULL, tiw->scnt);
+
+ tiw->later.min_tpos = tpos;
+ tiw->later.min_tpos_slot = slot;
+ ERTS_TW_ASSERT(slot == later_slot(tpos));
+
+ /* Pre-timeout for move from later to soon wheel... */
+ tpos -= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ min_timeout_pos = tpos;
}
+ else {
+ ErtsMonotonicTime tpos;
+ /* Search soon wheel... */
+
+ min_timeout_pos = tiw->pos + ERTS_TW_SOON_WHEEL_SIZE;
+
+ /*
+ * Besides inspecting the soon wheel we
+ * may also have to inspect two slots in the
+ * later wheel which potentially can trigger
+ * timeouts before timeouts in soon wheel...
+ */
+ if (tiw->later.min_tpos > (tiw->later.pos
+ + 2*ERTS_TW_LATER_WHEEL_SLOT_SIZE)) {
+ ERTS_TW_DBG_VERIFY_EMPTY_LATER_SLOTS(
+ tiw, 2*ERTS_TW_LATER_WHEEL_SLOT_SIZE);
+ }
+ else {
+ int fslot;
+ tpos = tiw->later.pos;
+ tpos -= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ fslot = later_slot(tiw->later.pos);
+ if (tiw->w[fslot])
+ min_timeout_pos = tpos;
+ else {
+ tpos += ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ if (tpos < min_timeout_pos) {
+ fslot++;
+ if (fslot == ERTS_TW_LATER_WHEEL_END_SLOT)
+ fslot = ERTS_TW_LATER_WHEEL_FIRST_SLOT;
+ if (tiw->w[fslot])
+ min_timeout_pos = tpos;
+ }
+ }
+ }
+
+ if (tiw->soon.min_tpos <= tiw->pos) {
+ tpos = tiw->pos;
+ slots = ERTS_TW_SOON_WHEEL_SIZE;
+ }
+ else {
+ ErtsMonotonicTime tmp;
+ /* Don't inspect slots we know are empty... */
+ tmp = tiw->soon.min_tpos - tiw->pos;
+ ERTS_TW_ASSERT(ERTS_TW_SOON_WHEEL_SIZE > tmp);
+ ERTS_TW_DBG_VERIFY_EMPTY_SOON_SLOTS(tiw, tiw->soon.min_tpos);
+ slots = ERTS_TW_SOON_WHEEL_SIZE - ((int) tmp);
+ tpos = tiw->soon.min_tpos;
+ }
+
+ slot = soon_slot(tpos);
+
+ /* find next non-empty slot */
+ while (tpos < min_timeout_pos) {
+ if (tiw->w[slot]) {
+ ERTS_TW_ASSERT(tiw->w[slot]->timeout_pos == tpos);
+ min_timeout_pos = tpos;
+ break;
+ }
+ scnt_soon_wheel_next(&slot, &slots, &tpos, NULL, tiw->scnt);
+ }
+
+ tiw->soon.min_tpos = min_timeout_pos;
+ true_min_timeout = 1;
+ }
+
+done: {
+ ErtsMonotonicTime min_timeout;
+
+ min_timeout = ERTS_CLKTCKS_TO_MONOTONIC(min_timeout_pos);
+ tiw->next_timeout_pos = min_timeout_pos;
+ tiw->next_timeout_time = min_timeout;
+ tiw->true_next_timeout_time = true_min_timeout;
+
+ ERTS_HARD_DBG_CHK_WHEELS(tiw, 1);
- p->next = NULL;
- p->prev = NULL;
- /* Make sure cancel callback isn't called */
- p->active = 0;
- tiw_nto--;
+ return min_timeout;
+ }
+}
+
+static ERTS_INLINE void
+insert_timer_into_slot(ErtsTimerWheel *tiw, int slot, ErtsTWheelTimer *p)
+{
+ ERTS_TW_ASSERT(ERTS_TW_SLOT_AT_ONCE <= slot
+ && slot < ERTS_TW_LATER_WHEEL_END_SLOT);
+ p->slot = slot;
+ if (!tiw->w[slot]) {
+ tiw->w[slot] = p;
+ p->next = p;
+ p->prev = p;
+ }
+ else {
+ ErtsTWheelTimer *next, *prev;
+ next = tiw->w[slot];
+ prev = next->prev;
+ p->next = next;
+ p->prev = prev;
+ prev->next = p;
+ next->prev = p;
+ }
+ if (slot == ERTS_TW_SLOT_AT_ONCE)
+ tiw->at_once.nto++;
+ else {
+ ErtsMonotonicTime tpos = p->timeout_pos;
+ if (slot < ERTS_TW_SOON_WHEEL_END_SLOT) {
+ ERTS_TW_ASSERT(p->timeout_pos < tiw->pos + ERTS_TW_SOON_WHEEL_SIZE);
+ tiw->soon.nto++;
+ if (tiw->soon.min_tpos > tpos)
+ tiw->soon.min_tpos = tpos;
+ }
+ else {
+ ERTS_TW_ASSERT(p->timeout_pos >= tiw->pos + ERTS_TW_SOON_WHEEL_SIZE);
+ tiw->later.nto++;
+ if (tiw->later.min_tpos > tpos) {
+ tiw->later.min_tpos = tpos;
+ tiw->later.min_tpos_slot = slot;
+ }
+ }
+ scnt_inc(tiw->scnt, slot);
+ }
+}
+
+static ERTS_INLINE void
+remove_timer(ErtsTimerWheel *tiw, ErtsTWheelTimer *p)
+{
+ int slot = p->slot;
+ int empty_slot;
+ ERTS_TW_ASSERT(slot != ERTS_TW_SLOT_INACTIVE);
+
+ /*
+ * Timer is in circular list either referred to
+ * by at once slot, slot in soon wheel, slot
+ * in later wheel, or by sentinel (timers currently
+ * being triggered).
+ */
+ ERTS_TW_ASSERT(ERTS_TW_SLOT_AT_ONCE <= slot
+ && slot < ERTS_TW_LATER_WHEEL_END_SLOT);
+
+ if (p->next == p) {
+ /* Cannot be referred by sentinel, i.e. must be referred by slot... */
+ ERTS_TW_ASSERT(tiw->w[slot] == p);
+ tiw->w[slot] = NULL;
+ empty_slot = 1;
+ }
+ else {
+ if (tiw->w[slot] == p)
+ tiw->w[slot] = p->next;
+ p->prev->next = p->next;
+ p->next->prev = p->prev;
+ empty_slot = 0;
+ }
+ if (slot == ERTS_TW_SLOT_AT_ONCE) {
+ ERTS_TW_ASSERT(tiw->at_once.nto > 0);
+ tiw->at_once.nto--;
+ }
+ else {
+ scnt_dec(tiw->scnt, slot);
+ if (slot < ERTS_TW_SOON_WHEEL_END_SLOT) {
+ if (empty_slot
+ && tiw->true_next_timeout_time
+ && p->timeout_pos == tiw->next_timeout_pos) {
+ tiw->true_next_timeout_time = 0;
+ }
+ if (--tiw->soon.nto == 0)
+ tiw->soon.min_tpos = ERTS_MAX_CLKTCKS;
+ }
+ else {
+ if (empty_slot
+ && tiw->true_next_timeout_time
+ && tiw->later.min_tpos_slot == slot) {
+ ErtsMonotonicTime tpos = tiw->later.min_tpos;
+ tpos &= ERTS_TW_LATER_WHEEL_POS_MASK;
+ tpos -= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ if (tpos == tiw->next_timeout_pos)
+ tiw->true_next_timeout_time = 0;
+ }
+ if (--tiw->later.nto == 0) {
+ tiw->later.min_tpos = ERTS_MAX_CLKTCKS;
+ tiw->later.min_tpos_slot = ERTS_TW_LATER_WHEEL_END_SLOT;
+ }
+ }
+ }
+ p->slot = ERTS_TW_SLOT_INACTIVE;
}
-/* Private export to erl_time_sup.c */
-erts_short_time_t erts_next_time(void)
+ErtsMonotonicTime
+erts_check_next_timeout_time(ErtsSchedulerData *esdp)
{
- erts_short_time_t ret;
+ ErtsTimerWheel *tiw = esdp->timer_wheel;
+ ErtsMonotonicTime time;
+ ERTS_MSACC_DECLARE_CACHE_X();
+ ERTS_TW_ASSERT(tiw->next_timeout_time
+ == ERTS_CLKTCKS_TO_MONOTONIC(tiw->next_timeout_pos));
+ if (tiw->true_next_timeout_time)
+ return tiw->next_timeout_time; /* known timeout... */
+ if (tiw->next_timeout_pos > tiw->pos + ERTS_TW_SOON_WHEEL_SIZE)
+	return tiw->next_timeout_time; /* sufficiently far away... */
+ ERTS_MSACC_PUSH_AND_SET_STATE_CACHED_X(ERTS_MSACC_STATE_TIMERS);
+ time = find_next_timeout(esdp, tiw);
+ ERTS_MSACC_POP_STATE_M_X();
+ return time;
+}
- erts_smp_mtx_lock(&tiw_lock);
- (void)do_time_update();
- ret = next_time_internal();
- erts_smp_mtx_unlock(&tiw_lock);
- return ret;
+static ERTS_INLINE void
+timeout_timer(ErtsTWheelTimer *p)
+{
+ ErlTimeoutProc timeout;
+ void *arg;
+ p->slot = ERTS_TW_SLOT_INACTIVE;
+ timeout = p->timeout;
+ arg = p->arg;
+ (*timeout)(arg);
+ ASSERT_NO_LOCKED_LOCKS;
}
-static ERTS_INLINE void bump_timer_internal(erts_short_time_t dt) /* PRE: tiw_lock is write-locked */
+void
+erts_bump_timers(ErtsTimerWheel *tiw, ErtsMonotonicTime curr_time)
{
- Uint keep_pos;
- Uint count;
- ErlTimer *p, **prev, *timeout_head, **timeout_tail;
- Uint dtime = (Uint) dt;
-
- /* no need to bump the position if there aren't any timeouts */
- if (tiw_nto == 0) {
- erts_smp_mtx_unlock(&tiw_lock);
- return;
+ int slot, restarted, yield_count, slots, scnt_ix;
+ ErtsMonotonicTime bump_to;
+ Sint *scnt, *bump_scnt;
+ ERTS_MSACC_PUSH_AND_SET_STATE_M_X(ERTS_MSACC_STATE_TIMERS);
+
+ yield_count = ERTS_TWHEEL_BUMP_YIELD_LIMIT;
+
+ scnt = &tiw->scnt[0];
+ bump_scnt = &tiw->bump_scnt[0];
+
+ /*
+     * In order to be fair, we always continue with work
+ * where we left off when restarting after a yield.
+ */
+
+ slot = tiw->yield_slot;
+ restarted = slot != ERTS_TW_SLOT_INACTIVE;
+ if (restarted) {
+ bump_to = tiw->pos;
+ if (slot >= ERTS_TW_LATER_WHEEL_FIRST_SLOT)
+ goto restart_yielded_later_slot;
+ tiw->yield_slot = ERTS_TW_SLOT_INACTIVE;
+ if (slot == ERTS_TW_SLOT_AT_ONCE)
+ goto restart_yielded_at_once_slot;
+ scnt_ix = scnt_get_ix(slot);
+ slots = tiw->yield_slots_left;
+ ASSERT(0 <= slots && slots <= ERTS_TW_SOON_WHEEL_SIZE);
+ goto restart_yielded_soon_slot;
}
- /* if do_time > TIW_SIZE we want to go around just once */
- count = (Uint)(dtime / TIW_SIZE) + 1;
- keep_pos = (tiw_pos + dtime) % TIW_SIZE;
- if (dtime > TIW_SIZE) dtime = TIW_SIZE;
-
- timeout_head = NULL;
- timeout_tail = &timeout_head;
- while (dtime > 0) {
- /* this is to decrease the counters with the right amount */
- /* when dtime >= TIW_SIZE */
- if (tiw_pos == keep_pos) count--;
- prev = &tiw[tiw_pos];
- while ((p = *prev) != NULL) {
- ASSERT( p != p->next);
- if (p->count < count) { /* we have a timeout */
- /* remove min time */
- if (tiw_min_ptr == p) {
- tiw_min_ptr = NULL;
- tiw_min = 0;
- }
+ do {
+
+ restarted = 0;
+ bump_to = ERTS_MONOTONIC_TO_CLKTCKS(curr_time);
+ tiw->true_next_timeout_time = 1;
+ tiw->next_timeout_pos = bump_to;
+ tiw->next_timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(bump_to);
- /* Remove from list */
- remove_timer(p);
- *timeout_tail = p; /* Insert in timeout queue */
- timeout_tail = &p->next;
+ while (1) {
+ ErtsTWheelTimer *p;
+
+ if (tiw->nto == 0) {
+ empty_wheel:
+ ERTS_TW_DBG_VERIFY_EMPTY_SOON_SLOTS(tiw, bump_to);
+ ERTS_TW_DBG_VERIFY_EMPTY_LATER_SLOTS(tiw, bump_to);
+ tiw->true_next_timeout_time = 0;
+ tiw->next_timeout_pos = bump_to + ERTS_CLKTCKS_WEEK;
+                tiw->next_timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(tiw->next_timeout_pos);
+ tiw->pos = bump_to;
+ tiw->later.pos = bump_to + ERTS_TW_SOON_WHEEL_SIZE;
+ tiw->later.pos &= ERTS_TW_LATER_WHEEL_POS_MASK;
+ tiw->yield_slot = ERTS_TW_SLOT_INACTIVE;
+ ERTS_MSACC_POP_STATE_M_X();
+ return;
}
- else {
- /* no timeout, just decrease counter */
- p->count -= count;
- prev = &p->next;
+
+ p = tiw->w[ERTS_TW_SLOT_AT_ONCE];
+
+ if (p) {
+
+ if (p->next == p) {
+ ERTS_TW_ASSERT(tiw->sentinel.next == &tiw->sentinel);
+ ERTS_TW_ASSERT(tiw->sentinel.prev == &tiw->sentinel);
+ }
+ else {
+ tiw->sentinel.next = p->next;
+ tiw->sentinel.prev = p->prev;
+ tiw->sentinel.next->prev = &tiw->sentinel;
+ tiw->sentinel.prev->next = &tiw->sentinel;
+ }
+ tiw->w[ERTS_TW_SLOT_AT_ONCE] = NULL;
+
+ while (1) {
+ ERTS_TW_ASSERT(tiw->nto > 0);
+ ERTS_TW_ASSERT(tiw->at_once.nto > 0);
+ tiw->nto--;
+ tiw->at_once.nto--;
+
+ timeout_timer(p);
+
+ yield_count -= ERTS_TW_COST_TIMEOUT;
+
+ restart_yielded_at_once_slot:
+
+ p = tiw->sentinel.next;
+ if (p == &tiw->sentinel) {
+ ERTS_TW_ASSERT(tiw->sentinel.prev == &tiw->sentinel);
+ break;
+ }
+
+ if (yield_count <= 0) {
+ ERTS_TW_ASSERT(tiw->nto > 0);
+ ERTS_TW_ASSERT(tiw->at_once.nto > 0);
+ tiw->yield_slot = ERTS_TW_SLOT_AT_ONCE;
+ ERTS_MSACC_POP_STATE_M_X();
+ return; /* Yield! */
+ }
+
+ tiw->sentinel.next = p->next;
+ p->next->prev = &tiw->sentinel;
+ }
+
+ }
+
+ if (tiw->pos >= bump_to) {
+ ERTS_MSACC_POP_STATE_M_X();
+ break;
+ }
+
+ if (tiw->nto == 0)
+ goto empty_wheel;
+
+ /*
+	     * Save slot counts in a bump operation local
+	     * array.
+	     *
+	     * The number of timers to trigger (or move)
+	     * will only decrease from now until we have
+	     * completed this bump operation (even if we
+	     * yield in the middle of it).
+	     *
+	     * The number of timers in the wheels may,
+	     * however, increase due to timers being set
+	     * by timeout callbacks.
+ */
+ sys_memcpy((void *) bump_scnt, (void *) scnt,
+ sizeof(Sint) * ERTS_TW_SCNT_SIZE);
+
+ if (tiw->soon.min_tpos > tiw->pos) {
+ ErtsMonotonicTime skip_until_pos = tiw->soon.min_tpos;
+
+ /*
+	     * No need to inspect slots where we know that no
+	     * timeouts to trigger reside.
+ */
+
+ if (skip_until_pos > bump_to)
+ skip_until_pos = bump_to;
+
+ skip_until_pos--;
+
+ if (skip_until_pos > tiw->pos) {
+ ERTS_TW_DBG_VERIFY_EMPTY_SOON_SLOTS(tiw, skip_until_pos);
+ tiw->pos = skip_until_pos;
+ }
+ }
+
+ {
+ ErtsMonotonicTime tmp_slots = bump_to - tiw->pos;
+ if (tmp_slots < ERTS_TW_SOON_WHEEL_SIZE)
+ slots = (int) tmp_slots;
+ else
+ slots = ERTS_TW_SOON_WHEEL_SIZE;
+ }
+
+ slot = soon_slot(tiw->pos+1);
+ tiw->pos = bump_to;
+
+ tiw->next_timeout_pos = bump_to;
+ tiw->next_timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(bump_to);
+
+ scnt_ix = scnt_get_ix(slot);
+
+ /* Timeout timers in soon wheel */
+ while (slots > 0) {
+
+ yield_count -= ERTS_TW_COST_SLOT;
+
+ p = tiw->w[slot];
+ if (p) {
+	        /* timeout callbacks need tiw->pos to be up to date */
+ if (p->next == p) {
+ ERTS_TW_ASSERT(tiw->sentinel.next == &tiw->sentinel);
+ ERTS_TW_ASSERT(tiw->sentinel.prev == &tiw->sentinel);
+ }
+ else {
+ tiw->sentinel.next = p->next;
+ tiw->sentinel.prev = p->prev;
+ tiw->sentinel.next->prev = &tiw->sentinel;
+ tiw->sentinel.prev->next = &tiw->sentinel;
+ }
+ tiw->w[slot] = NULL;
+
+ while (1) {
+
+ ERTS_TW_ASSERT(ERTS_TW_SOON_WHEEL_FIRST_SLOT <= p->slot
+ && p->slot < ERTS_TW_SOON_WHEEL_END_SLOT);
+ if (--tiw->soon.nto == 0)
+ tiw->soon.min_tpos = ERTS_MAX_CLKTCKS;
+ scnt_ix_dec(scnt, scnt_ix);
+ if (p->timeout_pos <= bump_to) {
+ timeout_timer(p);
+ tiw->nto--;
+ scnt_ix_dec(bump_scnt, scnt_ix);
+ yield_count -= ERTS_TW_COST_TIMEOUT;
+ }
+ else {
+ /* uncommon case */
+ insert_timer_into_slot(tiw, slot, p);
+ yield_count -= ERTS_TW_COST_SLOT_MOVE;
+ }
+
+ restart_yielded_soon_slot:
+
+ p = tiw->sentinel.next;
+ if (p == &tiw->sentinel) {
+ ERTS_TW_ASSERT(tiw->sentinel.prev == &tiw->sentinel);
+ break;
+ }
+
+ if (yield_count <= 0) {
+ tiw->yield_slot = slot;
+ tiw->yield_slots_left = slots;
+ ERTS_MSACC_POP_STATE_M_X();
+ return; /* Yield! */
+ }
+
+ tiw->sentinel.next = p->next;
+ p->next->prev = &tiw->sentinel;
+ }
+ }
+
+ scnt_soon_wheel_next(&slot, &slots, NULL, &scnt_ix, bump_scnt);
}
+
+ if (ERTS_TW_BUMP_LATER_WHEEL(tiw)) {
+ restart_yielded_later_slot:
+ if (bump_later_wheel(tiw, &yield_count))
+ return; /* Yield! */
+ }
}
- tiw_pos = (tiw_pos + 1) % TIW_SIZE;
- dtime--;
- }
- tiw_pos = keep_pos;
- if (tiw_min_ptr)
- tiw_min -= dt;
-
- erts_smp_mtx_unlock(&tiw_lock);
-
- /* Call timedout timers callbacks */
- while (timeout_head) {
- p = timeout_head;
- timeout_head = p->next;
- /* Here comes hairy use of the timer fields!
- * They are reset without having the lock.
- * It is assumed that no code but this will
- * accesses any field until the ->timeout
- * callback is called.
- */
- p->next = NULL;
- p->prev = NULL;
- p->slot = 0;
- (*p->timeout)(p->arg);
- }
+
+ } while (restarted);
+
+ tiw->true_next_timeout_time = 0;
+ ERTS_TW_ASSERT(tiw->next_timeout_pos == bump_to);
+
+ (void) find_next_timeout(NULL, tiw);
+ ERTS_MSACC_POP_STATE_M_X();
}
-void erts_bump_timer(erts_short_time_t dt) /* dt is value from do_time */
+static int
+bump_later_wheel(ErtsTimerWheel *tiw, int *ycount_p)
{
- erts_smp_mtx_lock(&tiw_lock);
- bump_timer_internal(dt);
+ ErtsMonotonicTime cpos = tiw->pos;
+ ErtsMonotonicTime later_pos = tiw->later.pos;
+ int ycount = *ycount_p;
+ int slots, fslot, scnt_ix;
+ Sint *scnt, *bump_scnt;
+
+ scnt = &tiw->scnt[0];
+ bump_scnt = &tiw->bump_scnt[0];
+
+ ERTS_HARD_DBG_CHK_WHEELS(tiw, 0);
+
+ if (tiw->yield_slot >= ERTS_TW_LATER_WHEEL_FIRST_SLOT) {
+ fslot = tiw->yield_slot;
+ scnt_ix = scnt_get_ix(fslot);
+ slots = tiw->yield_slots_left;
+ ASSERT(0 <= slots && slots <= ERTS_TW_LATER_WHEEL_SIZE);
+ tiw->yield_slot = ERTS_TW_SLOT_INACTIVE;
+ goto restart_yielded_slot;
+ }
+ else {
+ ErtsMonotonicTime end_later_pos, tmp_slots, min_tpos;
+
+ min_tpos = tiw->later.min_tpos & ERTS_TW_LATER_WHEEL_POS_MASK;
+ end_later_pos = cpos + ERTS_TW_SOON_WHEEL_SIZE;
+ end_later_pos &= ERTS_TW_LATER_WHEEL_POS_MASK;
+
+ /* Skip known empty slots... */
+ if (min_tpos > later_pos) {
+ if (min_tpos > end_later_pos) {
+ ERTS_TW_DBG_VERIFY_EMPTY_LATER_SLOTS(tiw, end_later_pos);
+ tiw->later.pos = end_later_pos;
+ goto done;
+ }
+ later_pos = min_tpos;
+ ERTS_TW_DBG_VERIFY_EMPTY_LATER_SLOTS(tiw, later_pos);
+ }
+
+ tmp_slots = end_later_pos;
+ tmp_slots -= later_pos;
+ tmp_slots /= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ if (tmp_slots < ERTS_TW_LATER_WHEEL_SIZE)
+ slots = (int) tmp_slots;
+ else
+ slots = ERTS_TW_LATER_WHEEL_SIZE;
+
+ fslot = later_slot(later_pos);
+ scnt_ix = scnt_get_ix(fslot);
+
+ tiw->later.pos = end_later_pos;
+ }
+
+ while (slots > 0) {
+ ErtsTWheelTimer *p;
+
+ ycount -= ERTS_TW_COST_SLOT;
+
+ p = tiw->w[fslot];
+
+ if (p) {
+
+ if (p->next == p) {
+ ERTS_TW_ASSERT(tiw->sentinel.next == &tiw->sentinel);
+ ERTS_TW_ASSERT(tiw->sentinel.prev == &tiw->sentinel);
+ }
+ else {
+ tiw->sentinel.next = p->next;
+ tiw->sentinel.prev = p->prev;
+ tiw->sentinel.next->prev = &tiw->sentinel;
+ tiw->sentinel.prev->next = &tiw->sentinel;
+ }
+ tiw->w[fslot] = NULL;
+
+ while (1) {
+ ErtsMonotonicTime tpos = p->timeout_pos;
+
+ ERTS_TW_ASSERT(p->slot == fslot);
+
+ if (--tiw->later.nto == 0) {
+ tiw->later.min_tpos = ERTS_MAX_CLKTCKS;
+ tiw->later.min_tpos_slot = ERTS_TW_LATER_WHEEL_END_SLOT;
+ }
+ scnt_ix_dec(scnt, scnt_ix);
+
+ if (tpos >= tiw->later.pos + ERTS_TW_LATER_WHEEL_SLOT_SIZE) {
+ /* keep in later slot; very uncommon... */
+ insert_timer_into_slot(tiw, fslot, p);
+ ycount -= ERTS_TW_COST_SLOT_MOVE;
+ }
+ else {
+ scnt_ix_dec(bump_scnt, scnt_ix);
+ ERTS_TW_ASSERT(tpos < cpos + ERTS_TW_SOON_WHEEL_SIZE);
+ if (tpos > cpos) {
+ /* move into soon wheel */
+ insert_timer_into_slot(tiw, soon_slot(tpos), p);
+ ycount -= ERTS_TW_COST_SLOT_MOVE;
+ }
+ else {
+ /* trigger at once */
+ timeout_timer(p);
+ tiw->nto--;
+ ycount -= ERTS_TW_COST_TIMEOUT;
+ }
+ }
+
+ restart_yielded_slot:
+
+ p = tiw->sentinel.next;
+ if (p == &tiw->sentinel) {
+ ERTS_TW_ASSERT(tiw->sentinel.prev == &tiw->sentinel);
+ break;
+ }
+
+ if (ycount < 0) {
+ tiw->yield_slot = fslot;
+ tiw->yield_slots_left = slots;
+ *ycount_p = 0;
+ ERTS_HARD_DBG_CHK_WHEELS(tiw, 0);
+ return 1; /* Yield! */
+ }
+
+ tiw->sentinel.next = p->next;
+ p->next->prev = &tiw->sentinel;
+ }
+ }
+
+ scnt_later_wheel_next(&fslot, &slots, NULL, &scnt_ix, bump_scnt);
+ }
+
+done:
+
+ ERTS_HARD_DBG_CHK_WHEELS(tiw, 0);
+
+ *ycount_p = ycount;
+
+ return 0;
}
Uint
erts_timer_wheel_memory_size(void)
{
- return (Uint) TIW_SIZE * sizeof(ErlTimer*);
+ return sizeof(ErtsTimerWheel)*erts_no_schedulers;
+}
+
+ErtsTimerWheel *
+erts_create_timer_wheel(ErtsSchedulerData *esdp)
+{
+ ErtsMonotonicTime mtime;
+ int i;
+ ErtsTimerWheel *tiw;
+
+ /* Some compile time sanity checks... */
+
+ /* Slots... */
+ ERTS_CT_ASSERT(ERTS_TW_SLOT_AT_ONCE == -1);
+ ERTS_CT_ASSERT(ERTS_TW_SLOT_INACTIVE < ERTS_TW_SLOT_AT_ONCE);
+ ERTS_CT_ASSERT(ERTS_TW_SLOT_AT_ONCE + 1 == ERTS_TW_SOON_WHEEL_FIRST_SLOT);
+ ERTS_CT_ASSERT(ERTS_TW_SOON_WHEEL_FIRST_SLOT < ERTS_TW_SOON_WHEEL_END_SLOT);
+ ERTS_CT_ASSERT(ERTS_TW_SOON_WHEEL_END_SLOT == ERTS_TW_LATER_WHEEL_FIRST_SLOT);
+ ERTS_CT_ASSERT(ERTS_TW_LATER_WHEEL_FIRST_SLOT < ERTS_TW_LATER_WHEEL_END_SLOT);
+
+    /* Both wheel sizes should be powers of 2 */
+ ERTS_CT_ASSERT(ERTS_TW_SOON_WHEEL_SIZE
+ && !(ERTS_TW_SOON_WHEEL_SIZE & (ERTS_TW_SOON_WHEEL_SIZE-1)));
+ ERTS_CT_ASSERT(ERTS_TW_LATER_WHEEL_SIZE
+ && !(ERTS_TW_LATER_WHEEL_SIZE & (ERTS_TW_LATER_WHEEL_SIZE-1)));
+
+ tiw = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_TIMER_WHEEL,
+ sizeof(ErtsTimerWheel));
+ tiw->w = &tiw->slots[1];
+ for(i = ERTS_TW_SLOT_AT_ONCE; i < ERTS_TW_LATER_WHEEL_END_SLOT; i++)
+ tiw->w[i] = NULL;
+
+ for (i = 0; i < ERTS_TW_SCNT_SIZE; i++)
+ tiw->scnt[i] = 0;
+
+ mtime = erts_get_monotonic_time(esdp);
+ tiw->pos = ERTS_MONOTONIC_TO_CLKTCKS(mtime);
+ tiw->nto = 0;
+ tiw->at_once.nto = 0;
+ tiw->soon.min_tpos = ERTS_MAX_CLKTCKS;
+ tiw->soon.nto = 0;
+ tiw->later.min_tpos = ERTS_MAX_CLKTCKS;
+ tiw->later.min_tpos_slot = ERTS_TW_LATER_WHEEL_END_SLOT;
+ tiw->later.pos = tiw->pos + ERTS_TW_SOON_WHEEL_SIZE;
+ tiw->later.pos &= ERTS_TW_LATER_WHEEL_POS_MASK;
+ tiw->later.nto = 0;
+ tiw->yield_slot = ERTS_TW_SLOT_INACTIVE;
+ tiw->true_next_timeout_time = 0;
+ tiw->next_timeout_pos = tiw->pos + ERTS_CLKTCKS_WEEK;
+ tiw->next_timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(tiw->next_timeout_pos);
+ tiw->sentinel.next = &tiw->sentinel;
+ tiw->sentinel.prev = &tiw->sentinel;
+ tiw->sentinel.timeout = NULL;
+ tiw->sentinel.arg = NULL;
+ return tiw;
}
+ErtsNextTimeoutRef
+erts_get_next_timeout_reference(ErtsTimerWheel *tiw)
+{
+ return (ErtsNextTimeoutRef) &tiw->next_timeout_time;
+}
+
+
/* this routine links the time cells into a free list at the start
and sets the time queue as empty */
void
-erts_init_time(void)
+erts_init_time(int time_correction, ErtsTimeWarpMode time_warp_mode)
{
- int i, itime;
+ int itime;
/* system dependent init; must be done before do_time_init()
if timer thread is enabled */
- itime = erts_init_time_sup();
+ itime = erts_init_time_sup(time_correction, time_warp_mode);
#ifdef TIW_ITIME_IS_CONSTANT
if (itime != TIW_ITIME) {
- erl_exit(ERTS_ABORT_EXIT, "timer resolution mismatch %d != %d", itime, TIW_ITIME);
+ erts_exit(ERTS_ABORT_EXIT, "timer resolution mismatch %d != %d", itime, TIW_ITIME);
}
#else
tiw_itime = itime;
#endif
-
- erts_smp_mtx_init(&tiw_lock, "timer_wheel");
-
- tiw = (ErlTimer**) erts_alloc(ERTS_ALC_T_TIMER_WHEEL,
- TIW_SIZE * sizeof(ErlTimer*));
- for(i = 0; i < TIW_SIZE; i++)
- tiw[i] = NULL;
- do_time_init();
- tiw_pos = tiw_nto = 0;
- tiw_min_ptr = NULL;
- tiw_min = 0;
}
+void
+erts_twheel_set_timer(ErtsTimerWheel *tiw,
+ ErtsTWheelTimer *p, ErlTimeoutProc timeout,
+ void *arg, ErtsMonotonicTime timeout_pos)
+{
+ int slot;
+ ERTS_MSACC_PUSH_AND_SET_STATE_M_X(ERTS_MSACC_STATE_TIMERS);
+ p->timeout = timeout;
+ p->arg = arg;
+ ERTS_TW_ASSERT(p->slot == ERTS_TW_SLOT_INACTIVE);
-/*
-** Insert a process into the time queue, with a timeout 't'
-*/
-static void
-insert_timer(ErlTimer* p, Uint t)
-{
- Uint tm;
- Uint64 ticks;
-
- /* The current slot (tiw_pos) in timing wheel is the next slot to be
- * be processed. Hence no extra time tick is needed.
- *
- * (x + y - 1)/y is precisely the "number of bins" formula.
- */
- ticks = (t + (TIW_ITIME - 1)) / TIW_ITIME;
+ tiw->nto++;
- /*
- * Ticks must be a Uint64, or the addition may overflow here,
- * resulting in an incorrect value for p->count below.
- */
- ticks += do_time_update(); /* Add backlog of unprocessed time */
-
/* calculate slot */
- tm = (ticks + tiw_pos) % TIW_SIZE;
- p->slot = (Uint) tm;
- p->count = (Uint) (ticks / TIW_SIZE);
-
- /* insert at head of list at slot */
- p->next = tiw[tm];
- p->prev = NULL;
- if (p->next != NULL)
- p->next->prev = p;
- tiw[tm] = p;
-
-
- /* insert min time */
- if ((tiw_nto == 0) || ((tiw_min_ptr != NULL) && (ticks < tiw_min))) {
- tiw_min = ticks;
- tiw_min_ptr = p;
+ if (timeout_pos <= tiw->pos) {
+ /* at once */
+ p->timeout_pos = timeout_pos = tiw->pos;
+ slot = ERTS_TW_SLOT_AT_ONCE;
}
- if ((tiw_min_ptr == p) && (ticks > tiw_min)) {
- /* some other timer might be 'min' now */
- tiw_min = 0;
- tiw_min_ptr = NULL;
+ else if (timeout_pos < tiw->pos + ERTS_TW_SOON_WHEEL_SIZE) {
+ /* soon wheel */
+ p->timeout_pos = timeout_pos;
+ slot = soon_slot(timeout_pos);
+ if (tiw->soon.min_tpos > timeout_pos)
+ tiw->soon.min_tpos = timeout_pos;
+ }
+ else {
+ /* later wheel */
+ p->timeout_pos = timeout_pos;
+ slot = later_slot(timeout_pos);
+
+ /*
+	 * The next timeout due to this timer
+	 * should occur in good time before the
+	 * actual timeout (one later wheel slot
+	 * size earlier), in order to move it
+	 * from the later wheel to the soon
+	 * wheel.
+ */
+ timeout_pos &= ERTS_TW_LATER_WHEEL_POS_MASK;
+ timeout_pos -= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
}
- tiw_nto++;
+ insert_timer_into_slot(tiw, slot, p);
+
+ if (timeout_pos <= tiw->next_timeout_pos) {
+ tiw->true_next_timeout_time = 1;
+ if (timeout_pos < tiw->next_timeout_pos) {
+ tiw->next_timeout_pos = timeout_pos;
+ tiw->next_timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(timeout_pos);
+ }
+ }
+ ERTS_MSACC_POP_STATE_M_X();
}
void
-erts_set_timer(ErlTimer* p, ErlTimeoutProc timeout, ErlCancelProc cancel,
- void* arg, Uint t)
+erts_twheel_cancel_timer(ErtsTimerWheel *tiw, ErtsTWheelTimer *p)
{
-
- erts_deliver_time();
- erts_smp_mtx_lock(&tiw_lock);
- if (p->active) { /* XXX assert ? */
- erts_smp_mtx_unlock(&tiw_lock);
- return;
+ if (p->slot != ERTS_TW_SLOT_INACTIVE) {
+ ERTS_MSACC_PUSH_AND_SET_STATE_M_X(ERTS_MSACC_STATE_TIMERS);
+ remove_timer(tiw, p);
+ tiw->nto--;
+ ERTS_MSACC_POP_STATE_M_X();
}
- p->timeout = timeout;
- p->cancel = cancel;
- p->arg = arg;
- p->active = 1;
- insert_timer(p, t);
- erts_smp_mtx_unlock(&tiw_lock);
-#if defined(ERTS_SMP)
- if (t <= (Uint) ERTS_SHORT_TIME_T_MAX)
- erts_sys_schedule_interrupt_timed(1, (erts_short_time_t) t);
-#endif
}
void
-erts_cancel_timer(ErlTimer* p)
+erts_twheel_debug_foreach(ErtsTimerWheel *tiw,
+ void (*tclbk)(void *),
+ void (*func)(void *,
+ ErtsMonotonicTime,
+ void *),
+ void *arg)
{
- erts_smp_mtx_lock(&tiw_lock);
- if (!p->active) { /* allow repeated cancel (drivers) */
- erts_smp_mtx_unlock(&tiw_lock);
- return;
- }
+ ErtsTWheelTimer *tmr;
+ int ix;
- /* is it the 'min' timer, remove min */
- if (p == tiw_min_ptr) {
- tiw_min_ptr = NULL;
- tiw_min = 0;
+ tmr = tiw->sentinel.next;
+ while (tmr != &tiw->sentinel) {
+ if (tmr->timeout == tclbk)
+ (*func)(arg, tmr->timeout_pos, tmr->arg);
+ tmr = tmr->next;
}
- remove_timer(p);
- p->slot = p->count = 0;
-
- if (p->cancel != NULL) {
- erts_smp_mtx_unlock(&tiw_lock);
- (*p->cancel)(p->arg);
- return;
+ for (ix = ERTS_TW_SLOT_AT_ONCE; ix < ERTS_TW_LATER_WHEEL_END_SLOT; ix++) {
+ tmr = tiw->w[ix];
+ if (tmr) {
+ do {
+ if (tmr->timeout == tclbk)
+ (*func)(arg, tmr->timeout_pos, tmr->arg);
+ tmr = tmr->next;
+ } while (tmr != tiw->w[ix]);
+ }
}
- erts_smp_mtx_unlock(&tiw_lock);
}
-/*
- Returns the amount of time left in ms until the timer 'p' is triggered.
- 0 is returned if 'p' isn't active.
- 0 is returned also if the timer is overdue (i.e., would have triggered
- immediately if it hadn't been cancelled).
-*/
-Uint
-erts_time_left(ErlTimer *p)
+#ifdef ERTS_TW_DEBUG
+
+void
+dbg_verify_empty_soon_slots(ErtsTimerWheel *tiw, ErtsMonotonicTime to_pos)
{
- Uint left;
- erts_short_time_t dt;
+ int ix;
+ ErtsMonotonicTime tmp;
- erts_smp_mtx_lock(&tiw_lock);
+ ix = soon_slot(tiw->pos);
+ tmp = to_pos;
+ if (tmp > tiw->pos) {
+ int slots;
+ tmp -= tiw->pos;
+ ERTS_TW_ASSERT(tmp > 0);
+ if (tmp < (ErtsMonotonicTime) ERTS_TW_SOON_WHEEL_SIZE)
+ slots = (int) tmp;
+ else
+ slots = ERTS_TW_SOON_WHEEL_SIZE;
- if (!p->active) {
- erts_smp_mtx_unlock(&tiw_lock);
- return 0;
- }
+ while (slots > 0) {
+ ERTS_TW_ASSERT(!tiw->w[ix]);
+ ix++;
+ if (ix == ERTS_TW_SOON_WHEEL_END_SLOT)
+ ix = ERTS_TW_SOON_WHEEL_FIRST_SLOT;
+ slots--;
+ }
+ }
+}
+
+void
+dbg_verify_empty_later_slots(ErtsTimerWheel *tiw, ErtsMonotonicTime to_pos)
+{
+ int ix;
+ ErtsMonotonicTime tmp;
+
+ ix = later_slot(tiw->later.pos);
+ tmp = to_pos;
+ tmp &= ERTS_TW_LATER_WHEEL_POS_MASK;
+ if (tmp > tiw->later.pos) {
+ ErtsMonotonicTime pos_min;
+ int slots;
+ tmp -= tiw->later.pos;
+ tmp /= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ ERTS_TW_ASSERT(tmp > 0);
- if (p->slot < tiw_pos)
- left = (p->count + 1) * TIW_SIZE + p->slot - tiw_pos;
- else
- left = p->count * TIW_SIZE + p->slot - tiw_pos;
- dt = do_time_read();
- if (left < dt)
- left = 0;
- else
- left -= dt;
+ pos_min = tiw->later.pos;
- erts_smp_mtx_unlock(&tiw_lock);
+ if (tmp < (ErtsMonotonicTime) ERTS_TW_LATER_WHEEL_SIZE)
+ slots = (int) tmp;
+ else {
+ pos_min += ((tmp / ERTS_TW_LATER_WHEEL_SIZE)
+ * ERTS_TW_LATER_WHEEL_SLOT_SIZE);
+ slots = ERTS_TW_LATER_WHEEL_SIZE;
+ }
- return (Uint) left * TIW_ITIME;
+ while (slots > 0) {
+ ErtsTWheelTimer *tmr = tiw->w[ix];
+ pos_min += ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ if (tmr) {
+ ErtsTWheelTimer *end = tmr;
+ do {
+ ERTS_TW_ASSERT(tmr->timeout_pos >= pos_min);
+ tmr = tmr->next;
+ } while (tmr != end);
+ }
+ ix++;
+ if (ix == ERTS_TW_LATER_WHEEL_END_SLOT)
+ ix = ERTS_TW_LATER_WHEEL_FIRST_SLOT;
+ slots--;
+ }
+ }
}
-#ifdef DEBUG
-void erts_p_slpq(void)
+#endif /* ERTS_TW_DEBUG */
+
+#ifdef ERTS_TW_HARD_DEBUG
+
+static void
+hrd_dbg_check_wheels(ErtsTimerWheel *tiw, int check_min_tpos)
{
- int i;
- ErlTimer* p;
-
- erts_smp_mtx_lock(&tiw_lock);
-
- /* print the whole wheel, starting at the current position */
- erts_printf("\ntiw_pos = %d tiw_nto %d\n", tiw_pos, tiw_nto);
- i = tiw_pos;
- if (tiw[i] != NULL) {
- erts_printf("%d:\n", i);
- for(p = tiw[i]; p != NULL; p = p->next) {
- erts_printf(" (count %d, slot %d)\n",
- p->count, p->slot);
- }
+ int ix, six, soon_tmo, later_tmo, at_once_tmo,
+ scnt_slot, scnt_slots, scnt_six;
+ ErtsMonotonicTime min_tpos;
+ Sint scnt[ERTS_TW_SCNT_SIZE];
+ ErtsTWheelTimer *p;
+
+ for (six = 0; six < ERTS_TW_SCNT_SIZE; six++)
+ scnt[six] = 0;
+
+ min_tpos = ERTS_MONOTONIC_TO_CLKTCKS(tiw->next_timeout_time);
+
+ at_once_tmo = 0;
+ p = tiw->w[ERTS_TW_SLOT_AT_ONCE];
+ if (p) {
+ ErtsTWheelTimer *first = p;
+ do {
+ at_once_tmo++;
+ ERTS_TW_ASSERT(p->slot == ERTS_TW_SLOT_AT_ONCE);
+ ERTS_TW_ASSERT(p->timeout_pos <= tiw->pos);
+ ERTS_TW_ASSERT(!check_min_tpos || tiw->pos >= min_tpos);
+ ERTS_TW_ASSERT(p->next->prev == p);
+ p = p->next;
+ } while (p != first);
}
- for(i = (i+1)%TIW_SIZE; i != tiw_pos; i = (i+1)%TIW_SIZE) {
- if (tiw[i] != NULL) {
- erts_printf("%d:\n", i);
- for(p = tiw[i]; p != NULL; p = p->next) {
- erts_printf(" (count %d, slot %d)\n",
- p->count, p->slot);
- }
- }
+
+ soon_tmo = 0;
+ scnt_slot = ERTS_TW_SOON_WHEEL_END_SLOT-1;
+ scnt_slots = ERTS_TW_SOON_WHEEL_SIZE;
+ scnt_six = 0;
+ scnt_soon_wheel_next(&scnt_slot, &scnt_slots,
+ NULL, &scnt_six, tiw->scnt);
+ for (ix = ERTS_TW_SOON_WHEEL_FIRST_SLOT;
+ ix < ERTS_TW_SOON_WHEEL_END_SLOT;
+ ix++) {
+ p = tiw->w[ix];
+ six = scnt_get_ix(ix);
+ ERTS_TW_ASSERT(!p || six == scnt_six);
+ if (p) {
+ ErtsTWheelTimer *first = p;
+ do {
+ ErtsMonotonicTime tpos = p->timeout_pos;
+ soon_tmo++;
+ scnt_ix_inc(scnt, six);
+ ERTS_TW_ASSERT(p->slot == ix);
+ ERTS_TW_ASSERT(ix == soon_slot(tpos));
+ ERTS_TW_ASSERT(p->timeout_pos < tiw->pos + ERTS_TW_SOON_WHEEL_SIZE);
+ ERTS_TW_ASSERT(!check_min_tpos || tpos >= min_tpos);
+ ERTS_TW_ASSERT(p->next->prev == p);
+ p = p->next;
+ } while (p != first);
+ }
+ if (ix == scnt_slot)
+ scnt_soon_wheel_next(&scnt_slot, &scnt_slots,
+ NULL, &scnt_six, tiw->scnt);
}
- erts_smp_mtx_unlock(&tiw_lock);
+ later_tmo = 0;
+ scnt_slot = ERTS_TW_SOON_WHEEL_END_SLOT-1;
+ scnt_slots = ERTS_TW_SOON_WHEEL_SIZE;
+ scnt_six = 0;
+ scnt_later_wheel_next(&scnt_slot, &scnt_slots,
+ NULL, &scnt_six, tiw->scnt);
+ for (ix = ERTS_TW_LATER_WHEEL_FIRST_SLOT;
+ ix < ERTS_TW_LATER_WHEEL_END_SLOT;
+ ix++) {
+ p = tiw->w[ix];
+ six = scnt_get_ix(ix);
+ ERTS_TW_ASSERT(!p || six == scnt_six);
+ if (p) {
+ ErtsTWheelTimer *first = p;
+ six = scnt_get_ix(ix);
+ do {
+ ErtsMonotonicTime tpos = p->timeout_pos;
+ later_tmo++;
+ scnt_ix_inc(scnt, six);
+ ERTS_TW_ASSERT(p->slot == ix);
+ ERTS_TW_ASSERT(later_slot(tpos) == ix);
+ tpos &= ERTS_TW_LATER_WHEEL_POS_MASK;
+ tpos -= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ ERTS_TW_ASSERT(!check_min_tpos || tpos >= min_tpos);
+ ERTS_TW_ASSERT(p->next->prev == p);
+ p = p->next;
+ } while (p != first);
+ }
+ if (ix == scnt_slot)
+ scnt_later_wheel_next(&scnt_slot, &scnt_slots,
+ NULL, &scnt_six, tiw->scnt);
+ }
+
+ if (tiw->yield_slot != ERTS_TW_SLOT_INACTIVE) {
+ p = tiw->sentinel.next;
+ ix = tiw->yield_slot;
+ while (p != &tiw->sentinel) {
+ ErtsMonotonicTime tpos = p->timeout_pos;
+ ERTS_TW_ASSERT(ix == p->slot);
+ if (ix == ERTS_TW_SLOT_AT_ONCE)
+ at_once_tmo++;
+ else {
+ scnt_inc(scnt, ix);
+ if (ix >= ERTS_TW_LATER_WHEEL_FIRST_SLOT) {
+ later_tmo++;
+ ERTS_TW_ASSERT(ix == later_slot(tpos));
+ }
+ else {
+ soon_tmo++;
+ ERTS_TW_ASSERT(ix == (tpos & ERTS_TW_SOON_WHEEL_MASK));
+ ERTS_TW_ASSERT(tpos < tiw->pos + ERTS_TW_SOON_WHEEL_SIZE);
+ }
+            }
+            p = p->next; /* advance for at-once entries too; otherwise the walk never terminates */
+        }
+ }
+
+ ERTS_TW_ASSERT(tiw->at_once.nto == at_once_tmo);
+ ERTS_TW_ASSERT(tiw->soon.nto == soon_tmo);
+ ERTS_TW_ASSERT(tiw->later.nto == later_tmo);
+ ERTS_TW_ASSERT(tiw->nto == soon_tmo + later_tmo + at_once_tmo);
+
+ for (six = 0; six < ERTS_TW_SCNT_SIZE; six++)
+ ERTS_TW_ASSERT(scnt[six] == tiw->scnt[six]);
}
-#endif /* DEBUG */
+
+#endif /* ERTS_TW_HARD_DEBUG */
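The hard-debug pass above re-counts every timer by walking each slot's circular doubly-linked list. A minimal, self-contained sketch of that link-invariant walk, with illustrative names rather than the ERTS types:

#include <assert.h>

struct tmr {
    struct tmr *next;
    struct tmr *prev;
    int slot;
};

/* Count the timers in one slot while checking the circular-list invariant. */
static int count_slot(struct tmr *head, int slot_ix)
{
    int n = 0;
    if (head) {
        struct tmr *p = head;
        do {
            assert(p->slot == slot_ix);   /* timer recorded in its own slot */
            assert(p->next->prev == p);   /* forward and backward links agree */
            n++;
            p = p->next;
        } while (p != head);              /* circular: we must come back to head */
    }
    return n;
}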
diff --git a/erts/emulator/beam/trace_instrs.tab b/erts/emulator/beam/trace_instrs.tab
new file mode 100644
index 0000000000..3eee81c053
--- /dev/null
+++ b/erts/emulator/beam/trace_instrs.tab
@@ -0,0 +1,168 @@
+// -*- c -*-
+//
+// %CopyrightBegin%
+//
+// Copyright Ericsson AB 2017. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// %CopyrightEnd%
+//
+
+return_trace() {
+ ErtsCodeMFA* mfa = (ErtsCodeMFA *)(E[0]);
+
+ SWAPOUT; /* Needed for shared heap */
+ ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
+ erts_trace_return(c_p, mfa, r(0), ERTS_TRACER_FROM_ETERM(E+1)/* tracer */);
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
+ SWAPIN;
+ c_p->cp = NULL;
+ SET_I((BeamInstr *) cp_val(E[2]));
+ E += 3;
+ Goto(*I);
+ //| -no_next
+}
+
+i_generic_breakpoint() {
+ BeamInstr real_I;
+ HEAVY_SWAPOUT;
+ real_I = erts_generic_breakpoint(c_p, erts_code_to_codeinfo(I), reg);
+ HEAVY_SWAPIN;
+ ASSERT(VALID_INSTR(real_I));
+ Goto(real_I);
+ //| -no_next
+}
+
+i_return_time_trace() {
+ BeamInstr *pc = (BeamInstr *) (UWord) E[0];
+ SWAPOUT;
+ erts_trace_time_return(c_p, erts_code_to_codeinfo(pc));
+ SWAPIN;
+ c_p->cp = NULL;
+ SET_I((BeamInstr *) cp_val(E[1]));
+ E += 2;
+ Goto(*I);
+ //| -no_next
+}
+
+i_return_to_trace() {
+ if (IS_TRACED_FL(c_p, F_TRACE_RETURN_TO)) {
+ Uint *cpp = (Uint*) E;
+ for(;;) {
+ ASSERT(is_CP(*cpp));
+ if (IsOpCode(*cp_val(*cpp), return_trace)) {
+ do
+ ++cpp;
+ while (is_not_CP(*cpp));
+ cpp += 2;
+ } else if (IsOpCode(*cp_val(*cpp), i_return_to_trace)) {
+ do
+ ++cpp;
+ while (is_not_CP(*cpp));
+ } else {
+ break;
+ }
+ }
+ SWAPOUT; /* Needed for shared heap */
+ ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
+ erts_trace_return_to(c_p, cp_val(*cpp));
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
+ SWAPIN;
+ }
+ c_p->cp = NULL;
+ SET_I((BeamInstr *) cp_val(E[0]));
+ E += 1;
+ Goto(*I);
+ //| -no_next
+}
+
+i_yield() {
+ /* This is safe as long as REDS_IN(c_p) is never stored
+ * in c_p->arg_reg[0]. It is currently stored in c_p->def_arg_reg[5].
+ */
+ c_p->arg_reg[0] = am_true;
+ c_p->arity = 1; /* One living register (the 'true' return value) */
+ SWAPOUT;
+ $SET_CP_I_ABS($NEXT_INSTRUCTION);
+ c_p->current = NULL;
+ goto do_schedule;
+ //| -no_next
+}
+
+i_hibernate() {
+ HEAVY_SWAPOUT;
+ if (erts_hibernate(c_p, reg)) {
+ FCALLS = c_p->fcalls;
+ c_p->flags &= ~F_HIBERNATE_SCHED;
+ goto do_schedule;
+ } else {
+ HEAVY_SWAPIN;
+ I = handle_error(c_p, I, reg, &bif_export[BIF_hibernate_3]->info.mfa);
+ goto post_error_handling;
+ }
+ //| -no_next
+}
+
+// This is optimised as an instruction because
+// it has to be very fast.
+
+i_perf_counter() {
+ ErtsSysPerfCounter ts;
+
+ ts = erts_sys_perf_counter();
+ if (IS_SSMALL(ts)) {
+ r(0) = make_small((Sint)ts);
+ } else {
+ $GC_TEST(0, ERTS_SINT64_HEAP_SIZE(ts), 0);
+ r(0) = make_big(HTOP);
+#if defined(ARCH_32)
+ if (ts >= (((Uint64) 1) << 32)) {
+ *HTOP = make_pos_bignum_header(2);
+ BIG_DIGIT(HTOP, 0) = (Uint) (ts & ((Uint) 0xffffffff));
+ BIG_DIGIT(HTOP, 1) = (Uint) ((ts >> 32) & ((Uint) 0xffffffff));
+ HTOP += 3;
+ }
+ else
+#endif
+ {
+ *HTOP = make_pos_bignum_header(1);
+ BIG_DIGIT(HTOP, 0) = (Uint) ts;
+ HTOP += 2;
+ }
+ }
+}
+
+i_debug_breakpoint() {
+ HEAVY_SWAPOUT;
+ I = call_error_handler(c_p, erts_code_to_codemfa(I), reg, am_breakpoint);
+ HEAVY_SWAPIN;
+ if (I) {
+ Goto(*I);
+ }
+ goto handle_error;
+ //| -no_next
+}
+
+
+
+//
+// Special jump instruction used for tracing. Takes an absolute
+// failure address.
+//
+
+trace_jump(Fail) {
+ //| -no_next
+ SET_I((BeamInstr *) $Fail);
+ Goto(*I);
+}
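The i_perf_counter instruction above stores the 64-bit counter as a small if it fits, and otherwise builds a one-digit or (on 32-bit architectures) two-digit bignum. A sketch of just the digit split, in plain C with stand-in storage rather than the ERTS heap layout:

#include <stdint.h>

/* Split a 64-bit counter into 32-bit bignum digits, least significant
 * first; returns the digit count, mirroring the ARCH_32 branch above. */
static int counter_digits(uint64_t ts, uint32_t d[2])
{
    d[0] = (uint32_t)(ts & 0xffffffffu);
    if (ts >> 32) {
        d[1] = (uint32_t)(ts >> 32);
        return 2;
    }
    return 1;
}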
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 738f793020..993585be10 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2014. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/
@@ -41,13 +42,23 @@
#include "dist.h"
#include "erl_printf.h"
#include "erl_threads.h"
-#include "erl_smp.h"
+#include "erl_lock_count.h"
#include "erl_time.h"
#include "erl_thr_progress.h"
#include "erl_thr_queue.h"
#include "erl_sched_spec_pre_alloc.h"
#include "beam_bp.h"
#include "erl_ptab.h"
+#include "erl_check_io.h"
+#include "erl_bif_unique.h"
+#include "erl_io_queue.h"
+#define ERTS_WANT_TIMER_WHEEL_API
+#include "erl_time.h"
+#ifdef HIPE
+# include "hipe_mode_switch.h"
+#endif
+#define ERTS_WANT_NFUNC_SCHED_INTERNALS__
+#include "erl_nfunc_sched.h"
#undef M_TRIM_THRESHOLD
#undef M_TOP_PAD
@@ -63,46 +74,10 @@
#define HAVE_MALLOPT 0
#endif
-/* profile_scheduler mini message queue */
-
-typedef struct {
- Uint scheduler_id;
- Uint no_schedulers;
- Uint Ms;
- Uint s;
- Uint us;
- Eterm state;
-} profile_sched_msg;
-
-typedef struct {
- profile_sched_msg msg[2];
- Uint n;
-} profile_sched_msg_q;
-
-#ifdef ERTS_SMP
-
-#if 0 /* Unused */
-static void
-dispatch_profile_msg_q(profile_sched_msg_q *psmq)
-{
- int i = 0;
- profile_sched_msg *msg = NULL;
- ASSERT(psmq != NULL);
- for (i = 0; i < psmq->n; i++) {
- msg = &(psmq->msg[i]);
- profile_scheduler_q(make_small(msg->scheduler_id), msg->state, am_undefined, msg->Ms, msg->s, msg->us);
- }
-}
-#endif
-
-#endif
-
-
Eterm*
erts_heap_alloc(Process* p, Uint need, Uint xtra)
{
ErlHeapFragment* bp;
- Eterm* htop;
Uint n;
#if defined(DEBUG) || defined(CHECK_FOR_HOLES)
Uint i;
@@ -113,7 +88,7 @@ erts_heap_alloc(Process* p, Uint need, Uint xtra)
&& HEAP_TOP(p) >= p->space_verified_from
&& HEAP_TOP(p) + need <= p->space_verified_from + p->space_verified
&& HEAP_LIMIT(p) - HEAP_TOP(p) >= need) {
-
+
Uint consumed = need + (HEAP_TOP(p) - p->space_verified_from);
ASSERT(consumed <= p->space_verified);
p->space_verified -= consumed;
@@ -130,6 +105,7 @@ erts_heap_alloc(Process* p, Uint need, Uint xtra)
if (bp != NULL && need <= (bp->alloc_size - bp->used_size)) {
Eterm* ret = bp->mem + bp->used_size;
bp->used_size += need;
+ p->mbuf_sz += need;
return ret;
}
#ifdef DEBUG
@@ -148,21 +124,11 @@ erts_heap_alloc(Process* p, Uint need, Uint xtra)
n--;
#endif
- /*
- * When we have created a heap fragment, we are no longer allowed
- * to store anything more on the heap.
- */
- htop = HEAP_TOP(p);
- if (htop < HEAP_LIMIT(p)) {
- *htop = make_pos_bignum_header(HEAP_LIMIT(p)-htop-1);
- HEAP_TOP(p) = HEAP_LIMIT(p);
- }
-
bp->next = MBUF(p);
MBUF(p) = bp;
bp->alloc_size = n;
bp->used_size = need;
- MBUF_SIZE(p) += n;
+ MBUF_SIZE(p) += need;
bp->off_heap.first = NULL;
bp->off_heap.overhead = 0;
return bp->mem;
@@ -186,12 +152,18 @@ erts_set_hole_marker(Eterm* ptr, Uint sz)
* Helper function for the ESTACK macros defined in global.h.
*/
void
-erl_grow_estack(ErtsEStack* s, Eterm* default_estack)
+erl_grow_estack(ErtsEStack* s, Uint need)
{
Uint old_size = (s->end - s->start);
- Uint new_size = old_size * 2;
+ Uint new_size;
Uint sp_offs = s->sp - s->start;
- if (s->start != default_estack) {
+
+ if (need < old_size)
+ new_size = 2*old_size;
+ else
+ new_size = ((need / old_size) + 2) * old_size;
+
+ if (s->start != s->edefault) {
s->start = erts_realloc(s->alloc_type, s->start,
new_size*sizeof(Eterm));
} else {
@@ -206,12 +178,18 @@ erl_grow_estack(ErtsEStack* s, Eterm* default_estack)
* Helper function for the WSTACK macros defined in global.h.
*/
void
-erl_grow_wstack(ErtsWStack* s, UWord* default_wstack)
+erl_grow_wstack(ErtsWStack* s, Uint need)
{
Uint old_size = (s->wend - s->wstart);
- Uint new_size = old_size * 2;
+ Uint new_size;
Uint sp_offs = s->wsp - s->wstart;
- if (s->wstart != default_wstack) {
+
+ if (need < old_size)
+ new_size = 2 * old_size;
+ else
+ new_size = ((need / old_size) + 2) * old_size;
+
+ if (s->wstart != s->wdefault) {
s->wstart = erts_realloc(s->alloc_type, s->wstart,
new_size*sizeof(UWord));
} else {
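erl_grow_estack and erl_grow_wstack now share the same sizing rule: plain doubling when the request already fits, otherwise the request rounded up to a multiple of the old size with one extra chunk of headroom. The rule extracted for illustration:

#include <stddef.h>

static size_t grow_size(size_t old_size, size_t need)
{
    if (need < old_size)
        return 2 * old_size;                    /* common case: just double */
    return ((need / old_size) + 2) * old_size;  /* large request: round up + slack */
}

/* e.g. grow_size(128, 300) == (300/128 + 2) * 128 == 4 * 128 == 512 */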
@@ -223,6 +201,55 @@ erl_grow_wstack(ErtsWStack* s, UWord* default_wstack)
s->wsp = s->wstart + sp_offs;
}
+/*
+ * Helper function for the PSTACK macros defined in global.h.
+ */
+void
+erl_grow_pstack(ErtsPStack* s, void* default_pstack, unsigned need_bytes)
+{
+ Uint old_size = s->size;
+ Uint new_size;
+
+ if (need_bytes < old_size)
+ new_size = 2 * old_size;
+ else
+ new_size = ((need_bytes / old_size) + 2) * old_size;
+
+ if (s->pstart != default_pstack) {
+ s->pstart = erts_realloc(s->alloc_type, s->pstart, new_size);
+ } else {
+ byte* new_ptr = erts_alloc(s->alloc_type, new_size);
+ sys_memcpy(new_ptr, s->pstart, old_size);
+ s->pstart = new_ptr;
+ }
+ s->size = new_size;
+}
+
+/*
+ * Helper function for the EQUEUE macros defined in global.h.
+ */
+
+void
+erl_grow_equeue(ErtsEQueue* q, Eterm* default_equeue)
+{
+ Uint old_size = (q->end - q->start);
+ Uint new_size = old_size * 2;
+ Uint first_part = (q->end - q->front);
+ Uint second_part = (q->back - q->start);
+ Eterm* new_ptr = erts_alloc(q->alloc_type, new_size*sizeof(Eterm));
+    ASSERT(q->back == q->front); /* grow is only called when the queue is full */
+ if (first_part > 0)
+ sys_memcpy(new_ptr, q->front, first_part*sizeof(Eterm));
+ if (second_part > 0)
+ sys_memcpy(new_ptr+first_part, q->start, second_part*sizeof(Eterm));
+ if (q->start != default_equeue)
+ erts_free(q->alloc_type, q->start);
+ q->start = new_ptr;
+ q->end = q->start + new_size;
+ q->front = q->start;
+ q->back = q->start + old_size;
+}
+
/* CTYPE macros */
#define LATIN1
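erl_grow_equeue above un-wraps a full ring buffer: the tail segment from front to the end of the old storage is copied first, then the wrapped head segment from the start up to back, leaving the queue linear with the front element at index 0. The same move in isolation, using plain malloc instead of the ERTS allocator:

#include <stdlib.h>
#include <string.h>

typedef unsigned long Term;   /* stand-in for Eterm */

static Term *unwrap_full_ring(const Term *start, const Term *end,
                              const Term *front, const Term *back,
                              size_t new_size)
{
    size_t first  = (size_t)(end - front);   /* front .. end of old buffer */
    size_t second = (size_t)(back - start);  /* wrapped part: start .. back */
    Term *p = malloc(new_size * sizeof(Term));
    if (p) {
        memcpy(p, front, first * sizeof(Term));
        memcpy(p + first, start, second * sizeof(Term));
    }
    return p;  /* new front is p[0]; new back is p[first + second] */
}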
@@ -257,10 +284,10 @@ erl_grow_wstack(ErtsWStack* s, UWord* default_wstack)
* Calculate length of a list.
* Returns -1 if not a proper list (i.e. not terminated with NIL)
*/
-int
+Sint
erts_list_length(Eterm list)
{
- int i = 0;
+ Sint i = 0;
while(is_list(list)) {
i++;
@@ -310,44 +337,53 @@ int erts_fit_in_bits_int32(Sint32 value)
return fit_in_bits((Sint64) (Uint32) value, 4);
}
+int erts_fit_in_bits_uint(Uint value)
+{
+#if ERTS_SIZEOF_ETERM == 4
+ return fit_in_bits((Sint64) (Uint32) value, 4);
+#elif ERTS_SIZEOF_ETERM == 8
+ return fit_in_bits(value, 5);
+#else
+# error "No way, Jose"
+#endif
+}
+
int
-erts_print(int to, void *arg, char *format, ...)
+erts_print(fmtfn_t to, void *arg, char *format, ...)
{
int res;
va_list arg_list;
va_start(arg_list, format);
- if (to < ERTS_PRINT_MIN)
- res = -EINVAL;
- else {
- switch (to) {
- case ERTS_PRINT_STDOUT:
+ {
+ switch ((UWord)to) {
+ case (UWord)ERTS_PRINT_STDOUT:
res = erts_vprintf(format, arg_list);
break;
- case ERTS_PRINT_STDERR:
+ case (UWord)ERTS_PRINT_STDERR:
res = erts_vfprintf(stderr, format, arg_list);
break;
- case ERTS_PRINT_FILE:
+ case (UWord)ERTS_PRINT_FILE:
res = erts_vfprintf((FILE *) arg, format, arg_list);
break;
- case ERTS_PRINT_SBUF:
+ case (UWord)ERTS_PRINT_SBUF:
res = erts_vsprintf((char *) arg, format, arg_list);
break;
- case ERTS_PRINT_SNBUF:
+ case (UWord)ERTS_PRINT_SNBUF:
res = erts_vsnprintf(((erts_print_sn_buf *) arg)->buf,
((erts_print_sn_buf *) arg)->size,
format,
arg_list);
break;
- case ERTS_PRINT_DSBUF:
+ case (UWord)ERTS_PRINT_DSBUF:
res = erts_vdsprintf((erts_dsprintf_buf_t *) arg, format, arg_list);
break;
- case ERTS_PRINT_INVALID:
- res = -EINVAL;
- break;
- default:
- res = erts_vfdprintf((int) to, format, arg_list);
+ case (UWord)ERTS_PRINT_FD:
+ res = erts_vfdprintf((int)(SWord) arg, format, arg_list);
break;
+ default:
+ res = erts_vcbprintf(to, arg, format, arg_list);
+ break;
}
}
@@ -356,7 +392,7 @@ erts_print(int to, void *arg, char *format, ...)
}
int
-erts_putc(int to, void *arg, char c)
+erts_putc(fmtfn_t to, void *arg, char c)
{
return erts_print(to, arg, "%c", c);
}
@@ -605,7 +641,7 @@ erts_bld_atom_uword_2tup_list(Uint **hpp, Uint *szp,
ui = uint_to_big(uints[i], *hpp);
*hpp += BIG_UINT_HEAP_SIZE;
}
-
+
res = CONS(*hpp+3, TUPLE2(*hpp, atoms[i], ui), res);
*hpp += 5;
}
@@ -643,14 +679,14 @@ erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length,
ui1 = uint_to_big(uints1[i], *hpp);
*hpp += BIG_UINT_HEAP_SIZE;
}
-
+
if (IS_USMALL(0, uints2[i]))
ui2 = make_small(uints2[i]);
else {
ui2 = uint_to_big(uints2[i], *hpp);
*hpp += BIG_UINT_HEAP_SIZE;
}
-
+
res = CONS(*hpp+4, TUPLE3(*hpp, atoms[i], ui1, ui2), res);
*hpp += 6;
}
@@ -665,12 +701,7 @@ erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length,
/* make a hash index from an erlang term */
/*
-** There are three hash functions.
-** make_broken_hash: the one used for backward compatibility
-** is called from the bif erlang:hash/2. Should never be used
-** as it a) hashes only a part of binaries, b) hashes bignums really poorly,
-** c) hashes bignums differently on different endian processors and d) hashes
-** small integers with different weights on different bytes.
+** There are two hash functions.
**
** make_hash: A hash function that will give the same values for the same
** terms regardless of the internal representation. Small integers are
@@ -706,7 +737,7 @@ erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length,
** If N < 0, Y = FUNNY_NUMBER4 else Y = FUNNY_NUMBER3.
** The hash value is Y*h(J) mod 2^32 where h(J) is calculated like
** h(0) = <initial hash>
-** h(i) = h(i-i)*X + B(i-1)
+** h(i) = h(i-1)*X + B(i-1)
** The above should hold regardless of internal representation.
** Pids are hashed like small numbers but with different constants, as are
** ports.
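The recurrence above, h(i) = h(i-1)*X + B(i-1) with everything taken mod 2^32, written out over a byte buffer; X and the seed are parameters here, where the real code picks FUNNY_NUMBER constants per term type:

#include <stddef.h>
#include <stdint.h>

static uint32_t byte_hash(const unsigned char *b, size_t n,
                          uint32_t seed, uint32_t x)
{
    uint32_t h = seed;
    size_t i;
    for (i = 0; i < n; i++)
        h = h * x + b[i];   /* unsigned wraparound gives mod 2^32 for free */
    return h;
}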
@@ -761,7 +792,7 @@ hash_binary_bytes(Eterm bin, Uint sz, Uint32 hash)
Uint b;
Uint lshift = bitoffs;
Uint rshift = 8 - lshift;
-
+
while (sz--) {
b = (previous << lshift) & 0xFF;
previous = *ptr++;
@@ -772,7 +803,7 @@ hash_binary_bytes(Eterm bin, Uint sz, Uint32 hash)
b = (previous << lshift) & 0xFF;
previous = *ptr++;
b |= previous >> rshift;
-
+
b >>= 8 - bitsize;
hash = (hash*FUNNY_NUMBER1 + b) * FUNNY_NUMBER12 + bitsize;
}
@@ -787,11 +818,10 @@ Uint32 make_hash(Eterm term_arg)
Eterm hash = 0;
unsigned op;
- /* Must not collide with the real tag_val_def's: */
-#define MAKE_HASH_TUPLE_OP 0x11
-#define MAKE_HASH_TERM_ARRAY_OP 0x12
-#define MAKE_HASH_CDR_PRE_OP 0x13
-#define MAKE_HASH_CDR_POST_OP 0x14
+#define MAKE_HASH_TUPLE_OP (FIRST_VACANT_TAG_DEF)
+#define MAKE_HASH_TERM_ARRAY_OP (FIRST_VACANT_TAG_DEF+1)
+#define MAKE_HASH_CDR_PRE_OP (FIRST_VACANT_TAG_DEF+2)
+#define MAKE_HASH_CDR_POST_OP (FIRST_VACANT_TAG_DEF+3)
/*
** Convenience macro for calculating a bytewise hash on an unsigned 32 bit
@@ -803,21 +833,21 @@ Uint32 make_hash(Eterm term_arg)
do { \
Uint32 x = (Uint32) (Expr); \
hash = \
- (((((hash)*(Prime1) + (x & 0xFF)) * (Prime1) + \
- ((x >> 8) & 0xFF)) * (Prime1) + \
- ((x >> 16) & 0xFF)) * (Prime1) + \
+ (((((hash)*(Prime1) + (x & 0xFF)) * (Prime1) + \
+ ((x >> 8) & 0xFF)) * (Prime1) + \
+ ((x >> 16) & 0xFF)) * (Prime1) + \
(x >> 24)); \
} while(0)
-#define UINT32_HASH_RET(Expr, Prime1, Prime2) \
+#define UINT32_HASH_RET(Expr, Prime1, Prime2) \
UINT32_HASH_STEP(Expr, Prime1); \
hash = hash * (Prime2); \
- break
-
-
+ break
+
+
/*
* Significant additions needed for real 64 bit port with larger fixnums.
- */
+ */
/*
* Note, for the simple 64bit port, not utilizing the
@@ -832,7 +862,7 @@ tail_recur:
hash = hash*FUNNY_NUMBER3 + 1;
break;
case ATOM_DEF:
- hash = hash*FUNNY_NUMBER1 +
+ hash = hash*FUNNY_NUMBER1 +
(atom_tab(atom_val(term))->slot.bucket.hvalue);
break;
case SMALL_DEF:
@@ -841,7 +871,7 @@ tail_recur:
Uint y2 = y1 < 0 ? -(Uint)y1 : y1;
UINT32_HASH_STEP(y2, FUNNY_NUMBER2);
-#if defined(ARCH_64) && !HALFWORD_HEAP
+#if defined(ARCH_64)
if (y2 >> 32)
UINT32_HASH_STEP(y2 >> 32, FUNNY_NUMBER2);
#endif
@@ -860,11 +890,11 @@ tail_recur:
{
Export* ep = *((Export **) (export_val(term) + 1));
- hash = hash * FUNNY_NUMBER11 + ep->code[2];
- hash = hash*FUNNY_NUMBER1 +
- (atom_tab(atom_val(ep->code[0]))->slot.bucket.hvalue);
- hash = hash*FUNNY_NUMBER1 +
- (atom_tab(atom_val(ep->code[1]))->slot.bucket.hvalue);
+ hash = hash * FUNNY_NUMBER11 + ep->info.mfa.arity;
+ hash = hash*FUNNY_NUMBER1 +
+ (atom_tab(atom_val(ep->info.mfa.module))->slot.bucket.hvalue);
+ hash = hash*FUNNY_NUMBER1 +
+ (atom_tab(atom_val(ep->info.mfa.function))->slot.bucket.hvalue);
break;
}
@@ -874,7 +904,7 @@ tail_recur:
Uint num_free = funp->num_free;
hash = hash * FUNNY_NUMBER10 + num_free;
- hash = hash*FUNNY_NUMBER1 +
+ hash = hash*FUNNY_NUMBER1 +
(atom_tab(atom_val(funp->fe->module))->slot.bucket.hvalue);
hash = hash*FUNNY_NUMBER2 + funp->fe->old_index;
hash = hash*FUNNY_NUMBER2 + funp->fe->old_uniq;
@@ -899,14 +929,17 @@ tail_recur:
UINT32_HASH_RET(internal_ref_numbers(term)[0],FUNNY_NUMBER9,FUNNY_NUMBER10);
case EXTERNAL_REF_DEF:
UINT32_HASH_RET(external_ref_numbers(term)[0],FUNNY_NUMBER9,FUNNY_NUMBER10);
- case FLOAT_DEF:
+ case FLOAT_DEF:
{
- FloatDef ff;
- GET_DOUBLE(term, ff);
- hash = hash*FUNNY_NUMBER6 + (ff.fw[0] ^ ff.fw[1]);
- break;
+ FloatDef ff;
+ GET_DOUBLE(term, ff);
+ if (ff.fd == 0.0f) {
+ /* ensure positive 0.0 */
+ ff.fd = erts_get_positive_zero_float();
+ }
+ hash = hash*FUNNY_NUMBER6 + (ff.fw[0] ^ ff.fw[1]);
+ break;
}
-
case MAKE_HASH_CDR_PRE_OP:
term = (Eterm) WSTACK_POP(stack);
if (is_not_list(term)) {
@@ -923,12 +956,12 @@ tail_recur:
** as multiplications on a Sparc is so slow.
*/
hash = hash*FUNNY_NUMBER2 + unsigned_val(*list);
-
+
if (is_not_list(CDR(list))) {
WSTACK_PUSH(stack, MAKE_HASH_CDR_POST_OP);
term = CDR(list);
goto tail_recur;
- }
+ }
list = list_val(CDR(list));
}
WSTACK_PUSH2(stack, CDR(list), MAKE_HASH_CDR_PRE_OP);
@@ -959,7 +992,7 @@ tail_recur:
}
d = BIG_DIGIT(ptr, k);
k = sizeof(ErtsDigit);
-#if defined(ARCH_64) && !HALFWORD_HEAP
+#if defined(ARCH_64)
if (!(d >> 32))
k /= 2;
#endif
@@ -969,32 +1002,17 @@ tail_recur:
}
hash *= is_neg ? FUNNY_NUMBER4 : FUNNY_NUMBER3;
break;
- }
- case MAP_DEF:
- {
- map_t *mp = (map_t *)map_val(term);
- int size = map_get_size(mp);
- Eterm *ks = map_get_keys(mp);
- Eterm *vs = map_get_values(mp);
-
- /* Use a prime with size to remedy some of
- * the {} and <<>> hash problems */
- hash = hash*FUNNY_NUMBER13 + FUNNY_NUMBER14 + size;
- if (size == 0)
- break;
-
- /* push values first */
- WSTACK_PUSH3(stack, (UWord)vs, (UWord) size, MAKE_HASH_TERM_ARRAY_OP);
- WSTACK_PUSH3(stack, (UWord)ks, (UWord) size, MAKE_HASH_TERM_ARRAY_OP);
- break;
}
- case TUPLE_DEF:
+ case MAP_DEF:
+ hash = hash*FUNNY_NUMBER13 + FUNNY_NUMBER14 + make_hash2(term);
+ break;
+ case TUPLE_DEF:
{
Eterm* ptr = tuple_val(term);
Uint arity = arityval(*ptr);
WSTACK_PUSH3(stack, (UWord) arity, (UWord)(ptr+1), (UWord) arity);
- op = MAKE_HASH_TUPLE_OP;
+ op = MAKE_HASH_TUPLE_OP;
}/*fall through*/
case MAKE_HASH_TUPLE_OP:
case MAKE_HASH_TERM_ARRAY_OP:
@@ -1011,10 +1029,10 @@ tail_recur:
hash = hash*FUNNY_NUMBER9 + arity;
}
break;
- }
-
+ }
+
default:
- erl_exit(1, "Invalid tag in make_hash(0x%X,0x%X)\n", term, op);
+ erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_hash(0x%X,0x%X)\n", term, op);
return 0;
}
if (WSTACK_ISEMPTY(stack)) break;
@@ -1023,6 +1041,10 @@ tail_recur:
DESTROY_WSTACK(stack);
return hash;
+#undef MAKE_HASH_TUPLE_OP
+#undef MAKE_HASH_TERM_ARRAY_OP
+#undef MAKE_HASH_CDR_PRE_OP
+#undef MAKE_HASH_CDR_POST_OP
#undef UINT32_HASH_STEP
#undef UINT32_HASH_RET
}
@@ -1091,11 +1113,12 @@ Uint32
make_hash2(Eterm term)
{
Uint32 hash;
- Uint32 hash_xor_keys = 0;
- Uint32 hash_xor_values = 0;
+ Uint32 hash_xor_pairs;
DeclareTmpHeapNoproc(tmp_big,2);
-/* (HCONST * {2, ..., 16}) mod 2^32 */
+ ERTS_UNDEF(hash_xor_pairs, 0);
+
+/* (HCONST * {2, ..., 22}) mod 2^32 */
#define HCONST_2 0x3c6ef372UL
#define HCONST_3 0xdaa66d2bUL
#define HCONST_4 0x78dde6e4UL
@@ -1111,10 +1134,16 @@ make_hash2(Eterm term)
#define HCONST_14 0xa708a81eUL
#define HCONST_15 0x454021d7UL
#define HCONST_16 0xe3779b90UL
+#define HCONST_17 0x81af1549UL
+#define HCONST_18 0x1fe68f02UL
+#define HCONST_19 0xbe1e08bbUL
+#define HCONST_20 0x5c558274UL
+#define HCONST_21 0xfa8cfc2dUL
+#define HCONST_22 0x98c475e6UL
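The new HCONST_17..HCONST_22 constants follow the rule stated in the comment above: HCONST_n is (HCONST * n) mod 2^32. Assuming HCONST is the golden-ratio constant 0x9e3779b9UL defined elsewhere in this file, the table can be regenerated and cross-checked like this:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint32_t hconst = 0x9e3779b9u;   /* assumed value of HCONST */
    unsigned n;
    for (n = 2; n <= 22; n++)
        printf("#define HCONST_%u 0x%08xUL\n", n, hconst * n);
    return 0;   /* n == 17 prints 0x81af1549UL, matching HCONST_17 above */
}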
#define HASH_MAP_TAIL (_make_header(1,_TAG_HEADER_REF))
-#define HASH_MAP_KEY (_make_header(2,_TAG_HEADER_REF))
-#define HASH_MAP_VAL (_make_header(3,_TAG_HEADER_REF))
+#define HASH_MAP_PAIR (_make_header(2,_TAG_HEADER_REF))
+#define HASH_CDR (_make_header(3,_TAG_HEADER_REF))
#define UINT32_HASH_2(Expr1, Expr2, AConst) \
do { \
@@ -1132,11 +1161,18 @@ make_hash2(Eterm term)
if (y < 0) { \
UINT32_HASH(-y, AConst); \
/* Negative numbers are unnecessarily mixed twice. */ \
- } \
- UINT32_HASH(y, AConst); \
+ } \
+ UINT32_HASH(y, AConst); \
} while(0)
#define IS_SSMALL28(x) (((Uint) (((x) >> (28-1)) + 1)) < 2)
+
+#ifdef ARCH_64
+# define POINTER_HASH(Ptr, AConst) UINT32_HASH_2((Uint32)(UWord)(Ptr), (((UWord)(Ptr)) >> 32), AConst)
+#else
+# define POINTER_HASH(Ptr, AConst) UINT32_HASH(Ptr, AConst)
+#endif
+
/* Optimization. Simple cases before declaration of estack. */
if (primary_tag(term) == TAG_PRIMARY_IMMED1) {
switch (term & _TAG_IMMED1_MASK) {
@@ -1191,9 +1227,9 @@ make_hash2(Eterm term)
if (c > 0)
UINT32_HASH(sh, HCONST_4);
if (is_list(term)) {
- term = *ptr;
- tmp = *++ptr;
- ESTACK_PUSH(s, tmp);
+ tmp = CDR(ptr);
+ ESTACK_PUSH(s, tmp);
+ term = CAR(ptr);
}
}
break;
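The rewritten list case keeps the string optimization: byte-valued elements are packed four at a time into one 32-bit word before mixing, so a string costs one mix per four characters instead of one per cons cell. The packing loop in isolation, with mix() standing in for UINT32_HASH:

#include <stddef.h>
#include <stdint.h>

static void pack_and_mix(const unsigned char *s, size_t n,
                         void (*mix)(uint32_t word, int nbytes))
{
    uint32_t sh = 0;
    int c = 0;
    size_t i;

    for (i = 0; i < n; i++) {
        sh = (sh << 8) + s[i];
        if (++c == 4) {       /* word full: mix and reset */
            mix(sh, 4);
            c = 0;
            sh = 0;
        }
    }
    if (c > 0)
        mix(sh, c);           /* 1-3 trailing bytes */
}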
@@ -1208,59 +1244,98 @@ make_hash2(Eterm term)
int arity = header_arity(hdr);
Eterm* elem = tuple_val(term);
UINT32_HASH(arity, HCONST_9);
- if (arity == 0) /* Empty tuple */
- goto hash2_common;
- for (i = arity; i >= 1; i--) {
- tmp = elem[i];
- ESTACK_PUSH(s, tmp);
- }
- goto hash2_common;
- }
- break;
- case MAP_SUBTAG:
- {
- map_t *mp = (map_t *)map_val(term);
- int i;
- int size = map_get_size(mp);
- Eterm *ks = map_get_keys(mp);
- Eterm *vs = map_get_values(mp);
- UINT32_HASH(size, HCONST_16);
- if (size == 0) {
+ if (arity == 0) /* Empty tuple */
goto hash2_common;
+ for (i = arity; ; i--) {
+ term = elem[i];
+ if (i == 1)
+ break;
+ ESTACK_PUSH(s, term);
}
- ESTACK_PUSH(s, hash_xor_values);
- ESTACK_PUSH(s, hash_xor_keys);
- ESTACK_PUSH(s, hash);
- ESTACK_PUSH(s, HASH_MAP_TAIL);
- hash = 0;
- hash_xor_keys = 0;
- hash_xor_values = 0;
- for (i = size - 1; i >= 0; i--) {
- tmp = vs[i];
- ESTACK_PUSH(s, HASH_MAP_VAL);
- ESTACK_PUSH(s, tmp);
- }
- /* We do not want to expose the tuple representation.
- * Do not push the keys as a tuple.
- */
- for (i = size - 1; i >= 0; i--) {
- tmp = ks[i];
- ESTACK_PUSH(s, HASH_MAP_KEY);
- ESTACK_PUSH(s, tmp);
- }
- goto hash2_common;
}
break;
+ case MAP_SUBTAG:
+ {
+ Eterm* ptr = boxed_val(term) + 1;
+ Uint size;
+ int i;
+ switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_FLATMAP:
+ {
+ flatmap_t *mp = (flatmap_t *)flatmap_val(term);
+ Eterm *ks = flatmap_get_keys(mp);
+ Eterm *vs = flatmap_get_values(mp);
+ size = flatmap_get_size(mp);
+ UINT32_HASH(size, HCONST_16);
+ if (size == 0)
+ goto hash2_common;
+
+ /* We want a portable hash function that is *independent* of
+ * the order in which keys and values are encountered.
+                   * We therefore calculate context-independent hashes for all
+ * key-value pairs and then xor them together.
+ */
+ ESTACK_PUSH(s, hash_xor_pairs);
+ ESTACK_PUSH(s, hash);
+ ESTACK_PUSH(s, HASH_MAP_TAIL);
+ hash = 0;
+ hash_xor_pairs = 0;
+ for (i = size - 1; i >= 0; i--) {
+ ESTACK_PUSH(s, HASH_MAP_PAIR);
+ ESTACK_PUSH(s, vs[i]);
+ ESTACK_PUSH(s, ks[i]);
+ }
+ goto hash2_common;
+ }
+
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ size = *ptr++;
+ UINT32_HASH(size, HCONST_16);
+ if (size == 0)
+ goto hash2_common;
+ ESTACK_PUSH(s, hash_xor_pairs);
+ ESTACK_PUSH(s, hash);
+ ESTACK_PUSH(s, HASH_MAP_TAIL);
+ hash = 0;
+ hash_xor_pairs = 0;
+ }
+ switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ i = 16;
+ break;
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ case HAMT_SUBTAG_NODE_BITMAP:
+ i = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ break;
+ default:
+ erts_exit(ERTS_ERROR_EXIT, "bad header");
+ }
+ while (i) {
+ if (is_list(*ptr)) {
+ Eterm* cons = list_val(*ptr);
+ ESTACK_PUSH(s, HASH_MAP_PAIR);
+ ESTACK_PUSH(s, CDR(cons));
+ ESTACK_PUSH(s, CAR(cons));
+ }
+ else {
+ ASSERT(is_boxed(*ptr));
+ ESTACK_PUSH(s, *ptr);
+ }
+ i--; ptr++;
+ }
+ goto hash2_common;
+ }
+ break;
case EXPORT_SUBTAG:
{
Export* ep = *((Export **) (export_val(term) + 1));
-
UINT32_HASH_2
- (ep->code[2],
- atom_tab(atom_val(ep->code[0]))->slot.bucket.hvalue,
+ (ep->info.mfa.arity,
+ atom_tab(atom_val(ep->info.mfa.module))->slot.bucket.hvalue,
HCONST);
UINT32_HASH
- (atom_tab(atom_val(ep->code[1]))->slot.bucket.hvalue,
+ (atom_tab(atom_val(ep->info.mfa.function))->slot.bucket.hvalue,
HCONST_14);
goto hash2_common;
}
@@ -1269,9 +1344,8 @@ make_hash2(Eterm term)
{
ErlFunThing* funp = (ErlFunThing *) fun_val(term);
Uint num_free = funp->num_free;
-
UINT32_HASH_2
- (num_free,
+ (num_free,
atom_tab(atom_val(funp->fe->module))->slot.bucket.hvalue,
HCONST);
UINT32_HASH_2
@@ -1350,7 +1424,8 @@ make_hash2(Eterm term)
do {
Uint t;
Uint32 x, y;
- t = i < n ? BIG_DIGIT(ptr, i++) : 0;
+ ASSERT(i < n);
+ t = BIG_DIGIT(ptr, i++);
x = t & 0xffffffff;
y = t >> 32;
UINT32_HASH_2(x, y, con);
@@ -1383,6 +1458,10 @@ make_hash2(Eterm term)
{
FloatDef ff;
GET_DOUBLE(term, ff);
+ if (ff.fd == 0.0f) {
+ /* ensure positive 0.0 */
+ ff.fd = erts_get_positive_zero_float();
+ }
#if defined(WORDS_BIGENDIAN) || defined(DOUBLE_MIDDLE_ENDIAN)
UINT32_HASH_2(ff.fw[0], ff.fw[1], HCONST_12);
#else
@@ -1391,9 +1470,9 @@ make_hash2(Eterm term)
goto hash2_common;
}
break;
-
+
default:
- erl_exit(1, "Invalid tag in make_hash2(0x%X)\n", term);
+ erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_hash2(0x%X)\n", term);
}
}
break;
@@ -1424,7 +1503,7 @@ make_hash2(Eterm term)
UINT32_HASH(NIL_DEF, HCONST_2);
goto hash2_common;
default:
- erl_exit(1, "Invalid tag in make_hash2(0x%X)\n", term);
+ erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_hash2(0x%X)\n", term);
}
case _TAG_IMMED1_SMALL:
{
@@ -1440,7 +1519,7 @@ make_hash2(Eterm term)
}
break;
default:
- erl_exit(1, "Invalid tag in make_hash2(0x%X)\n", term);
+ erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_hash2(0x%X)\n", term);
hash2_common:
/* Uint32 hash always has the hash value of the previous term,
@@ -1458,19 +1537,13 @@ make_hash2(Eterm term)
switch (term) {
case HASH_MAP_TAIL: {
hash = (Uint32) ESTACK_POP(s);
- UINT32_HASH(hash_xor_keys, HCONST_16);
- UINT32_HASH(hash_xor_values, HCONST_16);
- hash_xor_keys = (Uint32) ESTACK_POP(s);
- hash_xor_values = (Uint32) ESTACK_POP(s);
+ UINT32_HASH(hash_xor_pairs, HCONST_19);
+ hash_xor_pairs = (Uint32) ESTACK_POP(s);
goto hash2_common;
}
- case HASH_MAP_KEY:
- hash_xor_keys ^= hash;
- hash = 0;
- goto hash2_common;
- case HASH_MAP_VAL:
- hash_xor_values ^= hash;
- hash = 0;
+ case HASH_MAP_PAIR:
+ hash_xor_pairs ^= hash;
+ hash = 0;
goto hash2_common;
default:
break;
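The HASH_MAP_PAIR/HASH_MAP_TAIL machinery above is what makes map hashing independent of iteration order: each key-value pair is hashed starting from a zeroed running hash, the pair hashes are xor'ed together, and only the accumulated xor is mixed into the enclosing hash. The core idea in isolation, with pair_hash() as an assumed helper:

#include <stddef.h>
#include <stdint.h>

extern uint32_t pair_hash(uint32_t key, uint32_t val);   /* assumed helper */

static uint32_t hash_pairs_unordered(const uint32_t *ks, const uint32_t *vs,
                                     size_t n)
{
    uint32_t acc = 0;
    size_t i;
    for (i = 0; i < n; i++)
        acc ^= pair_hash(ks[i], vs[i]);   /* xor commutes: order cannot matter */
    return acc;
}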
@@ -1478,301 +1551,425 @@ make_hash2(Eterm term)
}
}
}
-
-#undef HASH_MAP_TAIL
-#undef HASH_MAP_KEY
-#undef HASH_MAP_VAL
-
-#undef UINT32_HASH_2
-#undef UINT32_HASH
-#undef SINT32_HASH
}
-#undef HCONST
-#undef MIX
+/* Term hash function for internal use.
+ *
+ * Limitation #1: It is not "portable" in any way between different VM instances.
+ *
+ * Limitation #2: The hash value is only valid as long as the term exists
+ * somewhere in the VM. Why? Because external pids, ports and refs are hashed
+ * by mixing the node *pointer* value. If a node disappears and later reappears
+ * with a new ErlNode struct, externals from that node will hash differently than
+ * before.
+ *
+ * One IMPORTANT property must hold (for hamt).
+ * EVERY BIT of the term that is significant for equality (see EQ)
+ * MUST BE USED AS INPUT FOR THE HASH. Two different terms must always have a
+ * chance of hashing differently when salted: hash([Salt|A]) vs hash([Salt|B]).
+ *
+ * This is why we cannot use cached hash values for atoms, for example.
+ *
+ */
+#define CONST_HASH(AConst) \
+do { /* Lightweight mixing of constant (type info) */ \
+ hash ^= AConst; \
+ hash = (hash << 17) ^ (hash >> (32-17)); \
+} while (0)
-Uint32 make_broken_hash(Eterm term)
+Uint32
+make_internal_hash(Eterm term, Uint32 salt)
{
- Uint32 hash = 0;
- DECLARE_WSTACK(stack);
- unsigned op;
-tail_recur:
- op = tag_val_def(term);
- for (;;) {
- switch (op) {
- case NIL_DEF:
- hash = hash*FUNNY_NUMBER3 + 1;
- break;
- case ATOM_DEF:
- hash = hash*FUNNY_NUMBER1 +
- (atom_tab(atom_val(term))->slot.bucket.hvalue);
- break;
- case SMALL_DEF:
-#if defined(ARCH_64) && !HALFWORD_HEAP
+ Uint32 hash;
+
+ /* Optimization. Simple cases before declaration of estack. */
+ if (primary_tag(term) == TAG_PRIMARY_IMMED1) {
+ hash = salt;
+ #if ERTS_SIZEOF_ETERM == 8
+ UINT32_HASH_2((Uint32)term, (Uint32)(term >> 32), HCONST);
+ #elif ERTS_SIZEOF_ETERM == 4
+ UINT32_HASH(term, HCONST);
+ #else
+ # error "No you don't"
+ #endif
+ return hash;
+ }
{
- Sint y1 = signed_val(term);
- Uint y2 = y1 < 0 ? -(Uint)y1 : y1;
- Uint32 y3 = (Uint32) (y2 >> 32);
- int arity = 1;
-
-#if defined(WORDS_BIGENDIAN)
- if (!IS_SSMALL28(y1))
- { /* like a bignum */
- Uint32 y4 = (Uint32) y2;
- hash = hash*FUNNY_NUMBER2 + ((y4 << 16) | (y4 >> 16));
- if (y3)
- {
- hash = hash*FUNNY_NUMBER2 + ((y3 << 16) | (y3 >> 16));
- arity++;
+ Eterm tmp;
+ DECLARE_ESTACK(s);
+
+ hash = salt;
+ for (;;) {
+ switch (primary_tag(term)) {
+ case TAG_PRIMARY_LIST:
+ {
+ int c = 0;
+ Uint32 sh = 0;
+ Eterm* ptr = list_val(term);
+ while (is_byte(*ptr)) {
+ /* Optimization for strings. */
+ sh = (sh << 8) + unsigned_val(*ptr);
+ if (c == 3) {
+ UINT32_HASH(sh, HCONST_4);
+ c = sh = 0;
+ } else {
+ c++;
+ }
+ term = CDR(ptr);
+ if (is_not_list(term))
+ break;
+ ptr = list_val(term);
}
- hash = hash * (y1 < 0 ? FUNNY_NUMBER3 : FUNNY_NUMBER2) + arity;
- } else {
- hash = hash*FUNNY_NUMBER2 + (((Uint) y1) & 0xfffffff);
- }
-#else
- if (!IS_SSMALL28(y1))
- { /* like a bignum */
- hash = hash*FUNNY_NUMBER2 + ((Uint32) y2);
- if (y3)
- {
- hash = hash*FUNNY_NUMBER2 + y3;
- arity++;
+ if (c > 0)
+ UINT32_HASH_2(sh, (Uint32)c, HCONST_22);
+
+ if (is_list(term)) {
+ tmp = CDR(ptr);
+ CONST_HASH(HCONST_17); /* Hash CAR in cons cell */
+ ESTACK_PUSH(s, tmp);
+ if (is_not_list(tmp)) {
+ ESTACK_PUSH(s, HASH_CDR);
+ }
+ term = CAR(ptr);
}
- hash = hash * (y1 < 0 ? FUNNY_NUMBER3 : FUNNY_NUMBER2) + arity;
- } else {
- hash = hash*FUNNY_NUMBER2 + (((Uint) y1) & 0xfffffff);
}
-#endif
- }
-#else
- hash = hash*FUNNY_NUMBER2 + unsigned_val(term);
-#endif
break;
-
- case BINARY_DEF:
+ case TAG_PRIMARY_BOXED:
{
- size_t sz = binary_size(term);
- size_t i = (sz < 15) ? sz : 15;
-
- hash = hash_binary_bytes(term, i, hash);
- hash = hash*FUNNY_NUMBER4 + sz;
+ Eterm hdr = *boxed_val(term);
+ ASSERT(is_header(hdr));
+ switch (hdr & _TAG_HEADER_MASK) {
+ case ARITYVAL_SUBTAG:
+ {
+ int i;
+ int arity = header_arity(hdr);
+ Eterm* elem = tuple_val(term);
+ UINT32_HASH(arity, HCONST_9);
+ if (arity == 0) /* Empty tuple */
+ goto pop_next;
+ for (i = arity; ; i--) {
+ term = elem[i];
+ if (i == 1)
+ break;
+ ESTACK_PUSH(s, term);
+ }
+ }
break;
- }
- case EXPORT_DEF:
- {
- Export* ep = *((Export **) (export_val(term) + 1));
+ case MAP_SUBTAG:
+ {
+ Eterm* ptr = boxed_val(term) + 1;
+ Uint size;
+ int i;
+
+ /*
+ * We rely on key-value iteration order being constant
+ * for identical maps (in this VM instance).
+ */
+ switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_FLATMAP:
+ {
+ flatmap_t *mp = (flatmap_t *)flatmap_val(term);
+ Eterm *ks = flatmap_get_keys(mp);
+ Eterm *vs = flatmap_get_values(mp);
+ size = flatmap_get_size(mp);
+ UINT32_HASH(size, HCONST_16);
+ if (size == 0)
+ goto pop_next;
+
+ for (i = size - 1; i >= 0; i--) {
+ ESTACK_PUSH(s, vs[i]);
+ ESTACK_PUSH(s, ks[i]);
+ }
+ goto pop_next;
+ }
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ size = *ptr++;
+ UINT32_HASH(size, HCONST_16);
+ if (size == 0)
+ goto pop_next;
+ }
+ switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ i = 16;
+ break;
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ case HAMT_SUBTAG_NODE_BITMAP:
+ i = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ break;
+ default:
+ erts_exit(ERTS_ERROR_EXIT, "bad header");
+ }
+ while (i) {
+ if (is_list(*ptr)) {
+ Eterm* cons = list_val(*ptr);
+ ESTACK_PUSH(s, CDR(cons));
+ ESTACK_PUSH(s, CAR(cons));
+ }
+ else {
+ ASSERT(is_boxed(*ptr));
+ ESTACK_PUSH(s, *ptr);
+ }
+ i--; ptr++;
+ }
+ goto pop_next;
+ }
+ break;
+ case EXPORT_SUBTAG:
+ {
+ Export* ep = *((Export **) (export_val(term) + 1));
+ /* Assumes Export entries never move */
+ POINTER_HASH(ep, HCONST_14);
+ goto pop_next;
+ }
- hash = hash * FUNNY_NUMBER11 + ep->code[2];
- hash = hash*FUNNY_NUMBER1 +
- (atom_tab(atom_val(ep->code[0]))->slot.bucket.hvalue);
- hash = hash*FUNNY_NUMBER1 +
- (atom_tab(atom_val(ep->code[1]))->slot.bucket.hvalue);
+ case FUN_SUBTAG:
+ {
+ ErlFunThing* funp = (ErlFunThing *) fun_val(term);
+ Uint num_free = funp->num_free;
+ UINT32_HASH_2(num_free, funp->fe->module, HCONST_20);
+ UINT32_HASH_2(funp->fe->old_index, funp->fe->old_uniq, HCONST_21);
+ if (num_free == 0) {
+ goto pop_next;
+ } else {
+ Eterm* bptr = funp->env + num_free - 1;
+ while (num_free-- > 1) {
+ term = *bptr--;
+ ESTACK_PUSH(s, term);
+ }
+ term = *bptr;
+ }
+ }
break;
- }
-
- case FUN_DEF:
- {
- ErlFunThing* funp = (ErlFunThing *) fun_val(term);
- Uint num_free = funp->num_free;
+ case REFC_BINARY_SUBTAG:
+ case HEAP_BINARY_SUBTAG:
+ case SUB_BINARY_SUBTAG:
+ {
+ byte* bptr;
+ unsigned sz = binary_size(term);
+ Uint32 con = HCONST_13 + hash;
+ Uint bitoffs;
+ Uint bitsize;
- hash = hash * FUNNY_NUMBER10 + num_free;
- hash = hash*FUNNY_NUMBER1 +
- (atom_tab(atom_val(funp->fe->module))->slot.bucket.hvalue);
- hash = hash*FUNNY_NUMBER2 + funp->fe->old_index;
- hash = hash*FUNNY_NUMBER2 + funp->fe->old_uniq;
- if (num_free > 0) {
- if (num_free > 1) {
- WSTACK_PUSH3(stack, (UWord) &funp->env[1], (num_free-1), MAKE_HASH_TERM_ARRAY_OP);
+ ERTS_GET_BINARY_BYTES(term, bptr, bitoffs, bitsize);
+ if (sz == 0 && bitsize == 0) {
+ hash = con;
+ } else {
+ if (bitoffs == 0) {
+ hash = block_hash(bptr, sz, con);
+ if (bitsize > 0) {
+ UINT32_HASH_2(bitsize, (bptr[sz] >> (8 - bitsize)),
+ HCONST_15);
+ }
+ } else {
+ byte* buf = (byte *) erts_alloc(ERTS_ALC_T_TMP,
+ sz + (bitsize != 0));
+ erts_copy_bits(bptr, bitoffs, 1, buf, 0, 1, sz*8+bitsize);
+ hash = block_hash(buf, sz, con);
+ if (bitsize > 0) {
+ UINT32_HASH_2(bitsize, (buf[sz] >> (8 - bitsize)),
+ HCONST_15);
+ }
+ erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ }
}
- term = funp->env[0];
- goto tail_recur;
+ goto pop_next;
}
break;
- }
-
- case PID_DEF:
- hash = hash*FUNNY_NUMBER5 + internal_pid_number(term);
- break;
- case EXTERNAL_PID_DEF:
- hash = hash*FUNNY_NUMBER5 + external_pid_number(term);
- break;
- case PORT_DEF:
- hash = hash*FUNNY_NUMBER9 + internal_port_number(term);
- break;
- case EXTERNAL_PORT_DEF:
- hash = hash*FUNNY_NUMBER9 + external_port_number(term);
- break;
- case REF_DEF:
- hash = hash*FUNNY_NUMBER9 + internal_ref_numbers(term)[0];
- break;
- case EXTERNAL_REF_DEF:
- hash = hash*FUNNY_NUMBER9 + external_ref_numbers(term)[0];
- break;
- case FLOAT_DEF:
- {
- FloatDef ff;
- GET_DOUBLE(term, ff);
- hash = hash*FUNNY_NUMBER6 + (ff.fw[0] ^ ff.fw[1]);
- }
- break;
-
- case MAKE_HASH_CDR_PRE_OP:
- term = (Eterm) WSTACK_POP(stack);
- if (is_not_list(term)) {
- WSTACK_PUSH(stack, (UWord) MAKE_HASH_CDR_POST_OP);
- goto tail_recur;
- }
- /*fall through*/
- case LIST_DEF:
- {
- Eterm* list = list_val(term);
- WSTACK_PUSH2(stack, (UWord) CDR(list),
- (UWord) MAKE_HASH_CDR_PRE_OP);
- term = CAR(list);
- goto tail_recur;
- }
-
- case MAKE_HASH_CDR_POST_OP:
- hash *= FUNNY_NUMBER8;
- break;
-
- case BIG_DEF:
- {
- Eterm* ptr = big_val(term);
- int is_neg = BIG_SIGN(ptr);
- Uint arity = BIG_ARITY(ptr);
- Uint i = arity;
- ptr++;
+ case POS_BIG_SUBTAG:
+ case NEG_BIG_SUBTAG:
+ {
+ Eterm* ptr = big_val(term);
+ Uint i = 0;
+ Uint n = BIG_SIZE(ptr);
+ Uint32 con = BIG_SIGN(ptr) ? HCONST_10 : HCONST_11;
#if D_EXP == 16
- /* hash over 32 bit LE */
-
- while(i--) {
- hash = hash*FUNNY_NUMBER2 + *ptr++;
- }
+ do {
+ Uint32 x, y;
+ x = i < n ? BIG_DIGIT(ptr, i++) : 0;
+ x += (Uint32)(i < n ? BIG_DIGIT(ptr, i++) : 0) << 16;
+ y = i < n ? BIG_DIGIT(ptr, i++) : 0;
+ y += (Uint32)(i < n ? BIG_DIGIT(ptr, i++) : 0) << 16;
+ UINT32_HASH_2(x, y, con);
+ } while (i < n);
#elif D_EXP == 32
-
-#if defined(WORDS_BIGENDIAN)
- while(i--) {
- Uint d = *ptr++;
- hash = hash*FUNNY_NUMBER2 + ((d << 16) | (d >> 16));
- }
-#else
- while(i--) {
- hash = hash*FUNNY_NUMBER2 + *ptr++;
- }
-#endif
-
+ do {
+ Uint32 x, y;
+ x = i < n ? BIG_DIGIT(ptr, i++) : 0;
+ y = i < n ? BIG_DIGIT(ptr, i++) : 0;
+ UINT32_HASH_2(x, y, con);
+ } while (i < n);
#elif D_EXP == 64
- {
- Uint32 h = 0, l;
-#if defined(WORDS_BIGENDIAN)
- while(i--) {
- Uint d = *ptr++;
- l = d & 0xffffffff;
- h = d >> 32;
- hash = hash*FUNNY_NUMBER2 + ((l << 16) | (l >> 16));
- if (h || i)
- hash = hash*FUNNY_NUMBER2 + ((h << 16) | (h >> 16));
- }
+ do {
+ Uint t;
+ Uint32 x, y;
+ ASSERT(i < n);
+ t = BIG_DIGIT(ptr, i++);
+ x = t & 0xffffffff;
+ y = t >> 32;
+ UINT32_HASH_2(x, y, con);
+ } while (i < n);
#else
- while(i--) {
- Uint d = *ptr++;
- l = d & 0xffffffff;
- h = d >> 32;
- hash = hash*FUNNY_NUMBER2 + l;
- if (h || i)
- hash = hash*FUNNY_NUMBER2 + h;
- }
+#error "unsupported D_EXP size"
#endif
- /* adjust arity to match 32 bit mode */
- arity = (arity << 1) - (h == 0);
+ goto pop_next;
+ }
+ break;
+ case REF_SUBTAG:
+ UINT32_HASH(internal_ref_numbers(term)[0], HCONST_7);
+ ASSERT(internal_ref_no_numbers(term) == 3);
+ UINT32_HASH_2(internal_ref_numbers(term)[1],
+ internal_ref_numbers(term)[2], HCONST_8);
+ goto pop_next;
+
+ case EXTERNAL_REF_SUBTAG:
+ {
+ ExternalThing* thing = external_thing_ptr(term);
+
+ ASSERT(external_thing_ref_no_numbers(thing) == 3);
+ /* See limitation #2 */
+ #ifdef ARCH_64
+ POINTER_HASH(thing->node, HCONST_7);
+ UINT32_HASH(external_thing_ref_numbers(thing)[0], HCONST_7);
+ #else
+ UINT32_HASH_2(thing->node,
+ external_thing_ref_numbers(thing)[0], HCONST_7);
+ #endif
+ UINT32_HASH_2(external_thing_ref_numbers(thing)[1],
+ external_thing_ref_numbers(thing)[2], HCONST_8);
+ goto pop_next;
+ }
+ case EXTERNAL_PID_SUBTAG: {
+ ExternalThing* thing = external_thing_ptr(term);
+ /* See limitation #2 */
+ #ifdef ARCH_64
+ POINTER_HASH(thing->node, HCONST_5);
+ UINT32_HASH(thing->data.ui[0], HCONST_5);
+ #else
+ UINT32_HASH_2(thing->node, thing->data.ui[0], HCONST_5);
+ #endif
+ goto pop_next;
+ }
+ case EXTERNAL_PORT_SUBTAG: {
+ ExternalThing* thing = external_thing_ptr(term);
+ /* See limitation #2 */
+ #ifdef ARCH_64
+ POINTER_HASH(thing->node, HCONST_6);
+ UINT32_HASH(thing->data.ui[0], HCONST_6);
+ #else
+ UINT32_HASH_2(thing->node, thing->data.ui[0], HCONST_6);
+ #endif
+ goto pop_next;
+ }
+ case FLOAT_SUBTAG:
+ {
+ FloatDef ff;
+ GET_DOUBLE(term, ff);
+ if (ff.fd == 0.0f) {
+ /* ensure positive 0.0 */
+ ff.fd = erts_get_positive_zero_float();
+ }
+ UINT32_HASH_2(ff.fw[0], ff.fw[1], HCONST_12);
+ goto pop_next;
+ }
+ default:
+ erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_internal_hash(0x%X, %lu)\n", term, salt);
}
-
-#else
-#error "unsupported D_EXP size"
-#endif
- hash = hash * (is_neg ? FUNNY_NUMBER3 : FUNNY_NUMBER2) + arity;
}
break;
+ case TAG_PRIMARY_IMMED1:
+ #if ERTS_SIZEOF_ETERM == 8
+ UINT32_HASH_2((Uint32)term, (Uint32)(term >> 32), HCONST);
+ #else
+ UINT32_HASH(term, HCONST);
+ #endif
+ goto pop_next;
- case MAP_DEF:
- {
- map_t *mp = (map_t *)map_val(term);
- int size = map_get_size(mp);
- Eterm *ks = map_get_keys(mp);
- Eterm *vs = map_get_values(mp);
-
- /* Use a prime with size to remedy some of
- * the {} and <<>> hash problems */
- hash = hash*FUNNY_NUMBER13 + FUNNY_NUMBER14 + size;
- if (size == 0)
- break;
+ default:
+ erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_internal_hash(0x%X, %lu)\n", term, salt);
- /* push values first */
- WSTACK_PUSH3(stack, (UWord)vs, (UWord) size, MAKE_HASH_TERM_ARRAY_OP);
- WSTACK_PUSH3(stack, (UWord)ks, (UWord) size, MAKE_HASH_TERM_ARRAY_OP);
- break;
- }
- case TUPLE_DEF:
- {
- Eterm* ptr = tuple_val(term);
- Uint arity = arityval(*ptr);
+ pop_next:
+ if (ESTACK_ISEMPTY(s)) {
+ DESTROY_ESTACK(s);
- WSTACK_PUSH3(stack, (UWord) arity, (UWord) (ptr+1), (UWord) arity);
- op = MAKE_HASH_TUPLE_OP;
- }/*fall through*/
- case MAKE_HASH_TUPLE_OP:
- case MAKE_HASH_TERM_ARRAY_OP:
- {
- Uint i = (Uint) WSTACK_POP(stack);
- Eterm* ptr = (Eterm*) WSTACK_POP(stack);
- if (i != 0) {
- term = *ptr;
- WSTACK_PUSH3(stack, (UWord)(ptr+1), (UWord) i-1, (UWord) op);
- goto tail_recur;
+ return hash;
}
- if (op == MAKE_HASH_TUPLE_OP) {
- Uint32 arity = (UWord) WSTACK_POP(stack);
- hash = hash*FUNNY_NUMBER9 + arity;
+
+ term = ESTACK_POP(s);
+
+ switch (term) {
+ case HASH_CDR:
+                   CONST_HASH(HCONST_18);   /* Hash CDR in cons cell */
+ goto pop_next;
+ default:
+ break;
}
- break;
}
-
- default:
- erl_exit(1, "Invalid tag in make_broken_hash\n");
- return 0;
- }
- if (WSTACK_ISEMPTY(stack)) break;
- op = (Uint) WSTACK_POP(stack);
+ }
}
- DESTROY_WSTACK(stack);
- return hash;
-
-#undef MAKE_HASH_TUPLE_OP
-#undef MAKE_HASH_TERM_ARRAY_OP
-#undef MAKE_HASH_CDR_PRE_OP
-#undef MAKE_HASH_CDR_POST_OP
+#undef CONST_HASH
+#undef HASH_MAP_TAIL
+#undef HASH_MAP_PAIR
+#undef HASH_CDR
+
+#undef UINT32_HASH_2
+#undef UINT32_HASH
+#undef SINT32_HASH
}
+#undef HCONST
+#undef MIX
+
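A short usage sketch of the salt parameter introduced above (a fragment, not a complete program): distinct salts act as independent hash functions over the same live term, which is the property the hamt comment demands.

    /* term is any live Eterm; h0 and h1 come from independent families */
    Uint32 h0 = make_internal_hash(term, 0);
    Uint32 h1 = make_internal_hash(term, h0);   /* re-salt, e.g. on collision */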
+static Eterm
+do_allocate_logger_message(Eterm gleader, Eterm **hp, ErlOffHeap **ohp,
+ ErlHeapFragment **bp, Process **p, Uint sz)
+{
+ Uint gl_sz;
+ gl_sz = IS_CONST(gleader) ? 0 : size_object(gleader);
+ sz = sz + gl_sz;
+
+ *bp = new_message_buffer(sz);
+ *ohp = &(*bp)->off_heap;
+ *hp = (*bp)->mem;
+
+ return (is_nil(gleader)
+ ? am_noproc
+ : (IS_CONST(gleader)
+ ? gleader
+ : copy_struct(gleader,gl_sz,hp,*ohp)));
+}
+
+static void do_send_logger_message(Eterm *hp, ErlOffHeap *ohp, ErlHeapFragment *bp,
+ Process *p, Eterm message)
+{
+#ifdef HARDDEBUG
+ erts_fprintf(stderr, "%T\n", message);
+#endif
+ {
+ Eterm from = erts_get_current_pid();
+ if (is_not_internal_pid(from))
+ from = NIL;
+ erts_queue_error_logger_message(from, message, bp);
+ }
+}
+
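The size computation in do_send_to_logger below follows the usual heap costs: a cons cell is 2 words and an N-tuple is N+1 words. Worked out as an illustrative helper:

#include <stddef.h>

static size_t logger_msg_heap_words(size_t len)
{
    return len * 2   /* message characters as an integer list   */
         + 2         /* cons wrapping that list into [Msg]      */
         + 3         /* outer 2-tuple {notify, _}               */
         + 4         /* middle 3-tuple {Tag, GL, _}             */
         + 4         /* inner 3-tuple {emulator, Format, Args}  */
         + 8;        /* "~s~n" as a 4-element integer list      */
}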
+/* error_logger !
+ {notify,{info_msg,gleader,{emulator,format,[args]}}} |
+ {notify,{error,gleader,{emulator,format,[args]}}} |
+   {notify,{warning_msg,gleader,{emulator,format,[args]}}} */
static int do_send_to_logger(Eterm tag, Eterm gleader, char *buf, int len)
{
- /* error_logger !
- {notify,{info_msg,gleader,{emulator,"~s~n",[<message as list>]}}} |
- {notify,{error,gleader,{emulator,"~s~n",[<message as list>]}}} |
- {notify,{warning_msg,gleader,{emulator,"~s~n",[<message as list>}]}} */
- Eterm* hp;
Uint sz;
- Uint gl_sz;
Eterm gl;
- Eterm list,plist,format,tuple1,tuple2,tuple3;
- ErlOffHeap *ohp;
+ Eterm list,args,format,tuple1,tuple2,tuple3;
+
+ Eterm *hp = NULL;
+ ErlOffHeap *ohp = NULL;
ErlHeapFragment *bp = NULL;
-#if !defined(ERTS_SMP)
- Process *p;
-#endif
+ Process *p = NULL;
ASSERT(is_atom(tag));
@@ -1780,89 +1977,83 @@ static int do_send_to_logger(Eterm tag, Eterm gleader, char *buf, int len)
return -1;
}
-#ifndef ERTS_SMP
-#ifdef USE_THREADS
- p = NULL;
- if (erts_get_scheduler_data()) /* Must be scheduler thread */
-#endif
- {
- p = erts_whereis_process(NULL, 0, am_error_logger, 0, 0);
- if (p) {
- erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state);
- if (state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS))
- p = NULL;
- }
- }
+ sz = len * 2 /* message list */ + 2 /* cons surrounding message list */
+ + 3 /*outer 2-tuple*/ + 4 /* middle 3-tuple */ + 4 /*inner 3-tuple */
+ + 8 /* "~s~n" */;
- if (!p) {
- /* buf *always* points to a null terminated string */
- erts_fprintf(stderr, "(no error logger present) %T: \"%s\"\n",
- tag, buf);
- return 0;
- }
- /* So we have an error logger, lets build the message */
-#endif
- gl_sz = IS_CONST(gleader) ? 0 : size_object(gleader);
- sz = len * 2 /* message list */+ 2 /* cons surrounding message list */
- + gl_sz +
- 3 /*outer 2-tuple*/ + 4 /* middle 3-tuple */ + 4 /*inner 3-tuple */ +
- 8 /* "~s~n" */;
-
-#ifndef ERTS_SMP
- if (sz <= HeapWordsLeft(p)) {
- ohp = &MSO(p);
- hp = HEAP_TOP(p);
- HEAP_TOP(p) += sz;
- } else {
-#endif
- bp = new_message_buffer(sz);
- ohp = &bp->off_heap;
- hp = bp->mem;
-#ifndef ERTS_SMP
+ /* gleader size is accounted and allocated next */
+ gl = do_allocate_logger_message(gleader, &hp, &ohp, &bp, &p, sz);
+
+ if(is_nil(gl)) {
+ /* buf *always* points to a null terminated string */
+ erts_fprintf(stderr, "(no error logger present) %T: \"%s\"\n",
+ tag, buf);
+ return 0;
}
-#endif
- gl = (is_nil(gleader)
- ? am_noproc
- : (IS_CONST(gleader)
- ? gleader
- : copy_struct(gleader,gl_sz,&hp,ohp)));
+
list = buf_to_intlist(&hp, buf, len, NIL);
- plist = CONS(hp,list,NIL);
+ args = CONS(hp,list,NIL);
hp += 2;
format = buf_to_intlist(&hp, "~s~n", 4, NIL);
- tuple1 = TUPLE3(hp, am_emulator, format, plist);
+ tuple1 = TUPLE3(hp, am_emulator, format, args);
hp += 4;
tuple2 = TUPLE3(hp, tag, gl, tuple1);
hp += 4;
tuple3 = TUPLE2(hp, am_notify, tuple2);
-#ifdef HARDDEBUG
- erts_fprintf(stderr, "%T\n", tuple3);
-#endif
-#ifdef ERTS_SMP
- {
- Eterm from = erts_get_current_pid();
- if (is_not_internal_pid(from))
- from = NIL;
- erts_queue_error_logger_message(from, tuple3, bp);
+
+ do_send_logger_message(hp, ohp, bp, p, tuple3);
+ return 0;
+}
+
+static int do_send_term_to_logger(Eterm tag, Eterm gleader,
+ char *buf, int len, Eterm args)
+{
+ Uint sz;
+ Eterm gl;
+ Uint args_sz;
+ Eterm format,tuple1,tuple2,tuple3;
+
+ Eterm *hp = NULL;
+ ErlOffHeap *ohp = NULL;
+ ErlHeapFragment *bp = NULL;
+ Process *p = NULL;
+
+ ASSERT(is_atom(tag));
+
+ args_sz = size_object(args);
+ sz = len * 2 /* format */ + args_sz
+ + 3 /*outer 2-tuple*/ + 4 /* middle 3-tuple */ + 4 /*inner 3-tuple */;
+
+ /* gleader size is accounted and allocated next */
+ gl = do_allocate_logger_message(gleader, &hp, &ohp, &bp, &p, sz);
+
+ if(is_nil(gl)) {
+ /* buf *always* points to a null terminated string */
+ erts_fprintf(stderr, "(no error logger present) %T: \"%s\" %T\n",
+ tag, buf, args);
+ return 0;
}
-#else
- erts_queue_message(p, NULL /* only used for smp build */, bp, tuple3, NIL
-#ifdef USE_VM_PROBES
- , NIL
-#endif
- );
-#endif
+
+ format = buf_to_intlist(&hp, buf, len, NIL);
+ args = copy_struct(args, args_sz, &hp, ohp);
+ tuple1 = TUPLE3(hp, am_emulator, format, args);
+ hp += 4;
+ tuple2 = TUPLE3(hp, tag, gl, tuple1);
+ hp += 4;
+ tuple3 = TUPLE2(hp, am_notify, tuple2);
+
+ do_send_logger_message(hp, ohp, bp, p, tuple3);
return 0;
}
static ERTS_INLINE int
-send_info_to_logger(Eterm gleader, char *buf, int len)
+send_info_to_logger(Eterm gleader, char *buf, int len)
{
return do_send_to_logger(am_info_msg, gleader, buf, len);
}
static ERTS_INLINE int
-send_warning_to_logger(Eterm gleader, char *buf, int len)
+send_warning_to_logger(Eterm gleader, char *buf, int len)
{
Eterm tag;
switch (erts_error_logger_warnings) {
@@ -1874,11 +2065,17 @@ send_warning_to_logger(Eterm gleader, char *buf, int len)
}
static ERTS_INLINE int
-send_error_to_logger(Eterm gleader, char *buf, int len)
+send_error_to_logger(Eterm gleader, char *buf, int len)
{
return do_send_to_logger(am_error, gleader, buf, len);
}
+static ERTS_INLINE int
+send_error_term_to_logger(Eterm gleader, char *buf, int len, Eterm args)
+{
+ return do_send_term_to_logger(am_error, gleader, buf, len, args);
+}
+
#define LOGGER_DSBUF_INC_SZ 256
static erts_dsprintf_buf_t *
@@ -1954,6 +2151,15 @@ erts_send_error_to_logger(Eterm gleader, erts_dsprintf_buf_t *dsbufp)
}
int
+erts_send_error_term_to_logger(Eterm gleader, erts_dsprintf_buf_t *dsbufp, Eterm args)
+{
+ int res;
+ res = send_error_term_to_logger(gleader, dsbufp->str, dsbufp->str_len, args);
+ destroy_logger_dsbuf(dsbufp);
+ return res;
+}
+
+int
erts_send_info_to_logger_str(Eterm gleader, char *str)
{
return send_info_to_logger(gleader, str, sys_strlen(str));
@@ -2059,11 +2265,7 @@ erts_destroy_tmp_dsbuf(erts_dsprintf_buf_t *dsbufp)
* Test for equality of two terms.
* Returns 0 if not equal, or a non-zero value otherwise.
*/
-#if HALFWORD_HEAP
-int eq_rel(Eterm a, Eterm* a_base, Eterm b, Eterm* b_base)
-#else
int eq(Eterm a, Eterm b)
-#endif
{
DECLARE_WSTACK(stack);
Sint sz;
@@ -2071,18 +2273,18 @@ int eq(Eterm a, Eterm b)
Eterm* bb;
tailrecur:
- if (is_same(a, a_base, b, b_base)) goto pop_next;
+ if (is_same(a, b)) goto pop_next;
tailrecur_ne:
switch (primary_tag(a)) {
case TAG_PRIMARY_LIST:
if (is_list(b)) {
- Eterm* aval = list_val_rel(a, a_base);
- Eterm* bval = list_val_rel(b, b_base);
+ Eterm* aval = list_val(a);
+ Eterm* bval = list_val(b);
while (1) {
Eterm atmp = CAR(aval);
Eterm btmp = CAR(bval);
- if (!is_same(atmp,a_base,btmp,b_base)) {
+ if (!is_same(atmp,btmp)) {
WSTACK_PUSH2(stack,(UWord) CDR(bval),(UWord) CDR(aval));
a = atmp;
b = btmp;
@@ -2090,7 +2292,7 @@ tailrecur_ne:
}
atmp = CDR(aval);
btmp = CDR(bval);
- if (is_same(atmp,a_base,btmp,b_base)) {
+ if (is_same(atmp,btmp)) {
goto pop_next;
}
if (is_not_list(atmp) || is_not_list(btmp)) {
@@ -2098,43 +2300,27 @@ tailrecur_ne:
b = btmp;
goto tailrecur_ne;
}
- aval = list_val_rel(atmp, a_base);
- bval = list_val_rel(btmp, b_base);
+ aval = list_val(atmp);
+ bval = list_val(btmp);
}
}
break; /* not equal */
case TAG_PRIMARY_BOXED:
- {
- Eterm hdr = *boxed_val_rel(a,a_base);
+ {
+ Eterm hdr = *boxed_val(a);
switch (hdr & _TAG_HEADER_MASK) {
case ARITYVAL_SUBTAG:
{
- aa = tuple_val_rel(a, a_base);
- if (!is_boxed(b) || *boxed_val_rel(b,b_base) != *aa)
+ aa = tuple_val(a);
+ if (!is_boxed(b) || *boxed_val(b) != *aa)
goto not_equal;
- bb = tuple_val_rel(b,b_base);
+ bb = tuple_val(b);
if ((sz = arityval(*aa)) == 0) goto pop_next;
++aa;
++bb;
goto term_array;
}
- case MAP_SUBTAG:
- {
- aa = map_val_rel(a, a_base);
- if (!is_boxed(b) || *boxed_val_rel(b,b_base) != *aa)
- goto not_equal;
- bb = map_val_rel(b,b_base);
- sz = map_get_size((map_t*)aa);
-
- if (sz != map_get_size((map_t*)bb)) goto not_equal;
- if (sz == 0) goto pop_next;
-
- aa += 2;
- bb += 2;
- sz += 1; /* increment for tuple-keys */
- goto term_array;
- }
case REFC_BINARY_SUBTAG:
case HEAP_BINARY_SUBTAG:
case SUB_BINARY_SUBTAG:
@@ -2147,17 +2333,17 @@ tailrecur_ne:
Uint b_bitsize;
Uint a_bitoffs;
Uint b_bitoffs;
-
- if (!is_binary_rel(b,b_base)) {
+
+ if (!is_binary(b)) {
goto not_equal;
}
- a_size = binary_size_rel(a,a_base);
- b_size = binary_size_rel(b,b_base);
+ a_size = binary_size(a);
+ b_size = binary_size(b);
if (a_size != b_size) {
goto not_equal;
}
- ERTS_GET_BINARY_BYTES_REL(a, a_ptr, a_bitoffs, a_bitsize, a_base);
- ERTS_GET_BINARY_BYTES_REL(b, b_ptr, b_bitoffs, b_bitsize, b_base);
+ ERTS_GET_BINARY_BYTES(a, a_ptr, a_bitoffs, a_bitsize);
+ ERTS_GET_BINARY_BYTES(b, b_ptr, b_bitoffs, b_bitsize);
if ((a_bitsize | b_bitsize | a_bitoffs | b_bitoffs) == 0) {
if (sys_memcmp(a_ptr, b_ptr, a_size) == 0) goto pop_next;
} else if (a_bitsize == b_bitsize) {
@@ -2168,9 +2354,9 @@ tailrecur_ne:
}
case EXPORT_SUBTAG:
{
- if (is_export_rel(b,b_base)) {
- Export* a_exp = *((Export **) (export_val_rel(a,a_base) + 1));
- Export* b_exp = *((Export **) (export_val_rel(b,b_base) + 1));
+ if (is_export(b)) {
+ Export* a_exp = *((Export **) (export_val(a) + 1));
+ Export* b_exp = *((Export **) (export_val(b) + 1));
if (a_exp == b_exp) goto pop_next;
}
break; /* not equal */
@@ -2179,11 +2365,11 @@ tailrecur_ne:
{
ErlFunThing* f1;
ErlFunThing* f2;
-
- if (!is_fun_rel(b,b_base))
+
+ if (!is_fun(b))
goto not_equal;
- f1 = (ErlFunThing *) fun_val_rel(a,a_base);
- f2 = (ErlFunThing *) fun_val_rel(b,b_base);
+ f1 = (ErlFunThing *) fun_val(a);
+ f2 = (ErlFunThing *) fun_val(b);
if (f1->fe->module != f2->fe->module ||
f1->fe->old_index != f2->fe->old_index ||
f1->fe->old_uniq != f2->fe->old_uniq ||
@@ -2201,16 +2387,16 @@ tailrecur_ne:
ExternalThing *ap;
ExternalThing *bp;
- if(!is_external_rel(b,b_base))
+ if(!is_external(b))
goto not_equal;
- ap = external_thing_ptr_rel(a,a_base);
- bp = external_thing_ptr_rel(b,b_base);
+ ap = external_thing_ptr(a);
+ bp = external_thing_ptr(b);
if(ap->header == bp->header && ap->node == bp->node) {
- ASSERT(1 == external_data_words_rel(a,a_base));
- ASSERT(1 == external_data_words_rel(b,b_base));
-
+ ASSERT(1 == external_data_words(a));
+ ASSERT(1 == external_data_words(b));
+
if (ap->data.ui[0] == bp->data.ui[0]) goto pop_next;
}
break; /* not equal */
@@ -2230,33 +2416,31 @@ tailrecur_ne:
ExternalThing* athing;
ExternalThing* bthing;
- if(!is_external_ref_rel(b,b_base))
+ if(!is_external_ref(b))
goto not_equal;
- athing = external_thing_ptr_rel(a,a_base);
- bthing = external_thing_ptr_rel(b,b_base);
+ athing = external_thing_ptr(a);
+ bthing = external_thing_ptr(b);
if(athing->node != bthing->node)
goto not_equal;
anum = external_thing_ref_numbers(athing);
bnum = external_thing_ref_numbers(bthing);
- alen = external_thing_ref_no_of_numbers(athing);
- blen = external_thing_ref_no_of_numbers(bthing);
+ alen = external_thing_ref_no_numbers(athing);
+ blen = external_thing_ref_no_numbers(bthing);
goto ref_common;
+
case REF_SUBTAG:
- if (!is_internal_ref_rel(b,b_base))
- goto not_equal;
- {
- RefThing* athing = ref_thing_ptr_rel(a,a_base);
- RefThing* bthing = ref_thing_ptr_rel(b,b_base);
- alen = internal_thing_ref_no_of_numbers(athing);
- blen = internal_thing_ref_no_of_numbers(bthing);
- anum = internal_thing_ref_numbers(athing);
- bnum = internal_thing_ref_numbers(bthing);
- }
+ if (!is_internal_ref(b))
+ goto not_equal;
+
+ alen = internal_ref_no_numbers(a);
+ anum = internal_ref_numbers(a);
+ blen = internal_ref_no_numbers(b);
+ bnum = internal_ref_numbers(b);
ref_common:
ASSERT(alen > 0 && blen > 0);
@@ -2267,7 +2451,7 @@ tailrecur_ne:
if (alen == 3 && blen == 3) {
/* Most refs are of length 3 */
if (anum[1] == bnum[1] && anum[2] == bnum[2]) {
- goto pop_next;
+ goto pop_next;
} else {
goto not_equal;
}
@@ -2292,7 +2476,7 @@ tailrecur_ne:
for (i = common_len; i < blen; i++)
if (bnum[i] != 0)
goto not_equal;
- }
+ }
}
goto pop_next;
}
@@ -2300,11 +2484,11 @@ tailrecur_ne:
case NEG_BIG_SUBTAG:
{
int i;
-
- if (!is_big_rel(b,b_base))
+
+ if (!is_big(b))
goto not_equal;
- aa = big_val_rel(a,a_base);
- bb = big_val_rel(b,b_base);
+ aa = big_val(a);
+ bb = big_val(b);
if (*aa != *bb)
goto not_equal;
i = BIG_ARITY(aa);
@@ -2318,14 +2502,54 @@ tailrecur_ne:
{
FloatDef af;
FloatDef bf;
-
- if (is_float_rel(b,b_base)) {
- GET_DOUBLE_REL(a, af, a_base);
- GET_DOUBLE_REL(b, bf, b_base);
+
+ if (is_float(b)) {
+ GET_DOUBLE(a, af);
+ GET_DOUBLE(b, bf);
if (af.fd == bf.fd) goto pop_next;
}
break; /* not equal */
}
+ case MAP_SUBTAG:
+ if (is_flatmap(a)) {
+ aa = flatmap_val(a);
+ if (!is_boxed(b) || *boxed_val(b) != *aa)
+ goto not_equal;
+ bb = flatmap_val(b);
+ sz = flatmap_get_size((flatmap_t*)aa);
+
+ if (sz != flatmap_get_size((flatmap_t*)bb)) goto not_equal;
+ if (sz == 0) goto pop_next;
+
+ aa += 2;
+ bb += 2;
+ sz += 1; /* increment for tuple-keys */
+ goto term_array;
+
+ } else {
+ if (!is_boxed(b) || *boxed_val(b) != hdr)
+ goto not_equal;
+
+ aa = hashmap_val(a) + 1;
+ bb = hashmap_val(b) + 1;
+ switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ aa++; bb++;
+ sz = 16;
+ break;
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ aa++; bb++;
+ case HAMT_SUBTAG_NODE_BITMAP:
+ sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ ASSERT(sz > 0 && sz < 17);
+ break;
+ default:
+ erts_exit(ERTS_ERROR_EXIT, "Unknown hashmap subsubtag\n");
+ }
+ goto term_array;
+ }
+ default:
+        ASSERT(!"Unknown boxed subtag in EQ");
}
break;
}
@@ -2340,7 +2564,7 @@ term_array: /* arrays in 'aa' and 'bb', length in 'sz' */
Eterm* bp = bb;
Sint i = sz;
for (;;) {
- if (!is_same(*ap,a_base,*bp,b_base)) break;
+ if (!is_same(*ap,*bp)) break;
if (--i == 0) goto pop_next;
++ap;
++bp;
@@ -2357,7 +2581,7 @@ term_array: /* arrays in 'aa' and 'bb', length in 'sz' */
}
goto tailrecur_ne;
}
-
+
pop_next:
if (!WSTACK_ISEMPTY(stack)) {
UWord something = WSTACK_POP(stack);
@@ -2418,7 +2642,7 @@ static int cmpbytes(byte *s1, int l1, byte *s2, int l2)
#define float_comp(x,y) (((x)<(y)) ? -1 : (((x)==(y)) ? 0 : 1))
-static int cmp_atoms(Eterm a, Eterm b)
+int erts_cmp_atoms(Eterm a, Eterm b)
{
Atom *aa = atom_tab(atom_val(a));
Atom *bb = atom_tab(atom_val(b));
@@ -2429,27 +2653,50 @@ static int cmp_atoms(Eterm a, Eterm b)
bb->name+3, bb->len-3);
}
-#if !HALFWORD_HEAP
/* cmp(Eterm a, Eterm b)
* For compatibility with HiPE - arith-based compare.
*/
Sint cmp(Eterm a, Eterm b)
{
- return erts_cmp(a, b, 0);
+ return erts_cmp(a, b, 0, 0);
}
-#endif
+
+Sint erts_cmp_compound(Eterm a, Eterm b, int exact, int eq_only);
+
+Sint erts_cmp(Eterm a, Eterm b, int exact, int eq_only)
+{
+ if (is_atom(a) && is_atom(b)) {
+ return erts_cmp_atoms(a, b);
+ } else if (is_both_small(a, b)) {
+ return (signed_val(a) - signed_val(b));
+ } else if (is_float(a) && is_float(b)) {
+ FloatDef af, bf;
+ GET_DOUBLE(a, af);
+ GET_DOUBLE(b, bf);
+ return float_comp(af.fd, bf.fd);
+ }
+ return erts_cmp_compound(a,b,exact,eq_only);
+}
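The exact flag chooses between Erlang's two equalities: with exact = 0 numbers compare arithmetically (1 == 1.0), with exact = 1 mixed integer/float stay apart (1 =:= 1.0 is false). A minimal standalone model of that distinction follows; all names are hypothetical, and the tie-break direction between arithmetically-equal mixed types is illustrative only (the emulator defines its own total order).

#include <stdio.h>

typedef enum { T_INT, T_FLT } num_tag;
typedef struct { num_tag tag; long i; double f; } num_t;

/* Sketch only: mixed-number compare mirroring the exact flag above. */
static int cmp_num(num_t a, num_t b, int exact)
{
    double av = (a.tag == T_INT) ? (double) a.i : a.f;
    double bv = (b.tag == T_INT) ? (double) b.i : b.f;
    if (av != bv)
        return (av < bv) ? -1 : 1;
    if (exact && a.tag != b.tag)        /* equal arith-wise, not =:= */
        return (a.tag == T_FLT) ? -1 : 1;  /* direction is arbitrary here */
    return 0;
}

int main(void)
{
    num_t one  = { T_INT, 1, 0.0 };
    num_t onef = { T_FLT, 0, 1.0 };
    printf("arith: %d  exact: %d\n",
           cmp_num(one, onef, 0), cmp_num(one, onef, 1));  /* 0 and 1 */
    return 0;
}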
+
/* erts_cmp(Eterm a, Eterm b, int exact)
* exact = 1 -> term-based compare
* exact = 0 -> arith-based compare
*/
-#if HALFWORD_HEAP
-Sint erts_cmp_rel_opt(Eterm a, Eterm* a_base, Eterm b, Eterm* b_base, int exact)
-#else
-Sint erts_cmp(Eterm a, Eterm b, int exact)
-#endif
+Sint erts_cmp_compound(Eterm a, Eterm b, int exact, int eq_only)
{
- DECLARE_WSTACK(stack);
+#define PSTACK_TYPE struct erts_cmp_hashmap_state
+ struct erts_cmp_hashmap_state {
+ Sint wstack_rollback;
+ int was_exact;
+ Eterm *ap;
+ Eterm *bp;
+ Eterm min_key;
+ Sint cmp_res; /* result so far -1,0,+1 */
+ };
+ PSTACK_DECLARE(hmap_stack, 1);
+ WSTACK_DECLARE(stack);
+ WSTACK_DECLARE(b_stack); /* only used by hashmaps */
Eterm* aa;
Eterm* bb;
int i;
@@ -2465,6 +2712,26 @@ Sint erts_cmp(Eterm a, Eterm b, int exact)
Uint32 *anum;
Uint32 *bnum;
+/* The WSTACK contains naked Eterms and Operations marked with header-tags */
+#define OP_BITS 4
+#define OP_MASK 0xF
+#define TERM_ARRAY_OP 0
+#define SWITCH_EXACT_OFF_OP 1
+#define HASHMAP_PHASE1_ARE_KEYS_EQUAL 2
+#define HASHMAP_PHASE1_IS_MIN_KEY 3
+#define HASHMAP_PHASE1_CMP_VALUES 4
+#define HASHMAP_PHASE2_ARE_KEYS_EQUAL 5
+#define HASHMAP_PHASE2_IS_MIN_KEY_A 6
+#define HASHMAP_PHASE2_IS_MIN_KEY_B 7
+
+
+#define OP_WORD(OP) (((OP) << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER)
+#define TERM_ARRAY_OP_WORD(SZ) OP_WORD(((SZ) << OP_BITS) | TERM_ARRAY_OP)
+
+#define GET_OP(WORD) (ASSERT(is_header(WORD)), ((WORD) >> _TAG_PRIMARY_SIZE) & OP_MASK)
+#define GET_OP_ARG(WORD) (ASSERT(is_header(WORD)), ((WORD) >> (OP_BITS + _TAG_PRIMARY_SIZE)))
+
+
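The encoding relies on the primary tag: real terms on the wstack never carry TAG_PRIMARY_HEADER, so a header-tagged word is unambiguously an operation. A standalone round-trip of the packing, assuming the conventional 2-bit primary tag with header tag 0:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TAG_PRIMARY_SIZE   2
#define TAG_PRIMARY_HEADER 0x0u
#define OP_BITS            4
#define OP_MASK            0xFu

typedef uintptr_t UWord;

static UWord op_word(UWord op, UWord arg)   /* pack op + argument */
{
    return (((arg << OP_BITS) | op) << TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER;
}
static UWord get_op(UWord w)  { return (w >> TAG_PRIMARY_SIZE) & OP_MASK; }
static UWord get_arg(UWord w) { return w >> (OP_BITS + TAG_PRIMARY_SIZE); }

int main(void)
{
    UWord w = op_word(0 /* TERM_ARRAY_OP */, 17 /* array length */);
    assert((w & 0x3u) == TAG_PRIMARY_HEADER);   /* looks like a header */
    printf("op=%lu arg=%lu\n",
           (unsigned long) get_op(w), (unsigned long) get_arg(w));
    assert(get_op(w) == 0 && get_arg(w) == 17);
    return 0;
}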
#define RETURN_NEQ(cmp) { j=(cmp); ASSERT(j != 0); goto not_equal; }
#define ON_CMP_GOTO(cmp) if ((j=(cmp)) == 0) goto pop_next; else goto not_equal
@@ -2473,15 +2740,17 @@ Sint erts_cmp(Eterm a, Eterm b, int exact)
do { \
if((AN) != (BN)) { \
if((AN)->sysname != (BN)->sysname) \
- RETURN_NEQ(cmp_atoms((AN)->sysname, (BN)->sysname)); \
+ RETURN_NEQ(erts_cmp_atoms((AN)->sysname, (BN)->sysname)); \
ASSERT((AN)->creation != (BN)->creation); \
RETURN_NEQ(((AN)->creation < (BN)->creation) ? -1 : 1); \
} \
} while (0)
+bodyrecur:
+ j = 0;
tailrecur:
- if (is_same(a,a_base,b,b_base)) { /* Equal values or pointers. */
+ if (is_same(a,b)) { /* Equal values or pointers. */
goto pop_next;
}
tailrecur_ne:
@@ -2489,7 +2758,7 @@ tailrecur_ne:
/* deal with majority (?) cases by brute-force */
if (is_atom(a)) {
if (is_atom(b)) {
- ON_CMP_GOTO(cmp_atoms(a, b));
+ ON_CMP_GOTO(erts_cmp_atoms(a, b));
}
} else if (is_both_small(a, b)) {
ON_CMP_GOTO(signed_val(a) - signed_val(b));
@@ -2507,16 +2776,16 @@ tailrecur_ne:
if (is_internal_port(b)) {
bnode = erts_this_node;
bdata = internal_port_data(b);
- } else if (is_external_port_rel(b,b_base)) {
- bnode = external_port_node_rel(b,b_base);
- bdata = external_port_data_rel(b,b_base);
+ } else if (is_external_port(b)) {
+ bnode = external_port_node(b);
+ bdata = external_port_data(b);
} else {
a_tag = PORT_DEF;
goto mixed_types;
}
anode = erts_this_node;
adata = internal_port_data(a);
-
+
port_common:
CMP_NODES(anode, bnode);
ON_CMP_GOTO((Sint)(adata - bdata));
@@ -2525,16 +2794,16 @@ tailrecur_ne:
if (is_internal_pid(b)) {
bnode = erts_this_node;
bdata = internal_pid_data(b);
- } else if (is_external_pid_rel(b,b_base)) {
- bnode = external_pid_node_rel(b,b_base);
- bdata = external_pid_data_rel(b,b_base);
+ } else if (is_external_pid(b)) {
+ bnode = external_pid_node(b);
+ bdata = external_pid_data(b);
} else {
a_tag = PID_DEF;
goto mixed_types;
}
anode = erts_this_node;
adata = internal_pid_data(a);
-
+
pid_common:
if (adata != bdata) {
RETURN_NEQ(adata < bdata ? -1 : 1);
@@ -2560,12 +2829,12 @@ tailrecur_ne:
a_tag = LIST_DEF;
goto mixed_types;
}
- aa = list_val_rel(a,a_base);
- bb = list_val_rel(b,b_base);
+ aa = list_val(a);
+ bb = list_val(b);
while (1) {
Eterm atmp = CAR(aa);
Eterm btmp = CAR(bb);
- if (!is_same(atmp,a_base,btmp,b_base)) {
+ if (!is_same(atmp,btmp)) {
WSTACK_PUSH2(stack,(UWord) CDR(bb),(UWord) CDR(aa));
a = atmp;
b = btmp;
@@ -2573,7 +2842,7 @@ tailrecur_ne:
}
atmp = CDR(aa);
btmp = CDR(bb);
- if (is_same(atmp,a_base,btmp,b_base)) {
+ if (is_same(atmp,btmp)) {
goto pop_next;
}
if (is_not_list(atmp) || is_not_list(btmp)) {
@@ -2581,20 +2850,20 @@ tailrecur_ne:
b = btmp;
goto tailrecur_ne;
}
- aa = list_val_rel(atmp,a_base);
- bb = list_val_rel(btmp,b_base);
+ aa = list_val(atmp);
+ bb = list_val(btmp);
}
case TAG_PRIMARY_BOXED:
{
- Eterm ahdr = *boxed_val_rel(a,a_base);
+ Eterm ahdr = *boxed_val(a);
switch ((ahdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
case (_TAG_HEADER_ARITYVAL >> _TAG_PRIMARY_SIZE):
- if (!is_tuple_rel(b,b_base)) {
+ if (!is_tuple(b)) {
a_tag = TUPLE_DEF;
goto mixed_types;
}
- aa = tuple_val_rel(a,a_base);
- bb = tuple_val_rel(b,b_base);
+ aa = tuple_val(a);
+ bb = tuple_val(b);
/* compare the arities */
i = arityval(ahdr); /* get the arity */
if (i != arityval(*bb)) {
@@ -2606,68 +2875,141 @@ tailrecur_ne:
++aa;
++bb;
goto term_array;
- case (_TAG_HEADER_MAP >> _TAG_PRIMARY_SIZE) :
- if (!is_map_rel(b,b_base)) {
- a_tag = MAP_DEF;
- goto mixed_types;
- }
- aa = (Eterm *)map_val_rel(a,a_base);
- bb = (Eterm *)map_val_rel(b,b_base);
-
- i = map_get_size((map_t*)aa);
- if (i != map_get_size((map_t*)bb)) {
- RETURN_NEQ((int)(i - map_get_size((map_t*)bb)));
- }
- if (i == 0) {
- goto pop_next;
+ case (_TAG_HEADER_MAP >> _TAG_PRIMARY_SIZE) :
+ {
+ struct erts_cmp_hashmap_state* sp;
+ if (is_flatmap_header(ahdr)) {
+ if (!is_flatmap(b)) {
+ if (is_hashmap(b)) {
+ aa = (Eterm *)flatmap_val(a);
+ i = flatmap_get_size((flatmap_t*)aa) - hashmap_size(b);
+ ASSERT(i != 0);
+ RETURN_NEQ(i);
+ }
+ a_tag = MAP_DEF;
+ goto mixed_types;
+ }
+ aa = (Eterm *)flatmap_val(a);
+ bb = (Eterm *)flatmap_val(b);
+
+ i = flatmap_get_size((flatmap_t*)aa);
+ if (i != flatmap_get_size((flatmap_t*)bb)) {
+ RETURN_NEQ((int)(i - flatmap_get_size((flatmap_t*)bb)));
+ }
+ if (i == 0) {
+ goto pop_next;
+ }
+ aa += 2;
+ bb += 2;
+ if (exact) {
+ i += 1; /* increment for tuple-keys */
+ goto term_array;
+ }
+ else {
+ /* Value array */
+ WSTACK_PUSH3(stack,(UWord)(bb+1),(UWord)(aa+1),TERM_ARRAY_OP_WORD(i));
+ /* Switch back from 'exact' key compare */
+ WSTACK_PUSH(stack,OP_WORD(SWITCH_EXACT_OFF_OP));
+ /* Now do 'exact' compare of key tuples */
+ a = *aa;
+ b = *bb;
+ exact = 1;
+ goto bodyrecur;
+ }
+ }
+ if (!is_hashmap(b)) {
+ if (is_flatmap(b)) {
+ bb = (Eterm *)flatmap_val(b);
+ i = hashmap_size(a) - flatmap_get_size((flatmap_t*)bb);
+ ASSERT(i != 0);
+ RETURN_NEQ(i);
+ }
+ a_tag = MAP_DEF;
+ goto mixed_types;
+ }
+ i = hashmap_size(a) - hashmap_size(b);
+ if (i) {
+ RETURN_NEQ(i);
+ }
+ if (hashmap_size(a) == 0) {
+ goto pop_next;
+ }
+
+            /* Hashmap compare strategy:
+                 Phase 1. While keys are identical
+                   Do synchronous stepping through the leaves of both trees
+                   in hash order. Maintain the value compare result of the
+                   minimal key.
+
+                 Phase 2. If a key diff was found in phase 1
+                   Ignore values from now on.
+                   Continue iterating the trees by always advancing the one
+                   lagging behind hash-wise. Identical keys are skipped.
+                   A minimal key can only be a candidate as tie-breaker if
+                   we have passed that hash value in the other tree (which
+                   means the key did not exist in the other tree).
+            */
+
+ sp = PSTACK_PUSH(hmap_stack);
+ hashmap_iterator_init(&stack, a, 0);
+ hashmap_iterator_init(&b_stack, b, 0);
+ sp->ap = hashmap_iterator_next(&stack);
+ sp->bp = hashmap_iterator_next(&b_stack);
+ sp->cmp_res = 0;
+ ASSERT(sp->ap && sp->bp);
+
+ a = CAR(sp->ap);
+ b = CAR(sp->bp);
+ sp->was_exact = exact;
+ exact = 1;
+ WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE1_ARE_KEYS_EQUAL));
+ sp->wstack_rollback = WSTACK_COUNT(stack);
+ goto bodyrecur;
}
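A self-contained model of the two-phase strategy described in the comment above, with hash-sorted arrays standing in for the HAMT leaf iterators. Types and names are hypothetical, and the maps are the same size, as the size check before this point guarantees:

#include <stdio.h>

typedef struct { int hash; int key; int val; } leaf_t;

static int cmp_int(int x, int y) { return (x > y) - (x < y); }

/* n leaves per map, both arrays sorted by hash (ties by key). */
static int cmp_hashmaps(const leaf_t *a, const leaf_t *b, int n)
{
    int i = 0, j = 0, cmp_res = 0, have_min = 0, min_key = 0;

    /* Phase 1: synchronous stepping while keys are identical. */
    while (i < n && a[i].key == b[j].key) {
        int vc = cmp_int(a[i].val, b[j].val);
        if (vc && (!have_min || a[i].key < min_key)) {
            have_min = 1; min_key = a[i].key; cmp_res = vc;
        }
        i++; j++;
    }
    if (i == n)
        return cmp_res;              /* identical key sets */

    /* Phase 2: a key diff was found; values no longer matter. */
    if (a[i].hash < b[j].hash) { min_key = a[i].key; cmp_res = -1; i++; }
    else                       { min_key = b[j].key; cmp_res =  1; j++; }

    while (i < n || j < n) {
        if (i < n && j < n && a[i].key == b[j].key) { i++; j++; continue; }
        if (j == n || (i < n && a[i].hash < b[j].hash)) {
            if (a[i].key < min_key) { min_key = a[i].key; cmp_res = -1; }
            i++;
        } else {
            if (b[j].key < min_key) { min_key = b[j].key; cmp_res = 1; }
            j++;
        }
    }
    return cmp_res;
}

int main(void)
{
    leaf_t a[] = { {1,1,10}, {2,2,20}, {3,3,30} };
    leaf_t b[] = { {1,1,10}, {2,2,99}, {3,3,30} };
    printf("%d\n", cmp_hashmaps(a, b, 3)); /* -1: a's value at key 2 is smaller */
    return 0;
}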
- aa += 2;
- bb += 2;
- i += 1; /* increment for tuple-keys */
- goto term_array;
case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
- if (!is_float_rel(b,b_base)) {
+ if (!is_float(b)) {
a_tag = FLOAT_DEF;
goto mixed_types;
} else {
FloatDef af;
FloatDef bf;
- GET_DOUBLE_REL(a, af, a_base);
- GET_DOUBLE_REL(b, bf, b_base);
+ GET_DOUBLE(a, af);
+ GET_DOUBLE(b, bf);
ON_CMP_GOTO(float_comp(af.fd, bf.fd));
}
case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
- if (!is_big_rel(b,b_base)) {
+ if (!is_big(b)) {
a_tag = BIG_DEF;
goto mixed_types;
}
- ON_CMP_GOTO(big_comp(rterm2wterm(a,a_base), rterm2wterm(b,b_base)));
+ ON_CMP_GOTO(big_comp(a, b));
case (_TAG_HEADER_EXPORT >> _TAG_PRIMARY_SIZE):
- if (!is_export_rel(b,b_base)) {
+ if (!is_export(b)) {
a_tag = EXPORT_DEF;
goto mixed_types;
} else {
- Export* a_exp = *((Export **) (export_val_rel(a,a_base) + 1));
- Export* b_exp = *((Export **) (export_val_rel(b,b_base) + 1));
+ Export* a_exp = *((Export **) (export_val(a) + 1));
+ Export* b_exp = *((Export **) (export_val(b) + 1));
- if ((j = cmp_atoms(a_exp->code[0], b_exp->code[0])) != 0) {
+ if ((j = erts_cmp_atoms(a_exp->info.mfa.module,
+ b_exp->info.mfa.module)) != 0) {
RETURN_NEQ(j);
}
- if ((j = cmp_atoms(a_exp->code[1], b_exp->code[1])) != 0) {
+ if ((j = erts_cmp_atoms(a_exp->info.mfa.function,
+ b_exp->info.mfa.function)) != 0) {
RETURN_NEQ(j);
}
- ON_CMP_GOTO((Sint) a_exp->code[2] - (Sint) b_exp->code[2]);
+ ON_CMP_GOTO((Sint) a_exp->info.mfa.arity - (Sint) b_exp->info.mfa.arity);
}
break;
case (_TAG_HEADER_FUN >> _TAG_PRIMARY_SIZE):
- if (!is_fun_rel(b,b_base)) {
+ if (!is_fun(b)) {
a_tag = FUN_DEF;
goto mixed_types;
} else {
- ErlFunThing* f1 = (ErlFunThing *) fun_val_rel(a,a_base);
- ErlFunThing* f2 = (ErlFunThing *) fun_val_rel(b,b_base);
+ ErlFunThing* f1 = (ErlFunThing *) fun_val(a);
+ ErlFunThing* f2 = (ErlFunThing *) fun_val(b);
Sint diff;
diff = cmpbytes(atom_tab(atom_val(f1->fe->module))->name,
@@ -2688,7 +3030,7 @@ tailrecur_ne:
diff = f1->num_free - f2->num_free;
if (diff != 0) {
RETURN_NEQ(diff);
- }
+ }
i = f1->num_free;
if (i == 0) goto pop_next;
aa = f1->env;
@@ -2699,29 +3041,29 @@ tailrecur_ne:
if (is_internal_pid(b)) {
bnode = erts_this_node;
bdata = internal_pid_data(b);
- } else if (is_external_pid_rel(b,b_base)) {
- bnode = external_pid_node_rel(b,b_base);
- bdata = external_pid_data_rel(b,b_base);
+ } else if (is_external_pid(b)) {
+ bnode = external_pid_node(b);
+ bdata = external_pid_data(b);
} else {
a_tag = EXTERNAL_PID_DEF;
goto mixed_types;
}
- anode = external_pid_node_rel(a,a_base);
- adata = external_pid_data_rel(a,a_base);
+ anode = external_pid_node(a);
+ adata = external_pid_data(a);
goto pid_common;
case (_TAG_HEADER_EXTERNAL_PORT >> _TAG_PRIMARY_SIZE):
if (is_internal_port(b)) {
bnode = erts_this_node;
bdata = internal_port_data(b);
- } else if (is_external_port_rel(b,b_base)) {
- bnode = external_port_node_rel(b,b_base);
- bdata = external_port_data_rel(b,b_base);
+ } else if (is_external_port(b)) {
+ bnode = external_port_node(b);
+ bdata = external_port_data(b);
} else {
a_tag = EXTERNAL_PORT_DEF;
goto mixed_types;
}
- anode = external_port_node_rel(a,a_base);
- adata = external_port_data_rel(a,a_base);
+ anode = external_port_node(a);
+ adata = external_port_data(a);
goto port_common;
case (_TAG_HEADER_REF >> _TAG_PRIMARY_SIZE):
/*
@@ -2729,31 +3071,26 @@ tailrecur_ne:
* (32-bit words), *not* ref data words.
*/
-
- if (is_internal_ref_rel(b,b_base)) {
- RefThing* bthing = ref_thing_ptr_rel(b,b_base);
+ if (is_internal_ref(b)) {
bnode = erts_this_node;
- bnum = internal_thing_ref_numbers(bthing);
- blen = internal_thing_ref_no_of_numbers(bthing);
- } else if(is_external_ref_rel(b,b_base)) {
- ExternalThing* bthing = external_thing_ptr_rel(b,b_base);
+ blen = internal_ref_no_numbers(b);
+ bnum = internal_ref_numbers(b);
+ } else if(is_external_ref(b)) {
+ ExternalThing* bthing = external_thing_ptr(b);
bnode = bthing->node;
bnum = external_thing_ref_numbers(bthing);
- blen = external_thing_ref_no_of_numbers(bthing);
+ blen = external_thing_ref_no_numbers(bthing);
} else {
a_tag = REF_DEF;
goto mixed_types;
}
- {
- RefThing* athing = ref_thing_ptr_rel(a,a_base);
- anode = erts_this_node;
- anum = internal_thing_ref_numbers(athing);
- alen = internal_thing_ref_no_of_numbers(athing);
- }
-
+ anode = erts_this_node;
+ alen = internal_ref_no_numbers(a);
+ anum = internal_ref_numbers(a);
+
ref_common:
CMP_NODES(anode, bnode);
-
+
ASSERT(alen > 0 && blen > 0);
if (alen != blen) {
if (alen > blen) {
@@ -2771,43 +3108,42 @@ tailrecur_ne:
} while (alen < blen);
}
}
-
+
ASSERT(alen == blen);
for (i = (Sint) alen - 1; i >= 0; i--)
if (anum[i] != bnum[i])
RETURN_NEQ((Sint32) (anum[i] - bnum[i]));
goto pop_next;
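The padded compare above in standalone form: ref numbers are arrays of 32-bit words (least significant first), and a shorter ref equals a longer one whose extra high words are all zero. Hypothetical helper; sign conventions follow the surrounding code:

#include <stdio.h>
#include <stdint.h>

static int cmp_ref_numbers(const uint32_t *a, int alen,
                           const uint32_t *b, int blen)
{
    int i;
    if (alen > blen) {
        for (i = alen - 1; i >= blen; i--)
            if (a[i] != 0)
                return 1;           /* extra nonzero high word: a > b */
        alen = blen;
    } else {
        for (i = blen - 1; i >= alen; i--)
            if (b[i] != 0)
                return -1;
    }
    for (i = alen - 1; i >= 0; i--)
        if (a[i] != b[i])
            return a[i] > b[i] ? 1 : -1;
    return 0;
}

int main(void)
{
    uint32_t x[3] = { 7, 9, 0 };
    uint32_t y[2] = { 7, 9 };
    printf("%d\n", cmp_ref_numbers(x, 3, y, 2));  /* 0: equal */
    return 0;
}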
case (_TAG_HEADER_EXTERNAL_REF >> _TAG_PRIMARY_SIZE):
- if (is_internal_ref_rel(b,b_base)) {
- RefThing* bthing = ref_thing_ptr_rel(b,b_base);
+ if (is_internal_ref(b)) {
bnode = erts_this_node;
- bnum = internal_thing_ref_numbers(bthing);
- blen = internal_thing_ref_no_of_numbers(bthing);
- } else if (is_external_ref_rel(b,b_base)) {
- ExternalThing* bthing = external_thing_ptr_rel(b,b_base);
+ blen = internal_ref_no_numbers(b);
+ bnum = internal_ref_numbers(b);
+ } else if (is_external_ref(b)) {
+ ExternalThing* bthing = external_thing_ptr(b);
bnode = bthing->node;
bnum = external_thing_ref_numbers(bthing);
- blen = external_thing_ref_no_of_numbers(bthing);
+ blen = external_thing_ref_no_numbers(bthing);
} else {
a_tag = EXTERNAL_REF_DEF;
goto mixed_types;
}
{
- ExternalThing* athing = external_thing_ptr_rel(a,a_base);
+ ExternalThing* athing = external_thing_ptr(a);
anode = athing->node;
anum = external_thing_ref_numbers(athing);
- alen = external_thing_ref_no_of_numbers(athing);
+ alen = external_thing_ref_no_numbers(athing);
}
goto ref_common;
default:
/* Must be a binary */
- ASSERT(is_binary_rel(a,a_base));
- if (!is_binary_rel(b,b_base)) {
+ ASSERT(is_binary(a));
+ if (!is_binary(b)) {
a_tag = BINARY_DEF;
goto mixed_types;
} else {
- Uint a_size = binary_size_rel(a,a_base);
- Uint b_size = binary_size_rel(b,b_base);
+ Uint a_size = binary_size(a);
+ Uint b_size = binary_size(b);
Uint a_bitsize;
Uint b_bitsize;
Uint a_bitoffs;
@@ -2816,8 +3152,8 @@ tailrecur_ne:
int cmp;
byte* a_ptr;
byte* b_ptr;
- ERTS_GET_BINARY_BYTES_REL(a, a_ptr, a_bitoffs, a_bitsize, a_base);
- ERTS_GET_BINARY_BYTES_REL(b, b_ptr, b_bitoffs, b_bitsize, b_base);
+ ERTS_GET_BINARY_BYTES(a, a_ptr, a_bitoffs, a_bitsize);
+ ERTS_GET_BINARY_BYTES(b, b_ptr, b_bitoffs, b_bitsize);
if ((a_bitsize | b_bitsize | a_bitoffs | b_bitoffs) == 0) {
min_size = (a_size < b_size) ? a_size : b_size;
if ((cmp = sys_memcmp(a_ptr, b_ptr, min_size)) != 0) {
@@ -2848,13 +3184,8 @@ tailrecur_ne:
{
FloatDef f1, f2;
Eterm big;
-#if HALFWORD_HEAP
- Wterm aw = is_immed(a) ? a : rterm2wterm(a,a_base);
- Wterm bw = is_immed(b) ? b : rterm2wterm(b,b_base);
-#else
Eterm aw = a;
Eterm bw = b;
-#endif
#define MAX_LOSSLESS_FLOAT ((double)((1LL << 53) - 2))
#define MIN_LOSSLESS_FLOAT ((double)(((1LL << 53) - 2)*-1))
#define BIG_ARITY_FLOAT_MAX (1024 / D_EXP) /* arity of max float as a bignum */
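Why MAX_LOSSLESS_FLOAT: a double represents every integer exactly only up to 2^53, so past that bound the mixed compare converts the float to a bignum instead of the integer to a double. A quick standalone check of the boundary:

#include <stdio.h>

int main(void)
{
    long long max_lossless = (1LL << 53) - 2;
    printf("%lld -> %.1f (exact)\n", max_lossless, (double) max_lossless);
    printf("%lld -> %.1f (exact)\n", max_lossless + 1,
           (double) (max_lossless + 1));
    /* 2^53 + 1 is the first integer a double cannot hold: */
    printf("%lld -> %.1f (rounded!)\n", (1LL << 53) + 1,
           (double) ((1LL << 53) + 1));
    return 0;
}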
@@ -2926,7 +3257,7 @@ tailrecur_ne:
}
} else {
big = double_to_big(f2.fd, big_buf, sizeof(big_buf)/sizeof(Eterm));
- j = big_comp(aw, rterm2wterm(big,big_buf));
+ j = big_comp(aw, big);
}
if (_NUMBER_CODE(a_tag, b_tag) == FLOAT_BIG) {
j = -j;
@@ -2974,9 +3305,9 @@ term_array: /* arrays in 'aa' and 'bb', length in 'i' */
while (--i) {
a = *aa++;
b = *bb++;
- if (!is_same(a,a_base, b,b_base)) {
+ if (!is_same(a, b)) {
if (is_atom(a) && is_atom(b)) {
- if ((j = cmp_atoms(a, b)) != 0) {
+ if ((j = erts_cmp_atoms(a, b)) != 0) {
goto not_equal;
}
} else if (is_both_small(a, b)) {
@@ -2984,35 +3315,191 @@ term_array: /* arrays in 'aa' and 'bb', length in 'i' */
goto not_equal;
}
} else {
- /* (ab)Use TAG_PRIMARY_HEADER to recognize a term_array */
- WSTACK_PUSH3(stack, i, (UWord)bb, (UWord)aa | TAG_PRIMARY_HEADER);
+ WSTACK_PUSH3(stack, (UWord)bb, (UWord)aa, TERM_ARRAY_OP_WORD(i));
goto tailrecur_ne;
}
}
}
a = *aa;
b = *bb;
- goto tailrecur;
-
+ goto tailrecur;
+
pop_next:
if (!WSTACK_ISEMPTY(stack)) {
UWord something = WSTACK_POP(stack);
- if (primary_tag((Eterm) something) == TAG_PRIMARY_HEADER) { /* a term_array */
- aa = (Eterm*) something;
- bb = (Eterm*) WSTACK_POP(stack);
- i = WSTACK_POP(stack);
- goto term_array;
+ struct erts_cmp_hashmap_state* sp;
+ if (primary_tag((Eterm) something) == TAG_PRIMARY_HEADER) { /* an operation */
+ switch (GET_OP(something)) {
+ case TERM_ARRAY_OP:
+ i = GET_OP_ARG(something);
+ aa = (Eterm*)WSTACK_POP(stack);
+ bb = (Eterm*) WSTACK_POP(stack);
+ goto term_array;
+
+ case SWITCH_EXACT_OFF_OP:
+ /* Done with exact compare of map keys, switch back */
+ ASSERT(exact);
+ exact = 0;
+ goto pop_next;
+
+ case HASHMAP_PHASE1_ARE_KEYS_EQUAL: {
+ sp = PSTACK_TOP(hmap_stack);
+ if (j) {
+ /* Key diff found, enter phase 2 */
+ if (hashmap_key_hash_cmp(sp->ap, sp->bp) < 0) {
+ sp->min_key = CAR(sp->ap);
+ sp->cmp_res = -1;
+ sp->ap = hashmap_iterator_next(&stack);
+ }
+ else {
+ sp->min_key = CAR(sp->bp);
+ sp->cmp_res = 1;
+ sp->bp = hashmap_iterator_next(&b_stack);
+ }
+ exact = 1; /* only exact key compares in phase 2 */
+ goto case_HASHMAP_PHASE2_LOOP;
+ }
+
+ /* No key diff found so far, compare values if min key */
+
+ if (sp->cmp_res) {
+ a = CAR(sp->ap);
+ b = sp->min_key;
+ exact = 1;
+ WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE1_IS_MIN_KEY));
+ sp->wstack_rollback = WSTACK_COUNT(stack);
+ goto bodyrecur;
+ }
+ /* no min key-value found yet */
+ a = CDR(sp->ap);
+ b = CDR(sp->bp);
+ exact = sp->was_exact;
+ WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE1_CMP_VALUES));
+ sp->wstack_rollback = WSTACK_COUNT(stack);
+ goto bodyrecur;
+ }
+ case HASHMAP_PHASE1_IS_MIN_KEY:
+ sp = PSTACK_TOP(hmap_stack);
+ if (j < 0) {
+ a = CDR(sp->ap);
+ b = CDR(sp->bp);
+ exact = sp->was_exact;
+ WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE1_CMP_VALUES));
+ sp->wstack_rollback = WSTACK_COUNT(stack);
+ goto bodyrecur;
+ }
+ goto case_HASHMAP_PHASE1_LOOP;
+
+ case HASHMAP_PHASE1_CMP_VALUES:
+ sp = PSTACK_TOP(hmap_stack);
+ if (j) {
+ sp->cmp_res = j;
+ sp->min_key = CAR(sp->ap);
+ }
+ case_HASHMAP_PHASE1_LOOP:
+ sp->ap = hashmap_iterator_next(&stack);
+ sp->bp = hashmap_iterator_next(&b_stack);
+ if (!sp->ap) {
+ /* end of maps with identical keys */
+ ASSERT(!sp->bp);
+ j = sp->cmp_res;
+ exact = sp->was_exact;
+ (void) PSTACK_POP(hmap_stack);
+ ON_CMP_GOTO(j);
+ }
+ a = CAR(sp->ap);
+ b = CAR(sp->bp);
+ exact = 1;
+ WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE1_ARE_KEYS_EQUAL));
+ sp->wstack_rollback = WSTACK_COUNT(stack);
+ goto bodyrecur;
+
+ case_HASHMAP_PHASE2_LOOP:
+ if (sp->ap && sp->bp) {
+ a = CAR(sp->ap);
+ b = CAR(sp->bp);
+ ASSERT(exact);
+ WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE2_ARE_KEYS_EQUAL));
+ sp->wstack_rollback = WSTACK_COUNT(stack);
+ goto bodyrecur;
+ }
+ goto case_HASHMAP_PHASE2_NEXT_STEP;
+
+ case HASHMAP_PHASE2_ARE_KEYS_EQUAL:
+ sp = PSTACK_TOP(hmap_stack);
+ if (j == 0) {
+ /* keys are equal, skip them */
+ sp->ap = hashmap_iterator_next(&stack);
+ sp->bp = hashmap_iterator_next(&b_stack);
+ goto case_HASHMAP_PHASE2_LOOP;
+ }
+ /* fall through */
+ case_HASHMAP_PHASE2_NEXT_STEP:
+ if (sp->ap || sp->bp) {
+ if (hashmap_key_hash_cmp(sp->ap, sp->bp) < 0) {
+ ASSERT(sp->ap);
+ a = CAR(sp->ap);
+ b = sp->min_key;
+ ASSERT(exact);
+ WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE2_IS_MIN_KEY_A));
+ }
+ else { /* hash_cmp > 0 */
+ ASSERT(sp->bp);
+ a = CAR(sp->bp);
+ b = sp->min_key;
+ ASSERT(exact);
+ WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE2_IS_MIN_KEY_B));
+ }
+ sp->wstack_rollback = WSTACK_COUNT(stack);
+ goto bodyrecur;
+ }
+ /* End of both maps */
+ j = sp->cmp_res;
+ exact = sp->was_exact;
+ (void) PSTACK_POP(hmap_stack);
+ ON_CMP_GOTO(j);
+
+ case HASHMAP_PHASE2_IS_MIN_KEY_A:
+ sp = PSTACK_TOP(hmap_stack);
+ if (j < 0) {
+ sp->min_key = CAR(sp->ap);
+ sp->cmp_res = -1;
+ }
+ sp->ap = hashmap_iterator_next(&stack);
+ goto case_HASHMAP_PHASE2_LOOP;
+
+ case HASHMAP_PHASE2_IS_MIN_KEY_B:
+ sp = PSTACK_TOP(hmap_stack);
+ if (j < 0) {
+ sp->min_key = CAR(sp->bp);
+ sp->cmp_res = 1;
+ }
+ sp->bp = hashmap_iterator_next(&b_stack);
+ goto case_HASHMAP_PHASE2_LOOP;
+
+ default:
+ ASSERT(!"Invalid cmp op");
+ } /* switch */
}
a = (Eterm) something;
b = (Eterm) WSTACK_POP(stack);
goto tailrecur;
}
- DESTROY_WSTACK(stack);
+ ASSERT(PSTACK_IS_EMPTY(hmap_stack));
+ PSTACK_DESTROY(hmap_stack);
+ WSTACK_DESTROY(stack);
+ WSTACK_DESTROY(b_stack);
return 0;
not_equal:
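+    /* A nested compare came back non-equal. If we are inside a hashmap
+     * compare and a total order is wanted (!eq_only), the sub-result
+     * must not abort the whole comparison: discard whatever the nested
+     * compare left on the wstack and resume at the pending operation
+     * with j holding the sub-result. */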
- DESTROY_WSTACK(stack);
+ if (!PSTACK_IS_EMPTY(hmap_stack) && !eq_only) {
+ WSTACK_ROLLBACK(stack, PSTACK_TOP(hmap_stack)->wstack_rollback);
+ goto pop_next;
+ }
+ PSTACK_DESTROY(hmap_stack);
+ WSTACK_DESTROY(stack);
+ WSTACK_DESTROY(b_stack);
return j;
#undef CMP_NODES
@@ -3022,40 +3509,41 @@ not_equal:
Eterm
store_external_or_ref_(Uint **hpp, ErlOffHeap* oh, Eterm ns)
{
+ struct erl_off_heap_header *ohhp;
Uint i;
Uint size;
- Uint *from_hp;
- Uint *to_hp = *hpp;
+ Eterm *from_hp;
+ Eterm *to_hp = *hpp;
ASSERT(is_external(ns) || is_internal_ref(ns));
- if(is_external(ns)) {
- from_hp = external_val(ns);
- size = thing_arityval(*from_hp) + 1;
- *hpp += size;
-
- for(i = 0; i < size; i++)
- to_hp[i] = from_hp[i];
-
- erts_refc_inc(&((ExternalThing *) to_hp)->node->refc, 2);
-
- ((struct erl_off_heap_header*) to_hp)->next = oh->first;
- oh->first = (struct erl_off_heap_header*) to_hp;
-
- return make_external(to_hp);
- }
-
- /* Internal ref */
- from_hp = internal_ref_val(ns);
-
+ from_hp = boxed_val(ns);
size = thing_arityval(*from_hp) + 1;
-
*hpp += size;
for(i = 0; i < size; i++)
to_hp[i] = from_hp[i];
- return make_internal_ref(to_hp);
+ if (is_external_header(*from_hp)) {
+ ExternalThing *etp = (ExternalThing *) from_hp;
+ ASSERT(is_external(ns));
+ erts_refc_inc(&etp->node->refc, 2);
+ }
+ else if (is_ordinary_ref_thing(from_hp))
+ return make_internal_ref(to_hp);
+ else {
+ ErtsMRefThing *mreft = (ErtsMRefThing *) from_hp;
+ ErtsMagicBinary *mb = mreft->mb;
+ ASSERT(is_magic_ref_thing(from_hp));
+ erts_refc_inc(&mb->intern.refc, 2);
+ OH_OVERHEAD(oh, mb->orig_size / sizeof(Eterm));
+ }
+
+ ohhp = (struct erl_off_heap_header*) to_hp;
+ ohhp->next = oh->first;
+ oh->first = ohhp;
+
+ return make_boxed(to_hp);
}
Eterm
@@ -3072,7 +3560,7 @@ store_external_or_ref_in_proc_(Process *proc, Eterm ns)
return store_external_or_ref_(&hp, &MSO(proc), ns);
}
-void bin_write(int to, void *to_arg, byte* buf, size_t sz)
+void bin_write(fmtfn_t to, void *to_arg, byte* buf, size_t sz)
{
size_t i;
@@ -3089,31 +3577,149 @@ void bin_write(int to, void *to_arg, byte* buf, size_t sz)
}
/* Fill buf with the contents of bytelist list
- return number of chars in list or -1 for error */
-
-int
-intlist_to_buf(Eterm list, char *buf, int len)
+ * return number of chars in list
+ * or -1 for type error
+ * or -2 for not enough buffer space (buffer contains truncated result)
+ */
+Sint
+intlist_to_buf(Eterm list, char *buf, Sint len)
{
Eterm* listptr;
- int sz = 0;
+ Sint sz = 0;
- if (is_nil(list))
+ if (is_nil(list))
return 0;
if (is_not_list(list))
return -1;
listptr = list_val(list);
while (sz < len) {
- if (!is_byte(*listptr))
+ if (!is_byte(*listptr))
return -1;
buf[sz++] = unsigned_val(*listptr);
if (is_nil(*(listptr + 1)))
return(sz);
- if (is_not_list(*(listptr + 1)))
+ if (is_not_list(*(listptr + 1)))
return -1;
listptr = list_val(*(listptr + 1));
}
- return -1; /* not enough space */
+ return -2; /* not enough space */
+}
+
+/** @brief Fill buf with the UTF8 contents of the unicode list
+ * @param len Max number of characters to write.
+ * @param written NULL or bytes written.
+ * @return 0 ok,
+ * -1 type error,
+ * -2 list too long, only \c len characters written
+ */
+int
+erts_unicode_list_to_buf(Eterm list, byte *buf, Sint len, Sint* written)
+{
+ Eterm* listptr;
+ Sint sz = 0;
+ Sint val;
+ int res;
+
+ while (1) {
+ if (is_nil(list)) {
+ res = 0;
+ break;
+ }
+ if (is_not_list(list)) {
+ res = -1;
+ break;
+ }
+ listptr = list_val(list);
+
+ if (len-- <= 0) {
+ res = -2;
+ break;
+ }
+
+ if (is_not_small(CAR(listptr))) {
+ res = -1;
+ break;
+ }
+ val = signed_val(CAR(listptr));
+ if (0 <= val && val < 0x80) {
+ buf[sz] = val;
+ sz++;
+ } else if (val < 0x800) {
+ buf[sz+0] = 0xC0 | (val >> 6);
+ buf[sz+1] = 0x80 | (val & 0x3F);
+ sz += 2;
+ } else if (val < 0x10000UL) {
+ if (0xD800 <= val && val <= 0xDFFF) {
+ res = -1;
+ break;
+ }
+ buf[sz+0] = 0xE0 | (val >> 12);
+ buf[sz+1] = 0x80 | ((val >> 6) & 0x3F);
+ buf[sz+2] = 0x80 | (val & 0x3F);
+ sz += 3;
+ } else if (val < 0x110000) {
+ buf[sz+0] = 0xF0 | (val >> 18);
+ buf[sz+1] = 0x80 | ((val >> 12) & 0x3F);
+ buf[sz+2] = 0x80 | ((val >> 6) & 0x3F);
+ buf[sz+3] = 0x80 | (val & 0x3F);
+ sz += 4;
+ } else {
+ res = -1;
+ break;
+ }
+ list = CDR(listptr);
+ }
+
+ if (written)
+ *written = sz;
+ return res;
+}
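The branch ladder above is plain UTF-8: 1-4 bytes per code point, surrogates 0xD800-0xDFFF rejected, maximum U+10FFFF. The same ranges as a standalone single-code-point encoder (hypothetical helper, not part of ERTS):

#include <stdio.h>

static int utf8_encode(long cp, unsigned char out[4])
{
    if (cp < 0)
        return -1;
    if (cp < 0x80) {
        out[0] = (unsigned char) cp;
        return 1;
    }
    if (cp < 0x800) {
        out[0] = (unsigned char) (0xC0 | (cp >> 6));
        out[1] = (unsigned char) (0x80 | (cp & 0x3F));
        return 2;
    }
    if (cp < 0x10000) {
        if (0xD800 <= cp && cp <= 0xDFFF)
            return -1;                        /* surrogate */
        out[0] = (unsigned char) (0xE0 | (cp >> 12));
        out[1] = (unsigned char) (0x80 | ((cp >> 6) & 0x3F));
        out[2] = (unsigned char) (0x80 | (cp & 0x3F));
        return 3;
    }
    if (cp < 0x110000) {
        out[0] = (unsigned char) (0xF0 | (cp >> 18));
        out[1] = (unsigned char) (0x80 | ((cp >> 12) & 0x3F));
        out[2] = (unsigned char) (0x80 | ((cp >> 6) & 0x3F));
        out[3] = (unsigned char) (0x80 | (cp & 0x3F));
        return 4;
    }
    return -1;
}

int main(void)
{
    unsigned char b[4];
    int i, n = utf8_encode(0x1F600, b);
    for (i = 0; i < n; i++)
        printf("%02X ", b[i]);
    putchar('\n');                            /* F0 9F 98 80 */
    return 0;
}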
+
+Sint
+erts_unicode_list_to_buf_len(Eterm list)
+{
+ Eterm* listptr;
+ Sint sz = 0;
+
+ if (is_nil(list)) {
+ return 0;
+ }
+ if (is_not_list(list)) {
+ return -1;
+ }
+ listptr = list_val(list);
+
+ while (1) {
+ Sint val;
+
+ if (is_not_small(CAR(listptr))) {
+ return -1;
+ }
+ val = signed_val(CAR(listptr));
+ if (0 <= val && val < 0x80) {
+ sz++;
+ } else if (val < 0x800) {
+ sz += 2;
+ } else if (val < 0x10000UL) {
+ if (0xD800 <= val && val <= 0xDFFF) {
+ return -1;
+ }
+ sz += 3;
+ } else if (val < 0x110000) {
+ sz += 4;
+ } else {
+ return -1;
+ }
+ list = CDR(listptr);
+ if (is_nil(list)) {
+ return sz;
+ }
+ if (is_not_list(list)) {
+ return -1;
+ }
+ listptr = list_val(list);
+ }
}
/*
@@ -3197,106 +3803,303 @@ buf_to_intlist(Eterm** hpp, const char *buf, size_t len, Eterm tail)
**
*/
-ErlDrvSizeT erts_iolist_to_buf(Eterm obj, char* buf, ErlDrvSizeT alloced_len)
+typedef enum {
+ ERTS_IL2B_BCOPY_OK,
+ ERTS_IL2B_BCOPY_YIELD,
+ ERTS_IL2B_BCOPY_OVERFLOW,
+ ERTS_IL2B_BCOPY_TYPE_ERROR
+} ErtsIL2BBCopyRes;
+
+static ErtsIL2BBCopyRes
+iolist_to_buf_bcopy(ErtsIOList2BufState *state, Eterm obj, int *yield_countp);
+
+static ERTS_INLINE ErlDrvSizeT
+iolist_to_buf(const int yield_support,
+ ErtsIOList2BufState *state,
+ Eterm obj,
+ char* buf,
+ ErlDrvSizeT alloced_len)
{
- ErlDrvSizeT len = (ErlDrvSizeT) alloced_len;
- Eterm* objp;
+#undef IOLIST_TO_BUF_BCOPY
+#define IOLIST_TO_BUF_BCOPY(CONSP) \
+do { \
+ size_t size = binary_size(obj); \
+ if (size > 0) { \
+ Uint bitsize; \
+ byte* bptr; \
+ Uint bitoffs; \
+ Uint num_bits; \
+ if (yield_support) { \
+ size_t max_size = ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT; \
+ if (yield_count > 0) \
+ max_size *= yield_count+1; \
+ if (size > max_size) { \
+ state->objp = CONSP; \
+ goto L_bcopy_yield; \
+ } \
+ if (size >= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT) { \
+ int cost = (int) size; \
+ cost /= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT; \
+ yield_count -= cost; \
+ } \
+ } \
+ if (len < size) \
+ goto L_overflow; \
+ ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize); \
+ if (bitsize != 0) \
+ goto L_type_error; \
+ num_bits = 8*size; \
+ copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits); \
+ buf += size; \
+ len -= size; \
+ } \
+} while (0)
+
+ ErlDrvSizeT res, len;
+ Eterm* objp = NULL;
+ int init_yield_count;
+ int yield_count;
DECLARE_ESTACK(s);
- goto L_again;
-
- while (!ESTACK_ISEMPTY(s)) {
- obj = ESTACK_POP(s);
- L_again:
- if (is_list(obj)) {
- L_iter_list:
- objp = list_val(obj);
- obj = CAR(objp);
- if (is_byte(obj)) {
- if (len == 0) {
- goto L_overflow;
- }
- *buf++ = unsigned_val(obj);
- len--;
- } else if (is_binary(obj)) {
- byte* bptr;
- size_t size = binary_size(obj);
- Uint bitsize;
- Uint bitoffs;
- Uint num_bits;
-
- if (len < size) {
+
+ len = (ErlDrvSizeT) alloced_len;
+
+ if (!yield_support) {
+ yield_count = init_yield_count = 0; /* Shut up faulty warning... >:-( */
+ goto L_again;
+ }
+ else {
+
+ if (state->iolist.reds_left <= 0)
+ return ERTS_IOLIST_TO_BUF_YIELD;
+
+ ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
+ init_yield_count = (ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED
+ * state->iolist.reds_left);
+ yield_count = init_yield_count;
+
+ if (!state->iolist.estack.start)
+ goto L_again;
+ else {
+ int chk_stack;
+ /* Restart; restore state... */
+ ESTACK_RESTORE(s, &state->iolist.estack);
+
+ if (!state->bcopy.bptr)
+ chk_stack = 0;
+ else {
+ chk_stack = 1;
+ switch (iolist_to_buf_bcopy(state, THE_NON_VALUE, &yield_count)) {
+ case ERTS_IL2B_BCOPY_OK:
+ break;
+ case ERTS_IL2B_BCOPY_YIELD:
+ BUMP_ALL_REDS(state->iolist.c_p);
+ state->iolist.reds_left = 0;
+ ESTACK_SAVE(s, &state->iolist.estack);
+ return ERTS_IOLIST_TO_BUF_YIELD;
+ case ERTS_IL2B_BCOPY_OVERFLOW:
goto L_overflow;
- }
- ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
- if (bitsize != 0) {
+ case ERTS_IL2B_BCOPY_TYPE_ERROR:
goto L_type_error;
}
- num_bits = 8*size;
- copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits);
- buf += size;
- len -= size;
- } else if (is_list(obj)) {
- ESTACK_PUSH(s, CDR(objp));
- goto L_iter_list; /* on head */
- } else if (is_not_nil(obj)) {
- goto L_type_error;
}
- obj = CDR(objp);
- if (is_list(obj)) {
- goto L_iter_list; /* on tail */
- } else if (is_binary(obj)) {
- byte* bptr;
- size_t size = binary_size(obj);
- Uint bitsize;
- Uint bitoffs;
- Uint num_bits;
- if (len < size) {
- goto L_overflow;
+ obj = state->iolist.obj;
+ buf = state->buf;
+ len = state->len;
+ objp = state->objp;
+ state->objp = NULL;
+ if (objp)
+ goto L_tail;
+ if (!chk_stack)
+ goto L_again;
+ /* check stack */
+ }
+ }
+
+ while (!ESTACK_ISEMPTY(s)) {
+ obj = ESTACK_POP(s);
+ L_again:
+ if (is_list(obj)) {
+ while (1) { /* Tail loop */
+ while (1) { /* Head loop */
+ if (yield_support && --yield_count <= 0)
+ goto L_yield;
+ objp = list_val(obj);
+ obj = CAR(objp);
+ if (is_byte(obj)) {
+ if (len == 0) {
+ goto L_overflow;
+ }
+ *buf++ = unsigned_val(obj);
+ len--;
+ } else if (is_binary(obj)) {
+ IOLIST_TO_BUF_BCOPY(objp);
+ } else if (is_list(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ continue; /* Head loop */
+ } else if (is_not_nil(obj)) {
+ goto L_type_error;
+ }
+ break;
}
- ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
- if (bitsize != 0) {
+
+ L_tail:
+
+ obj = CDR(objp);
+
+ if (is_list(obj)) {
+ continue; /* Tail loop */
+ } else if (is_binary(obj)) {
+ IOLIST_TO_BUF_BCOPY(NULL);
+ } else if (is_not_nil(obj)) {
goto L_type_error;
}
- num_bits = 8*size;
- copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits);
- buf += size;
- len -= size;
- } else if (is_not_nil(obj)) {
- goto L_type_error;
+ break;
}
} else if (is_binary(obj)) {
- byte* bptr;
- size_t size = binary_size(obj);
- Uint bitsize;
- Uint bitoffs;
- Uint num_bits;
- if (len < size) {
- goto L_overflow;
- }
- ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
- if (bitsize != 0) {
- goto L_type_error;
- }
- num_bits = 8*size;
- copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits);
- buf += size;
- len -= size;
+ IOLIST_TO_BUF_BCOPY(NULL);
} else if (is_not_nil(obj)) {
goto L_type_error;
- }
+ } else if (yield_support && --yield_count <= 0)
+ goto L_yield;
}
-
+
+ res = len;
+
+ L_return:
+
DESTROY_ESTACK(s);
- return len;
+
+ if (yield_support) {
+ int reds;
+ CLEAR_SAVED_ESTACK(&state->iolist.estack);
+ reds = ((init_yield_count - yield_count - 1)
+ / ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED) + 1;
+ BUMP_REDS(state->iolist.c_p, reds);
+ state->iolist.reds_left -= reds;
+ if (state->iolist.reds_left < 0)
+ state->iolist.reds_left = 0;
+ }
+
+
+ return res;
L_type_error:
- DESTROY_ESTACK(s);
- return ERTS_IOLIST_TO_BUF_TYPE_ERROR;
+ res = ERTS_IOLIST_TO_BUF_TYPE_ERROR;
+ goto L_return;
L_overflow:
- DESTROY_ESTACK(s);
- return ERTS_IOLIST_TO_BUF_OVERFLOW;
+ res = ERTS_IOLIST_TO_BUF_OVERFLOW;
+ goto L_return;
+
+ L_bcopy_yield:
+
+ state->buf = buf;
+ state->len = len;
+
+ switch (iolist_to_buf_bcopy(state, obj, &yield_count)) {
+ case ERTS_IL2B_BCOPY_OK:
+ ERTS_INTERNAL_ERROR("Missing yield");
+ case ERTS_IL2B_BCOPY_YIELD:
+ BUMP_ALL_REDS(state->iolist.c_p);
+ state->iolist.reds_left = 0;
+ ESTACK_SAVE(s, &state->iolist.estack);
+ return ERTS_IOLIST_TO_BUF_YIELD;
+ case ERTS_IL2B_BCOPY_OVERFLOW:
+ goto L_overflow;
+ case ERTS_IL2B_BCOPY_TYPE_ERROR:
+ goto L_type_error;
+ }
+
+ L_yield:
+
+ BUMP_ALL_REDS(state->iolist.c_p);
+ state->iolist.reds_left = 0;
+ state->iolist.obj = obj;
+ state->buf = buf;
+ state->len = len;
+ ESTACK_SAVE(s, &state->iolist.estack);
+ return ERTS_IOLIST_TO_BUF_YIELD;
+
+#undef IOLIST_TO_BUF_BCOPY
+}
+
+static ErtsIL2BBCopyRes
+iolist_to_buf_bcopy(ErtsIOList2BufState *state, Eterm obj, int *yield_countp)
+{
+ ErtsIL2BBCopyRes res;
+ char *buf = state->buf;
+ ErlDrvSizeT len = state->len;
+ byte* bptr;
+ size_t size;
+ size_t max_size;
+ Uint bitoffs;
+ Uint num_bits;
+ int yield_count = *yield_countp;
+
+ if (state->bcopy.bptr) {
+ bptr = state->bcopy.bptr;
+ size = state->bcopy.size;
+ bitoffs = state->bcopy.bitoffs;
+ state->bcopy.bptr = NULL;
+ }
+ else {
+ Uint bitsize;
+
+ ASSERT(is_binary(obj));
+
+ size = binary_size(obj);
+ if (size <= 0)
+ return ERTS_IL2B_BCOPY_OK;
+
+ if (len < size)
+ return ERTS_IL2B_BCOPY_OVERFLOW;
+
+ ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
+ if (bitsize != 0)
+ return ERTS_IL2B_BCOPY_TYPE_ERROR;
+ }
+
+ ASSERT(size > 0);
+ max_size = (size_t) ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT;
+ if (yield_count > 0)
+ max_size *= (size_t) (yield_count+1);
+
+ if (size <= max_size) {
+ if (size >= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT) {
+ int cost = (int) size;
+ cost /= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT;
+ yield_count -= cost;
+ }
+ res = ERTS_IL2B_BCOPY_OK;
+ }
+ else {
+ ASSERT(0 < max_size && max_size < size);
+ yield_count = 0;
+ state->bcopy.bptr = bptr + max_size;
+ state->bcopy.bitoffs = bitoffs;
+ state->bcopy.size = size - max_size;
+ size = max_size;
+ res = ERTS_IL2B_BCOPY_YIELD;
+ }
+
+ num_bits = 8*size;
+ copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits);
+ state->buf += size;
+ state->len -= size;
+ *yield_countp = yield_count;
+
+ return res;
+}
+
+ErlDrvSizeT erts_iolist_to_buf_yielding(ErtsIOList2BufState *state)
+{
+ return iolist_to_buf(1, state, state->iolist.obj, state->buf, state->len);
+}
+
+ErlDrvSizeT erts_iolist_to_buf(Eterm obj, char* buf, ErlDrvSizeT alloced_len)
+{
+ return iolist_to_buf(0, NULL, obj, buf, alloced_len);
}
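Reduction accounting in the yielding variant works in "yield counts": on entry reds_left is scaled up by ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED, each large binary copy is charged size / ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT counts, and on return the consumed counts are scaled back into reductions. A worked example with made-up constants (the real values live in the ERTS headers):

#include <stdio.h>

#define YIELD_COUNT_PER_RED    8   /* hypothetical stand-in */
#define BYTES_PER_YIELD_COUNT 32   /* hypothetical stand-in */

int main(void)
{
    int reds_left = 100;
    int init_yield_count = YIELD_COUNT_PER_RED * reds_left;  /* 800 */
    int yield_count = init_yield_count;

    /* copying a 4000-byte binary is charged size/BYTES_PER_YIELD_COUNT */
    int cost = 4000 / BYTES_PER_YIELD_COUNT;                 /* 125 */
    yield_count -= cost;                                     /* 675 left */

    /* on return, consumed counts are charged back as reductions */
    int reds = ((init_yield_count - yield_count - 1)
                / YIELD_COUNT_PER_RED) + 1;                  /* 16 */
    printf("charged %d of %d reductions\n", reds, reds_left);
    return 0;
}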
/*
@@ -3307,11 +4110,32 @@ ErlDrvSizeT erts_iolist_to_buf(Eterm obj, char* buf, ErlDrvSizeT alloced_len)
* Any input term error detected in erts_iolist_to_buf should also
* be detected in this function!
*/
-int erts_iolist_size(Eterm obj, ErlDrvSizeT* sizep)
+
+static ERTS_INLINE int
+iolist_size(const int yield_support, ErtsIOListState *state, Eterm obj, ErlDrvSizeT* sizep)
{
+ int res, init_yield_count, yield_count;
Eterm* objp;
- Uint size = 0; /* Intentionally Uint due to halfword heap */
+ Uint size = (Uint) *sizep;
DECLARE_ESTACK(s);
+
+ if (!yield_support)
+ yield_count = init_yield_count = 0; /* Shut up faulty warning... >:-( */
+ else {
+ if (state->reds_left <= 0)
+ return ERTS_IOLIST_YIELD;
+ ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
+ init_yield_count = ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED;
+ init_yield_count *= state->reds_left;
+ yield_count = init_yield_count;
+ if (state->estack.start) {
+ /* Restart; restore state... */
+ ESTACK_RESTORE(s, &state->estack);
+ size = (Uint) state->size;
+ obj = state->obj;
+ }
+ }
+
goto L_again;
#define SAFE_ADD(Var, Val) \
@@ -3327,58 +4151,109 @@ int erts_iolist_size(Eterm obj, ErlDrvSizeT* sizep)
obj = ESTACK_POP(s);
L_again:
if (is_list(obj)) {
- L_iter_list:
- objp = list_val(obj);
- /* Head */
- obj = CAR(objp);
- if (is_byte(obj)) {
- size++;
- if (size == 0) {
- goto L_overflow_error;
+ while (1) { /* Tail loop */
+ while (1) { /* Head loop */
+ if (yield_support && --yield_count <= 0)
+ goto L_yield;
+ objp = list_val(obj);
+ /* Head */
+ obj = CAR(objp);
+ if (is_byte(obj)) {
+ size++;
+ if (size == 0) {
+ goto L_overflow_error;
+ }
+ } else if (is_binary(obj) && binary_bitsize(obj) == 0) {
+ SAFE_ADD(size, binary_size(obj));
+ } else if (is_list(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ continue; /* Head loop */
+ } else if (is_not_nil(obj)) {
+ goto L_type_error;
+ }
+ break;
}
- } else if (is_binary(obj) && binary_bitsize(obj) == 0) {
- SAFE_ADD(size, binary_size(obj));
- } else if (is_list(obj)) {
- ESTACK_PUSH(s, CDR(objp));
- goto L_iter_list; /* on head */
- } else if (is_not_nil(obj)) {
- goto L_type_error;
+ /* Tail */
+ obj = CDR(objp);
+ if (is_list(obj))
+ continue; /* Tail loop */
+ else if (is_binary(obj) && binary_bitsize(obj) == 0) {
+ SAFE_ADD(size, binary_size(obj));
+ } else if (is_not_nil(obj)) {
+ goto L_type_error;
+ }
+ break;
}
- /* Tail */
- obj = CDR(objp);
- if (is_list(obj))
- goto L_iter_list; /* on tail */
- else if (is_binary(obj) && binary_bitsize(obj) == 0) {
+ } else {
+ if (yield_support && --yield_count <= 0)
+ goto L_yield;
+ if (is_binary(obj) && binary_bitsize(obj) == 0) { /* Tail was binary */
SAFE_ADD(size, binary_size(obj));
} else if (is_not_nil(obj)) {
goto L_type_error;
}
- } else if (is_binary(obj) && binary_bitsize(obj) == 0) { /* Tail was binary */
- SAFE_ADD(size, binary_size(obj));
- } else if (is_not_nil(obj)) {
- goto L_type_error;
}
}
#undef SAFE_ADD
- DESTROY_ESTACK(s);
*sizep = (ErlDrvSizeT) size;
- return ERTS_IOLIST_OK;
- L_overflow_error:
+ res = ERTS_IOLIST_OK;
+
+ L_return:
+
DESTROY_ESTACK(s);
- return ERTS_IOLIST_OVERFLOW;
+
+ if (yield_support) {
+ int yc, reds;
+ CLEAR_SAVED_ESTACK(&state->estack);
+ yc = init_yield_count - yield_count;
+ reds = ((yc - 1) / ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED) + 1;
+ BUMP_REDS(state->c_p, reds);
+ state->reds_left -= reds;
+ state->size = (ErlDrvSizeT) size;
+ state->have_size = 1;
+ }
+
+ return res;
+
+ L_overflow_error:
+ res = ERTS_IOLIST_OVERFLOW;
+ size = 0;
+ goto L_return;
L_type_error:
- DESTROY_ESTACK(s);
- return ERTS_IOLIST_TYPE;
+ res = ERTS_IOLIST_TYPE;
+ size = 0;
+ goto L_return;
+
+ L_yield:
+ BUMP_ALL_REDS(state->c_p);
+ state->reds_left = 0;
+ state->size = size;
+ state->obj = obj;
+ ESTACK_SAVE(s, &state->estack);
+ return ERTS_IOLIST_YIELD;
}
-/* return 0 if item is not a non-empty flat list of bytes */
-int
+int erts_iolist_size_yielding(ErtsIOListState *state)
+{
+ ErlDrvSizeT size = state->size;
+ return iolist_size(1, state, state->obj, &size);
+}
+
+int erts_iolist_size(Eterm obj, ErlDrvSizeT* sizep)
+{
+ *sizep = 0;
+ return iolist_size(0, NULL, obj, sizep);
+}
+
+/* return 0 if item is not a non-empty flat list of bytes;
+   otherwise return the nonzero length of the list */
+Sint
is_string(Eterm list)
{
- int len = 0;
+ Sint len = 0;
while(is_list(list)) {
Eterm* consp = list_val(list);
@@ -3394,145 +4269,6 @@ is_string(Eterm list)
return 0;
}
-#ifdef ERTS_SMP
-
-/*
- * Process and Port timers in smp case
- */
-
-ERTS_SCHED_PREF_PRE_ALLOC_IMPL(ptimer_pre, ErtsSmpPTimer, 1000)
-
-#define ERTS_PTMR_FLGS_ALLCD_SIZE \
- 2
-#define ERTS_PTMR_FLGS_ALLCD_MASK \
- ((((Uint32) 1) << ERTS_PTMR_FLGS_ALLCD_SIZE) - 1)
-
-#define ERTS_PTMR_FLGS_PREALLCD ((Uint32) 1)
-#define ERTS_PTMR_FLGS_SLALLCD ((Uint32) 2)
-#define ERTS_PTMR_FLGS_LLALLCD ((Uint32) 3)
-#define ERTS_PTMR_FLG_CANCELLED (((Uint32) 1) << (ERTS_PTMR_FLGS_ALLCD_SIZE+0))
-
-static void
-init_ptimers(void)
-{
- init_ptimer_pre_alloc();
-}
-
-static ERTS_INLINE void
-free_ptimer(ErtsSmpPTimer *ptimer)
-{
- switch (ptimer->timer.flags & ERTS_PTMR_FLGS_ALLCD_MASK) {
- case ERTS_PTMR_FLGS_PREALLCD:
- (void) ptimer_pre_free(ptimer);
- break;
- case ERTS_PTMR_FLGS_SLALLCD:
- erts_free(ERTS_ALC_T_SL_PTIMER, (void *) ptimer);
- break;
- case ERTS_PTMR_FLGS_LLALLCD:
- erts_free(ERTS_ALC_T_LL_PTIMER, (void *) ptimer);
- break;
- default:
- erl_exit(ERTS_ABORT_EXIT,
- "Internal error: Bad ptimer alloc type\n");
- break;
- }
-}
-
-/* Callback for process timeout cancelled */
-static void
-ptimer_cancelled(ErtsSmpPTimer *ptimer)
-{
- free_ptimer(ptimer);
-}
-
-/* Callback for process timeout */
-static void
-ptimer_timeout(ErtsSmpPTimer *ptimer)
-{
- if (is_internal_pid(ptimer->timer.id)) {
- Process *p;
- p = erts_pid2proc_opt(NULL,
- 0,
- ptimer->timer.id,
- ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS,
- ERTS_P2P_FLG_ALLOW_OTHER_X);
- if (p) {
- if (!ERTS_PROC_IS_EXITING(p)
- && !(ptimer->timer.flags & ERTS_PTMR_FLG_CANCELLED)) {
- ASSERT(*ptimer->timer.timer_ref == ptimer);
- *ptimer->timer.timer_ref = NULL;
- (*ptimer->timer.timeout_func)(p);
- }
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
- }
- }
- else {
- Port *p;
- ASSERT(is_internal_port(ptimer->timer.id));
- p = erts_id2port_sflgs(ptimer->timer.id,
- NULL,
- 0,
- ERTS_PORT_SFLGS_DEAD);
- if (p) {
- if (!(ptimer->timer.flags & ERTS_PTMR_FLG_CANCELLED)) {
- ASSERT(*ptimer->timer.timer_ref == ptimer);
- *ptimer->timer.timer_ref = NULL;
- (*ptimer->timer.timeout_func)(p);
- }
- erts_port_release(p);
- }
- }
- free_ptimer(ptimer);
-}
-
-void
-erts_create_smp_ptimer(ErtsSmpPTimer **timer_ref,
- Eterm id,
- ErlTimeoutProc timeout_func,
- Uint timeout)
-{
- ErtsSmpPTimer *res = ptimer_pre_alloc();
- if (res)
- res->timer.flags = ERTS_PTMR_FLGS_PREALLCD;
- else {
- if (timeout < ERTS_ALC_MIN_LONG_LIVED_TIME) {
- res = erts_alloc(ERTS_ALC_T_SL_PTIMER, sizeof(ErtsSmpPTimer));
- res->timer.flags = ERTS_PTMR_FLGS_SLALLCD;
- }
- else {
- res = erts_alloc(ERTS_ALC_T_LL_PTIMER, sizeof(ErtsSmpPTimer));
- res->timer.flags = ERTS_PTMR_FLGS_LLALLCD;
- }
- }
- res->timer.timeout_func = timeout_func;
- res->timer.timer_ref = timer_ref;
- res->timer.id = id;
- res->timer.tm.active = 0; /* MUST be initalized */
-
- ASSERT(!*timer_ref);
-
- *timer_ref = res;
-
- erts_set_timer(&res->timer.tm,
- (ErlTimeoutProc) ptimer_timeout,
- (ErlCancelProc) ptimer_cancelled,
- (void*) res,
- timeout);
-}
-
-void
-erts_cancel_smp_ptimer(ErtsSmpPTimer *ptimer)
-{
- if (ptimer) {
- ASSERT(*ptimer->timer.timer_ref == ptimer);
- *ptimer->timer.timer_ref = NULL;
- ptimer->timer.flags |= ERTS_PTMR_FLG_CANCELLED;
- erts_cancel_timer(&ptimer->timer.tm);
- }
-}
-
-#endif
-
static int trim_threshold;
static int top_pad;
static int mmap_threshold;
@@ -3542,9 +4278,7 @@ Uint tot_bin_allocated;
void erts_init_utils(void)
{
-#ifdef ERTS_SMP
- init_ptimers();
-#endif
+
}
void erts_init_utils_mem(void)
@@ -3680,6 +4414,9 @@ erts_save_emu_args(int argc, char **argv)
size += sz+1;
}
ptr = (char *) malloc(size);
+ if (!ptr) {
+ ERTS_INTERNAL_ERROR("malloc failed to allocate memory!");
+ }
#ifdef DEBUG
end_ptr = ptr + size;
#endif
@@ -3944,89 +4681,19 @@ void erts_silence_warn_unused_result(long unused)
void
erts_interval_init(erts_interval_t *icp)
{
-#ifdef ARCH_64
- erts_atomic_init_nob(&icp->counter.atomic, 0);
-#else
- erts_dw_aint_t dw;
-#ifdef ETHR_SU_DW_NAINT_T__
- dw.dw_sint = 0;
-#else
- dw.sint[ERTS_DW_AINT_HIGH_WORD] = 0;
- dw.sint[ERTS_DW_AINT_LOW_WORD] = 0;
-#endif
- erts_dw_atomic_init_nob(&icp->counter.atomic, &dw);
-
-#endif
-#ifdef DEBUG
- icp->smp_api = 0;
-#endif
-}
-
-void
-erts_smp_interval_init(erts_interval_t *icp)
-{
-#ifdef ERTS_SMP
- erts_interval_init(icp);
-#else
- icp->counter.not_atomic = 0;
-#endif
-#ifdef DEBUG
- icp->smp_api = 1;
-#endif
+ erts_atomic64_init_nob(&icp->counter.atomic, 0);
}
static ERTS_INLINE Uint64
step_interval_nob(erts_interval_t *icp)
{
-#ifdef ARCH_64
- return (Uint64) erts_atomic_inc_read_nob(&icp->counter.atomic);
-#else
- erts_dw_aint_t exp;
-
- erts_dw_atomic_read_nob(&icp->counter.atomic, &exp);
- while (1) {
- erts_dw_aint_t new = exp;
-
-#ifdef ETHR_SU_DW_NAINT_T__
- new.dw_sint++;
-#else
- new.sint[ERTS_DW_AINT_LOW_WORD]++;
- if (new.sint[ERTS_DW_AINT_LOW_WORD] == 0)
- new.sint[ERTS_DW_AINT_HIGH_WORD]++;
-#endif
-
- if (erts_dw_atomic_cmpxchg_nob(&icp->counter.atomic, &new, &exp))
- return erts_interval_dw_aint_to_val__(&new);
-
- }
-#endif
+ return (Uint64) erts_atomic64_inc_read_nob(&icp->counter.atomic);
}
static ERTS_INLINE Uint64
step_interval_relb(erts_interval_t *icp)
{
-#ifdef ARCH_64
- return (Uint64) erts_atomic_inc_read_relb(&icp->counter.atomic);
-#else
- erts_dw_aint_t exp;
-
- erts_dw_atomic_read_nob(&icp->counter.atomic, &exp);
- while (1) {
- erts_dw_aint_t new = exp;
-
-#ifdef ETHR_SU_DW_NAINT_T__
- new.dw_sint++;
-#else
- new.sint[ERTS_DW_AINT_LOW_WORD]++;
- if (new.sint[ERTS_DW_AINT_LOW_WORD] == 0)
- new.sint[ERTS_DW_AINT_HIGH_WORD]++;
-#endif
-
- if (erts_dw_atomic_cmpxchg_relb(&icp->counter.atomic, &new, &exp))
- return erts_interval_dw_aint_to_val__(&new);
-
- }
-#endif
+ return (Uint64) erts_atomic64_inc_read_relb(&icp->counter.atomic);
}
@@ -4034,38 +4701,10 @@ static ERTS_INLINE Uint64
ensure_later_interval_nob(erts_interval_t *icp, Uint64 ic)
{
Uint64 curr_ic;
-#ifdef ARCH_64
- curr_ic = (Uint64) erts_atomic_read_nob(&icp->counter.atomic);
+ curr_ic = (Uint64) erts_atomic64_read_nob(&icp->counter.atomic);
if (curr_ic > ic)
return curr_ic;
- return (Uint64) erts_atomic_inc_read_nob(&icp->counter.atomic);
-#else
- erts_dw_aint_t exp;
-
- erts_dw_atomic_read_nob(&icp->counter.atomic, &exp);
- curr_ic = erts_interval_dw_aint_to_val__(&exp);
- if (curr_ic > ic)
- return curr_ic;
-
- while (1) {
- erts_dw_aint_t new = exp;
-
-#ifdef ETHR_SU_DW_NAINT_T__
- new.dw_sint++;
-#else
- new.sint[ERTS_DW_AINT_LOW_WORD]++;
- if (new.sint[ERTS_DW_AINT_LOW_WORD] == 0)
- new.sint[ERTS_DW_AINT_HIGH_WORD]++;
-#endif
-
- if (erts_dw_atomic_cmpxchg_nob(&icp->counter.atomic, &new, &exp))
- return erts_interval_dw_aint_to_val__(&new);
-
- curr_ic = erts_interval_dw_aint_to_val__(&exp);
- if (curr_ic > ic)
- return curr_ic;
- }
-#endif
+ return (Uint64) erts_atomic64_inc_read_nob(&icp->counter.atomic);
}
@@ -4073,126 +4712,44 @@ static ERTS_INLINE Uint64
ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic)
{
Uint64 curr_ic;
-#ifdef ARCH_64
- curr_ic = (Uint64) erts_atomic_read_acqb(&icp->counter.atomic);
- if (curr_ic > ic)
- return curr_ic;
- return (Uint64) erts_atomic_inc_read_acqb(&icp->counter.atomic);
-#else
- erts_dw_aint_t exp;
-
- erts_dw_atomic_read_acqb(&icp->counter.atomic, &exp);
- curr_ic = erts_interval_dw_aint_to_val__(&exp);
+ curr_ic = (Uint64) erts_atomic64_read_acqb(&icp->counter.atomic);
if (curr_ic > ic)
return curr_ic;
-
- while (1) {
- erts_dw_aint_t new = exp;
-
-#ifdef ETHR_SU_DW_NAINT_T__
- new.dw_sint++;
-#else
- new.sint[ERTS_DW_AINT_LOW_WORD]++;
- if (new.sint[ERTS_DW_AINT_LOW_WORD] == 0)
- new.sint[ERTS_DW_AINT_HIGH_WORD]++;
-#endif
-
- if (erts_dw_atomic_cmpxchg_acqb(&icp->counter.atomic, &new, &exp))
- return erts_interval_dw_aint_to_val__(&new);
-
- curr_ic = erts_interval_dw_aint_to_val__(&exp);
- if (curr_ic > ic)
- return curr_ic;
- }
-#endif
+ return (Uint64) erts_atomic64_inc_read_acqb(&icp->counter.atomic);
}
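ensure_later_interval returns a value guaranteed to be later than ic: the current counter if it has already passed ic, otherwise a fresh increment. A minimal model with C11 atomics (sketch only; the real code uses the erts atomic64 API and its nob/acqb memory-order variants):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t counter;

static uint64_t ensure_later(uint64_t ic)
{
    uint64_t curr = atomic_load(&counter);
    if (curr > ic)
        return curr;                          /* already later */
    return atomic_fetch_add(&counter, 1) + 1; /* bump past ic */
}

int main(void)
{
    uint64_t a = ensure_later(0);   /* bumps: 1 */
    uint64_t b = ensure_later(a);   /* bumps: 2 */
    uint64_t c = ensure_later(0);   /* already later: 2 */
    printf("%llu %llu %llu\n", (unsigned long long) a,
           (unsigned long long) b, (unsigned long long) c);
    return 0;
}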
Uint64
erts_step_interval_nob(erts_interval_t *icp)
{
- ASSERT(!icp->smp_api);
return step_interval_nob(icp);
}
Uint64
erts_step_interval_relb(erts_interval_t *icp)
{
- ASSERT(!icp->smp_api);
return step_interval_relb(icp);
}
Uint64
-erts_smp_step_interval_nob(erts_interval_t *icp)
-{
- ASSERT(icp->smp_api);
-#ifdef ERTS_SMP
- return step_interval_nob(icp);
-#else
- return ++icp->counter.not_atomic;
-#endif
-}
-
-Uint64
-erts_smp_step_interval_relb(erts_interval_t *icp)
-{
- ASSERT(icp->smp_api);
-#ifdef ERTS_SMP
- return step_interval_relb(icp);
-#else
- return ++icp->counter.not_atomic;
-#endif
-}
-
-Uint64
erts_ensure_later_interval_nob(erts_interval_t *icp, Uint64 ic)
{
- ASSERT(!icp->smp_api);
return ensure_later_interval_nob(icp, ic);
}
Uint64
erts_ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic)
{
- ASSERT(!icp->smp_api);
return ensure_later_interval_acqb(icp, ic);
}
-Uint64
-erts_smp_ensure_later_interval_nob(erts_interval_t *icp, Uint64 ic)
-{
- ASSERT(icp->smp_api);
-#ifdef ERTS_SMP
- return ensure_later_interval_nob(icp, ic);
-#else
- if (icp->counter.not_atomic > ic)
- return icp->counter.not_atomic;
- else
- return ++icp->counter.not_atomic;
-#endif
-}
-
-Uint64
-erts_smp_ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic)
-{
- ASSERT(icp->smp_api);
-#ifdef ERTS_SMP
- return ensure_later_interval_acqb(icp, ic);
-#else
- if (icp->counter.not_atomic > ic)
- return icp->counter.not_atomic;
- else
- return ++icp->counter.not_atomic;
-#endif
-}
-
/*
* A millisecond timestamp without time correction where there's no hrtime
* - for tracing on "long" things...
*/
Uint64 erts_timestamp_millis(void)
{
-#ifdef HAVE_GETHRTIME
- return (Uint64) (sys_gethrtime() / 1000000);
+#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+ return ERTS_MONOTONIC_TO_MSEC(erts_os_monotonic_time());
#else
Uint64 res;
SysTimeval tv;
@@ -4203,6 +4760,53 @@ Uint64 erts_timestamp_millis(void)
#endif
}
+void *
+erts_calc_stacklimit(char *prev_c, UWord stacksize)
+{
+ /*
+     * We *don't* want this function inlined, so it is
+     * risky to call it from another function in utils.c,
+     * where the compiler might inline it.
+ */
+
+ UWord pagesize = erts_sys_get_page_size();
+ char c;
+ char *start;
+ if (&c > prev_c) {
+ start = (char *) ((((UWord) prev_c) / pagesize) * pagesize);
+ return (void *) (start + stacksize);
+ }
+ else {
+ start = (char *) (((((UWord) prev_c) - 1) / pagesize + 1) * pagesize);
+ return (void *) (start - stacksize);
+ }
+}
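The computation rounds the sampled address of the caller's frame to a page boundary and offsets it by the stack size, in the stack's growth direction (decided by the &c > prev_c test above). The same arithmetic standalone, with fake values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uintptr_t pagesize  = 4096;        /* stand-in for erts_sys_get_page_size() */
    uintptr_t prev_c    = 0x7ffe1234;  /* fake address of the outer frame's local */
    uintptr_t stacksize = 64 * 1024;

    /* upward-growing stack (&c > prev_c): round down, limit above */
    uintptr_t up_limit   = (prev_c / pagesize) * pagesize + stacksize;
    /* downward-growing stack: round up, limit below */
    uintptr_t down_limit = ((prev_c - 1) / pagesize + 1) * pagesize - stacksize;

    printf("up-growth limit:   %#lx\n", (unsigned long) up_limit);
    printf("down-growth limit: %#lx\n", (unsigned long) down_limit);
    return 0;
}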
+
+/*
+ * erts_check_below_limit() and
+ * erts_check_above_limit() are put
+ * in utils.c in order to prevent
+ * inlining.
+ */
+
+int
+erts_check_below_limit(char *ptr, char *limit)
+{
+ return ptr < limit;
+}
+
+int
+erts_check_above_limit(char *ptr, char *limit)
+{
+ return ptr > limit;
+}
+
+void *
+erts_ptr_id(void *ptr)
+{
+ return ptr;
+}
+
#ifdef DEBUG
/*
* Handy functions when using a debugger - don't use in the code!
@@ -4232,7 +4836,7 @@ Process *p;
if(p)
print_process_info(ERTS_PRINT_STDERR, NULL, p);
}
-
+
void ppi(Eterm pid)
{
pp(erts_proc_lookup(pid));
@@ -4258,5 +4862,3 @@ ps(Process* p, Eterm* stop)
}
}
#endif
-
-
diff --git a/erts/emulator/beam/version.h b/erts/emulator/beam/version.h
index 3952c751b7..0fa775fe8c 100644
--- a/erts/emulator/beam/version.h
+++ b/erts/emulator/beam/version.h
@@ -1,18 +1,19 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2016. All Rights Reserved.
*
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
* %CopyrightEnd%
*/