Diffstat (limited to 'erts/emulator/beam')
-rw-r--r--  erts/emulator/beam/atom.c | 2
-rw-r--r--  erts/emulator/beam/atom.names | 5
-rw-r--r--  erts/emulator/beam/beam_bif_load.c | 59
-rw-r--r--  erts/emulator/beam/beam_emu.c | 39
-rw-r--r--  erts/emulator/beam/bif.c | 192
-rw-r--r--  erts/emulator/beam/bif.tab | 41
-rw-r--r--  erts/emulator/beam/big.c | 141
-rw-r--r--  erts/emulator/beam/big.h | 14
-rw-r--r--  erts/emulator/beam/binary.c | 112
-rw-r--r--  erts/emulator/beam/break.c | 41
-rw-r--r--  erts/emulator/beam/copy.c | 10
-rw-r--r--  erts/emulator/beam/erl_alloc.c | 3
-rw-r--r--  erts/emulator/beam/erl_alloc.types | 6
-rw-r--r--  erts/emulator/beam/erl_alloc_util.c | 4
-rw-r--r--  erts/emulator/beam/erl_ao_firstfit_alloc.c | 89
-rw-r--r--  erts/emulator/beam/erl_async.c | 2
-rw-r--r--  erts/emulator/beam/erl_bestfit_alloc.c | 45
-rw-r--r--  erts/emulator/beam/erl_bif_atomics.c | 256
-rw-r--r--  erts/emulator/beam/erl_bif_binary.c | 18
-rw-r--r--  erts/emulator/beam/erl_bif_counters.c | 255
-rw-r--r--  erts/emulator/beam/erl_bif_info.c | 5
-rw-r--r--  erts/emulator/beam/erl_bif_lists.c | 838
-rw-r--r--  erts/emulator/beam/erl_bif_persistent.c | 1000
-rw-r--r--  erts/emulator/beam/erl_bif_port.c | 16
-rw-r--r--  erts/emulator/beam/erl_bif_unique.h | 6
-rw-r--r--  erts/emulator/beam/erl_db_tree.c | 12
-rw-r--r--  erts/emulator/beam/erl_db_util.c | 2
-rw-r--r--  erts/emulator/beam/erl_dirty_bif.tab | 2
-rw-r--r--  erts/emulator/beam/erl_gc.c | 57
-rw-r--r--  erts/emulator/beam/erl_hl_timer.c | 417
-rw-r--r--  erts/emulator/beam/erl_init.c | 122
-rw-r--r--  erts/emulator/beam/erl_lock_check.c | 2
-rw-r--r--  erts/emulator/beam/erl_map.c | 2
-rw-r--r--  erts/emulator/beam/erl_monitor_link.h | 15
-rw-r--r--  erts/emulator/beam/erl_nif.c | 13
-rw-r--r--  erts/emulator/beam/erl_node_tables.c | 21
-rw-r--r--  erts/emulator/beam/erl_port.h | 4
-rw-r--r--  erts/emulator/beam/erl_port_task.c | 81
-rw-r--r--  erts/emulator/beam/erl_port_task.h | 21
-rw-r--r--  erts/emulator/beam/erl_printf_term.c | 4
-rw-r--r--  erts/emulator/beam/erl_proc_sig_queue.c | 1
-rw-r--r--  erts/emulator/beam/erl_proc_sig_queue.h | 3
-rw-r--r--  erts/emulator/beam/erl_process.c | 355
-rw-r--r--  erts/emulator/beam/erl_process.h | 4
-rw-r--r--  erts/emulator/beam/erl_process_dump.c | 40
-rw-r--r--  erts/emulator/beam/erl_sched_spec_pre_alloc.h | 9
-rw-r--r--  erts/emulator/beam/erl_thr_progress.c | 34
-rw-r--r--  erts/emulator/beam/erl_thr_progress.h | 29
-rw-r--r--  erts/emulator/beam/erl_trace.c | 59
-rw-r--r--  erts/emulator/beam/erl_trace.h | 2
-rw-r--r--  erts/emulator/beam/erl_utils.h | 61
-rw-r--r--  erts/emulator/beam/erl_vm.h | 2
-rw-r--r--  erts/emulator/beam/external.c | 76
-rw-r--r--  erts/emulator/beam/global.h | 25
-rw-r--r--  erts/emulator/beam/io.c | 2
-rw-r--r--  erts/emulator/beam/sys.h | 12
-rw-r--r--  erts/emulator/beam/utils.c | 176
57 files changed, 3630 insertions(+), 1234 deletions(-)
diff --git a/erts/emulator/beam/atom.c b/erts/emulator/beam/atom.c
index 5381611fab..59b51fd15e 100644
--- a/erts/emulator/beam/atom.c
+++ b/erts/emulator/beam/atom.c
@@ -174,7 +174,7 @@ atom_alloc(Atom* tmpl)
/*
* Precompute ordinal value of first 3 bytes + 7 bits.
- * This is used by utils.c:erts_cmp_atoms().
+ * This is used by erl_utils.h:erts_cmp_atoms().
* We cannot use the full 32 bits of the first 4 bytes,
* since we use the sign of the difference between two
* ordinal values to represent their relative order.
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 45b7540aeb..291bc95604 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -182,6 +182,7 @@ atom control
atom copy
atom copy_literals
atom counters
+atom count
atom cpu
atom cpu_timestamp
atom cr
@@ -287,6 +288,7 @@ atom gc_minor_end
atom gc_minor_start
atom Ge='>='
atom generational
+atom get_all_trap
atom get_seq_token
atom get_tcw
atom gather_gc_info_result
@@ -325,6 +327,7 @@ atom index
atom infinity
atom info
atom info_msg
+atom info_trap
atom init
atom initial_call
atom input
@@ -393,6 +396,7 @@ atom microsecond
atom microstate_accounting
atom milli_seconds
atom millisecond
+atom min
atom min_heap_size
atom min_bin_vheap_size
atom minor
@@ -542,6 +546,7 @@ atom reload
atom rem
atom report_errors
atom reset
+atom reset_seq_trace
atom restart
atom return_from
atom return_to
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index d221e6aea6..bb1b2e5b27 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -1752,29 +1752,7 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
finalize_purge_operation(BIF_P, ret == am_true);
if (literals) {
- ErtsLiteralAreaRef *ref;
- ErtsMessage *mp;
- ref = erts_alloc(ERTS_ALC_T_LITERAL_REF,
- sizeof(ErtsLiteralAreaRef));
- ref->literal_area = literals;
- ref->next = NULL;
- erts_mtx_lock(&release_literal_areas.mtx);
- if (release_literal_areas.last) {
- release_literal_areas.last->next = ref;
- release_literal_areas.last = ref;
- }
- else {
- release_literal_areas.first = ref;
- release_literal_areas.last = ref;
- }
- erts_mtx_unlock(&release_literal_areas.mtx);
- mp = erts_alloc_message(0, NULL);
- ERL_MESSAGE_TOKEN(mp) = am_undefined;
- erts_queue_proc_message(BIF_P,
- erts_literal_area_collector,
- 0,
- mp,
- am_copy_literals);
+ erts_queue_release_literals(BIF_P, literals);
}
return ret;
@@ -1786,6 +1764,41 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
}
}
+void
+erts_queue_release_literals(Process* c_p, ErtsLiteralArea* literals)
+{
+ ErtsLiteralAreaRef *ref;
+ ErtsMessage *mp;
+ ref = erts_alloc(ERTS_ALC_T_LITERAL_REF,
+ sizeof(ErtsLiteralAreaRef));
+ ref->literal_area = literals;
+ ref->next = NULL;
+ erts_mtx_lock(&release_literal_areas.mtx);
+ if (release_literal_areas.last) {
+ release_literal_areas.last->next = ref;
+ release_literal_areas.last = ref;
+ } else {
+ release_literal_areas.first = ref;
+ release_literal_areas.last = ref;
+ }
+ erts_mtx_unlock(&release_literal_areas.mtx);
+ mp = erts_alloc_message(0, NULL);
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
+ if (c_p == NULL) {
+ erts_queue_message(erts_literal_area_collector,
+ 0,
+ mp,
+ am_copy_literals,
+ am_system);
+ } else {
+ erts_queue_proc_message(c_p,
+ erts_literal_area_collector,
+ 0,
+ mp,
+ am_copy_literals);
+ }
+}
+
/*
* Move code from current to old and null all export entries for the module
*/
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index ab5920a67e..4351dda5a7 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -379,6 +379,7 @@ do { \
# define NOINLINE
#endif
+int tuple_module_apply;
/*
* The following functions are called directly by process_main().
@@ -579,6 +580,7 @@ init_emulator(void)
* the instructions' C labels to the loader.
* The second call starts execution of BEAM code. This call never returns.
*/
+ERTS_NO_RETPOLINE
void process_main(Eterm * x_reg_array, FloatDef* f_reg_array)
{
static int init_done = 0;
@@ -2209,6 +2211,7 @@ apply(Process* p, Eterm* reg, BeamInstr *I, Uint stack_offset)
Eterm module = reg[0];
Eterm function = reg[1];
Eterm args = reg[2];
+ Eterm this;
/*
* Check the arguments which should be of the form apply(Module,
@@ -2231,8 +2234,20 @@ apply(Process* p, Eterm* reg, BeamInstr *I, Uint stack_offset)
while (1) {
Eterm m, f, a;
-
- if (is_not_atom(module)) goto error;
+ /* The module argument may be either an atom or an abstract module
+ * (currently implemented using tuples, but this might change).
+ */
+ this = THE_NON_VALUE;
+ if (is_not_atom(module)) {
+ Eterm* tp;
+
+ if (!tuple_module_apply || is_not_tuple(module)) goto error;
+ tp = tuple_val(module);
+ if (arityval(tp[0]) < 1) goto error;
+ this = module;
+ module = tp[1];
+ if (is_not_atom(module)) goto error;
+ }
if (module != am_erlang || function != am_apply)
break;
@@ -2267,7 +2282,9 @@ apply(Process* p, Eterm* reg, BeamInstr *I, Uint stack_offset)
}
/*
* Walk down the 3rd parameter of apply (the argument list) and copy
- * the parameters to the x registers (reg[]).
+ * the parameters to the x registers (reg[]). If the module argument
+ * was an abstract module, add 1 to the function arity and put the
+ * module argument in the n+1st x register as a THIS reference.
*/
tmp = args;
@@ -2284,6 +2301,9 @@ apply(Process* p, Eterm* reg, BeamInstr *I, Uint stack_offset)
if (is_not_nil(tmp)) { /* Must be well-formed list */
goto error;
}
+ if (this != THE_NON_VALUE) {
+ reg[arity++] = this;
+ }
/*
* Get the index into the export table, or failing that the export
@@ -2322,7 +2342,18 @@ fixed_apply(Process* p, Eterm* reg, Uint arity,
return 0;
}
- if (is_not_atom(module)) goto error;
+ /* The module argument may be either an atom or an abstract module
+ * (currently implemented using tuples, but this might change).
+ */
+ if (is_not_atom(module)) {
+ Eterm* tp;
+ if (!tuple_module_apply || is_not_tuple(module)) goto error;
+ tp = tuple_val(module);
+ if (arityval(tp[0]) < 1) goto error;
+ module = tp[1];
+ if (is_not_atom(module)) goto error;
+ ++arity;
+ }
/* Handle apply of apply/3... */
if (module == am_erlang && function == am_apply && arity == 3) {
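
Both hunks above implement the same convention for tuple (abstract) modules: the callee module is the tuple's first element, and the tuple itself is passed as an extra trailing argument, so F/N is dispatched as Mod:F/(N+1). A hypothetical standalone sketch of just that argument rewriting (plain C, illustration only, not ERTS code):

#include <stdio.h>

int main(void)
{
    /* Stand-ins for the x registers and a tuple module {Mod, State}. */
    const char *reg[8] = { "Arg1", "Arg2" };
    int arity = 2;
    const char *module = "{Mod,State}"; /* the abstract module */
    const char *callee = "Mod";         /* tp[1], first tuple element */

    reg[arity++] = module;              /* reg[arity++] = this; */
    printf("dispatch %s:F/%d with THIS = %s\n",
           callee, arity, reg[arity - 1]);
    return 0;
}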
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index f18af8bcd7..000397e790 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -2810,38 +2810,110 @@ BIF_RETTYPE list_to_existing_atom_1(BIF_ALIST_1)
/* convert an integer to a list of ascii integers */
-BIF_RETTYPE integer_to_list_1(BIF_ALIST_1)
+static Eterm integer_to_list(Process *c_p, Eterm num, int base)
{
- Eterm* hp;
+ Eterm *hp;
+ Eterm res;
Uint need;
+ if (is_small(num)) {
+ char s[128];
+ char *c = s;
+ Uint digits;
+
+ digits = Sint_to_buf(signed_val(num), base, &c, sizeof(s));
+ need = 2 * digits;
+
+ hp = HAlloc(c_p, need);
+ res = buf_to_intlist(&hp, c, digits, NIL);
+ } else {
+ const int DIGITS_PER_RED = 16;
+ Eterm *hp_end;
+ Uint digits;
+
+ digits = big_integer_estimate(num, base);
+
+ if ((digits / DIGITS_PER_RED) > ERTS_BIF_REDS_LEFT(c_p)) {
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+
+ /* This could take a very long time, tell the caller to reschedule
+ * us to a dirty CPU scheduler if we aren't already on one. */
+ if (esdp->type == ERTS_SCHED_NORMAL) {
+ return THE_NON_VALUE;
+ }
+ } else {
+ BUMP_REDS(c_p, digits / DIGITS_PER_RED);
+ }
+
+ need = 2 * digits;
+
+ hp = HAlloc(c_p, need);
+ hp_end = hp + need;
+
+ res = erts_big_to_list(num, base, &hp);
+ HRelease(c_p, hp_end, hp);
+ }
+
+ return res;
+}
+
+BIF_RETTYPE integer_to_list_1(BIF_ALIST_1)
+{
+ Eterm res;
+
if (is_not_integer(BIF_ARG_1)) {
- BIF_ERROR(BIF_P, BADARG);
+ BIF_ERROR(BIF_P, BADARG);
}
- if (is_small(BIF_ARG_1)) {
- char *c;
- int n;
- struct Sint_buf ibuf;
+ res = integer_to_list(BIF_P, BIF_ARG_1, 10);
- c = Sint_to_buf(signed_val(BIF_ARG_1), &ibuf);
- n = sys_strlen(c);
- need = 2*n;
- hp = HAlloc(BIF_P, need);
- BIF_RET(buf_to_intlist(&hp, c, n, NIL));
+ if (is_non_value(res)) {
+ Eterm args[1];
+ args[0] = BIF_ARG_1;
+ return erts_schedule_bif(BIF_P,
+ args,
+ BIF_I,
+ integer_to_list_1,
+ ERTS_SCHED_DIRTY_CPU,
+ am_erlang,
+ am_integer_to_list,
+ 1);
}
- else {
- int n = big_decimal_estimate(BIF_ARG_1);
- Eterm res;
- Eterm* hp_end;
- need = 2*n;
- hp = HAlloc(BIF_P, need);
- hp_end = hp + need;
- res = erts_big_to_list(BIF_ARG_1, &hp);
- HRelease(BIF_P,hp_end,hp);
- BIF_RET(res);
+ return res;
+}
+
+BIF_RETTYPE integer_to_list_2(BIF_ALIST_2)
+{
+ Eterm res;
+ SWord base;
+
+ if (is_not_integer(BIF_ARG_1) || is_not_small(BIF_ARG_2)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ base = signed_val(BIF_ARG_2);
+ if (base < 2 || base > 36) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ res = integer_to_list(BIF_P, BIF_ARG_1, base);
+
+ if (is_non_value(res)) {
+ Eterm args[2];
+ args[0] = BIF_ARG_1;
+ args[1] = BIF_ARG_2;
+ return erts_schedule_bif(BIF_P,
+ args,
+ BIF_I,
+ integer_to_list_2,
+ ERTS_SCHED_DIRTY_CPU,
+ am_erlang,
+ am_integer_to_list,
+ 2);
}
+
+ return res;
}
/**********************************************************************/
@@ -3622,6 +3694,10 @@ erts_internal_garbage_collect_1(BIF_ALIST_1)
default: BIF_ERROR(BIF_P, BADARG);
}
erts_garbage_collect(BIF_P, 0, NULL, 0);
+ if (ERTS_PROC_IS_EXITING(BIF_P)) {
+ /* The max heap size limit was reached. */
+ return THE_NON_VALUE;
+ }
return am_true;
}
@@ -4486,11 +4562,12 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
ERTS_TRACER_CLEAR(&old_seq_tracer);
BIF_RET(ret);
- } else if (BIF_ARG_1 == make_small(1)) {
+ } else if (BIF_ARG_1 == am_reset_seq_trace) {
int i, max;
- erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_thr_progress_block();
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
+
max = erts_ptab_max(&erts_proc);
for (i = 0; i < max; i++) {
Process *p = erts_pix2proc(i);
@@ -4502,13 +4579,14 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
#endif
p->seq_trace_clock = 0;
p->seq_trace_lastcnt = 0;
-
+ erts_proc_lock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_MSGQ);
erts_proc_sig_clear_seq_trace_tokens(p);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_MSGQ);
}
}
- erts_thr_progress_unblock();
- erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(am_true);
} else if (BIF_ARG_1 == am_scheduler_wall_time) {
@@ -4623,6 +4701,9 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
return erts_bind_schedulers(BIF_P, BIF_ARG_2);
} else if (ERTS_IS_ATOM_STR("erts_alloc", BIF_ARG_1)) {
return erts_alloc_set_dyn_param(BIF_P, BIF_ARG_2);
+ } else if (ERTS_IS_ATOM_STR("system_logger", BIF_ARG_1)) {
+ Eterm res = erts_set_system_logger(BIF_ARG_2);
+ if (is_value(res)) BIF_RET(res);
}
error:
BIF_ERROR(BIF_P, BADARG);
@@ -5125,61 +5206,6 @@ erts_call_dirty_bif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm *
return exiting;
}
-
-
-#ifdef HARDDEBUG
-/*
-You'll need this line in bif.tab to be able to use this debug bif
-
-bif erlang:send_to_logger/2
-
-*/
-BIF_RETTYPE send_to_logger_2(BIF_ALIST_2)
-{
- byte *buf;
- ErlDrvSizeT len;
- if (!is_atom(BIF_ARG_1) || !(is_list(BIF_ARG_2) ||
- is_nil(BIF_ARG_1))) {
- BIF_ERROR(BIF_P,BADARG);
- }
- if (erts_iolist_size(BIF_ARG_2, &len) != 0)
- BIF_ERROR(BIF_P,BADARG);
- else if (len == 0)
- buf = "";
- else {
-#ifdef DEBUG
- ErlDrvSizeT len2;
-#endif
- buf = (byte *) erts_alloc(ERTS_ALC_T_TMP, len+1);
-#ifdef DEBUG
- len2 =
-#else
- (void)
-#endif
- erts_iolist_to_buf(BIF_ARG_2, buf, len);
- ASSERT(len2 == len);
- buf[len] = '\0';
- switch (BIF_ARG_1) {
- case am_info:
- erts_send_info_to_logger(BIF_P->group_leader, buf, len);
- break;
- case am_warning:
- erts_send_warning_to_logger(BIF_P->group_leader, buf, len);
- break;
- case am_error:
- erts_send_error_to_logger(BIF_P->group_leader, buf, len);
- break;
- default:
- {
- BIF_ERROR(BIF_P,BADARG);
- }
- }
- erts_free(ERTS_ALC_T_TMP, (void *) buf);
- }
- BIF_RET(am_true);
-}
-#endif /* HARDDEBUG */
-
BIF_RETTYPE get_module_info_1(BIF_ALIST_1)
{
Eterm ret = erts_module_info_0(BIF_P, BIF_ARG_1);
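
integer_to_list/1 and integer_to_list/2 above share one rescheduling pattern: estimate the digit count, charge one reduction per DIGITS_PER_RED digits, and if the estimate exceeds the remaining budget while on a normal scheduler, return THE_NON_VALUE so the wrapper can trap to a dirty CPU scheduler with erts_schedule_bif. A condensed sketch of just that control flow, using only names that appear in the diff (ERTS-internal, body simplified, not standalone):

static Eterm convert_or_trap(Process *c_p, Eterm num, int base)
{
    const int DIGITS_PER_RED = 16;
    Uint digits = big_integer_estimate(num, base);

    if ((digits / DIGITS_PER_RED) > ERTS_BIF_REDS_LEFT(c_p)) {
        /* Too much work for the remaining reduction budget: on a
         * normal scheduler, return THE_NON_VALUE and let the caller
         * trap via erts_schedule_bif(..., ERTS_SCHED_DIRTY_CPU, ...). */
        if (erts_get_scheduler_data()->type == ERTS_SCHED_NORMAL)
            return THE_NON_VALUE;
    } else {
        BUMP_REDS(c_p, digits / DIGITS_PER_RED); /* pay as we go */
    }
    /* ...allocate and build the result exactly as in the hunks above... */
    return am_ok; /* placeholder */
}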
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index 7548924178..8419244832 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -40,6 +40,7 @@
# Note: Guards BIFs usually require special support in the compiler.
#
+
gcbif erlang:abs/1
bif erlang:adler32/1
bif erlang:adler32/2
@@ -698,3 +699,43 @@ ubif erlang:map_get/2
ubif erlang:is_map_key/2
bif ets:internal_delete_all/2
bif ets:internal_select_delete/2
+
+#
+# New in 21.2
+#
+
+bif persistent_term:put/2
+bif persistent_term:get/1
+bif persistent_term:get/0
+bif persistent_term:erase/1
+bif persistent_term:info/0
+bif erts_internal:erase_persistent_terms/0
+
+bif erts_internal:atomics_new/2
+bif atomics:get/2
+bif atomics:put/3
+bif atomics:add/3
+bif atomics:add_get/3
+bif atomics:exchange/3
+bif atomics:compare_exchange/4
+bif atomics:info/1
+
+bif erts_internal:counters_new/1
+bif erts_internal:counters_get/2
+bif erts_internal:counters_add/3
+bif erts_internal:counters_put/3
+bif erts_internal:counters_info/1
+
+#
+# New in 21.2.3
+#
+
+bif erts_internal:spawn_system_process/3
+
+#
+# New in 21.3
+#
+
+bif erlang:integer_to_list/2
+bif erlang:integer_to_binary/2
+bif persistent_term:get/2
diff --git a/erts/emulator/beam/big.c b/erts/emulator/beam/big.c
index 84338769e0..522f50287a 100644
--- a/erts/emulator/beam/big.c
+++ b/erts/emulator/beam/big.c
@@ -429,6 +429,7 @@
static const byte digits_per_sint_lookup[36-1];
static const byte digits_per_small_lookup[36-1];
static const Sint largest_power_of_base_lookup[36-1];
+static const double lg2_lookup[36-1];
static ERTS_INLINE byte get_digits_per_signed_int(Uint base) {
return digits_per_sint_lookup[base-2];
@@ -442,6 +443,10 @@ static ERTS_INLINE Sint get_largest_power_of_base(Uint base) {
return largest_power_of_base_lookup[base-2];
}
+static ERTS_INLINE double lookup_log2(Uint base) {
+ return lg2_lookup[base - 2];
+}
+
/*
** compare two number vectors
*/
@@ -668,27 +673,25 @@ static dsize_t I_mul(ErtsDigit* x, dsize_t xl, ErtsDigit* y, dsize_t yl, ErtsDig
static dsize_t I_sqr(ErtsDigit* x, dsize_t xl, ErtsDigit* r)
{
- ErtsDigit d_next = *x;
ErtsDigit d;
ErtsDigit* r0 = r;
ErtsDigit* s = r;
if ((r + xl) == x) /* "Inline" operation */
*x = 0;
- x++;
while(xl--) {
- ErtsDigit* y = x;
+ ErtsDigit* y;
ErtsDigit y_0 = 0, y_1 = 0, y_2 = 0, y_3 = 0;
ErtsDigit b0, b1;
ErtsDigit z0, z1, z2;
ErtsDigit t;
dsize_t y_l = xl;
-
+
+ d = *x;
+ x++;
+ y = x;
s = r;
- d = d_next;
- d_next = *x;
- x++;
DMUL(d, d, b1, b0);
DSUMc(*s, b0, y_3, t);
@@ -1159,8 +1162,11 @@ static dsize_t I_band(ErtsDigit* x, dsize_t xl, short xsgn,
*r++ = ~c1 & ~c2;
x++; y++;
}
- while(xl--)
- *r++ = ~*x++;
+ while(xl--) {
+ DSUBb(*x,0,b1,c1);
+ *r++ = ~c1;
+ x++;
+ }
}
}
return I_btrail(r0, r, sign);
@@ -1719,23 +1725,23 @@ double_to_big(double x, Eterm *heap, Uint hsz)
/*
- ** Estimate the number of decimal digits (include sign)
+ ** Estimate the number of digits in the given base (including sign)
*/
-int big_decimal_estimate(Wterm x)
+int big_integer_estimate(Wterm x, Uint base)
{
Eterm* xp = big_val(x);
int lg = I_lg(BIG_V(xp), BIG_SIZE(xp));
- int lg10 = ((lg+1)*28/93)+1;
+ int lgBase = ((lg + 1) / lookup_log2(base)) + 1;
- if (BIG_SIGN(xp)) lg10++; /* add sign */
- return lg10+1; /* add null */
+ if (BIG_SIGN(xp)) lgBase++; /* add sign */
+ return lgBase + 1; /* add null */
}
/*
-** Convert a bignum into a string of decimal numbers
+** Convert a bignum into a string of digits in the given base
*/
-
-static Uint write_big(Wterm x, void (*write_func)(void *, char), void *arg)
+static Uint write_big(Wterm x, int base, void (*write_func)(void *, char),
+ void *arg)
{
Eterm* xp = big_val(x);
ErtsDigit* dx = BIG_V(xp);
@@ -1743,48 +1749,72 @@ static Uint write_big(Wterm x, void (*write_func)(void *, char), void *arg)
short sign = BIG_SIGN(xp);
ErtsDigit rem;
Uint n = 0;
- const Uint digits_per_Sint = get_digits_per_signed_int(10);
- const Sint largest_pow_of_base = get_largest_power_of_base(10);
+ const Uint digits_per_Sint = get_digits_per_signed_int(base);
+ const Sint largest_pow_of_base = get_largest_power_of_base(base);
if (xl == 1 && *dx < largest_pow_of_base) {
- rem = *dx;
- if (rem == 0) {
- (*write_func)(arg, '0'); n++;
- } else {
- while(rem) {
- (*write_func)(arg, (rem % 10) + '0'); n++;
- rem /= 10;
- }
- }
+ rem = *dx;
+ if (rem == 0) {
+ (*write_func)(arg, '0'); n++;
+ } else {
+ while(rem) {
+ int digit = rem % base;
+
+ if (digit < 10) {
+ (*write_func)(arg, digit + '0'); n++;
+ } else {
+ (*write_func)(arg, 'A' + (digit - 10)); n++;
+ }
+
+ rem /= base;
+ }
+ }
} else {
- ErtsDigit* tmp = (ErtsDigit*) erts_alloc(ERTS_ALC_T_TMP,
- sizeof(ErtsDigit)*xl);
- dsize_t tmpl = xl;
+ ErtsDigit* tmp = (ErtsDigit*) erts_alloc(ERTS_ALC_T_TMP,
+ sizeof(ErtsDigit) * xl);
+ dsize_t tmpl = xl;
- MOVE_DIGITS(tmp, dx, xl);
+ MOVE_DIGITS(tmp, dx, xl);
- while(1) {
+ while(1) {
tmpl = D_div(tmp, tmpl, largest_pow_of_base, tmp, &rem);
- if (tmpl == 1 && *tmp == 0) {
- while(rem) {
- (*write_func)(arg, (rem % 10)+'0'); n++;
- rem /= 10;
- }
- break;
- } else {
+
+ if (tmpl == 1 && *tmp == 0) {
+ while(rem) {
+ int digit = rem % base;
+
+ if (digit < 10) {
+ (*write_func)(arg, digit + '0'); n++;
+ } else {
+ (*write_func)(arg, 'A' + (digit - 10)); n++;
+ }
+
+ rem /= base;
+ }
+ break;
+ } else {
Uint i = digits_per_Sint;
- while(i--) {
- (*write_func)(arg, (rem % 10)+'0'); n++;
- rem /= 10;
- }
- }
- }
- erts_free(ERTS_ALC_T_TMP, (void *) tmp);
+
+ while(i--) {
+ int digit = rem % base;
+
+ if (digit < 10) {
+ (*write_func)(arg, digit + '0'); n++;
+ } else {
+ (*write_func)(arg, 'A' + (digit - 10)); n++;
+ }
+
+ rem /= base;
+ }
+ }
+ }
+ erts_free(ERTS_ALC_T_TMP, (void *) tmp);
}
if (sign) {
- (*write_func)(arg, '-'); n++;
+ (*write_func)(arg, '-'); n++;
}
+
return n;
}
@@ -1801,12 +1831,12 @@ write_list(void *arg, char c)
blp->hp += 2;
}
-Eterm erts_big_to_list(Eterm x, Eterm **hpp)
+Eterm erts_big_to_list(Eterm x, int base, Eterm **hpp)
{
struct big_list__ bl;
bl.hp = *hpp;
bl.res = NIL;
- write_big(x, write_list, (void *) &bl);
+ write_big(x, base, write_list, (void *) &bl);
*hpp = bl.hp;
return bl.res;
}
@@ -1817,11 +1847,11 @@ write_string(void *arg, char c)
*(--(*((char **) arg))) = c;
}
-char *erts_big_to_string(Wterm x, char *buf, Uint buf_sz)
+char *erts_big_to_string(Wterm x, int base, char *buf, Uint buf_sz)
{
char *big_str = buf + buf_sz - 1;
*big_str = '\0';
- write_big(x, write_string, (void *) &big_str);
+ write_big(x, base, write_string, (void*)&big_str);
ASSERT(buf <= big_str && big_str <= buf + buf_sz - 1);
return big_str;
}
@@ -1830,11 +1860,11 @@ char *erts_big_to_string(Wterm x, char *buf, Uint buf_sz)
* e.g. 1 bsl 64 -> "18446744073709551616"
*/
-Uint erts_big_to_binary_bytes(Eterm x, char *buf, Uint buf_sz)
+Uint erts_big_to_binary_bytes(Eterm x, int base, char *buf, Uint buf_sz)
{
char *big_str = buf + buf_sz;
Uint n;
- n = write_big(x, write_string, (void *) &big_str);
+ n = write_big(x, base, write_string, (void *) &big_str);
ASSERT(buf <= big_str && big_str <= buf + buf_sz);
return n;
}
@@ -2577,9 +2607,6 @@ static const double lg2_lookup[36-1] = {
4.32193, 4.39232, 4.45943, 4.52356, 4.58496, 4.64386, 4.70044, 4.75489,
4.80735, 4.85798, 4.90689, 4.9542, 5.0, 5.04439, 5.08746, 5.12928, 5.16993
};
-static ERTS_INLINE double lookup_log2(Uint base) {
- return lg2_lookup[base - 2];
-}
/*
* How many digits can fit into a signed int (Sint) for given base, we take
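
The generalized estimate replaces the decimal-only shortcut lg10 = ((lg+1)*28/93)+1, where 28/93 approximates 1/log2(10), with an exact per-base constant from lg2_lookup: a magnitude of lg+1 significant bits needs at most (lg+1)/log2(base) + 1 digits. A standalone sanity check of that bound in plain C with libm (hypothetical, not ERTS code):

#include <math.h>
#include <stdio.h>

int main(void)
{
    /* 2^64 - 1 has 64 significant bits and 20 decimal digits. */
    unsigned long long n = 18446744073709551615ULL;
    int base = 10;
    int estimate = (int)(64 / log2(base)) + 1;   /* 19 + 1 = 20 */
    int actual = 0;
    while (n) { n /= base; actual++; }           /* counts 20 digits */
    printf("estimate %d >= actual %d\n", estimate, actual);
    return 0;
}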
diff --git a/erts/emulator/beam/big.h b/erts/emulator/beam/big.h
index 7556205063..274482a0d2 100644
--- a/erts/emulator/beam/big.h
+++ b/erts/emulator/beam/big.h
@@ -81,7 +81,11 @@ typedef Uint dsize_t; /* Vector size type */
* a Uint64 argument. Therefore, we must test the size of the argument
* to ensure that the cast does not discard the high-order 32 bits.
*/
-#define _IS_SSMALL32(x) (((Uint32) ((((x)) >> (SMALL_BITS-1)) + 1)) < 2)
+#if defined(ARCH_32)
+# define _IS_SSMALL32(x) (((Uint32) ((((x)) >> (SMALL_BITS-1)) + 1)) < 2)
+#else
+# define _IS_SSMALL32(x) (1)
+#endif
#define _IS_SSMALL64(x) (((Uint64) ((((x)) >> (SMALL_BITS-1)) + 1)) < 2)
#define IS_SSMALL(x) (sizeof(x) == sizeof(Uint32) ? _IS_SSMALL32(x) : _IS_SSMALL64(x))
@@ -119,10 +123,10 @@ typedef Uint dsize_t; /* Vector size type */
#endif
-int big_decimal_estimate(Wterm);
-Eterm erts_big_to_list(Eterm, Eterm**);
-char *erts_big_to_string(Wterm x, char *buf, Uint buf_sz);
-Uint erts_big_to_binary_bytes(Eterm x, char *buf, Uint buf_sz);
+int big_integer_estimate(Wterm, Uint base);
+Eterm erts_big_to_list(Eterm, int base, Eterm**);
+char *erts_big_to_string(Wterm x, int base, char *buf, Uint buf_sz);
+Uint erts_big_to_binary_bytes(Eterm x, int base, char *buf, Uint buf_sz);
Eterm small_times(Sint, Sint, Eterm*);
diff --git a/erts/emulator/beam/binary.c b/erts/emulator/beam/binary.c
index 6a349764b2..a18228b84a 100644
--- a/erts/emulator/beam/binary.c
+++ b/erts/emulator/beam/binary.c
@@ -322,40 +322,102 @@ BIF_RETTYPE binary_to_integer_2(BIF_ALIST_2)
}
+static Eterm integer_to_binary(Process *c_p, Eterm num, int base)
+{
+ Eterm res;
+
+ if (is_small(num)) {
+ char s[128];
+ char *c = s;
+ Uint digits;
+
+ digits = Sint_to_buf(signed_val(num), base, &c, sizeof(s));
+ res = new_binary(c_p, (byte*)c, digits);
+ } else {
+ const int DIGITS_PER_RED = 16;
+ Uint digits, n;
+ byte *bytes;
+
+ digits = big_integer_estimate(num, base);
+
+ if ((digits / DIGITS_PER_RED) > ERTS_BIF_REDS_LEFT(c_p)) {
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+
+ /* This could take a very long time, tell the caller to reschedule
+ * us to a dirty CPU scheduler if we aren't already on one. */
+ if (esdp->type == ERTS_SCHED_NORMAL) {
+ return THE_NON_VALUE;
+ }
+ } else {
+ BUMP_REDS(c_p, digits / DIGITS_PER_RED);
+ }
+
+ bytes = (byte*)erts_alloc(ERTS_ALC_T_TMP, sizeof(byte) * digits);
+ n = erts_big_to_binary_bytes(num, base, (char*)bytes, digits);
+ res = new_binary(c_p, bytes + digits - n, n);
+ erts_free(ERTS_ALC_T_TMP, (void*)bytes);
+ }
+
+ return res;
+}
+
BIF_RETTYPE integer_to_binary_1(BIF_ALIST_1)
-{
- Uint size;
+{
Eterm res;
if (is_not_integer(BIF_ARG_1)) {
- BIF_ERROR(BIF_P, BADARG);
+ BIF_ERROR(BIF_P, BADARG);
}
- if (is_small(BIF_ARG_1)) {
- char *c;
- struct Sint_buf ibuf;
+ res = integer_to_binary(BIF_P, BIF_ARG_1, 10);
+
+ if (is_non_value(res)) {
+ Eterm args[1];
+ args[0] = BIF_ARG_1;
+ return erts_schedule_bif(BIF_P,
+ args,
+ BIF_I,
+ integer_to_binary_1,
+ ERTS_SCHED_DIRTY_CPU,
+ am_erlang,
+ am_integer_to_binary,
+ 1);
+ }
- /* Enhancement: If we can calculate the buffer size exactly
- * we could avoid an unnecessary copy of buffers.
- * Useful if size determination is faster than a copy.
- */
- c = Sint_to_buf(signed_val(BIF_ARG_1), &ibuf);
- size = sys_strlen(c);
- res = new_binary(BIF_P, (byte *)c, size);
- } else {
- byte* bytes;
- Uint n = 0;
+ return res;
+}
- /* Here we also have multiple copies of buffers
- * due to new_binary interface
- */
- size = big_decimal_estimate(BIF_ARG_1) - 1; /* remove null */
- bytes = (byte*) erts_alloc(ERTS_ALC_T_TMP, sizeof(byte)*size);
- n = erts_big_to_binary_bytes(BIF_ARG_1, (char *)bytes, size);
- res = new_binary(BIF_P, bytes + size - n, n);
- erts_free(ERTS_ALC_T_TMP, (void *) bytes);
+BIF_RETTYPE integer_to_binary_2(BIF_ALIST_2)
+{
+ Eterm res;
+ SWord base;
+
+ if (is_not_integer(BIF_ARG_1) || is_not_small(BIF_ARG_2)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ base = signed_val(BIF_ARG_2);
+ if (base < 2 || base > 36) {
+ BIF_ERROR(BIF_P, BADARG);
}
- BIF_RET(res);
+
+ res = integer_to_binary(BIF_P, BIF_ARG_1, base);
+
+ if (is_non_value(res)) {
+ Eterm args[2];
+ args[0] = BIF_ARG_1;
+ args[1] = BIF_ARG_2;
+ return erts_schedule_bif(BIF_P,
+ args,
+ BIF_I,
+ integer_to_binary_2,
+ ERTS_SCHED_DIRTY_CPU,
+ am_erlang,
+ am_integer_to_binary,
+ 2);
+ }
+
+ return res;
}
#define ERTS_B2L_BYTES_PER_REDUCTION 256
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index 9ff52c92b8..92009c2345 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -82,7 +82,7 @@ process_info(fmtfn_t to, void *to_arg)
* they are most likely just created and have invalid data
*/
if (!ERTS_PROC_IS_EXITING(p) && p->heap != NULL)
- print_process_info(to, to_arg, p);
+ print_process_info(to, to_arg, p, 0);
}
}
@@ -101,7 +101,7 @@ process_killer(void)
rp = erts_pix2proc(i);
if (rp && rp->i != ENULL) {
int br;
- print_process_info(ERTS_PRINT_STDOUT, NULL, rp);
+ print_process_info(ERTS_PRINT_STDOUT, NULL, rp, 0);
erts_printf("(k)ill (n)ext (r)eturn:\n");
while(1) {
if ((j = sys_get_key(0)) <= 0)
@@ -199,13 +199,14 @@ static void doit_print_monitor(ErtsMonitor *mon, void *vpcontext)
/* Display info about an individual Erlang process */
void
-print_process_info(fmtfn_t to, void *to_arg, Process *p)
+print_process_info(fmtfn_t to, void *to_arg, Process *p, ErtsProcLocks orig_locks)
{
int garbing = 0;
int running = 0;
Sint len;
struct saved_calls *scb;
erts_aint32_t state;
+ ErtsProcLocks locks = orig_locks;
/* display the PID */
erts_print(to, to_arg, "=proc:%T\n", p->common.id);
@@ -222,6 +223,22 @@ print_process_info(fmtfn_t to, void *to_arg, Process *p)
| ERTS_PSFLG_DIRTY_RUNNING))
running = 1;
+ if (!(locks & ERTS_PROC_LOCK_MAIN)) {
+ locks |= ERTS_PROC_LOCK_MAIN;
+ if (ERTS_IS_CRASH_DUMPING && running) {
+ if (erts_proc_trylock(p, locks)) {
+ /* crash dumping and main lock taken, this probably means that
+ the process is doing a GC on a dirty-scheduler... so we cannot
+ do erts_proc_sig_fetch as that would potentially cause a segfault */
+ locks = 0;
+ }
+ } else {
+ erts_proc_lock(p, locks);
+ }
+ } else {
+ ERTS_ASSERT(locks == ERTS_PROC_LOCK_MAIN && "Only main lock should be held");
+ }
+
/*
* If the process is registered as a global process, display the
* registered name
@@ -251,13 +268,19 @@ print_process_info(fmtfn_t to, void *to_arg, Process *p)
erts_print(to, to_arg, "Spawned by: %T\n", p->parent);
- erts_proc_lock(p, ERTS_PROC_LOCK_MSGQ);
- len = erts_proc_sig_fetch(p);
- erts_proc_unlock(p, ERTS_PROC_LOCK_MSGQ);
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_proc_lock(p, ERTS_PROC_LOCK_MSGQ);
+ len = erts_proc_sig_fetch(p);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_MSGQ);
+ } else {
+ len = p->sig_qs.len;
+ }
erts_print(to, to_arg, "Message queue length: %d\n", len);
- /* display the message queue only if there is anything in it */
- if (!ERTS_IS_CRASH_DUMPING && p->sig_qs.first != NULL && !garbing) {
+ /* display the message queue only if there is anything in it
+ and we can do it safely */
+ if (!ERTS_IS_CRASH_DUMPING && p->sig_qs.first != NULL && !garbing
+ && (locks & ERTS_PROC_LOCK_MAIN)) {
erts_print(to, to_arg, "Message queue: [");
ERTS_FOREACH_SIG_PRIVQS(
p, mp,
@@ -357,6 +380,8 @@ print_process_info(fmtfn_t to, void *to_arg, Process *p)
/* Display all states */
erts_print(to, to_arg, "Internal State: ");
erts_dump_extended_process_state(to, to_arg, state);
+
+ erts_proc_unlock(p, locks & ~orig_locks);
}
static void
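
The new locking in print_process_info never blocks while crash dumping: if the main lock of a running process (possibly doing GC on a dirty scheduler) cannot be taken, the function falls back to printing only what is safe without it (locks = 0 above). A standalone illustration of the same trylock fallback with POSIX mutexes (hypothetical, not ERTS code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t proc_main = PTHREAD_MUTEX_INITIALIZER;

void dump_process(int crash_dumping, int running)
{
    if (crash_dumping && running) {
        /* Never block in the crash dumper: nonzero means the lock is
         * busy, so print reduced info, mirroring locks = 0 above. */
        if (pthread_mutex_trylock(&proc_main) != 0) {
            printf("lock busy: dumping reduced info\n");
            return;
        }
    } else {
        pthread_mutex_lock(&proc_main);
    }
    printf("dumping full info\n");
    pthread_mutex_unlock(&proc_main);
}

int main(void) { dump_process(0, 0); return 0; }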
diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c
index e7bfd04b73..e7bd046e18 100644
--- a/erts/emulator/beam/copy.c
+++ b/erts/emulator/beam/copy.c
@@ -1074,6 +1074,7 @@ Uint copy_shared_calculate(Eterm obj, erts_shcopy_t *info)
Eterm* ptr;
Eterm *lit_purge_ptr = info->lit_purge_ptr;
Uint lit_purge_sz = info->lit_purge_sz;
+ int copy_literals = info->copy_literals;
#ifdef DEBUG
Eterm mypid = erts_get_current_pid();
#endif
@@ -1119,7 +1120,7 @@ Uint copy_shared_calculate(Eterm obj, erts_shcopy_t *info)
/* off heap list pointers are copied verbatim */
if (erts_is_literal(obj,ptr)) {
VERBOSE(DEBUG_SHCOPY, ("[pid=%T] bypassed copying %p is %T\n", mypid, ptr, obj));
- if (in_literal_purge_area(ptr))
+ if (copy_literals || in_literal_purge_area(ptr))
info->literal_size += size_object(obj);
goto pop_next;
}
@@ -1170,7 +1171,7 @@ Uint copy_shared_calculate(Eterm obj, erts_shcopy_t *info)
/* off heap pointers to boxes are copied verbatim */
if (erts_is_literal(obj,ptr)) {
VERBOSE(DEBUG_SHCOPY, ("[pid=%T] bypassed copying %p is %T\n", mypid, ptr, obj));
- if (in_literal_purge_area(ptr))
+ if (copy_literals || in_literal_purge_area(ptr))
info->literal_size += size_object(obj);
goto pop_next;
}
@@ -1338,6 +1339,7 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
unsigned remaining;
Eterm *lit_purge_ptr = info->lit_purge_ptr;
Uint lit_purge_sz = info->lit_purge_sz;
+ int copy_literals = info->copy_literals;
#ifdef DEBUG
Eterm mypid = erts_get_current_pid();
Eterm saved_obj = obj;
@@ -1387,7 +1389,7 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
ptr = list_val(obj);
/* off heap list pointers are copied verbatim */
if (erts_is_literal(obj,ptr)) {
- if (!in_literal_purge_area(ptr)) {
+ if (!(copy_literals || in_literal_purge_area(ptr))) {
*resp = obj;
} else {
Uint bsz = 0;
@@ -1455,7 +1457,7 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
ptr = boxed_val(obj);
/* off heap pointers to boxes are copied verbatim */
if (erts_is_literal(obj,ptr)) {
- if (!in_literal_purge_area(ptr)) {
+ if (!(copy_literals || in_literal_purge_area(ptr))) {
*resp = obj;
} else {
Uint bsz = 0;
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 8fe1ccb758..9e36d5e0d1 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -4030,6 +4030,9 @@ debug_free(ErtsAlcType_t n, void *extra, void *ptr)
ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);
+ if (!ptr)
+ return;
+
dptr = check_memory_fence(ptr, &size, n, ERTS_ALC_O_FREE);
#ifdef ERTS_ALC_A_EXEC
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 5409b89bab..4f03a34390 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -274,9 +274,13 @@ type ML_YIELD_STATE SHORT_LIVED SYSTEM monitor_link_yield_state
type ML_DIST STANDARD SYSTEM monitor_link_dist
type PF3_ARGS SHORT_LIVED PROCESSES process_flag_3_arguments
type SETUP_CONN_ARG SHORT_LIVED PROCESSES setup_connection_argument
+type LIST_TRAP SHORT_LIVED PROCESSES list_bif_trap_state
type ENVIRONMENT SYSTEM SYSTEM environment
+type PERSISTENT_TERM LONG_LIVED CODE persistent_term
+type PERSISTENT_LOCK_Q SHORT_LIVED SYSTEM persistent_lock_q
+
#
# Types used for special emulators
#
@@ -334,6 +338,8 @@ type GC_INFO_REQ SHORT_LIVED SYSTEM gc_info_request
type PORT_DATA_HEAP STANDARD SYSTEM port_data_heap
type MSACC DRIVER SYSTEM microstate_accounting
type SYS_CHECK_REQ SHORT_LIVED SYSTEM system_check_request
+type ATOMICS STANDARD SYSTEM erl_bif_atomics
+type COUNTERS STANDARD SYSTEM erl_bif_counters
#
# Types used by system specific code
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index a5740a08cf..0be4562785 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -834,6 +834,8 @@ static ERTS_INLINE void clr_bit(UWord* map, Uint ix)
&= ~((UWord)1 << (ix % ERTS_VSPACE_WORD_BITS));
}
+#ifdef DEBUG
+
static ERTS_INLINE int is_bit_set(UWord* map, Uint ix)
{
ASSERT(ix / ERTS_VSPACE_WORD_BITS < VSPACE_MAP_SZ);
@@ -841,6 +843,8 @@ static ERTS_INLINE int is_bit_set(UWord* map, Uint ix)
& ((UWord)1 << (ix % ERTS_VSPACE_WORD_BITS));
}
+#endif
+
UWord erts_literal_vspace_map[VSPACE_MAP_SZ];
static void set_literal_range(void* start, Uint size)
diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.c b/erts/emulator/beam/erl_ao_firstfit_alloc.c
index 3f0ab33597..917cb1cf10 100644
--- a/erts/emulator/beam/erl_ao_firstfit_alloc.c
+++ b/erts/emulator/beam/erl_ao_firstfit_alloc.c
@@ -100,8 +100,8 @@
#define AOFF_BLK_SZ(B) MBC_FBLK_SZ(&(B)->hdr)
-#define LIST_NEXT(N) (((AOFF_RBTree_t*)(N))->u.next)
-#define LIST_PREV(N) (((AOFF_RBTree_t*)(N))->parent)
+#define AOFF_LIST_NEXT(N) (((AOFF_RBTree_t*)(N))->u.next)
+#define AOFF_LIST_PREV(N) (((AOFF_RBTree_t*)(N))->parent)
typedef struct AOFF_Carrier_t_ AOFF_Carrier_t;
@@ -152,13 +152,13 @@ static ERTS_INLINE Uint node_max_size(AOFF_RBTree_t *x)
static ERTS_INLINE void lower_max_size(AOFF_RBTree_t *node,
AOFF_RBTree_t* stop_at)
{
- AOFF_RBTree_t* x = node;
+ AOFF_RBTree_t* x = node;
Uint old_max = x->max_sz;
Uint new_max = node_max_size(x);
if (new_max < old_max) {
x->max_sz = new_max;
- while ((x=x->parent) != stop_at && x->max_sz == old_max) {
+ while ((x=x->parent) != stop_at && x->max_sz == old_max) {
x->max_sz = node_max_size(x);
}
ASSERT(x == stop_at || x->max_sz > old_max);
@@ -352,7 +352,7 @@ left_rotate(AOFF_RBTree_t **root, AOFF_RBTree_t *x)
x->parent = y;
y->max_sz = x->max_sz;
- x->max_sz = node_max_size(x);
+ x->max_sz = node_max_size(x);
ASSERT(y->max_sz >= x->max_sz);
}
@@ -377,7 +377,7 @@ right_rotate(AOFF_RBTree_t **root, AOFF_RBTree_t *x)
y->right = x;
x->parent = y;
y->max_sz = x->max_sz;
- x->max_sz = node_max_size(x);
+ x->max_sz = node_max_size(x);
ASSERT(y->max_sz >= x->max_sz);
}
@@ -523,23 +523,23 @@ aoff_unlink_free_block(Allctr_t *allctr, Block_t *blk)
ASSERT(del->flags & IS_BF_FLG);
if (IS_LIST_ELEM(del)) {
/* Remove from list */
- ASSERT(LIST_PREV(del));
- ASSERT(LIST_PREV(del)->flags & IS_BF_FLG);
- LIST_NEXT(LIST_PREV(del)) = LIST_NEXT(del);
- if (LIST_NEXT(del)) {
- ASSERT(LIST_NEXT(del)->flags & IS_BF_FLG);
- LIST_PREV(LIST_NEXT(del)) = LIST_PREV(del);
+ ASSERT(AOFF_LIST_PREV(del));
+ ASSERT(AOFF_LIST_PREV(del)->flags & IS_BF_FLG);
+ AOFF_LIST_NEXT(AOFF_LIST_PREV(del)) = AOFF_LIST_NEXT(del);
+ if (AOFF_LIST_NEXT(del)) {
+ ASSERT(AOFF_LIST_NEXT(del)->flags & IS_BF_FLG);
+ AOFF_LIST_PREV(AOFF_LIST_NEXT(del)) = AOFF_LIST_PREV(del);
}
return;
}
- else if (LIST_NEXT(del)) {
+ else if (AOFF_LIST_NEXT(del)) {
/* Replace tree node by next element in list... */
-
- ASSERT(AOFF_BLK_SZ(LIST_NEXT(del)) == AOFF_BLK_SZ(del));
- ASSERT(IS_LIST_ELEM(LIST_NEXT(del)));
-
- replace(&crr->root, (AOFF_RBTree_t*)del, LIST_NEXT(del));
-
+
+ ASSERT(AOFF_BLK_SZ(AOFF_LIST_NEXT(del)) == AOFF_BLK_SZ(del));
+ ASSERT(IS_LIST_ELEM(AOFF_LIST_NEXT(del)));
+
+ replace(&crr->root, (AOFF_RBTree_t*)del, AOFF_LIST_NEXT(del));
+
HARD_CHECK_TREE(&crr->crr, alc->blk_order, crr->root, 0);
return;
}
@@ -550,13 +550,13 @@ aoff_unlink_free_block(Allctr_t *allctr, Block_t *blk)
HARD_CHECK_TREE(&crr->crr, alc->blk_order, crr->root, 0);
/* Update the carrier tree with a potentially new (lower) max_sz
- */
+ */
if (crr->root) {
if (crr->rbt_node.hdr.bhdr == crr->root->max_sz) {
return;
}
ASSERT(crr->rbt_node.hdr.bhdr > crr->root->max_sz);
- crr->rbt_node.hdr.bhdr = crr->root->max_sz;
+ crr->rbt_node.hdr.bhdr = crr->root->max_sz;
}
else {
crr->rbt_node.hdr.bhdr = 0;
@@ -773,7 +773,7 @@ rbt_insert(enum AOFFSortOrder order, AOFF_RBTree_t** root, AOFF_RBTree_t* blk)
#ifdef DEBUG
blk->flags = (order == FF_BF) ? IS_BF_FLG : 0;
#else
- blk->flags = 0;
+ blk->flags = 0;
#endif
blk->left = NULL;
blk->right = NULL;
@@ -787,7 +787,7 @@ rbt_insert(enum AOFFSortOrder order, AOFF_RBTree_t** root, AOFF_RBTree_t* blk)
else {
AOFF_RBTree_t *x = *root;
while (1) {
- SWord diff;
+ SWord diff;
if (x->max_sz < blk_sz) {
x->max_sz = blk_sz;
}
@@ -810,14 +810,14 @@ rbt_insert(enum AOFFSortOrder order, AOFF_RBTree_t** root, AOFF_RBTree_t* blk)
}
else {
ASSERT(order == FF_BF);
- ASSERT(blk->flags & IS_BF_FLG);
- ASSERT(x->flags & IS_BF_FLG);
+ ASSERT(blk->flags & IS_BF_FLG);
+ ASSERT(x->flags & IS_BF_FLG);
SET_LIST_ELEM(blk);
- LIST_NEXT(blk) = LIST_NEXT(x);
- LIST_PREV(blk) = x;
- if (LIST_NEXT(x))
- LIST_PREV(LIST_NEXT(x)) = blk;
- LIST_NEXT(x) = blk;
+ AOFF_LIST_NEXT(blk) = AOFF_LIST_NEXT(x);
+ AOFF_LIST_PREV(blk) = x;
+ if (AOFF_LIST_NEXT(x))
+ AOFF_LIST_PREV(AOFF_LIST_NEXT(x)) = blk;
+ AOFF_LIST_NEXT(x) = blk;
return;
}
}
@@ -831,7 +831,7 @@ rbt_insert(enum AOFFSortOrder order, AOFF_RBTree_t** root, AOFF_RBTree_t* blk)
}
if (order == FF_BF) {
SET_TREE_NODE(blk);
- LIST_NEXT(blk) = NULL;
+ AOFF_LIST_NEXT(blk) = NULL;
}
}
@@ -878,7 +878,7 @@ aoff_get_free_block(Allctr_t *allctr, Uint size,
#ifdef HARD_DEBUG
AOFF_RBTree_t* dbg_blk;
#endif
-
+
ASSERT(!cand_blk || cand_size >= size);
/* Get first-fit carrier
@@ -964,7 +964,7 @@ static void aoff_add_mbc(Allctr_t *allctr, Carrier_t *carrier)
AOFF_RBTree_t **root = &alc->mbc_root;
ASSERT(!IS_CRR_IN_TREE(crr, *root));
- HARD_CHECK_TREE(NULL, alc->crr_order, *root, 0);
+ HARD_CHECK_TREE(NULL, alc->crr_order, *root, 0);
rbt_insert(alc->crr_order, root, &crr->rbt_node);
@@ -1105,7 +1105,7 @@ info_options(Allctr_t *allctr,
}
if (hpp || szp) {
-
+
if (!atoms_initialized)
erts_exit(ERTS_ERROR_EXIT, "%s:%d: Internal error: Atoms not initialized",
__FILE__, __LINE__);;
@@ -1132,7 +1132,7 @@ erts_aoffalc_test(UWord op, UWord a1, UWord a2)
switch (op) {
case 0x500: return (UWord) ((AOFFAllctr_t *) a1)->blk_order == FF_AOBF;
case 0x501: {
- AOFF_RBTree_t *node = ((AOFFAllctr_t *) a1)->mbc_root;
+ AOFF_RBTree_t *node = ((AOFFAllctr_t *) a1)->mbc_root;
Uint size = (Uint) a2;
node = node ? rbt_search(node, size) : NULL;
return (UWord) (node ? RBT_NODE_TO_MBC(node)->root : NULL);
@@ -1140,13 +1140,13 @@ erts_aoffalc_test(UWord op, UWord a1, UWord a2)
case 0x502: return (UWord) ((AOFF_RBTree_t *) a1)->parent;
case 0x503: return (UWord) ((AOFF_RBTree_t *) a1)->left;
case 0x504: return (UWord) ((AOFF_RBTree_t *) a1)->right;
- case 0x505: return (UWord) LIST_NEXT(a1);
+ case 0x505: return (UWord) AOFF_LIST_NEXT(a1);
case 0x506: return (UWord) IS_BLACK((AOFF_RBTree_t *) a1);
case 0x507: return (UWord) IS_TREE_NODE((AOFF_RBTree_t *) a1);
case 0x508: return (UWord) 0; /* IS_BF_ALGO */
case 0x509: return (UWord) ((AOFF_RBTree_t *) a1)->max_sz;
case 0x50a: return (UWord) ((AOFFAllctr_t *) a1)->blk_order == FF_BF;
- case 0x50b: return (UWord) LIST_PREV(a1);
+ case 0x50b: return (UWord) AOFF_LIST_PREV(a1);
default: ASSERT(0); return ~((UWord) 0);
}
}
@@ -1166,7 +1166,7 @@ static int rbt_is_member(AOFF_RBTree_t* root, AOFF_RBTree_t* node)
return 0;
}
node = node->parent;
- }
+ }
return 1;
}
@@ -1279,15 +1279,15 @@ check_tree(Carrier_t* within_crr, enum AOFFSortOrder order, AOFF_RBTree_t* root,
}
if (order == FF_BF) {
AOFF_RBTree_t* y = x;
- AOFF_RBTree_t* nxt = LIST_NEXT(y);
+ AOFF_RBTree_t* nxt = AOFF_LIST_NEXT(y);
ASSERT(IS_TREE_NODE(x));
while (nxt) {
ASSERT(IS_LIST_ELEM(nxt));
ASSERT(AOFF_BLK_SZ(nxt) == AOFF_BLK_SZ(x));
ASSERT(FBLK_TO_MBC(&nxt->hdr) == within_crr);
- ASSERT(LIST_PREV(nxt) == y);
+ ASSERT(AOFF_LIST_PREV(nxt) == y);
y = nxt;
- nxt = LIST_NEXT(nxt);
+ nxt = AOFF_LIST_NEXT(nxt);
}
}
@@ -1301,13 +1301,13 @@ check_tree(Carrier_t* within_crr, enum AOFFSortOrder order, AOFF_RBTree_t* root,
if (x->left) {
ASSERT(x->left->parent == x);
ASSERT(cmp_blocks(order, x->left, x) < 0);
- ASSERT(x->left->max_sz <= x->max_sz);
+ ASSERT(x->left->max_sz <= x->max_sz);
}
if (x->right) {
ASSERT(x->right->parent == x);
ASSERT(cmp_blocks(order, x->right, x) > 0);
- ASSERT(x->right->max_sz <= x->max_sz);
+ ASSERT(x->right->max_sz <= x->max_sz);
}
ASSERT(x->max_sz >= AOFF_BLK_SZ(x));
ASSERT(x->max_sz == AOFF_BLK_SZ(x)
@@ -1327,7 +1327,7 @@ check_tree(Carrier_t* within_crr, enum AOFFSortOrder order, AOFF_RBTree_t* root,
x = x->parent;
--depth;
}
- ASSERT(depth == 0 || (!root && depth==1));
+ ASSERT(depth == 0 || (!root && depth==1));
ASSERT(curr_blacks == 0);
ASSERT((1 << (max_depth/2)) <= node_cnt);
@@ -1373,4 +1373,3 @@ print_tree(AOFF_RBTree_t* root)
#endif /* PRINT_TREE */
#endif /* HARD_DEBUG */
-
diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c
index 605a2b3461..44655ad5df 100644
--- a/erts/emulator/beam/erl_async.c
+++ b/erts/emulator/beam/erl_async.c
@@ -336,7 +336,7 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
case ERTS_THR_Q_NEED_THR_PRGR:
{
ErtsThrPrgrVal prgr = erts_thr_q_need_thr_progress(q);
- erts_thr_progress_wakeup(NULL, prgr);
+ erts_thr_progress_wakeup(erts_thr_prgr_data(NULL), prgr);
/*
* We do no dequeue finalizing in hope that a new async
* job will arrive before we are woken due to thread
diff --git a/erts/emulator/beam/erl_bestfit_alloc.c b/erts/emulator/beam/erl_bestfit_alloc.c
index 9cb1199c2a..3f981ca2bc 100644
--- a/erts/emulator/beam/erl_bestfit_alloc.c
+++ b/erts/emulator/beam/erl_bestfit_alloc.c
@@ -122,8 +122,8 @@ typedef struct {
RBTree_t *next;
} RBTreeList_t;
-#define LIST_NEXT(N) (((RBTreeList_t *) (N))->next)
-#define LIST_PREV(N) (((RBTreeList_t *) (N))->t.parent)
+#define BF_LIST_NEXT(N) (((RBTreeList_t *) (N))->next)
+#define BF_LIST_PREV(N) (((RBTreeList_t *) (N))->t.parent)
#ifdef DEBUG
@@ -593,7 +593,6 @@ aobf_link_free_block(Allctr_t *allctr, Block_t *block)
RBTree_t *blk = (RBTree_t *) block;
Uint blk_sz = BF_BLK_SZ(blk);
-
blk->flags = 0;
blk->left = NULL;
@@ -673,7 +672,7 @@ aobf_get_free_block(Allctr_t *allctr, Uint size,
x = x->left;
}
}
-
+
if (!blk)
return NULL;
@@ -729,11 +728,11 @@ bf_link_free_block(Allctr_t *allctr, Block_t *block)
if (blk_sz == size) {
SET_LIST_ELEM(blk);
- LIST_NEXT(blk) = LIST_NEXT(x);
- LIST_PREV(blk) = x;
- if (LIST_NEXT(x))
- LIST_PREV(LIST_NEXT(x)) = blk;
- LIST_NEXT(x) = blk;
+ BF_LIST_NEXT(blk) = BF_LIST_NEXT(x);
+ BF_LIST_PREV(blk) = x;
+ if (BF_LIST_NEXT(x))
+ BF_LIST_PREV(BF_LIST_NEXT(x)) = blk;
+ BF_LIST_NEXT(x) = blk;
return; /* Finished */
}
@@ -764,7 +763,7 @@ bf_link_free_block(Allctr_t *allctr, Block_t *block)
}
SET_TREE_NODE(blk);
- LIST_NEXT(blk) = NULL;
+ BF_LIST_NEXT(blk) = NULL;
#ifdef HARD_DEBUG
check_tree(root, 0, 0);
@@ -780,22 +779,22 @@ bf_unlink_free_block(Allctr_t *allctr, Block_t *block)
if (IS_LIST_ELEM(x)) {
/* Remove from list */
- ASSERT(LIST_PREV(x));
- LIST_NEXT(LIST_PREV(x)) = LIST_NEXT(x);
- if (LIST_NEXT(x))
- LIST_PREV(LIST_NEXT(x)) = LIST_PREV(x);
+ ASSERT(BF_LIST_PREV(x));
+ BF_LIST_NEXT(BF_LIST_PREV(x)) = BF_LIST_NEXT(x);
+ if (BF_LIST_NEXT(x))
+ BF_LIST_PREV(BF_LIST_NEXT(x)) = BF_LIST_PREV(x);
}
- else if (LIST_NEXT(x)) {
+ else if (BF_LIST_NEXT(x)) {
/* Replace tree node by next element in list... */
- ASSERT(BF_BLK_SZ(LIST_NEXT(x)) == BF_BLK_SZ(x));
+ ASSERT(BF_BLK_SZ(BF_LIST_NEXT(x)) == BF_BLK_SZ(x));
ASSERT(IS_TREE_NODE(x));
- ASSERT(IS_LIST_ELEM(LIST_NEXT(x)));
+ ASSERT(IS_LIST_ELEM(BF_LIST_NEXT(x)));
#ifdef HARD_DEBUG
check_tree(root, 0, 0);
#endif
- replace(root, x, LIST_NEXT(x));
+ replace(root, x, BF_LIST_NEXT(x));
#ifdef HARD_DEBUG
check_tree(bfallctr, 0);
@@ -834,7 +833,7 @@ bf_get_free_block(Allctr_t *allctr, Uint size,
x = x->left;
}
}
-
+
if (!blk)
return NULL;
@@ -853,7 +852,7 @@ bf_get_free_block(Allctr_t *allctr, Uint size,
/* Use next block if it exist in order to avoid replacing
the tree node */
- blk = LIST_NEXT(blk) ? LIST_NEXT(blk) : blk;
+ blk = BF_LIST_NEXT(blk) ? BF_LIST_NEXT(blk) : blk;
bf_unlink_free_block(allctr, (Block_t *) blk);
return (Block_t *) blk;
@@ -938,7 +937,7 @@ info_options(Allctr_t *allctr,
}
if (hpp || szp) {
-
+
if (!atoms_initialized)
erts_exit(ERTS_ERROR_EXIT, "%s:%d: Internal error: Atoms not initialized",
__FILE__, __LINE__);;
@@ -969,12 +968,12 @@ erts_bfalc_test(UWord op, UWord a1, UWord a2)
case 0x202: return (UWord) ((RBTree_t *) a1)->parent;
case 0x203: return (UWord) ((RBTree_t *) a1)->left;
case 0x204: return (UWord) ((RBTree_t *) a1)->right;
- case 0x205: return (UWord) LIST_NEXT(a1);
+ case 0x205: return (UWord) BF_LIST_NEXT(a1);
case 0x206: return (UWord) IS_BLACK((RBTree_t *) a1);
case 0x207: return (UWord) IS_TREE_NODE((RBTree_t *) a1);
case 0x208: return (UWord) 1; /* IS_BF_ALGO */
case 0x20a: return (UWord) !((BFAllctr_t *) a1)->address_order; /* IS_BF */
- case 0x20b: return (UWord) LIST_PREV(a1);
+ case 0x20b: return (UWord) BF_LIST_PREV(a1);
default: ASSERT(0); return ~((UWord) 0);
}
}
diff --git a/erts/emulator/beam/erl_bif_atomics.c b/erts/emulator/beam/erl_bif_atomics.c
new file mode 100644
index 0000000000..029831bd95
--- /dev/null
+++ b/erts/emulator/beam/erl_bif_atomics.c
@@ -0,0 +1,256 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2018. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Purpose: High performance atomics.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <stddef.h> /* offsetof */
+
+#include "sys.h"
+#include "export.h"
+#include "bif.h"
+#include "erl_threads.h"
+#include "big.h"
+#include "erl_binary.h"
+#include "erl_bif_unique.h"
+#include "erl_map.h"
+
+typedef struct
+{
+ int is_signed;
+ UWord vlen;
+ erts_atomic64_t v[1];
+}AtomicsRef;
+
+static int atomics_destructor(Binary *unused)
+{
+ return 1;
+}
+
+#define OPT_SIGNED (1 << 0)
+
+BIF_RETTYPE erts_internal_atomics_new_2(BIF_ALIST_2)
+{
+ AtomicsRef* p;
+ Binary* mbin;
+ UWord i, cnt, opts;
+ Uint bytes;
+ Eterm* hp;
+
+ if (!term_to_UWord(BIF_ARG_1, &cnt)
+ || cnt == 0
+ || !term_to_UWord(BIF_ARG_2, &opts)) {
+
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if (cnt > (ERTS_UWORD_MAX / sizeof(p->v[0])))
+ BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+
+ bytes = offsetof(AtomicsRef, v) + cnt*sizeof(p->v[0]);
+ mbin = erts_create_magic_binary_x(bytes,
+ atomics_destructor,
+ ERTS_ALC_T_ATOMICS,
+ 0);
+ p = ERTS_MAGIC_BIN_DATA(mbin);
+ p->is_signed = opts & OPT_SIGNED;
+ p->vlen = cnt;
+ for (i=0; i < cnt; i++)
+ erts_atomic64_init_nob(&p->v[i], 0);
+ hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
+ return erts_mk_magic_ref(&hp, &MSO(BIF_P), mbin);
+}
+
+static ERTS_INLINE int get_ref(Eterm ref, AtomicsRef** pp)
+{
+ Binary* mbin;
+ if (!is_internal_magic_ref(ref))
+ return 0;
+
+ mbin = erts_magic_ref2bin(ref);
+ if (ERTS_MAGIC_BIN_DESTRUCTOR(mbin) != atomics_destructor)
+ return 0;
+ *pp = ERTS_MAGIC_BIN_DATA(mbin);
+ return 1;
+}
+
+static ERTS_INLINE int get_ref_ix(Eterm ref, Eterm ix,
+ AtomicsRef** pp, UWord* ixp)
+{
+ return (get_ref(ref, pp)
+ && term_to_UWord(ix, ixp)
+ && --(*ixp) < (*pp)->vlen);
+}
+
+static ERTS_INLINE int get_value(AtomicsRef* p, Eterm term, erts_aint64_t *valp)
+{
+ return (p->is_signed ?
+ term_to_Sint64(term, (Sint64*)valp) :
+ term_to_Uint64(term, (Uint64*)valp));
+}
+
+static ERTS_INLINE int get_incr(AtomicsRef* p, Eterm term, erts_aint64_t *valp)
+{
+ return (term_to_Sint64(term, (Sint64*)valp)
+ || term_to_Uint64(term, (Uint64*)valp));
+}
+
+static ERTS_INLINE Eterm bld_atomic(Process* proc, AtomicsRef* p,
+ erts_aint64_t val)
+{
+ if (p->is_signed) {
+ if (IS_SSMALL(val))
+ return make_small((Sint) val);
+ else {
+ Uint hsz = ERTS_SINT64_HEAP_SIZE(val);
+ Eterm* hp = HAlloc(proc, hsz);
+ return erts_sint64_to_big(val, &hp);
+ }
+ }
+ else {
+ if ((Uint64)val <= MAX_SMALL)
+ return make_small((Sint) val);
+ else {
+ Uint hsz = ERTS_UINT64_HEAP_SIZE((Uint64)val);
+ Eterm* hp = HAlloc(proc, hsz);
+ return erts_uint64_to_big(val, &hp);
+ }
+ }
+}
+
+BIF_RETTYPE atomics_put_3(BIF_ALIST_3)
+{
+ AtomicsRef* p;
+ UWord ix;
+ erts_aint64_t val;
+
+ if (!get_ref_ix(BIF_ARG_1, BIF_ARG_2, &p, &ix)
+ || !get_value(p, BIF_ARG_3, &val)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ erts_atomic64_set_mb(&p->v[ix], val);
+ return am_ok;
+}
+
+BIF_RETTYPE atomics_get_2(BIF_ALIST_2)
+{
+ AtomicsRef* p;
+ UWord ix;
+
+ if (!get_ref_ix(BIF_ARG_1, BIF_ARG_2, &p, &ix)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ return bld_atomic(BIF_P, p, erts_atomic64_read_mb(&p->v[ix]));
+}
+
+BIF_RETTYPE atomics_add_3(BIF_ALIST_3)
+{
+ AtomicsRef* p;
+ UWord ix;
+ erts_aint64_t incr;
+
+ if (!get_ref_ix(BIF_ARG_1, BIF_ARG_2, &p, &ix)
+ || !get_incr(p, BIF_ARG_3, &incr)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ erts_atomic64_add_mb(&p->v[ix], incr);
+ return am_ok;
+}
+
+BIF_RETTYPE atomics_add_get_3(BIF_ALIST_3)
+{
+ AtomicsRef* p;
+ UWord ix;
+ erts_aint64_t incr;
+
+ if (!get_ref_ix(BIF_ARG_1, BIF_ARG_2, &p, &ix)
+ || !get_incr(p, BIF_ARG_3, &incr)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ return bld_atomic(BIF_P, p, erts_atomic64_add_read_mb(&p->v[ix], incr));
+}
+
+BIF_RETTYPE atomics_exchange_3(BIF_ALIST_3)
+{
+ AtomicsRef* p;
+ UWord ix;
+ erts_aint64_t desired, was;
+
+ if (!get_ref_ix(BIF_ARG_1, BIF_ARG_2, &p, &ix)
+ || !get_value(p, BIF_ARG_3, &desired)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ was = erts_atomic64_xchg_mb(&p->v[ix], desired);
+ return bld_atomic(BIF_P, p, was);
+}
+
+BIF_RETTYPE atomics_compare_exchange_4(BIF_ALIST_4)
+{
+ AtomicsRef* p;
+ UWord ix;
+ erts_aint64_t expected, desired, was;
+
+ if (!get_ref_ix(BIF_ARG_1, BIF_ARG_2, &p, &ix)
+ || !get_value(p, BIF_ARG_3, &expected)
+ || !get_value(p, BIF_ARG_4, &desired)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ was = erts_atomic64_cmpxchg_mb(&p->v[ix], desired, expected);
+ return was == expected ? am_ok : bld_atomic(BIF_P, p, was);
+}
+
+BIF_RETTYPE atomics_info_1(BIF_ALIST_1)
+{
+ AtomicsRef* p;
+ Uint hsz = MAP4_SZ;
+ Eterm *hp;
+ Uint64 max;
+ Sint64 min;
+ UWord memory;
+ Eterm max_val, min_val, sz_val, mem_val;
+
+ if (!get_ref(BIF_ARG_1, &p))
+ BIF_ERROR(BIF_P, BADARG);
+
+ max = p->is_signed ? ERTS_SINT64_MAX : ERTS_UINT64_MAX;
+ min = p->is_signed ? ERTS_SINT64_MIN : 0;
+ memory = erts_magic_ref2bin(BIF_ARG_1)->orig_size;
+
+ erts_bld_uint64(NULL, &hsz, max);
+ erts_bld_sint64(NULL, &hsz, min);
+ erts_bld_uword(NULL, &hsz, p->vlen);
+ erts_bld_uword(NULL, &hsz, memory);
+
+ hp = HAlloc(BIF_P, hsz);
+ max_val = erts_bld_uint64(&hp, NULL, max);
+ min_val = erts_bld_sint64(&hp, NULL, min);
+ sz_val = erts_bld_uword(&hp, NULL, p->vlen);
+ mem_val = erts_bld_uword(&hp, NULL, memory);
+
+ return MAP4(hp, am_max, max_val,
+ am_memory, mem_val,
+ am_min, min_val,
+ am_size, sz_val);
+}
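
atomics_compare_exchange_4 above returns ok when the swap happens and the value it actually observed otherwise, which is the strong compare-and-swap contract. A standalone C11 sketch of the same semantics (hypothetical names, not ERTS code):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic long slot; /* stands in for one p->v[ix] cell */

/* Swap to `desired` only if the cell holds `expected`; return what was
 * actually seen, like atomics:compare_exchange/4 returning the value. */
long compare_exchange(long expected, long desired, int *swapped)
{
    long seen = expected;
    *swapped = atomic_compare_exchange_strong(&slot, &seen, desired);
    return seen; /* equals `expected` iff *swapped is true */
}

int main(void)
{
    int ok;
    long seen = compare_exchange(0, 42, &ok);
    printf("swapped=%d seen=%ld\n", ok, seen);  /* swapped=1 seen=0 */
    seen = compare_exchange(0, 7, &ok);
    printf("swapped=%d seen=%ld\n", ok, seen);  /* swapped=0 seen=42 */
    return 0;
}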
diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c
index a2610bf2e1..ae1bf6e652 100644
--- a/erts/emulator/beam/erl_bif_binary.c
+++ b/erts/emulator/beam/erl_bif_binary.c
@@ -2762,7 +2762,7 @@ static BIF_RETTYPE do_encode_unsigned(Process *p, Eterm uns, Eterm endianess)
dsize_t num_parts = BIG_SIZE(bigp);
Eterm res;
byte *b;
- ErtsDigit d;
+ ErtsDigit d = 0;
if(BIG_SIGN(bigp)) {
goto badarg;
@@ -2778,26 +2778,22 @@ static BIF_RETTYPE do_encode_unsigned(Process *p, Eterm uns, Eterm endianess)
if (endianess == am_big) {
Sint i,j;
j = 0;
- d = BIG_DIGIT(bigp,0);
for (i=n-1;i>=0;--i) {
- b[i] = d & 0xFF;
- if (!((++j) % sizeof(ErtsDigit))) {
+ if (!((j++) % sizeof(ErtsDigit))) {
d = BIG_DIGIT(bigp,j / sizeof(ErtsDigit));
- } else {
- d >>= 8;
}
+ b[i] = d & 0xFF;
+ d >>= 8;
}
} else {
Sint i,j;
j = 0;
- d = BIG_DIGIT(bigp,0);
for (i=0;i<n;++i) {
- b[i] = d & 0xFF;
- if (!((++j) % sizeof(ErtsDigit))) {
+ if (!((j++) % sizeof(ErtsDigit))) {
d = BIG_DIGIT(bigp,j / sizeof(ErtsDigit));
- } else {
- d >>= 8;
}
+ b[i] = d & 0xFF;
+ d >>= 8;
}
}
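
The corrected loops above load a fresh digit just before emitting its first byte (j++ before the test) instead of after emitting its last one (++j), which previously read BIG_DIGIT one element past the end whenever the byte count was a multiple of the digit size. A standalone sketch of the fixed little-endian loop (hypothetical Digit type, not ERTS code):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t Digit;

/* Emit `nbytes` little-endian bytes from a digit array, loading each
 * digit lazily on its first byte so no digit past the last is read. */
static void to_bytes_le(const Digit *dig, size_t nbytes, uint8_t *out)
{
    Digit d = 0;
    size_t j = 0, i;
    for (i = 0; i < nbytes; ++i) {
        if (!(j++ % sizeof(Digit)))
            d = dig[j / sizeof(Digit)];   /* j already incremented */
        out[i] = d & 0xFF;
        d >>= 8;
    }
}

int main(void)
{
    Digit dig[2] = { 0x04030201u, 0x08070605u };
    uint8_t out[8];
    size_t i;
    to_bytes_le(dig, sizeof out, out);
    for (i = 0; i < sizeof out; ++i)
        printf("%02x ", out[i]);          /* 01 02 03 04 05 06 07 08 */
    printf("\n");
    return 0;
}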
diff --git a/erts/emulator/beam/erl_bif_counters.c b/erts/emulator/beam/erl_bif_counters.c
new file mode 100644
index 0000000000..7c8884ba32
--- /dev/null
+++ b/erts/emulator/beam/erl_bif_counters.c
@@ -0,0 +1,255 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2018. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Purpose: The implementation for 'counters' with 'write_concurrency'.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <stddef.h> /* offsetof */
+
+#include "sys.h"
+#include "export.h"
+#include "bif.h"
+#include "erl_threads.h"
+#include "big.h"
+#include "erl_binary.h"
+#include "erl_bif_unique.h"
+#include "erl_map.h"
+
+/*
+ * Each logical counter consists of one 64-bit atomic instance per scheduler
+ * plus one instance for the "base value".
+ *
+ * get() reads all atomics for the counter and returns the sum.
+ * add() reads and writes only its own scheduler specific atomic instance.
+ * put() reads all scheduler specific atomics and writes a new base value.
+ */
+#define ATOMICS_PER_COUNTER (erts_no_schedulers + 1)
+
+#define ATOMICS_PER_CACHE_LINE (ERTS_CACHE_LINE_SIZE / sizeof(erts_atomic64_t))
+
+typedef struct
+{
+ UWord arity;
+#ifdef DEBUG
+ UWord ulen;
+#endif
+ union {
+ erts_atomic64_t v[ATOMICS_PER_CACHE_LINE];
+ byte cache_line__[ERTS_CACHE_LINE_SIZE];
+ } u[1];
+}CountersRef;
+
+static int counters_destructor(Binary *mbin)
+{
+ return 1;
+}
+
+
+static UWord ERTS_INLINE div_ceil(UWord dividend, UWord divisor)
+{
+ return (dividend + divisor - 1) / divisor;
+}
+
+BIF_RETTYPE erts_internal_counters_new_1(BIF_ALIST_1)
+{
+ CountersRef* p;
+ Binary* mbin;
+ UWord ui, vi, cnt;
+ Uint bytes, cache_lines;
+ Eterm* hp;
+
+ if (!term_to_UWord(BIF_ARG_1, &cnt)
+ || cnt == 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if (cnt > (ERTS_UWORD_MAX / (sizeof(erts_atomic64_t)*2*ATOMICS_PER_COUNTER)))
+ BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+
+ cache_lines = ATOMICS_PER_COUNTER * div_ceil(cnt, ATOMICS_PER_CACHE_LINE);
+ bytes = offsetof(CountersRef, u) + cache_lines * ERTS_CACHE_LINE_SIZE;
+ mbin = erts_create_magic_binary_x(bytes,
+ counters_destructor,
+ ERTS_ALC_T_ATOMICS,
+ 0);
+ p = ERTS_MAGIC_BIN_DATA(mbin);
+ p->arity = cnt;
+
+#ifdef DEBUG
+ p->ulen = cache_lines;
+#endif
+ ASSERT((byte*)&p->u[cache_lines] <= ((byte*)p + bytes));
+ for (ui=0; ui < cache_lines; ui++)
+ for (vi=0; vi < ATOMICS_PER_CACHE_LINE; vi++)
+ erts_atomic64_init_nob(&p->u[ui].v[vi], 0);
+ hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
+ return erts_mk_magic_ref(&hp, &MSO(BIF_P), mbin);
+}
+
+static ERTS_INLINE int get_ref(Eterm ref, CountersRef** pp)
+{
+ Binary* mbin;
+ if (!is_internal_magic_ref(ref))
+ return 0;
+
+ mbin = erts_magic_ref2bin(ref);
+ if (ERTS_MAGIC_BIN_DESTRUCTOR(mbin) != counters_destructor)
+ return 0;
+ *pp = ERTS_MAGIC_BIN_DATA(mbin);
+ return 1;
+}
+
+static ERTS_INLINE int get_ref_cnt(Eterm ref, Eterm index,
+ CountersRef** pp,
+ erts_atomic64_t** app,
+ UWord sched_ix)
+{
+ CountersRef* p;
+ UWord ix, ui, vi;
+ if (!get_ref(ref, &p) || !term_to_UWord(index, &ix) || --ix >= p->arity)
+ return 0;
+ ui = (ix / ATOMICS_PER_CACHE_LINE) * ATOMICS_PER_COUNTER + sched_ix;
+ vi = ix % ATOMICS_PER_CACHE_LINE;
+ ASSERT(ui < p->ulen);
+ *pp = p;
+ *app = &p->u[ui].v[vi];
+ return 1;
+}
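+
+/* Index arithmetic example (hypothetical sizes): with a 64-byte cache
+ * line and 8-byte atomics, ATOMICS_PER_CACHE_LINE is 8; with 4
+ * schedulers, ATOMICS_PER_COUNTER is 5. Counter index ix = 10 (0-based,
+ * after the 1-based user index is decremented) then falls in cache-line
+ * group 10/8 = 1, so scheduler 2 uses u[1*5 + 2].v[10 % 8], i.e. its
+ * own cache line within that group. */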
+
+static ERTS_INLINE int get_ref_my_cnt(Eterm ref, Eterm index,
+ CountersRef** pp,
+ erts_atomic64_t** app)
+{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ASSERT(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp));
+ ASSERT(esdp->no > 0 && esdp->no < ATOMICS_PER_COUNTER);
+ return get_ref_cnt(ref, index, pp, app, esdp->no);
+}
+
+static ERTS_INLINE int get_ref_first_cnt(Eterm ref, Eterm index,
+ CountersRef** pp,
+ erts_atomic64_t** app)
+{
+ return get_ref_cnt(ref, index, pp, app, 0);
+}
+
+static ERTS_INLINE int get_incr(CountersRef* p, Eterm term, erts_aint64_t *valp)
+{
+ return (term_to_Sint64(term, (Sint64*)valp)
+ || term_to_Uint64(term, (Uint64*)valp));
+}
+
+static ERTS_INLINE Eterm bld_counter(Process* proc, CountersRef* p,
+ erts_aint64_t val)
+{
+ if (IS_SSMALL(val))
+ return make_small((Sint) val);
+ else {
+ Uint hsz = ERTS_SINT64_HEAP_SIZE(val);
+ Eterm* hp = HAlloc(proc, hsz);
+ return erts_sint64_to_big(val, &hp);
+ }
+}
+
+BIF_RETTYPE erts_internal_counters_get_2(BIF_ALIST_2)
+{
+ CountersRef* p;
+ erts_atomic64_t* ap;
+ erts_aint64_t acc = 0;
+ int j;
+
+ if (!get_ref_first_cnt(BIF_ARG_1, BIF_ARG_2, &p, &ap)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ for (j = ATOMICS_PER_COUNTER; j ; --j) {
+ acc += erts_atomic64_read_nob(ap);
+ ap = (erts_atomic64_t*) ((byte*)ap + ERTS_CACHE_LINE_SIZE);
+ }
+ return bld_counter(BIF_P, p, acc);
+}
+
+BIF_RETTYPE erts_internal_counters_add_3(BIF_ALIST_3)
+{
+ CountersRef* p;
+ erts_atomic64_t* ap;
+ erts_aint64_t incr, sum;
+
+ if (!get_ref_my_cnt(BIF_ARG_1, BIF_ARG_2, &p, &ap)
+ || !get_incr(p, BIF_ARG_3, &incr)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ sum = incr + erts_atomic64_read_nob(ap);
+ erts_atomic64_set_nob(ap, sum);
+ return am_ok;
+}
+
+BIF_RETTYPE erts_internal_counters_put_3(BIF_ALIST_3)
+{
+ CountersRef* p;
+ erts_atomic64_t* first_ap;
+ erts_atomic64_t* ap;
+ erts_aint64_t acc;
+ erts_aint64_t val;
+ int j;
+
+ if (!get_ref_first_cnt(BIF_ARG_1, BIF_ARG_2, &p, &first_ap)
+ || !term_to_Sint64(BIF_ARG_3, &val)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ ap = first_ap;
+ acc = 0;
+ j = ATOMICS_PER_COUNTER - 1;
+ do {
+ ap = (erts_atomic64_t*) ((byte*)ap + ERTS_CACHE_LINE_SIZE);
+ acc += erts_atomic64_read_nob(ap);
+ } while (--j);
+ erts_atomic64_set_nob(first_ap, val-acc);
+
+ return am_ok;
+}
+
+BIF_RETTYPE erts_internal_counters_info_1(BIF_ALIST_1)
+{
+ CountersRef* p;
+ Uint hsz = MAP2_SZ;
+ Eterm *hp;
+ UWord memory;
+ Eterm sz_val, mem_val;
+
+ if (!get_ref(BIF_ARG_1, &p))
+ BIF_ERROR(BIF_P, BADARG);
+
+ memory = erts_magic_ref2bin(BIF_ARG_1)->orig_size;
+ erts_bld_uword(NULL, &hsz, p->arity);
+ erts_bld_uword(NULL, &hsz, memory);
+
+ hp = HAlloc(BIF_P, hsz);
+ sz_val = erts_bld_uword(&hp, NULL, p->arity);
+ mem_val = erts_bld_uword(&hp, NULL, memory);
+
+ return MAP2(hp, am_memory, mem_val,
+ am_size, sz_val);
+}
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 7fada0d548..f6a98c85fd 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -2967,7 +2967,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
} else if (ERTS_IS_ATOM_STR("context_reductions", BIF_ARG_1)) {
BIF_RET(make_small(CONTEXT_REDS));
} else if (ERTS_IS_ATOM_STR("kernel_poll", BIF_ARG_1)) {
-#ifdef ERTS_ENABLE_KERNEL_POLL
+#if ERTS_ENABLE_KERNEL_POLL
BIF_RET(am_true);
#else
BIF_RET(am_false);
@@ -3139,6 +3139,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
DECL_AM(tag);
BIF_RET(AM_tag);
#endif
+ } else if (ERTS_IS_ATOM_STR("system_logger", BIF_ARG_1)) {
+ BIF_RET(erts_get_system_logger());
}
BIF_ERROR(BIF_P, BADARG);
@@ -4608,6 +4610,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
}
}
else if (ERTS_IS_ATOM_STR("broken_halt", BIF_ARG_1)) {
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
broken_halt_test(BIF_ARG_2);
}
else if (ERTS_IS_ATOM_STR("unique_monotonic_integer_state", BIF_ARG_1)) {
diff --git a/erts/emulator/beam/erl_bif_lists.c b/erts/emulator/beam/erl_bif_lists.c
index 395be67a90..aaf262780f 100644
--- a/erts/emulator/beam/erl_bif_lists.c
+++ b/erts/emulator/beam/erl_bif_lists.c
@@ -29,12 +29,13 @@
#include "sys.h"
#include "erl_vm.h"
#include "global.h"
-#include "erl_process.h"
-#include "error.h"
#include "bif.h"
+#include "erl_binary.h"
+
static Eterm keyfind(int Bif, Process* p, Eterm Key, Eterm Pos, Eterm List);
+
static BIF_RETTYPE append(Process* p, Eterm A, Eterm B)
{
Eterm list;
@@ -146,110 +147,732 @@ BIF_RETTYPE append_2(BIF_ALIST_2)
return append(BIF_P, BIF_ARG_1, BIF_ARG_2);
}
-/*
- * erlang:'--'/2
- */
+/* erlang:'--'/2
+ *
+ * Subtracts a list from another (LHS -- RHS), removing the first occurrence
+ * of each element of RHS from LHS. There is no type coercion, so elements
+ * must match exactly.
+ *
+ * The BIF is broken into several stages that can all trap individually, and it
+ * chooses its algorithm based on input size. If either input is small it will
+ * use a linear scan tuned to which side it's on, and if both inputs are large
+ * enough it will convert RHS into a multiset to provide good asymptotic
+ * behavior. */
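+
+/* A couple of informal examples of the semantics, for orientation:
+ *
+ *     [a,b,c,b] -- [b] gives [a,c,b] (only the first 'b' is removed)
+ *     [a,b] -- [b,b,c] gives [a] (unmatched RHS elements are ignored)
+ */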
+
+#define SUBTRACT_LHS_THRESHOLD 16
+#define SUBTRACT_RHS_THRESHOLD 16
+
+typedef enum {
+ SUBTRACT_STAGE_START,
+ SUBTRACT_STAGE_LEN_LHS,
+
+ /* Naive linear scan that's efficient when
+ * LEN_LHS <= SUBTRACT_LHS_THRESHOLD. */
+ SUBTRACT_STAGE_NAIVE_LHS,
+
+ SUBTRACT_STAGE_LEN_RHS,
+
+ /* As SUBTRACT_STAGE_NAIVE_LHS but for RHS. */
+ SUBTRACT_STAGE_NAIVE_RHS,
+
+ /* Creates a multiset from RHS for faster lookups before sweeping through
+ * LHS. The set is implemented as a red-black tree and duplicate elements
+ * are handled by a counter on each node. */
+ SUBTRACT_STAGE_SET_BUILD,
+ SUBTRACT_STAGE_SET_FINISH
+} ErtsSubtractCtxStage;
+
+typedef struct subtract_node__ {
+ struct subtract_node__ *parent;
+ struct subtract_node__ *left;
+ struct subtract_node__ *right;
+ int is_red;
+
+ Eterm key;
+ Uint count;
+} subtract_tree_t;
+
+typedef struct {
+ ErtsSubtractCtxStage stage;
+
+ Eterm lhs_original;
+ Eterm rhs_original;
+
+ Uint lhs_remaining;
+ Uint rhs_remaining;
+
+ Eterm iterator;
+
+ Eterm *result_cdr;
+ Eterm result;
+
+ union {
+ Eterm lhs_elements[SUBTRACT_LHS_THRESHOLD];
+ Eterm rhs_elements[SUBTRACT_RHS_THRESHOLD];
+
+ struct {
+ subtract_tree_t *tree;
+
+ /* A memory area for the tree's nodes, saving us the need to have
+ * one allocation per node. */
+ subtract_tree_t *alloc_start;
+ subtract_tree_t *alloc;
+ } rhs_set;
+ } u;
+} ErtsSubtractContext;
+
+#define ERTS_RBT_PREFIX subtract
+#define ERTS_RBT_T subtract_tree_t
+#define ERTS_RBT_KEY_T Eterm
+#define ERTS_RBT_FLAGS_T int
+#define ERTS_RBT_INIT_EMPTY_TNODE(T) \
+ do { \
+ (T)->parent = NULL; \
+ (T)->left = NULL; \
+ (T)->right = NULL; \
+ } while(0)
+#define ERTS_RBT_IS_RED(T) ((T)->is_red)
+#define ERTS_RBT_SET_RED(T) ((T)->is_red = 1)
+#define ERTS_RBT_IS_BLACK(T) (!ERTS_RBT_IS_RED(T))
+#define ERTS_RBT_SET_BLACK(T) ((T)->is_red = 0)
+#define ERTS_RBT_GET_FLAGS(T) ((T)->is_red)
+#define ERTS_RBT_SET_FLAGS(T, F) ((T)->is_red = F)
+#define ERTS_RBT_GET_PARENT(T) ((T)->parent)
+#define ERTS_RBT_SET_PARENT(T, P) ((T)->parent = P)
+#define ERTS_RBT_GET_RIGHT(T) ((T)->right)
+#define ERTS_RBT_SET_RIGHT(T, R) ((T)->right = (R))
+#define ERTS_RBT_GET_LEFT(T) ((T)->left)
+#define ERTS_RBT_SET_LEFT(T, L) ((T)->left = (L))
+#define ERTS_RBT_GET_KEY(T) ((T)->key)
+#define ERTS_RBT_CMP_KEYS(KX, KY) CMP_TERM(KX, KY)
+#define ERTS_RBT_WANT_LOOKUP_INSERT
+#define ERTS_RBT_WANT_LOOKUP
+#define ERTS_RBT_WANT_DELETE
+#define ERTS_RBT_UNDEF
+
+#include "erl_rbtree.h"
+
+static int subtract_continue(Process *p, ErtsSubtractContext *context);
+
+static void subtract_ctx_dtor(ErtsSubtractContext *context) {
+ switch (context->stage) {
+ case SUBTRACT_STAGE_SET_BUILD:
+ case SUBTRACT_STAGE_SET_FINISH:
+ erts_free(ERTS_ALC_T_LIST_TRAP, context->u.rhs_set.alloc_start);
+ break;
+ default:
+ break;
+ }
+}
-#define SMALL_VEC_SIZE 10
-static Eterm subtract(Process* p, Eterm A, Eterm B)
-{
- Eterm list;
- Eterm* hp;
- Uint need;
- Eterm res;
- Eterm small_vec[SMALL_VEC_SIZE]; /* Preallocated memory for small lists */
- Eterm* vec_p;
- Eterm* vp;
- Sint i;
- Sint n;
- Sint m;
-
- if ((n = erts_list_length(A)) < 0) {
- BIF_ERROR(p, BADARG);
+static int subtract_ctx_bin_dtor(Binary *context_bin) {
+ ErtsSubtractContext *context = ERTS_MAGIC_BIN_DATA(context_bin);
+ subtract_ctx_dtor(context);
+ return 1;
+}
+
+static void subtract_ctx_move(ErtsSubtractContext *from,
+ ErtsSubtractContext *to) {
+ int uses_result_cdr = 0;
+
+ to->stage = from->stage;
+
+ to->lhs_original = from->lhs_original;
+ to->rhs_original = from->rhs_original;
+
+ to->lhs_remaining = from->lhs_remaining;
+ to->rhs_remaining = from->rhs_remaining;
+
+ to->iterator = from->iterator;
+ to->result = from->result;
+
+ switch (to->stage) {
+ case SUBTRACT_STAGE_NAIVE_LHS:
+ sys_memcpy(to->u.lhs_elements,
+ from->u.lhs_elements,
+ sizeof(Eterm) * to->lhs_remaining);
+ break;
+ case SUBTRACT_STAGE_NAIVE_RHS:
+ sys_memcpy(to->u.rhs_elements,
+ from->u.rhs_elements,
+ sizeof(Eterm) * to->rhs_remaining);
+
+ uses_result_cdr = 1;
+ break;
+ case SUBTRACT_STAGE_SET_FINISH:
+ uses_result_cdr = 1;
+ /* FALL THROUGH */
+ case SUBTRACT_STAGE_SET_BUILD:
+ to->u.rhs_set.alloc_start = from->u.rhs_set.alloc_start;
+ to->u.rhs_set.alloc = from->u.rhs_set.alloc;
+ to->u.rhs_set.tree = from->u.rhs_set.tree;
+ break;
+ default:
+ break;
}
- if ((m = erts_list_length(B)) < 0) {
- BIF_ERROR(p, BADARG);
+
+ if (uses_result_cdr) {
+ if (from->result_cdr == &from->result) {
+ to->result_cdr = &to->result;
+ } else {
+ to->result_cdr = from->result_cdr;
+ }
}
-
- if (n == 0)
- BIF_RET(NIL);
- if (m == 0)
- BIF_RET(A);
-
- /* allocate element vector */
- if (n <= SMALL_VEC_SIZE)
- vec_p = small_vec;
- else
- vec_p = (Eterm*) erts_alloc(ERTS_ALC_T_TMP, n * sizeof(Eterm));
-
- /* PUT ALL ELEMENTS IN VP */
- vp = vec_p;
- list = A;
- i = n;
- while(i--) {
- Eterm* listp = list_val(list);
- *vp++ = CAR(listp);
- list = CDR(listp);
+}
+
+static Eterm subtract_create_trap_state(Process *p,
+ ErtsSubtractContext *context) {
+ Binary *state_bin;
+ Eterm *hp;
+
+ state_bin = erts_create_magic_binary(sizeof(ErtsSubtractContext),
+ subtract_ctx_bin_dtor);
+
+ subtract_ctx_move(context, ERTS_MAGIC_BIN_DATA(state_bin));
+
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+
+ return erts_mk_magic_ref(&hp, &MSO(p), state_bin);
+}
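+
+/* Trapping sketch: the on-stack context is moved into a magic binary so
+ * it survives across schedulings; the magic ref is handed back to the
+ * BIF as its first argument when it traps (see subtract() below), and
+ * the destructor above frees any tree allocation if the process dies
+ * mid-operation. */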
+
+static int subtract_enter_len_lhs(Process *p, ErtsSubtractContext *context) {
+ context->stage = SUBTRACT_STAGE_LEN_LHS;
+
+ context->iterator = context->lhs_original;
+ context->lhs_remaining = 0;
+
+ return subtract_continue(p, context);
+}
+
+static int subtract_enter_len_rhs(Process *p, ErtsSubtractContext *context) {
+ context->stage = SUBTRACT_STAGE_LEN_RHS;
+
+ context->iterator = context->rhs_original;
+ context->rhs_remaining = 0;
+
+ return subtract_continue(p, context);
+}
+
+static int subtract_get_length(Process *p, Eterm *iterator_p, Uint *count_p) {
+ static const Sint ELEMENTS_PER_RED = 32;
+
+ Sint budget, count;
+ Eterm iterator;
+
+ budget = ELEMENTS_PER_RED * ERTS_BIF_REDS_LEFT(p);
+ iterator = *iterator_p;
+
+#ifdef DEBUG
+ budget = budget / 10 + 1;
+#endif
+
+ for (count = 0; count < budget && is_list(iterator); count++) {
+ iterator = CDR(list_val(iterator));
}
-
- /* UNMARK ALL DELETED CELLS */
- list = B;
- m = 0; /* number of deleted elements */
- while(is_list(list)) {
- Eterm* listp = list_val(list);
- Eterm elem = CAR(listp);
- i = n;
- vp = vec_p;
- while(i--) {
- if (is_value(*vp) && eq(*vp, elem)) {
- *vp = THE_NON_VALUE;
- m++;
- break;
- }
- vp++;
- }
- list = CDR(listp);
+
+ if (!is_list(iterator) && !is_nil(iterator)) {
+ return -1;
}
-
- if (m == n) /* All deleted ? */
- res = NIL;
- else if (m == 0) /* None deleted ? */
- res = A;
- else { /* REBUILD LIST */
- res = NIL;
- need = 2*(n - m);
- hp = HAlloc(p, need);
- vp = vec_p + n - 1;
- while(vp >= vec_p) {
- if (is_value(*vp)) {
- res = CONS(hp, *vp, res);
- hp += 2;
- }
- vp--;
- }
+
+ BUMP_REDS(p, count / ELEMENTS_PER_RED);
+
+ *iterator_p = iterator;
+ *count_p += count;
+
+ if (is_nil(iterator)) {
+ return 1;
}
- if (vec_p != small_vec)
- erts_free(ERTS_ALC_T_TMP, (void *) vec_p);
- BIF_RET(res);
+
+ return 0;
}
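+
+/* Reduction-budget sketch: a slice may visit at most
+ * ELEMENTS_PER_RED * ERTS_BIF_REDS_LEFT(p) cells; with, say, 2000
+ * reductions left that is 32 * 2000 = 64000 cells, and a slice that
+ * walks N cells charges back N/32 reductions via BUMP_REDS. The same
+ * pattern recurs in the stages below with their own per-red constants. */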
-BIF_RETTYPE ebif_minusminus_2(BIF_ALIST_2)
-{
- return subtract(BIF_P, BIF_ARG_1, BIF_ARG_2);
+static int subtract_enter_naive_lhs(Process *p, ErtsSubtractContext *context) {
+ Eterm iterator;
+ int i = 0;
+
+ context->stage = SUBTRACT_STAGE_NAIVE_LHS;
+
+ context->iterator = context->rhs_original;
+ context->result = NIL;
+
+ iterator = context->lhs_original;
+
+ while (is_list(iterator)) {
+ const Eterm *cell = list_val(iterator);
+
+ ASSERT(i < SUBTRACT_LHS_THRESHOLD);
+
+ context->u.lhs_elements[i++] = CAR(cell);
+ iterator = CDR(cell);
+ }
+
+ ASSERT(i == context->lhs_remaining);
+
+ return subtract_continue(p, context);
}
-BIF_RETTYPE subtract_2(BIF_ALIST_2)
-{
- return subtract(BIF_P, BIF_ARG_1, BIF_ARG_2);
+static int subtract_naive_lhs(Process *p, ErtsSubtractContext *context) {
+ const Sint CHECKS_PER_RED = 16;
+ Sint checks, budget;
+
+ budget = CHECKS_PER_RED * ERTS_BIF_REDS_LEFT(p);
+ checks = 0;
+
+ while (checks < budget && is_list(context->iterator)) {
+ const Eterm *cell;
+ Eterm value, next;
+ int found_at;
+
+ cell = list_val(context->iterator);
+
+ value = CAR(cell);
+ next = CDR(cell);
+
+ for (found_at = 0; found_at < context->lhs_remaining; found_at++) {
+ if (EQ(value, context->u.lhs_elements[found_at])) {
+ /* We shift the array one step down as we have to preserve
+ * order.
+ *
+ * Note that we can't exit early as that would suppress errors
+ * in the right-hand side (this runs prior to determining the
+ * length of RHS). */
+
+ context->lhs_remaining--;
+ sys_memmove(&context->u.lhs_elements[found_at],
+ &context->u.lhs_elements[found_at + 1],
+ (context->lhs_remaining - found_at) * sizeof(Eterm));
+ break;
+ }
+ }
+
+ checks += MAX(1, context->lhs_remaining);
+ context->iterator = next;
+ }
+
+ BUMP_REDS(p, MIN(checks, budget) / CHECKS_PER_RED);
+
+ if (is_list(context->iterator)) {
+ return 0;
+ } else if (!is_nil(context->iterator)) {
+ return -1;
+ }
+
+ if (context->lhs_remaining > 0) {
+ Eterm *hp;
+ int i;
+
+ hp = HAlloc(p, context->lhs_remaining * 2);
+
+ for (i = context->lhs_remaining - 1; i >= 0; i--) {
+ Eterm value = context->u.lhs_elements[i];
+
+ context->result = CONS(hp, value, context->result);
+ hp += 2;
+ }
+ }
+
+ ASSERT(context->lhs_remaining > 0 || context->result == NIL);
+
+ return 1;
}
+static int subtract_enter_naive_rhs(Process *p, ErtsSubtractContext *context) {
+ Eterm iterator;
+ int i = 0;
+
+ context->stage = SUBTRACT_STAGE_NAIVE_RHS;
+
+ context->iterator = context->lhs_original;
+ context->result_cdr = &context->result;
+ context->result = NIL;
+
+ iterator = context->rhs_original;
+
+ while (is_list(iterator)) {
+ const Eterm *cell = list_val(iterator);
+
+ ASSERT(i < SUBTRACT_RHS_THRESHOLD);
+
+ context->u.rhs_elements[i++] = CAR(cell);
+ iterator = CDR(cell);
+ }
+
+ ASSERT(i == context->rhs_remaining);
+
+ return subtract_continue(p, context);
+}
+
+static int subtract_naive_rhs(Process *p, ErtsSubtractContext *context) {
+ const Sint CHECKS_PER_RED = 16;
+ Sint checks, budget;
+
+ budget = CHECKS_PER_RED * ERTS_BIF_REDS_LEFT(p);
+ checks = 0;
+
+#ifdef DEBUG
+ budget = budget / 10 + 1;
+#endif
+
+ while (checks < budget && is_list(context->iterator)) {
+ const Eterm *cell;
+ Eterm value, next;
+ int found_at;
+
+ cell = list_val(context->iterator);
+ value = CAR(cell);
+ next = CDR(cell);
+
+ for (found_at = context->rhs_remaining - 1; found_at >= 0; found_at--) {
+ if (EQ(value, context->u.rhs_elements[found_at])) {
+ break;
+ }
+ }
+
+ if (found_at < 0) {
+ /* Destructively add the value to the result. This is safe
+ * since the GC is disabled and the unfinished term is never
+ * leaked to the outside world. */
+ Eterm *hp = HAllocX(p, 2, context->lhs_remaining * 2);
+
+ *context->result_cdr = make_list(hp);
+ context->result_cdr = &CDR(hp);
+
+ CAR(hp) = value;
+ } else {
+ Eterm swap;
+
+ if (context->rhs_remaining-- == 1) {
+ /* We've run out of items to remove, so the rest of the
+ * result will be equal to the remainder of the input. We know
+ * that LHS is well-formed as any errors would've been reported
+ * during length determination. */
+ *context->result_cdr = next;
+
+ BUMP_REDS(p, MIN(budget, checks) / CHECKS_PER_RED);
+
+ return 1;
+ }
+
+ swap = context->u.rhs_elements[context->rhs_remaining];
+ context->u.rhs_elements[found_at] = swap;
+ }
+
+ checks += context->rhs_remaining;
+ context->iterator = next;
+ context->lhs_remaining--;
+ }
+
+ /* The result only has to be terminated when returning it to the user, but
+ * we're doing it when trapping as well to prevent headaches when
+ * debugging. */
+ *context->result_cdr = NIL;
+
+ BUMP_REDS(p, MIN(budget, checks) / CHECKS_PER_RED);
+
+ if (is_list(context->iterator)) {
+ ASSERT(context->lhs_remaining > 0 && context->rhs_remaining > 0);
+ return 0;
+ }
+
+ return 1;
+}
+
+static int subtract_enter_set_build(Process *p, ErtsSubtractContext *context) {
+ context->stage = SUBTRACT_STAGE_SET_BUILD;
+
+ context->u.rhs_set.alloc_start =
+ erts_alloc(ERTS_ALC_T_LIST_TRAP,
+ context->rhs_remaining * sizeof(subtract_tree_t));
+
+ context->u.rhs_set.alloc = context->u.rhs_set.alloc_start;
+ context->u.rhs_set.tree = NULL;
+
+ context->iterator = context->rhs_original;
+
+ return subtract_continue(p, context);
+}
+
+static int subtract_set_build(Process *p, ErtsSubtractContext *context) {
+ static const Sint INSERTIONS_PER_RED = 16;
+ Sint budget, insertions;
+
+ budget = INSERTIONS_PER_RED * ERTS_BIF_REDS_LEFT(p);
+ insertions = 0;
+
+#ifdef DEBUG
+ budget = budget / 10 + 1;
+#endif
+
+ while (insertions < budget && is_list(context->iterator)) {
+ subtract_tree_t *existing_node, *new_node;
+ const Eterm *cell;
+ Eterm value, next;
+
+ cell = list_val(context->iterator);
+ value = CAR(cell);
+ next = CDR(cell);
+
+ new_node = context->u.rhs_set.alloc;
+ new_node->key = value;
+ new_node->count = 1;
+
+ existing_node = subtract_rbt_lookup_insert(&context->u.rhs_set.tree,
+ new_node);
+
+ if (existing_node != NULL) {
+ existing_node->count++;
+ } else {
+ context->u.rhs_set.alloc++;
+ }
+
+ context->iterator = next;
+ insertions++;
+ }
+
+ BUMP_REDS(p, insertions / INSERTIONS_PER_RED);
+
+ ASSERT(is_list(context->iterator) || is_nil(context->iterator));
+ ASSERT(context->u.rhs_set.tree != NULL);
+
+ return is_nil(context->iterator);
+}
+
+static int subtract_enter_set_finish(Process *p, ErtsSubtractContext *context) {
+ context->stage = SUBTRACT_STAGE_SET_FINISH;
+
+ context->result_cdr = &context->result;
+ context->result = NIL;
+
+ context->iterator = context->lhs_original;
+
+ return subtract_continue(p, context);
+}
+
+static int subtract_set_finish(Process *p, ErtsSubtractContext *context) {
+ const Sint CHECKS_PER_RED = 8;
+ Sint checks, budget;
+
+ budget = CHECKS_PER_RED * ERTS_BIF_REDS_LEFT(p);
+ checks = 0;
+
+#ifdef DEBUG
+ budget = budget / 10 + 1;
+#endif
+
+ while (checks < budget && is_list(context->iterator)) {
+ subtract_tree_t *node;
+ const Eterm *cell;
+ Eterm value, next;
+
+ cell = list_val(context->iterator);
+ value = CAR(cell);
+ next = CDR(cell);
+
+ ASSERT(context->rhs_remaining > 0);
+
+ node = subtract_rbt_lookup(context->u.rhs_set.tree, value);
+
+ if (node == NULL) {
+ Eterm *hp = HAllocX(p, 2, context->lhs_remaining * 2);
+
+ *context->result_cdr = make_list(hp);
+ context->result_cdr = &CDR(hp);
+
+ CAR(hp) = value;
+ } else {
+ if (context->rhs_remaining-- == 1) {
+ *context->result_cdr = next;
+
+ BUMP_REDS(p, checks / CHECKS_PER_RED);
+
+ return 1;
+ }
+
+ if (node->count-- == 1) {
+ subtract_rbt_delete(&context->u.rhs_set.tree, node);
+ }
+ }
+
+ context->iterator = next;
+ context->lhs_remaining--;
+ checks++;
+ }
+
+ *context->result_cdr = NIL;
+
+ BUMP_REDS(p, checks / CHECKS_PER_RED);
+
+ if (is_list(context->iterator)) {
+ ASSERT(context->lhs_remaining > 0 && context->rhs_remaining > 0);
+ return 0;
+ }
+
+ return 1;
+}
+
+static int subtract_continue(Process *p, ErtsSubtractContext *context) {
+ switch (context->stage) {
+ case SUBTRACT_STAGE_START: {
+ return subtract_enter_len_lhs(p, context);
+ }
+
+ case SUBTRACT_STAGE_LEN_LHS: {
+ int res = subtract_get_length(p,
+ &context->iterator,
+ &context->lhs_remaining);
+
+ if (res != 1) {
+ return res;
+ }
+
+ if (context->lhs_remaining <= SUBTRACT_LHS_THRESHOLD) {
+ return subtract_enter_naive_lhs(p, context);
+ }
+
+ return subtract_enter_len_rhs(p, context);
+ }
+
+ case SUBTRACT_STAGE_NAIVE_LHS: {
+ return subtract_naive_lhs(p, context);
+ }
+
+ case SUBTRACT_STAGE_LEN_RHS: {
+ int res = subtract_get_length(p,
+ &context->iterator,
+ &context->rhs_remaining);
+
+ if (res != 1) {
+ return res;
+ }
+
+ /* We've walked through both lists fully now so we no longer need
+ * to check for errors past this point. */
+
+ if (context->rhs_remaining <= SUBTRACT_RHS_THRESHOLD) {
+ return subtract_enter_naive_rhs(p, context);
+ }
+
+ return subtract_enter_set_build(p, context);
+ }
+
+ case SUBTRACT_STAGE_NAIVE_RHS: {
+ return subtract_naive_rhs(p, context);
+ }
+
+ case SUBTRACT_STAGE_SET_BUILD: {
+ int res = subtract_set_build(p, context);
+
+ if (res != 1) {
+ return res;
+ }
+
+ return subtract_enter_set_finish(p, context);
+ }
+
+ case SUBTRACT_STAGE_SET_FINISH: {
+ return subtract_set_finish(p, context);
+ }
+
+ default:
+ ERTS_ASSERT(!"unreachable");
+ }
+}
+
+static int subtract_start(Process *p, Eterm lhs, Eterm rhs,
+ ErtsSubtractContext *context) {
+ context->stage = SUBTRACT_STAGE_START;
+
+ context->lhs_original = lhs;
+ context->rhs_original = rhs;
+
+ return subtract_continue(p, context);
+}
+
+/* erlang:'--'/2 */
+static Eterm subtract(Export *bif_entry, BIF_ALIST_2) {
+ Eterm lhs = BIF_ARG_1, rhs = BIF_ARG_2;
+
+ if ((is_list(lhs) || is_nil(lhs)) && (is_list(rhs) || is_nil(rhs))) {
+ /* We start with the context on the stack in the hopes that we won't
+ * have to trap. */
+ ErtsSubtractContext context;
+ int res;
+
+ res = subtract_start(BIF_P, lhs, rhs, &context);
+
+ if (res == 0) {
+ Eterm state_mref;
+
+ state_mref = subtract_create_trap_state(BIF_P, &context);
+ erts_set_gc_state(BIF_P, 0);
+
+ BIF_TRAP2(bif_entry, BIF_P, state_mref, NIL);
+ }
+
+ subtract_ctx_dtor(&context);
+
+ if (res < 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ BIF_RET(context.result);
+ } else if (is_internal_magic_ref(lhs)) {
+ ErtsSubtractContext *context;
+ int (*dtor)(Binary*);
+ Binary *magic_bin;
+
+ int res;
+
+ magic_bin = erts_magic_ref2bin(lhs);
+ dtor = ERTS_MAGIC_BIN_DESTRUCTOR(magic_bin);
+
+ if (dtor != subtract_ctx_bin_dtor) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ ASSERT(BIF_P->flags & F_DISABLE_GC);
+ ASSERT(rhs == NIL);
+
+ context = ERTS_MAGIC_BIN_DATA(magic_bin);
+ res = subtract_continue(BIF_P, context);
+
+ if (res == 0) {
+ BIF_TRAP2(bif_entry, BIF_P, lhs, NIL);
+ }
+
+ erts_set_gc_state(BIF_P, 1);
+
+ if (res < 0) {
+ ERTS_BIF_ERROR_TRAPPED2(BIF_P, BADARG, bif_entry,
+ context->lhs_original,
+ context->rhs_original);
+ }
+
+ BIF_RET(context->result);
+ }
+
+ ASSERT(!(BIF_P->flags & F_DISABLE_GC));
+
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+BIF_RETTYPE ebif_minusminus_2(BIF_ALIST_2) {
+ return subtract(bif_export[BIF_ebif_minusminus_2], BIF_CALL_ARGS);
+}
+
+BIF_RETTYPE subtract_2(BIF_ALIST_2) {
+ return subtract(bif_export[BIF_subtract_2], BIF_CALL_ARGS);
+}
+
+
BIF_RETTYPE lists_member_2(BIF_ALIST_2)
{
Eterm term;
Eterm list;
Eterm item;
int non_immed_key;
- int max_iter = 10 * CONTEXT_REDS;
+ int reds_left = ERTS_BIF_REDS_LEFT(BIF_P);
+ int max_iter = 16 * reds_left;
if (is_nil(BIF_ARG_2)) {
BIF_RET(am_false);
@@ -267,14 +890,15 @@ BIF_RETTYPE lists_member_2(BIF_ALIST_2)
}
item = CAR(list_val(list));
if ((item == term) || (non_immed_key && eq(item, term))) {
- BIF_RET2(am_true, CONTEXT_REDS - max_iter/10);
+ BIF_RET2(am_true, reds_left - max_iter/16);
}
list = CDR(list_val(list));
}
if (is_not_nil(list)) {
+ BUMP_REDS(BIF_P, reds_left - max_iter/16);
BIF_ERROR(BIF_P, BADARG);
}
- BIF_RET2(am_false, CONTEXT_REDS - max_iter/10);
+ BIF_RET2(am_false, reds_left - max_iter/16);
}
static BIF_RETTYPE lists_reverse_alloc(Process *c_p,
@@ -283,7 +907,7 @@ static BIF_RETTYPE lists_reverse_alloc(Process *c_p,
{
static const Uint CELLS_PER_RED = 40;
- Eterm *heap_top, *heap_end;
+ Eterm *alloc_top, *alloc_end;
Uint cells_left, max_cells;
Eterm list, tail;
Eterm lookahead;
@@ -305,18 +929,18 @@ static BIF_RETTYPE lists_reverse_alloc(Process *c_p,
BIF_ERROR(c_p, BADARG);
}
- heap_top = HAlloc(c_p, 2 * (max_cells - cells_left));
- heap_end = heap_top + 2 * (max_cells - cells_left);
+ alloc_top = HAlloc(c_p, 2 * (max_cells - cells_left));
+ alloc_end = alloc_top + 2 * (max_cells - cells_left);
- while (heap_top < heap_end) {
+ while (alloc_top < alloc_end) {
Eterm *pair = list_val(list);
- tail = CONS(heap_top, CAR(pair), tail);
+ tail = CONS(alloc_top, CAR(pair), tail);
list = CDR(pair);
ASSERT(is_list(list) || is_nil(list));
- heap_top += 2;
+ alloc_top += 2;
}
if (is_nil(list)) {
@@ -333,7 +957,7 @@ static BIF_RETTYPE lists_reverse_onheap(Process *c_p,
{
static const Uint CELLS_PER_RED = 60;
- Eterm *heap_top, *heap_end;
+ Eterm *alloc_start, *alloc_top, *alloc_end;
Uint cells_left, max_cells;
Eterm list, tail;
@@ -343,21 +967,27 @@ static BIF_RETTYPE lists_reverse_onheap(Process *c_p,
cells_left = max_cells = CELLS_PER_RED * (1 + ERTS_BIF_REDS_LEFT(c_p));
ASSERT(HEAP_LIMIT(c_p) >= HEAP_TOP(c_p) + 2);
- heap_end = HEAP_LIMIT(c_p) - 2;
- heap_top = HEAP_TOP(c_p);
+ alloc_start = HEAP_TOP(c_p);
+ alloc_end = HEAP_LIMIT(c_p) - 2;
+ alloc_top = alloc_start;
+
+ /* Don't process more cells than we have reductions for. */
+ alloc_end = MIN(alloc_top + (cells_left * 2), alloc_end);
- while (heap_top < heap_end && is_list(list)) {
+ while (alloc_top < alloc_end && is_list(list)) {
Eterm *pair = list_val(list);
- tail = CONS(heap_top, CAR(pair), tail);
+ tail = CONS(alloc_top, CAR(pair), tail);
list = CDR(pair);
- heap_top += 2;
+ alloc_top += 2;
}
- cells_left -= (heap_top - heap_end) / 2;
+ cells_left -= (alloc_top - alloc_start) / 2;
+ HEAP_TOP(c_p) = alloc_top;
+
+ ASSERT(cells_left >= 0 && cells_left <= max_cells);
BUMP_REDS(c_p, (max_cells - cells_left) / CELLS_PER_RED);
- HEAP_TOP(c_p) = heap_top;
if (is_nil(list)) {
BIF_RET(tail);
diff --git a/erts/emulator/beam/erl_bif_persistent.c b/erts/emulator/beam/erl_bif_persistent.c
new file mode 100644
index 0000000000..5a78a043ce
--- /dev/null
+++ b/erts/emulator/beam/erl_bif_persistent.c
@@ -0,0 +1,1000 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2018. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Purpose: Implement persistent term storage.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#include "erl_driver.h"
+#include "bif.h"
+#include "erl_map.h"
+#include "erl_binary.h"
+
+/*
+ * The limit for the number of persistent terms before
+ * a warning is issued.
+ */
+
+#define WARNING_LIMIT 20000
+#define XSTR(s) STR(s)
+#define STR(s) #s
+
+/*
+ * Parameters for the hash table.
+ */
+#define INITIAL_SIZE 8
+#define LOAD_FACTOR ((Uint)50)
+#define MUST_GROW(t) (((Uint)100) * t->num_entries >= LOAD_FACTOR * t->allocated)
+#define MUST_SHRINK(t) (((Uint)200) * t->num_entries <= LOAD_FACTOR * t->allocated && \
+ t->allocated > INITIAL_SIZE)
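+
+/*
+ * Illustrative numbers: with LOAD_FACTOR 50, a table with 256 slots must
+ * grow once it reaches 128 entries (100*128 >= 50*256) and may shrink
+ * when it falls to 64 entries or fewer (200*64 <= 50*256), but never
+ * below INITIAL_SIZE.
+ */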
+
+typedef struct hash_table {
+ Uint allocated;
+ Uint num_entries;
+ Uint mask;
+ Uint first_to_delete;
+ Uint num_to_delete;
+ erts_atomic_t refc;
+ struct hash_table* delete_next;
+ ErtsThrPrgrLaterOp thr_prog_op;
+ Eterm term[1];
+} HashTable;
+
+typedef struct trap_data {
+ HashTable* table;
+ Uint idx;
+ Uint remaining;
+ Uint memory; /* Used by info/0 to count used memory */
+} TrapData;
+
+/*
+ * Declarations of local functions.
+ */
+
+static HashTable* create_initial_table(void);
+static Uint lookup(HashTable* hash_table, Eterm key);
+static HashTable* copy_table(HashTable* old_table, Uint new_size, int rehash);
+static HashTable* tmp_table_copy(HashTable* old_table);
+static int try_seize_update_permission(Process* c_p);
+static void release_update_permission(int release_updater);
+static void table_updater(void* table);
+static void table_deleter(void* hash_table);
+static void dec_table_refc(Process* c_p, HashTable* old_table);
+static void delete_table(Process* c_p, HashTable* table);
+static void mark_for_deletion(HashTable* hash_table, Uint entry_index);
+static ErtsLiteralArea* term_to_area(Eterm tuple);
+static void suspend_updater(Process* c_p);
+static Eterm do_get_all(Process* c_p, TrapData* trap_data, Eterm res);
+static Eterm do_info(Process* c_p, TrapData* trap_data);
+static void append_to_delete_queue(HashTable* table);
+static HashTable* next_to_delete(void);
+static Eterm alloc_trap_data(Process* c_p);
+static int cleanup_trap_data(Binary *bp);
+
+/*
+ * Traps
+ */
+
+static Export persistent_term_get_all_export;
+static BIF_RETTYPE persistent_term_get_all_trap(BIF_ALIST_2);
+static Export persistent_term_info_export;
+static BIF_RETTYPE persistent_term_info_trap(BIF_ALIST_1);
+
+/*
+ * Pointer to the current hash table.
+ */
+
+static erts_atomic_t the_hash_table;
+
+/*
+ * Queue of processes waiting to update the hash table.
+ */
+
+struct update_queue_item {
+ Process *p;
+ struct update_queue_item* next;
+};
+
+static erts_mtx_t update_table_permission_mtx;
+static struct update_queue_item* update_queue = NULL;
+static Process* updater_process = NULL;
+
+/* Protected by update_table_permission_mtx */
+static ErtsThrPrgrLaterOp thr_prog_op;
+static int issued_warning = 0;
+
+/*
+ * Queue of hash tables to be deleted.
+ */
+
+static erts_mtx_t delete_queue_mtx;
+static HashTable* delete_queue_head = NULL;
+static HashTable** delete_queue_tail = &delete_queue_head;
+
+/*
+ * The following variables are only used during crash dumping. They
+ * are initialized by erts_init_persistent_dumping().
+ */
+
+ErtsLiteralArea** erts_persistent_areas;
+Uint erts_num_persistent_areas;
+
+void erts_init_bif_persistent_term(void)
+{
+ HashTable* hash_table;
+
+ /*
+ * Initialize the mutex protecting updates.
+ */
+
+ erts_mtx_init(&update_table_permission_mtx,
+ "update_persistent_term_permission",
+ NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC |
+ ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+
+ /*
+ * Initialize delete queue.
+ */
+
+ erts_mtx_init(&delete_queue_mtx,
+ "persistent_term_delete_permission",
+ NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC |
+ ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+
+ /*
+ * Allocate a small initial hash table.
+ */
+
+ hash_table = create_initial_table();
+ erts_atomic_init_nob(&the_hash_table, (erts_aint_t)hash_table);
+
+ /*
+ * Initialize export entry for traps
+ */
+
+ erts_init_trap_export(&persistent_term_get_all_export,
+ am_persistent_term, am_get_all_trap, 2,
+ &persistent_term_get_all_trap);
+ erts_init_trap_export(&persistent_term_info_export,
+ am_persistent_term, am_info_trap, 1,
+ &persistent_term_info_trap);
+}
+
+BIF_RETTYPE persistent_term_put_2(BIF_ALIST_2)
+{
+ Eterm key;
+ Eterm term;
+ Eterm heap[3];
+ Eterm tuple;
+ HashTable* hash_table;
+ Uint term_size;
+ Uint lit_area_size;
+ ErlOffHeap code_off_heap;
+ ErtsLiteralArea* literal_area;
+ erts_shcopy_t info;
+ Eterm* ptr;
+ Uint entry_index;
+
+ if (!try_seize_update_permission(BIF_P)) {
+ ERTS_BIF_YIELD2(bif_export[BIF_persistent_term_put_2],
+ BIF_P, BIF_ARG_1, BIF_ARG_2);
+ }
+
+ hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+
+ key = BIF_ARG_1;
+ term = BIF_ARG_2;
+
+ entry_index = lookup(hash_table, key);
+
+ heap[0] = make_arityval(2);
+ heap[1] = key;
+ heap[2] = term;
+ tuple = make_tuple(heap);
+
+ if (is_nil(hash_table->term[entry_index])) {
+ Uint size = hash_table->allocated;
+ if (MUST_GROW(hash_table)) {
+ size *= 2;
+ }
+ hash_table = copy_table(hash_table, size, 0);
+ entry_index = lookup(hash_table, key);
+ hash_table->num_entries++;
+ } else {
+ Eterm tuple = hash_table->term[entry_index];
+ Eterm old_term;
+
+ ASSERT(is_tuple_arity(tuple, 2));
+ old_term = boxed_val(tuple)[2];
+ if (EQ(term, old_term)) {
+ /* Same value. No need to update anything. */
+ release_update_permission(0);
+ BIF_RET(am_ok);
+ } else {
+ /* Mark the old term for deletion. */
+ mark_for_deletion(hash_table, entry_index);
+ hash_table = copy_table(hash_table, hash_table->allocated, 0);
+ }
+ }
+
+ /*
+ * Preserve internal sharing in the term by using the
+ * sharing-preserving functions. However, literals must be
+ * copied in case the module holding them is unloaded.
+ */
+ INITIALIZE_SHCOPY(info);
+ info.copy_literals = 1;
+ term_size = copy_shared_calculate(tuple, &info);
+ ERTS_INIT_OFF_HEAP(&code_off_heap);
+ lit_area_size = ERTS_LITERAL_AREA_ALLOC_SIZE(term_size);
+ literal_area = erts_alloc(ERTS_ALC_T_LITERAL, lit_area_size);
+ ptr = &literal_area->start[0];
+ literal_area->end = ptr + term_size;
+ tuple = copy_shared_perform(tuple, term_size, &info, &ptr, &code_off_heap);
+ ASSERT(tuple_val(tuple) == literal_area->start);
+ literal_area->off_heap = code_off_heap.first;
+ DESTROY_SHCOPY(info);
+ erts_set_literal_tag(&tuple, literal_area->start, term_size);
+ hash_table->term[entry_index] = tuple;
+
+ erts_schedule_thr_prgr_later_op(table_updater, hash_table, &thr_prog_op);
+ suspend_updater(BIF_P);
+
+ /*
+ * Issue a warning once if the warning limit has been exceeded.
+ */
+
+ if (hash_table->num_entries > WARNING_LIMIT && issued_warning == 0) {
+ static char w[] =
+ "More than " XSTR(WARNING_LIMIT) " persistent terms "
+ "have been created.\n"
+ "It is recommended to avoid creating an excessive number of\n"
+ "persistent terms, as creation and deletion of persistent terms\n"
+ "will be slower as the number of persistent terms increases.\n";
+ issued_warning = 1;
+ erts_send_warning_to_logger_str(BIF_P->group_leader, w);
+ }
+
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_ok);
+}
+
+BIF_RETTYPE persistent_term_get_0(BIF_ALIST_0)
+{
+ HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ TrapData* trap_data;
+ Eterm res = NIL;
+ Eterm magic_ref;
+ Binary* mbp;
+
+ magic_ref = alloc_trap_data(BIF_P);
+ mbp = erts_magic_ref2bin(magic_ref);
+ trap_data = ERTS_MAGIC_BIN_DATA(mbp);
+ trap_data->table = hash_table;
+ trap_data->idx = 0;
+ trap_data->remaining = hash_table->num_entries;
+ res = do_get_all(BIF_P, trap_data, res);
+ if (trap_data->remaining == 0) {
+ BUMP_REDS(BIF_P, hash_table->num_entries);
+ trap_data->table = NULL; /* Prevent refc decrement */
+ BIF_RET(res);
+ } else {
+ /*
+ * Increment the ref counter to prevent an update operation (by put/2
+ * or erase/1) from deleting this hash table.
+ */
+ erts_atomic_inc_nob(&hash_table->refc);
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(&persistent_term_get_all_export, BIF_P, magic_ref, res);
+ }
+}
+
+BIF_RETTYPE persistent_term_get_1(BIF_ALIST_1)
+{
+ Eterm key = BIF_ARG_1;
+ HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ Uint entry_index;
+ Eterm term;
+
+ entry_index = lookup(hash_table, key);
+ term = hash_table->term[entry_index];
+ if (is_boxed(term)) {
+ ASSERT(is_tuple_arity(term, 2));
+ BIF_RET(tuple_val(term)[2]);
+ }
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+BIF_RETTYPE persistent_term_get_2(BIF_ALIST_2)
+{
+ Eterm key = BIF_ARG_1;
+ Eterm result = BIF_ARG_2;
+ HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ Uint entry_index;
+ Eterm term;
+
+ entry_index = lookup(hash_table, key);
+ term = hash_table->term[entry_index];
+ if (is_boxed(term)) {
+ ASSERT(is_tuple_arity(term, 2));
+ result = tuple_val(term)[2];
+ }
+ BIF_RET(result);
+}
+
+BIF_RETTYPE persistent_term_erase_1(BIF_ALIST_1)
+{
+ Eterm key = BIF_ARG_1;
+ HashTable* old_table;
+ HashTable* new_table;
+ Uint entry_index;
+ Eterm old_term;
+
+ if (!try_seize_update_permission(BIF_P)) {
+ ERTS_BIF_YIELD1(bif_export[BIF_persistent_term_erase_1],
+ BIF_P, BIF_ARG_1);
+ }
+
+ old_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ entry_index = lookup(old_table, key);
+ old_term = old_table->term[entry_index];
+ if (is_boxed(old_term)) {
+ Uint new_size;
+ HashTable* tmp_table;
+
+ /*
+ * Since we don't use any delete markers, we must rehash
+ * the table when deleting terms to ensure that all terms
+ * can still be reached if there are hash collisions.
+ * We can't rehash in place and it would not be safe to modify
+ * the old table yet, so we will first need a new
+ * temporary table copy of the same size as the old one.
+ */
+
+ ASSERT(is_tuple_arity(old_term, 2));
+ tmp_table = tmp_table_copy(old_table);
+
+ /*
+ * Delete the term from the temporary table. Then copy the
+ * temporary table to a new table, rehashing the entries
+ * while copying.
+ */
+
+ tmp_table->term[entry_index] = NIL;
+ tmp_table->num_entries--;
+ new_size = tmp_table->allocated;
+ if (MUST_SHRINK(tmp_table)) {
+ new_size /= 2;
+ }
+ new_table = copy_table(tmp_table, new_size, 1);
+ erts_free(ERTS_ALC_T_TMP, tmp_table);
+
+ mark_for_deletion(old_table, entry_index);
+ erts_schedule_thr_prgr_later_op(table_updater, new_table, &thr_prog_op);
+ suspend_updater(BIF_P);
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
+ }
+
+ /*
+ * Key is not present. Nothing to do.
+ */
+
+ ASSERT(is_nil(old_term));
+ release_update_permission(0);
+ BIF_RET(am_false);
+}
+
+BIF_RETTYPE erts_internal_erase_persistent_terms_0(BIF_ALIST_0)
+{
+ HashTable* old_table;
+ HashTable* new_table;
+
+ if (!try_seize_update_permission(BIF_P)) {
+ ERTS_BIF_YIELD0(bif_export[BIF_erts_internal_erase_persistent_terms_0],
+ BIF_P);
+ }
+ old_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ old_table->first_to_delete = 0;
+ old_table->num_to_delete = old_table->allocated;
+ new_table = create_initial_table();
+ erts_schedule_thr_prgr_later_op(table_updater, new_table, &thr_prog_op);
+ suspend_updater(BIF_P);
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
+}
+
+BIF_RETTYPE persistent_term_info_0(BIF_ALIST_0)
+{
+ HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ TrapData* trap_data;
+ Eterm res = NIL;
+ Eterm magic_ref;
+ Binary* mbp;
+
+ magic_ref = alloc_trap_data(BIF_P);
+ mbp = erts_magic_ref2bin(magic_ref);
+ trap_data = ERTS_MAGIC_BIN_DATA(mbp);
+ trap_data->table = hash_table;
+ trap_data->idx = 0;
+ trap_data->remaining = hash_table->num_entries;
+ trap_data->memory = 0;
+ res = do_info(BIF_P, trap_data);
+ if (trap_data->remaining == 0) {
+ BUMP_REDS(BIF_P, hash_table->num_entries);
+ trap_data->table = NULL; /* Prevent refc decrement */
+ BIF_RET(res);
+ } else {
+ /*
+ * Increment the ref counter to prevent an update operation (by put/2
+ * or erase/1) from deleting this hash table.
+ */
+ erts_atomic_inc_nob(&hash_table->refc);
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(&persistent_term_info_export, BIF_P, magic_ref, res);
+ }
+}
+
+Uint
+erts_persistent_term_count(void)
+{
+ HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ return hash_table->num_entries;
+}
+
+void
+erts_init_persistent_dumping(void)
+{
+ HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ ErtsLiteralArea** area_p;
+ Uint i;
+
+ /*
+ * Overwrite the array of Eterms in the current hash table
+ * with pointers to literal areas.
+ */
+
+ erts_persistent_areas = (ErtsLiteralArea **) hash_table->term;
+ erts_num_persistent_areas = hash_table->num_entries;
+ area_p = erts_persistent_areas;
+ for (i = 0; i < hash_table->allocated; i++) {
+ Eterm term = hash_table->term[i];
+
+ if (is_boxed(term)) {
+ *area_p++ = term_to_area(term);
+ }
+ }
+}
+
+/*
+ * Local functions.
+ */
+
+static HashTable*
+create_initial_table(void)
+{
+ HashTable* hash_table;
+ int i;
+
+ hash_table = (HashTable *) erts_alloc(ERTS_ALC_T_PERSISTENT_TERM,
+ sizeof(HashTable)+sizeof(Eterm) *
+ (INITIAL_SIZE-1));
+ hash_table->allocated = INITIAL_SIZE;
+ hash_table->num_entries = 0;
+ hash_table->mask = INITIAL_SIZE-1;
+ hash_table->first_to_delete = 0;
+ hash_table->num_to_delete = 0;
+ erts_atomic_init_nob(&hash_table->refc, (erts_aint_t)1);
+ for (i = 0; i < INITIAL_SIZE; i++) {
+ hash_table->term[i] = NIL;
+ }
+ return hash_table;
+}
+
+static BIF_RETTYPE
+persistent_term_get_all_trap(BIF_ALIST_2)
+{
+ TrapData* trap_data;
+ Eterm res = BIF_ARG_2;
+ Uint bump_reds;
+ Binary* mbp;
+
+ ASSERT(is_list(BIF_ARG_2));
+ mbp = erts_magic_ref2bin(BIF_ARG_1);
+ trap_data = ERTS_MAGIC_BIN_DATA(mbp);
+ bump_reds = trap_data->remaining;
+ res = do_get_all(BIF_P, trap_data, res);
+ ASSERT(is_list(res));
+ if (trap_data->remaining > 0) {
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(&persistent_term_get_all_export, BIF_P, BIF_ARG_1, res);
+ } else {
+ /*
+ * Decrement ref count (and possibly delete the hash table
+ * and associated literal area).
+ */
+ dec_table_refc(BIF_P, trap_data->table);
+ trap_data->table = NULL; /* Prevent refc decrement */
+ BUMP_REDS(BIF_P, bump_reds);
+ BIF_RET(res);
+ }
+}
+
+static Eterm
+do_get_all(Process* c_p, TrapData* trap_data, Eterm res)
+{
+ HashTable* hash_table;
+ Uint remaining;
+ Uint idx;
+ Uint max_iter;
+ Uint i;
+ Eterm* hp;
+ Uint heap_size;
+ struct copy_term {
+ Uint key_size;
+ Eterm* tuple_ptr;
+ } *copy_data;
+
+ hash_table = trap_data->table;
+ idx = trap_data->idx;
+#if defined(DEBUG) || defined(VALGRIND)
+ max_iter = 50;
+#else
+ max_iter = ERTS_BIF_REDS_LEFT(c_p);
+#endif
+ remaining = trap_data->remaining < max_iter ?
+ trap_data->remaining : max_iter;
+ trap_data->remaining -= remaining;
+
+ copy_data = (struct copy_term *) erts_alloc(ERTS_ALC_T_TMP,
+ remaining *
+ sizeof(struct copy_term));
+ i = 0;
+ heap_size = (2 + 3) * remaining;
+ while (remaining != 0) {
+ Eterm term = hash_table->term[idx];
+ if (is_tuple(term)) {
+ Uint key_size;
+ Eterm* tup_val;
+
+ ASSERT(is_tuple_arity(term, 2));
+ tup_val = tuple_val(term);
+ key_size = size_object(tup_val[1]);
+ copy_data[i].key_size = key_size;
+ copy_data[i].tuple_ptr = tup_val;
+ heap_size += key_size;
+ i++;
+ remaining--;
+ }
+ idx++;
+ }
+ trap_data->idx = idx;
+
+ hp = HAlloc(c_p, heap_size);
+ remaining = i;
+ for (i = 0; i < remaining; i++) {
+ Eterm* tuple_ptr;
+ Uint key_size;
+ Eterm key;
+ Eterm tup;
+
+ tuple_ptr = copy_data[i].tuple_ptr;
+ key_size = copy_data[i].key_size;
+ key = copy_struct(tuple_ptr[1], key_size, &hp, &c_p->off_heap);
+ tup = TUPLE2(hp, key, tuple_ptr[2]);
+ hp += 3;
+ res = CONS(hp, tup, res);
+ hp += 2;
+ }
+ erts_free(ERTS_ALC_T_TMP, copy_data);
+ return res;
+}
+
+static BIF_RETTYPE
+persistent_term_info_trap(BIF_ALIST_1)
+{
+ TrapData* trap_data;
+ Eterm res;
+ Uint bump_reds;
+ Binary* mbp;
+
+ mbp = erts_magic_ref2bin(BIF_ARG_1);
+ trap_data = ERTS_MAGIC_BIN_DATA(mbp);
+ bump_reds = trap_data->remaining;
+ res = do_info(BIF_P, trap_data);
+ if (trap_data->remaining > 0) {
+ ASSERT(res == am_ok);
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP1(&persistent_term_info_export, BIF_P, BIF_ARG_1);
+ } else {
+ /*
+ * Decrement ref count (and possibly delete the hash table
+ * and associated literal area).
+ */
+ dec_table_refc(BIF_P, trap_data->table);
+ trap_data->table = NULL; /* Prevent refc decrement */
+ BUMP_REDS(BIF_P, bump_reds);
+ ASSERT(is_map(res));
+ BIF_RET(res);
+ }
+}
+
+#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
+
+static Eterm
+do_info(Process* c_p, TrapData* trap_data)
+{
+ HashTable* hash_table;
+ Uint remaining;
+ Uint idx;
+ Uint max_iter;
+
+ hash_table = trap_data->table;
+ idx = trap_data->idx;
+#if defined(DEBUG) || defined(VALGRIND)
+ max_iter = 50;
+#else
+ max_iter = ERTS_BIF_REDS_LEFT(c_p);
+#endif
+ remaining = trap_data->remaining < max_iter ? trap_data->remaining : max_iter;
+ trap_data->remaining -= remaining;
+ while (remaining != 0) {
+ if (is_boxed(hash_table->term[idx])) {
+ ErtsLiteralArea* area;
+ area = term_to_area(hash_table->term[idx]);
+ trap_data->memory += sizeof(ErtsLiteralArea) +
+ sizeof(Eterm) * (area->end - area->start - 1);
+ remaining--;
+ }
+ idx++;
+ }
+ trap_data->idx = idx;
+ if (trap_data->remaining > 0) {
+ return am_ok; /* Dummy return value */
+ } else {
+ Eterm* hp;
+ Eterm count_term;
+ Eterm memory_term;
+ Eterm res;
+ Uint memory;
+ Uint hsz = MAP_SZ(2);
+
+ memory = sizeof(HashTable) + (trap_data->table->allocated-1) *
+ sizeof(Eterm) + trap_data->memory;
+ (void) erts_bld_uint(NULL, &hsz, hash_table->num_entries);
+ (void) erts_bld_uint(NULL, &hsz, memory);
+ hp = HAlloc(c_p, hsz);
+ count_term = erts_bld_uint(&hp, NULL, hash_table->num_entries);
+ memory_term = erts_bld_uint(&hp, NULL, memory);
+ res = MAP2(hp, am_count, count_term, am_memory, memory_term);
+ return res;
+ }
+}
+
+#undef DECL_AM
+
+static Eterm
+alloc_trap_data(Process* c_p)
+{
+ Binary* mbp = erts_create_magic_binary(sizeof(TrapData),
+ cleanup_trap_data);
+ Eterm* hp;
+
+ hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE);
+ return erts_mk_magic_ref(&hp, &MSO(c_p), mbp);
+}
+
+static int
+cleanup_trap_data(Binary *bp)
+{
+ TrapData* trap_data = ERTS_MAGIC_BIN_DATA(bp);
+
+ if (trap_data->table) {
+ /*
+ * The process has been killed and is now exiting.
+ * Decrement the reference counter for the table.
+ */
+ dec_table_refc(NULL, trap_data->table);
+ }
+ return 1;
+}
+
+static Uint
+lookup(HashTable* hash_table, Eterm key)
+{
+ Uint mask = hash_table->mask;
+ Eterm* table = hash_table->term;
+ Uint32 idx = make_internal_hash(key, 0);
+ Eterm term;
+
+ do {
+ idx++;
+ term = table[idx & mask];
+ } while (is_boxed(term) && !EQ(key, (tuple_val(term))[1]));
+ return idx & mask;
+}
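+
+/* Probing example: the table uses open addressing with linear probing.
+ * Starting just past the key's hash slot, we step forward (wrapping via
+ * the mask) until we hit the key or a free (NIL) slot; e.g. with mask 7
+ * and a hash of 6 the probe order is slots 7, 0, 1, ... */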
+
+static HashTable*
+tmp_table_copy(HashTable* old_table)
+{
+ Uint size = old_table->allocated;
+ HashTable* tmp_table;
+ Uint i;
+
+ tmp_table = (HashTable *) erts_alloc(ERTS_ALC_T_TMP,
+ sizeof(HashTable) +
+ sizeof(Eterm) * (size-1));
+ *tmp_table = *old_table;
+ for (i = 0; i < size; i++) {
+ tmp_table->term[i] = old_table->term[i];
+ }
+ return tmp_table;
+}
+
+static HashTable*
+copy_table(HashTable* old_table, Uint new_size, int rehash)
+{
+ HashTable* new_table;
+ Uint old_size = old_table->allocated;
+ Uint i;
+
+ new_table = (HashTable *) erts_alloc(ERTS_ALC_T_PERSISTENT_TERM,
+ sizeof(HashTable) +
+ sizeof(Eterm) * (new_size-1));
+ if (old_table->allocated == new_size && !rehash) {
+ /*
+ * Same size and no key deleted. Make an exact copy of the table.
+ */
+ *new_table = *old_table;
+ for (i = 0; i < new_size; i++) {
+ new_table->term[i] = old_table->term[i];
+ }
+ } else {
+ /*
+ * The size of the table has changed or an element has been
+ * deleted. Must rehash, by inserting all old terms into the
+ * new (empty) table.
+ */
+ new_table->allocated = new_size;
+ new_table->num_entries = old_table->num_entries;
+ new_table->mask = new_size - 1;
+ for (i = 0; i < new_size; i++) {
+ new_table->term[i] = NIL;
+ }
+ for (i = 0; i < old_size; i++) {
+ if (is_tuple(old_table->term[i])) {
+ Eterm key = tuple_val(old_table->term[i])[1];
+ Uint entry_index = lookup(new_table, key);
+ ASSERT(is_nil(new_table->term[entry_index]));
+ new_table->term[entry_index] = old_table->term[i];
+ }
+ }
+ }
+ new_table->first_to_delete = 0;
+ new_table->num_to_delete = 0;
+ erts_atomic_init_nob(&new_table->refc, (erts_aint_t)1);
+ return new_table;
+}
+
+static void
+mark_for_deletion(HashTable* hash_table, Uint entry_index)
+{
+ hash_table->first_to_delete = entry_index;
+ hash_table->num_to_delete = 1;
+}
+
+static ErtsLiteralArea*
+term_to_area(Eterm tuple)
+{
+ ASSERT(is_tuple_arity(tuple, 2));
+ return (ErtsLiteralArea *) (((char *) tuple_val(tuple)) -
+ offsetof(ErtsLiteralArea, start));
+}
+
+static void
+table_updater(void* data)
+{
+ HashTable* old_table;
+ HashTable* new_table;
+
+ old_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ new_table = (HashTable *) data;
+ ASSERT(new_table->num_to_delete == 0);
+ erts_atomic_set_nob(&the_hash_table, (erts_aint_t)new_table);
+ append_to_delete_queue(old_table);
+ erts_schedule_thr_prgr_later_op(table_deleter,
+ old_table,
+ &old_table->thr_prog_op);
+ release_update_permission(1);
+}
+
+static void
+table_deleter(void* data)
+{
+ HashTable* old_table = (HashTable *) data;
+
+ dec_table_refc(NULL, old_table);
+}
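+
+/* Lifecycle sketch: table_updater() publishes the new table and schedules
+ * table_deleter() to run after thread progress, i.e. once no scheduler
+ * can still be reading the old table. The old table is then freed when
+ * its reference count (also held by trapped get/0 and info/0 callers)
+ * reaches zero, and tables are always deleted in queue order. */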
+
+static void
+dec_table_refc(Process* c_p, HashTable* old_table)
+{
+ erts_aint_t refc = erts_atomic_dec_read_nob(&old_table->refc);
+
+ if (refc == 0) {
+ HashTable* to_delete;
+
+ while ((to_delete = next_to_delete()) != NULL) {
+ delete_table(c_p, to_delete);
+ }
+ }
+}
+
+static void
+delete_table(Process* c_p, HashTable* table)
+{
+ Uint idx = table->first_to_delete;
+ Uint n = table->num_to_delete;
+
+ /*
+ * There are no longer any references to this hash table.
+ *
+ * Any literal areas marked for deletion can now be queued for
+ * release, and the table itself can be deallocated.
+ */
+
+#ifdef DEBUG
+ if (n == 1) {
+ ASSERT(is_tuple_arity(table->term[idx], 2));
+ }
+#endif
+
+ while (n > 0) {
+ Eterm term = table->term[idx];
+
+ if (is_tuple_arity(term, 2)) {
+ if (is_immed(tuple_val(term)[2])) {
+ erts_release_literal_area(term_to_area(term));
+ } else {
+ erts_queue_release_literals(c_p, term_to_area(term));
+ }
+ }
+ idx++, n--;
+ }
+ erts_free(ERTS_ALC_T_PERSISTENT_TERM, table);
+}
+
+/*
+ * Caller *must* yield if this function returns 0.
+ */
+
+static int
+try_seize_update_permission(Process* c_p)
+{
+ int success;
+
+ ASSERT(!erts_thr_progress_is_blocking()); /* to avoid deadlock */
+ ASSERT(c_p != NULL);
+
+ erts_mtx_lock(&update_table_permission_mtx);
+ ASSERT(updater_process != c_p);
+ success = (updater_process == NULL);
+ if (success) {
+ updater_process = c_p;
+ } else {
+ struct update_queue_item* qitem;
+ qitem = erts_alloc(ERTS_ALC_T_PERSISTENT_LOCK_Q, sizeof(*qitem));
+ qitem->p = c_p;
+ erts_proc_inc_refc(c_p);
+ qitem->next = update_queue;
+ update_queue = qitem;
+ erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
+ }
+ erts_mtx_unlock(&update_table_permission_mtx);
+ return success;
+}
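+
+/* Typical caller pattern (cf. persistent_term_erase_1 above); a caller
+ * that fails to seize the permission has been suspended and must yield:
+ *
+ *     if (!try_seize_update_permission(BIF_P)) {
+ *         ERTS_BIF_YIELD1(bif_export[BIF_persistent_term_erase_1],
+ *                         BIF_P, BIF_ARG_1);
+ *     }
+ */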
+
+static void
+release_update_permission(int release_updater)
+{
+ erts_mtx_lock(&update_table_permission_mtx);
+ ASSERT(updater_process != NULL);
+
+ if (release_updater) {
+ erts_proc_lock(updater_process, ERTS_PROC_LOCK_STATUS);
+ if (!ERTS_PROC_IS_EXITING(updater_process)) {
+ erts_resume(updater_process, ERTS_PROC_LOCK_STATUS);
+ }
+ erts_proc_unlock(updater_process, ERTS_PROC_LOCK_STATUS);
+ }
+ updater_process = NULL;
+
+ while (update_queue != NULL) { /* Unleash the entire herd */
+ struct update_queue_item* qitem = update_queue;
+ erts_proc_lock(qitem->p, ERTS_PROC_LOCK_STATUS);
+ if (!ERTS_PROC_IS_EXITING(qitem->p)) {
+ erts_resume(qitem->p, ERTS_PROC_LOCK_STATUS);
+ }
+ erts_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS);
+ update_queue = qitem->next;
+ erts_proc_dec_refc(qitem->p);
+ erts_free(ERTS_ALC_T_PERSISTENT_LOCK_Q, qitem);
+ }
+ erts_mtx_unlock(&update_table_permission_mtx);
+}
+
+static void
+suspend_updater(Process* c_p)
+{
+#ifdef DEBUG
+ ASSERT(c_p != NULL);
+ erts_mtx_lock(&update_table_permission_mtx);
+ ASSERT(updater_process == c_p);
+ erts_mtx_unlock(&update_table_permission_mtx);
+#endif
+ erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
+}
+
+static void
+append_to_delete_queue(HashTable* table)
+{
+ erts_mtx_lock(&delete_queue_mtx);
+ table->delete_next = NULL;
+ *delete_queue_tail = table;
+ delete_queue_tail = &table->delete_next;
+ erts_mtx_unlock(&delete_queue_mtx);
+}
+
+static HashTable*
+next_to_delete(void)
+{
+ HashTable* table;
+
+ erts_mtx_lock(&delete_queue_mtx);
+ table = delete_queue_head;
+ if (table) {
+ if (erts_atomic_read_nob(&table->refc)) {
+ /*
+ * This hash table is still referenced. Hash tables
+ * must be deleted in order, so we return a NULL
+ * pointer.
+ */
+ table = NULL;
+ } else {
+ /*
+ * Remove the first hash table from the queue.
+ */
+ delete_queue_head = table->delete_next;
+ if (delete_queue_head == NULL) {
+ delete_queue_tail = &delete_queue_head;
+ }
+ }
+ }
+ erts_mtx_unlock(&delete_queue_mtx);
+ return table;
+}
diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c
index 7fe4e02782..ed825d3dda 100644
--- a/erts/emulator/beam/erl_bif_port.c
+++ b/erts/emulator/beam/erl_bif_port.c
@@ -891,10 +891,6 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump)
driver = &spawn_driver;
} else if (*tp == am_fd) { /* An fd port */
- int n;
- struct Sint_buf sbuf;
- char* p;
-
if (arity != make_arityval(3)) {
goto badarg;
}
@@ -904,15 +900,9 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump)
opts.ifd = unsigned_val(tp[1]);
opts.ofd = unsigned_val(tp[2]);
- /* Syntesize name from input and output descriptor. */
- name_buf = erts_alloc(ERTS_ALC_T_TMP,
- 2*sizeof(struct Sint_buf) + 2);
- p = Sint_to_buf(opts.ifd, &sbuf);
- n = sys_strlen(p);
- sys_strncpy(name_buf, p, n);
- name_buf[n] = '/';
- p = Sint_to_buf(opts.ofd, &sbuf);
- sys_strcpy(name_buf+n+1, p);
+ /* Synthesize name from input and output descriptors. */
+ name_buf = erts_alloc(ERTS_ALC_T_TMP, 256);
+ erts_snprintf(name_buf, 256, "%i/%i", opts.ifd, opts.ofd);
driver = &fd_driver;
} else {
diff --git a/erts/emulator/beam/erl_bif_unique.h b/erts/emulator/beam/erl_bif_unique.h
index 40b70667c0..944788c67c 100644
--- a/erts/emulator/beam/erl_bif_unique.h
+++ b/erts/emulator/beam/erl_bif_unique.h
@@ -242,11 +242,11 @@ erts_internal_ref_number_cmp(Uint32 num1[ERTS_REF_NUMBERS],
Uint32 num2[ERTS_REF_NUMBERS])
{
if (num1[2] != num2[2])
- return (int) ((Sint64) num1[2] - (Sint64) num2[2]);
+ return num1[2] > num2[2] ? 1 : -1;
if (num1[1] != num2[1])
- return (int) ((Sint64) num1[1] - (Sint64) num2[1]);
+ return num1[1] > num2[1] ? 1 : -1;
if (num1[0] != num2[0])
- return (int) ((Sint64) num1[0] - (Sint64) num2[0]);
+ return num1[0] > num2[0] ? 1 : -1;
return 0;
}
diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c
index 788718ab09..45e4be2426 100644
--- a/erts/emulator/beam/erl_db_tree.c
+++ b/erts/emulator/beam/erl_db_tree.c
@@ -1860,22 +1860,14 @@ static int db_select_replace_tree(Process *p, DbTable *tbl, Eterm tid,
sc.mp = mpi.mp;
- stack = get_static_stack(tb);
if (!mpi.got_partial && mpi.some_limitation &&
CMP_EQ(mpi.least,mpi.most)) {
- TreeDbTerm* term = *(mpi.save_term);
doit_select_replace(tb,mpi.save_term,&sc,0 /* dummy */);
- if (stack != NULL) {
- if (TOP_NODE(stack) == term)
- // throw away potentially invalid reference
- REPLACE_TOP_NODE(stack, *(mpi.save_term));
- release_stack(tb, stack);
- }
+ reset_static_stack(tb); /* may refer to the replaced term */
RET_TO_BIF(erts_make_integer(sc.replaced,p),DB_ERROR_NONE);
}
- if (stack == NULL)
- stack = get_any_stack(tb);
+ stack = get_any_stack(tb);
if (mpi.some_limitation) {
if ((this = find_next_from_pb_key(tb, stack, mpi.most)) != NULL) {
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index f1d47326b4..e2c029c244 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -2470,7 +2470,7 @@ restart:
case matchProcessDump: {
erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
ASSERT(c_p == self);
- print_process_info(ERTS_PRINT_DSBUF, (void *) dsbufp, c_p);
+ print_process_info(ERTS_PRINT_DSBUF, (void *) dsbufp, c_p, ERTS_PROC_LOCK_MAIN);
*esp++ = new_binary(build_proc, (byte *)dsbufp->str,
dsbufp->str_len);
erts_destroy_tmp_dsbuf(dsbufp);
diff --git a/erts/emulator/beam/erl_dirty_bif.tab b/erts/emulator/beam/erl_dirty_bif.tab
index 086275fbe5..20299ff604 100644
--- a/erts/emulator/beam/erl_dirty_bif.tab
+++ b/erts/emulator/beam/erl_dirty_bif.tab
@@ -59,8 +59,6 @@ dirty-cpu erts_debug:lcnt_clear/0
dirty-cpu-test erlang:'++'/2
dirty-cpu-test erlang:append/2
-dirty-cpu-test erlang:'--'/2
-dirty-cpu-test erlang:subtract/2
dirty-cpu-test erlang:iolist_size/1
dirty-cpu-test erlang:make_tuple/2
dirty-cpu-test erlang:make_tuple/3
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index a65dbbf42b..3a50b294d1 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -681,7 +681,7 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
ErtsMonotonicTime start_time;
ErtsSchedulerData *esdp = erts_proc_sched_data(p);
erts_aint32_t state;
- ERTS_MSACC_PUSH_STATE_M();
+ ERTS_MSACC_PUSH_STATE();
#ifdef USE_VM_PROBES
DTRACE_CHARBUF(pidbuf, DTRACE_TERM_BUF_SIZE);
#endif
@@ -711,7 +711,7 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
else if (p->live_hf_end != ERTS_INVALID_HFRAG_PTR)
live_hf_end = p->live_hf_end;
- ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_GC);
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_GC);
erts_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
if (erts_system_monitor_long_gc != 0)
@@ -759,7 +759,7 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
gc_trace_end_tag = am_gc_minor_end;
} else {
do_major_collection:
- ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_GC_FULL);
+ ERTS_MSACC_SET_STATE_CACHED_X(ERTS_MSACC_STATE_GC_FULL);
if (IS_TRACED_FL(p, F_TRACE_GC)) {
trace_gc(p, am_gc_major_start, need, THE_NON_VALUE);
}
@@ -770,7 +770,7 @@ do_major_collection:
p->flags &= ~(F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC);
DTRACE2(gc_major_end, pidbuf, reclaimed_now);
gc_trace_end_tag = am_gc_major_end;
- ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_GC);
+ ERTS_MSACC_SET_STATE_CACHED_X(ERTS_MSACC_STATE_GC);
}
reset_active_writer(p);
@@ -800,7 +800,7 @@ do_major_collection:
/* We have to make sure that we have space for need on the heap */
res = delay_garbage_collection(p, live_hf_end, need, fcalls);
- ERTS_MSACC_POP_STATE_M();
+ ERTS_MSACC_POP_STATE();
return res;
}
@@ -843,7 +843,7 @@ do_major_collection:
FLAGS(p) &= ~(F_FORCE_GC|F_HIBERNATED);
p->live_hf_end = ERTS_INVALID_HFRAG_PTR;
- ERTS_MSACC_POP_STATE_M();
+ ERTS_MSACC_POP_STATE();
#ifdef CHECK_FOR_HOLES
/*
@@ -1133,9 +1133,28 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
reds = (Sint64) garbage_collect(p, ERTS_INVALID_HFRAG_PTR, 0,
p->arg_reg, p->arity, fcalls,
ygen_usage);
+ if (ERTS_PROC_IS_EXITING(p)) {
+ return 0;
+ }
ASSERT(!(p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)));
+ if (MAX_HEAP_SIZE_GET(p)) {
+ Uint new_heap_size;
+ Uint old_heap_size;
+ Uint total_heap_size;
+
+ new_heap_size = HEAP_END(p) - HEAP_START(p);
+ old_heap_size = erts_next_heap_size(lit_size, 0);
+ total_heap_size = new_heap_size + old_heap_size;
+ if (MAX_HEAP_SIZE_GET(p) < total_heap_size &&
+ reached_max_heap_size(p, total_heap_size,
+ new_heap_size, old_heap_size)) {
+ erts_set_self_exiting(p, am_killed);
+ return 0;
+ }
+ }
+
/*
* Set GC state.
*/
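The added block makes literal-area collection honour max_heap_size: after copying the literals onto the heap, the process is killed if the current heap plus a heap sized for the literal area would exceed the limit. A toy sketch of the accounting, assuming only the general shape of erts_next_heap_size() (the real function rounds up to a precomputed growth table):

    #include <stdio.h>

    typedef unsigned long Uint;

    static Uint next_heap_size(Uint need)     /* made-up stand-in */
    {
        Uint sz = 232;
        while (sz < need)
            sz += sz / 2;
        return sz;
    }

    int main(void)
    {
        Uint max_heap = 8000;                 /* MAX_HEAP_SIZE_GET(p)  */
        Uint new_heap = 4000;                 /* HEAP_END - HEAP_START */
        Uint lit_size = 5000;                 /* copied literal area   */
        Uint total    = new_heap + next_heap_size(lit_size);

        puts(max_heap < total ? "reached max heap size -> kill"
                              : "within limit");
        return 0;
    }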
@@ -2419,27 +2438,9 @@ erts_copy_one_frag(Eterm** hpp, ErlOffHeap* off_heap,
cpy_words:
ASSERT(sz >= cpy_sz);
sz -= cpy_sz;
- while (cpy_sz >= 8) {
- cpy_sz -= 8;
- *hp++ = *fhp++;
- *hp++ = *fhp++;
- *hp++ = *fhp++;
- *hp++ = *fhp++;
- *hp++ = *fhp++;
- *hp++ = *fhp++;
- *hp++ = *fhp++;
- *hp++ = *fhp++;
- }
- switch (cpy_sz) {
- case 7: *hp++ = *fhp++;
- case 6: *hp++ = *fhp++;
- case 5: *hp++ = *fhp++;
- case 4: *hp++ = *fhp++;
- case 3: *hp++ = *fhp++;
- case 2: *hp++ = *fhp++;
- case 1: *hp++ = *fhp++;
- default: break;
- }
+ sys_memcpy(hp, fhp, cpy_sz * sizeof(Eterm));
+ hp += cpy_sz;
+ fhp += cpy_sz;
if (oh) {
/* Add to offheap list */
oh->next = off_heap->first;
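The unrolled eight-words-at-a-time loop with its switch tail was simply a word copy of cpy_sz Eterms; sys_memcpy() states that directly and lets the C library pick the fastest strategy. A sketch of the equivalence with plain memcpy() (safe here because source and destination never overlap):

    #include <stdio.h>
    #include <string.h>

    typedef unsigned long Eterm;   /* stand-in for the emulator's Eterm */

    int main(void)
    {
        Eterm src[10] = {1,2,3,4,5,6,7,8,9,10};
        Eterm dst[10] = {0};
        size_t cpy_sz = 10;

        memcpy(dst, src, cpy_sz * sizeof(Eterm));
        printf("%lu %lu\n", dst[0], dst[9]);   /* 1 10 */
        return 0;
    }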
@@ -2458,7 +2459,7 @@ erts_copy_one_frag(Eterm** hpp, ErlOffHeap* off_heap,
*hpp = hp;
for (i = 0; i < nrefs; i++) {
- if (is_not_immed(refs[i]))
+ if (is_not_immed(refs[i]) && !erts_is_literal(refs[i],ptr_val(refs[i])))
refs[i] = offset_ptr(refs[i], offs);
}
bp->off_heap.first = NULL;
diff --git a/erts/emulator/beam/erl_hl_timer.c b/erts/emulator/beam/erl_hl_timer.c
index 6ec6f8065e..75ad6de2c9 100644
--- a/erts/emulator/beam/erl_hl_timer.c
+++ b/erts/emulator/beam/erl_hl_timer.c
@@ -29,8 +29,6 @@
# include "config.h"
#endif
-/* #define ERTS_MAGIC_REF_BIF_TIMERS */
-
#include "sys.h"
#include "global.h"
#include "bif.h"
@@ -39,9 +37,6 @@
#include "erl_time.h"
#include "erl_hl_timer.h"
#include "erl_proc_sig_queue.h"
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
-#include "erl_binary.h"
-#endif
#define ERTS_TMR_CHECK_CANCEL_ON_CREATE 0
@@ -195,14 +190,9 @@ struct ErtsBifTimer_ {
} type;
struct {
erts_atomic32_t state;
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
- ErtsMagicBinary *mbin;
- ErtsHLTimerList proc_list;
-#else
Uint32 refn[ERTS_REF_NUMBERS];
ErtsBifTimerTree proc_tree;
ErtsBifTimerTree tree;
-#endif
Eterm message;
ErlHeapFragment *bp;
} btm;
@@ -220,11 +210,7 @@ typedef ErtsTimer *(*ErtsCreateTimerFunc)(ErtsSchedulerData *esdp,
int short_time, ErtsTmrType type,
void *rcvrp, Eterm rcvr,
Eterm msg,
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
- ErtsMagicBinary *mbin,
-#else
Uint32 *refn,
-#endif
void (*callback)(void *), void *arg);
#ifdef SMALL_MEMORY
@@ -303,16 +289,12 @@ typedef struct {
struct ErtsHLTimerService_ {
ErtsHLTCncldTmrQ canceled_queue;
ErtsHLTimer *time_tree;
-#ifndef ERTS_MAGIC_REF_BIF_TIMERS
ErtsBifTimer *btm_tree;
-#endif
ErtsHLTimer *next_timeout;
ErtsYieldingTimeoutState yield;
ErtsTWheelTimer service_timer;
};
-#ifndef ERTS_MAGIC_REF_BIF_TIMERS
-
static ERTS_INLINE int
refn_is_lt(Uint32 *x, Uint32 *y)
{
@@ -334,8 +316,6 @@ refn_is_eq(Uint32 *x, Uint32 *y)
return (x[0] == y[0]) & (x[1] == y[1]) & (x[2] == y[2]);
}
-#endif
-
#define ERTS_RBT_PREFIX time
#define ERTS_RBT_T ErtsHLTimer
#define ERTS_RBT_KEY_T ErtsMonotonicTime
@@ -525,13 +505,7 @@ same_time_list_lookup(ErtsHLTimer *root, ErtsHLTimer *x)
#endif /* ERTS_HLT_HARD_DEBUG */
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
-#define ERTS_BTM_HLT2REFN(T) ((T)->btm.mbin->refn)
-#else
#define ERTS_BTM_HLT2REFN(T) ((T)->btm.refn)
-#endif
-
-#ifndef ERTS_MAGIC_REF_BIF_TIMERS
#define ERTS_RBT_PREFIX btm
#define ERTS_RBT_T ErtsBifTimer
@@ -576,87 +550,12 @@ same_time_list_lookup(ErtsHLTimer *root, ErtsHLTimer *x)
#define ERTS_RBT_IS_EQ(KX, KY) refn_is_eq((KX), (KY))
#define ERTS_RBT_WANT_DELETE
#define ERTS_RBT_WANT_INSERT
-#ifndef ERTS_MAGIC_REF_BIF_TIMERS
#define ERTS_RBT_WANT_LOOKUP
-#endif
#define ERTS_RBT_WANT_FOREACH
#define ERTS_RBT_UNDEF
#include "erl_rbtree.h"
-#endif /* !ERTS_MAGIC_REF_BIF_TIMERS */
-
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
-
-static ERTS_INLINE void
-proc_btm_list_insert(ErtsBifTimer **list, ErtsBifTimer *x)
-{
- ErtsBifTimer *y = *list;
- if (!y) {
- x->btm.proc_list.next = x;
- x->btm.proc_list.prev = x;
- *list = x;
- }
- else {
- ERTS_HLT_ASSERT(y->btm.proc_list.prev->btm.proc_list.next == y);
- x->btm.proc_list.next = y;
- x->btm.proc_list.prev = y->btm.proc_list.prev;
- y->btm.proc_list.prev->btm.proc_list.next = x;
- y->btm.proc_list.prev = x;
- }
-}
-
-static ERTS_INLINE void
-proc_btm_list_delete(ErtsBifTimer **list, ErtsBifTimer *x)
-{
- ErtsBifTimer *y = *list;
- if (y == x && x->btm.proc_list.next == x) {
- ERTS_HLT_ASSERT(x->btm.proc_list.prev == x);
- *list = NULL;
- }
- else {
- if (y == x)
- *list = x->btm.proc_list.next;
- ERTS_HLT_ASSERT(x->btm.proc_list.prev->btm.proc_list.next == x);
- ERTS_HLT_ASSERT(x->btm.proc_list.next->btm.proc_list.prev == x);
- x->btm.proc_list.prev->btm.proc_list.next = x->btm.proc_list.next;
- x->btm.proc_list.next->btm.proc_list.prev = x->btm.proc_list.prev;
- }
- x->btm.proc_list.next = NULL;
-}
-
-static ERTS_INLINE int
-proc_btm_list_foreach_destroy_yielding(ErtsBifTimer **list,
- void (*destroy)(ErtsBifTimer *, void *),
- void *arg,
- int limit)
-{
- int i;
- ErtsBifTimer *first, *last;
-
- first = *list;
- if (!first)
- return 0;
-
- last = first->btm.proc_list.prev;
- for (i = 0; i < limit; i++) {
- ErtsBifTimer *x = last;
- last = last->btm.proc_list.prev;
- (*destroy)(x, arg);
- x->btm.proc_list.next = NULL;
- if (x == first) {
- *list = NULL;
- return 0;
- }
- }
-
- last->btm.proc_list.next = first;
- first->btm.proc_list.prev = last;
- return 1;
-}
-
-#else /* !ERTS_MAGIC_REF_BIF_TIMERS */
-
#define ERTS_RBT_PREFIX proc_btm
#define ERTS_RBT_T ErtsBifTimer
#define ERTS_RBT_KEY_T Uint32 *
@@ -700,16 +599,12 @@ proc_btm_list_foreach_destroy_yielding(ErtsBifTimer **list,
#define ERTS_RBT_IS_EQ(KX, KY) refn_is_eq((KX), (KY))
#define ERTS_RBT_WANT_DELETE
#define ERTS_RBT_WANT_INSERT
-#ifndef ERTS_MAGIC_REF_BIF_TIMERS
#define ERTS_RBT_WANT_LOOKUP
-#endif
#define ERTS_RBT_WANT_FOREACH_DESTROY_YIELDING
#define ERTS_RBT_UNDEF
#include "erl_rbtree.h"
-#endif /* !ERTS_MAGIC_REF_BIF_TIMERS */
-
static void init_canceled_queue(ErtsHLTCncldTmrQ *cq);
void
@@ -728,9 +623,7 @@ erts_create_timer_service(void)
srv = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_TIMER_SERVICE,
sizeof(ErtsHLTimerService));
srv->time_tree = NULL;
-#ifndef ERTS_MAGIC_REF_BIF_TIMERS
srv->btm_tree = NULL;
-#endif
srv->next_timeout = NULL;
srv->yield = init_yield;
erts_twheel_init_timer(&srv->service_timer);
@@ -805,40 +698,10 @@ port_timeout_common(Port *port, void *tmr)
return 0;
}
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
-
-static erts_atomic_t *
-mbin_to_btmref__(ErtsMagicBinary *mbin)
-{
- return erts_binary_to_magic_indirection((Binary *) mbin);
-}
-
-static ERTS_INLINE void
-magic_binary_init(ErtsMagicBinary *mbin, ErtsBifTimer *tmr)
-{
- erts_atomic_t *aptr = mbin_to_btmref__(mbin);
- erts_atomic_init_nob(aptr, (erts_aint_t) tmr);
-}
-
-static ERTS_INLINE ErtsBifTimer *
-magic_binary_to_btm(ErtsMagicBinary *mbin)
-{
- erts_atomic_t *aptr = mbin_to_btmref__(mbin);
- ErtsBifTimer *tmr = (ErtsBifTimer *) erts_atomic_read_nob(aptr);
- ERTS_HLT_ASSERT(!tmr || tmr->btm.mbin == mbin);
- return tmr;
-}
-
-#endif /* ERTS_MAGIC_REF_BIF_TIMERS */
-
static ERTS_INLINE erts_aint_t
init_btm_specifics(ErtsSchedulerData *esdp,
ErtsBifTimer *tmr, Eterm msg,
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
- ErtsMagicBinary *mbin
-#else
Uint32 *refn
-#endif
)
{
Uint hsz = is_immed(msg) ? ((Uint) 0) : size_object(msg);
@@ -853,13 +716,6 @@ init_btm_specifics(ErtsSchedulerData *esdp,
tmr->btm.message = copy_struct(msg, hsz, &hp, &bp->off_heap);
tmr->btm.bp = bp;
}
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
- refc = 1;
- tmr->btm.mbin = mbin;
- erts_refc_inc(&mbin->refc, 1);
- magic_binary_init(mbin, tmr);
- tmr->btm.proc_list.next = NULL;
-#else
refc = 0;
tmr->btm.refn[0] = refn[0];
tmr->btm.refn[1] = refn[1];
@@ -868,7 +724,6 @@ init_btm_specifics(ErtsSchedulerData *esdp,
tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
btm_rbt_insert(&esdp->timer_service->btm_tree, tmr);
-#endif
erts_atomic32_init_nob(&tmr->btm.state, ERTS_TMR_STATE_ACTIVE);
return refc; /* refc from magic binary... */
@@ -886,11 +741,6 @@ timer_destroy(ErtsTimer *tmr, int twt, int btm)
erts_free(ERTS_ALC_T_HL_PTIMER, tmr);
}
else {
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
- Binary *bp = (Binary *) tmr->btm.btm.mbin;
- if (erts_refc_dectest(&bp->refc, 0) == 0)
- erts_bin_free(bp);
-#endif
if (tmr->head.roflgs & ERTS_TMR_ROFLG_PRE_ALC)
bif_timer_pre_free(&tmr->btm);
else
@@ -940,9 +790,6 @@ schedule_tw_timer_destroy(ErtsTWTimer *tmr)
else {
/* Message buffer already dropped... */
size = sizeof(ErtsBifTimer);
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
- size += sizeof(ErtsMagicIndirectionWord);
-#endif
}
erts_schedule_thr_prgr_later_cleanup_op(
@@ -1006,11 +853,7 @@ create_tw_timer(ErtsSchedulerData *esdp,
int short_time, ErtsTmrType type,
void *rcvrp, Eterm rcvr,
Eterm msg,
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
- ErtsMagicBinary *mbin,
-#else
Uint32 *refn,
-#endif
void (*callback)(void *), void *arg)
{
ErtsTWTimer *tmr;
@@ -1087,11 +930,7 @@ create_tw_timer(ErtsSchedulerData *esdp,
refc += init_btm_specifics(esdp,
(ErtsBifTimer *) tmr,
msg,
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
- mbin
-#else
refn
-#endif
);
break;
@@ -1152,9 +991,6 @@ schedule_hl_timer_destroy(ErtsHLTimer *tmr, Uint32 roflgs)
else {
/* Message buffer already dropped... */
size = sizeof(ErtsBifTimer);
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
- size += sizeof(ErtsMagicIndirectionWord);
-#endif
}
erts_schedule_thr_prgr_later_cleanup_op(
@@ -1192,34 +1028,6 @@ check_canceled_queue(ErtsSchedulerData *esdp, ErtsHLTimerService *srv)
#endif
}
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
-
-static int
-bif_timer_ref_destructor(Binary *unused)
-{
- return 1;
-}
-
-static ERTS_INLINE void
-btm_clear_magic_binary(ErtsBifTimer *tmr)
-{
- erts_atomic_t *aptr = mbin_to_btmref__(tmr->btm.mbin);
- Uint32 roflgs = tmr->type.head.roflgs;
-#ifdef ERTS_HLT_DEBUG
- erts_aint_t tval = erts_atomic_xchg_nob(aptr,
- (erts_aint_t) NULL);
- ERTS_HLT_ASSERT(tval == (erts_aint_t) tmr);
-#else
- erts_atomic_set_nob(aptr, (erts_aint_t) NULL);
-#endif
- if (roflgs & ERTS_TMR_ROFLG_HLT)
- hl_timer_dec_refc(&tmr->type.hlt, roflgs);
- else
- tw_timer_dec_refc(&tmr->type.twt);
-}
-
-#endif /* ERTS_MAGIC_REF_BIF_TIMERS */
-
static ERTS_INLINE void
bif_timer_timeout(ErtsHLTimerService *srv,
ErtsBifTimer *tmr,
@@ -1240,10 +1048,6 @@ bif_timer_timeout(ErtsHLTimerService *srv,
if (state == ERTS_TMR_STATE_ACTIVE) {
Process *proc;
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
- btm_clear_magic_binary(tmr);
-#endif
-
if (roflgs & ERTS_TMR_ROFLG_REG_NAME) {
Eterm term;
term = tmr->type.head.receiver.name;
@@ -1266,18 +1070,11 @@ bif_timer_timeout(ErtsHLTimerService *srv,
erts_proc_lock(proc, ERTS_PROC_LOCK_BTM);
/* If the process is exiting do not disturb the cleanup... */
if (!ERTS_PROC_IS_EXITING(proc)) {
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
- if (tmr->btm.proc_list.next) {
- proc_btm_list_delete(&proc->bif_timers, tmr);
- dec_refc = 1;
- }
-#else
if (tmr->btm.proc_tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
proc_btm_rbt_delete(&proc->bif_timers, tmr);
tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
dec_refc = 1;
}
-#endif
}
erts_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
if (dec_refc)
@@ -1287,25 +1084,18 @@ bif_timer_timeout(ErtsHLTimerService *srv,
free_message_buffer(tmr->btm.bp);
}
-#ifndef ERTS_MAGIC_REF_BIF_TIMERS
if (tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
btm_rbt_delete(&srv->btm_tree, tmr);
tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
}
-#endif
-
}
static void
tw_bif_timer_timeout(void *vbtmp)
{
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
- ErtsHLTimerService *srv = NULL;
-#else
ErtsSchedulerData *esdp = erts_get_scheduler_data();
ErtsHLTimerService *srv = esdp->timer_service;
-#endif
ErtsBifTimer *btmp = (ErtsBifTimer *) vbtmp;
bif_timer_timeout(srv, btmp, btmp->type.head.roflgs);
tw_timer_dec_refc(&btmp->type.twt);
@@ -1317,11 +1107,7 @@ create_hl_timer(ErtsSchedulerData *esdp,
int short_time, ErtsTmrType type,
void *rcvrp, Eterm rcvr,
Eterm msg,
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
- ErtsMagicBinary *mbin,
-#else
Uint32 *refn,
-#endif
void (*callback)(void *), void *arg)
{
ErtsHLTimerService *srv = esdp->timer_service;
@@ -1407,11 +1193,7 @@ create_hl_timer(ErtsSchedulerData *esdp,
refc += init_btm_specifics(esdp,
(ErtsBifTimer *) tmr,
msg,
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
- mbin
-#else
refn
-#endif
);
}
@@ -1628,7 +1410,6 @@ cleanup_sched_local_canceled_timer(ErtsSchedulerData *esdp,
ERTS_HLT_ASSERT((tmr->head.roflgs & ERTS_TMR_ROFLG_SID_MASK)
== (Uint32) esdp->no);
-#ifndef ERTS_MAGIC_REF_BIF_TIMERS
if (roflgs & ERTS_TMR_ROFLG_BIF_TMR) {
ErtsBifTimer *btm = (ErtsBifTimer *) tmr;
if (btm->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
@@ -1636,7 +1417,6 @@ cleanup_sched_local_canceled_timer(ErtsSchedulerData *esdp,
btm->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
}
}
-#endif
if (roflgs & ERTS_TMR_ROFLG_HLT) {
hlt_delete_timer(esdp, &tmr->hlt);
@@ -1909,9 +1689,6 @@ setup_bif_timer(Process *c_p, int twheel, ErtsMonotonicTime timeout_pos,
Eterm ref, tmo_msg, *hp;
ErtsBifTimer *tmr;
ErtsSchedulerData *esdp;
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
- Binary *mbin;
-#endif
Eterm tmp_hp[4];
ErtsCreateTimerFunc create_timer;
@@ -1920,18 +1697,10 @@ setup_bif_timer(Process *c_p, int twheel, ErtsMonotonicTime timeout_pos,
esdp = erts_proc_sched_data(c_p);
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
- mbin = erts_create_magic_indirection(bif_timer_ref_destructor);
- hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE);
- ref = erts_mk_magic_ref(&hp, &c_p->off_heap, mbin);
- ASSERT(erts_get_ref_numbers_thr_id(((ErtsMagicBinary *)mbin)->refn)
- == (Uint32) esdp->no);
-#else
hp = HAlloc(c_p, ERTS_REF_THING_SIZE);
ref = erts_sched_make_ref_in_buffer(esdp, hp);
ASSERT(erts_get_ref_numbers_thr_id(internal_ordinary_ref_numbers(ref))
== (Uint32) esdp->no);
-#endif
tmo_msg = wrap ? TUPLE3(tmp_hp, am_timeout, ref, msg) : msg;
@@ -1939,11 +1708,7 @@ setup_bif_timer(Process *c_p, int twheel, ErtsMonotonicTime timeout_pos,
tmr = (ErtsBifTimer *) create_timer(esdp, timeout_pos,
short_time, ERTS_TMR_BIF,
NULL, rcvr, tmo_msg,
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
- (ErtsMagicBinary *) mbin,
-#else
internal_ordinary_ref_numbers(ref),
-#endif
NULL, NULL);
if (is_internal_pid(rcvr)) {
@@ -1951,14 +1716,10 @@ setup_bif_timer(Process *c_p, int twheel, ErtsMonotonicTime timeout_pos,
rcvr, ERTS_PROC_LOCK_BTM,
ERTS_P2P_FLG_INC_REFC);
if (!proc) {
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
- btm_clear_magic_binary(tmr);
-#else
if (tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
btm_rbt_delete(&esdp->timer_service->btm_tree, tmr);
tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
}
-#endif
if (tmr->btm.bp)
free_message_buffer(tmr->btm.bp);
if (twheel)
@@ -1968,11 +1729,7 @@ setup_bif_timer(Process *c_p, int twheel, ErtsMonotonicTime timeout_pos,
timer_destroy((ErtsTimer *) tmr, twheel, 1);
}
else {
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
- proc_btm_list_insert(&proc->bif_timers, tmr);
-#else
proc_btm_rbt_insert(&proc->bif_timers, tmr);
-#endif
erts_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
tmr->type.head.receiver.proc = proc;
}
@@ -2000,10 +1757,6 @@ cancel_bif_timer(ErtsBifTimer *tmr)
if (state != ERTS_TMR_STATE_ACTIVE)
return 0;
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
- btm_clear_magic_binary(tmr);
-#endif
-
if (tmr->btm.bp)
free_message_buffer(tmr->btm.bp);
@@ -2022,19 +1775,12 @@ cancel_bif_timer(ErtsBifTimer *tmr)
* the btm tree by itself (it may be in
* the middle of tree destruction).
*/
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
- if (!ERTS_PROC_IS_EXITING(proc) && tmr->btm.proc_list.next) {
- proc_btm_list_delete(&proc->bif_timers, tmr);
- res = 1;
- }
-#else
if (!ERTS_PROC_IS_EXITING(proc)
&& tmr->btm.proc_tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
proc_btm_rbt_delete(&proc->bif_timers, tmr);
tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
res = 1;
}
-#endif
erts_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
}
@@ -2075,12 +1821,10 @@ access_btm(ErtsBifTimer *tmr, Uint32 sid, ErtsSchedulerData *esdp, int cancel)
queue_canceled_timer(esdp, sid, (ErtsTimer *) tmr);
}
else {
-#ifndef ERTS_MAGIC_REF_BIF_TIMERS
if (tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
btm_rbt_delete(&esdp->timer_service->btm_tree, tmr);
tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
}
-#endif
if (is_hlt) {
if (cncl_res > 0)
hl_timer_dec_refc(&tmr->type.hlt, tmr->type.hlt.head.roflgs);
@@ -2157,52 +1901,6 @@ send_async_info(Process *proc, ErtsProcLocks initial_locks,
return am_ok;
}
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
-
-static BIF_RETTYPE
-access_bif_timer(Process *c_p, Eterm tref, int cancel, int async, int info)
-{
- BIF_RETTYPE ret;
- Eterm res;
- Sint64 time_left;
-
- if (!is_internal_magic_ref(tref)) {
- if (is_not_ref(tref)) {
- ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
- return ret;
- }
- time_left = -1;
- }
- else {
- ErtsMagicBinary *mbin;
- mbin = (ErtsMagicBinary *) erts_magic_ref2bin(tref);
- if (mbin->destructor != bif_timer_ref_destructor)
- time_left = -1;
- else {
- ErtsBifTimer *tmr;
- Uint32 sid;
- tmr = magic_binary_to_btm(mbin);
- sid = erts_get_ref_numbers_thr_id(internal_magic_ref_numbers(tref));
- ASSERT(1 <= sid && sid <= erts_no_schedulers);
- time_left = access_btm(tmr, sid, erts_proc_sched_data(c_p), cancel);
- }
- }
-
- if (!info)
- res = am_ok;
- else if (!async)
- res = return_info(c_p, time_left);
- else
- res = send_async_info(c_p, ERTS_PROC_LOCK_MAIN,
- tref, cancel, time_left);
-
- ERTS_BIF_PREP_RET(ret, res);
-
- return ret;
-}
-
-#else /* !ERTS_MAGIC_REF_BIF_TIMERS */
-
static ERTS_INLINE Eterm
send_sync_info(Process *proc, ErtsProcLocks initial_locks,
Uint32 *refn, int cancel, Sint64 time_left)
@@ -2505,8 +2203,6 @@ no_timer:
return no_timer_result(c_p, tref, cancel, async, info);
}
-#endif /* !ERTS_MAGIC_REF_BIF_TIMERS */
-
static ERTS_INLINE int
bool_arg(Eterm val, int *argp)
{
@@ -2584,18 +2280,11 @@ exit_cancel_bif_timer(ErtsBifTimer *tmr, void *vesdp)
is_hlt = !!(roflgs & ERTS_TMR_ROFLG_HLT);
ERTS_HLT_ASSERT(sid == erts_get_ref_numbers_thr_id(ERTS_BTM_HLT2REFN(tmr)));
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
- ERTS_HLT_ASSERT(tmr->btm.proc_list.next);
-#else
ERTS_HLT_ASSERT(tmr->btm.proc_tree.parent
!= ERTS_HLT_PFIELD_NOT_IN_TABLE);
tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
-#endif
if (state == ERTS_TMR_STATE_ACTIVE) {
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
- btm_clear_magic_binary(tmr);
-#endif
if (tmr->btm.bp)
free_message_buffer(tmr->btm.bp);
@@ -2604,12 +2293,10 @@ exit_cancel_bif_timer(ErtsBifTimer *tmr, void *vesdp)
return;
}
-#ifndef ERTS_MAGIC_REF_BIF_TIMERS
if (tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
btm_rbt_delete(&esdp->timer_service->btm_tree, tmr);
tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
}
-#endif
if (is_hlt)
hlt_delete_timer(esdp, &tmr->type.hlt);
else
@@ -2627,28 +2314,17 @@ exit_cancel_bif_timer(ErtsBifTimer *tmr, void *vesdp)
# define ERTS_BTM_MAX_DESTROY_LIMIT 50
#endif
-#ifndef ERTS_MAGIC_REF_BIF_TIMERS
typedef struct {
ErtsBifTimers *bif_timers;
union {
proc_btm_rbt_yield_state_t proc_btm_yield_state;
} u;
} ErtsBifTimerYieldState;
-#endif
int erts_cancel_bif_timers(Process *p, ErtsBifTimers **btm, void **vyspp)
{
ErtsSchedulerData *esdp = erts_proc_sched_data(p);
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
-
- return proc_btm_list_foreach_destroy_yielding(btm,
- exit_cancel_bif_timer,
- (void *) esdp,
- ERTS_BTM_MAX_DESTROY_LIMIT);
-
-#else /* !ERTS_MAGIC_REF_BIF_TIMERS */
-
ErtsBifTimerYieldState ys = {*btm, {ERTS_RBT_YIELD_STAT_INITER}};
ErtsBifTimerYieldState *ysp;
int res;
@@ -2682,7 +2358,6 @@ int erts_cancel_bif_timers(Process *p, ErtsBifTimers **btm, void **vyspp)
return res;
-#endif /* !ERTS_MAGIC_REF_BIF_TIMERS */
}
static ERTS_INLINE int
@@ -3041,15 +2716,23 @@ erts_set_port_timer(Port *c_prt, Sint64 tmo)
check_canceled_queue(esdp, esdp->timer_service);
- timeout_pos = get_timeout_pos(erts_get_monotonic_time(esdp), tmo);
+ if (tmo == 0) {
+ erts_atomic_set_relb(&c_prt->common.timer, ERTS_PTMR_TIMEDOUT);
+ erts_port_task_schedule(c_prt->common.id,
+ &c_prt->timeout_task,
+ ERTS_PORT_TASK_TIMEOUT);
+ } else {
+
+ timeout_pos = get_timeout_pos(erts_get_monotonic_time(esdp), tmo);
- create_timer = (tmo < ERTS_TIMER_WHEEL_MSEC
- ? create_tw_timer
- : create_hl_timer);
- tmr = (void *) create_timer(esdp, timeout_pos, 0, ERTS_TMR_PORT,
- (void *) c_prt, c_prt->common.id,
- THE_NON_VALUE, NULL, NULL, NULL);
- erts_atomic_set_relb(&c_prt->common.timer, (erts_aint_t) tmr);
+ create_timer = (tmo < ERTS_TIMER_WHEEL_MSEC
+ ? create_tw_timer
+ : create_hl_timer);
+ tmr = (void *) create_timer(esdp, timeout_pos, 0, ERTS_TMR_PORT,
+ (void *) c_prt, c_prt->common.id,
+ THE_NON_VALUE, NULL, NULL, NULL);
+ erts_atomic_set_relb(&c_prt->common.timer, (erts_aint_t) tmr);
+ }
}
void
@@ -3108,11 +2791,6 @@ btm_print(ErtsBifTimer *tmr, void *vbtmp, ErtsMonotonicTime tpos, int is_hlt)
ErtsMonotonicTime left;
Eterm receiver;
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
- if (!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_BIF_TMR))
- return;
-#endif
-
if (is_hlt) {
ERTS_HLT_ASSERT(tmr->type.head.roflgs & ERTS_TMR_ROFLG_HLT);
if (tmr->type.hlt.timeout <= btmp->now)
@@ -3141,22 +2819,6 @@ btm_print(ErtsBifTimer *tmr, void *vbtmp, ErtsMonotonicTime tpos, int is_hlt)
(Sint64) left);
}
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
-
-static void
-hlt_btm_print(ErtsHLTimer *tmr, void *vbtmp)
-{
- btm_print((ErtsBifTimer *) tmr, vbtmp, 0, 1);
-}
-
-static void
-twt_btm_print(void *vbtmp, ErtsMonotonicTime tpos, void *vtwtp)
-{
- btm_print((ErtsBifTimer *) vtwtp, vbtmp, tpos, 0);
-}
-
-#else
-
static void
btm_tree_print(ErtsBifTimer *tmr, void *vbtmp)
{
@@ -3169,8 +2831,6 @@ btm_tree_print(ErtsBifTimer *tmr, void *vbtmp)
btm_print(tmr, vbtmp, tpos, is_hlt);
}
-#endif
-
void
erts_print_bif_timer_info(fmtfn_t to, void *to_arg)
{
@@ -3188,15 +2848,7 @@ erts_print_bif_timer_info(fmtfn_t to, void *to_arg)
for (six = 0; six < erts_no_schedulers; six++) {
ErtsHLTimerService *srv =
erts_aligned_scheduler_data[six].esd.timer_service;
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
- ErtsTimerWheel *twheel =
- erts_aligned_scheduler_data[six].esd.timer_wheel;
- erts_twheel_debug_foreach(twheel, tw_bif_timer_timeout,
- twt_btm_print, (void *) &btmp);
- time_rbt_foreach(srv->time_tree, hlt_btm_print, (void *) &btmp);
-#else
btm_rbt_foreach(srv->btm_tree, btm_tree_print, (void *) &btmp);
-#endif
}
}
@@ -3211,10 +2863,6 @@ typedef struct {
static void
debug_btm_foreach(ErtsBifTimer *tmr, void *vbtmfd)
{
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
- if (!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_BIF_TMR))
- return;
-#endif
if (erts_atomic32_read_nob(&tmr->btm.state) == ERTS_TMR_STATE_ACTIVE) {
ErtsBTMForeachDebug *btmfd = (ErtsBTMForeachDebug *) vbtmfd;
Eterm id = ((tmr->type.head.roflgs & ERTS_TMR_ROFLG_REG_NAME)
@@ -3224,22 +2872,6 @@ debug_btm_foreach(ErtsBifTimer *tmr, void *vbtmfd)
}
}
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
-
-static void
-hlt_debug_btm_foreach(ErtsHLTimer *tmr, void *vbtmfd)
-{
- debug_btm_foreach((ErtsBifTimer *) tmr, vbtmfd);
-}
-
-static void
-twt_debug_btm_foreach(void *vbtmfd, ErtsMonotonicTime tpos, void *vtwtp)
-{
- debug_btm_foreach((ErtsBifTimer *) vtwtp, vbtmfd);
-}
-
-#endif
-
void
erts_debug_bif_timer_foreach(void (*func)(Eterm,
Eterm,
@@ -3259,20 +2891,9 @@ erts_debug_bif_timer_foreach(void (*func)(Eterm,
for (six = 0; six < erts_no_schedulers; six++) {
ErtsHLTimerService *srv =
erts_aligned_scheduler_data[six].esd.timer_service;
-#ifdef ERTS_MAGIC_REF_BIF_TIMERS
- ErtsTimerWheel *twheel =
- erts_aligned_scheduler_data[six].esd.timer_wheel;
- erts_twheel_debug_foreach(twheel, tw_bif_timer_timeout,
- twt_debug_btm_foreach,
- (void *) &btmfd);
- time_rbt_foreach(srv->time_tree,
- hlt_debug_btm_foreach,
- (void *) &btmfd);
-#else
btm_rbt_foreach(srv->btm_tree,
debug_btm_foreach,
(void *) &btmfd);
-#endif
}
}
@@ -3395,9 +3016,7 @@ st_hdbg_func(ErtsHLTimer *tmr, void *vhdbg)
}
ERTS_HLT_ASSERT(tmr->time.tree.u.l.next->time.tree.u.l.prev == tmr);
ERTS_HLT_ASSERT(tmr->time.tree.u.l.prev->time.tree.u.l.next == tmr);
-#ifndef ERTS_MAGIC_REF_BIF_TIMERS
ERTS_HLT_ASSERT(btm_rbt_lookup(hdbg->srv->btm_tree, ERTS_BTM_HLT2REFN(tmr)) == tmr);
-#endif
}
static void
@@ -3426,10 +3045,8 @@ tt_hdbg_func(ErtsHLTimer *tmr, void *vhdbg)
& ~ERTS_HLT_PFLGS_MASK);
ERTS_HLT_ASSERT(tmr == prnt);
}
-#ifndef ERTS_MAGIC_REF_BIF_TIMERS
if (tmr->head.roflgs & ERTS_TMR_ROFLG_BIF_TMR)
ERTS_HLT_ASSERT(btm_rbt_lookup(hdbg->srv->btm_tree, ERTS_BTM_HLT2REFN(tmr)) == tmr);
-#endif
if (tmr->time.tree.same_time) {
ErtsHdbgHLT st_hdbg;
st_hdbg.srv = hdbg->srv;
@@ -3495,7 +3112,6 @@ hdbg_chk_srv(ErtsHLTimerService *srv)
time_rbt_foreach(srv->time_tree, tt_hdbg_func, (void *) &hdbg);
ERTS_HLT_ASSERT(hdbg.found_root);
}
-#ifndef ERTS_MAGIC_REF_BIF_TIMERS
if (srv->btm_tree) {
ErtsHdbgHLT hdbg;
hdbg.srv = srv;
@@ -3504,7 +3120,6 @@ hdbg_chk_srv(ErtsHLTimerService *srv)
btm_rbt_foreach(srv->btm_tree, bt_hdbg_func, (void *) &hdbg);
ERTS_HLT_ASSERT(hdbg.found_root);
}
-#endif
}
#endif /* ERTS_HLT_HARD_DEBUG */
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 57c6c10c7f..163724ed3c 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -78,7 +78,7 @@ const char etp_erts_version[] = ERLANG_VERSION;
const char etp_otp_release[] = ERLANG_OTP_RELEASE;
const char etp_compile_date[] = ERLANG_COMPILE_DATE;
const char etp_arch[] = ERLANG_ARCHITECTURE;
-#ifdef ERTS_ENABLE_KERNEL_POLL
+#if ERTS_ENABLE_KERNEL_POLL
const int erts_use_kernel_poll = 1;
const int etp_kernel_poll_support = 1;
#else
@@ -354,6 +354,7 @@ erl_init(int ncpu,
erts_init_bif();
erts_init_bif_chksum();
erts_init_bif_binary();
+ erts_init_bif_persistent_term();
erts_init_bif_re();
erts_init_unicode(); /* after RE to get access to PCRE unicode */
erts_init_external();
@@ -375,6 +376,27 @@ erl_init(int ncpu,
}
static Eterm
+erl_spawn_system_process(Process* parent, Eterm mod, Eterm func, Eterm args,
+ ErlSpawnOpts *so)
+{
+ Eterm res;
+ int arity;
+
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(parent));
+ arity = erts_list_length(args);
+
+ if (erts_find_function(mod, func, arity, erts_active_code_ix()) == NULL) {
+ erts_exit(ERTS_ERROR_EXIT, "No function %T:%T/%i\n", mod, func, arity);
+ }
+
+ so->flags |= SPO_SYSTEM_PROC;
+
+ res = erl_create_process(parent, mod, func, args, so);
+
+ return res;
+}
+
+static Eterm
erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char** argv)
{
int i;
@@ -385,12 +407,6 @@ erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char**
Process parent;
ErlSpawnOpts so;
Eterm env;
-
- start_mod = erts_atom_put((byte *) modname, sys_strlen(modname), ERTS_ATOM_ENC_LATIN1, 1);
- if (erts_find_function(start_mod, am_start, 2,
- erts_active_code_ix()) == NULL) {
- erts_exit(ERTS_ERROR_EXIT, "No function %s:start/2\n", modname);
- }
/*
* We need a dummy parent process to be able to call erl_create_process().
@@ -398,6 +414,7 @@ erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char**
erts_init_empty_process(&parent);
erts_proc_lock(&parent, ERTS_PROC_LOCK_MAIN);
+
hp = HAlloc(&parent, argc*2 + 4);
args = NIL;
for (i = argc-1; i >= 0; i--) {
@@ -410,44 +427,73 @@ erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char**
hp += 2;
args = CONS(hp, env, args);
- so.flags = erts_default_spo_flags|SPO_SYSTEM_PROC;
- res = erl_create_process(&parent, start_mod, am_start, args, &so);
+ start_mod = erts_atom_put((byte *) modname, sys_strlen(modname),
+ ERTS_ATOM_ENC_LATIN1, 1);
+
+ so.flags = erts_default_spo_flags;
+ res = erl_spawn_system_process(&parent, start_mod, am_start, args, &so);
+ ASSERT(is_internal_pid(res));
+
erts_proc_unlock(&parent, ERTS_PROC_LOCK_MAIN);
erts_cleanup_empty_process(&parent);
+
return res;
}
static Eterm
erl_system_process_otp(Eterm parent_pid, char* modname, int off_heap_msgq, int prio)
-{
- Eterm start_mod;
- Process* parent;
+{
+ Process *parent;
ErlSpawnOpts so;
- Eterm res;
-
- start_mod = erts_atom_put((byte *) modname, sys_strlen(modname), ERTS_ATOM_ENC_LATIN1, 1);
- if (erts_find_function(start_mod, am_start, 0,
- erts_active_code_ix()) == NULL) {
- erts_exit(ERTS_ERROR_EXIT, "No function %s:start/0\n", modname);
- }
+ Eterm mod, res;
parent = erts_pid2proc(NULL, 0, parent_pid, ERTS_PROC_LOCK_MAIN);
+ mod = erts_atom_put((byte *) modname, sys_strlen(modname),
+ ERTS_ATOM_ENC_LATIN1, 1);
+
+ so.flags = erts_default_spo_flags|SPO_USE_ARGS;
- so.flags = erts_default_spo_flags|SPO_SYSTEM_PROC|SPO_USE_ARGS;
- if (off_heap_msgq)
+ if (off_heap_msgq) {
so.flags |= SPO_OFF_HEAP_MSGQ;
- so.min_heap_size = H_MIN_SIZE;
- so.min_vheap_size = BIN_VH_MIN_SIZE;
- so.max_heap_size = H_MAX_SIZE;
- so.max_heap_flags = H_MAX_FLAGS;
- so.priority = prio;
- so.max_gen_gcs = (Uint16) erts_atomic32_read_nob(&erts_max_gen_gcs);
- so.scheduler = 0;
- res = erl_create_process(parent, start_mod, am_start, NIL, &so);
+ }
+
+ so.min_heap_size = H_MIN_SIZE;
+ so.min_vheap_size = BIN_VH_MIN_SIZE;
+ so.max_heap_size = H_MAX_SIZE;
+ so.max_heap_flags = H_MAX_FLAGS;
+ so.priority = prio;
+ so.max_gen_gcs = (Uint16) erts_atomic32_read_nob(&erts_max_gen_gcs);
+ so.scheduler = 0;
+
+ res = erl_spawn_system_process(parent, mod, am_start, NIL, &so);
+ ASSERT(is_internal_pid(res));
+
erts_proc_unlock(parent, ERTS_PROC_LOCK_MAIN);
+
return res;
}
+Eterm erts_internal_spawn_system_process_3(BIF_ALIST_3) {
+ Eterm mod, func, args, res;
+ ErlSpawnOpts so;
+
+ mod = BIF_ARG_1;
+ func = BIF_ARG_2;
+ args = BIF_ARG_3;
+
+ ASSERT(is_atom(mod));
+ ASSERT(is_atom(func));
+ ASSERT(erts_list_length(args) >= 0);
+
+ so.flags = erts_default_spo_flags;
+ res = erl_spawn_system_process(BIF_P, mod, func, args, &so);
+
+ if (is_non_value(res)) {
+ BIF_ERROR(BIF_P, so.error_code);
+ }
+
+ BIF_RET(res);
+}
Eterm
erts_preloaded(Process* p)
@@ -683,6 +729,9 @@ void erts_usage(void)
erts_fprintf(stderr, "-zebwt val set ets busy wait threshold, valid values are:\n");
erts_fprintf(stderr, " none|very_short|short|medium|long|very_long|extremely_long\n");
#endif
+ erts_fprintf(stderr, "-ztma bool enable/disable tuple module apply support in emulator\n");
+ erts_fprintf(stderr, " (transitional flag for parameterized modules; recompile\n");
+ erts_fprintf(stderr, " with +tuple_calls for compatibility with future versions)\n");
erts_fprintf(stderr, "\n");
erts_fprintf(stderr, "Note that if the emulator is started with erlexec (typically\n");
erts_fprintf(stderr, "from the erl script), these flags should be specified with +.\n");
@@ -2166,6 +2215,17 @@ erl_start(int argc, char **argv)
erts_usage();
}
}
+ else if (has_prefix("tma", sub_param)) {
+ arg = get_arg(sub_param+3, argv[i+1], &i);
+ if (sys_strcmp(arg,"true") == 0) {
+ tuple_module_apply = 1;
+ } else if (sys_strcmp(arg,"false") == 0) {
+ tuple_module_apply = 0;
+ } else {
+ erts_fprintf(stderr, "bad tuple module apply %s\n", arg);
+ erts_usage();
+ }
+ }
else {
erts_fprintf(stderr, "bad -z option %s\n", argv[i]);
erts_usage();
@@ -2358,8 +2418,8 @@ system_cleanup(int flush_async)
* The exiting thread might be waiting for
* us to block; need to update status...
*/
- erts_thr_progress_active(NULL, 0);
- erts_thr_progress_prepare_wait(NULL);
+ erts_thr_progress_active(erts_thr_prgr_data(NULL), 0);
+ erts_thr_progress_prepare_wait(erts_thr_prgr_data(NULL));
}
/* Wait forever... */
while (1)
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 463ae898a3..1416c5f96c 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -97,6 +97,8 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "proc_btm", "pid" },
{ "dist_entry", "address" },
{ "dist_entry_links", "address" },
+ { "update_persistent_term_permission", NULL },
+ { "persistent_term_delete_permission", NULL },
{ "code_write_permission", NULL },
{ "purge_state", NULL },
{ "proc_status", "pid" },
diff --git a/erts/emulator/beam/erl_map.c b/erts/emulator/beam/erl_map.c
index cba17d3e6a..8f96dc3d23 100644
--- a/erts/emulator/beam/erl_map.c
+++ b/erts/emulator/beam/erl_map.c
@@ -475,7 +475,7 @@ Eterm erts_hashmap_from_array(ErtsHeapFactory* factory, Eterm *leafs, Uint n,
Eterm erts_map_from_ks_and_vs(ErtsHeapFactory *factory, Eterm *ks0, Eterm *vs0, Uint n)
{
- if (n < MAP_SMALL_MAP_LIMIT) {
+ if (n <= MAP_SMALL_MAP_LIMIT) {
Eterm *ks, *vs, *hp;
flatmap_t *mp;
Eterm keys;
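The comparison change is an off-by-one boundary fix: a map with exactly MAP_SMALL_MAP_LIMIT pairs should still get the flat representation, matching the other map construction paths. A tiny illustration (the limit value 32 is assumed from erl_map.h):

    #include <stdio.h>

    #define MAP_SMALL_MAP_LIMIT 32   /* assumed value */

    int main(void)
    {
        unsigned n = MAP_SMALL_MAP_LIMIT;    /* exactly at the boundary */

        /* before: n < LIMIT chose the hashmap here; after: n <= LIMIT
         * keeps the boundary case a flatmap */
        printf("n=%u -> %s\n", n,
               n <= MAP_SMALL_MAP_LIMIT ? "flatmap" : "hashmap");
        return 0;
    }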
diff --git a/erts/emulator/beam/erl_monitor_link.h b/erts/emulator/beam/erl_monitor_link.h
index 9ff8aa509a..ed7bf7d54a 100644
--- a/erts/emulator/beam/erl_monitor_link.h
+++ b/erts/emulator/beam/erl_monitor_link.h
@@ -1387,11 +1387,14 @@ ERTS_GLB_INLINE void
erts_monitor_release(ErtsMonitor *mon)
{
ErtsMonitorData *mdp = erts_monitor_to_data(mon);
- ERTS_ML_ASSERT(!(mon->flags & ERTS_ML_FLG_IN_TABLE));
ERTS_ML_ASSERT(erts_atomic32_read_nob(&mdp->refc) > 0);
- if (erts_atomic32_dec_read_nob(&mdp->refc) == 0)
+ if (erts_atomic32_dec_read_nob(&mdp->refc) == 0) {
+ ERTS_ML_ASSERT(!(mdp->origin.flags & ERTS_ML_FLG_IN_TABLE));
+ ERTS_ML_ASSERT(!(mdp->target.flags & ERTS_ML_FLG_IN_TABLE));
+
erts_monitor_destroy__(mdp);
+ }
}
ERTS_GLB_INLINE void
@@ -1399,12 +1402,14 @@ erts_monitor_release_both(ErtsMonitorData *mdp)
{
ERTS_ML_ASSERT((mdp->origin.flags & ERTS_ML_FLGS_SAME)
== (mdp->target.flags & ERTS_ML_FLGS_SAME));
- ERTS_ML_ASSERT(!(mdp->origin.flags & ERTS_ML_FLG_IN_TABLE));
- ERTS_ML_ASSERT(!(mdp->target.flags & ERTS_ML_FLG_IN_TABLE));
ERTS_ML_ASSERT(erts_atomic32_read_nob(&mdp->refc) >= 2);
- if (erts_atomic32_add_read_nob(&mdp->refc, (erts_aint32_t) -2) == 0)
+ if (erts_atomic32_add_read_nob(&mdp->refc, (erts_aint32_t) -2) == 0) {
+ ERTS_ML_ASSERT(!(mdp->origin.flags & ERTS_ML_FLG_IN_TABLE));
+ ERTS_ML_ASSERT(!(mdp->target.flags & ERTS_ML_FLG_IN_TABLE));
+
erts_monitor_destroy__(mdp);
+ }
}
ERTS_GLB_INLINE int
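Moving the IN_TABLE assertions inside the refc==0 branch matters because another thread may legitimately still hold the monitor in a table while this reference is dropped; the "fully detached" invariant only binds whoever releases the last reference. A generic stand-alone sketch of that pattern in C11 (names are illustrative, not the emulator's):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct {
        atomic_int refc;
        int in_table;          /* stands in for ERTS_ML_FLG_IN_TABLE */
    } obj_t;

    static void release(obj_t *o)
    {
        if (atomic_fetch_sub(&o->refc, 1) == 1) {  /* dropped last ref */
            if (o->in_table) {                     /* would be an assert */
                fprintf(stderr, "still in table!\n");
                abort();
            }
            free(o);
        }
    }

    int main(void)
    {
        obj_t *o = calloc(1, sizeof *o);
        atomic_init(&o->refc, 2);
        release(o);   /* a ref remains; o may still sit in a table */
        release(o);   /* last ref gone; the invariant must now hold */
        return 0;
    }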
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index ee6e6085b6..ebef485b04 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -2354,6 +2354,13 @@ static void dtor_demonitor(ErtsMonitor* mon, void* context)
erts_proc_sig_send_demonitor(mon);
}
+#ifdef DEBUG
+int erts_dbg_is_resource_dying(ErtsResource* resource)
+{
+ return resource->monitors && rmon_is_dying(resource->monitors);
+}
+#endif
+
# define NIF_RESOURCE_DTOR &nif_resource_dtor
static int nif_resource_dtor(Binary* bin)
@@ -2694,8 +2701,12 @@ int enif_consume_timeslice(ErlNifEnv* env, int percent)
{
Process *proc;
Sint reds;
+ int sched;
- execution_state(env, &proc, NULL);
+ execution_state(env, &proc, &sched);
+
+ if (sched < 0)
+ return 0; /* no-op on dirty scheduler */
ASSERT(is_proc_bound(env) && percent >= 1 && percent <= 100);
if (percent < 1) percent = 1;
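The added guard turns enif_consume_timeslice() into a no-op on dirty schedulers, where reduction-based yielding makes no sense. A usage sketch of how the API is typically paired with enif_schedule_nif() (the two work helpers are hypothetical):

    #include <erl_nif.h>

    extern int  have_more_work(void);   /* hypothetical helper */
    extern void do_one_unit(void);      /* hypothetical helper */

    static ERL_NIF_TERM
    chunked_work(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
    {
        while (have_more_work()) {
            do_one_unit();
            /* nonzero once ~100% of the slice is consumed; with this
             * change it is always 0 on a dirty scheduler, so the NIF
             * simply runs to completion there */
            if (enif_consume_timeslice(env, 1))
                return enif_schedule_nif(env, "chunked_work", 0,
                                         chunked_work, argc, argv);
        }
        return enif_make_atom(env, "done");
    }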
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
index f4dc60941a..18ed782ae3 100644
--- a/erts/emulator/beam/erl_node_tables.c
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -421,8 +421,25 @@ static void schedule_delete_dist_entry(DistEntry* dep)
*
* Note that timeouts do not guarantee thread progress.
*/
- erts_schedule_thr_prgr_later_op(start_timer_delete_dist_entry,
- dep, &dep->later_op);
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ if (esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ erts_schedule_thr_prgr_later_op(start_timer_delete_dist_entry,
+ dep, &dep->later_op);
+ } else {
+ /*
+ * Since OTP 20, it is possible that this destructor is executed on
+ * a dirty scheduler. Aux work cannot be done on a dirty scheduler,
+ * and scheduling aux work there makes the scheduler loop
+ * infinitely.
+ * To avoid this, schedule the function again on the first normal
+ * scheduler, which is guaranteed to always be online. Since this is
+ * a rare event, it should not cause a noticeable utilisation hit.
+ */
+ erts_schedule_misc_aux_work(1,
+ (void (*)(void *))schedule_delete_dist_entry,
+ (void *) dep);
+ }
}
static void
diff --git a/erts/emulator/beam/erl_port.h b/erts/emulator/beam/erl_port.h
index 2be0a5bf74..039d8cf67a 100644
--- a/erts/emulator/beam/erl_port.h
+++ b/erts/emulator/beam/erl_port.h
@@ -334,6 +334,8 @@ Eterm erts_request_io_bytes(Process *c_p);
#define ERTS_PORT_SFLG_INVALID ((Uint32) (1 << 11))
/* Last port to terminate halts the emulator */
#define ERTS_PORT_SFLG_HALT ((Uint32) (1 << 12))
+/* Check if the event in ready_input should be cleaned up */
+#define ERTS_PORT_SFLG_CHECK_FD_CLEANUP ((Uint32) (1 << 13))
#ifdef DEBUG
/* Only debug: make sure all flags aren't cleared unintentionally */
#define ERTS_PORT_SFLG_PORT_DEBUG ((Uint32) (1 << 31))
@@ -1016,6 +1018,6 @@ int erts_port_output_async(Port *, Eterm, Eterm);
/*
* Signals from ports to ports. Used by sys drivers.
*/
-int erl_drv_port_control(Eterm, char, char*, ErlDrvSizeT);
+int erl_drv_port_control(Eterm, unsigned int, char*, ErlDrvSizeT);
#endif
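Widening the command argument of erl_drv_port_control() from char to unsigned int protects command codes above 0x7F: where plain char is signed they would arrive sign-extended and negative. A demonstration:

    #include <stdio.h>

    int main(void)
    {
        char         c = (char) 0xFF;   /* plain char may be signed */
        unsigned int u = 0xFF;

        /* on signed-char platforms c compares and prints as -1 */
        printf("%d %u\n", (int) c, u);  /* typically "-1 255" */
        return 0;
    }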
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 4928d80f27..c8f2e88127 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -97,6 +97,9 @@ static void chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_q
typedef union {
struct { /* I/O tasks */
ErlDrvEvent event;
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ int is_scheduler_event;
+#endif
} io;
struct {
ErtsProc2PortSigCallback callback;
@@ -141,6 +144,9 @@ struct ErtsPortTaskBusyCallerTable_ {
ErtsPortTaskBusyCaller pre_alloc_busy_caller;
};
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+erts_atomic_t erts_port_task_outstanding_io_tasks;
+#endif
static void begin_port_cleanup(Port *pp,
ErtsPortTask **execq,
@@ -578,13 +584,26 @@ reset_handle(ErtsPortTask *ptp)
}
static ERTS_INLINE void
-reset_executed_io_task_handle(ErtsPortTask *ptp)
+reset_executed_io_task_handle(Port *prt, ErtsPortTask *ptp)
{
if (ptp->u.alive.handle) {
ASSERT(ptp == handle2task(ptp->u.alive.handle));
- /* The port task handle is reset inside task_executed */
- erts_io_notify_port_task_executed(ptp->type, ptp->u.alive.handle,
- reset_port_task_handle);
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ if (ptp->u.alive.td.io.is_scheduler_event) {
+ if ((erts_atomic32_read_nob(&prt->state) & ERTS_PORT_SFLG_CHECK_FD_CLEANUP)) {
+ erts_io_notify_port_task_executed(ptp->type, ptp->u.alive.handle,
+ reset_port_task_handle);
+ erts_atomic32_read_band_nob(&prt->state, ~ERTS_PORT_SFLG_CHECK_FD_CLEANUP);
+ } else {
+ reset_port_task_handle(ptp->u.alive.handle);
+ }
+ } else
+#endif
+ {
+ /* The port task handle is reset inside task_executed */
+ erts_io_notify_port_task_executed(ptp->type, ptp->u.alive.handle,
+ reset_port_task_handle);
+ }
}
}
@@ -1307,6 +1326,22 @@ erts_port_task_abort(ErtsPortTaskHandle *pthp)
res = - 1; /* Task already aborted, executing, or executed */
else {
reset_port_task_handle(pthp);
+
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ switch (ptp->type) {
+ case ERTS_PORT_TASK_INPUT:
+ case ERTS_PORT_TASK_OUTPUT:
+ if (ptp->u.alive.td.io.is_scheduler_event) {
+ ASSERT(erts_atomic_read_nob(
+ &erts_port_task_outstanding_io_tasks) > 0);
+ erts_atomic_dec_relb(&erts_port_task_outstanding_io_tasks);
+ }
+ break;
+ default:
+ break;
+ }
+#endif
+
res = 0;
}
}
@@ -1442,7 +1477,14 @@ erts_port_task_schedule(Eterm id,
va_list argp;
va_start(argp, type);
ptp->u.alive.td.io.event = va_arg(argp, ErlDrvEvent);
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ ptp->u.alive.td.io.is_scheduler_event = va_arg(argp, int);
+#endif
va_end(argp);
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ if (ptp->u.alive.td.io.is_scheduler_event)
+ erts_atomic_inc_relb(&erts_port_task_outstanding_io_tasks);
+#endif
break;
}
case ERTS_PORT_TASK_PROC_SIG: {
@@ -1621,12 +1663,14 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
int processing_busy_q;
int vreds = 0;
int reds = 0;
- erts_aint_t io_tasks_executed = 0;
int fpe_was_unmasked;
erts_aint32_t state;
int active;
Uint64 start_time = 0;
ErtsSchedulerData *esdp = runq->scheduler;
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ erts_aint_t io_tasks_executed = 0;
+#endif
ERTS_MSACC_PUSH_STATE_M();
ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq));
@@ -1722,8 +1766,11 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
for input and output */
(*pp->drv_ptr->ready_input)((ErlDrvData) pp->drv_data,
ptp->u.alive.td.io.event);
- reset_executed_io_task_handle(ptp);
- io_tasks_executed++;
+ reset_executed_io_task_handle(pp, ptp);
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ if (ptp->u.alive.td.io.is_scheduler_event)
+ io_tasks_executed++;
+#endif
break;
case ERTS_PORT_TASK_OUTPUT:
reds = ERTS_PORT_REDS_OUTPUT;
@@ -1732,8 +1779,11 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
LTTNG_DRIVER(driver_ready_output, pp);
(*pp->drv_ptr->ready_output)((ErlDrvData) pp->drv_data,
ptp->u.alive.td.io.event);
- reset_executed_io_task_handle(ptp);
- io_tasks_executed++;
+ reset_executed_io_task_handle(pp, ptp);
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ if (ptp->u.alive.td.io.is_scheduler_event)
+ io_tasks_executed++;
+#endif
break;
case ERTS_PORT_TASK_PROC_SIG: {
ErtsProc2PortSigData *sigdp = &ptp->u.alive.td.psig.data;
@@ -1799,6 +1849,15 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
erts_unblock_fpe(fpe_was_unmasked);
ERTS_MSACC_POP_STATE_M();
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ if (io_tasks_executed) {
+ ASSERT(erts_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
+ >= io_tasks_executed);
+ erts_atomic_add_relb(&erts_port_task_outstanding_io_tasks,
+ -1*io_tasks_executed);
+ }
+#endif
+
ASSERT(runq == erts_get_runq_port(pp));
active = finalize_exec(pp, &execq, processing_busy_q);
@@ -2086,6 +2145,10 @@ erts_dequeue_port(ErtsRunQueue *rq)
void
erts_port_task_init(void)
{
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ erts_atomic_init_nob(&erts_port_task_outstanding_io_tasks,
+ (erts_aint_t) 0);
+#endif
init_port_task_alloc(erts_no_schedulers + erts_no_poll_threads
+ 1); /* aux_thread */
init_busy_caller_table_alloc();
diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h
index ae78a7d8a3..ca5183b305 100644
--- a/erts/emulator/beam/erl_port_task.h
+++ b/erts/emulator/beam/erl_port_task.h
@@ -38,6 +38,8 @@ typedef erts_atomic_t ErtsPortTaskHandle;
#ifndef ERL_PORT_TASK_H__
#define ERL_PORT_TASK_H__
+#include "erl_poll.h"
+
#undef ERTS_INCLUDE_SCHEDULER_INTERNALS
#if (defined(ERL_PROCESS_C__) \
|| defined(ERL_PORT_TASK_C__) \
@@ -54,8 +56,8 @@ typedef erts_atomic_t ErtsPortTaskHandle;
#define ERTS_PT_FLG_BAD_OUTPUT (1 << 4)
typedef enum {
- ERTS_PORT_TASK_INPUT,
- ERTS_PORT_TASK_OUTPUT,
+ ERTS_PORT_TASK_INPUT = 0,
+ ERTS_PORT_TASK_OUTPUT = 1,
ERTS_PORT_TASK_TIMEOUT,
ERTS_PORT_TASK_DIST_CMD,
ERTS_PORT_TASK_PROC_SIG
@@ -134,6 +136,12 @@ ERTS_GLB_INLINE void erts_port_task_sched_unlock(ErtsPortTaskSched *ptsp);
ERTS_GLB_INLINE int erts_port_task_sched_lock_is_locked(ErtsPortTaskSched *ptsp);
ERTS_GLB_INLINE void erts_port_task_sched_enter_exiting_state(ErtsPortTaskSched *ptsp);
+#if defined(ERTS_INCLUDE_SCHEDULER_INTERNALS) && ERTS_POLL_USE_SCHEDULER_POLLING
+ERTS_GLB_INLINE int erts_port_task_have_outstanding_io_tasks(void);
+/* NOTE: Do not access any of the exported variables directly */
+extern erts_atomic_t erts_port_task_outstanding_io_tasks;
+#endif
+
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
@@ -211,6 +219,15 @@ erts_port_task_sched_enter_exiting_state(ErtsPortTaskSched *ptsp)
erts_atomic32_read_bor_nob(&ptsp->flags, ERTS_PTS_FLG_EXITING);
}
+#if defined(ERTS_INCLUDE_SCHEDULER_INTERNALS) && ERTS_POLL_USE_SCHEDULER_POLLING
+ERTS_GLB_INLINE int
+erts_port_task_have_outstanding_io_tasks(void)
+{
+ return (erts_atomic_read_acqb(&erts_port_task_outstanding_io_tasks)
+ != 0);
+}
+#endif
+
#endif
#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
diff --git a/erts/emulator/beam/erl_printf_term.c b/erts/emulator/beam/erl_printf_term.c
index 990a01b96f..9cfb7fc681 100644
--- a/erts/emulator/beam/erl_printf_term.c
+++ b/erts/emulator/beam/erl_printf_term.c
@@ -364,13 +364,13 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount) {
int print_res;
char def_buf[64];
char *buf, *big_str;
- Uint sz = (Uint) big_decimal_estimate(wobj);
+ Uint sz = (Uint) big_integer_estimate(wobj, 10);
sz++;
if (sz <= 64)
buf = &def_buf[0];
else
buf = erts_alloc(ERTS_ALC_T_TMP, sz);
- big_str = erts_big_to_string(wobj, buf, sz);
+ big_str = erts_big_to_string(wobj, 10, buf, sz);
print_res = erts_printf_string(fn, arg, big_str);
if (buf != &def_buf[0])
erts_free(ERTS_ALC_T_TMP, (void *) buf);
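The printing path now uses the base-aware pair big_integer_estimate()/erts_big_to_string() with an explicit base of 10 instead of the decimal-only helpers. A toy sketch of why a size estimate must scale with the base; the formula is an assumption about the estimate's general shape, not the emulator's exact code (link with -lm):

    #include <math.h>
    #include <stdio.h>

    /* n value-bits need about n / log2(base) digits,
     * plus room for a sign and the terminating NUL */
    static unsigned digits_estimate(unsigned bits, unsigned base)
    {
        return (unsigned) (bits / log2((double) base)) + 2;
    }

    int main(void)
    {
        printf("%u\n", digits_estimate(64, 10));   /* about 21 */
        printf("%u\n", digits_estimate(64, 16));   /* about 18 */
        return 0;
    }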
diff --git a/erts/emulator/beam/erl_proc_sig_queue.c b/erts/emulator/beam/erl_proc_sig_queue.c
index f343e984f7..18418a76e1 100644
--- a/erts/emulator/beam/erl_proc_sig_queue.c
+++ b/erts/emulator/beam/erl_proc_sig_queue.c
@@ -3812,7 +3812,6 @@ clear_seq_trace_token(ErtsMessage *sig)
void
erts_proc_sig_clear_seq_trace_tokens(Process *c_p)
{
- ASSERT(erts_thr_progress_is_blocking());
erts_proc_sig_fetch(c_p);
ERTS_FOREACH_SIG_PRIVQS(c_p, sig, clear_seq_trace_token(sig));
}
diff --git a/erts/emulator/beam/erl_proc_sig_queue.h b/erts/emulator/beam/erl_proc_sig_queue.h
index 3fc2d06b2d..6b065a7add 100644
--- a/erts/emulator/beam/erl_proc_sig_queue.h
+++ b/erts/emulator/beam/erl_proc_sig_queue.h
@@ -989,8 +989,7 @@ erts_proc_sig_fetch(Process *proc)
Sint res = 0;
ErtsSignal *sig;
- ERTS_LC_ASSERT(erts_thr_progress_is_blocking()
- || ERTS_PROC_IS_EXITING(proc)
+ ERTS_LC_ASSERT(ERTS_PROC_IS_EXITING(proc)
|| ((erts_proc_lc_my_proc_locks(proc)
& (ERTS_PROC_LOCK_MAIN
| ERTS_PROC_LOCK_MSGQ))
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 0f7f1598fd..cc02fbad1e 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -92,10 +92,6 @@
#undef HARDDEBUG
#endif
-#ifdef HARDDEBUG
-#define HARDDEBUG_RUNQS
-#endif
-
#ifdef HIPE
#include "hipe_mode_switch.h" /* for hipe_init_process() */
#include "hipe_signal.h" /* for hipe_thread_signal_init() */
@@ -174,7 +170,6 @@ ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE];
typedef struct {
int aux_work;
int tse;
- int sys_schedule;
} ErtsBusyWaitParams;
static ErtsBusyWaitParams sched_busy_wait_params[ERTS_SCHED_TYPE_LAST + 1];
@@ -344,6 +339,10 @@ erts_sched_stat_t erts_sched_stat;
static erts_tsd_key_t ERTS_WRITE_UNLIKELY(sched_data_key);
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+static erts_atomic32_t function_calls;
+static erts_atomic32_t doing_sys_schedule;
+#endif
static erts_atomic32_t no_empty_run_queues;
long erts_runq_supervision_interval = 0;
static ethr_event runq_supervision_event;
@@ -1646,7 +1645,7 @@ haw_thr_prgr_wakeup(ErtsAuxWorkData *awdp, ErtsThrPrgrVal val)
awdp->latest_wakeup = val;
haw_chk_later_cleanup_op_wakeup(awdp, val);
}
- erts_thr_progress_wakeup(awdp->esdp, val);
+ erts_thr_progress_wakeup(erts_thr_prgr_data(awdp->esdp), val);
}
}
@@ -1656,7 +1655,7 @@ haw_thr_prgr_soft_wakeup(ErtsAuxWorkData *awdp, ErtsThrPrgrVal val)
if (erts_thr_progress_cmp(val, awdp->latest_wakeup) > 0) {
awdp->latest_wakeup = val;
haw_chk_later_cleanup_op_wakeup(awdp, val);
- erts_thr_progress_wakeup(awdp->esdp, val);
+ erts_thr_progress_wakeup(erts_thr_prgr_data(awdp->esdp), val);
}
}
@@ -1670,7 +1669,7 @@ haw_thr_prgr_later_cleanup_op_wakeup(ErtsAuxWorkData *awdp, ErtsThrPrgrVal val,
else {
awdp->latest_wakeup = val;
awdp->later_op.size = thr_prgr_later_cleanup_op_threshold;
- erts_thr_progress_wakeup(awdp->esdp, val);
+ erts_thr_progress_wakeup(erts_thr_prgr_data(awdp->esdp), val);
}
}
}
@@ -2463,6 +2462,13 @@ handle_reap_ports(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
erts_port_lock(prt);
+ if (prt->common.u.alive.reg &&
+ prt->common.u.alive.reg->name == am_heart_port) {
+ /* Leave the heart port alone so it is not killed before flushing is done */
+ erts_port_release(prt);
+ continue;
+ }
+
state = erts_atomic32_read_nob(&prt->state);
if (!(state & (ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
| ERTS_PORT_SFLG_HALT))) {
@@ -3066,6 +3072,7 @@ aux_thread(void *unused)
ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(-1);
erts_aint32_t aux_work;
ErtsThrPrgrCallbacks callbacks;
+ ErtsThrPrgrData *tpd;
int thr_prgr_active = 1;
ERTS_MSACC_DECLARE_CACHE();
@@ -3087,12 +3094,16 @@ aux_thread(void *unused)
callbacks.wait = thr_prgr_wait;
callbacks.finalize_wait = thr_prgr_fin_wait;
- erts_thr_progress_register_managed_thread(NULL, &callbacks, 1);
+ tpd = erts_thr_progress_register_managed_thread(NULL, &callbacks, 1);
init_aux_work_data(awdp, NULL, NULL);
awdp->ssi = ssi;
#if ERTS_POLL_USE_FALLBACK
- ssi->psi = erts_create_pollset_thread(-1);
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ ssi->psi = erts_create_pollset_thread(-2, tpd);
+#else
+ ssi->psi = erts_create_pollset_thread(-1, tpd);
+#endif
#endif
sched_prep_spin_wait(ssi);
@@ -3105,11 +3116,11 @@ aux_thread(void *unused)
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
if (aux_work) {
if (!thr_prgr_active)
- erts_thr_progress_active(NULL, thr_prgr_active = 1);
+ erts_thr_progress_active(tpd, thr_prgr_active = 1);
aux_work = handle_aux_work(awdp, aux_work, 1);
ERTS_MSACC_UPDATE_CACHE();
- if (aux_work && erts_thr_progress_update(NULL))
- erts_thr_progress_leader_update(NULL);
+ if (aux_work && erts_thr_progress_update(tpd))
+ erts_thr_progress_leader_update(tpd);
}
if (!aux_work) {
@@ -3120,7 +3131,7 @@ aux_thread(void *unused)
#endif
if (thr_prgr_active)
- erts_thr_progress_active(NULL, thr_prgr_active = 0);
+ erts_thr_progress_active(tpd, thr_prgr_active = 0);
#if ERTS_POLL_USE_FALLBACK
@@ -3132,11 +3143,11 @@ aux_thread(void *unused)
if (flgs & ERTS_SSI_FLG_SLEEPING) {
ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
ASSERT(flgs & ERTS_SSI_FLG_WAITING);
- erts_check_io(ssi->psi);
+ erts_check_io(ssi->psi, ERTS_POLL_INF_TIMEOUT);
}
}
#else
- erts_thr_progress_prepare_wait(NULL);
+ erts_thr_progress_prepare_wait(tpd);
flgs = sched_spin_wait(ssi, 0);
@@ -3153,7 +3164,7 @@ aux_thread(void *unused)
ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_OTHER);
}
}
- erts_thr_progress_finalize_wait(NULL);
+ erts_thr_progress_finalize_wait(tpd);
#endif
}
@@ -3171,7 +3182,8 @@ poll_thread(void *arg)
erts_aint32_t aux_work;
ErtsThrPrgrCallbacks callbacks;
int thr_prgr_active = 1;
- struct erts_poll_thread *psi = erts_create_pollset_thread(id);
+ struct erts_poll_thread *psi;
+ ErtsThrPrgrData *tpd;
ERTS_MSACC_DECLARE_CACHE();
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -3192,9 +3204,12 @@ poll_thread(void *arg)
callbacks.wait = thr_prgr_wait;
callbacks.finalize_wait = thr_prgr_fin_wait;
- erts_thr_progress_register_managed_thread(NULL, &callbacks, 0);
+ tpd = erts_thr_progress_register_managed_thread(NULL, &callbacks, 0);
init_aux_work_data(awdp, NULL, NULL);
awdp->ssi = ssi;
+
+ psi = erts_create_pollset_thread(id, tpd);
+
ssi->psi = psi;
sched_prep_spin_wait(ssi);
@@ -3207,16 +3222,16 @@ poll_thread(void *arg)
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
if (aux_work) {
if (!thr_prgr_active)
- erts_thr_progress_active(NULL, thr_prgr_active = 1);
+ erts_thr_progress_active(tpd, thr_prgr_active = 1);
aux_work = handle_aux_work(awdp, aux_work, 1);
ERTS_MSACC_UPDATE_CACHE();
- if (aux_work && erts_thr_progress_update(NULL))
- erts_thr_progress_leader_update(NULL);
+ if (aux_work && erts_thr_progress_update(tpd))
+ erts_thr_progress_leader_update(tpd);
}
if (!aux_work) {
if (thr_prgr_active)
- erts_thr_progress_active(NULL, thr_prgr_active = 0);
+ erts_thr_progress_active(tpd, thr_prgr_active = 0);
flgs = sched_spin_wait(ssi, 0);
@@ -3226,7 +3241,7 @@ poll_thread(void *arg)
if (flgs & ERTS_SSI_FLG_SLEEPING) {
ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
ASSERT(flgs & ERTS_SSI_FLG_WAITING);
- erts_check_io(psi);
+ erts_check_io(psi, ERTS_POLL_INF_TIMEOUT);
}
}
}
@@ -3236,6 +3251,78 @@ poll_thread(void *arg)
return NULL;
}
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+static ERTS_INLINE void
+clear_sys_scheduling(void)
+{
+ erts_atomic32_set_relb(&function_calls, 0);
+ erts_atomic32_set_mb(&doing_sys_schedule, 0);
+}
+
+static ERTS_INLINE int
+try_set_sys_scheduling(void)
+{
+ return 0 == erts_atomic32_cmpxchg_acqb(&doing_sys_schedule, 1, 0);
+}
+
+
+static ERTS_INLINE int
+prepare_for_sys_schedule(void)
+{
+ while (!erts_port_task_have_outstanding_io_tasks()
+ && try_set_sys_scheduling()) {
+ if (!erts_port_task_have_outstanding_io_tasks())
+ return 1;
+ clear_sys_scheduling();
+ }
+ return 0;
+}
+
+#else
+#define clear_sys_scheduling()
+#define prepare_for_sys_schedule() 0
+#endif
+
+#ifdef HARDDEBUG
+#define ERTS_HDBG_CHK_SLEEP_LIST(SL, L, F, FN) \
+ check_sleepers_list((SL), (L), (F), (FN))
+static void check_sleepers_list(ErtsSchedulerSleepList *sl,
+ int lock,
+ ErtsSchedulerSleepInfo *find,
+ ErtsSchedulerSleepInfo *find_not)
+{
+ ErtsSchedulerSleepInfo *last_out;
+ int found = 0;
+
+ if (lock)
+ erts_spin_lock(&sl->lock);
+
+ ERTS_ASSERT(!find_not || (!find_not->next && !find_not->prev));
+
+ last_out = sl->list;
+ if (last_out) {
+ ErtsSchedulerSleepInfo *tmp = last_out;
+ do {
+ ERTS_ASSERT(tmp->next);
+ ERTS_ASSERT(tmp->prev);
+ ERTS_ASSERT(tmp->next->prev == tmp);
+ ERTS_ASSERT(tmp->prev->next == tmp);
+ ERTS_ASSERT(tmp != find_not);
+ if (tmp == find)
+ found = !0;
+ tmp = tmp->next;
+
+ } while (tmp != last_out);
+ }
+ ERTS_ASSERT(!find || found);
+
+ if (lock)
+ erts_spin_unlock(&sl->lock);
+}
+#else
+#define ERTS_HDBG_CHK_SLEEP_LIST(SL, L, F, FN) ((void) 0)
+#endif
+
static void
scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
{
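prepare_for_sys_schedule() reinstates the scheduler-polling handshake: a normal scheduler may only go to sleep inside check_io when it can claim the single "doing sys schedule" slot and no I/O port tasks are outstanding. A stand-alone sketch of the claim/re-check/back-out shape using C11 atomics (the two variables stand in for the erts atomics serving the same purpose):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int  doing_sys_schedule;
    static atomic_long outstanding_io_tasks;

    static int try_set_sys_scheduling(void)
    {
        int expected = 0;
        return atomic_compare_exchange_strong(&doing_sys_schedule,
                                              &expected, 1);
    }

    static int prepare_for_sys_schedule(void)
    {
        /* claim the slot, then re-check the condition that may have
         * changed concurrently; back out and retry if it did */
        while (atomic_load(&outstanding_io_tasks) == 0
               && try_set_sys_scheduling()) {
            if (atomic_load(&outstanding_io_tasks) == 0)
                return 1;                             /* safe to poll */
            atomic_store(&doing_sys_schedule, 0);     /* raced; back out */
        }
        return 0;
    }

    int main(void)
    {
        printf("%d\n", prepare_for_sys_schedule());   /* 1 */
        return 0;
    }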
@@ -3249,23 +3336,29 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
- if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
- erts_spin_lock(&rq->sleepers.lock);
flgs = sched_prep_spin_wait(ssi);
if (flgs & ERTS_SSI_FLG_SUSPENDED) {
/* Go suspend instead... */
- if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
- erts_spin_unlock(&rq->sleepers.lock);
return;
}
if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) {
- ssi->prev = NULL;
- ssi->next = rq->sleepers.list;
- if (rq->sleepers.list)
- rq->sleepers.list->prev = ssi;
- rq->sleepers.list = ssi;
- erts_spin_unlock(&rq->sleepers.lock);
+ erts_spin_lock(&rq->sleepers.lock);
+ ERTS_HDBG_CHK_SLEEP_LIST(&rq->sleepers, 0, NULL, ssi);
+ ASSERT(!ssi->next); /* Not in sleepers list */
+ ASSERT(!ssi->prev);
+ if (!rq->sleepers.list) {
+ ssi->next = ssi->prev = ssi;
+ rq->sleepers.list = ssi;
+ }
+ else {
+ ssi->prev = rq->sleepers.list;
+ ssi->next = rq->sleepers.list->next;
+ ssi->prev->next = ssi;
+ ssi->next->prev = ssi;
+ }
+ ERTS_HDBG_CHK_SLEEP_LIST(&rq->sleepers, 0, ssi, NULL);
+ erts_spin_unlock(&rq->sleepers.lock);
dirty_active(esdp, -1);
}
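The dirty sleepers list becomes a circular doubly-linked list, so ssi->next == NULL can double as "not enqueued" and removal on wakeup is O(1) without scanning. A sketch of just the insert logic:

    #include <stdio.h>

    typedef struct node { struct node *next, *prev; } node;

    static void sleepers_insert(node **list, node *x)
    {
        if (!*list) {
            x->next = x->prev = x;     /* singleton ring */
            *list = x;
        } else {
            x->prev = *list;
            x->next = (*list)->next;
            x->prev->next = x;
            x->next->prev = x;
        }
    }

    int main(void)
    {
        node a = {0}, b = {0}, *list = NULL;
        sleepers_insert(&list, &a);
        sleepers_insert(&list, &b);
        printf("%d\n", list->next == &b && b.next == &a);   /* 1 */
        return 0;
    }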
@@ -3286,13 +3379,13 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp)) {
if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
}
aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1);
ERTS_MSACC_UPDATE_CACHE();
- if (aux_work && erts_thr_progress_update(esdp))
- erts_thr_progress_leader_update(esdp);
+ if (aux_work && erts_thr_progress_update(erts_thr_prgr_data(esdp)))
+ erts_thr_progress_leader_update(erts_thr_prgr_data(esdp));
}
if (aux_work) {
@@ -3301,7 +3394,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
current_time = erts_get_monotonic_time(esdp);
if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) {
if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
}
erts_bump_timers(esdp->timer_wheel, current_time);
@@ -3321,19 +3414,37 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
}
if (do_timeout) {
if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
}
}
- else {
+ else if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && prepare_for_sys_schedule()) {
+ /* We sleep in check_io, only for normal schedulers */
+ if (thr_prgr_active) {
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 0);
+ sched_wall_time_change(esdp, 0);
+ }
+ flgs = sched_spin_wait(ssi, 0);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_POLL_SLEEPING);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ erts_check_io(ssi->psi, timeout_time);
+ current_time = erts_get_monotonic_time(esdp);
+ }
+ }
+ *fcalls = 0;
+ clear_sys_scheduling();
+ } else {
if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
if (thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 0);
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 0);
sched_wall_time_change(esdp, 0);
}
- erts_thr_progress_prepare_wait(esdp);
+ erts_thr_progress_prepare_wait(erts_thr_prgr_data(esdp));
}
-
flgs = sched_spin_wait(ssi, spincount);
if (flgs & ERTS_SSI_FLG_SLEEPING) {
ASSERT(flgs & ERTS_SSI_FLG_WAITING);
@@ -3363,7 +3474,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
}
}
if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
- erts_thr_progress_finalize_wait(esdp);
+ erts_thr_progress_finalize_wait(erts_thr_prgr_data(esdp));
}
if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && current_time >= timeout_time)
erts_bump_timers(esdp->timer_wheel, current_time);
@@ -3389,10 +3500,30 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
(ERTS_SSI_FLG_SUSPENDED
| ERTS_SSI_FLG_MSB_EXEC));
- if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
dirty_sched_wall_time_change(esdp, working = 1);
+ erts_spin_lock(&rq->sleepers.lock);
+ ERTS_HDBG_CHK_SLEEP_LIST(&rq->sleepers, 0, ssi->next ? ssi : NULL, NULL);
+ if (ssi->next) { /* Still in list... */
+ if (ssi->next == ssi) {
+ ASSERT(rq->sleepers.list == ssi);
+ ASSERT(ssi->prev == ssi);
+ rq->sleepers.list = NULL;
+ }
+ else {
+ ASSERT(ssi->prev != ssi);
+ if (rq->sleepers.list == ssi)
+ rq->sleepers.list = ssi->next;
+ ssi->prev->next = ssi->next;
+ ssi->next->prev = ssi->prev;
+ }
+ ssi->next = ssi->prev = NULL;
+ }
+ ERTS_HDBG_CHK_SLEEP_LIST(&rq->sleepers, 0, NULL, ssi);
+ erts_spin_unlock(&rq->sleepers.lock);
+ }
else if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
}
@@ -3478,56 +3609,44 @@ wake_scheduler(ErtsRunQueue *rq)
}
static void
-wake_dirty_schedulers(ErtsRunQueue *rq, int one)
+wake_dirty_scheduler(ErtsRunQueue *rq)
{
- ErtsSchedulerSleepInfo *ssi;
+ ErtsSchedulerSleepInfo *lo_ssi, *fo_ssi;
ErtsSchedulerSleepList *sl;
ASSERT(ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
sl = &rq->sleepers;
erts_spin_lock(&sl->lock);
- ssi = sl->list;
- if (!ssi) {
+ ERTS_HDBG_CHK_SLEEP_LIST(&rq->sleepers, 0, NULL, NULL);
+ lo_ssi = sl->list;
+ if (!lo_ssi) {
erts_spin_unlock(&sl->lock);
- if (one)
- wake_scheduler(rq);
- } else if (one) {
+ wake_scheduler(rq);
+ }
+ else {
erts_aint32_t flgs;
- if (ssi->prev)
- ssi->prev->next = ssi->next;
- else {
- ASSERT(sl->list == ssi);
- sl->list = ssi->next;
+ fo_ssi = lo_ssi->next;
+ ASSERT(fo_ssi->prev == lo_ssi);
+ if (fo_ssi == lo_ssi) {
+ ASSERT(lo_ssi->prev == lo_ssi);
+ sl->list = NULL;
+ }
+ else {
+ ASSERT(lo_ssi->prev != lo_ssi);
+ lo_ssi->next = fo_ssi->next;
+ fo_ssi->next->prev = fo_ssi->prev;
}
- if (ssi->next)
- ssi->next->prev = ssi->prev;
-
- erts_spin_unlock(&sl->lock);
-
- ERTS_THR_MEMORY_BARRIER;
- flgs = ssi_flags_set_wake(ssi);
- erts_sched_finish_poke(ssi, flgs);
- } else {
- sl->list = NULL;
+ fo_ssi->next = fo_ssi->prev = NULL;
+ ERTS_HDBG_CHK_SLEEP_LIST(&rq->sleepers, 0, NULL, fo_ssi);
erts_spin_unlock(&sl->lock);
ERTS_THR_MEMORY_BARRIER;
- do {
- ErtsSchedulerSleepInfo *wake_ssi = ssi;
- ssi = ssi->next;
- erts_sched_finish_poke(wake_ssi, ssi_flags_set_wake(wake_ssi));
- } while (ssi);
+ flgs = ssi_flags_set_wake(fo_ssi);
+ erts_sched_finish_poke(fo_ssi, flgs);
}
}
-static void
-wake_dirty_scheduler(ErtsRunQueue *rq)
-{
- wake_dirty_schedulers(rq, 1);
-}
-
-
#define ERTS_NO_USED_RUNQS_SHIFT 16
#define ERTS_NO_RUNQS_MASK 0xffffU
@@ -5118,7 +5237,6 @@ erts_fprintf(stderr, "--------------------------------\n");
rq->out_of_work_count = 0;
(void) ERTS_RUNQ_FLGS_READ_BSET(rq, ERTS_RUNQ_FLGS_MIGRATION_INFO, flags);
-
rq->max_len = erts_atomic32_read_dirty(&rq->len);
for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) {
ErtsRunQueueInfo *rqi;
@@ -5557,7 +5675,6 @@ erts_sched_set_busy_wait_threshold(ErtsSchedType sched_type, char *str)
return EINVAL;
}
- params->sys_schedule = sys_sched;
params->tse = sys_sched * ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT;
params->aux_work = sys_sched * aux_work_fact;
@@ -5768,6 +5885,10 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online, int no_poll_th
size_runqs = sizeof(ErtsAlignedRunQueue) * tot_rqs;
erts_aligned_run_queues =
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_RUNQS, size_runqs);
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ erts_atomic32_init_nob(&doing_sys_schedule, 0);
+ erts_atomic32_init_nob(&function_calls, 0);
+#endif
erts_atomic32_init_nob(&no_empty_run_queues, 0);
erts_no_run_queues = n;
@@ -5882,6 +6003,8 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online, int no_poll_th
for (ix = 0; ix < no_dirty_cpu_schedulers; ix++) {
ErtsSchedulerSleepInfo *ssi = &aligned_dirty_cpu_sched_sleep_info[ix].ssi;
erts_atomic32_init_nob(&ssi->flags, 0);
+ ssi->next = NULL;
+ ssi->prev = NULL;
ssi->event = NULL; /* initialized in sched_dirty_cpu_thread_func */
erts_atomic32_init_nob(&ssi->aux_work, 0);
}
@@ -5892,6 +6015,8 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online, int no_poll_th
for (ix = 0; ix < no_dirty_io_schedulers; ix++) {
ErtsSchedulerSleepInfo *ssi = &aligned_dirty_io_sched_sleep_info[ix].ssi;
erts_atomic32_init_nob(&ssi->flags, 0);
+ ssi->next = NULL;
+ ssi->prev = NULL;
ssi->event = NULL; /* initialized in sched_dirty_io_thread_func */
erts_atomic32_init_nob(&ssi->aux_work, 0);
}
@@ -7411,7 +7536,13 @@ suspend_scheduler(ErtsSchedulerData *esdp)
return;
}
+#ifdef HARDDEBUG
+ if (sched_type != ERTS_SCHED_NORMAL)
+ ERTS_HDBG_CHK_SLEEP_LIST(&esdp->run_queue->sleepers, !0, NULL, ssi);
+#endif
+
if (erts_atomic32_read_nob(&ssi->flags) & ERTS_SSI_FLG_MSB_EXEC) {
+
ASSERT(no == 1);
if (!msb_scheduler_type_switch(sched_type, esdp, no))
return;
@@ -7565,7 +7696,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
if (aux_work|evacuate) {
if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ erts_thr_progress_active(erts_thr_prgr_data(esdp),
+ thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
}
if (aux_work)
@@ -7573,8 +7705,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
aux_work,
1);
- if (aux_work && erts_thr_progress_update(esdp))
- erts_thr_progress_leader_update(esdp);
+ if (aux_work && erts_thr_progress_update(erts_thr_prgr_data(esdp)))
+ erts_thr_progress_leader_update(erts_thr_prgr_data(esdp));
if (evacuate) {
erts_runq_lock(esdp->run_queue);
evacuate_run_queue(esdp->run_queue, &sbp);
@@ -7593,18 +7725,18 @@ suspend_scheduler(ErtsSchedulerData *esdp)
if (!aux_work && current_time < timeout_time) {
/* go to sleep... */
if (thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 0);
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 0);
sched_wall_time_change(esdp, 0);
}
- erts_thr_progress_prepare_wait(NULL);
+ erts_thr_progress_prepare_wait(erts_thr_prgr_data(NULL));
suspend_normal_scheduler_sleep(esdp);
- erts_thr_progress_finalize_wait(NULL);
+ erts_thr_progress_finalize_wait(erts_thr_prgr_data(NULL));
current_time = erts_get_monotonic_time(esdp);
}
if (current_time >= timeout_time) {
if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
}
erts_bump_timers(esdp->timer_wheel, current_time);
@@ -7661,7 +7793,7 @@ suspend_scheduler(ErtsSchedulerData *esdp)
profile_scheduler(make_small(esdp->no), am_active);
if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
}
}
@@ -8296,6 +8428,11 @@ sched_thread_func(void *vesdp)
erts_msacc_init_thread("scheduler", no, 1);
erts_thr_progress_register_managed_thread(esdp, &callbacks, 0);
+
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ esdp->ssi->psi = erts_create_pollset_thread(-1, NULL);
+#endif
+
erts_alloc_register_scheduler(vesdp);
#ifdef ERTS_ENABLE_LOCK_CHECK
{
@@ -9105,7 +9242,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
Process *proxy_p = NULL;
ErtsRunQueue *rq;
int context_reds;
- int fcalls;
+ int fcalls = 0;
int actual_reds;
int reds;
Uint32 flags;
@@ -9179,6 +9316,10 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
reds = ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST;
esdp->virtual_reds = 0;
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ fcalls = (int) erts_atomic32_add_read_acqb(&function_calls, reds);
+#endif
+
ASSERT(esdp && esdp == erts_get_scheduler_data());
rq = erts_get_runq_current(esdp);
@@ -9313,12 +9454,12 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
}
}
- leader_update = erts_thr_progress_update(esdp);
+ leader_update = erts_thr_progress_update(erts_thr_prgr_data(esdp));
aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work);
if (aux_work | leader_update) {
erts_runq_unlock(rq);
if (leader_update)
- erts_thr_progress_leader_update(esdp);
+ erts_thr_progress_leader_update(erts_thr_prgr_data(esdp));
if (aux_work)
handle_aux_work(&esdp->aux_work_data, aux_work, 0);
erts_runq_lock(rq);
@@ -9395,7 +9536,33 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
non_empty_runq(rq);
goto check_activities_to_run;
- }
+ } else if (is_normal_sched &&
+ fcalls > (2 * context_reds) &&
+ prepare_for_sys_schedule()) {
+ ErtsMonotonicTime current_time;
+ /*
+ * Schedule system-level activities.
+ */
+
+ ERTS_MSACC_PUSH_STATE_CACHED_M();
+
+ erts_runq_unlock(rq);
+
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_CHECK_IO);
+ LTTNG2(scheduler_poll, esdp->no, 1);
+
+ erts_check_io(esdp->ssi->psi, ERTS_POLL_NO_TIMEOUT);
+ ERTS_MSACC_POP_STATE_M();
+
+ current_time = erts_get_monotonic_time(esdp);
+ if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref))
+ erts_bump_timers(esdp->timer_wheel, current_time);
+
+ erts_runq_lock(rq);
+ fcalls = 0;
+ clear_sys_scheduling();
+ goto continue_check_activities_to_run;
+ }
if (flags & ERTS_RUNQ_FLG_MISC_OP)
exec_misc_ops(rq);
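
The prepare_for_sys_schedule() helper added above claims a single poller slot with an acquire cmpxchg and then re-checks for outstanding I/O tasks, releasing the slot and retrying if the re-check fails. A minimal standalone analogue of that claim/re-check pattern, sketched with C11 atomics rather than the erts atomics API (all names below are illustrative, not erts functions):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int doing_sys_schedule;

    /* Stub standing in for erts_port_task_have_outstanding_io_tasks(). */
    static bool have_outstanding_io_tasks(void) { return false; }

    /* Claim the poller slot, then re-check the condition so a task
     * enqueued concurrently with the cmpxchg cannot be missed. */
    static bool prepare_for_sys_schedule(void)
    {
        while (!have_outstanding_io_tasks()) {
            int expected = 0;
            if (!atomic_compare_exchange_strong(&doing_sys_schedule,
                                                &expected, 1))
                return false;           /* another scheduler is polling */
            if (!have_outstanding_io_tasks())
                return true;            /* slot claimed, nothing pending */
            atomic_store(&doing_sys_schedule, 0);   /* undo and retry */
        }
        return false;
    }

The re-check after the successful cmpxchg appears to be the load-bearing part: without it, an I/O task enqueued between the loop test and the claim could be left waiting while this scheduler sleeps in erts_check_io().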
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 8d20ccdf90..43937f216c 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -174,7 +174,7 @@ extern int erts_dio_sched_thread_suggested_stack_size;
#define ERTS_RUNQ_FLG_HALTING \
(((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 10))
-#define ERTS_RUNQ_FLG_MAX (ERTS_RUNQ_FLG_BASE2 + 11)
+#define ERTS_RUNQ_FLG_MAX (ERTS_RUNQ_FLG_BASE2 + 12)
#define ERTS_RUNQ_FLGS_MIGRATION_QMASKS \
(ERTS_RUNQ_FLGS_EMIGRATE_QMASK \
@@ -357,7 +357,7 @@ typedef struct ErtsSchedulerSleepInfo_ ErtsSchedulerSleepInfo;
typedef struct {
erts_spinlock_t lock;
- ErtsSchedulerSleepInfo *list;
+ ErtsSchedulerSleepInfo *list; /* circular lifo list; points to last out */
} ErtsSchedulerSleepList;
struct ErtsSchedulerSleepInfo_ {
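
The sleepers list is now a circular doubly linked structure used as a LIFO: rq->sleepers.list points at the sleeper that went to sleep first ("last out"), scheduler_wait() links each new sleeper in right after it, and wake_dirty_scheduler() pops list->next, the most recent sleeper. The same discipline on a bare struct (names illustrative; the real code holds sleepers.lock around both operations):

    #include <assert.h>
    #include <stddef.h>

    struct sleeper {
        struct sleeper *next, *prev;   /* NULL <=> not in any list */
    };

    /* Link s in right after *list. *list keeps pointing at the sleeper
     * that has slept longest, so it is the last one to be woken. */
    static void sleepers_push(struct sleeper **list, struct sleeper *s)
    {
        assert(!s->next && !s->prev);
        if (!*list) {
            s->next = s->prev = s;
            *list = s;
        }
        else {
            s->prev = *list;
            s->next = (*list)->next;
            s->prev->next = s;
            s->next->prev = s;
        }
    }

    /* Unlink and return (*list)->next, i.e. the most recent sleeper. */
    static struct sleeper *sleepers_pop(struct sleeper **list)
    {
        struct sleeper *lo = *list, *fo;
        if (!lo)
            return NULL;
        fo = lo->next;                 /* "first out" */
        if (fo == lo)
            *list = NULL;              /* fo was the only element */
        else {
            lo->next = fo->next;       /* bridge over fo */
            fo->next->prev = lo;
        }
        fo->next = fo->prev = NULL;
        return fo;
    }

Waking in LIFO order presumably favors recently active, cache-warm dirty schedulers while the longest sleepers stay parked.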
diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c
index 243db4c734..ac5054ea10 100644
--- a/erts/emulator/beam/erl_process_dump.c
+++ b/erts/emulator/beam/erl_process_dump.c
@@ -58,6 +58,7 @@ static void dump_externally(fmtfn_t to, void *to_arg, Eterm term);
static void mark_literal(Eterm* ptr);
static void init_literal_areas(void);
static void dump_literals(fmtfn_t to, void *to_arg);
+static void dump_persistent_terms(fmtfn_t to, void *to_arg);
static void dump_module_literals(fmtfn_t to, void *to_arg,
ErtsLiteralArea* lit_area);
@@ -74,6 +75,7 @@ erts_deep_process_dump(fmtfn_t to, void *to_arg)
all_binaries = NULL;
init_literal_areas();
+ erts_init_persistent_dumping();
for (i = 0; i < max; i++) {
Process *p = erts_pix2proc(i);
@@ -93,6 +95,7 @@ erts_deep_process_dump(fmtfn_t to, void *to_arg)
}
}
+ dump_persistent_terms(to, to_arg);
dump_literals(to, to_arg);
dump_binaries(to, to_arg, all_binaries);
}
@@ -775,6 +778,9 @@ init_literal_areas(void)
qsort(lit_areas, num_lit_areas, sizeof(ErtsLiteralArea *),
compare_areas);
+ qsort(erts_persistent_areas, erts_num_persistent_areas,
+ sizeof(ErtsLiteralArea *), compare_areas);
+
erts_runlock_old_code(code_ix);
}
@@ -796,6 +802,13 @@ static void mark_literal(Eterm* ptr)
ap = bsearch(ptr, lit_areas, num_lit_areas, sizeof(ErtsLiteralArea*),
search_areas);
+ if (ap == 0) {
+ ap = bsearch(ptr, erts_persistent_areas,
+ erts_num_persistent_areas,
+ sizeof(ErtsLiteralArea*),
+ search_areas);
+ }
+
/*
* If the literal was created by native code, this search will not
@@ -807,12 +820,12 @@ static void mark_literal(Eterm* ptr)
}
}
-
static void
dump_literals(fmtfn_t to, void *to_arg)
{
ErtsCodeIndex code_ix;
int i;
+ Uint idx;
code_ix = erts_active_code_ix();
erts_rlock_old_code(code_ix);
@@ -825,6 +838,28 @@ dump_literals(fmtfn_t to, void *to_arg)
}
erts_runlock_old_code(code_ix);
+
+ for (idx = 0; idx < erts_num_persistent_areas; idx++) {
+ dump_module_literals(to, to_arg, erts_persistent_areas[idx]);
+ }
+}
+
+static void
+dump_persistent_terms(fmtfn_t to, void *to_arg)
+{
+ Uint idx;
+
+ erts_print(to, to_arg, "=persistent_terms\n");
+
+ for (idx = 0; idx < erts_num_persistent_areas; idx++) {
+ ErtsLiteralArea* ap = erts_persistent_areas[idx];
+ Eterm tuple = make_tuple(ap->start);
+ Eterm* tup_val = tuple_val(tuple);
+
+ dump_element(to, to_arg, tup_val[1]);
+ erts_putc(to, to_arg, '|');
+ dump_element_nl(to, to_arg, tup_val[2]);
+ }
}
static void
@@ -963,7 +998,8 @@ dump_module_literals(fmtfn_t to, void *to_arg, ErtsLiteralArea* lit_area)
}
erts_putc(to, to_arg, '\n');
}
- } else if (is_export_header(w)) {
+ } else {
+ /* Dump everything else in the external format */
dump_externally(to, to_arg, term);
erts_putc(to, to_arg, '\n');
}
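
mark_literal() above locates the area owning a pointer by bsearch()ing arrays of areas sorted by start address, now falling back to the persistent-term areas. The trick is a comparator that treats each element as a [start, end) range and reports which side of it the key pointer lies on. A self-contained sketch of that idiom (types and names illustrative):

    #include <stdlib.h>

    typedef struct { const char *start, *end; } Area;   /* [start, end) */

    /* bsearch() comparator: the key is the raw pointer itself. */
    static int search_areas(const void *key, const void *elem)
    {
        const char *ptr = key;
        const Area *ap = *(const Area *const *)elem;
        if (ptr < ap->start) return -1;
        if (ptr >= ap->end)  return 1;
        return 0;                       /* ptr falls inside this area */
    }

    /* areas[] must be sorted by start address and non-overlapping,
     * which is what the qsort() calls in init_literal_areas() ensure. */
    static Area *find_area(const void *ptr, Area **areas, size_t n)
    {
        Area **hit = bsearch(ptr, areas, n, sizeof areas[0], search_areas);
        return hit ? *hit : NULL;
    }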
diff --git a/erts/emulator/beam/erl_sched_spec_pre_alloc.h b/erts/emulator/beam/erl_sched_spec_pre_alloc.h
index b119c59ab3..74cc966cbe 100644
--- a/erts/emulator/beam/erl_sched_spec_pre_alloc.h
+++ b/erts/emulator/beam/erl_sched_spec_pre_alloc.h
@@ -188,6 +188,7 @@ erts_sspa_alloc(erts_sspa_data_t *data, int cix)
erts_sspa_chunk_t *chnk;
erts_sspa_chunk_header_t *chdr;
erts_sspa_blk_t *res;
+ ERTS_MSACC_PUSH_AND_SET_STATE_M_X(ERTS_MSACC_STATE_ALLOC);
chnk = erts_sspa_cix2chunk(data, cix);
chdr = &chnk->aligned.header;
@@ -201,11 +202,15 @@ erts_sspa_alloc(erts_sspa_data_t *data, int cix)
chdr->local.last = NULL;
ERTS_SSPA_DBG_CHK_LCL(chdr);
}
- if (chdr->local.cnt <= chdr->local.lim)
- return (char *) erts_sspa_process_remote_frees(chdr, res);
+ if (chdr->local.cnt <= chdr->local.lim) {
+ res = erts_sspa_process_remote_frees(chdr, res);
+ ERTS_MSACC_POP_STATE_M_X();
+ return (char*) res;
+ }
else if (chdr->head.no_thr_progress_check < ERTS_SSPA_FORCE_THR_CHECK_PROGRESS)
chdr->head.no_thr_progress_check++;
ASSERT(res);
+ ERTS_MSACC_POP_STATE_M_X();
return (char *) res;
}
diff --git a/erts/emulator/beam/erl_thr_progress.c b/erts/emulator/beam/erl_thr_progress.c
index aa08eb40ec..bac437efe9 100644
--- a/erts/emulator/beam/erl_thr_progress.c
+++ b/erts/emulator/beam/erl_thr_progress.c
@@ -508,6 +508,10 @@ init_wakeup_request_array(ErtsThrPrgrVal *w)
}
}
+ErtsThrPrgrData *erts_thr_progress_data(void) {
+ return erts_tsd_get(erts_thr_prgr_data_key__);
+}
+
void
erts_thr_progress_register_unmanaged_thread(ErtsThrPrgrCallbacks *callbacks)
{
@@ -551,7 +555,7 @@ erts_thr_progress_register_unmanaged_thread(ErtsThrPrgrCallbacks *callbacks)
}
-void
+ErtsThrPrgrData *
erts_thr_progress_register_managed_thread(ErtsSchedulerData *esdp,
ErtsThrPrgrCallbacks *callbacks,
int pref_wakeup)
@@ -630,6 +634,7 @@ erts_thr_progress_register_managed_thread(ErtsSchedulerData *esdp,
wakeup_managed(id);
}
callbacks->finalize_wait(callbacks->arg);
+ return tpd;
}
static ERTS_INLINE int
@@ -796,7 +801,7 @@ leader_update(ErtsThrPrgrData *tpd)
== ERTS_THR_PRGR_LFLG_NO_LEADER))
&& got_sched_wakeups()) {
/* Someone needs to make progress */
- wakeup_managed(0);
+ wakeup_managed(tpd->id);
}
}
}
@@ -849,23 +854,22 @@ update(ErtsThrPrgrData *tpd)
}
int
-erts_thr_progress_update(ErtsSchedulerData *esdp)
+erts_thr_progress_update(ErtsThrPrgrData *tpd)
{
- return update(thr_prgr_data(esdp));
+ return update(tpd);
}
int
-erts_thr_progress_leader_update(ErtsSchedulerData *esdp)
+erts_thr_progress_leader_update(ErtsThrPrgrData *tpd)
{
- return leader_update(thr_prgr_data(esdp));
+ return leader_update(tpd);
}
void
-erts_thr_progress_prepare_wait(ErtsSchedulerData *esdp)
+erts_thr_progress_prepare_wait(ErtsThrPrgrData *tpd)
{
erts_aint32_t lflgs;
- ErtsThrPrgrData *tpd = thr_prgr_data(esdp);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_check_exact(NULL, 0);
@@ -884,14 +888,13 @@ erts_thr_progress_prepare_wait(ErtsSchedulerData *esdp)
== ERTS_THR_PRGR_LFLG_NO_LEADER
&& got_sched_wakeups()) {
/* Someone needs to make progress */
- wakeup_managed(0);
+ wakeup_managed(tpd->id);
}
}
void
-erts_thr_progress_finalize_wait(ErtsSchedulerData *esdp)
+erts_thr_progress_finalize_wait(ErtsThrPrgrData *tpd)
{
- ErtsThrPrgrData *tpd = thr_prgr_data(esdp);
ErtsThrPrgrVal current, val;
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -921,9 +924,8 @@ erts_thr_progress_finalize_wait(ErtsSchedulerData *esdp)
}
void
-erts_thr_progress_active(ErtsSchedulerData *esdp, int on)
+erts_thr_progress_active(ErtsThrPrgrData *tpd, int on)
{
- ErtsThrPrgrData *tpd = thr_prgr_data(esdp);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_check_exact(NULL, 0);
@@ -973,7 +975,7 @@ unmanaged_continue(ErtsThrPrgrDelayHandle handle)
== (ERTS_THR_PRGR_LFLG_NO_LEADER|ERTS_THR_PRGR_LFLG_WAITING_UM)
&& got_sched_wakeups()) {
/* Others waiting for us... */
- wakeup_managed(0);
+ wakeup_managed(1);
}
}
}
@@ -1182,10 +1184,10 @@ request_wakeup_unmanaged(ErtsThrPrgrData *tpd, ErtsThrPrgrVal value)
}
void
-erts_thr_progress_wakeup(ErtsSchedulerData *esdp,
+erts_thr_progress_wakeup(ErtsThrPrgrData *tpd,
ErtsThrPrgrVal value)
{
- ErtsThrPrgrData *tpd = thr_prgr_data(esdp);
+
ASSERT(!tpd->is_temporary);
if (tpd->is_managed)
request_wakeup_managed(tpd, value);
diff --git a/erts/emulator/beam/erl_thr_progress.h b/erts/emulator/beam/erl_thr_progress.h
index 8329995b24..00a9e61407 100644
--- a/erts/emulator/beam/erl_thr_progress.h
+++ b/erts/emulator/beam/erl_thr_progress.h
@@ -123,22 +123,24 @@ extern ErtsThrPrgr erts_thr_prgr__;
void erts_thr_progress_pre_init(void);
void erts_thr_progress_init(int no_schedulers, int managed, int unmanaged);
-void erts_thr_progress_register_managed_thread(ErtsSchedulerData *esdp,
- ErtsThrPrgrCallbacks *,
- int);
+ErtsThrPrgrData *erts_thr_progress_register_managed_thread(
+ ErtsSchedulerData *esdp, ErtsThrPrgrCallbacks *, int);
void erts_thr_progress_register_unmanaged_thread(ErtsThrPrgrCallbacks *);
-void erts_thr_progress_active(ErtsSchedulerData *esdp, int on);
-void erts_thr_progress_wakeup(ErtsSchedulerData *esdp,
+void erts_thr_progress_active(ErtsThrPrgrData *, int on);
+void erts_thr_progress_wakeup(ErtsThrPrgrData *,
ErtsThrPrgrVal value);
-int erts_thr_progress_update(ErtsSchedulerData *esdp);
-int erts_thr_progress_leader_update(ErtsSchedulerData *esdp);
-void erts_thr_progress_prepare_wait(ErtsSchedulerData *esdp);
-void erts_thr_progress_finalize_wait(ErtsSchedulerData *esdp);
+int erts_thr_progress_update(ErtsThrPrgrData *);
+int erts_thr_progress_leader_update(ErtsThrPrgrData *);
+void erts_thr_progress_prepare_wait(ErtsThrPrgrData *);
+void erts_thr_progress_finalize_wait(ErtsThrPrgrData *);
ErtsThrPrgrDelayHandle erts_thr_progress_unmanaged_delay__(void);
void erts_thr_progress_unmanaged_continue__(int umrefc_ix);
+ErtsThrPrgrData *erts_thr_progress_data(void);
void erts_thr_progress_dbg_print_state(void);
+ERTS_GLB_INLINE ErtsThrPrgrData *erts_thr_prgr_data(ErtsSchedulerData *esdp);
+
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_read_nob__(ERTS_THR_PRGR_ATOMIC *atmc);
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_read_acqb__(ERTS_THR_PRGR_ATOMIC *atmc);
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_read_mb__(ERTS_THR_PRGR_ATOMIC *atmc);
@@ -161,6 +163,15 @@ ERTS_GLB_INLINE int erts_thr_progress_has_reached(ErtsThrPrgrVal val);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE ErtsThrPrgrData *
+erts_thr_prgr_data(ErtsSchedulerData *esdp) {
+ if (esdp) {
+ return &esdp->thr_progress_data;
+ } else {
+ return erts_thr_progress_data();
+ }
+}
+
ERTS_GLB_INLINE ErtsThrPrgrVal
erts_thr_prgr_read_nob__(ERTS_THR_PRGR_ATOMIC *atmc)
{
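
The new erts_thr_prgr_data(esdp) inline returns the progress data embedded in the scheduler when an ErtsSchedulerData is at hand and falls back to a thread-specific-data lookup (erts_thr_progress_data()) for managed non-scheduler threads. The generic shape of that accessor, sketched with a C11 thread-local instead of the erts TSD key (names illustrative):

    #include <stddef.h>

    typedef struct { int id; } ThrPrgrData;
    typedef struct { ThrPrgrData thr_progress_data; } SchedulerData;

    /* Set when a managed thread registers itself (cf. the new return
     * value of erts_thr_progress_register_managed_thread()). */
    static _Thread_local ThrPrgrData *registered_tpd;

    /* Prefer the handle embedded in the caller's scheduler data; fall
     * back to the per-thread registration for non-scheduler threads. */
    static ThrPrgrData *thr_prgr_data(SchedulerData *esdp)
    {
        return esdp ? &esdp->thr_progress_data : registered_tpd;
    }

Making registration return the ErtsThrPrgrData pointer also lets threads such as the sys-msg dispatcher cache the handle once instead of doing a TSD lookup on every update, which is exactly what the erl_trace.c hunks below do.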
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 53a020e7a5..ae7084b7f4 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -72,6 +72,7 @@ static ErtsTracer default_port_tracer;
static Eterm system_monitor;
static Eterm system_profile;
+static erts_atomic_t system_logger;
#ifdef HAVE_ERTS_NOW_CPU
int erts_cpu_timestamp;
@@ -340,6 +341,7 @@ void erts_init_trace(void) {
default_port_trace_flags = F_INITIAL_TRACE_FLAGS;
default_port_tracer = erts_tracer_nil;
system_seq_tracer = erts_tracer_nil;
+ erts_atomic_init_nob(&system_logger, am_logger);
init_sys_msg_dispatcher();
init_tracer_nif();
}
@@ -2027,10 +2029,24 @@ enqueue_sys_msg(enum ErtsSysMsgType type,
erts_mtx_unlock(&smq_mtx);
}
+Eterm
+erts_get_system_logger(void)
+{
+ return (Eterm)erts_atomic_read_nob(&system_logger);
+}
+
+Eterm
+erts_set_system_logger(Eterm logger)
+{
+ if (logger != am_logger && logger != am_undefined && !is_internal_pid(logger))
+ return THE_NON_VALUE;
+ return (Eterm)erts_atomic_xchg_nob(&system_logger, logger);
+}
+
void
erts_queue_error_logger_message(Eterm from, Eterm msg, ErlHeapFragment *bp)
{
- enqueue_sys_msg(SYS_MSG_TYPE_ERRLGR, from, am_logger, msg, bp);
+ enqueue_sys_msg(SYS_MSG_TYPE_ERRLGR, from, erts_get_system_logger(), msg, bp);
}
void
@@ -2177,6 +2193,7 @@ sys_msg_dispatcher_func(void *unused)
{
ErtsThrPrgrCallbacks callbacks;
ErtsSysMsgQ *local_sys_message_queue = NULL;
+ ErtsThrPrgrData *tpd;
int wait = 0;
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -2189,7 +2206,7 @@ sys_msg_dispatcher_func(void *unused)
callbacks.wait = sys_msg_dispatcher_wait;
callbacks.finalize_wait = sys_msg_dispatcher_fin_wait;
- erts_thr_progress_register_managed_thread(NULL, &callbacks, 0);
+ tpd = erts_thr_progress_register_managed_thread(NULL, &callbacks, 0);
while (1) {
int end_wait = 0;
@@ -2210,8 +2227,8 @@ sys_msg_dispatcher_func(void *unused)
if (!sys_message_queue) {
erts_mtx_unlock(&smq_mtx);
end_wait = 1;
- erts_thr_progress_active(NULL, 0);
- erts_thr_progress_prepare_wait(NULL);
+ erts_thr_progress_active(tpd, 0);
+ erts_thr_progress_prepare_wait(tpd);
erts_mtx_lock(&smq_mtx);
}
@@ -2225,8 +2242,8 @@ sys_msg_dispatcher_func(void *unused)
erts_mtx_unlock(&smq_mtx);
if (end_wait) {
- erts_thr_progress_finalize_wait(NULL);
- erts_thr_progress_active(NULL, 1);
+ erts_thr_progress_finalize_wait(tpd);
+ erts_thr_progress_active(tpd, 1);
}
/* Send trace messages ... */
@@ -2239,8 +2256,8 @@ sys_msg_dispatcher_func(void *unused)
Process *proc = NULL;
Port *port = NULL;
- if (erts_thr_progress_update(NULL))
- erts_thr_progress_leader_update(NULL);
+ if (erts_thr_progress_update(tpd))
+ erts_thr_progress_leader_update(tpd);
#ifdef DEBUG_PRINTOUTS
print_msg_type(smqp);
@@ -2270,7 +2287,7 @@ sys_msg_dispatcher_func(void *unused)
}
break;
case SYS_MSG_TYPE_ERRLGR:
- receiver = am_logger;
+ receiver = smqp->to;
break;
default:
receiver = NIL;
@@ -2284,8 +2301,15 @@ sys_msg_dispatcher_func(void *unused)
if (is_internal_pid(receiver)) {
proc = erts_pid2proc(NULL, 0, receiver, proc_locks);
if (!proc) {
- /* Bad tracer */
- goto failure;
+ if (smqp->type == SYS_MSG_TYPE_ERRLGR) {
+ /* Bad logger process, send to kernel 'logger' process */
+ erts_set_system_logger(am_logger);
+ receiver = erts_get_system_logger();
+ goto logger;
+ } else {
+ /* Bad tracer */
+ goto failure;
+ }
}
else {
ErtsMessage *mp;
@@ -2298,9 +2322,9 @@ sys_msg_dispatcher_func(void *unused)
#endif
erts_proc_unlock(proc, proc_locks);
}
- }
- else if (receiver == am_logger) {
- proc = erts_whereis_process(NULL,0,receiver,proc_locks,0);
+ } else if (receiver == am_logger) {
+ logger:
+ proc = erts_whereis_process(NULL,0,am_logger,proc_locks,0);
if (!proc)
goto failure;
else if (smqp->from == proc->common.id)
@@ -2308,7 +2332,10 @@ sys_msg_dispatcher_func(void *unused)
else
goto queue_proc_msg;
}
- else if (is_internal_port(receiver)) {
+ else if (receiver == am_undefined) {
+ goto drop_sys_msg;
+ }
+ else if (is_internal_port(receiver)) {
port = erts_thr_id2port_sflgs(receiver,
ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
if (!port)
@@ -2365,7 +2392,7 @@ erts_foreach_sys_msg_in_q(void (*func)(Eterm,
to = erts_get_system_profile();
break;
case SYS_MSG_TYPE_ERRLGR:
- to = am_logger;
+ to = erts_get_system_logger();
break;
default:
to = NIL;
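
system_logger is kept in a bare atomic word so the hot path in erts_queue_error_logger_message() can read it without locking, and erts_set_system_logger() validates the new value before exchanging it, handing the previous setting back to the caller. When the dispatcher later finds the configured pid dead, it swaps am_logger back in and re-routes the message, as in the hunk above. A compilable miniature of the holder (Eterm stand-in and constants illustrative):

    #include <stdatomic.h>
    #include <stdint.h>

    typedef uintptr_t Eterm;

    #define AM_LOGGER     ((Eterm)1)   /* stand-ins for the real terms */
    #define AM_UNDEFINED  ((Eterm)2)
    #define THE_NON_VALUE ((Eterm)0)

    static atomic_uintptr_t the_logger = AM_LOGGER;

    static int is_internal_pid(Eterm t) { (void)t; return 0; }  /* stub */

    /* Reject anything that is not 'logger', 'undefined' or a local pid;
     * otherwise swap and hand the previous setting back to the caller. */
    Eterm set_system_logger(Eterm logger)
    {
        if (logger != AM_LOGGER && logger != AM_UNDEFINED
            && !is_internal_pid(logger))
            return THE_NON_VALUE;
        return (Eterm)atomic_exchange(&the_logger, logger);
    }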
diff --git a/erts/emulator/beam/erl_trace.h b/erts/emulator/beam/erl_trace.h
index bccf31606e..b7844d1cb0 100644
--- a/erts/emulator/beam/erl_trace.h
+++ b/erts/emulator/beam/erl_trace.h
@@ -94,6 +94,8 @@ void erts_foreach_sys_msg_in_q(void (*func)(Eterm,
Eterm,
Eterm,
ErlHeapFragment *));
+Eterm erts_set_system_logger(Eterm);
+Eterm erts_get_system_logger(void);
void erts_queue_error_logger_message(Eterm, Eterm, ErlHeapFragment *);
void erts_send_sys_msg_proc(Eterm, Eterm, Eterm, ErlHeapFragment *);
diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h
index b3bfa69052..880febba8b 100644
--- a/erts/emulator/beam/erl_utils.h
+++ b/erts/emulator/beam/erl_utils.h
@@ -22,6 +22,7 @@
#define ERL_UTILS_H__
#include "sys.h"
+#include "atom.h"
#include "erl_printf.h"
struct process;
@@ -112,10 +113,12 @@ int eq(Eterm, Eterm);
#define EQ(x,y) (((x) == (y)) || (is_not_both_immed((x),(y)) && eq((x),(y))))
-int erts_cmp_atoms(Eterm a, Eterm b);
-Sint erts_cmp(Eterm, Eterm, int, int);
-Sint erts_cmp_compound(Eterm, Eterm, int, int);
+ERTS_GLB_INLINE Sint erts_cmp(Eterm, Eterm, int, int);
+ERTS_GLB_INLINE int erts_cmp_atoms(Eterm a, Eterm b);
+
Sint cmp(Eterm a, Eterm b);
+Sint erts_cmp_compound(Eterm, Eterm, int, int);
+
#define CMP(A,B) erts_cmp(A,B,0,0)
#define CMP_TERM(A,B) erts_cmp(A,B,1,0)
#define CMP_EQ_ONLY(A,B) erts_cmp(A,B,0,1)
@@ -150,4 +153,56 @@ Sint cmp(Eterm a, Eterm b);
if (erts_cmp_compound(X,Y,0,EqOnly) Op 0) { Action; }; \
}
+#define erts_float_comp(x,y) (((x)<(y)) ? -1 : (((x)==(y)) ? 0 : 1))
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE int erts_cmp_atoms(Eterm a, Eterm b) {
+ Atom *aa = atom_tab(atom_val(a));
+ Atom *bb = atom_tab(atom_val(b));
+
+ byte *name_a, *name_b;
+ int len_a, len_b, diff;
+
+ diff = aa->ord0 - bb->ord0;
+
+ if (diff != 0) {
+ return diff;
+ }
+
+ name_a = &aa->name[3];
+ name_b = &bb->name[3];
+ len_a = aa->len-3;
+ len_b = bb->len-3;
+
+ if (len_a > 0 && len_b > 0) {
+ diff = sys_memcmp(name_a, name_b, MIN(len_a, len_b));
+
+ if (diff != 0) {
+ return diff;
+ }
+ }
+
+ return len_a - len_b;
+}
+
+ERTS_GLB_INLINE Sint erts_cmp(Eterm a, Eterm b, int exact, int eq_only) {
+ if (is_atom(a) && is_atom(b)) {
+ return erts_cmp_atoms(a, b);
+ } else if (is_both_small(a, b)) {
+ return (signed_val(a) - signed_val(b));
+ } else if (is_float(a) && is_float(b)) {
+ FloatDef af, bf;
+
+ GET_DOUBLE(a, af);
+ GET_DOUBLE(b, bf);
+
+ return erts_float_comp(af.fd, bf.fd);
+ }
+
+ return erts_cmp_compound(a,b,exact,eq_only);
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
#endif
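
The inlined erts_cmp_atoms() above turns most atom comparisons into a single integer subtraction on the precomputed ord0 fields; only on a tie does it memcmp the names from byte 3 onward. For that subtraction to be a valid comparison result, ord0 must be an order-preserving prefix whose difference cannot overflow, which the fallback starting at name[3] suggests covers the first three bytes plus part of the fourth. A hedged reconstruction of such a packing (the exact layout in atom.c may differ):

    #include <string.h>

    /* Pack the first bytes of a name into a 31-bit big-endian ordinal:
     * 3 whole bytes plus the top 7 bits of byte 3, missing bytes as 0.
     * Staying within 31 bits guarantees that the difference of two
     * ordinals fits in an int, so its sign orders the atoms. */
    static int make_ord0(const unsigned char *name, int len)
    {
        unsigned char b[4] = {0, 0, 0, 0};
        memcpy(b, name, (size_t)(len < 4 ? len : 4));
        return ((int)b[0] << 23) | ((int)b[1] << 15)
             | ((int)b[2] << 7)  | ((int)b[3] >> 1);
    }

Since the low bit of byte 3 would not be covered by such an ordinal, equal ord0 values can still differ there, which is presumably why the tie-break memcmp re-reads from byte 3 rather than starting at byte 4.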
diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h
index 4089fac48e..d37c2940c4 100644
--- a/erts/emulator/beam/erl_vm.h
+++ b/erts/emulator/beam/erl_vm.h
@@ -167,6 +167,8 @@ extern const int num_instructions; /* Number of instruction in opc[]. */
extern Uint erts_instr_count[];
+extern int tuple_module_apply;
+
/* some constants for various table sizes etc */
#define ATOM_TEXT_SIZE 32768 /* Increment for allocating atom text space */
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index 621ba108ba..1ded5f031c 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -102,7 +102,7 @@ static byte* enc_term(ErtsAtomCacheMap *, Eterm, byte*, Uint32, struct erl_off_h
struct TTBEncodeContext_;
static int enc_term_int(struct TTBEncodeContext_*,ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags,
struct erl_off_heap_header** off_heap, Sint *reds, byte **res);
-static Uint is_external_string(Eterm obj, int* p_is_string);
+static int is_external_string(Eterm obj, Uint* lenp);
static byte* enc_atom(ErtsAtomCacheMap *, Eterm, byte*, Uint32);
static byte* enc_pid(ErtsAtomCacheMap *, Eterm, byte*, Uint32);
struct B2TContext_t;
@@ -1953,7 +1953,8 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
#define RETURN_STATE() \
do { \
- hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE+3); \
+ static const int TUPLE2_SIZE = 2 + 1; \
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE + TUPLE2_SIZE); \
c_term = erts_mk_magic_ref(&hp, &MSO(p), context_b); \
res = TUPLE2(hp, Term, c_term); \
BUMP_ALL_REDS(p); \
@@ -2480,11 +2481,21 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
{
Eterm* cons = list_val(obj);
Eterm tl;
+ Uint len_cnt = WSTACK_POP(s);
obj = CAR(cons);
tl = CDR(cons);
- WSTACK_PUSH2(s, (is_list(tl) ? ENC_ONE_CONS : ENC_TERM),
- tl);
+ if (is_list(tl)) {
+ len_cnt++;
+ WSTACK_PUSH3(s, len_cnt, ENC_ONE_CONS, tl);
+ }
+ else {
+ byte* list_lenp = (byte*) WSTACK_POP(s);
+ ASSERT(list_lenp[-1] == LIST_EXT);
+ put_int32(len_cnt, list_lenp);
+
+ WSTACK_PUSH2(s, ENC_TERM, tl);
+ }
}
break;
case ENC_PATCH_FUN_SIZE:
@@ -2688,10 +2699,7 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
}
case LIST_DEF:
{
- int is_str;
-
- i = is_external_string(obj, &is_str);
- if (is_str) {
+ if (is_external_string(obj, &i)) {
*ep++ = STRING_EXT;
put_int16(i, ep);
ep += 2;
@@ -2700,9 +2708,12 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
*ep++ = unsigned_val(CAR(cons));
obj = CDR(cons);
}
+ r -= i;
} else {
+ r -= i/2;
*ep++ = LIST_EXT;
- put_int32(i, ep);
+ /* Patch list length when we find end of list */
+ WSTACK_PUSH2(s, (UWord)ep, 1);
ep += 4;
goto encode_one_cons;
}
@@ -2960,9 +2971,13 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
return 0;
}
+/** @brief Is it a list of bytes not longer than MAX_STRING_LEN?
+ * @param lenp out: string length or number of list cells traversed
+ * @return true/false
+ */
static
-Uint
-is_external_string(Eterm list, int* p_is_string)
+int
+is_external_string(Eterm list, Uint* lenp)
{
Uint len = 0;
@@ -2974,29 +2989,15 @@ is_external_string(Eterm list, int* p_is_string)
Eterm* consp = list_val(list);
Eterm hd = CAR(consp);
- if (!is_byte(hd)) {
- break;
+ if (!is_byte(hd) || ++len > MAX_STRING_LEN) {
+ *lenp = len;
+ return 0;
}
- len++;
list = CDR(consp);
}
- /*
- * If we have reached the end of the list, and we have
- * not exceeded the maximum length of a string, this
- * is a string.
- */
- *p_is_string = is_nil(list) && len < MAX_STRING_LEN;
-
- /*
- * Continue to calculate the length.
- */
- while (is_list(list)) {
- Eterm* consp = list_val(list);
- len++;
- list = CDR(consp);
- }
- return len;
+ *lenp = len;
+ return is_nil(list);
}
@@ -4074,8 +4075,8 @@ encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj,
for (;;) {
ASSERT(!is_header(obj));
- if (ctx && --r == 0) {
- *reds = r;
+ if (ctx && --r <= 0) {
+ *reds = 0;
ctx->obj = obj;
ctx->result = result;
WSTACK_SAVE(s, &ctx->wstack);
@@ -4165,8 +4166,10 @@ encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj,
result += (1 + encode_size_struct2(acmp, port_node_name(obj), dflags) +
4 + 1);
break;
- case LIST_DEF:
- if ((m = is_string(obj)) && (m < MAX_STRING_LEN)) {
+ case LIST_DEF: {
+ int is_str = is_external_string(obj, &m);
+ r -= m/2;
+ if (is_str) {
result += m + 2 + 1;
} else {
result += 5;
@@ -4175,6 +4178,7 @@ encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj,
continue; /* big loop */
}
break;
+ }
case TUPLE_DEF:
{
Eterm* ptr = tuple_val(obj);
@@ -4316,7 +4320,7 @@ encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj,
if (is_header(obj)) {
switch (obj) {
- case LIST_TAIL_OP:
+ case LIST_TAIL_OP:
obj = (Eterm) WSTACK_POP(s);
if (is_list(obj)) {
Eterm* cons = list_val(obj);
@@ -4342,7 +4346,7 @@ encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj,
WSTACK_DESTROY(s);
if (ctx) {
ASSERT(ctx->wstack.wstart == NULL);
- *reds = r;
+ *reds = r < 0 ? 0 : r;
}
*res = result;
return 0;
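
The LIST_EXT change above drops the old two-pass length count: enc_term_int() now emits the tag with a 4-byte placeholder, pushes the placeholder's address on the work stack, counts cons cells while streaming them, and put_int32()-patches the count once the tail turns out not to be another cell. The same back-patching idiom in a flat loop (tag values are the documented external-format ones; the real encoder streams via WSTACK and yields on reductions):

    #include <stddef.h>
    #include <stdint.h>

    #define SMALL_INTEGER_EXT 97    /* 'a' */
    #define NIL_EXT           106   /* 'j' */
    #define LIST_EXT          108   /* 'l' */

    /* Local stand-in for the put_int32() used above: big-endian store. */
    static void put_u32(uint32_t v, unsigned char *p)
    {
        p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
    }

    /* Encode a -1-terminated array of bytes as LIST_EXT without knowing
     * its length up front: reserve 4 bytes after the tag, remember the
     * spot, and patch it when the end of the list is reached. */
    static size_t encode_byte_list(const int *xs, unsigned char *out)
    {
        unsigned char *ep = out, *lenp;
        uint32_t n = 0;

        *ep++ = LIST_EXT;
        lenp = ep;                  /* patched below, like the WSTACK entry */
        ep += 4;
        for (; *xs >= 0; xs++, n++) {
            *ep++ = SMALL_INTEGER_EXT;
            *ep++ = (unsigned char)*xs;
        }
        put_u32(n, lenp);
        *ep++ = NIL_EXT;            /* proper list: tail is [] */
        return (size_t)(ep - out);
    }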
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 21ae205237..f564472081 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -113,6 +113,9 @@ extern Eterm erts_bld_resource_ref(Eterm** hp, ErlOffHeap*, ErtsResource*);
extern void erts_pre_nif(struct enif_environment_t*, Process*,
struct erl_module_nif*, Process* tracee);
extern void erts_post_nif(struct enif_environment_t* env);
+#ifdef DEBUG
+int erts_dbg_is_resource_dying(ErtsResource*);
+#endif
extern void erts_resource_stop(ErtsResource*, ErlNifEvent, int is_direct_call);
void erts_fire_nif_monitor(ErtsMonitor *tmon);
void erts_nif_demonitored(ErtsResource* resource);
@@ -906,6 +909,8 @@ typedef struct ErtsLiteralArea_ {
Eterm start[1]; /* beginning of area */
} ErtsLiteralArea;
+void erts_queue_release_literals(Process *c_p, ErtsLiteralArea* literals);
+
#define ERTS_LITERAL_AREA_ALLOC_SIZE(N) \
(sizeof(ErtsLiteralArea) + sizeof(Eterm)*((N) - 1))
@@ -960,7 +965,7 @@ void init_break_handler(void);
void erts_set_ignore_break(void);
void erts_replace_intr(void);
void process_info(fmtfn_t, void *);
-void print_process_info(fmtfn_t, void *, Process*);
+void print_process_info(fmtfn_t, void *, Process*, ErtsProcLocks);
void info(fmtfn_t, void *);
void loaded(fmtfn_t, void *);
void erts_print_base64(fmtfn_t to, void *to_arg, byte* src, Uint size);
@@ -1001,6 +1006,7 @@ typedef struct {
Uint literal_size;
Eterm *lit_purge_ptr;
Uint lit_purge_sz;
+ int copy_literals;
} erts_shcopy_t;
#define INITIALIZE_SHCOPY(info) \
@@ -1010,6 +1016,7 @@ typedef struct {
info.bitstore_start = info.bitstore_default; \
info.shtable_start = info.shtable_default; \
info.literal_size = 0; \
+ info.copy_literals = 0; \
if (larea__) { \
info.lit_purge_ptr = &larea__->start[0]; \
info.lit_purge_sz = larea__->end - info.lit_purge_ptr; \
@@ -1238,6 +1245,13 @@ Sint erts_re_set_loop_limit(Sint limit);
void erts_init_bif_binary(void);
Sint erts_binary_set_loop_limit(Sint limit);
+/* erl_bif_persistent.c */
+void erts_init_bif_persistent_term(void);
+Uint erts_persistent_term_count(void);
+void erts_init_persistent_dumping(void);
+extern ErtsLiteralArea** erts_persistent_areas;
+extern Uint erts_num_persistent_areas;
+
/* external.c */
void erts_init_external(void);
@@ -1292,14 +1306,7 @@ Sint intlist_to_buf(Eterm, char*, Sint); /* most callers pass plain char*'s */
int erts_unicode_list_to_buf(Eterm list, byte *buf, Sint len, Sint* written);
Sint erts_unicode_list_to_buf_len(Eterm list);
-struct Sint_buf {
-#if defined(ARCH_64)
- char s[22];
-#else
- char s[12];
-#endif
-};
-char* Sint_to_buf(Sint, struct Sint_buf*);
+int Sint_to_buf(Sint num, int base, char **buf_p, size_t buf_size);
#define ERTS_IOLIST_STATE_INITER(C_P, OBJ) \
{(C_P), 0, 0, (OBJ), {NULL, NULL, NULL, ERTS_ALC_T_INVALID}, 0, 0}
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 5325480901..7322239a73 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -4073,7 +4073,7 @@ done:
* to the caller.
*/
int
-erl_drv_port_control(Eterm port_num, char cmd, char* buff, ErlDrvSizeT size)
+erl_drv_port_control(Eterm port_num, unsigned int cmd, char* buff, ErlDrvSizeT size)
{
ErtsProc2PortSigData *sigdp = erts_port_task_alloc_p2p_sig_data();
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index bb22548587..a69da4d762 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -325,6 +325,7 @@ typedef long Sint erts_align_attribute(sizeof(long));
#define UWORD_CONSTANT(Const) Const##UL
#define ERTS_UWORD_MAX ULONG_MAX
#define ERTS_SWORD_MAX LONG_MAX
+#define ERTS_SWORD_MIN LONG_MIN
#define ERTS_SIZEOF_ETERM SIZEOF_LONG
#define ErtsStrToSint strtol
#elif SIZEOF_VOID_P == SIZEOF_INT
@@ -335,6 +336,7 @@ typedef int Sint erts_align_attribute(sizeof(int));
#define UWORD_CONSTANT(Const) Const##U
#define ERTS_UWORD_MAX UINT_MAX
#define ERTS_SWORD_MAX INT_MAX
+#define ERTS_SWORD_MIN INT_MIN
#define ERTS_SIZEOF_ETERM SIZEOF_INT
#define ErtsStrToSint strtol
#elif SIZEOF_VOID_P == SIZEOF_LONG_LONG
@@ -345,6 +347,7 @@ typedef long long Sint erts_align_attribute(sizeof(long long));
#define UWORD_CONSTANT(Const) Const##ULL
#define ERTS_UWORD_MAX ULLONG_MAX
#define ERTS_SWORD_MAX LLONG_MAX
+#define ERTS_SWORD_MIN LLONG_MIN
#define ERTS_SIZEOF_ETERM SIZEOF_LONG_LONG
#if defined(__WIN32__)
#define ErtsStrToSint _strtoi64
@@ -1288,4 +1291,13 @@ erts_raw_env_next_char(byte *p, int encoding)
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+/*
+ * Magic numbers for our driver port_control callbacks.
+ * Kept them below 1<<27 to not inflict extra bignum garbage on 32-bit.
+ */
+#define ERTS_TTYSL_DRV_CONTROL_MAGIC_NUMBER 0x018b0900U
+#define ERTS_INET_DRV_CONTROL_MAGIC_NUMBER 0x03f1a300U
+#define ERTS_SPAWN_DRV_CONTROL_MAGIC_NUMBER 0x04c76a00U
+#define ERTS_FORKER_DRV_CONTROL_MAGIC_NUMBER 0x050a7800U
+
#endif
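
The comment above states the invariant; if one wanted it machine-checked, a C11 static_assert placed after these defines would do. This is only a sketch, erts itself does not carry such an assert:

    #include <assert.h>

    /* On a 32-bit emulator a small holds |value| < 1 << 27, so any magic
     * at or above that would be boxed as a bignum on every control call. */
    static_assert(ERTS_TTYSL_DRV_CONTROL_MAGIC_NUMBER  < (1u << 27) &&
                  ERTS_INET_DRV_CONTROL_MAGIC_NUMBER   < (1u << 27) &&
                  ERTS_SPAWN_DRV_CONTROL_MAGIC_NUMBER  < (1u << 27) &&
                  ERTS_FORKER_DRV_CONTROL_MAGIC_NUMBER < (1u << 27),
                  "port_control magic numbers must stay immediate on 32-bit");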
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 08f8ca9788..c5deed38ad 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -1946,7 +1946,7 @@ do_allocate_logger_message(Eterm gleader, ErtsMonotonicTime *ts, Eterm *pid,
else
sz += MAP4_SZ /* metadata map w gl w pid*/;
- *ts = ERTS_MONOTONIC_TO_USEC(erts_get_monotonic_time(NULL) + erts_get_time_offset());
+ *ts = ERTS_MONOTONIC_TO_USEC(erts_os_system_time());
erts_bld_sint64(NULL, &sz, *ts);
*bp = new_message_buffer(sz);
@@ -2615,27 +2615,6 @@ not_equal:
}
-/*
- * Lexically compare two strings of bytes (string s1 length l1 and s2 l2).
- *
- * s1 < s2 return -1
- * s1 = s2 return 0
- * s1 > s2 return +1
- */
-static int cmpbytes(byte *s1, int l1, byte *s2, int l2)
-{
- int i;
- i = 0;
- while((i < l1) && (i < l2)) {
- if (s1[i] < s2[i]) return(-1);
- if (s1[i] > s2[i]) return(1);
- i++;
- }
- if (l1 < l2) return(-1);
- if (l1 > l2) return(1);
- return(0);
-}
-
/*
* Compare objects.
@@ -2649,20 +2628,6 @@ static int cmpbytes(byte *s1, int l1, byte *s2, int l2)
*
*/
-
-#define float_comp(x,y) (((x)<(y)) ? -1 : (((x)==(y)) ? 0 : 1))
-
-int erts_cmp_atoms(Eterm a, Eterm b)
-{
- Atom *aa = atom_tab(atom_val(a));
- Atom *bb = atom_tab(atom_val(b));
- int diff = aa->ord0 - bb->ord0;
- if (diff)
- return diff;
- return cmpbytes(aa->name+3, aa->len-3,
- bb->name+3, bb->len-3);
-}
-
/* cmp(Eterm a, Eterm b)
* For compatibility with HiPE - arith-based compare.
*/
@@ -2673,22 +2638,6 @@ Sint cmp(Eterm a, Eterm b)
Sint erts_cmp_compound(Eterm a, Eterm b, int exact, int eq_only);
-Sint erts_cmp(Eterm a, Eterm b, int exact, int eq_only)
-{
- if (is_atom(a) && is_atom(b)) {
- return erts_cmp_atoms(a, b);
- } else if (is_both_small(a, b)) {
- return (signed_val(a) - signed_val(b));
- } else if (is_float(a) && is_float(b)) {
- FloatDef af, bf;
- GET_DOUBLE(a, af);
- GET_DOUBLE(b, bf);
- return float_comp(af.fd, bf.fd);
- }
- return erts_cmp_compound(a,b,exact,eq_only);
-}
-
-
/* erts_cmp(Eterm a, Eterm b, int exact)
* exact = 1 -> term-based compare
* exact = 0 -> arith-based compare
@@ -2985,7 +2934,7 @@ tailrecur_ne:
GET_DOUBLE(a, af);
GET_DOUBLE(b, bf);
- ON_CMP_GOTO(float_comp(af.fd, bf.fd));
+ ON_CMP_GOTO(erts_float_comp(af.fd, bf.fd));
}
case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
@@ -3022,10 +2971,7 @@ tailrecur_ne:
ErlFunThing* f2 = (ErlFunThing *) fun_val(b);
Sint diff;
- diff = cmpbytes(atom_tab(atom_val(f1->fe->module))->name,
- atom_tab(atom_val(f1->fe->module))->len,
- atom_tab(atom_val(f2->fe->module))->name,
- atom_tab(atom_val(f2->fe->module))->len);
+ diff = erts_cmp_atoms((f1->fe)->module, (f2->fe)->module);
if (diff != 0) {
RETURN_NEQ(diff);
}
@@ -3219,7 +3165,7 @@ tailrecur_ne:
if (f2.fd < MAX_LOSSLESS_FLOAT && f2.fd > MIN_LOSSLESS_FLOAT) {
/* Float is within the no loss limit */
f1.fd = signed_val(aw);
- j = float_comp(f1.fd, f2.fd);
+ j = erts_float_comp(f1.fd, f2.fd);
}
#if ERTS_SIZEOF_ETERM == 8
else if (f2.fd > (double) (MAX_SMALL + 1)) {
@@ -3266,7 +3212,7 @@ tailrecur_ne:
if (big_to_double(aw, &f1.fd) < 0) {
j = big_sign(aw) ? -1 : 1;
} else {
- j = float_comp(f1.fd, f2.fd);
+ j = erts_float_comp(f1.fd, f2.fd);
}
} else {
big = double_to_big(f2.fd, big_buf, sizeof(big_buf)/sizeof(Eterm));
@@ -3282,7 +3228,7 @@ tailrecur_ne:
if (f1.fd < MAX_LOSSLESS_FLOAT && f1.fd > MIN_LOSSLESS_FLOAT) {
/* Float is within the no loss limit */
f2.fd = signed_val(bw);
- j = float_comp(f1.fd, f2.fd);
+ j = erts_float_comp(f1.fd, f2.fd);
}
#if ERTS_SIZEOF_ETERM == 8
else if (f1.fd > (double) (MAX_SMALL + 1)) {
@@ -3735,30 +3681,47 @@ erts_unicode_list_to_buf_len(Eterm list)
}
}
-/*
-** Convert an integer to a byte list
-** return pointer to converted stuff (need not to be at start of buf!)
-*/
-char* Sint_to_buf(Sint n, struct Sint_buf *buf)
+/* Prints an integer in the given base, returning the number of characters
+ * written, including any minus sign.
+ *
+ * (*buf) is a pointer to the buffer, and is set to the start of the string
+ * when returning. */
+int Sint_to_buf(Sint n, int base, char **buf, size_t buf_size)
{
- char* p = &buf->s[sizeof(buf->s)-1];
- int sign = 0;
-
- *p-- = '\0'; /* null terminate */
- if (n == 0)
- *p-- = '0';
- else if (n < 0) {
- sign = 1;
- n = -n;
+ char *p = &(*buf)[buf_size - 1];
+ int sign = 0, size = 0;
+
+ ASSERT(base >= 2 && base <= 36);
+
+ if (n == 0) {
+ *p-- = '0';
+ size++;
+ } else if (n < 0) {
+ sign = 1;
+ n = -n;
}
while (n != 0) {
- *p-- = (n % 10) + '0';
- n /= 10;
+ int digit = n % base;
+
+ if (digit < 10) {
+ *p-- = '0' + digit;
+ } else {
+ *p-- = 'A' + (digit - 10);
+ }
+
+ size++;
+
+ n /= base;
}
- if (sign)
- *p-- = '-';
- return p+1;
+
+ if (sign) {
+ *p-- = '-';
+ size++;
+ }
+
+ *buf = p + 1;
+
+ return size;
}
/* Build a list of integers in some safe memory area
@@ -4825,58 +4788,3 @@ erts_ptr_id(void *ptr)
return ptr;
}
-#ifdef DEBUG
-/*
- * Handy functions when using a debugger - don't use in the code!
- */
-
-void upp(byte *buf, size_t sz)
-{
- bin_write(ERTS_PRINT_STDERR, NULL, buf, sz);
-}
-
-void pat(Eterm atom)
-{
- upp(atom_tab(atom_val(atom))->name,
- atom_tab(atom_val(atom))->len);
-}
-
-
-void pinfo()
-{
- process_info(ERTS_PRINT_STDOUT, NULL);
-}
-
-
-void pp(p)
-Process *p;
-{
- if(p)
- print_process_info(ERTS_PRINT_STDERR, NULL, p);
-}
-
-void ppi(Eterm pid)
-{
- pp(erts_proc_lookup(pid));
-}
-
-void td(Eterm x)
-{
- erts_fprintf(stderr, "%T\n", x);
-}
-
-void
-ps(Process* p, Eterm* stop)
-{
- Eterm* sp = STACK_START(p) - 1;
-
- if (stop <= STACK_END(p)) {
- stop = STACK_END(p) + 1;
- }
-
- while(sp >= stop) {
- erts_printf("%p: %.75T\n", sp, *sp);
- sp--;
- }
-}
-#endif
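
The rewritten Sint_to_buf() builds the digits backwards from the end of the caller's buffer, supports bases 2 through 36 with uppercase letters, no longer writes a terminating NUL, and instead repoints *buf_p at the first character and returns the length. A usage sketch against the new prototype from global.h:

    #include <stdio.h>
    #include "global.h"     /* Sint, Sint_to_buf() */

    void demo(void)
    {
        char storage[32];
        char *p = storage;
        int len = Sint_to_buf(-255, 16, &p, sizeof storage);

        /* p now points at "-FF" somewhere inside storage; since no NUL
         * is written, print with an explicit length. */
        printf("%.*s\n", len, p);    /* prints -FF */
    }

Note the ERTS_SWORD_MIN additions in sys.h: negating the most negative Sint overflows, so callers presumably have to special-case that value before handing it to this routine.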