Diffstat (limited to 'erts/emulator/beam')
-rw-r--r--  erts/emulator/beam/atom.c                        2
-rw-r--r--  erts/emulator/beam/atom.names                    4
-rw-r--r--  erts/emulator/beam/beam_bif_load.c              59
-rw-r--r--  erts/emulator/beam/beam_emu.c                    1
-rw-r--r--  erts/emulator/beam/bif.c                         4
-rw-r--r--  erts/emulator/beam/bif.tab                      27
-rw-r--r--  erts/emulator/beam/big.c                        19
-rw-r--r--  erts/emulator/beam/copy.c                       10
-rw-r--r--  erts/emulator/beam/erl_alloc.c                   3
-rw-r--r--  erts/emulator/beam/erl_alloc.types               6
-rw-r--r--  erts/emulator/beam/erl_alloc_util.c              4
-rw-r--r--  erts/emulator/beam/erl_async.c                   2
-rw-r--r--  erts/emulator/beam/erl_bif_atomics.c           256
-rw-r--r--  erts/emulator/beam/erl_bif_counters.c          255
-rw-r--r--  erts/emulator/beam/erl_bif_lists.c             838
-rw-r--r--  erts/emulator/beam/erl_bif_persistent.c        983
-rw-r--r--  erts/emulator/beam/erl_bif_unique.h              6
-rw-r--r--  erts/emulator/beam/erl_db_tree.c                12
-rw-r--r--  erts/emulator/beam/erl_dirty_bif.tab             2
-rw-r--r--  erts/emulator/beam/erl_gc.c                     33
-rw-r--r--  erts/emulator/beam/erl_hl_timer.c               26
-rw-r--r--  erts/emulator/beam/erl_init.c                    5
-rw-r--r--  erts/emulator/beam/erl_lock_check.c              2
-rw-r--r--  erts/emulator/beam/erl_monitor_link.h           15
-rw-r--r--  erts/emulator/beam/erl_node_tables.c            21
-rw-r--r--  erts/emulator/beam/erl_port.h                    2
-rw-r--r--  erts/emulator/beam/erl_port_task.c              81
-rw-r--r--  erts/emulator/beam/erl_port_task.h              21
-rw-r--r--  erts/emulator/beam/erl_process.c               200
-rw-r--r--  erts/emulator/beam/erl_process.h                 4
-rw-r--r--  erts/emulator/beam/erl_process_dump.c           40
-rw-r--r--  erts/emulator/beam/erl_sched_spec_pre_alloc.h    9
-rw-r--r--  erts/emulator/beam/erl_thr_progress.c           34
-rw-r--r--  erts/emulator/beam/erl_thr_progress.h           29
-rw-r--r--  erts/emulator/beam/erl_trace.c                  15
-rw-r--r--  erts/emulator/beam/erl_utils.h                  61
-rw-r--r--  erts/emulator/beam/external.c                    3
-rw-r--r--  erts/emulator/beam/global.h                     11
-rw-r--r--  erts/emulator/beam/sys.h                         3
-rw-r--r--  erts/emulator/beam/utils.c                      64
40 files changed, 2835 insertions(+), 337 deletions(-)
diff --git a/erts/emulator/beam/atom.c b/erts/emulator/beam/atom.c
index 5381611fab..59b51fd15e 100644
--- a/erts/emulator/beam/atom.c
+++ b/erts/emulator/beam/atom.c
@@ -174,7 +174,7 @@ atom_alloc(Atom* tmpl)
/*
* Precompute ordinal value of first 3 bytes + 7 bits.
- * This is used by utils.c:erts_cmp_atoms().
+ * This is used by erl_utils.h:erts_cmp_atoms().
* We cannot use the full 32 bits of the first 4 bytes,
* since we use the sign of the difference between two
* ordinal values to represent their relative order.
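
A minimal standalone sketch of the ordinal idea described in the comment above, with hypothetical names: because only 3 bytes + 7 bits (31 bits) are packed into the cached value, the difference of two ordinals cannot overflow a signed 32-bit integer, so its sign is a valid first-pass comparison result.

    #include <stdint.h>

    /* Hypothetical sketch: each atom caches a 31-bit ordinal built from
     * its first 3 bytes + 7 bits; the top bit is never set, so the
     * subtraction below cannot overflow. */
    struct atom_sketch { int32_t ord0; };

    static int cmp_atoms_sketch(const struct atom_sketch *a,
                                const struct atom_sketch *b)
    {
        int32_t diff = a->ord0 - b->ord0;
        if (diff != 0)
            return (diff < 0) ? -1 : 1;  /* order decided by leading bytes */
        return 0; /* equal prefix: a full name comparison would follow */
    }
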
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index fdce4f7b95..291bc95604 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -182,6 +182,7 @@ atom control
atom copy
atom copy_literals
atom counters
+atom count
atom cpu
atom cpu_timestamp
atom cr
@@ -287,6 +288,7 @@ atom gc_minor_end
atom gc_minor_start
atom Ge='>='
atom generational
+atom get_all_trap
atom get_seq_token
atom get_tcw
atom gather_gc_info_result
@@ -325,6 +327,7 @@ atom index
atom infinity
atom info
atom info_msg
+atom info_trap
atom init
atom initial_call
atom input
@@ -393,6 +396,7 @@ atom microsecond
atom microstate_accounting
atom milli_seconds
atom millisecond
+atom min
atom min_heap_size
atom min_bin_vheap_size
atom minor
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index d221e6aea6..bb1b2e5b27 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -1752,29 +1752,7 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
finalize_purge_operation(BIF_P, ret == am_true);
if (literals) {
- ErtsLiteralAreaRef *ref;
- ErtsMessage *mp;
- ref = erts_alloc(ERTS_ALC_T_LITERAL_REF,
- sizeof(ErtsLiteralAreaRef));
- ref->literal_area = literals;
- ref->next = NULL;
- erts_mtx_lock(&release_literal_areas.mtx);
- if (release_literal_areas.last) {
- release_literal_areas.last->next = ref;
- release_literal_areas.last = ref;
- }
- else {
- release_literal_areas.first = ref;
- release_literal_areas.last = ref;
- }
- erts_mtx_unlock(&release_literal_areas.mtx);
- mp = erts_alloc_message(0, NULL);
- ERL_MESSAGE_TOKEN(mp) = am_undefined;
- erts_queue_proc_message(BIF_P,
- erts_literal_area_collector,
- 0,
- mp,
- am_copy_literals);
+ erts_queue_release_literals(BIF_P, literals);
}
return ret;
@@ -1786,6 +1764,41 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
}
}
+void
+erts_queue_release_literals(Process* c_p, ErtsLiteralArea* literals)
+{
+ ErtsLiteralAreaRef *ref;
+ ErtsMessage *mp;
+ ref = erts_alloc(ERTS_ALC_T_LITERAL_REF,
+ sizeof(ErtsLiteralAreaRef));
+ ref->literal_area = literals;
+ ref->next = NULL;
+ erts_mtx_lock(&release_literal_areas.mtx);
+ if (release_literal_areas.last) {
+ release_literal_areas.last->next = ref;
+ release_literal_areas.last = ref;
+ } else {
+ release_literal_areas.first = ref;
+ release_literal_areas.last = ref;
+ }
+ erts_mtx_unlock(&release_literal_areas.mtx);
+ mp = erts_alloc_message(0, NULL);
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
+ if (c_p == NULL) {
+ erts_queue_message(erts_literal_area_collector,
+ 0,
+ mp,
+ am_copy_literals,
+ am_system);
+ } else {
+ erts_queue_proc_message(c_p,
+ erts_literal_area_collector,
+ 0,
+ mp,
+ am_copy_literals);
+ }
+}
+
/*
* Move code from current to old and null all export entries for the module
*/
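
erts_queue_release_literals() above appends the literal area to a mutex-protected FIFO and then notifies the literal-area collector. A minimal sketch of that append-and-notify shape with assumed names (the real code uses release_literal_areas and a message to erts_literal_area_collector, as shown):

    /* Sketch of the queue-append pattern used above (assumed types). */
    struct ref   { struct ref *next; /* payload omitted */ };
    struct queue { struct ref *first, *last; /* plus a mutex */ };

    static void enqueue(struct queue *q, struct ref *r)
    {
        r->next = NULL;
        /* lock(q) */
        if (q->last) {              /* non-empty: link after current tail */
            q->last->next = r;
            q->last = r;
        } else {                    /* empty: r becomes both head and tail */
            q->first = q->last = r;
        }
        /* unlock(q); then wake the consumer -- here, by queueing an
         * am_copy_literals message to the collector process. */
    }
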
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index ab5920a67e..e909a0b4da 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -579,6 +579,7 @@ init_emulator(void)
* the instructions' C labels to the loader.
* The second call starts execution of BEAM code. This call never returns.
*/
+ERTS_NO_RETPOLINE
void process_main(Eterm * x_reg_array, FloatDef* f_reg_array)
{
static int init_done = 0;
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 79ed5a0c18..04498332b0 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -3622,6 +3622,10 @@ erts_internal_garbage_collect_1(BIF_ALIST_1)
default: BIF_ERROR(BIF_P, BADARG);
}
erts_garbage_collect(BIF_P, 0, NULL, 0);
+ if (ERTS_PROC_IS_EXITING(BIF_P)) {
+ /* The max heap size limit was reached. */
+ return THE_NON_VALUE;
+ }
return am_true;
}
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index 7548924178..d4ba90a61a 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -40,6 +40,7 @@
# Note: Guards BIFs usually require special support in the compiler.
#
+
gcbif erlang:abs/1
bif erlang:adler32/1
bif erlang:adler32/2
@@ -698,3 +699,29 @@ ubif erlang:map_get/2
ubif erlang:is_map_key/2
bif ets:internal_delete_all/2
bif ets:internal_select_delete/2
+
+#
+# New in 21.2
+#
+
+bif persistent_term:put/2
+bif persistent_term:get/1
+bif persistent_term:get/0
+bif persistent_term:erase/1
+bif persistent_term:info/0
+bif erts_internal:erase_persistent_terms/0
+
+bif erts_internal:atomics_new/2
+bif atomics:get/2
+bif atomics:put/3
+bif atomics:add/3
+bif atomics:add_get/3
+bif atomics:exchange/3
+bif atomics:compare_exchange/4
+bif atomics:info/1
+
+bif erts_internal:counters_new/1
+bif erts_internal:counters_get/2
+bif erts_internal:counters_add/3
+bif erts_internal:counters_put/3
+bif erts_internal:counters_info/1
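
Each bif line above pairs with a C function following the standard module_name_arity naming scheme, visible in the new files later in this diff. For example:

    /* bif atomics:add/3 pairs with the implementation in
     * erl_bif_atomics.c: */
    BIF_RETTYPE atomics_add_3(BIF_ALIST_3);

    /* bif erts_internal:counters_new/1 likewise pairs with: */
    BIF_RETTYPE erts_internal_counters_new_1(BIF_ALIST_1);
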
diff --git a/erts/emulator/beam/big.c b/erts/emulator/beam/big.c
index 84338769e0..7e0be2a2a7 100644
--- a/erts/emulator/beam/big.c
+++ b/erts/emulator/beam/big.c
@@ -668,27 +668,25 @@ static dsize_t I_mul(ErtsDigit* x, dsize_t xl, ErtsDigit* y, dsize_t yl, ErtsDig
static dsize_t I_sqr(ErtsDigit* x, dsize_t xl, ErtsDigit* r)
{
- ErtsDigit d_next = *x;
ErtsDigit d;
ErtsDigit* r0 = r;
ErtsDigit* s = r;
if ((r + xl) == x) /* "Inline" operation */
*x = 0;
- x++;
while(xl--) {
- ErtsDigit* y = x;
+ ErtsDigit* y;
ErtsDigit y_0 = 0, y_1 = 0, y_2 = 0, y_3 = 0;
ErtsDigit b0, b1;
ErtsDigit z0, z1, z2;
ErtsDigit t;
dsize_t y_l = xl;
-
+
+ d = *x;
+ x++;
+ y = x;
s = r;
- d = d_next;
- d_next = *x;
- x++;
DMUL(d, d, b1, b0);
DSUMc(*s, b0, y_3, t);
@@ -1159,8 +1157,11 @@ static dsize_t I_band(ErtsDigit* x, dsize_t xl, short xsgn,
*r++ = ~c1 & ~c2;
x++; y++;
}
- while(xl--)
- *r++ = ~*x++;
+ while(xl--) {
+ DSUBb(*x,0,b1,c1);
+ *r++ = ~c1;
+ x++;
+ }
}
}
return I_btrail(r0, r, sign);
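
The fix above replaces a plain bitwise complement of the remaining digits with DSUBb, so the borrow started in the low digits keeps propagating. A standalone sketch (illustrative names, not the emulator's types) of why the carry must ripple through the whole digit array:

    #include <stddef.h>
    #include <stdint.h>

    /* Two's complement of a multi-digit magnitude is ~x + 1; the +1 can
     * carry arbitrarily far up, which a per-digit ~x alone loses. */
    static void twos_complement(const uint32_t *x, size_t n, uint32_t *r)
    {
        unsigned carry = 1;
        for (size_t i = 0; i < n; i++) {
            uint32_t d = (uint32_t)~x[i] + carry;
            carry = (carry && d == 0);  /* carry survives only through 0 */
            r[i] = d;
        }
    }
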
diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c
index e7bfd04b73..e7bd046e18 100644
--- a/erts/emulator/beam/copy.c
+++ b/erts/emulator/beam/copy.c
@@ -1074,6 +1074,7 @@ Uint copy_shared_calculate(Eterm obj, erts_shcopy_t *info)
Eterm* ptr;
Eterm *lit_purge_ptr = info->lit_purge_ptr;
Uint lit_purge_sz = info->lit_purge_sz;
+ int copy_literals = info->copy_literals;
#ifdef DEBUG
Eterm mypid = erts_get_current_pid();
#endif
@@ -1119,7 +1120,7 @@ Uint copy_shared_calculate(Eterm obj, erts_shcopy_t *info)
/* off heap list pointers are copied verbatim */
if (erts_is_literal(obj,ptr)) {
VERBOSE(DEBUG_SHCOPY, ("[pid=%T] bypassed copying %p is %T\n", mypid, ptr, obj));
- if (in_literal_purge_area(ptr))
+ if (copy_literals || in_literal_purge_area(ptr))
info->literal_size += size_object(obj);
goto pop_next;
}
@@ -1170,7 +1171,7 @@ Uint copy_shared_calculate(Eterm obj, erts_shcopy_t *info)
/* off heap pointers to boxes are copied verbatim */
if (erts_is_literal(obj,ptr)) {
VERBOSE(DEBUG_SHCOPY, ("[pid=%T] bypassed copying %p is %T\n", mypid, ptr, obj));
- if (in_literal_purge_area(ptr))
+ if (copy_literals || in_literal_purge_area(ptr))
info->literal_size += size_object(obj);
goto pop_next;
}
@@ -1338,6 +1339,7 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
unsigned remaining;
Eterm *lit_purge_ptr = info->lit_purge_ptr;
Uint lit_purge_sz = info->lit_purge_sz;
+ int copy_literals = info->copy_literals;
#ifdef DEBUG
Eterm mypid = erts_get_current_pid();
Eterm saved_obj = obj;
@@ -1387,7 +1389,7 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
ptr = list_val(obj);
/* off heap list pointers are copied verbatim */
if (erts_is_literal(obj,ptr)) {
- if (!in_literal_purge_area(ptr)) {
+ if (!(copy_literals || in_literal_purge_area(ptr))) {
*resp = obj;
} else {
Uint bsz = 0;
@@ -1455,7 +1457,7 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
ptr = boxed_val(obj);
/* off heap pointers to boxes are copied verbatim */
if (erts_is_literal(obj,ptr)) {
- if (!in_literal_purge_area(ptr)) {
+ if (!(copy_literals || in_literal_purge_area(ptr))) {
*resp = obj;
} else {
Uint bsz = 0;
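
The new copy_literals flag makes the sharing-preserving copy treat every literal as if it sat in a purge area, so it gets copied instead of referenced. The calling pattern, abridged from persistent_term_put_2 later in this diff:

    erts_shcopy_t info;
    INITIALIZE_SHCOPY(info);
    info.copy_literals = 1;                 /* force literals to be copied */
    term_size = copy_shared_calculate(tuple, &info);
    /* ... allocate a literal area of term_size words ... */
    tuple = copy_shared_perform(tuple, term_size, &info, &ptr, &off_heap);
    DESTROY_SHCOPY(info);
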
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 8fe1ccb758..9e36d5e0d1 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -4030,6 +4030,9 @@ debug_free(ErtsAlcType_t n, void *extra, void *ptr)
ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);
+ if (!ptr)
+ return;
+
dptr = check_memory_fence(ptr, &size, n, ERTS_ALC_O_FREE);
#ifdef ERTS_ALC_A_EXEC
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 5409b89bab..4f03a34390 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -274,9 +274,13 @@ type ML_YIELD_STATE SHORT_LIVED SYSTEM monitor_link_yield_state
type ML_DIST STANDARD SYSTEM monitor_link_dist
type PF3_ARGS SHORT_LIVED PROCESSES process_flag_3_arguments
type SETUP_CONN_ARG SHORT_LIVED PROCESSES setup_connection_argument
+type LIST_TRAP SHORT_LIVED PROCESSES list_bif_trap_state
type ENVIRONMENT SYSTEM SYSTEM environment
+type PERSISTENT_TERM LONG_LIVED CODE persistent_term
+type PERSISTENT_LOCK_Q SHORT_LIVED SYSTEM persistent_lock_q
+
#
# Types used for special emulators
#
@@ -334,6 +338,8 @@ type GC_INFO_REQ SHORT_LIVED SYSTEM gc_info_request
type PORT_DATA_HEAP STANDARD SYSTEM port_data_heap
type MSACC DRIVER SYSTEM microstate_accounting
type SYS_CHECK_REQ SHORT_LIVED SYSTEM system_check_request
+type ATOMICS STANDARD SYSTEM erl_bif_atomics
+type COUNTERS STANDARD SYSTEM erl_bif_counters
#
# Types used by system specific code
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index a5740a08cf..0be4562785 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -834,6 +834,8 @@ static ERTS_INLINE void clr_bit(UWord* map, Uint ix)
&= ~((UWord)1 << (ix % ERTS_VSPACE_WORD_BITS));
}
+#ifdef DEBUG
+
static ERTS_INLINE int is_bit_set(UWord* map, Uint ix)
{
ASSERT(ix / ERTS_VSPACE_WORD_BITS < VSPACE_MAP_SZ);
@@ -841,6 +843,8 @@ static ERTS_INLINE int is_bit_set(UWord* map, Uint ix)
& ((UWord)1 << (ix % ERTS_VSPACE_WORD_BITS));
}
+#endif
+
UWord erts_literal_vspace_map[VSPACE_MAP_SZ];
static void set_literal_range(void* start, Uint size)
diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c
index 605a2b3461..44655ad5df 100644
--- a/erts/emulator/beam/erl_async.c
+++ b/erts/emulator/beam/erl_async.c
@@ -336,7 +336,7 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
case ERTS_THR_Q_NEED_THR_PRGR:
{
ErtsThrPrgrVal prgr = erts_thr_q_need_thr_progress(q);
- erts_thr_progress_wakeup(NULL, prgr);
+ erts_thr_progress_wakeup(erts_thr_prgr_data(NULL), prgr);
/*
* We do no dequeue finalizing in hope that a new async
* job will arrive before we are woken due to thread
diff --git a/erts/emulator/beam/erl_bif_atomics.c b/erts/emulator/beam/erl_bif_atomics.c
new file mode 100644
index 0000000000..029831bd95
--- /dev/null
+++ b/erts/emulator/beam/erl_bif_atomics.c
@@ -0,0 +1,256 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2018. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Purpose: High performance atomics.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <stddef.h> /* offsetof */
+
+#include "sys.h"
+#include "export.h"
+#include "bif.h"
+#include "erl_threads.h"
+#include "big.h"
+#include "erl_binary.h"
+#include "erl_bif_unique.h"
+#include "erl_map.h"
+
+typedef struct
+{
+ int is_signed;
+ UWord vlen;
+ erts_atomic64_t v[1];
+}AtomicsRef;
+
+static int atomics_destructor(Binary *unused)
+{
+ return 1;
+}
+
+#define OPT_SIGNED (1 << 0)
+
+BIF_RETTYPE erts_internal_atomics_new_2(BIF_ALIST_2)
+{
+ AtomicsRef* p;
+ Binary* mbin;
+ UWord i, cnt, opts;
+ Uint bytes;
+ Eterm* hp;
+
+ if (!term_to_UWord(BIF_ARG_1, &cnt)
+ || cnt == 0
+ || !term_to_UWord(BIF_ARG_2, &opts)) {
+
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if (cnt > (ERTS_UWORD_MAX / sizeof(p->v[0])))
+ BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+
+ bytes = offsetof(AtomicsRef, v) + cnt*sizeof(p->v[0]);
+ mbin = erts_create_magic_binary_x(bytes,
+ atomics_destructor,
+ ERTS_ALC_T_ATOMICS,
+ 0);
+ p = ERTS_MAGIC_BIN_DATA(mbin);
+ p->is_signed = opts & OPT_SIGNED;
+ p->vlen = cnt;
+ for (i=0; i < cnt; i++)
+ erts_atomic64_init_nob(&p->v[i], 0);
+ hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
+ return erts_mk_magic_ref(&hp, &MSO(BIF_P), mbin);
+}
+
+static ERTS_INLINE int get_ref(Eterm ref, AtomicsRef** pp)
+{
+ Binary* mbin;
+ if (!is_internal_magic_ref(ref))
+ return 0;
+
+ mbin = erts_magic_ref2bin(ref);
+ if (ERTS_MAGIC_BIN_DESTRUCTOR(mbin) != atomics_destructor)
+ return 0;
+ *pp = ERTS_MAGIC_BIN_DATA(mbin);
+ return 1;
+}
+
+static ERTS_INLINE int get_ref_ix(Eterm ref, Eterm ix,
+ AtomicsRef** pp, UWord* ixp)
+{
+ return (get_ref(ref, pp)
+ && term_to_UWord(ix, ixp)
+ && --(*ixp) < (*pp)->vlen);
+}
+
+static ERTS_INLINE int get_value(AtomicsRef* p, Eterm term, erts_aint64_t *valp)
+{
+ return (p->is_signed ?
+ term_to_Sint64(term, (Sint64*)valp) :
+ term_to_Uint64(term, (Uint64*)valp));
+}
+
+static ERTS_INLINE int get_incr(AtomicsRef* p, Eterm term, erts_aint64_t *valp)
+{
+ return (term_to_Sint64(term, (Sint64*)valp)
+ || term_to_Uint64(term, (Uint64*)valp));
+}
+
+static ERTS_INLINE Eterm bld_atomic(Process* proc, AtomicsRef* p,
+ erts_aint64_t val)
+{
+ if (p->is_signed) {
+ if (IS_SSMALL(val))
+ return make_small((Sint) val);
+ else {
+ Uint hsz = ERTS_SINT64_HEAP_SIZE(val);
+ Eterm* hp = HAlloc(proc, hsz);
+ return erts_sint64_to_big(val, &hp);
+ }
+ }
+ else {
+ if ((Uint64)val <= MAX_SMALL)
+ return make_small((Sint) val);
+ else {
+ Uint hsz = ERTS_UINT64_HEAP_SIZE((Uint64)val);
+ Eterm* hp = HAlloc(proc, hsz);
+ return erts_uint64_to_big(val, &hp);
+ }
+ }
+}
+
+BIF_RETTYPE atomics_put_3(BIF_ALIST_3)
+{
+ AtomicsRef* p;
+ UWord ix;
+ erts_aint64_t val;
+
+ if (!get_ref_ix(BIF_ARG_1, BIF_ARG_2, &p, &ix)
+ || !get_value(p, BIF_ARG_3, &val)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ erts_atomic64_set_mb(&p->v[ix], val);
+ return am_ok;
+}
+
+BIF_RETTYPE atomics_get_2(BIF_ALIST_2)
+{
+ AtomicsRef* p;
+ UWord ix;
+
+ if (!get_ref_ix(BIF_ARG_1, BIF_ARG_2, &p, &ix)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ return bld_atomic(BIF_P, p, erts_atomic64_read_mb(&p->v[ix]));
+}
+
+BIF_RETTYPE atomics_add_3(BIF_ALIST_3)
+{
+ AtomicsRef* p;
+ UWord ix;
+ erts_aint64_t incr;
+
+ if (!get_ref_ix(BIF_ARG_1, BIF_ARG_2, &p, &ix)
+ || !get_incr(p, BIF_ARG_3, &incr)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ erts_atomic64_add_mb(&p->v[ix], incr);
+ return am_ok;
+}
+
+BIF_RETTYPE atomics_add_get_3(BIF_ALIST_3)
+{
+ AtomicsRef* p;
+ UWord ix;
+ erts_aint64_t incr;
+
+ if (!get_ref_ix(BIF_ARG_1, BIF_ARG_2, &p, &ix)
+ || !get_incr(p, BIF_ARG_3, &incr)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ return bld_atomic(BIF_P, p, erts_atomic64_add_read_mb(&p->v[ix], incr));
+}
+
+BIF_RETTYPE atomics_exchange_3(BIF_ALIST_3)
+{
+ AtomicsRef* p;
+ UWord ix;
+ erts_aint64_t desired, was;
+
+ if (!get_ref_ix(BIF_ARG_1, BIF_ARG_2, &p, &ix)
+ || !get_value(p, BIF_ARG_3, &desired)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ was = erts_atomic64_xchg_mb(&p->v[ix], desired);
+ return bld_atomic(BIF_P, p, was);
+}
+
+BIF_RETTYPE atomics_compare_exchange_4(BIF_ALIST_4)
+{
+ AtomicsRef* p;
+ UWord ix;
+ erts_aint64_t expected, desired, was;
+
+ if (!get_ref_ix(BIF_ARG_1, BIF_ARG_2, &p, &ix)
+ || !get_value(p, BIF_ARG_3, &expected)
+ || !get_value(p, BIF_ARG_4, &desired)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ was = erts_atomic64_cmpxchg_mb(&p->v[ix], desired, expected);
+ return was == expected ? am_ok : bld_atomic(BIF_P, p, was);
+}
+
+BIF_RETTYPE atomics_info_1(BIF_ALIST_1)
+{
+ AtomicsRef* p;
+ Uint hsz = MAP4_SZ;
+ Eterm *hp;
+ Uint64 max;
+ Sint64 min;
+ UWord memory;
+ Eterm max_val, min_val, sz_val, mem_val;
+
+ if (!get_ref(BIF_ARG_1, &p))
+ BIF_ERROR(BIF_P, BADARG);
+
+ max = p->is_signed ? ERTS_SINT64_MAX : ERTS_UINT64_MAX;
+ min = p->is_signed ? ERTS_SINT64_MIN : 0;
+ memory = erts_magic_ref2bin(BIF_ARG_1)->orig_size;
+
+ erts_bld_uint64(NULL, &hsz, max);
+ erts_bld_sint64(NULL, &hsz, min);
+ erts_bld_uword(NULL, &hsz, p->vlen);
+ erts_bld_uword(NULL, &hsz, memory);
+
+ hp = HAlloc(BIF_P, hsz);
+ max_val = erts_bld_uint64(&hp, NULL, max);
+ min_val = erts_bld_sint64(&hp, NULL, min);
+ sz_val = erts_bld_uword(&hp, NULL, p->vlen);
+ mem_val = erts_bld_uword(&hp, NULL, memory);
+
+ return MAP4(hp, am_max, max_val,
+ am_memory, mem_val,
+ am_min, min_val,
+ am_size, sz_val);
+}
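
atomics_compare_exchange_4 above exposes erts_atomic64_cmpxchg_mb directly. As a usage illustration only (not part of this commit), a hypothetical CAS loop built from the same primitives, assuming a non-negative increment:

    /* Hypothetical saturating add on the erts_atomic64 API used above;
     * retries until the compare-exchange succeeds. Assumes incr >= 0. */
    static erts_aint64_t saturating_add(erts_atomic64_t *a,
                                        erts_aint64_t incr,
                                        erts_aint64_t max)
    {
        erts_aint64_t old, next, was;
        do {
            old = erts_atomic64_read_mb(a);
            next = (old > max - incr) ? max : old + incr;
            was = erts_atomic64_cmpxchg_mb(a, next, old);
        } while (was != old);           /* retry if another thread won */
        return next;
    }
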
diff --git a/erts/emulator/beam/erl_bif_counters.c b/erts/emulator/beam/erl_bif_counters.c
new file mode 100644
index 0000000000..7c8884ba32
--- /dev/null
+++ b/erts/emulator/beam/erl_bif_counters.c
@@ -0,0 +1,255 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2018. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Purpose: The implementation for 'counters' with 'write_concurrency'.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <stddef.h> /* offsetof */
+
+#include "sys.h"
+#include "export.h"
+#include "bif.h"
+#include "erl_threads.h"
+#include "big.h"
+#include "erl_binary.h"
+#include "erl_bif_unique.h"
+#include "erl_map.h"
+
+/*
+ * Each logical counter consists of one 64-bit atomic instance per scheduler
+ * plus one instance for the "base value".
+ *
+ * get() reads all atomics for the counter and returns the sum.
+ * add() reads and writes only its own scheduler specific atomic instance.
+ * put() reads all scheduler specific atomics and writes a new base value.
+ */
+#define ATOMICS_PER_COUNTER (erts_no_schedulers + 1)
+
+#define ATOMICS_PER_CACHE_LINE (ERTS_CACHE_LINE_SIZE / sizeof(erts_atomic64_t))
+
+typedef struct
+{
+ UWord arity;
+#ifdef DEBUG
+ UWord ulen;
+#endif
+ union {
+ erts_atomic64_t v[ATOMICS_PER_CACHE_LINE];
+ byte cache_line__[ERTS_CACHE_LINE_SIZE];
+ } u[1];
+}CountersRef;
+
+static int counters_destructor(Binary *mbin)
+{
+ return 1;
+}
+
+
+static UWord ERTS_INLINE div_ceil(UWord dividend, UWord divisor)
+{
+ return (dividend + divisor - 1) / divisor;
+}
+
+BIF_RETTYPE erts_internal_counters_new_1(BIF_ALIST_1)
+{
+ CountersRef* p;
+ Binary* mbin;
+ UWord ui, vi, cnt;
+ Uint bytes, cache_lines;
+ Eterm* hp;
+
+ if (!term_to_UWord(BIF_ARG_1, &cnt)
+ || cnt == 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if (cnt > (ERTS_UWORD_MAX / (sizeof(erts_atomic64_t)*2*ATOMICS_PER_COUNTER)))
+ BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+
+ cache_lines = ATOMICS_PER_COUNTER * div_ceil(cnt, ATOMICS_PER_CACHE_LINE);
+ bytes = offsetof(CountersRef, u) + cache_lines * ERTS_CACHE_LINE_SIZE;
+ mbin = erts_create_magic_binary_x(bytes,
+ counters_destructor,
+ ERTS_ALC_T_ATOMICS,
+ 0);
+ p = ERTS_MAGIC_BIN_DATA(mbin);
+ p->arity = cnt;
+
+#ifdef DEBUG
+ p->ulen = cache_lines;
+#endif
+ ASSERT((byte*)&p->u[cache_lines] <= ((byte*)p + bytes));
+ for (ui=0; ui < cache_lines; ui++)
+ for (vi=0; vi < ATOMICS_PER_CACHE_LINE; vi++)
+ erts_atomic64_init_nob(&p->u[ui].v[vi], 0);
+ hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
+ return erts_mk_magic_ref(&hp, &MSO(BIF_P), mbin);
+}
+
+static ERTS_INLINE int get_ref(Eterm ref, CountersRef** pp)
+{
+ Binary* mbin;
+ if (!is_internal_magic_ref(ref))
+ return 0;
+
+ mbin = erts_magic_ref2bin(ref);
+ if (ERTS_MAGIC_BIN_DESTRUCTOR(mbin) != counters_destructor)
+ return 0;
+ *pp = ERTS_MAGIC_BIN_DATA(mbin);
+ return 1;
+}
+
+static ERTS_INLINE int get_ref_cnt(Eterm ref, Eterm index,
+ CountersRef** pp,
+ erts_atomic64_t** app,
+ UWord sched_ix)
+{
+ CountersRef* p;
+ UWord ix, ui, vi;
+ if (!get_ref(ref, &p) || !term_to_UWord(index, &ix) || --ix >= p->arity)
+ return 0;
+ ui = (ix / ATOMICS_PER_CACHE_LINE) * ATOMICS_PER_COUNTER + sched_ix;
+ vi = ix % ATOMICS_PER_CACHE_LINE;
+ ASSERT(ui < p->ulen);
+ *pp = p;
+ *app = &p->u[ui].v[vi];
+ return 1;
+}
+
+static ERTS_INLINE int get_ref_my_cnt(Eterm ref, Eterm index,
+ CountersRef** pp,
+ erts_atomic64_t** app)
+{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ASSERT(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp));
+ ASSERT(esdp->no > 0 && esdp->no < ATOMICS_PER_COUNTER);
+ return get_ref_cnt(ref, index, pp, app, esdp->no);
+}
+
+static ERTS_INLINE int get_ref_first_cnt(Eterm ref, Eterm index,
+ CountersRef** pp,
+ erts_atomic64_t** app)
+{
+ return get_ref_cnt(ref, index, pp, app, 0);
+}
+
+static ERTS_INLINE int get_incr(CountersRef* p, Eterm term, erts_aint64_t *valp)
+{
+ return (term_to_Sint64(term, (Sint64*)valp)
+ || term_to_Uint64(term, (Uint64*)valp));
+}
+
+static ERTS_INLINE Eterm bld_counter(Process* proc, CountersRef* p,
+ erts_aint64_t val)
+{
+ if (IS_SSMALL(val))
+ return make_small((Sint) val);
+ else {
+ Uint hsz = ERTS_SINT64_HEAP_SIZE(val);
+ Eterm* hp = HAlloc(proc, hsz);
+ return erts_sint64_to_big(val, &hp);
+ }
+}
+
+BIF_RETTYPE erts_internal_counters_get_2(BIF_ALIST_2)
+{
+ CountersRef* p;
+ erts_atomic64_t* ap;
+ erts_aint64_t acc = 0;
+ int j;
+
+ if (!get_ref_first_cnt(BIF_ARG_1, BIF_ARG_2, &p, &ap)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ for (j = ATOMICS_PER_COUNTER; j ; --j) {
+ acc += erts_atomic64_read_nob(ap);
+ ap = (erts_atomic64_t*) ((byte*)ap + ERTS_CACHE_LINE_SIZE);
+ }
+ return bld_counter(BIF_P, p, acc);
+}
+
+BIF_RETTYPE erts_internal_counters_add_3(BIF_ALIST_3)
+{
+ CountersRef* p;
+ erts_atomic64_t* ap;
+ erts_aint64_t incr, sum;
+
+ if (!get_ref_my_cnt(BIF_ARG_1, BIF_ARG_2, &p, &ap)
+ || !get_incr(p, BIF_ARG_3, &incr)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ sum = incr + erts_atomic64_read_nob(ap);
+ erts_atomic64_set_nob(ap, sum);
+ return am_ok;
+}
+
+BIF_RETTYPE erts_internal_counters_put_3(BIF_ALIST_3)
+{
+ CountersRef* p;
+ erts_atomic64_t* first_ap;
+ erts_atomic64_t* ap;
+ erts_aint64_t acc;
+ erts_aint64_t val;
+ int j;
+
+ if (!get_ref_first_cnt(BIF_ARG_1, BIF_ARG_2, &p, &first_ap)
+ || !term_to_Sint64(BIF_ARG_3, &val)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ ap = first_ap;
+ acc = 0;
+ j = ATOMICS_PER_COUNTER - 1;
+ do {
+ ap = (erts_atomic64_t*) ((byte*)ap + ERTS_CACHE_LINE_SIZE);
+ acc += erts_atomic64_read_nob(ap);
+ } while (--j);
+ erts_atomic64_set_nob(first_ap, val-acc);
+
+ return am_ok;
+}
+
+BIF_RETTYPE erts_internal_counters_info_1(BIF_ALIST_1)
+{
+ CountersRef* p;
+ Uint hsz = MAP2_SZ;
+ Eterm *hp;
+ UWord memory;
+ Eterm sz_val, mem_val;
+
+ if (!get_ref(BIF_ARG_1, &p))
+ BIF_ERROR(BIF_P, BADARG);
+
+ memory = erts_magic_ref2bin(BIF_ARG_1)->orig_size;
+ erts_bld_uword(NULL, &hsz, p->arity);
+ erts_bld_uword(NULL, &hsz, memory);
+
+ hp = HAlloc(BIF_P, hsz);
+ sz_val = erts_bld_uword(&hp, NULL, p->arity);
+ mem_val = erts_bld_uword(&hp, NULL, memory);
+
+ return MAP2(hp, am_memory, mem_val,
+ am_size, sz_val);
+}
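
A simplified sketch of the striping invariant described in the header comment above (plain longs, cache-line padding and atomics omitted; "nslots" plays the role of ATOMICS_PER_COUNTER): get() returns base + the sum of the per-scheduler deltas, and put(v) preserves in-flight adds by writing base = v - sum(deltas).

    /* Sketch: slot 0 holds the base value, slots 1..nslots-1 hold the
     * per-scheduler deltas. */
    long counter_get(const long *slots, int nslots)
    {
        long sum = 0;
        for (int i = 0; i < nslots; i++)
            sum += slots[i];
        return sum;
    }

    void counter_put(long *slots, int nslots, long val)
    {
        long deltas = 0;
        for (int i = 1; i < nslots; i++)  /* skip the base slot */
            deltas += slots[i];
        slots[0] = val - deltas;          /* counter_get() now sums to val */
    }
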
diff --git a/erts/emulator/beam/erl_bif_lists.c b/erts/emulator/beam/erl_bif_lists.c
index 395be67a90..aaf262780f 100644
--- a/erts/emulator/beam/erl_bif_lists.c
+++ b/erts/emulator/beam/erl_bif_lists.c
@@ -29,12 +29,13 @@
#include "sys.h"
#include "erl_vm.h"
#include "global.h"
-#include "erl_process.h"
-#include "error.h"
#include "bif.h"
+#include "erl_binary.h"
+
static Eterm keyfind(int Bif, Process* p, Eterm Key, Eterm Pos, Eterm List);
+
static BIF_RETTYPE append(Process* p, Eterm A, Eterm B)
{
Eterm list;
@@ -146,110 +147,732 @@ BIF_RETTYPE append_2(BIF_ALIST_2)
return append(BIF_P, BIF_ARG_1, BIF_ARG_2);
}
-/*
- * erlang:'--'/2
- */
+/* erlang:'--'/2
+ *
+ * Subtracts a list from another (LHS -- RHS), removing the first occurrence of
+ * each element in RHS from LHS. There is no type coercion so the elements must

+ * match exactly.
+ *
+ * The BIF is broken into several stages that can all trap individually, and it
+ * chooses its algorithm based on input size. If either input is small it will
+ * use a linear scan tuned to which side it's on, and if both inputs are large
+ * enough it will convert RHS into a multiset to provide good asymptotic
+ * behavior. */
+
+#define SUBTRACT_LHS_THRESHOLD 16
+#define SUBTRACT_RHS_THRESHOLD 16
+
+typedef enum {
+ SUBTRACT_STAGE_START,
+ SUBTRACT_STAGE_LEN_LHS,
+
+ /* Naive linear scan that's efficient when
+ * LEN_LHS <= SUBTRACT_LHS_THRESHOLD. */
+ SUBTRACT_STAGE_NAIVE_LHS,
+
+ SUBTRACT_STAGE_LEN_RHS,
+
+ /* As SUBTRACT_STAGE_NAIVE_LHS but for RHS. */
+ SUBTRACT_STAGE_NAIVE_RHS,
+
+ /* Creates a multiset from RHS for faster lookups before sweeping through
+ * LHS. The set is implemented as a red-black tree and duplicate elements
+ * are handled by a counter on each node. */
+ SUBTRACT_STAGE_SET_BUILD,
+ SUBTRACT_STAGE_SET_FINISH
+} ErtsSubtractCtxStage;
+
+typedef struct subtract_node__ {
+ struct subtract_node__ *parent;
+ struct subtract_node__ *left;
+ struct subtract_node__ *right;
+ int is_red;
+
+ Eterm key;
+ Uint count;
+} subtract_tree_t;
+
+typedef struct {
+ ErtsSubtractCtxStage stage;
+
+ Eterm lhs_original;
+ Eterm rhs_original;
+
+ Uint lhs_remaining;
+ Uint rhs_remaining;
+
+ Eterm iterator;
+
+ Eterm *result_cdr;
+ Eterm result;
+
+ union {
+ Eterm lhs_elements[SUBTRACT_LHS_THRESHOLD];
+ Eterm rhs_elements[SUBTRACT_RHS_THRESHOLD];
+
+ struct {
+ subtract_tree_t *tree;
+
+ /* A memory area for the tree's nodes, saving us the need to have
+ * one allocation per node. */
+ subtract_tree_t *alloc_start;
+ subtract_tree_t *alloc;
+ } rhs_set;
+ } u;
+} ErtsSubtractContext;
+
+#define ERTS_RBT_PREFIX subtract
+#define ERTS_RBT_T subtract_tree_t
+#define ERTS_RBT_KEY_T Eterm
+#define ERTS_RBT_FLAGS_T int
+#define ERTS_RBT_INIT_EMPTY_TNODE(T) \
+ do { \
+ (T)->parent = NULL; \
+ (T)->left = NULL; \
+ (T)->right = NULL; \
+ } while(0)
+#define ERTS_RBT_IS_RED(T) ((T)->is_red)
+#define ERTS_RBT_SET_RED(T) ((T)->is_red = 1)
+#define ERTS_RBT_IS_BLACK(T) (!ERTS_RBT_IS_RED(T))
+#define ERTS_RBT_SET_BLACK(T) ((T)->is_red = 0)
+#define ERTS_RBT_GET_FLAGS(T) ((T)->is_red)
+#define ERTS_RBT_SET_FLAGS(T, F) ((T)->is_red = F)
+#define ERTS_RBT_GET_PARENT(T) ((T)->parent)
+#define ERTS_RBT_SET_PARENT(T, P) ((T)->parent = P)
+#define ERTS_RBT_GET_RIGHT(T) ((T)->right)
+#define ERTS_RBT_SET_RIGHT(T, R) ((T)->right = (R))
+#define ERTS_RBT_GET_LEFT(T) ((T)->left)
+#define ERTS_RBT_SET_LEFT(T, L) ((T)->left = (L))
+#define ERTS_RBT_GET_KEY(T) ((T)->key)
+#define ERTS_RBT_CMP_KEYS(KX, KY) CMP_TERM(KX, KY)
+#define ERTS_RBT_WANT_LOOKUP_INSERT
+#define ERTS_RBT_WANT_LOOKUP
+#define ERTS_RBT_WANT_DELETE
+#define ERTS_RBT_UNDEF
+
+#include "erl_rbtree.h"
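
With ERTS_RBT_PREFIX set to subtract, erl_rbtree.h generates a typed API along these lines; the exact generated signatures live in erl_rbtree.h, so the prototypes below are inferred from the call sites further down (subtract_set_build and subtract_set_finish):

    /* Assumed shape of the generated multiset API: */
    static subtract_tree_t *subtract_rbt_lookup(subtract_tree_t *root,
                                                Eterm key);
    static subtract_tree_t *subtract_rbt_lookup_insert(subtract_tree_t **root,
                                                       subtract_tree_t *node);
    static void subtract_rbt_delete(subtract_tree_t **root,
                                    subtract_tree_t *node);
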
+
+static int subtract_continue(Process *p, ErtsSubtractContext *context);
+
+static void subtract_ctx_dtor(ErtsSubtractContext *context) {
+ switch (context->stage) {
+ case SUBTRACT_STAGE_SET_BUILD:
+ case SUBTRACT_STAGE_SET_FINISH:
+ erts_free(ERTS_ALC_T_LIST_TRAP, context->u.rhs_set.alloc_start);
+ break;
+ default:
+ break;
+ }
+}
-#define SMALL_VEC_SIZE 10
-static Eterm subtract(Process* p, Eterm A, Eterm B)
-{
- Eterm list;
- Eterm* hp;
- Uint need;
- Eterm res;
- Eterm small_vec[SMALL_VEC_SIZE]; /* Preallocated memory for small lists */
- Eterm* vec_p;
- Eterm* vp;
- Sint i;
- Sint n;
- Sint m;
-
- if ((n = erts_list_length(A)) < 0) {
- BIF_ERROR(p, BADARG);
+static int subtract_ctx_bin_dtor(Binary *context_bin) {
+ ErtsSubtractContext *context = ERTS_MAGIC_BIN_DATA(context_bin);
+ subtract_ctx_dtor(context);
+ return 1;
+}
+
+static void subtract_ctx_move(ErtsSubtractContext *from,
+ ErtsSubtractContext *to) {
+ int uses_result_cdr = 0;
+
+ to->stage = from->stage;
+
+ to->lhs_original = from->lhs_original;
+ to->rhs_original = from->rhs_original;
+
+ to->lhs_remaining = from->lhs_remaining;
+ to->rhs_remaining = from->rhs_remaining;
+
+ to->iterator = from->iterator;
+ to->result = from->result;
+
+ switch (to->stage) {
+ case SUBTRACT_STAGE_NAIVE_LHS:
+ sys_memcpy(to->u.lhs_elements,
+ from->u.lhs_elements,
+ sizeof(Eterm) * to->lhs_remaining);
+ break;
+ case SUBTRACT_STAGE_NAIVE_RHS:
+ sys_memcpy(to->u.rhs_elements,
+ from->u.rhs_elements,
+ sizeof(Eterm) * to->rhs_remaining);
+
+ uses_result_cdr = 1;
+ break;
+ case SUBTRACT_STAGE_SET_FINISH:
+ uses_result_cdr = 1;
+ /* FALL THROUGH */
+ case SUBTRACT_STAGE_SET_BUILD:
+ to->u.rhs_set.alloc_start = from->u.rhs_set.alloc_start;
+ to->u.rhs_set.alloc = from->u.rhs_set.alloc;
+ to->u.rhs_set.tree = from->u.rhs_set.tree;
+ break;
+ default:
+ break;
}
- if ((m = erts_list_length(B)) < 0) {
- BIF_ERROR(p, BADARG);
+
+ if (uses_result_cdr) {
+ if (from->result_cdr == &from->result) {
+ to->result_cdr = &to->result;
+ } else {
+ to->result_cdr = from->result_cdr;
+ }
}
-
- if (n == 0)
- BIF_RET(NIL);
- if (m == 0)
- BIF_RET(A);
-
- /* allocate element vector */
- if (n <= SMALL_VEC_SIZE)
- vec_p = small_vec;
- else
- vec_p = (Eterm*) erts_alloc(ERTS_ALC_T_TMP, n * sizeof(Eterm));
-
- /* PUT ALL ELEMENTS IN VP */
- vp = vec_p;
- list = A;
- i = n;
- while(i--) {
- Eterm* listp = list_val(list);
- *vp++ = CAR(listp);
- list = CDR(listp);
+}
+
+static Eterm subtract_create_trap_state(Process *p,
+ ErtsSubtractContext *context) {
+ Binary *state_bin;
+ Eterm *hp;
+
+ state_bin = erts_create_magic_binary(sizeof(ErtsSubtractContext),
+ subtract_ctx_bin_dtor);
+
+ subtract_ctx_move(context, ERTS_MAGIC_BIN_DATA(state_bin));
+
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+
+ return erts_mk_magic_ref(&hp, &MSO(p), state_bin);
+}
+
+static int subtract_enter_len_lhs(Process *p, ErtsSubtractContext *context) {
+ context->stage = SUBTRACT_STAGE_LEN_LHS;
+
+ context->iterator = context->lhs_original;
+ context->lhs_remaining = 0;
+
+ return subtract_continue(p, context);
+}
+
+static int subtract_enter_len_rhs(Process *p, ErtsSubtractContext *context) {
+ context->stage = SUBTRACT_STAGE_LEN_RHS;
+
+ context->iterator = context->rhs_original;
+ context->rhs_remaining = 0;
+
+ return subtract_continue(p, context);
+}
+
+static int subtract_get_length(Process *p, Eterm *iterator_p, Uint *count_p) {
+ static const Sint ELEMENTS_PER_RED = 32;
+
+ Sint budget, count;
+ Eterm iterator;
+
+ budget = ELEMENTS_PER_RED * ERTS_BIF_REDS_LEFT(p);
+ iterator = *iterator_p;
+
+#ifdef DEBUG
+ budget = budget / 10 + 1;
+#endif
+
+ for (count = 0; count < budget && is_list(iterator); count++) {
+ iterator = CDR(list_val(iterator));
}
-
- /* UNMARK ALL DELETED CELLS */
- list = B;
- m = 0; /* number of deleted elements */
- while(is_list(list)) {
- Eterm* listp = list_val(list);
- Eterm elem = CAR(listp);
- i = n;
- vp = vec_p;
- while(i--) {
- if (is_value(*vp) && eq(*vp, elem)) {
- *vp = THE_NON_VALUE;
- m++;
- break;
- }
- vp++;
- }
- list = CDR(listp);
+
+ if (!is_list(iterator) && !is_nil(iterator)) {
+ return -1;
}
-
- if (m == n) /* All deleted ? */
- res = NIL;
- else if (m == 0) /* None deleted ? */
- res = A;
- else { /* REBUILD LIST */
- res = NIL;
- need = 2*(n - m);
- hp = HAlloc(p, need);
- vp = vec_p + n - 1;
- while(vp >= vec_p) {
- if (is_value(*vp)) {
- res = CONS(hp, *vp, res);
- hp += 2;
- }
- vp--;
- }
+
+ BUMP_REDS(p, count / ELEMENTS_PER_RED);
+
+ *iterator_p = iterator;
+ *count_p += count;
+
+ if (is_nil(iterator)) {
+ return 1;
}
- if (vec_p != small_vec)
- erts_free(ERTS_ALC_T_TMP, (void *) vec_p);
- BIF_RET(res);
+
+ return 0;
}
-BIF_RETTYPE ebif_minusminus_2(BIF_ALIST_2)
-{
- return subtract(BIF_P, BIF_ARG_1, BIF_ARG_2);
+static int subtract_enter_naive_lhs(Process *p, ErtsSubtractContext *context) {
+ Eterm iterator;
+ int i = 0;
+
+ context->stage = SUBTRACT_STAGE_NAIVE_LHS;
+
+ context->iterator = context->rhs_original;
+ context->result = NIL;
+
+ iterator = context->lhs_original;
+
+ while (is_list(iterator)) {
+ const Eterm *cell = list_val(iterator);
+
+ ASSERT(i < SUBTRACT_LHS_THRESHOLD);
+
+ context->u.lhs_elements[i++] = CAR(cell);
+ iterator = CDR(cell);
+ }
+
+ ASSERT(i == context->lhs_remaining);
+
+ return subtract_continue(p, context);
}
-BIF_RETTYPE subtract_2(BIF_ALIST_2)
-{
- return subtract(BIF_P, BIF_ARG_1, BIF_ARG_2);
+static int subtract_naive_lhs(Process *p, ErtsSubtractContext *context) {
+ const Sint CHECKS_PER_RED = 16;
+ Sint checks, budget;
+
+ budget = CHECKS_PER_RED * ERTS_BIF_REDS_LEFT(p);
+ checks = 0;
+
+ while (checks < budget && is_list(context->iterator)) {
+ const Eterm *cell;
+ Eterm value, next;
+ int found_at;
+
+ cell = list_val(context->iterator);
+
+ value = CAR(cell);
+ next = CDR(cell);
+
+ for (found_at = 0; found_at < context->lhs_remaining; found_at++) {
+ if (EQ(value, context->u.lhs_elements[found_at])) {
+ /* We shift the array one step down as we have to preserve
+ * order.
+ *
+ * Note that we can't exit early as that would suppress errors
+ * in the right-hand side (this runs prior to determining the
+ * length of RHS). */
+
+ context->lhs_remaining--;
+ sys_memmove(&context->u.lhs_elements[found_at],
+ &context->u.lhs_elements[found_at + 1],
+ (context->lhs_remaining - found_at) * sizeof(Eterm));
+ break;
+ }
+ }
+
+ checks += MAX(1, context->lhs_remaining);
+ context->iterator = next;
+ }
+
+ BUMP_REDS(p, MIN(checks, budget) / CHECKS_PER_RED);
+
+ if (is_list(context->iterator)) {
+ return 0;
+ } else if (!is_nil(context->iterator)) {
+ return -1;
+ }
+
+ if (context->lhs_remaining > 0) {
+ Eterm *hp;
+ int i;
+
+ hp = HAlloc(p, context->lhs_remaining * 2);
+
+ for (i = context->lhs_remaining - 1; i >= 0; i--) {
+ Eterm value = context->u.lhs_elements[i];
+
+ context->result = CONS(hp, value, context->result);
+ hp += 2;
+ }
+ }
+
+ ASSERT(context->lhs_remaining > 0 || context->result == NIL);
+
+ return 1;
}
+static int subtract_enter_naive_rhs(Process *p, ErtsSubtractContext *context) {
+ Eterm iterator;
+ int i = 0;
+
+ context->stage = SUBTRACT_STAGE_NAIVE_RHS;
+
+ context->iterator = context->lhs_original;
+ context->result_cdr = &context->result;
+ context->result = NIL;
+
+ iterator = context->rhs_original;
+
+ while (is_list(iterator)) {
+ const Eterm *cell = list_val(iterator);
+
+ ASSERT(i < SUBTRACT_RHS_THRESHOLD);
+
+ context->u.rhs_elements[i++] = CAR(cell);
+ iterator = CDR(cell);
+ }
+
+ ASSERT(i == context->rhs_remaining);
+
+ return subtract_continue(p, context);
+}
+
+static int subtract_naive_rhs(Process *p, ErtsSubtractContext *context) {
+ const Sint CHECKS_PER_RED = 16;
+ Sint checks, budget;
+
+ budget = CHECKS_PER_RED * ERTS_BIF_REDS_LEFT(p);
+ checks = 0;
+
+#ifdef DEBUG
+ budget = budget / 10 + 1;
+#endif
+
+ while (checks < budget && is_list(context->iterator)) {
+ const Eterm *cell;
+ Eterm value, next;
+ int found_at;
+
+ cell = list_val(context->iterator);
+ value = CAR(cell);
+ next = CDR(cell);
+
+ for (found_at = context->rhs_remaining - 1; found_at >= 0; found_at--) {
+ if (EQ(value, context->u.rhs_elements[found_at])) {
+ break;
+ }
+ }
+
+ if (found_at < 0) {
+ /* Destructively add the value to the result. This is safe
+ * since the GC is disabled and the unfinished term is never
+ * leaked to the outside world. */
+ Eterm *hp = HAllocX(p, 2, context->lhs_remaining * 2);
+
+ *context->result_cdr = make_list(hp);
+ context->result_cdr = &CDR(hp);
+
+ CAR(hp) = value;
+ } else if (found_at >= 0) {
+ Eterm swap;
+
+ if (context->rhs_remaining-- == 1) {
+ /* We've run out of items to remove, so the rest of the
+ * result will be equal to the remainder of the input. We know
+ * that LHS is well-formed as any errors would've been reported
+ * during length determination. */
+ *context->result_cdr = next;
+
+ BUMP_REDS(p, MIN(budget, checks) / CHECKS_PER_RED);
+
+ return 1;
+ }
+
+ swap = context->u.rhs_elements[context->rhs_remaining];
+ context->u.rhs_elements[found_at] = swap;
+ }
+
+ checks += context->rhs_remaining;
+ context->iterator = next;
+ context->lhs_remaining--;
+ }
+
+ /* The result only has to be terminated when returning it to the user, but
+ * we're doing it when trapping as well to prevent headaches when
+ * debugging. */
+ *context->result_cdr = NIL;
+
+ BUMP_REDS(p, MIN(budget, checks) / CHECKS_PER_RED);
+
+ if (is_list(context->iterator)) {
+ ASSERT(context->lhs_remaining > 0 && context->rhs_remaining > 0);
+ return 0;
+ }
+
+ return 1;
+}
+
+static int subtract_enter_set_build(Process *p, ErtsSubtractContext *context) {
+ context->stage = SUBTRACT_STAGE_SET_BUILD;
+
+ context->u.rhs_set.alloc_start =
+ erts_alloc(ERTS_ALC_T_LIST_TRAP,
+ context->rhs_remaining * sizeof(subtract_tree_t));
+
+ context->u.rhs_set.alloc = context->u.rhs_set.alloc_start;
+ context->u.rhs_set.tree = NULL;
+
+ context->iterator = context->rhs_original;
+
+ return subtract_continue(p, context);
+}
+
+static int subtract_set_build(Process *p, ErtsSubtractContext *context) {
+ const static Sint INSERTIONS_PER_RED = 16;
+ Sint budget, insertions;
+
+ budget = INSERTIONS_PER_RED * ERTS_BIF_REDS_LEFT(p);
+ insertions = 0;
+
+#ifdef DEBUG
+ budget = budget / 10 + 1;
+#endif
+
+ while (insertions < budget && is_list(context->iterator)) {
+ subtract_tree_t *existing_node, *new_node;
+ const Eterm *cell;
+ Eterm value, next;
+
+ cell = list_val(context->iterator);
+ value = CAR(cell);
+ next = CDR(cell);
+
+ new_node = context->u.rhs_set.alloc;
+ new_node->key = value;
+ new_node->count = 1;
+
+ existing_node = subtract_rbt_lookup_insert(&context->u.rhs_set.tree,
+ new_node);
+
+ if (existing_node != NULL) {
+ existing_node->count++;
+ } else {
+ context->u.rhs_set.alloc++;
+ }
+
+ context->iterator = next;
+ insertions++;
+ }
+
+ BUMP_REDS(p, insertions / INSERTIONS_PER_RED);
+
+ ASSERT(is_list(context->iterator) || is_nil(context->iterator));
+ ASSERT(context->u.rhs_set.tree != NULL);
+
+ return is_nil(context->iterator);
+}
+
+static int subtract_enter_set_finish(Process *p, ErtsSubtractContext *context) {
+ context->stage = SUBTRACT_STAGE_SET_FINISH;
+
+ context->result_cdr = &context->result;
+ context->result = NIL;
+
+ context->iterator = context->lhs_original;
+
+ return subtract_continue(p, context);
+}
+
+static int subtract_set_finish(Process *p, ErtsSubtractContext *context) {
+ const Sint CHECKS_PER_RED = 8;
+ Sint checks, budget;
+
+ budget = CHECKS_PER_RED * ERTS_BIF_REDS_LEFT(p);
+ checks = 0;
+
+#ifdef DEBUG
+ budget = budget / 10 + 1;
+#endif
+
+ while (checks < budget && is_list(context->iterator)) {
+ subtract_tree_t *node;
+ const Eterm *cell;
+ Eterm value, next;
+
+ cell = list_val(context->iterator);
+ value = CAR(cell);
+ next = CDR(cell);
+
+ ASSERT(context->rhs_remaining > 0);
+
+ node = subtract_rbt_lookup(context->u.rhs_set.tree, value);
+
+ if (node == NULL) {
+ Eterm *hp = HAllocX(p, 2, context->lhs_remaining * 2);
+
+ *context->result_cdr = make_list(hp);
+ context->result_cdr = &CDR(hp);
+
+ CAR(hp) = value;
+ } else {
+ if (context->rhs_remaining-- == 1) {
+ *context->result_cdr = next;
+
+ BUMP_REDS(p, checks / CHECKS_PER_RED);
+
+ return 1;
+ }
+
+ if (node->count-- == 1) {
+ subtract_rbt_delete(&context->u.rhs_set.tree, node);
+ }
+ }
+
+ context->iterator = next;
+ context->lhs_remaining--;
+ checks++;
+ }
+
+ *context->result_cdr = NIL;
+
+ BUMP_REDS(p, checks / CHECKS_PER_RED);
+
+ if (is_list(context->iterator)) {
+ ASSERT(context->lhs_remaining > 0 && context->rhs_remaining > 0);
+ return 0;
+ }
+
+ return 1;
+}
+
+static int subtract_continue(Process *p, ErtsSubtractContext *context) {
+ switch (context->stage) {
+ case SUBTRACT_STAGE_START: {
+ return subtract_enter_len_lhs(p, context);
+ }
+
+ case SUBTRACT_STAGE_LEN_LHS: {
+ int res = subtract_get_length(p,
+ &context->iterator,
+ &context->lhs_remaining);
+
+ if (res != 1) {
+ return res;
+ }
+
+ if (context->lhs_remaining <= SUBTRACT_LHS_THRESHOLD) {
+ return subtract_enter_naive_lhs(p, context);
+ }
+
+ return subtract_enter_len_rhs(p, context);
+ }
+
+ case SUBTRACT_STAGE_NAIVE_LHS: {
+ return subtract_naive_lhs(p, context);
+ }
+
+ case SUBTRACT_STAGE_LEN_RHS: {
+ int res = subtract_get_length(p,
+ &context->iterator,
+ &context->rhs_remaining);
+
+ if (res != 1) {
+ return res;
+ }
+
+ /* We've walked through both lists fully now so we no longer need
+ * to check for errors past this point. */
+
+ if (context->rhs_remaining <= SUBTRACT_RHS_THRESHOLD) {
+ return subtract_enter_naive_rhs(p, context);
+ }
+
+ return subtract_enter_set_build(p, context);
+ }
+
+ case SUBTRACT_STAGE_NAIVE_RHS: {
+ return subtract_naive_rhs(p, context);
+ }
+
+ case SUBTRACT_STAGE_SET_BUILD: {
+ int res = subtract_set_build(p, context);
+
+ if (res != 1) {
+ return res;
+ }
+
+ return subtract_enter_set_finish(p, context);
+ }
+
+ case SUBTRACT_STAGE_SET_FINISH: {
+ return subtract_set_finish(p, context);
+ }
+
+ default:
+ ERTS_ASSERT(!"unreachable");
+ }
+}
+
+static int subtract_start(Process *p, Eterm lhs, Eterm rhs,
+ ErtsSubtractContext *context) {
+ context->stage = SUBTRACT_STAGE_START;
+
+ context->lhs_original = lhs;
+ context->rhs_original = rhs;
+
+ return subtract_continue(p, context);
+}
+
+/* erlang:'--'/2 */
+static Eterm subtract(Export *bif_entry, BIF_ALIST_2) {
+ Eterm lhs = BIF_ARG_1, rhs = BIF_ARG_2;
+
+ if ((is_list(lhs) || is_nil(lhs)) && (is_list(rhs) || is_nil(rhs))) {
+ /* We start with the context on the stack in the hopes that we won't
+ * have to trap. */
+ ErtsSubtractContext context;
+ int res;
+
+ res = subtract_start(BIF_P, lhs, rhs, &context);
+
+ if (res == 0) {
+ Eterm state_mref;
+
+ state_mref = subtract_create_trap_state(BIF_P, &context);
+ erts_set_gc_state(BIF_P, 0);
+
+ BIF_TRAP2(bif_entry, BIF_P, state_mref, NIL);
+ }
+
+ subtract_ctx_dtor(&context);
+
+ if (res < 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ BIF_RET(context.result);
+ } else if (is_internal_magic_ref(lhs)) {
+ ErtsSubtractContext *context;
+ int (*dtor)(Binary*);
+ Binary *magic_bin;
+
+ int res;
+
+ magic_bin = erts_magic_ref2bin(lhs);
+ dtor = ERTS_MAGIC_BIN_DESTRUCTOR(magic_bin);
+
+ if (dtor != subtract_ctx_bin_dtor) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ ASSERT(BIF_P->flags & F_DISABLE_GC);
+ ASSERT(rhs == NIL);
+
+ context = ERTS_MAGIC_BIN_DATA(magic_bin);
+ res = subtract_continue(BIF_P, context);
+
+ if (res == 0) {
+ BIF_TRAP2(bif_entry, BIF_P, lhs, NIL);
+ }
+
+ erts_set_gc_state(BIF_P, 1);
+
+ if (res < 0) {
+ ERTS_BIF_ERROR_TRAPPED2(BIF_P, BADARG, bif_entry,
+ context->lhs_original,
+ context->rhs_original);
+ }
+
+ BIF_RET(context->result);
+ }
+
+ ASSERT(!(BIF_P->flags & F_DISABLE_GC));
+
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+BIF_RETTYPE ebif_minusminus_2(BIF_ALIST_2) {
+ return subtract(bif_export[BIF_ebif_minusminus_2], BIF_CALL_ARGS);
+}
+
+BIF_RETTYPE subtract_2(BIF_ALIST_2) {
+ return subtract(bif_export[BIF_subtract_2], BIF_CALL_ARGS);
+}
+
+
BIF_RETTYPE lists_member_2(BIF_ALIST_2)
{
Eterm term;
Eterm list;
Eterm item;
int non_immed_key;
- int max_iter = 10 * CONTEXT_REDS;
+ int reds_left = ERTS_BIF_REDS_LEFT(BIF_P);
+ int max_iter = 16 * reds_left;
if (is_nil(BIF_ARG_2)) {
BIF_RET(am_false);
@@ -267,14 +890,15 @@ BIF_RETTYPE lists_member_2(BIF_ALIST_2)
}
item = CAR(list_val(list));
if ((item == term) || (non_immed_key && eq(item, term))) {
- BIF_RET2(am_true, CONTEXT_REDS - max_iter/10);
+ BIF_RET2(am_true, reds_left - max_iter/16);
}
list = CDR(list_val(list));
}
if (is_not_nil(list)) {
+ BUMP_REDS(BIF_P, reds_left - max_iter/16);
BIF_ERROR(BIF_P, BADARG);
}
- BIF_RET2(am_false, CONTEXT_REDS - max_iter/10);
+ BIF_RET2(am_false, reds_left - max_iter/16);
}
static BIF_RETTYPE lists_reverse_alloc(Process *c_p,
@@ -283,7 +907,7 @@ static BIF_RETTYPE lists_reverse_alloc(Process *c_p,
{
static const Uint CELLS_PER_RED = 40;
- Eterm *heap_top, *heap_end;
+ Eterm *alloc_top, *alloc_end;
Uint cells_left, max_cells;
Eterm list, tail;
Eterm lookahead;
@@ -305,18 +929,18 @@ static BIF_RETTYPE lists_reverse_alloc(Process *c_p,
BIF_ERROR(c_p, BADARG);
}
- heap_top = HAlloc(c_p, 2 * (max_cells - cells_left));
- heap_end = heap_top + 2 * (max_cells - cells_left);
+ alloc_top = HAlloc(c_p, 2 * (max_cells - cells_left));
+ alloc_end = alloc_top + 2 * (max_cells - cells_left);
- while (heap_top < heap_end) {
+ while (alloc_top < alloc_end) {
Eterm *pair = list_val(list);
- tail = CONS(heap_top, CAR(pair), tail);
+ tail = CONS(alloc_top, CAR(pair), tail);
list = CDR(pair);
ASSERT(is_list(list) || is_nil(list));
- heap_top += 2;
+ alloc_top += 2;
}
if (is_nil(list)) {
@@ -333,7 +957,7 @@ static BIF_RETTYPE lists_reverse_onheap(Process *c_p,
{
static const Uint CELLS_PER_RED = 60;
- Eterm *heap_top, *heap_end;
+ Eterm *alloc_start, *alloc_top, *alloc_end;
Uint cells_left, max_cells;
Eterm list, tail;
@@ -343,21 +967,27 @@ static BIF_RETTYPE lists_reverse_onheap(Process *c_p,
cells_left = max_cells = CELLS_PER_RED * (1 + ERTS_BIF_REDS_LEFT(c_p));
ASSERT(HEAP_LIMIT(c_p) >= HEAP_TOP(c_p) + 2);
- heap_end = HEAP_LIMIT(c_p) - 2;
- heap_top = HEAP_TOP(c_p);
+ alloc_start = HEAP_TOP(c_p);
+ alloc_end = HEAP_LIMIT(c_p) - 2;
+ alloc_top = alloc_start;
+
+ /* Don't process more cells than we have reductions for. */
+ alloc_end = MIN(alloc_top + (cells_left * 2), alloc_end);
- while (heap_top < heap_end && is_list(list)) {
+ while (alloc_top < alloc_end && is_list(list)) {
Eterm *pair = list_val(list);
- tail = CONS(heap_top, CAR(pair), tail);
+ tail = CONS(alloc_top, CAR(pair), tail);
list = CDR(pair);
- heap_top += 2;
+ alloc_top += 2;
}
- cells_left -= (heap_top - heap_end) / 2;
+ cells_left -= (alloc_top - alloc_start) / 2;
+ HEAP_TOP(c_p) = alloc_top;
+
+ ASSERT(cells_left >= 0 && cells_left <= max_cells);
BUMP_REDS(c_p, (max_cells - cells_left) / CELLS_PER_RED);
- HEAP_TOP(c_p) = heap_top;
if (is_nil(list)) {
BIF_RET(tail);
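
Every loop in this file meters its work against the caller's remaining reductions (ELEMENTS_PER_RED, CHECKS_PER_RED, CELLS_PER_RED). The recurring shape, condensed from subtract_get_length above (error handling for improper lists omitted):

    /* Sketch of the reduction-budget pattern: convert reductions into
     * work units, stop when the budget runs out, then pay back the work
     * actually done as consumed reductions. */
    static int metered_walk(Process *p, Eterm *iterator_p)
    {
        const Sint WORK_PER_RED = 16;           /* tuning constant */
        Sint budget = WORK_PER_RED * ERTS_BIF_REDS_LEFT(p);
        Sint work = 0;
        Eterm iterator = *iterator_p;

        while (work < budget && is_list(iterator)) {
            iterator = CDR(list_val(iterator)); /* one unit of work */
            work++;
        }

        BUMP_REDS(p, work / WORK_PER_RED);
        *iterator_p = iterator;
        return is_nil(iterator);                /* 1 = done, 0 = trap again */
    }
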
diff --git a/erts/emulator/beam/erl_bif_persistent.c b/erts/emulator/beam/erl_bif_persistent.c
new file mode 100644
index 0000000000..9dca768a18
--- /dev/null
+++ b/erts/emulator/beam/erl_bif_persistent.c
@@ -0,0 +1,983 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2018. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Purpose: Implement persistent term storage.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#include "erl_driver.h"
+#include "bif.h"
+#include "erl_map.h"
+#include "erl_binary.h"
+
+/*
+ * The limit for the number of persistent terms before
+ * a warning is issued.
+ */
+
+#define WARNING_LIMIT 20000
+#define XSTR(s) STR(s)
+#define STR(s) #s
+
+/*
+ * Parameters for the hash table.
+ */
+#define INITIAL_SIZE 8
+#define LOAD_FACTOR ((Uint)50)
+#define MUST_GROW(t) (((Uint)100) * t->num_entries >= LOAD_FACTOR * t->allocated)
+#define MUST_SHRINK(t) (((Uint)200) * t->num_entries <= LOAD_FACTOR * t->allocated && \
+ t->allocated > INITIAL_SIZE)
+
+typedef struct hash_table {
+ Uint allocated;
+ Uint num_entries;
+ Uint mask;
+ Uint first_to_delete;
+ Uint num_to_delete;
+ erts_atomic_t refc;
+ struct hash_table* delete_next;
+ ErtsThrPrgrLaterOp thr_prog_op;
+ Eterm term[1];
+} HashTable;
+
+typedef struct trap_data {
+ HashTable* table;
+ Uint idx;
+ Uint remaining;
+ Uint memory; /* Used by info/0 to count used memory */
+} TrapData;
+
+/*
+ * Declarations of local functions.
+ */
+
+static HashTable* create_initial_table(void);
+static Uint lookup(HashTable* hash_table, Eterm key);
+static HashTable* copy_table(HashTable* old_table, Uint new_size, int rehash);
+static HashTable* tmp_table_copy(HashTable* old_table);
+static int try_seize_update_permission(Process* c_p);
+static void release_update_permission(int release_updater);
+static void table_updater(void* table);
+static void table_deleter(void* hash_table);
+static void dec_table_refc(Process* c_p, HashTable* old_table);
+static void delete_table(Process* c_p, HashTable* table);
+static void mark_for_deletion(HashTable* hash_table, Uint entry_index);
+static ErtsLiteralArea* term_to_area(Eterm tuple);
+static void suspend_updater(Process* c_p);
+static Eterm do_get_all(Process* c_p, TrapData* trap_data, Eterm res);
+static Eterm do_info(Process* c_p, TrapData* trap_data);
+static void append_to_delete_queue(HashTable* table);
+static HashTable* next_to_delete(void);
+static Eterm alloc_trap_data(Process* c_p);
+static int cleanup_trap_data(Binary *bp);
+
+/*
+ * Traps
+ */
+
+static Export persistent_term_get_all_export;
+static BIF_RETTYPE persistent_term_get_all_trap(BIF_ALIST_2);
+static Export persistent_term_info_export;
+static BIF_RETTYPE persistent_term_info_trap(BIF_ALIST_1);
+
+/*
+ * Pointer to the current hash table.
+ */
+
+static erts_atomic_t the_hash_table;
+
+/*
+ * Queue of processes waiting to update the hash table.
+ */
+
+struct update_queue_item {
+ Process *p;
+ struct update_queue_item* next;
+};
+
+static erts_mtx_t update_table_permission_mtx;
+static struct update_queue_item* update_queue = NULL;
+static Process* updater_process = NULL;
+
+/* Protected by update_table_permission_mtx */
+static ErtsThrPrgrLaterOp thr_prog_op;
+static int issued_warning = 0;
+
+/*
+ * Queue of hash tables to be deleted.
+ */
+
+static erts_mtx_t delete_queue_mtx;
+static HashTable* delete_queue_head = NULL;
+static HashTable** delete_queue_tail = &delete_queue_head;
+
+/*
+ * The following variables are only used during crash dumping. They
+ * are initialized by erts_init_persistent_dumping().
+ */
+
+ErtsLiteralArea** erts_persistent_areas;
+Uint erts_num_persistent_areas;
+
+void erts_init_bif_persistent_term(void)
+{
+ HashTable* hash_table;
+
+ /*
+ * Initialize the mutex protecting updates.
+ */
+
+ erts_mtx_init(&update_table_permission_mtx,
+ "update_persistent_term_permission",
+ NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC |
+ ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+
+ /*
+ * Initialize delete queue.
+ */
+
+ erts_mtx_init(&delete_queue_mtx,
+ "persistent_term_delete_permission",
+ NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC |
+ ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+
+ /*
+ * Allocate a small initial hash table.
+ */
+
+ hash_table = create_initial_table();
+ erts_atomic_init_nob(&the_hash_table, (erts_aint_t)hash_table);
+
+ /*
+ * Initialize export entry for traps
+ */
+
+ erts_init_trap_export(&persistent_term_get_all_export,
+ am_persistent_term, am_get_all_trap, 2,
+ &persistent_term_get_all_trap);
+ erts_init_trap_export(&persistent_term_info_export,
+ am_persistent_term, am_info_trap, 1,
+ &persistent_term_info_trap);
+}
+
+BIF_RETTYPE persistent_term_put_2(BIF_ALIST_2)
+{
+ Eterm key;
+ Eterm term;
+ Eterm heap[3];
+ Eterm tuple;
+ HashTable* hash_table;
+ Uint term_size;
+ Uint lit_area_size;
+ ErlOffHeap code_off_heap;
+ ErtsLiteralArea* literal_area;
+ erts_shcopy_t info;
+ Eterm* ptr;
+ Uint entry_index;
+
+ if (!try_seize_update_permission(BIF_P)) {
+ ERTS_BIF_YIELD2(bif_export[BIF_persistent_term_put_2],
+ BIF_P, BIF_ARG_1, BIF_ARG_2);
+ }
+
+ hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+
+ key = BIF_ARG_1;
+ term = BIF_ARG_2;
+
+ entry_index = lookup(hash_table, key);
+
+ heap[0] = make_arityval(2);
+ heap[1] = key;
+ heap[2] = term;
+ tuple = make_tuple(heap);
+
+ if (is_nil(hash_table->term[entry_index])) {
+ Uint size = hash_table->allocated;
+ if (MUST_GROW(hash_table)) {
+ size *= 2;
+ }
+ hash_table = copy_table(hash_table, size, 0);
+ entry_index = lookup(hash_table, key);
+ hash_table->num_entries++;
+ } else {
+ Eterm tuple = hash_table->term[entry_index];
+ Eterm old_term;
+
+ ASSERT(is_tuple_arity(tuple, 2));
+ old_term = boxed_val(tuple)[2];
+ if (EQ(term, old_term)) {
+ /* Same value. No need to update anything. */
+ release_update_permission(0);
+ BIF_RET(am_ok);
+ } else {
+ /* Mark the old term for deletion. */
+ mark_for_deletion(hash_table, entry_index);
+ hash_table = copy_table(hash_table, hash_table->allocated, 0);
+ }
+ }
+
+ /*
+ * Preserve internal sharing in the term by using the
+ * sharing-preserving functions. However, literals must
+ * be copied in case the module holding them is unloaded.
+ */
+ INITIALIZE_SHCOPY(info);
+ info.copy_literals = 1;
+ term_size = copy_shared_calculate(tuple, &info);
+ ERTS_INIT_OFF_HEAP(&code_off_heap);
+ lit_area_size = ERTS_LITERAL_AREA_ALLOC_SIZE(term_size);
+ literal_area = erts_alloc(ERTS_ALC_T_LITERAL, lit_area_size);
+ ptr = &literal_area->start[0];
+ literal_area->end = ptr + term_size;
+ tuple = copy_shared_perform(tuple, term_size, &info, &ptr, &code_off_heap);
+ ASSERT(tuple_val(tuple) == literal_area->start);
+ literal_area->off_heap = code_off_heap.first;
+ DESTROY_SHCOPY(info);
+ erts_set_literal_tag(&tuple, literal_area->start, term_size);
+ hash_table->term[entry_index] = tuple;
+
+ erts_schedule_thr_prgr_later_op(table_updater, hash_table, &thr_prog_op);
+ suspend_updater(BIF_P);
+
+ /*
+ * Issue a warning once if the warning limit has been exceeded.
+ */
+
+ if (hash_table->num_entries > WARNING_LIMIT && issued_warning == 0) {
+ static char w[] =
+ "More than " XSTR(WARNING_LIMIT) " persistent terms "
+ "have been created.\n"
+ "It is recommended to avoid creating an excessive number of\n"
+ "persistent terms, as creation and deletion of persistent terms\n"
+ "will be slower as the number of persistent terms increases.\n";
+ issued_warning = 1;
+ erts_send_warning_to_logger_str(BIF_P->group_leader, w);
+ }
+
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_ok);
+}
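
Updates never touch the table that readers see: the writer builds a complete copy, installs the new term in the copy, and schedules table_updater through the thread-progress machinery so the swap happens only after every scheduler has passed a progress point. A simplified sketch of that split; the body of table_updater is outside this excerpt, so the swap shown is an assumption:

    /* Readers: one atomic load, no locks (as in persistent_term_get_1). */
    static HashTable *reader_snapshot(void)
    {
        return (HashTable *) erts_atomic_read_nob(&the_hash_table);
    }

    /* Updater (assumed): runs via erts_schedule_thr_prgr_later_op(),
     * i.e. only once no scheduler can still be probing the old table. */
    static void table_updater_sketch(void *arg)
    {
        erts_atomic_set_nob(&the_hash_table, (erts_aint_t) arg);
        /* ... old table is queued for deletion once unreachable ... */
    }
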
+
+BIF_RETTYPE persistent_term_get_0(BIF_ALIST_0)
+{
+ HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ TrapData* trap_data;
+ Eterm res = NIL;
+ Eterm magic_ref;
+ Binary* mbp;
+
+ magic_ref = alloc_trap_data(BIF_P);
+ mbp = erts_magic_ref2bin(magic_ref);
+ trap_data = ERTS_MAGIC_BIN_DATA(mbp);
+ trap_data->table = hash_table;
+ trap_data->idx = 0;
+ trap_data->remaining = hash_table->num_entries;
+ res = do_get_all(BIF_P, trap_data, res);
+ if (trap_data->remaining == 0) {
+ BUMP_REDS(BIF_P, hash_table->num_entries);
+ trap_data->table = NULL; /* Prevent refc decrement */
+ BIF_RET(res);
+ } else {
+ /*
+         * Increment the ref counter to prevent an update operation
+         * (by put/2 or erase/1) from deleting this hash table.
+ */
+ erts_atomic_inc_nob(&hash_table->refc);
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(&persistent_term_get_all_export, BIF_P, magic_ref, res);
+ }
+}
+
+BIF_RETTYPE persistent_term_get_1(BIF_ALIST_1)
+{
+ Eterm key = BIF_ARG_1;
+ HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ Uint entry_index;
+ Eterm term;
+
+ entry_index = lookup(hash_table, key);
+ term = hash_table->term[entry_index];
+ if (is_boxed(term)) {
+ ASSERT(is_tuple_arity(term, 2));
+ BIF_RET(tuple_val(term)[2]);
+ }
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+BIF_RETTYPE persistent_term_erase_1(BIF_ALIST_1)
+{
+ Eterm key = BIF_ARG_1;
+ HashTable* old_table;
+ HashTable* new_table;
+ Uint entry_index;
+ Eterm old_term;
+
+ if (!try_seize_update_permission(BIF_P)) {
+ ERTS_BIF_YIELD1(bif_export[BIF_persistent_term_erase_1],
+ BIF_P, BIF_ARG_1);
+ }
+
+ old_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ entry_index = lookup(old_table, key);
+ old_term = old_table->term[entry_index];
+ if (is_boxed(old_term)) {
+ Uint new_size;
+ HashTable* tmp_table;
+
+ /*
+ * Since we don't use any delete markers, we must rehash
+ * the table when deleting terms to ensure that all terms
+ * can still be reached if there are hash collisions.
+ * We can't rehash in place and it would not be safe to modify
+ * the old table yet, so we will first need a new
+ * temporary table copy of the same size as the old one.
+ */
+
+ ASSERT(is_tuple_arity(old_term, 2));
+ tmp_table = tmp_table_copy(old_table);
+
+ /*
+ * Delete the term from the temporary table. Then copy the
+ * temporary table to a new table, rehashing the entries
+ * while copying.
+ */
+
+ tmp_table->term[entry_index] = NIL;
+ tmp_table->num_entries--;
+ new_size = tmp_table->allocated;
+ if (MUST_SHRINK(tmp_table)) {
+ new_size /= 2;
+ }
+ new_table = copy_table(tmp_table, new_size, 1);
+ erts_free(ERTS_ALC_T_TMP, tmp_table);
+
+ mark_for_deletion(old_table, entry_index);
+ erts_schedule_thr_prgr_later_op(table_updater, new_table, &thr_prog_op);
+ suspend_updater(BIF_P);
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
+ }
+
+ /*
+ * Key is not present. Nothing to do.
+ */
+
+ ASSERT(is_nil(old_term));
+ release_update_permission(0);
+ BIF_RET(am_false);
+}
+
+BIF_RETTYPE erts_internal_erase_persistent_terms_0(BIF_ALIST_0)
+{
+ HashTable* old_table;
+ HashTable* new_table;
+
+ if (!try_seize_update_permission(BIF_P)) {
+ ERTS_BIF_YIELD0(bif_export[BIF_erts_internal_erase_persistent_terms_0],
+ BIF_P);
+ }
+ old_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ old_table->first_to_delete = 0;
+ old_table->num_to_delete = old_table->allocated;
+ new_table = create_initial_table();
+ erts_schedule_thr_prgr_later_op(table_updater, new_table, &thr_prog_op);
+ suspend_updater(BIF_P);
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
+}
+
+BIF_RETTYPE persistent_term_info_0(BIF_ALIST_0)
+{
+ HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ TrapData* trap_data;
+ Eterm res = NIL;
+ Eterm magic_ref;
+ Binary* mbp;
+
+ magic_ref = alloc_trap_data(BIF_P);
+ mbp = erts_magic_ref2bin(magic_ref);
+ trap_data = ERTS_MAGIC_BIN_DATA(mbp);
+ trap_data->table = hash_table;
+ trap_data->idx = 0;
+ trap_data->remaining = hash_table->num_entries;
+ trap_data->memory = 0;
+ res = do_info(BIF_P, trap_data);
+ if (trap_data->remaining == 0) {
+ BUMP_REDS(BIF_P, hash_table->num_entries);
+ trap_data->table = NULL; /* Prevent refc decrement */
+ BIF_RET(res);
+ } else {
+ /*
+         * Increment the ref counter to prevent an update operation
+         * (by put/2 or erase/1) from deleting this hash table.
+ */
+ erts_atomic_inc_nob(&hash_table->refc);
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(&persistent_term_info_export, BIF_P, magic_ref, res);
+ }
+}
+
+Uint
+erts_persistent_term_count(void)
+{
+ HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ return hash_table->num_entries;
+}
+
+void
+erts_init_persistent_dumping(void)
+{
+ HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ ErtsLiteralArea** area_p;
+ Uint i;
+
+ /*
+ * Overwrite the array of Eterms in the current hash table
+ * with pointers to literal areas.
+ */
+
+ erts_persistent_areas = (ErtsLiteralArea **) hash_table->term;
+ erts_num_persistent_areas = hash_table->num_entries;
+ area_p = erts_persistent_areas;
+ for (i = 0; i < hash_table->allocated; i++) {
+ Eterm term = hash_table->term[i];
+
+ if (is_boxed(term)) {
+ *area_p++ = term_to_area(term);
+ }
+ }
+}
+
+/*
+ * Local functions.
+ */
+
+static HashTable*
+create_initial_table(void)
+{
+ HashTable* hash_table;
+ int i;
+
+ hash_table = (HashTable *) erts_alloc(ERTS_ALC_T_PERSISTENT_TERM,
+ sizeof(HashTable)+sizeof(Eterm) *
+ (INITIAL_SIZE-1));
+ hash_table->allocated = INITIAL_SIZE;
+ hash_table->num_entries = 0;
+ hash_table->mask = INITIAL_SIZE-1;
+ hash_table->first_to_delete = 0;
+ hash_table->num_to_delete = 0;
+ erts_atomic_init_nob(&hash_table->refc, (erts_aint_t)1);
+ for (i = 0; i < INITIAL_SIZE; i++) {
+ hash_table->term[i] = NIL;
+ }
+ return hash_table;
+}
+
+static BIF_RETTYPE
+persistent_term_get_all_trap(BIF_ALIST_2)
+{
+ TrapData* trap_data;
+ Eterm res = BIF_ARG_2;
+ Uint bump_reds;
+ Binary* mbp;
+
+ ASSERT(is_list(BIF_ARG_2));
+ mbp = erts_magic_ref2bin(BIF_ARG_1);
+ trap_data = ERTS_MAGIC_BIN_DATA(mbp);
+ bump_reds = trap_data->remaining;
+ res = do_get_all(BIF_P, trap_data, res);
+ ASSERT(is_list(res));
+ if (trap_data->remaining > 0) {
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(&persistent_term_get_all_export, BIF_P, BIF_ARG_1, res);
+ } else {
+ /*
+ * Decrement ref count (and possibly delete the hash table
+ * and associated literal area).
+ */
+ dec_table_refc(BIF_P, trap_data->table);
+ trap_data->table = NULL; /* Prevent refc decrement */
+ BUMP_REDS(BIF_P, bump_reds);
+ BIF_RET(res);
+ }
+}
+
+static Eterm
+do_get_all(Process* c_p, TrapData* trap_data, Eterm res)
+{
+ HashTable* hash_table;
+ Uint remaining;
+ Uint idx;
+ Uint max_iter;
+ Uint i;
+ Eterm* hp;
+ Uint heap_size;
+ struct copy_term {
+ Uint key_size;
+ Eterm* tuple_ptr;
+ } *copy_data;
+
+ hash_table = trap_data->table;
+ idx = trap_data->idx;
+#if defined(DEBUG) || defined(VALGRIND)
+ max_iter = 50;
+#else
+ max_iter = ERTS_BIF_REDS_LEFT(c_p);
+#endif
+ remaining = trap_data->remaining < max_iter ?
+ trap_data->remaining : max_iter;
+ trap_data->remaining -= remaining;
+
+ copy_data = (struct copy_term *) erts_alloc(ERTS_ALC_T_TMP,
+ remaining *
+ sizeof(struct copy_term));
+ i = 0;
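+    /*
+     * First pass: record each tuple pointer and its key size to get
+     * the exact heap need (2 words per cons cell plus 3 words per
+     * two-tuple). The second pass below copies the keys and builds
+     * the result list.
+     */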
+ heap_size = (2 + 3) * remaining;
+ while (remaining != 0) {
+ Eterm term = hash_table->term[idx];
+ if (is_tuple(term)) {
+ Uint key_size;
+ Eterm* tup_val;
+
+ ASSERT(is_tuple_arity(term, 2));
+ tup_val = tuple_val(term);
+ key_size = size_object(tup_val[1]);
+ copy_data[i].key_size = key_size;
+ copy_data[i].tuple_ptr = tup_val;
+ heap_size += key_size;
+ i++;
+ remaining--;
+ }
+ idx++;
+ }
+ trap_data->idx = idx;
+
+ hp = HAlloc(c_p, heap_size);
+ remaining = i;
+ for (i = 0; i < remaining; i++) {
+ Eterm* tuple_ptr;
+ Uint key_size;
+ Eterm key;
+ Eterm tup;
+
+ tuple_ptr = copy_data[i].tuple_ptr;
+ key_size = copy_data[i].key_size;
+ key = copy_struct(tuple_ptr[1], key_size, &hp, &c_p->off_heap);
+ tup = TUPLE2(hp, key, tuple_ptr[2]);
+ hp += 3;
+ res = CONS(hp, tup, res);
+ hp += 2;
+ }
+ erts_free(ERTS_ALC_T_TMP, copy_data);
+ return res;
+}
+
+static BIF_RETTYPE
+persistent_term_info_trap(BIF_ALIST_1)
+{
+    TrapData* trap_data;
+ Eterm res;
+ Uint bump_reds;
+ Binary* mbp;
+
+ mbp = erts_magic_ref2bin(BIF_ARG_1);
+ trap_data = ERTS_MAGIC_BIN_DATA(mbp);
+ bump_reds = trap_data->remaining;
+ res = do_info(BIF_P, trap_data);
+ if (trap_data->remaining > 0) {
+ ASSERT(res == am_ok);
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP1(&persistent_term_info_export, BIF_P, BIF_ARG_1);
+ } else {
+ /*
+ * Decrement ref count (and possibly delete the hash table
+ * and associated literal area).
+ */
+ dec_table_refc(BIF_P, trap_data->table);
+ trap_data->table = NULL; /* Prevent refc decrement */
+ BUMP_REDS(BIF_P, bump_reds);
+ ASSERT(is_map(res));
+ BIF_RET(res);
+ }
+}
+
+#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
+
+static Eterm
+do_info(Process* c_p, TrapData* trap_data)
+{
+ HashTable* hash_table;
+ Uint remaining;
+ Uint idx;
+ Uint max_iter;
+
+ hash_table = trap_data->table;
+ idx = trap_data->idx;
+#if defined(DEBUG) || defined(VALGRIND)
+ max_iter = 50;
+#else
+ max_iter = ERTS_BIF_REDS_LEFT(c_p);
+#endif
+ remaining = trap_data->remaining < max_iter ? trap_data->remaining : max_iter;
+ trap_data->remaining -= remaining;
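+    /*
+     * Sum up the size of each literal area. The "-1" compensates for
+     * the one-element start[] array included in sizeof(ErtsLiteralArea).
+     */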
+ while (remaining != 0) {
+ if (is_boxed(hash_table->term[idx])) {
+ ErtsLiteralArea* area;
+ area = term_to_area(hash_table->term[idx]);
+ trap_data->memory += sizeof(ErtsLiteralArea) +
+ sizeof(Eterm) * (area->end - area->start - 1);
+ remaining--;
+ }
+ idx++;
+ }
+ trap_data->idx = idx;
+ if (trap_data->remaining > 0) {
+ return am_ok; /* Dummy return value */
+ } else {
+ Eterm* hp;
+ Eterm count_term;
+ Eterm memory_term;
+ Eterm res;
+ Uint memory;
+ Uint hsz = MAP_SZ(2);
+
+ memory = sizeof(HashTable) + (trap_data->table->allocated-1) *
+ sizeof(Eterm) + trap_data->memory;
+ (void) erts_bld_uint(NULL, &hsz, hash_table->num_entries);
+ (void) erts_bld_uint(NULL, &hsz, memory);
+ hp = HAlloc(c_p, hsz);
+ count_term = erts_bld_uint(&hp, NULL, hash_table->num_entries);
+ memory_term = erts_bld_uint(&hp, NULL, memory);
+ res = MAP2(hp, am_count, count_term, am_memory, memory_term);
+ return res;
+ }
+}
+
+#undef DECL_AM
+
+static Eterm
+alloc_trap_data(Process* c_p)
+{
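+    /*
+     * Keep the trap state in a magic binary so that cleanup_trap_data()
+     * runs even if the trapping process is killed.
+     */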
+ Binary* mbp = erts_create_magic_binary(sizeof(TrapData),
+ cleanup_trap_data);
+ Eterm* hp;
+
+ hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE);
+ return erts_mk_magic_ref(&hp, &MSO(c_p), mbp);
+}
+
+static int
+cleanup_trap_data(Binary *bp)
+{
+ TrapData* trap_data = ERTS_MAGIC_BIN_DATA(bp);
+
+ if (trap_data->table) {
+ /*
+ * The process has been killed and is now exiting.
+ * Decrement the reference counter for the table.
+ */
+ dec_table_refc(NULL, trap_data->table);
+ }
+ return 1;
+}
+
+static Uint
+lookup(HashTable* hash_table, Eterm key)
+{
+ Uint mask = hash_table->mask;
+ Eterm* table = hash_table->term;
+ Uint32 idx = make_internal_hash(key, 0);
+ Eterm term;
+
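+    /*
+     * Open addressing with linear probing: scan from the hash position
+     * until the key or an empty (NIL) slot is found.
+     */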
+ do {
+ idx++;
+ term = table[idx & mask];
+ } while (is_boxed(term) && !EQ(key, (tuple_val(term))[1]));
+ return idx & mask;
+}
+
+static HashTable*
+tmp_table_copy(HashTable* old_table)
+{
+ Uint size = old_table->allocated;
+ HashTable* tmp_table;
+ Uint i;
+
+ tmp_table = (HashTable *) erts_alloc(ERTS_ALC_T_TMP,
+ sizeof(HashTable) +
+ sizeof(Eterm) * (size-1));
+ *tmp_table = *old_table;
+ for (i = 0; i < size; i++) {
+ tmp_table->term[i] = old_table->term[i];
+ }
+ return tmp_table;
+}
+
+static HashTable*
+copy_table(HashTable* old_table, Uint new_size, int rehash)
+{
+ HashTable* new_table;
+ Uint old_size = old_table->allocated;
+ Uint i;
+
+ new_table = (HashTable *) erts_alloc(ERTS_ALC_T_PERSISTENT_TERM,
+ sizeof(HashTable) +
+ sizeof(Eterm) * (new_size-1));
+ if (old_table->allocated == new_size && !rehash) {
+ /*
+ * Same size and no key deleted. Make an exact copy of the table.
+ */
+ *new_table = *old_table;
+ for (i = 0; i < new_size; i++) {
+ new_table->term[i] = old_table->term[i];
+ }
+ } else {
+ /*
+ * The size of the table has changed or an element has been
+ * deleted. Must rehash, by inserting all old terms into the
+ * new (empty) table.
+ */
+ new_table->allocated = new_size;
+ new_table->num_entries = old_table->num_entries;
+ new_table->mask = new_size - 1;
+ for (i = 0; i < new_size; i++) {
+ new_table->term[i] = NIL;
+ }
+ for (i = 0; i < old_size; i++) {
+ if (is_tuple(old_table->term[i])) {
+ Eterm key = tuple_val(old_table->term[i])[1];
+ Uint entry_index = lookup(new_table, key);
+ ASSERT(is_nil(new_table->term[entry_index]));
+ new_table->term[entry_index] = old_table->term[i];
+ }
+ }
+ }
+ new_table->first_to_delete = 0;
+ new_table->num_to_delete = 0;
+ erts_atomic_init_nob(&new_table->refc, (erts_aint_t)1);
+ return new_table;
+}
+
+static void
+mark_for_deletion(HashTable* hash_table, Uint entry_index)
+{
+ hash_table->first_to_delete = entry_index;
+ hash_table->num_to_delete = 1;
+}
+
+static ErtsLiteralArea*
+term_to_area(Eterm tuple)
+{
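+    /*
+     * The tuple is the first term in its literal area (see
+     * persistent_term_put_2()), so the area header is located at a
+     * fixed negative offset from the tuple data.
+     */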
+ ASSERT(is_tuple_arity(tuple, 2));
+ return (ErtsLiteralArea *) (((char *) tuple_val(tuple)) -
+ offsetof(ErtsLiteralArea, start));
+}
+
+static void
+table_updater(void* data)
+{
+ HashTable* old_table;
+ HashTable* new_table;
+
+ old_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ new_table = (HashTable *) data;
+ ASSERT(new_table->num_to_delete == 0);
+ erts_atomic_set_nob(&the_hash_table, (erts_aint_t)new_table);
+ append_to_delete_queue(old_table);
+ erts_schedule_thr_prgr_later_op(table_deleter,
+ old_table,
+ &old_table->thr_prog_op);
+ release_update_permission(1);
+}
+
+static void
+table_deleter(void* data)
+{
+ HashTable* old_table = (HashTable *) data;
+
+ dec_table_refc(NULL, old_table);
+}
+
+static void
+dec_table_refc(Process* c_p, HashTable* old_table)
+{
+ erts_aint_t refc = erts_atomic_dec_read_nob(&old_table->refc);
+
+ if (refc == 0) {
+ HashTable* to_delete;
+
+ while ((to_delete = next_to_delete()) != NULL) {
+ delete_table(c_p, to_delete);
+ }
+ }
+}
+
+static void
+delete_table(Process* c_p, HashTable* table)
+{
+ Uint idx = table->first_to_delete;
+ Uint n = table->num_to_delete;
+
+ /*
+ * There are no longer any references to this hash table.
+ *
+ * Any literals pointed for deletion can be queued for
+ * deletion and the table itself can be deallocated.
+ */
+
+#ifdef DEBUG
+ if (n == 1) {
+ ASSERT(is_tuple_arity(table->term[idx], 2));
+ }
+#endif
+
+ while (n > 0) {
+ Eterm term = table->term[idx];
+
+ if (is_tuple_arity(term, 2)) {
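+            /*
+             * An immediate value cannot be referenced from any process
+             * heap, so the literal area can be released at once;
+             * otherwise it must pass through the release queue.
+             */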
+ if (is_immed(tuple_val(term)[2])) {
+ erts_release_literal_area(term_to_area(term));
+ } else {
+ erts_queue_release_literals(c_p, term_to_area(term));
+ }
+ }
+ idx++, n--;
+ }
+ erts_free(ERTS_ALC_T_PERSISTENT_TERM, table);
+}
+
+/*
+ * Caller *must* yield if this function returns 0.
+ */
+
+static int
+try_seize_update_permission(Process* c_p)
+{
+ int success;
+
+ ASSERT(!erts_thr_progress_is_blocking()); /* to avoid deadlock */
+ ASSERT(c_p != NULL);
+
+ erts_mtx_lock(&update_table_permission_mtx);
+ ASSERT(updater_process != c_p);
+ success = (updater_process == NULL);
+ if (success) {
+ updater_process = c_p;
+ } else {
+ struct update_queue_item* qitem;
+ qitem = erts_alloc(ERTS_ALC_T_PERSISTENT_LOCK_Q, sizeof(*qitem));
+ qitem->p = c_p;
+ erts_proc_inc_refc(c_p);
+ qitem->next = update_queue;
+ update_queue = qitem;
+ erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
+ }
+ erts_mtx_unlock(&update_table_permission_mtx);
+ return success;
+}
+
+static void
+release_update_permission(int release_updater)
+{
+ erts_mtx_lock(&update_table_permission_mtx);
+ ASSERT(updater_process != NULL);
+
+ if (release_updater) {
+ erts_proc_lock(updater_process, ERTS_PROC_LOCK_STATUS);
+ if (!ERTS_PROC_IS_EXITING(updater_process)) {
+ erts_resume(updater_process, ERTS_PROC_LOCK_STATUS);
+ }
+ erts_proc_unlock(updater_process, ERTS_PROC_LOCK_STATUS);
+ }
+ updater_process = NULL;
+
+ while (update_queue != NULL) { /* Unleash the entire herd */
+ struct update_queue_item* qitem = update_queue;
+ erts_proc_lock(qitem->p, ERTS_PROC_LOCK_STATUS);
+ if (!ERTS_PROC_IS_EXITING(qitem->p)) {
+ erts_resume(qitem->p, ERTS_PROC_LOCK_STATUS);
+ }
+ erts_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS);
+ update_queue = qitem->next;
+ erts_proc_dec_refc(qitem->p);
+ erts_free(ERTS_ALC_T_PERSISTENT_LOCK_Q, qitem);
+ }
+ erts_mtx_unlock(&update_table_permission_mtx);
+}
+
+static void
+suspend_updater(Process* c_p)
+{
+#ifdef DEBUG
+ ASSERT(c_p != NULL);
+ erts_mtx_lock(&update_table_permission_mtx);
+ ASSERT(updater_process == c_p);
+ erts_mtx_unlock(&update_table_permission_mtx);
+#endif
+ erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
+}
+
+static void
+append_to_delete_queue(HashTable* table)
+{
+ erts_mtx_lock(&delete_queue_mtx);
+ table->delete_next = NULL;
+ *delete_queue_tail = table;
+ delete_queue_tail = &table->delete_next;
+ erts_mtx_unlock(&delete_queue_mtx);
+}
+
+static HashTable*
+next_to_delete(void)
+{
+ HashTable* table;
+
+ erts_mtx_lock(&delete_queue_mtx);
+ table = delete_queue_head;
+ if (table) {
+ if (erts_atomic_read_nob(&table->refc)) {
+ /*
+ * This hash table is still referenced. Hash tables
+ * must be deleted in order, so we return a NULL
+ * pointer.
+ */
+ table = NULL;
+ } else {
+ /*
+ * Remove the first hash table from the queue.
+ */
+ delete_queue_head = table->delete_next;
+ if (delete_queue_head == NULL) {
+ delete_queue_tail = &delete_queue_head;
+ }
+ }
+ }
+ erts_mtx_unlock(&delete_queue_mtx);
+ return table;
+}
diff --git a/erts/emulator/beam/erl_bif_unique.h b/erts/emulator/beam/erl_bif_unique.h
index 40b70667c0..944788c67c 100644
--- a/erts/emulator/beam/erl_bif_unique.h
+++ b/erts/emulator/beam/erl_bif_unique.h
@@ -242,11 +242,11 @@ erts_internal_ref_number_cmp(Uint32 num1[ERTS_REF_NUMBERS],
Uint32 num2[ERTS_REF_NUMBERS])
{
if (num1[2] != num2[2])
- return (int) ((Sint64) num1[2] - (Sint64) num2[2]);
+ return num1[2] > num2[2] ? 1 : -1;
if (num1[1] != num2[1])
- return (int) ((Sint64) num1[1] - (Sint64) num2[1]);
+ return num1[1] > num2[1] ? 1 : -1;
if (num1[0] != num2[0])
- return (int) ((Sint64) num1[0] - (Sint64) num2[0]);
+ return num1[0] > num2[0] ? 1 : -1;
return 0;
}
diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c
index 788718ab09..45e4be2426 100644
--- a/erts/emulator/beam/erl_db_tree.c
+++ b/erts/emulator/beam/erl_db_tree.c
@@ -1860,22 +1860,14 @@ static int db_select_replace_tree(Process *p, DbTable *tbl, Eterm tid,
sc.mp = mpi.mp;
- stack = get_static_stack(tb);
if (!mpi.got_partial && mpi.some_limitation &&
CMP_EQ(mpi.least,mpi.most)) {
- TreeDbTerm* term = *(mpi.save_term);
doit_select_replace(tb,mpi.save_term,&sc,0 /* dummy */);
- if (stack != NULL) {
- if (TOP_NODE(stack) == term)
- // throw away potentially invalid reference
- REPLACE_TOP_NODE(stack, *(mpi.save_term));
- release_stack(tb, stack);
- }
+            reset_static_stack(tb);   /* may refer to the replaced term */
RET_TO_BIF(erts_make_integer(sc.replaced,p),DB_ERROR_NONE);
}
- if (stack == NULL)
- stack = get_any_stack(tb);
+ stack = get_any_stack(tb);
if (mpi.some_limitation) {
if ((this = find_next_from_pb_key(tb, stack, mpi.most)) != NULL) {
diff --git a/erts/emulator/beam/erl_dirty_bif.tab b/erts/emulator/beam/erl_dirty_bif.tab
index 086275fbe5..20299ff604 100644
--- a/erts/emulator/beam/erl_dirty_bif.tab
+++ b/erts/emulator/beam/erl_dirty_bif.tab
@@ -59,8 +59,6 @@ dirty-cpu erts_debug:lcnt_clear/0
dirty-cpu-test erlang:'++'/2
dirty-cpu-test erlang:append/2
-dirty-cpu-test erlang:'--'/2
-dirty-cpu-test erlang:subtract/2
dirty-cpu-test erlang:iolist_size/1
dirty-cpu-test erlang:make_tuple/2
dirty-cpu-test erlang:make_tuple/3
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 4a80c00416..3a50b294d1 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -681,7 +681,7 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
ErtsMonotonicTime start_time;
ErtsSchedulerData *esdp = erts_proc_sched_data(p);
erts_aint32_t state;
- ERTS_MSACC_PUSH_STATE_M();
+ ERTS_MSACC_PUSH_STATE();
#ifdef USE_VM_PROBES
DTRACE_CHARBUF(pidbuf, DTRACE_TERM_BUF_SIZE);
#endif
@@ -711,7 +711,7 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
else if (p->live_hf_end != ERTS_INVALID_HFRAG_PTR)
live_hf_end = p->live_hf_end;
- ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_GC);
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_GC);
erts_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
if (erts_system_monitor_long_gc != 0)
@@ -759,7 +759,7 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
gc_trace_end_tag = am_gc_minor_end;
} else {
do_major_collection:
- ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_GC_FULL);
+ ERTS_MSACC_SET_STATE_CACHED_X(ERTS_MSACC_STATE_GC_FULL);
if (IS_TRACED_FL(p, F_TRACE_GC)) {
trace_gc(p, am_gc_major_start, need, THE_NON_VALUE);
}
@@ -770,7 +770,7 @@ do_major_collection:
p->flags &= ~(F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC);
DTRACE2(gc_major_end, pidbuf, reclaimed_now);
gc_trace_end_tag = am_gc_major_end;
- ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_GC);
+ ERTS_MSACC_SET_STATE_CACHED_X(ERTS_MSACC_STATE_GC);
}
reset_active_writer(p);
@@ -800,7 +800,7 @@ do_major_collection:
/* We have to make sure that we have space for need on the heap */
res = delay_garbage_collection(p, live_hf_end, need, fcalls);
- ERTS_MSACC_POP_STATE_M();
+ ERTS_MSACC_POP_STATE();
return res;
}
@@ -843,7 +843,7 @@ do_major_collection:
FLAGS(p) &= ~(F_FORCE_GC|F_HIBERNATED);
p->live_hf_end = ERTS_INVALID_HFRAG_PTR;
- ERTS_MSACC_POP_STATE_M();
+ ERTS_MSACC_POP_STATE();
#ifdef CHECK_FOR_HOLES
/*
@@ -1133,9 +1133,28 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
reds = (Sint64) garbage_collect(p, ERTS_INVALID_HFRAG_PTR, 0,
p->arg_reg, p->arity, fcalls,
ygen_usage);
+ if (ERTS_PROC_IS_EXITING(p)) {
+ return 0;
+ }
ASSERT(!(p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)));
+ if (MAX_HEAP_SIZE_GET(p)) {
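+        /*
+         * Make sure that the heap size does not exceed the maximum
+         * heap size once the literal area has been copied in; if it
+         * does, kill the process.
+         */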
+ Uint new_heap_size;
+ Uint old_heap_size;
+ Uint total_heap_size;
+
+ new_heap_size = HEAP_END(p) - HEAP_START(p);
+ old_heap_size = erts_next_heap_size(lit_size, 0);
+ total_heap_size = new_heap_size + old_heap_size;
+ if (MAX_HEAP_SIZE_GET(p) < total_heap_size &&
+ reached_max_heap_size(p, total_heap_size,
+ new_heap_size, old_heap_size)) {
+ erts_set_self_exiting(p, am_killed);
+ return 0;
+ }
+ }
+
/*
* Set GC state.
*/
@@ -2440,7 +2459,7 @@ erts_copy_one_frag(Eterm** hpp, ErlOffHeap* off_heap,
*hpp = hp;
for (i = 0; i < nrefs; i++) {
- if (is_not_immed(refs[i]))
+ if (is_not_immed(refs[i]) && !erts_is_literal(refs[i],ptr_val(refs[i])))
refs[i] = offset_ptr(refs[i], offs);
}
bp->off_heap.first = NULL;
diff --git a/erts/emulator/beam/erl_hl_timer.c b/erts/emulator/beam/erl_hl_timer.c
index 6ec6f8065e..ef7a55fa38 100644
--- a/erts/emulator/beam/erl_hl_timer.c
+++ b/erts/emulator/beam/erl_hl_timer.c
@@ -3041,15 +3041,23 @@ erts_set_port_timer(Port *c_prt, Sint64 tmo)
check_canceled_queue(esdp, esdp->timer_service);
- timeout_pos = get_timeout_pos(erts_get_monotonic_time(esdp), tmo);
-
- create_timer = (tmo < ERTS_TIMER_WHEEL_MSEC
- ? create_tw_timer
- : create_hl_timer);
- tmr = (void *) create_timer(esdp, timeout_pos, 0, ERTS_TMR_PORT,
- (void *) c_prt, c_prt->common.id,
- THE_NON_VALUE, NULL, NULL, NULL);
- erts_atomic_set_relb(&c_prt->common.timer, (erts_aint_t) tmr);
+ if (tmo == 0) {
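+        /* Zero timeout: mark the timer as timed out and schedule the
+           timeout task at once, bypassing the timer wheel. */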
+ erts_atomic_set_relb(&c_prt->common.timer, ERTS_PTMR_TIMEDOUT);
+ erts_port_task_schedule(c_prt->common.id,
+ &c_prt->timeout_task,
+ ERTS_PORT_TASK_TIMEOUT);
+ } else {
+
+ timeout_pos = get_timeout_pos(erts_get_monotonic_time(esdp), tmo);
+
+ create_timer = (tmo < ERTS_TIMER_WHEEL_MSEC
+ ? create_tw_timer
+ : create_hl_timer);
+ tmr = (void *) create_timer(esdp, timeout_pos, 0, ERTS_TMR_PORT,
+ (void *) c_prt, c_prt->common.id,
+ THE_NON_VALUE, NULL, NULL, NULL);
+ erts_atomic_set_relb(&c_prt->common.timer, (erts_aint_t) tmr);
+ }
}
void
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 57c6c10c7f..99e788c718 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -354,6 +354,7 @@ erl_init(int ncpu,
erts_init_bif();
erts_init_bif_chksum();
erts_init_bif_binary();
+ erts_init_bif_persistent_term();
erts_init_bif_re();
erts_init_unicode(); /* after RE to get access to PCRE unicode */
erts_init_external();
@@ -2358,8 +2359,8 @@ system_cleanup(int flush_async)
* The exiting thread might be waiting for
* us to block; need to update status...
*/
- erts_thr_progress_active(NULL, 0);
- erts_thr_progress_prepare_wait(NULL);
+ erts_thr_progress_active(erts_thr_prgr_data(NULL), 0);
+ erts_thr_progress_prepare_wait(erts_thr_prgr_data(NULL));
}
/* Wait forever... */
while (1)
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 463ae898a3..1416c5f96c 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -97,6 +97,8 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "proc_btm", "pid" },
{ "dist_entry", "address" },
{ "dist_entry_links", "address" },
+ { "update_persistent_term_permission", NULL },
+ { "persistent_term_delete_permission", NULL },
{ "code_write_permission", NULL },
{ "purge_state", NULL },
{ "proc_status", "pid" },
diff --git a/erts/emulator/beam/erl_monitor_link.h b/erts/emulator/beam/erl_monitor_link.h
index 9ff8aa509a..ed7bf7d54a 100644
--- a/erts/emulator/beam/erl_monitor_link.h
+++ b/erts/emulator/beam/erl_monitor_link.h
@@ -1387,11 +1387,14 @@ ERTS_GLB_INLINE void
erts_monitor_release(ErtsMonitor *mon)
{
ErtsMonitorData *mdp = erts_monitor_to_data(mon);
- ERTS_ML_ASSERT(!(mon->flags & ERTS_ML_FLG_IN_TABLE));
ERTS_ML_ASSERT(erts_atomic32_read_nob(&mdp->refc) > 0);
- if (erts_atomic32_dec_read_nob(&mdp->refc) == 0)
+ if (erts_atomic32_dec_read_nob(&mdp->refc) == 0) {
+ ERTS_ML_ASSERT(!(mdp->origin.flags & ERTS_ML_FLG_IN_TABLE));
+ ERTS_ML_ASSERT(!(mdp->target.flags & ERTS_ML_FLG_IN_TABLE));
+
erts_monitor_destroy__(mdp);
+ }
}
ERTS_GLB_INLINE void
@@ -1399,12 +1402,14 @@ erts_monitor_release_both(ErtsMonitorData *mdp)
{
ERTS_ML_ASSERT((mdp->origin.flags & ERTS_ML_FLGS_SAME)
== (mdp->target.flags & ERTS_ML_FLGS_SAME));
- ERTS_ML_ASSERT(!(mdp->origin.flags & ERTS_ML_FLG_IN_TABLE));
- ERTS_ML_ASSERT(!(mdp->target.flags & ERTS_ML_FLG_IN_TABLE));
ERTS_ML_ASSERT(erts_atomic32_read_nob(&mdp->refc) >= 2);
- if (erts_atomic32_add_read_nob(&mdp->refc, (erts_aint32_t) -2) == 0)
+ if (erts_atomic32_add_read_nob(&mdp->refc, (erts_aint32_t) -2) == 0) {
+ ERTS_ML_ASSERT(!(mdp->origin.flags & ERTS_ML_FLG_IN_TABLE));
+ ERTS_ML_ASSERT(!(mdp->target.flags & ERTS_ML_FLG_IN_TABLE));
+
erts_monitor_destroy__(mdp);
+ }
}
ERTS_GLB_INLINE int
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
index f4dc60941a..18ed782ae3 100644
--- a/erts/emulator/beam/erl_node_tables.c
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -421,8 +421,25 @@ static void schedule_delete_dist_entry(DistEntry* dep)
*
* Note that timeouts do not guarantee thread progress.
*/
- erts_schedule_thr_prgr_later_op(start_timer_delete_dist_entry,
- dep, &dep->later_op);
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ if (esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ erts_schedule_thr_prgr_later_op(start_timer_delete_dist_entry,
+ dep, &dep->later_op);
+ } else {
+ /*
+         * Since OTP 20, it is possible that this destructor is
+         * executed on a dirty scheduler. Aux work cannot be done on a
+         * dirty scheduler, and scheduling any aux work on one makes
+         * that scheduler loop infinitely.
+         * To avoid this, hop over to the first normal scheduler, which
+         * is guaranteed to always be online, and schedule this
+         * function again there. Since this is a rare event, it should
+         * not cause any noticeable utilisation hit.
+ */
+ erts_schedule_misc_aux_work(1,
+ (void (*)(void *))schedule_delete_dist_entry,
+ (void *) dep);
+ }
}
static void
diff --git a/erts/emulator/beam/erl_port.h b/erts/emulator/beam/erl_port.h
index 2be0a5bf74..25976d38cc 100644
--- a/erts/emulator/beam/erl_port.h
+++ b/erts/emulator/beam/erl_port.h
@@ -334,6 +334,8 @@ Eterm erts_request_io_bytes(Process *c_p);
#define ERTS_PORT_SFLG_INVALID ((Uint32) (1 << 11))
/* Last port to terminate halts the emulator */
#define ERTS_PORT_SFLG_HALT ((Uint32) (1 << 12))
+/* Check if the event in ready_input should be cleaned */
+#define ERTS_PORT_SFLG_CHECK_FD_CLEANUP ((Uint32) (1 << 13))
#ifdef DEBUG
/* Only debug: make sure all flags aren't cleared unintentionally */
#define ERTS_PORT_SFLG_PORT_DEBUG ((Uint32) (1 << 31))
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 4928d80f27..c8f2e88127 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -97,6 +97,9 @@ static void chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_q
typedef union {
struct { /* I/O tasks */
ErlDrvEvent event;
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ int is_scheduler_event;
+#endif
} io;
struct {
ErtsProc2PortSigCallback callback;
@@ -141,6 +144,9 @@ struct ErtsPortTaskBusyCallerTable_ {
ErtsPortTaskBusyCaller pre_alloc_busy_caller;
};
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+erts_atomic_t erts_port_task_outstanding_io_tasks;
+#endif
static void begin_port_cleanup(Port *pp,
ErtsPortTask **execq,
@@ -578,13 +584,26 @@ reset_handle(ErtsPortTask *ptp)
}
static ERTS_INLINE void
-reset_executed_io_task_handle(ErtsPortTask *ptp)
+reset_executed_io_task_handle(Port *prt, ErtsPortTask *ptp)
{
if (ptp->u.alive.handle) {
ASSERT(ptp == handle2task(ptp->u.alive.handle));
- /* The port task handle is reset inside task_executed */
- erts_io_notify_port_task_executed(ptp->type, ptp->u.alive.handle,
- reset_port_task_handle);
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ if (ptp->u.alive.td.io.is_scheduler_event) {
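+        /* Events polled by the schedulers only need to notify the I/O
+           subsystem when fd cleanup has been requested; otherwise the
+           task handle can simply be reset. */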
+ if ((erts_atomic32_read_nob(&prt->state) & ERTS_PORT_SFLG_CHECK_FD_CLEANUP)) {
+ erts_io_notify_port_task_executed(ptp->type, ptp->u.alive.handle,
+ reset_port_task_handle);
+ erts_atomic32_read_band_nob(&prt->state, ~ERTS_PORT_SFLG_CHECK_FD_CLEANUP);
+ } else {
+ reset_port_task_handle(ptp->u.alive.handle);
+ }
+ } else
+#endif
+ {
+ /* The port task handle is reset inside task_executed */
+ erts_io_notify_port_task_executed(ptp->type, ptp->u.alive.handle,
+ reset_port_task_handle);
+ }
}
}
@@ -1307,6 +1326,22 @@ erts_port_task_abort(ErtsPortTaskHandle *pthp)
res = - 1; /* Task already aborted, executing, or executed */
else {
reset_port_task_handle(pthp);
+
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ switch (ptp->type) {
+ case ERTS_PORT_TASK_INPUT:
+ case ERTS_PORT_TASK_OUTPUT:
+ if (ptp->u.alive.td.io.is_scheduler_event) {
+ ASSERT(erts_atomic_read_nob(
+ &erts_port_task_outstanding_io_tasks) > 0);
+ erts_atomic_dec_relb(&erts_port_task_outstanding_io_tasks);
+ }
+ break;
+ default:
+ break;
+ }
+#endif
+
res = 0;
}
}
@@ -1442,7 +1477,14 @@ erts_port_task_schedule(Eterm id,
va_list argp;
va_start(argp, type);
ptp->u.alive.td.io.event = va_arg(argp, ErlDrvEvent);
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ ptp->u.alive.td.io.is_scheduler_event = va_arg(argp, int);
+#endif
va_end(argp);
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ if (ptp->u.alive.td.io.is_scheduler_event)
+ erts_atomic_inc_relb(&erts_port_task_outstanding_io_tasks);
+#endif
break;
}
case ERTS_PORT_TASK_PROC_SIG: {
@@ -1621,12 +1663,14 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
int processing_busy_q;
int vreds = 0;
int reds = 0;
- erts_aint_t io_tasks_executed = 0;
int fpe_was_unmasked;
erts_aint32_t state;
int active;
Uint64 start_time = 0;
ErtsSchedulerData *esdp = runq->scheduler;
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ erts_aint_t io_tasks_executed = 0;
+#endif
ERTS_MSACC_PUSH_STATE_M();
ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq));
@@ -1722,8 +1766,11 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
for input and output */
(*pp->drv_ptr->ready_input)((ErlDrvData) pp->drv_data,
ptp->u.alive.td.io.event);
- reset_executed_io_task_handle(ptp);
- io_tasks_executed++;
+ reset_executed_io_task_handle(pp, ptp);
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ if (ptp->u.alive.td.io.is_scheduler_event)
+ io_tasks_executed++;
+#endif
break;
case ERTS_PORT_TASK_OUTPUT:
reds = ERTS_PORT_REDS_OUTPUT;
@@ -1732,8 +1779,11 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
LTTNG_DRIVER(driver_ready_output, pp);
(*pp->drv_ptr->ready_output)((ErlDrvData) pp->drv_data,
ptp->u.alive.td.io.event);
- reset_executed_io_task_handle(ptp);
- io_tasks_executed++;
+ reset_executed_io_task_handle(pp, ptp);
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ if (ptp->u.alive.td.io.is_scheduler_event)
+ io_tasks_executed++;
+#endif
break;
case ERTS_PORT_TASK_PROC_SIG: {
ErtsProc2PortSigData *sigdp = &ptp->u.alive.td.psig.data;
@@ -1799,6 +1849,15 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
erts_unblock_fpe(fpe_was_unmasked);
ERTS_MSACC_POP_STATE_M();
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ if (io_tasks_executed) {
+ ASSERT(erts_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
+ >= io_tasks_executed);
+ erts_atomic_add_relb(&erts_port_task_outstanding_io_tasks,
+ -1*io_tasks_executed);
+ }
+#endif
+
ASSERT(runq == erts_get_runq_port(pp));
active = finalize_exec(pp, &execq, processing_busy_q);
@@ -2086,6 +2145,10 @@ erts_dequeue_port(ErtsRunQueue *rq)
void
erts_port_task_init(void)
{
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ erts_atomic_init_nob(&erts_port_task_outstanding_io_tasks,
+ (erts_aint_t) 0);
+#endif
init_port_task_alloc(erts_no_schedulers + erts_no_poll_threads
+ 1); /* aux_thread */
init_busy_caller_table_alloc();
diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h
index ae78a7d8a3..ca5183b305 100644
--- a/erts/emulator/beam/erl_port_task.h
+++ b/erts/emulator/beam/erl_port_task.h
@@ -38,6 +38,8 @@ typedef erts_atomic_t ErtsPortTaskHandle;
#ifndef ERL_PORT_TASK_H__
#define ERL_PORT_TASK_H__
+#include "erl_poll.h"
+
#undef ERTS_INCLUDE_SCHEDULER_INTERNALS
#if (defined(ERL_PROCESS_C__) \
|| defined(ERL_PORT_TASK_C__) \
@@ -54,8 +56,8 @@ typedef erts_atomic_t ErtsPortTaskHandle;
#define ERTS_PT_FLG_BAD_OUTPUT (1 << 4)
typedef enum {
- ERTS_PORT_TASK_INPUT,
- ERTS_PORT_TASK_OUTPUT,
+ ERTS_PORT_TASK_INPUT = 0,
+ ERTS_PORT_TASK_OUTPUT = 1,
ERTS_PORT_TASK_TIMEOUT,
ERTS_PORT_TASK_DIST_CMD,
ERTS_PORT_TASK_PROC_SIG
@@ -134,6 +136,12 @@ ERTS_GLB_INLINE void erts_port_task_sched_unlock(ErtsPortTaskSched *ptsp);
ERTS_GLB_INLINE int erts_port_task_sched_lock_is_locked(ErtsPortTaskSched *ptsp);
ERTS_GLB_INLINE void erts_port_task_sched_enter_exiting_state(ErtsPortTaskSched *ptsp);
+#if defined(ERTS_INCLUDE_SCHEDULER_INTERNALS) && ERTS_POLL_USE_SCHEDULER_POLLING
+ERTS_GLB_INLINE int erts_port_task_have_outstanding_io_tasks(void);
+/* NOTE: Do not access any of the exported variables directly */
+extern erts_atomic_t erts_port_task_outstanding_io_tasks;
+#endif
+
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
@@ -211,6 +219,15 @@ erts_port_task_sched_enter_exiting_state(ErtsPortTaskSched *ptsp)
erts_atomic32_read_bor_nob(&ptsp->flags, ERTS_PTS_FLG_EXITING);
}
+#if defined(ERTS_INCLUDE_SCHEDULER_INTERNALS) && ERTS_POLL_USE_SCHEDULER_POLLING
+ERTS_GLB_INLINE int
+erts_port_task_have_outstanding_io_tasks(void)
+{
+ return (erts_atomic_read_acqb(&erts_port_task_outstanding_io_tasks)
+ != 0);
+}
+#endif
+
#endif
#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 0f7f1598fd..2427d87f66 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -174,7 +174,6 @@ ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE];
typedef struct {
int aux_work;
int tse;
- int sys_schedule;
} ErtsBusyWaitParams;
static ErtsBusyWaitParams sched_busy_wait_params[ERTS_SCHED_TYPE_LAST + 1];
@@ -344,6 +343,9 @@ erts_sched_stat_t erts_sched_stat;
static erts_tsd_key_t ERTS_WRITE_UNLIKELY(sched_data_key);
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+static erts_atomic32_t doing_sys_schedule;
+#endif
static erts_atomic32_t no_empty_run_queues;
long erts_runq_supervision_interval = 0;
static ethr_event runq_supervision_event;
@@ -1646,7 +1648,7 @@ haw_thr_prgr_wakeup(ErtsAuxWorkData *awdp, ErtsThrPrgrVal val)
awdp->latest_wakeup = val;
haw_chk_later_cleanup_op_wakeup(awdp, val);
}
- erts_thr_progress_wakeup(awdp->esdp, val);
+ erts_thr_progress_wakeup(erts_thr_prgr_data(awdp->esdp), val);
}
}
@@ -1656,7 +1658,7 @@ haw_thr_prgr_soft_wakeup(ErtsAuxWorkData *awdp, ErtsThrPrgrVal val)
if (erts_thr_progress_cmp(val, awdp->latest_wakeup) > 0) {
awdp->latest_wakeup = val;
haw_chk_later_cleanup_op_wakeup(awdp, val);
- erts_thr_progress_wakeup(awdp->esdp, val);
+ erts_thr_progress_wakeup(erts_thr_prgr_data(awdp->esdp), val);
}
}
@@ -1670,7 +1672,7 @@ haw_thr_prgr_later_cleanup_op_wakeup(ErtsAuxWorkData *awdp, ErtsThrPrgrVal val,
else {
awdp->latest_wakeup = val;
awdp->later_op.size = thr_prgr_later_cleanup_op_threshold;
- erts_thr_progress_wakeup(awdp->esdp, val);
+ erts_thr_progress_wakeup(erts_thr_prgr_data(awdp->esdp), val);
}
}
}
@@ -3066,6 +3068,7 @@ aux_thread(void *unused)
ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(-1);
erts_aint32_t aux_work;
ErtsThrPrgrCallbacks callbacks;
+ ErtsThrPrgrData *tpd;
int thr_prgr_active = 1;
ERTS_MSACC_DECLARE_CACHE();
@@ -3087,12 +3090,16 @@ aux_thread(void *unused)
callbacks.wait = thr_prgr_wait;
callbacks.finalize_wait = thr_prgr_fin_wait;
- erts_thr_progress_register_managed_thread(NULL, &callbacks, 1);
+ tpd = erts_thr_progress_register_managed_thread(NULL, &callbacks, 1);
init_aux_work_data(awdp, NULL, NULL);
awdp->ssi = ssi;
#if ERTS_POLL_USE_FALLBACK
- ssi->psi = erts_create_pollset_thread(-1);
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ ssi->psi = erts_create_pollset_thread(-2, tpd);
+#else
+ ssi->psi = erts_create_pollset_thread(-1, tpd);
+#endif
#endif
sched_prep_spin_wait(ssi);
@@ -3105,11 +3112,11 @@ aux_thread(void *unused)
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
if (aux_work) {
if (!thr_prgr_active)
- erts_thr_progress_active(NULL, thr_prgr_active = 1);
+ erts_thr_progress_active(tpd, thr_prgr_active = 1);
aux_work = handle_aux_work(awdp, aux_work, 1);
ERTS_MSACC_UPDATE_CACHE();
- if (aux_work && erts_thr_progress_update(NULL))
- erts_thr_progress_leader_update(NULL);
+ if (aux_work && erts_thr_progress_update(tpd))
+ erts_thr_progress_leader_update(tpd);
}
if (!aux_work) {
@@ -3120,7 +3127,7 @@ aux_thread(void *unused)
#endif
if (thr_prgr_active)
- erts_thr_progress_active(NULL, thr_prgr_active = 0);
+ erts_thr_progress_active(tpd, thr_prgr_active = 0);
#if ERTS_POLL_USE_FALLBACK
@@ -3132,11 +3139,11 @@ aux_thread(void *unused)
if (flgs & ERTS_SSI_FLG_SLEEPING) {
ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
ASSERT(flgs & ERTS_SSI_FLG_WAITING);
- erts_check_io(ssi->psi);
+ erts_check_io(ssi->psi, ERTS_POLL_INF_TIMEOUT);
}
}
#else
- erts_thr_progress_prepare_wait(NULL);
+ erts_thr_progress_prepare_wait(tpd);
flgs = sched_spin_wait(ssi, 0);
@@ -3153,7 +3160,7 @@ aux_thread(void *unused)
ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_OTHER);
}
}
- erts_thr_progress_finalize_wait(NULL);
+ erts_thr_progress_finalize_wait(tpd);
#endif
}
@@ -3171,7 +3178,8 @@ poll_thread(void *arg)
erts_aint32_t aux_work;
ErtsThrPrgrCallbacks callbacks;
int thr_prgr_active = 1;
- struct erts_poll_thread *psi = erts_create_pollset_thread(id);
+ struct erts_poll_thread *psi;
+ ErtsThrPrgrData *tpd;
ERTS_MSACC_DECLARE_CACHE();
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -3192,9 +3200,12 @@ poll_thread(void *arg)
callbacks.wait = thr_prgr_wait;
callbacks.finalize_wait = thr_prgr_fin_wait;
- erts_thr_progress_register_managed_thread(NULL, &callbacks, 0);
+ tpd = erts_thr_progress_register_managed_thread(NULL, &callbacks, 0);
init_aux_work_data(awdp, NULL, NULL);
awdp->ssi = ssi;
+
+ psi = erts_create_pollset_thread(id, tpd);
+
ssi->psi = psi;
sched_prep_spin_wait(ssi);
@@ -3207,16 +3218,16 @@ poll_thread(void *arg)
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
if (aux_work) {
if (!thr_prgr_active)
- erts_thr_progress_active(NULL, thr_prgr_active = 1);
+ erts_thr_progress_active(tpd, thr_prgr_active = 1);
aux_work = handle_aux_work(awdp, aux_work, 1);
ERTS_MSACC_UPDATE_CACHE();
- if (aux_work && erts_thr_progress_update(NULL))
- erts_thr_progress_leader_update(NULL);
+ if (aux_work && erts_thr_progress_update(tpd))
+ erts_thr_progress_leader_update(tpd);
}
if (!aux_work) {
if (thr_prgr_active)
- erts_thr_progress_active(NULL, thr_prgr_active = 0);
+ erts_thr_progress_active(tpd, thr_prgr_active = 0);
flgs = sched_spin_wait(ssi, 0);
@@ -3226,7 +3237,7 @@ poll_thread(void *arg)
if (flgs & ERTS_SSI_FLG_SLEEPING) {
ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
ASSERT(flgs & ERTS_SSI_FLG_WAITING);
- erts_check_io(psi);
+ erts_check_io(psi, ERTS_POLL_INF_TIMEOUT);
}
}
}
@@ -3236,6 +3247,59 @@ poll_thread(void *arg)
return NULL;
}
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+static ERTS_INLINE void
+clear_sys_scheduling(void)
+{
+ erts_atomic32_set_mb(&doing_sys_schedule, 0);
+}
+
+static ERTS_INLINE int
+try_set_sys_scheduling(void)
+{
+ return 0 == erts_atomic32_cmpxchg_acqb(&doing_sys_schedule, 1, 0);
+}
+
+
+static ERTS_INLINE int
+prepare_for_sys_schedule(void)
+{
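+    /*
+     * Re-check outstanding I/O tasks after winning the flag to close
+     * the race with a concurrently scheduled task; back off and clear
+     * the flag if the re-check fails.
+     */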
+ while (!erts_port_task_have_outstanding_io_tasks()
+ && try_set_sys_scheduling()) {
+ if (!erts_port_task_have_outstanding_io_tasks())
+ return 1;
+ clear_sys_scheduling();
+ }
+ return 0;
+}
+
+static void
+check_io_timer(void *null)
+{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ if (prepare_for_sys_schedule()) {
+ erts_check_io(esdp->ssi->psi, ERTS_POLL_NO_TIMEOUT);
+ clear_sys_scheduling();
+ }
+
+    /* The timer is not restarted if this scheduler's run-queue has
+       become empty or if the CHECKIO flag has been cleared. The
+       CHECKIO flag is cleared when check_balance assigns another
+       scheduler to be the poller in
+ the overload scenario. */
+ if ((ERTS_RUNQ_FLGS_GET_NOB(esdp->run_queue) & (ERTS_RUNQ_FLG_OUT_OF_WORK|ERTS_RUNQ_FLG_CHECKIO))
+ == ERTS_RUNQ_FLG_CHECKIO) {
+ erts_start_timer_callback(ERTS_POLL_SCHEDULER_POLLING_TIMEOUT,
+ check_io_timer, NULL);
+ } else {
+ ERTS_RUNQ_FLGS_UNSET(esdp->run_queue, ERTS_RUNQ_FLG_CHECKIO);
+ }
+}
+
+#else
+#define clear_sys_scheduling()
+#define prepare_for_sys_schedule() 0
+#endif
+
static void
scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
{
@@ -3286,13 +3350,13 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp)) {
if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
}
aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1);
ERTS_MSACC_UPDATE_CACHE();
- if (aux_work && erts_thr_progress_update(esdp))
- erts_thr_progress_leader_update(esdp);
+ if (aux_work && erts_thr_progress_update(erts_thr_prgr_data(esdp)))
+ erts_thr_progress_leader_update(erts_thr_prgr_data(esdp));
}
if (aux_work) {
@@ -3301,7 +3365,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
current_time = erts_get_monotonic_time(esdp);
if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) {
if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
}
erts_bump_timers(esdp->timer_wheel, current_time);
@@ -3321,19 +3385,36 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
}
if (do_timeout) {
if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
}
}
- else {
+ else if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && prepare_for_sys_schedule()) {
+ /* We sleep in check_io, only for normal schedulers */
+ if (thr_prgr_active) {
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 0);
+ sched_wall_time_change(esdp, 0);
+ }
+ flgs = sched_spin_wait(ssi, 0);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_POLL_SLEEPING);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ erts_check_io(ssi->psi, timeout_time);
+ current_time = erts_get_monotonic_time(esdp);
+ }
+ }
+ clear_sys_scheduling();
+ } else {
if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
if (thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 0);
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 0);
sched_wall_time_change(esdp, 0);
}
- erts_thr_progress_prepare_wait(esdp);
+ erts_thr_progress_prepare_wait(erts_thr_prgr_data(esdp));
}
-
flgs = sched_spin_wait(ssi, spincount);
if (flgs & ERTS_SSI_FLG_SLEEPING) {
ASSERT(flgs & ERTS_SSI_FLG_WAITING);
@@ -3363,7 +3444,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
}
}
if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
- erts_thr_progress_finalize_wait(esdp);
+ erts_thr_progress_finalize_wait(erts_thr_prgr_data(esdp));
}
if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && current_time >= timeout_time)
erts_bump_timers(esdp->timer_wheel, current_time);
@@ -3392,7 +3473,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
if (ERTS_SCHEDULER_IS_DIRTY(esdp))
dirty_sched_wall_time_change(esdp, working = 1);
else if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
}
@@ -4580,6 +4661,15 @@ check_balance(ErtsRunQueue *c_rq)
if (blnc_no_rqs == 1) {
c_rq->check_balance_reds = INT_MAX;
erts_atomic32_set_nob(&balance_info.checking_balance, 0);
+#if ERTS_POLL_USE_SCHEDULER_POLLING
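+        /* With a single run queue there is nothing to balance, but the
+           check_io timer must still be started if the queue is busy
+           and polling is not already active. */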
+ c_rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS;
+ if ((ERTS_RUNQ_FLGS_GET_NOB(c_rq) & (ERTS_RUNQ_FLG_OUT_OF_WORK|ERTS_RUNQ_FLG_CHECKIO))
+ == 0) {
+ ERTS_RUNQ_FLGS_SET(c_rq, ERTS_RUNQ_FLG_CHECKIO);
+ erts_start_timer_callback(ERTS_POLL_SCHEDULER_POLLING_TIMEOUT, check_io_timer, NULL);
+ }
+ ERTS_RUNQ_FLGS_UNSET(c_rq, ERTS_RUNQ_FLGS_MIGRATION_INFO);
+#endif
return;
}
@@ -5099,6 +5189,19 @@ erts_fprintf(stderr, "--------------------------------\n");
/* Publish new migration paths... */
erts_atomic_set_wb(&erts_migration_paths, (erts_aint_t) new_mpaths);
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ if (full_scheds == current_active) {
+ ERTS_ASSERT(full_scheds <= current_active);
+        /* All active schedulers ran at full load; we need to do active
+           polling, so set up a timer that does it. */
+ if (!(ERTS_RUNQ_FLGS_GET_NOB(c_rq) & ERTS_RUNQ_FLG_CHECKIO)) {
+ /* Active polling is not running, start it */
+ erts_start_timer_callback(ERTS_POLL_SCHEDULER_POLLING_TIMEOUT, check_io_timer, NULL);
+ }
+ run_queue_info[c_rq->ix].flags |= ERTS_RUNQ_FLG_CHECKIO;
+ }
+#endif
+
/* Reset balance statistics in all online queues */
for (qix = 0; qix < blnc_no_rqs; qix++) {
Uint32 flags = run_queue_info[qix].flags;
@@ -5108,6 +5211,8 @@ erts_fprintf(stderr, "--------------------------------\n");
ASSERT(!(flags & ERTS_RUNQ_FLG_OUT_OF_WORK));
if (rq->waiting)
flags |= ERTS_RUNQ_FLG_OUT_OF_WORK;
+ if (rq != c_rq)
+ flags &= ~ERTS_RUNQ_FLG_CHECKIO;
rq->full_reds_history_sum
= run_queue_info[qix].full_reds_history_sum;
@@ -5117,8 +5222,7 @@ erts_fprintf(stderr, "--------------------------------\n");
ERTS_DBG_CHK_FULL_REDS_HISTORY(rq);
rq->out_of_work_count = 0;
- (void) ERTS_RUNQ_FLGS_READ_BSET(rq, ERTS_RUNQ_FLGS_MIGRATION_INFO, flags);
-
+ (void) ERTS_RUNQ_FLGS_READ_BSET(rq, ERTS_RUNQ_FLGS_MIGRATION_INFO|ERTS_RUNQ_FLG_CHECKIO, flags);
rq->max_len = erts_atomic32_read_dirty(&rq->len);
for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) {
ErtsRunQueueInfo *rqi;
@@ -5557,7 +5661,6 @@ erts_sched_set_busy_wait_threshold(ErtsSchedType sched_type, char *str)
return EINVAL;
}
- params->sys_schedule = sys_sched;
params->tse = sys_sched * ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT;
params->aux_work = sys_sched * aux_work_fact;
@@ -5768,6 +5871,9 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online, int no_poll_th
size_runqs = sizeof(ErtsAlignedRunQueue) * tot_rqs;
erts_aligned_run_queues =
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_RUNQS, size_runqs);
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ erts_atomic32_init_nob(&doing_sys_schedule, 0);
+#endif
erts_atomic32_init_nob(&no_empty_run_queues, 0);
erts_no_run_queues = n;
@@ -7565,7 +7671,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
if (aux_work|evacuate) {
if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ erts_thr_progress_active(erts_thr_prgr_data(esdp),
+ thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
}
if (aux_work)
@@ -7573,8 +7680,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
aux_work,
1);
- if (aux_work && erts_thr_progress_update(esdp))
- erts_thr_progress_leader_update(esdp);
+ if (aux_work && erts_thr_progress_update(erts_thr_prgr_data(esdp)))
+ erts_thr_progress_leader_update(erts_thr_prgr_data(esdp));
if (evacuate) {
erts_runq_lock(esdp->run_queue);
evacuate_run_queue(esdp->run_queue, &sbp);
@@ -7593,18 +7700,18 @@ suspend_scheduler(ErtsSchedulerData *esdp)
if (!aux_work && current_time < timeout_time) {
/* go to sleep... */
if (thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 0);
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 0);
sched_wall_time_change(esdp, 0);
}
- erts_thr_progress_prepare_wait(NULL);
+ erts_thr_progress_prepare_wait(erts_thr_prgr_data(NULL));
suspend_normal_scheduler_sleep(esdp);
- erts_thr_progress_finalize_wait(NULL);
+ erts_thr_progress_finalize_wait(erts_thr_prgr_data(NULL));
current_time = erts_get_monotonic_time(esdp);
}
if (current_time >= timeout_time) {
if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
}
erts_bump_timers(esdp->timer_wheel, current_time);
@@ -7661,7 +7768,7 @@ suspend_scheduler(ErtsSchedulerData *esdp)
profile_scheduler(make_small(esdp->no), am_active);
if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
}
}
@@ -8296,6 +8403,11 @@ sched_thread_func(void *vesdp)
erts_msacc_init_thread("scheduler", no, 1);
erts_thr_progress_register_managed_thread(esdp, &callbacks, 0);
+
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ esdp->ssi->psi = erts_create_pollset_thread(-1, NULL);
+#endif
+
erts_alloc_register_scheduler(vesdp);
#ifdef ERTS_ENABLE_LOCK_CHECK
{
@@ -9313,12 +9425,12 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
}
}
- leader_update = erts_thr_progress_update(esdp);
+ leader_update = erts_thr_progress_update(erts_thr_prgr_data(esdp));
aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work);
if (aux_work | leader_update) {
erts_runq_unlock(rq);
if (leader_update)
- erts_thr_progress_leader_update(esdp);
+ erts_thr_progress_leader_update(erts_thr_prgr_data(esdp));
if (aux_work)
handle_aux_work(&esdp->aux_work_data, aux_work, 0);
erts_runq_lock(rq);
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 8d20ccdf90..a1b029adbe 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -173,8 +173,10 @@ extern int erts_dio_sched_thread_suggested_stack_size;
(((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 9))
#define ERTS_RUNQ_FLG_HALTING \
(((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 10))
+#define ERTS_RUNQ_FLG_CHECKIO \
+ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 11))
-#define ERTS_RUNQ_FLG_MAX (ERTS_RUNQ_FLG_BASE2 + 11)
+#define ERTS_RUNQ_FLG_MAX (ERTS_RUNQ_FLG_BASE2 + 12)
#define ERTS_RUNQ_FLGS_MIGRATION_QMASKS \
(ERTS_RUNQ_FLGS_EMIGRATE_QMASK \
diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c
index 243db4c734..ac5054ea10 100644
--- a/erts/emulator/beam/erl_process_dump.c
+++ b/erts/emulator/beam/erl_process_dump.c
@@ -58,6 +58,7 @@ static void dump_externally(fmtfn_t to, void *to_arg, Eterm term);
static void mark_literal(Eterm* ptr);
static void init_literal_areas(void);
static void dump_literals(fmtfn_t to, void *to_arg);
+static void dump_persistent_terms(fmtfn_t to, void *to_arg);
static void dump_module_literals(fmtfn_t to, void *to_arg,
ErtsLiteralArea* lit_area);
@@ -74,6 +75,7 @@ erts_deep_process_dump(fmtfn_t to, void *to_arg)
all_binaries = NULL;
init_literal_areas();
+ erts_init_persistent_dumping();
for (i = 0; i < max; i++) {
Process *p = erts_pix2proc(i);
@@ -93,6 +95,7 @@ erts_deep_process_dump(fmtfn_t to, void *to_arg)
}
}
+ dump_persistent_terms(to, to_arg);
dump_literals(to, to_arg);
dump_binaries(to, to_arg, all_binaries);
}
@@ -775,6 +778,9 @@ init_literal_areas(void)
qsort(lit_areas, num_lit_areas, sizeof(ErtsLiteralArea *),
compare_areas);
+ qsort(erts_persistent_areas, erts_num_persistent_areas,
+ sizeof(ErtsLiteralArea *), compare_areas);
+
erts_runlock_old_code(code_ix);
}
@@ -796,6 +802,13 @@ static void mark_literal(Eterm* ptr)
ap = bsearch(ptr, lit_areas, num_lit_areas, sizeof(ErtsLiteralArea*),
search_areas);
+ if (ap == 0) {
+ ap = bsearch(ptr, erts_persistent_areas,
+ erts_num_persistent_areas,
+ sizeof(ErtsLiteralArea*),
+ search_areas);
+ }
+
/*
* If the literal was created by native code, this search will not
@@ -807,12 +820,12 @@ static void mark_literal(Eterm* ptr)
}
}
-
static void
dump_literals(fmtfn_t to, void *to_arg)
{
ErtsCodeIndex code_ix;
int i;
+ Uint idx;
code_ix = erts_active_code_ix();
erts_rlock_old_code(code_ix);
@@ -825,6 +838,28 @@ dump_literals(fmtfn_t to, void *to_arg)
}
erts_runlock_old_code(code_ix);
+
+ for (idx = 0; idx < erts_num_persistent_areas; idx++) {
+ dump_module_literals(to, to_arg, erts_persistent_areas[idx]);
+ }
+}
+
+static void
+dump_persistent_terms(fmtfn_t to, void *to_arg)
+{
+ Uint idx;
+
+ erts_print(to, to_arg, "=persistent_terms\n");
+
+ for (idx = 0; idx < erts_num_persistent_areas; idx++) {
+ ErtsLiteralArea* ap = erts_persistent_areas[idx];
+ Eterm tuple = make_tuple(ap->start);
+ Eterm* tup_val = tuple_val(tuple);
+
+ dump_element(to, to_arg, tup_val[1]);
+ erts_putc(to, to_arg, '|');
+ dump_element_nl(to, to_arg, tup_val[2]);
+ }
}
static void
@@ -963,7 +998,8 @@ dump_module_literals(fmtfn_t to, void *to_arg, ErtsLiteralArea* lit_area)
}
erts_putc(to, to_arg, '\n');
}
- } else if (is_export_header(w)) {
+ } else {
+ /* Dump everything else in the external format */
dump_externally(to, to_arg, term);
erts_putc(to, to_arg, '\n');
}
diff --git a/erts/emulator/beam/erl_sched_spec_pre_alloc.h b/erts/emulator/beam/erl_sched_spec_pre_alloc.h
index b119c59ab3..74cc966cbe 100644
--- a/erts/emulator/beam/erl_sched_spec_pre_alloc.h
+++ b/erts/emulator/beam/erl_sched_spec_pre_alloc.h
@@ -188,6 +188,7 @@ erts_sspa_alloc(erts_sspa_data_t *data, int cix)
erts_sspa_chunk_t *chnk;
erts_sspa_chunk_header_t *chdr;
erts_sspa_blk_t *res;
+ ERTS_MSACC_PUSH_AND_SET_STATE_M_X(ERTS_MSACC_STATE_ALLOC);
chnk = erts_sspa_cix2chunk(data, cix);
chdr = &chnk->aligned.header;
@@ -201,11 +202,15 @@ erts_sspa_alloc(erts_sspa_data_t *data, int cix)
chdr->local.last = NULL;
ERTS_SSPA_DBG_CHK_LCL(chdr);
}
- if (chdr->local.cnt <= chdr->local.lim)
- return (char *) erts_sspa_process_remote_frees(chdr, res);
+ if (chdr->local.cnt <= chdr->local.lim) {
+ res = erts_sspa_process_remote_frees(chdr, res);
+ ERTS_MSACC_POP_STATE_M_X();
+ return (char*) res;
+ }
else if (chdr->head.no_thr_progress_check < ERTS_SSPA_FORCE_THR_CHECK_PROGRESS)
chdr->head.no_thr_progress_check++;
ASSERT(res);
+ ERTS_MSACC_POP_STATE_M_X();
return (char *) res;
}
diff --git a/erts/emulator/beam/erl_thr_progress.c b/erts/emulator/beam/erl_thr_progress.c
index aa08eb40ec..bac437efe9 100644
--- a/erts/emulator/beam/erl_thr_progress.c
+++ b/erts/emulator/beam/erl_thr_progress.c
@@ -508,6 +508,10 @@ init_wakeup_request_array(ErtsThrPrgrVal *w)
}
}
+ErtsThrPrgrData *erts_thr_progress_data(void) {
+ return erts_tsd_get(erts_thr_prgr_data_key__);
+}
+
void
erts_thr_progress_register_unmanaged_thread(ErtsThrPrgrCallbacks *callbacks)
{
@@ -551,7 +555,7 @@ erts_thr_progress_register_unmanaged_thread(ErtsThrPrgrCallbacks *callbacks)
}
-void
+ErtsThrPrgrData *
erts_thr_progress_register_managed_thread(ErtsSchedulerData *esdp,
ErtsThrPrgrCallbacks *callbacks,
int pref_wakeup)
@@ -630,6 +634,7 @@ erts_thr_progress_register_managed_thread(ErtsSchedulerData *esdp,
wakeup_managed(id);
}
callbacks->finalize_wait(callbacks->arg);
+ return tpd;
}
static ERTS_INLINE int
@@ -796,7 +801,7 @@ leader_update(ErtsThrPrgrData *tpd)
== ERTS_THR_PRGR_LFLG_NO_LEADER))
&& got_sched_wakeups()) {
/* Someone need to make progress */
- wakeup_managed(0);
+ wakeup_managed(tpd->id);
}
}
}
@@ -849,23 +854,22 @@ update(ErtsThrPrgrData *tpd)
}
int
-erts_thr_progress_update(ErtsSchedulerData *esdp)
+erts_thr_progress_update(ErtsThrPrgrData *tpd)
{
- return update(thr_prgr_data(esdp));
+ return update(tpd);
}
int
-erts_thr_progress_leader_update(ErtsSchedulerData *esdp)
+erts_thr_progress_leader_update(ErtsThrPrgrData *tpd)
{
- return leader_update(thr_prgr_data(esdp));
+ return leader_update(tpd);
}
void
-erts_thr_progress_prepare_wait(ErtsSchedulerData *esdp)
+erts_thr_progress_prepare_wait(ErtsThrPrgrData *tpd)
{
erts_aint32_t lflgs;
- ErtsThrPrgrData *tpd = thr_prgr_data(esdp);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_check_exact(NULL, 0);
@@ -884,14 +888,13 @@ erts_thr_progress_prepare_wait(ErtsSchedulerData *esdp)
== ERTS_THR_PRGR_LFLG_NO_LEADER
&& got_sched_wakeups()) {
         /* Someone needs to make progress */
- wakeup_managed(0);
+ wakeup_managed(tpd->id);
}
}
void
-erts_thr_progress_finalize_wait(ErtsSchedulerData *esdp)
+erts_thr_progress_finalize_wait(ErtsThrPrgrData *tpd)
{
- ErtsThrPrgrData *tpd = thr_prgr_data(esdp);
ErtsThrPrgrVal current, val;
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -921,9 +924,8 @@ erts_thr_progress_finalize_wait(ErtsSchedulerData *esdp)
}
void
-erts_thr_progress_active(ErtsSchedulerData *esdp, int on)
+erts_thr_progress_active(ErtsThrPrgrData *tpd, int on)
{
- ErtsThrPrgrData *tpd = thr_prgr_data(esdp);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_check_exact(NULL, 0);
@@ -973,7 +975,7 @@ unmanaged_continue(ErtsThrPrgrDelayHandle handle)
== (ERTS_THR_PRGR_LFLG_NO_LEADER|ERTS_THR_PRGR_LFLG_WAITING_UM)
&& got_sched_wakeups()) {
             /* Others are waiting for us... */
- wakeup_managed(0);
+ wakeup_managed(1);
}
}
}
@@ -1182,10 +1184,10 @@ request_wakeup_unmanaged(ErtsThrPrgrData *tpd, ErtsThrPrgrVal value)
}
void
-erts_thr_progress_wakeup(ErtsSchedulerData *esdp,
+erts_thr_progress_wakeup(ErtsThrPrgrData *tpd,
ErtsThrPrgrVal value)
{
- ErtsThrPrgrData *tpd = thr_prgr_data(esdp);
+
ASSERT(!tpd->is_temporary);
if (tpd->is_managed)
request_wakeup_managed(tpd, value);
diff --git a/erts/emulator/beam/erl_thr_progress.h b/erts/emulator/beam/erl_thr_progress.h
index 8329995b24..00a9e61407 100644
--- a/erts/emulator/beam/erl_thr_progress.h
+++ b/erts/emulator/beam/erl_thr_progress.h
@@ -123,22 +123,24 @@ extern ErtsThrPrgr erts_thr_prgr__;
void erts_thr_progress_pre_init(void);
void erts_thr_progress_init(int no_schedulers, int managed, int unmanaged);
-void erts_thr_progress_register_managed_thread(ErtsSchedulerData *esdp,
- ErtsThrPrgrCallbacks *,
- int);
+ErtsThrPrgrData *erts_thr_progress_register_managed_thread(
+ ErtsSchedulerData *esdp, ErtsThrPrgrCallbacks *, int);
void erts_thr_progress_register_unmanaged_thread(ErtsThrPrgrCallbacks *);
-void erts_thr_progress_active(ErtsSchedulerData *esdp, int on);
-void erts_thr_progress_wakeup(ErtsSchedulerData *esdp,
+void erts_thr_progress_active(ErtsThrPrgrData *, int on);
+void erts_thr_progress_wakeup(ErtsThrPrgrData *,
ErtsThrPrgrVal value);
-int erts_thr_progress_update(ErtsSchedulerData *esdp);
-int erts_thr_progress_leader_update(ErtsSchedulerData *esdp);
-void erts_thr_progress_prepare_wait(ErtsSchedulerData *esdp);
-void erts_thr_progress_finalize_wait(ErtsSchedulerData *esdp);
+int erts_thr_progress_update(ErtsThrPrgrData *);
+int erts_thr_progress_leader_update(ErtsThrPrgrData *);
+void erts_thr_progress_prepare_wait(ErtsThrPrgrData *);
+void erts_thr_progress_finalize_wait(ErtsThrPrgrData *);
ErtsThrPrgrDelayHandle erts_thr_progress_unmanaged_delay__(void);
void erts_thr_progress_unmanaged_continue__(int umrefc_ix);
+ErtsThrPrgrData *erts_thr_progress_data(void);
void erts_thr_progress_dbg_print_state(void);
+ERTS_GLB_INLINE ErtsThrPrgrData *erts_thr_prgr_data(ErtsSchedulerData *esdp);
+
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_read_nob__(ERTS_THR_PRGR_ATOMIC *atmc);
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_read_acqb__(ERTS_THR_PRGR_ATOMIC *atmc);
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_read_mb__(ERTS_THR_PRGR_ATOMIC *atmc);
@@ -161,6 +163,15 @@ ERTS_GLB_INLINE int erts_thr_progress_has_reached(ErtsThrPrgrVal val);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE ErtsThrPrgrData *
+erts_thr_prgr_data(ErtsSchedulerData *esdp) {
+ if (esdp) {
+ return &esdp->thr_progress_data;
+ } else {
+ return erts_thr_progress_data();
+ }
+}
+
ERTS_GLB_INLINE ErtsThrPrgrVal
erts_thr_prgr_read_nob__(ERTS_THR_PRGR_ATOMIC *atmc)
{
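
Taken together with the header changes above, the thread-progress API now takes an explicit ErtsThrPrgrData pointer: registration returns it once, erts_thr_prgr_data() recovers it from the scheduler data or from thread-specific data, and callers thread it through every later call. A simplified mock of that convention (illustrative names, not ERTS code):

#include <stdio.h>

typedef struct { int id; } tpd_t;            /* stand-in for ErtsThrPrgrData */

static tpd_t *register_managed_thread(void)  /* mock: hands back the per-thread data */
{
    static tpd_t tpd = { 1 };
    return &tpd;
}

static int progress_update(tpd_t *tpd)       /* mock of erts_thr_progress_update */
{
    return tpd->id == 1;                     /* pretend this thread is the leader */
}

static void leader_update(tpd_t *tpd)        /* mock of erts_thr_progress_leader_update */
{
    printf("thread %d drives global progress\n", tpd->id);
}

int main(void) {
    tpd_t *tpd = register_managed_thread();  /* keep the returned pointer... */
    if (progress_update(tpd))                /* ...and pass it to every call, */
        leader_update(tpd);                  /* instead of a scheduler pointer */
    return 0;
}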
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 53a020e7a5..2350d4c02f 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -2177,6 +2177,7 @@ sys_msg_dispatcher_func(void *unused)
{
ErtsThrPrgrCallbacks callbacks;
ErtsSysMsgQ *local_sys_message_queue = NULL;
+ ErtsThrPrgrData *tpd;
int wait = 0;
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -2189,7 +2190,7 @@ sys_msg_dispatcher_func(void *unused)
callbacks.wait = sys_msg_dispatcher_wait;
callbacks.finalize_wait = sys_msg_dispatcher_fin_wait;
- erts_thr_progress_register_managed_thread(NULL, &callbacks, 0);
+ tpd = erts_thr_progress_register_managed_thread(NULL, &callbacks, 0);
while (1) {
int end_wait = 0;
@@ -2210,8 +2211,8 @@ sys_msg_dispatcher_func(void *unused)
if (!sys_message_queue) {
erts_mtx_unlock(&smq_mtx);
end_wait = 1;
- erts_thr_progress_active(NULL, 0);
- erts_thr_progress_prepare_wait(NULL);
+ erts_thr_progress_active(tpd, 0);
+ erts_thr_progress_prepare_wait(tpd);
erts_mtx_lock(&smq_mtx);
}
@@ -2225,8 +2226,8 @@ sys_msg_dispatcher_func(void *unused)
erts_mtx_unlock(&smq_mtx);
if (end_wait) {
- erts_thr_progress_finalize_wait(NULL);
- erts_thr_progress_active(NULL, 1);
+ erts_thr_progress_finalize_wait(tpd);
+ erts_thr_progress_active(tpd, 1);
}
/* Send trace messages ... */
@@ -2239,8 +2240,8 @@ sys_msg_dispatcher_func(void *unused)
Process *proc = NULL;
Port *port = NULL;
- if (erts_thr_progress_update(NULL))
- erts_thr_progress_leader_update(NULL);
+ if (erts_thr_progress_update(tpd))
+ erts_thr_progress_leader_update(tpd);
#ifdef DEBUG_PRINTOUTS
print_msg_type(smqp);
diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h
index b3bfa69052..880febba8b 100644
--- a/erts/emulator/beam/erl_utils.h
+++ b/erts/emulator/beam/erl_utils.h
@@ -22,6 +22,7 @@
#define ERL_UTILS_H__
#include "sys.h"
+#include "atom.h"
#include "erl_printf.h"
struct process;
@@ -112,10 +113,12 @@ int eq(Eterm, Eterm);
#define EQ(x,y) (((x) == (y)) || (is_not_both_immed((x),(y)) && eq((x),(y))))
-int erts_cmp_atoms(Eterm a, Eterm b);
-Sint erts_cmp(Eterm, Eterm, int, int);
-Sint erts_cmp_compound(Eterm, Eterm, int, int);
+ERTS_GLB_INLINE Sint erts_cmp(Eterm, Eterm, int, int);
+ERTS_GLB_INLINE int erts_cmp_atoms(Eterm a, Eterm b);
+
Sint cmp(Eterm a, Eterm b);
+Sint erts_cmp_compound(Eterm, Eterm, int, int);
+
#define CMP(A,B) erts_cmp(A,B,0,0)
#define CMP_TERM(A,B) erts_cmp(A,B,1,0)
#define CMP_EQ_ONLY(A,B) erts_cmp(A,B,0,1)
@@ -150,4 +153,56 @@ Sint cmp(Eterm a, Eterm b);
if (erts_cmp_compound(X,Y,0,EqOnly) Op 0) { Action; }; \
}
+#define erts_float_comp(x,y) (((x)<(y)) ? -1 : (((x)==(y)) ? 0 : 1))
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE int erts_cmp_atoms(Eterm a, Eterm b) {
+ Atom *aa = atom_tab(atom_val(a));
+ Atom *bb = atom_tab(atom_val(b));
+
+ byte *name_a, *name_b;
+ int len_a, len_b, diff;
+
+ diff = aa->ord0 - bb->ord0;
+
+ if (diff != 0) {
+ return diff;
+ }
+
+ name_a = &aa->name[3];
+ name_b = &bb->name[3];
+ len_a = aa->len-3;
+ len_b = bb->len-3;
+
+ if (len_a > 0 && len_b > 0) {
+ diff = sys_memcmp(name_a, name_b, MIN(len_a, len_b));
+
+ if (diff != 0) {
+ return diff;
+ }
+ }
+
+ return len_a - len_b;
+}
+
+ERTS_GLB_INLINE Sint erts_cmp(Eterm a, Eterm b, int exact, int eq_only) {
+ if (is_atom(a) && is_atom(b)) {
+ return erts_cmp_atoms(a, b);
+ } else if (is_both_small(a, b)) {
+ return (signed_val(a) - signed_val(b));
+ } else if (is_float(a) && is_float(b)) {
+ FloatDef af, bf;
+
+ GET_DOUBLE(a, af);
+ GET_DOUBLE(b, bf);
+
+ return erts_float_comp(af.fd, bf.fd);
+ }
+
+ return erts_cmp_compound(a,b,exact,eq_only);
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
#endif
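
For reference, a standalone model of the two-stage compare that the inlined erts_cmp_atoms() performs, with simplified atom records: a precomputed 31-bit ordinal of the leading name bytes settles most compares with one subtraction, and only on a tie are the names compared from byte 3 onward.

#include <stdio.h>
#include <string.h>

#define MIN(a,b) ((a) < (b) ? (a) : (b))

typedef struct { int ord0; const unsigned char *name; int len; } atom_t;

/* Pack the first 3 bytes + 7 bits into a non-negative 31-bit value so
 * the difference of two ordinals cannot overflow a signed int. */
static int make_ord0(const unsigned char *s, int len) {
    unsigned char c[4] = { 0, 0, 0, 0 };
    int i;
    for (i = 0; i < 4 && i < len; i++)
        c[i] = s[i];
    return (c[0] << 23) | (c[1] << 15) | (c[2] << 7) | (c[3] >> 1);
}

static int cmp_atoms(const atom_t *a, const atom_t *b) {
    int len_a, len_b, diff = a->ord0 - b->ord0;
    if (diff != 0)
        return diff;
    /* Ordinal tie: compare from byte 3 because the ordinal fully
     * covers only the first 3 bytes. */
    len_a = a->len - 3;
    len_b = b->len - 3;
    if (len_a > 0 && len_b > 0) {
        diff = memcmp(a->name + 3, b->name + 3, MIN(len_a, len_b));
        if (diff != 0)
            return diff;
    }
    return len_a - len_b;
}

int main(void) {
    atom_t x = { 0, (const unsigned char *)"apple", 5 };
    atom_t y = { 0, (const unsigned char *)"apples", 6 };
    x.ord0 = make_ord0(x.name, x.len);
    y.ord0 = make_ord0(y.name, y.len);
    printf("%d\n", cmp_atoms(&x, &y) < 0); /* prints 1: "apple" < "apples" */
    return 0;
}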
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index 621ba108ba..9a66e491f3 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -1953,7 +1953,8 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
#define RETURN_STATE() \
do { \
- hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE+3); \
+ static const int TUPLE2_SIZE = 2 + 1; \
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE + TUPLE2_SIZE); \
c_term = erts_mk_magic_ref(&hp, &MSO(p), context_b); \
res = TUPLE2(hp, Term, c_term); \
BUMP_ALL_REDS(p); \
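
The named constant documents the heap cost of a 2-tuple: one header word carrying the arity plus one word per element. A toy layout (simplified, untagged words):

#include <stdio.h>

typedef unsigned long Word; /* stand-in for Eterm */

int main(void) {
    Word heap[2 + 1];   /* TUPLE2_SIZE: header + two elements */
    heap[0] = 2;        /* header word: arity (tag bits omitted) */
    heap[1] = 100;      /* first element  */
    heap[2] = 200;      /* second element */
    printf("arity %lu: {%lu, %lu}\n", heap[0], heap[1], heap[2]);
    return 0;
}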
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 21ae205237..0631404599 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -906,6 +906,8 @@ typedef struct ErtsLiteralArea_ {
Eterm start[1]; /* beginning of area */
} ErtsLiteralArea;
+void erts_queue_release_literals(Process *c_p, ErtsLiteralArea* literals);
+
#define ERTS_LITERAL_AREA_ALLOC_SIZE(N) \
(sizeof(ErtsLiteralArea) + sizeof(Eterm)*((N) - 1))
@@ -1001,6 +1003,7 @@ typedef struct {
Uint literal_size;
Eterm *lit_purge_ptr;
Uint lit_purge_sz;
+ int copy_literals;
} erts_shcopy_t;
#define INITIALIZE_SHCOPY(info) \
@@ -1010,6 +1013,7 @@ typedef struct {
info.bitstore_start = info.bitstore_default; \
info.shtable_start = info.shtable_default; \
info.literal_size = 0; \
+ info.copy_literals = 0; \
if (larea__) { \
info.lit_purge_ptr = &larea__->start[0]; \
info.lit_purge_sz = larea__->end - info.lit_purge_ptr; \
@@ -1238,6 +1242,13 @@ Sint erts_re_set_loop_limit(Sint limit);
void erts_init_bif_binary(void);
Sint erts_binary_set_loop_limit(Sint limit);
+/* erl_bif_persistent.c */
+void erts_init_bif_persistent_term(void);
+Uint erts_persistent_term_count(void);
+void erts_init_persistent_dumping(void);
+extern ErtsLiteralArea** erts_persistent_areas;
+extern Uint erts_num_persistent_areas;
+
/* external.c */
void erts_init_external(void);
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index bb22548587..869a575cb4 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -325,6 +325,7 @@ typedef long Sint erts_align_attribute(sizeof(long));
#define UWORD_CONSTANT(Const) Const##UL
#define ERTS_UWORD_MAX ULONG_MAX
#define ERTS_SWORD_MAX LONG_MAX
+#define ERTS_SWORD_MIN LONG_MIN
#define ERTS_SIZEOF_ETERM SIZEOF_LONG
#define ErtsStrToSint strtol
#elif SIZEOF_VOID_P == SIZEOF_INT
@@ -335,6 +336,7 @@ typedef int Sint erts_align_attribute(sizeof(int));
#define UWORD_CONSTANT(Const) Const##U
#define ERTS_UWORD_MAX UINT_MAX
#define ERTS_SWORD_MAX INT_MAX
+#define ERTS_SWORD_MIN INT_MIN
#define ERTS_SIZEOF_ETERM SIZEOF_INT
#define ErtsStrToSint strtol
#elif SIZEOF_VOID_P == SIZEOF_LONG_LONG
@@ -345,6 +347,7 @@ typedef long long Sint erts_align_attribute(sizeof(long long));
#define UWORD_CONSTANT(Const) Const##ULL
#define ERTS_UWORD_MAX ULLONG_MAX
#define ERTS_SWORD_MAX LLONG_MAX
+#define ERTS_SWORD_MIN LLONG_MIN
#define ERTS_SIZEOF_ETERM SIZEOF_LONG_LONG
#if defined(__WIN32__)
#define ErtsStrToSint _strtoi64
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 08f8ca9788..d81bd89a48 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -2615,27 +2615,6 @@ not_equal:
}
-/*
- * Lexically compare two strings of bytes (string s1 length l1 and s2 l2).
- *
- * s1 < s2 return -1
- * s1 = s2 return 0
- * s1 > s2 return +1
- */
-static int cmpbytes(byte *s1, int l1, byte *s2, int l2)
-{
- int i;
- i = 0;
- while((i < l1) && (i < l2)) {
- if (s1[i] < s2[i]) return(-1);
- if (s1[i] > s2[i]) return(1);
- i++;
- }
- if (l1 < l2) return(-1);
- if (l1 > l2) return(1);
- return(0);
-}
-
/*
* Compare objects.
@@ -2649,20 +2628,6 @@ static int cmpbytes(byte *s1, int l1, byte *s2, int l2)
*
*/
-
-#define float_comp(x,y) (((x)<(y)) ? -1 : (((x)==(y)) ? 0 : 1))
-
-int erts_cmp_atoms(Eterm a, Eterm b)
-{
- Atom *aa = atom_tab(atom_val(a));
- Atom *bb = atom_tab(atom_val(b));
- int diff = aa->ord0 - bb->ord0;
- if (diff)
- return diff;
- return cmpbytes(aa->name+3, aa->len-3,
- bb->name+3, bb->len-3);
-}
-
/* cmp(Eterm a, Eterm b)
* For compatibility with HiPE - arith-based compare.
*/
@@ -2673,22 +2638,6 @@ Sint cmp(Eterm a, Eterm b)
Sint erts_cmp_compound(Eterm a, Eterm b, int exact, int eq_only);
-Sint erts_cmp(Eterm a, Eterm b, int exact, int eq_only)
-{
- if (is_atom(a) && is_atom(b)) {
- return erts_cmp_atoms(a, b);
- } else if (is_both_small(a, b)) {
- return (signed_val(a) - signed_val(b));
- } else if (is_float(a) && is_float(b)) {
- FloatDef af, bf;
- GET_DOUBLE(a, af);
- GET_DOUBLE(b, bf);
- return float_comp(af.fd, bf.fd);
- }
- return erts_cmp_compound(a,b,exact,eq_only);
-}
-
-
/* erts_cmp(Eterm a, Eterm b, int exact)
* exact = 1 -> term-based compare
* exact = 0 -> arith-based compare
@@ -2985,7 +2934,7 @@ tailrecur_ne:
GET_DOUBLE(a, af);
GET_DOUBLE(b, bf);
- ON_CMP_GOTO(float_comp(af.fd, bf.fd));
+ ON_CMP_GOTO(erts_float_comp(af.fd, bf.fd));
}
case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
@@ -3022,10 +2971,7 @@ tailrecur_ne:
ErlFunThing* f2 = (ErlFunThing *) fun_val(b);
Sint diff;
- diff = cmpbytes(atom_tab(atom_val(f1->fe->module))->name,
- atom_tab(atom_val(f1->fe->module))->len,
- atom_tab(atom_val(f2->fe->module))->name,
- atom_tab(atom_val(f2->fe->module))->len);
+ diff = erts_cmp_atoms((f1->fe)->module, (f2->fe)->module);
if (diff != 0) {
RETURN_NEQ(diff);
}
@@ -3219,7 +3165,7 @@ tailrecur_ne:
if (f2.fd < MAX_LOSSLESS_FLOAT && f2.fd > MIN_LOSSLESS_FLOAT) {
/* Float is within the no loss limit */
f1.fd = signed_val(aw);
- j = float_comp(f1.fd, f2.fd);
+ j = erts_float_comp(f1.fd, f2.fd);
}
#if ERTS_SIZEOF_ETERM == 8
else if (f2.fd > (double) (MAX_SMALL + 1)) {
@@ -3266,7 +3212,7 @@ tailrecur_ne:
if (big_to_double(aw, &f1.fd) < 0) {
j = big_sign(aw) ? -1 : 1;
} else {
- j = float_comp(f1.fd, f2.fd);
+ j = erts_float_comp(f1.fd, f2.fd);
}
} else {
big = double_to_big(f2.fd, big_buf, sizeof(big_buf)/sizeof(Eterm));
@@ -3282,7 +3228,7 @@ tailrecur_ne:
if (f1.fd < MAX_LOSSLESS_FLOAT && f1.fd > MIN_LOSSLESS_FLOAT) {
/* Float is within the no loss limit */
f2.fd = signed_val(bw);
- j = float_comp(f1.fd, f2.fd);
+ j = erts_float_comp(f1.fd, f2.fd);
}
#if ERTS_SIZEOF_ETERM == 8
else if (f1.fd > (double) (MAX_SMALL + 1)) {
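
A quick self-contained check of the shared erts_float_comp macro that replaces the float_comp copy formerly private to utils.c; it yields the usual three-way result:

#include <stdio.h>

#define erts_float_comp(x,y) (((x)<(y)) ? -1 : (((x)==(y)) ? 0 : 1))

int main(void) {
    printf("%d %d %d\n",
           erts_float_comp(1.0, 2.0),   /* -1 */
           erts_float_comp(2.0, 2.0),   /*  0 */
           erts_float_comp(3.0, 2.0));  /*  1 */
    return 0;
}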