Diffstat (limited to 'erts/emulator/beam')
-rw-r--r--   erts/emulator/beam/beam_bif_load.c       |  72
-rw-r--r--   erts/emulator/beam/beam_load.c           |  10
-rw-r--r--   erts/emulator/beam/bif.c                 |   4
-rw-r--r--   erts/emulator/beam/big.c                 |  18
-rw-r--r--   erts/emulator/beam/big.h                 |   2
-rw-r--r--   erts/emulator/beam/binary.c              |  80
-rw-r--r--   erts/emulator/beam/bs_instrs.tab         |  23
-rw-r--r--   erts/emulator/beam/copy.c                |  49
-rw-r--r--   erts/emulator/beam/dist.c                |  36
-rw-r--r--   erts/emulator/beam/dist.h                |  22
-rw-r--r--   erts/emulator/beam/erl_alloc.types       |   1
-rw-r--r--   erts/emulator/beam/erl_alloc_util.c      |  69
-rw-r--r--   erts/emulator/beam/erl_bif_binary.c      | 184
-rw-r--r--   erts/emulator/beam/erl_bif_info.c        |  31
-rw-r--r--   erts/emulator/beam/erl_bif_lists.c       |  15
-rw-r--r--   erts/emulator/beam/erl_bif_persistent.c  |  99
-rw-r--r--   erts/emulator/beam/erl_binary.h          | 118
-rw-r--r--   erts/emulator/beam/erl_bits.c            |  95
-rw-r--r--   erts/emulator/beam/erl_bits.h            |  18
-rw-r--r--   erts/emulator/beam/erl_db.c              |  15
-rw-r--r--   erts/emulator/beam/erl_db.h              |   4
-rw-r--r--   erts/emulator/beam/erl_db_catree.c       |  28
-rw-r--r--   erts/emulator/beam/erl_db_catree.h       |   3
-rw-r--r--   erts/emulator/beam/erl_db_hash.c         | 256
-rw-r--r--   erts/emulator/beam/erl_db_hash.h         |   6
-rw-r--r--   erts/emulator/beam/erl_db_tree.c         |   6
-rw-r--r--   erts/emulator/beam/erl_db_tree.h         |   4
-rw-r--r--   erts/emulator/beam/erl_gc.c              |  55
-rw-r--r--   erts/emulator/beam/erl_monitor_link.c    |  49
-rw-r--r--   erts/emulator/beam/erl_monitor_link.h    |  17
-rw-r--r--   erts/emulator/beam/erl_node_tables.c     | 251
-rw-r--r--   erts/emulator/beam/erl_node_tables.h     |  41
-rw-r--r--   erts/emulator/beam/erl_printf_term.c     |  29
-rw-r--r--   erts/emulator/beam/erl_proc_sig_queue.c  |   8
-rw-r--r--   erts/emulator/beam/erl_process.c         | 143
-rw-r--r--   erts/emulator/beam/erl_process.h         |   9
-rw-r--r--   erts/emulator/beam/erl_trace.c           |  62
-rw-r--r--   erts/emulator/beam/erl_trace.h           |   8
-rw-r--r--   erts/emulator/beam/external.c            |  84
-rw-r--r--   erts/emulator/beam/global.h              |  42
40 files changed, 1441 insertions(+), 625 deletions(-)
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index bb1b2e5b27..4c8ee5178a 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -1799,6 +1799,78 @@ erts_queue_release_literals(Process* c_p, ErtsLiteralArea* literals)
}
}
+struct debug_la_oh {
+ void (*func)(ErlOffHeap *, void *);
+ void *arg;
+};
+
+static void debug_later_cleanup_literal_area_off_heap(void *vfap,
+ ErtsThrPrgrVal val,
+ void *vlrlap)
+{
+ struct debug_la_oh *fap = vfap;
+ ErtsLaterReleasLiteralArea *lrlap = vlrlap;
+ ErtsLiteralArea *lap = lrlap->la;
+ if (!erts_debug_have_accessed_literal_area(lap)) {
+ ErlOffHeap oh;
+ ERTS_INIT_OFF_HEAP(&oh);
+ oh.first = lap->off_heap;
+ (*fap->func)(&oh, fap->arg);
+ erts_debug_save_accessed_literal_area(lap);
+ }
+}
+
+static void debug_later_complete_literal_area_switch_off_heap(void *vfap,
+ ErtsThrPrgrVal val,
+ void *vlap)
+{
+ struct debug_la_oh *fap = vfap;
+ ErtsLiteralArea *lap = vlap;
+ if (lap && !erts_debug_have_accessed_literal_area(lap)) {
+ ErlOffHeap oh;
+ ERTS_INIT_OFF_HEAP(&oh);
+ oh.first = lap->off_heap;
+ (*fap->func)(&oh, fap->arg);
+ erts_debug_save_accessed_literal_area(lap);
+ }
+}
+
+
+void
+erts_debug_foreach_release_literal_area_off_heap(void (*func)(ErlOffHeap *, void *), void *arg)
+{
+ ErtsLiteralArea *lap;
+ ErlOffHeap oh;
+ ErtsLiteralAreaRef *ref;
+ struct debug_la_oh fa;
+ erts_mtx_lock(&release_literal_areas.mtx);
+ for (ref = release_literal_areas.first; ref; ref = ref->next) {
+ lap = ref->literal_area;
+ if (!erts_debug_have_accessed_literal_area(lap)) {
+ ERTS_INIT_OFF_HEAP(&oh);
+ oh.first = lap->off_heap;
+ (*func)(&oh, arg);
+ erts_debug_save_accessed_literal_area(lap);
+ }
+ }
+ erts_mtx_unlock(&release_literal_areas.mtx);
+ lap = ERTS_COPY_LITERAL_AREA();
+ if (lap && !erts_debug_have_accessed_literal_area(lap)) {
+ ERTS_INIT_OFF_HEAP(&oh);
+ oh.first = lap->off_heap;
+ (*func)(&oh, arg);
+ erts_debug_save_accessed_literal_area(lap);
+ }
+ fa.func = func;
+ fa.arg = arg;
+ erts_debug_later_op_foreach(later_release_literal_area,
+ debug_later_cleanup_literal_area_off_heap,
+ (void *) &fa);
+ erts_debug_later_op_foreach(complete_literal_area_switch,
+ debug_later_complete_literal_area_switch_off_heap,
+ (void *) &fa);
+}
+
/*
* Move code from current to old and null all export entries for the module
*/
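
The new walker visits every literal area the release machinery can still reach: the release_literal_areas queue, the area currently being switched in, and areas parked in scheduled later operations, using erts_debug_have_accessed_literal_area()/erts_debug_save_accessed_literal_area() so each area is reported only once. A minimal sketch of how a debug caller might use it; the traversal function and callback signature come from this patch, while the counting helper itself is hypothetical:

    /* Hypothetical debug helper: counts ProcBin entries reachable from
     * literal areas queued for release. Only the callback shape is dictated
     * by the patch; the counting logic is illustrative. */
    static void count_proc_bins(ErlOffHeap *oh, void *arg)
    {
        struct erl_off_heap_header *ohh;
        Uint *count = arg;

        for (ohh = oh->first; ohh; ohh = ohh->next) {
            if (ohh->thing_word == HEADER_PROC_BIN) {
                (*count)++;
            }
        }
    }

    static Uint count_release_queue_bins(void)
    {
        Uint nbins = 0;
        erts_debug_foreach_release_literal_area_off_heap(count_proc_bins, &nbins);
        return nbins;
    }
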
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index 9add87d944..3d5683f19f 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -4615,7 +4615,15 @@ typedef struct SortGenOpArg {
static int
genopargtermcompare(SortGenOpArg* a, SortGenOpArg* b)
{
- return CMP_TERM(a->term, b->term);
+ Sint res = CMP_TERM(a->term, b->term);
+
+ if (res < 0) {
+ return -1;
+ } else if (res > 0) {
+ return 1;
+ }
+
+ return 0;
}
static int
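
This change, together with the matching subtract_term_cmp() helper added in erl_bif_lists.c further down, clamps the Sint result of CMP_TERM to -1/0/1 before it is returned as an int. A small standalone illustration of why simply truncating the wide result is unsafe; the values are made up and nothing below is from the patch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Pretend the term comparison returned a positive difference of 2^32. */
        int64_t res = (int64_t)1 << 32;

        int truncated = (int)res;                            /* 0: "equal", wrong   */
        int clamped   = (res < 0) ? -1 : (res > 0) ? 1 : 0;  /* 1: "greater", right */

        printf("truncated=%d clamped=%d\n", truncated, clamped);
        return 0;
    }
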
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index dd04018ce6..9f67f46b31 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -1915,7 +1915,7 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm return_term, Eterm *refp,
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp,
"Discarding message %T from %T to %T in an old "
- "incarnation (%d) of this node (%d)\n",
+ "incarnation (%u) of this node (%u)\n",
msg,
p->common.id,
to,
@@ -1959,7 +1959,7 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm return_term, Eterm *refp,
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp,
"Discarding message %T from %T to %T in an old "
- "incarnation (%d) of this node (%d)\n",
+ "incarnation (%u) of this node (%u)\n",
msg,
p->common.id,
to,
diff --git a/erts/emulator/beam/big.c b/erts/emulator/beam/big.c
index 522f50287a..7666f23a4f 100644
--- a/erts/emulator/beam/big.c
+++ b/erts/emulator/beam/big.c
@@ -2176,6 +2176,24 @@ term_to_Uint64(Eterm term, Uint64 *up)
#endif
}
+int
+term_to_Uint32(Eterm term, Uint32 *up)
+{
+#if ERTS_SIZEOF_ETERM == 4
+ return term_to_Uint(term,up);
+#else
+ if (is_small(term)) {
+ Sint i = signed_val(term);
+ if (i >= 0 && i <= (Sint) 0xFFFFFFFF) { /* value must fit in 32 bits */
+ *up = (Uint32) i;
+ return 1;
+ }
+ }
+ *up = BADARG;
+ return 0;
+#endif
+}
+
int term_to_Sint(Eterm term, Sint *sp)
{
diff --git a/erts/emulator/beam/big.h b/erts/emulator/beam/big.h
index ad19cce395..3fed076419 100644
--- a/erts/emulator/beam/big.h
+++ b/erts/emulator/beam/big.h
@@ -168,6 +168,8 @@ Eterm erts_uint64_array_to_big(Uint **, int, int, Uint64 *);
int term_to_Uint64(Eterm, Uint64*);
int term_to_Sint64(Eterm, Sint64*);
#endif
+int term_to_Uint32(Eterm, Uint32*);
+
Uint32 big_to_uint32(Eterm b);
int term_equals_2pow32(Eterm);
diff --git a/erts/emulator/beam/binary.c b/erts/emulator/beam/binary.c
index a18228b84a..4ddf59092a 100644
--- a/erts/emulator/beam/binary.c
+++ b/erts/emulator/beam/binary.c
@@ -1153,51 +1153,47 @@ BIF_RETTYPE list_to_bitstring_1(BIF_ALIST_1)
BIF_RETTYPE split_binary_2(BIF_ALIST_2)
{
- Uint pos;
- ErlSubBin* sb1;
- ErlSubBin* sb2;
- size_t orig_size;
- Eterm orig;
- Uint offset;
- Uint bit_offset;
- Uint bit_size;
- Eterm* hp;
+ size_t orig_size, left_size, right_size;
+ Uint byte_offset, bit_offset, bit_size;
+ Uint split_at;
+
+ Eterm *hp, *hp_end;
+ Eterm left, right;
+ Eterm real_bin;
+ Eterm result;
+ byte *bptr;
if (is_not_binary(BIF_ARG_1)) {
- goto error;
- }
- if (!term_to_Uint(BIF_ARG_2, &pos)) {
- goto error;
- }
- if ((orig_size = binary_size(BIF_ARG_1)) < pos) {
- goto error;
+ BIF_ERROR(BIF_P, BADARG);
+ } else if (!term_to_Uint(BIF_ARG_2, &split_at)) {
+ BIF_ERROR(BIF_P, BADARG);
+ } else if ((orig_size = binary_size(BIF_ARG_1)) < split_at) {
+ BIF_ERROR(BIF_P, BADARG);
}
- hp = HAlloc(BIF_P, 2*ERL_SUB_BIN_SIZE+3);
- ERTS_GET_REAL_BIN(BIF_ARG_1, orig, offset, bit_offset, bit_size);
- sb1 = (ErlSubBin *) hp;
- sb1->thing_word = HEADER_SUB_BIN;
- sb1->size = pos;
- sb1->offs = offset;
- sb1->orig = orig;
- sb1->bitoffs = bit_offset;
- sb1->bitsize = 0;
- sb1->is_writable = 0;
- hp += ERL_SUB_BIN_SIZE;
-
- sb2 = (ErlSubBin *) hp;
- sb2->thing_word = HEADER_SUB_BIN;
- sb2->size = orig_size - pos;
- sb2->offs = offset + pos;
- sb2->orig = orig;
- sb2->bitoffs = bit_offset;
- sb2->bitsize = bit_size; /* The extra bits go into the second binary. */
- sb2->is_writable = 0;
- hp += ERL_SUB_BIN_SIZE;
-
- return TUPLE2(hp, make_binary(sb1), make_binary(sb2));
-
- error:
- BIF_ERROR(BIF_P, BADARG);
+
+ left_size = split_at;
+ right_size = orig_size - split_at;
+
+ ERTS_GET_REAL_BIN(BIF_ARG_1, real_bin, byte_offset, bit_offset, bit_size);
+ bptr = binary_bytes(real_bin);
+
+ hp = HAlloc(BIF_P, EXTRACT_SUB_BIN_HEAP_NEED * 2 + 3);
+ hp_end = hp + (EXTRACT_SUB_BIN_HEAP_NEED * 2 + 3);
+
+ left = erts_extract_sub_binary(&hp, real_bin, bptr,
+ byte_offset * 8 + bit_offset,
+ left_size * 8);
+
+ right = erts_extract_sub_binary(&hp, real_bin, bptr,
+ (byte_offset + split_at) * 8 + bit_offset,
+ right_size * 8 + bit_size);
+
+ result = TUPLE2(hp, left, right);
+ hp += 3;
+
+ HRelease(BIF_P, hp_end, hp);
+
+ return result;
}
diff --git a/erts/emulator/beam/bs_instrs.tab b/erts/emulator/beam/bs_instrs.tab
index bd1ad91e45..5f25bc2ad3 100644
--- a/erts/emulator/beam/bs_instrs.tab
+++ b/erts/emulator/beam/bs_instrs.tab
@@ -149,7 +149,7 @@ i_bs_get_binary_all2.execute(Fail, Live, Unit, Dst) {
ErlBinMatchBuffer *_mb;
Eterm _result;
- $GC_TEST_PRESERVE(ERL_SUB_BIN_SIZE, $Live, context);
+ $GC_TEST_PRESERVE(EXTRACT_SUB_BIN_HEAP_NEED, $Live, context);
_mb = ms_matchbuffer(context);
if (((_mb->size - _mb->offset) % $Unit) == 0) {
LIGHT_SWAPOUT;
@@ -179,7 +179,7 @@ i_bs_get_binary2.execute(Fail, Live, Sz, Flags, Dst) {
Eterm _result;
Uint _size;
$BS_GET_FIELD_SIZE($Sz, (($Flags) >> 3), $FAIL($Fail), _size);
- $GC_TEST_PRESERVE(ERL_SUB_BIN_SIZE, $Live, context);
+ $GC_TEST_PRESERVE(EXTRACT_SUB_BIN_HEAP_NEED, $Live, context);
_mb = ms_matchbuffer(context);
LIGHT_SWAPOUT;
_result = erts_bs_get_binary_2(c_p, _size, $Flags, _mb);
@@ -206,8 +206,7 @@ i_bs_get_binary_imm2.fetch(Ctx) {
i_bs_get_binary_imm2.execute(Fail, Live, Sz, Flags, Dst) {
ErlBinMatchBuffer *_mb;
Eterm _result;
- $GC_TEST_PRESERVE(heap_bin_size(ERL_ONHEAP_BIN_LIMIT),
- $Live, context);
+ $GC_TEST_PRESERVE(EXTRACT_SUB_BIN_HEAP_NEED, $Live, context);
_mb = ms_matchbuffer(context);
LIGHT_SWAPOUT;
_result = erts_bs_get_binary_2(c_p, $Sz, $Flags, _mb);
@@ -876,9 +875,7 @@ bs_start_match.execute(Fail, Live, Slots, Dst) {
result = erts_bs_start_match_2(c_p, context, slots);
HTOP = HEAP_TOP(c_p);
HEAP_SPACE_VERIFIED(0);
- if (is_non_value(result)) {
- $FAIL($Fail);
- }
+
$REFRESH_GEN_DEST();
$Dst = result;
} else {
@@ -1296,10 +1293,6 @@ i_bs_start_match3_gp.execute(Live, Fail, Dst, Pos) {
HTOP = HEAP_TOP(c_p);
HEAP_SPACE_VERIFIED(0);
- if (ms == NULL) {
- $FAIL($Fail);
- }
-
$REFRESH_GEN_DEST();
$Dst = make_matchstate(ms);
position = ms->mb.offset;
@@ -1348,10 +1341,6 @@ i_bs_start_match3.execute(Live, Fail, Dst) {
HTOP = HEAP_TOP(c_p);
HEAP_SPACE_VERIFIED(0);
- if (ms == NULL) {
- $FAIL($Fail);
- }
-
$REFRESH_GEN_DEST();
$Dst = make_matchstate(ms);
} else {
@@ -1523,10 +1512,6 @@ i_bs_start_match3.execute(Live, Fail, Dst) {
HTOP = HEAP_TOP(c_p);
HEAP_SPACE_VERIFIED(0);
- if (is_non_value(result)) {
- $FAIL($Fail);
- }
-
$REFRESH_GEN_DEST();
$Dst = result;
} else {
diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c
index db74b06cc5..9bbcba6f3b 100644
--- a/erts/emulator/beam/copy.c
+++ b/erts/emulator/beam/copy.c
@@ -604,7 +604,12 @@ cleanup:
/*
* Copy a structure to a heap.
*/
-Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint *bsz, erts_literal_area_t *litopt)
+Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap,
+ Uint *bsz, erts_literal_area_t *litopt
+#ifdef ERTS_COPY_REGISTER_LOCATION
+ , char *file, int line
+#endif
+ )
{
char* hstart;
Uint hsize;
@@ -854,7 +859,11 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
case EXTERNAL_REF_SUBTAG:
{
ExternalThing *etp = (ExternalThing *) objp;
+#if defined(ERTS_COPY_REGISTER_LOCATION) && defined(ERL_NODE_BOOKKEEP)
+ erts_ref_node_entry__(etp->node, 2, make_boxed(htop), file, line);
+#else
erts_ref_node_entry(etp->node, 2, make_boxed(htop));
+#endif
}
L_off_heap_node_container_common:
{
@@ -1326,8 +1335,13 @@ Uint copy_shared_calculate(Eterm obj, erts_shcopy_t *info)
* Copy object "obj" preserving sharing.
* Second half: copy and restore the object.
*/
-Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
- Eterm** hpp, ErlOffHeap* off_heap) {
+Uint copy_shared_perform_x(Eterm obj, Uint size, erts_shcopy_t *info,
+ Eterm** hpp, ErlOffHeap* off_heap
+#ifdef ERTS_COPY_REGISTER_LOCATION
+ , char *file, int line
+#endif
+ )
+{
Uint e;
unsigned sz;
Eterm* ptr;
@@ -1393,7 +1407,11 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
*resp = obj;
} else {
Uint bsz = 0;
- *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz, NULL); /* copy literal */
+ *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz, NULL
+#ifdef ERTS_COPY_REGISTER_LOCATION
+ , file, line
+#endif
+ ); /* copy literal */
hbot -= bsz;
}
goto cleanup_next;
@@ -1461,7 +1479,11 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
*resp = obj;
} else {
Uint bsz = 0;
- *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz, NULL); /* copy literal */
+ *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz, NULL
+#ifdef ERTS_COPY_REGISTER_LOCATION
+ , file, line
+#endif
+ ); /* copy literal */
hbot -= bsz;
}
goto cleanup_next;
@@ -1660,7 +1682,12 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
case EXTERNAL_REF_SUBTAG:
{
ExternalThing *etp = (ExternalThing *) ptr;
+
+#if defined(ERTS_COPY_REGISTER_LOCATION) && defined(ERL_NODE_BOOKKEEP)
+ erts_ref_node_entry__(etp->node, 2, make_boxed(hp), file, line);
+#else
erts_ref_node_entry(etp->node, 2, make_boxed(hp));
+#endif
}
off_heap_node_container_common:
{
@@ -1823,8 +1850,12 @@ all_clean:
*
* NOTE: Assumes that term is a tuple (ptr is an untagged tuple ptr).
*/
-Eterm copy_shallow(Eterm* ERTS_RESTRICT ptr, Uint sz, Eterm** hpp,
- ErlOffHeap* off_heap)
+Eterm
+copy_shallow_x(Eterm* ERTS_RESTRICT ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap
+#ifdef ERTS_COPY_REGISTER_LOCATION
+ , char *file, int line
+#endif
+ )
{
Eterm* tp = ptr;
Eterm* hp = *hpp;
@@ -1866,7 +1897,11 @@ Eterm copy_shallow(Eterm* ERTS_RESTRICT ptr, Uint sz, Eterm** hpp,
case EXTERNAL_REF_SUBTAG:
{
ExternalThing* etp = (ExternalThing *) (tp-1);
+#if defined(ERTS_COPY_REGISTER_LOCATION) && defined(ERL_NODE_BOOKKEEP)
+ erts_ref_node_entry__(etp->node, 2, make_boxed(hp-1), file, line);
+#else
erts_ref_node_entry(etp->node, 2, make_boxed(hp-1));
+#endif
}
off_heap_common:
{
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index 6bf866eac9..dafe805a6f 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -1266,18 +1266,6 @@ erts_dsig_send_group_leader(ErtsDSigSendContext *ctx, Eterm leader, Eterm remote
return dsig_send_ctl(ctx, ctl);
}
-struct dist_sequences {
- ErlHeapFragment hfrag;
- struct dist_sequences *parent;
- struct dist_sequences *left;
- struct dist_sequences *right;
- char is_red;
-
- Uint64 seq_id;
- int cnt;
- Sint ctl_len;
-};
-
#define ERTS_RBT_PREFIX dist_seq
#define ERTS_RBT_T DistSeqNode
#define ERTS_RBT_KEY_T Uint
@@ -1312,25 +1300,25 @@ struct dist_sequences {
#include "erl_rbtree.h"
-struct erts_dist_seq_tree_foreach_iter_arg {
- int (*func)(ErtsDistExternal *, void *, Sint);
+struct erts_debug_dist_seq_tree_foreach_iter_arg {
+ int (*func)(DistSeqNode *, void *, Sint);
void *arg;
};
static int
-erts_dist_seq_tree_foreach_iter(DistSeqNode *seq, void *arg, Sint reds)
+erts_debug_dist_seq_tree_foreach_iter(DistSeqNode *seq, void *arg, Sint reds)
{
- struct erts_dist_seq_tree_foreach_iter_arg *state = arg;
- return state->func(erts_get_dist_ext(&seq->hfrag), state->arg, reds);
+ struct erts_debug_dist_seq_tree_foreach_iter_arg *state = arg;
+ return state->func(seq, state->arg, reds);
}
void
-erts_dist_seq_tree_foreach(DistEntry *dep, int (*func)(ErtsDistExternal *, void *, Sint), void *arg)
+erts_debug_dist_seq_tree_foreach(DistEntry *dep, int (*func)(DistSeqNode *, void *, Sint), void *arg)
{
- struct erts_dist_seq_tree_foreach_iter_arg state;
+ struct erts_debug_dist_seq_tree_foreach_iter_arg state;
state.func = func;
state.arg = arg;
- dist_seq_rbt_foreach(dep->sequences, erts_dist_seq_tree_foreach_iter, &state);
+ dist_seq_rbt_foreach(dep->sequences, erts_debug_dist_seq_tree_foreach_iter, &state);
}
static int dist_seq_cleanup(DistSeqNode *seq, void *unused, Sint reds)
@@ -3774,12 +3762,10 @@ int distribution_info(fmtfn_t to, void *arg) /* Called by break handler */
BIF_RETTYPE setnode_2(BIF_ALIST_2)
{
Process *net_kernel;
- Uint creation;
+ Uint32 creation;
/* valid creation ? */
- if(!term_to_Uint(BIF_ARG_2, &creation))
- goto error;
- if(creation > 3)
+ if(!term_to_Uint32(BIF_ARG_2, &creation))
goto error;
/* valid node name ? */
@@ -3823,7 +3809,7 @@ BIF_RETTYPE setnode_2(BIF_ALIST_2)
erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_thr_progress_block();
inc_no_nodes();
- erts_set_this_node(BIF_ARG_1, (Uint32) creation);
+ erts_set_this_node(BIF_ARG_1, creation);
erts_is_alive = 1;
send_nodes_mon_msgs(NULL, am_nodeup, BIF_ARG_1, am_visible, NIL);
erts_thr_progress_unblock();
diff --git a/erts/emulator/beam/dist.h b/erts/emulator/beam/dist.h
index 067028634b..37ec88cc55 100644
--- a/erts/emulator/beam/dist.h
+++ b/erts/emulator/beam/dist.h
@@ -54,11 +54,12 @@
#define DFLAG_DIST_MANDATORY (DFLAG_EXTENDED_REFERENCES \
| DFLAG_EXTENDED_PIDS_PORTS \
| DFLAG_UTF8_ATOMS \
- | DFLAG_NEW_FUN_TAGS)
+ | DFLAG_NEW_FUN_TAGS \
+ | DFLAG_BIG_CREATION)
/*
* Additional optimistic flags when encoding toward pending connection.
- * If remote node (erl_interface) does not supporting these then we may need
+ * If remote node (erl_interface) does not support these then we may need
* to transcode messages enqueued before connection setup was finished.
*/
#define DFLAG_DIST_HOPEFULLY (DFLAG_EXPORT_PTR_TAG \
@@ -75,7 +76,6 @@
| DFLAG_SMALL_ATOM_TAGS \
| DFLAG_UTF8_ATOMS \
| DFLAG_MAP_TAG \
- | DFLAG_BIG_CREATION \
| DFLAG_SEND_SENDER \
| DFLAG_BIG_SEQTRACE_LABELS \
| DFLAG_EXIT_PAYLOAD \
@@ -274,6 +274,18 @@ typedef struct erts_dsig_send_context {
typedef struct dist_sequences DistSeqNode;
+struct dist_sequences {
+ ErlHeapFragment hfrag;
+ struct dist_sequences *parent;
+ struct dist_sequences *left;
+ struct dist_sequences *right;
+ char is_red;
+
+ Uint64 seq_id;
+ int cnt;
+ Sint ctl_len;
+};
+
/*
* erts_dsig_send_* return values.
*/
@@ -306,9 +318,9 @@ extern Uint erts_dist_cache_size(void);
extern Sint erts_abort_connection_rwunlock(DistEntry *dep);
-extern void erts_dist_seq_tree_foreach(
+extern void erts_debug_dist_seq_tree_foreach(
DistEntry *dep,
- int (*func)(ErtsDistExternal *, void*, Sint), void *args);
+ int (*func)(DistSeqNode *, void*, Sint), void *args);
extern int erts_dsig_prepare(ErtsDSigSendContext *,
DistEntry*,
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 58d586453c..349977ebe7 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -149,6 +149,7 @@ type MODULE_TABLE LONG_LIVED CODE module_tab
type TAINT LONG_LIVED CODE taint_list
type MODULE_REFS STANDARD CODE module_refs
type NC_TMP TEMPORARY SYSTEM nc_tmp
+type NC_STD STANDARD SYSTEM nc_std
type TMP TEMPORARY SYSTEM tmp
type UNDEF SYSTEM SYSTEM undefined
type DCACHE STANDARD SYSTEM dcache
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index 25ac3bc5af..bfc2f5992c 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -7112,12 +7112,21 @@ static int blockscan_cpool_yielding(blockscan_t *state)
return 0;
}
-static int blockscan_yield_helper(blockscan_t *state,
- int (*yielding_op)(blockscan_t*))
+/* */
+
+static int blockscan_finish(blockscan_t *state)
{
- /* Note that we don't check whether to abort here; only yielding_op knows
- * whether the carrier is still in the list/pool. */
+ if (ERTS_PROC_IS_EXITING(state->process)) {
+ state->abort(state->user_data);
+ return 0;
+ }
+ state->current_op = blockscan_finish;
+
+ return state->finish(state->user_data);
+}
+
+static void blockscan_lock_helper(blockscan_t *state) {
if ((state->allocator)->thread_safe) {
/* Locked scans have to be as short as possible. */
state->reductions = 1;
@@ -7126,34 +7135,18 @@ static int blockscan_yield_helper(blockscan_t *state,
} else {
state->reductions = BLOCKSCAN_REDUCTIONS;
}
+}
- if (yielding_op(state)) {
- state->next_op = state->current_op;
- }
-
+static void blockscan_unlock_helper(blockscan_t *state) {
if ((state->allocator)->thread_safe) {
erts_mtx_unlock(&(state->allocator)->mutex);
}
-
- return 1;
-}
-
-/* */
-
-static int blockscan_finish(blockscan_t *state)
-{
- if (ERTS_PROC_IS_EXITING(state->process)) {
- state->abort(state->user_data);
- return 0;
- }
-
- state->current_op = blockscan_finish;
-
- return state->finish(state->user_data);
}
static int blockscan_sweep_sbcs(blockscan_t *state)
{
+ blockscan_lock_helper(state);
+
if (state->current_op != blockscan_sweep_sbcs) {
SET_CARRIER_HDR(&state->dummy_carrier, 0, SCH_SBC, state->allocator);
state->current_clist = &(state->allocator)->sbc_list;
@@ -7163,11 +7156,19 @@ static int blockscan_sweep_sbcs(blockscan_t *state)
state->current_op = blockscan_sweep_sbcs;
state->next_op = blockscan_finish;
- return blockscan_yield_helper(state, blockscan_clist_yielding);
+ if (blockscan_clist_yielding(state)) {
+ state->next_op = state->current_op;
+ }
+
+ blockscan_unlock_helper(state);
+
+ return 1;
}
static int blockscan_sweep_mbcs(blockscan_t *state)
{
+ blockscan_lock_helper(state);
+
if (state->current_op != blockscan_sweep_mbcs) {
SET_CARRIER_HDR(&state->dummy_carrier, 0, SCH_MBC, state->allocator);
state->current_clist = &(state->allocator)->mbc_list;
@@ -7177,11 +7178,19 @@ static int blockscan_sweep_mbcs(blockscan_t *state)
state->current_op = blockscan_sweep_mbcs;
state->next_op = blockscan_sweep_sbcs;
- return blockscan_yield_helper(state, blockscan_clist_yielding);
+ if (blockscan_clist_yielding(state)) {
+ state->next_op = state->current_op;
+ }
+
+ blockscan_unlock_helper(state);
+
+ return 1;
}
static int blockscan_sweep_cpool(blockscan_t *state)
{
+ blockscan_lock_helper(state);
+
if (state->current_op != blockscan_sweep_cpool) {
SET_CARRIER_HDR(&state->dummy_carrier, 0, SCH_MBC, state->allocator);
state->cpool_cursor = (state->allocator)->cpool.sentinel;
@@ -7190,7 +7199,13 @@ static int blockscan_sweep_cpool(blockscan_t *state)
state->current_op = blockscan_sweep_cpool;
state->next_op = blockscan_sweep_mbcs;
- return blockscan_yield_helper(state, blockscan_cpool_yielding);
+ if (blockscan_cpool_yielding(state)) {
+ state->next_op = state->current_op;
+ }
+
+ blockscan_unlock_helper(state);
+
+ return 1;
}
static int blockscan_get_specific_allocator(int allocator_num,
diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c
index 4d6d31cd76..b8e56390c1 100644
--- a/erts/emulator/beam/erl_bif_binary.c
+++ b/erts/emulator/beam/erl_bif_binary.c
@@ -1750,9 +1750,8 @@ static Eterm do_split_single_result(Process *p, Eterm subject, BinaryFindContext
Uint offset;
Uint bit_offset;
Uint bit_size;
- ErlSubBin *sb1;
- ErlSubBin *sb2;
- Eterm *hp;
+ Uint hp_need;
+ Eterm *hp, *hp_end;
Eterm ret;
pos = ff->pos;
@@ -1765,57 +1764,58 @@ static Eterm do_split_single_result(Process *p, Eterm subject, BinaryFindContext
if (pos == 0) {
ret = NIL;
} else {
- hp = HAlloc(p, (ERL_SUB_BIN_SIZE + 2));
+ Eterm extracted;
+
+ hp_need = EXTRACT_SUB_BIN_HEAP_NEED + 2;
+
+ hp = HAlloc(p, hp_need);
+ hp_end = hp + hp_need;
+
ERTS_GET_REAL_BIN(subject, orig, offset, bit_offset, bit_size);
- sb1 = (ErlSubBin *) hp;
- sb1->thing_word = HEADER_SUB_BIN;
- sb1->size = pos;
- sb1->offs = offset;
- sb1->orig = orig;
- sb1->bitoffs = bit_offset;
- sb1->bitsize = bit_size;
- sb1->is_writable = 0;
- hp += ERL_SUB_BIN_SIZE;
-
- ret = CONS(hp, make_binary(sb1), NIL);
+ extracted = erts_extract_sub_binary(&hp, orig, binary_bytes(orig),
+ offset * 8 + bit_offset,
+ pos * 8 + bit_size);
+
+ ret = CONS(hp, extracted, NIL);
hp += 2;
+
+ HRelease(p, hp_end, hp);
+
+ return ret;
}
} else {
- if ((ctx->flags & BF_FLAG_SPLIT_TRIM_ALL) && (pos == 0)) {
- hp = HAlloc(p, 1 * (ERL_SUB_BIN_SIZE + 2));
- ERTS_GET_REAL_BIN(subject, orig, offset, bit_offset, bit_size);
- sb1 = NULL;
- } else {
- hp = HAlloc(p, 2 * (ERL_SUB_BIN_SIZE + 2));
- ERTS_GET_REAL_BIN(subject, orig, offset, bit_offset, bit_size);
- sb1 = (ErlSubBin *) hp;
- sb1->thing_word = HEADER_SUB_BIN;
- sb1->size = pos;
- sb1->offs = offset;
- sb1->orig = orig;
- sb1->bitoffs = bit_offset;
- sb1->bitsize = 0;
- sb1->is_writable = 0;
- hp += ERL_SUB_BIN_SIZE;
- }
-
- sb2 = (ErlSubBin *) hp;
- sb2->thing_word = HEADER_SUB_BIN;
- sb2->size = orig_size - pos - len;
- sb2->offs = offset + pos + len;
- sb2->orig = orig;
- sb2->bitoffs = bit_offset;
- sb2->bitsize = bit_size;
- sb2->is_writable = 0;
- hp += ERL_SUB_BIN_SIZE;
-
- ret = CONS(hp, make_binary(sb2), NIL);
- hp += 2;
- if (sb1 != NULL) {
- ret = CONS(hp, make_binary(sb1), ret);
- hp += 2;
- }
+ Eterm first, rest;
+
+ hp_need = (EXTRACT_SUB_BIN_HEAP_NEED + 2) * 2;
+
+ hp = HAlloc(p, hp_need);
+ hp_end = hp + hp_need;
+
+ ERTS_GET_REAL_BIN(subject, orig, offset, bit_offset, bit_size);
+
+ if ((ctx->flags & BF_FLAG_SPLIT_TRIM_ALL) && (pos == 0)) {
+ first = NIL;
+ } else {
+ first = erts_extract_sub_binary(&hp, orig, binary_bytes(orig),
+ offset * 8 + bit_offset,
+ pos * 8);
+ }
+
+ rest = erts_extract_sub_binary(&hp, orig, binary_bytes(orig),
+ (offset + pos + len) * 8 + bit_offset,
+ (orig_size - pos - len) * 8 + bit_size);
+
+ ret = CONS(hp, rest, NIL);
+ hp += 2;
+
+ if (first != NIL) {
+ ret = CONS(hp, first, ret);
+ hp += 2;
+ }
+
+ HRelease(p, hp_end, hp);
}
+
return ret;
}
@@ -1829,7 +1829,9 @@ static Eterm do_split_global_result(Process *p, Eterm subject, BinaryFindContext
Uint offset;
Uint bit_offset;
Uint bit_size;
- ErlSubBin *sb;
+ Uint extracted_offset;
+ Uint extracted_size;
+ Eterm extracted;
Uint do_trim;
Sint i;
register Uint reds = ctx->reds;
@@ -1852,7 +1854,8 @@ static Eterm do_split_global_result(Process *p, Eterm subject, BinaryFindContext
*ctxp = ctx;
fa = &(ctx->u.fa);
}
- erts_factory_proc_prealloc_init(&(fa->factory), p, (fa->size + 1) * (ERL_SUB_BIN_SIZE + 2));
+ erts_factory_proc_prealloc_init(&(fa->factory), p, (fa->size + 1) *
+ (EXTRACT_SUB_BIN_HEAP_NEED + 2));
ctx->state = BFResult;
}
@@ -1871,39 +1874,39 @@ static Eterm do_split_global_result(Process *p, Eterm subject, BinaryFindContext
}
return THE_NON_VALUE;
}
- sb = (ErlSubBin *)(fa->factory.hp);
- sb->size = fa->end_pos - (fad[i].pos + fad[i].len);
- if (!(sb->size == 0 && do_trim)) {
- sb->thing_word = HEADER_SUB_BIN;
- sb->offs = offset + fad[i].pos + fad[i].len;
- sb->orig = orig;
- sb->bitoffs = bit_offset;
- sb->bitsize = 0;
- sb->is_writable = 0;
- fa->factory.hp += ERL_SUB_BIN_SIZE;
- fa->term = CONS(fa->factory.hp, make_binary(sb), fa->term);
- fa->factory.hp += 2;
- do_trim &= ~BF_FLAG_SPLIT_TRIM;
- }
- fa->end_pos = fad[i].pos;
+
+ extracted_offset = (offset + fad[i].pos + fad[i].len) * 8 + bit_offset;
+ extracted_size = (fa->end_pos - (fad[i].pos + fad[i].len)) * 8;
+
+ if (!(extracted_size == 0 && do_trim)) {
+ extracted = erts_extract_sub_binary(&fa->factory.hp, orig,
+ binary_bytes(orig),
+ extracted_offset,
+ extracted_size);
+ fa->term = CONS(fa->factory.hp, extracted, fa->term);
+ fa->factory.hp += 2;
+
+ do_trim &= ~BF_FLAG_SPLIT_TRIM;
+ }
+
+ fa->end_pos = fad[i].pos;
}
fa->head = i;
ctx->reds = reds;
- sb = (ErlSubBin *)(fa->factory.hp);
- sb->size = fad[0].pos;
- if (!(sb->size == 0 && do_trim)) {
- sb->thing_word = HEADER_SUB_BIN;
- sb->offs = offset;
- sb->orig = orig;
- sb->bitoffs = bit_offset;
- sb->bitsize = 0;
- sb->is_writable = 0;
- fa->factory.hp += ERL_SUB_BIN_SIZE;
- fa->term = CONS(fa->factory.hp, make_binary(sb), fa->term);
- fa->factory.hp += 2;
+ extracted_offset = offset * 8 + bit_offset;
+ extracted_size = fad[0].pos * 8;
+
+ if (!(extracted_size == 0 && do_trim)) {
+ extracted = erts_extract_sub_binary(&fa->factory.hp, orig,
+ binary_bytes(orig),
+ extracted_offset,
+ extracted_size);
+ fa->term = CONS(fa->factory.hp, extracted, fa->term);
+ fa->factory.hp += 2;
}
+
erts_factory_close(&(fa->factory));
return fa->term;
@@ -1937,8 +1940,8 @@ BIF_RETTYPE erts_binary_part(Process *p, Eterm binary, Eterm epos, Eterm elen)
Uint offset;
Uint bit_offset;
Uint bit_size;
- Eterm* hp;
- ErlSubBin* sb;
+ Eterm *hp, *hp_end;
+ Eterm result;
if (is_not_binary(binary)) {
goto badarg;
@@ -1970,19 +1973,18 @@ BIF_RETTYPE erts_binary_part(Process *p, Eterm binary, Eterm epos, Eterm elen)
goto badarg;
}
- hp = HeapFragOnlyAlloc(p, ERL_SUB_BIN_SIZE);
+ hp = HeapFragOnlyAlloc(p, EXTRACT_SUB_BIN_HEAP_NEED);
+ hp_end = hp + EXTRACT_SUB_BIN_HEAP_NEED;
ERTS_GET_REAL_BIN(binary, orig, offset, bit_offset, bit_size);
- sb = (ErlSubBin *) hp;
- sb->thing_word = HEADER_SUB_BIN;
- sb->size = len;
- sb->offs = offset + pos;
- sb->orig = orig;
- sb->bitoffs = bit_offset;
- sb->bitsize = 0;
- sb->is_writable = 0;
-
- BIF_RET(make_binary(sb));
+
+ result = erts_extract_sub_binary(&hp, orig, binary_bytes(orig),
+ (offset + pos) * 8 + bit_offset,
+ len * 8);
+
+ HRelease(p, hp_end, hp);
+
+ BIF_RET(result);
badarg:
BIF_ERROR(p, BADARG);
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 0339589b79..d1ceffd23c 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -162,10 +162,10 @@ static Eterm current_stacktrace(ErtsHeapFactory *hfact, Process* rp,
Uint reserve_size);
static Eterm
-bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
+bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh, Eterm tail)
{
struct erl_off_heap_header* ohh;
- Eterm res = NIL;
+ Eterm res = tail;
Eterm tuple;
for (ohh = oh->first; ohh; ohh = ohh->next) {
@@ -1864,11 +1864,25 @@ process_info_aux(Process *c_p,
break;
case ERTS_PI_IX_BINARY: {
- Uint sz = 0;
- (void) bld_bin_list(NULL, &sz, &MSO(rp));
+ ErlHeapFragment *hfrag;
+ Uint sz;
+
+ res = NIL;
+ sz = 0;
+
+ (void)bld_bin_list(NULL, &sz, &MSO(rp), NIL);
+ for (hfrag = rp->mbuf; hfrag != NULL; hfrag = hfrag->next) {
+ (void)bld_bin_list(NULL, &sz, &hfrag->off_heap, NIL);
+ }
+
hp = erts_produce_heap(hfact, sz, reserve_size);
- res = bld_bin_list(&hp, NULL, &MSO(rp));
- break;
+
+ res = bld_bin_list(&hp, NULL, &MSO(rp), NIL);
+ for (hfrag = rp->mbuf; hfrag != NULL; hfrag = hfrag->next) {
+ res = bld_bin_list(&hp, NULL, &hfrag->off_heap, res);
+ }
+
+ break;
}
case ERTS_PI_IX_SEQUENTIAL_TRACE_TOKEN: {
@@ -2799,7 +2813,10 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
} else if (BIF_ARG_1 == am_threads) {
return am_true;
} else if (BIF_ARG_1 == am_creation) {
- return make_small(erts_this_node->creation);
+ Uint hsz = 0;
+ erts_bld_uint(NULL, &hsz, erts_this_node->creation);
+ hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
+ BIF_RET(erts_bld_uint(&hp, NULL, erts_this_node->creation));
} else if (BIF_ARG_1 == am_break_ignored) {
extern int ignore_break;
if (ignore_break)
diff --git a/erts/emulator/beam/erl_bif_lists.c b/erts/emulator/beam/erl_bif_lists.c
index b23fa77f5f..fa2edfef1e 100644
--- a/erts/emulator/beam/erl_bif_lists.c
+++ b/erts/emulator/beam/erl_bif_lists.c
@@ -413,12 +413,25 @@ typedef struct {
#define ERTS_RBT_GET_LEFT(T) ((T)->left)
#define ERTS_RBT_SET_LEFT(T, L) ((T)->left = (L))
#define ERTS_RBT_GET_KEY(T) ((T)->key)
-#define ERTS_RBT_CMP_KEYS(KX, KY) CMP_TERM(KX, KY)
+#define ERTS_RBT_CMP_KEYS(KX, KY) subtract_term_cmp((KX), (KY))
#define ERTS_RBT_WANT_LOOKUP_INSERT
#define ERTS_RBT_WANT_LOOKUP
#define ERTS_RBT_WANT_DELETE
#define ERTS_RBT_UNDEF
+/* erl_rbtree expects comparisons to return an int */
+static int subtract_term_cmp(Eterm a, Eterm b) {
+ Sint res = CMP_TERM(a, b);
+
+ if (res < 0) {
+ return -1;
+ } else if (res > 0) {
+ return 1;
+ }
+
+ return 0;
+}
+
#include "erl_rbtree.h"
static int subtract_continue(Process *p, ErtsSubtractContext *context);
diff --git a/erts/emulator/beam/erl_bif_persistent.c b/erts/emulator/beam/erl_bif_persistent.c
index f38e0cc5cb..8662a9e33a 100644
--- a/erts/emulator/beam/erl_bif_persistent.c
+++ b/erts/emulator/beam/erl_bif_persistent.c
@@ -1234,3 +1234,102 @@ next_to_delete(void)
erts_mtx_unlock(&delete_queue_mtx);
return table;
}
+
+/*
+ * test/debug functionality follow...
+ */
+
+static Uint accessed_literal_areas_size;
+static Uint accessed_no_literal_areas;
+static ErtsLiteralArea **accessed_literal_areas;
+
+int
+erts_debug_have_accessed_literal_area(ErtsLiteralArea *lap)
+{
+ Uint i;
+ for (i = 0; i < accessed_no_literal_areas; i++) {
+ if (accessed_literal_areas[i] == lap)
+ return !0;
+ }
+ return 0;
+}
+
+void
+erts_debug_save_accessed_literal_area(ErtsLiteralArea *lap)
+{
+ if (accessed_no_literal_areas == accessed_literal_areas_size) {
+ accessed_literal_areas_size += 10;
+ accessed_literal_areas = erts_realloc(ERTS_ALC_T_TMP,
+ accessed_literal_areas,
+ (sizeof(ErtsLiteralArea *)
+ *accessed_literal_areas_size));
+ }
+ accessed_literal_areas[accessed_no_literal_areas++] = lap;
+}
+
+static void debug_foreach_off_heap(HashTable *tbl, void (*func)(ErlOffHeap *, void *), void *arg)
+{
+ int i;
+
+ for (i = 0; i < tbl->allocated; i++) {
+ Eterm term = tbl->term[i];
+ if (is_tuple_arity(term, 2)) {
+ ErtsLiteralArea *lap = term_to_area(term);
+ ErlOffHeap oh;
+ if (!erts_debug_have_accessed_literal_area(lap)) {
+ ERTS_INIT_OFF_HEAP(&oh);
+ oh.first = lap->off_heap;
+ (*func)(&oh, arg);
+ erts_debug_save_accessed_literal_area(lap);
+ }
+ }
+ }
+}
+
+struct debug_la_oh {
+ void (*func)(ErlOffHeap *, void *);
+ void *arg;
+};
+
+static void debug_handle_table(void *vfap,
+ ErtsThrPrgrVal val,
+ void *vtbl)
+{
+ struct debug_la_oh *fap = vfap;
+ HashTable *tbl = vtbl;
+ debug_foreach_off_heap(tbl, fap->func, fap->arg);
+}
+
+void
+erts_debug_foreach_persistent_term_off_heap(void (*func)(ErlOffHeap *, void *), void *arg)
+{
+ HashTable *tbl;
+ struct debug_la_oh fa;
+ accessed_no_literal_areas = 0;
+ accessed_literal_areas_size = 10;
+ accessed_literal_areas = erts_alloc(ERTS_ALC_T_TMP,
+ (sizeof(ErtsLiteralArea *)
+ * accessed_literal_areas_size));
+
+ tbl = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ debug_foreach_off_heap(tbl, func, arg);
+ erts_mtx_lock(&delete_queue_mtx);
+ for (tbl = delete_queue_head; tbl; tbl = tbl->delete_next)
+ debug_foreach_off_heap(tbl, func, arg);
+ erts_mtx_unlock(&delete_queue_mtx);
+ fa.func = func;
+ fa.arg = arg;
+ erts_debug_later_op_foreach(table_updater,
+ debug_handle_table,
+ (void *) &fa);
+ erts_debug_later_op_foreach(table_deleter,
+ debug_handle_table,
+ (void *) &fa);
+ erts_debug_foreach_release_literal_area_off_heap(func, arg);
+
+ erts_free(ERTS_ALC_T_TMP, accessed_literal_areas);
+ accessed_no_literal_areas = 0;
+ accessed_literal_areas_size = 0;
+ accessed_literal_areas = NULL;
+}
+
diff --git a/erts/emulator/beam/erl_binary.h b/erts/emulator/beam/erl_binary.h
index c9c047255a..66b59ef1a3 100644
--- a/erts/emulator/beam/erl_binary.h
+++ b/erts/emulator/beam/erl_binary.h
@@ -351,6 +351,19 @@ erts_free_aligned_binary_bytes(byte* buf)
erts_free_aligned_binary_bytes_extra(buf,ERTS_ALC_T_TMP);
}
+/* A binary's size in bits must fit into a word for matching to work. We used
+ * to allow creating larger binaries than this, but they acted really strangely
+ * in Erlang code and were pretty much only usable in drivers and NIFs.
+ *
+ * This check also ensures, indirectly, that there won't be an overflow when
+ * the size is bumped by CHICKEN_PAD and the binary struct itself. */
+#define BINARY_OVERFLOW_CHECK(BYTE_SIZE) \
+ do { \
+ if (ERTS_UNLIKELY(BYTE_SIZE > ERTS_UWORD_MAX / CHAR_BIT)) { \
+ return NULL; \
+ } \
+ } while(0)
+
/* Explicit extra bytes allocated to counter buggy drivers.
** These extra bytes were earlier (< R13B04) added by an alignment-bug
** in this code. Do we dare remove this in some major release (R14?) maybe?
@@ -364,86 +377,107 @@ erts_free_aligned_binary_bytes(byte* buf)
ERTS_GLB_INLINE Binary *
erts_bin_drv_alloc_fnf(Uint size)
{
- Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
Binary *res;
+ Uint bsize;
+
+ BINARY_OVERFLOW_CHECK(size);
+ bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
- if (bsize < size) /* overflow */
- return NULL;
res = erts_alloc_fnf(ERTS_ALC_T_DRV_BINARY, bsize);
ERTS_CHK_BIN_ALIGNMENT(res);
+
if (res) {
- res->orig_size = size;
- res->intern.flags = BIN_FLAG_DRV;
+ res->orig_size = size;
+ res->intern.flags = BIN_FLAG_DRV;
erts_refc_init(&res->intern.refc, 1);
}
+
return res;
}
ERTS_GLB_INLINE Binary *
erts_bin_drv_alloc(Uint size)
{
- Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
+ Binary *res = erts_bin_drv_alloc_fnf(size);
+
+ if (res) {
+ return res;
+ }
+
+ erts_alloc_enomem(ERTS_ALC_T_DRV_BINARY, size);
+}
+
+ERTS_GLB_INLINE Binary *
+erts_bin_nrml_alloc_fnf(Uint size)
+{
Binary *res;
+ Uint bsize;
- if (bsize < size) /* overflow */
- erts_alloc_enomem(ERTS_ALC_T_DRV_BINARY, size);
- res = erts_alloc(ERTS_ALC_T_DRV_BINARY, bsize);
+ BINARY_OVERFLOW_CHECK(size);
+ bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
+
+ res = erts_alloc_fnf(ERTS_ALC_T_BINARY, bsize);
ERTS_CHK_BIN_ALIGNMENT(res);
- res->orig_size = size;
- res->intern.flags = BIN_FLAG_DRV;
- erts_refc_init(&res->intern.refc, 1);
+
+ if (res) {
+ res->orig_size = size;
+ res->intern.flags = 0;
+ erts_refc_init(&res->intern.refc, 1);
+ }
+
return res;
}
ERTS_GLB_INLINE Binary *
erts_bin_nrml_alloc(Uint size)
{
- Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
- Binary *res;
+ Binary *res = erts_bin_nrml_alloc_fnf(size);
- if (bsize < size) /* overflow */
- erts_alloc_enomem(ERTS_ALC_T_BINARY, size);
- res = erts_alloc(ERTS_ALC_T_BINARY, bsize);
- ERTS_CHK_BIN_ALIGNMENT(res);
- res->orig_size = size;
- res->intern.flags = 0;
- erts_refc_init(&res->intern.refc, 1);
- return res;
+ if (res) {
+ return res;
+ }
+
+ erts_alloc_enomem(ERTS_ALC_T_BINARY, size);
}
ERTS_GLB_INLINE Binary *
erts_bin_realloc_fnf(Binary *bp, Uint size)
{
+ ErtsAlcType_t type;
Binary *nbp;
- Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
- ErtsAlcType_t type = (bp->intern.flags & BIN_FLAG_DRV) ? ERTS_ALC_T_DRV_BINARY
- : ERTS_ALC_T_BINARY;
+ Uint bsize;
+
+ type = (bp->intern.flags & BIN_FLAG_DRV) ? ERTS_ALC_T_DRV_BINARY
+ : ERTS_ALC_T_BINARY;
ASSERT((bp->intern.flags & BIN_FLAG_MAGIC) == 0);
- if (bsize < size) /* overflow */
- return NULL;
+
+ BINARY_OVERFLOW_CHECK(size);
+ bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
+
nbp = erts_realloc_fnf(type, (void *) bp, bsize);
ERTS_CHK_BIN_ALIGNMENT(nbp);
- if (nbp)
- nbp->orig_size = size;
+
+ if (nbp) {
+ nbp->orig_size = size;
+ }
+
return nbp;
}
ERTS_GLB_INLINE Binary *
erts_bin_realloc(Binary *bp, Uint size)
{
- Binary *nbp;
- Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
- ErtsAlcType_t type = (bp->intern.flags & BIN_FLAG_DRV) ? ERTS_ALC_T_DRV_BINARY
- : ERTS_ALC_T_BINARY;
- ASSERT((bp->intern.flags & BIN_FLAG_MAGIC) == 0);
- if (bsize < size) /* overflow */
- erts_realloc_enomem(type, bp, size);
- nbp = erts_realloc_fnf(type, (void *) bp, bsize);
- if (!nbp)
- erts_realloc_enomem(type, bp, bsize);
- ERTS_CHK_BIN_ALIGNMENT(nbp);
- nbp->orig_size = size;
- return nbp;
+ Binary *nbp = erts_bin_realloc_fnf(bp, size);
+
+ if (nbp) {
+ return nbp;
+ }
+
+ if (bp->intern.flags & BIN_FLAG_DRV) {
+ erts_realloc_enomem(ERTS_ALC_T_DRV_BINARY, bp, size);
+ } else {
+ erts_realloc_enomem(ERTS_ALC_T_BINARY, bp, size);
+ }
}
ERTS_GLB_INLINE void
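
On a 64-bit build the new BINARY_OVERFLOW_CHECK caps a binary at ERTS_UWORD_MAX / CHAR_BIT bytes, which is exactly the bound that keeps the size in bits representable in one word. A quick standalone check of that arithmetic; the 64-bit word size is an assumption made for illustration, not something this header states:

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Largest byte size the check would accept with a 64-bit word. */
        uint64_t max_bytes = UINT64_MAX / CHAR_BIT;  /* 2305843009213693951   */
        uint64_t max_bits  = max_bytes * CHAR_BIT;   /* still below UINT64_MAX */

        printf("max bytes: %llu\n", (unsigned long long)max_bytes);
        printf("max bits : %llu\n", (unsigned long long)max_bits);
        return 0;
    }
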
diff --git a/erts/emulator/beam/erl_bits.c b/erts/emulator/beam/erl_bits.c
index f5807d25d7..a0edbc81a4 100644
--- a/erts/emulator/beam/erl_bits.c
+++ b/erts/emulator/beam/erl_bits.c
@@ -124,10 +124,10 @@ erts_bs_start_match_2(Process *p, Eterm Binary, Uint Max)
ProcBin* pb;
ASSERT(is_binary(Binary));
+
total_bin_size = binary_size(Binary);
- if ((total_bin_size >> (8*sizeof(Uint)-3)) != 0) {
- return THE_NON_VALUE;
- }
+ ASSERT(total_bin_size <= ERTS_UWORD_MAX / CHAR_BIT);
+
NeededSize = ERL_BIN_MATCHSTATE_SIZE(Max);
hp = HeapOnlyAlloc(p, NeededSize);
ms = (ErlBinMatchState *) hp;
@@ -157,10 +157,9 @@ ErlBinMatchState *erts_bs_start_match_3(Process *p, Eterm Binary)
ProcBin* pb;
ASSERT(is_binary(Binary));
+
total_bin_size = binary_size(Binary);
- if ((total_bin_size >> (8*sizeof(Uint)-3)) != 0) {
- return NULL;
- }
+ ASSERT(total_bin_size <= ERTS_UWORD_MAX / CHAR_BIT);
NeededSize = ERL_BIN_MATCHSTATE_SIZE(0);
hp = HeapOnlyAlloc(p, NeededSize);
@@ -459,29 +458,25 @@ erts_bs_get_integer_2(Process *p, Uint num_bits, unsigned flags, ErlBinMatchBuff
Eterm
erts_bs_get_binary_2(Process *p, Uint num_bits, unsigned flags, ErlBinMatchBuffer* mb)
{
- ErlSubBin* sb;
+ Eterm result;
CHECK_MATCH_BUFFER(mb);
- if (mb->size - mb->offset < num_bits) { /* Asked for too many bits. */
- return THE_NON_VALUE;
+ if (mb->size - mb->offset < num_bits) {
+ /* Asked for too many bits. */
+ return THE_NON_VALUE;
}
/*
* From now on, we can't fail.
*/
- sb = (ErlSubBin *) HeapOnlyAlloc(p, ERL_SUB_BIN_SIZE);
-
- sb->thing_word = HEADER_SUB_BIN;
- sb->orig = mb->orig;
- sb->size = BYTE_OFFSET(num_bits);
- sb->bitsize = BIT_OFFSET(num_bits);
- sb->offs = BYTE_OFFSET(mb->offset);
- sb->bitoffs = BIT_OFFSET(mb->offset);
- sb->is_writable = 0;
+ result = erts_extract_sub_binary(&HEAP_TOP(p),
+ mb->orig, mb->base,
+ mb->offset, num_bits);
+
mb->offset += num_bits;
-
- return make_binary(sb);
+
+ return result;
}
Eterm
@@ -545,21 +540,19 @@ erts_bs_get_float_2(Process *p, Uint num_bits, unsigned flags, ErlBinMatchBuffer
Eterm
erts_bs_get_binary_all_2(Process *p, ErlBinMatchBuffer* mb)
{
- ErlSubBin* sb;
- Uint size;
+ Uint bit_size;
+ Eterm result;
CHECK_MATCH_BUFFER(mb);
- size = mb->size-mb->offset;
- sb = (ErlSubBin *) HeapOnlyAlloc(p, ERL_SUB_BIN_SIZE);
- sb->thing_word = HEADER_SUB_BIN;
- sb->size = BYTE_OFFSET(size);
- sb->bitsize = BIT_OFFSET(size);
- sb->offs = BYTE_OFFSET(mb->offset);
- sb->bitoffs = BIT_OFFSET(mb->offset);
- sb->is_writable = 0;
- sb->orig = mb->orig;
- mb->offset = mb->size;
- return make_binary(sb);
+ bit_size = mb->size - mb->offset;
+
+ result = erts_extract_sub_binary(&HEAP_TOP(p),
+ mb->orig, mb->base,
+ mb->offset, bit_size);
+
+ mb->offset = mb->size;
+
+ return result;
}
/****************************************************************
@@ -2097,3 +2090,39 @@ erts_copy_bits(byte* src, /* Base pointer to source. */
}
}
+Eterm erts_extract_sub_binary(Eterm **hp, Eterm base_bin, byte *base_data,
+ Uint bit_offset, Uint bit_size)
+{
+ Uint byte_offset, byte_size;
+
+ ERTS_CT_ASSERT(ERL_SUB_BIN_SIZE <= ERL_ONHEAP_BIN_LIMIT);
+
+ byte_offset = BYTE_OFFSET(bit_offset);
+ byte_size = BYTE_OFFSET(bit_size);
+
+ if (BIT_OFFSET(bit_size) == 0 && byte_size <= ERL_ONHEAP_BIN_LIMIT) {
+ ErlHeapBin *hb = (ErlHeapBin*)*hp;
+ *hp += heap_bin_size(byte_size);
+
+ hb->thing_word = header_heap_bin(byte_size);
+ hb->size = byte_size;
+
+ copy_binary_to_buffer(hb->data, 0, base_data, bit_offset, bit_size);
+
+ return make_binary(hb);
+ } else {
+ ErlSubBin *sb = (ErlSubBin*)*hp;
+ *hp += ERL_SUB_BIN_SIZE;
+
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->size = byte_size;
+ sb->offs = byte_offset;
+ sb->orig = base_bin;
+ sb->bitoffs = BIT_OFFSET(bit_offset);
+ sb->bitsize = BIT_OFFSET(bit_size);
+ sb->is_writable = 0;
+
+ return make_binary(sb);
+ }
+}
+
diff --git a/erts/emulator/beam/erl_bits.h b/erts/emulator/beam/erl_bits.h
index 50d353e1fa..0b2a6f3760 100644
--- a/erts/emulator/beam/erl_bits.h
+++ b/erts/emulator/beam/erl_bits.h
@@ -116,7 +116,7 @@ typedef struct erl_bin_match_struct{
do { \
if (BIT_OFFSET(DstBufOffset) == 0 && (SrcBufferOffset == 0) && \
(BIT_OFFSET(NumBits)==0) && (NumBits != 0)) { \
- sys_memcpy(DstBuffer+BYTE_OFFSET(DstBufOffset), \
+ sys_memcpy(((byte*)DstBuffer)+BYTE_OFFSET(DstBufOffset), \
SrcBuffer, NBYTES(NumBits)); \
} else { \
erts_copy_bits(SrcBuffer, SrcBufferOffset, 1, \
@@ -150,8 +150,11 @@ void erts_bits_destroy_state(ERL_BITS_PROTO_0);
Eterm erts_bs_start_match_2(Process *p, Eterm Bin, Uint Max);
ErlBinMatchState *erts_bs_start_match_3(Process *p, Eterm Bin);
Eterm erts_bs_get_integer_2(Process *p, Uint num_bits, unsigned flags, ErlBinMatchBuffer* mb);
-Eterm erts_bs_get_binary_2(Process *p, Uint num_bits, unsigned flags, ErlBinMatchBuffer* mb);
Eterm erts_bs_get_float_2(Process *p, Uint num_bits, unsigned flags, ErlBinMatchBuffer* mb);
+
+/* These will create heap binaries when appropriate, so they require free space
+ * up to EXTRACT_SUB_BIN_HEAP_NEED. */
+Eterm erts_bs_get_binary_2(Process *p, Uint num_bits, unsigned flags, ErlBinMatchBuffer* mb);
Eterm erts_bs_get_binary_all_2(Process *p, ErlBinMatchBuffer* mb);
/*
@@ -182,6 +185,17 @@ void erts_copy_bits(byte* src, size_t soffs, int sdir,
byte* dst, size_t doffs,int ddir, size_t n);
int erts_cmp_bits(byte* a_ptr, size_t a_offs, byte* b_ptr, size_t b_offs, size_t size);
+
+/* Extracts a region from base_bin as a sub-binary or heap binary, whichever
+ * is the most appropriate.
+ *
+ * The caller must ensure that there's enough free space at *hp */
+Eterm erts_extract_sub_binary(Eterm **hp, Eterm base_bin, byte *base_data,
+ Uint bit_offset, Uint num_bits);
+
+/* Pessimistic estimate of the words required for erts_extract_sub_binary */
+#define EXTRACT_SUB_BIN_HEAP_NEED (heap_bin_size(ERL_ONHEAP_BIN_LIMIT))
+
/*
* Flags for bs_get_* / bs_put_* / bs_init* instructions.
*/
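
The comment above spells out the caller contract; the pattern the rest of this patch follows (for example in split_binary_2() and erts_binary_part()) is to reserve the pessimistic EXTRACT_SUB_BIN_HEAP_NEED words, let erts_extract_sub_binary() decide between a heap binary and a sub binary, and then hand back whatever was not used. A condensed sketch of that pattern, where only the wrapper function itself is made up:

    /* Hypothetical wrapper showing the caller contract. HAlloc, HRelease and
     * erts_extract_sub_binary are the calls the patch itself uses. */
    static Eterm extract_region(Process *p, Eterm real_bin, byte *bptr,
                                Uint bit_offset, Uint bit_size)
    {
        Eterm *hp, *hp_end, result;

        hp = HAlloc(p, EXTRACT_SUB_BIN_HEAP_NEED);
        hp_end = hp + EXTRACT_SUB_BIN_HEAP_NEED;

        /* Builds a heap binary for small byte-aligned results, otherwise a
         * sub binary; either way it advances hp by what it actually used. */
        result = erts_extract_sub_binary(&hp, real_bin, bptr,
                                         bit_offset, bit_size);

        /* Return the unused part of the reservation to the heap. */
        HRelease(p, hp_end, hp);
        return result;
    }
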
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index d24f30f126..3c74ef493b 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -4415,7 +4415,7 @@ void db_info(fmtfn_t to, void *to_arg, int show) /* Called by break handler *
pdbi.to_arg = to_arg;
pdbi.show = show;
- erts_db_foreach_table(db_info_print, &pdbi);
+ erts_db_foreach_table(db_info_print, &pdbi, !0);
}
Uint
@@ -4428,7 +4428,7 @@ erts_get_ets_misc_mem_size(void)
/* SMP Note: May only be used when system is locked */
void
-erts_db_foreach_table(void (*func)(DbTable *, void *), void *arg)
+erts_db_foreach_table(void (*func)(DbTable *, void *), void *arg, int alive_only)
{
int ix;
@@ -4440,7 +4440,7 @@ erts_db_foreach_table(void (*func)(DbTable *, void *), void *arg)
if (first) {
DbTable *tb = first;
do {
- if (is_table_alive(tb))
+ if (!alive_only || is_table_alive(tb))
(*func)(tb, arg);
tb = tb->common.all.next;
} while (tb != first);
@@ -4457,6 +4457,15 @@ erts_db_foreach_offheap(DbTable *tb,
tb->common.meth->db_foreach_offheap(tb, func, arg);
}
+void
+erts_db_foreach_thr_prgr_offheap(void (*func)(ErlOffHeap *, void *),
+ void *arg)
+{
+ erts_db_foreach_thr_prgr_offheap_hash(func, arg);
+ erts_db_foreach_thr_prgr_offheap_tree(func, arg);
+ erts_db_foreach_thr_prgr_offheap_catree(func, arg);
+}
+
/* retrieve max number of ets tables */
Uint
erts_db_get_max_tabs()
diff --git a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h
index b3dc1b9ba3..c604744687 100644
--- a/erts/emulator/beam/erl_db.h
+++ b/erts/emulator/beam/erl_db.h
@@ -114,10 +114,12 @@ void init_db(ErtsDbSpinCount);
int erts_db_process_exiting(Process *, ErtsProcLocks, void **);
int erts_db_execute_free_fixation(Process*, DbFixation*);
void db_info(fmtfn_t, void *, int);
-void erts_db_foreach_table(void (*)(DbTable *, void *), void *);
+void erts_db_foreach_table(void (*)(DbTable *, void *), void *, int);
void erts_db_foreach_offheap(DbTable *,
void (*func)(ErlOffHeap *, void *),
void *);
+void erts_db_foreach_thr_prgr_offheap(void (*func)(ErlOffHeap *, void *),
+ void *);
extern int erts_ets_rwmtx_spin_count;
extern int user_requested_db_max_tabs; /* set in erl_init */
diff --git a/erts/emulator/beam/erl_db_catree.c b/erts/emulator/beam/erl_db_catree.c
index fed4b44a9b..8a89eb72df 100644
--- a/erts/emulator/beam/erl_db_catree.c
+++ b/erts/emulator/beam/erl_db_catree.c
@@ -2395,6 +2395,34 @@ void db_calc_stats_catree(DbTableCATree* tb, DbCATreeStats* stats)
} while (depth > 0);
}
+struct debug_catree_fa {
+ void (*func)(ErlOffHeap *, void *);
+ void *arg;
+};
+
+static void debug_free_route_node(void *vfap, ErtsThrPrgrVal val, void *vnp)
+{
+ DbTableCATreeNode *np = vnp;
+ if (np->u.route.key.oh) {
+ struct debug_catree_fa *fap = vfap;
+ ErlOffHeap oh;
+ ERTS_INIT_OFF_HEAP(&oh);
+ oh.first = np->u.route.key.oh;
+ (*fap->func)(&oh, fap->arg);
+ }
+}
+
+void
+erts_db_foreach_thr_prgr_offheap_catree(void (*func)(ErlOffHeap *, void *),
+ void *arg)
+{
+ struct debug_catree_fa fa;
+ fa.func = func;
+ fa.arg = arg;
+ erts_debug_later_op_foreach(do_free_route_node, debug_free_route_node, &fa);
+}
+
+
#ifdef HARDDEBUG
/*
diff --git a/erts/emulator/beam/erl_db_catree.h b/erts/emulator/beam/erl_db_catree.h
index c2c884eee3..2ede85e04e 100644
--- a/erts/emulator/beam/erl_db_catree.h
+++ b/erts/emulator/beam/erl_db_catree.h
@@ -132,6 +132,9 @@ typedef struct {
Uint max_depth;
} DbCATreeStats;
void db_calc_stats_catree(DbTableCATree*, DbCATreeStats*);
+void
+erts_db_foreach_thr_prgr_offheap_catree(void (*func)(ErlOffHeap *, void *),
+ void *arg);
#endif /* _DB_CATREE_H */
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index ceaccf7e44..44ecf7cce5 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -93,11 +93,9 @@
erts_flxctr_dec_read_centralized(&(DB)->common.counters, ERTS_DB_TABLE_NITEMS_COUNTER_ID)
#define RESET_NITEMS(DB) \
erts_flxctr_reset(&(DB)->common.counters, ERTS_DB_TABLE_NITEMS_COUNTER_ID)
-/*
- * The following symbols can be manipulated to "tune" the linear hash array
- */
+
#define GROW_LIMIT(NACTIVE) ((NACTIVE)*1)
-#define SHRINK_LIMIT(NACTIVE) ((NACTIVE) / 2)
+#define SHRINK_LIMIT(TB) erts_atomic_read_nob(&(TB)->shrink_limit)
/*
** We want the first mandatory segment to be small (to reduce minimal footprint)
@@ -137,6 +135,11 @@
#define BUCKET(tb, i) SEGTAB(tb)[SLOT_IX_TO_SEG_IX(i)]->buckets[(i) & EXT_SEGSZ_MASK]
+#ifdef DEBUG
+# define DBG_BUCKET_INACTIVE ((HashDbTerm*)0xdead5107)
+#endif
+
+
/*
* When deleting a table, the number of records to delete.
* Approximate number, because we must delete entire buckets.
@@ -377,7 +380,7 @@ typedef int (*extra_match_validator_t)(int keypos, Eterm match, Eterm guard, Ete
*/
static struct ext_segtab* alloc_ext_segtab(DbTableHash* tb, unsigned seg_ix);
static void alloc_seg(DbTableHash *tb);
-static int free_seg(DbTableHash *tb, int free_records);
+static int free_seg(DbTableHash *tb);
static HashDbTerm* next_live(DbTableHash *tb, Uint *iptr, erts_rwmtx_t** lck_ptr,
HashDbTerm *list);
static HashDbTerm* search_list(DbTableHash* tb, Eterm key,
@@ -471,10 +474,8 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle);
static ERTS_INLINE void try_shrink(DbTableHash* tb)
{
- int nactive = NACTIVE(tb);
int nitems = NITEMS(tb);
- if (nactive > FIRST_SEGSZ && nitems < SHRINK_LIMIT(nactive)
- && !IS_FIXED(tb)) {
+ if (nitems < SHRINK_LIMIT(tb) && !IS_FIXED(tb)) {
shrink(tb, nitems);
}
}
@@ -685,6 +686,7 @@ int db_create_hash(Process *p, DbTable *tbl)
erts_atomic_init_nob(&tb->szm, FIRST_SEGSZ_MASK);
erts_atomic_init_nob(&tb->nactive, FIRST_SEGSZ);
+ erts_atomic_init_nob(&tb->shrink_limit, 0);
erts_atomic_init_nob(&tb->fixdel, (erts_aint_t)NULL);
erts_atomic_init_nob(&tb->segtab, (erts_aint_t)NULL);
SET_SEGTAB(tb, tb->first_segtab);
@@ -771,7 +773,7 @@ static int db_next_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
b = next_live(tb, &ix, &lck, b->next);
if (tb->common.status & (DB_BAG | DB_DUPLICATE_BAG)) {
while (b != 0) {
- if (!has_live_key(tb, b, key, hval)) {
+ if (!has_key(tb, b, key, hval)) {
break;
}
b = next_live(tb, &ix, &lck, b->next);
@@ -781,6 +783,7 @@ static int db_next_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
*ret = am_EOT;
}
else {
+ ASSERT(!is_pseudo_deleted(b));
*ret = db_copy_key(p, tbl, &b->dbterm);
RUNLOCK_HASH(lck);
}
@@ -2466,7 +2469,7 @@ static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds)
erts_atomic_set_relb(&tb->fixdel, (erts_aint_t)NULL);
while(tb->nslots != 0) {
- reds -= EXT_SEGSZ/64 + free_seg(tb, 1);
+ reds -= EXT_SEGSZ/64 + free_seg(tb);
/*
* If we have done enough work, get out here.
@@ -2664,6 +2667,34 @@ static struct ext_segtab* alloc_ext_segtab(DbTableHash* tb, unsigned seg_ix)
return est;
}
+static void calc_shrink_limit(DbTableHash* tb)
+{
+ erts_aint_t shrink_limit;
+
+ if (tb->nslots >= (FIRST_SEGSZ + 2*EXT_SEGSZ)) {
+ /*
+ * Start shrink when we can remove one extra segment
+ * and still remain below 50% load.
+ */
+ shrink_limit = (tb->nslots - EXT_SEGSZ) / 2;
+ }
+ else {
+ /*
+ * But don't shrink below two segments.
+ * Why? In order to have a chance of getting rid of the last extra segment,
+ * and rehash it into the first small segment, we either have to start
+ * early and do speculative joining of buckets or we have to join a lot
+ * of buckets during each delete-op.
+ *
+ * Instead keep segment #2 once allocated. I also think it's a good bet
+ * a shrinking large table will grow large again.
+ */
+ shrink_limit = 0;
+ }
+ erts_atomic_set_nob(&tb->shrink_limit, shrink_limit);
+}
+
+
/* Extend table with one new segment
*/
static void alloc_seg(DbTableHash *tb)
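
Assuming the segment sizes used elsewhere in this file (FIRST_SEGSZ = 256 and EXT_SEGSZ = 2048 in current sources; treat the constants as assumptions rather than part of this diff), the new rule can be checked by hand: a table with three extra segments has nslots = 6400, so shrink_limit = (6400 - 2048) / 2 = 2176, and shrinking only starts when the table would still be below 50% load after the top segment is dropped (2176 out of 4352 slots). A standalone rendering of that arithmetic:

    #include <stdio.h>

    /* Stand-alone copy of the shrink-limit rule; the segment-size constants
     * are assumptions about erl_db_hash.c, not taken from this diff. */
    #define FIRST_SEGSZ 256
    #define EXT_SEGSZ   2048

    static long shrink_limit(long nslots)
    {
        return (nslots >= FIRST_SEGSZ + 2 * EXT_SEGSZ)
            ? (nslots - EXT_SEGSZ) / 2   /* < 50% load after dropping a segment */
            : 0;                         /* never shrink below two segments */
    }

    int main(void)
    {
        long nslots = FIRST_SEGSZ + 3 * EXT_SEGSZ;     /* 6400 */
        printf("limit=%ld\n", shrink_limit(nslots));   /* 2176 */
        return 0;
    }
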
@@ -2682,8 +2713,17 @@ static void alloc_seg(DbTableHash *tb)
segtab[seg_ix] = (struct segment*) erts_db_alloc(ERTS_ALC_T_DB_SEG,
(DbTable *) tb,
SIZEOF_SEGMENT(EXT_SEGSZ));
- sys_memset(segtab[seg_ix], 0, SIZEOF_SEGMENT(EXT_SEGSZ));
+#ifdef DEBUG
+ {
+ int i;
+ for (i = 0; i < EXT_SEGSZ; i++) {
+ segtab[seg_ix]->buckets[i] = DBG_BUCKET_INACTIVE;
+ }
+ }
+#endif
tb->nslots += EXT_SEGSZ;
+
+ calc_shrink_limit(tb);
}
static void dealloc_ext_segtab(void* lop_data)
@@ -2693,10 +2733,19 @@ static void dealloc_ext_segtab(void* lop_data)
erts_free(ERTS_ALC_T_DB_SEG, est);
}
-/* Shrink table by freeing the top segment
+struct dealloc_seg_ops {
+ struct segment* segp;
+ Uint seg_sz;
+
+ struct ext_segtab* est;
+};
+
+/* Shrink table by removing the top segment
** free_records: 1=free any records in segment, 0=assume segment is empty
+** ds_ops: (out) Instructions for dealloc_seg().
*/
-static int free_seg(DbTableHash *tb, int free_records)
+static int remove_seg(DbTableHash *tb, int free_records,
+ struct dealloc_seg_ops *ds_ops)
{
const int seg_ix = SLOT_IX_TO_SEG_IX(tb->nslots) - 1;
struct segment** const segtab = SEGTAB(tb);
@@ -2704,24 +2753,47 @@ static int free_seg(DbTableHash *tb, int free_records)
Uint seg_sz;
int nrecords = 0;
+ ERTS_LC_ASSERT(IS_TAB_WLOCKED(tb) || tb->common.status & DB_DELETE
+ || erts_atomic_read_nob(&tb->is_resizing));
+
ASSERT(segp != NULL);
-#ifndef DEBUG
- if (free_records)
-#endif
- {
- int i = (seg_ix == 0) ? FIRST_SEGSZ : EXT_SEGSZ;
- while (i--) {
- HashDbTerm* p = segp->buckets[i];
+ if (free_records) {
+ int ix, n;
+ if (seg_ix == 0) {
+ /* First segment (always fully active) */
+ n = FIRST_SEGSZ;
+ ix = FIRST_SEGSZ-1;
+ }
+ else if (NACTIVE(tb) < tb->nslots) {
+ /* Last extended segment partially active */
+ n = (NACTIVE(tb) - FIRST_SEGSZ) & EXT_SEGSZ_MASK;
+ ix = (NACTIVE(tb)-1) & EXT_SEGSZ_MASK;
+ }
+ else {
+ /* Full extended segment */
+ n = EXT_SEGSZ;
+ ix = EXT_SEGSZ - 1;
+ }
+ for ( ; n > 0; n--, ix--) {
+ HashDbTerm* p = segp->buckets[ix & EXT_SEGSZ_MASK];
while(p != 0) {
HashDbTerm* nxt = p->next;
- ASSERT(free_records); /* segment not empty as assumed? */
free_term(tb, p);
p = nxt;
++nrecords;
}
}
}
-
+#ifdef DEBUG
+ else {
+ int ix = (seg_ix == 0) ? FIRST_SEGSZ-1 : EXT_SEGSZ-1;
+ for ( ; ix >= 0; ix--) {
+ ASSERT(segp->buckets[ix] == DBG_BUCKET_INACTIVE);
+ }
+ }
+#endif
+
+ ds_ops->est = NULL;
if (seg_ix >= NSEG_1) {
struct ext_segtab* est = ErtsContainerStruct_(segtab,struct ext_segtab,segtab);
@@ -2730,35 +2802,64 @@ static int free_seg(DbTableHash *tb, int free_records)
SET_SEGTAB(tb, est->prev_segtab);
tb->nsegs = est->prev_nsegs;
- if (!tb->common.is_thread_safe) {
- /*
- * Table is doing a graceful shrink operation and we must avoid
- * deallocating this segtab while it may still be read by other
- * threads. Schedule deallocation with thread progress to make
- * sure no lingering threads are still hanging in BUCKET macro
- * with an old segtab pointer.
- */
- erts_schedule_db_free(&tb->common, dealloc_ext_segtab,
- est, &est->lop,
- SIZEOF_EXT_SEGTAB(est->nsegs));
- }
- else
- erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable*)tb, est,
- SIZEOF_EXT_SEGTAB(est->nsegs));
+ ds_ops->est = est;
}
}
+
seg_sz = (seg_ix == 0) ? FIRST_SEGSZ : EXT_SEGSZ;
- erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable *)tb, segp, SIZEOF_SEGMENT(seg_sz));
+ tb->nslots -= seg_sz;
+ ASSERT(tb->nslots >= 0);
+
+ ds_ops->segp = segp;
+ ds_ops->seg_sz = seg_sz;
#ifdef DEBUG
if (seg_ix < tb->nsegs)
SEGTAB(tb)[seg_ix] = NULL;
#endif
- tb->nslots -= seg_sz;
- ASSERT(tb->nslots >= 0);
+ calc_shrink_limit(tb);
return nrecords;
}
+/*
+ * Deallocate segment removed by remove_seg()
+ */
+static void dealloc_seg(DbTableHash *tb, struct dealloc_seg_ops* ds_ops)
+{
+ struct ext_segtab* est = ds_ops->est;
+
+ if (est) {
+ if (!tb->common.is_thread_safe) {
+ /*
+ * Table is doing a graceful shrink operation and we must avoid
+ * deallocating this segtab while it may still be read by other
+ * threads. Schedule deallocation with thread progress to make
+ * sure no lingering threads are still hanging in BUCKET macro
+ * with an old segtab pointer.
+ */
+ erts_schedule_db_free(&tb->common, dealloc_ext_segtab,
+ est, &est->lop,
+ SIZEOF_EXT_SEGTAB(est->nsegs));
+ }
+ else
+ erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable*)tb, est,
+ SIZEOF_EXT_SEGTAB(est->nsegs));
+ }
+
+ erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable *)tb,
+ ds_ops->segp, SIZEOF_SEGMENT(ds_ops->seg_sz));
+}
+
+/* Remove and deallocate top segment and all its contained objects */
+static int free_seg(DbTableHash *tb)
+{
+ struct dealloc_seg_ops ds_ops;
+ int reds;
+
+ reds = remove_seg(tb, 1, &ds_ops);
+ dealloc_seg(tb, &ds_ops);
+ return reds;
+}
/*
** Copy terms from ptr1 until ptr2
@@ -2880,6 +2981,7 @@ static void grow(DbTableHash* tb, int nitems)
pnext = &BUCKET(tb, from_ix);
p = *pnext;
to_pnext = &BUCKET(tb, to_ix);
+ ASSERT(*to_pnext == DBG_BUCKET_INACTIVE);
while (p != NULL) {
if (is_pseudo_deleted(p)) { /* rare but possible with fine locking */
*pnext = p->next;
@@ -2916,19 +3018,21 @@ abort:
*/
static void shrink(DbTableHash* tb, int nitems)
{
- HashDbTerm** src_bp;
- HashDbTerm** dst_bp;
+ struct dealloc_seg_ops ds_ops;
+ HashDbTerm* src;
+ HashDbTerm* tail;
HashDbTerm** bp;
erts_rwmtx_t* lck;
int src_ix, dst_ix, low_szm;
int nactive;
int loop_limit = 5;
+ ds_ops.segp = NULL;
do {
if (!begin_resizing(tb))
return; /* already in progress */
nactive = NACTIVE(tb);
- if (!(nactive > FIRST_SEGSZ && nitems < SHRINK_LIMIT(nactive))) {
+ if (!(nitems < SHRINK_LIMIT(tb))) {
goto abort; /* already done (race) */
}
src_ix = nactive - 1;
@@ -2945,41 +3049,49 @@ static void shrink(DbTableHash* tb, int nitems)
goto abort;
}
- src_bp = &BUCKET(tb, src_ix);
- dst_bp = &BUCKET(tb, dst_ix);
- bp = src_bp;
-
- /*
- * We join lists by appending "dst" at the end of "src"
- * as we must step through "src" anyway to purge pseudo deleted.
- */
- while(*bp != NULL) {
- if (is_pseudo_deleted(*bp)) {
- HashDbTerm* deleted = *bp;
- *bp = deleted->next;
- free_term(tb, deleted);
- } else {
- bp = &(*bp)->next;
- }
- }
- *bp = *dst_bp;
- *dst_bp = *src_bp;
- *src_bp = NULL;
-
+ src = BUCKET(tb, src_ix);
+#ifdef DEBUG
+ BUCKET(tb, src_ix) = DBG_BUCKET_INACTIVE;
+#endif
nactive = src_ix;
erts_atomic_set_nob(&tb->nactive, nactive);
if (dst_ix == 0) {
erts_atomic_set_relb(&tb->szm, low_szm);
}
- WUNLOCK_HASH(lck);
-
if (tb->nslots - src_ix >= EXT_SEGSZ) {
- free_seg(tb, 0);
+ remove_seg(tb, 0, &ds_ops);
}
done_resizing(tb);
- } while (--loop_limit
- && nactive > FIRST_SEGSZ && nitems < SHRINK_LIMIT(nactive));
+ if (src) {
+ /*
+             * We join buckets by appending the "dst" list at the end of the
+             * "src" list, as we must step through "src" anyway to purge
+             * pseudo-deleted terms.
+ */
+ bp = &BUCKET(tb, dst_ix);
+ tail = *bp;
+ *bp = src;
+
+ while(*bp != NULL) {
+ if (is_pseudo_deleted(*bp)) {
+ HashDbTerm* deleted = *bp;
+ *bp = deleted->next;
+ free_term(tb, deleted);
+ } else {
+ bp = &(*bp)->next;
+ }
+ }
+ *bp = tail;
+ }
+
+ WUNLOCK_HASH(lck);
+
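+        /* Deallocation of the removed segment (and any old ext segtab) is
+         * deferred until the bucket lock has been released. */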
+ if (ds_ops.segp) {
+ dealloc_seg(tb, &ds_ops);
+ ds_ops.segp = NULL;
+ }
+
+ } while (--loop_limit && nitems < SHRINK_LIMIT(tb));
return;
abort:
@@ -3262,6 +3374,12 @@ Eterm erts_ets_hash_sizeof_ext_segtab(void)
return make_small(((SIZEOF_EXT_SEGTAB(0)-1) / sizeof(UWord)) + 1);
}
+void
+erts_db_foreach_thr_prgr_offheap_hash(void (*func)(ErlOffHeap *, void *),
+ void *arg)
+{
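+    /* Nothing to do; hash tables keep no off-heap terms that are
+     * released via thread progress. */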
+}
+
#ifdef ERTS_ENABLE_LOCK_COUNT
void erts_lcnt_enable_db_hash_lock_count(DbTableHash *tb, int enable) {
int i;
diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h
index eae5537ba4..b26b82056f 100644
--- a/erts/emulator/beam/erl_db_hash.h
+++ b/erts/emulator/beam/erl_db_hash.h
@@ -63,9 +63,10 @@ typedef struct db_table_hash_fine_locks {
typedef struct db_table_hash {
DbTableCommon common;
- /* SMP: szm and nactive are write-protected by is_resizing or table write lock */
+ /* szm, nactive, shrink_limit are write-protected by is_resizing or table write lock */
erts_atomic_t szm; /* current size mask. */
erts_atomic_t nactive; /* Number of "active" slots */
+ erts_atomic_t shrink_limit; /* Shrink table when fewer objects than this */
erts_atomic_t segtab; /* The segment table (struct segment**) */
struct segment* first_segtab[1];
@@ -110,6 +111,9 @@ typedef struct {
void db_calc_stats_hash(DbTableHash* tb, DbHashStats*);
Eterm erts_ets_hash_sizeof_ext_segtab(void);
+void
+erts_db_foreach_thr_prgr_offheap_hash(void (*func)(ErlOffHeap *, void *),
+ void *arg);
#ifdef ERTS_ENABLE_LOCK_COUNT
void erts_lcnt_enable_db_hash_lock_count(DbTableHash *tb, int enable);
diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c
index 492ea81b63..19b94b0634 100644
--- a/erts/emulator/beam/erl_db_tree.c
+++ b/erts/emulator/beam/erl_db_tree.c
@@ -3936,6 +3936,12 @@ static int doit_select_replace(DbTableCommon *tb, TreeDbTerm **this,
return 1;
}
+void
+erts_db_foreach_thr_prgr_offheap_tree(void (*func)(ErlOffHeap *, void *),
+ void *arg)
+{
+}
+
#ifdef TREE_DEBUG
static void do_dump_tree2(DbTableCommon* tb, int to, void *to_arg, int show,
TreeDbTerm *t, int offset)
diff --git a/erts/emulator/beam/erl_db_tree.h b/erts/emulator/beam/erl_db_tree.h
index 54da2a6bc1..06e0005801 100644
--- a/erts/emulator/beam/erl_db_tree.h
+++ b/erts/emulator/beam/erl_db_tree.h
@@ -53,4 +53,8 @@ void db_initialize_tree(void);
int db_create_tree(Process *p, DbTable *tbl);
+void
+erts_db_foreach_thr_prgr_offheap_tree(void (*func)(ErlOffHeap *, void *),
+ void *arg);
+
#endif /* _DB_TREE_H */
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 67a73e4d57..13b1f8ab4d 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -151,6 +151,7 @@ static void grow_new_heap(Process *p, Uint new_sz, Eterm* objv, int nobj);
static void sweep_off_heap(Process *p, int fullsweep);
static void offset_heap(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size);
static void offset_heap_ptr(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size);
+static void offset_heap_ptr_nstack(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size);
static void offset_rootset(Process *p, Sint offs, char* area, Uint area_size,
Eterm* objv, int nobj);
static void offset_off_heap(Process* p, Sint offs, char* area, Uint area_size);
@@ -1054,9 +1055,10 @@ erts_garbage_collect_hibernate(Process* p)
n_htop = tmp_n_htop; \
} while(0)
+
/*
* offset_nstack() can ignore the descriptor-based traversal the other
- * nstack procedures use and simply call offset_heap_ptr() instead.
+ * nstack procedures use and do a simpler word by word traversal instead.
* This relies on two facts:
* 1. The only live non-Erlang terms on an nstack are return addresses,
* and they will be skipped thanks to the low/high range check.
@@ -1071,14 +1073,51 @@ static ERTS_INLINE void offset_nstack(Process* p, Sint offs,
{
if (p->hipe.nstack) {
ASSERT(p->hipe.nsp && p->hipe.nstend);
- offset_heap_ptr(hipe_nstack_start(p), hipe_nstack_used(p),
- offs, area, area_size);
+ offset_heap_ptr_nstack(hipe_nstack_start(p), hipe_nstack_used(p),
+ offs, area, area_size);
}
else {
ASSERT(!p->hipe.nsp && !p->hipe.nstend);
}
}
+/*
+ * This is the same as offset_heap_ptr(), except when built for VALGRIND.
+ * It then allows benign offsetting of undefined (dead) words on the nstack
+ * while keeping them marked as undefined, which suppresses Valgrind's
+ * "Conditional jump or move depends on uninitialised value(s)" reports.
+ */
+static void
+offset_heap_ptr_nstack(Eterm* hp, Uint sz, Sint offs,
+ char* area, Uint area_size)
+{
+ while (sz--) {
+ Eterm val = *hp;
+#ifdef VALGRIND
+ Eterm val_vbits;
+ VALGRIND_GET_VBITS(&val, &val_vbits, sizeof(val));
+ VALGRIND_MAKE_MEM_DEFINED(&val, sizeof(val));
+#endif
+ switch (primary_tag(val)) {
+ case TAG_PRIMARY_LIST:
+ case TAG_PRIMARY_BOXED:
+ if (ErtsInArea(ptr_val(val), area, area_size)) {
+#ifdef VALGRIND
+ VALGRIND_SET_VBITS(&val, val_vbits, sizeof(val));
+#endif
+ *hp = offset_ptr(val, offs);
+ }
+ hp++;
+ break;
+ default:
+ hp++;
+ break;
+ }
+ }
+}
+
+
#else /* !HIPE */
#define fullsweep_nstack(p,n_htop) (n_htop)
@@ -2848,7 +2887,7 @@ sweep_off_heap(Process *p, int fullsweep)
if (is_external_header(((struct erl_off_heap_header*) boxed_val(ptr->thing_word))->thing_word))
erts_node_bookkeep(((ExternalThing*)ptr)->node,
make_boxed(&ptr->thing_word),
- ERL_NODE_DEC);
+ ERL_NODE_DEC, __FILE__, __LINE__);
*prev = ptr = (struct erl_off_heap_header*) boxed_val(ptr->thing_word);
ASSERT(!IS_MOVED_BOXED(ptr->thing_word));
switch (ptr->thing_word) {
@@ -2879,7 +2918,7 @@ sweep_off_heap(Process *p, int fullsweep)
if (is_external_header(ptr->thing_word)) {
erts_node_bookkeep(((ExternalThing*)ptr)->node,
make_boxed(&ptr->thing_word),
- ERL_NODE_INC);
+ ERL_NODE_INC, __FILE__, __LINE__);
}
prev = &ptr->next;
ptr = ptr->next;
@@ -3050,9 +3089,11 @@ offset_heap(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size)
if (is_external_header(oh->thing_word)) {
erts_node_bookkeep(((ExternalThing*)oh)->node,
- make_boxed(((Eterm*)oh)-offs), ERL_NODE_DEC);
+ make_boxed(((Eterm*)oh)-offs),
+ ERL_NODE_DEC, __FILE__, __LINE__);
erts_node_bookkeep(((ExternalThing*)oh)->node,
- make_boxed((Eterm*)oh), ERL_NODE_INC);
+ make_boxed((Eterm*)oh), ERL_NODE_INC,
+ __FILE__, __LINE__);
}
if (ErtsInArea(oh->next, area, area_size)) {
diff --git a/erts/emulator/beam/erl_monitor_link.c b/erts/emulator/beam/erl_monitor_link.c
index 1c6b4afaa3..43028be39d 100644
--- a/erts/emulator/beam/erl_monitor_link.c
+++ b/erts/emulator/beam/erl_monitor_link.c
@@ -671,6 +671,24 @@ erts_monitor_tree_foreach(ErtsMonitor *root,
arg);
}
+void
+erts_debug_monitor_tree_destroying_foreach(ErtsMonitor *root,
+ ErtsMonitorFunc func,
+ void *arg,
+ void *vysp)
+{
+ void *tmp_vysp = erts_alloc(ERTS_ALC_T_ML_YIELD_STATE,
+ sizeof(ErtsMonLnkYieldState));
+ Sint reds;
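+    /* Run the traversal to completion on a private copy of the caller's
+     * yield state so the in-progress (destroying) traversal is not disturbed. */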
+    sys_memcpy(tmp_vysp, vysp, sizeof(ErtsMonLnkYieldState));
+ do {
+ reds = ml_rbt_foreach_yielding((ErtsMonLnkNode *) root,
+ (ErtsMonLnkNodeFunc) func,
+ arg, &tmp_vysp, (Sint) INT_MAX);
+ } while (reds <= 0);
+ ERTS_ML_ASSERT(!tmp_vysp);
+}
+
int
erts_monitor_tree_foreach_yielding(ErtsMonitor *root,
ErtsMonitorFunc func,
@@ -716,6 +734,19 @@ erts_monitor_list_foreach(ErtsMonitor *list,
arg, &ystate, (Sint) INT_MAX));
}
+void
+erts_debug_monitor_list_destroying_foreach(ErtsMonitor *list,
+ ErtsMonitorFunc func,
+ void *arg,
+ void *vysp)
+{
+ void *tmp_vysp = vysp;
+ while (!ml_dl_list_foreach_yielding((ErtsMonLnkNode *) list,
+ (int (*)(ErtsMonLnkNode *, void *, Sint)) func,
+ arg, &tmp_vysp, (Sint) INT_MAX));
+ ERTS_ML_ASSERT(!tmp_vysp);
+}
+
int
erts_monitor_list_foreach_yielding(ErtsMonitor *list,
ErtsMonitorFunc func,
@@ -1080,6 +1111,24 @@ erts_link_tree_foreach(ErtsLink *root,
}
+void
+erts_debug_link_tree_destroying_foreach(ErtsLink *root,
+ ErtsLinkFunc func,
+ void *arg,
+ void *vysp)
+{
+ void *tmp_vysp = erts_alloc(ERTS_ALC_T_ML_YIELD_STATE,
+ sizeof(ErtsMonLnkYieldState));
+ Sint reds;
+ sys_memcpy(tmp_vysp, vysp, sizeof(ErtsMonLnkYieldState));
+ do {
+ reds = ml_rbt_foreach_yielding((ErtsMonLnkNode *) root,
+ (ErtsMonLnkNodeFunc) func,
+ arg, &tmp_vysp, (Sint) INT_MAX);
+ } while (reds <= 0);
+ ERTS_ML_ASSERT(!tmp_vysp);
+}
+
int
erts_link_tree_foreach_yielding(ErtsLink *root,
ErtsLinkFunc func,
diff --git a/erts/emulator/beam/erl_monitor_link.h b/erts/emulator/beam/erl_monitor_link.h
index eff861fce8..86be400c09 100644
--- a/erts/emulator/beam/erl_monitor_link.h
+++ b/erts/emulator/beam/erl_monitor_link.h
@@ -1509,6 +1509,17 @@ ERTS_GLB_INLINE ErtsMonitorSuspend *erts_monitor_suspend(ErtsMonitor *mon)
#endif
+void
+erts_debug_monitor_tree_destroying_foreach(ErtsMonitor *root,
+ ErtsMonitorFunc func,
+ void *arg,
+ void *vysp);
+void
+erts_debug_monitor_list_destroying_foreach(ErtsMonitor *list,
+ ErtsMonitorFunc func,
+ void *arg,
+ void *vysp);
+
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* Link Operations *
\* */
@@ -2365,4 +2376,10 @@ erts_link_dist_delete(ErtsLink *lnk)
#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+void
+erts_debug_link_tree_destroying_foreach(ErtsLink *root,
+ ErtsLinkFunc func,
+ void *arg,
+ void *vysp);
+
#endif /* ERL_MONITOR_LINK_H__ */
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
index 4eb6c3e214..285252a53e 100644
--- a/erts/emulator/beam/erl_node_tables.c
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -932,7 +932,8 @@ static void try_delete_node(void *venp)
*
* If refc > 0, the entry is in use. Keep the entry.
*/
- erts_node_bookkeep(enp, THE_NON_VALUE, ERL_NODE_DEC);
+ erts_node_bookkeep(enp, THE_NON_VALUE, ERL_NODE_DEC,
+ __FILE__, __LINE__);
refc = erts_refc_dectest(&enp->refc, -1);
if (refc == -1)
(void) hash_erase(&erts_node_table, (void *) enp);
@@ -976,7 +977,7 @@ static void print_node(void *venp, void *vpndp)
if(pndp->sysname == NIL) {
erts_print(pndp->to, pndp->to_arg, "Name: %T ", enp->sysname);
}
- erts_print(pndp->to, pndp->to_arg, " %d", enp->creation);
+ erts_print(pndp->to, pndp->to_arg, " %u", enp->creation);
#ifdef DEBUG
erts_print(pndp->to, pndp->to_arg, " (refc=%ld)",
erts_refc_read(&enp->refc, 0));
@@ -1019,7 +1020,7 @@ void erts_print_node_info(fmtfn_t to,
/* ----------------------------------------------------------------------- */
void
-erts_set_this_node(Eterm sysname, Uint creation)
+erts_set_this_node(Eterm sysname, Uint32 creation)
{
ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
ASSERT(2 <= de_refc_read(erts_this_dist_entry, 2));
@@ -1164,6 +1165,12 @@ void erts_lcnt_update_distribution_locks(int enable) {
* can damage the real-time properties of the system. *
\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
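+/*
+ * With ERL_NODE_BOOKKEEP the reference table is kept alive between calls
+ * (it is deleted lazily on the next call), so use a standard allocator
+ * rather than a temporary one.
+ */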
+#ifdef ERL_NODE_BOOKKEEP
+#define ERTS_DBG_NC_ALLOC_TYPE ERTS_ALC_T_NC_STD
+#else
+#define ERTS_DBG_NC_ALLOC_TYPE ERTS_ALC_T_NC_TMP
+#endif
+
#include "erl_db.h"
#undef INIT_AM
@@ -1188,10 +1195,12 @@ static Eterm AM_delayed_delete_timer;
static Eterm AM_thread_progress_delete_timer;
static Eterm AM_sequence;
static Eterm AM_signal;
+static Eterm AM_persistent_term;
static void setup_reference_table(void);
static Eterm reference_table_term(Uint **hpp, ErlOffHeap *ohp, Uint *szp);
static void delete_reference_table(void);
+static void clear_system(void);
#undef ERTS_MAX__
#define ERTS_MAX__(A, B) ((A) > (B) ? (A) : (B))
@@ -1244,11 +1253,11 @@ typedef struct inserted_bin_ {
Binary *bin_val;
} InsertedBin;
-static ReferredNode *referred_nodes;
+static ReferredNode *referred_nodes = NULL;
static int no_referred_nodes;
-static ReferredDist *referred_dists;
+static ReferredDist *referred_dists = NULL;
static int no_referred_dists;
-static InsertedBin *inserted_bins;
+static InsertedBin *inserted_bins = NULL;
Eterm
erts_get_node_and_dist_references(struct process *proc)
@@ -1284,9 +1293,15 @@ erts_get_node_and_dist_references(struct process *proc)
INIT_AM(thread_progress_delete_timer);
INIT_AM(signal);
INIT_AM(sequence);
+ INIT_AM(persistent_term);
references_atoms_need_init = 0;
}
+#ifdef ERL_NODE_BOOKKEEP
+ if (referred_nodes || referred_dists || inserted_bins)
+ delete_reference_table();
+#endif
+
setup_reference_table();
/* Get term size */
@@ -1304,7 +1319,11 @@ erts_get_node_and_dist_references(struct process *proc)
ASSERT(endp == hp);
+#ifndef ERL_NODE_BOOKKEEP
delete_reference_table();
+#endif
+
+ clear_system();
erts_thr_progress_unblock();
erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
@@ -1339,7 +1358,7 @@ insert_dist_referrer(ReferredDist *referred_dist,
break;
if(!drp) {
- drp = (DistReferrer *) erts_alloc(ERTS_ALC_T_NC_TMP,
+ drp = (DistReferrer *) erts_alloc(ERTS_DBG_NC_ALLOC_TYPE,
sizeof(DistReferrer));
drp->next = referred_dist->referrers;
referred_dist->referrers = drp;
@@ -1402,7 +1421,7 @@ insert_node_referrer(ReferredNode *referred_node, int type, Eterm id)
break;
if(!nrp) {
- nrp = (NodeReferrer *) erts_alloc(ERTS_ALC_T_NC_TMP,
+ nrp = (NodeReferrer *) erts_alloc(ERTS_DBG_NC_ALLOC_TYPE,
sizeof(NodeReferrer));
nrp->next = referred_node->referrers;
ERTS_INIT_OFF_HEAP(&nrp->off_heap);
@@ -1516,7 +1535,8 @@ insert_offheap(ErlOffHeap *oh, int type, Eterm id)
erts_match_prog_foreach_offheap((Binary *) u.mref->mb,
insert_offheap2,
(void *) &a);
- nib = erts_alloc(ERTS_ALC_T_NC_TMP, sizeof(InsertedBin));
+ nib = erts_alloc(ERTS_DBG_NC_ALLOC_TYPE,
+ sizeof(InsertedBin));
nib->bin_val = (Binary *) u.mref->mb;
nib->next = inserted_bins;
inserted_bins = nib;
@@ -1574,18 +1594,6 @@ static int clear_visited_monitor(ErtsMonitor *mon, void *p, Sint reds)
}
static void
-insert_p_monitors(ErtsPTabElementCommon *p)
-{
- Eterm id = p->id;
- erts_monitor_tree_foreach(p->u.alive.monitors,
- insert_monitor,
- (void *) &id);
- erts_monitor_list_foreach(p->u.alive.lt_monitors,
- insert_monitor,
- (void *) &id);
-}
-
-static void
insert_dist_monitors(DistEntry *dep)
{
if (dep->mld) {
@@ -1600,8 +1608,10 @@ insert_dist_monitors(DistEntry *dep)
static int
-insert_sequence(ErtsDistExternal *edep, void *arg, Sint reds)
+insert_sequence(DistSeqNode *seq, void *arg, Sint reds)
{
+ ErtsDistExternal *edep = erts_get_dist_ext(&seq->hfrag);
+ insert_offheap(&seq->hfrag.off_heap, SEQUENCE_REF, *(Eterm*)arg);
insert_dist_entry(edep->dep, SEQUENCE_REF, *(Eterm*)arg, 0);
return 1;
}
@@ -1609,18 +1619,7 @@ insert_sequence(ErtsDistExternal *edep, void *arg, Sint reds)
static void
insert_dist_sequences(DistEntry *dep)
{
- erts_dist_seq_tree_foreach(dep, insert_sequence, (void *) &dep->sysname);
-}
-
-static void
-clear_visited_p_monitors(ErtsPTabElementCommon *p)
-{
- erts_monitor_tree_foreach(p->u.alive.monitors,
- clear_visited_monitor,
- NULL);
- erts_monitor_list_foreach(p->u.alive.lt_monitors,
- clear_visited_monitor,
- NULL);
+ erts_debug_dist_seq_tree_foreach(dep, insert_sequence, (void *) &dep->sysname);
}
static void
@@ -1667,13 +1666,6 @@ static int clear_visited_link(ErtsLink *lnk, void *p, Sint reds)
}
static void
-insert_p_links(ErtsPTabElementCommon *p)
-{
- Eterm id = p->id;
- erts_link_tree_foreach(p->u.alive.links, insert_link, (void *) &id);
-}
-
-static void
insert_dist_links(DistEntry *dep)
{
if (dep->mld)
@@ -1683,14 +1675,6 @@ insert_dist_links(DistEntry *dep)
}
static void
-clear_visited_p_links(ErtsPTabElementCommon *p)
-{
- erts_link_tree_foreach(p->u.alive.links,
- clear_visited_link,
- NULL);
-}
-
-static void
clear_visited_dist_links(DistEntry *dep)
{
if (dep->mld)
@@ -1885,15 +1869,11 @@ insert_process(Process *proc)
insert_sig_ext,
(void *) proc);
- /* If the process is FREE, the proc->common field has been
- re-used by the ptab delete, so we cannot trust it. */
- if (!(erts_atomic32_read_nob(&proc->state) & ERTS_PSFLG_FREE)) {
- /* Insert links */
- insert_p_links(&proc->common);
-
- /* Insert monitors */
- insert_p_monitors(&proc->common);
- }
+ /* Insert monitors and links... */
+ erts_debug_proc_monitor_link_foreach(proc,
+ insert_monitor,
+ insert_link,
+ (void *) &proc->common.id);
{
DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(proc);
@@ -1906,6 +1886,12 @@ insert_process(Process *proc)
}
static void
+insert_process2(Process *proc, void *arg)
+{
+ insert_process(proc);
+}
+
+static void
insert_dist_suspended_procs(DistEntry *dep)
{
ErtsProcList *plist = erts_proclist_peek_first(dep->suspended);
@@ -1916,16 +1902,47 @@ insert_dist_suspended_procs(DistEntry *dep)
}
}
+static void clear_process(Process *proc);
+
+static void
+clear_dist_suspended_procs(DistEntry *dep)
+{
+ ErtsProcList *plist = erts_proclist_peek_first(dep->suspended);
+ while (plist) {
+ if (is_not_immed(plist->u.pid))
+ clear_process(plist->u.p);
+ plist = erts_proclist_peek_next(dep->suspended, plist);
+ }
+}
+
+static void
+insert_persistent_term(ErlOffHeap *ohp, void *arg)
+{
+ Eterm heap[3];
+ insert_offheap(ohp, SYSTEM_REF,
+ TUPLE2(&heap[0], AM_system, AM_persistent_term));
+}
+
+static void
+insert_ets_offheap_thr_prgr(ErlOffHeap *ohp, void *arg)
+{
+ Eterm heap[3];
+ insert_offheap(ohp, ETS_REF,
+ TUPLE2(&heap[0], AM_system, AM_ets));
+}
+
#ifdef ERL_NODE_BOOKKEEP
void
-erts_node_bookkeep(ErlNode *np, Eterm term, int what)
+erts_node_bookkeep(ErlNode *np, Eterm term, int what, char *f, int l)
{
- erts_aint_t slot = (erts_atomic_inc_read_nob(&np->slot) - 1) % 1024;
+ erts_aint_t slot = (erts_atomic_inc_read_nob(&np->slot) - 1) % ERTS_BOOKKEEP_SIZE;
ErtsSchedulerData *esdp = erts_get_scheduler_data();
Eterm who = THE_NON_VALUE;
ASSERT(np);
np->books[slot].what = what;
np->books[slot].term = term;
+ np->books[slot].file = f;
+ np->books[slot].line = l;
if (esdp->current_process) {
who = esdp->current_process->common.id;
} else if (esdp->current_port) {
@@ -1946,14 +1963,14 @@ setup_reference_table(void)
inserted_bins = NULL;
hash_get_info(&hi, &erts_node_table);
- referred_nodes = erts_alloc(ERTS_ALC_T_NC_TMP,
+ referred_nodes = erts_alloc(ERTS_DBG_NC_ALLOC_TYPE,
hi.objs*sizeof(ReferredNode));
no_referred_nodes = 0;
hash_foreach(&erts_node_table, init_referred_node, NULL);
ASSERT(no_referred_nodes == hi.objs);
hash_get_info(&hi, &erts_dist_table);
- referred_dists = erts_alloc(ERTS_ALC_T_NC_TMP,
+ referred_dists = erts_alloc(ERTS_DBG_NC_ALLOC_TYPE,
hi.objs*sizeof(ReferredDist));
no_referred_dists = 0;
hash_foreach(&erts_dist_table, init_referred_dist, NULL);
@@ -1990,8 +2007,9 @@ setup_reference_table(void)
if (proc)
insert_process(proc);
}
+ erts_debug_free_process_foreach(insert_process2, NULL);
- erts_foreach_sys_msg_in_q(insert_sys_msg);
+ erts_debug_foreach_sys_msg_in_q(insert_sys_msg);
/* Insert all ports */
max = erts_ptab_max(&erts_port);
@@ -2008,10 +2026,18 @@ setup_reference_table(void)
if (state & ERTS_PORT_SFLGS_DEAD)
continue;
- /* Insert links */
- insert_p_links(&prt->common);
- /* Insert monitors */
- insert_p_monitors(&prt->common);
+ /* Insert links */
+ erts_link_tree_foreach(ERTS_P_LINKS(prt),
+ insert_link,
+ (void *) &prt->common.id);
+ /* Insert monitors */
+ erts_monitor_tree_foreach(ERTS_P_MONITORS(prt),
+ insert_monitor,
+ (void *) &prt->common.id);
+ /* Insert local target monitors */
+ erts_monitor_list_foreach(ERTS_P_LT_MONITORS(prt),
+ insert_monitor,
+ (void *) &prt->common.id);
/* Insert port data */
ohp = erts_port_data_offheap(prt);
if (ohp)
@@ -2091,11 +2117,16 @@ setup_reference_table(void)
}
/* Insert all ets tables */
- erts_db_foreach_table(insert_ets_table, NULL);
+ erts_db_foreach_table(insert_ets_table, NULL, 0);
+ erts_db_foreach_thr_prgr_offheap(insert_ets_offheap_thr_prgr, NULL);
/* Insert all bif timers */
erts_debug_bif_timer_foreach(insert_bif_timer, NULL);
+ /* Insert persistent term storage */
+ erts_debug_foreach_persistent_term_off_heap(insert_persistent_term,
+ NULL);
+
/* Insert node table (references to dist) */
hash_foreach(&erts_node_table, insert_erl_node, NULL);
}
@@ -2348,8 +2379,7 @@ static void noop_sig_ext(ErtsDistExternal *ext, void *arg)
static void
delete_reference_table(void)
{
- DistEntry *dep;
- int i, max;
+ int i;
for(i = 0; i < no_referred_nodes; i++) {
NodeReferrer *nrp;
NodeReferrer *tnrp;
@@ -2358,11 +2388,13 @@ delete_reference_table(void)
erts_cleanup_offheap(&nrp->off_heap);
tnrp = nrp;
nrp = nrp->next;
- erts_free(ERTS_ALC_T_NC_TMP, (void *) tnrp);
+ erts_free(ERTS_DBG_NC_ALLOC_TYPE, (void *) tnrp);
}
}
- if (referred_nodes)
- erts_free(ERTS_ALC_T_NC_TMP, (void *) referred_nodes);
+ if (referred_nodes) {
+ erts_free(ERTS_DBG_NC_ALLOC_TYPE, (void *) referred_nodes);
+ referred_nodes = NULL;
+ }
for(i = 0; i < no_referred_dists; i++) {
DistReferrer *drp;
@@ -2371,34 +2403,57 @@ delete_reference_table(void)
while(drp) {
tdrp = drp;
drp = drp->next;
- erts_free(ERTS_ALC_T_NC_TMP, (void *) tdrp);
+ erts_free(ERTS_DBG_NC_ALLOC_TYPE, (void *) tdrp);
}
}
- if (referred_dists)
- erts_free(ERTS_ALC_T_NC_TMP, (void *) referred_dists);
+ if (referred_dists) {
+ erts_free(ERTS_DBG_NC_ALLOC_TYPE, (void *) referred_dists);
+ referred_dists = NULL;
+ }
while(inserted_bins) {
InsertedBin *ib = inserted_bins;
inserted_bins = inserted_bins->next;
- erts_free(ERTS_ALC_T_NC_TMP, (void *)ib);
+ erts_free(ERTS_DBG_NC_ALLOC_TYPE, (void *)ib);
}
+}
+
+static void clear_process(Process *proc)
+{
+ erts_proc_sig_debug_foreach_sig(proc,
+ noop_sig_msg,
+ noop_sig_offheap,
+ clear_visited_monitor,
+ clear_visited_link,
+ noop_sig_ext,
+ (void *) proc);
+
+
+ /* Clear monitors and links... */
+ erts_debug_proc_monitor_link_foreach(proc,
+ clear_visited_monitor,
+ clear_visited_link,
+ (void *) &proc->common.id);
+}
+
+static void clear_process2(Process *proc, void *arg)
+{
+ clear_process(proc);
+}
- /* Cleanup... */
+static void
+clear_system(void)
+{
+ DistEntry *dep;
+ int i, max;
+ /* Clear... */
max = erts_ptab_max(&erts_proc);
for (i = 0; i < max; i++) {
Process *proc = erts_pix2proc(i);
- if (proc) {
- clear_visited_p_links(&proc->common);
- clear_visited_p_monitors(&proc->common);
- erts_proc_sig_debug_foreach_sig(proc,
- noop_sig_msg,
- noop_sig_offheap,
- clear_visited_monitor,
- clear_visited_link,
- noop_sig_ext,
- (void *) proc);
- }
+ if (proc)
+ clear_process(proc);
}
+ erts_debug_free_process_foreach(clear_process2, NULL);
max = erts_ptab_max(&erts_port);
for (i = 0; i < max; i++) {
@@ -2413,28 +2468,42 @@ delete_reference_table(void)
if (state & ERTS_PORT_SFLGS_DEAD)
continue;
- clear_visited_p_links(&prt->common);
- clear_visited_p_monitors(&prt->common);
+ /* Clear links */
+ erts_link_tree_foreach(ERTS_P_LINKS(prt),
+ clear_visited_link,
+ (void *) &prt->common.id);
+ /* Clear monitors */
+ erts_monitor_tree_foreach(ERTS_P_MONITORS(prt),
+ clear_visited_monitor,
+ (void *) &prt->common.id);
+ /* Clear local target monitors */
+ erts_monitor_list_foreach(ERTS_P_LT_MONITORS(prt),
+ clear_visited_monitor,
+ (void *) &prt->common.id);
}
for(dep = erts_visible_dist_entries; dep; dep = dep->next) {
clear_visited_dist_links(dep);
clear_visited_dist_monitors(dep);
+ clear_dist_suspended_procs(dep);
}
for(dep = erts_hidden_dist_entries; dep; dep = dep->next) {
clear_visited_dist_links(dep);
clear_visited_dist_monitors(dep);
+ clear_dist_suspended_procs(dep);
}
for(dep = erts_pending_dist_entries; dep; dep = dep->next) {
clear_visited_dist_links(dep);
clear_visited_dist_monitors(dep);
+ clear_dist_suspended_procs(dep);
}
for(dep = erts_not_connected_dist_entries; dep; dep = dep->next) {
clear_visited_dist_links(dep);
clear_visited_dist_monitors(dep);
+ clear_dist_suspended_procs(dep);
}
}
diff --git a/erts/emulator/beam/erl_node_tables.h b/erts/emulator/beam/erl_node_tables.h
index aa8af12555..beae2df75f 100644
--- a/erts/emulator/beam/erl_node_tables.h
+++ b/erts/emulator/beam/erl_node_tables.h
@@ -187,6 +187,7 @@ set pagination off
set $i = 0
set $node = referred_nodes[$node_ix].node
while $i < $node->slot.counter
+ printf "%s:%d ", $node->books[$i].file, $node->books[$i].line
printf "%p: ", $node->books[$i].term
etp-1 $node->books[$i].who
printf " "
@@ -211,8 +212,12 @@ lists:usort(lists:filter(fun({V,N}) -> N /= 0 end, maps:to_list(Accs))).
struct erl_node_bookkeeping {
Eterm who;
Eterm term;
+ char *file;
+ int line;
enum { ERL_NODE_INC, ERL_NODE_DEC } what;
};
+
+#define ERTS_BOOKKEEP_SIZE (1024)
#endif
typedef struct erl_node_ {
@@ -222,7 +227,7 @@ typedef struct erl_node_ {
Uint32 creation; /* Creation */
DistEntry *dist_entry; /* Corresponding dist entry */
#ifdef ERL_NODE_BOOKKEEP
- struct erl_node_bookkeeping books[1024];
+ struct erl_node_bookkeeping books[ERTS_BOOKKEEP_SIZE];
erts_atomic_t slot;
#endif
} ErlNode;
@@ -259,7 +264,7 @@ void erts_set_dist_entry_pending(DistEntry *);
void erts_set_dist_entry_connected(DistEntry *, Eterm, Uint);
ErlNode *erts_find_or_insert_node(Eterm, Uint32, Eterm);
void erts_schedule_delete_node(ErlNode *);
-void erts_set_this_node(Eterm, Uint);
+void erts_set_this_node(Eterm, Uint32);
Uint erts_node_table_size(void);
void erts_init_node_tables(int);
void erts_node_table_info(fmtfn_t, void *);
@@ -276,14 +281,21 @@ Eterm erts_build_dhandle(Eterm **hpp, ErlOffHeap*, DistEntry*, Uint32 conn_id);
Eterm erts_make_dhandle(Process *c_p, DistEntry*, Uint32 conn_id);
ERTS_GLB_INLINE void erts_init_node_entry(ErlNode *np, erts_aint_t val);
+#ifdef ERL_NODE_BOOKKEEP
+#define erts_ref_node_entry(NP, MIN, T) erts_ref_node_entry__((NP), (MIN), (T), __FILE__, __LINE__)
+#define erts_deref_node_entry(NP, T) erts_deref_node_entry__((NP), (T), __FILE__, __LINE__)
+ERTS_GLB_INLINE erts_aint_t erts_ref_node_entry__(ErlNode *np, int min_val, Eterm term, char *file, int line);
+ERTS_GLB_INLINE void erts_deref_node_entry__(ErlNode *np, Eterm term, char *file, int line);
+#else
ERTS_GLB_INLINE erts_aint_t erts_ref_node_entry(ErlNode *np, int min_val, Eterm term);
ERTS_GLB_INLINE void erts_deref_node_entry(ErlNode *np, Eterm term);
+#endif
ERTS_GLB_INLINE void erts_de_rlock(DistEntry *dep);
ERTS_GLB_INLINE void erts_de_runlock(DistEntry *dep);
ERTS_GLB_INLINE void erts_de_rwlock(DistEntry *dep);
ERTS_GLB_INLINE void erts_de_rwunlock(DistEntry *dep);
#ifdef ERL_NODE_BOOKKEEP
-void erts_node_bookkeep(ErlNode *, Eterm , int);
+void erts_node_bookkeep(ErlNode *, Eterm , int, char *file, int line);
#else
#define erts_node_bookkeep(...)
#endif
@@ -296,21 +308,40 @@ erts_init_node_entry(ErlNode *np, erts_aint_t val)
erts_refc_init(&np->refc, val);
}
+#ifdef ERL_NODE_BOOKKEEP
+
+ERTS_GLB_INLINE erts_aint_t
+erts_ref_node_entry__(ErlNode *np, int min_val, Eterm term, char *file, int line)
+{
+ erts_node_bookkeep(np, term, ERL_NODE_INC, file, line);
+ return erts_refc_inctest(&np->refc, min_val);
+}
+
+ERTS_GLB_INLINE void
+erts_deref_node_entry__(ErlNode *np, Eterm term, char *file, int line)
+{
+ erts_node_bookkeep(np, term, ERL_NODE_DEC, file, line);
+ if (erts_refc_dectest(&np->refc, 0) == 0)
+ erts_schedule_delete_node(np);
+}
+
+#else
+
ERTS_GLB_INLINE erts_aint_t
erts_ref_node_entry(ErlNode *np, int min_val, Eterm term)
{
- erts_node_bookkeep(np, term, ERL_NODE_INC);
return erts_refc_inctest(&np->refc, min_val);
}
ERTS_GLB_INLINE void
erts_deref_node_entry(ErlNode *np, Eterm term)
{
- erts_node_bookkeep(np, term, ERL_NODE_DEC);
if (erts_refc_dectest(&np->refc, 0) == 0)
erts_schedule_delete_node(np);
}
+#endif
+
ERTS_GLB_INLINE void
erts_de_rlock(DistEntry *dep)
{
diff --git a/erts/emulator/beam/erl_printf_term.c b/erts/emulator/beam/erl_printf_term.c
index 2e33a8a782..67c486a0db 100644
--- a/erts/emulator/beam/erl_printf_term.c
+++ b/erts/emulator/beam/erl_printf_term.c
@@ -533,13 +533,34 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount) {
case EXPORT_DEF:
{
Export* ep = *((Export **) (export_val(wobj) + 1));
- Atom* module = atom_tab(atom_val(ep->info.mfa.module));
- Atom* name = atom_tab(atom_val(ep->info.mfa.function));
+ long tdcount;
+ int tres;
PRINT_STRING(res, fn, arg, "fun ");
- PRINT_BUF(res, fn, arg, module->name, module->len);
+
+ /* We pass a temporary 'dcount' and adjust the real one later to ensure
+ * that the fun doesn't get split up between the module and function
+ * name. */
+ tdcount = MAX_ATOM_SZ_LIMIT;
+ tres = print_atom_name(fn, arg, ep->info.mfa.module, &tdcount);
+ if (tres < 0) {
+ res = tres;
+ goto L_done;
+ }
+ *dcount -= (MAX_ATOM_SZ_LIMIT - tdcount);
+ res += tres;
+
PRINT_CHAR(res, fn, arg, ':');
- PRINT_BUF(res, fn, arg, name->name, name->len);
+
+ tdcount = MAX_ATOM_SZ_LIMIT;
+ tres = print_atom_name(fn, arg, ep->info.mfa.function, &tdcount);
+ if (tres < 0) {
+ res = tres;
+ goto L_done;
+ }
+ *dcount -= (MAX_ATOM_SZ_LIMIT - tdcount);
+ res += tres;
+
PRINT_CHAR(res, fn, arg, '/');
PRINT_SWORD(res, fn, arg, 'd', 0, 1,
(ErlPfSWord) ep->info.mfa.arity);
diff --git a/erts/emulator/beam/erl_proc_sig_queue.c b/erts/emulator/beam/erl_proc_sig_queue.c
index 6bbd59e8e3..b60fb64342 100644
--- a/erts/emulator/beam/erl_proc_sig_queue.c
+++ b/erts/emulator/beam/erl_proc_sig_queue.c
@@ -4689,10 +4689,12 @@ erts_proc_sig_debug_foreach_sig(Process *c_p,
case ERTS_SIG_Q_OP_MONITOR_DOWN:
switch (type) {
case ERTS_SIG_Q_TYPE_GEN_EXIT:
- if (ERTS_SIG_IS_GEN_EXIT_EXTERNAL(sig))
- debug_foreach_sig_external(sig, ext_func, arg);
- else
+ if (!ERTS_SIG_IS_GEN_EXIT_EXTERNAL(sig))
debug_foreach_sig_heap_frags(&sig->hfrag, oh_func, arg);
+ else {
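+                /* External exit signal: visit both the heap fragment's
+                 * off-heap list and the external data. */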
+ oh_func(&sig->hfrag.off_heap, arg);
+ debug_foreach_sig_external(sig, ext_func, arg);
+ }
break;
case ERTS_LNK_TYPE_PORT:
case ERTS_LNK_TYPE_PROC:
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 70f48ddd97..e8c83276f5 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -8568,9 +8568,6 @@ erts_start_schedulers(void)
{
ethr_tid tid;
int res = 0;
- Uint actual;
- Uint wanted = erts_no_schedulers;
- Uint wanted_no_schedulers = erts_no_schedulers;
char name[16];
ethr_thr_opts opts = ETHR_THR_OPTS_DEFAULT_INITER;
int ix;
@@ -8584,40 +8581,34 @@ erts_start_schedulers(void)
erts_snprintf(opts.name, 16, "runq_supervisor");
erts_atomic_init_nob(&runq_supervisor_sleeping, 0);
if (0 != ethr_event_init(&runq_supervision_event))
- erts_exit(ERTS_ERROR_EXIT, "Failed to create run-queue supervision event\n");
+ erts_exit(ERTS_ABORT_EXIT, "Failed to create run-queue supervision event\n");
res = ethr_thr_create(&runq_supervisor_tid,
runq_supervisor,
NULL,
&opts);
if (0 != res)
- erts_exit(ERTS_ERROR_EXIT, "Failed to create run-queue supervision thread, "
+ erts_exit(ERTS_ABORT_EXIT, "Failed to create run-queue supervision thread, "
"error = %d\n", res);
}
opts.suggested_stack_size = erts_sched_thread_suggested_stack_size;
- if (wanted < 1)
- wanted = 1;
- if (wanted > ERTS_MAX_NO_OF_SCHEDULERS) {
- wanted = ERTS_MAX_NO_OF_SCHEDULERS;
- res = ENOTSUP;
- }
-
- for (actual = 0; actual < wanted; actual++) {
- ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(actual);
-
- ASSERT(actual == esdp->no - 1);
-
- erts_snprintf(opts.name, 16, "%lu_scheduler", actual + 1);
+ ASSERT(erts_no_schedulers > 0 && erts_no_schedulers <= ERTS_MAX_NO_OF_SCHEDULERS);
+ for (ix = 0; ix < erts_no_schedulers; ix++) {
+ ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(ix);
+ ASSERT(ix == esdp->no - 1);
+ erts_snprintf(opts.name, 16, "%lu_scheduler", ix + 1);
res = ethr_thr_create(&esdp->tid, sched_thread_func, (void*)esdp, &opts);
-
if (res != 0) {
- break;
+ erts_exit(ERTS_ABORT_EXIT, "Failed to create scheduler thread %d, error = %d\n", ix, res);
}
}
- erts_no_schedulers = actual;
+
+    /* Probably not needed as thread creation implies a memory barrier,
+       but we do one just to be safe. */
+ ERTS_THR_MEMORY_BARRIER;
{
for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) {
@@ -8626,7 +8617,7 @@ erts_start_schedulers(void)
opts.suggested_stack_size = erts_dcpu_sched_thread_suggested_stack_size;
res = ethr_thr_create(&esdp->tid,sched_dirty_cpu_thread_func,(void*)esdp,&opts);
if (res != 0)
- erts_exit(ERTS_ERROR_EXIT, "Failed to create dirty cpu scheduler thread %d, error = %d\n", ix, res);
+ erts_exit(ERTS_ABORT_EXIT, "Failed to create dirty cpu scheduler thread %d, error = %d\n", ix, res);
}
for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix);
@@ -8634,40 +8625,22 @@ erts_start_schedulers(void)
opts.suggested_stack_size = erts_dio_sched_thread_suggested_stack_size;
res = ethr_thr_create(&esdp->tid,sched_dirty_io_thread_func,(void*)esdp,&opts);
if (res != 0)
- erts_exit(ERTS_ERROR_EXIT, "Failed to create dirty io scheduler thread %d, error = %d\n", ix, res);
+ erts_exit(ERTS_ABORT_EXIT, "Failed to create dirty io scheduler thread %d, error = %d\n", ix, res);
}
}
- ERTS_THR_MEMORY_BARRIER;
-
erts_snprintf(opts.name, 16, "aux");
res = ethr_thr_create(&tid, aux_thread, NULL, &opts);
if (res != 0)
- erts_exit(ERTS_ERROR_EXIT, "Failed to create aux thread, error = %d\n", res);
+ erts_exit(ERTS_ABORT_EXIT, "Failed to create aux thread, error = %d\n", res);
for (ix = 0; ix < erts_no_poll_threads; ix++) {
erts_snprintf(opts.name, 16, "%d_poller", ix);
res = ethr_thr_create(&tid, poll_thread, (void*)(UWord)ix, &opts);
if (res != 0)
- erts_exit(ERTS_ERROR_EXIT, "Failed to create poll thread\n");
- }
-
- if (actual < 1)
- erts_exit(ERTS_ERROR_EXIT,
- "Failed to create any scheduler-threads: %s (%d)\n",
- erl_errno_id(res),
- res);
- if (res != 0) {
- erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- ASSERT(actual != wanted_no_schedulers);
- erts_dsprintf(dsbufp,
- "Failed to create %beu scheduler-threads (%s:%d); "
- "only %beu scheduler-thread%s created.\n",
- wanted_no_schedulers, erl_errno_id(res), res,
- actual, actual == 1 ? " was" : "s were");
- erts_send_error_to_logger_nogl(dsbufp);
+ erts_exit(ERTS_ABORT_EXIT, "Failed to create poll thread\n");
}
}
@@ -12143,6 +12116,7 @@ erts_proc_exit_handle_dist_monitor(ErtsMonitor *mon, void *vctxt, Sint reds)
ErtsHeapFactory factory;
Sint reds_consumed = 0;
+ ASSERT(c_p->flags & F_DISABLE_GC);
ASSERT(erts_monitor_is_target(mon) && mon->type == ERTS_MON_TYPE_DIST_PROC);
mdp = erts_monitor_to_data(mon);
@@ -12190,7 +12164,6 @@ erts_proc_exit_handle_dist_monitor(ErtsMonitor *mon, void *vctxt, Sint reds)
switch (code) {
case ERTS_DSIG_SEND_CONTINUE:
case ERTS_DSIG_SEND_YIELD:
- erts_set_gc_state(c_p, 0);
ctxt->dist_state = erts_dsend_export_trap_context(c_p, &ctx);
reds_consumed = reds; /* force yield */
break;
@@ -12198,7 +12171,6 @@ erts_proc_exit_handle_dist_monitor(ErtsMonitor *mon, void *vctxt, Sint reds)
break;
case ERTS_DSIG_SEND_TOO_LRG:
erts_kill_dist_connection(dep, dist->connection_id);
- erts_set_gc_state(c_p, 1);
break;
default:
ASSERT(! "Invalid dsig send exit monitor result");
@@ -12402,6 +12374,7 @@ erts_proc_exit_handle_dist_link(ErtsLink *lnk, void *vctxt, Sint reds)
ErtsHeapFactory factory;
Sint reds_consumed = 0;
+ ASSERT(c_p->flags & F_DISABLE_GC);
ASSERT(lnk->type == ERTS_LNK_TYPE_DIST_PROC);
dlnk = erts_link_to_other(lnk, &ldp);
dist = ((ErtsLinkDataExtended *) ldp)->dist;
@@ -12441,7 +12414,6 @@ erts_proc_exit_handle_dist_link(ErtsLink *lnk, void *vctxt, Sint reds)
switch (code) {
case ERTS_DSIG_SEND_YIELD:
case ERTS_DSIG_SEND_CONTINUE:
- erts_set_gc_state(c_p, 0);
ctxt->dist_state = erts_dsend_export_trap_context(c_p, &ctx);
reds_consumed = reds; /* force yield */
break;
@@ -12449,7 +12421,6 @@ erts_proc_exit_handle_dist_link(ErtsLink *lnk, void *vctxt, Sint reds)
break;
case ERTS_DSIG_SEND_TOO_LRG:
erts_kill_dist_connection(dep, dist->connection_id);
- erts_set_gc_state(c_p, 1);
break;
default:
ASSERT(! "Invalid dsig send exit monitor result");
@@ -12997,6 +12968,8 @@ restart:
yield_allowed = 0;
#endif
+    /* Enable GC again; though strictly not needed, it puts
+       the process in a consistent state. */
erts_set_gc_state(p, 1);
/* Set state to not active as we don't want this process
@@ -13541,3 +13514,79 @@ erts_debug_later_op_foreach(void (*callback)(void*),
}
}
}
+
+void
+erts_debug_free_process_foreach(void (*func)(Process *, void *), void *arg)
+{
+ ErtsRunQueue *rq;
+ int ix, prio;
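+    /* Walk all run queues and call 'func' for every process that is still
+     * enqueued but already marked FREE (i.e. exiting). */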
+ for (ix = 0; ix < erts_no_run_queues; ix++) {
+ rq = ERTS_RUNQ_IX(ix);
+ for (prio = PRIORITY_MAX; prio < PRIORITY_LOW; prio++) {
+ Process *p = rq->procs.prio[prio].first;
+ for (; p; p = p->next) {
+ if (ERTS_PSFLG_FREE & erts_atomic32_read_nob(&p->state))
+ (*func)(p, arg);
+ }
+ }
+ }
+}
+
+void
+erts_debug_proc_monitor_link_foreach(Process *proc,
+ int (*monitor_func)(ErtsMonitor *, void *, Sint ),
+ int (*link_func)(ErtsLink *, void *, Sint ),
+ void *arg)
+{
+ if (!(erts_atomic32_read_nob(&proc->state) & ERTS_PSFLG_FREE)) {
+ /* For all links */
+ erts_link_tree_foreach(ERTS_P_LINKS(proc),
+ link_func,
+ arg);
+ /* For all monitors */
+ erts_monitor_tree_foreach(ERTS_P_MONITORS(proc),
+ monitor_func,
+ arg);
+ /* For all local target monitors */
+ erts_monitor_list_foreach(ERTS_P_LT_MONITORS(proc),
+ monitor_func,
+ arg);
+ }
+ else {
+ struct continue_exit_state *ce_state = proc->u.terminate;
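+        /* The process is being torn down; its monitors and links now live
+         * in the continue-exit state. The collection of the current exit
+         * phase may be mid-traversal, hence the *_destroying_foreach
+         * variants below. */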
+
+ /* For all links */
+ if (ce_state->phase == ERTS_CONTINUE_EXIT_LINKS)
+ erts_debug_link_tree_destroying_foreach(ce_state->links,
+ link_func,
+ arg,
+ ce_state->yield_state);
+ else
+ erts_link_tree_foreach(ce_state->links,
+ link_func,
+ arg);
+
+ /* For all monitors */
+ if (ce_state->phase == ERTS_CONTINUE_EXIT_MONITORS)
+ erts_debug_monitor_tree_destroying_foreach(ce_state->monitors,
+ monitor_func,
+ arg,
+ ce_state->yield_state);
+ else
+ erts_monitor_tree_foreach(ce_state->monitors,
+ monitor_func,
+ arg);
+
+ /* For all local target monitors */
+ if (ce_state->phase == ERTS_CONTINUE_EXIT_LT_MONITORS)
+ erts_debug_monitor_list_destroying_foreach(ce_state->lt_monitors,
+ monitor_func,
+ arg,
+ ce_state->yield_state);
+ else
+ erts_monitor_list_foreach(ce_state->lt_monitors,
+ monitor_func,
+ arg);
+
+ }
+}
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 3f3c7de64d..405611c584 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -701,6 +701,13 @@ void
erts_debug_later_op_foreach(void (*callback)(void*),
void (*func)(void *, ErtsThrPrgrVal, void *),
void *arg);
+void
+erts_debug_free_process_foreach(void (*func)(Process *, void *), void *arg);
+void
+erts_debug_proc_monitor_link_foreach(Process *proc,
+ int (*monitor_func)(ErtsMonitor *, void *, Sint ),
+ int (*link_func)(ErtsLink *, void *, Sint ),
+ void *arg);
#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
@@ -2355,6 +2362,8 @@ erts_try_change_runq_proc(Process *p, ErtsRunQueue *rq)
old_rqint);
if (act_rqint == old_rqint)
return !0;
+
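+            /* The run-queue field changed under us; retry with the value
+               just read. */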
+ old_rqint = act_rqint;
}
}
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index ffe0752b46..5c46a10d64 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -2195,11 +2195,12 @@ sys_msg_dispatcher_wait(void *vwait_p)
erts_mtx_unlock(&smq_mtx);
}
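+/* File scope so that erts_debug_foreach_sys_msg_in_q() below can also walk
+   messages currently being dispatched. */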
+static ErtsSysMsgQ *local_sys_message_queue = NULL;
+
static void *
sys_msg_dispatcher_func(void *unused)
{
ErtsThrPrgrCallbacks callbacks;
- ErtsSysMsgQ *local_sys_message_queue = NULL;
ErtsThrPrgrData *tpd;
int wait = 0;
@@ -2207,6 +2208,8 @@ sys_msg_dispatcher_func(void *unused)
erts_lc_set_thread_name("system message dispatcher");
#endif
+ local_sys_message_queue = NULL;
+
callbacks.arg = (void *) &wait;
callbacks.wakeup = sys_msg_dispatcher_wakeup;
callbacks.prepare_wait = sys_msg_dispatcher_prep_wait;
@@ -2263,6 +2266,8 @@ sys_msg_dispatcher_func(void *unused)
Process *proc = NULL;
Port *port = NULL;
+ ASSERT(is_value(smqp->msg));
+
if (erts_thr_progress_update(tpd))
erts_thr_progress_leader_update(tpd);
@@ -2375,6 +2380,7 @@ sys_msg_dispatcher_func(void *unused)
erts_fprintf(stderr, "dropped\n");
#endif
}
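+            /* Mark the message as handled so that
+               erts_debug_foreach_sys_msg_in_q() skips it. */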
+ smqp->msg = THE_NON_VALUE;
}
}
@@ -2382,32 +2388,38 @@ sys_msg_dispatcher_func(void *unused)
}
void
-erts_foreach_sys_msg_in_q(void (*func)(Eterm,
- Eterm,
- Eterm,
- ErlHeapFragment *))
+erts_debug_foreach_sys_msg_in_q(void (*func)(Eterm,
+ Eterm,
+ Eterm,
+ ErlHeapFragment *))
{
- ErtsSysMsgQ *sm;
- erts_mtx_lock(&smq_mtx);
- for (sm = sys_message_queue; sm; sm = sm->next) {
- Eterm to;
- switch (sm->type) {
- case SYS_MSG_TYPE_SYSMON:
- to = erts_get_system_monitor();
- break;
- case SYS_MSG_TYPE_SYSPROF:
- to = erts_get_system_profile();
- break;
- case SYS_MSG_TYPE_ERRLGR:
- to = erts_get_system_logger();
- break;
- default:
- to = NIL;
- break;
- }
- (*func)(sm->from, to, sm->msg, sm->bp);
+ ErtsSysMsgQ *smq[] = {sys_message_queue, local_sys_message_queue};
+ int i;
+
+ ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
+
+ for (i = 0; i < sizeof(smq)/sizeof(smq[0]); i++) {
+ ErtsSysMsgQ *sm;
+ for (sm = smq[i]; sm; sm = sm->next) {
+ Eterm to;
+ switch (sm->type) {
+ case SYS_MSG_TYPE_SYSMON:
+ to = erts_get_system_monitor();
+ break;
+ case SYS_MSG_TYPE_SYSPROF:
+ to = erts_get_system_profile();
+ break;
+ case SYS_MSG_TYPE_ERRLGR:
+ to = erts_get_system_logger();
+ break;
+ default:
+ to = NIL;
+ break;
+ }
+ if (is_value(sm->msg))
+ (*func)(sm->from, to, sm->msg, sm->bp);
+ }
}
- erts_mtx_unlock(&smq_mtx);
}
diff --git a/erts/emulator/beam/erl_trace.h b/erts/emulator/beam/erl_trace.h
index bb5c9ac276..d25f97c656 100644
--- a/erts/emulator/beam/erl_trace.h
+++ b/erts/emulator/beam/erl_trace.h
@@ -90,10 +90,10 @@ int erts_is_tracer_valid(Process* p);
void erts_check_my_tracer_proc(Process *);
void erts_block_sys_msg_dispatcher(void);
void erts_release_sys_msg_dispatcher(void);
-void erts_foreach_sys_msg_in_q(void (*func)(Eterm,
- Eterm,
- Eterm,
- ErlHeapFragment *));
+void erts_debug_foreach_sys_msg_in_q(void (*func)(Eterm,
+ Eterm,
+ Eterm,
+ ErlHeapFragment *));
Eterm erts_set_system_logger(Eterm);
Eterm erts_get_system_logger(void);
void erts_queue_error_logger_message(Eterm, Eterm, ErlHeapFragment *);
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index 4156eb8d1e..5cea253ebe 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -51,18 +51,17 @@
#define MAX_STRING_LEN 0xffff
-/* MAX value for the creation field in pid, port and reference
- for the local node and for the current external format.
-
- Larger creation values than this are allowed in external pid, port and refs
- encoded with NEW_PID_EXT, NEW_PORT_EXT and NEWER_REFERENCE_EXT.
- The point here is to prepare for future upgrade to 32-bit creation.
- OTP-19 (erts-8.0) can handle big creation values from other (newer) nodes,
- but do not use big creation values for the local node yet,
- as we still may have to communicate with older nodes.
+/*
+ * MAX value for the creation field in pid, port and reference
+ * for the old PID_EXT, PORT_EXT, REFERENCE_EXT and NEW_REFERENCE_EXT.
+ * Older nodes (OTP 19-22) will send us these so we must be able to decode them.
+ *
+ * From OTP 23 DFLAG_BIG_CREATION is mandatory so this node will always
+ * encode with new big 32-bit creations using NEW_PID_EXT, NEW_PORT_EXT
+ * and NEWER_REFERENCE_EXT.
*/
-#define ERTS_MAX_LOCAL_CREATION (3)
-#define is_valid_creation(Cre) ((unsigned)(Cre) <= ERTS_MAX_LOCAL_CREATION)
+#define ERTS_MAX_TINY_CREATION (3)
+#define is_tiny_creation(Cre) ((unsigned)(Cre) <= ERTS_MAX_TINY_CREATION)
#undef ERTS_DEBUG_USE_DIST_SEP
#ifdef DEBUG
@@ -2469,7 +2468,8 @@ enc_pid(ErtsAtomCacheMap *acmp, Eterm pid, byte* ep, Uint32 dflags)
Eterm sysname = ((is_internal_pid(pid) && (dflags & DFLAG_INTERNAL_TAGS))
? INTERNAL_LOCAL_SYSNAME : pid_node_name(pid));
Uint32 creation = pid_creation(pid);
- byte* tagp = ep++;
+
+ *ep++ = NEW_PID_EXT;
/* insert atom here containing host and sysname */
ep = enc_atom(acmp, sysname, ep, dflags);
@@ -2481,15 +2481,8 @@ enc_pid(ErtsAtomCacheMap *acmp, Eterm pid, byte* ep, Uint32 dflags)
ep += 4;
put_int32(os, ep);
ep += 4;
- if (creation <= ERTS_MAX_LOCAL_CREATION) {
- *tagp = PID_EXT;
- *ep++ = creation;
- } else {
- ASSERT(is_external_pid(pid));
- *tagp = NEW_PID_EXT;
- put_int32(creation, ep);
- ep += 4;
- }
+ put_int32(creation, ep);
+ ep += 4;
return ep;
}
@@ -2609,7 +2602,7 @@ dec_pid(ErtsDistExternal *edep, ErtsHeapFactory* factory, byte* ep,
if (tag == PID_EXT) {
cre = get_int8(ep);
ep += 1;
- if (!is_valid_creation(cre)) {
+ if (!is_tiny_creation(cre)) {
return NULL;
}
} else {
@@ -2870,25 +2863,18 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
Eterm sysname = (((dflags & DFLAG_INTERNAL_TAGS) && is_internal_ref(obj))
? INTERNAL_LOCAL_SYSNAME : ref_node_name(obj));
Uint32 creation = ref_creation(obj);
- byte* tagp = ep++;
ASSERT(dflags & DFLAG_EXTENDED_REFERENCES);
erts_magic_ref_save_bin(obj);
+ *ep++ = NEWER_REFERENCE_EXT;
i = ref_no_numbers(obj);
put_int16(i, ep);
ep += 2;
ep = enc_atom(acmp, sysname, ep, dflags);
- if (creation <= ERTS_MAX_LOCAL_CREATION) {
- *tagp = NEW_REFERENCE_EXT;
- *ep++ = creation;
- } else {
- ASSERT(is_external_ref(obj));
- *tagp = NEWER_REFERENCE_EXT;
- put_int32(creation, ep);
- ep += 4;
- }
+ put_int32(creation, ep);
+ ep += 4;
ref_num = ref_numbers(obj);
for (j = 0; j < i; j++) {
put_int32(ref_num[j], ep);
@@ -2901,21 +2887,14 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
Eterm sysname = (((dflags & DFLAG_INTERNAL_TAGS) && is_internal_port(obj))
? INTERNAL_LOCAL_SYSNAME : port_node_name(obj));
Uint32 creation = port_creation(obj);
- byte* tagp = ep++;
+ *ep++ = NEW_PORT_EXT;
ep = enc_atom(acmp, sysname, ep, dflags);
j = port_number(obj);
put_int32(j, ep);
ep += 4;
- if (creation <= ERTS_MAX_LOCAL_CREATION) {
- *tagp = PORT_EXT;
- *ep++ = creation;
- } else {
- ASSERT(is_external_port(obj));
- *tagp = NEW_PORT_EXT;
- put_int32(creation, ep);
- ep += 4;
- }
+ put_int32(creation, ep);
+ ep += 4;
break;
}
case LIST_DEF:
@@ -3610,7 +3589,7 @@ dec_term_atom_common:
if (tag == PORT_EXT) {
cre = get_int8(ep);
ep++;
- if (!is_valid_creation(cre)) {
+ if (!is_tiny_creation(cre)) {
goto error;
}
}
@@ -3657,7 +3636,7 @@ dec_term_atom_common:
cre = get_int8(ep);
ep += 1;
- if (!is_valid_creation(cre)) {
+ if (!is_tiny_creation(cre)) {
goto error;
}
goto ref_ext_common;
@@ -3671,7 +3650,7 @@ dec_term_atom_common:
cre = get_int8(ep);
ep += 1;
- if (!is_valid_creation(cre)) {
+ if (!is_tiny_creation(cre)) {
goto error;
}
r0 = get_int32(ep);
@@ -4334,30 +4313,21 @@ encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj,
result += 1 + 4 + 1 + i; /* tag,size,sign,digits */
break;
case EXTERNAL_PID_DEF:
- if (external_pid_creation(obj) > ERTS_MAX_LOCAL_CREATION)
- result += 3;
- /*fall through*/
case PID_DEF:
result += (1 + encode_size_struct2(acmp, pid_node_name(obj), dflags) +
- 4 + 4 + 1);
+ 4 + 4 + 4);
break;
case EXTERNAL_REF_DEF:
- if (external_ref_creation(obj) > ERTS_MAX_LOCAL_CREATION)
- result += 3;
- /*fall through*/
case REF_DEF:
ASSERT(dflags & DFLAG_EXTENDED_REFERENCES);
i = ref_no_numbers(obj);
result += (1 + 2 + encode_size_struct2(acmp, ref_node_name(obj), dflags) +
- 1 + 4*i);
+ 4 + 4*i);
break;
case EXTERNAL_PORT_DEF:
- if (external_port_creation(obj) > ERTS_MAX_LOCAL_CREATION)
- result += 3;
- /*fall through*/
case PORT_DEF:
result += (1 + encode_size_struct2(acmp, port_node_name(obj), dflags) +
- 4 + 1);
+ 4 + 4);
break;
case LIST_DEF: {
int is_str = is_external_string(obj, &m);
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 4c8d3d3dbe..0c2cf98033 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -907,7 +907,8 @@ Eterm erl_is_function(Process* p, Eterm arg1, Eterm arg2);
/* beam_bif_load.c */
Eterm erts_check_process_code(Process *c_p, Eterm module, int *redsp, int fcalls);
Eterm erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed);
-
+void erts_debug_foreach_release_literal_area_off_heap(void (*func)(ErlOffHeap *, void *),
+ void *arg);
typedef struct ErtsLiteralArea_ {
struct erl_off_heap_header *off_heap;
Eterm *end;
@@ -1072,17 +1073,46 @@ Uint size_object_x(Eterm, erts_literal_area_t*);
#define size_object_litopt(Term,LitArea) size_object_x(Term,LitArea)
Uint copy_shared_calculate(Eterm, erts_shcopy_t*);
-Eterm copy_shared_perform(Eterm, Uint, erts_shcopy_t*, Eterm**, ErlOffHeap*);
-
Uint size_shared(Eterm);
+/* #define ERTS_COPY_REGISTER_LOCATION */
+
+#ifdef ERTS_COPY_REGISTER_LOCATION
+
+#define copy_shared_perform(U, V, X, Y, Z) \
+ copy_shared_perform_x((U), (V), (X), (Y), (Z), __FILE__, __LINE__)
+Eterm copy_shared_perform_x(Eterm, Uint, erts_shcopy_t*, Eterm**, ErlOffHeap*,
+ char *file, int line);
+
+Eterm copy_struct_x(Eterm, Uint, Eterm**, ErlOffHeap*, Uint*, erts_literal_area_t*,
+ char *file, int line);
+#define copy_struct(Obj,Sz,HPP,OH) \
+ copy_struct_x(Obj,Sz,HPP,OH,NULL,NULL,__FILE__,__LINE__)
+#define copy_struct_litopt(Obj,Sz,HPP,OH,LitArea) \
+ copy_struct_x(Obj,Sz,HPP,OH,NULL,LitArea,__FILE__,__LINE__)
+
+#define copy_shallow(R, SZ, HPP, OH) \
+ copy_shallow_x((R), (SZ), (HPP), (OH), __FILE__, __LINE__)
+Eterm copy_shallow_x(Eterm* ERTS_RESTRICT, Uint, Eterm**, ErlOffHeap*,
+ char *file, int line);
+
+#else
+
+#define copy_shared_perform(U, V, X, Y, Z) \
+ copy_shared_perform_x((U), (V), (X), (Y), (Z))
+Eterm copy_shared_perform_x(Eterm, Uint, erts_shcopy_t*, Eterm**, ErlOffHeap*);
+
Eterm copy_struct_x(Eterm, Uint, Eterm**, ErlOffHeap*, Uint*, erts_literal_area_t*);
#define copy_struct(Obj,Sz,HPP,OH) \
copy_struct_x(Obj,Sz,HPP,OH,NULL,NULL)
#define copy_struct_litopt(Obj,Sz,HPP,OH,LitArea) \
copy_struct_x(Obj,Sz,HPP,OH,NULL,LitArea)
-Eterm copy_shallow(Eterm* ERTS_RESTRICT, Uint, Eterm**, ErlOffHeap*);
+#define copy_shallow(R, SZ, HPP, OH) \
+ copy_shallow_x((R), (SZ), (HPP), (OH))
+Eterm copy_shallow_x(Eterm* ERTS_RESTRICT, Uint, Eterm**, ErlOffHeap*);
+
+#endif
void erts_move_multi_frags(Eterm** hpp, ErlOffHeap*, ErlHeapFragment* first,
Eterm* refs, unsigned nrefs, int literals);
@@ -1257,6 +1287,10 @@ Uint erts_persistent_term_count(void);
void erts_init_persistent_dumping(void);
extern ErtsLiteralArea** erts_persistent_areas;
extern Uint erts_num_persistent_areas;
+void erts_debug_foreach_persistent_term_off_heap(void (*func)(ErlOffHeap *, void *),
+ void *arg);
+int erts_debug_have_accessed_literal_area(ErtsLiteralArea *lap);
+void erts_debug_save_accessed_literal_area(ErtsLiteralArea *lap);
/* external.c */
void erts_init_external(void);