Diffstat (limited to 'erts/emulator')
-rw-r--r--  erts/emulator/beam/beam_bif_load.c | 14
-rw-r--r--  erts/emulator/beam/beam_emu.c | 5
-rw-r--r--  erts/emulator/beam/copy.c | 84
-rw-r--r--  erts/emulator/beam/erl_bif_info.c | 7
-rw-r--r--  erts/emulator/beam/erl_db.c | 4
-rw-r--r--  erts/emulator/beam/erl_db_hash.c | 2
-rw-r--r--  erts/emulator/beam/erl_gc.c | 118
-rw-r--r--  erts/emulator/beam/erl_hl_timer.c | 5
-rw-r--r--  erts/emulator/beam/erl_init.c | 5
-rw-r--r--  erts/emulator/beam/erl_message.c | 17
-rw-r--r--  erts/emulator/beam/erl_nif.c | 7
-rw-r--r--  erts/emulator/beam/erl_process.c | 10
-rw-r--r--  erts/emulator/beam/erl_trace.c | 20
-rw-r--r--  erts/emulator/beam/global.h | 31
-rw-r--r--  erts/emulator/beam/hash.c | 21
-rw-r--r--  erts/emulator/beam/utils.c | 157
-rw-r--r--  erts/emulator/drivers/common/inet_drv.c | 1
-rw-r--r--  erts/emulator/hipe/hipe_gc.c | 119
-rw-r--r--  erts/emulator/hipe/hipe_risc_gc.h | 13
-rw-r--r--  erts/emulator/hipe/hipe_stack.h | 3
-rw-r--r--  erts/emulator/hipe/hipe_x86_gc.h | 17
-rw-r--r--  erts/emulator/internal_doc/Tracing.md | 2
-rw-r--r--  erts/emulator/test/Makefile | 1
-rw-r--r--  erts/emulator/test/distribution_SUITE.erl | 101
-rw-r--r--  erts/emulator/test/hipe_SUITE.erl | 64
-rw-r--r--  erts/emulator/test/hipe_SUITE_data/literals.erl | 26
-rw-r--r--  erts/emulator/test/hipe_SUITE_data/ref_cell.erl | 64
-rw-r--r--  erts/emulator/test/map_SUITE.erl | 35
-rw-r--r--  erts/emulator/test/port_SUITE.erl | 25
29 files changed, 708 insertions, 270 deletions
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 15e878ba65..37e74400bc 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -37,6 +37,10 @@
#include "erl_bits.h"
#include "erl_thr_progress.h"
+#ifdef HIPE
+# include "hipe_stack.h"
+#endif
+
static void set_default_trace_pattern(Eterm module);
static Eterm check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls);
static void delete_code(Module* modp);
@@ -873,12 +877,12 @@ check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls
continue;
{
ErlHeapFragment *hf;
- Uint lit_sz;
+ Uint lit_sz = 0;
for (hf=hfrag; hf; hf = hf->next) {
if (check_mod_funs(rp, &hfrag->off_heap, mod_start, mod_size))
return am_true;
- lit_sz = hfrag_literal_size(&hf->mem[0], &hf->mem[hf->used_size],
- literals, lit_bsize);
+ lit_sz += hfrag_literal_size(&hf->mem[0], &hf->mem[hf->used_size],
+ literals, lit_bsize);
}
if (lit_sz > 0) {
ErlHeapFragment *bp = new_message_buffer(lit_sz);
@@ -916,6 +920,10 @@ check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls
}
if (any_heap_ref_ptrs(rp->stop, rp->hend, literals, lit_bsize))
goto try_literal_gc;
+#ifdef HIPE
+ if (nstack_any_heap_ref_ptrs(rp, literals, lit_bsize))
+ goto try_literal_gc;
+#endif
if (any_heap_refs(rp->heap, rp->htop, literals, lit_bsize))
goto try_literal_gc;
if (any_heap_refs(rp->old_heap, rp->old_htop, literals, lit_bsize))
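For reference, a minimal standalone sketch (not part of the patch) of the accumulation bug the check_process_code() hunk above fixes: the per-fragment literal size was previously assigned with "=" rather than summed with "+=", so only the last heap fragment's literals were counted. All demo_ names below are invented for illustration.

#include <stdio.h>

typedef unsigned long Uint;

typedef struct demo_frag {
    struct demo_frag *next;
    Uint lit_words;                 /* literal words referenced by this fragment */
} demo_frag_t;

/* Sum the literal size over the whole fragment chain.  Before the fix the
 * running total was assigned (=) instead of accumulated (+=), so only the
 * last fragment's contribution survived. */
static Uint demo_total_literal_size(const demo_frag_t *hfrag)
{
    Uint lit_sz = 0;                        /* start at zero, then accumulate */
    const demo_frag_t *hf;
    for (hf = hfrag; hf; hf = hf->next)
        lit_sz += hf->lit_words;
    return lit_sz;
}

int main(void)
{
    demo_frag_t second = { NULL, 3 };
    demo_frag_t first  = { &second, 5 };
    printf("total literal words: %lu\n", demo_total_literal_size(&first)); /* 8 */
    return 0;
}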
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 757b69787a..6100c6c923 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -2161,7 +2161,10 @@ void process_main(void)
c_p->i = (BeamInstr *) Arg(0); /* L1 */
SWAPOUT;
c_p->arity = 0;
- erts_smp_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_ACTIVE);
+
+ if (!ERTS_PTMR_IS_TIMED_OUT(c_p))
+ erts_smp_atomic32_read_band_relb(&c_p->state,
+ ~ERTS_PSFLG_ACTIVE);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
c_p->current = NULL;
diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c
index ccc4cbad43..c4dcd6a3cc 100644
--- a/erts/emulator/beam/copy.c
+++ b/erts/emulator/beam/copy.c
@@ -40,7 +40,8 @@ static void move_one_frag(Eterm** hpp, ErlHeapFragment*, ErlOffHeap*, int);
/*
* Copy object "obj" to process p.
*/
-Eterm copy_object_x(Eterm obj, Process* to, Uint extra) {
+Eterm copy_object_x(Eterm obj, Process* to, Uint extra)
+{
if (!is_immed(obj)) {
Uint size = size_object(obj);
Eterm* hp = HAllocX(to, size, extra);
@@ -70,33 +71,46 @@ Eterm copy_object_x(Eterm obj, Process* to, Uint extra) {
* Return the "flat" size of the object.
*/
-Uint size_object(Eterm obj)
+#define in_literal_purge_area(PTR) \
+ (lit_purge_ptr && ( \
+ (lit_purge_ptr <= (PTR) && \
+ (PTR) < (lit_purge_ptr + lit_purge_sz))))
+
+Uint size_object_x(Eterm obj, erts_literal_area_t *litopt)
{
Uint sum = 0;
Eterm* ptr;
int arity;
+ Eterm *lit_purge_ptr = litopt ? litopt->lit_purge_ptr : NULL;
+ Uint lit_purge_sz = litopt ? litopt->lit_purge_sz : 0;
#ifdef DEBUG
Eterm mypid = erts_get_current_pid();
#endif
-
DECLARE_ESTACK(s);
-
VERBOSE(DEBUG_SHCOPY, ("[pid=%T] size_object %p\n", mypid, obj));
for (;;) {
switch (primary_tag(obj)) {
case TAG_PRIMARY_LIST:
- sum += 2;
ptr = list_val(obj);
+ if (litopt && erts_is_literal(obj,ptr) && !in_literal_purge_area(ptr)) {
+ goto pop_next;
+ }
+ sum += 2;
obj = *ptr++;
if (!IS_CONST(obj)) {
ESTACK_PUSH(s, obj);
- }
+ }
obj = *ptr;
break;
case TAG_PRIMARY_BOXED:
{
- Eterm hdr = *boxed_val(obj);
+ Eterm hdr;
+ ptr = boxed_val(obj);
+ if (litopt && erts_is_literal(obj,ptr) && !in_literal_purge_area(ptr)) {
+ goto pop_next;
+ }
+ hdr = *ptr;
ASSERT(is_header(hdr));
switch (hdr & _TAG_HEADER_MASK) {
case ARITYVAL_SUBTAG:
@@ -279,10 +293,6 @@ do { \
#define COUNT_OFF_HEAP (0)
-#define IN_LITERAL_PURGE_AREA(info, ptr) \
- ((info)->range_ptr && ( \
- (info)->range_ptr <= (ptr) && \
- (ptr) < ((info)->range_ptr + (info)->range_sz)))
/*
* Return the real size of an object and find sharing information
* This currently returns the same as erts_debug:size/1.
@@ -599,7 +609,7 @@ cleanup:
/*
* Copy a structure to a heap.
*/
-Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint *bsz)
+Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint *bsz, erts_literal_area_t *litopt)
{
char* hstart;
Uint hsize;
@@ -616,6 +626,8 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
Eterm hdr;
Eterm *hend;
int i;
+ Eterm *lit_purge_ptr = litopt ? litopt->lit_purge_ptr : NULL;
+ Uint lit_purge_sz = litopt ? litopt->lit_purge_sz : 0;
#ifdef DEBUG
Eterm org_obj = obj;
Uint org_sz = sz;
@@ -651,7 +663,6 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
L_copy:
while (hp != htop) {
obj = *hp;
-
switch (primary_tag(obj)) {
case TAG_PRIMARY_IMMED1:
hp++;
@@ -667,6 +678,10 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
L_copy_list:
tailp = argp;
+ if (litopt && erts_is_literal(obj,objp) && !in_literal_purge_area(objp)) {
+ *tailp = obj;
+ goto L_copy;
+ }
for (;;) {
tp = tailp;
elem = CAR(objp);
@@ -674,18 +689,23 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
hbot -= 2;
CAR(hbot) = elem;
tailp = &CDR(hbot);
- }
- else {
+ } else {
CAR(htop) = elem;
tailp = &CDR(htop);
htop += 2;
}
*tp = make_list(tailp - 1);
obj = CDR(objp);
+
if (!is_list(obj)) {
break;
}
objp = list_val(obj);
+
+ if (litopt && erts_is_literal(obj,objp) && !in_literal_purge_area(objp)) {
+ *tailp = obj;
+ goto L_copy;
+ }
}
switch (primary_tag(obj)) {
case TAG_PRIMARY_IMMED1: *tailp = obj; goto L_copy;
@@ -695,7 +715,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
"%s, line %d: Internal error in copy_struct: 0x%08x\n",
__FILE__, __LINE__,obj);
}
-
+
case TAG_PRIMARY_BOXED:
if (ErtsInArea(boxed_val(obj),hstart,hsize)) {
hp++;
@@ -705,6 +725,10 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
L_copy_boxed:
objp = boxed_val(obj);
+ if (litopt && erts_is_literal(obj,objp) && !in_literal_purge_area(objp)) {
+ *argp = obj;
+ break;
+ }
hdr = *objp;
switch (hdr & _TAG_HEADER_MASK) {
case ARITYVAL_SUBTAG:
@@ -765,7 +789,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
extra_bytes = 1;
} else {
extra_bytes = 0;
- }
+ }
real_size = size+extra_bytes;
objp = binary_val(real_bin);
if (thing_subtag(*objp) == HEAP_BINARY_SUBTAG) {
@@ -780,7 +804,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
} else {
ProcBin* from = (ProcBin *) objp;
ProcBin* to;
-
+
ASSERT(thing_subtag(*objp) == REFC_BINARY_SUBTAG);
if (from->flags) {
erts_emasculate_writable_binary(from);
@@ -900,6 +924,12 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
*bsz = hend - hbot;
} else {
#ifdef DEBUG
+ if (!eq(org_obj, res)) {
+ erts_exit(ERTS_ABORT_EXIT,
+ "Internal error in copy_struct() when copying %T:"
+ " not equal to copy %T\n",
+ org_obj, res);
+ }
if (htop != hbot)
erts_exit(ERTS_ABORT_EXIT,
"Internal error in copy_struct() when copying %T:"
@@ -1036,6 +1066,8 @@ Uint copy_shared_calculate(Eterm obj, erts_shcopy_t *info)
Uint e;
unsigned sz;
Eterm* ptr;
+ Eterm *lit_purge_ptr = info->lit_purge_ptr;
+ Uint lit_purge_sz = info->lit_purge_sz;
#ifdef DEBUG
Eterm mypid = erts_get_current_pid();
#endif
@@ -1081,7 +1113,7 @@ Uint copy_shared_calculate(Eterm obj, erts_shcopy_t *info)
/* off heap list pointers are copied verbatim */
if (erts_is_literal(obj,ptr)) {
VERBOSE(DEBUG_SHCOPY, ("[pid=%T] bypassed copying %p is %T\n", mypid, ptr, obj));
- if (IN_LITERAL_PURGE_AREA(info,ptr))
+ if (in_literal_purge_area(ptr))
info->literal_size += size_object(obj);
goto pop_next;
}
@@ -1132,7 +1164,7 @@ Uint copy_shared_calculate(Eterm obj, erts_shcopy_t *info)
/* off heap pointers to boxes are copied verbatim */
if (erts_is_literal(obj,ptr)) {
VERBOSE(DEBUG_SHCOPY, ("[pid=%T] bypassed copying %p is %T\n", mypid, ptr, obj));
- if (IN_LITERAL_PURGE_AREA(info,ptr))
+ if (in_literal_purge_area(ptr))
info->literal_size += size_object(obj);
goto pop_next;
}
@@ -1298,6 +1330,8 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
Eterm* resp;
Eterm *hbot, *hend;
unsigned remaining;
+ Eterm *lit_purge_ptr = info->lit_purge_ptr;
+ Uint lit_purge_sz = info->lit_purge_sz;
#ifdef DEBUG
Eterm mypid = erts_get_current_pid();
Eterm saved_obj = obj;
@@ -1347,11 +1381,11 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
ptr = list_val(obj);
/* off heap list pointers are copied verbatim */
if (erts_is_literal(obj,ptr)) {
- if (!IN_LITERAL_PURGE_AREA(info,ptr)) {
+ if (!in_literal_purge_area(ptr)) {
*resp = obj;
} else {
Uint bsz = 0;
- *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz);
+ *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz, NULL); /* copy literal */
hbot -= bsz;
}
goto cleanup_next;
@@ -1415,11 +1449,11 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
ptr = boxed_val(obj);
/* off heap pointers to boxes are copied verbatim */
if (erts_is_literal(obj,ptr)) {
- if (!IN_LITERAL_PURGE_AREA(info,ptr)) {
+ if (!in_literal_purge_area(ptr)) {
*resp = obj;
} else {
Uint bsz = 0;
- *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz);
+ *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz, NULL); /* copy literal */
hbot -= bsz;
}
goto cleanup_next;
@@ -1923,7 +1957,7 @@ move_one_frag(Eterm** hpp, ErlHeapFragment* frag, ErlOffHeap* off_heap, int lite
if (is_header(val)) {
struct erl_off_heap_header* hdr = (struct erl_off_heap_header*)hp;
ASSERT(ptr + header_arity(val) < end);
- MOVE_BOXED(ptr, val, hp, &dummy_ref);
+ MOVE_BOXED(ptr, val, hp, &dummy_ref);
switch (val & _HEADER_SUBTAG_MASK) {
case REFC_BINARY_SUBTAG:
case FUN_SUBTAG:
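The copy.c hunks above thread an optional erts_literal_area_t through size_object_x() and copy_struct_x() so that literals are deep-copied only when they lie in the area currently being purged. A minimal standalone sketch of that range check follows, assuming nothing beyond what the in_literal_purge_area() macro shows; the demo_ names and simplified types are illustrative only.

#include <stdio.h>

typedef unsigned long Eterm;            /* stand-in for the emulator's Eterm */
typedef unsigned long Uint;

typedef struct {
    Eterm *lit_purge_ptr;               /* start of the literal area being purged, or NULL */
    Uint   lit_purge_sz;                /* size of that area in words */
} erts_literal_area_t;

/* Mirrors the in_literal_purge_area() check: a literal pointer is copied only
 * when a purge is in progress and the pointer falls in the half-open range
 * [lit_purge_ptr, lit_purge_ptr + lit_purge_sz); otherwise the literal is
 * kept as a verbatim reference. */
static int demo_in_literal_purge_area(const erts_literal_area_t *la, const Eterm *ptr)
{
    return la && la->lit_purge_ptr
        && la->lit_purge_ptr <= ptr
        && ptr < la->lit_purge_ptr + la->lit_purge_sz;
}

int main(void)
{
    Eterm purged[16], other[4];
    erts_literal_area_t la = { purged, 16 };
    printf("inside purge area:  %d\n", demo_in_literal_purge_area(&la, &purged[4])); /* 1 */
    printf("outside purge area: %d\n", demo_in_literal_purge_area(&la, &other[0]));  /* 0 */
    return 0;
}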
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index e5d3f38ce4..cb7278696f 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -2284,9 +2284,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
} else if (ERTS_IS_ATOM_STR("dist_ctrl", BIF_ARG_1)) {
DistEntry *dep;
i = 0;
- /* Need to be the only thread running... */
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
for (dep = erts_visible_dist_entries; dep; dep = dep->next)
++i;
for (dep = erts_hidden_dist_entries; dep; dep = dep->next)
@@ -2309,8 +2307,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
res = CONS(hp, tpl, res);
hp += 2;
}
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
BIF_RET(res);
} else if (BIF_ARG_1 == am_system_version) {
erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index bad34211a5..128a7b3865 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -1094,7 +1094,7 @@ BIF_RETTYPE ets_insert_2(BIF_ALIST_2)
CHECK_TABLES();
- /* Write lock table if more than one object to keep atomicy */
+ /* Write lock table if more than one object to keep atomicity */
kind = ((is_list(BIF_ARG_2) && CDR(list_val(BIF_ARG_2)) != NIL)
? LCK_WRITE : LCK_WRITE_REC);
@@ -1164,7 +1164,7 @@ BIF_RETTYPE ets_insert_new_2(BIF_ALIST_2)
Eterm lookup_ret;
DbTableMethod* meth;
- /* More than one object, use LCK_WRITE to keep atomicy */
+ /* More than one object, use LCK_WRITE to keep atomicity */
kind = LCK_WRITE;
tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, kind);
if (tb == NULL) {
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index 12ae086b31..5e6fe4f460 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -40,7 +40,7 @@
** DB_FINE_LOCKED set. The table variable is_thread_safe will then indicate
** if operations need to obtain fine grained locks or not. Some operations
** will for example always use exclusive table lock to guarantee
-** a higher level of atomicy.
+** a higher level of atomicity.
*/
/* FIXATION:
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index a224383493..ef18a508a5 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -885,6 +885,58 @@ erts_garbage_collect_hibernate(Process* p)
}
+/*
+ * HiPE native code stack scanning procedures:
+ * - fullsweep_nstack()
+ * - gensweep_nstack()
+ * - offset_nstack()
+ * - sweep_literals_nstack()
+ */
+#if defined(HIPE)
+
+#define GENSWEEP_NSTACK(p,old_htop,n_htop) \
+ do { \
+ Eterm *tmp_old_htop = old_htop; \
+ Eterm *tmp_n_htop = n_htop; \
+ gensweep_nstack((p), &tmp_old_htop, &tmp_n_htop); \
+ old_htop = tmp_old_htop; \
+ n_htop = tmp_n_htop; \
+ } while(0)
+
+/*
+ * offset_nstack() can ignore the descriptor-based traversal the other
+ * nstack procedures use and simply call offset_heap_ptr() instead.
+ * This relies on two facts:
+ * 1. The only live non-Erlang terms on an nstack are return addresses,
+ * and they will be skipped thanks to the low/high range check.
+ * 2. Dead values, even if mistaken for pointers into the low/high area,
+ * can be offset safely since they won't be dereferenced.
+ *
+ * XXX: WARNING: If HiPE starts storing other non-Erlang values on the
+ * nstack, such as floats, then this will have to be changed.
+ */
+static ERTS_INLINE void offset_nstack(Process* p, Sint offs,
+ char* area, Uint area_size)
+{
+ if (p->hipe.nstack) {
+ ASSERT(p->hipe.nsp && p->hipe.nstend);
+ offset_heap_ptr(hipe_nstack_start(p), hipe_nstack_used(p),
+ offs, area, area_size);
+ }
+ else {
+ ASSERT(!p->hipe.nsp && !p->hipe.nstend);
+ }
+}
+
+#else /* !HIPE */
+
+#define fullsweep_nstack(p,n_htop) (n_htop)
+#define GENSWEEP_NSTACK(p,old_htop,n_htop) do{}while(0)
+#define offset_nstack(p,offs,area,area_size) do{}while(0)
+#define sweep_literals_nstack(p,old_htop,area,area_size) (old_htop)
+
+#endif /* HIPE */
+
void
erts_garbage_collect_literals(Process* p, Eterm* literals,
Uint byte_lit_size,
@@ -947,7 +999,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
area_size = byte_lit_size;
n = setup_rootset(p, p->arg_reg, p->arity, &rootset);
roots = rootset.roots;
- old_htop = p->old_htop;
+ old_htop = sweep_literals_nstack(p, p->old_htop, area, area_size);
while (n--) {
Eterm* g_ptr = roots->v;
Uint g_sz = roots->sz;
@@ -1214,56 +1266,6 @@ minor_collection(Process* p, ErlHeapFragment *live_hf_end,
return -1;
}
-/*
- * HiPE native code stack scanning procedures:
- * - fullsweep_nstack()
- * - gensweep_nstack()
- * - offset_nstack()
- */
-#if defined(HIPE)
-
-#define GENSWEEP_NSTACK(p,old_htop,n_htop) \
- do { \
- Eterm *tmp_old_htop = old_htop; \
- Eterm *tmp_n_htop = n_htop; \
- gensweep_nstack((p), &tmp_old_htop, &tmp_n_htop); \
- old_htop = tmp_old_htop; \
- n_htop = tmp_n_htop; \
- } while(0)
-
-/*
- * offset_nstack() can ignore the descriptor-based traversal the other
- * nstack procedures use and simply call offset_heap_ptr() instead.
- * This relies on two facts:
- * 1. The only live non-Erlang terms on an nstack are return addresses,
- * and they will be skipped thanks to the low/high range check.
- * 2. Dead values, even if mistaken for pointers into the low/high area,
- * can be offset safely since they won't be dereferenced.
- *
- * XXX: WARNING: If HiPE starts storing other non-Erlang values on the
- * nstack, such as floats, then this will have to be changed.
- */
-static ERTS_INLINE void offset_nstack(Process* p, Sint offs,
- char* area, Uint area_size)
-{
- if (p->hipe.nstack) {
- ASSERT(p->hipe.nsp && p->hipe.nstend);
- offset_heap_ptr(hipe_nstack_start(p), hipe_nstack_used(p),
- offs, area, area_size);
- }
- else {
- ASSERT(!p->hipe.nsp && !p->hipe.nstend);
- }
-}
-
-#else /* !HIPE */
-
-#define fullsweep_nstack(p,n_htop) (n_htop)
-#define GENSWEEP_NSTACK(p,old_htop,n_htop) do{}while(0)
-#define offset_nstack(p,offs,area,area_size) do{}while(0)
-
-#endif /* HIPE */
-
static void
do_minor(Process *p, ErlHeapFragment *live_hf_end,
char *mature, Uint mature_size,
@@ -2155,26 +2157,18 @@ copy_one_frag(Eterm** hpp, ErlOffHeap* off_heap,
*hp++ = val;
break;
case TAG_PRIMARY_LIST:
-#ifdef SHCOPY_SEND
if (erts_is_literal(val,list_val(val))) {
*hp++ = val;
} else {
*hp++ = offset_ptr(val, offs);
}
-#else
- *hp++ = offset_ptr(val, offs);
-#endif
break;
case TAG_PRIMARY_BOXED:
-#ifdef SHCOPY_SEND
if (erts_is_literal(val,boxed_val(val))) {
*hp++ = val;
} else {
*hp++ = offset_ptr(val, offs);
}
-#else
- *hp++ = offset_ptr(val, offs);
-#endif
break;
case TAG_PRIMARY_HEADER:
*hp++ = val;
@@ -2260,10 +2254,12 @@ move_msgq_to_heap(Process *p)
ASSERT(mp->data.dist_ext->heap_size >= 0);
if (is_not_nil(ERL_MESSAGE_TOKEN(mp))) {
bp = erts_dist_ext_trailer(mp->data.dist_ext);
+ /* Tokens does not use literal optimization */
ERL_MESSAGE_TOKEN(mp) = copy_struct(ERL_MESSAGE_TOKEN(mp),
bp->used_size,
- &factory.hp,
- factory.off_heap);
+ &factory.hp,
+ factory.off_heap);
+
erts_cleanup_offheap(&bp->off_heap);
}
ERL_MESSAGE_TERM(mp) = erts_decode_dist_ext(&factory,
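The offset_nstack() comment moved earlier in erl_gc.c above argues that a plain offset pass is safe for the HiPE native stack: return addresses miss the low/high range check, and stale values are offset but never dereferenced. Below is a rough standalone sketch of such a pass over untagged word slots; the demo_ names and the simplified representation are assumptions for illustration, not the emulator's offset_heap_ptr().

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t UWord;                /* stand-in machine word */

/* Offset every slot whose value points into [area, area + area_size).
 * Values outside the range (e.g. return addresses) are left untouched, and
 * stale values inside the range are offset but never dereferenced, which is
 * the argument the offset_nstack() comment makes. */
static void demo_offset_slots(UWord *slots, size_t nslots, ptrdiff_t byte_offs,
                              const char *area, size_t area_size)
{
    UWord lo = (UWord) area, hi = lo + area_size;
    size_t i;
    for (i = 0; i < nslots; i++) {
        if (slots[i] >= lo && slots[i] < hi)
            slots[i] += (UWord) byte_offs;      /* relocate into the moved heap */
    }
}

int main(void)
{
    char old_heap[64], new_heap[64];
    ptrdiff_t offs = (ptrdiff_t) ((UWord) new_heap - (UWord) old_heap);
    UWord slots[2] = { (UWord) &old_heap[8], 0x1234 }; /* one pointer, one plain value */

    demo_offset_slots(slots, 2, offs, old_heap, sizeof old_heap);
    printf("relocated: %d, untouched: %d\n",
           slots[0] == (UWord) &new_heap[8], slots[1] == 0x1234);
    return 0;
}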
diff --git a/erts/emulator/beam/erl_hl_timer.c b/erts/emulator/beam/erl_hl_timer.c
index ebeff51aac..f1bef28186 100644
--- a/erts/emulator/beam/erl_hl_timer.c
+++ b/erts/emulator/beam/erl_hl_timer.c
@@ -735,7 +735,10 @@ proc_timeout_common(Process *proc, void *tmr)
if (tmr == (void *) erts_smp_atomic_cmpxchg_mb(&proc->common.timer,
ERTS_PTMR_TIMEDOUT,
(erts_aint_t) tmr)) {
- erts_aint32_t state = erts_smp_atomic32_read_acqb(&proc->state);
+ erts_aint32_t state;
+ erts_smp_proc_lock(proc, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ state = erts_smp_atomic32_read_acqb(&proc->state);
+ erts_smp_proc_unlock(proc, ERTS_PROC_LOCKS_MSG_RECEIVE);
if (!(state & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_EXITING)))
erts_schedule_process(proc, state, 0);
return 1;
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 2239b6f045..07cfacf14a 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -2346,14 +2346,15 @@ erts_exit_vv(int n, int flush_async, char *fmt, va_list args1, va_list args2)
if (erts_mtrace_enabled)
erts_mtrace_exit((Uint32) n);
+ if (fmt != NULL && *fmt != '\0')
+ erl_error(fmt, args2); /* Print error message. */
+
/* Produce an Erlang core dump if error */
if (((n == ERTS_ERROR_EXIT && erts_no_crash_dump == 0) || n == ERTS_DUMP_EXIT)
&& erts_initialized) {
erl_crash_dump_v((char*) NULL, 0, fmt, args1);
}
- if (fmt != NULL && *fmt != '\0')
- erl_error(fmt, args2); /* Print error message. */
sys_tty_reset(n);
if (n == ERTS_INTR_EXIT)
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index 91e06cde2d..e9f0586edd 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -696,6 +696,9 @@ erts_send_message(Process* sender,
erts_aint32_t receiver_state;
#ifdef SHCOPY_SEND
erts_shcopy_t info;
+#else
+ erts_literal_area_t litarea;
+ INITIALIZE_LITERAL_PURGE_AREA(litarea);
#endif
#ifdef USE_VM_PROBES
@@ -725,7 +728,7 @@ erts_send_message(Process* sender,
*/
if (have_seqtrace(stoken)) {
seq_trace_update_send(sender);
- seq_trace_output(stoken, message, SEQ_TRACE_SEND,
+ seq_trace_output(stoken, message, SEQ_TRACE_SEND,
receiver->common.id, sender);
seq_trace_size = 6; /* TUPLE5 */
}
@@ -741,7 +744,7 @@ erts_send_message(Process* sender,
INITIALIZE_SHCOPY(info);
msize = copy_shared_calculate(message, &info);
#else
- msize = size_object(message);
+ msize = size_object_litopt(message, &litarea);
#endif
mp = erts_alloc_message_heap_state(receiver,
&receiver_state,
@@ -760,7 +763,7 @@ erts_send_message(Process* sender,
DESTROY_SHCOPY(info);
#else
if (is_not_immed(message))
- message = copy_struct(message, msize, &hp, ohp);
+ message = copy_struct_litopt(message, msize, &hp, ohp, &litarea);
#endif
if (is_immed(stoken))
token = stoken;
@@ -796,7 +799,7 @@ erts_send_message(Process* sender,
INITIALIZE_SHCOPY(info);
msize = copy_shared_calculate(message, &info);
#else
- msize = size_object(message);
+ msize = size_object_litopt(message, &litarea);
#endif
mp = erts_alloc_message_heap_state(receiver,
&receiver_state,
@@ -810,7 +813,7 @@ erts_send_message(Process* sender,
DESTROY_SHCOPY(info);
#else
if (is_not_immed(message))
- message = copy_struct(message, msize, &hp, ohp);
+ message = copy_struct_litopt(message, msize, &hp, ohp, &litarea);
#endif
}
#ifdef USE_VM_PROBES
@@ -998,8 +1001,8 @@ erts_move_messages_off_heap(Process *c_p)
hp = hfrag->mem;
if (is_not_immed(ERL_MESSAGE_TERM(mp)))
ERL_MESSAGE_TERM(mp) = copy_struct(ERL_MESSAGE_TERM(mp),
- msg_sz, &hp,
- &hfrag->off_heap);
+ msg_sz, &hp,
+ &hfrag->off_heap);
if (is_not_immed(ERL_MESSAGE_TOKEN(mp)))
ERL_MESSAGE_TOKEN(mp) = copy_struct(ERL_MESSAGE_TOKEN(mp),
token_sz, &hp,
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index c6127a4967..6bd27d11f4 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -627,9 +627,12 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
MBUF(&menv->phony_proc) = NULL;
}
} else {
- Uint sz = size_object(msg);
+ erts_literal_area_t litarea;
ErlOffHeap *ohp;
Eterm *hp;
+ Uint sz;
+ INITIALIZE_LITERAL_PURGE_AREA(litarea);
+ sz = size_object_litopt(msg, &litarea);
if (env && !env->tracee) {
flush_env(env);
mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
@@ -649,7 +652,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
ohp = &bp->off_heap;
}
}
- msg = copy_struct(msg, sz, &hp, ohp);
+ msg = copy_struct_litopt(msg, sz, &hp, ohp, &litarea);
}
ERL_MESSAGE_TERM(mp) = msg;
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index b5d8c5bc75..3b246f5def 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -11099,6 +11099,9 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
#ifdef SHCOPY_SPAWN
erts_shcopy_t info;
INITIALIZE_SHCOPY(info);
+#else
+ erts_literal_area_t litarea;
+ INITIALIZE_LITERAL_PURGE_AREA(litarea);
#endif
erts_smp_proc_lock(parent, ERTS_PROC_LOCKS_ALL_MINOR);
@@ -11157,7 +11160,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
#ifdef SHCOPY_SPAWN
arg_size = copy_shared_calculate(args, &info);
#else
- arg_size = size_object(args);
+ arg_size = size_object_litopt(args, &litarea);
#endif
heap_need = arg_size;
@@ -11181,7 +11184,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
}
p->schedule_count = 0;
ASSERT(p->min_heap_size == erts_next_heap_size(p->min_heap_size, 0));
-
+
p->u.initial[INITIAL_MOD] = mod;
p->u.initial[INITIAL_FUN] = func;
p->u.initial[INITIAL_ARI] = (Uint) arity;
@@ -11239,7 +11242,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->arg_reg[2] = copy_shared_perform(args, arg_size, &info, &p->htop, &p->off_heap);
DESTROY_SHCOPY(info);
#else
- p->arg_reg[2] = copy_struct(args, arg_size, &p->htop, &p->off_heap);
+ p->arg_reg[2] = copy_struct_litopt(args, arg_size, &p->htop, &p->off_heap, &litarea);
#endif
p->arity = 3;
@@ -12276,7 +12279,6 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
erts_port_demonitor(pcontext->p,
ERTS_PORT_DEMONITOR_ORIGIN_ON_DEATHBED,
prt, mon->ref, NULL);
- return; /* let erts_port_demonitor do the deletion */
} else { /* remote by pid */
ASSERT(is_external_pid(mon->pid));
dep = external_pid_dist_entry(mon->pid);
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index d14d237ca6..c4396488f5 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -3108,6 +3108,7 @@ erts_tracer_update(ErtsTracer *tracer, const ErtsTracer new_tracer)
if (is_not_nil(*tracer)) {
Uint offs = 2;
UWord size = 2 * sizeof(Eterm) + sizeof(ErtsThrPrgrLaterOp);
+ ErtsThrPrgrLaterOp *lop;
ASSERT(is_list(*tracer));
if (is_not_immed(ERTS_TRACER_STATE(*tracer))) {
hf = (void*)(((char*)(ptr_val(*tracer)) - offsetof(ErlHeapFragment, mem)));
@@ -3115,6 +3116,16 @@ erts_tracer_update(ErtsTracer *tracer, const ErtsTracer new_tracer)
size = hf->alloc_size * sizeof(Eterm) + sizeof(ErlHeapFragment);
ASSERT(offs == size_object(*tracer));
}
+
+ /* sparc assumes that all structs are double word aligned, so we
+ have to align the ErtsThrPrgrLaterOp struct otherwise it may
+ segfault.*/
+ if ((UWord)(ptr_val(*tracer) + offs) % (sizeof(UWord)*2) == sizeof(UWord))
+ offs += 1;
+
+ lop = (ErtsThrPrgrLaterOp*)(ptr_val(*tracer) + offs);
+ ASSERT((UWord)lop % (sizeof(UWord)*2) == 0);
+
/* We schedule the free:ing of the tracer until after a thread progress
has been made so that we know that no schedulers have any references
to it. Because we do this, it is possible to release all locks of a
@@ -3122,9 +3133,7 @@ erts_tracer_update(ErtsTracer *tracer, const ErtsTracer new_tracer)
without having to worry if it is free'd.
*/
erts_schedule_thr_prgr_later_cleanup_op(
- free_tracer, (void*)(*tracer),
- (ErtsThrPrgrLaterOp*)(ptr_val(*tracer) + offs),
- size);
+ free_tracer, (void*)(*tracer), lop, size);
}
if (is_nil(new_tracer)) {
@@ -3141,9 +3150,10 @@ erts_tracer_update(ErtsTracer *tracer, const ErtsTracer new_tracer)
Eterm *hp, tracer_state = ERTS_TRACER_STATE(new_tracer),
tracer_module = ERTS_TRACER_MODULE(new_tracer);
Uint sz = size_object(tracer_state);
- hf = new_message_buffer(sz + 2 /* cons cell */ + (sizeof(ErtsThrPrgrLaterOp)+sizeof(Eterm)-1)/sizeof(Eterm));
+ hf = new_message_buffer(sz + 2 /* cons cell */ +
+ (sizeof(ErtsThrPrgrLaterOp)+sizeof(Eterm)-1)/sizeof(Eterm) + 1);
hp = hf->mem + 2;
- hf->used_size -= (sizeof(ErtsThrPrgrLaterOp)+sizeof(Eterm)-1)/sizeof(Eterm);
+ hf->used_size -= (sizeof(ErtsThrPrgrLaterOp)+sizeof(Eterm)-1)/sizeof(Eterm) + 1;
*tracer = copy_struct(tracer_state, sz, &hp, &hf->off_heap);
*tracer = CONS(hf->mem, tracer_module, *tracer);
ASSERT((void*)(((char*)(ptr_val(*tracer)) - offsetof(ErlHeapFragment, mem))) == hf);
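The SPARC fix in the erl_trace.c hunk above bumps the word offset by one when the address where the ErtsThrPrgrLaterOp would land is only single-word aligned. A small self-contained sketch of that adjustment follows; UWord here is a stand-in typedef and align_to_double_word() is an invented helper name.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t UWord;                /* stand-in for the emulator's UWord */

/* Given a word-aligned base pointer and an offset in words, bump the offset
 * so that base + offs is aligned to two machine words, the way the hunk
 * above aligns the ErtsThrPrgrLaterOp placed after the copied tracer term. */
static size_t align_to_double_word(const UWord *base, size_t offs)
{
    if ((UWord) (base + offs) % (sizeof(UWord) * 2) == sizeof(UWord))
        offs += 1;                      /* base is word aligned, one extra word suffices */
    return offs;
}

int main(void)
{
    UWord frag[8];
    size_t offs = align_to_double_word(frag, 3);
    assert((UWord) (frag + offs) % (sizeof(UWord) * 2) == 0);
    printf("double-word aligned offset: %zu words\n", offs);
    return 0;
}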
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index bf2e50e52f..1423973739 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -1085,8 +1085,8 @@ typedef struct {
Eterm* shtable_start;
ErtsAlcType_t shtable_alloc_type;
Uint literal_size;
- Eterm *range_ptr;
- Uint range_sz;
+ Eterm *lit_purge_ptr;
+ Uint lit_purge_sz;
} erts_shcopy_t;
#define INITIALIZE_SHCOPY(info) \
@@ -1095,8 +1095,8 @@ do { \
info.bitstore_start = info.bitstore_default; \
info.shtable_start = info.shtable_default; \
info.literal_size = 0; \
- info.range_ptr = erts_clrange.ptr; \
- info.range_sz = erts_clrange.sz; \
+ info.lit_purge_ptr = erts_clrange.ptr; \
+ info.lit_purge_sz = erts_clrange.sz; \
} while(0)
#define DESTROY_SHCOPY(info) \
@@ -1113,18 +1113,35 @@ do { \
} while(0)
/* copy.c */
+typedef struct {
+ Eterm *lit_purge_ptr;
+ Uint lit_purge_sz;
+} erts_literal_area_t;
+
+#define INITIALIZE_LITERAL_PURGE_AREA(Area) \
+ do { \
+ (Area).lit_purge_ptr = erts_clrange.ptr; \
+ (Area).lit_purge_sz = erts_clrange.sz; \
+ } while(0)
+
Eterm copy_object_x(Eterm, Process*, Uint);
#define copy_object(Term, Proc) copy_object_x(Term,Proc,0)
-Uint size_object(Eterm);
+Uint size_object_x(Eterm, erts_literal_area_t*);
+#define size_object(Term) size_object_x(Term,NULL)
+#define size_object_litopt(Term,LitArea) size_object_x(Term,LitArea)
+
Uint copy_shared_calculate(Eterm, erts_shcopy_t*);
Eterm copy_shared_perform(Eterm, Uint, erts_shcopy_t*, Eterm**, ErlOffHeap*);
Uint size_shared(Eterm);
-Eterm copy_struct_x(Eterm, Uint, Eterm**, ErlOffHeap*, Uint* bsz);
+Eterm copy_struct_x(Eterm, Uint, Eterm**, ErlOffHeap*, Uint*, erts_literal_area_t*);
#define copy_struct(Obj,Sz,HPP,OH) \
- copy_struct_x(Obj,Sz,HPP,OH,NULL)
+ copy_struct_x(Obj,Sz,HPP,OH,NULL,NULL)
+#define copy_struct_litopt(Obj,Sz,HPP,OH,LitArea) \
+ copy_struct_x(Obj,Sz,HPP,OH,NULL,LitArea)
+
Eterm copy_shallow(Eterm*, Uint, Eterm**, ErlOffHeap*);
void erts_move_multi_frags(Eterm** hpp, ErlOffHeap*, ErlHeapFragment* first,
diff --git a/erts/emulator/beam/hash.c b/erts/emulator/beam/hash.c
index e255b961f1..cd038d100b 100644
--- a/erts/emulator/beam/hash.c
+++ b/erts/emulator/beam/hash.c
@@ -35,9 +35,9 @@
static const int h_size_table[] = {
2, 5, 11, 23, 47, 97, 197, 397, 797, /* double upto here */
1201, 1597,
- 2411, 3203,
+ 2411, 3203,
4813, 6421,
- 9643, 12853,
+ 9643, 12853,
19289, 25717,
51437,
102877,
@@ -49,8 +49,8 @@ static const int h_size_table[] = {
6584983,
13169977,
26339969,
- 52679969,
- -1
+ 52679969,
+ -1
};
/*
@@ -69,7 +69,7 @@ void hash_get_info(HashInfo *hi, Hash *h)
for (i = 0; i < size; i++) {
int depth = 0;
HashBucket* b = h->bucket[i];
-
+
while (b != (HashBucket*) 0) {
objects++;
depth++;
@@ -112,7 +112,7 @@ void hash_info(int to, void *arg, Hash* h)
/*
* Returns size of table in bytes. Stored objects not included.
*/
-int
+int
hash_table_sz(Hash *h)
{
int i;
@@ -190,7 +190,7 @@ void hash_delete(Hash* h)
HashBucket* b = h->bucket[i];
while (b != (HashBucket*) 0) {
HashBucket* b_next = b->next;
-
+
h->fun.free((void*) b);
b = b_next;
}
@@ -250,7 +250,7 @@ void* hash_get(Hash* h, void* tmpl)
HashValue hval = h->fun.hash(tmpl);
int ix = hval % h->size;
HashBucket* b = h->bucket[ix];
-
+
while(b != (HashBucket*) 0) {
if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0))
return (void*) b;
@@ -294,7 +294,7 @@ void* hash_erase(Hash* h, void* tmpl)
int ix = hval % h->size;
HashBucket* b = h->bucket[ix];
HashBucket* prev = 0;
-
+
while(b != 0) {
if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0)) {
if (prev != 0)
@@ -326,7 +326,7 @@ hash_remove(Hash *h, void *tmpl)
int ix = hval % h->size;
HashBucket *b = h->bucket[ix];
HashBucket *prev = NULL;
-
+
while (b) {
if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0)) {
if (prev)
@@ -355,4 +355,3 @@ void hash_foreach(Hash* h, void (*func)(void *, void *), void *func_arg2)
}
}
}
-
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 675fafa726..85647b8500 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -85,7 +85,7 @@ erts_heap_alloc(Process* p, Uint need, Uint xtra)
&& HEAP_TOP(p) >= p->space_verified_from
&& HEAP_TOP(p) + need <= p->space_verified_from + p->space_verified
&& HEAP_LIMIT(p) - HEAP_TOP(p) >= need) {
-
+
Uint consumed = need + (HEAP_TOP(p) - p->space_verified_from);
ASSERT(consumed <= p->space_verified);
p->space_verified -= consumed;
@@ -638,7 +638,7 @@ erts_bld_atom_uword_2tup_list(Uint **hpp, Uint *szp,
ui = uint_to_big(uints[i], *hpp);
*hpp += BIG_UINT_HEAP_SIZE;
}
-
+
res = CONS(*hpp+3, TUPLE2(*hpp, atoms[i], ui), res);
*hpp += 5;
}
@@ -676,14 +676,14 @@ erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length,
ui1 = uint_to_big(uints1[i], *hpp);
*hpp += BIG_UINT_HEAP_SIZE;
}
-
+
if (IS_USMALL(0, uints2[i]))
ui2 = make_small(uints2[i]);
else {
ui2 = uint_to_big(uints2[i], *hpp);
*hpp += BIG_UINT_HEAP_SIZE;
}
-
+
res = CONS(*hpp+4, TUPLE3(*hpp, atoms[i], ui1, ui2), res);
*hpp += 6;
}
@@ -794,7 +794,7 @@ hash_binary_bytes(Eterm bin, Uint sz, Uint32 hash)
Uint b;
Uint lshift = bitoffs;
Uint rshift = 8 - lshift;
-
+
while (sz--) {
b = (previous << lshift) & 0xFF;
previous = *ptr++;
@@ -805,7 +805,7 @@ hash_binary_bytes(Eterm bin, Uint sz, Uint32 hash)
b = (previous << lshift) & 0xFF;
previous = *ptr++;
b |= previous >> rshift;
-
+
b >>= 8 - bitsize;
hash = (hash*FUNNY_NUMBER1 + b) * FUNNY_NUMBER12 + bitsize;
}
@@ -835,21 +835,21 @@ Uint32 make_hash(Eterm term_arg)
do { \
Uint32 x = (Uint32) (Expr); \
hash = \
- (((((hash)*(Prime1) + (x & 0xFF)) * (Prime1) + \
- ((x >> 8) & 0xFF)) * (Prime1) + \
- ((x >> 16) & 0xFF)) * (Prime1) + \
+ (((((hash)*(Prime1) + (x & 0xFF)) * (Prime1) + \
+ ((x >> 8) & 0xFF)) * (Prime1) + \
+ ((x >> 16) & 0xFF)) * (Prime1) + \
(x >> 24)); \
} while(0)
-#define UINT32_HASH_RET(Expr, Prime1, Prime2) \
+#define UINT32_HASH_RET(Expr, Prime1, Prime2) \
UINT32_HASH_STEP(Expr, Prime1); \
hash = hash * (Prime2); \
- break
-
-
+ break
+
+
/*
* Significant additions needed for real 64 bit port with larger fixnums.
- */
+ */
/*
* Note, for the simple 64bit port, not utilizing the
@@ -864,7 +864,7 @@ tail_recur:
hash = hash*FUNNY_NUMBER3 + 1;
break;
case ATOM_DEF:
- hash = hash*FUNNY_NUMBER1 +
+ hash = hash*FUNNY_NUMBER1 +
(atom_tab(atom_val(term))->slot.bucket.hvalue);
break;
case SMALL_DEF:
@@ -893,9 +893,9 @@ tail_recur:
Export* ep = *((Export **) (export_val(term) + 1));
hash = hash * FUNNY_NUMBER11 + ep->code[2];
- hash = hash*FUNNY_NUMBER1 +
+ hash = hash*FUNNY_NUMBER1 +
(atom_tab(atom_val(ep->code[0]))->slot.bucket.hvalue);
- hash = hash*FUNNY_NUMBER1 +
+ hash = hash*FUNNY_NUMBER1 +
(atom_tab(atom_val(ep->code[1]))->slot.bucket.hvalue);
break;
}
@@ -906,7 +906,7 @@ tail_recur:
Uint num_free = funp->num_free;
hash = hash * FUNNY_NUMBER10 + num_free;
- hash = hash*FUNNY_NUMBER1 +
+ hash = hash*FUNNY_NUMBER1 +
(atom_tab(atom_val(funp->fe->module))->slot.bucket.hvalue);
hash = hash*FUNNY_NUMBER2 + funp->fe->old_index;
hash = hash*FUNNY_NUMBER2 + funp->fe->old_uniq;
@@ -931,7 +931,7 @@ tail_recur:
UINT32_HASH_RET(internal_ref_numbers(term)[0],FUNNY_NUMBER9,FUNNY_NUMBER10);
case EXTERNAL_REF_DEF:
UINT32_HASH_RET(external_ref_numbers(term)[0],FUNNY_NUMBER9,FUNNY_NUMBER10);
- case FLOAT_DEF:
+ case FLOAT_DEF:
{
FloatDef ff;
GET_DOUBLE(term, ff);
@@ -958,12 +958,12 @@ tail_recur:
** as multiplications on a Sparc is so slow.
*/
hash = hash*FUNNY_NUMBER2 + unsigned_val(*list);
-
+
if (is_not_list(CDR(list))) {
WSTACK_PUSH(stack, MAKE_HASH_CDR_POST_OP);
term = CDR(list);
goto tail_recur;
- }
+ }
list = list_val(CDR(list));
}
WSTACK_PUSH2(stack, CDR(list), MAKE_HASH_CDR_PRE_OP);
@@ -1004,17 +1004,17 @@ tail_recur:
}
hash *= is_neg ? FUNNY_NUMBER4 : FUNNY_NUMBER3;
break;
- }
+ }
case MAP_DEF:
hash = hash*FUNNY_NUMBER13 + FUNNY_NUMBER14 + make_hash2(term);
break;
- case TUPLE_DEF:
+ case TUPLE_DEF:
{
Eterm* ptr = tuple_val(term);
Uint arity = arityval(*ptr);
WSTACK_PUSH3(stack, (UWord) arity, (UWord)(ptr+1), (UWord) arity);
- op = MAKE_HASH_TUPLE_OP;
+ op = MAKE_HASH_TUPLE_OP;
}/*fall through*/
case MAKE_HASH_TUPLE_OP:
case MAKE_HASH_TERM_ARRAY_OP:
@@ -1031,8 +1031,8 @@ tail_recur:
hash = hash*FUNNY_NUMBER9 + arity;
}
break;
- }
-
+ }
+
default:
erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_hash(0x%X,0x%X)\n", term, op);
return 0;
@@ -1159,8 +1159,8 @@ make_hash2(Eterm term)
if (y < 0) { \
UINT32_HASH(-y, AConst); \
/* Negative numbers are unnecessarily mixed twice. */ \
- } \
- UINT32_HASH(y, AConst); \
+ } \
+ UINT32_HASH(y, AConst); \
} while(0)
#define IS_SSMALL28(x) (((Uint) (((x) >> (28-1)) + 1)) < 2)
@@ -1242,7 +1242,7 @@ make_hash2(Eterm term)
int arity = header_arity(hdr);
Eterm* elem = tuple_val(term);
UINT32_HASH(arity, HCONST_9);
- if (arity == 0) /* Empty tuple */
+ if (arity == 0) /* Empty tuple */
goto hash2_common;
for (i = arity; ; i--) {
term = elem[i];
@@ -1329,7 +1329,7 @@ make_hash2(Eterm term)
{
Export* ep = *((Export **) (export_val(term) + 1));
UINT32_HASH_2
- (ep->code[2],
+ (ep->code[2],
atom_tab(atom_val(ep->code[0]))->slot.bucket.hvalue,
HCONST);
UINT32_HASH
@@ -1343,7 +1343,7 @@ make_hash2(Eterm term)
ErlFunThing* funp = (ErlFunThing *) fun_val(term);
Uint num_free = funp->num_free;
UINT32_HASH_2
- (num_free,
+ (num_free,
atom_tab(atom_val(funp->fe->module))->slot.bucket.hvalue,
HCONST);
UINT32_HASH_2
@@ -1468,7 +1468,7 @@ make_hash2(Eterm term)
goto hash2_common;
}
break;
-
+
default:
erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_hash2(0x%X)\n", term);
}
@@ -1541,7 +1541,7 @@ make_hash2(Eterm term)
}
case HASH_MAP_PAIR:
hash_xor_pairs ^= hash;
- hash = 0;
+ hash = 0;
goto hash2_common;
default:
break;
@@ -1678,17 +1678,22 @@ make_internal_hash(Eterm term)
* the order in which keys and values are encountered.
* We therefore calculate context independent hashes for all .
* key-value pairs and then xor them together.
+ *
+ * We *do* need to use an initial seed for each pair, i.e. the
+ * hash value, so the hash value is reset for each pair with 'hash'.
+ * If we don't, no additional entropy is given to the system and the
+ * hash collision resolution in maps:from_list/1 would fail.
*/
ESTACK_PUSH(s, hash_xor_pairs);
ESTACK_PUSH(s, hash);
ESTACK_PUSH(s, HASH_MAP_TAIL);
- hash = 0;
- hash_xor_pairs = 0;
for (i = size - 1; i >= 0; i--) {
+ ESTACK_PUSH(s, hash); /* initial seed for all pairs */
ESTACK_PUSH(s, HASH_MAP_PAIR);
ESTACK_PUSH(s, vs[i]);
ESTACK_PUSH(s, ks[i]);
}
+ hash_xor_pairs = 0;
goto pop_next;
}
case HAMT_SUBTAG_HEAD_ARRAY:
@@ -1700,7 +1705,6 @@ make_internal_hash(Eterm term)
ESTACK_PUSH(s, hash_xor_pairs);
ESTACK_PUSH(s, hash);
ESTACK_PUSH(s, HASH_MAP_TAIL);
- hash = 0;
hash_xor_pairs = 0;
}
switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
@@ -1717,6 +1721,7 @@ make_internal_hash(Eterm term)
while (i) {
if (is_list(*ptr)) {
Eterm* cons = list_val(*ptr);
+ ESTACK_PUSH(s, hash); /* initial seed for all pairs */
ESTACK_PUSH(s, HASH_MAP_PAIR);
ESTACK_PUSH(s, CDR(cons));
ESTACK_PUSH(s, CAR(cons));
@@ -1906,6 +1911,7 @@ make_internal_hash(Eterm term)
pop_next:
if (ESTACK_ISEMPTY(s)) {
DESTROY_ESTACK(s);
+
return hash;
}
@@ -1920,7 +1926,7 @@ make_internal_hash(Eterm term)
}
case HASH_MAP_PAIR:
hash_xor_pairs ^= hash;
- hash = 0;
+ hash = (Uint32) ESTACK_POP(s); /* initial seed for all pairs */
goto pop_next;
case HASH_CDR:
@@ -1953,8 +1959,8 @@ Uint32 make_broken_hash(Eterm term)
DECLARE_WSTACK(stack);
unsigned op;
tail_recur:
- op = tag_val_def(term);
- for (;;) {
+ op = tag_val_def(term);
+ for (;;) {
switch (op) {
case NIL_DEF:
hash = hash*FUNNY_NUMBER3 + 1;
@@ -1976,8 +1982,7 @@ tail_recur:
{ /* like a bignum */
Uint32 y4 = (Uint32) y2;
hash = hash*FUNNY_NUMBER2 + ((y4 << 16) | (y4 >> 16));
- if (y3)
- {
+ if (y3) {
hash = hash*FUNNY_NUMBER2 + ((y3 << 16) | (y3 >> 16));
arity++;
}
@@ -2020,9 +2025,9 @@ tail_recur:
Export* ep = *((Export **) (export_val(term) + 1));
hash = hash * FUNNY_NUMBER11 + ep->code[2];
- hash = hash*FUNNY_NUMBER1 +
+ hash = hash*FUNNY_NUMBER1 +
(atom_tab(atom_val(ep->code[0]))->slot.bucket.hvalue);
- hash = hash*FUNNY_NUMBER1 +
+ hash = hash*FUNNY_NUMBER1 +
(atom_tab(atom_val(ep->code[1]))->slot.bucket.hvalue);
break;
}
@@ -2033,7 +2038,7 @@ tail_recur:
Uint num_free = funp->num_free;
hash = hash * FUNNY_NUMBER10 + num_free;
- hash = hash*FUNNY_NUMBER1 +
+ hash = hash*FUNNY_NUMBER1 +
(atom_tab(atom_val(funp->fe->module))->slot.bucket.hvalue);
hash = hash*FUNNY_NUMBER2 + funp->fe->old_index;
hash = hash*FUNNY_NUMBER2 + funp->fe->old_uniq;
@@ -2065,7 +2070,7 @@ tail_recur:
case EXTERNAL_REF_DEF:
hash = hash*FUNNY_NUMBER9 + external_ref_numbers(term)[0];
break;
- case FLOAT_DEF:
+ case FLOAT_DEF:
{
FloatDef ff;
GET_DOUBLE(term, ff);
@@ -2149,7 +2154,7 @@ tail_recur:
}
#else
-#error "unsupported D_EXP size"
+#error "unsupported D_EXP size"
#endif
hash = hash * (is_neg ? FUNNY_NUMBER3 : FUNNY_NUMBER2) + arity;
}
@@ -2158,14 +2163,14 @@ tail_recur:
case MAP_DEF:
hash = hash*FUNNY_NUMBER13 + FUNNY_NUMBER14 + make_hash2(term);
break;
- case TUPLE_DEF:
+ case TUPLE_DEF:
{
Eterm* ptr = tuple_val(term);
Uint arity = arityval(*ptr);
WSTACK_PUSH3(stack, (UWord) arity, (UWord) (ptr+1), (UWord) arity);
op = MAKE_HASH_TUPLE_OP;
- }/*fall through*/
+ }/*fall through*/
case MAKE_HASH_TUPLE_OP:
case MAKE_HASH_TERM_ARRAY_OP:
{
@@ -2193,7 +2198,7 @@ tail_recur:
DESTROY_WSTACK(stack);
return hash;
-
+
#undef MAKE_HASH_TUPLE_OP
#undef MAKE_HASH_TERM_ARRAY_OP
#undef MAKE_HASH_CDR_PRE_OP
@@ -2353,13 +2358,13 @@ static int do_send_term_to_logger(Eterm tag, Eterm gleader,
}
static ERTS_INLINE int
-send_info_to_logger(Eterm gleader, char *buf, int len)
+send_info_to_logger(Eterm gleader, char *buf, int len)
{
return do_send_to_logger(am_info_msg, gleader, buf, len);
}
static ERTS_INLINE int
-send_warning_to_logger(Eterm gleader, char *buf, int len)
+send_warning_to_logger(Eterm gleader, char *buf, int len)
{
Eterm tag;
switch (erts_error_logger_warnings) {
@@ -2371,7 +2376,7 @@ send_warning_to_logger(Eterm gleader, char *buf, int len)
}
static ERTS_INLINE int
-send_error_to_logger(Eterm gleader, char *buf, int len)
+send_error_to_logger(Eterm gleader, char *buf, int len)
{
return do_send_to_logger(am_error, gleader, buf, len);
}
@@ -2613,7 +2618,7 @@ tailrecur_ne:
break; /* not equal */
case TAG_PRIMARY_BOXED:
- {
+ {
Eterm hdr = *boxed_val(a);
switch (hdr & _TAG_HEADER_MASK) {
case ARITYVAL_SUBTAG:
@@ -2639,7 +2644,7 @@ tailrecur_ne:
Uint b_bitsize;
Uint a_bitoffs;
Uint b_bitoffs;
-
+
if (!is_binary(b)) {
goto not_equal;
}
@@ -2671,7 +2676,7 @@ tailrecur_ne:
{
ErlFunThing* f1;
ErlFunThing* f2;
-
+
if (!is_fun(b))
goto not_equal;
f1 = (ErlFunThing *) fun_val(a);
@@ -2702,7 +2707,7 @@ tailrecur_ne:
if(ap->header == bp->header && ap->node == bp->node) {
ASSERT(1 == external_data_words(a));
ASSERT(1 == external_data_words(b));
-
+
if (ap->data.ui[0] == bp->data.ui[0]) goto pop_next;
}
break; /* not equal */
@@ -2759,7 +2764,7 @@ tailrecur_ne:
if (alen == 3 && blen == 3) {
/* Most refs are of length 3 */
if (anum[1] == bnum[1] && anum[2] == bnum[2]) {
- goto pop_next;
+ goto pop_next;
} else {
goto not_equal;
}
@@ -2784,7 +2789,7 @@ tailrecur_ne:
for (i = common_len; i < blen; i++)
if (bnum[i] != 0)
goto not_equal;
- }
+ }
}
goto pop_next;
}
@@ -2792,7 +2797,7 @@ tailrecur_ne:
case NEG_BIG_SUBTAG:
{
int i;
-
+
if (!is_big(b))
goto not_equal;
aa = big_val(a);
@@ -2810,7 +2815,7 @@ tailrecur_ne:
{
FloatDef af;
FloatDef bf;
-
+
if (is_float(b)) {
GET_DOUBLE(a, af);
GET_DOUBLE(b, bf);
@@ -2889,7 +2894,7 @@ term_array: /* arrays in 'aa' and 'bb', length in 'sz' */
}
goto tailrecur_ne;
}
-
+
pop_next:
if (!WSTACK_ISEMPTY(stack)) {
UWord something = WSTACK_POP(stack);
@@ -3093,7 +3098,7 @@ tailrecur_ne:
}
anode = erts_this_node;
adata = internal_port_data(a);
-
+
port_common:
CMP_NODES(anode, bnode);
ON_CMP_GOTO((Sint)(adata - bdata));
@@ -3111,7 +3116,7 @@ tailrecur_ne:
}
anode = erts_this_node;
adata = internal_pid_data(a);
-
+
pid_common:
if (adata != bdata) {
RETURN_NEQ(adata < bdata ? -1 : 1);
@@ -3336,7 +3341,7 @@ tailrecur_ne:
diff = f1->num_free - f2->num_free;
if (diff != 0) {
RETURN_NEQ(diff);
- }
+ }
i = f1->num_free;
if (i == 0) goto pop_next;
aa = f1->env;
@@ -3397,10 +3402,10 @@ tailrecur_ne:
anum = internal_thing_ref_numbers(athing);
alen = internal_thing_ref_no_of_numbers(athing);
}
-
+
ref_common:
CMP_NODES(anode, bnode);
-
+
ASSERT(alen > 0 && blen > 0);
if (alen != blen) {
if (alen > blen) {
@@ -3418,7 +3423,7 @@ tailrecur_ne:
} while (alen < blen);
}
}
-
+
ASSERT(alen == blen);
for (i = (Sint) alen - 1; i >= 0; i--)
if (anum[i] != bnum[i])
@@ -3633,8 +3638,8 @@ term_array: /* arrays in 'aa' and 'bb', length in 'i' */
}
a = *aa;
b = *bb;
- goto tailrecur;
-
+ goto tailrecur;
+
pop_next:
if (!WSTACK_ISEMPTY(stack)) {
UWord something = WSTACK_POP(stack);
@@ -3897,19 +3902,19 @@ intlist_to_buf(Eterm list, char *buf, Sint len)
Eterm* listptr;
Sint sz = 0;
- if (is_nil(list))
+ if (is_nil(list))
return 0;
if (is_not_list(list))
return -1;
listptr = list_val(list);
while (sz < len) {
- if (!is_byte(*listptr))
+ if (!is_byte(*listptr))
return -1;
buf[sz++] = unsigned_val(*listptr);
if (is_nil(*(listptr + 1)))
return(sz);
- if (is_not_list(*(listptr + 1)))
+ if (is_not_list(*(listptr + 1)))
return -1;
listptr = list_val(*(listptr + 1));
}
@@ -4157,10 +4162,10 @@ do { \
} else if (yield_support && --yield_count <= 0)
goto L_yield;
}
-
+
res = len;
- L_return:
+ L_return:
DESTROY_ESTACK(s);
@@ -5053,7 +5058,7 @@ Process *p;
if(p)
print_process_info(ERTS_PRINT_STDERR, NULL, p);
}
-
+
void ppi(Eterm pid)
{
pp(erts_proc_lookup(pid));
@@ -5079,5 +5084,3 @@ ps(Process* p, Eterm* stop)
}
}
#endif
-
-
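The make_internal_hash() comment in the utils.c hunks above explains why every map key/value pair must start from the incoming seed before the per-pair results are XOR-ed together: without the seed the pairs add no extra entropy and collision resolution in maps:from_list/1 breaks. Here is a toy sketch of that order-independent scheme; demo_hash_step() is an invented mixing function, not the emulator's hash.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Toy mixing function standing in for the emulator's internal term hash. */
static uint32_t demo_hash_step(uint32_t seed, uint32_t value)
{
    seed ^= value + 0x9e3779b9u + (seed << 6) + (seed >> 2);
    return seed;
}

/* Order-independent hashing of key/value pairs: every pair is hashed starting
 * from the same incoming seed and the per-pair results are XOR-ed together,
 * so the result is independent of visit order while the seed still feeds
 * entropy into every pair. */
static uint32_t demo_hash_pairs(uint32_t seed, const uint32_t *keys,
                                const uint32_t *vals, size_t n)
{
    uint32_t xor_acc = 0;
    size_t i;
    for (i = 0; i < n; i++) {
        uint32_t h = seed;              /* reset to the initial seed for each pair */
        h = demo_hash_step(h, keys[i]);
        h = demo_hash_step(h, vals[i]);
        xor_acc ^= h;                   /* XOR is commutative: order does not matter */
    }
    return xor_acc;
}

int main(void)
{
    uint32_t k[]  = {1, 2, 3},  v[]  = {10, 20, 30};
    uint32_t kr[] = {3, 2, 1},  vr[] = {30, 20, 10};
    printf("%u == %u\n",
           (unsigned) demo_hash_pairs(42, k, v, 3),
           (unsigned) demo_hash_pairs(42, kr, vr, 3)); /* same hash regardless of pair order */
    return 0;
}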
diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c
index 5ce0e1de9e..254d3baeb1 100644
--- a/erts/emulator/drivers/common/inet_drv.c
+++ b/erts/emulator/drivers/common/inet_drv.c
@@ -11407,6 +11407,7 @@ static void packet_inet_command(ErlDrvData e, char* buf, ErlDrvSizeT len)
VALGRIND_MAKE_MEM_DEFINED(mhdr.msg_control, mhdr.msg_controllen); /*suppress "uninitialised bytes"*/
mhdr.msg_flags = 0; /* Not used with "sendmsg" */
+ inet_output_count(desc, data_len);
/* Now do the actual sending. NB: "flags" in "sendmsg" itself are NOT
used: */
code = sock_sendmsg(desc->s, &mhdr, 0);
diff --git a/erts/emulator/hipe/hipe_gc.c b/erts/emulator/hipe/hipe_gc.c
index 566c65882e..68c65dea27 100644
--- a/erts/emulator/hipe/hipe_gc.c
+++ b/erts/emulator/hipe/hipe_gc.c
@@ -237,3 +237,122 @@ void gensweep_nstack(Process *p, Eterm **ptr_old_htop, Eterm **ptr_n_htop)
}
abort();
}
+
+Eterm *sweep_literals_nstack(Process *p, Eterm *old_htop, char *area,
+ Uint area_size)
+{
+ /* known nstack walk state */
+ Eterm *nsp;
+ Eterm *nsp_end;
+ const struct sdesc *sdesc;
+ /* arch-specific nstack walk state */
+ struct nstack_walk_state walk_state;
+
+ ASSERT(!p->hipe.gc_is_unsafe);
+
+ if (!p->hipe.nstack) {
+ ASSERT(!p->hipe.nsp && !p->hipe.nstend);
+ return old_htop;
+ }
+ if (!nstack_walk_init_check(p))
+ return old_htop;
+
+ ASSERT(p->hipe.nsp && p->hipe.nstend);
+ nsp = nstack_walk_nsp_begin(p);
+ nsp_end = nstack_walk_nsp_end(p);
+ sdesc = nstack_walk_init_sdesc_ignore_trap(p, &walk_state);
+
+ while (!nstack_walk_nsp_reached_end(nsp, nsp_end)) {
+ unsigned long ra;
+ unsigned sdesc_size = nstack_walk_frame_size(sdesc);
+ unsigned i = 0;
+ unsigned mask = sdesc->livebits[0];
+ for (;;) {
+ if (mask & 1) {
+ Eterm *nsp_i = nstack_walk_frame_index(nsp, i);
+ Eterm gval = *nsp_i;
+ if (is_boxed(gval)) {
+ Eterm *ptr = boxed_val(gval);
+ Eterm val = *ptr;
+ if (IS_MOVED_BOXED(val)) {
+ ASSERT(is_boxed(val));
+ *nsp_i = val;
+ } else if (ErtsInArea(ptr, area, area_size)) {
+ MOVE_BOXED(ptr, val, old_htop, nsp_i);
+ }
+ } else if (is_list(gval)) {
+ Eterm *ptr = list_val(gval);
+ Eterm val = *ptr;
+ if (IS_MOVED_CONS(val)) {
+ *nsp_i = ptr[1];
+ } else if (ErtsInArea(ptr, area, area_size)) {
+ MOVE_CONS(ptr, val, old_htop, nsp_i);
+ }
+ }
+ }
+ if (++i >= sdesc_size)
+ break;
+ if (i & 31)
+ mask >>= 1;
+ else
+ mask = sdesc->livebits[i >> 5];
+ }
+ ra = nstack_walk_frame_ra(nsp, sdesc);
+ if (ra == (unsigned long)nbif_stack_trap_ra)
+ ra = (unsigned long)p->hipe.ngra;
+ sdesc = hipe_find_sdesc(ra);
+ nsp = nstack_walk_next_frame(nsp, sdesc_size);
+ }
+ return old_htop;
+}
+
+int
+nstack_any_heap_ref_ptrs(Process *rp, char* mod_start, Uint mod_size)
+{
+ Eterm *nsp;
+ Eterm *nsp_end;
+ const struct sdesc *sdesc;
+ /* arch-specific nstack walk state */
+ struct nstack_walk_state walk_state;
+
+ ASSERT(!rp->hipe.gc_is_unsafe);
+
+ if (!rp->hipe.nstack || !nstack_walk_init_check(rp)) return 0;
+ ASSERT(rp->hipe.nsp && rp->hipe.nstend);
+ nsp = nstack_walk_nsp_begin(rp);
+ nsp_end = nstack_walk_nsp_end(rp);
+ sdesc = nstack_walk_init_sdesc_ignore_trap(rp, &walk_state);
+
+ while (!nstack_walk_nsp_reached_end(nsp, nsp_end)) {
+ unsigned long ra;
+ unsigned sdesc_size = nstack_walk_frame_size(sdesc);
+ unsigned i = 0;
+ unsigned mask = sdesc->livebits[0];
+ for (;;) {
+ if (mask & 1) {
+ Eterm *nsp_i = nstack_walk_frame_index(nsp, i);
+ Eterm val = *nsp_i;
+ switch (primary_tag(val)) {
+ case TAG_PRIMARY_BOXED:
+ case TAG_PRIMARY_LIST:
+ if (ErtsInArea(val, mod_start, mod_size)) {
+ return 1;
+ }
+ break;
+ }
+ }
+ if (++i >= sdesc_size)
+ break;
+ if (i & 31)
+ mask >>= 1;
+ else
+ mask = sdesc->livebits[i >> 5];
+ }
+ ra = nstack_walk_frame_ra(nsp, sdesc);
+ if (ra == (unsigned long)nbif_stack_trap_ra)
+ ra = (unsigned long)rp->hipe.ngra;
+ sdesc = hipe_find_sdesc(ra);
+ nsp = nstack_walk_next_frame(nsp, sdesc_size);
+ }
+ return 0;
+}
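Both nstack walkers added in hipe_gc.c above consume the frame's live-slot bitmap one 32-bit word at a time, reloading a fresh word after every 32 slots. A standalone sketch of just that bitmask walk follows, with invented demo_ names in place of the real sdesc/livebits structures.

#include <stdio.h>

/* Visit every slot index whose bit is set in a packed live-bit map, reloading
 * a fresh 32-bit word after each group of 32 slots, the way the walkers
 * consume sdesc->livebits[]. */
static void demo_walk_livebits(const unsigned *livebits, unsigned nslots,
                               void (*visit)(unsigned idx))
{
    unsigned i = 0;
    unsigned mask = livebits[0];
    for (;;) {
        if (mask & 1)
            visit(i);
        if (++i >= nslots)
            break;
        if (i & 31)
            mask >>= 1;                 /* next bit within the current word */
        else
            mask = livebits[i >> 5];    /* crossed into the next 32-bit word */
    }
}

static void demo_print(unsigned idx)
{
    printf("live slot %u\n", idx);
}

int main(void)
{
    unsigned bits[2] = { 0x00000005u, 0x00000001u };  /* slots 0, 2 and 32 are live */
    demo_walk_livebits(bits, 40, demo_print);
    return 0;
}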
diff --git a/erts/emulator/hipe/hipe_risc_gc.h b/erts/emulator/hipe/hipe_risc_gc.h
index 315f8e7f9f..09568c140e 100644
--- a/erts/emulator/hipe/hipe_risc_gc.h
+++ b/erts/emulator/hipe/hipe_risc_gc.h
@@ -51,6 +51,19 @@ nstack_walk_init_sdesc(const Process *p, struct nstack_walk_state *state)
return sdesc;
}
+static inline const struct sdesc*
+nstack_walk_init_sdesc_ignore_trap(const Process *p,
+ struct nstack_walk_state *state)
+{
+ unsigned long ra = (unsigned long)p->hipe.nra;
+ const struct sdesc *sdesc;
+ if (ra == (unsigned long)&nbif_stack_trap_ra)
+ ra = (unsigned long)p->hipe.ngra;
+ sdesc = hipe_find_sdesc(ra);
+ state->sdesc0 = sdesc;
+ return sdesc;
+}
+
static inline void nstack_walk_update_trap(Process *p, const struct sdesc *sdesc0)
{
Eterm *nsp = p->hipe.nsp;
diff --git a/erts/emulator/hipe/hipe_stack.h b/erts/emulator/hipe/hipe_stack.h
index 4ea7d5c031..afa0ed4256 100644
--- a/erts/emulator/hipe/hipe_stack.h
+++ b/erts/emulator/hipe/hipe_stack.h
@@ -131,5 +131,8 @@ static __inline__ void hipe_check_nstack(Process *p, unsigned nwords)
*/
extern Eterm *fullsweep_nstack(Process *p, Eterm *n_htop);
extern void gensweep_nstack(Process *p, Eterm **ptr_old_htop, Eterm **ptr_n_htop);
+extern Eterm *sweep_literals_nstack(Process *p, Eterm *n_htop, char *area,
+ Uint area_size);
+extern int nstack_any_heap_ref_ptrs(Process *, char* mod_start, Uint mod_size);
#endif /* HIPE_STACK_H */
diff --git a/erts/emulator/hipe/hipe_x86_gc.h b/erts/emulator/hipe/hipe_x86_gc.h
index c22b28c2d5..00fe03d8f9 100644
--- a/erts/emulator/hipe/hipe_x86_gc.h
+++ b/erts/emulator/hipe/hipe_x86_gc.h
@@ -81,6 +81,23 @@ nstack_walk_init_sdesc(const Process *p, struct nstack_walk_state *state)
#endif
}
+static inline const struct sdesc*
+nstack_walk_init_sdesc_ignore_trap(const Process *p,
+ struct nstack_walk_state *state)
+{
+#ifdef SKIP_YOUNGEST_FRAME
+ unsigned long ra = p->hipe.nsp[0];
+ const struct sdesc *sdesc;
+ if (ra == (unsigned long)nbif_stack_trap_ra)
+ ra = (unsigned long)p->hipe.ngra;
+ sdesc = hipe_find_sdesc(ra);
+ state->sdesc0 = sdesc;
+ return sdesc;
+#else
+ return nstack_walk_init_sdesc(p, state);
+#endif
+}
+
static inline void nstack_walk_update_trap(Process *p, const struct sdesc *sdesc0)
{
#ifdef SKIP_YOUNGEST_FRAME
diff --git a/erts/emulator/internal_doc/Tracing.md b/erts/emulator/internal_doc/Tracing.md
index 30bc5327a7..728f315263 100644
--- a/erts/emulator/internal_doc/Tracing.md
+++ b/erts/emulator/internal_doc/Tracing.md
@@ -57,7 +57,7 @@ generations of breakpoints are kept and indentified by index of 0 and
1. The global atomic variables `erts_active_bp_index` will determine
which generation of breakpoints running code will use.
-### Atomicy Without Atomic Operations
+### Atomicity Without Atomic Operations
Not using the code loading generations (or any other code duplication)
means that `trace_pattern` must at some point write to the active beam
diff --git a/erts/emulator/test/Makefile b/erts/emulator/test/Makefile
index b580211eff..2e48c475d5 100644
--- a/erts/emulator/test/Makefile
+++ b/erts/emulator/test/Makefile
@@ -70,6 +70,7 @@ MODULES= \
guard_SUITE \
hash_SUITE \
hibernate_SUITE \
+ hipe_SUITE \
list_bif_SUITE \
lttng_SUITE \
map_SUITE \
diff --git a/erts/emulator/test/distribution_SUITE.erl b/erts/emulator/test/distribution_SUITE.erl
index c6939a695d..159fbc806b 100644
--- a/erts/emulator/test/distribution_SUITE.erl
+++ b/erts/emulator/test/distribution_SUITE.erl
@@ -43,7 +43,7 @@
lost_exit/1, link_to_dead/1, link_to_dead_new_node/1,
applied_monitor_node/1, ref_port_roundtrip/1, nil_roundtrip/1,
trap_bif_1/1, trap_bif_2/1, trap_bif_3/1,
- stop_dist/1,
+ stop_dist/1,
dist_auto_connect_never/1, dist_auto_connect_once/1,
dist_parallel_send/1,
atom_roundtrip/1,
@@ -66,13 +66,13 @@
sendersender/4, sendersender2/4]).
%% epmd_module exports
--export([start_link/0, register_node/2, port_please/2]).
+-export([start_link/0, register_node/2, register_node/3, port_please/2]).
suite() ->
[{ct_hooks,[ts_install_cth]},
{timetrap, {minutes, 4}}].
-all() ->
+all() ->
[ping, {group, bulk_send}, {group, local_send},
link_to_busy, exit_to_busy, lost_exit, link_to_dead,
link_to_dead_new_node, applied_monitor_node,
@@ -83,7 +83,7 @@ all() ->
bad_dist_structure, {group, bad_dist_ext},
start_epmd_false, epmd_module].
-groups() ->
+groups() ->
[{bulk_send, [], [bulk_send_small, bulk_send_big, bulk_send_bigbig]},
{local_send, [],
[local_send_small, local_send_big, local_send_legal]},
@@ -844,59 +844,50 @@ dist_auto_connect_once(Config) when is_list(Config) ->
%% Result is sent here through relay node.
dist_auto_connect_never(Config) when is_list(Config) ->
Self = self(),
- {ok, RelayNode} =
- start_node(dist_auto_connect_relay),
- spawn(RelayNode,
+ {ok, RelayNode} = start_node(dist_auto_connect_relay),
+ spawn(RelayNode,
fun() ->
register(dist_auto_connect_relay, self()),
- dist_auto_connect_relay(Self)
+ dist_auto_connect_relay(Self)
end),
{ok, Handle} = dist_auto_connect_start(dist_auto_connect, never),
- Result =
- receive
- {do_dist_auto_connect, ok} ->
- ok;
- {do_dist_auto_connect, Error} ->
- {error, Error};
- Other ->
- {error, Other}
- after 32000 ->
- timeout
- end,
+ Result = receive
+ {do_dist_auto_connect, ok} ->
+ ok;
+ {do_dist_auto_connect, Error} ->
+ {error, Error};
+ Other ->
+ {error, Other}
+ after 32000 ->
+ timeout
+ end,
stop_node(RelayNode),
- Stopped = dist_auto_connect_stop(Handle),
- Junk =
- receive
- {do_dist_auto_connect, _} = J ->
- J
- after 0 ->
- ok
- end,
+ Stopped = dist_auto_connect_stop(Handle),
+ Junk = receive
+ {do_dist_auto_connect, _} = J -> J
+ after 0 -> ok
+ end,
{ok, ok, ok} = {Result, Stopped, Junk},
ok.
do_dist_auto_connect([never]) ->
Node = list_to_atom("dist_auto_connect_relay@" ++ hostname()),
- io:format("~p:do_dist_auto_connect([false]) Node=~p~n",
- [?MODULE, Node]),
+ io:format("~p:do_dist_auto_connect([false]) Node=~p~n", [?MODULE, Node]),
Ping = net_adm:ping(Node),
- io:format("~p:do_dist_auto_connect([false]) Ping=~p~n",
- [?MODULE, Ping]),
+ io:format("~p:do_dist_auto_connect([false]) Ping=~p~n", [?MODULE, Ping]),
Result = case Ping of
pang -> ok;
_ -> {error, Ping}
end,
- io:format("~p:do_dist_auto_connect([false]) Result=~p~n",
- [?MODULE, Result]),
+ io:format("~p:do_dist_auto_connect([false]) Result=~p~n", [?MODULE, Result]),
net_kernel:connect_node(Node),
catch {dist_auto_connect_relay, Node} ! {do_dist_auto_connect, Result};
% receive after 1000 -> ok end,
% halt();
do_dist_auto_connect(Arg) ->
- io:format("~p:do_dist_auto_connect(~p)~n",
- [?MODULE, Arg]),
+ io:format("~p:do_dist_auto_connect(~p)~n", [?MODULE, Arg]),
receive after 10000 -> ok end,
halt().
@@ -912,11 +903,11 @@ dist_auto_connect_start(Name, Value) when is_list(Name), is_atom(Value) ->
[%"xterm -e ",
atom_to_list(lib:progname()),
% " -noinput ",
- " -detached ",
+ " -detached ",
long_or_short(), " ", Name,
" -setcookie ", Cookie,
" -pa ", ModuleDir,
- " -s ", atom_to_list(?MODULE),
+ " -s ", atom_to_list(?MODULE),
" do_dist_auto_connect ", ValueStr,
" -kernel dist_auto_connect ", ValueStr]),
io:format("~p:dist_auto_connect_start() cmd: ~p~n", [?MODULE, Cmd]),
@@ -947,7 +938,7 @@ dist_auto_connect_stop(Port, Node, Pid, N) when is_integer(N) ->
end.
-dist_auto_connect_relay(Parent) ->
+dist_auto_connect_relay(Parent) ->
receive X ->
catch Parent ! X
end,
@@ -1321,7 +1312,7 @@ get_conflicting_unicode_atoms(CIX, N) ->
start_monitor(Offender,P) ->
Parent = self(),
Q = spawn(Offender,
- fun () ->
+ fun () ->
Ref = erlang:monitor(process,P),
Parent ! {self(),ref,Ref},
receive
@@ -1458,8 +1449,8 @@ bad_dist_structure(Config) when is_list(Config) ->
pong = rpc:call(Victim, net_adm, ping, [Offender]),
P ! two,
P ! check_msgs,
- receive
- {P, messages_checked} -> ok
+ receive
+ {P, messages_checked} -> ok
after 5000 ->
exit(victim_is_dead)
end,
@@ -1765,7 +1756,7 @@ send_bad_structure(Offender,Victim,Bad,WhereToPutSelf,PayLoad) ->
pong = net_adm:ping(Node),
DPrt = dport(Node),
Bad1 = case WhereToPutSelf of
- 0 ->
+ 0 ->
Bad;
N when N > 0 ->
setelement(N,Bad,self())
@@ -1779,8 +1770,8 @@ send_bad_structure(Offender,Victim,Bad,WhereToPutSelf,PayLoad) ->
port_command(DPrt, DData),
Parent ! {DData,Done}
end),
- receive
- {WhatSent,Done} ->
+ receive
+ {WhatSent,Done} ->
io:format("Offender sent ~p~n",[WhatSent]),
ok
after 5000 ->
@@ -1887,7 +1878,7 @@ dmsg_fake_hdr2() ->
1, size(A2), A2,
2, size(A3), A3].
-dmsg_ext(Term) ->
+dmsg_ext(Term) ->
<<131, Res/binary>> = term_to_binary(Term),
Res.
@@ -1934,7 +1925,9 @@ epmd_module(Config) when is_list(Config) ->
start_link() ->
ignore.
-register_node(_Name, Port) ->
+register_node(Name, Port) ->
+ register_node(Name, Port, inet_tcp).
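+%% The 2-arity clause just delegates to the 3-arity callback (which also gets
+%% the listen driver as an argument), so both callback variants keep working.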
+register_node(_Name, Port, _Driver) ->
%% Save the port number we're listening on.
application:set_env(kernel, dist_listen_port, Port),
Creation = rand:uniform(3),
@@ -1972,7 +1965,7 @@ start_node(Name, Args, Rel) when is_atom(Name), is_list(Rel) ->
[] -> [];
_ -> [{erl,[{release,Rel}]}]
end,
- test_server:start_node(Name, slave,
+ test_server:start_node(Name, slave,
[{args,
Args++" -setcookie "++Cookie++" -pa \""++Pa++"\""}
| RelArg]);
@@ -2040,17 +2033,15 @@ inet_rpc_server_loop(Sock) ->
start_relay_node(Node, Args) ->
Pa = filename:dirname(code:which(?MODULE)),
Cookie = "NOT"++atom_to_list(erlang:get_cookie()),
- {ok, LSock} = gen_tcp:listen(0, [binary, {packet, 4},
- {active, false}]),
+ {ok, LSock} = gen_tcp:listen(0, [binary, {packet, 4}, {active, false}]),
{ok, Port} = inet:port(LSock),
{ok, Host} = inet:gethostname(),
RunArg = "-run " ++ atom_to_list(?MODULE) ++ " inet_rpc_server " ++
Host ++ " " ++ integer_to_list(Port),
- {ok, NN} =
- test_server:start_node(Node, peer,
- [{args, Args ++
- " -setcookie "++Cookie++" -pa "++Pa++" "++
- RunArg}]),
+ {ok, NN} = test_server:start_node(Node, peer,
+ [{args, Args ++
+ " -setcookie "++Cookie++" -pa "++Pa++" "++
+ RunArg}]),
[N,H] = string:tokens(atom_to_list(NN),"@"),
{ok, Sock} = gen_tcp:accept(LSock),
pang = net_adm:ping(NN),
@@ -2066,7 +2057,7 @@ wait_dead(N,H,0) ->
wait_dead(N,H,X) ->
case erl_epmd:port_please(N,H) of
{port,_,_} ->
- receive
+ receive
after 1000 ->
ok
end,
diff --git a/erts/emulator/test/hipe_SUITE.erl b/erts/emulator/test/hipe_SUITE.erl
new file mode 100644
index 0000000000..3e682b8d88
--- /dev/null
+++ b/erts/emulator/test/hipe_SUITE.erl
@@ -0,0 +1,64 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2016. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-module(hipe_SUITE).
+-export([all/0, t_copy_literals/1]).
+
+all() ->
+ case erlang:system_info(hipe_architecture) of
+ undefined -> {skip, "HiPE is disabled"};
+ _ -> [t_copy_literals]
+ end.
+
+t_copy_literals(doc) ->
+    "Check that BEAM literals referenced from the HiPE stack are copied by"
+ " check_process_code";
+t_copy_literals(Config) when is_list(Config) ->
+    %% Compile the ref_cell and literals modules.
+ Data = proplists:get_value(data_dir, Config),
+ Priv = proplists:get_value(priv_dir, Config),
+ RefFile = filename:join(Data, "ref_cell"),
+ {ok,ref_cell} = c:c(RefFile, [{outdir,Priv},native]),
+ true = code:is_module_native(ref_cell),
+ LitFile = filename:join(Data, "literals"),
+ {ok,literals} = c:c(LitFile, [{outdir,Priv}]),
+
+ %% store references to literals on HiPE stacks
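+    %% (ref_cell is native-compiled, so the stored references live in native
+    %% stack frames rather than on the BEAM stack)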
+ PA = ref_cell:start_link(),
+ ref_cell:call(PA, {put_res_of, fun literals:a/0}),
+ PB = ref_cell:start_link_deep(),
+ ref_cell:call(PB, {put_res_of, fun literals:b/0}),
+
+ %% purge the literals
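+    %% The first purge_module/1 is wrapped in catch since there may be no old
+    %% code yet; delete_module/1 then makes the current code old, and the final
+    %% purge_module/1 frees the literal area, so any literals still referenced
+    %% by live processes must be copied onto their heaps first.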
+ _ = (catch erlang:purge_module(literals)),
+ true = erlang:delete_module(literals),
+ true = erlang:purge_module(literals),
+
+ %% check that the ex-literals are ok
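+    %% (had the literals not been copied, these reads would see freed memory)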
+ [a,b,c] = ref_cell:call(PA, get),
+ {a,b,c} = ref_cell:call(PB, get),
+
+ %% cleanup
+ ref_cell:call(PA, done),
+ ref_cell:call(PB, done),
+ _ = (catch erlang:purge_module(ref_cell)),
+ true = erlang:delete_module(ref_cell),
+ true = erlang:purge_module(ref_cell),
+ ok.
diff --git a/erts/emulator/test/hipe_SUITE_data/literals.erl b/erts/emulator/test/hipe_SUITE_data/literals.erl
new file mode 100644
index 0000000000..31e443970f
--- /dev/null
+++ b/erts/emulator/test/hipe_SUITE_data/literals.erl
@@ -0,0 +1,26 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2016. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-module(literals).
+
+-export([a/0, b/0]).
+
+a() -> [a,b,c].
+b() -> {a,b,c}.
diff --git a/erts/emulator/test/hipe_SUITE_data/ref_cell.erl b/erts/emulator/test/hipe_SUITE_data/ref_cell.erl
new file mode 100644
index 0000000000..2654e4077b
--- /dev/null
+++ b/erts/emulator/test/hipe_SUITE_data/ref_cell.erl
@@ -0,0 +1,64 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2016. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-module(ref_cell).
+
+-export([start_link/0, start_link_deep/0, call/2]).
+
+-compile(native).
+
+-define(DEPTH, 100).
+-define(ALLOCS, 500).
+
+start_link() ->
+ spawn_link(fun() -> loop(undefined) end).
+
+start_link_deep() ->
+ spawn_link(fun() -> go_deep(?DEPTH) end).
+
+%% Create a stack large enough to get a graylimit trap placed next time there's
+%% a minor gc.
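+%% The "+1" after the recursive call keeps go_deep/1 from being tail recursive,
+%% so every level leaves a frame on the native stack.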
+go_deep(0) ->
+ alloc_some(?ALLOCS),
+ loop(undefined),
+ 0;
+go_deep(Depth) ->
+ go_deep(Depth-1)+1.
+
+%% Do some allocation to trigger a minor gc
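+%% lists:seq/2 builds an Amount-element list on the heap; comparing its sum to
+%% the closed-form Amount*(Amount+1)/2 doubles as a sanity check.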
+alloc_some(Amount) ->
+ Check = (Amount * (Amount + 1)) div 2,
+ Check = lists:sum(lists:seq(1, Amount)).
+
+call(Pid, Call) ->
+ Pid ! {Call, self()},
+ receive {Pid, Res} -> Res end.
+
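+%% Minimal server loop holding a single term; that term is what keeps a
+%% (possibly purged) literal reachable from this process.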
+loop(Thing) ->
+ receive
+ {done, Pid} -> Pid ! {self(), done};
+ {{put_res_of, Fun}, Pid} ->
+ NewThing = Fun(),
+ Pid ! {self(), put},
+ loop(NewThing);
+ {get, Pid} ->
+ Pid ! {self(), Thing},
+ loop(Thing)
+ end.
diff --git a/erts/emulator/test/map_SUITE.erl b/erts/emulator/test/map_SUITE.erl
index b3870f0313..5af676c409 100644
--- a/erts/emulator/test/map_SUITE.erl
+++ b/erts/emulator/test/map_SUITE.erl
@@ -77,6 +77,7 @@
t_ets/1,
t_dets/1,
t_tracing/1,
+ t_hash_entropy/1,
%% instruction-level tests
t_has_map_fields/1,
@@ -140,6 +141,7 @@ all() -> [t_build_and_match_literals, t_build_and_match_literals_large,
t_pdict,
t_ets,
t_tracing,
+ t_hash_entropy,
%% instruction-level tests
t_has_map_fields,
@@ -3020,6 +3022,39 @@ do_badmap_17(Config) ->
id(I) -> I.
+%% OTP-13763
+t_hash_entropy(Config) when is_list(Config) ->
+ %% entropy bug in 18.3, 19.0
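+    %% With poor entropy, the 50000 map keys collide heavily and building and
+    %% looking up entries in M1/M2 below becomes pathologically slow.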
+ M1 = maps:from_list([{#{"id" => I}, ok}||I <- lists:seq(1,50000)]),
+
+ #{ #{"id" => 100} := ok,
+ #{"id" => 200} := ok,
+ #{"id" => 300} := ok,
+ #{"id" => 400} := ok,
+ #{"id" => 500} := ok,
+ #{"id" => 600} := ok,
+ #{"id" => 700} := ok,
+ #{"id" => 800} := ok,
+ #{"id" => 900} := ok,
+ #{"id" => 25061} := ok,
+ #{"id" => 39766} := ok } = M1,
+
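+    %% 33 entries put M0 above the flatmap limit of 32 keys, so the keys of M2
+    %% also exercise hashing of the hashmap representation.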
+ M0 = maps:from_list([{I,ok}||I <- lists:seq(1,33)]),
+ M2 = maps:from_list([{M0#{"id" => I}, ok}||I <- lists:seq(1,50000)]),
+
+ ok = maps:get(M0#{"id" => 100}, M2),
+ ok = maps:get(M0#{"id" => 200}, M2),
+ ok = maps:get(M0#{"id" => 300}, M2),
+ ok = maps:get(M0#{"id" => 400}, M2),
+ ok = maps:get(M0#{"id" => 500}, M2),
+ ok = maps:get(M0#{"id" => 600}, M2),
+ ok = maps:get(M0#{"id" => 700}, M2),
+ ok = maps:get(M0#{"id" => 800}, M2),
+ ok = maps:get(M0#{"id" => 900}, M2),
+ ok = maps:get(M0#{"id" => 25061}, M2),
+ ok = maps:get(M0#{"id" => 39766}, M2),
+ ok.
+
%% OTP-13146
%% Provoke major GC with a lot of "fat" maps on external format in msg queue
%% causing heap fragments to be allocated.
diff --git a/erts/emulator/test/port_SUITE.erl b/erts/emulator/test/port_SUITE.erl
index 5c0bfdffd4..a9814d7df9 100644
--- a/erts/emulator/test/port_SUITE.erl
+++ b/erts/emulator/test/port_SUITE.erl
@@ -104,6 +104,7 @@
mon_port_name_demonitor/1,
mon_port_named/1,
mon_port_origin_dies/1,
+ mon_port_owner_dies/1,
mon_port_pid_demonitor/1,
mon_port_remote_on_remote/1,
mon_port_driver_die/1,
@@ -173,6 +174,7 @@ all() ->
mon_port_remote_on_remote,
mon_port_bad_remote_on_local,
mon_port_origin_dies,
+ mon_port_owner_dies,
mon_port_named,
mon_port_bad_named,
mon_port_pid_demonitor,
@@ -2637,6 +2639,29 @@ mon_port_origin_dies(Config) ->
Port5 ! {self(), {command, <<"1">>}}, % make port quit
ok.
+%% Port and monitor owner dies before the port is closed.
+%% This test case checks for a regression (a memory leak in ERTS) when the
+%% controlling and monitoring process is the same process and that process dies.
+mon_port_owner_dies(Config) ->
+ Self = self(),
+ Proc = spawn(fun() ->
+ Port = create_port(Config, ["-h1", "-q"]),
+ Self ! {test_started, Port},
+ erlang:monitor(port, Port),
+ receive stop -> ok end
+ end),
+ erlang:monitor(process, Proc), % we want to sync with its death
+ Port = receive {test_started,P} -> P
+ after 1000 -> ?assert(false) end,
+ ?assertMatch({proc_monitors, true, port_monitored_by, true},
+ port_is_monitored(Proc, Port)),
+ Proc ! stop,
+ %% receive from monitor
+ receive ExitP5 -> ?assertMatch({'DOWN', _, process, Proc, _}, ExitP5)
+ after 1000 -> ?assert(false) end,
+ ok.
+
%% Monitor a named port
mon_port_named(Config) ->
Name6 = test_port6,