path: root/erts/emulator/beam/erl_gc.c
author    Rickard Green <[email protected]>  2015-09-16 15:02:50 +0200
committer Rickard Green <[email protected]>  2015-11-12 15:25:48 +0100
commit    3ac08f9b668613a4292436979eacc61863c2ab94 (patch)
tree      8635b3377426bf1a332148b778189e9c61005c6d /erts/emulator/beam/erl_gc.c
parent    ec4a7e7150c47523a553cac806c86ec08ab2dae7 (diff)
Fragmented young heap generation and off_heap_message_queue option
* The youngest generation of the heap can now consist of multiple blocks. Heap fragments and message fragments are added to the youngest generation when needed without triggering a GC. After a GC the youngest generation is contained in one single block.
* The off_heap_message_queue process flag has been added. When enabled, all message data in the queue is kept off heap. When a message is selected from the queue, the message fragment (or heap fragment) containing the actual message is attached to the youngest generation. Messages stored off heap are not part of the GC.
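
For orientation, the sketch below condenses the accounting idea behind the new young_gen_usage() helper added in the diff below: with a fragmented young generation, the live young data is the fragment total plus whatever was used in the original (possibly abandoned) heap block. ProcSketch and the abandoned_heap_used field are simplified stand-ins for the relevant Process fields and the F_ABANDONED_HEAP_USE flag; this is an illustration of the idea, not the real ERTS code.

    /*
     * Simplified sketch of the young-generation accounting introduced by
     * this patch (compare young_gen_usage() in the diff below). ProcSketch
     * is a stand-in for the relevant fields of the real Process structure.
     */
    typedef unsigned long Uint;
    typedef unsigned long Eterm;

    typedef struct {
        Eterm *heap, *htop, *hend;  /* current (possibly fragment) heap block */
        Eterm *abandoned_heap;      /* original heap block, if abandoned */
        Uint   heap_sz;             /* size of the original heap block */
        Uint   mbuf_sz;             /* total size of heap/message fragments */
        int    abandoned_heap_used; /* stand-in for the F_ABANDONED_HEAP_USE flag */
    } ProcSketch;

    /* Words of live data currently in the young generation. */
    static Uint
    young_gen_usage_sketch(const ProcSketch *p)
    {
        Uint hsz = p->mbuf_sz;

        if (!p->abandoned_heap) {
            /* Normal case: all young data lives in the current heap block. */
            hsz += (Uint) (p->htop - p->heap);
        } else {
            /* Abandoned case: count what was used in the original block
             * (recorded in its last word when the heap was abandoned)... */
            if (p->abandoned_heap_used)
                hsz += p->abandoned_heap[p->heap_sz - 1];
            else
                hsz += p->heap_sz;
            /* ...and subtract the unused tail of the newest fragment,
             * which mbuf_sz still includes. */
            hsz -= (Uint) (p->hend - p->htop);
        }
        return hsz;
    }
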
Diffstat (limited to 'erts/emulator/beam/erl_gc.c')
-rw-r--r--  erts/emulator/beam/erl_gc.c | 688
1 file changed, 334 insertions(+), 354 deletions(-)
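
In the same spirit, here is a hedged sketch of how delay_garbage_collection() in the diff below abandons the current heap block when GC is disabled: it plugs the unused gap with a single dummy term header and records the used size in the block's last word. make_filler_header() stands in for the real make_pos_bignum_header() macro, and the function signature is invented purely for illustration.

    /*
     * Sketch of the abandoned-heap trick used by delay_garbage_collection()
     * in the diff below. make_filler_header() stands in for the real
     * make_pos_bignum_header() macro; the signature is illustrative only.
     */
    typedef unsigned long Uint;
    typedef unsigned long Eterm;

    /* A header word that makes the GC skip the following 'arity' words
     * as one opaque filler term. */
    static Eterm make_filler_header(Uint arity) { return arity; /* + tag bits in ERTS */ }

    static void
    abandon_heap_sketch(Eterm *heap, Eterm *htop, Eterm *hend,
                        Eterm **abandoned_heap, int *used_size_recorded)
    {
        *used_size_recorded = 0;
        /* Plug the hole [htop, hend) with one dummy term so the abandoned
         * block can later be swept as a contiguous area... */
        if (htop < hend) {
            *htop = make_filler_header((Uint) (hend - htop - 1));
            /* ...and, if there is room, remember how much of the block was
             * actually used by storing that size in its last word (the
             * patch sets F_ABANDONED_HEAP_USE to mark this word as valid). */
            if (htop + 1 < hend) {
                hend[-1] = (Uint) (htop - heap);
                *used_size_recorded = 1;
            }
        }
        *abandoned_heap = heap;
    }
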
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 2d7b7cafa4..6a52e1a890 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -42,8 +42,6 @@
#include "dtrace-wrapper.h"
#include "erl_bif_unique.h"
-#define ERTS_CONTINUOUS_NEW_HEAP
-
#define ERTS_INACT_WR_PB_LEAVE_MUCH_LIMIT 1
#define ERTS_INACT_WR_PB_LEAVE_MUCH_PERCENTAGE 20
#define ERTS_INACT_WR_PB_LEAVE_LIMIT 10
@@ -60,58 +58,6 @@
# define ERTS_GC_ASSERT(B) ((void) 1)
#endif
-#ifdef ERTS_CONTINUOUS_NEW_HEAP
-#define ERTS_IS_NEW_HEAP_PTR__(TPtr, Ptr, NhPtr, NhSz, OhPtr, OhSz) \
- ErtsInArea((Ptr), (NhPtr), (NhSz))
-#define ERTS_IS_LITERAL_PTR__(TPtr, Ptr, NhPtr, NhSz, OhPtr, OhSz) \
- (!ErtsInArea((Ptr), (NhPtr), (NhSz)) \
- && !ErtsInArea((Ptr), (OhPtr), (OhSz)))
-
-#ifdef ERTS_GC_DEBUG
-#define ERTS_IS_NEW_HEAP_PTR(TPtr, Ptr, NhPtr, NhSz, OhPtr, OhSz) \
- (ERTS_IS_NEW_HEAP_PTR__((TPtr), (Ptr), (NhPtr), (NhSz), (OhPtr), (OhSz)) \
- ? (ERTS_GC_ASSERT(!erts_is_literal((TPtr), (Ptr)) \
- && !ErtsInArea((Ptr), (OhPtr), (OhSz))), 1) \
- : (ERTS_GC_ASSERT(erts_is_literal((TPtr), (Ptr)) \
- || ErtsInArea((Ptr), (OhPtr), (OhSz))), 0))
-#define ERTS_IS_LITERAL_PTR(TPtr, Ptr, NhPtr, NhSz, OhPtr, OhSz) \
- (ERTS_IS_LITERAL_PTR__((TPtr), (Ptr), (NhPtr), (NhSz), (OhPtr), (OhSz)) \
- ? (ERTS_GC_ASSERT(erts_is_literal((TPtr), (Ptr))), 1) \
- : (ERTS_GC_ASSERT(!erts_is_literal((TPtr), (Ptr))), 0))
-#endif
-
-#else
-
-#define ERTS_IS_NEW_HEAP_PTR__(TPtr, Ptr, NhPtr, NhSz, OhPtr, OhSz) \
- (!erts_is_literal((TPtr), (Ptr)) && !ErtsInArea((Ptr), (OhPtr), (OhSz)))
-#define ERTS_IS_LITERAL_PTR__(TPtr, Ptr, NhPtr, NhSz, OhPtr, OhSz) \
- (erts_is_literal((TPtr), (Ptr)))
-
-#ifdef ERTS_GC_DEBUG
-#define ERTS_IS_NEW_HEAP_PTR(TPtr, Ptr, NhPtr, NhSz, OhPtr, OhSz) \
- (ERTS_IS_NEW_HEAP_PTR__((TPtr), (Ptr), (NhPtr), (NhSz), (OhPtr), (OhSz)) \
- ? (ERTS_GC_ASSERT(ErtsInArea((Ptr), (NhPtr), (NhSz))), 1) \
- : (ERTS_GC_ASSERT(!ErtsInArea((Ptr), (NhPtr), (NhSz))), 0))
-#define ERTS_IS_LITERAL_PTR(TPtr, Ptr, NhPtr, NhSz, OhPtr, OhSz) \
- (ERTS_IS_LITERAL_PTR__((TPtr), (Ptr), (NhPtr), (NhSz), (OhPtr), (OhSz)) \
- ? (ERTS_GC_ASSERT(!ErtsInArea((Ptr), (NhPtr), (NhSz)) \
- && !ErtsInArea((Ptr), (OhPtr), (OhSz))), 1) \
- : (ERTS_GC_ASSERT(ErtsInArea((Ptr), (NhPtr), (NhSz)) \
- || ErtsInArea((Ptr), (OhPtr), (OhSz))), 0))
-#endif
-
-#endif
-
-#ifndef ERTS_IS_NEW_HEAP_PTR
-#define ERTS_IS_NEW_HEAP_PTR(TPtr, Ptr, NhPtr, NhSz, OhPtr, OhSz) \
- ERTS_IS_NEW_HEAP_PTR__((TPtr), (Ptr), (NhPtr), (NhSz), (OhPtr), (OhSz))
-#endif
-
-#ifndef ERTS_IS_LITERAL_PTR
-#define ERTS_IS_LITERAL_PTR(TPtr, Ptr, NhPtr, NhSz, OhPtr, OhSz) \
- ERTS_IS_LITERAL_PTR__((TPtr), (Ptr), (NhPtr), (NhSz), (OhPtr), (OhSz))
-#endif
-
/*
* Returns number of elements in an array.
*/
@@ -132,10 +78,10 @@
#define ErtsGcQuickSanityCheck(P) \
do { \
ASSERT((P)->heap < (P)->hend); \
- ASSERT((P)->heap_sz == (P)->hend - (P)->heap); \
+ ASSERT((p)->abandoned_heap || (P)->heap_sz == (P)->hend - (P)->heap); \
ASSERT((P)->heap <= (P)->htop && (P)->htop <= (P)->hend); \
ASSERT((P)->heap <= (P)->stop && (P)->stop <= (P)->hend); \
- ASSERT((P)->heap <= (P)->high_water && (P)->high_water <= (P)->hend);\
+ ASSERT((p)->abandoned_heap || ((P)->heap <= (P)->high_water && (P)->high_water <= (P)->hend)); \
OverRunCheck((P)); \
} while (0)
#else
@@ -163,31 +109,32 @@ typedef struct {
static Uint setup_rootset(Process*, Eterm*, int, Rootset*);
static void cleanup_rootset(Rootset *rootset);
-static Uint combined_message_size(Process* p);
static void remove_message_buffers(Process* p);
static Eterm *full_sweep_heaps(Process *p,
int hibernate,
Eterm *n_heap, Eterm* n_htop,
- char *h, Uint h_size,
char *oh, Uint oh_size,
Eterm *objv, int nobj);
-static int major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl);
-static int minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl);
-static void do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj);
+static int garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
+ int need, Eterm* objv, int nobj);
+static int major_collection(Process* p, ErlHeapFragment *live_hf_end,
+ int need, Eterm* objv, int nobj, Uint *recl);
+static int minor_collection(Process* p, ErlHeapFragment *live_hf_end,
+ int need, Eterm* objv, int nobj, Uint *recl);
+static void do_minor(Process *p, ErlHeapFragment *live_hf_end,
+ char *mature, Uint mature_size,
+ Uint new_sz, Eterm* objv, int nobj);
static Eterm *sweep_new_heap(Eterm *n_hp, Eterm *n_htop,
- char* new_heap, Uint new_heap_size,
char* old_heap, Uint old_heap_size);
static Eterm *sweep_heaps(Eterm *n_hp, Eterm *n_htop,
- char* new_heap, Uint new_heap_size,
char* old_heap, Uint old_heap_size);
static Eterm* sweep_literal_area(Eterm* n_hp, Eterm* n_htop,
- char* new_heap, Uint new_heap_size,
- char* old_heap, Uint old_heap_size,
- char* src, Uint src_size);
+ char* old_heap, Uint old_heap_size,
+ char* src, Uint src_size);
static Eterm* sweep_literals_to_old_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop,
char* src, Uint src_size);
-static Eterm* collect_heap_frags(Process* p, Eterm* heap,
- Eterm* htop, Eterm* objv, int nobj);
+static Eterm* collect_live_heap_frags(Process* p, ErlHeapFragment *live_hf_end,
+ Eterm* heap, Eterm* htop, Eterm* objv, int nobj);
static void adjust_after_fullsweep(Process *p, int need, Eterm *objv, int nobj);
static void shrink_new_heap(Process *p, Uint new_sz, Eterm *objv, int nobj);
static void grow_new_heap(Process *p, Uint new_sz, Eterm* objv, int nobj);
@@ -204,7 +151,6 @@ static void init_gc_info(ErtsGCInfo *gcip);
#ifdef HARDDEBUG
static void disallow_heap_frag_ref_in_heap(Process* p);
static void disallow_heap_frag_ref_in_old_heap(Process* p);
-static void disallow_heap_frag_ref(Process* p, Eterm* n_htop, Eterm* objv, int nobj);
#endif
#if defined(ARCH_64)
@@ -411,10 +357,19 @@ erts_offset_off_heap(ErlOffHeap *ohp, Sint offs, Eterm* low, Eterm* high)
#undef ptr_within
Eterm
-erts_gc_after_bif_call(Process* p, Eterm result, Eterm* regs, Uint arity)
+erts_gc_after_bif_call_lhf(Process* p, ErlHeapFragment *live_hf_end,
+ Eterm result, Eterm* regs, Uint arity)
{
int cost;
+ if (p->flags & F_HIBERNATE_SCHED) {
+ /*
+ * We just hibernated. We do *not* want to mess
+ * up the hibernation by an ordinary GC...
+ */
+ return result;
+ }
+
if (is_non_value(result)) {
if (p->freason == TRAP) {
#if HIPE
@@ -422,21 +377,28 @@ erts_gc_after_bif_call(Process* p, Eterm result, Eterm* regs, Uint arity)
regs = ERTS_PROC_GET_SCHDATA(p)->x_reg_array;
}
#endif
- cost = erts_garbage_collect(p, 0, regs, p->arity);
+ cost = garbage_collect(p, live_hf_end, 0, regs, p->arity);
} else {
- cost = erts_garbage_collect(p, 0, regs, arity);
+ cost = garbage_collect(p, live_hf_end, 0, regs, arity);
}
} else {
Eterm val[1];
val[0] = result;
- cost = erts_garbage_collect(p, 0, val, 1);
+ cost = garbage_collect(p, live_hf_end, 0, val, 1);
result = val[0];
}
BUMP_REDS(p, cost);
return result;
}
+Eterm
+erts_gc_after_bif_call(Process* p, Eterm result, Eterm* regs, Uint arity)
+{
+ return erts_gc_after_bif_call_lhf(p, ERTS_INVALID_HFRAG_PTR,
+ result, regs, arity);
+}
+
static ERTS_INLINE void reset_active_writer(Process *p)
{
struct erl_off_heap_header* ptr;
@@ -450,6 +412,117 @@ static ERTS_INLINE void reset_active_writer(Process *p)
}
}
+#define ERTS_DELAY_GC_EXTRA_FREE 40
+
+static int
+delay_garbage_collection(Process *p, ErlHeapFragment *live_hf_end, int need)
+{
+ ErlHeapFragment *hfrag;
+ Eterm *orig_heap, *orig_hend, *orig_htop, *orig_stop;
+ Eterm *stop, *hend;
+ Uint hsz, ssz;
+
+ ERTS_HOLE_CHECK(p);
+
+ if (p->live_hf_end == ERTS_INVALID_HFRAG_PTR)
+ p->live_hf_end = live_hf_end;
+
+ if (need == 0)
+ return 1;
+
+ /*
+ * Satisfy need in a heap fragment...
+ */
+ ASSERT(need > 0);
+
+ orig_heap = p->heap;
+ orig_hend = p->hend;
+ orig_htop = p->htop;
+ orig_stop = p->stop;
+
+ ssz = orig_hend - orig_stop;
+ hsz = ssz + need + ERTS_DELAY_GC_EXTRA_FREE;
+
+ hfrag = new_message_buffer(hsz);
+ hfrag->next = p->mbuf;
+ p->mbuf = hfrag;
+ p->mbuf_sz += hsz;
+ p->heap = p->htop = &hfrag->mem[0];
+ p->hend = hend = &hfrag->mem[hsz];
+ p->stop = stop = hend - ssz;
+ sys_memcpy((void *) stop, (void *) orig_stop, ssz * sizeof(Eterm));
+
+ if (p->abandoned_heap) {
+ /* Active heap already in a fragment; adjust it... */
+ ErlHeapFragment *hfrag = ((ErlHeapFragment *)
+ (((char *) orig_heap)
+ - offsetof(ErlHeapFragment, mem)));
+ Uint unused = orig_hend - orig_htop;
+ ASSERT(hfrag->used_size == hfrag->alloc_size);
+ ASSERT(hfrag->used_size >= unused);
+ hfrag->used_size -= unused;
+ p->mbuf_sz -= unused;
+ }
+ else {
+ /* Do not leave a hole in the abandoned heap... */
+ if (orig_htop < orig_hend) {
+ *orig_htop = make_pos_bignum_header(orig_hend-orig_htop-1);
+ if (orig_htop + 1 < orig_hend) {
+ orig_hend[-1] = (Uint) (orig_htop - orig_heap);
+ p->flags |= F_ABANDONED_HEAP_USE;
+ }
+ }
+ p->abandoned_heap = orig_heap;
+ }
+
+#ifdef CHECK_FOR_HOLES
+ p->last_htop = p->htop;
+ p->heap_hfrag = hfrag;
+#endif
+
+ /* Make sure that we do a proper GC as soon as possible... */
+ p->flags |= F_FORCE_GC;
+ return CONTEXT_REDS;
+}
+
+static ERTS_FORCE_INLINE Uint
+young_gen_usage(Process *p)
+{
+ Uint hsz;
+ Eterm *aheap;
+
+ hsz = p->mbuf_sz;
+ aheap = p->abandoned_heap;
+ if (!aheap)
+ hsz += p->htop - p->heap;
+ else {
+ /* used in orig heap */
+ if (p->flags & F_ABANDONED_HEAP_USE)
+ hsz += aheap[p->heap_sz-1];
+ else
+ hsz += p->heap_sz;
+ /* Remove unused part in latest fragment */
+ hsz -= p->hend - p->htop;
+ }
+ return hsz;
+}
+
+#define ERTS_GET_ORIG_HEAP(Proc, Heap, HTop) \
+ do { \
+ Eterm *aheap__ = (Proc)->abandoned_heap; \
+ if (!aheap__) { \
+ (Heap) = (Proc)->heap; \
+ (HTop) = (Proc)->htop; \
+ } \
+ else { \
+ (Heap) = aheap__; \
+ if ((Proc)->flags & F_ABANDONED_HEAP_USE) \
+ (HTop) = aheap__ + aheap__[(Proc)->heap_sz-1]; \
+ else \
+ (HTop) = aheap__ + (Proc)->heap_sz; \
+ } \
+ } while (0)
+
/*
* Garbage collect a process.
*
@@ -458,8 +531,9 @@ static ERTS_INLINE void reset_active_writer(Process *p)
* objv: Array of terms to add to rootset; that is to preserve.
* nobj: Number of objects in objv.
*/
-int
-erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
+static int
+garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
+ int need, Eterm* objv, int nobj)
{
Uint reclaimed_now = 0;
int done = 0;
@@ -469,10 +543,11 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
DTRACE_CHARBUF(pidbuf, DTRACE_TERM_BUF_SIZE);
#endif
- if (p->flags & F_DISABLE_GC) {
- ASSERT(need == 0);
- return 1;
- }
+ if (p->flags & F_DISABLE_GC)
+ return delay_garbage_collection(p, live_hf_end, need);
+
+ if (p->live_hf_end != ERTS_INVALID_HFRAG_PTR)
+ live_hf_end = p->live_hf_end;
esdp = erts_get_scheduler_data();
@@ -480,7 +555,7 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
trace_gc(p, am_gc_start);
}
- (void) erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
+ erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
if (erts_system_monitor_long_gc != 0)
start_time = erts_get_monotonic_time(esdp);
@@ -505,11 +580,11 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
while (!done) {
if ((FLAGS(p) & F_NEED_FULLSWEEP) != 0) {
DTRACE2(gc_major_start, pidbuf, need);
- done = major_collection(p, need, objv, nobj, &reclaimed_now);
+ done = major_collection(p, live_hf_end, need, objv, nobj, &reclaimed_now);
DTRACE2(gc_major_end, pidbuf, reclaimed_now);
} else {
DTRACE2(gc_minor_start, pidbuf, need);
- done = minor_collection(p, need, objv, nobj, &reclaimed_now);
+ done = minor_collection(p, live_hf_end, need, objv, nobj, &reclaimed_now);
DTRACE2(gc_minor_end, pidbuf, reclaimed_now);
}
}
@@ -551,6 +626,7 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
esdp->gc_info.reclaimed += reclaimed_now;
FLAGS(p) &= ~F_FORCE_GC;
+ p->live_hf_end = ERTS_INVALID_HFRAG_PTR;
#ifdef CHECK_FOR_HOLES
/*
@@ -583,6 +659,12 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
}
}
+int
+erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
+{
+ return garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj);
+}
+
/*
* Place all living data on a the new heap; deallocate any old heap.
* Meant to be used by hibernate/3.
@@ -606,16 +688,16 @@ erts_garbage_collect_hibernate(Process* p)
*/
erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
ErtsGcQuickSanityCheck(p);
- ASSERT(p->mbuf_sz == 0);
- ASSERT(p->mbuf == 0);
+ ASSERT(p->mbuf == NULL);
ASSERT(p->stop == p->hend); /* Stack must be empty. */
+ ASSERT(!p->abandoned_heap);
/*
* Do it.
*/
- heap_size = p->heap_sz + (p->old_htop - p->old_heap);
+ heap_size = p->heap_sz + (p->old_htop - p->old_heap) + p->mbuf_sz;
heap = (Eterm*) ERTS_HEAP_ALLOC(ERTS_ALC_T_TMP_HEAP,
sizeof(Eterm)*heap_size);
htop = heap;
@@ -624,8 +706,6 @@ erts_garbage_collect_hibernate(Process* p)
1,
heap,
htop,
- (char *) p->heap,
- (char *) p->htop - (char *) p->heap,
(char *) p->old_heap,
(char *) p->old_htop - (char *) p->old_heap,
p->arg_reg,
@@ -644,6 +724,7 @@ erts_garbage_collect_hibernate(Process* p)
}
FLAGS(p) &= ~F_FORCE_GC;
+ p->live_hf_end = ERTS_INVALID_HFRAG_PTR;
/*
* Move the heap to its final destination.
@@ -663,6 +744,8 @@ erts_garbage_collect_hibernate(Process* p)
sys_memcpy((void *) heap, (void *) p->heap, actual_size*sizeof(Eterm));
ERTS_HEAP_FREE(ERTS_ALC_T_TMP_HEAP, p->heap, p->heap_sz*sizeof(Eterm));
+ remove_message_buffers(p);
+
p->stop = p->hend = heap + heap_size;
offs = heap - p->heap;
@@ -808,7 +891,6 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
old_htop = sweep_literals_to_old_heap(p->heap, p->htop, old_htop, area, area_size);
old_htop = sweep_literal_area(p->old_heap, old_htop,
- (char *) p->heap, sizeof(Eterm)*p->heap_sz,
(char *) p->old_heap, sizeof(Eterm)*old_heap_size,
area, area_size);
ASSERT(p->old_htop <= old_htop && old_htop <= p->old_hend);
@@ -869,15 +951,18 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
}
static int
-minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
+minor_collection(Process* p, ErlHeapFragment *live_hf_end,
+ int need, Eterm* objv, int nobj, Uint *recl)
{
- Uint mature = HIGH_WATER(p) - HEAP_START(p);
+ Eterm *mature = p->abandoned_heap ? p->abandoned_heap : p->heap;
+ Uint mature_size = p->high_water - mature;
+ Uint size_before = young_gen_usage(p);
/*
* Allocate an old heap if we don't have one and if we'll need one.
*/
- if (OLD_HEAP(p) == NULL && mature != 0) {
+ if (OLD_HEAP(p) == NULL && mature_size != 0) {
Eterm* n_old;
/* Note: We choose a larger heap size than strictly needed,
@@ -885,7 +970,7 @@ minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
* This improved Estone by more than 1200 estones on my computer
* (Ultra Sparc 10).
*/
- Uint new_sz = erts_next_heap_size(HEAP_TOP(p) - HEAP_START(p), 1);
+ Uint new_sz = erts_next_heap_size(size_before, 1);
/* Create new, empty old_heap */
n_old = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_OLD_HEAP,
@@ -901,41 +986,25 @@ minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
*/
if (OLD_HEAP(p) &&
- ((mature <= OLD_HEND(p) - OLD_HTOP(p)) &&
- ((BIN_VHEAP_MATURE(p) < ( BIN_OLD_VHEAP_SZ(p) - BIN_OLD_VHEAP(p)))) &&
- ((BIN_OLD_VHEAP_SZ(p) > BIN_OLD_VHEAP(p))) ) ) {
- ErlMessage *msgp;
- Uint size_after;
- Uint need_after;
- const Uint stack_size = STACK_SZ_ON_HEAP(p);
- const Uint size_before = MBUF_SIZE(p) + (HEAP_TOP(p) - HEAP_START(p));
- Uint new_sz = HEAP_SIZE(p) + MBUF_SIZE(p) + combined_message_size(p);
+ ((mature_size <= OLD_HEND(p) - OLD_HTOP(p)) &&
+ ((BIN_VHEAP_MATURE(p) < ( BIN_OLD_VHEAP_SZ(p) - BIN_OLD_VHEAP(p)))) &&
+ ((BIN_OLD_VHEAP_SZ(p) > BIN_OLD_VHEAP(p))) ) ) {
+ Uint stack_size, size_after, need_after, new_sz;
+
+ stack_size = p->hend - p->stop;
+ new_sz = stack_size + size_before;
new_sz = next_heap_size(p, new_sz, 0);
- do_minor(p, new_sz, objv, nobj);
+ do_minor(p, live_hf_end, (char *) mature, mature_size*sizeof(Eterm),
+ new_sz, objv, nobj);
size_after = HEAP_TOP(p) - HEAP_START(p);
*recl += (size_before - size_after);
- /*
- * Copy newly received message onto the end of the new heap.
- */
- ErtsGcQuickSanityCheck(p);
- for (msgp = p->msg.first; msgp; msgp = msgp->next) {
- if (msgp->data.attached) {
- ErtsHeapFactory factory;
- erts_factory_proc_prealloc_init(&factory, p,
- erts_msg_attached_data_size(msgp));
- erts_move_msg_attached_data_to_heap(&factory, msgp);
- erts_factory_close(&factory);
- ErtsGcQuickSanityCheck(p);
- }
- }
ErtsGcQuickSanityCheck(p);
GEN_GCS(p)++;
need_after = ((HEAP_TOP(p) - HEAP_START(p))
- + erts_used_frag_sz(MBUF(p))
+ need
+ stack_size);
@@ -984,10 +1053,13 @@ minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
ASSERT(MBUF(p) == NULL);
return 1;
}
+
+ grow_new_heap(p, next_heap_size(p, need_after, 0), objv, nobj);
+ return 1;
}
/*
- * Still not enough room after minor collection. Must force a major collection.
+ * Not enough room for a minor collection. Must force a major collection.
*/
FLAGS(p) |= F_NEED_FULLSWEEP;
return 0;
@@ -1044,7 +1116,9 @@ static ERTS_INLINE void offset_nstack(Process* p, Sint offs,
#endif /* HIPE */
static void
-do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj)
+do_minor(Process *p, ErlHeapFragment *live_hf_end,
+ char *mature, Uint mature_size,
+ Uint new_sz, Eterm* objv, int nobj)
{
Rootset rootset; /* Rootset for GC (stack, dictionary, etc). */
Roots* roots;
@@ -1053,9 +1127,6 @@ do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj)
Eterm* ptr;
Eterm val;
Eterm gval;
- char* heap = (char *) HEAP_START(p);
- Uint heap_size = (char *) HEAP_TOP(p) - heap;
- Uint mature_size = (char *) HIGH_WATER(p) - heap;
Eterm* old_htop = OLD_HTOP(p);
Eterm* n_heap;
char* oh = (char *) OLD_HEAP(p);
@@ -1064,8 +1135,13 @@ do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj)
n_htop = n_heap = (Eterm*) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP,
sizeof(Eterm)*new_sz);
- if (MBUF(p) != NULL) {
- n_htop = collect_heap_frags(p, n_heap, n_htop, objv, nobj);
+ if (live_hf_end != ERTS_INVALID_HFRAG_PTR) {
+ /*
+ * Move heap frags that we know are completely live
+ * directly into the new young heap generation.
+ */
+ n_htop = collect_live_heap_frags(p, live_hf_end, n_heap, n_htop,
+ objv, nobj);
}
n = setup_rootset(p, objv, nobj, &rootset);
@@ -1088,11 +1164,9 @@ do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj)
if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
*g_ptr++ = val;
- } else if (ErtsInArea(ptr, heap, mature_size)) {
+ } else if (ErtsInArea(ptr, mature, mature_size)) {
MOVE_BOXED(ptr,val,old_htop,g_ptr++);
- } else if (ERTS_IS_NEW_HEAP_PTR(gval, ptr,
- heap, heap_size,
- oh, oh_size)) {
+ } else if (ErtsInYoungGen(gval, ptr, oh, oh_size)) {
MOVE_BOXED(ptr,val,n_htop,g_ptr++);
} else {
g_ptr++;
@@ -1105,11 +1179,9 @@ do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj)
val = *ptr;
if (IS_MOVED_CONS(val)) { /* Moved */
*g_ptr++ = ptr[1];
- } else if (ErtsInArea(ptr, heap, mature_size)) {
+ } else if (ErtsInArea(ptr, mature, mature_size)) {
MOVE_CONS(ptr,val,old_htop,g_ptr++);
- } else if (ERTS_IS_NEW_HEAP_PTR(gval, ptr,
- heap, heap_size,
- oh, oh_size)) {
+ } else if (ErtsInYoungGen(gval, ptr, oh, oh_size)) {
MOVE_CONS(ptr,val,n_htop,g_ptr++);
} else {
g_ptr++;
@@ -1134,8 +1206,7 @@ do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj)
*/
if (mature_size == 0) {
- n_htop = sweep_new_heap(n_heap, n_htop, heap, heap_size,
- oh, oh_size);
+ n_htop = sweep_new_heap(n_heap, n_htop, oh, oh_size);
} else {
Eterm* n_hp = n_heap;
Eterm* ptr;
@@ -1152,11 +1223,9 @@ do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj)
if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
*n_hp++ = val;
- } else if (ErtsInArea(ptr, heap, mature_size)) {
+ } else if (ErtsInArea(ptr, mature, mature_size)) {
MOVE_BOXED(ptr,val,old_htop,n_hp++);
- } else if (ERTS_IS_NEW_HEAP_PTR(gval, ptr,
- heap, heap_size,
- oh, oh_size)) {
+ } else if (ErtsInYoungGen(gval, ptr, oh, oh_size)) {
MOVE_BOXED(ptr,val,n_htop,n_hp++);
} else {
n_hp++;
@@ -1168,11 +1237,9 @@ do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj)
val = *ptr;
if (IS_MOVED_CONS(val)) {
*n_hp++ = ptr[1];
- } else if (ErtsInArea(ptr, heap, mature_size)) {
+ } else if (ErtsInArea(ptr, mature, mature_size)) {
MOVE_CONS(ptr,val,old_htop,n_hp++);
- } else if (ERTS_IS_NEW_HEAP_PTR(gval, ptr,
- heap, heap_size,
- oh, oh_size)) {
+ } else if (ErtsInYoungGen(gval, ptr, oh, oh_size)) {
MOVE_CONS(ptr,val,n_htop,n_hp++);
} else {
n_hp++;
@@ -1192,12 +1259,10 @@ do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj)
if (IS_MOVED_BOXED(val)) {
*origptr = val;
mb->base = binary_bytes(val);
- } else if (ErtsInArea(ptr, heap, mature_size)) {
+ } else if (ErtsInArea(ptr, mature, mature_size)) {
MOVE_BOXED(ptr,val,old_htop,origptr);
mb->base = binary_bytes(mb->orig);
- } else if (ERTS_IS_NEW_HEAP_PTR(*origptr, ptr,
- heap, heap_size,
- oh, oh_size)) {
+ } else if (ErtsInYoungGen(*origptr, ptr, oh, oh_size)) {
MOVE_BOXED(ptr,val,n_htop,origptr);
mb->base = binary_bytes(mb->orig);
}
@@ -1218,11 +1283,8 @@ do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj)
* may point to the old (soon to be deleted) new_heap.
*/
- if (OLD_HTOP(p) < old_htop) {
- old_htop = sweep_new_heap(OLD_HTOP(p), old_htop,
- heap, heap_size,
- oh, oh_size);
- }
+ if (OLD_HTOP(p) < old_htop)
+ old_htop = sweep_new_heap(OLD_HTOP(p), old_htop, oh, oh_size);
OLD_HTOP(p) = old_htop;
HIGH_WATER(p) = n_htop;
@@ -1254,8 +1316,12 @@ do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj)
#endif
ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
- (void*)HEAP_START(p),
+ (p->abandoned_heap
+ ? p->abandoned_heap
+ : HEAP_START(p)),
HEAP_SIZE(p) * sizeof(Eterm));
+ p->abandoned_heap = NULL;
+ p->flags &= ~F_ABANDONED_HEAP_USE;
HEAP_START(p) = n_heap;
HEAP_TOP(p) = n_htop;
HEAP_SIZE(p) = new_sz;
@@ -1272,15 +1338,12 @@ do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj)
*/
static int
-major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
+major_collection(Process* p, ErlHeapFragment *live_hf_end,
+ int need, Eterm* objv, int nobj, Uint *recl)
{
- const Uint size_before = ((HEAP_TOP(p) - HEAP_START(p))
- + (OLD_HTOP(p) - OLD_HEAP(p))
- + MBUF_SIZE(p));
+ Uint size_before, stack_size;
Eterm* n_heap;
Eterm* n_htop;
- char* src = (char *) HEAP_START(p);
- Uint src_size = (char *) HEAP_TOP(p) - src;
char* oh = (char *) OLD_HEAP(p);
Uint oh_size = (char *) OLD_HTOP(p) - oh;
Uint new_sz, stk_sz;
@@ -1290,9 +1353,11 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
* to receive all live data.
*/
- new_sz = (HEAP_SIZE(p) + MBUF_SIZE(p)
- + combined_message_size(p)
- + (OLD_HTOP(p) - OLD_HEAP(p)));
+ size_before = young_gen_usage(p);
+ size_before += p->old_htop - p->old_heap;
+ stack_size = p->hend - p->stop;
+
+ new_sz = stack_size + size_before;
new_sz = next_heap_size(p, new_sz, 0);
/*
@@ -1306,16 +1371,16 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
n_htop = n_heap = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP,
sizeof(Eterm)*new_sz);
- /*
- * Get rid of heap fragments.
- */
-
- if (MBUF(p) != NULL) {
- n_htop = collect_heap_frags(p, n_heap, n_htop, objv, nobj);
+ if (live_hf_end != ERTS_INVALID_HFRAG_PTR) {
+ /*
+ * Move heap frags that we know are completely live
+ * directly into the heap.
+ */
+ n_htop = collect_live_heap_frags(p, live_hf_end, n_heap, n_htop,
+ objv, nobj);
}
- n_htop = full_sweep_heaps(p, 0, n_heap, n_htop, src, src_size,
- oh, oh_size, objv, nobj);
+ n_htop = full_sweep_heaps(p, 0, n_heap, n_htop, oh, oh_size, objv, nobj);
/* Move the stack to the end of the heap */
stk_sz = HEAP_END(p) - p->stop;
@@ -1332,8 +1397,12 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
#endif
ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
- (void *) HEAP_START(p),
+ (p->abandoned_heap
+ ? p->abandoned_heap
+ : HEAP_START(p)),
(HEAP_END(p) - HEAP_START(p)) * sizeof(Eterm));
+ p->abandoned_heap = NULL;
+ p->flags &= ~F_ABANDONED_HEAP_USE;
HEAP_START(p) = n_heap;
HEAP_TOP(p) = n_htop;
HEAP_SIZE(p) = new_sz;
@@ -1346,24 +1415,6 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
*recl += size_before - (HEAP_TOP(p) - HEAP_START(p));
- {
- ErlMessage *msgp;
-
- /*
- * Copy newly received message onto the end of the new heap.
- */
- for (msgp = p->msg.first; msgp; msgp = msgp->next) {
- if (msgp->data.attached) {
- ErtsHeapFactory factory;
- erts_factory_proc_prealloc_init(&factory, p,
- erts_msg_attached_data_size(msgp));
- erts_move_msg_attached_data_to_heap(&factory, msgp);
- erts_factory_close(&factory);
- ErtsGcQuickSanityCheck(p);
- }
- }
- }
-
adjust_after_fullsweep(p, need, objv, nobj);
#ifdef HARDDEBUG
@@ -1379,7 +1430,6 @@ static Eterm *
full_sweep_heaps(Process *p,
int hibernate,
Eterm *n_heap, Eterm* n_htop,
- char *h, Uint h_size,
char *oh, Uint oh_size,
Eterm *objv, int nobj)
{
@@ -1420,9 +1470,7 @@ full_sweep_heaps(Process *p,
if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
*g_ptr++ = val;
- } else if (!ERTS_IS_LITERAL_PTR(gval, ptr,
- h, h_size,
- oh, oh_size)) {
+ } else if (!erts_is_literal(gval, ptr)) {
MOVE_BOXED(ptr,val,n_htop,g_ptr++);
} else {
g_ptr++;
@@ -1435,9 +1483,7 @@ full_sweep_heaps(Process *p,
val = *ptr;
if (IS_MOVED_CONS(val)) {
*g_ptr++ = ptr[1];
- } else if (!ERTS_IS_LITERAL_PTR(gval, ptr,
- h, h_size,
- oh, oh_size)) {
+ } else if (!erts_is_literal(gval, ptr)) {
MOVE_CONS(ptr,val,n_htop,g_ptr++);
} else {
g_ptr++;
@@ -1462,7 +1508,7 @@ full_sweep_heaps(Process *p,
* until all is copied.
*/
- n_htop = sweep_heaps(n_heap, n_htop, h, h_size, oh, oh_size);
+ n_htop = sweep_heaps(n_heap, n_htop, oh, oh_size);
if (MSO(p).first) {
sweep_off_heap(p, 1);
@@ -1514,23 +1560,6 @@ adjust_after_fullsweep(Process *p, int need, Eterm *objv, int nobj)
}
/*
- * Return the size of all message buffers that are NOT linked in the
- * mbuf list.
- */
-static Uint
-combined_message_size(Process* p)
-{
- Uint sz;
- ErlMessage *msgp;
-
- for (sz = 0, msgp = p->msg.first; msgp; msgp = msgp->next) {
- if (msgp->data.attached)
- sz += erts_msg_attached_data_size(msgp);
- }
- return sz;
-}
-
-/*
* Remove all message buffers.
*/
static void
@@ -1540,6 +1569,10 @@ remove_message_buffers(Process* p)
free_message_buffer(MBUF(p));
MBUF(p) = NULL;
}
+ if (p->msg_frag) {
+ erts_cleanup_messages(p->msg_frag);
+ p->msg_frag = NULL;
+ }
MBUF_SIZE(p) = 0;
}
#ifdef HARDDEBUG
@@ -1551,64 +1584,6 @@ remove_message_buffers(Process* p)
* For performance reasons, we use _unchecked_list_val(), _unchecked_boxed_val(),
* and so on to avoid a function call.
*/
-
-static void
-disallow_heap_frag_ref(Process* p, Eterm* n_htop, Eterm* objv, int nobj)
-{
- ErlHeapFragment* mbuf;
- ErlHeapFragment* qb;
- Eterm gval;
- Eterm* ptr;
- Eterm val;
-
- ASSERT(p->htop != NULL);
- mbuf = MBUF(p);
-
- while (nobj--) {
- gval = *objv;
-
- switch (primary_tag(gval)) {
-
- case TAG_PRIMARY_BOXED: {
- ptr = _unchecked_boxed_val(gval);
- val = *ptr;
- if (IS_MOVED_BOXED(val)) {
- ASSERT(is_boxed(val));
- objv++;
- } else {
- for (qb = mbuf; qb != NULL; qb = qb->next) {
- if (ErtsInArea(ptr, qb->mem, qb->alloc_size*sizeof(Eterm))) {
- abort();
- }
- }
- objv++;
- }
- break;
- }
-
- case TAG_PRIMARY_LIST: {
- ptr = _unchecked_list_val(gval);
- val = *ptr;
- if (IS_MOVED_CONS(val)) {
- objv++;
- } else {
- for (qb = mbuf; qb != NULL; qb = qb->next) {
- if (ErtsInArea(ptr, qb->mem, qb->alloc_size*sizeof(Eterm))) {
- abort();
- }
- }
- objv++;
- }
- break;
- }
-
- default: {
- objv++;
- break;
- }
- }
- }
-}
static void
disallow_heap_frag_ref_in_heap(Process* p)
@@ -1737,7 +1712,6 @@ typedef enum {
static ERTS_FORCE_INLINE Eterm *
sweep(Eterm *n_hp, Eterm *n_htop,
ErtsSweepType type,
- char *h, Uint hsz,
char *oh, Uint ohsz,
char *src, Uint src_size)
{
@@ -1749,14 +1723,10 @@ sweep(Eterm *n_hp, Eterm *n_htop,
#define ERTS_IS_IN_SWEEP_AREA(TPtr, Ptr) \
(type == ErtsSweepHeaps \
- ? !ERTS_IS_LITERAL_PTR((TPtr), (Ptr), h, hsz, oh, ohsz) \
+ ? !erts_is_literal((TPtr), (Ptr)) \
: (type == ErtsSweepNewHeap \
- ? ERTS_IS_NEW_HEAP_PTR((TPtr), (Ptr), h, hsz, oh, ohsz) \
- : (ErtsInArea((Ptr), src, src_size) \
- ? (ERTS_GC_ASSERT(erts_is_literal((TPtr), (Ptr)) \
- && !ErtsInArea((Ptr), h, hsz) \
- && !ErtsInArea((Ptr), oh, ohsz)), 1) \
- : 0)))
+ ? ErtsInYoungGen((TPtr), (Ptr), oh, ohsz) \
+ : ErtsInArea((Ptr), src, src_size)))
while (n_hp != n_htop) {
ASSERT(n_hp < n_htop);
@@ -1820,38 +1790,30 @@ sweep(Eterm *n_hp, Eterm *n_htop,
}
static Eterm *
-sweep_new_heap(Eterm *n_hp, Eterm *n_htop,
- char* new_heap, Uint new_heap_size,
- char* old_heap, Uint old_heap_size)
+sweep_new_heap(Eterm *n_hp, Eterm *n_htop, char* old_heap, Uint old_heap_size)
{
return sweep(n_hp, n_htop,
ErtsSweepNewHeap,
- new_heap, new_heap_size,
old_heap, old_heap_size,
NULL, 0);
}
static Eterm *
-sweep_heaps(Eterm *n_hp, Eterm *n_htop,
- char* new_heap, Uint new_heap_size,
- char* old_heap, Uint old_heap_size)
+sweep_heaps(Eterm *n_hp, Eterm *n_htop, char* old_heap, Uint old_heap_size)
{
return sweep(n_hp, n_htop,
ErtsSweepHeaps,
- new_heap, new_heap_size,
old_heap, old_heap_size,
NULL, 0);
}
static Eterm *
sweep_literal_area(Eterm *n_hp, Eterm *n_htop,
- char* new_heap, Uint new_heap_size,
char* old_heap, Uint old_heap_size,
char* src, Uint src_size)
{
return sweep(n_hp, n_htop,
ErtsSweepLiteralArea,
- new_heap, new_heap_size,
old_heap, old_heap_size,
src, src_size);
}
@@ -1956,32 +1918,21 @@ move_one_area(Eterm* n_htop, char* src, Uint src_size)
*/
static Eterm*
-collect_heap_frags(Process* p, Eterm* n_hstart, Eterm* n_htop,
- Eterm* objv, int nobj)
+collect_live_heap_frags(Process* p, ErlHeapFragment *live_hf_end,
+ Eterm* n_hstart, Eterm* n_htop,
+ Eterm* objv, int nobj)
{
ErlHeapFragment* qb;
char* frag_begin;
Uint frag_size;
/*
- * We don't allow references to a heap fragments from the stack, heap,
- * or process dictionary.
- */
-#ifdef HARDDEBUG
- disallow_heap_frag_ref(p, n_htop, p->stop, STACK_START(p) - p->stop);
- if (p->dictionary != NULL) {
- disallow_heap_frag_ref(p, n_htop, p->dictionary->data, p->dictionary->used);
- }
- disallow_heap_frag_ref_in_heap(p);
-#endif
-
- /*
* Move the heap fragments to the new heap. Note that no GC is done on
* the heap fragments. Any garbage will thus be moved as well and survive
* until next GC.
*/
qb = MBUF(p);
- while (qb != NULL) {
+ while (qb != live_hf_end) {
ASSERT(!qb->off_heap.first); /* process fragments use the MSO(p) list */
frag_size = qb->used_size * sizeof(Eterm);
if (frag_size != 0) {
@@ -1996,9 +1947,7 @@ collect_heap_frags(Process* p, Eterm* n_hstart, Eterm* n_htop,
static Uint
setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset)
{
- Uint avail;
Roots* roots;
- ErlMessage* mp;
Uint n;
n = 0;
@@ -2076,31 +2025,48 @@ setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset)
ASSERT(n <= rootset->size);
- mp = p->msg.first;
- avail = rootset->size - n;
- while (mp != NULL) {
- if (avail == 0) {
- Uint new_size = 2*rootset->size;
- if (roots == rootset->def) {
- roots = erts_alloc(ERTS_ALC_T_ROOTSET,
- new_size*sizeof(Roots));
- sys_memcpy(roots, rootset->def, sizeof(rootset->def));
- } else {
- roots = erts_realloc(ERTS_ALC_T_ROOTSET,
- (void *) roots,
- new_size*sizeof(Roots));
- }
+ switch (p->flags & (F_OFF_HEAP_MSGQ|F_OFF_HEAP_MSGQ_CHNG)) {
+ case F_OFF_HEAP_MSGQ|F_OFF_HEAP_MSGQ_CHNG:
+ (void) erts_move_messages_off_heap(p);
+ case F_OFF_HEAP_MSGQ:
+ break;
+ case F_OFF_HEAP_MSGQ_CHNG:
+ case 0: {
+ /*
+ * Off heap message queue disabled, i.e. we may
+ * have references from the message queue to the
+ * heap...
+ */
+ ErtsMessage *mp;
+
+ /* Ensure large enough rootset... */
+ if (n + p->msg.len > rootset->size) {
+ Uint new_size = n + p->msg.len;
+ ERTS_GC_ASSERT(roots == rootset->def);
+ roots = erts_alloc(ERTS_ALC_T_ROOTSET,
+ new_size*sizeof(Roots));
+ sys_memcpy(roots, rootset->def, n*sizeof(Roots));
rootset->size = new_size;
- avail = new_size - n;
}
- if (mp->data.attached == NULL) {
- roots[n].v = mp->m;
- roots[n].sz = 2;
- n++;
- avail--;
+
+ for (mp = p->msg.first; mp; mp = mp->next) {
+
+ if (!mp->data.attached) {
+ /*
+ * Message may refer data on heap;
+ * add it to rootset...
+ */
+ roots[n].v = mp->m;
+ roots[n].sz = ERL_MESSAGE_REF_ARRAY_SZ;
+ n++;
+ }
}
- mp = mp->next;
+ break;
+ }
}
+
+ ASSERT(rootset->size >= n);
+
rootset->roots = roots;
rootset->num_roots = n;
return n;
@@ -2569,7 +2535,7 @@ offset_off_heap(Process* p, Sint offs, char* area, Uint area_size)
static void
offset_mqueue(Process *p, Sint offs, char* area, Uint area_size)
{
- ErlMessage* mp = p->msg.first;
+ ErtsMessage* mp = p->msg.first;
while (mp != NULL) {
Eterm mesg = ERL_MESSAGE_TERM(mp);
@@ -2656,7 +2622,7 @@ reply_gc_info(void *vgcirp)
Eterm **hpp;
Uint sz, *szp;
ErlOffHeap *ohp = NULL;
- ErlHeapFragment *bp = NULL;
+ ErtsMessage *mp = NULL;
ASSERT(esdp);
@@ -2682,12 +2648,13 @@ reply_gc_info(void *vgcirp)
if (hpp)
break;
- hp = erts_alloc_message_heap(sz, &bp, &ohp, rp, &rp_locks);
+ mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
+
szp = NULL;
hpp = &hp;
}
- erts_queue_message(rp, &rp_locks, bp, msg, NIL);
+ erts_queue_message(rp, &rp_locks, mp, msg, NIL);
if (gcirp->req_sched == esdp->no)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
@@ -2739,36 +2706,49 @@ erts_gc_info_request(Process *c_p)
static int
within2(Eterm *ptr, Process *p, Eterm *real_htop)
{
- ErlHeapFragment* bp = MBUF(p);
- ErlMessage* mp = p->msg.first;
- Eterm *htop = real_htop ? real_htop : HEAP_TOP(p);
+ ErlHeapFragment* bp;
+ ErtsMessage* mp;
+ Eterm *htop, *heap;
+
+ if (p->abandoned_heap)
+ ERTS_GET_ORIG_HEAP(p, heap, htop);
+ else {
+ heap = p->heap;
+ htop = real_htop ? real_htop : HEAP_TOP(p);
+ }
if (OLD_HEAP(p) && (OLD_HEAP(p) <= ptr && ptr < OLD_HEND(p))) {
return 1;
}
- if (HEAP_START(p) <= ptr && ptr < htop) {
+ if (heap <= ptr && ptr < htop) {
return 1;
}
- while (bp != NULL) {
- if (bp->mem <= ptr && ptr < bp->mem + bp->used_size) {
- return 1;
- }
- bp = bp->next;
- }
+
+ mp = p->msg_frag;
+ bp = p->mbuf;
+
+ if (bp)
+ goto search_heap_frags;
+
while (mp) {
- if (mp->data.attached) {
- ErlHeapFragment *hfp;
- if (is_value(ERL_MESSAGE_TERM(mp)))
- hfp = mp->data.heap_frag;
- else if (is_not_nil(ERL_MESSAGE_TOKEN(mp)))
- hfp = erts_dist_ext_trailer(mp->data.dist_ext);
- else
- hfp = NULL;
- if (hfp && hfp->mem <= ptr && ptr < hfp->mem + hfp->used_size)
+
+ if (mp->data.attached == ERTS_MSG_COMBINED_HFRAG)
+ bp = &mp->hfrag;
+ else
+ bp = mp->data.heap_frag;
+
+ mp = mp->next;
+
+ search_heap_frags:
+
+ while (bp) {
+ if (bp->mem <= ptr && ptr < bp->mem + bp->used_size) {
return 1;
+ }
+ bp = bp->next;
}
- mp = mp->next;
}
+
return 0;
}
@@ -2790,11 +2770,11 @@ do { \
__FILE__, __LINE__, #EXP); \
} while (0)
+
#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_LIST
# define ERTS_OFFHEAP_VISITED_BIT ((Eterm) 1 << 31)
#endif
-
void
erts_check_off_heap2(Process *p, Eterm *htop)
{
@@ -2823,7 +2803,7 @@ erts_check_off_heap2(Process *p, Eterm *htop)
}
ERTS_CHK_OFFHEAP_ASSERT(refc >= 1);
#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_LIST
- ERTS_CHK_OFFHEAP_ASSERT(!(u.hdr->thing_word & ERTS_EXTERNAL_VISITED_BIT));
+ ERTS_CHK_OFFHEAP_ASSERT(!(u.hdr->thing_word & ERTS_OFFHEAP_VISITED_BIT));
u.hdr->thing_word |= ERTS_OFFHEAP_VISITED_BIT;
#endif
if (old) {
@@ -2836,7 +2816,7 @@ erts_check_off_heap2(Process *p, Eterm *htop)
}
}
-#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_EXTERNAL_LIST
+#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_LIST
for (u.hdr = MSO(p).first; u.hdr; u.hdr = u.hdr->next)
u.hdr->thing_word &= ~ERTS_OFFHEAP_VISITED_BIT;
#endif