author     Sverker Eriksson <[email protected]>    2018-02-12 13:25:02 +0100
committer  Sverker Eriksson <[email protected]>    2018-02-12 13:25:02 +0100
commit     ecea4b22696dc2aaa57d9f9750fe07efb6b71cde (patch)
tree       c6b7f5fc93465d5a766c6056deb8aa3633b28b0a
parent     d1e89f8df4be7197fdab36a3e1662183a7dfe6ae (diff)
parent     d74796ecb17a68d442e846c4032a57cb2c083686 (diff)
Merge 'sverker/carrier-migration-improvements'
into 'sverker/maint-19/alloc-n-migration/ERIERL-88'
-rw-r--r--  erts/emulator/beam/erl_alloc.c                          6
-rw-r--r--  erts/emulator/beam/erl_alloc_util.c                   771
-rw-r--r--  erts/emulator/beam/erl_alloc_util.h                   148
-rw-r--r--  erts/emulator/beam/erl_ao_firstfit_alloc.c            123
-rw-r--r--  erts/emulator/beam/erl_ao_firstfit_alloc.h              1
-rw-r--r--  erts/emulator/internal_doc/CarrierMigration.md        138
-rw-r--r--  erts/emulator/test/alloc_SUITE.erl                     65
-rw-r--r--  erts/emulator/test/alloc_SUITE_data/allocator_test.h    5
-rw-r--r--  erts/emulator/test/alloc_SUITE_data/migration.c       112
-rw-r--r--  erts/preloaded/ebin/erlang.beam                       bin 105140 -> 105092 bytes
-rw-r--r--  erts/preloaded/src/erlang.erl                           5
-rw-r--r--  lib/stdlib/test/ets_SUITE.erl                          28
12 files changed, 862 insertions, 540 deletions
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 625aa98edf..7c57d47a53 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -3823,7 +3823,9 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
case 0xf15: erts_free(ERTS_ALC_T_TEST, (void*)a1); return 0;
- case 0xf16: {
+ case 0xf16: return (UWord) erts_realloc(ERTS_ALC_T_TEST, (void*)a1, (Uint)a2);
+
+ case 0xf17: {
Uint extra_hdr_sz = UNIT_CEILING((Uint)a1);
ErtsAllocatorThrSpec_t* ts = &erts_allctr_thr_spec[ERTS_ALC_A_TEST];
Uint offset = ts->allctr[0]->mbc_header_size;
@@ -3850,7 +3852,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
*(void**)a3 = orig_destroying_mbc;
return offset;
}
- case 0xf17: {
+ case 0xf18: {
ErtsAllocatorThrSpec_t* ts = &erts_allctr_thr_spec[ERTS_ALC_A_TEST];
return ts->allctr[0]->largest_mbc_size;
}
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index 0bf95a65be..875fb680bd 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -375,8 +375,10 @@ do { \
#define ERTS_CRR_ALCTR_FLG_IN_POOL (((erts_aint_t) 1) << 0)
#define ERTS_CRR_ALCTR_FLG_BUSY (((erts_aint_t) 1) << 1)
+#define ERTS_CRR_ALCTR_FLG_HOMECOMING (((erts_aint_t) 1) << 2)
#define ERTS_CRR_ALCTR_FLG_MASK (ERTS_CRR_ALCTR_FLG_IN_POOL | \
- ERTS_CRR_ALCTR_FLG_BUSY)
+ ERTS_CRR_ALCTR_FLG_BUSY | \
+ ERTS_CRR_ALCTR_FLG_HOMECOMING)
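Illustrative sketch (not part of the patch): the carrier's allctr word packs an aligned allocator pointer together with these low flag bits. A minimal standalone C version of that encoding, with hypothetical names, could look like:

    #include <stdatomic.h>
    #include <stdint.h>

    /* Hypothetical stand-ins for the ERTS_CRR_ALCTR_FLG_* bits. */
    #define CRR_FLG_IN_POOL     ((uintptr_t)1 << 0)
    #define CRR_FLG_BUSY        ((uintptr_t)1 << 1)
    #define CRR_FLG_HOMECOMING  ((uintptr_t)1 << 2)
    #define CRR_FLG_MASK        (CRR_FLG_IN_POOL | CRR_FLG_BUSY | CRR_FLG_HOMECOMING)

    /* The allocator struct is assumed to be at least 8-byte aligned,
     * so the three lowest bits of its address are free for the flags. */
    typedef struct { atomic_uintptr_t allctr; } carrier_word;

    static void *word_allctr(carrier_word *c) {
        return (void *)(atomic_load(&c->allctr) & ~CRR_FLG_MASK);
    }

    static int word_has_flag(carrier_word *c, uintptr_t flg) {
        return (atomic_load(&c->allctr) & flg) != 0;
    }

    static void word_set(carrier_word *c, void *allctr, uintptr_t flgs) {
        atomic_store(&c->allctr, (uintptr_t)allctr | flgs);
    }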
#ifdef ERTS_SMP
#define SBC_HEADER_SIZE \
@@ -583,7 +585,7 @@ do { \
DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
} while (0)
-#define STAT_MBC_CPOOL_INSERT(AP, CRR) \
+#define STAT_MBC_ABANDON(AP, CRR) \
do { \
UWord csz__ = CARRIER_SZ((CRR)); \
if (IS_MSEG_CARRIER((CRR))) \
@@ -1143,90 +1145,25 @@ unlink_carrier(CarrierList_t *cl, Carrier_t *crr)
ASSERT(crr->next);
crr->next->prev = crr->prev;
}
-}
-
-#ifdef ERTS_SMP
-
#ifdef DEBUG
-static int is_in_list(ErtsDoubleLink_t* sentinel, ErtsDoubleLink_t* node)
-{
- ErtsDoubleLink_t* p;
-
- ASSERT(node != sentinel);
- for (p = sentinel->next; p != sentinel; p = p->next) {
- if (p == node)
- return 1;
- }
- return 0;
-}
-#endif /* DEBUG */
-
-static ERTS_INLINE void
-link_edl_after(ErtsDoubleLink_t* after_me, ErtsDoubleLink_t* node)
-{
- ErtsDoubleLink_t* before_me = after_me->next;
- ASSERT(node != after_me && node != before_me);
- node->next = before_me;
- node->prev = after_me;
- before_me->prev = node;
- after_me->next = node;
-}
-
-static ERTS_INLINE void
-link_edl_before(ErtsDoubleLink_t* before_me, ErtsDoubleLink_t* node)
-{
- ErtsDoubleLink_t* after_me = before_me->prev;
- ASSERT(node != before_me && node != after_me);
- node->next = before_me;
- node->prev = after_me;
- before_me->prev = node;
- after_me->next = node;
-}
-
-static ERTS_INLINE void
-unlink_edl(ErtsDoubleLink_t* node)
-{
- node->next->prev = node->prev;
- node->prev->next = node->next;
+ crr->next = crr;
+ crr->prev = crr;
+#endif
}
-static ERTS_INLINE void
-relink_edl_before(ErtsDoubleLink_t* before_me, ErtsDoubleLink_t* node)
-{
- if (node != before_me && node != before_me->prev) {
- unlink_edl(node);
- link_edl_before(before_me, node);
- }
-}
+#ifdef ERTS_SMP
static ERTS_INLINE int is_abandoned(Carrier_t *crr)
{
- return crr->cpool.abandoned.next != NULL;
-}
-
-static ERTS_INLINE void
-link_abandoned_carrier(ErtsDoubleLink_t* list, Carrier_t *crr)
-{
- ASSERT(!is_abandoned(crr));
-
- link_edl_after(list, &crr->cpool.abandoned);
-
- ASSERT(crr->cpool.abandoned.next != &crr->cpool.abandoned);
- ASSERT(crr->cpool.abandoned.prev != &crr->cpool.abandoned);
+ return crr->cpool.state != ERTS_MBC_IS_HOME;
}
static ERTS_INLINE void
unlink_abandoned_carrier(Carrier_t *crr)
{
- ASSERT(is_in_list(&crr->cpool.orig_allctr->cpool.pooled_list,
- &crr->cpool.abandoned) ||
- is_in_list(&crr->cpool.orig_allctr->cpool.traitor_list,
- &crr->cpool.abandoned));
-
- unlink_edl(&crr->cpool.abandoned);
-
- crr->cpool.abandoned.next = NULL;
- crr->cpool.abandoned.prev = NULL;
+ if (crr->cpool.state == ERTS_MBC_WAS_POOLED) {
+ aoff_remove_pooled_mbc(crr->cpool.orig_allctr, crr);
+ }
}
static ERTS_INLINE void
@@ -1234,24 +1171,19 @@ clear_busy_pool_carrier(Allctr_t *allctr, Carrier_t *crr)
{
if (crr) {
erts_aint_t max_size;
- erts_aint_t new_val;
+ erts_aint_t iallctr;
max_size = (erts_aint_t) allctr->largest_fblk_in_mbc(allctr, crr);
erts_atomic_set_nob(&crr->cpool.max_size, max_size);
- new_val = (((erts_aint_t) allctr)|ERTS_CRR_ALCTR_FLG_IN_POOL);
-
-#ifdef ERTS_ALC_CPOOL_DEBUG
- {
- erts_aint_t old_val = new_val|ERTS_CRR_ALCTR_FLG_BUSY;
+ iallctr = erts_smp_atomic_read_nob(&crr->allctr);
+ ERTS_ALC_CPOOL_ASSERT((iallctr & ~ERTS_CRR_ALCTR_FLG_HOMECOMING)
+ == ((erts_aint_t)allctr |
+ ERTS_CRR_ALCTR_FLG_IN_POOL |
+ ERTS_CRR_ALCTR_FLG_BUSY));
- ERTS_ALC_CPOOL_ASSERT(old_val
- == erts_smp_atomic_xchg_relb(&crr->allctr,
- new_val));
- }
-#else
- erts_smp_atomic_set_relb(&crr->allctr, new_val);
-#endif
+ iallctr &= ~ERTS_CRR_ALCTR_FLG_BUSY;
+ erts_smp_atomic_set_relb(&crr->allctr, iallctr);
}
}
@@ -1667,6 +1599,11 @@ dealloc_mbc(Allctr_t *allctr, Carrier_t *crr)
#ifdef ERTS_SMP
+static void set_new_allctr_abandon_limit(Allctr_t*);
+static void abandon_carrier(Allctr_t*, Carrier_t*);
+static void poolify_my_carrier(Allctr_t*, Carrier_t*);
+static void enqueue_homecoming(Allctr_t*, Carrier_t*);
+
static ERTS_INLINE Allctr_t*
get_pref_allctr(void *extra)
{
@@ -1733,9 +1670,23 @@ get_used_allctr(Allctr_t *pref_allctr, int pref_lock, void *p, UWord *sizep,
erts_aint_t act;
ERTS_ALC_CPOOL_ASSERT(!(iallctr & ERTS_CRR_ALCTR_FLG_BUSY));
- act = erts_smp_atomic_cmpxchg_ddrb(&crr->allctr,
- iallctr|ERTS_CRR_ALCTR_FLG_BUSY,
- iallctr);
+ if (iallctr & ERTS_CRR_ALCTR_FLG_HOMECOMING) {
+ /*
+ * This carrier has just been given back to us by writing
+ * to crr->allctr with a write barrier (see abandon_carrier).
+ *
+ * We need a matching read barrier to guarantee a correct view
+ * of the carrier for deallocation work.
+ */
+ act = erts_smp_atomic_cmpxchg_rb(&crr->allctr,
+ iallctr|ERTS_CRR_ALCTR_FLG_BUSY,
+ iallctr);
+ }
+ else {
+ act = erts_smp_atomic_cmpxchg_ddrb(&crr->allctr,
+ iallctr|ERTS_CRR_ALCTR_FLG_BUSY,
+ iallctr);
+ }
if (act == iallctr) {
*busy_pcrr_pp = crr;
break;
@@ -1751,13 +1702,6 @@ get_used_allctr(Allctr_t *pref_allctr, int pref_lock, void *p, UWord *sizep,
erts_mtx_unlock(&pref_allctr->mutex);
}
}
-
- ERTS_ALC_CPOOL_ASSERT(
- (((iallctr & ~ERTS_CRR_ALCTR_FLG_MASK) == (erts_aint_t) pref_allctr)
- ? (((iallctr & ERTS_CRR_ALCTR_FLG_MASK) == ERTS_CRR_ALCTR_FLG_IN_POOL)
- || ((iallctr & ERTS_CRR_ALCTR_FLG_MASK) == 0))
- : 1));
-
return used_allctr;
}
}
@@ -2009,9 +1953,9 @@ handle_delayed_fix_dealloc(Allctr_t *allctr, void *ptr)
/* Carrier migrated; need to redirect block to new owner... */
int cinit = used_allctr->dd.ix - allctr->dd.ix;
- ERTS_ALC_CPOOL_ASSERT(!busy_pcrr_p);
+ ERTS_ALC_CPOOL_ASSERT(!busy_pcrr_p);
- DEC_CC(allctr->calls.this_free);
+ DEC_CC(allctr->calls.this_free);
((ErtsAllctrFixDDBlock_t *) ptr)->fix_type = type;
if (ddq_enqueue(&used_allctr->dd.q, ptr, cinit))
@@ -2020,8 +1964,9 @@ handle_delayed_fix_dealloc(Allctr_t *allctr, void *ptr)
}
}
-static void
-schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr);
+static void schedule_dealloc_carrier(Allctr_t*, Carrier_t*);
+static void dealloc_my_carrier(Allctr_t*, Carrier_t*);
+
static ERTS_INLINE int
handle_delayed_dealloc(Allctr_t *allctr,
@@ -2083,39 +2028,61 @@ handle_delayed_dealloc(Allctr_t *allctr,
res = 1;
blk = UMEM2BLK(ptr);
- if (IS_FREE_LAST_MBC_BLK(blk)) {
+ if (blk->bhdr == HOMECOMING_MBC_BLK_HDR) {
/*
* A multiblock carrier that previously has been migrated away
- * from us and now is back to be deallocated. For more info
- * see schedule_dealloc_carrier().
- *
- * Note that we cannot use FBLK_TO_MBC(blk) since it
- * data has been overwritten by the queue.
+ * from us, was sent back to us either because
+ * - it became empty and we need to deallocate it, or
+ * - it was inserted into the pool and we need to update our pooled_tree
*/
- Carrier_t *crr = FIRST_BLK_TO_MBC(allctr, blk);
-
- /* Restore word overwritten by the dd-queue as it will be read
- * if this carrier is pulled from dc_list by cpool_fetch()
- */
- ERTS_ALC_CPOOL_ASSERT(FBLK_TO_MBC(blk) != crr);
- ERTS_CT_ASSERT(sizeof(ErtsAllctrDDBlock_t) == sizeof(void*));
-#ifdef MBC_ABLK_OFFSET_BITS
- blk->u.carrier = crr;
-#else
- blk->carrier = crr;
-#endif
+ Carrier_t *crr = ErtsContainerStruct(blk, Carrier_t,
+ cpool.homecoming_dd.blk);
+ Block_t* first_blk = MBC_TO_FIRST_BLK(allctr, crr);
+ erts_aint_t iallctr;
ERTS_ALC_CPOOL_ASSERT(ERTS_ALC_IS_CPOOL_ENABLED(allctr));
ERTS_ALC_CPOOL_ASSERT(allctr == crr->cpool.orig_allctr);
- ERTS_ALC_CPOOL_ASSERT(((erts_aint_t) allctr)
- != (erts_smp_atomic_read_nob(&crr->allctr)
- & ~ERTS_CRR_ALCTR_FLG_MASK));
-
- erts_smp_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr));
- schedule_dealloc_carrier(allctr, crr);
+ iallctr = erts_smp_atomic_read_nob(&crr->allctr);
+ ASSERT(iallctr & ERTS_CRR_ALCTR_FLG_HOMECOMING);
+ while (1) {
+ if ((iallctr & (~ERTS_CRR_ALCTR_FLG_MASK |
+ ERTS_CRR_ALCTR_FLG_IN_POOL))
+ == (erts_aint_t)allctr) {
+ /*
+ * Carrier is home (mine and not in pool)
+ */
+ ASSERT(!(iallctr & ERTS_CRR_ALCTR_FLG_BUSY));
+ erts_smp_atomic_set_nob(&crr->allctr, (erts_aint_t)allctr);
+ if (IS_FREE_LAST_MBC_BLK(first_blk))
+ dealloc_my_carrier(allctr, crr);
+ else
+ ASSERT(crr->cpool.state == ERTS_MBC_IS_HOME);
+ }
+ else {
+ erts_aint_t exp = iallctr;
+ erts_aint_t want = iallctr & ~ERTS_CRR_ALCTR_FLG_HOMECOMING;
+
+ iallctr = erts_smp_atomic_cmpxchg_nob(&crr->allctr,
+ want,
+ exp);
+ if (iallctr != exp)
+ continue; /* retry */
+
+ ASSERT(crr->cpool.state != ERTS_MBC_IS_HOME);
+ unlink_abandoned_carrier(crr);
+ if (iallctr & ERTS_CRR_ALCTR_FLG_IN_POOL)
+ poolify_my_carrier(allctr, crr);
+ else
+ crr->cpool.state = ERTS_MBC_WAS_TRAITOR;
+ }
+ break;
+ }
}
else {
+ ASSERT(IS_SBC_BLK(blk) || (ABLK_TO_MBC(blk) !=
+ ErtsContainerStruct(blk, Carrier_t,
+ cpool.homecoming_dd.blk)));
INC_CC(allctr->calls.this_free);
@@ -2157,14 +2124,20 @@ enqueue_dealloc_other_instance(ErtsAlcType_t type,
erts_alloc_notify_delayed_dealloc(allctr->ix);
}
-#endif
-
-#ifdef ERTS_SMP
-static void
-set_new_allctr_abandon_limit(Allctr_t *allctr);
-static void
-abandon_carrier(Allctr_t *allctr, Carrier_t *crr);
-
+static ERTS_INLINE void
+update_pooled_tree(Allctr_t *allctr, Carrier_t *crr, Uint blk_sz)
+{
+ if (allctr == crr->cpool.orig_allctr && crr->cpool.state == ERTS_MBC_WAS_POOLED) {
+ /*
+ * Update pooled_tree with a potentially new (larger) max_sz
+ */
+ AOFF_RBTree_t* crr_node = &crr->cpool.pooled;
+ if (blk_sz > crr_node->hdr.bhdr) {
+ crr_node->hdr.bhdr = blk_sz;
+ erts_aoff_larger_max_size(crr_node);
+ }
+ }
+}
static ERTS_INLINE void
check_abandon_carrier(Allctr_t *allctr, Block_t *fblk, Carrier_t **busy_pcrr_pp)
@@ -2172,9 +2145,6 @@ check_abandon_carrier(Allctr_t *allctr, Block_t *fblk, Carrier_t **busy_pcrr_pp)
Carrier_t *crr;
UWord ncrr_in_pool, largest_fblk;
- if (busy_pcrr_pp && *busy_pcrr_pp)
- return;
-
if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr))
return;
@@ -2263,6 +2233,7 @@ dealloc_block(Allctr_t *allctr, void *ptr, ErtsAlcFixList_t *fix, int dec_cc_on_
else {
Carrier_t *busy_pcrr_p;
Allctr_t *used_allctr;
+
used_allctr = get_used_allctr(allctr, ERTS_ALC_TS_PREF_LOCK_NO, ptr,
NULL, &busy_pcrr_p);
if (used_allctr == allctr) {
@@ -2279,10 +2250,10 @@ dealloc_block(Allctr_t *allctr, void *ptr, ErtsAlcFixList_t *fix, int dec_cc_on_
/* Carrier migrated; need to redirect block to new owner... */
int cinit = used_allctr->dd.ix - allctr->dd.ix;
- ERTS_ALC_CPOOL_ASSERT(!busy_pcrr_p);
+ ERTS_ALC_CPOOL_ASSERT(!busy_pcrr_p);
- if (dec_cc_on_redirect)
- DEC_CC(allctr->calls.this_free);
+ if (dec_cc_on_redirect)
+ DEC_CC(allctr->calls.this_free);
if (ddq_enqueue(&used_allctr->dd.q, ptr, cinit))
erts_alloc_notify_delayed_dealloc(used_allctr->ix);
}
@@ -2527,16 +2498,17 @@ mbc_free(Allctr_t *allctr, void *p, Carrier_t **busy_pcrr_pp)
ASSERT(blk_sz % sizeof(Unit_t) == 0);
ASSERT(IS_MBC_BLK(blk));
- if (is_first_blk
- && is_last_blk
- && allctr->main_carrier != FIRST_BLK_TO_MBC(allctr, blk)) {
- destroy_carrier(allctr, blk, busy_pcrr_pp);
+ if (is_first_blk && is_last_blk && crr != allctr->main_carrier) {
+ destroy_carrier(allctr, blk, busy_pcrr_pp);
}
else {
(*allctr->link_free_block)(allctr, blk);
HARD_CHECK_BLK_CARRIER(allctr, blk);
#ifdef ERTS_SMP
- check_abandon_carrier(allctr, blk, busy_pcrr_pp);
+ if (busy_pcrr_pp && *busy_pcrr_pp)
+ update_pooled_tree(allctr, crr, blk_sz);
+ else
+ check_abandon_carrier(allctr, blk, busy_pcrr_pp);
#endif
}
}
@@ -2572,8 +2544,19 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,
#else /* !MBC_REALLOC_ALWAYS_MOVES */
#ifdef ERTS_SMP
- if (busy_pcrr_pp && *busy_pcrr_pp)
- goto realloc_move; /* Don't want to use carrier in pool */
+ if (busy_pcrr_pp && *busy_pcrr_pp) {
+ /*
+ * Don't want to use carrier in pool
+ */
+ new_p = mbc_alloc(allctr, size);
+ if (!new_p)
+ return NULL;
+ new_blk = UMEM2BLK(new_p);
+ ASSERT(!(IS_MBC_BLK(new_blk) && ABLK_TO_MBC(new_blk) == *busy_pcrr_pp));
+ sys_memcpy(new_p, p, MIN(size, old_blk_sz - ABLK_HDR_SZ));
+ mbc_free(allctr, p, busy_pcrr_pp);
+ return new_p;
+ }
#endif
get_blk_sz = blk_sz = UMEMSZ2BLKSZ(allctr, size);
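Illustrative sketch (not part of the patch): the busy-carrier branch above follows the classic "realloc by move" pattern. In plain C, using malloc/free as stand-ins for the allocator's own routines, the pattern amounts to:

    #include <stdlib.h>
    #include <string.h>

    /* "Realloc by move": allocate a fresh block, copy the smaller of the
     * old and new payload sizes, then free the old block (which in
     * alloc_util may turn into a delayed/remote free). */
    void *realloc_by_move(void *old_p, size_t old_sz, size_t new_sz)
    {
        void *new_p = malloc(new_sz);
        if (!new_p)
            return NULL;                 /* old block stays valid on failure */
        memcpy(new_p, old_p, old_sz < new_sz ? old_sz : new_sz);
        free(old_p);
        return new_p;
    }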
@@ -2809,9 +2792,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,
if (cand_blk_sz < get_blk_sz) {
/* We wont fit in cand_blk get a new one */
-#ifdef ERTS_SMP
- realloc_move:
-#endif
+
#endif /* !MBC_REALLOC_ALWAYS_MOVES */
new_p = mbc_alloc(allctr, size);
@@ -2916,8 +2897,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,
#ifdef ERTS_SMP
#define ERTS_ALC_MAX_DEALLOC_CARRIER 10
-#define ERTS_ALC_CPOOL_MAX_FETCH_INSPECT 20
-#define ERTS_ALC_CPOOL_MAX_TRAITOR_INSPECT 10
+#define ERTS_ALC_CPOOL_MAX_FETCH_INSPECT 100
#define ERTS_ALC_CPOOL_CHECK_LIMIT_COUNT 100
#define ERTS_ALC_CPOOL_MAX_FAILED_STAT_READS 3
@@ -3081,19 +3061,18 @@ cpool_insert(Allctr_t *allctr, Carrier_t *crr)
ErtsAlcCPoolData_t *cpd1p, *cpd2p;
erts_aint_t val;
ErtsAlcCPoolData_t *sentinel = &carrier_pool[allctr->alloc_no].sentinel;
+ Allctr_t *orig_allctr = crr->cpool.orig_allctr;
ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_INVALID /* testcase */
|| erts_thr_progress_is_managed_thread());
- ERTS_ALC_CPOOL_ASSERT(erts_smp_atomic_read_nob(&crr->allctr)
- == (erts_aint_t) allctr);
- erts_atomic_add_nob(&allctr->cpool.stat.blocks_size,
+ erts_atomic_add_nob(&orig_allctr->cpool.stat.blocks_size,
(erts_aint_t) crr->cpool.blocks_size);
- erts_atomic_add_nob(&allctr->cpool.stat.no_blocks,
+ erts_atomic_add_nob(&orig_allctr->cpool.stat.no_blocks,
(erts_aint_t) crr->cpool.blocks);
- erts_atomic_add_nob(&allctr->cpool.stat.carriers_size,
+ erts_atomic_add_nob(&orig_allctr->cpool.stat.carriers_size,
(erts_aint_t) CARRIER_SZ(crr));
- erts_atomic_inc_nob(&allctr->cpool.stat.no_carriers);
+ erts_atomic_inc_nob(&orig_allctr->cpool.stat.no_carriers);
/*
* We search in 'next' direction and begin by passing
@@ -3154,8 +3133,6 @@ cpool_insert(Allctr_t *allctr, Carrier_t *crr)
(erts_aint_t) &crr->cpool,
(erts_aint_t) cpd1p);
- erts_smp_atomic_set_wb(&crr->allctr,
- ((erts_aint_t) allctr)|ERTS_CRR_ALCTR_FLG_IN_POOL);
LTTNG3(carrier_pool_put, ERTS_ALC_A2AD(allctr->alloc_no), allctr->ix, CARRIER_SZ(crr));
}
@@ -3257,130 +3234,126 @@ cpool_delete(Allctr_t *allctr, Allctr_t *prev_allctr, Carrier_t *crr)
static Carrier_t *
cpool_fetch(Allctr_t *allctr, UWord size)
{
- int i, i_stop, has_passed_sentinel;
+ enum { IGNORANT, HAS_SEEN_SENTINEL, THE_LAST_ONE } loop_state;
+ int i;
Carrier_t *crr;
+ Carrier_t *reinsert_crr = NULL;
ErtsAlcCPoolData_t *cpdp;
- ErtsAlcCPoolData_t *cpool_entrance;
+ ErtsAlcCPoolData_t *cpool_entrance = NULL;
ErtsAlcCPoolData_t *sentinel;
- ErtsDoubleLink_t* dl;
- ErtsDoubleLink_t* first_old_traitor;
ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_INVALID /* testcase */
|| erts_thr_progress_is_managed_thread());
i = ERTS_ALC_CPOOL_MAX_FETCH_INSPECT;
- first_old_traitor = allctr->cpool.traitor_list.next;
- cpool_entrance = NULL;
LTTNG3(carrier_pool_get, ERTS_ALC_A2AD(allctr->alloc_no), allctr->ix, (unsigned long)size);
/*
- * Search my own pooled_list,
+ * Search my own pooled_tree,
* i.e my abandoned carriers that were in the pool last time I checked.
*/
+ do {
+ erts_aint_t exp, act;
+
+ crr = aoff_lookup_pooled_mbc(allctr, size);
+ if (!crr)
+ break;
+
+ ASSERT(crr->cpool.state == ERTS_MBC_WAS_POOLED);
+ ASSERT(crr->cpool.orig_allctr == allctr);
+
+ aoff_remove_pooled_mbc(allctr, crr);
+
+ exp = erts_smp_atomic_read_nob(&crr->allctr);
+ if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) {
+ ASSERT((exp & ~ERTS_CRR_ALCTR_FLG_MASK) == (erts_aint_t)allctr);
+ if (erts_atomic_read_nob(&crr->cpool.max_size) < size) {
+ /*
+ * This carrier has been fetched and inserted back again
+ * by a foreign allocator. That's why it has a stale search size.
+ */
+ ASSERT(exp & ERTS_CRR_ALCTR_FLG_HOMECOMING);
+ crr->cpool.pooled.hdr.bhdr = erts_atomic_read_nob(&crr->cpool.max_size);
+ aoff_add_pooled_mbc(allctr, crr);
+ INC_CC(allctr->cpool.stat.skip_size);
+ continue;
+ }
+ else if (exp & ERTS_CRR_ALCTR_FLG_BUSY) {
+ /*
+ * This must be our own carrier as part of a realloc call.
+ * Skip it to make things simpler.
+ * Must wait to re-insert to not be found again by lookup.
+ */
+ ASSERT(!reinsert_crr);
+ reinsert_crr = crr;
+ INC_CC(allctr->cpool.stat.skip_busy);
+ continue;
+ }
+
+ /* Try to fetch it... */
+ act = erts_smp_atomic_cmpxchg_mb(&crr->allctr,
+ exp & ~ERTS_CRR_ALCTR_FLG_IN_POOL,
+ exp);
+ if (act == exp) {
+ cpool_delete(allctr, allctr, crr);
+ crr->cpool.state = ERTS_MBC_IS_HOME;
+
+ if (reinsert_crr)
+ aoff_add_pooled_mbc(allctr, reinsert_crr);
+ return crr;
+ }
+ exp = act;
+ INC_CC(allctr->cpool.stat.skip_race);
+ }
+ else
+ INC_CC(allctr->cpool.stat.skip_not_pooled);
- dl = allctr->cpool.pooled_list.next;
- while(dl != &allctr->cpool.pooled_list) {
- erts_aint_t exp, act;
- crr = (Carrier_t *) (((char *) dl) - offsetof(Carrier_t, cpool.abandoned));
+ /* Not in pool anymore */
+ ASSERT(!(exp & ERTS_CRR_ALCTR_FLG_BUSY));
+ crr->cpool.state = ERTS_MBC_WAS_TRAITOR;
- ASSERT(!is_in_list(&allctr->cpool.traitor_list, dl));
- ASSERT(crr->cpool.orig_allctr == allctr);
- dl = dl->next;
- exp = erts_smp_atomic_read_rb(&crr->allctr);
- if ((exp & ERTS_CRR_ALCTR_FLG_MASK) == ERTS_CRR_ALCTR_FLG_IN_POOL
- && erts_atomic_read_nob(&crr->cpool.max_size) >= size) {
- /* Try to fetch it... */
- act = erts_smp_atomic_cmpxchg_mb(&crr->allctr,
- (erts_aint_t) allctr,
- exp);
- if (act == exp) {
- cpool_delete(allctr, ((Allctr_t *) (act & ~ERTS_CRR_ALCTR_FLG_MASK)), crr);
- unlink_abandoned_carrier(crr);
+ }while (--i > 0);
- /* Move sentinel to continue next search from here */
- relink_edl_before(dl, &allctr->cpool.pooled_list);
- return crr;
- }
- exp = act;
- }
- if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) {
- if (!cpool_entrance)
- cpool_entrance = &crr->cpool;
- }
- else { /* Not in pool, move to traitor_list */
- unlink_abandoned_carrier(crr);
- link_abandoned_carrier(&allctr->cpool.traitor_list, crr);
- }
- if (--i <= 0) {
- /* Move sentinel to continue next search from here */
- relink_edl_before(dl, &allctr->cpool.pooled_list);
- return NULL;
- }
- }
+ if (reinsert_crr)
+ aoff_add_pooled_mbc(allctr, reinsert_crr);
- /* Now search traitor_list.
- * i.e carriers employed by other allocators last time I checked.
- * They might have been abandoned since then.
+ /*
+ * Try find a nice cpool_entrance
*/
-
- i_stop = (i < ERTS_ALC_CPOOL_MAX_TRAITOR_INSPECT ?
- 0 : i - ERTS_ALC_CPOOL_MAX_TRAITOR_INSPECT);
- dl = first_old_traitor;
- while(dl != &allctr->cpool.traitor_list) {
- erts_aint_t exp, act;
- crr = (Carrier_t *) (((char *) dl) - offsetof(Carrier_t, cpool.abandoned));
- ASSERT(dl != &allctr->cpool.pooled_list);
- ASSERT(crr->cpool.orig_allctr == allctr);
- dl = dl->next;
- exp = erts_smp_atomic_read_rb(&crr->allctr);
- if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) {
- if (!(exp & ERTS_CRR_ALCTR_FLG_BUSY)
- && erts_atomic_read_nob(&crr->cpool.max_size) >= size) {
- /* Try to fetch it... */
- act = erts_smp_atomic_cmpxchg_mb(&crr->allctr,
- (erts_aint_t) allctr,
- exp);
- if (act == exp) {
- cpool_delete(allctr, ((Allctr_t *) (act & ~ERTS_CRR_ALCTR_FLG_MASK)), crr);
- unlink_abandoned_carrier(crr);
-
- /* Move sentinel to continue next search from here */
- relink_edl_before(dl, &allctr->cpool.traitor_list);
- return crr;
- }
- exp = act;
- }
- if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) {
- if (!cpool_entrance)
- cpool_entrance = &crr->cpool;
-
- /* Move to pooled_list */
- unlink_abandoned_carrier(crr);
- link_abandoned_carrier(&allctr->cpool.pooled_list, crr);
- }
- }
- if (--i <= i_stop) {
- /* Move sentinel to continue next search from here */
- relink_edl_before(dl, &allctr->cpool.traitor_list);
- if (i > 0)
- break;
- else
- return NULL;
- }
+ while (allctr->cpool.pooled_tree) {
+ erts_aint_t iallctr;
+
+ crr = ErtsContainerStruct(allctr->cpool.pooled_tree, Carrier_t, cpool.pooled);
+ iallctr = erts_smp_atomic_read_nob(&crr->allctr);
+ if (iallctr & ERTS_CRR_ALCTR_FLG_IN_POOL) {
+ cpool_entrance = &crr->cpool;
+ break;
+ }
+ /* Not in pool anymore */
+ ASSERT(!(iallctr & ERTS_CRR_ALCTR_FLG_BUSY));
+ aoff_remove_pooled_mbc(allctr, crr);
+ crr->cpool.state = ERTS_MBC_WAS_TRAITOR;
+
+ if (--i <= 0) {
+ INC_CC(allctr->cpool.stat.fail_pooled);
+ return NULL;
+ }
}
+
/*
* Finally search the shared pool and try employ foreign carriers
*/
-
sentinel = &carrier_pool[allctr->alloc_no].sentinel;
if (cpool_entrance) {
- /* We saw a pooled carried above, use it as entrance into the pool
+ /*
+ * We saw a pooled carrier above, use it as entrance into the pool
*/
cpdp = cpool_entrance;
}
else {
- /* No pooled carried seen above. Start search at cpool sentinel,
+ /*
+ * No pooled carrier seen above. Start search at cpool sentinel,
* but begin by passing one element before trying to fetch.
* This in order to avoid contention with threads inserting elements.
*/
@@ -3390,8 +3363,8 @@ cpool_fetch(Allctr_t *allctr, UWord size)
goto check_dc_list;
}
- has_passed_sentinel = 0;
- while (1) {
+ loop_state = IGNORANT;
+ do {
erts_aint_t exp;
cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev));
if (cpdp == cpool_entrance) {
@@ -3400,38 +3373,52 @@ cpool_fetch(Allctr_t *allctr, UWord size)
if (cpdp == sentinel)
break;
}
- i = 0; /* Last one to inspect */
+ loop_state = THE_LAST_ONE;
}
else if (cpdp == sentinel) {
- if (has_passed_sentinel) {
+ if (loop_state == HAS_SEEN_SENTINEL) {
/* We been here before. cpool_entrance must have been removed */
+ INC_CC(allctr->cpool.stat.entrance_removed);
break;
}
cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev));
if (cpdp == sentinel)
break;
- has_passed_sentinel = 1;
+ loop_state = HAS_SEEN_SENTINEL;
}
- crr = (Carrier_t *)(((char *)cpdp) - offsetof(Carrier_t, cpool));
+ crr = ErtsContainerStruct(cpdp, Carrier_t, cpool);
exp = erts_smp_atomic_read_rb(&crr->allctr);
- if (((exp & (ERTS_CRR_ALCTR_FLG_MASK)) == ERTS_CRR_ALCTR_FLG_IN_POOL)
- && (erts_atomic_read_nob(&cpdp->max_size) >= size)) {
+
+ if (erts_atomic_read_nob(&cpdp->max_size) < size) {
+ INC_CC(allctr->cpool.stat.skip_size);
+ }
+ else if ((exp & (ERTS_CRR_ALCTR_FLG_IN_POOL | ERTS_CRR_ALCTR_FLG_BUSY))
+ == ERTS_CRR_ALCTR_FLG_IN_POOL) {
erts_aint_t act;
- /* Try to fetch it... */
- act = erts_smp_atomic_cmpxchg_mb(&crr->allctr,
- (erts_aint_t) allctr,
- exp);
+ erts_aint_t want = (((erts_aint_t) allctr)
+ | (exp & ERTS_CRR_ALCTR_FLG_HOMECOMING));
+ /* Try to fetch it... */
+ act = erts_smp_atomic_cmpxchg_mb(&crr->allctr, want, exp);
if (act == exp) {
cpool_delete(allctr, ((Allctr_t *) (act & ~ERTS_CRR_ALCTR_FLG_MASK)), crr);
if (crr->cpool.orig_allctr == allctr) {
unlink_abandoned_carrier(crr);
- }
+ crr->cpool.state = ERTS_MBC_IS_HOME;
+ }
return crr;
}
}
- if (--i <= 0)
+
+ if (exp & ERTS_CRR_ALCTR_FLG_BUSY)
+ INC_CC(allctr->cpool.stat.skip_busy);
+ else
+ INC_CC(allctr->cpool.stat.skip_race);
+
+ if (--i <= 0) {
+ INC_CC(allctr->cpool.stat.fail_shared);
return NULL;
- }
+ }
+ }while (loop_state != THE_LAST_ONE);
check_dc_list:
/* Last; check our own pending dealloc carrier list... */
@@ -3440,23 +3427,23 @@ check_dc_list:
if (erts_atomic_read_nob(&crr->cpool.max_size) >= size) {
Block_t* blk;
unlink_carrier(&allctr->cpool.dc_list, crr);
-#ifdef ERTS_ALC_CPOOL_DEBUG
- ERTS_ALC_CPOOL_ASSERT(erts_smp_atomic_xchg_nob(&crr->allctr,
- ((erts_aint_t) allctr))
- == (((erts_aint_t) allctr) & ~ERTS_CRR_ALCTR_FLG_MASK));
-#else
- erts_smp_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr));
-#endif
+ ERTS_ALC_CPOOL_ASSERT(erts_smp_atomic_read_nob(&crr->allctr)
+ == ((erts_aint_t) allctr));
blk = MBC_TO_FIRST_BLK(allctr, crr);
ASSERT(FBLK_TO_MBC(blk) == crr);
allctr->link_free_block(allctr, blk);
return crr;
}
crr = crr->prev;
- if (--i <= 0)
+ if (--i <= 0) {
+ INC_CC(allctr->cpool.stat.fail_pend_dealloc);
return NULL;
+ }
}
+ if (i != ERTS_ALC_CPOOL_MAX_FETCH_INSPECT)
+ INC_CC(allctr->cpool.stat.fail);
+
return NULL;
}
@@ -3511,9 +3498,6 @@ static void
schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr)
{
Allctr_t *orig_allctr;
- Block_t *blk;
- int check_pending_dealloc;
- erts_aint_t max_size;
ASSERT(IS_MB_CARRIER(crr));
@@ -3524,9 +3508,17 @@ schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr)
orig_allctr = crr->cpool.orig_allctr;
- if (allctr != orig_allctr) {
- int cinit = orig_allctr->dd.ix - allctr->dd.ix;
-
+ if (allctr == orig_allctr) {
+ if (!(erts_smp_atomic_read_nob(&crr->allctr) & ERTS_CRR_ALCTR_FLG_HOMECOMING)) {
+ dealloc_my_carrier(allctr, crr);
+ }
+ /*else
+ * Carrier was abandoned earlier by other thread and
+ * is still waiting for us in dd-queue.
+ * handle_delayed_dealloc() will handle it when crr is dequeued.
+ */
+ }
+ else {
/*
* We send the carrier to its origin for deallocation.
* This in order:
@@ -3535,29 +3527,39 @@ schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr)
* - to ensure that we always only reuse empty carriers
* originating from our own thread specific mseg_alloc
* instance which is beneficial on NUMA systems.
- *
- * The receiver will recognize that this is a carrier to
- * deallocate (and not a block which is the common case)
- * since the block is an mbc block that is free and last
- * in the carrier.
*/
- blk = MBC_TO_FIRST_BLK(allctr, crr);
- ERTS_ALC_CPOOL_ASSERT(IS_FREE_LAST_MBC_BLK(blk));
+ erts_aint_t iallctr;
+#ifdef ERTS_ALC_CPOOL_DEBUG
+ Block_t* first_blk = MBC_TO_FIRST_BLK(allctr, crr);
+ ERTS_ALC_CPOOL_ASSERT(IS_FREE_LAST_MBC_BLK(first_blk));
- ERTS_ALC_CPOOL_ASSERT(IS_MBC_FIRST_ABLK(allctr, blk));
- ERTS_ALC_CPOOL_ASSERT(crr == FBLK_TO_MBC(blk));
- ERTS_ALC_CPOOL_ASSERT(crr == FIRST_BLK_TO_MBC(allctr, blk));
- ERTS_ALC_CPOOL_ASSERT(((erts_aint_t) allctr)
- == (erts_smp_atomic_read_nob(&crr->allctr)
- & ~ERTS_CRR_ALCTR_FLG_MASK));
+ ERTS_ALC_CPOOL_ASSERT(IS_MBC_FIRST_ABLK(allctr, first_blk));
+ ERTS_ALC_CPOOL_ASSERT(crr == FBLK_TO_MBC(first_blk));
+ ERTS_ALC_CPOOL_ASSERT(crr == FIRST_BLK_TO_MBC(allctr, first_blk));
+ ERTS_ALC_CPOOL_ASSERT((erts_smp_atomic_read_nob(&crr->allctr)
+ & ~ERTS_CRR_ALCTR_FLG_HOMECOMING)
+ == (erts_aint_t) allctr);
+#endif
- if (ddq_enqueue(&orig_allctr->dd.q, BLK2UMEM(blk), cinit))
- erts_alloc_notify_delayed_dealloc(orig_allctr->ix);
- return;
+ iallctr = (erts_aint_t)orig_allctr | ERTS_CRR_ALCTR_FLG_HOMECOMING;
+ if (!(erts_smp_atomic_xchg_nob(&crr->allctr, iallctr)
+ & ERTS_CRR_ALCTR_FLG_HOMECOMING)) {
+ enqueue_homecoming(allctr, crr);
+ }
}
+}
- if (is_abandoned(crr))
- unlink_abandoned_carrier(crr);
+static void dealloc_my_carrier(Allctr_t *allctr, Carrier_t *crr)
+{
+ Block_t *blk;
+ int check_pending_dealloc;
+ erts_aint_t max_size;
+
+ ERTS_ALC_CPOOL_ASSERT(allctr == crr->cpool.orig_allctr);
+ if (is_abandoned(crr)) {
+ unlink_abandoned_carrier(crr);
+ crr->cpool.state = ERTS_MBC_IS_HOME;
+ }
if (crr->cpool.thr_prgr == ERTS_THR_PRGR_INVALID
|| erts_thr_progress_has_reached(crr->cpool.thr_prgr)) {
@@ -3589,6 +3591,7 @@ schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr)
static ERTS_INLINE void
cpool_init_carrier_data(Allctr_t *allctr, Carrier_t *crr)
{
+ crr->cpool.homecoming_dd.blk.bhdr = HOMECOMING_MBC_BLK_HDR;
erts_atomic_init_nob(&crr->cpool.next, ERTS_AINT_NULL);
erts_atomic_init_nob(&crr->cpool.prev, ERTS_AINT_NULL);
crr->cpool.orig_allctr = allctr;
@@ -3607,8 +3610,7 @@ cpool_init_carrier_data(Allctr_t *allctr, Carrier_t *crr)
limit = (csz/100)*allctr->cpool.util_limit;
crr->cpool.abandon_limit = limit;
}
- crr->cpool.abandoned.next = NULL;
- crr->cpool.abandoned.prev = NULL;
+ crr->cpool.state = ERTS_MBC_IS_HOME;
}
static void
@@ -3634,18 +3636,62 @@ set_new_allctr_abandon_limit(Allctr_t *allctr)
static void
abandon_carrier(Allctr_t *allctr, Carrier_t *crr)
{
- STAT_MBC_CPOOL_INSERT(allctr, crr);
+ erts_aint_t iallctr;
- unlink_carrier(&allctr->mbc_list, crr);
- if (crr->cpool.orig_allctr == allctr) {
- link_abandoned_carrier(&allctr->cpool.pooled_list, crr);
- }
+ STAT_MBC_ABANDON(allctr, crr);
+ unlink_carrier(&allctr->mbc_list, crr);
allctr->remove_mbc(allctr, crr);
+ set_new_allctr_abandon_limit(allctr);
cpool_insert(allctr, crr);
- set_new_allctr_abandon_limit(allctr);
+
+ iallctr = erts_smp_atomic_read_nob(&crr->allctr);
+ if (allctr == crr->cpool.orig_allctr) {
+ /* preserve HOMECOMING flag */
+ ASSERT((iallctr & ~ERTS_CRR_ALCTR_FLG_HOMECOMING) == (erts_aint_t)allctr);
+ erts_smp_atomic_set_wb(&crr->allctr, iallctr | ERTS_CRR_ALCTR_FLG_IN_POOL);
+ poolify_my_carrier(allctr, crr);
+ }
+ else {
+ ASSERT((iallctr & ~ERTS_CRR_ALCTR_FLG_HOMECOMING) == (erts_aint_t)allctr);
+ iallctr = ((erts_aint_t)crr->cpool.orig_allctr |
+ ERTS_CRR_ALCTR_FLG_HOMECOMING |
+ ERTS_CRR_ALCTR_FLG_IN_POOL);
+ if (!(erts_smp_atomic_xchg_wb(&crr->allctr, iallctr)
+ & ERTS_CRR_ALCTR_FLG_HOMECOMING)) {
+
+ enqueue_homecoming(allctr, crr);
+ }
+ }
+}
+
+static void
+enqueue_homecoming(Allctr_t* allctr, Carrier_t* crr)
+{
+ Allctr_t* orig_allctr = crr->cpool.orig_allctr;
+ const int cinit = orig_allctr->dd.ix - allctr->dd.ix;
+ Block_t* dd_blk = &crr->cpool.homecoming_dd.blk;
+
+ /*
+ * The receiver will recognize this as a carrier
+ * (and not a block which is the common case)
+ * since the block header is HOMECOMING_MBC_BLK_HDR.
+ */
+ ASSERT(dd_blk->bhdr == HOMECOMING_MBC_BLK_HDR);
+ if (ddq_enqueue(&orig_allctr->dd.q, BLK2UMEM(dd_blk), cinit))
+ erts_alloc_notify_delayed_dealloc(orig_allctr->ix);
+}
+
+static void
+poolify_my_carrier(Allctr_t *allctr, Carrier_t *crr)
+{
+ ERTS_ALC_CPOOL_ASSERT(allctr == crr->cpool.orig_allctr);
+
+ crr->cpool.pooled.hdr.bhdr = erts_atomic_read_nob(&crr->cpool.max_size);
+ aoff_add_pooled_mbc(allctr, crr);
+ crr->cpool.state = ERTS_MBC_WAS_POOLED;
}
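Illustrative sketch (not part of the patch): the HOMECOMING flag doubles as an "already enqueued" marker, so a carrier is pushed onto its owner's delayed-dealloc queue only by the thread that flips the flag from clear to set. A generic C11 version of that enqueue-once idiom, with hypothetical names, might be:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define FLG_ENQUEUED 1u              /* hypothetical "homecoming" bit */

    struct item {
        atomic_uint flags;
        /* ... payload and queue link ... */
    };

    /* Returns true only for the caller that transitions the flag from
     * clear to set, i.e. the one allowed to enqueue the item.  The
     * consumer clears the flag after dequeuing, re-arming the item. */
    bool try_mark_enqueued(struct item *it)
    {
        unsigned prev = atomic_fetch_or_explicit(&it->flags, FLG_ENQUEUED,
                                                 memory_order_acq_rel);
        return (prev & FLG_ENQUEUED) == 0;
    }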
static void
@@ -3804,6 +3850,7 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
crr = cpool_fetch(allctr, blk_sz);
if (crr) {
STAT_MBC_CPOOL_FETCH(allctr, crr);
+ INC_CC(allctr->cpool.stat.fetch);
link_carrier(&allctr->mbc_list, crr);
(*allctr->add_mbc)(allctr, crr);
blk = (*allctr->get_free_block)(allctr, blk_sz, NULL, 0);
@@ -4165,16 +4212,21 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp)
#ifdef ERTS_SMP
if (busy_pcrr_pp && *busy_pcrr_pp) {
+ erts_aint_t iallctr = erts_smp_atomic_read_nob(&crr->allctr);
ERTS_ALC_CPOOL_ASSERT(*busy_pcrr_pp == crr);
- *busy_pcrr_pp = NULL;
- ERTS_ALC_CPOOL_ASSERT(erts_smp_atomic_read_nob(&crr->allctr)
- == (((erts_aint_t) allctr)
- | ERTS_CRR_ALCTR_FLG_IN_POOL
- | ERTS_CRR_ALCTR_FLG_BUSY));
- erts_smp_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr));
+ ERTS_ALC_CPOOL_ASSERT((iallctr & ~ERTS_CRR_ALCTR_FLG_HOMECOMING)
+ == (((erts_aint_t) allctr)
+ | ERTS_CRR_ALCTR_FLG_IN_POOL
+ | ERTS_CRR_ALCTR_FLG_BUSY));
+ ERTS_ALC_CPOOL_ASSERT(allctr == crr->cpool.orig_allctr);
+
+ *busy_pcrr_pp = NULL;
+ erts_smp_atomic_set_nob(&crr->allctr,
+ (iallctr & ~(ERTS_CRR_ALCTR_FLG_IN_POOL |
+ ERTS_CRR_ALCTR_FLG_BUSY)));
cpool_delete(allctr, allctr, crr);
}
- else
+ else
#endif
{
unlink_carrier(&allctr->mbc_list, crr);
@@ -4256,6 +4308,17 @@ static struct {
Eterm mbcs;
#ifdef ERTS_SMP
Eterm mbcs_pool;
+ Eterm fetch;
+ Eterm fail_pooled;
+ Eterm fail_shared;
+ Eterm fail_pend_dealloc;
+ Eterm fail;
+ Eterm skip_size;
+ Eterm skip_busy;
+ Eterm skip_not_pooled;
+ Eterm skip_homecoming;
+ Eterm skip_race;
+ Eterm entrance_removed;
#endif
Eterm sbcs;
@@ -4347,6 +4410,17 @@ init_atoms(Allctr_t *allctr)
AM_INIT(mbcs);
#ifdef ERTS_SMP
AM_INIT(mbcs_pool);
+ AM_INIT(fetch);
+ AM_INIT(fail_pooled);
+ AM_INIT(fail_shared);
+ AM_INIT(fail_pend_dealloc);
+ AM_INIT(fail);
+ AM_INIT(skip_size);
+ AM_INIT(skip_busy);
+ AM_INIT(skip_not_pooled);
+ AM_INIT(skip_homecoming);
+ AM_INIT(skip_race);
+ AM_INIT(entrance_removed);
#endif
AM_INIT(sbcs);
@@ -4632,9 +4706,56 @@ info_cpool(Allctr_t *allctr,
if (hpp || szp) {
res = NIL;
+
+ if (!sz_only) {
+ add_3tup(hpp, szp, &res, am.fail_pooled,
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.fail_pooled)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.fail_pooled)));
+
+ add_3tup(hpp, szp, &res, am.fail_shared,
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.fail_shared)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.fail_shared)));
+
+ add_3tup(hpp, szp, &res, am.fail_pend_dealloc,
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.fail_pend_dealloc)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.fail_pend_dealloc)));
+
+ add_3tup(hpp, szp, &res, am.fail,
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.fail)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.fail)));
+
+ add_3tup(hpp, szp, &res, am.fetch,
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.fetch)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.fetch)));
+
+ add_3tup(hpp, szp, &res, am.skip_size,
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.skip_size)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.skip_size)));
+
+ add_3tup(hpp, szp, &res, am.skip_busy,
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.skip_busy)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.skip_busy)));
+
+ add_3tup(hpp, szp, &res, am.skip_not_pooled,
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.skip_not_pooled)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.skip_not_pooled)));
+
+ add_3tup(hpp, szp, &res, am.skip_homecoming,
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.skip_homecoming)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.skip_homecoming)));
+
+ add_3tup(hpp, szp, &res, am.skip_race,
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.skip_race)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.skip_race)));
+
+ add_3tup(hpp, szp, &res, am.entrance_removed,
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.entrance_removed)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.entrance_removed)));
+
add_2tup(hpp, szp, &res,
am.carriers_size,
bld_unstable_uint(hpp, szp, csz));
+ }
if (!sz_only)
add_2tup(hpp, szp, &res,
am.carriers,
@@ -5587,12 +5708,13 @@ erts_alcu_free_thr_pref(ErtsAlcType_t type, void *extra, void *p)
pref_allctr = get_pref_allctr(extra);
used_allctr = get_used_allctr(pref_allctr, ERTS_ALC_TS_PREF_LOCK_IF_USED,
p, NULL, &busy_pcrr_p);
- if (pref_allctr != used_allctr)
+ if (pref_allctr != used_allctr) {
enqueue_dealloc_other_instance(type,
- used_allctr,
- p,
- (used_allctr->dd.ix
- - pref_allctr->dd.ix));
+ used_allctr,
+ p,
+ (used_allctr->dd.ix
+ - pref_allctr->dd.ix));
+ }
else {
ERTS_ALCU_DBG_CHK_THR_ACCESS(used_allctr);
do_erts_alcu_free(type, used_allctr, p, &busy_pcrr_p);
@@ -6093,10 +6215,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->min_block_size = sz;
}
- allctr->cpool.pooled_list.next = &allctr->cpool.pooled_list;
- allctr->cpool.pooled_list.prev = &allctr->cpool.pooled_list;
- allctr->cpool.traitor_list.next = &allctr->cpool.traitor_list;
- allctr->cpool.traitor_list.prev = &allctr->cpool.traitor_list;
+ allctr->cpool.pooled_tree = NULL;
allctr->cpool.dc_list.first = NULL;
allctr->cpool.dc_list.last = NULL;
allctr->cpool.abandon_limit = 0;
diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h
index 6c0c5ca86a..fbcf16589d 100644
--- a/erts/emulator/beam/erl_alloc_util.h
+++ b/erts/emulator/beam/erl_alloc_util.h
@@ -313,45 +313,8 @@ int erts_alcu_try_set_dyn_param(Allctr_t*, Eterm param, Uint value);
typedef union {char c[ERTS_ALLOC_ALIGN_BYTES]; long l; double d;} Unit_t;
-#ifdef ERTS_SMP
-
-typedef struct ErtsDoubleLink_t_ {
- struct ErtsDoubleLink_t_ *next;
- struct ErtsDoubleLink_t_ *prev;
-}ErtsDoubleLink_t;
-
-typedef struct {
- erts_atomic_t next;
- erts_atomic_t prev;
- Allctr_t *orig_allctr; /* read-only while carrier is alive */
- ErtsThrPrgrVal thr_prgr;
- erts_atomic_t max_size;
- UWord abandon_limit;
- UWord blocks;
- UWord blocks_size;
- ErtsDoubleLink_t abandoned; /* node in pooled_list or traitor_list */
-} ErtsAlcCPoolData_t;
-
-#endif
-
typedef struct Carrier_t_ Carrier_t;
-struct Carrier_t_ {
- UWord chdr;
- Carrier_t *next;
- Carrier_t *prev;
- erts_smp_atomic_t allctr;
-#ifdef ERTS_SMP
- ErtsAlcCPoolData_t cpool; /* Overwritten by block if sbc */
-#endif
-};
-
-#define ERTS_ALC_CARRIER_TO_ALLCTR(C) \
- ((Allctr_t *) (erts_smp_atomic_read_nob(&(C)->allctr) & ~FLG_MASK))
-typedef struct {
- Carrier_t *first;
- Carrier_t *last;
-} CarrierList_t;
typedef struct {
UWord bhdr;
@@ -365,6 +328,22 @@ typedef struct {
#endif
} Block_t;
+typedef union ErtsAllctrDDBlock_t_ ErtsAllctrDDBlock_t;
+
+union ErtsAllctrDDBlock_t_ {
+ erts_atomic_t atmc_next;
+ ErtsAllctrDDBlock_t *ptr_next;
+};
+
+typedef struct {
+ Block_t blk;
+#if !MBC_ABLK_OFFSET_BITS
+ ErtsAllctrDDBlock_t umem_;
+#endif
+} ErtsFakeDDBlock_t;
+
+
+
#define THIS_FREE_BLK_HDR_FLG (((UWord) 1) << 0)
#define PREV_FREE_BLK_HDR_FLG (((UWord) 1) << 1)
#define LAST_BLK_HDR_FLG (((UWord) 1) << 2)
@@ -373,14 +352,13 @@ typedef struct {
(THIS_FREE_BLK_HDR_FLG | PREV_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG)
/*
- * FREE_LAST_MBC_BLK_HDR_FLGS is a special flag combo used for
- * distinguishing empty mbc's from allocated blocks in
- * handle_delayed_dealloc().
+ * HOMECOMING_MBC_BLK_HDR is a special block header combo used for
+ * distinguishing MBC's from allocated blocks in handle_delayed_dealloc().
*/
-#define FREE_LAST_MBC_BLK_HDR_FLGS (THIS_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG)
+#define HOMECOMING_MBC_BLK_HDR (THIS_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG)
#define IS_FREE_LAST_MBC_BLK(B) \
- (((B)->bhdr & FLG_MASK) == FREE_LAST_MBC_BLK_HDR_FLGS)
+ (((B)->bhdr & FLG_MASK) == (THIS_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG))
#define IS_SBC_BLK(B) (((B)->bhdr & FLG_MASK) == SBC_BLK_HDR_FLG)
#define IS_MBC_BLK(B) (!IS_SBC_BLK((B)))
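Illustrative sketch (not part of the patch): the owner recognizes a homecoming carrier on its delayed-dealloc queue by this sentinel header value in the embedded fake block, and recovers the enclosing carrier with a container_of-style offset calculation. Simplified, with hypothetical types:

    #include <stddef.h>
    #include <stdint.h>

    #define CONTAINER_OF(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    #define HOMECOMING_HDR 0x5u          /* stand-in for THIS_FREE|LAST combo */

    typedef struct { uintptr_t bhdr; } block;

    typedef struct {
        block homecoming_blk;            /* fake block embedded in the carrier */
        /* ... rest of the carrier's pool data ... */
    } carrier;

    /* Called for every unit popped from the delayed-dealloc queue. */
    int handle_dd_unit(block *blk)
    {
        if (blk->bhdr == HOMECOMING_HDR) {
            carrier *crr = CONTAINER_OF(blk, carrier, homecoming_blk);
            /* carrier came home: deallocate it or update pooled_tree */
            (void)crr;
            return 1;                    /* it was a carrier */
        }
        return 0;                        /* ordinary deallocated block */
    }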
@@ -404,6 +382,64 @@ typedef struct {
typedef UWord FreeBlkFtr_t; /* Footer of a free block */
+/* This AOFF stuff really belong in erl_ao_firstfit_alloc.h */
+typedef struct AOFF_RBTree_t_ AOFF_RBTree_t;
+struct AOFF_RBTree_t_ {
+ Block_t hdr;
+ AOFF_RBTree_t *parent;
+ AOFF_RBTree_t *left;
+ AOFF_RBTree_t *right;
+ Uint32 flags;
+ Uint32 max_sz; /* of all blocks in this sub-tree */
+};
+#ifdef ERTS_SMP
+void aoff_add_pooled_mbc(Allctr_t*, Carrier_t*);
+void aoff_remove_pooled_mbc(Allctr_t*, Carrier_t*);
+Carrier_t* aoff_lookup_pooled_mbc(Allctr_t*, Uint size);
+void erts_aoff_larger_max_size(AOFF_RBTree_t *node);
+#endif
+
+#ifdef ERTS_SMP
+
+typedef struct {
+ ErtsFakeDDBlock_t homecoming_dd;
+ erts_atomic_t next;
+ erts_atomic_t prev;
+ Allctr_t *orig_allctr; /* read-only while carrier is alive */
+ ErtsThrPrgrVal thr_prgr;
+ erts_atomic_t max_size;
+ UWord abandon_limit;
+ UWord blocks;
+ UWord blocks_size;
+ enum {
+ ERTS_MBC_IS_HOME,
+ ERTS_MBC_WAS_POOLED,
+ ERTS_MBC_WAS_TRAITOR
+ } state;
+ AOFF_RBTree_t pooled; /* node in pooled_tree */
+} ErtsAlcCPoolData_t;
+
+#endif
+
+struct Carrier_t_ {
+ UWord chdr;
+ Carrier_t *next;
+ Carrier_t *prev;
+ erts_smp_atomic_t allctr;
+#ifdef ERTS_SMP
+ ErtsAlcCPoolData_t cpool; /* Overwritten by block if sbc */
+#endif
+};
+
+#define ERTS_ALC_CARRIER_TO_ALLCTR(C) \
+ ((Allctr_t *) (erts_smp_atomic_read_nob(&(C)->allctr) & ~FLG_MASK))
+
+typedef struct {
+ Carrier_t *first;
+ Carrier_t *last;
+} CarrierList_t;
+
+
typedef Uint64 CallCounter_t;
typedef struct {
@@ -441,13 +477,6 @@ typedef struct {
#ifdef ERTS_SMP
-typedef union ErtsAllctrDDBlock_t_ ErtsAllctrDDBlock_t;
-
-union ErtsAllctrDDBlock_t_ {
- erts_atomic_t atmc_next;
- ErtsAllctrDDBlock_t *ptr_next;
-};
-
typedef struct {
ErtsAllctrDDBlock_t marker;
erts_atomic_t last;
@@ -562,15 +591,14 @@ struct Allctr_t_ {
UWord crr_set_flgs;
UWord crr_clr_flgs;
- /* Carriers */
+ /* Carriers *employed* by this allocator */
CarrierList_t mbc_list;
CarrierList_t sbc_list;
#ifdef ERTS_SMP
struct {
- /* pooled_list, traitor list and dc_list contain only
- carriers _created_ by this allocator */
- ErtsDoubleLink_t pooled_list;
- ErtsDoubleLink_t traitor_list;
+ /* pooled_tree and dc_list contain only
+ carriers *created* by this allocator */
+ AOFF_RBTree_t* pooled_tree;
CarrierList_t dc_list;
UWord abandon_limit;
@@ -584,6 +612,17 @@ struct Allctr_t_ {
erts_atomic_t no_blocks;
erts_atomic_t carriers_size;
erts_atomic_t no_carriers;
+ CallCounter_t fail_pooled;
+ CallCounter_t fail_shared;
+ CallCounter_t fail_pend_dealloc;
+ CallCounter_t fail;
+ CallCounter_t fetch;
+ CallCounter_t skip_size;
+ CallCounter_t skip_busy;
+ CallCounter_t skip_not_pooled;
+ CallCounter_t skip_homecoming;
+ CallCounter_t skip_race;
+ CallCounter_t entrance_removed;
} stat;
} cpool;
#endif
@@ -686,7 +725,6 @@ void erts_alcu_assert_failed(char* expr, char* file, int line, char *func);
int is_sbc_blk(Block_t*);
#endif
-
#endif /* #if defined(GET_ERL_ALLOC_UTIL_IMPL)
&& !defined(ERL_ALLOC_UTIL_IMPL__) */
diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.c b/erts/emulator/beam/erl_ao_firstfit_alloc.c
index ad34fb389a..73576c0189 100644
--- a/erts/emulator/beam/erl_ao_firstfit_alloc.c
+++ b/erts/emulator/beam/erl_ao_firstfit_alloc.c
@@ -98,18 +98,6 @@
#define RBT_ASSERT(x)
#endif
-
-/* Types... */
-typedef struct AOFF_RBTree_t_ AOFF_RBTree_t;
-
-struct AOFF_RBTree_t_ {
- Block_t hdr;
- AOFF_RBTree_t *parent;
- AOFF_RBTree_t *left;
- AOFF_RBTree_t *right;
- Uint32 flags;
- Uint32 max_sz; /* of all blocks in this sub-tree */
-};
#define AOFF_BLK_SZ(B) MBC_FBLK_SZ(&(B)->hdr)
/* BF block nodes keeps list of all with equal size
@@ -143,7 +131,7 @@ struct AOFF_Carrier_t_ {
*/
#ifdef HARD_DEBUG
-# define HARD_CHECK_IS_MEMBER(ROOT,NODE) rbt_assert_is_member(ROOT,NODE)
+# define HARD_CHECK_IS_MEMBER(ROOT,NODE) ASSERT(rbt_is_member(ROOT,NODE))
# define HARD_CHECK_TREE(CRR,ORDER,ROOT,SZ) check_tree(CRR, ORDER, ROOT, SZ)
static AOFF_RBTree_t * check_tree(Carrier_t*, enum AOFFSortOrder, AOFF_RBTree_t*, Uint);
#else
@@ -186,6 +174,27 @@ static ERTS_INLINE void lower_max_size(AOFF_RBTree_t *node,
else ASSERT(new_max == old_max);
}
+#ifdef ERTS_SMP
+/*
+ * Set possibly new larger 'max_sz' of node and propagate change toward root
+ */
+void erts_aoff_larger_max_size(AOFF_RBTree_t *node)
+{
+ AOFF_RBTree_t* x = node;
+ const Uint new_sz = node->hdr.bhdr;
+
+ ASSERT(!x->left || x->left->max_sz <= x->max_sz);
+ ASSERT(!x->right || x->right->max_sz <= x->max_sz);
+
+ while (new_sz > x->max_sz) {
+ x->max_sz = new_sz;
+ x = x->parent;
+ if (!x)
+ break;
+ }
+}
+#endif
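Illustrative sketch (not part of the patch): the max_sz augmentation maintained above is what lets a lookup prune whole subtrees that cannot hold a large-enough block. A simplified search over such an augmented tree, with hypothetical field names, might read:

    #include <stddef.h>

    typedef struct node node;
    struct node {
        size_t sz;          /* size keyed in this node (hdr.bhdr above) */
        size_t max_sz;      /* largest sz anywhere in this subtree      */
        node *left, *right;
    };

    /* Return some node in the subtree whose sz >= want, or NULL.
     * Subtrees whose max_sz is too small are pruned without a visit. */
    node *search_ge(node *root, size_t want)
    {
        node *x = root;
        while (x && x->max_sz >= want) {
            if (x->sz >= want)
                return x;
            /* descend into a child that can still satisfy the request */
            x = (x->left && x->left->max_sz >= want) ? x->left : x->right;
        }
        return NULL;
    }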
+
/* Compare nodes for both carrier and block trees */
static ERTS_INLINE SWord cmp_blocks(enum AOFFSortOrder order,
AOFF_RBTree_t* lhs, AOFF_RBTree_t* rhs)
@@ -246,9 +255,6 @@ static UWord aoff_largest_fblk_in_mbc(Allctr_t*, Carrier_t*);
static void rbt_delete(AOFF_RBTree_t** root, AOFF_RBTree_t* del);
static void rbt_insert(enum AOFFSortOrder, AOFF_RBTree_t** root, AOFF_RBTree_t* blk);
static AOFF_RBTree_t* rbt_search(AOFF_RBTree_t* root, Uint size);
-#ifdef HARD_DEBUG
-static int rbt_assert_is_member(AOFF_RBTree_t* root, AOFF_RBTree_t* node);
-#endif
static Eterm info_options(Allctr_t *, char *, fmtfn_t *, void *, Uint **, Uint *);
static void init_atoms(void);
@@ -753,19 +759,20 @@ aoff_link_free_block(Allctr_t *allctr, Block_t *block)
rbt_insert(alc->blk_order, &blk_crr->root, blk);
- /* Update the carrier tree with a potentially new (larger) max_sz
- */
+ /*
+ * Update carrier tree with a potentially new (larger) max_sz
+ */
crr_node = &blk_crr->rbt_node;
if (blk_sz > crr_node->hdr.bhdr) {
- ASSERT(blk_sz == blk_crr->root->max_sz);
- crr_node->hdr.bhdr = blk_sz;
- while (blk_sz > crr_node->max_sz) {
- crr_node->max_sz = blk_sz;
- crr_node = crr_node->parent;
- if (!crr_node) break;
- }
+ ASSERT(blk_sz == blk_crr->root->max_sz);
+ crr_node->hdr.bhdr = blk_sz;
+ while (blk_sz > crr_node->max_sz) {
+ crr_node->max_sz = blk_sz;
+ crr_node = crr_node->parent;
+ if (!crr_node) break;
+ }
}
- HARD_CHECK_TREE(&blk_crr->crr, alc->blk_order, blk_crr->root, 0);
+ HARD_CHECK_TREE(NULL, alc->crr_order, alc->mbc_root, 0);
}
static void
@@ -860,6 +867,18 @@ rbt_search(AOFF_RBTree_t* root, Uint size)
}
}
+#ifdef ERTS_SMP
+Carrier_t* aoff_lookup_pooled_mbc(Allctr_t* allctr, Uint size)
+{
+ AOFF_RBTree_t* node;
+
+ if (!allctr->cpool.pooled_tree)
+ return NULL;
+ node = rbt_search(allctr->cpool.pooled_tree, size);
+ return node ? ErtsContainerStruct(node, Carrier_t, cpool.pooled) : NULL;
+}
+#endif
+
static Block_t *
aoff_get_free_block(Allctr_t *allctr, Uint size,
Block_t *cand_blk, Uint cand_size)
@@ -961,16 +980,31 @@ static void aoff_add_mbc(Allctr_t *allctr, Carrier_t *carrier)
HARD_CHECK_TREE(NULL, alc->crr_order, *root, 0);
}
+#ifdef ERTS_SMP
+void aoff_add_pooled_mbc(Allctr_t *allctr, Carrier_t *crr)
+{
+ AOFF_RBTree_t **root = &allctr->cpool.pooled_tree;
+
+ ASSERT(allctr == crr->cpool.orig_allctr);
+ HARD_CHECK_TREE(NULL, 0, *root, 0);
+
+ /* Link carrier in address order tree
+ */
+ rbt_insert(FF_AOFF, root, &crr->cpool.pooled);
+
+ HARD_CHECK_TREE(NULL, 0, *root, 0);
+}
+#endif
+
static void aoff_remove_mbc(Allctr_t *allctr, Carrier_t *carrier)
{
- AOFFAllctr_t *alc = (AOFFAllctr_t *) allctr;
- AOFF_Carrier_t *crr = (AOFF_Carrier_t*) carrier;
- AOFF_RBTree_t **root = &alc->mbc_root;
+ AOFF_RBTree_t **root = &((AOFFAllctr_t*)allctr)->mbc_root;
+ AOFF_Carrier_t *crr = (AOFF_Carrier_t*)carrier;
ASSERT(allctr == ERTS_ALC_CARRIER_TO_ALLCTR(carrier));
if (!IS_CRR_IN_TREE(crr,*root))
- return;
+ return;
HARD_CHECK_TREE(NULL, alc->crr_order, *root, 0);
@@ -983,6 +1017,26 @@ static void aoff_remove_mbc(Allctr_t *allctr, Carrier_t *carrier)
HARD_CHECK_TREE(NULL, alc->crr_order, *root, 0);
}
+#ifdef ERTS_SMP
+void aoff_remove_pooled_mbc(Allctr_t *allctr, Carrier_t *crr)
+{
+ ASSERT(allctr == crr->cpool.orig_allctr);
+
+ HARD_CHECK_TREE(NULL, 0, allctr->cpool.pooled_tree, 0);
+
+ rbt_delete(&allctr->cpool.pooled_tree, &crr->cpool.pooled);
+#ifdef DEBUG
+ crr->cpool.pooled.parent = NULL;
+ crr->cpool.pooled.left = NULL;
+ crr->cpool.pooled.right = NULL;
+ crr->cpool.pooled.max_sz = 0;
+#endif
+ HARD_CHECK_TREE(NULL, 0, allctr->cpool.pooled_tree, 0);
+
+}
+#endif
+
+
static UWord aoff_largest_fblk_in_mbc(Allctr_t* allctr, Carrier_t* carrier)
{
AOFF_Carrier_t *crr = (AOFF_Carrier_t*) carrier;
@@ -1116,12 +1170,13 @@ erts_aoffalc_test(UWord op, UWord a1, UWord a2)
#ifdef HARD_DEBUG
-
-static int rbt_assert_is_member(AOFF_RBTree_t* root, AOFF_RBTree_t* node)
+static int rbt_is_member(AOFF_RBTree_t* root, AOFF_RBTree_t* node)
{
while (node != root) {
- ASSERT(node->parent);
- ASSERT(node->parent->left == node || node->parent->right == node);
+ if (!node->parent || (node->parent->left != node &&
+ node->parent->right != node)) {
+ return 0;
+ }
node = node->parent;
}
return 1;
diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.h b/erts/emulator/beam/erl_ao_firstfit_alloc.h
index b5492551e6..9cf4fc81a8 100644
--- a/erts/emulator/beam/erl_ao_firstfit_alloc.h
+++ b/erts/emulator/beam/erl_ao_firstfit_alloc.h
@@ -55,7 +55,6 @@ Allctr_t *erts_aoffalc_start(AOFFAllctr_t *, AOFFAllctrInit_t*, AllctrInit_t *);
#define GET_ERL_ALLOC_UTIL_IMPL
#include "erl_alloc_util.h"
-
struct AOFFAllctr_t_ {
Allctr_t allctr; /* Has to be first! */
diff --git a/erts/emulator/internal_doc/CarrierMigration.md b/erts/emulator/internal_doc/CarrierMigration.md
index 2a9594db25..3a796d11b7 100644
--- a/erts/emulator/internal_doc/CarrierMigration.md
+++ b/erts/emulator/internal_doc/CarrierMigration.md
@@ -3,17 +3,17 @@ Carrier Migration
The ERTS memory allocators manage memory blocks in two types of raw
memory chunks. We call these chunks of raw memory
-*carriers*. Singleblock carriers which only contain one large block,
-and multiblock carriers which contain multiple blocks. A carrier is
+*carriers*. Single-block carriers which only contain one large block,
+and multi-block carriers which contain multiple blocks. A carrier is
typically created using `mmap()` on unix systems. However, how a
carrier is created is of minor importance. An allocator instance
-typically manages a mixture of single- and multiblock carriers.
+typically manages a mixture of single- and multi-block carriers.
Problem
-------
When a carrier is empty, i.e. contains only one large free block, it
-is deallocated. Since multiblock carriers can contain both allocated
+is deallocated. Since multi-block carriers can contain both allocated
blocks and free blocks at the same time, an allocator instance might
be stuck with a large amount of poorly utilized carriers if the memory
load decreases. After a peak in memory usage it is expected that not
@@ -23,9 +23,9 @@ can usually be reused if the memory load increases again. However,
since each scheduler thread manages its own set of allocator
instances, and memory load is not necessarily correlated to CPU load, we
might get into a situation where there are lots of poorly utilized
-multiblock carriers on some allocator instances while we need to
-allocate new multiblock carriers on other allocator instances. In
-scenarios like this, the demand for multiblock carriers in the system
+multi-block carriers on some allocator instances while we need to
+allocate new multi-block carriers on other allocator instances. In
+scenarios like this, the demand for multi-block carriers in the system
might increase at the same time as the actual memory demand in the
system has decreased which is both unwanted and quite unexpected for
the end user.
@@ -34,7 +34,7 @@ Solution
--------
In order to prevent scenarios like this we've implemented support for
-migration of multiblock carriers between allocator instances of the
+migration of multi-block carriers between allocator instances of the
same type.
### Management of Free Blocks ###
@@ -44,7 +44,7 @@ and add it to another we need to be able to move references to the
free blocks of the carrier between the allocator instances. The
allocator instance specific data structure referring to the free
blocks it manages often refers to the same carrier from multiple
-places. For example, when the address order bestfit strategy is used
+places. For example, when the address order best-fit strategy is used
this data structure is a binary search tree spanning all carriers that
the allocator instance manages. Free blocks in one specific carrier
can be referred to from potentially every other carrier that is
@@ -135,7 +135,7 @@ carriers between scheduler specific allocator instances of the same
allocator type.
Each allocator instance keeps track of the current utilization of its
-multiblock carriers. When the total utilization falls below the "abandon
+multi-block carriers. When the total utilization falls below the "abandon
carrier utilization limit" it starts to inspect the utilization of the
current carrier when deallocations are made. If also the utilization
of the carrier falls below the "abandon carrier utilization limit" it
@@ -144,31 +144,45 @@ and inserts the carrier into the pool.
Since the carrier has been unlinked from the data structure of
available free blocks, no more allocations will be made in the
-carrier. The allocator instance putting the carrier into the pool,
-however, still has the responsibility of performing deallocations in
-it while it remains in the pool. The allocator instance with this
-deallocation responsibility is here called the **employer**.
-
-Each carrier has a flag field containing information about the
-employing allocator instance, a flag indicating if the carrier is in
-the pool or not, and a flag indicating if it is busy or not. When the
-carrier is in the pool, the employing allocator instance needs to mark it
-as busy while operating on it. If another thread inspects it in order
-to try to fetch it from the pool, it will skip it if it is busy. When
-fetching the carrier from the pool, employment will change and further
+carrier.
+
+The allocator instance that created a carrier is called its **owner**.
+Ownership never changes.
+
+The allocator instance that has the responsibility to perform deallocations in a
+carrier is called its **employer**. The employer may also perform allocations if
+the carrier is not in the pool. Employment may change when a carrier is fetched from
+or inserted into the pool.
+
+Deallocations in a carrier, while it remains in the pool, are always performed
+by the owner. That is, all pooled carriers are employed by their owners.
+
+Each carrier has an atomic word containing a pointer to the employing allocator
+instance and three bit flags: IN_POOL, BUSY and HOMECOMING.
+
+When fetching a carrier from the pool, employment may change and further
deallocations in the carrier will be redirected to the new
employer using the delayed dealloc functionality.
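Illustrative sketch (not from the diff): on a free, the employing allocator instance is read out of the carrier's atomic word, and the block is either freed locally or redirected to that instance's delayed-dealloc queue. In simplified C, with hypothetical types and queue API:

    #include <stdatomic.h>
    #include <stdint.h>

    #define FLG_MASK ((uintptr_t)0x7)     /* IN_POOL | BUSY | HOMECOMING */

    typedef struct allocator allocator;   /* opaque here */

    typedef struct {
        atomic_uintptr_t allctr;          /* employer pointer + flag bits */
    } carrier;

    /* Hypothetical helpers assumed to exist in the surrounding system. */
    void local_free(allocator *a, void *p);
    void dd_enqueue(allocator *employer, void *p);

    void carrier_aware_free(allocator *me, carrier *crr, void *p)
    {
        allocator *employer =
            (allocator *)(atomic_load(&crr->allctr) & ~FLG_MASK);

        if (employer == me)
            local_free(me, p);            /* fast path, same scheduler */
        else
            dd_enqueue(employer, p);      /* redirect to the employer  */
    }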
-If a carrier in the pool becomes empty, it will be withdrawn from the
-pool. All carriers that become empty are also always passed to its
-**owning** allocator instance for deallocation using the delayed
-dealloc functionality. Since carriers this way always will be
-deallocated by the owner that allocated the carrier, the
+When a foreign allocator instance abandons a carrier back into the pool, it will
+also pass it back to its **owner** using the delayed dealloc queue. When doing
+this it will set the HOMECOMING bit flag to mark it as "enqueued". The owner
+will later clear the HOMECOMING bit when the carrier is dequeued. This mechanism
+prevents a carrier from being enqueued again before it has been dequeued.
+
+When a carrier becomes empty, it will be deallocated. Carrier deallocation is
+always done by the owner that allocated the carrier. By doing this, the
underlying functionality of allocating and deallocating carriers can
remain simple and doesn't have to bother about multiple threads. In a
NUMA system we will also not mix carriers originating from multiple
NUMA nodes.
+If a carrier in the pool becomes empty, it will be withdrawn from the
+pool and be deallocated by the owner which already employs it.
+
+If a carrier employed by a foreign allocator becomes empty, it will be passed
+back to the owner for deallocation using the delayed dealloc functionality.
+
In short:
* The allocator instance that created a carrier **owns** it.
@@ -177,34 +191,31 @@ In short:
* The allocator instance that uses a carrier **employs** it.
* An **employer** can abandon a carrier into the pool.
* Pooled carriers are not allocated from.
-* Deallocation in a pooled carrier is still performed by its **employer**.
-* **Employment** can only change when a carrier is fetched from the pool.
+* Pooled carriers are always **employed** by their **owner**.
+* **Employment** can only change from **owner** to a foreign allocator
+ when a carrier is fetched from the pool.
+
### Searching the pool ###
+When an allocator instance needs more carrier space, it inspects the pool. If no
+carrier could be fetched from the pool, it will allocate a new
+carrier. Regardless of where the allocator instance gets the carrier from, it
+simply links the carrier into its data structure of free blocks.
+
To harbor real time characteristics, searching the pool is
limited. We only inspect a limited number of carriers. If none of
those carriers had a free block large enough to satisfy the allocation
-request, the search will fail. A carrier in the pool can also be busy
+request, the search will fail. A carrier in the pool can also be BUSY
if another thread is currently doing block deallocation work on the
-carrier. A busy carrier will also be skipped by the search as it can
+carrier. A BUSY carrier will also be skipped by the search as it can
not satisfy the request. The pool is lock-free and we do not want to
block, waiting for the other thread to finish.
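+
+A bounded, non-blocking scan could be sketched as below; every helper here is a
+stand-in for the example and not an ERTS function, and the pool itself is the
+shared, lock-free list of abandoned carriers:
+
+```c
+/* Sketch only: inspect at most 'limit' pooled carriers, skip BUSY ones,
+ * and give up rather than block. Continues the definitions above. */
+Carrier *cpool_entry(Allocator *me);            /* where to start scanning  */
+Carrier *cpool_next(Carrier *crr);              /* next pooled carrier      */
+int block_fits(Carrier *crr, size_t need);      /* big enough free block?   */
+int try_claim(Allocator *me, Carrier *crr);     /* CAS: fetch from the pool */
+
+static Carrier *cpool_fetch_sketch(Allocator *me, size_t need, int limit) {
+    Carrier *crr;
+    for (crr = cpool_entry(me); crr && limit-- > 0; crr = cpool_next(crr)) {
+        if (atomic_load(&crr->allctr) & CRR_BUSY)
+            continue;                           /* busy: skip, never block  */
+        if (!block_fits(crr, need))
+            continue;                           /* cannot satisfy request   */
+        if (try_claim(me, crr))
+            return crr;                         /* we become the employer   */
+    }
+    return NULL;                                /* search failed            */
+}
+```
+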
-#### Before OTP 17.4 ####
+### The bad cluster problem ###
-When an allocator instance needs more carrier space, it always begins
-by inspecting its own carriers that are waiting for thread progress
-before they can be deallocated. If no such carrier could be found, it
-then inspects the pool. If no carrier could be fetched from the pool,
-it will allocate a new carrier. Regardless of where the allocator
-instance gets the carrier from it the just links in the carrier into
-its data structure of free blocks.
-
-#### After OTP 17.4 ####
-
-The old search algorithm had a problem as the search always started at
-the same position in the pool, the sentinel. This could lead to
+Before OTP 17.4 the search algorithm had a problem, as the search always started
+at the same position in the pool, the sentinel. This could lead to
contention from concurrent searching processes. But even worse, it
could lead to a "bad" state when searches fail with a high rate
leading to new carriers instead being allocated. These new carriers
@@ -236,26 +247,27 @@ The result is that we prefer carriers created by the thread itself,
which is good for NUMA performance. And we get more entry points when
searching the pool, which will ease contention and clustering.
+### Our own pooled tree ###
+
To do the first search among own carriers, every allocator instance
-has two new lists: `pooled_list` and `traitor_list`. These lists are only
-accessed by the allocator itself and they only contain the allocator's
-own carriers. When an owned carrier is abandoned and put in the
-pool, it is also linked into `pooled_list`. When we search our
-`pooled_list` and find a carrier that is no longer in the pool, we
-move that carrier from `pooled_list` to `traitor_list` as it is now
-employed by another allocator. If searching `pooled_list` fails, we
-also do a limited search of `traitor_list`. When finding an abandoned
-carrier in `traitor_list` it is either employed or moved back to
-`pooled_list` if it could not satisfy the allocation request.
-
-When searching `pooled_list` and `traitor_list` we always start at the
-point where the last search ended. This to avoid clustering
-problems and increase the probability to find a "good" carrier. As
-`pooled_list` and `traitor_list` are only accessed by the owning
-allocator instance, they need no thread synchronization at all.
+has a `pooled_tree` of carriers. This tree is only accessed by the allocator
+itself and can only contain its own carriers. When a carrier is
+abandoned and put in the pool, it is also inserted into `pooled_tree`. This is
+either done direct, if the carrier was already employed by its owner, or by
+first passing it back to the owner via the delayed dealloc queue.
+
+When we search our `pooled_tree` and find a carrier that is no longer in the
+pool, we remove that carrier from `pooled_tree` and mark it as TRAITOR, as it is
+now employed by a foreign allocator. We will not find any carriers in
+`pooled_tree` that are marked as BUSY by other threads.
+
+If no carrier in `pooled_tree` had a large enough free block, we search it again
+to find any carrier that may act as an entry point into the shared list of all
+pooled carriers. This is done in order to avoid, if possible, starting at the
+sentinel and thereby ease the "bad clustering" problem.
Furthermore, the search for own carriers that are scheduled
-for deallocation is now done as the last search option. The idea is
+for deallocation is done as the last search option. The idea is
that it is better to reuse a poorly utilized carrier than to
resurrect an empty carrier that was just about to be released back to
the OS.
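+
+Putting the pieces together, the overall search order could be sketched roughly
+as below. The helper names are made up for the example; the entry-point search
+described above would hook into where the shared-pool scan picks its starting
+carrier:
+
+```c
+/* Sketch only: order of the search for more carrier space.
+ * Continues the definitions above; all helpers are stand-ins. */
+Carrier *pooled_tree_fetch(Allocator *me, size_t need); /* own pooled_tree  */
+Carrier *reuse_dc_carrier(Allocator *me);               /* own carriers
+                                                           awaiting dealloc */
+Carrier *create_carrier(Allocator *me, size_t need);    /* brand new one    */
+#define CPOOL_SEARCH_LIMIT 10                           /* made-up bound    */
+
+static Carrier *get_carrier_space(Allocator *me, size_t need) {
+    Carrier *crr;
+
+    /* 1. Our own pooled_tree: no locks, only our own carriers. */
+    if ((crr = pooled_tree_fetch(me, need)) != NULL)
+        return crr;
+
+    /* 2. The shared pool: bounded and lock-free (see earlier sketch). */
+    if ((crr = cpool_fetch_sketch(me, need, CPOOL_SEARCH_LIMIT)) != NULL)
+        return crr;
+
+    /* 3. Last reuse option: own carriers scheduled for deallocation. */
+    if ((crr = reuse_dc_carrier(me)) != NULL)
+        return crr;
+
+    /* 4. Nothing reusable found: allocate a brand new carrier. */
+    return create_carrier(me, need);
+}
+```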
@@ -271,14 +283,14 @@ load did not.
When using the `aoffcaobf` or `aoff` strategies compared to `gf` or
`bf`, we lose some performance since we get more modifications in the
data structure of free blocks. This performance penalty is however
-reduced using the `aoffcbf` strategy. A tradeoff between memory
+reduced using the `aoffcbf` strategy. A trade-off between memory
consumption and performance is however inevitable, and it is up to
the user to decide what is most important.
Further work
------------
-It would be quite easy to extend this to allow migration of multiblock
+It would be quite easy to extend this to allow migration of multi-block
carriers between all allocator types. More or less the only obstacle
is maintenance of the statistics information.
diff --git a/erts/emulator/test/alloc_SUITE.erl b/erts/emulator/test/alloc_SUITE.erl
index e4b4465c60..3d29776530 100644
--- a/erts/emulator/test/alloc_SUITE.erl
+++ b/erts/emulator/test/alloc_SUITE.erl
@@ -69,8 +69,11 @@ cpool(Cfg) -> drv_case(Cfg).
migration(Cfg) ->
case erlang:system_info(smp_support) of
true ->
- drv_case(Cfg, concurrent, "+MZe true"),
- drv_case(Cfg, concurrent, "+MZe true +MZas ageffcbf");
+ %% Enable test_alloc.
+ %% Disable driver_alloc to avoid recursive alloc_util calls
+ %% through enif_mutex_create() in my_creating_mbc().
+ drv_case(Cfg, concurrent, "+MZe true +MRe false"),
+ drv_case(Cfg, concurrent, "+MZe true +MRe false +MZas ageffcbf");
false ->
{skipped, "No smp"}
end.
@@ -117,7 +120,7 @@ erts_mmap_do(Config, SCO, SCRPM, SCRFSD) ->
0 -> O1;
_ -> O1 ++ " +MMscrfsd"++integer_to_list(SCRFSD)
end,
- {ok, Node} = start_node(Config, Opts),
+ {ok, Node} = start_node(Config, Opts, []),
Self = self(),
Ref = make_ref(),
F = fun() ->
@@ -234,7 +237,9 @@ drv_case(Config) ->
drv_case(Config, Mode, NodeOpts) when is_list(Config) ->
case os:type() of
{Family, _} when Family == unix; Family == win32 ->
- {ok, Node} = start_node(Config, NodeOpts),
+ %%Prog = {prog,"/my/own/otp/bin/cerl -debug"},
+ Prog = [],
+ {ok, Node} = start_node(Config, NodeOpts, Prog),
Self = self(),
Ref = make_ref(),
spawn_link(Node,
@@ -300,19 +305,35 @@ wait_for_memory_deallocations() ->
end.
print_stats(migration) ->
- {Btot,Ctot} = lists:foldl(fun({instance,Inr,Istats}, {Bacc,Cacc}) ->
- {mbcs,MBCS} = lists:keyfind(mbcs, 1, Istats),
- Btup = lists:keyfind(blocks, 1, MBCS),
- Ctup = lists:keyfind(carriers, 1, MBCS),
- io:format("{instance,~p,~p,~p}\n", [Inr, Btup, Ctup]),
- {tuple_add(Bacc,Btup),tuple_add(Cacc,Ctup)};
- (_, Acc) -> Acc
- end,
- {{blocks,0,0,0},{carriers,0,0,0}},
- erlang:system_info({allocator,test_alloc})),
-
+ IFun = fun({instance,Inr,Istats}, {Bacc,Cacc,Pacc}) ->
+ {mbcs,MBCS} = lists:keyfind(mbcs, 1, Istats),
+ Btup = lists:keyfind(blocks, 1, MBCS),
+ Ctup = lists:keyfind(carriers, 1, MBCS),
+
+ Ptup = case lists:keyfind(mbcs_pool, 1, Istats) of
+ {mbcs_pool,POOL} ->
+ {blocks, Bpool} = lists:keyfind(blocks, 1, POOL),
+ {carriers, Cpool} = lists:keyfind(carriers, 1, POOL),
+ {pool, Bpool, Cpool};
+ false ->
+ {pool, 0, 0}
+ end,
+                   io:format("{instance,~p,~p,~p,~p}\n",
+ [Inr, Btup, Ctup, Ptup]),
+ {tuple_add(Bacc,Btup),tuple_add(Cacc,Ctup),
+ tuple_add(Pacc,Ptup)};
+ (_, Acc) -> Acc
+ end,
+
+ {Btot,Ctot,Ptot} = lists:foldl(IFun,
+ {{blocks,0,0,0},{carriers,0,0,0},{pool,0,0}},
+ erlang:system_info({allocator,test_alloc})),
+
+ {pool, PBtot, PCtot} = Ptot,
io:format("Number of blocks : ~p\n", [Btot]),
- io:format("Number of carriers: ~p\n", [Ctot]);
+ io:format("Number of carriers: ~p\n", [Ctot]),
+ io:format("Number of pooled blocks : ~p\n", [PBtot]),
+ io:format("Number of pooled carriers: ~p\n", [PCtot]);
print_stats(_) -> ok.
tuple_add(T1, T2) ->
@@ -409,13 +430,13 @@ handle_result(_State, Result0) ->
continue
end.
-start_node(Config, Opts) when is_list(Config), is_list(Opts) ->
+start_node(Config, Opts, Prog) when is_list(Config), is_list(Opts) ->
case proplists:get_value(debug,Config) of
true -> {ok, node()};
- _ -> start_node_1(Config, Opts)
+ _ -> start_node_1(Config, Opts, Prog)
end.
-start_node_1(Config, Opts) ->
+start_node_1(Config, Opts, Prog) ->
Pa = filename:dirname(code:which(?MODULE)),
Name = list_to_atom(atom_to_list(?MODULE)
++ "-"
@@ -424,7 +445,11 @@ start_node_1(Config, Opts) ->
++ integer_to_list(erlang:system_time(second))
++ "-"
++ integer_to_list(erlang:unique_integer([positive]))),
- test_server:start_node(Name, slave, [{args, Opts++" -pa "++Pa}]).
+ ErlArg = case Prog of
+ [] -> [];
+ _ -> [{erl,[Prog]}]
+ end,
+ test_server:start_node(Name, slave, [{args, Opts++" -pa "++Pa} | ErlArg]).
stop_node(Node) when Node =:= node() -> ok;
stop_node(Node) ->
diff --git a/erts/emulator/test/alloc_SUITE_data/allocator_test.h b/erts/emulator/test/alloc_SUITE_data/allocator_test.h
index 97ee58cdad..5272f86c98 100644
--- a/erts/emulator/test/alloc_SUITE_data/allocator_test.h
+++ b/erts/emulator/test/alloc_SUITE_data/allocator_test.h
@@ -156,7 +156,8 @@ typedef void* erts_cond;
#define IS_SMP_ENABLED ((int) ALC_TEST0(0xf13))
#define ALLOC_TEST(S) ((void*) ALC_TEST1(0xf14, (S)))
#define FREE_TEST(P) ((void) ALC_TEST1(0xf15, (P)))
-#define SET_TEST_MBC_USER_HEADER(SZ,CMBC,DMBC) ((int)ALC_TEST3(0xf16, (SZ), (CMBC), (DMBC)))
-#define GET_TEST_MBC_SIZE() ((int) ALC_TEST0(0xf17))
+#define REALLOC_TEST(P,S) ((void*) ALC_TEST2(0xf16, (P), (S)))
+#define SET_TEST_MBC_USER_HEADER(SZ,CMBC,DMBC) ((int)ALC_TEST3(0xf17, (SZ), (CMBC), (DMBC)))
+#define GET_TEST_MBC_SIZE() ((int) ALC_TEST0(0xf18))
#endif
diff --git a/erts/emulator/test/alloc_SUITE_data/migration.c b/erts/emulator/test/alloc_SUITE_data/migration.c
index b9a4de03b3..1d974225fc 100644
--- a/erts/emulator/test/alloc_SUITE_data/migration.c
+++ b/erts/emulator/test/alloc_SUITE_data/migration.c
@@ -223,6 +223,42 @@ static int rand_int(MigrationState* state, int low, int high)
return low + (x % (high+1-low));
}
+enum Operation
+{
+ ALLOCATE_OP,
+ FREE_OP,
+ REALLOC_OP,
+ CLEANUP_OP
+};
+
+static enum Operation rand_op(MigrationState* state)
+{
+ int r = rand_int(state, 1, 100);
+ switch (state->phase) {
+ case GROWING:
+ FATAL_ASSERT(state->nblocks < state->max_nblocks);
+ if (r > 10 || state->nblocks == 0)
+ return ALLOCATE_OP;
+ else if (r > 5)
+ return FREE_OP;
+ else
+ return REALLOC_OP;
+
+ case SHRINKING:
+ FATAL_ASSERT(state->nblocks > 0);
+ if (r > 10 || state->nblocks == state->max_nblocks)
+ return FREE_OP;
+ else if (r > 5)
+ return ALLOCATE_OP;
+ else
+ return REALLOC_OP;
+
+ case CLEANUP:
+ return CLEANUP_OP;
+ default:
+ FATAL_ASSERT(!"Invalid op phase");
+ }
+}
static void do_cleanup(TestCaseState_t *tcs, MigrationState* state)
{
@@ -275,53 +311,75 @@ testcase_run(TestCaseState_t *tcs)
state->goal_nblocks = rand_int(state, 1, state->max_nblocks);
}
- switch (state->phase) {
- case GROWING: {
+ switch (rand_op(state)) {
+ case ALLOCATE_OP: {
MyBlock* p;
FATAL_ASSERT(!state->blockv[state->nblocks]);
- p = ALLOC_TEST(rand_int(state, state->block_size/2, state->block_size));
+ p = ALLOC_TEST(rand_int(state, state->block_size/2, state->block_size));
FATAL_ASSERT(p);
add_block(p, state);
- state->blockv[state->nblocks] = p;
- if (++state->nblocks >= state->goal_nblocks) {
- /*testcase_printf(tcs, "%d: Grown to %d blocks", tcs->thr_nr, state->nblocks);*/
- state->phase = SHRINKING;
- state->goal_nblocks = rand_int(state, 0, state->goal_nblocks-1);
- }
- else
- FATAL_ASSERT(!state->blockv[state->nblocks]);
+ state->blockv[state->nblocks++] = p;
break;
}
- case SHRINKING: {
+ case FREE_OP: {
int ix = rand_int(state, 0, state->nblocks-1);
FATAL_ASSERT(state->blockv[ix]);
remove_block(state->blockv[ix]);
FREE_TEST(state->blockv[ix]);
state->blockv[ix] = state->blockv[--state->nblocks];
state->blockv[state->nblocks] = NULL;
-
- if (state->nblocks <= state->goal_nblocks) {
- /*testcase_printf(tcs, "%d: Shrunk to %d blocks", tcs->thr_nr, state->nblocks);*/
- if (++state->round >= MAX_ROUNDS) {
- state->phase = CLEANUP;
- } else {
- state->phase = GROWING;
- state->goal_nblocks = rand_int(state, state->goal_nblocks+1, state->max_nblocks);
- }
- }
break;
}
+ case REALLOC_OP: {
+ int ix = rand_int(state, 0, state->nblocks-1);
+ MyBlock* p;
+ FATAL_ASSERT(state->blockv[ix]);
+ remove_block(state->blockv[ix]);
+ p = REALLOC_TEST(state->blockv[ix], rand_int(state, state->block_size/2, state->block_size));
+ FATAL_ASSERT(p);
+ add_block(p, state);
+ state->blockv[ix] = p;
+ break;
+ }
+ case CLEANUP_OP:
+ do_cleanup(tcs, state);
+ break;
+ default:
+ FATAL_ASSERT(!"Invalid operation");
+ }
+
+ switch (state->phase) {
+ case GROWING: {
+ if (state->nblocks >= state->goal_nblocks) {
+ /*testcase_printf(tcs, "%d: Grown to %d blocks", tcs->thr_nr, state->nblocks);*/
+ state->phase = SHRINKING;
+ state->goal_nblocks = rand_int(state, 0, state->goal_nblocks-1);
+ }
+ else
+ FATAL_ASSERT(!state->blockv[state->nblocks]);
+ break;
+ }
+ case SHRINKING: {
+ if (state->nblocks <= state->goal_nblocks) {
+ /*testcase_printf(tcs, "%d: Shrunk to %d blocks", tcs->thr_nr, state->nblocks);*/
+ if (++state->round >= MAX_ROUNDS) {
+ state->phase = CLEANUP;
+ } else {
+ state->phase = GROWING;
+ state->goal_nblocks = rand_int(state, state->goal_nblocks+1, state->max_nblocks);
+ }
+ }
+ break;
+ }
case CLEANUP:
- do_cleanup(tcs, state);
- break;
+ case DONE:
+ break;
default:
FATAL_ASSERT(!"Invalid phase");
}
- if (state->phase == DONE) {
- }
- else {
+ if (state->phase != DONE) {
testcase_continue(tcs);
}
}
diff --git a/erts/preloaded/ebin/erlang.beam b/erts/preloaded/ebin/erlang.beam
index 7a2d9e5a81..24d3d8fc84 100644
--- a/erts/preloaded/ebin/erlang.beam
+++ b/erts/preloaded/ebin/erlang.beam
Binary files differ
diff --git a/erts/preloaded/src/erlang.erl b/erts/preloaded/src/erlang.erl
index 6030f7312d..1bd3d14826 100644
--- a/erts/preloaded/src/erlang.erl
+++ b/erts/preloaded/src/erlang.erl
@@ -3717,15 +3717,14 @@ memory_is_supported() ->
get_blocks_size([{blocks_size, Sz, _, _} | Rest], Acc) ->
get_blocks_size(Rest, Acc+Sz);
-get_blocks_size([{_, _, _, _} | Rest], Acc) ->
- get_blocks_size(Rest, Acc);
get_blocks_size([{blocks_size, Sz} | Rest], Acc) ->
get_blocks_size(Rest, Acc+Sz);
-get_blocks_size([{_, _} | Rest], Acc) ->
+get_blocks_size([_ | Rest], Acc) ->
get_blocks_size(Rest, Acc);
get_blocks_size([], Acc) ->
Acc.
+
blocks_size([{Carriers, SizeList} | Rest], Acc) when Carriers == mbcs;
Carriers == mbcs_pool;
Carriers == sbcs ->
diff --git a/lib/stdlib/test/ets_SUITE.erl b/lib/stdlib/test/ets_SUITE.erl
index 00e02a06cc..a58497523a 100644
--- a/lib/stdlib/test/ets_SUITE.erl
+++ b/lib/stdlib/test/ets_SUITE.erl
@@ -5716,25 +5716,39 @@ etsmem() ->
end},
{Mem,AllTabs}.
-verify_etsmem({MemInfo,AllTabs}) ->
+
+verify_etsmem(MI) ->
wait_for_test_procs(),
+ verify_etsmem(MI, 1).
+
+verify_etsmem({MemInfo,AllTabs}, Try) ->
case etsmem() of
{MemInfo,_} ->
io:format("Ets mem info: ~p", [MemInfo]),
- case MemInfo of
- {ErlMem,EtsAlloc} when ErlMem == notsup; EtsAlloc == undefined ->
+ case {MemInfo, Try} of
+ {{ErlMem,EtsAlloc},_} when ErlMem == notsup; EtsAlloc == undefined ->
%% Use 'erl +Mea max' to do more complete memory leak testing.
{comment,"Incomplete or no mem leak testing"};
- _ ->
- ok
+ {_, 1} ->
+ ok;
+ _ ->
+ {comment, "Transient memory discrepancy"}
end;
{MemInfo2, AllTabs2} ->
io:format("Expected: ~p", [MemInfo]),
io:format("Actual: ~p", [MemInfo2]),
io:format("Changed tables before: ~p\n",[AllTabs -- AllTabs2]),
io:format("Changed tables after: ~p\n", [AllTabs2 -- AllTabs]),
- ets_test_spawn_logger ! {failed_memcheck, get('__ETS_TEST_CASE__')},
- {comment, "Failed memory check"}
+ case Try < 2 of
+ true ->
+ io:format("\nThis discrepancy could be caused by an "
+ "inconsistent memory \"snapshot\""
+ "\nTry again...\n", []),
+ verify_etsmem({MemInfo, AllTabs}, Try+1);
+ false ->
+ ets_test_spawn_logger ! {failed_memcheck, get('__ETS_TEST_CASE__')},
+ {comment, "Failed memory check"}
+ end
end.