Diffstat (limited to 'erts')
-rw-r--r--  erts/doc/src/erts_alloc.xml                             |    6
-rw-r--r--  erts/emulator/Makefile.in                               |    4
-rw-r--r--  erts/emulator/beam/beam_bp.c                            |   16
-rw-r--r--  erts/emulator/beam/erl_afit_alloc.c                     |   11
-rw-r--r--  erts/emulator/beam/erl_afit_alloc.h                     |    5
-rw-r--r--  erts/emulator/beam/erl_alloc.c                          |   22
-rw-r--r--  erts/emulator/beam/erl_alloc_util.c                     |  920
-rw-r--r--  erts/emulator/beam/erl_alloc_util.h                     |   55
-rw-r--r--  erts/emulator/beam/erl_ao_firstfit_alloc.c              |   16
-rw-r--r--  erts/emulator/beam/erl_bestfit_alloc.c                  |   45
-rw-r--r--  erts/emulator/beam/erl_gc.c                             |   46
-rw-r--r--  erts/emulator/beam/erl_goodfit_alloc.c                  |   13
-rw-r--r--  erts/emulator/beam/erl_trace.c                          |    6
-rw-r--r--  erts/emulator/sys/common/erl_mseg.c                     | 1134
-rw-r--r--  erts/emulator/sys/common/erl_mseg.h                     |   31
-rw-r--r--  erts/emulator/test/alloc_SUITE_data/allocator_test.h    |    8
-rw-r--r--  erts/emulator/test/alloc_SUITE_data/basic.c             |    2
-rw-r--r--  erts/emulator/test/alloc_SUITE_data/bucket_mask.c       |   65
-rw-r--r--  erts/emulator/test/alloc_SUITE_data/coalesce.c          |    6
-rw-r--r--  erts/emulator/test/alloc_SUITE_data/mseg_clear_cache.c  |    4
-rw-r--r--  erts/emulator/test/beam_SUITE.erl                       |   77
-rw-r--r--  erts/emulator/test/process_SUITE.erl                    | 2047
22 files changed, 2344 insertions(+), 2195 deletions(-)
diff --git a/erts/doc/src/erts_alloc.xml b/erts/doc/src/erts_alloc.xml
index ec5e7d9b74..87d6682328 100644
--- a/erts/doc/src/erts_alloc.xml
+++ b/erts/doc/src/erts_alloc.xml
@@ -341,7 +341,8 @@
Largest (<c>mseg_alloc</c>) multiblock carrier size (in
kilobytes). See <seealso marker="#mseg_mbc_sizes">the description
on how sizes for mseg_alloc multiblock carriers are decided</seealso>
- in "the <c>alloc_util</c> framework" section.</item>
+ in "the <c>alloc_util</c> framework" section. On a 32-bit Unix-style OS,
+ this limit cannot be set higher than 128 megabytes.</item>
<tag><marker id="M_mbcgs"><c><![CDATA[+M<S>mbcgs <ratio>]]></c></marker></tag>
<item>
(<c>mseg_alloc</c>) multiblock carrier growth stages. See
@@ -413,7 +414,8 @@
Singleblock carrier threshold. Blocks larger than this
threshold will be placed in singleblock carriers. Blocks
smaller than this threshold will be placed in multiblock
- carriers.</item>
+ carriers. On a 32-bit Unix-style OS, this threshold cannot be set higher
+ than 8 megabytes.</item>
<tag><marker id="M_sbmbcs"><c><![CDATA[+M<S>sbmbcs <size>]]></c></marker></tag>
<item>
Small block multiblock carrier size (in bytes). Memory blocks smaller
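A usage sketch for the two limits documented above (the values and the
allocator letter "B", i.e. binary_alloc, are purely illustrative):

    erl +MBlmbcs 10240 +MBsbct 512

This requests a 10240 kilobyte largest multiblock carrier and a 512 kilobyte
singleblock carrier threshold, both well below the 32-bit limits introduced
by this patch.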
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index 67320697ef..89c948cc00 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -61,7 +61,7 @@ else
ifeq ($(TYPE),purify)
PURIFY = purify $(PURIFY_BUILD_OPTIONS)
TYPEMARKER = .purify
-TYPE_FLAGS = $(DEBUG_CFLAGS) -DPURIFY -DNO_JUMP_TABLE -DERTS_MSEG_FAKE_SEGMENTS
+TYPE_FLAGS = $(DEBUG_CFLAGS) -DPURIFY -DNO_JUMP_TABLE
ENABLE_ALLOC_TYPE_VARS += purify
else
@@ -92,7 +92,7 @@ else
ifeq ($(TYPE),valgrind)
PURIFY =
TYPEMARKER = .valgrind
-TYPE_FLAGS = $(DEBUG_CFLAGS) -DVALGRIND -DNO_JUMP_TABLE -DERTS_MSEG_FAKE_SEGMENTS
+TYPE_FLAGS = $(DEBUG_CFLAGS) -DVALGRIND -DNO_JUMP_TABLE
ENABLE_ALLOC_TYPE_VARS += valgrind
else
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index 2168d410b5..c1e11f6448 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -65,10 +65,10 @@
#define ERTS_BPF_ALL 0xFF
-extern Eterm beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
-extern Eterm beam_return_trace[1]; /* OpCode(i_return_trace) */
-extern Eterm beam_exception_trace[1]; /* OpCode(i_exception_trace) */
-extern Eterm beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
+extern BeamInstr beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
+extern BeamInstr beam_return_trace[1]; /* OpCode(i_return_trace) */
+extern BeamInstr beam_exception_trace[1]; /* OpCode(i_exception_trace) */
+extern BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
erts_smp_atomic32_t erts_active_bp_index;
erts_smp_atomic32_t erts_staging_bp_index;
@@ -161,7 +161,7 @@ erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified)
for (current = 0; current < num_modules; current++) {
BeamInstr** code_base = (BeamInstr **) module[current]->curr.code;
BeamInstr* code;
- Uint num_functions = (Uint) code_base[MI_NUM_FUNCTIONS];
+ Uint num_functions = (Uint)(UWord) code_base[MI_NUM_FUNCTIONS];
Uint fi;
if (specified > 0) {
@@ -172,7 +172,7 @@ erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified)
}
for (fi = 0; fi < num_functions; fi++) {
- Eterm* pc;
+ BeamInstr* pc;
int wi;
code = code_base[MI_FUNCTIONS+fi];
@@ -555,7 +555,7 @@ erts_clear_module_break(Module *modp) {
if (code_base == NULL) {
return 0;
}
- n = (Uint) code_base[MI_NUM_FUNCTIONS];
+ n = (Uint)(UWord) code_base[MI_NUM_FUNCTIONS];
for (i = 0; i < n; ++i) {
BeamInstr* pc;
@@ -919,7 +919,7 @@ do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
E -= 1;
ASSERT(c_p->htop <= E && E <= c_p->hend);
E[0] = make_cp(c_p->cp);
- c_p->cp = (BeamInstr *) beam_return_to_trace;
+ c_p->cp = beam_return_to_trace;
}
if (flags & MATCH_SET_RX_TRACE) {
E -= 3;
diff --git a/erts/emulator/beam/erl_afit_alloc.c b/erts/emulator/beam/erl_afit_alloc.c
index 570cc59be2..306b32691c 100644
--- a/erts/emulator/beam/erl_afit_alloc.c
+++ b/erts/emulator/beam/erl_afit_alloc.c
@@ -37,6 +37,12 @@
#define GET_ERL_AF_ALLOC_IMPL
#include "erl_afit_alloc.h"
+struct AFFreeBlock_t_ {
+ Block_t block_head;
+ AFFreeBlock_t *prev;
+ AFFreeBlock_t *next;
+};
+#define AF_BLK_SZ(B) MBC_FBLK_SZ(&(B)->block_head)
#define MIN_MBC_SZ (16*1024)
#define MIN_MBC_FIRST_FREE_SZ (4*1024)
@@ -80,7 +86,6 @@ erts_afalc_start(AFAllctr_t *afallctr,
init->sbmbct = 0; /* Small mbc not supported by afit */
- allctr->mbc_header_size = sizeof(Carrier_t);
allctr->min_mbc_size = MIN_MBC_SZ;
allctr->min_mbc_first_free_size = MIN_MBC_FIRST_FREE_SZ;
allctr->min_block_size = sizeof(AFFreeBlock_t);
@@ -118,7 +123,7 @@ get_free_block(Allctr_t *allctr, Uint size, Block_t *cand_blk, Uint cand_size,
ASSERT(!cand_blk || cand_size >= size);
- if (afallctr->free_list && BLK_SZ(afallctr->free_list) >= size) {
+ if (afallctr->free_list && AF_BLK_SZ(afallctr->free_list) >= size) {
AFFreeBlock_t *res = afallctr->free_list;
afallctr->free_list = res->next;
if (res->next)
@@ -135,7 +140,7 @@ link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
AFFreeBlock_t *blk = (AFFreeBlock_t *) block;
AFAllctr_t *afallctr = (AFAllctr_t *) allctr;
- if (afallctr->free_list && BLK_SZ(afallctr->free_list) > BLK_SZ(blk)) {
+ if (afallctr->free_list && AF_BLK_SZ(afallctr->free_list) > AF_BLK_SZ(blk)) {
blk->next = afallctr->free_list->next;
blk->prev = afallctr->free_list;
afallctr->free_list->next = blk;
diff --git a/erts/emulator/beam/erl_afit_alloc.h b/erts/emulator/beam/erl_afit_alloc.h
index ea408a7194..87caccac20 100644
--- a/erts/emulator/beam/erl_afit_alloc.h
+++ b/erts/emulator/beam/erl_afit_alloc.h
@@ -49,11 +49,6 @@ Allctr_t *erts_afalc_start(AFAllctr_t *, AFAllctrInit_t *, AllctrInit_t *);
#include "erl_alloc_util.h"
typedef struct AFFreeBlock_t_ AFFreeBlock_t;
-struct AFFreeBlock_t_ {
- Block_t block_head;
- AFFreeBlock_t *prev;
- AFFreeBlock_t *next;
-};
struct AFAllctr_t_ {
Allctr_t allctr; /* Has to be first! */
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 04d807b780..5da8c69c03 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -310,9 +310,9 @@ set_default_ll_alloc_opts(struct au_init *ip)
ip->init.util.name_prefix = "ll_";
ip->init.util.alloc_no = ERTS_ALC_A_LONG_LIVED;
#ifndef SMALL_MEMORY
- ip->init.util.mmbcs = 2*1024*1024; /* Main carrier size */
+ ip->init.util.mmbcs = 2*1024*1024 - 40; /* Main carrier size */
#else
- ip->init.util.mmbcs = 1*1024*1024; /* Main carrier size */
+ ip->init.util.mmbcs = 1*1024*1024 - 40; /* Main carrier size */
#endif
ip->init.util.ts = ERTS_ALC_MTA_LONG_LIVED;
ip->init.util.asbcst = 0;
@@ -1173,6 +1173,11 @@ handle_au_arg(struct au_init *auip,
break;
case 'e':
auip->enable = get_bool_value(sub_param+1, argv, ip);
+#if !HAVE_ERTS_SBMBC
+ if (auip->init.util.alloc_no == ERTS_ALC_A_SBMBC) {
+ auip->enable = 0;
+ }
+#endif
break;
case 'l':
if (has_prefix("lmbcs", sub_param)) {
@@ -1233,10 +1238,16 @@ handle_au_arg(struct au_init *auip,
auip->init.util.sbct = get_kb_value(sub_param + 4, argv, ip);
}
else if (has_prefix("sbmbcs", sub_param)) {
- auip->init.util.sbmbcs = get_byte_value(sub_param + 6, argv, ip);
+#if HAVE_ERTS_SBMBC
+ auip->init.util.sbmbcs =
+#endif
+ get_byte_value(sub_param + 6, argv, ip);
}
else if (has_prefix("sbmbct", sub_param)) {
- auip->init.util.sbmbct = get_byte_value(sub_param + 6, argv, ip);
+#if HAVE_ERTS_SBMBC
+ auip->init.util.sbmbct =
+#endif
+ get_byte_value(sub_param + 6, argv, ip);
}
else if (has_prefix("smbcs", sub_param)) {
auip->default_.smbcs = 0;
@@ -1403,6 +1414,9 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
else if (strcmp("max", arg) == 0) {
for (a = 0; a < aui_sz; a++)
aui[a]->enable = 1;
+#if !HAVE_ERTS_SBMBC
+ init->sbmbc_alloc.enable = 0;
+#endif
}
else if (strcmp("config", arg) == 0) {
init->erts_alloc_config = 1;
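The "sbmbcs"/"sbmbct" branches above rely on a small preprocessor idiom: the
value is always parsed, so the command line argument is consumed either way,
but the assignment target is only compiled in when HAVE_ERTS_SBMBC is set.
A minimal, self-contained sketch of the same idiom (names are hypothetical,
not the erl_alloc.c ones):

    #include <stdio.h>
    #include <stdlib.h>

    #define HAVE_FEATURE 0           /* flip to 1 to keep the parsed value */

    struct opts { long feature_size; };

    static long parse_value(const char *s) { return strtol(s, NULL, 10); }

    static void handle_opt(struct opts *o, const char *arg)
    {
        (void)o;                     /* unused when the feature is disabled */
    #if HAVE_FEATURE
        o->feature_size =
    #endif
            parse_value(arg);        /* always evaluated; result dropped if disabled */
    }

    int main(void)
    {
        struct opts o = {0};
        handle_opt(&o, "4096");
        printf("feature_size = %ld\n", o.feature_size);
        return 0;
    }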
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index 97ba306a79..3d6761339b 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -78,10 +78,12 @@ int erts_have_sbmbc_alloc;
#if HAVE_ERTS_MSEG
-#define INV_MSEG_UNIT_MASK ((UWord) (mseg_unit_size - 1))
-#define MSEG_UNIT_MASK (~INV_MSEG_UNIT_MASK)
+#define MSEG_UNIT_SHIFT MSEG_ALIGN_BITS
+#define MSEG_UNIT_SZ (1 << MSEG_UNIT_SHIFT)
+#define MSEG_UNIT_MASK ((~(UWord)0) << MSEG_UNIT_SHIFT)
+
#define MSEG_UNIT_FLOOR(X) ((X) & MSEG_UNIT_MASK)
-#define MSEG_UNIT_CEILING(X) MSEG_UNIT_FLOOR((X) + INV_MSEG_UNIT_MASK)
+#define MSEG_UNIT_CEILING(X) MSEG_UNIT_FLOOR((X) + ~MSEG_UNIT_MASK)
#endif
@@ -104,7 +106,6 @@ int erts_have_sbmbc_alloc;
static Uint sys_alloc_carrier_size;
#if HAVE_ERTS_MSEG
static Uint max_mseg_carriers;
-static Uint mseg_unit_size;
#endif
#define ONE_GIGA (1000000000)
@@ -117,16 +118,47 @@ static Uint mseg_unit_size;
? ((CC).giga_no--, (CC).no = ONE_GIGA - 1) \
: (CC).no--)
-/* ... */
+/* Multi block carrier (MBC) memory layout in R16:
+
+Empty MBC:
+[Carrier_t|pad|Block_t L0T|fhdr| free... ]
+
+MBC after allocating first block:
+[Carrier_t|pad|Block_t 000| udata |pad|Block_t L0T|fhdr| free... ]
+
+MBC after allocating second block:
+[Carrier_t|pad|Block_t 000| udata |pad|Block_t 000| udata |pad|Block_t L0T|fhdr| free... ]
+
+MBC after deallocating first block:
+[Carrier_t|pad|Block_t 00T|fhdr| free |FreeBlkFtr_t|Block_t 0P0| udata |pad|Block_t L0T|fhdr| free... ]
+
+
+ udata = Allocated user data
+ pad = Padding to ensure correct alignment for user data
+ fhdr = Allocator specific header to keep track of free block
+ free = Unused free memory
+ T = This block is free (THIS_FREE_BLK_HDR_FLG)
+ P = Previous block is free (PREV_FREE_BLK_HDR_FLG)
+ L = Last block in carrier (LAST_BLK_HDR_FLG)
+*/
+
+/* Single block carrier (SBC):
+[Carrier_t|pad|Block_t 111| udata... ]
+*/
+
/* Blocks ... */
-#define SBC_BLK_FTR_FLG (((UWord) 1) << 0)
+#define UNUSED0_BLK_FTR_FLG (((UWord) 1) << 0)
#define UNUSED1_BLK_FTR_FLG (((UWord) 1) << 1)
#define UNUSED2_BLK_FTR_FLG (((UWord) 1) << 2)
-#define ABLK_HDR_SZ (sizeof(Block_t))
-#define FBLK_FTR_SZ (sizeof(UWord))
+#if MBC_ABLK_OFFSET_BITS
+# define ABLK_HDR_SZ (offsetof(Block_t,u))
+#else
+# define ABLK_HDR_SZ (sizeof(Block_t))
+#endif
+#define FBLK_FTR_SZ (sizeof(FreeBlkFtr_t))
#define UMEMSZ2BLKSZ(AP, SZ) \
(ABLK_HDR_SZ + (SZ) <= (AP)->min_block_size \
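The flag letters in the layout diagram above (T, P and L) are the three low
bits of every block header word; the block size lives in the remaining high
bits. A minimal sketch of that packing (illustrative names only; it ignores
the carrier-offset bits this patch additionally packs into allocated MBC
headers on 32-bit builds):

    #include <assert.h>
    #include <stdint.h>

    typedef uintptr_t UWord;

    #define THIS_FREE ((UWord)1 << 0)   /* T */
    #define PREV_FREE ((UWord)1 << 1)   /* P */
    #define LAST_BLK  ((UWord)1 << 2)   /* L */
    #define FLG_MASK  (THIS_FREE | PREV_FREE | LAST_BLK)

    static UWord mk_hdr(UWord sz, UWord flags)
    {
        assert((sz & FLG_MASK) == 0);   /* sizes are unit aligned, low bits free */
        return sz | flags;
    }

    int main(void)
    {
        UWord hdr = mk_hdr(512, THIS_FREE | LAST_BLK);  /* "L0T" in the diagram */
        assert((hdr & ~FLG_MASK) == 512);               /* recover the size  */
        assert((hdr & THIS_FREE) && (hdr & LAST_BLK));  /* recover the flags */
        return 0;
    }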
@@ -136,88 +168,181 @@ static Uint mseg_unit_size;
#define UMEM2BLK(P) ((Block_t *) (((char *) (P)) - ABLK_HDR_SZ))
#define BLK2UMEM(P) ((void *) (((char *) (P)) + ABLK_HDR_SZ))
-#define PREV_BLK_SZ(B) \
- ((UWord) (*(((UWord *) (B)) - 1) & SZ_MASK))
+#define PREV_BLK_SZ(B) ((UWord) (((FreeBlkFtr_t *)(B))[-1]))
#define SET_BLK_SZ_FTR(B, SZ) \
- (*((UWord *) (((char *) (B)) + (SZ) - sizeof(UWord))) = (SZ))
+ (((FreeBlkFtr_t *) (((char *) (B)) + (SZ)))[-1] = (SZ))
#define THIS_FREE_BLK_HDR_FLG (((UWord) 1) << 0)
#define PREV_FREE_BLK_HDR_FLG (((UWord) 1) << 1)
#define LAST_BLK_HDR_FLG (((UWord) 1) << 2)
-#define SET_BLK_SZ(B, SZ) \
+/* Special flag combo for (allocated) SBC blocks
+*/
+#define SBC_BLK_HDR_FLG (THIS_FREE_BLK_HDR_FLG | PREV_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG)
+
+#define SET_MBC_ABLK_SZ(B, SZ) \
(ASSERT(((SZ) & FLG_MASK) == 0), \
- (*((Block_t *) (B)) = ((*((Block_t *) (B)) & FLG_MASK) | (SZ))))
-#define SET_BLK_FREE(B) \
- (*((Block_t *) (B)) |= THIS_FREE_BLK_HDR_FLG)
-#define SET_BLK_ALLOCED(B) \
- (*((Block_t *) (B)) &= ~THIS_FREE_BLK_HDR_FLG)
-#define SET_PREV_BLK_FREE(B) \
- (*((Block_t *) (B)) |= PREV_FREE_BLK_HDR_FLG)
+ (B)->bhdr = (((B)->bhdr) & ~MBC_ABLK_SZ_MASK) | (SZ))
+#define SET_MBC_FBLK_SZ(B, SZ) \
+ (ASSERT(((SZ) & FLG_MASK) == 0), \
+ (B)->bhdr = (((B)->bhdr) & ~MBC_FBLK_SZ_MASK) | (SZ))
+#define SET_SBC_BLK_SZ(B, SZ) \
+ (ASSERT(((SZ) & FLG_MASK) == 0), \
+ (B)->bhdr = (((B)->bhdr) & ~SBC_BLK_SZ_MASK) | (SZ))
+#define SET_PREV_BLK_FREE(AP,B) \
+ (ASSERT(!IS_MBC_FIRST_BLK(AP,B)), \
+ ASSERT(!IS_FREE_BLK(B)), \
+ (B)->bhdr |= PREV_FREE_BLK_HDR_FLG)
#define SET_PREV_BLK_ALLOCED(B) \
- (*((Block_t *) (B)) &= ~PREV_FREE_BLK_HDR_FLG)
+ ((B)->bhdr &= ~PREV_FREE_BLK_HDR_FLG)
#define SET_LAST_BLK(B) \
- (*((Block_t *) (B)) |= LAST_BLK_HDR_FLG)
+ ((B)->bhdr |= LAST_BLK_HDR_FLG)
#define SET_NOT_LAST_BLK(B) \
- (*((Block_t *) (B)) &= ~LAST_BLK_HDR_FLG)
+ ((B)->bhdr &= ~LAST_BLK_HDR_FLG)
#define SBH_THIS_FREE THIS_FREE_BLK_HDR_FLG
-#define SBH_THIS_ALLOCED ((UWord) 0)
#define SBH_PREV_FREE PREV_FREE_BLK_HDR_FLG
-#define SBH_PREV_ALLOCED ((UWord) 0)
#define SBH_LAST_BLK LAST_BLK_HDR_FLG
-#define SBH_NOT_LAST_BLK ((UWord) 0)
-#define SET_BLK_HDR(B, Sz, F) \
- (ASSERT(((Sz) & FLG_MASK) == 0), *((Block_t *) (B)) = ((Sz) | (F)))
+
+#if MBC_ABLK_OFFSET_BITS
+
+# define MBC_SZ_MAX_LIMIT ((((UWord)1 << MBC_ABLK_OFFSET_BITS) - 1) << MSEG_ALIGN_BITS)
+
+# define BLK_CARRIER_OFFSET(B, C) (((char*)(B) - (char*)(C)) >> MSEG_UNIT_SHIFT)
+
+# define SET_MBC_ABLK_HDR(B, Sz, F, C) \
+ (ASSERT(((Sz) & ~MBC_ABLK_SZ_MASK) == 0), \
+ ASSERT(!((UWord)(F) & (~FLG_MASK|THIS_FREE_BLK_HDR_FLG))), \
+ (B)->bhdr = ((Sz) | (F) | (BLK_CARRIER_OFFSET(B,C) << MBC_ABLK_OFFSET_SHIFT)))
+
+# define SET_MBC_FBLK_HDR(B, Sz, F, C) \
+ (ASSERT(((Sz) & ~MBC_FBLK_SZ_MASK) == 0), \
+ ASSERT(((UWord)(F) & (~FLG_MASK|THIS_FREE_BLK_HDR_FLG|PREV_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \
+ (B)->bhdr = ((Sz) | (F)), \
+ (B)->u.carrier = (C))
+
+# define ABLK_TO_MBC(B) \
+ (ASSERT(IS_MBC_BLK(B) && IS_ALLOCED_BLK(B)), \
+ (Carrier_t*)((MSEG_UNIT_FLOOR((UWord)(B)) - \
+ (((B)->bhdr >> MBC_ABLK_OFFSET_SHIFT) << MSEG_UNIT_SHIFT))))
+
+# define FBLK_TO_MBC(B) \
+ (ASSERT(IS_MBC_BLK(B) && IS_FREE_BLK(B)), \
+ (B)->u.carrier)
+
+# define BLK_TO_MBC(B) (IS_FREE_BLK(B) ? FBLK_TO_MBC(B) : ABLK_TO_MBC(B))
+
+# define IS_MBC_FIRST_ABLK(AP,B) \
+ ((((UWord)(B) & ~MSEG_UNIT_MASK) == MBC_HEADER_SIZE(AP)) \
+ && ((B)->bhdr & MBC_ABLK_OFFSET_MASK) == 0)
+
+# define IS_MBC_FIRST_FBLK(AP,B) \
+ ((char*)(B) == (char*)((B)->u.carrier) + MBC_HEADER_SIZE(AP))
+
+# define IS_MBC_FIRST_BLK(AP,B) \
+ (IS_FREE_BLK(B) ? IS_MBC_FIRST_FBLK(AP,B) : IS_MBC_FIRST_ABLK(AP,B))
+
+# define SET_BLK_FREE(B) \
+ (ASSERT(!IS_PREV_BLK_FREE(B)), \
+ (B)->u.carrier = ABLK_TO_MBC(B), \
+ (B)->bhdr |= THIS_FREE_BLK_HDR_FLG, \
+ (B)->bhdr &= (MBC_ABLK_SZ_MASK|FLG_MASK))
+
+# define SET_BLK_ALLOCED(B) \
+ (ASSERT(((B)->bhdr & (MBC_ABLK_OFFSET_MASK|THIS_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \
+ (B)->bhdr &= ~THIS_FREE_BLK_HDR_FLG, \
+ (B)->bhdr |= (BLK_CARRIER_OFFSET(B,(B)->u.carrier) << MBC_ABLK_OFFSET_SHIFT))
+
+#else /* !MBC_ABLK_OFFSET_BITS */
+
+# define MBC_SZ_MAX_LIMIT ((UWord)~0)
+
+# define SET_MBC_ABLK_HDR(B, Sz, F, C) \
+ (ASSERT(((Sz) & FLG_MASK) == 0), \
+ ASSERT(!((UWord)(F) & (~FLG_MASK|THIS_FREE_BLK_HDR_FLG))), \
+ ASSERT((UWord)(F) < SBC_BLK_HDR_FLG), \
+ (B)->bhdr = ((Sz) | (F)), \
+ (B)->carrier = (C))
+
+# define SET_MBC_FBLK_HDR(B, Sz, F, C) \
+ (ASSERT(((Sz) & FLG_MASK) == 0), \
+ ASSERT(((UWord)(F) & (~FLG_MASK|THIS_FREE_BLK_HDR_FLG|PREV_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \
+ (B)->bhdr = ((Sz) | (F)), \
+ (B)->carrier = (C))
+
+# define BLK_TO_MBC(B) ((B)->carrier)
+# define ABLK_TO_MBC(B) BLK_TO_MBC(B)
+# define FBLK_TO_MBC(B) BLK_TO_MBC(B)
+
+# define IS_MBC_FIRST_BLK(AP,B) \
+ ((char*)(B) == (char*)((B)->carrier) + MBC_HEADER_SIZE(AP))
+# define IS_MBC_FIRST_ABLK(AP,B) IS_MBC_FIRST_BLK(AP,B)
+# define IS_MBC_FIRST_FBLK(AP,B) IS_MBC_FIRST_BLK(AP,B)
+
+# define SET_BLK_FREE(B) \
+ (ASSERT(!IS_PREV_BLK_FREE(B)), \
+ (B)->bhdr |= THIS_FREE_BLK_HDR_FLG)
+
+# define SET_BLK_ALLOCED(B) \
+ ((B)->bhdr &= ~THIS_FREE_BLK_HDR_FLG)
+
+#endif /* !MBC_ABLK_OFFSET_BITS */
+
+#define SET_SBC_BLK_HDR(B, Sz) \
+ (ASSERT(((Sz) & FLG_MASK) == 0), (B)->bhdr = ((Sz) | (SBC_BLK_HDR_FLG)))
+
#define BLK_UMEM_SZ(B) \
(BLK_SZ(B) - (ABLK_HDR_SZ))
#define IS_PREV_BLK_FREE(B) \
- (*((Block_t *) (B)) & PREV_FREE_BLK_HDR_FLG)
+ ((B)->bhdr & PREV_FREE_BLK_HDR_FLG)
#define IS_PREV_BLK_ALLOCED(B) \
(!IS_PREV_BLK_FREE((B)))
#define IS_FREE_BLK(B) \
- (*((Block_t *) (B)) & THIS_FREE_BLK_HDR_FLG)
+ (ASSERT(!IS_SBC_BLK(B)), (B)->bhdr & THIS_FREE_BLK_HDR_FLG)
#define IS_ALLOCED_BLK(B) \
(!IS_FREE_BLK((B)))
#define IS_LAST_BLK(B) \
- (*((Block_t *) (B)) & LAST_BLK_HDR_FLG)
+ ((B)->bhdr & LAST_BLK_HDR_FLG)
#define IS_NOT_LAST_BLK(B) \
(!IS_LAST_BLK((B)))
#define GET_LAST_BLK_HDR_FLG(B) \
- (*((Block_t*) (B)) & LAST_BLK_HDR_FLG)
+ ((B)->bhdr & LAST_BLK_HDR_FLG)
#define GET_THIS_FREE_BLK_HDR_FLG(B) \
- (*((Block_t*) (B)) & THIS_FREE_BLK_HDR_FLG)
+ ((B)->bhdr & THIS_FREE_BLK_HDR_FLG)
#define GET_PREV_FREE_BLK_HDR_FLG(B) \
- (*((Block_t*) (B)) & PREV_FREE_BLK_HDR_FLG)
+ ((B)->bhdr & PREV_FREE_BLK_HDR_FLG)
#define GET_BLK_HDR_FLGS(B) \
- (*((Block_t*) (B)) & FLG_MASK)
-
-#define IS_FIRST_BLK(B) \
- (IS_PREV_BLK_FREE((B)) && (PREV_BLK_SZ((B)) == 0))
-#define IS_NOT_FIRST_BLK(B) \
- (!IS_FIRST_BLK((B)))
-
-#define SET_SBC_BLK_FTR(FTR) \
- ((FTR) = (0 | SBC_BLK_FTR_FLG))
-#define SET_MBC_BLK_FTR(FTR) \
- ((FTR) = 0)
+ ((B)->bhdr & FLG_MASK)
#define IS_SBC_BLK(B) \
- (IS_PREV_BLK_FREE((B)) && (((UWord *) (B))[-1] & SBC_BLK_FTR_FLG))
+ (((B)->bhdr & FLG_MASK) == SBC_BLK_HDR_FLG)
#define IS_MBC_BLK(B) \
(!IS_SBC_BLK((B)))
+#define MBC_BLK_SZ(B) (IS_FREE_BLK(B) ? MBC_FBLK_SZ(B) : MBC_ABLK_SZ(B))
+
#define NXT_BLK(B) \
- ((Block_t *) (((char *) (B)) + BLK_SZ((B))))
+ (ASSERT(IS_MBC_BLK(B)), \
+ (Block_t *) (((char *) (B)) + MBC_BLK_SZ((B))))
#define PREV_BLK(B) \
((Block_t *) (((char *) (B)) - PREV_BLK_SZ((B))))
+#define BLK_AFTER(B,Sz) \
+ ((Block_t *) (((char *) (B)) + (Sz)))
+
+#define BLK_SZ(B) ((B)->bhdr & (((B)->bhdr & THIS_FREE_BLK_HDR_FLG) ? MBC_FBLK_SZ_MASK : MBC_ABLK_SZ_MASK))
+
/* Carriers ... */
+#define SBC_HEADER_SIZE (UNIT_CEILING(sizeof(Carrier_t) + ABLK_HDR_SZ) \
+ - ABLK_HDR_SZ)
+#define MBC_HEADER_SIZE(AP) SBC_HEADER_SIZE
+
+
#define MSEG_CARRIER_HDR_FLAG (((UWord) 1) << 0)
#define SBC_CARRIER_HDR_FLAG (((UWord) 1) << 1)
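ABLK_TO_MBC above is the central trick of this change: an allocated MBC block
no longer stores a carrier pointer; it recomputes the carrier from its own
address, which sits inside a super-aligned mseg unit, plus a small unit
offset kept in the header. A minimal sketch, assuming a 2^18 byte alignment
(the real value is taken from MSEG_ALIGN_BITS):

    #include <assert.h>
    #include <stdint.h>

    #define ALIGN_BITS 18
    #define UNIT_SZ    ((uintptr_t)1 << ALIGN_BITS)
    #define UNIT_MASK  (~(UNIT_SZ - 1))

    static uintptr_t carrier_of(uintptr_t blk_addr, uintptr_t unit_offset)
    {
        /* floor the block address to its aligned unit, then step back
           'unit_offset' units to reach the carrier start */
        return (blk_addr & UNIT_MASK) - (unit_offset << ALIGN_BITS);
    }

    int main(void)
    {
        uintptr_t carrier = 16 * UNIT_SZ;                 /* carriers are super-aligned */
        uintptr_t blk     = carrier + 3 * UNIT_SZ + 4096; /* 3 units + 4K into the MBC  */
        assert(carrier_of(blk, 3) == carrier);
        return 0;
    }

Free MBC blocks, in contrast, simply store an explicit carrier pointer
(FBLK_TO_MBC above).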
@@ -226,20 +351,20 @@ static Uint mseg_unit_size;
#define SCH_MBC 0
#define SCH_SBC SBC_CARRIER_HDR_FLAG
-#define SET_CARRIER_HDR(C, Sz, F) \
- (ASSERT(((Sz) & FLG_MASK) == 0), (C)->chdr = ((Sz) | (F)))
+#define SET_CARRIER_HDR(C, Sz, F, AP) \
+ (ASSERT(((Sz) & FLG_MASK) == 0), (C)->chdr = ((Sz) | (F)), (C)->allctr = (AP))
-#define BLK2SBC(AP, B) \
- ((Carrier_t *) (((char *) (B)) - (AP)->sbc_header_size))
-#define FBLK2MBC(AP, B) \
- ((Carrier_t *) (((char *) (B)) - (AP)->mbc_header_size))
+#define BLK_TO_SBC(B) \
+ ((Carrier_t *) (((char *) (B)) - SBC_HEADER_SIZE))
+#define FIRST_BLK_TO_MBC(AP, B) \
+ ((Carrier_t *) (((char *) (B)) - MBC_HEADER_SIZE(AP)))
-#define MBC2FBLK(AP, P) \
- ((Block_t *) (((char *) (P)) + (AP)->mbc_header_size))
+#define MBC_TO_FIRST_BLK(AP, P) \
+ ((Block_t *) (((char *) (P)) + MBC_HEADER_SIZE(AP)))
#define SBC2BLK(AP, P) \
- ((Block_t *) (((char *) (P)) + (AP)->sbc_header_size))
+ ((Block_t *) (((char *) (P)) + SBC_HEADER_SIZE))
#define SBC2UMEM(AP, P) \
- ((void *) (((char *) (P)) + ((AP)->sbc_header_size + ABLK_HDR_SZ)))
+ ((void *) (((char *) (P)) + (SBC_HEADER_SIZE + ABLK_HDR_SZ)))
#define IS_MSEG_CARRIER(C) \
((C)->chdr & MSEG_CARRIER_HDR_FLAG)
@@ -250,15 +375,6 @@ static Uint mseg_unit_size;
#define IS_MB_CARRIER(C) \
(!IS_SB_CARRIER((C)))
-#define SET_MSEG_CARRIER(C) \
- ((C)->chdr |= MSEG_CARRIER_HDR_FLAG)
-#define SET_SYS_ALLOC_CARRIER(C) \
- ((C)->chdr &= ~MSEG_CARRIER_HDR_FLAG)
-#define SET_SB_CARRIER(C) \
- ((C)->chdr |= SBC_CARRIER_HDR_FLAG)
-#define SET_MB_CARRIER(C) \
- ((C)->chdr &= ~SBC_CARRIER_HDR_FLAG)
-
#define SET_CARRIER_SZ(C, SZ) \
(ASSERT(((SZ) & FLG_MASK) == 0), \
((C)->chdr = ((C)->chdr & FLG_MASK) | (SZ)))
@@ -506,11 +622,11 @@ static void mbc_free(Allctr_t *allctr, void *p);
#if HAVE_ERTS_MSEG
static ERTS_INLINE void *
-alcu_mseg_alloc(Allctr_t *allctr, Uint *size_p)
+alcu_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags)
{
void *res;
- res = erts_mseg_alloc_opt(allctr->alloc_no, size_p, &allctr->mseg_opt);
+ res = erts_mseg_alloc_opt(allctr->alloc_no, size_p, flags, &allctr->mseg_opt);
INC_CC(allctr->calls.mseg_alloc);
return res;
}
@@ -521,7 +637,7 @@ alcu_mseg_realloc(Allctr_t *allctr, void *seg, Uint old_size, Uint *new_size_p)
void *res;
res = erts_mseg_realloc_opt(allctr->alloc_no, seg, old_size, new_size_p,
- &allctr->mseg_opt);
+ ERTS_MSEG_FLG_NONE, &allctr->mseg_opt);
INC_CC(allctr->calls.mseg_realloc);
return res;
}
@@ -765,13 +881,9 @@ erts_alcu_fix_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
#define ERTS_ALCU_DD_FIX_TYPE_OFFS \
((sizeof(ErtsAllctrDDBlock_t)-1)/sizeof(UWord) + 1)
-#define ERTS_AU_PREF_ALLOC_IX_MASK \
- ((((UWord) 1) << ERTS_AU_PREF_ALLOC_BITS) - 1)
-#define ERTS_AU_PREF_ALLOC_SIZE_MASK \
- ((((UWord) 1) << (sizeof(UWord)*8 - ERTS_AU_PREF_ALLOC_BITS)) - 1)
-static ERTS_INLINE int
-get_pref_allctr(void *extra, Allctr_t **allctr)
+static ERTS_INLINE Allctr_t*
+get_pref_allctr(void *extra)
{
ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
int pref_ix;
@@ -781,34 +893,33 @@ get_pref_allctr(void *extra, Allctr_t **allctr)
ASSERT(sizeof(UWord) == sizeof(Allctr_t *));
ASSERT(0 <= pref_ix && pref_ix < tspec->size);
- *allctr = tspec->allctr[pref_ix];
- return pref_ix;
+ return tspec->allctr[pref_ix];
}
-static ERTS_INLINE void *
-get_used_allctr(void *extra, void *p, Allctr_t **allctr, UWord *sizep)
+/* SMP note:
+ * get_used_allctr() must be safe WITHOUT locking the allocator while
+ * concurrent threads may be updating adjacent blocks.
+ * We rely on getting a consistent result (without atomic op) when reading
+ * the block header word even if a concurrent thread is updating
+ * the "PREV_FREE" flag bit.
+ */
+static ERTS_INLINE Allctr_t*
+get_used_allctr(void *extra, void *p, UWord *sizep)
{
- ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
- void *ptr = (void *) (((char *) p) - sizeof(UWord));
- UWord ainfo = *((UWord *) ptr);
- int aix = (int) (ainfo & ERTS_AU_PREF_ALLOC_IX_MASK);
- *allctr = tspec->allctr[aix];
- if (sizep)
- *sizep = ((ainfo >> ERTS_AU_PREF_ALLOC_BITS)
- & ERTS_AU_PREF_ALLOC_SIZE_MASK);
- return ptr;
-}
+ Block_t* blk = UMEM2BLK(p);
+ Carrier_t* crr;
-static ERTS_INLINE void *
-put_used_allctr(void *p, int ix, UWord size)
-{
- UWord ainfo = (size >= ERTS_AU_PREF_ALLOC_SIZE_MASK
- ? ERTS_AU_PREF_ALLOC_SIZE_MASK
- : size);
- ainfo <<= ERTS_AU_PREF_ALLOC_BITS;
- ainfo |= (UWord) ix;
- *((UWord *) p) = ainfo;
- return (void *) (((char *) p) + sizeof(UWord));
+ if (IS_SBC_BLK(blk)) {
+ crr = BLK_TO_SBC(blk);
+ if (sizep)
+ *sizep = SBC_BLK_SZ(blk) - ABLK_HDR_SZ;
+ }
+ else {
+ crr = ABLK_TO_MBC(blk);
+ if (sizep)
+ *sizep = MBC_ABLK_SZ(blk) - ABLK_HDR_SZ;
+ }
+ return crr->allctr;
}
static void
@@ -1209,10 +1320,8 @@ mbc_alloc_block(Allctr_t *allctr, Uint size, Uint *blk_szp, Uint32 *alcu_flgsp)
if ((*alcu_flgsp) & ERTS_ALCU_FLG_SBMBC)
blk = create_sbmbc(allctr, get_blk_sz);
else {
-#if HALFWORD_HEAP
- blk = create_carrier(allctr, get_blk_sz, CFLG_MBC|CFLG_FORCE_MSEG);
-#else
blk = create_carrier(allctr, get_blk_sz, CFLG_MBC);
+#if !HALFWORD_HEAP && !HAVE_SUPER_ALIGNED_MB_CARRIERS
if (!blk) {
/* Emergency! We couldn't create the carrier as we wanted.
Try to place it in a sys_alloced sbc. */
@@ -1242,6 +1351,7 @@ mbc_alloc_finalize(Allctr_t *allctr,
Block_t *blk,
Uint org_blk_sz,
UWord flags,
+ Carrier_t *crr,
Uint want_blk_sz,
int valid_blk_info,
Uint32 alcu_flgs)
@@ -1262,22 +1372,18 @@ mbc_alloc_finalize(Allctr_t *allctr,
/* Shrink block... */
blk_sz = want_blk_sz;
nxt_blk_sz = org_blk_sz - blk_sz;
- SET_BLK_HDR(blk,
- blk_sz,
- SBH_THIS_ALLOCED|SBH_NOT_LAST_BLK|prev_free_flg);
+ SET_MBC_ABLK_HDR(blk, blk_sz, prev_free_flg, crr);
- nxt_blk = NXT_BLK(blk);
- SET_BLK_HDR(nxt_blk,
- nxt_blk_sz,
- (SBH_THIS_FREE
- | SBH_PREV_ALLOCED
- | (flags & LAST_BLK_HDR_FLG)));
+ nxt_blk = BLK_AFTER(blk, blk_sz);
+ SET_MBC_FBLK_HDR(nxt_blk, nxt_blk_sz,
+ SBH_THIS_FREE|(flags & LAST_BLK_HDR_FLG),
+ crr);
if (!(flags & LAST_BLK_HDR_FLG)) {
SET_BLK_SZ_FTR(nxt_blk, nxt_blk_sz);
if (!valid_blk_info) {
- Block_t *nxt_nxt_blk = NXT_BLK(nxt_blk);
- SET_PREV_BLK_FREE(nxt_nxt_blk);
+ Block_t *nxt_nxt_blk = BLK_AFTER(nxt_blk, nxt_blk_sz);
+ SET_PREV_BLK_FREE(allctr, nxt_nxt_blk);
}
}
(*allctr->link_free_block)(allctr, nxt_blk, alcu_flgs);
@@ -1291,40 +1397,40 @@ mbc_alloc_finalize(Allctr_t *allctr,
|| nxt_blk == PREV_BLK(NXT_BLK(nxt_blk)));
ASSERT((flags & LAST_BLK_HDR_FLG)
|| IS_PREV_BLK_FREE(NXT_BLK(nxt_blk)));
- ASSERT(nxt_blk_sz == BLK_SZ(nxt_blk));
+ ASSERT(nxt_blk_sz == MBC_BLK_SZ(nxt_blk));
ASSERT(nxt_blk_sz % sizeof(Unit_t) == 0);
ASSERT(nxt_blk_sz >= allctr->min_block_size);
+ ASSERT(ABLK_TO_MBC(blk) == crr);
+ ASSERT(FBLK_TO_MBC(nxt_blk) == crr);
}
else {
+ ASSERT(org_blk_sz <= MBC_ABLK_SZ_MASK);
blk_sz = org_blk_sz;
if (flags & LAST_BLK_HDR_FLG) {
if (valid_blk_info)
SET_BLK_ALLOCED(blk);
else
- SET_BLK_HDR(blk,
- blk_sz,
- SBH_THIS_ALLOCED|SBH_LAST_BLK|prev_free_flg);
+ SET_MBC_ABLK_HDR(blk, blk_sz, SBH_LAST_BLK|prev_free_flg, crr);
}
else {
if (valid_blk_info)
SET_BLK_ALLOCED(blk);
else
- SET_BLK_HDR(blk,
- blk_sz,
- SBH_THIS_ALLOCED|SBH_NOT_LAST_BLK|prev_free_flg);
- nxt_blk = NXT_BLK(blk);
+ SET_MBC_ABLK_HDR(blk, blk_sz, prev_free_flg, crr);
+ nxt_blk = BLK_AFTER(blk, blk_sz);
SET_PREV_BLK_ALLOCED(nxt_blk);
}
ASSERT((flags & LAST_BLK_HDR_FLG)
? IS_LAST_BLK(blk)
: IS_NOT_LAST_BLK(blk));
+ ASSERT(ABLK_TO_MBC(blk) == crr);
}
STAT_MBC_BLK_ALLOC(allctr, blk_sz, alcu_flgs);
ASSERT(IS_ALLOCED_BLK(blk));
- ASSERT(blk_sz == BLK_SZ(blk));
+ ASSERT(blk_sz == MBC_BLK_SZ(blk));
ASSERT(blk_sz % sizeof(Unit_t) == 0);
ASSERT(blk_sz >= allctr->min_block_size);
ASSERT(blk_sz >= want_blk_sz);
@@ -1348,8 +1454,9 @@ mbc_alloc(Allctr_t *allctr, Uint size)
if (IS_MBC_BLK(blk))
mbc_alloc_finalize(allctr,
blk,
- BLK_SZ(blk),
+ MBC_FBLK_SZ(blk),
GET_BLK_HDR_FLGS(blk),
+ FBLK_TO_MBC(blk),
blk_sz,
1,
alcu_flgs);
@@ -1370,7 +1477,7 @@ mbc_free(Allctr_t *allctr, void *p)
ASSERT(p);
blk = UMEM2BLK(p);
- blk_sz = BLK_SZ(blk);
+ blk_sz = MBC_ABLK_SZ(blk);
if (blk_sz < allctr->sbmbc_threshold)
alcu_flgs |= ERTS_ALCU_FLG_SBMBC;
@@ -1381,17 +1488,18 @@ mbc_free(Allctr_t *allctr, void *p)
STAT_MBC_BLK_FREE(allctr, blk_sz, alcu_flgs);
- is_first_blk = IS_FIRST_BLK(blk);
+ is_first_blk = IS_MBC_FIRST_ABLK(allctr, blk);
is_last_blk = IS_LAST_BLK(blk);
- if (!is_first_blk && IS_PREV_BLK_FREE(blk)) {
+ if (IS_PREV_BLK_FREE(blk)) {
+ ASSERT(!is_first_blk);
/* Coalesce with previous block... */
blk = PREV_BLK(blk);
(*allctr->unlink_free_block)(allctr, blk, alcu_flgs);
- blk_sz += BLK_SZ(blk);
- is_first_blk = IS_FIRST_BLK(blk);
- SET_BLK_SZ(blk, blk_sz);
+ blk_sz += MBC_FBLK_SZ(blk);
+ is_first_blk = IS_MBC_FIRST_FBLK(allctr, blk);
+ SET_MBC_FBLK_SZ(blk, blk_sz);
}
else {
SET_BLK_FREE(blk);
@@ -1400,12 +1508,12 @@ mbc_free(Allctr_t *allctr, void *p)
if (is_last_blk)
SET_LAST_BLK(blk);
else {
- nxt_blk = NXT_BLK(blk);
+ nxt_blk = BLK_AFTER(blk, blk_sz);
if (IS_FREE_BLK(nxt_blk)) {
/* Coalesce with next block... */
(*allctr->unlink_free_block)(allctr, nxt_blk, alcu_flgs);
- blk_sz += BLK_SZ(nxt_blk);
- SET_BLK_SZ(blk, blk_sz);
+ blk_sz += MBC_FBLK_SZ(nxt_blk);
+ SET_MBC_FBLK_SZ(blk, blk_sz);
is_last_blk = IS_LAST_BLK(nxt_blk);
if (is_last_blk)
@@ -1416,26 +1524,26 @@ mbc_free(Allctr_t *allctr, void *p)
}
}
else {
- SET_PREV_BLK_FREE(nxt_blk);
+ SET_PREV_BLK_FREE(allctr, nxt_blk);
SET_NOT_LAST_BLK(blk);
SET_BLK_SZ_FTR(blk, blk_sz);
}
}
- ASSERT(is_last_blk ? IS_LAST_BLK(blk) : IS_NOT_LAST_BLK(blk));
- ASSERT(is_first_blk ? IS_FIRST_BLK(blk) : IS_NOT_FIRST_BLK(blk));
ASSERT(IS_FREE_BLK(blk));
+ ASSERT(!is_last_blk == !IS_LAST_BLK(blk));
+ ASSERT(!is_first_blk == !IS_MBC_FIRST_FBLK(allctr, blk));
ASSERT(is_first_blk || IS_PREV_BLK_ALLOCED(blk));
ASSERT(is_last_blk || IS_PREV_BLK_FREE(NXT_BLK(blk)));
- ASSERT(blk_sz == BLK_SZ(blk));
+ ASSERT(blk_sz == MBC_BLK_SZ(blk));
ASSERT(is_last_blk || blk == PREV_BLK(NXT_BLK(blk)));
ASSERT(blk_sz % sizeof(Unit_t) == 0);
ASSERT(IS_MBC_BLK(blk));
if (is_first_blk
&& is_last_blk
- && allctr->main_carrier != FBLK2MBC(allctr, blk)) {
+ && allctr->main_carrier != FIRST_BLK_TO_MBC(allctr, blk)) {
if (alcu_flgs & ERTS_ALCU_FLG_SBMBC)
destroy_sbmbc(allctr, blk);
else
@@ -1472,7 +1580,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
ASSERT(size < allctr->sbc_threshold);
blk = (Block_t *) UMEM2BLK(p);
- old_blk_sz = BLK_SZ(blk);
+ old_blk_sz = MBC_ABLK_SZ(blk);
ASSERT(old_blk_sz >= allctr->min_block_size);
@@ -1497,6 +1605,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
return p;
else if (blk_sz < old_blk_sz) {
/* Shrink block... */
+ Carrier_t* crr;
Block_t *nxt_nxt_blk;
Uint diff_sz_val = old_blk_sz - blk_sz;
Uint old_blk_sz_val = old_blk_sz;
@@ -1516,16 +1625,18 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
return NULL;
cand_blk_sz = old_blk_sz;
- if (!IS_PREV_BLK_FREE(blk) || IS_FIRST_BLK(blk))
+ if (!IS_PREV_BLK_FREE(blk)) {
cand_blk = blk;
+ }
else {
+ ASSERT(!IS_MBC_FIRST_ABLK(allctr, blk));
cand_blk = PREV_BLK(blk);
cand_blk_sz += PREV_BLK_SZ(blk);
}
if (!is_last_blk) {
- nxt_blk = NXT_BLK(blk);
+ nxt_blk = BLK_AFTER(blk, old_blk_sz);
if (IS_FREE_BLK(nxt_blk))
- cand_blk_sz += BLK_SZ(nxt_blk);
+ cand_blk_sz += MBC_FBLK_SZ(nxt_blk);
}
new_blk = (*allctr->get_free_block)(allctr,
@@ -1541,52 +1652,48 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
nxt_blk_sz = old_blk_sz - blk_sz;
- if ((is_last_blk || IS_ALLOCED_BLK(NXT_BLK(blk)))
+ if ((is_last_blk || IS_ALLOCED_BLK(BLK_AFTER(blk,old_blk_sz)))
&& (nxt_blk_sz < allctr->min_block_size))
return p;
HARD_CHECK_BLK_CARRIER(allctr, blk);
- SET_BLK_SZ(blk, blk_sz);
+ nxt_nxt_blk = BLK_AFTER(blk, old_blk_sz);
+
+ SET_MBC_ABLK_SZ(blk, blk_sz);
SET_NOT_LAST_BLK(blk);
- nxt_blk = NXT_BLK(blk);
- SET_BLK_HDR(nxt_blk,
- nxt_blk_sz,
- SBH_THIS_FREE|SBH_PREV_ALLOCED|SBH_NOT_LAST_BLK);
+ nxt_blk = BLK_AFTER(blk, blk_sz);
STAT_MBC_BLK_FREE(allctr, old_blk_sz, alcu_flgs);
STAT_MBC_BLK_ALLOC(allctr, blk_sz, alcu_flgs);
- ASSERT(BLK_SZ(blk) >= allctr->min_block_size);
+ ASSERT(MBC_BLK_SZ(blk) >= allctr->min_block_size);
- if (is_last_blk)
- SET_LAST_BLK(nxt_blk);
- else {
- nxt_nxt_blk = NXT_BLK(nxt_blk);
+ if (!is_last_blk) {
if (IS_FREE_BLK(nxt_nxt_blk)) {
/* Coalesce with next free block... */
- nxt_blk_sz += BLK_SZ(nxt_nxt_blk);
+ nxt_blk_sz += MBC_FBLK_SZ(nxt_nxt_blk);
(*allctr->unlink_free_block)(allctr, nxt_nxt_blk, alcu_flgs);
- SET_BLK_SZ(nxt_blk, nxt_blk_sz);
- is_last_blk = IS_LAST_BLK(nxt_nxt_blk);
- if (is_last_blk)
- SET_LAST_BLK(nxt_blk);
- else
- SET_BLK_SZ_FTR(nxt_blk, nxt_blk_sz);
+ is_last_blk = GET_LAST_BLK_HDR_FLG(nxt_nxt_blk);
}
else {
- SET_BLK_SZ_FTR(nxt_blk, nxt_blk_sz);
- SET_PREV_BLK_FREE(nxt_nxt_blk);
+ SET_PREV_BLK_FREE(allctr, nxt_nxt_blk);
}
+ SET_BLK_SZ_FTR(nxt_blk, nxt_blk_sz);
}
+ crr = ABLK_TO_MBC(blk);
+ SET_MBC_FBLK_HDR(nxt_blk, nxt_blk_sz,
+ SBH_THIS_FREE | (is_last_blk ? SBH_LAST_BLK : 0),
+ crr);
+
(*allctr->link_free_block)(allctr, nxt_blk, alcu_flgs);
ASSERT(IS_ALLOCED_BLK(blk));
- ASSERT(blk_sz == BLK_SZ(blk));
+ ASSERT(blk_sz == MBC_BLK_SZ(blk));
ASSERT(blk_sz % sizeof(Unit_t) == 0);
ASSERT(blk_sz >= allctr->min_block_size);
ASSERT(blk_sz >= size + ABLK_HDR_SZ);
@@ -1594,14 +1701,15 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
ASSERT(IS_FREE_BLK(nxt_blk));
ASSERT(IS_PREV_BLK_ALLOCED(nxt_blk));
- ASSERT(nxt_blk_sz == BLK_SZ(nxt_blk));
+ ASSERT(nxt_blk_sz == MBC_BLK_SZ(nxt_blk));
ASSERT(nxt_blk_sz % sizeof(Unit_t) == 0);
ASSERT(nxt_blk_sz >= allctr->min_block_size);
ASSERT(IS_MBC_BLK(nxt_blk));
ASSERT(is_last_blk ? IS_LAST_BLK(nxt_blk) : IS_NOT_LAST_BLK(nxt_blk));
ASSERT(is_last_blk || nxt_blk == PREV_BLK(NXT_BLK(nxt_blk)));
ASSERT(is_last_blk || IS_PREV_BLK_FREE(NXT_BLK(nxt_blk)));
-
+ ASSERT(FBLK_TO_MBC(nxt_blk) == crr);
+
HARD_CHECK_BLK_CARRIER(allctr, blk);
return p;
@@ -1610,8 +1718,8 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
/* Need larger block... */
if (!is_last_blk) {
- nxt_blk = NXT_BLK(blk);
- nxt_blk_sz = BLK_SZ(nxt_blk);
+ nxt_blk = BLK_AFTER(blk, old_blk_sz);
+ nxt_blk_sz = MBC_BLK_SZ(nxt_blk);
if (IS_FREE_BLK(nxt_blk) && get_blk_sz <= old_blk_sz + nxt_blk_sz) {
/* Grow into next block... */
@@ -1624,7 +1732,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
if (nxt_blk_sz < allctr->min_block_size) {
blk_sz += nxt_blk_sz;
- SET_BLK_SZ(blk, blk_sz);
+ SET_MBC_ABLK_SZ(blk, blk_sz);
if (is_last_blk) {
SET_LAST_BLK(blk);
@@ -1633,21 +1741,20 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
#endif
}
else {
- nxt_blk = NXT_BLK(blk);
+ nxt_blk = BLK_AFTER(blk, blk_sz);
SET_PREV_BLK_ALLOCED(nxt_blk);
#ifdef DEBUG
is_last_blk = IS_LAST_BLK(nxt_blk);
- nxt_blk_sz = BLK_SZ(nxt_blk);
+ nxt_blk_sz = MBC_BLK_SZ(nxt_blk);
#endif
}
}
else {
- SET_BLK_SZ(blk, blk_sz);
+ Carrier_t* crr = ABLK_TO_MBC(blk);
+ SET_MBC_ABLK_SZ(blk, blk_sz);
- nxt_blk = NXT_BLK(blk);
- SET_BLK_HDR(nxt_blk,
- nxt_blk_sz,
- SBH_THIS_FREE|SBH_PREV_ALLOCED|SBH_NOT_LAST_BLK);
+ nxt_blk = BLK_AFTER(blk, blk_sz);
+ SET_MBC_FBLK_HDR(nxt_blk, nxt_blk_sz, SBH_THIS_FREE, crr);
if (is_last_blk)
SET_LAST_BLK(nxt_blk);
@@ -1657,6 +1764,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
(*allctr->link_free_block)(allctr, nxt_blk, alcu_flgs);
ASSERT(IS_FREE_BLK(nxt_blk));
+ ASSERT(FBLK_TO_MBC(nxt_blk) == crr);
}
STAT_MBC_BLK_FREE(allctr, old_blk_sz, alcu_flgs);
@@ -1664,14 +1772,14 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
ASSERT(IS_ALLOCED_BLK(blk));
- ASSERT(blk_sz == BLK_SZ(blk));
+ ASSERT(blk_sz == MBC_BLK_SZ(blk));
ASSERT(blk_sz % sizeof(Unit_t) == 0);
ASSERT(blk_sz >= allctr->min_block_size);
ASSERT(blk_sz >= size + ABLK_HDR_SZ);
ASSERT(IS_MBC_BLK(blk));
ASSERT(!nxt_blk || IS_PREV_BLK_ALLOCED(nxt_blk));
- ASSERT(!nxt_blk || nxt_blk_sz == BLK_SZ(nxt_blk));
+ ASSERT(!nxt_blk || nxt_blk_sz == MBC_BLK_SZ(nxt_blk));
ASSERT(!nxt_blk || nxt_blk_sz % sizeof(Unit_t) == 0);
ASSERT(!nxt_blk || nxt_blk_sz >= allctr->min_block_size);
ASSERT(!nxt_blk || IS_MBC_BLK(nxt_blk));
@@ -1696,18 +1804,19 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
/* Need to grow in another block */
- if (!IS_PREV_BLK_FREE(blk) || IS_FIRST_BLK(blk)) {
+ if (!IS_PREV_BLK_FREE(blk)) {
cand_blk = NULL;
cand_blk_sz = 0;
}
else {
+ ASSERT(!IS_MBC_FIRST_ABLK(allctr, blk));
cand_blk = PREV_BLK(blk);
cand_blk_sz = old_blk_sz + PREV_BLK_SZ(blk);
if (!is_last_blk) {
- nxt_blk = NXT_BLK(blk);
+ nxt_blk = BLK_AFTER(blk, old_blk_sz);
if (IS_FREE_BLK(nxt_blk))
- cand_blk_sz += BLK_SZ(nxt_blk);
+ cand_blk_sz += MBC_FBLK_SZ(nxt_blk);
}
}
@@ -1743,8 +1852,9 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
if (new_blk) {
mbc_alloc_finalize(allctr,
new_blk,
- BLK_SZ(new_blk),
+ MBC_FBLK_SZ(new_blk),
GET_BLK_HDR_FLGS(new_blk),
+ FBLK_TO_MBC(new_blk),
blk_sz,
1,
alcu_flgs);
@@ -1754,6 +1864,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
return new_p;
}
else {
+ Carrier_t* crr;
Uint new_blk_sz;
UWord new_blk_flgs;
Uint prev_blk_sz;
@@ -1774,10 +1885,10 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
if (is_last_blk)
new_blk_flgs |= LAST_BLK_HDR_FLG;
else {
- nxt_blk = NXT_BLK(blk);
+ nxt_blk = BLK_AFTER(blk, old_blk_sz);
if (IS_FREE_BLK(nxt_blk)) {
new_blk_flgs |= GET_LAST_BLK_HDR_FLG(nxt_blk);
- new_blk_sz += BLK_SZ(nxt_blk);
+ new_blk_sz += MBC_FBLK_SZ(nxt_blk);
(*allctr->unlink_free_block)(allctr, nxt_blk, alcu_flgs);
}
}
@@ -1790,6 +1901,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
new_p = BLK2UMEM(new_blk);
blk_cpy_sz = MIN(blk_sz, old_blk_sz);
+ crr = FBLK_TO_MBC(new_blk);
if (prev_blk_sz >= blk_cpy_sz)
sys_memcpy(new_p, p, blk_cpy_sz - ABLK_HDR_SZ);
@@ -1800,6 +1912,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
new_blk,
new_blk_sz,
new_blk_flgs,
+ crr,
blk_sz,
0,
alcu_flgs);
@@ -1815,35 +1928,40 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
#ifdef DEBUG
#if HAVE_ERTS_MSEG
-#define ASSERT_MSEG_UNIT_SIZE_MULTIPLE(CSZ) ASSERT((CSZ) % mseg_unit_size == 0)
+#define ASSERT_MSEG_UNIT_SIZE_MULTIPLE(CSZ) ASSERT((CSZ) % MSEG_UNIT_SZ == 0)
#else
#define ASSERT_MSEG_UNIT_SIZE_MULTIPLE(CSZ)
#endif
-#define CHECK_1BLK_CARRIER(A, SBC, MSEGED, C, CSZ, B, BSZ) \
-do { \
- ASSERT(IS_FIRST_BLK((B))); \
- ASSERT(IS_LAST_BLK((B))); \
- ASSERT((CSZ) == CARRIER_SZ((C))); \
- ASSERT((BSZ) == BLK_SZ((B))); \
- ASSERT((BSZ) % sizeof(Unit_t) == 0); \
- if ((SBC)) { \
- ASSERT(IS_SBC_BLK((B))); \
- ASSERT(IS_SB_CARRIER((C))); \
- } \
- else { \
- ASSERT(IS_MBC_BLK((B))); \
- ASSERT(IS_MB_CARRIER((C))); \
- } \
- if ((MSEGED)) { \
- ASSERT(IS_MSEG_CARRIER((C))); \
- ASSERT_MSEG_UNIT_SIZE_MULTIPLE((CSZ)); \
- } \
- else { \
- ASSERT(IS_SYS_ALLOC_CARRIER((C))); \
- ASSERT((CSZ) % sizeof(Unit_t) == 0); \
- } \
-} while (0)
+static void CHECK_1BLK_CARRIER(Allctr_t* A, int SBC, int MSEGED, Carrier_t* C,
+ UWord CSZ, Block_t* B, UWord BSZ)
+{
+ ASSERT(IS_LAST_BLK((B)));
+ ASSERT((CSZ) == CARRIER_SZ((C)));
+ ASSERT((BSZ) % sizeof(Unit_t) == 0);
+ if ((SBC)) {
+ ASSERT((BSZ) == SBC_BLK_SZ((B)));
+ ASSERT((char*)B == (char*)C + SBC_HEADER_SIZE);
+ ASSERT(IS_SBC_BLK((B)));
+ ASSERT(IS_SB_CARRIER((C)));
+ }
+ else {
+ ASSERT(IS_FREE_BLK(B));
+ ASSERT((BSZ) == MBC_FBLK_SZ((B)));
+ ASSERT(IS_MBC_FIRST_FBLK(A, (B)));
+ ASSERT(IS_MBC_BLK((B)));
+ ASSERT(IS_MB_CARRIER((C)));
+ ASSERT(FBLK_TO_MBC(B) == (C));
+ }
+ if ((MSEGED)) {
+ ASSERT(IS_MSEG_CARRIER((C)));
+ ASSERT_MSEG_UNIT_SIZE_MULTIPLE((CSZ));
+ }
+ else {
+ ASSERT(IS_SYS_ALLOC_CARRIER((C)));
+ ASSERT((CSZ) % sizeof(Unit_t) == 0);
+ }
+}
#else
#define CHECK_1BLK_CARRIER(A, SBC, MSEGED, C, CSZ, B, BSZ)
@@ -1865,37 +1983,18 @@ create_sbmbc(Allctr_t *allctr, Uint umem_sz)
crr = erts_alloc(ERTS_ALC_T_SBMBC, crr_sz);
INC_CC(allctr->calls.sbmbc_alloc);
- SET_CARRIER_HDR(crr, crr_sz, SCH_SYS_ALLOC|SCH_MBC);
-
- blk = MBC2FBLK(allctr, crr);
+ SET_CARRIER_HDR(crr, crr_sz, SCH_SYS_ALLOC|SCH_MBC, allctr);
-#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- if (allctr->mbc_header_size % sizeof(Unit_t) == 0)
- crr_sz -= sizeof(UWord);
-#endif
+ blk = MBC_TO_FIRST_BLK(allctr, crr);
- blk_sz = UNIT_FLOOR(crr_sz - allctr->mbc_header_size);
+ blk_sz = UNIT_FLOOR(crr_sz - MBC_HEADER_SIZE(allctr));
- SET_MBC_BLK_FTR(((UWord *) blk)[-1]);
- SET_BLK_HDR(blk, blk_sz, SBH_THIS_FREE|SBH_PREV_FREE|SBH_LAST_BLK);
-
-#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- *((Carrier_t **) NXT_BLK(blk)) = crr;
-#endif
+ SET_MBC_FBLK_HDR(blk, blk_sz, SBH_THIS_FREE|SBH_LAST_BLK, crr);
link_carrier(&allctr->sbmbc_list, crr);
-#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- if (allctr->mbc_header_size % sizeof(Unit_t) == 0)
- crr_sz += sizeof(UWord);
-#endif
-
STAT_SBMBC_ALLOC(allctr, crr_sz);
CHECK_1BLK_CARRIER(allctr, 0, 0, crr, crr_sz, blk, blk_sz);
-#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- if (allctr->mbc_header_size % sizeof(Unit_t) == 0)
- crr_sz -= sizeof(UWord);
-#endif
if (allctr->creating_mbc)
(*allctr->creating_mbc)(allctr, crr, ERTS_ALCU_FLG_SBMBC);
@@ -1909,11 +2008,10 @@ destroy_sbmbc(Allctr_t *allctr, Block_t *blk)
Uint crr_sz;
Carrier_t *crr;
- ASSERT(IS_FIRST_BLK(blk));
-
ASSERT(IS_MBC_BLK(blk));
+ ASSERT(IS_MBC_FIRST_FBLK(allctr, blk));
- crr = FBLK2MBC(allctr, blk);
+ crr = FIRST_BLK_TO_MBC(allctr, blk);
crr_sz = CARRIER_SZ(crr);
#ifdef DEBUG
@@ -1952,14 +2050,25 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
Uint blk_sz, bcrr_sz, crr_sz;
#if HAVE_ERTS_MSEG
int have_tried_sys_alloc = 0, have_tried_mseg = 0;
+ Uint mseg_flags;
#endif
#ifdef DEBUG
int is_mseg = 0;
#endif
+#if HALFWORD_HEAP
+ flags |= CFLG_FORCE_MSEG;
+#elif HAVE_SUPER_ALIGNED_MB_CARRIERS
+ if (flags & CFLG_MBC) {
+ flags |= CFLG_FORCE_MSEG;
+ }
+#endif
+
ASSERT((flags & CFLG_SBC && !(flags & CFLG_MBC))
|| (flags & CFLG_MBC && !(flags & CFLG_SBC)));
+ ASSERT(!(flags & CFLG_FORCE_MSEG && flags & CFLG_FORCE_SYS_ALLOC));
+
blk_sz = UMEMSZ2BLKSZ(allctr, umem_sz);
#if HAVE_ERTS_MSEG
@@ -1974,29 +2083,27 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
if (allctr->sbcs.curr.norm.mseg.no >= allctr->max_mseg_sbcs)
goto try_sys_alloc;
}
+#if !HAVE_SUPER_ALIGNED_MB_CARRIERS
else {
if (allctr->mbcs.curr.norm.mseg.no >= allctr->max_mseg_mbcs)
goto try_sys_alloc;
}
+#endif
try_mseg:
if (flags & CFLG_SBC) {
- crr_sz = blk_sz + allctr->sbc_header_size;
+ crr_sz = blk_sz + SBC_HEADER_SIZE;
+ mseg_flags = ERTS_MSEG_FLG_NONE;
}
else {
crr_sz = (*allctr->get_next_mbc_size)(allctr);
- if (crr_sz < allctr->mbc_header_size + blk_sz)
- crr_sz = allctr->mbc_header_size + blk_sz;
-#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- if (allctr->mbc_header_size % sizeof(Unit_t) == 0)
- crr_sz += sizeof(UWord);
-#endif
+ if (crr_sz < MBC_HEADER_SIZE(allctr) + blk_sz)
+ crr_sz = MBC_HEADER_SIZE(allctr) + blk_sz;
+ mseg_flags = ERTS_MSEG_FLG_2POW;
}
- crr_sz = MSEG_UNIT_CEILING(crr_sz);
- ASSERT(crr_sz % mseg_unit_size == 0);
- crr = (Carrier_t *) alcu_mseg_alloc(allctr, &crr_sz);
+ crr = (Carrier_t *) alcu_mseg_alloc(allctr, &crr_sz, mseg_flags);
if (!crr) {
have_tried_mseg = 1;
if (!(have_tried_sys_alloc || flags & CFLG_FORCE_MSEG))
@@ -2008,32 +2115,31 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
is_mseg = 1;
#endif
if (flags & CFLG_SBC) {
- SET_CARRIER_HDR(crr, crr_sz, SCH_MSEG|SCH_SBC);
+ SET_CARRIER_HDR(crr, crr_sz, SCH_MSEG|SCH_SBC, allctr);
STAT_MSEG_SBC_ALLOC(allctr, crr_sz, blk_sz);
goto sbc_final_touch;
}
else {
- SET_CARRIER_HDR(crr, crr_sz, SCH_MSEG|SCH_MBC);
+#ifndef ARCH_64
+ ASSERT(crr_sz <= MBC_SZ_MAX_LIMIT);
+#endif
+ SET_CARRIER_HDR(crr, crr_sz, SCH_MSEG|SCH_MBC, allctr);
STAT_MSEG_MBC_ALLOC(allctr, crr_sz);
goto mbc_final_touch;
}
try_sys_alloc:
+
#endif /* #if HAVE_ERTS_MSEG */
if (flags & CFLG_SBC) {
- bcrr_sz = blk_sz + allctr->sbc_header_size;
+ bcrr_sz = blk_sz + SBC_HEADER_SIZE;
}
else {
- bcrr_sz = allctr->mbc_header_size + blk_sz;
+ bcrr_sz = MBC_HEADER_SIZE(allctr) + blk_sz;
if (!(flags & CFLG_MAIN_CARRIER)
&& bcrr_sz < allctr->smallest_mbc_size)
bcrr_sz = allctr->smallest_mbc_size;
-#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- if (allctr->mbc_header_size % sizeof(Unit_t) == 0)
- bcrr_sz += sizeof(UWord);
-#endif
-
}
crr_sz = (flags & CFLG_FORCE_SIZE
@@ -2057,7 +2163,7 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
}
}
if (flags & CFLG_SBC) {
- SET_CARRIER_HDR(crr, crr_sz, SCH_SYS_ALLOC|SCH_SBC);
+ SET_CARRIER_HDR(crr, crr_sz, SCH_SYS_ALLOC|SCH_SBC, allctr);
STAT_SYS_ALLOC_SBC_ALLOC(allctr, crr_sz, blk_sz);
#if HAVE_ERTS_MSEG
@@ -2066,8 +2172,7 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
blk = SBC2BLK(allctr, crr);
- SET_SBC_BLK_FTR(((UWord *) blk)[-1]);
- SET_BLK_HDR(blk, blk_sz, SBH_THIS_ALLOCED|SBH_PREV_FREE|SBH_LAST_BLK);
+ SET_SBC_BLK_HDR(blk, blk_sz);
link_carrier(&allctr->sbc_list, crr);
@@ -2075,28 +2180,18 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
}
else {
- SET_CARRIER_HDR(crr, crr_sz, SCH_SYS_ALLOC|SCH_MBC);
+ SET_CARRIER_HDR(crr, crr_sz, SCH_SYS_ALLOC|SCH_MBC, allctr);
STAT_SYS_ALLOC_MBC_ALLOC(allctr, crr_sz);
#if HAVE_ERTS_MSEG
mbc_final_touch:
#endif
- blk = MBC2FBLK(allctr, crr);
-
-#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- if (allctr->mbc_header_size % sizeof(Unit_t) == 0)
- crr_sz -= sizeof(UWord);
-#endif
-
- blk_sz = UNIT_FLOOR(crr_sz - allctr->mbc_header_size);
+ blk = MBC_TO_FIRST_BLK(allctr, crr);
- SET_MBC_BLK_FTR(((UWord *) blk)[-1]);
- SET_BLK_HDR(blk, blk_sz, SBH_THIS_FREE|SBH_PREV_FREE|SBH_LAST_BLK);
+ blk_sz = UNIT_FLOOR(crr_sz - MBC_HEADER_SIZE(allctr));
-#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- *((Carrier_t **) NXT_BLK(blk)) = crr;
-#endif
+ SET_MBC_FBLK_HDR(blk, blk_sz, SBH_THIS_FREE|SBH_LAST_BLK, crr);
if (flags & CFLG_MAIN_CARRIER) {
ASSERT(!allctr->main_carrier);
@@ -2105,15 +2200,7 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
link_carrier(&allctr->mbc_list, crr);
-#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- if (allctr->mbc_header_size % sizeof(Unit_t) == 0)
- crr_sz += sizeof(UWord);
-#endif
CHECK_1BLK_CARRIER(allctr, 0, is_mseg, crr, crr_sz, blk, blk_sz);
-#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- if (allctr->mbc_header_size % sizeof(Unit_t) == 0)
- crr_sz -= sizeof(UWord);
-#endif
if (allctr->creating_mbc)
(*allctr->creating_mbc)(allctr, crr, 0);
@@ -2142,8 +2229,8 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
HARD_CHECK_BLK_CARRIER(allctr, old_blk);
- old_blk_sz = BLK_SZ(old_blk);
- old_crr = BLK2SBC(allctr, old_blk);
+ old_blk_sz = SBC_BLK_SZ(old_blk);
+ old_crr = BLK_TO_SBC(old_blk);
old_crr_sz = CARRIER_SZ(old_crr);
ASSERT(IS_SB_CARRIER(old_crr));
ASSERT(IS_SBC_BLK(old_blk));
@@ -2157,7 +2244,7 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
if (!(flags & CFLG_FORCE_SYS_ALLOC)) {
- new_crr_sz = new_blk_sz + allctr->sbc_header_size;
+ new_crr_sz = new_blk_sz + SBC_HEADER_SIZE;
new_crr_sz = MSEG_UNIT_CEILING(new_crr_sz);
new_crr = (Carrier_t *) alcu_mseg_realloc(allctr,
old_crr,
@@ -2166,7 +2253,7 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
if (new_crr) {
SET_CARRIER_SZ(new_crr, new_crr_sz);
new_blk = SBC2BLK(allctr, new_crr);
- SET_BLK_SZ(new_blk, new_blk_sz);
+ SET_SBC_BLK_SZ(new_blk, new_blk_sz);
STAT_MSEG_SBC_ALLOC(allctr, new_crr_sz, new_blk_sz);
relink_carrier(&allctr->sbc_list, new_crr);
CHECK_1BLK_CARRIER(allctr, 1, 1, new_crr, new_crr_sz,
@@ -2174,6 +2261,11 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
DEBUG_SAVE_ALIGNMENT(new_crr);
return new_blk;
}
+#if HALFWORD_HEAP
+ /* Old carrier unchanged; restore stat */
+ STAT_MSEG_SBC_ALLOC(allctr, old_crr_sz, old_blk_sz);
+ return NULL;
+#endif
create_flags |= CFLG_FORCE_SYS_ALLOC; /* since mseg_realloc()
failed */
}
@@ -2196,7 +2288,7 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
else {
if (!(flags & CFLG_FORCE_MSEG)) {
#endif /* #if HAVE_ERTS_MSEG */
- new_bcrr_sz = new_blk_sz + allctr->sbc_header_size;
+ new_bcrr_sz = new_blk_sz + SBC_HEADER_SIZE;
new_crr_sz = (flags & CFLG_FORCE_SIZE
? UNIT_CEILING(new_bcrr_sz)
: SYS_ALLOC_CARRIER_CEILING(new_bcrr_sz));
@@ -2208,7 +2300,7 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
sys_realloc_success:
SET_CARRIER_SZ(new_crr, new_crr_sz);
new_blk = SBC2BLK(allctr, new_crr);
- SET_BLK_SZ(new_blk, new_blk_sz);
+ SET_SBC_BLK_SZ(new_blk, new_blk_sz);
STAT_SYS_ALLOC_SBC_FREE(allctr, old_crr_sz, old_blk_sz);
STAT_SYS_ALLOC_SBC_ALLOC(allctr, new_crr_sz, new_blk_sz);
relink_carrier(&allctr->sbc_list, new_crr);
@@ -2218,7 +2310,7 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
return new_blk;
}
else if (new_crr_sz > UNIT_CEILING(new_bcrr_sz)) {
- new_crr_sz = new_blk_sz + allctr->sbc_header_size;
+ new_crr_sz = new_blk_sz + SBC_HEADER_SIZE;
new_crr_sz = UNIT_CEILING(new_crr_sz);
new_crr = (Carrier_t *) alcu_sys_realloc(allctr,
(void *) old_crr,
@@ -2262,11 +2354,9 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk)
Uint is_mseg = 0;
#endif
- ASSERT(IS_FIRST_BLK(blk));
-
if (IS_SBC_BLK(blk)) {
- Uint blk_sz = BLK_SZ(blk);
- crr = BLK2SBC(allctr, blk);
+ Uint blk_sz = SBC_BLK_SZ(blk);
+ crr = BLK_TO_SBC(blk);
crr_sz = CARRIER_SZ(crr);
ASSERT(IS_LAST_BLK(blk));
@@ -2276,7 +2366,7 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk)
#if HAVE_ERTS_MSEG
if (IS_MSEG_CARRIER(crr)) {
is_mseg++;
- ASSERT(crr_sz % mseg_unit_size == 0);
+ ASSERT(crr_sz % MSEG_UNIT_SZ == 0);
STAT_MSEG_SBC_FREE(allctr, crr_sz, blk_sz);
}
else
@@ -2287,7 +2377,8 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk)
}
else {
- crr = FBLK2MBC(allctr, blk);
+ ASSERT(IS_MBC_FIRST_FBLK(allctr, blk));
+ crr = FIRST_BLK_TO_MBC(allctr, blk);
crr_sz = CARRIER_SZ(crr);
#ifdef DEBUG
@@ -2305,7 +2396,7 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk)
#if HAVE_ERTS_MSEG
if (IS_MSEG_CARRIER(crr)) {
is_mseg++;
- ASSERT(crr_sz % mseg_unit_size == 0);
+ ASSERT(crr_sz % MSEG_UNIT_SZ == 0);
STAT_MSEG_MBC_FREE(allctr, crr_sz);
}
else
@@ -3430,12 +3521,7 @@ do_erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size)
if (allctr->dd.use)
ERTS_ALCU_HANDLE_DD_IN_OP(allctr, 1);
#endif
-#if HALFWORD_HEAP
- blk = create_carrier(allctr, size,
- CFLG_SBC | CFLG_FORCE_MSEG);
-#else
blk = create_carrier(allctr, size, CFLG_SBC);
-#endif
res = blk ? BLK2UMEM(blk) : NULL;
}
else
@@ -3506,24 +3592,20 @@ erts_alcu_alloc_thr_spec(ErtsAlcType_t type, void *extra, Uint size)
void *
erts_alcu_alloc_thr_pref(ErtsAlcType_t type, void *extra, Uint size)
{
- int pref_ix;
Allctr_t *pref_allctr;
void *res;
- pref_ix = get_pref_allctr(extra, &pref_allctr);
+ pref_allctr = get_pref_allctr(extra);
if (pref_allctr->thread_safe)
erts_mtx_lock(&pref_allctr->mutex);
ERTS_ALCU_DBG_CHK_THR_ACCESS(pref_allctr);
- res = do_erts_alcu_alloc(type, pref_allctr, size + sizeof(UWord));
+ res = do_erts_alcu_alloc(type, pref_allctr, size);
if (pref_allctr->thread_safe)
erts_mtx_unlock(&pref_allctr->mutex);
- if (res)
- res = put_used_allctr(res, pref_ix, size);
-
DEBUG_CHECK_ALIGNMENT(res);
@@ -3644,21 +3726,20 @@ erts_alcu_free_thr_pref(ErtsAlcType_t type, void *extra, void *p)
{
if (p) {
Allctr_t *pref_allctr, *used_allctr;
- void *ptr;
- get_pref_allctr(extra, &pref_allctr);
- ptr = get_used_allctr(extra, p, &used_allctr, NULL);
+ pref_allctr = get_pref_allctr(extra);
+ used_allctr = get_used_allctr(extra, p, NULL);
if (pref_allctr != used_allctr)
enqueue_dealloc_other_instance(type,
used_allctr,
- ptr,
+ p,
(used_allctr->dd.ix
- pref_allctr->dd.ix));
else {
if (used_allctr->thread_safe)
erts_mtx_lock(&used_allctr->mutex);
ERTS_ALCU_DBG_CHK_THR_ACCESS(used_allctr);
- do_erts_alcu_free(type, used_allctr, ptr);
+ do_erts_alcu_free(type, used_allctr, p);
if (used_allctr->thread_safe)
erts_mtx_unlock(&used_allctr->mutex);
}
@@ -3739,13 +3820,13 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
if (IS_MBC_BLK(blk))
res = mbc_realloc(allctr, p, size, alcu_flgs);
else {
- Uint used_sz = allctr->sbc_header_size + ABLK_HDR_SZ + size;
+ Uint used_sz = SBC_HEADER_SIZE + ABLK_HDR_SZ + size;
Uint crr_sz;
Uint diff_sz_val;
Uint crr_sz_val;
#if HAVE_ERTS_MSEG
- if (IS_SYS_ALLOC_CARRIER(BLK2SBC(allctr, blk)))
+ if (IS_SYS_ALLOC_CARRIER(BLK_TO_SBC(blk)))
#endif
crr_sz = SYS_ALLOC_CARRIER_CEILING(used_sz);
#if HAVE_ERTS_MSEG
@@ -3775,7 +3856,7 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
if (res) {
sys_memcpy((void*) res,
(void*) p,
- MIN(BLK_SZ(blk) - ABLK_HDR_SZ, size));
+ MIN(SBC_BLK_SZ(blk) - ABLK_HDR_SZ, size));
destroy_carrier(allctr, blk);
}
}
@@ -3798,16 +3879,12 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
else if (alcu_flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE)
return NULL;
else {
-#if HALFWORD_HEAP
- new_blk = create_carrier(allctr, size, CFLG_SBC | CFLG_FORCE_MSEG);
-#else
new_blk = create_carrier(allctr, size, CFLG_SBC);
-#endif
if (new_blk) {
res = BLK2UMEM(new_blk);
sys_memcpy((void *) res,
(void *) p,
- MIN(BLK_SZ(blk) - ABLK_HDR_SZ, size));
+ MIN(MBC_ABLK_SZ(blk) - ABLK_HDR_SZ, size));
mbc_free(allctr, p);
}
else
@@ -3966,16 +4043,15 @@ static ERTS_INLINE void *
realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size,
int force_move)
{
- int pref_ix;
- void *ptr, *res;
+ void *res;
Allctr_t *pref_allctr, *used_allctr;
UWord old_user_size;
if (!p)
return erts_alcu_alloc_thr_pref(type, extra, size);
- pref_ix = get_pref_allctr(extra, &pref_allctr);
- ptr = get_used_allctr(extra, p, &used_allctr, &old_user_size);
+ pref_allctr = get_pref_allctr(extra);
+ used_allctr = get_used_allctr(extra, p, &old_user_size);
ASSERT(used_allctr && pref_allctr);
@@ -3985,56 +4061,33 @@ realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size,
ERTS_ALCU_DBG_CHK_THR_ACCESS(used_allctr);
res = do_erts_alcu_realloc(type,
used_allctr,
- ptr,
- size + sizeof(UWord),
+ p,
+ size,
0);
if (used_allctr->thread_safe)
erts_mtx_unlock(&used_allctr->mutex);
- if (res)
- res = put_used_allctr(res, pref_ix, size);
}
else {
if (pref_allctr->thread_safe)
erts_mtx_lock(&pref_allctr->mutex);
- res = do_erts_alcu_alloc(type, pref_allctr, size + sizeof(UWord));
- if (pref_allctr->thread_safe && (!force_move
- || used_allctr != pref_allctr))
+ res = do_erts_alcu_alloc(type, pref_allctr, size);
+ if (pref_allctr->thread_safe && used_allctr != pref_allctr) {
erts_mtx_unlock(&pref_allctr->mutex);
+ }
if (res) {
- Block_t *blk;
- size_t cpy_size;
-
- res = put_used_allctr(res, pref_ix, size);
-
DEBUG_CHECK_ALIGNMENT(res);
- blk = UMEM2BLK(ptr);
- if (old_user_size != ERTS_AU_PREF_ALLOC_SIZE_MASK)
- cpy_size = old_user_size;
- else {
- if (used_allctr->thread_safe && (!force_move
- || used_allctr != pref_allctr))
- erts_mtx_lock(&used_allctr->mutex);
- ERTS_SMP_LC_ASSERT(!used_allctr->thread_safe ||
- erts_lc_mtx_is_locked(&used_allctr->mutex));
- cpy_size = BLK_SZ(blk);
- if (used_allctr->thread_safe && (!force_move
- || used_allctr != pref_allctr))
- erts_mtx_unlock(&used_allctr->mutex);
- cpy_size -= ABLK_HDR_SZ + sizeof(UWord);
- }
- if (cpy_size > size)
- cpy_size = size;
- sys_memcpy(res, p, cpy_size);
+ sys_memcpy(res, p, MIN(size,old_user_size));
- if (!force_move || used_allctr != pref_allctr)
+ if (used_allctr != pref_allctr) {
enqueue_dealloc_other_instance(type,
used_allctr,
- ptr,
+ p,
(used_allctr->dd.ix
- pref_allctr->dd.ix));
+ }
else {
- do_erts_alcu_free(type, used_allctr, ptr);
+ do_erts_alcu_free(type, used_allctr, p);
ASSERT(pref_allctr == used_allctr);
if (pref_allctr->thread_safe)
erts_mtx_unlock(&pref_allctr->mutex);
@@ -4111,7 +4164,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->ramv = init->ramv;
allctr->main_carrier_size = init->mmbcs;
- allctr->sbc_threshold = init->sbct;
+
#if HAVE_ERTS_MSEG
allctr->mseg_opt.abs_shrink_th = init->asbcst;
allctr->mseg_opt.rel_shrink_th = init->rsbcst;
@@ -4120,20 +4173,29 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->mbc_move_threshold = init->rmbcmt;
#if HAVE_ERTS_MSEG
allctr->max_mseg_sbcs = init->mmsbc;
+# if HAVE_SUPER_ALIGNED_MB_CARRIERS
+ allctr->max_mseg_mbcs = ~(Uint)0;
+# else
allctr->max_mseg_mbcs = init->mmmbc;
+# endif
#endif
allctr->largest_mbc_size = MAX(init->lmbcs, init->smbcs);
+#ifndef ARCH_64
+ if (allctr->largest_mbc_size > MBC_SZ_MAX_LIMIT) {
+ allctr->largest_mbc_size = MBC_SZ_MAX_LIMIT;
+ }
+#endif
allctr->smallest_mbc_size = init->smbcs;
allctr->mbc_growth_stages = MAX(1, init->mbcgs);
if (allctr->min_block_size < ABLK_HDR_SZ)
goto error;
allctr->min_block_size = UNIT_CEILING(allctr->min_block_size
- + sizeof(UWord));
+ + sizeof(FreeBlkFtr_t));
#if ERTS_SMP
if (init->tpref) {
- Uint sz = sizeof(Block_t);
+ Uint sz = ABLK_HDR_SZ;
sz += ERTS_ALCU_DD_FIX_TYPE_OFFS*sizeof(UWord);
if (init->fix)
sz += sizeof(UWord);
@@ -4143,6 +4205,23 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
}
#endif
+ allctr->sbc_threshold = init->sbct;
+#ifndef ARCH_64
+ if (allctr->sbc_threshold > 0) {
+ Uint max_mbc_block_sz = UNIT_CEILING(allctr->sbc_threshold - 1 + ABLK_HDR_SZ);
+ if (max_mbc_block_sz + UNIT_FLOOR(allctr->min_block_size - 1) > MBC_ABLK_SZ_MASK
+ || max_mbc_block_sz < allctr->sbc_threshold) { /* wrap around */
+ /*
+ * By limiting sbc_threshold to (hard limit - min_block_size)
+ * we avoid having to split off free "residue blocks"
+ * smaller than min_block_size.
+ */
+ max_mbc_block_sz = MBC_ABLK_SZ_MASK - UNIT_FLOOR(allctr->min_block_size - 1);
+ allctr->sbc_threshold = max_mbc_block_sz - ABLK_HDR_SZ + 1;
+ }
+ }
+#endif
+
allctr->sbmbc_threshold = init->sbmbct;
@@ -4158,7 +4237,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->sbmbc_size = init->sbmbcs;
min_size = allctr->sbmbc_threshold;
min_size += allctr->min_block_size;
- min_size += allctr->mbc_header_size;
+ min_size += MBC_HEADER_SIZE(allctr);
if (allctr->sbmbc_size < min_size)
allctr->sbmbc_size = min_size;
}
@@ -4203,59 +4282,26 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
if (!allctr->get_next_mbc_size)
allctr->get_next_mbc_size = get_next_mbc_size;
- if (allctr->mbc_header_size < sizeof(Carrier_t))
- goto error;
#ifdef ERTS_SMP
allctr->dd.use = 0;
if (init->tpref) {
- allctr->mbc_header_size = (UNIT_CEILING(allctr->mbc_header_size
- + FBLK_FTR_SZ
- + ABLK_HDR_SZ
- + sizeof(UWord))
- - ABLK_HDR_SZ
- - sizeof(UWord));
- allctr->sbc_header_size = (UNIT_CEILING(sizeof(Carrier_t)
- + FBLK_FTR_SZ
- + ABLK_HDR_SZ
- + sizeof(UWord))
- - ABLK_HDR_SZ
- - sizeof(UWord));
-
allctr->dd.use = 1;
init_dd_queue(&allctr->dd.q);
allctr->dd.ix = init->ix;
}
- else
#endif
- {
- allctr->mbc_header_size = (UNIT_CEILING(allctr->mbc_header_size
- + FBLK_FTR_SZ
- + ABLK_HDR_SZ)
- - ABLK_HDR_SZ);
- allctr->sbc_header_size = (UNIT_CEILING(sizeof(Carrier_t)
- + FBLK_FTR_SZ
- + ABLK_HDR_SZ)
- - ABLK_HDR_SZ);
- }
if (allctr->main_carrier_size) {
Block_t *blk;
-#if HALFWORD_HEAP
- blk = create_carrier(allctr,
- allctr->main_carrier_size,
- CFLG_MBC
- | CFLG_FORCE_SIZE
- | CFLG_FORCE_MSEG
- | CFLG_MAIN_CARRIER);
-#else
blk = create_carrier(allctr,
allctr->main_carrier_size,
CFLG_MBC
| CFLG_FORCE_SIZE
+#if !HALFWORD_HEAP && !HAVE_SUPER_ALIGNED_MB_CARRIERS
| CFLG_FORCE_SYS_ALLOC
- | CFLG_MAIN_CARRIER);
#endif
+ | CFLG_MAIN_CARRIER);
if (!blk)
goto error;
@@ -4303,9 +4349,9 @@ erts_alcu_stop(Allctr_t *allctr)
while (allctr->sbc_list.first)
destroy_carrier(allctr, SBC2BLK(allctr, allctr->sbc_list.first));
while (allctr->mbc_list.first)
- destroy_carrier(allctr, MBC2FBLK(allctr, allctr->mbc_list.first));
+ destroy_carrier(allctr, MBC_TO_FIRST_BLK(allctr, allctr->mbc_list.first));
while (allctr->sbmbc_list.first)
- destroy_sbmbc(allctr, MBC2FBLK(allctr, allctr->sbmbc_list.first));
+ destroy_sbmbc(allctr, MBC_TO_FIRST_BLK(allctr, allctr->sbmbc_list.first));
#ifdef USE_THREADS
if (allctr->thread_safe)
@@ -4319,17 +4365,9 @@ erts_alcu_stop(Allctr_t *allctr)
void
erts_alcu_init(AlcUInit_t *init)
{
-
+ ASSERT(SBC_BLK_SZ_MASK == MBC_FBLK_SZ_MASK); /* see BLK_SZ */
#if HAVE_ERTS_MSEG
- mseg_unit_size = erts_mseg_unit_size();
-
- if (mseg_unit_size % sizeof(Unit_t)) /* A little paranoid... */
- erl_exit(-1,
- "Mseg unit size (%d) not evenly divideble by "
- "internal unit size of alloc_util (%d)\n",
- mseg_unit_size,
- sizeof(Unit_t));
-
+ ASSERT(erts_mseg_unit_size() == MSEG_UNIT_SZ);
max_mseg_carriers = init->mmc;
sys_alloc_carrier_size = MSEG_UNIT_CEILING(init->ycs);
#else /* #if HAVE_ERTS_MSEG */
@@ -4372,11 +4410,10 @@ erts_alcu_test(unsigned long op, unsigned long a1, unsigned long a2)
case 0x00b: return (unsigned long) CARRIER_SZ((Carrier_t *) a1);
case 0x00c: return (unsigned long) SBC2BLK((Allctr_t *) a1,
(Carrier_t *) a2);
- case 0x00d: return (unsigned long) BLK2SBC((Allctr_t *) a1,
- (Block_t *) a2);
- case 0x00e: return (unsigned long) MBC2FBLK((Allctr_t *) a1,
+ case 0x00d: return (unsigned long) BLK_TO_SBC((Block_t *) a2);
+ case 0x00e: return (unsigned long) MBC_TO_FIRST_BLK((Allctr_t *) a1,
(Carrier_t *) a2);
- case 0x00f: return (unsigned long) FBLK2MBC((Allctr_t *) a1,
+ case 0x00f: return (unsigned long) FIRST_BLK_TO_MBC((Allctr_t *) a1,
(Block_t *) a2);
case 0x010: return (unsigned long) ((Allctr_t *) a1)->mbc_list.first;
case 0x011: return (unsigned long) ((Allctr_t *) a1)->mbc_list.last;
@@ -4388,7 +4425,7 @@ erts_alcu_test(unsigned long op, unsigned long a1, unsigned long a2)
case 0x017: return (unsigned long) ((Allctr_t *) a1)->min_block_size;
case 0x018: return (unsigned long) NXT_BLK((Block_t *) a1);
case 0x019: return (unsigned long) PREV_BLK((Block_t *) a1);
- case 0x01a: return (unsigned long) IS_FIRST_BLK((Block_t *) a1);
+ case 0x01a: return (unsigned long) IS_MBC_FIRST_BLK((Allctr_t*)a1, (Block_t *) a2);
case 0x01b: return (unsigned long) sizeof(Unit_t);
default: ASSERT(0); return ~((unsigned long) 0);
}
@@ -4432,6 +4469,13 @@ erts_alcu_verify_unused_ts(Allctr_t *allctr)
#endif
}
+#ifdef DEBUG
+int is_sbc_blk(Block_t* blk)
+{
+ return IS_SBC_BLK(blk);
+}
+#endif
+
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
static void
@@ -4441,34 +4485,37 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk)
CarrierList_t *cl;
if (IS_SBC_BLK(iblk)) {
- Carrier_t *sbc = BLK2SBC(allctr, iblk);
+ Carrier_t *sbc = BLK_TO_SBC(iblk);
ASSERT(SBC2BLK(allctr, sbc) == iblk);
- ASSERT(IS_ALLOCED_BLK(iblk));
- ASSERT(IS_FIRST_BLK(iblk));
- ASSERT(IS_LAST_BLK(iblk));
- ASSERT(CARRIER_SZ(sbc) - allctr->sbc_header_size >= BLK_SZ(iblk));
+ ASSERT(CARRIER_SZ(sbc) - SBC_HEADER_SIZE >= SBC_BLK_SZ(iblk));
#if HAVE_ERTS_MSEG
if (IS_MSEG_CARRIER(sbc)) {
- ASSERT(CARRIER_SZ(sbc) % mseg_unit_size == 0);
+ ASSERT(CARRIER_SZ(sbc) % MSEG_UNIT_SZ == 0);
}
#endif
crr = sbc;
cl = &allctr->sbc_list;
}
else {
- Carrier_t *mbc = NULL;
Block_t *prev_blk = NULL;
Block_t *blk;
char *carrier_end;
Uint is_free_blk;
Uint tot_blk_sz;
Uint blk_sz;
+ int has_wrapped_around = 0;
blk = iblk;
tot_blk_sz = 0;
+ crr = BLK_TO_MBC(blk);
+ ASSERT(IS_MB_CARRIER(crr));
+ /* Step around the carrier one whole lap starting at 'iblk'
+ */
while (1) {
+ ASSERT(IS_MBC_BLK(blk));
+ ASSERT(BLK_TO_MBC(blk) == crr);
if (prev_blk) {
ASSERT(NXT_BLK(prev_blk) == blk);
@@ -4481,18 +4528,16 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk)
}
}
- if (mbc) {
+ if (has_wrapped_around) {
+ ASSERT(((Block_t *) crr) < blk);
if (blk == iblk)
break;
- ASSERT(((Block_t *) mbc) < blk && blk < iblk);
+ ASSERT(blk < iblk);
}
else
ASSERT(blk >= iblk);
-
- ASSERT(IS_MBC_BLK(blk));
-
- blk_sz = BLK_SZ(blk);
+ blk_sz = MBC_BLK_SZ(blk);
ASSERT(blk_sz % sizeof(Unit_t) == 0);
ASSERT(blk_sz >= allctr->min_block_size);
@@ -4500,44 +4545,40 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk)
tot_blk_sz += blk_sz;
is_free_blk = (int) IS_FREE_BLK(blk);
- if(is_free_blk) {
- if (IS_NOT_LAST_BLK(blk))
- ASSERT(*((UWord *) (((char *) blk)+blk_sz-sizeof(UWord)))
- == blk_sz);
- }
+ ASSERT(!is_free_blk
+ || IS_LAST_BLK(blk)
+ || PREV_BLK_SZ(((char *) blk)+blk_sz) == blk_sz);
if (allctr->check_block)
(*allctr->check_block)(allctr, blk, (int) is_free_blk);
if (IS_LAST_BLK(blk)) {
carrier_end = ((char *) NXT_BLK(blk));
- mbc = *((Carrier_t **) NXT_BLK(blk));
+ has_wrapped_around = 1;
prev_blk = NULL;
- blk = MBC2FBLK(allctr, mbc);
- ASSERT(IS_FIRST_BLK(blk));
+ blk = MBC_TO_FIRST_BLK(allctr, crr);
+ ASSERT(IS_MBC_FIRST_BLK(allctr,blk));
}
else {
prev_blk = blk;
blk = NXT_BLK(blk);
}
}
-
- ASSERT(IS_MB_CARRIER(mbc));
- ASSERT((((char *) mbc)
- + allctr->mbc_header_size
+
+ ASSERT((((char *) crr)
+ + MBC_HEADER_SIZE(allctr)
+ tot_blk_sz) == carrier_end);
- ASSERT(((char *) mbc) + CARRIER_SZ(mbc) - sizeof(Unit_t) <= carrier_end
- && carrier_end <= ((char *) mbc) + CARRIER_SZ(mbc));
+ ASSERT(((char *) crr) + CARRIER_SZ(crr) - sizeof(Unit_t) <= carrier_end
+ && carrier_end <= ((char *) crr) + CARRIER_SZ(crr));
if (allctr->check_mbc)
- (*allctr->check_mbc)(allctr, mbc);
+ (*allctr->check_mbc)(allctr, crr);
#if HAVE_ERTS_MSEG
- if (IS_MSEG_CARRIER(mbc)) {
- ASSERT(CARRIER_SZ(mbc) % mseg_unit_size == 0);
+ if (IS_MSEG_CARRIER(crr)) {
+ ASSERT(CARRIER_SZ(crr) % MSEG_UNIT_SZ == 0);
}
#endif
- crr = mbc;
cl = &allctr->mbc_list;
}
@@ -4559,4 +4600,5 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk)
#endif
}
-#endif
+#endif /* ERTS_ALLOC_UTIL_HARD_DEBUG */
+
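The 32-bit clamp of sbc_threshold above keeps the largest multiblock-carrier block representable in MBC_ABLK_SZ_MASK (defined in the header change below). A stand-alone sketch of that arithmetic, not from the patch; the 8-byte unit, 9 offset bits, 32-byte min_block_size and 8-byte block header are assumed example values:

#include <stdio.h>

/* Stand-alone sketch of the 32-bit sbc_threshold clamp; all constants here
 * are assumed example values, not read from an actual build. */
int main(void)
{
    unsigned int unit_sz      = 8;                        /* assumed UNIT_SZ        */
    unsigned int offset_bits  = 9;                        /* MBC_ABLK_OFFSET_BITS   */
    unsigned int offset_shift = 32 - offset_bits;         /* 23                     */
    unsigned int offset_mask  = ~0u << offset_shift;
    unsigned int flg_mask     = unit_sz - 1;              /* FLG_MASK               */
    unsigned int ablk_sz_mask = ~offset_mask & ~flg_mask; /* MBC_ABLK_SZ_MASK       */

    unsigned int min_block_sz = 32;                       /* assumed                */
    unsigned int ablk_hdr_sz  = 8;                        /* assumed ABLK_HDR_SZ    */

    /* mirrors: max_mbc_block_sz = MBC_ABLK_SZ_MASK - UNIT_FLOOR(min_block_size - 1) */
    unsigned int max_mbc_blk   = ablk_sz_mask - ((min_block_sz - 1) & ~flg_mask);
    unsigned int sbc_threshold = max_mbc_blk - ablk_hdr_sz + 1;

    printf("largest MBC block: %u bytes\n", max_mbc_blk);      /* just below 2^23 */
    printf("clamped sbc_threshold: %u bytes\n", sbc_threshold);
    return 0;
}

With these assumptions the threshold ends up just below 2^23 bytes, which is where the carrier offset bits begin in the block header.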
diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h
index cedf4ccf85..e0754e7f69 100644
--- a/erts/emulator/beam/erl_alloc_util.h
+++ b/erts/emulator/beam/erl_alloc_util.h
@@ -216,16 +216,40 @@ erts_aint32_t erts_alcu_fix_alloc_shrink(Allctr_t *, erts_aint32_t);
#define UNIT_FLOOR(X) ((X) & UNIT_MASK)
#define UNIT_CEILING(X) UNIT_FLOOR((X) + INV_UNIT_MASK)
+#define FLG_MASK INV_UNIT_MASK
+#define SBC_BLK_SZ_MASK UNIT_MASK
+#define MBC_FBLK_SZ_MASK UNIT_MASK
+#define CARRIER_SZ_MASK UNIT_MASK
-#define SZ_MASK (~((UWord) 0) << 3)
-#define FLG_MASK (~(SZ_MASK))
+#if HAVE_ERTS_MSEG
+# ifdef ARCH_64
+# define MBC_ABLK_OFFSET_BITS 24
+# elif HAVE_SUPER_ALIGNED_MB_CARRIERS
+# define MBC_ABLK_OFFSET_BITS 9
+ /* Affects hard limits for sbct and lmbcs documented in erts_alloc.xml */
+# endif
+#endif
+#ifndef MBC_ABLK_OFFSET_BITS
+# define MBC_ABLK_OFFSET_BITS 0 /* no carrier offset in block header */
+#endif
+
+#if MBC_ABLK_OFFSET_BITS
+# define MBC_ABLK_OFFSET_SHIFT (sizeof(UWord)*8 - MBC_ABLK_OFFSET_BITS)
+# define MBC_ABLK_OFFSET_MASK (~((UWord)0) << MBC_ABLK_OFFSET_SHIFT)
+# define MBC_ABLK_SZ_MASK (~MBC_ABLK_OFFSET_MASK & ~FLG_MASK)
+# define HAVE_ERTS_SBMBC 0
+#else
+# define MBC_ABLK_SZ_MASK (~FLG_MASK)
+# define HAVE_ERTS_SBMBC 1
+#endif
-#define BLK_SZ(B) \
- (*((Block_t *) (B)) & SZ_MASK)
+#define MBC_ABLK_SZ(B) (ASSERT_EXPR(!is_sbc_blk(B)), (B)->bhdr & MBC_ABLK_SZ_MASK)
+#define MBC_FBLK_SZ(B) (ASSERT_EXPR(!is_sbc_blk(B)), (B)->bhdr & MBC_FBLK_SZ_MASK)
+#define SBC_BLK_SZ(B) (ASSERT_EXPR(is_sbc_blk(B)), (B)->bhdr & SBC_BLK_SZ_MASK)
#define CARRIER_SZ(C) \
- ((C)->chdr & SZ_MASK)
+ ((C)->chdr & CARRIER_SZ_MASK)
extern int erts_have_sbmbc_alloc;
@@ -236,6 +260,7 @@ struct Carrier_t_ {
UWord chdr;
Carrier_t *next;
Carrier_t *prev;
+ Allctr_t *allctr;
};
typedef struct {
@@ -243,8 +268,19 @@ typedef struct {
Carrier_t *last;
} CarrierList_t;
-typedef UWord Block_t;
-typedef UWord FreeBlkFtr_t;
+typedef struct {
+ UWord bhdr;
+#if !MBC_ABLK_OFFSET_BITS
+ Carrier_t *carrier;
+#else
+ union {
+ Carrier_t *carrier; /* if free */
+ char udata__[1]; /* if allocated */
+ }u;
+#endif
+} Block_t;
+
+typedef UWord FreeBlkFtr_t; /* Footer of a free block */
typedef struct {
UWord giga_no;
@@ -381,8 +417,6 @@ struct Allctr_t_ {
#endif
/* */
- Uint mbc_header_size;
- Uint sbc_header_size;
Uint min_mbc_size;
Uint min_mbc_first_free_size;
Uint min_block_size;
@@ -469,6 +503,9 @@ void erts_alcu_verify_unused_ts(Allctr_t *allctr);
unsigned long erts_alcu_test(unsigned long, unsigned long, unsigned long);
+#ifdef DEBUG
+int is_sbc_blk(Block_t*);
+#endif
#endif /* #if defined(GET_ERL_ALLOC_UTIL_IMPL)
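The new Block_t makes the carrier pointer part of a union: free blocks keep an explicit Carrier_t pointer, while allocated MBC blocks apparently recover their carrier from the offset bits packed into the top of bhdr, saving one word per allocated block. A minimal field-extraction sketch using only the masks defined above, assuming a 32-bit UWord and 8-byte units (the real SET/GET macros are not shown in this hunk):

/* Field-extraction sketch for the packed bhdr; assumes a 32-bit UWord,
 * 8-byte units and the 9 offset bits of HAVE_SUPER_ALIGNED_MB_CARRIERS. */
typedef unsigned int UWord32;

#define OFFSET_BITS_   9u                                /* MBC_ABLK_OFFSET_BITS  */
#define OFFSET_SHIFT_  (32u - OFFSET_BITS_)              /* MBC_ABLK_OFFSET_SHIFT */
#define OFFSET_MASK_   (~(UWord32)0 << OFFSET_SHIFT_)    /* MBC_ABLK_OFFSET_MASK  */
#define FLG_MASK_      ((UWord32)7)                      /* assumed FLG_MASK      */
#define ABLK_SZ_MASK_  (~OFFSET_MASK_ & ~FLG_MASK_)      /* MBC_ABLK_SZ_MASK      */

static UWord32 ablk_size(UWord32 bhdr)   { return bhdr & ABLK_SZ_MASK_; }
static UWord32 ablk_offset(UWord32 bhdr) { return bhdr >> OFFSET_SHIFT_; }
static UWord32 blk_flags(UWord32 bhdr)   { return bhdr & FLG_MASK_; }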
diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.c b/erts/emulator/beam/erl_ao_firstfit_alloc.c
index 5bdb752d3a..86b4696d8f 100644
--- a/erts/emulator/beam/erl_ao_firstfit_alloc.c
+++ b/erts/emulator/beam/erl_ao_firstfit_alloc.c
@@ -91,6 +91,7 @@ struct AOFF_RBTree_t_ {
AOFF_RBTree_t *right;
Uint max_sz; /* of all blocks in this sub-tree */
};
+#define AOFF_BLK_SZ(B) MBC_FBLK_SZ(&(B)->hdr)
#ifdef HARD_DEBUG
static AOFF_RBTree_t * check_tree(AOFF_RBTree_t* root, Uint);
@@ -102,7 +103,7 @@ static AOFF_RBTree_t * check_tree(AOFF_RBTree_t* root, Uint);
*/
static ERTS_INLINE Uint node_max_size(AOFF_RBTree_t *x)
{
- Uint sz = BLK_SZ(x);
+ Uint sz = AOFF_BLK_SZ(x);
if (x->left && x->left->max_sz > sz) {
sz = x->left->max_sz;
}
@@ -183,7 +184,6 @@ erts_aoffalc_start(AOFFAllctr_t *alc,
sys_memcpy((void *) alc, (void *) &zero.allctr, sizeof(AOFFAllctr_t));
- allctr->mbc_header_size = sizeof(Carrier_t);
allctr->min_mbc_size = MIN_MBC_SZ;
allctr->min_mbc_first_free_size = MIN_MBC_FIRST_FREE_SZ;
allctr->min_block_size = sizeof(AOFF_RBTree_t);
@@ -587,7 +587,7 @@ aoff_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
AOFF_RBTree_t *blk = (AOFF_RBTree_t *) block;
AOFF_RBTree_t **root = ((flags & ERTS_ALCU_FLG_SBMBC)
? &alc->sbmbc_root : &alc->mbc_root);
- Uint blk_sz = BLK_SZ(blk);
+ Uint blk_sz = AOFF_BLK_SZ(blk);
#ifdef HARD_DEBUG
check_tree(*root, 0);
@@ -659,7 +659,7 @@ aoff_get_free_block(Allctr_t *allctr, Uint size,
if (x->left && x->left->max_sz >= size) {
x = x->left;
}
- else if (BLK_SZ(x) >= size) {
+ else if (AOFF_BLK_SZ(x) >= size) {
blk = x;
break;
}
@@ -910,12 +910,12 @@ check_tree(AOFF_RBTree_t* root, Uint size)
ASSERT(x->right > x);
ASSERT(x->right->max_sz <= x->max_sz);
}
- ASSERT(x->max_sz >= BLK_SZ(x));
- ASSERT(x->max_sz == BLK_SZ(x)
+ ASSERT(x->max_sz >= AOFF_BLK_SZ(x));
+ ASSERT(x->max_sz == AOFF_BLK_SZ(x)
|| x->max_sz == (x->left ? x->left->max_sz : 0)
|| x->max_sz == (x->right ? x->right->max_sz : 0));
- if (size && BLK_SZ(x) >= size) {
+ if (size && AOFF_BLK_SZ(x) >= size) {
if (!res || x < res) {
res = x;
}
@@ -956,7 +956,7 @@ print_tree_aux(AOFF_RBTree_t *x, int indent)
}
fprintf(stderr, "%s: sz=%lu addr=0x%lx max_size=%lu\r\n",
IS_BLACK(x) ? "BLACK" : "RED",
- BLK_SZ(x), (Uint)x, x->max_sz);
+ AOFF_BLK_SZ(x), (Uint)x, x->max_sz);
print_tree_aux(x->left, indent + INDENT_STEP);
}
}
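The descent in aoff_get_free_block (partially visible above) relies on the max_sz annotation: prefer the left subtree whenever it can hold the request, otherwise take the current block, otherwise go right. A minimal stand-alone sketch of that walk over a plain address-ordered tree; the type and names below are illustrative, not the real AOFF_RBTree_t:

/* Illustrative walk only; 'node' is not the real AOFF_RBTree_t. */
typedef struct node {
    struct node *left, *right;   /* keyed by block address           */
    unsigned int size;           /* this free block's size           */
    unsigned int max_sz;         /* largest block in this subtree    */
} node;

static node *aoff_find(node *x, unsigned int want)
{
    while (x) {
        if (x->left && x->left->max_sz >= want)
            x = x->left;             /* a fit exists at a lower address  */
        else if (x->size >= want)
            return x;                /* lowest-addressed block that fits */
        else
            x = x->right;            /* only higher addresses can fit    */
    }
    return NULL;
}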
diff --git a/erts/emulator/beam/erl_bestfit_alloc.c b/erts/emulator/beam/erl_bestfit_alloc.c
index c50fdeb4e8..743cbd93c9 100644
--- a/erts/emulator/beam/erl_bestfit_alloc.c
+++ b/erts/emulator/beam/erl_bestfit_alloc.c
@@ -73,6 +73,8 @@
#define SET_RED(N) (((RBTree_t *) (N))->flags |= RED_FLG)
#define SET_BLACK(N) (((RBTree_t *) (N))->flags &= ~RED_FLG)
+#define BF_BLK_SZ(B) MBC_FBLK_SZ(&(B)->hdr)
+
#undef ASSERT
#define ASSERT ASSERT_EXPR
@@ -177,7 +179,6 @@ erts_bfalc_start(BFAllctr_t *bfallctr,
bfallctr->address_order = bfinit->ao;
- allctr->mbc_header_size = sizeof(Carrier_t);
allctr->min_mbc_size = MIN_MBC_SZ;
allctr->min_mbc_first_free_size = MIN_MBC_FIRST_FREE_SZ;
allctr->min_block_size = (bfinit->ao
@@ -592,7 +593,7 @@ aobf_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
? &bfallctr->sbmbc_root
: &bfallctr->mbc_root);
RBTree_t *blk = (RBTree_t *) block;
- Uint blk_sz = BLK_SZ(blk);
+ Uint blk_sz = BF_BLK_SZ(blk);
@@ -610,7 +611,7 @@ aobf_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
while (1) {
Uint size;
- size = BLK_SZ(x);
+ size = BF_BLK_SZ(x);
if (blk_sz < size || (blk_sz == size && blk < x)) {
if (!x->left) {
@@ -668,7 +669,7 @@ aobf_get_free_block(Allctr_t *allctr, Uint size,
ASSERT(!cand_blk || cand_size >= size);
while (x) {
- blk_sz = BLK_SZ(x);
+ blk_sz = BF_BLK_SZ(x);
if (blk_sz < size) {
x = x->right;
}
@@ -686,7 +687,7 @@ aobf_get_free_block(Allctr_t *allctr, Uint size,
#endif
if (cand_blk) {
- blk_sz = BLK_SZ(blk);
+ blk_sz = BF_BLK_SZ(blk);
if (cand_size < blk_sz)
return NULL; /* cand_blk was better */
if (cand_size == blk_sz && ((void *) cand_blk) < ((void *) blk))
@@ -711,7 +712,7 @@ bf_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
? &bfallctr->sbmbc_root
: &bfallctr->mbc_root);
RBTree_t *blk = (RBTree_t *) block;
- Uint blk_sz = BLK_SZ(blk);
+ Uint blk_sz = BF_BLK_SZ(blk);
SET_TREE_NODE(blk);
@@ -730,7 +731,7 @@ bf_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
while (1) {
Uint size;
- size = BLK_SZ(x);
+ size = BF_BLK_SZ(x);
if (blk_sz == size) {
@@ -796,7 +797,7 @@ bf_unlink_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
else if (LIST_NEXT(x)) {
/* Replace tree node by next element in list... */
- ASSERT(BLK_SZ(LIST_NEXT(x)) == BLK_SZ(x));
+ ASSERT(BF_BLK_SZ(LIST_NEXT(x)) == BF_BLK_SZ(x));
ASSERT(IS_TREE_NODE(x));
ASSERT(IS_LIST_ELEM(LIST_NEXT(x)));
@@ -834,7 +835,7 @@ bf_get_free_block(Allctr_t *allctr, Uint size,
ASSERT(!cand_blk || cand_size >= size);
while (x) {
- blk_sz = BLK_SZ(x);
+ blk_sz = BF_BLK_SZ(x);
if (blk_sz < size) {
x = x->right;
}
@@ -855,11 +856,11 @@ bf_get_free_block(Allctr_t *allctr, Uint size,
#ifdef HARD_DEBUG
{
RBTree_t *ct_blk = check_tree(root, 0, size);
- ASSERT(BLK_SZ(ct_blk) == BLK_SZ(blk));
+ ASSERT(BF_BLK_SZ(ct_blk) == BF_BLK_SZ(blk));
}
#endif
- if (cand_blk && cand_size <= BLK_SZ(blk))
+ if (cand_blk && cand_size <= BF_BLK_SZ(blk))
return NULL; /* cand_blk was better */
/* Use next block if it exist in order to avoid replacing
@@ -1093,36 +1094,36 @@ check_tree(RBTree_t *root, int ao, Uint size)
if (x->left) {
ASSERT(x->left->parent == x);
if (ao) {
- ASSERT(BLK_SZ(x->left) < BLK_SZ(x)
- || (BLK_SZ(x->left) == BLK_SZ(x) && x->left < x));
+ ASSERT(BF_BLK_SZ(x->left) < BF_BLK_SZ(x)
+ || (BF_BLK_SZ(x->left) == BF_BLK_SZ(x) && x->left < x));
}
else {
ASSERT(IS_TREE_NODE(x->left));
- ASSERT(BLK_SZ(x->left) < BLK_SZ(x));
+ ASSERT(BF_BLK_SZ(x->left) < BF_BLK_SZ(x));
}
}
if (x->right) {
ASSERT(x->right->parent == x);
if (ao) {
- ASSERT(BLK_SZ(x->right) > BLK_SZ(x)
- || (BLK_SZ(x->right) == BLK_SZ(x) && x->right > x));
+ ASSERT(BF_BLK_SZ(x->right) > BF_BLK_SZ(x)
+ || (BF_BLK_SZ(x->right) == BF_BLK_SZ(x) && x->right > x));
}
else {
ASSERT(IS_TREE_NODE(x->right));
- ASSERT(BLK_SZ(x->right) > BLK_SZ(x));
+ ASSERT(BF_BLK_SZ(x->right) > BF_BLK_SZ(x));
}
}
- if (size && BLK_SZ(x) >= size) {
+ if (size && BF_BLK_SZ(x) >= size) {
if (ao) {
if (!res
- || BLK_SZ(x) < BLK_SZ(res)
- || (BLK_SZ(x) == BLK_SZ(res) && x < res))
+ || BF_BLK_SZ(x) < BF_BLK_SZ(res)
+ || (BF_BLK_SZ(x) == BF_BLK_SZ(res) && x < res))
res = x;
}
else {
- if (!res || BLK_SZ(x) < BLK_SZ(res))
+ if (!res || BF_BLK_SZ(x) < BF_BLK_SZ(res))
res = x;
}
}
@@ -1168,7 +1169,7 @@ print_tree_aux(RBTree_t *x, int indent)
}
fprintf(stderr, "%s: sz=%lu addr=0x%lx\r\n",
IS_BLACK(x) ? "BLACK" : "RED",
- BLK_SZ(x),
+ BF_BLK_SZ(x),
(Uint) x);
print_tree_aux(x->left, indent + INDENT_STEP);
}
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index d377ba8f31..a33085315a 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -129,7 +129,7 @@ static void disallow_heap_frag_ref(Process* p, Eterm* n_htop, Eterm* objv, int n
#if defined(ARCH_64) && !HALFWORD_HEAP
# define MAX_HEAP_SIZES 154
#else
-# define MAX_HEAP_SIZES 55
+# define MAX_HEAP_SIZES 59
#endif
static Sint heap_sizes[MAX_HEAP_SIZES]; /* Suitable heap sizes. */
@@ -144,6 +144,7 @@ void
erts_init_gc(void)
{
int i = 0;
+ Sint max_heap_size = 0;
ASSERT(offsetof(ProcBin,thing_word) == offsetof(struct erl_off_heap_header,thing_word));
ASSERT(offsetof(ProcBin,thing_word) == offsetof(ErlFunThing,thing_word));
@@ -168,16 +169,30 @@ erts_init_gc(void)
* we really don't want that growth when the heaps are that big.
*/
- heap_sizes[0] = 34;
- heap_sizes[1] = 55;
- for (i = 2; i < 23; i++) {
- heap_sizes[i] = heap_sizes[i-1] + heap_sizes[i-2];
+    /* Growth stage 1 - Fibonacci + 1 */
+    /* 12, 38 will hit size 233, the old default */
+
+ heap_sizes[0] = 12;
+ heap_sizes[1] = 38;
+
+ for(i = 2; i < 23; i++) {
+ /* one extra word for block header */
+ heap_sizes[i] = heap_sizes[i-1] + heap_sizes[i-2] + 1;
}
+
+    /* For 32-bit (and halfword) we want max_heap_size to be MAX(32bit) / 4 [words];
+     * for 64-bit we want max_heap_size to be MAX(52bit) / 8 [words].
+     */
+
+ max_heap_size = sizeof(Eterm) < 8 ? (Sint)((~(Uint)0)/(sizeof(Eterm))) :
+ (Sint)(((Uint64)1 << 53)/sizeof(Eterm));
+
+ /* Growth stage 2 - 20% growth */
/* At 1.3 mega words heap, we start to slow down. */
for (i = 23; i < ALENGTH(heap_sizes); i++) {
- heap_sizes[i] = 5*(heap_sizes[i-1]/4);
- if (heap_sizes[i] < 0) {
+ heap_sizes[i] = heap_sizes[i-1] + heap_sizes[i-1]/5;
+ if ((heap_sizes[i] < 0) || heap_sizes[i] > max_heap_size) {
/* Size turned negative. Discard this last size. */
i--;
break;
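A quick check of the "Fibonacci + 1" stage introduced above: starting from 12 and 38 and adding one extra word per step for the block header gives 51, 90, 142 and then 233, so the old default heap size is still in the table. A throwaway verification, not part of the patch:

#include <stdio.h>

int main(void)
{
    long s0 = 12, s1 = 38, i;
    printf("%ld %ld", s0, s1);
    for (i = 2; i < 8; i++) {
        long s2 = s1 + s0 + 1;   /* previous two sizes plus one header word */
        printf(" %ld", s2);
        s0 = s1;
        s1 = s2;
    }
    putchar('\n');               /* prints: 12 38 51 90 142 233 376 610 */
    return 0;
}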
@@ -868,14 +883,12 @@ minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
}
}
- if (wanted < MIN_HEAP_SIZE(p)) {
- wanted = MIN_HEAP_SIZE(p);
- } else {
- wanted = next_heap_size(p, wanted, 0);
- }
+ wanted = wanted < MIN_HEAP_SIZE(p) ? MIN_HEAP_SIZE(p)
+ : next_heap_size(p, wanted, 0);
if (wanted < HEAP_SIZE(p)) {
shrink_new_heap(p, wanted, objv, nobj);
}
+
ASSERT(HEAP_SIZE(p) == next_heap_size(p, HEAP_SIZE(p), 0));
return 1; /* We are done. */
}
@@ -1434,11 +1447,10 @@ adjust_after_fullsweep(Process *p, Uint size_before, int need, Eterm *objv, int
I think this is better as fullsweep is used mainly on
small memory systems, but I could be wrong... */
wanted = 2 * need_after;
- if (wanted < p->min_heap_size) {
- sz = p->min_heap_size;
- } else {
- sz = next_heap_size(p, wanted, 0);
- }
+
+ sz = wanted < p->min_heap_size ? p->min_heap_size
+ : next_heap_size(p, wanted, 0);
+
if (sz < HEAP_SIZE(p)) {
shrink_new_heap(p, sz, objv, nobj);
}
diff --git a/erts/emulator/beam/erl_goodfit_alloc.c b/erts/emulator/beam/erl_goodfit_alloc.c
index e7d4ac2b67..f98d377fd6 100644
--- a/erts/emulator/beam/erl_goodfit_alloc.c
+++ b/erts/emulator/beam/erl_goodfit_alloc.c
@@ -205,7 +205,6 @@ erts_gfalc_start(GFAllctr_t *gfallctr,
init->sbmbct = 0; /* Small mbc not yet supported by goodfit */
- allctr->mbc_header_size = sizeof(Carrier_t);
allctr->min_mbc_size = MIN_MBC_SZ;
allctr->min_mbc_first_free_size = MIN_MBC_FIRST_FREE_SZ;
allctr->min_block_size = sizeof(GFFreeBlock_t);
@@ -363,7 +362,7 @@ search_bucket(Allctr_t *allctr, int ix, Uint size)
blk && i < max_blk_search;
blk = blk->next, i++) {
- blk_sz = BLK_SZ(blk);
+ blk_sz = MBC_FBLK_SZ(&blk->block_head);
blk_on_lambc = (((char *) blk) < gfallctr->last_aux_mbc_end
&& gfallctr->last_aux_mbc_start <= ((char *) blk));
@@ -402,7 +401,7 @@ get_free_block(Allctr_t *allctr, Uint size,
if (min_bi == unsafe_bi) {
blk = search_bucket(allctr, min_bi, size);
if (blk) {
- if (cand_blk && cand_size <= BLK_SZ(blk))
+ if (cand_blk && cand_size <= MBC_FBLK_SZ(blk))
return NULL; /* cand_blk was better */
unlink_free_block(allctr, blk, flags);
return blk;
@@ -422,7 +421,7 @@ get_free_block(Allctr_t *allctr, Uint size,
/* We are guaranteed to find a block that fits in this bucket */
blk = search_bucket(allctr, min_bi, size);
ASSERT(blk);
- if (cand_blk && cand_size <= BLK_SZ(blk))
+ if (cand_blk && cand_size <= MBC_FBLK_SZ(blk))
return NULL; /* cand_blk was better */
unlink_free_block(allctr, blk, flags);
return blk;
@@ -435,7 +434,7 @@ link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
{
GFAllctr_t *gfallctr = (GFAllctr_t *) allctr;
GFFreeBlock_t *blk = (GFFreeBlock_t *) block;
- Uint sz = BLK_SZ(blk);
+ Uint sz = MBC_FBLK_SZ(&blk->block_head);
int i = BKT_IX(gfallctr, sz);
ASSERT(sz >= MIN_BLK_SZ);
@@ -456,7 +455,7 @@ unlink_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
{
GFAllctr_t *gfallctr = (GFAllctr_t *) allctr;
GFFreeBlock_t *blk = (GFFreeBlock_t *) block;
- Uint sz = BLK_SZ(blk);
+ Uint sz = MBC_FBLK_SZ(&blk->block_head);
int i = BKT_IX(gfallctr, sz);
if (!blk->prev) {
@@ -618,7 +617,7 @@ check_block(Allctr_t *allctr, Block_t * blk, int free_block)
GFFreeBlock_t *fblk;
if(free_block) {
- Uint blk_sz = BLK_SZ(blk);
+ Uint blk_sz = is_sbc_blk(blk) ? SBC_BLK_SZ(blk) : MBC_BLK_SZ(blk);
bi = BKT_IX(gfallctr, blk_sz);
ASSERT(gfallctr->bucket_mask.main & (((UWord) 1) << IX2SMIX(bi)));
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 41f33efa79..848877d43e 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -44,9 +44,9 @@
#undef DEBUG_PRINTOUTS
#endif
-extern Eterm beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
-extern Eterm beam_return_trace[1]; /* OpCode(i_return_trace) */
-extern Eterm beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
+extern BeamInstr beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
+extern BeamInstr beam_return_trace[1]; /* OpCode(i_return_trace) */
+extern BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
/* Pseudo export entries. Never filled in with data, only used to
yield unique pointers of the correct type. */
diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c
index db2854fa40..94f9f76a20 100644
--- a/erts/emulator/sys/common/erl_mseg.c
+++ b/erts/emulator/sys/common/erl_mseg.c
@@ -57,26 +57,48 @@
/* Implement some other way to get the real page size if needed! */
#endif
-#define MAX_CACHE_SIZE 30
-
#undef MIN
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#undef MAX
#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
-#undef PAGE_MASK
-#define INV_PAGE_MASK ((Uint) (page_size - 1))
-#define PAGE_MASK (~INV_PAGE_MASK)
-#define PAGE_FLOOR(X) ((X) & PAGE_MASK)
-#define PAGE_CEILING(X) PAGE_FLOOR((X) + INV_PAGE_MASK)
-#define PAGES(X) ((X) >> page_shift)
+#define INV_ALIGNED_MASK ((UWord) ((MSEG_ALIGNED_SIZE) - 1))
+#define ALIGNED_MASK (~INV_ALIGNED_MASK)
+#define ALIGNED_FLOOR(X) (((UWord)(X)) & ALIGNED_MASK)
+#define ALIGNED_CEILING(X) ALIGNED_FLOOR((X) + INV_ALIGNED_MASK)
+#define MAP_IS_ALIGNED(X) (((UWord)(X) & (MSEG_ALIGNED_SIZE - 1)) == 0)
+
+#define IS_2POW(X) ((X) && !((X) & ((X) - 1)))
+static ERTS_INLINE Uint ceil_2pow(Uint x) {
+ int i = 1 << (4 + (sizeof(Uint) != 4 ? 1 : 0));
+ x--;
+ do { x |= x >> i; } while(i >>= 1);
+ return x + 1;
+}
+static const int debruijn[32] = {
+ 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
+ 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
+};
+
+#define LOG2(X) (debruijn[((Uint32)(((X) & -(X)) * 0x077CB531U)) >> 27])
+
+#define CACHE_AREAS (32 - MSEG_ALIGN_BITS)
+
+#define SIZE_TO_CACHE_AREA_IDX(S) (LOG2((S)) - MSEG_ALIGN_BITS)
+#define MAX_CACHE_SIZE (30)
+
+#define MSEG_FLG_IS_2POW(X) ((X) & ERTS_MSEG_FLG_2POW)
+
+#ifdef DEBUG
+#define DBG(F,...) fprintf(stderr, (F), __VA_ARGS__ )
+#else
+#define DBG(F,...) do{}while(0)
+#endif
static int atoms_initialized;
typedef struct mem_kind_t MemKind;
-static void mseg_clear_cache(MemKind*);
-
#if HALFWORD_HEAP
static int initialize_pmmap(void);
static void *pmmap(size_t size);
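ceil_2pow rounds a request up to the next power of two, and LOG2, via the de Bruijn table above, maps that power of two to a cache area index relative to MSEG_ALIGN_BITS. A stand-alone illustration of the indexing, assuming MSEG_ALIGN_BITS is 18 (256 kB aligned carriers); the real value comes from erl_mseg.h and may differ:

#include <stdio.h>

static unsigned int ceil_2pow32(unsigned int x)   /* mirrors ceil_2pow for 32-bit */
{
    int i = 16;
    x--;
    do { x |= x >> i; } while (i >>= 1);
    return x + 1;
}

static const int debruijn32[32] = {
    0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
    31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
};
#define LOG2_2POW(X) (debruijn32[((unsigned int)(((X) & -(X)) * 0x077CB531U)) >> 27])

int main(void)
{
    unsigned int align_bits = 18;             /* assumed MSEG_ALIGN_BITS (256 kB) */
    unsigned int want = 700u * 1024u;         /* requested carrier size           */
    unsigned int size = ceil_2pow32(want);    /* rounds up to 1 MB (2^20)         */

    printf("size=%u cache area index=%d\n", size, LOG2_2POW(size) - (int)align_bits);
    return 0;
}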
@@ -116,15 +138,6 @@ static int mmap_fd;
#error "Not supported"
#endif /* #if HAVE_MMAP */
-#if defined(ERTS_MSEG_FAKE_SEGMENTS) && HALFWORD_HEAP
-# warning "ERTS_MSEG_FAKE_SEGMENTS will only be used for high memory segments"
-#endif
-
-#if defined(ERTS_MSEG_FAKE_SEGMENTS)
-#undef CAN_PARTLY_DESTROY
-#define CAN_PARTLY_DESTROY 0
-#endif
-
const ErtsMsegOpt_t erts_mseg_default_opt = {
1, /* Use cache */
1, /* Preserv data */
@@ -137,26 +150,17 @@ const ErtsMsegOpt_t erts_mseg_default_opt = {
};
-typedef struct cache_desc_t_ {
- void *seg;
- Uint size;
- struct cache_desc_t_ *next;
- struct cache_desc_t_ *prev;
-} cache_desc_t;
-
typedef struct {
Uint32 giga_no;
Uint32 no;
} CallCounter;
-static Uint page_size;
-static Uint page_shift;
-
typedef struct {
CallCounter alloc;
CallCounter dealloc;
CallCounter realloc;
CallCounter create;
+ CallCounter create_resize;
CallCounter destroy;
#if HAVE_MSEG_RECREATE
CallCounter recreate;
@@ -165,17 +169,25 @@ typedef struct {
CallCounter check_cache;
} ErtsMsegCalls;
+typedef struct cache_t_ cache_t;
+
+struct cache_t_ {
+ Uint size;
+ void *seg;
+ cache_t *next;
+};
+
+
typedef struct ErtsMsegAllctr_t_ ErtsMsegAllctr_t;
struct mem_kind_t {
- cache_desc_t cache_descs[MAX_CACHE_SIZE];
- cache_desc_t *free_cache_descs;
- cache_desc_t *cache;
- cache_desc_t *cache_end;
-
- Uint cache_size;
- Uint min_cached_seg_size;
- Uint max_cached_seg_size;
+
+ cache_t cache[MAX_CACHE_SIZE];
+ cache_t *cache_unpowered;
+ cache_t *cache_area[CACHE_AREAS];
+ cache_t *cache_free;
+
+ Sint cache_size;
Uint cache_hits;
struct {
@@ -320,8 +332,7 @@ static erts_mtx_t init_atoms_mutex; /* Also needed when !USE_THREADS */
static ERTS_INLINE void
-schedule_cache_check(ErtsMsegAllctr_t *ma)
-{
+schedule_cache_check(ErtsMsegAllctr_t *ma) {
if (!ma->is_cache_check_scheduled && ma->is_init_done) {
erts_set_aux_work_timeout(ma->ix,
@@ -331,12 +342,45 @@ schedule_cache_check(ErtsMsegAllctr_t *ma)
}
}
+/* remove ErtsMsegAllctr_t from arguments?
+ * only used for statistics
+ */
+static ERTS_INLINE void *
+mmap_align(ErtsMsegAllctr_t *ma, void *addr, size_t length, int prot, int flags, int fd, off_t offset) {
+
+ void *p, *q;
+ UWord d;
+
+ p = mmap(addr, length, prot, flags, fd, offset);
+
+ if (MAP_IS_ALIGNED(p) || p == MAP_FAILED)
+ return p;
+
+ if (ma)
+ INC_CC(ma, create_resize);
+
+ munmap(p, length);
+
+ if ((p = mmap(addr, length + MSEG_ALIGNED_SIZE, prot, flags, fd, offset)) == MAP_FAILED)
+ return MAP_FAILED;
+
+ q = (void *)ALIGNED_CEILING(p);
+ d = q - p;
+
+ if (d > 0)
+ munmap(p, d);
+
+ if (MSEG_ALIGNED_SIZE - d > 0)
+ munmap((void *) (q + length), MSEG_ALIGNED_SIZE - d);
+
+ return q;
+}
+
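Why the retry with length + MSEG_ALIGNED_SIZE in mmap_align is enough (a sketch of the argument, not stated in the patch): for the second mapping starting at p, the aligned start q = ALIGNED_CEILING(p) satisfies 0 <= q - p < MSEG_ALIGNED_SIZE, so q + length never passes the end of the mapping at p + length + MSEG_ALIGNED_SIZE. Unmapping the d = q - p leading bytes (nothing when d is 0) and the MSEG_ALIGNED_SIZE - d trailing bytes leaves exactly the aligned range [q, q + length) mapped.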
static ERTS_INLINE void *
mseg_create(ErtsMsegAllctr_t *ma, MemKind* mk, Uint size)
{
void *seg;
-
- ASSERT(size % page_size == 0);
+ ASSERT(size % MSEG_ALIGNED_SIZE == 0);
#if HALFWORD_HEAP
if (mk == &ma->low_mem) {
@@ -345,18 +389,17 @@ mseg_create(ErtsMsegAllctr_t *ma, MemKind* mk, Uint size)
erts_fprintf(stderr,"Pointer mask failure (0x%08lx)\n",(unsigned long) seg);
return NULL;
}
- }
- else
+ } else
#endif
{
-#if defined(ERTS_MSEG_FAKE_SEGMENTS)
- seg = erts_sys_alloc(ERTS_ALC_N_INVALID, NULL, size);
-#elif HAVE_MMAP
+#if HAVE_MMAP
{
- seg = (void *) mmap((void *) 0, (size_t) size,
+ seg = (void *) mmap_align(ma, (void *) 0, (size_t) size,
MMAP_PROT, MMAP_FLAGS, MMAP_FD, 0);
if (seg == (void *) MAP_FAILED)
seg = NULL;
+
+ ASSERT(MAP_IS_ALIGNED(seg) || !seg);
}
#else
# error "Missing mseg_create() implementation"
@@ -369,38 +412,24 @@ mseg_create(ErtsMsegAllctr_t *ma, MemKind* mk, Uint size)
}
static ERTS_INLINE void
-mseg_destroy(ErtsMsegAllctr_t *ma, MemKind* mk, void *seg, Uint size)
-{
-#ifdef DEBUG
- int res;
-#endif
+mseg_destroy(ErtsMsegAllctr_t *ma, MemKind* mk, void *seg, Uint size) {
+ ERTS_DECLARE_DUMMY(int res);
#if HALFWORD_HEAP
if (mk == &ma->low_mem) {
-#ifdef DEBUG
- res =
-#endif
- pmunmap((void *) seg, size);
+ res = pmunmap((void *) seg, size);
}
else
#endif
{
-#ifdef ERTS_MSEG_FAKE_SEGMENTS
- erts_sys_free(ERTS_ALC_N_INVALID, NULL, seg);
-#ifdef DEBUG
- res = 0;
-#endif
-#elif HAVE_MMAP
-#ifdef DEBUG
- res =
-#endif
- munmap((void *) seg, size);
+#ifdef HAVE_MMAP
+ res = munmap((void *) seg, size);
#else
# error "Missing mseg_destroy() implementation"
#endif
}
- ASSERT(size % page_size == 0);
+ ASSERT(size % MSEG_ALIGNED_SIZE == 0);
ASSERT(res == 0);
INC_CC(ma, destroy);
@@ -408,14 +437,36 @@ mseg_destroy(ErtsMsegAllctr_t *ma, MemKind* mk, void *seg, Uint size)
}
#if HAVE_MSEG_RECREATE
+#if defined(__NetBSD__)
+#define MREMAP_FLAGS (0)
+#else
+#define MREMAP_FLAGS (MREMAP_MAYMOVE)
+#endif
+
+
+/* mseg_recreate
+ * May return *unaligned* segments, i.e. addresses that are not aligned to
+ * MSEG_ALIGNMENT (they are still page aligned).
+ *
+ * This is fine for single block carriers as long as we don't cache misaligned
+ * segments (since multiblock carriers may use them).
+ *
+ * For multiblock carriers we *need* MSEG_ALIGNMENT, but mbc's will never be
+ * reallocated.
+ *
+ * This should probably be fixed the following way:
+ * 1) Use an option to segment allocation - NEED_ALIGNMENT
+ * 2) Add an mremap_align which takes care of aligning a newly mremapped area
+ * 3) Fix the cache to handle both aligned and unaligned segments
+ */
static ERTS_INLINE void *
mseg_recreate(ErtsMsegAllctr_t *ma, MemKind* mk, void *old_seg, Uint old_size, Uint new_size)
{
void *new_seg;
- ASSERT(old_size % page_size == 0);
- ASSERT(new_size % page_size == 0);
+ ASSERT(old_size % MSEG_ALIGNED_SIZE == 0);
+ ASSERT(new_size % MSEG_ALIGNED_SIZE == 0);
#if HALFWORD_HEAP
if (mk == &ma->low_mem) {
@@ -426,22 +477,12 @@ mseg_recreate(ErtsMsegAllctr_t *ma, MemKind* mk, void *old_seg, Uint old_size, U
else
#endif
{
-#if defined(ERTS_MSEG_FAKE_SEGMENTS)
- new_seg = erts_sys_realloc(ERTS_ALC_N_INVALID, NULL, old_seg, new_size);
-#elif HAVE_MREMAP
-
- #if defined(__NetBSD__)
- new_seg = (void *) mremap((void *) old_seg,
- (size_t) old_size,
- NULL,
- (size_t) new_size,
- 0);
- #else
- new_seg = (void *) mremap((void *) old_seg,
- (size_t) old_size,
- (size_t) new_size,
- MREMAP_MAYMOVE);
- #endif
+#if HAVE_MREMAP
+#if defined(__NetBSD__)
+ new_seg = mremap(old_seg, (size_t)old_size, NULL, new_size, MREMAP_FLAGS);
+#else
+ new_seg = mremap(old_seg, (size_t)old_size, (size_t)new_size, MREMAP_FLAGS);
+#endif
if (new_seg == (void *) MAP_FAILED)
new_seg = NULL;
#else
@@ -475,151 +516,265 @@ do { \
#define ERTS_DBG_MK_CHK_THR_ACCESS(MK)
#endif
-static ERTS_INLINE cache_desc_t *
-alloc_cd(MemKind* mk)
-{
- cache_desc_t *cd = mk->free_cache_descs;
+/* NEW CACHE interface */
+
+static ERTS_INLINE cache_t *mseg_cache_alloc_descriptor(MemKind *mk) {
+ cache_t *c = mk->cache_free;
+
ERTS_DBG_MK_CHK_THR_ACCESS(mk);
- if (cd)
- mk->free_cache_descs = cd->next;
- return cd;
+ if (c)
+ mk->cache_free = c->next;
+
+ return c;
}
-static ERTS_INLINE void
-free_cd(MemKind* mk, cache_desc_t *cd)
-{
+static ERTS_INLINE void mseg_cache_free_descriptor(MemKind *mk, cache_t *c) {
ERTS_DBG_MK_CHK_THR_ACCESS(mk);
- cd->next = mk->free_cache_descs;
- mk->free_cache_descs = cd;
+ ASSERT(c);
+
+ c->seg = NULL;
+ c->size = 0;
+ c->next = mk->cache_free;
+ mk->cache_free = c;
}
+static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, Uint size) {
-static ERTS_INLINE void
-link_cd(MemKind* mk, cache_desc_t *cd)
-{
+ cache_t *c;
ERTS_DBG_MK_CHK_THR_ACCESS(mk);
- if (mk->cache)
- mk->cache->prev = cd;
- cd->next = mk->cache;
- cd->prev = NULL;
- mk->cache = cd;
-
- if (!mk->cache_end) {
- ASSERT(!cd->next);
- mk->cache_end = cd;
+
+ if (mk->cache_free && MAP_IS_ALIGNED(seg)) {
+ if (IS_2POW(size)) {
+ int ix = SIZE_TO_CACHE_AREA_IDX(size);
+
+ ASSERT(ix < CACHE_AREAS);
+ ASSERT((1 << (ix + MSEG_ALIGN_BITS)) == size);
+
+ /* unlink from free cache list */
+ c = mseg_cache_alloc_descriptor(mk);
+
+ /* link to cache area */
+ c->seg = seg;
+ c->size = size;
+ c->next = mk->cache_area[ix];
+
+ mk->cache_area[ix] = c;
+ mk->cache_size++;
+
+ ASSERT(mk->cache_size <= mk->ma->max_cache_size);
+
+ return 1;
+ } else {
+ /* unlink from free cache list */
+ c = mseg_cache_alloc_descriptor(mk);
+
+ /* link to cache area */
+ c->seg = seg;
+ c->size = size;
+ c->next = mk->cache_unpowered;
+
+ mk->cache_unpowered = c;
+ mk->cache_size++;
+
+ ASSERT(mk->cache_size <= mk->ma->max_cache_size);
+
+ return 1;
+ }
}
- mk->cache_size++;
+ return 0;
}
-#if CAN_PARTLY_DESTROY
-static ERTS_INLINE void
-end_link_cd(MemKind* mk, cache_desc_t *cd)
-{
+static ERTS_INLINE void *cache_get_segment(MemKind *mk, Uint *size_p) {
+
+ Uint size = *size_p;
+
ERTS_DBG_MK_CHK_THR_ACCESS(mk);
- if (mk->cache_end)
- mk->cache_end->next = cd;
- cd->next = NULL;
- cd->prev = mk->cache_end;
- mk->cache_end = cd;
-
- if (!mk->cache) {
- ASSERT(!cd->prev);
- mk->cache = cd;
- }
+ if (IS_2POW(size)) {
+
+ int i, ix = SIZE_TO_CACHE_AREA_IDX(size);
+ void *seg;
+ cache_t *c;
+ Uint csize;
+
+ for( i = ix; i < CACHE_AREAS; i++) {
+
+ if ((c = mk->cache_area[i]) == NULL)
+ continue;
+
+ ASSERT(IS_2POW(c->size));
+
+ /* unlink from cache area */
+ csize = c->size;
+ seg = c->seg;
+ mk->cache_area[i] = c->next;
+ c->next = NULL;
+ mk->cache_size--;
+ mk->cache_hits++;
+
+ /* link to free cache list */
+ mseg_cache_free_descriptor(mk, c);
+
+ ASSERT(!(mk->cache_size < 0));
+
+ /* divvy up the cache - if needed */
+ while( i > ix) {
+ csize = csize >> 1;
+ /* try to cache half of it */
+ if (!cache_bless_segment(mk, (char *)seg + csize, csize)) {
+ /* wouldn't cache .. destroy it instead */
+ mseg_destroy(mk->ma, mk, (char *)seg + csize, csize);
+ }
+ i--;
+ }
+ ASSERT(csize == size);
+ return seg;
+ }
+ }
+ else if (mk->cache_unpowered) {
+ void *seg;
+ cache_t *c, *pc;
+ Uint csize;
+ Uint bad_max_abs = mk->ma->abs_max_cache_bad_fit;
+ Uint bad_max_rel = mk->ma->rel_max_cache_bad_fit;
+
+ c = mk->cache_unpowered;
+ pc = c;
+
+ while (c) {
+ csize = c->size;
+ if (csize >= size &&
+ ((csize - size)*100 < bad_max_rel*size) &&
+ (csize - size) < bad_max_abs ) {
+
+ /* unlink from cache area */
+ seg = c->seg;
+
+ if (pc == c) {
+ mk->cache_unpowered = c->next;
+ } else {
+ pc->next = c->next;
+ }
+
+ c->next = NULL;
+ mk->cache_size--;
+ mk->cache_hits++;
+
+ /* link to free cache list */
+ mseg_cache_free_descriptor(mk, c);
+ *size_p = csize;
+
+ return seg;
+ }
- mk->cache_size++;
+ pc = c;
+ c = c->next;
+ }
+ }
+ return NULL;
}
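When a power-of-two request can only be served from a larger cache area, the loop above splits the segment down, re-caching (or destroying) the upper half at each step until the front piece matches the request. A toy walk-through with plain offsets instead of real segments, illustration only:

#include <stdio.h>

int main(void)
{
    unsigned long cached = 4ul << 20;   /* a 4 MB segment sits in a cache area */
    unsigned long want   = 1ul << 20;   /* the caller asked for 1 MB           */
    unsigned long csize  = cached;

    while (csize > want) {
        csize >>= 1;
        /* the upper half would be re-cached, or destroyed if no
         * free cache descriptor is available */
        printf("re-cache %lu bytes at offset %lu\n", csize, csize);
    }
    printf("return front %lu bytes\n", csize);   /* csize == want here */
    return 0;
}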
-#endif
-static ERTS_INLINE void
-unlink_cd(MemKind* mk, cache_desc_t *cd)
-{
- ERTS_DBG_MK_CHK_THR_ACCESS(mk);
- if (cd->next)
- cd->next->prev = cd->prev;
- else
- mk->cache_end = cd->prev;
-
- if (cd->prev)
- cd->prev->next = cd->next;
- else
- mk->cache = cd->next;
- ASSERT(mk->cache_size > 0);
+/* *_mseg_check_*_cache
+ * Slowly remove cached segments from the allocator via the
+ * aux-work callbacks run by the schedulers.
+ */
+
+static ERTS_INLINE Uint mseg_drop_one_memkind_cache_size(MemKind *mk, cache_t **head) {
+ cache_t *c = NULL;
+
+ c = *head;
+
+ ASSERT( c != NULL );
+
+ *head = c->next;
+
+ if (erts_mtrace_enabled)
+ erts_mtrace_crr_free(SEGTYPE, SEGTYPE, c->seg);
+
+ mseg_destroy(mk->ma, mk, c->seg, c->size);
+ mseg_cache_free_descriptor(mk, c);
+
+ mk->segments.current.watermark--;
mk->cache_size--;
-}
-static ERTS_INLINE void
-check_cache_limits(MemKind* mk)
-{
- cache_desc_t *cd;
- ERTS_DBG_MK_CHK_THR_ACCESS(mk);
- mk->max_cached_seg_size = 0;
- mk->min_cached_seg_size = ~((Uint) 0);
- for (cd = mk->cache; cd; cd = cd->next) {
- if (cd->size < mk->min_cached_seg_size)
- mk->min_cached_seg_size = cd->size;
- if (cd->size > mk->max_cached_seg_size)
- mk->max_cached_seg_size = cd->size;
- }
+ ASSERT( mk->cache_size >= 0 );
+
+ return mk->cache_size;
}
-static ERTS_INLINE void
-adjust_cache_size(MemKind* mk, int force_check_limits)
-{
- cache_desc_t *cd;
- int check_limits = force_check_limits;
- Sint max_cached = ((Sint) mk->segments.current.watermark
- - (Sint) mk->segments.current.no);
- ERTS_DBG_MK_CHK_THR_ACCESS(mk);
- while (((Sint) mk->cache_size) > max_cached && ((Sint) mk->cache_size) > 0) {
- ASSERT(mk->cache_end);
- cd = mk->cache_end;
- if (!check_limits &&
- !(mk->min_cached_seg_size < cd->size
- && cd->size < mk->max_cached_seg_size)) {
- check_limits = 1;
- }
+static ERTS_INLINE Uint mseg_drop_memkind_cache_size(MemKind *mk, cache_t **head) {
+ cache_t *c = NULL, *next = NULL;
+
+ c = *head;
+ ASSERT( c != NULL );
+
+ while (c) {
+
+ next = c->next;
+
if (erts_mtrace_enabled)
- erts_mtrace_crr_free(SEGTYPE, SEGTYPE, cd->seg);
- mseg_destroy(mk->ma, mk, cd->seg, cd->size);
- unlink_cd(mk,cd);
- free_cd(mk,cd);
- }
+ erts_mtrace_crr_free(SEGTYPE, SEGTYPE, c->seg);
- if (check_limits)
- check_cache_limits(mk);
-}
+ mseg_destroy(mk->ma, mk, c->seg, c->size);
+ mseg_cache_free_descriptor(mk, c);
-static Uint
-check_one_cache(MemKind* mk)
-{
- if (mk->segments.current.watermark > mk->segments.current.no)
mk->segments.current.watermark--;
- adjust_cache_size(mk, 0);
+ mk->cache_size--;
+
+ c = next;
+ }
+
+ *head = NULL;
+
+ ASSERT( mk->cache_size >= 0 );
- if (mk->cache_size)
- schedule_cache_check(mk->ma);
return mk->cache_size;
}
-static void do_cache_check(ErtsMsegAllctr_t *ma)
-{
- int empty_cache = 1;
+/* mseg_check_memkind_cache
+ * - Check if we can empty some cached segments in this
+ * MemKind.
+ */
+
+
+static Uint mseg_check_memkind_cache(MemKind *mk) {
+ int i;
+
+ ERTS_DBG_MK_CHK_THR_ACCESS(mk);
+
+ for (i = 0; i < CACHE_AREAS; i++) {
+ if (mk->cache_area[i] != NULL)
+ return mseg_drop_one_memkind_cache_size(mk, &(mk->cache_area[i]));
+ }
+
+ if (mk->cache_unpowered)
+ return mseg_drop_one_memkind_cache_size(mk, &(mk->cache_unpowered));
+
+ return 0;
+}
+
+/* mseg_cache_check
+ * - Check if we have some cache we can purge
+ * in any of the memkinds.
+ */
+
+static void mseg_cache_check(ErtsMsegAllctr_t *ma) {
MemKind* mk;
+ Uint empty_cache = 1;
ERTS_MSEG_LOCK(ma);
- for (mk=ma->mk_list; mk; mk=mk->next) {
- if (check_one_cache(mk))
+ for (mk = ma->mk_list; mk; mk = mk->next) {
+ if (mseg_check_memkind_cache(mk))
empty_cache = 0;
}
+ /* If all MemKinds caches are empty,
+ * remove aux-work callback
+ */
if (empty_cache) {
ma->is_cache_check_scheduled = 0;
- erts_set_aux_work_timeout(ma->ix,
- ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK,
- 0);
+ erts_set_aux_work_timeout(ma->ix, ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK, 0);
}
INC_CC(ma, check_cache);
@@ -627,27 +782,65 @@ static void do_cache_check(ErtsMsegAllctr_t *ma)
ERTS_MSEG_UNLOCK(ma);
}
-void erts_mseg_cache_check(void)
-{
- do_cache_check(ERTS_MSEG_ALLCTR_SS());
+/* erts_mseg_cache_check
+ * - This is a callback scheduled as aux-work from the schedulers;
+ *   it is called at some interval as long as this mseg allocator
+ *   and memkind have a cache.
+ * - Purpose: empty the cache slowly so we don't accumulate mapped
+ *   areas and bloat memory.
+ */
+
+void erts_mseg_cache_check(void) {
+ mseg_cache_check(ERTS_MSEG_ALLCTR_SS());
}
-static void
-mseg_clear_cache(MemKind* mk)
-{
- mk->segments.current.watermark = 0;
- adjust_cache_size(mk, 1);
+/* *_mseg_clear_*_cache
+ * Remove cached segments from the allocator completely
+ */
+
+static void mseg_clear_memkind_cache(MemKind *mk) {
+ int i;
+
+ /* drop pow2 caches */
+ for (i = 0; i < CACHE_AREAS; i++) {
+ if (mk->cache_area[i] == NULL)
+ continue;
+
+ mseg_drop_memkind_cache_size(mk, &(mk->cache_area[i]));
+ ASSERT(mk->cache_area[i] == NULL);
+ }
+ /* drop varied caches */
+ if(mk->cache_unpowered)
+ mseg_drop_memkind_cache_size(mk, &(mk->cache_unpowered));
+
+ ASSERT(mk->cache_unpowered == NULL);
+ ASSERT(mk->cache_size == 0);
+}
+
+static void mseg_clear_cache(ErtsMsegAllctr_t *ma) {
+ MemKind* mk;
+
+ ERTS_MSEG_LOCK(ma);
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
+
- ASSERT(!mk->cache);
- ASSERT(!mk->cache_end);
- ASSERT(!mk->cache_size);
+ for (mk = ma->mk_list; mk; mk = mk->next) {
+ mseg_clear_memkind_cache(mk);
+ }
- mk->segments.current.watermark = mk->segments.current.no;
+ INC_CC(ma, clear_cache);
- INC_CC(mk->ma, clear_cache);
+ ERTS_MSEG_UNLOCK(ma);
}
+void erts_mseg_clear_cache(void) {
+ mseg_clear_cache(ERTS_MSEG_ALLCTR_SS());
+ mseg_clear_cache(ERTS_MSEG_ALLCTR_IX(0));
+}
+
+
+
static ERTS_INLINE MemKind* memkind(ErtsMsegAllctr_t *ma,
const ErtsMsegOpt_t *opt)
{
@@ -660,116 +853,40 @@ static ERTS_INLINE MemKind* memkind(ErtsMsegAllctr_t *ma,
static void *
mseg_alloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, Uint *size_p,
- const ErtsMsegOpt_t *opt)
+ Uint flags, const ErtsMsegOpt_t *opt)
{
- Uint max, min, diff_size, size;
- cache_desc_t *cd, *cand_cd;
+ Uint size;
void *seg;
MemKind* mk = memkind(ma, opt);
INC_CC(ma, alloc);
- size = PAGE_CEILING(*size_p);
+ /* Carrier align */
+ size = ALIGNED_CEILING(*size_p);
+
+ /* Cache optim (if applicable) */
+ if (MSEG_FLG_IS_2POW(flags) && !IS_2POW(size))
+ size = ceil_2pow(size);
#if CAN_PARTLY_DESTROY
if (size < ma->min_seg_size)
ma->min_seg_size = size;
#endif
+
+ if (opt->cache && mk->cache_size > 0 && (seg = cache_get_segment(mk, &size)) != NULL)
+ goto done;
- if (!opt->cache) {
- create_seg:
- adjust_cache_size(mk,0);
- seg = mseg_create(ma, mk, size);
- if (!seg) {
- mseg_clear_cache(mk);
- seg = mseg_create(ma, mk, size);
- if (!seg)
- size = 0;
- }
-
- *size_p = size;
- if (seg) {
- if (erts_mtrace_enabled)
- erts_mtrace_crr_alloc(seg, atype, ERTS_MTRACE_SEGMENT_ID, size);
- ERTS_MSEG_ALLOC_STAT(mk,size);
- }
- return seg;
- }
-
- if (size > mk->max_cached_seg_size)
- goto create_seg;
-
- if (size < mk->min_cached_seg_size) {
-
- diff_size = mk->min_cached_seg_size - size;
-
- if (diff_size > ma->abs_max_cache_bad_fit)
- goto create_seg;
-
- if (100*PAGES(diff_size) > ma->rel_max_cache_bad_fit*PAGES(size))
- goto create_seg;
-
- }
-
- max = 0;
- min = ~((Uint) 0);
- cand_cd = NULL;
-
- for (cd = mk->cache; cd; cd = cd->next) {
- if (cd->size >= size) {
- if (!cand_cd) {
- cand_cd = cd;
- continue;
- }
- else if (cd->size < cand_cd->size) {
- if (max < cand_cd->size)
- max = cand_cd->size;
- if (min > cand_cd->size)
- min = cand_cd->size;
- cand_cd = cd;
- continue;
- }
- }
- if (max < cd->size)
- max = cd->size;
- if (min > cd->size)
- min = cd->size;
- }
-
- mk->min_cached_seg_size = min;
- mk->max_cached_seg_size = max;
-
- if (!cand_cd)
- goto create_seg;
-
- diff_size = cand_cd->size - size;
-
- if (diff_size > ma->abs_max_cache_bad_fit
- || 100*PAGES(diff_size) > ma->rel_max_cache_bad_fit*PAGES(size)) {
- if (mk->max_cached_seg_size < cand_cd->size)
- mk->max_cached_seg_size = cand_cd->size;
- if (mk->min_cached_seg_size > cand_cd->size)
- mk->min_cached_seg_size = cand_cd->size;
- goto create_seg;
- }
-
- mk->cache_hits++;
-
- size = cand_cd->size;
- seg = cand_cd->seg;
-
- unlink_cd(mk,cand_cd);
- free_cd(mk,cand_cd);
+ if ((seg = mseg_create(ma, mk, size)) == NULL)
+ size = 0;
+done:
*size_p = size;
+ if (seg) {
+ if (erts_mtrace_enabled)
+ erts_mtrace_crr_alloc(seg, atype, ERTS_MTRACE_SEGMENT_ID, size);
- if (erts_mtrace_enabled) {
- erts_mtrace_crr_free(SEGTYPE, SEGTYPE, seg);
- erts_mtrace_crr_alloc(seg, atype, SEGTYPE, size);
- }
-
- if (seg)
ERTS_MSEG_ALLOC_STAT(mk,size);
+ }
return seg;
}
@@ -780,73 +897,42 @@ mseg_dealloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, Uint size,
const ErtsMsegOpt_t *opt)
{
MemKind* mk = memkind(ma, opt);
- cache_desc_t *cd;
+
ERTS_MSEG_DEALLOC_STAT(mk,size);
- if (!opt->cache || ma->max_cache_size == 0) {
- if (erts_mtrace_enabled)
- erts_mtrace_crr_free(atype, SEGTYPE, seg);
- mseg_destroy(ma, mk, seg, size);
+ if (opt->cache && cache_bless_segment(mk, seg, size)) {
+ schedule_cache_check(ma);
+ goto done;
}
- else {
- int check_limits = 0;
-
- if (size < mk->min_cached_seg_size)
- mk->min_cached_seg_size = size;
- if (size > mk->max_cached_seg_size)
- mk->max_cached_seg_size = size;
-
- if (!mk->free_cache_descs) {
- cd = mk->cache_end;
- if (!(mk->min_cached_seg_size < cd->size
- && cd->size < mk->max_cached_seg_size)) {
- check_limits = 1;
- }
- if (erts_mtrace_enabled)
- erts_mtrace_crr_free(SEGTYPE, SEGTYPE, cd->seg);
- mseg_destroy(ma, mk, cd->seg, cd->size);
- unlink_cd(mk,cd);
- free_cd(mk,cd);
- }
- cd = alloc_cd(mk);
- ASSERT(cd);
- cd->seg = seg;
- cd->size = size;
- link_cd(mk,cd);
+ if (erts_mtrace_enabled)
+ erts_mtrace_crr_free(atype, SEGTYPE, seg);
- if (erts_mtrace_enabled) {
- erts_mtrace_crr_free(atype, SEGTYPE, seg);
- erts_mtrace_crr_alloc(seg, SEGTYPE, SEGTYPE, size);
- }
-
- /* ASSERT(segments.current.watermark >= segments.current.no + cache_size); */
-
- if (check_limits)
- check_cache_limits(mk);
+ mseg_destroy(ma, mk, seg, size);
- schedule_cache_check(ma);
-
- }
+done:
INC_CC(ma, dealloc);
}
static void *
mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg,
- Uint old_size, Uint *new_size_p, const ErtsMsegOpt_t *opt)
+ Uint old_size, Uint *new_size_p, Uint flags, const ErtsMsegOpt_t *opt)
{
MemKind* mk;
void *new_seg;
Uint new_size;
+ /* Just allocate a new segment if we didn't have one before */
if (!seg || !old_size) {
- new_seg = mseg_alloc(ma, atype, new_size_p, opt);
+ new_seg = mseg_alloc(ma, atype, new_size_p, flags, opt);
DEC_CC(ma, alloc);
return new_seg;
}
+
+ /* Dealloc old segment if new segment is of size 0 */
if (!(*new_size_p)) {
mseg_dealloc(ma, atype, seg, old_size, opt);
DEC_CC(ma, dealloc);
@@ -855,7 +941,13 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg,
mk = memkind(ma, opt);
new_seg = seg;
- new_size = PAGE_CEILING(*new_size_p);
+
+ /* Carrier align */
+ new_size = ALIGNED_CEILING(*new_size_p);
+
+ /* Cache optim (if applicable) */
+ if (MSEG_FLG_IS_2POW(flags) && !IS_2POW(new_size))
+ new_size = ceil_2pow(new_size);
if (new_size == old_size)
;
@@ -866,53 +958,27 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg,
if (new_size < ma->min_seg_size)
ma->min_seg_size = new_size;
#endif
-
+ /* +M<S>rsbcst <ratio> */
if (shrink_sz < opt->abs_shrink_th
- && 100*PAGES(shrink_sz) < opt->rel_shrink_th*PAGES(old_size)) {
+ && 100*shrink_sz < opt->rel_shrink_th*old_size) {
new_size = old_size;
}
else {
#if CAN_PARTLY_DESTROY
- if (shrink_sz > ma->min_seg_size
- && mk->free_cache_descs
- && opt->cache) {
- cache_desc_t *cd;
-
- cd = alloc_cd(mk);
- ASSERT(cd);
- cd->seg = ((char *) seg) + new_size;
- cd->size = shrink_sz;
- end_link_cd(mk,cd);
-
- if (erts_mtrace_enabled) {
- erts_mtrace_crr_realloc(new_seg,
- atype,
- SEGTYPE,
- seg,
- new_size);
- erts_mtrace_crr_alloc(cd->seg, SEGTYPE, SEGTYPE, cd->size);
- }
- schedule_cache_check(ma);
- }
- else {
- if (erts_mtrace_enabled)
- erts_mtrace_crr_realloc(new_seg,
- atype,
- SEGTYPE,
- seg,
- new_size);
- mseg_destroy(ma, mk, ((char *) seg) + new_size, shrink_sz);
- }
+ if (erts_mtrace_enabled)
+ erts_mtrace_crr_realloc(new_seg, atype, SEGTYPE, seg, new_size);
-#elif HAVE_MSEG_RECREATE
+ mseg_destroy(ma, mk, ((char *) seg) + new_size, shrink_sz);
+#elif HAVE_MSEG_RECREATE
goto do_recreate;
-
#else
+ new_seg = mseg_alloc(ma, atype, &new_size, flags, opt);
+
+ ASSERT(MAP_IS_ALIGNED(new_seg) || !new_seg);
- new_seg = mseg_alloc(ma, atype, &new_size, opt);
if (!new_seg)
new_size = old_size;
else {
@@ -921,16 +987,15 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg,
MIN(new_size, old_size));
mseg_dealloc(ma, atype, seg, old_size, opt);
}
-
#endif
-
}
}
else {
if (!opt->preserv) {
mseg_dealloc(ma, atype, seg, old_size, opt);
- new_seg = mseg_alloc(ma, atype, &new_size, opt);
+ new_seg = mseg_alloc(ma, atype, &new_size, flags, opt);
+ ASSERT(MAP_IS_ALIGNED(new_seg) || !new_seg);
}
else {
#if HAVE_MSEG_RECREATE
@@ -938,18 +1003,23 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg,
do_recreate:
#endif
new_seg = mseg_recreate(ma, mk, (void *) seg, old_size, new_size);
+	    /* ASSERT(MAP_IS_ALIGNED(new_seg) || !new_seg);
+	     * will not always be aligned, and that is ok for now
+	     */
+
if (erts_mtrace_enabled)
erts_mtrace_crr_realloc(new_seg, atype, SEGTYPE, seg, new_size);
if (!new_seg)
new_size = old_size;
#else
- new_seg = mseg_alloc(ma, atype, &new_size, opt);
+ new_seg = mseg_alloc(ma, atype, &new_size, flags, opt);
+
+ ASSERT(MAP_IS_ALIGNED(new_seg) || !new_seg);
+
if (!new_seg)
new_size = old_size;
else {
- sys_memcpy(((char *) new_seg),
- ((char *) seg),
- MIN(new_size, old_size));
+ sys_memcpy(((char *) new_seg), ((char *) seg), MIN(new_size, old_size));
mseg_dealloc(ma, atype, seg, old_size, opt);
}
#endif
@@ -958,6 +1028,7 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg,
INC_CC(ma, realloc);
+ ASSERT(!MSEG_FLG_IS_2POW(flags) || IS_2POW(new_size));
*new_size_p = new_size;
ERTS_MSEG_REALLOC_STAT(mk, old_size, new_size);
@@ -990,6 +1061,7 @@ static struct {
Eterm mseg_dealloc;
Eterm mseg_realloc;
Eterm mseg_create;
+ Eterm mseg_create_resize;
Eterm mseg_destroy;
#if HAVE_MSEG_RECREATE
Eterm mseg_recreate;
@@ -1046,6 +1118,7 @@ init_atoms(ErtsMsegAllctr_t *ma)
AM_INIT(mseg_dealloc);
AM_INIT(mseg_realloc);
AM_INIT(mseg_create);
+ AM_INIT(mseg_create_resize);
AM_INIT(mseg_destroy);
#if HAVE_MSEG_RECREATE
AM_INIT(mseg_recreate);
@@ -1065,14 +1138,12 @@ init_atoms(ErtsMsegAllctr_t *ma)
erts_mtx_unlock(&init_atoms_mutex);
}
-
#define bld_uint erts_bld_uint
#define bld_cons erts_bld_cons
#define bld_tuple erts_bld_tuple
#define bld_string erts_bld_string
#define bld_2tup_list erts_bld_2tup_list
-
/*
* bld_unstable_uint() (instead of bld_uint()) is used when values may
* change between size check and actual build. This because a value
@@ -1116,6 +1187,7 @@ add_4tup(Uint **hpp, Uint *szp, Eterm *lp,
*lp = bld_cons(hpp, szp, bld_tuple(hpp, szp, 4, el1, el2, el3, el4), *lp);
}
+
static Eterm
info_options(ErtsMsegAllctr_t *ma,
char *prefix,
@@ -1176,6 +1248,7 @@ info_calls(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, Uint **hpp
PRINT_CC(to, arg, dealloc);
PRINT_CC(to, arg, realloc);
PRINT_CC(to, arg, create);
+ PRINT_CC(to, arg, create_resize);
PRINT_CC(to, arg, destroy);
#if HAVE_MSEG_RECREATE
PRINT_CC(to, arg, recreate);
@@ -1215,6 +1288,10 @@ info_calls(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, Uint **hpp
bld_unstable_uint(hpp, szp, ma->calls.create.giga_no),
bld_unstable_uint(hpp, szp, ma->calls.create.no));
+ add_3tup(hpp, szp, &res,
+ am.mseg_create_resize,
+ bld_unstable_uint(hpp, szp, ma->calls.create_resize.giga_no),
+ bld_unstable_uint(hpp, szp, ma->calls.create_resize.no));
add_3tup(hpp, szp, &res,
am.mseg_realloc,
@@ -1401,21 +1478,21 @@ erts_mseg_info(int ix,
}
void *
-erts_mseg_alloc_opt(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
+erts_mseg_alloc_opt(ErtsAlcType_t atype, Uint *size_p, Uint flags, const ErtsMsegOpt_t *opt)
{
ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt);
void *seg;
ERTS_MSEG_LOCK(ma);
ERTS_DBG_MA_CHK_THR_ACCESS(ma);
- seg = mseg_alloc(ma, atype, size_p, opt);
+ seg = mseg_alloc(ma, atype, size_p, flags, opt);
ERTS_MSEG_UNLOCK(ma);
return seg;
}
void *
-erts_mseg_alloc(ErtsAlcType_t atype, Uint *size_p)
+erts_mseg_alloc(ErtsAlcType_t atype, Uint *size_p, Uint flags)
{
- return erts_mseg_alloc_opt(atype, size_p, &erts_mseg_default_opt);
+ return erts_mseg_alloc_opt(atype, size_p, flags, &erts_mseg_default_opt);
}
void
@@ -1438,44 +1515,24 @@ erts_mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size)
void *
erts_mseg_realloc_opt(ErtsAlcType_t atype, void *seg,
Uint old_size, Uint *new_size_p,
+ Uint flags,
const ErtsMsegOpt_t *opt)
{
ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt);
void *new_seg;
ERTS_MSEG_LOCK(ma);
ERTS_DBG_MA_CHK_THR_ACCESS(ma);
- new_seg = mseg_realloc(ma, atype, seg, old_size, new_size_p, opt);
+ new_seg = mseg_realloc(ma, atype, seg, old_size, new_size_p, flags, opt);
ERTS_MSEG_UNLOCK(ma);
return new_seg;
}
void *
erts_mseg_realloc(ErtsAlcType_t atype, void *seg,
- Uint old_size, Uint *new_size_p)
+ Uint old_size, Uint *new_size_p, Uint flags)
{
return erts_mseg_realloc_opt(atype, seg, old_size, new_size_p,
- &erts_mseg_default_opt);
-}
-
-void
-erts_mseg_clear_cache(void)
-{
- ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_SS();
- MemKind* mk;
-
-start:
-
- ERTS_MSEG_LOCK(ma);
- ERTS_DBG_MA_CHK_THR_ACCESS(ma);
- for (mk=ma->mk_list; mk; mk=mk->next) {
- mseg_clear_cache(mk);
- }
- ERTS_MSEG_UNLOCK(ma);
-
- if (ma->ix != 0) {
- ma = ERTS_MSEG_ALLCTR_IX(0);
- goto start;
- }
+ flags, &erts_mseg_default_opt);
}
Uint
@@ -1496,28 +1553,32 @@ erts_mseg_no(const ErtsMsegOpt_t *opt)
Uint
erts_mseg_unit_size(void)
{
- return page_size;
+ return MSEG_ALIGNED_SIZE;
}
static void mem_kind_init(ErtsMsegAllctr_t *ma, MemKind* mk, const char* name)
{
- unsigned i;
+ int i;
- mk->cache = NULL;
- mk->cache_end = NULL;
- mk->max_cached_seg_size = 0;
- mk->min_cached_seg_size = ~((Uint) 0);
- mk->cache_size = 0;
- mk->cache_hits = 0;
+ for (i = 0; i < CACHE_AREAS; i++) {
+ mk->cache_area[i] = NULL;
+ }
+
+ mk->cache_free = NULL;
- if (ma->max_cache_size > 0) {
- for (i = 0; i < ma->max_cache_size - 1; i++)
- mk->cache_descs[i].next = &mk->cache_descs[i + 1];
- mk->cache_descs[ma->max_cache_size - 1].next = NULL;
- mk->free_cache_descs = &mk->cache_descs[0];
+ ASSERT(ma->max_cache_size <= MAX_CACHE_SIZE);
+
+ for (i = 0; i < ma->max_cache_size; i++) {
+ mk->cache[i].seg = NULL;
+ mk->cache[i].size = 0;
+ mk->cache[i].next = mk->cache_free;
+ mk->cache_free = &(mk->cache[i]);
}
- else
- mk->free_cache_descs = NULL;
+
+ mk->cache_unpowered = NULL;
+
+ mk->cache_size = 0;
+ mk->cache_hits = 0;
mk->segments.current.watermark = 0;
mk->segments.current.no = 0;
@@ -1570,15 +1631,10 @@ erts_mseg_init(ErtsMsegInit_t *init)
initialize_pmmap();
#endif
- page_size = GET_PAGE_SIZE;
+ if (!IS_2POW(GET_PAGE_SIZE))
+ erl_exit(ERTS_ABORT_EXIT, "erts_mseg: Unexpected page_size %beu\n", GET_PAGE_SIZE);
- page_shift = 1;
- while ((page_size >> page_shift) != 1) {
- if ((page_size & (1 << (page_shift - 1))) != 0)
- erl_exit(ERTS_ABORT_EXIT,
- "erts_mseg: Unexpected page_size %beu\n", page_size);
- page_shift++;
- }
+ ASSERT((MSEG_ALIGNED_SIZE % GET_PAGE_SIZE) == 0);
for (i = 0; i < no_mseg_allocators; i++) {
ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_IX(i);
@@ -1663,7 +1719,7 @@ erts_mseg_test(unsigned long op,
case 0x400: /* Have erts_mseg */
return (unsigned long) 1;
case 0x401:
- return (unsigned long) erts_mseg_alloc(ERTS_ALC_A_INVALID, (Uint *) a1);
+ return (unsigned long) erts_mseg_alloc(ERTS_ALC_A_INVALID, (Uint *) a1, (Uint) 0);
case 0x402:
erts_mseg_dealloc(ERTS_ALC_A_INVALID, (void *) a1, (Uint) a2);
return (unsigned long) 0;
@@ -1671,7 +1727,8 @@ erts_mseg_test(unsigned long op,
return (unsigned long) erts_mseg_realloc(ERTS_ALC_A_INVALID,
(void *) a1,
(Uint) a2,
- (Uint *) a3);
+ (Uint *) a3,
+ (Uint) 0);
case 0x404:
erts_mseg_clear_cache();
return (unsigned long) 0;
@@ -1707,7 +1764,40 @@ erts_mseg_test(unsigned long op,
* mapping tricks.
*/
-/*#define HARDDEBUG 1*/
+/* #define HARDDEBUG 1 */
+
+#ifdef HARDDEBUG
+static void dump_freelist(void)
+{
+ FreeBlock *p = first;
+
+ while (p) {
+ fprintf(stderr, "p = %p\r\np->num = %ld\r\np->next = %p\r\n\r\n",
+ (void *) p, (unsigned long) p->num, (void *) p->next);
+ p = p->next;
+ }
+}
+
+#define HARDDEBUG_HW_INCOMPLETE_ALIGNMENT(PTR, SZ) \
+ fprintf(stderr,"Mapping of address %p with size %ld " \
+ "does not map complete pages (%s:%d)\r\n", \
+ (void *) (PTR), (unsigned long) (SZ),__FILE__, __LINE__)
+
+#define HARDDEBUG_HW_UNALIGNED_ALIGNMENT(PTR, SZ) \
+ fprintf(stderr,"Mapping of address %p with size %ld " \
+ "is not page aligned (%s:%d)\r\n", \
+ (void *) (PTR), (unsigned long) (SZ),__FILE__, __LINE__)
+
+#define HARDDEBUG_MAP_FAILED(PTR, SZ) \
+ fprintf(stderr, "Could not actually map memory " \
+ "at address %p with size %ld (%s:%d) ..\r\n", \
+ (void *) (PTR), (unsigned long) (SZ),__FILE__, __LINE__)
+#else
+#define HARDDEBUG_HW_INCOMPLETE_ALIGNMENT(PTR, SZ) do{}while(0)
+#define HARDDEBUG_HW_UNALIGNED_ALIGNMENT(PTR, SZ) do{}while(0)
+#define HARDDEBUG_MAP_FAILED(PTR, SZ) do{}while(0)
+#endif
+
#ifdef __APPLE__
#define MAP_ANONYMOUS MAP_ANON
@@ -1726,49 +1816,20 @@ typedef struct _free_block {
struct _free_block *next;
} FreeBlock;
-/* Assigned once and for all */
-static size_t pagsz;
-
/* Protect with lock */
static FreeBlock *first;
-static size_t round_up_to_pagesize(size_t size)
-{
- size_t x = size / pagsz;
-
- if ((size % pagsz)) {
- ++x;
- }
-
- return pagsz * x;
-}
-
-static size_t round_down_to_pagesize(size_t size)
-{
- size_t x = size / pagsz;
-
- return pagsz * x;
-}
-
static void *do_map(void *ptr, size_t sz)
{
void *res;
- if (round_up_to_pagesize(sz) != sz) {
-#ifdef HARDDEBUG
- fprintf(stderr,"Mapping of address %p with size %ld "
- "does not map complete pages\r\n",
- (void *) ptr, (unsigned long) sz);
-#endif
+ if (ALIGNED_CEILING(sz) != sz) {
+ HARDDEBUG_HW_INCOMPLETE_ALIGNMENT(ptr, sz);
return NULL;
}
- if (((unsigned long) ptr) % pagsz) {
-#ifdef HARDDEBUG
- fprintf(stderr,"Mapping of address %p with size %ld "
- "is not page aligned\r\n",
- (void *) ptr, (unsigned long) sz);
-#endif
+ if (((unsigned long) ptr) % MSEG_ALIGNED_SIZE) {
+ HARDDEBUG_HW_UNALIGNED_ALIGNMENT(ptr, sz);
return NULL;
}
@@ -1782,10 +1843,7 @@ static void *do_map(void *ptr, size_t sz)
#endif
if (res == MAP_FAILED) {
-#ifdef HARDDEBUG
- fprintf(stderr,"Mapping of address %p with size %ld failed!\r\n",
- (void *) ptr, (unsigned long) sz);
-#endif
+ HARDDEBUG_MAP_FAILED(ptr, sz);
return NULL;
}
@@ -1796,35 +1854,22 @@ static int do_unmap(void *ptr, size_t sz)
{
void *res;
- if (round_up_to_pagesize(sz) != sz) {
-#ifdef HARDDEBUG
- fprintf(stderr,"Mapping of address %p with size %ld "
- "does not map complete pages\r\n",
- (void *) ptr, (unsigned long) sz);
-#endif
+ if (ALIGNED_CEILING(sz) != sz) {
+ HARDDEBUG_HW_INCOMPLETE_ALIGNMENT(ptr, sz);
return 1;
}
- if (((unsigned long) ptr) % pagsz) {
-#ifdef HARDDEBUG
- fprintf(stderr,"Mapping of address %p with size %ld "
- "is not page aligned\r\n",
- (void *) ptr, (unsigned long) sz);
-#endif
+ if (((unsigned long) ptr) % MSEG_ALIGNED_SIZE) {
+ HARDDEBUG_HW_UNALIGNED_ALIGNMENT(ptr, sz);
return 1;
}
-
res = mmap(ptr, sz,
- PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE
- | MAP_FIXED,
+ PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
-1 , 0);
if (res == MAP_FAILED) {
-#ifdef HARDDEBUG
- fprintf(stderr,"Mapping of address %p with size %ld failed!\r\n",
- (void *) ptr, (unsigned long) sz);
-#endif
+ HARDDEBUG_MAP_FAILED(ptr, sz);
return 1;
}
@@ -1862,8 +1907,6 @@ static int initialize_pmmap(void)
size_t rsz;
FreeBlock *initial;
-
- pagsz = getpagesize();
SET_RANGE_MIN();
if (sizeof(void *) != 8) {
erl_exit(1,"Halfword emulator cannot be run in 32bit mode");
@@ -1872,15 +1915,15 @@ static int initialize_pmmap(void)
p = (char *) RANGE_MIN;
q = (char *) RANGE_MAX;
- rsz = round_down_to_pagesize(q - p);
+ rsz = ALIGNED_FLOOR(q - p);
- rptr = mmap((void *) p, rsz,
+ rptr = mmap_align(NULL, (void *) p, rsz,
PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS |
MAP_NORESERVE | EXTRA_MAP_FLAGS,
-1 , 0);
#ifdef HARDDEBUG
printf("p=%p, rsz = %ld, pages = %ld, got range = %p -> %p\r\n",
- p, (unsigned long) rsz, (unsigned long) (rsz / pagsz),
+ p, (unsigned long) rsz, (unsigned long) (rsz / MSEG_ALIGNED_SIZE),
(void *) rptr, (void*)(rptr + rsz));
#endif
if ((UWord)(rptr + rsz) > RANGE_MAX) {
@@ -1892,39 +1935,27 @@ static int initialize_pmmap(void)
munmap((void*)RANGE_MAX, rsz - rsz_trunc);
rsz = rsz_trunc;
}
- if (!do_map(rptr,pagsz)) {
+ if (!do_map(rptr, MSEG_ALIGNED_SIZE)) {
erl_exit(1,"Could not actually mmap first page for halfword emulator...\n");
}
initial = (FreeBlock *) rptr;
- initial->num = (rsz / pagsz);
+ initial->num = (rsz / MSEG_ALIGNED_SIZE);
initial->next = NULL;
first = initial;
INIT_LOCK();
return 0;
}
-#ifdef HARDDEBUG
-static void dump_freelist(void)
-{
- FreeBlock *p = first;
-
- while (p) {
- printf("p = %p\r\np->num = %ld\r\np->next = %p\r\n\r\n",
- (void *) p, (unsigned long) p->num, (void *) p->next);
- p = p->next;
- }
-}
-#endif
-
-
static void *pmmap(size_t size)
{
- size_t real_size = round_up_to_pagesize(size);
- size_t num_pages = real_size / pagsz;
+ size_t real_size = ALIGNED_CEILING(size);
+ size_t num_pages = real_size / MSEG_ALIGNED_SIZE;
FreeBlock **block;
FreeBlock *tail;
FreeBlock *res;
+
TAKE_LOCK();
+
for (block = &first;
*block != NULL && (*block)->num < num_pages;
block = &((*block)->next))
@@ -1935,29 +1966,25 @@ static void *pmmap(size_t size)
}
if ((*block)->num == num_pages) {
/* nice, perfect fit */
- res = *block;
+ res = *block;
*block = (*block)->next;
} else {
tail = (FreeBlock *) (((char *) ((void *) (*block))) + real_size);
- if (!do_map(tail,pagsz)) {
-#ifdef HARDDEBUG
- fprintf(stderr, "Could not actually allocate page at %p...\r\n",
- (void *) tail);
-#endif
+ if (!do_map(tail, MSEG_ALIGNED_SIZE)) {
+ HARDDEBUG_MAP_FAILED(tail, MSEG_ALIGNED_SIZE);
RELEASE_LOCK();
return NULL;
}
- tail->num = (*block)->num - num_pages;
+ tail->num = (*block)->num - num_pages;
tail->next = (*block)->next;
res = *block;
*block = tail;
}
+
RELEASE_LOCK();
- if (!do_map(res,real_size)) {
-#ifdef HARDDEBUG
- fprintf(stderr, "Could not actually allocate %ld at %p...\r\n",
- (unsigned long) real_size, (void *) res);
-#endif
+
+ if (!do_map(res, real_size)) {
+ HARDDEBUG_MAP_FAILED(res, real_size);
return NULL;
}
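
pmmap scans a singly linked free list of super-aligned runs and either unlinks an exact fit or splits off the tail as a new free run. The condensed sketch below shows just that first-fit split; the locking and the do_map() calls that actually back the memory are elided, and the names and unit size are simplified assumptions.

#include <stddef.h>
#include <stdlib.h>

#define SUPER_ALIGN ((size_t)1 << 18)    /* assumed MSEG_ALIGNED_SIZE (256 kB) */

typedef struct free_block {
    size_t num;                          /* length of the run, in aligned units */
    struct free_block *next;
} free_block_t;

/* First fit: take num_units aligned units from the free list, either by
 * unlinking an exact fit or by splitting off the tail as a new free run. */
static void *first_fit_take(free_block_t **first, size_t num_units)
{
    free_block_t **block, *res, *tail;

    for (block = first; *block && (*block)->num < num_units; block = &(*block)->next)
        ;
    if (!*block)
        return NULL;                               /* no run is large enough */

    res = *block;
    if (res->num == num_units) {
        *block = res->next;                        /* perfect fit: unlink it */
    } else {
        tail = (free_block_t *)((char *)res + num_units * SUPER_ALIGN);
        tail->num = res->num - num_units;          /* leftover becomes a new run */
        tail->next = res->next;
        *block = tail;
    }
    return res;
}

int main(void)
{
    char *arena = malloc(8 * SUPER_ALIGN);         /* stands in for a mapped range */
    free_block_t *first;
    void *p;

    if (!arena)
        return 1;
    first = (free_block_t *)arena;
    first->num = 8;
    first->next = NULL;
    p = first_fit_take(&first, 3);                 /* takes the first 3 units */
    return (p == arena
            && first == (free_block_t *)(arena + 3 * SUPER_ALIGN)
            && first->num == 5) ? 0 : 1;
}
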
@@ -1966,15 +1993,17 @@ static void *pmmap(size_t size)
static int pmunmap(void *p, size_t size)
{
- size_t real_size = round_up_to_pagesize(size);
- size_t num_pages = real_size / pagsz;
+ size_t real_size = ALIGNED_CEILING(size);
+ size_t num_pages = real_size / MSEG_ALIGNED_SIZE;
+
FreeBlock *block;
FreeBlock *last;
FreeBlock *nb = (FreeBlock *) p;
ASSERT(((unsigned long)p & CHECK_POINTER_MASK)==0);
- if (real_size > pagsz) {
- if (do_unmap(((char *) p) + pagsz,real_size - pagsz)) {
+
+ if (real_size > MSEG_ALIGNED_SIZE) {
+ if (do_unmap(((char *) p) + MSEG_ALIGNED_SIZE, real_size - MSEG_ALIGNED_SIZE)) {
return 1;
}
}
@@ -1993,7 +2022,7 @@ static int pmunmap(void *p, size_t size)
/* Merge new free block with following */
nb->num = block->num + num_pages;
nb->next = block->next;
- if (do_unmap(block,pagsz)) {
+ if (do_unmap(block, MSEG_ALIGNED_SIZE)) {
RELEASE_LOCK();
return 1;
}
@@ -2003,11 +2032,11 @@ static int pmunmap(void *p, size_t size)
nb->next = block;
}
if (last != NULL) {
- if (p == ((void *) (((char *) last) + (last->num * pagsz)))) {
+ if (p == ((void *) (((char *) last) + (last->num * MSEG_ALIGNED_SIZE)))) {
/* Merge with previous */
last->num += nb->num;
last->next = nb->next;
- if (do_unmap(nb,pagsz)) {
+ if (do_unmap(nb, MSEG_ALIGNED_SIZE)) {
RELEASE_LOCK();
return 1;
}
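
pmunmap inserts the returned run back into the address-ordered free list and coalesces it with the neighbouring run on either side when they are adjacent. A sketch of just that list surgery follows, with the per-unit unmapping elided and names simplified; it is an illustration of the technique, not the real function.

#include <stddef.h>
#include <stdlib.h>

#define SUPER_ALIGN ((size_t)1 << 18)    /* assumed MSEG_ALIGNED_SIZE (256 kB) */

typedef struct free_block {
    size_t num;                          /* length of the run, in aligned units */
    struct free_block *next;
} free_block_t;

/* Insert a returned run of num_units at address p into the address ordered
 * free list, merging with the following and/or preceding run when adjacent. */
static void free_list_insert(free_block_t **first, void *p, size_t num_units)
{
    free_block_t *nb = (free_block_t *)p;
    free_block_t *cur = *first, *last = NULL;

    while (cur && (void *)cur < p) {               /* find insertion point */
        last = cur;
        cur = cur->next;
    }
    if (cur && (char *)p + num_units * SUPER_ALIGN == (char *)cur) {
        nb->num = num_units + cur->num;            /* merge with following run */
        nb->next = cur->next;
    } else {
        nb->num = num_units;
        nb->next = cur;
    }
    if (last && (char *)last + last->num * SUPER_ALIGN == (char *)p) {
        last->num += nb->num;                      /* merge with preceding run */
        last->next = nb->next;
    } else if (last) {
        last->next = nb;
    } else {
        *first = nb;
    }
}

int main(void)
{
    char *arena = malloc(4 * SUPER_ALIGN);
    free_block_t *first = NULL;

    if (!arena)
        return 1;
    free_list_insert(&first, arena, 1);                    /* unit [0,1)        */
    free_list_insert(&first, arena + 2 * SUPER_ALIGN, 1);  /* unit [2,3)        */
    free_list_insert(&first, arena + SUPER_ALIGN, 1);      /* [1,2) bridges all */
    return (first == (free_block_t *)arena && first->num == 3) ? 0 : 1;
}
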
@@ -2024,10 +2053,10 @@ static int pmunmap(void *p, size_t size)
static void *pmremap(void *old_address, size_t old_size,
size_t new_size)
{
- size_t new_real_size = round_up_to_pagesize(new_size);
- size_t new_num_pages = new_real_size / pagsz;
- size_t old_real_size = round_up_to_pagesize(old_size);
- size_t old_num_pages = old_real_size / pagsz;
+ size_t new_real_size = ALIGNED_CEILING(new_size);
+ size_t new_num_pages = new_real_size / MSEG_ALIGNED_SIZE;
+ size_t old_real_size = ALIGNED_CEILING(old_size);
+ size_t old_num_pages = old_real_size / MSEG_ALIGNED_SIZE;
if (new_num_pages == old_num_pages) {
return old_address;
} else if (new_num_pages < old_num_pages) { /* Shrink */
@@ -2045,8 +2074,8 @@ static void *pmremap(void *old_address, size_t old_size,
(*block) > ((FreeBlock *)(((char *) vnfb) + nfb_real_size))) {
/* Normal link in */
if (nfb_pages > 1) {
- if (do_unmap((void *)(((char *) vnfb) + pagsz),
- (nfb_pages - 1)*pagsz)) {
+ if (do_unmap((void *)(((char *) vnfb) + MSEG_ALIGNED_SIZE),
+ (nfb_pages - 1)*MSEG_ALIGNED_SIZE)) {
return NULL;
}
}
@@ -2058,8 +2087,8 @@ static void *pmremap(void *old_address, size_t old_size,
nfb->num = nfb_pages + (*block)->num;
/* unmap also the first page of the next freeblock */
(*block) = nfb;
- if (do_unmap((void *)(((char *) vnfb) + pagsz),
- nfb_pages*pagsz)) {
+ if (do_unmap((void *)(((char *) vnfb) + MSEG_ALIGNED_SIZE),
+ nfb_pages*MSEG_ALIGNED_SIZE)) {
return NULL;
}
}
@@ -2094,9 +2123,9 @@ static void *pmremap(void *old_address, size_t old_size,
size_t remaining_pages = (*block)->num -
(new_num_pages - old_num_pages);
if (!remaining_pages) {
- void *p = (void *) (((char *) (*block)) + pagsz);
+ void *p = (void *) (((char *) (*block)) + MSEG_ALIGNED_SIZE);
void *n = (*block)->next;
- size_t x = ((*block)->num - 1) * pagsz;
+ size_t x = ((*block)->num - 1) * MSEG_ALIGNED_SIZE;
if (x > 0) {
if (do_map(p,x) == NULL) {
RELEASE_LOCK();
@@ -2108,7 +2137,7 @@ static void *pmremap(void *old_address, size_t old_size,
FreeBlock *nfb = (FreeBlock *) ((void *)
(((char *) old_address) +
new_real_size));
- void *p = (void *) (((char *) (*block)) + pagsz);
+ void *p = (void *) (((char *) (*block)) + MSEG_ALIGNED_SIZE);
if (do_map(p,new_real_size - old_real_size) == NULL) {
RELEASE_LOCK();
return NULL;
@@ -2122,5 +2151,4 @@ static void *pmremap(void *old_address, size_t old_size,
}
}
}
-
#endif /* HALFWORD_HEAP */
diff --git a/erts/emulator/sys/common/erl_mseg.h b/erts/emulator/sys/common/erl_mseg.h
index 741080fb78..6f373f13f9 100644
--- a/erts/emulator/sys/common/erl_mseg.h
+++ b/erts/emulator/sys/common/erl_mseg.h
@@ -32,12 +32,35 @@
#if HAVE_MMAP
# define HAVE_ERTS_MSEG 1
+# define HAVE_SUPER_ALIGNED_MB_CARRIERS 1
#else
# define HAVE_ERTS_MSEG 0
+# define HAVE_SUPER_ALIGNED_MB_CARRIERS 0
+#endif
+
+#if HAVE_SUPER_ALIGNED_MB_CARRIERS
+# define MSEG_ALIGN_BITS (18)
+ /* Affects hard limits for sbct and lmbcs documented in erts_alloc.xml */
+#else
+/* If we don't use super aligned multiblock carriers
+ * we will mmap with page size alignment (and thus use corresponding
+ * align bits).
+ *
+ * The current implementation needs this to be a constant, and this
+ * configuration is only used for developer testing, so assuming a
+ * page size of 4096 bytes (12 bits) is fine.
+ */
+# define MSEG_ALIGN_BITS (12)
#endif
#if HAVE_ERTS_MSEG
+#define MSEG_ALIGNED_SIZE (1 << MSEG_ALIGN_BITS)
+
+#define ERTS_MSEG_FLG_NONE ((Uint)(0))
+#define ERTS_MSEG_FLG_2POW ((Uint)(1 << 0))
+
+
#define ERTS_MSEG_VSN_STR "0.9"
typedef struct {
@@ -68,13 +91,13 @@ typedef struct {
extern const ErtsMsegOpt_t erts_mseg_default_opt;
-void *erts_mseg_alloc(ErtsAlcType_t, Uint *);
-void *erts_mseg_alloc_opt(ErtsAlcType_t, Uint *, const ErtsMsegOpt_t *);
+void *erts_mseg_alloc(ErtsAlcType_t, Uint *, Uint);
+void *erts_mseg_alloc_opt(ErtsAlcType_t, Uint *, Uint, const ErtsMsegOpt_t *);
void erts_mseg_dealloc(ErtsAlcType_t, void *, Uint);
void erts_mseg_dealloc_opt(ErtsAlcType_t, void *, Uint, const ErtsMsegOpt_t *);
-void *erts_mseg_realloc(ErtsAlcType_t, void *, Uint, Uint *);
+void *erts_mseg_realloc(ErtsAlcType_t, void *, Uint, Uint *, Uint);
void *erts_mseg_realloc_opt(ErtsAlcType_t, void *, Uint, Uint *,
- const ErtsMsegOpt_t *);
+ Uint, const ErtsMsegOpt_t *);
void erts_mseg_clear_cache(void);
void erts_mseg_cache_check(void);
Uint erts_mseg_no( const ErtsMsegOpt_t *);
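
The widened alloc/realloc entry points carry a new Uint flags argument; ERTS_MSEG_FLG_2POW presumably asks for a segment whose size is rounded up to a power of two, which is what lets cached segments be bucketed by size. A standalone sketch of the round-up such a request implies (the helper below is ours, not part of erl_mseg):

#include <assert.h>
#include <stddef.h>

/* Round a request up to the next power of two, as a 2POW-flagged
 * segment request would effectively do. */
static size_t ceil_2pow(size_t sz)
{
    size_t p = 1;
    while (p < sz)
        p <<= 1;
    return p;
}

int main(void)
{
    assert(ceil_2pow(1000) == 1024);
    assert(ceil_2pow(1 << 18) == (1 << 18));        /* already a power of two */
    assert(ceil_2pow((1 << 18) + 1) == (1 << 19));
    return 0;
}
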
diff --git a/erts/emulator/test/alloc_SUITE_data/allocator_test.h b/erts/emulator/test/alloc_SUITE_data/allocator_test.h
index cd4a91d34a..c0396ddb61 100644
--- a/erts/emulator/test/alloc_SUITE_data/allocator_test.h
+++ b/erts/emulator/test/alloc_SUITE_data/allocator_test.h
@@ -60,9 +60,9 @@ typedef void* erts_cond;
#define IS_MMAP_C(C) ((Ulong) ALC_TEST1(0x00a, (C)))
#define C_SZ(C) ((Ulong) ALC_TEST1(0x00b, (C)))
#define SBC2BLK(A, C) ((Block_t *) ALC_TEST2(0x00c, (A), (C)))
-#define BLK2SBC(A, B) ((Carrier_t *) ALC_TEST2(0x00d, (A), (B)))
-#define MBC2FBLK(A, C) ((Block_t *) ALC_TEST2(0x00e, (A), (C)))
-#define FBLK2MBC(A, B) ((Carrier_t *) ALC_TEST2(0x00f, (A), (B)))
+#define BLK_TO_SBC(A, B) ((Carrier_t *) ALC_TEST2(0x00d, (A), (B)))
+#define MBC_TO_FIRST_BLK(A, C) ((Block_t *) ALC_TEST2(0x00e, (A), (C)))
+#define FIRST_BLK_TO_MBC(A, B) ((Carrier_t *) ALC_TEST2(0x00f, (A), (B)))
#define FIRST_MBC(A) ((Carrier_t *) ALC_TEST1(0x010, (A)))
#define LAST_MBC(A) ((Carrier_t *) ALC_TEST1(0x011, (A)))
#define FIRST_SBC(A) ((Carrier_t *) ALC_TEST1(0x012, (A)))
@@ -73,7 +73,7 @@ typedef void* erts_cond;
#define MIN_BLK_SZ(A) ((Ulong) ALC_TEST1(0x017, (A)))
#define NXT_BLK(B) ((Block_t *) ALC_TEST1(0x018, (B)))
#define PREV_BLK(B) ((Block_t *) ALC_TEST1(0x019, (B)))
-#define IS_FIRST_BLK(B) ((Ulong) ALC_TEST1(0x01a, (B)))
+#define IS_MBC_FIRST_BLK(A,B) ((Ulong) ALC_TEST2(0x01a, (A), (B)))
#define UNIT_SZ ((Ulong) ALC_TEST0(0x01b))
/* From erl_goodfit_alloc.c */
diff --git a/erts/emulator/test/alloc_SUITE_data/basic.c b/erts/emulator/test/alloc_SUITE_data/basic.c
index 4a5e888161..0c27665712 100644
--- a/erts/emulator/test/alloc_SUITE_data/basic.c
+++ b/erts/emulator/test/alloc_SUITE_data/basic.c
@@ -44,7 +44,7 @@ testcase_run(TestCaseState_t *tcs)
c = FIRST_MBC(a);
ASSERT(tcs, !NEXT_C(c));
- blk = MBC2FBLK(a, c);
+ blk = MBC_TO_FIRST_BLK(a, c);
ASSERT(tcs, IS_LAST_BLK(blk));
ASSERT(tcs, IS_FREE_BLK(blk));
diff --git a/erts/emulator/test/alloc_SUITE_data/bucket_mask.c b/erts/emulator/test/alloc_SUITE_data/bucket_mask.c
index b214f87e4a..34979cacf1 100644
--- a/erts/emulator/test/alloc_SUITE_data/bucket_mask.c
+++ b/erts/emulator/test/alloc_SUITE_data/bucket_mask.c
@@ -22,7 +22,7 @@
#include "allocator_test.h"
#include <stdio.h>
-#ifdef __WIN32__ && SIZEOF_VOID_P == 8
+#if defined(__WIN32__) && SIZEOF_VOID_P == 8
/* Use a larger threshold for win64 as block alignment
is 16 bytes and not 8 */
#define SBCT ((1024*1024))
@@ -48,10 +48,16 @@ testcase_cleanup(TestCaseState_t *tcs)
void
testcase_run(TestCaseState_t *tcs)
{
- void *tmp;
- void **fence;
+ typedef struct linked_block {
+ struct linked_block* next;
+ }Linked;
+ Linked* link;
+ Linked* fence_list;
+ Linked* pad_list;
+ void* tmp;
void **blk;
Ulong sz;
+ Ulong residue;
Ulong smbcs;
int i;
int bi;
@@ -73,7 +79,7 @@ testcase_run(TestCaseState_t *tcs)
ASSERT(tcs, a);
min_blk_sz = MIN_BLK_SZ(a);
- smbcs = 2*(no_bkts*sizeof(void *) + min_blk_sz) + min_blk_sz;
+ smbcs = (no_bkts*sizeof(void *) + min_blk_sz) + min_blk_sz;
for (i = 0; i < no_bkts; i++) {
sz = BKT_MIN_SZ(a, i);
if (sz >= sbct)
@@ -98,26 +104,42 @@ testcase_run(TestCaseState_t *tcs)
tcs->extra = (void *) a;
ASSERT(tcs, a);
+
blk = (void **) ALLOC(a, no_bkts*sizeof(void *));
- fence = (void **) ALLOC(a, no_bkts*sizeof(void *));
- ASSERT(tcs, blk && fence);
+ ASSERT(tcs, blk);
+ fence_list = NULL;
testcase_printf(tcs, "Allocating blocks and fences\n");
for (i = 0; i < bi_tests; i++) {
sz = BKT_MIN_SZ(a, i);
blk[i] = ALLOC(a, sz - ablk_hdr_sz);
- fence[i] = ALLOC(a, 1);
- ASSERT(tcs, blk[i] && fence[i]);
+ link = (Linked*) ALLOC(a, sizeof(Linked));
+ ASSERT(tcs, blk[i] && link);
+ link->next = fence_list;
+ fence_list = link;
}
- tmp = (void *) UMEM2BLK(fence[bi_tests - 1]);
- tmp = (void *) NXT_BLK((Block_t *) tmp);
- ASSERT(tcs, IS_LAST_BLK(tmp));
- sz = BLK_SZ((Block_t *) tmp);
- testcase_printf(tcs, "Allocating leftover size = %lu\n", sz);
- tmp = ALLOC(a, sz - ablk_hdr_sz);
- ASSERT(tcs, tmp);
+ pad_list = 0;
+ do {
+ tmp = (void *) UMEM2BLK(link); /* last allocated */
+ tmp = (void *) NXT_BLK((Block_t *) tmp);
+ ASSERT(tcs, IS_LAST_BLK(tmp));
+ sz = BLK_SZ((Block_t *) tmp);
+ if (sz >= sbct) {
+ residue = sz;
+ sz = sbct - min_blk_sz;
+ residue -= sz;
+ }
+ else {
+ residue = 0;
+ }
+ testcase_printf(tcs, "Allocating leftover size = %lu, residue = %lu\n", sz, residue);
+ link = (Linked*) ALLOC(a, sz - ablk_hdr_sz);
+ ASSERT(tcs, link);
+ link->next = pad_list;
+ pad_list = link;
+ } while (residue);
bi = FIND_BKT(a, 0);
ASSERT(tcs, bi < 0);
@@ -135,16 +157,23 @@ testcase_run(TestCaseState_t *tcs)
for (i = 0; i < bi_tests; i++) {
FREE(a, blk[i]);
- FREE(a, fence[i]);
+ }
+ while (fence_list) {
+ link = fence_list;
+ fence_list = link->next;
+ FREE(a, link);
}
FREE(a, (void *) blk);
- FREE(a, (void *) fence);
bi = FIND_BKT(a, 0);
ASSERT(tcs, bi == no_bkts - 1);
- FREE(a, tmp);
+ while (pad_list) {
+ link = pad_list;
+ pad_list = link->next;
+ FREE(a, link);
+ }
bi = FIND_BKT(a, 0);
ASSERT(tcs, bi < 0);
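
The reworked bucket_mask test no longer keeps a parallel fence array; each fence or padding allocation stores a next pointer in its own first word, so the blocks can later be walked and freed one by one. A tiny standalone sketch of that intrusive list pattern, using plain malloc/free in place of the test allocator:

#include <stdlib.h>

typedef struct linked_block {
    struct linked_block *next;
} Linked;

int main(void)
{
    Linked *list = NULL, *link;
    int i;

    for (i = 0; i < 100; i++) {             /* allocate and push 100 blocks */
        link = malloc(sizeof(Linked));
        if (!link)
            return 1;
        link->next = list;
        list = link;
    }
    while (list) {                          /* walk the list and free each block */
        link = list;
        list = link->next;
        free(link);
    }
    return 0;
}
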
diff --git a/erts/emulator/test/alloc_SUITE_data/coalesce.c b/erts/emulator/test/alloc_SUITE_data/coalesce.c
index 6f35d3279b..981fa6d43e 100644
--- a/erts/emulator/test/alloc_SUITE_data/coalesce.c
+++ b/erts/emulator/test/alloc_SUITE_data/coalesce.c
@@ -54,7 +54,7 @@ setup_sequence(TestCaseState_t *tcs, Allctr_t *a, Ulong bsz, int no,
no, bsz);
c = FIRST_MBC(a);
ASSERT(tcs, !NEXT_C(c));
- blk = MBC2FBLK(a, c);
+ blk = MBC_TO_FIRST_BLK(a, c);
ASSERT(tcs, IS_LAST_BLK(blk));
for (i = 0; i < no; i++)
@@ -266,7 +266,7 @@ testcase_name(void)
void
testcase_run(TestCaseState_t *tcs)
{
- char *argv_org[] = {"-tmmbcs1024", "-tsbct2048", "-trmbcmt100", "-tas", NULL, NULL};
+ char *argv_org[] = {"-tsmbcs511","-tmmbcs511", "-tsbct512", "-trmbcmt100", "-tas", NULL, NULL};
char *alg[] = {"af", "gf", "bf", "aobf", "aoff", NULL};
int i;
@@ -276,7 +276,7 @@ testcase_run(TestCaseState_t *tcs)
char *argv[sizeof(argv_org)/sizeof(argv_org[0])];
memcpy((void *) argv, (void *) argv_org, sizeof(argv_org));
- argv[4] = alg[i];
+ argv[5] = alg[i];
testcase_printf(tcs, " *** Starting \"%s\" allocator *** \n", alg[i]);
a = START_ALC("coalesce_", 0, argv);
ASSERT(tcs, a);
diff --git a/erts/emulator/test/alloc_SUITE_data/mseg_clear_cache.c b/erts/emulator/test/alloc_SUITE_data/mseg_clear_cache.c
index 0277616bd0..7d5608f890 100644
--- a/erts/emulator/test/alloc_SUITE_data/mseg_clear_cache.c
+++ b/erts/emulator/test/alloc_SUITE_data/mseg_clear_cache.c
@@ -52,10 +52,10 @@ testcase_run(TestCaseState_t *tcs)
tcs->extra = &seg[0];
for (i = 0; i < MAX_SEGS; i++) {
- seg[i].size = 1000;
+ seg[i].size = 1 << 18;
seg[i].ptr = MSEG_ALLOC(&seg[i].size);
ASSERT(tcs, seg[i].ptr);
- ASSERT(tcs, seg[i].size >= 1000);
+ ASSERT(tcs, seg[i].size >= (1 << 18));
}
n = MSEG_NO();
diff --git a/erts/emulator/test/beam_SUITE.erl b/erts/emulator/test/beam_SUITE.erl
index 02c6e19686..3197a4c137 100644
--- a/erts/emulator/test/beam_SUITE.erl
+++ b/erts/emulator/test/beam_SUITE.erl
@@ -54,7 +54,7 @@ end_per_group(_GroupName, Config) ->
%% Verify that apply(M, F, A) is really tail recursive.
apply_last(Config) when is_list(Config) ->
- Pid=spawn(?MODULE, applied, [self(), 10000]),
+ Pid = spawn(?MODULE, applied, [self(), 10000]),
Size =
receive
{Pid, finished} ->
@@ -94,32 +94,32 @@ apply_last_bif(Config) when is_list(Config) ->
%% Test three high register numbers in a put_list instruction
%% (to test whether packing works properly).
packed_registers(Config) when is_list(Config) ->
- ?line PrivDir = ?config(priv_dir, Config),
- ?line Mod = packed_regs,
- ?line Name = filename:join(PrivDir, atom_to_list(Mod) ++ ".erl"),
+ PrivDir = ?config(priv_dir, Config),
+ Mod = packed_regs,
+ Name = filename:join(PrivDir, atom_to_list(Mod) ++ ".erl"),
%% Generate a module which generates a list of tuples.
%% put_list(A) -> [{A, 600}, {A, 999}, ... {A, 0}].
- ?line Code = gen_packed_regs(600, ["-module("++atom_to_list(Mod)++").\n",
+ Code = gen_packed_regs(600, ["-module("++atom_to_list(Mod)++").\n",
"-export([put_list/1]).\n",
"put_list(A) ->\n["]),
- ?line ok = file:write_file(Name, Code),
+ ok = file:write_file(Name, Code),
%% Compile the module.
- ?line io:format("Compiling: ~s\n", [Name]),
- ?line CompRc = compile:file(Name, [{outdir, PrivDir}, report]),
- ?line io:format("Result: ~p\n",[CompRc]),
- ?line {ok, Mod} = CompRc,
+ io:format("Compiling: ~s\n", [Name]),
+ CompRc = compile:file(Name, [{outdir, PrivDir}, report]),
+ io:format("Result: ~p\n",[CompRc]),
+ {ok, Mod} = CompRc,
%% Load it.
- ?line io:format("Loading...\n",[]),
- ?line LoadRc = code:load_abs(filename:join(PrivDir, atom_to_list(Mod))),
- ?line {module,_Module} = LoadRc,
+ io:format("Loading...\n",[]),
+ LoadRc = code:load_abs(filename:join(PrivDir, atom_to_list(Mod))),
+ {module,_Module} = LoadRc,
%% Call it and verify result.
- ?line Term = {a, b},
- ?line L = Mod:put_list(Term),
- ?line verify_packed_regs(L, Term, 600),
+ Term = {a, b},
+ L = Mod:put_list(Term),
+ verify_packed_regs(L, Term, 600),
ok.
gen_packed_regs(0, Acc) ->
@@ -131,11 +131,11 @@ verify_packed_regs([], _, -1) -> ok;
verify_packed_regs([{Term, N}| T], Term, N) ->
verify_packed_regs(T, Term, N-1);
verify_packed_regs(L, Term, N) ->
- ?line ok = io:format("Expected [{~p, ~p}|T]; got\n~p\n", [Term, N, L]),
- ?line test_server:fail().
+ ok = io:format("Expected [{~p, ~p}|T]; got\n~p\n", [Term, N, L]),
+ test_server:fail().
buildo_mucho(Config) when is_list(Config) ->
- ?line buildo_mucho_1(),
+ buildo_mucho_1(),
ok.
buildo_mucho_1() ->
@@ -206,20 +206,27 @@ buildo_mucho_1() ->
{<<>>,1},{<<>>,1},{<<>>,1},{<<>>,1},{<<>>,1},{<<>>,1},{<<>>,1},{<<>>,1}].
heap_sizes(Config) when is_list(Config) ->
- ?line Sizes = erlang:system_info(heap_sizes),
- ?line io:format("~p heap sizes\n", [length(Sizes)]),
- ?line io:format("~p\n", [Sizes]),
+ Sizes = erlang:system_info(heap_sizes),
+ io:format("~p heap sizes\n", [length(Sizes)]),
+ io:format("~p\n", [Sizes]),
%% Verify that heap sizes increase monotonically.
- ?line Largest = lists:foldl(fun(E, P) when is_integer(P), E > P -> E;
+ Largest = lists:foldl(fun(E, P) when is_integer(P), E > P -> E;
(E, []) -> E
end, [], Sizes),
- %% Verify that the largest heap size consists of 31 or 63 bits.
- ?line
- case Largest bsr (erlang:system_info(wordsize)*8-2) of
- R when R > 0 -> ok
- end,
+ %% Verify that the largest heap size consists of
+ %% - 31 bits of bytes on 32-bit archs
+ %% - at least 52 bits of bytes (48 bits is the maximum virtual address)
+ %% and at most 63 bits on 64-bit archs
+ %% heap sizes are in words
+ case erlang:system_info(wordsize) of
+ 8 ->
+ 0 = (Largest*8) bsr 63,
+ true = (Largest*8) > (1 bsl 52);
+ 4 ->
+ 1 = (Largest*4) bsr 31
+ end,
ok.
%% Thanks to Igor Goryachev.
@@ -302,10 +309,10 @@ b() ->
end.
fconv(Config) when is_list(Config) ->
- ?line do_fconv(atom),
- ?line do_fconv(nil),
- ?line do_fconv(tuple_literal),
- ?line 3.0 = do_fconv(1.0, 2.0),
+ do_fconv(atom),
+ do_fconv(nil),
+ do_fconv(tuple_literal),
+ 3.0 = do_fconv(1.0, 2.0),
ok.
do_fconv(Type) ->
@@ -325,9 +332,9 @@ do_fconv(tuple_literal, Float) when is_float(Float) ->
Float + {a,b}.
select_val(Config) when is_list(Config) ->
- ?line zero = do_select_val(0),
- ?line big = do_select_val(1 bsl 64),
- ?line integer = do_select_val(42),
+ zero = do_select_val(0),
+ big = do_select_val(1 bsl 64),
+ integer = do_select_val(42),
ok.
do_select_val(X) ->
diff --git a/erts/emulator/test/process_SUITE.erl b/erts/emulator/test/process_SUITE.erl
index 898535693f..11dd88413f 100644
--- a/erts/emulator/test/process_SUITE.erl
+++ b/erts/emulator/test/process_SUITE.erl
@@ -118,15 +118,15 @@ fun_spawn(Fun) ->
%% (unclear if this test case will actually prove anything on
%% a modern computer with lots of memory).
spawn_with_binaries(Config) when is_list(Config) ->
- ?line L = lists:duplicate(2048, 42),
- ?line TwoMeg = lists:duplicate(1024, L),
- ?line Fun = fun() -> spawn(?MODULE, binary_owner, [list_to_binary(TwoMeg)]),
+ L = lists:duplicate(2048, 42),
+ TwoMeg = lists:duplicate(1024, L),
+ Fun = fun() -> spawn(?MODULE, binary_owner, [list_to_binary(TwoMeg)]),
receive after 1 -> ok end end,
- ?line Iter = case test_server:purify_is_running() of
+ Iter = case test_server:purify_is_running() of
true -> 10;
false -> 150
end,
- ?line test_server:do_times(Iter, Fun),
+ test_server:do_times(Iter, Fun),
ok.
binary_owner(Bin) when is_binary(Bin) ->
@@ -134,87 +134,87 @@ binary_owner(Bin) when is_binary(Bin) ->
%% Tests exit/1 with a big message.
t_exit_1(Config) when is_list(Config) ->
- ?line start_spawner(),
- ?line Dog = test_server:timetrap(test_server:seconds(20)),
- ?line process_flag(trap_exit, true),
- ?line test_server:do_times(10, fun t_exit_1/0),
- ?line test_server:timetrap_cancel(Dog),
- ?line stop_spawner(),
+ start_spawner(),
+ Dog = test_server:timetrap(test_server:seconds(20)),
+ process_flag(trap_exit, true),
+ test_server:do_times(10, fun t_exit_1/0),
+ test_server:timetrap_cancel(Dog),
+ stop_spawner(),
ok.
t_exit_1() ->
- ?line Pid = fun_spawn(fun() -> exit(kb_128()) end),
- ?line Garbage = kb_128(),
- ?line receive
+ Pid = fun_spawn(fun() -> exit(kb_128()) end),
+ Garbage = kb_128(),
+ receive
{'EXIT', Pid, Garbage} -> ok
end.
%% Tests exit/2 with a lot of data in the exit message.
t_exit_2_other(Config) when is_list(Config) ->
- ?line start_spawner(),
- ?line Dog = test_server:timetrap(test_server:seconds(20)),
- ?line process_flag(trap_exit, true),
- ?line test_server:do_times(10, fun t_exit_2_other/0),
- ?line test_server:timetrap_cancel(Dog),
- ?line stop_spawner(),
+ start_spawner(),
+ Dog = test_server:timetrap(test_server:seconds(20)),
+ process_flag(trap_exit, true),
+ test_server:do_times(10, fun t_exit_2_other/0),
+ test_server:timetrap_cancel(Dog),
+ stop_spawner(),
ok.
t_exit_2_other() ->
- ?line Pid = fun_spawn(fun() -> receive x -> ok end end),
- ?line Garbage = kb_128(),
- ?line exit(Pid, Garbage),
- ?line receive
+ Pid = fun_spawn(fun() -> receive x -> ok end end),
+ Garbage = kb_128(),
+ exit(Pid, Garbage),
+ receive
{'EXIT', Pid, Garbage} -> ok
end.
%% Tests that exit(Pid, normal) does not kill another process.
t_exit_2_other_normal(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(20)),
- ?line process_flag(trap_exit, true),
- ?line Pid = fun_spawn(fun() -> receive x -> ok end end),
- ?line exit(Pid, normal),
- ?line receive
+ Dog = test_server:timetrap(test_server:seconds(20)),
+ process_flag(trap_exit, true),
+ Pid = fun_spawn(fun() -> receive x -> ok end end),
+ exit(Pid, normal),
+ receive
{'EXIT', Pid, Reason} ->
- ?line test_server:fail({process_died, Reason})
+ test_server:fail({process_died, Reason})
after 1000 ->
ok
end,
- ?line case process_info(Pid) of
+ case process_info(Pid) of
undefined ->
test_server:fail(process_died_on_normal);
List when is_list(List) ->
ok
end,
exit(Pid, kill),
- ?line test_server:timetrap_cancel(Dog),
+ test_server:timetrap_cancel(Dog),
ok.
%% Tests that we can trap an exit message sent with exit/2 from
%% the same process.
self_exit(Config) when is_list(Config) ->
- ?line start_spawner(),
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line process_flag(trap_exit, true),
- ?line test_server:do_times(200, fun self_exit/0),
- ?line test_server:timetrap_cancel(Dog),
- ?line stop_spawner(),
+ start_spawner(),
+ Dog = test_server:timetrap(test_server:seconds(10)),
+ process_flag(trap_exit, true),
+ test_server:do_times(200, fun self_exit/0),
+ test_server:timetrap_cancel(Dog),
+ stop_spawner(),
ok.
self_exit() ->
- ?line Garbage = eight_kb(),
- ?line P = self(),
- ?line true = exit(P, Garbage),
- ?line receive
+ Garbage = eight_kb(),
+ P = self(),
+ true = exit(P, Garbage),
+ receive
{'EXIT', P, Garbage} -> ok
end.
%% Tests exit(self(), normal) is equivalent to exit(normal) for a process
%% that doesn't trap exits.
normal_suicide_exit(Config) when is_list(Config) ->
- ?line process_flag(trap_exit, true),
- ?line Pid = fun_spawn(fun() -> exit(self(), normal) end),
- ?line receive
+ process_flag(trap_exit, true),
+ Pid = fun_spawn(fun() -> exit(self(), normal) end),
+ receive
{'EXIT', Pid, normal} -> ok;
Other -> test_server:fail({bad_message, Other})
end.
@@ -222,19 +222,19 @@ normal_suicide_exit(Config) when is_list(Config) ->
%% Tests exit(self(), Term) is equivalent to exit(Term) for a process
%% that doesn't trap exits.";
abnormal_suicide_exit(Config) when is_list(Config) ->
- ?line Garbage = eight_kb(),
- ?line process_flag(trap_exit, true),
- ?line Pid = fun_spawn(fun() -> exit(self(), Garbage) end),
- ?line receive
+ Garbage = eight_kb(),
+ process_flag(trap_exit, true),
+ Pid = fun_spawn(fun() -> exit(self(), Garbage) end),
+ receive
{'EXIT', Pid, Garbage} -> ok;
Other -> test_server:fail({bad_message, Other})
end.
%% Tests that exit(self(), die) cannot be caught.
t_exit_2_catch(Config) when is_list(Config) ->
- ?line process_flag(trap_exit, true),
- ?line Pid = fun_spawn(fun() -> catch exit(self(), die) end),
- ?line receive
+ process_flag(trap_exit, true),
+ Pid = fun_spawn(fun() -> catch exit(self(), die) end),
+ receive
{'EXIT', Pid, normal} ->
test_server:fail(catch_worked);
{'EXIT', Pid, die} ->
@@ -246,29 +246,29 @@ t_exit_2_catch(Config) when is_list(Config) ->
%% Tests trapping of an 'EXIT' message generated by a bad argument to
%% the abs/1 bif. The 'EXIT' message will intentionally be very big.
trap_exit_badarg(Config) when is_list(Config) ->
- ?line start_spawner(),
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line process_flag(trap_exit, true),
- ?line test_server:do_times(10, fun trap_exit_badarg/0),
- ?line test_server:timetrap_cancel(Dog),
- ?line stop_spawner(),
+ start_spawner(),
+ Dog = test_server:timetrap(test_server:seconds(10)),
+ process_flag(trap_exit, true),
+ test_server:do_times(10, fun trap_exit_badarg/0),
+ test_server:timetrap_cancel(Dog),
+ stop_spawner(),
ok.
trap_exit_badarg() ->
- ?line Pid = fun_spawn(fun() -> bad_guy(kb_128()) end),
- ?line Garbage = kb_128(),
- ?line receive
+ Pid = fun_spawn(fun() -> bad_guy(kb_128()) end),
+ Garbage = kb_128(),
+ receive
{'EXIT',Pid,{badarg,[{erlang,abs,[Garbage],Loc1},
{?MODULE,bad_guy,1,Loc2}|_]}}
when is_list(Loc1), is_list(Loc2) ->
ok;
Other ->
- ?line ok = io:format("Bad EXIT message: ~P", [Other, 30]),
- ?line test_server:fail(bad_exit_message)
+ ok = io:format("Bad EXIT message: ~P", [Other, 30]),
+ test_server:fail(bad_exit_message)
end.
bad_guy(Arg) ->
- ?line abs(Arg).
+ abs(Arg).
kb_128() ->
@@ -281,11 +281,11 @@ kb_128() ->
eight_kb() ->
B64 = lists:seq(1, 64),
- ?line B512 = {<<1>>,B64,<<2,3>>,B64,make_unaligned_sub_binary(<<4,5,6,7,8,9>>),
+ B512 = {<<1>>,B64,<<2,3>>,B64,make_unaligned_sub_binary(<<4,5,6,7,8,9>>),
B64,make_sub_binary([1,2,3,4,5,6]),
B64,make_sub_binary(lists:seq(1, ?heap_binary_size+1)),
B64,B64,B64,B64,big_binary()},
- ?line lists:duplicate(8, {B512,B512}).
+ lists:duplicate(8, {B512,B512}).
big_binary() ->
big_binary(10, [42]).
@@ -296,19 +296,19 @@ big_binary(N, Acc) ->
%% Test receiving an EXIT message when spawning a BIF with bad arguments.
trap_exit_badarg_in_bif(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line process_flag(trap_exit, true),
- ?line test_server:do_times(10, fun trap_exit_badarg_bif/0),
- ?line test_server:timetrap_cancel(Dog),
+ Dog = test_server:timetrap(test_server:seconds(10)),
+ process_flag(trap_exit, true),
+ test_server:do_times(10, fun trap_exit_badarg_bif/0),
+ test_server:timetrap_cancel(Dog),
ok.
trap_exit_badarg_bif() ->
- ?line Pid = spawn_link(erlang, node, [1]),
- ?line receive
+ Pid = spawn_link(erlang, node, [1]),
+ receive
{'EXIT', Pid, {badarg, _}} ->
ok;
Other ->
- ?line test_server:fail({unexpected, Other})
+ test_server:fail({unexpected, Other})
end.
%% The following sequences of events have crashed Beam.
@@ -321,27 +321,27 @@ trap_exit_badarg_bif() ->
%% 3) The process will crash the next time it executes 'receive'.
exit_and_timeout(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(20)),
+ Dog = test_server:timetrap(test_server:seconds(20)),
- ?line process_flag(trap_exit, true),
- ?line Parent = self(),
- ?line Low = fun_spawn(fun() -> eat_low(Parent) end),
- ?line High = fun_spawn(fun() -> eat_high(Low) end),
- ?line eat_wait_for(Low, High),
+ process_flag(trap_exit, true),
+ Parent = self(),
+ Low = fun_spawn(fun() -> eat_low(Parent) end),
+ High = fun_spawn(fun() -> eat_high(Low) end),
+ eat_wait_for(Low, High),
- ?line test_server:timetrap_cancel(Dog),
+ test_server:timetrap_cancel(Dog),
ok.
eat_wait_for(Low, High) ->
- ?line receive
- {'EXIT', Low, {you, are, dead}} ->
- ok;
- {'EXIT', High, normal} ->
- eat_wait_for(Low, High);
- Other ->
- test_server:fail({bad_message, Other})
- end.
+ receive
+ {'EXIT', Low, {you, are, dead}} ->
+ ok;
+ {'EXIT', High, normal} ->
+ eat_wait_for(Low, High);
+ Other ->
+ test_server:fail({bad_message, Other})
+ end.
eat_low(_Parent) ->
receive
@@ -374,27 +374,27 @@ loop(_, _) ->
%% Tries to send two different exit messages to a process.
%% (The second one should be ignored.)
exit_twice(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(20)),
+ Dog = test_server:timetrap(test_server:seconds(20)),
- ?line process_flag(trap_exit, true),
- ?line Low = fun_spawn(fun etwice_low/0),
- ?line High = fun_spawn(fun() -> etwice_high(Low) end),
- ?line etwice_wait_for(Low, High),
+ process_flag(trap_exit, true),
+ Low = fun_spawn(fun etwice_low/0),
+ High = fun_spawn(fun() -> etwice_high(Low) end),
+ etwice_wait_for(Low, High),
- ?line test_server:timetrap_cancel(Dog),
+ test_server:timetrap_cancel(Dog),
ok.
etwice_wait_for(Low, High) ->
- ?line receive
- {'EXIT', Low, first} ->
- ok;
- {'EXIT', Low, Other} ->
- test_server:fail({wrong_exit_reason, Other});
- {'EXIT', High, normal} ->
- etwice_wait_for(Low, High);
- Other ->
- test_server:fail({bad_message, Other})
- end.
+ receive
+ {'EXIT', Low, first} ->
+ ok;
+ {'EXIT', Low, Other} ->
+ test_server:fail({wrong_exit_reason, Other});
+ {'EXIT', High, normal} ->
+ etwice_wait_for(Low, High);
+ Other ->
+ test_server:fail({bad_message, Other})
+ end.
etwice_low() ->
etwice_low().
@@ -406,15 +406,15 @@ etwice_high(Low) ->
%% Tests the process_info/2 BIF.
t_process_info(Config) when is_list(Config) ->
- ?line [] = process_info(self(), registered_name),
- ?line register(my_name, self()),
- ?line {registered_name, my_name} = process_info(self(), registered_name),
- ?line {status, running} = process_info(self(), status),
- ?line {min_heap_size, 233} = process_info(self(), min_heap_size),
- ?line {min_bin_vheap_size, 46368} = process_info(self(), min_bin_vheap_size),
- ?line {current_function,{?MODULE,t_process_info,1}} =
+ [] = process_info(self(), registered_name),
+ register(my_name, self()),
+ {registered_name, my_name} = process_info(self(), registered_name),
+ {status, running} = process_info(self(), status),
+ {min_heap_size, 233} = process_info(self(), min_heap_size),
+ {min_bin_vheap_size,46422} = process_info(self(), min_bin_vheap_size),
+ {current_function,{?MODULE,t_process_info,1}} =
process_info(self(), current_function),
- ?line {current_function,{?MODULE,t_process_info,1}} =
+ {current_function,{?MODULE,t_process_info,1}} =
apply(erlang, process_info, [self(),current_function]),
%% current_location and current_stacktrace
@@ -425,9 +425,9 @@ t_process_info(Config) when is_list(Config) ->
verify_loc(Line2, Res2),
pi_stacktrace([{?MODULE,t_process_info,1,?LINE}]),
- ?line Gleader = group_leader(),
- ?line {group_leader, Gleader} = process_info(self(), group_leader),
- ?line {'EXIT',{badarg,_Info}} = (catch process_info('not_a_pid')),
+ Gleader = group_leader(),
+ {group_leader, Gleader} = process_info(self(), group_leader),
+ {'EXIT',{badarg,_Info}} = (catch process_info('not_a_pid')),
ok.
pi_stacktrace(Expected0) ->
@@ -509,50 +509,50 @@ process_info_looper(Parent) ->
%% Tests the process_info/1 BIF on another process with messages.
process_info_other_msg(Config) when is_list(Config) ->
Self = self(),
- ?line Pid = spawn_link(fun() -> other_process(Self) end),
+ Pid = spawn_link(fun() -> other_process(Self) end),
receive
{go_ahead,Pid} -> ok
end,
- ?line Own = {my,own,message},
+ Own = {my,own,message},
- ?line {messages,[Own]} = process_info(Pid, messages),
+ {messages,[Own]} = process_info(Pid, messages),
- ?line Garbage = kb_128(),
- ?line MsgA = {a,Garbage},
- ?line MsgB = {b,Garbage},
- ?line MsgC = {c,Garbage},
- ?line MsgD = {d,Garbage},
- ?line MsgE = {e,Garbage},
-
- ?line Pid ! MsgA,
- ?line {messages,[Own,MsgA]} = process_info(Pid, messages),
- ?line Pid ! MsgB,
- ?line {messages,[Own,MsgA,MsgB]} = process_info(Pid, messages),
- ?line Pid ! MsgC,
- ?line {messages,[Own,MsgA,MsgB,MsgC]} = process_info(Pid, messages),
- ?line Pid ! MsgD,
- ?line {messages,[Own,MsgA,MsgB,MsgC,MsgD]} = process_info(Pid, messages),
- ?line Pid ! MsgE,
- ?line {messages,[Own,MsgA,MsgB,MsgC,MsgD,MsgE]=All} = process_info(Pid, messages),
- ?line {memory,BytesOther} = process_info(Pid, memory),
- ?line {memory,BytesSelf} = process_info(self(), memory),
+ Garbage = kb_128(),
+ MsgA = {a,Garbage},
+ MsgB = {b,Garbage},
+ MsgC = {c,Garbage},
+ MsgD = {d,Garbage},
+ MsgE = {e,Garbage},
+
+ Pid ! MsgA,
+ {messages,[Own,MsgA]} = process_info(Pid, messages),
+ Pid ! MsgB,
+ {messages,[Own,MsgA,MsgB]} = process_info(Pid, messages),
+ Pid ! MsgC,
+ {messages,[Own,MsgA,MsgB,MsgC]} = process_info(Pid, messages),
+ Pid ! MsgD,
+ {messages,[Own,MsgA,MsgB,MsgC,MsgD]} = process_info(Pid, messages),
+ Pid ! MsgE,
+ {messages,[Own,MsgA,MsgB,MsgC,MsgD,MsgE]=All} = process_info(Pid, messages),
+ {memory,BytesOther} = process_info(Pid, memory),
+ {memory,BytesSelf} = process_info(self(), memory),
io:format("Memory ~p: ~p\n", [Pid,BytesOther]),
io:format("Memory ~p (self): ~p\n", [self(),BytesSelf]),
[Own,MsgA,MsgB,MsgC,MsgD,MsgE] = All,
- ?line Pid ! {self(),empty},
- ?line receive
+ Pid ! {self(),empty},
+ receive
empty -> ok
end,
- ?line {messages,[]} = process_info(Pid, messages),
+ {messages,[]} = process_info(Pid, messages),
- ?line {min_heap_size, 233} = process_info(Pid, min_heap_size),
- ?line {min_bin_vheap_size, 46368} = process_info(Pid, min_bin_vheap_size),
+ {min_heap_size, 233} = process_info(Pid, min_heap_size),
+ {min_bin_vheap_size, 46422} = process_info(Pid, min_bin_vheap_size),
- ?line Pid ! stop,
+ Pid ! stop,
ok.
process_info_other_dist_msg(Config) when is_list(Config) ->
@@ -560,52 +560,51 @@ process_info_other_dist_msg(Config) when is_list(Config) ->
%% Check that process_info can handle messages that have not been
%% decoded yet.
%%
- ?line {ok, Node} = start_node(Config),
- ?line Self = self(),
- ?line Pid = spawn_link(fun() -> other_process(Self) end),
- ?line receive {go_ahead,Pid} -> ok end,
+ {ok, Node} = start_node(Config),
+ Self = self(),
+ Pid = spawn_link(fun() -> other_process(Self) end),
+ receive {go_ahead,Pid} -> ok end,
- ?line Own = {my,own,message},
+ Own = {my,own,message},
- ?line {messages,[Own]} = process_info(Pid, messages),
- ?line Garbage = kb_128(),
- ?line MsgA = {a,self(),Garbage},
- ?line MsgB = {b,self(),Garbage},
- ?line MsgC = {c,self(),Garbage},
- ?line MsgD = {d,self(),Garbage},
- ?line MsgE = {e,self(),Garbage},
+ {messages,[Own]} = process_info(Pid, messages),
+ Garbage = kb_128(),
+ MsgA = {a,self(),Garbage},
+ MsgB = {b,self(),Garbage},
+ MsgC = {c,self(),Garbage},
+ MsgD = {d,self(),Garbage},
+ MsgE = {e,self(),Garbage},
%% We don't want the other process to decode messages itself
%% therefore we suspend it.
- ?line true = erlang:suspend_process(Pid),
- ?line spawn_link(Node, fun () ->
- Pid ! MsgA,
- Pid ! MsgB,
- Pid ! MsgC,
- Self ! check_abc
- end),
- ?line receive check_abc -> ok end,
- ?line [{status,suspended},
- {messages,[Own,MsgA,MsgB,MsgC]},
- {status,suspended}]= process_info(Pid, [status,messages,status]),
- ?line spawn_link(Node, fun () ->
- Pid ! MsgD,
- Pid ! MsgE,
- Self ! check_de
- end),
- ?line receive check_de -> ok end,
- ?line {messages,[Own,MsgA,MsgB,MsgC,MsgD,MsgE]=All}
- = process_info(Pid, messages),
- ?line true = erlang:resume_process(Pid),
- ?line Pid ! {self(), get_all_messages},
- ?line receive
+ true = erlang:suspend_process(Pid),
+ spawn_link(Node, fun () ->
+ Pid ! MsgA,
+ Pid ! MsgB,
+ Pid ! MsgC,
+ Self ! check_abc
+ end),
+ receive check_abc -> ok end,
+ [{status,suspended},
+ {messages,[Own,MsgA,MsgB,MsgC]},
+ {status,suspended}]= process_info(Pid, [status,messages,status]),
+ spawn_link(Node, fun () ->
+ Pid ! MsgD,
+ Pid ! MsgE,
+ Self ! check_de
+ end),
+ receive check_de -> ok end,
+ {messages,[Own,MsgA,MsgB,MsgC,MsgD,MsgE]=All} = process_info(Pid, messages),
+ true = erlang:resume_process(Pid),
+ Pid ! {self(), get_all_messages},
+ receive
{all_messages, AllMsgs} ->
- ?line All = AllMsgs
+ All = AllMsgs
end,
- ?line {messages,[]} = process_info(Pid, messages),
- ?line Pid ! stop,
- ?line stop_node(Node),
- ?line ok.
+ {messages,[]} = process_info(Pid, messages),
+ Pid ! stop,
+ stop_node(Node),
+ ok.
other_process(Parent) ->
@@ -652,38 +651,36 @@ process_info_2_list(doc) ->
process_info_2_list(suite) ->
[];
process_info_2_list(Config) when is_list(Config) ->
- ?line Proc = spawn(fun () ->
- receive after infinity -> ok end end),
+ Proc = spawn(fun () -> receive after infinity -> ok end end),
register(process_SUITE_process_info_2_list1, self()),
register(process_SUITE_process_info_2_list2, Proc),
- ?line erts_debug:set_internal_state(available_internal_state,true),
- ?line AllArgs = erts_debug:get_internal_state(process_info_args),
- ?line A1 = lists:sort(AllArgs) ++ [status] ++ lists:reverse(AllArgs),
+ erts_debug:set_internal_state(available_internal_state,true),
+ AllArgs = erts_debug:get_internal_state(process_info_args),
+ A1 = lists:sort(AllArgs) ++ [status] ++ lists:reverse(AllArgs),
%% Verify that argument is accepted as single atom
- ?line lists:foreach(fun (A) ->
- ?line {A, _} = process_info(Proc, A),
- ?line {A, _} = process_info(self(), A)
- end,
- A1),
+ lists:foreach(fun (A) ->
+ {A, _} = process_info(Proc, A),
+ {A, _} = process_info(self(), A)
+ end, A1),
%% Verify that order is preserved
- ?line ok = chk_pi_order(process_info(self(), A1), A1),
- ?line ok = chk_pi_order(process_info(Proc, A1), A1),
+ ok = chk_pi_order(process_info(self(), A1), A1),
+ ok = chk_pi_order(process_info(Proc, A1), A1),
%% Small arg list
- ?line A2 = [status, stack_size, trap_exit, priority],
- ?line [{status, _}, {stack_size, _}, {trap_exit, _}, {priority, _}]
+ A2 = [status, stack_size, trap_exit, priority],
+ [{status, _}, {stack_size, _}, {trap_exit, _}, {priority, _}]
= process_info(Proc, A2),
- ?line [{status, _}, {stack_size, _}, {trap_exit, _}, {priority, _}]
+ [{status, _}, {stack_size, _}, {trap_exit, _}, {priority, _}]
= process_info(self(), A2),
%% Huge arg list (note values are shared)
- ?line A3 = lists:duplicate(5000,backtrace),
- ?line V3 = process_info(Proc, A3),
- ?line 5000 = length(V3),
- ?line lists:foreach(fun ({backtrace, _}) -> ok end, V3),
- ?line ok.
+ A3 = lists:duplicate(5000,backtrace),
+ V3 = process_info(Proc, A3),
+ 5000 = length(V3),
+ lists:foreach(fun ({backtrace, _}) -> ok end, V3),
+ ok.
process_info_lock_reschedule(doc) ->
[];
@@ -692,43 +689,37 @@ process_info_lock_reschedule(suite) ->
process_info_lock_reschedule(Config) when is_list(Config) ->
%% We need a process that is running and an item that requires
%% process_info to take the main process lock.
- ?line Target1 = spawn_link(fun tok_loop/0),
- ?line Name1 = process_info_lock_reschedule_running,
- ?line register(Name1, Target1),
- ?line Target2 = spawn_link(fun () -> receive after infinity -> ok end end),
- ?line Name2 = process_info_lock_reschedule_waiting,
- ?line register(Name2, Target2),
- ?line PI = fun(_) ->
- ?line erlang:yield(),
- ?line [{registered_name, Name1}]
- = process_info(Target1, [registered_name]),
- ?line [{registered_name, Name2}]
- = process_info(Target2, [registered_name]),
- ?line erlang:yield(),
- ?line {registered_name, Name1}
- = process_info(Target1, registered_name),
- ?line {registered_name, Name2}
- = process_info(Target2, registered_name),
- ?line erlang:yield(),
- ?line [{registered_name, Name1}| _]
- = process_info(Target1),
- ?line [{registered_name, Name2}| _]
- = process_info(Target2)
- end,
- ?line lists:foreach(PI, lists:seq(1,1000)),
+ Target1 = spawn_link(fun tok_loop/0),
+ Name1 = process_info_lock_reschedule_running,
+ register(Name1, Target1),
+ Target2 = spawn_link(fun () -> receive after infinity -> ok end end),
+ Name2 = process_info_lock_reschedule_waiting,
+ register(Name2, Target2),
+ PI = fun(_) ->
+ erlang:yield(),
+ [{registered_name, Name1}] = process_info(Target1, [registered_name]),
+ [{registered_name, Name2}] = process_info(Target2, [registered_name]),
+ erlang:yield(),
+ {registered_name, Name1} = process_info(Target1, registered_name),
+ {registered_name, Name2} = process_info(Target2, registered_name),
+ erlang:yield(),
+ [{registered_name, Name1}| _] = process_info(Target1),
+ [{registered_name, Name2}| _] = process_info(Target2)
+ end,
+ lists:foreach(PI, lists:seq(1,1000)),
%% Make sure Target1 still is willing to "tok loop"
- ?line case process_info(Target1, status) of
- {status, OkStatus} when OkStatus == runnable;
- OkStatus == running;
- OkStatus == garbage_collecting ->
- ?line unlink(Target1),
- ?line unlink(Target2),
- ?line exit(Target1, bang),
- ?line exit(Target2, bang),
- ?line OkStatus;
- {status, BadStatus} ->
- ?line ?t:fail(BadStatus)
- end.
+ case process_info(Target1, status) of
+ {status, OkStatus} when OkStatus == runnable;
+ OkStatus == running;
+ OkStatus == garbage_collecting ->
+ unlink(Target1),
+ unlink(Target2),
+ exit(Target1, bang),
+ exit(Target2, bang),
+ OkStatus;
+ {status, BadStatus} ->
+ ?t:fail(BadStatus)
+ end.
pi_loop(_Name, _Pid, 0) ->
ok;
@@ -741,50 +732,50 @@ process_info_lock_reschedule2(doc) ->
process_info_lock_reschedule2(suite) ->
[];
process_info_lock_reschedule2(Config) when is_list(Config) ->
- ?line Parent = self(),
- ?line Fun = fun () ->
- receive {go, Name, Pid} -> ok end,
- pi_loop(Name, Pid, 10000),
- Parent ! {done, self()},
- receive after infinity -> ok end
- end,
- ?line P1 = spawn_link(Fun),
- ?line N1 = process_info_lock_reschedule2_1,
- ?line true = register(N1, P1),
- ?line P2 = spawn_link(Fun),
- ?line N2 = process_info_lock_reschedule2_2,
- ?line true = register(N2, P2),
- ?line P3 = spawn_link(Fun),
- ?line N3 = process_info_lock_reschedule2_3,
- ?line true = register(N3, P3),
- ?line P4 = spawn_link(Fun),
- ?line N4 = process_info_lock_reschedule2_4,
- ?line true = register(N4, P4),
- ?line P5 = spawn_link(Fun),
- ?line N5 = process_info_lock_reschedule2_5,
- ?line true = register(N5, P5),
- ?line P6 = spawn_link(Fun),
- ?line N6 = process_info_lock_reschedule2_6,
- ?line true = register(N6, P6),
- ?line P1 ! {go, N2, P2},
- ?line P2 ! {go, N1, P1},
- ?line P3 ! {go, N1, P1},
- ?line P4 ! {go, N1, P1},
- ?line P5 ! {go, N6, P6},
- ?line P6 ! {go, N5, P5},
- ?line receive {done, P1} -> ok end,
- ?line receive {done, P2} -> ok end,
- ?line receive {done, P3} -> ok end,
- ?line receive {done, P4} -> ok end,
- ?line receive {done, P5} -> ok end,
- ?line receive {done, P6} -> ok end,
- ?line unlink(P1), exit(P1, bang),
- ?line unlink(P2), exit(P2, bang),
- ?line unlink(P3), exit(P3, bang),
- ?line unlink(P4), exit(P4, bang),
- ?line unlink(P5), exit(P5, bang),
- ?line unlink(P6), exit(P6, bang),
- ?line ok.
+ Parent = self(),
+ Fun = fun () ->
+ receive {go, Name, Pid} -> ok end,
+ pi_loop(Name, Pid, 10000),
+ Parent ! {done, self()},
+ receive after infinity -> ok end
+ end,
+ P1 = spawn_link(Fun),
+ N1 = process_info_lock_reschedule2_1,
+ true = register(N1, P1),
+ P2 = spawn_link(Fun),
+ N2 = process_info_lock_reschedule2_2,
+ true = register(N2, P2),
+ P3 = spawn_link(Fun),
+ N3 = process_info_lock_reschedule2_3,
+ true = register(N3, P3),
+ P4 = spawn_link(Fun),
+ N4 = process_info_lock_reschedule2_4,
+ true = register(N4, P4),
+ P5 = spawn_link(Fun),
+ N5 = process_info_lock_reschedule2_5,
+ true = register(N5, P5),
+ P6 = spawn_link(Fun),
+ N6 = process_info_lock_reschedule2_6,
+ true = register(N6, P6),
+ P1 ! {go, N2, P2},
+ P2 ! {go, N1, P1},
+ P3 ! {go, N1, P1},
+ P4 ! {go, N1, P1},
+ P5 ! {go, N6, P6},
+ P6 ! {go, N5, P5},
+ receive {done, P1} -> ok end,
+ receive {done, P2} -> ok end,
+ receive {done, P3} -> ok end,
+ receive {done, P4} -> ok end,
+ receive {done, P5} -> ok end,
+ receive {done, P6} -> ok end,
+ unlink(P1), exit(P1, bang),
+ unlink(P2), exit(P2, bang),
+ unlink(P3), exit(P3, bang),
+ unlink(P4), exit(P4, bang),
+ unlink(P5), exit(P5, bang),
+ unlink(P6), exit(P6, bang),
+ ok.
many_args(0,_B,_C,_D,_E,_F,_G,_H,_I,_J) ->
ok;
@@ -802,120 +793,115 @@ process_info_lock_reschedule3(suite) ->
process_info_lock_reschedule3(Config) when is_list(Config) ->
%% We need a process that is running and an item that requires
%% process_info to take the main process lock.
- ?line Target1 = spawn_link(fun tok_loop/0),
- ?line Name1 = process_info_lock_reschedule_running,
- ?line register(Name1, Target1),
- ?line Target2 = spawn_link(fun () -> receive after infinity -> ok end end),
- ?line Name2 = process_info_lock_reschedule_waiting,
- ?line register(Name2, Target2),
- ?line PI = fun(N) ->
- case N rem 10 of
- 0 -> erlang:yield();
- _ -> ok
- end,
- ?line do_pi_msg_len({proc, Target1},
- {arg, message_queue_len})
- end,
- ?line many_args(100000,1,2,3,4,5,6,7,8,9),
- ?line lists:foreach(PI, lists:seq(1,1000000)),
+ Target1 = spawn_link(fun tok_loop/0),
+ Name1 = process_info_lock_reschedule_running,
+ register(Name1, Target1),
+ Target2 = spawn_link(fun () -> receive after infinity -> ok end end),
+ Name2 = process_info_lock_reschedule_waiting,
+ register(Name2, Target2),
+ PI = fun(N) ->
+ case N rem 10 of
+ 0 -> erlang:yield();
+ _ -> ok
+ end,
+ do_pi_msg_len({proc, Target1},
+ {arg, message_queue_len})
+ end,
+ many_args(100000,1,2,3,4,5,6,7,8,9),
+ lists:foreach(PI, lists:seq(1,1000000)),
%% Make sure Target1 still is willing to "tok loop"
- ?line case process_info(Target1, status) of
+ case process_info(Target1, status) of
{status, OkStatus} when OkStatus == runnable;
OkStatus == running;
OkStatus == garbage_collecting ->
- ?line unlink(Target1),
- ?line unlink(Target2),
- ?line exit(Target1, bang),
- ?line exit(Target2, bang),
- ?line OkStatus;
+ unlink(Target1),
+ unlink(Target2),
+ exit(Target1, bang),
+ exit(Target2, bang),
+ OkStatus;
{status, BadStatus} ->
- ?line ?t:fail(BadStatus)
+ ?t:fail(BadStatus)
end.
process_status_exiting(Config) when is_list(Config) ->
%% Make sure that erts_debug:get_internal_state({process_status,P})
%% returns exiting if it is in status P_EXITING.
- ?line erts_debug:set_internal_state(available_internal_state,true),
- ?line Prio = process_flag(priority, max),
- ?line P = spawn_opt(fun () -> receive after infinity -> ok end end,
+ erts_debug:set_internal_state(available_internal_state,true),
+ Prio = process_flag(priority, max),
+ P = spawn_opt(fun () -> receive after infinity -> ok end end,
[{priority, normal}]),
- ?line erlang:yield(),
+ erlang:yield(),
%% The tok_loop processes are here to make it hard for the exiting
%% process to be scheduled in for exit...
- ?line TokLoops = lists:map(fun (_) ->
- spawn_opt(fun tok_loop/0,
- [link,{priority, high}])
- end,
- lists:seq(1, erlang:system_info(schedulers_online))),
- ?line exit(P, boom),
- ?line wait_until(
- fun () ->
- exiting =:= erts_debug:get_internal_state({process_status,P})
- end),
- ?line lists:foreach(fun (Tok) -> unlink(Tok), exit(Tok,bang) end, TokLoops),
- ?line process_flag(priority, Prio),
- ?line ok.
+ TokLoops = lists:map(fun (_) ->
+ spawn_opt(fun tok_loop/0,
+ [link,{priority, high}])
+ end, lists:seq(1, erlang:system_info(schedulers_online))),
+ exit(P, boom),
+ wait_until(fun() ->
+ exiting =:= erts_debug:get_internal_state({process_status,P})
+ end),
+ lists:foreach(fun (Tok) -> unlink(Tok), exit(Tok,bang) end, TokLoops),
+ process_flag(priority, Prio),
+ ok.
otp_4725(Config) when is_list(Config) ->
- ?line Tester = self(),
- ?line Ref1 = make_ref(),
- ?line Pid1 = spawn_opt(fun () ->
- Tester ! {Ref1, process_info(self())},
- receive
- Ref1 -> bye
- end
- end,
- [link,
- {priority, max},
- {fullsweep_after, 600}]),
- ?line receive
- {Ref1, ProcInfo1A} ->
- ?line ProcInfo1B = process_info(Pid1),
- ?line Pid1 ! Ref1,
- ?line check_proc_infos(ProcInfo1A, ProcInfo1B)
- end,
- ?line Ref2 = make_ref(),
- ?line Pid2 = spawn_opt(fun () ->
- Tester ! {Ref2, process_info(self())},
- receive
- Ref2 -> bye
- end
- end,
- []),
- ?line receive
- {Ref2, ProcInfo2A} ->
- ?line ProcInfo2B = process_info(Pid2),
- ?line Pid2 ! Ref2,
- ?line check_proc_infos(ProcInfo2A, ProcInfo2B)
- end,
- ?line ok.
+ Tester = self(),
+ Ref1 = make_ref(),
+ Pid1 = spawn_opt(fun () ->
+ Tester ! {Ref1, process_info(self())},
+ receive
+ Ref1 -> bye
+ end
+ end, [link, {priority, max}, {fullsweep_after, 600}]),
+ receive
+ {Ref1, ProcInfo1A} ->
+ ProcInfo1B = process_info(Pid1),
+ Pid1 ! Ref1,
+ check_proc_infos(ProcInfo1A, ProcInfo1B)
+ end,
+ Ref2 = make_ref(),
+ Pid2 = spawn_opt(fun () ->
+ Tester ! {Ref2, process_info(self())},
+ receive
+ Ref2 -> bye
+ end
+ end,
+ []),
+ receive
+ {Ref2, ProcInfo2A} ->
+ ProcInfo2B = process_info(Pid2),
+ Pid2 ! Ref2,
+ check_proc_infos(ProcInfo2A, ProcInfo2B)
+ end,
+ ok.
check_proc_infos(A, B) ->
- ?line IC = lists:keysearch(initial_call, 1, A),
- ?line IC = lists:keysearch(initial_call, 1, B),
+ IC = lists:keysearch(initial_call, 1, A),
+ IC = lists:keysearch(initial_call, 1, B),
- ?line L = lists:keysearch(links, 1, A),
- ?line L = lists:keysearch(links, 1, B),
+ L = lists:keysearch(links, 1, A),
+ L = lists:keysearch(links, 1, B),
- ?line D = lists:keysearch(dictionary, 1, A),
- ?line D = lists:keysearch(dictionary, 1, B),
+ D = lists:keysearch(dictionary, 1, A),
+ D = lists:keysearch(dictionary, 1, B),
- ?line TE = lists:keysearch(trap_exit, 1, A),
- ?line TE = lists:keysearch(trap_exit, 1, B),
+ TE = lists:keysearch(trap_exit, 1, A),
+ TE = lists:keysearch(trap_exit, 1, B),
- ?line EH = lists:keysearch(error_handler, 1, A),
- ?line EH = lists:keysearch(error_handler, 1, B),
+ EH = lists:keysearch(error_handler, 1, A),
+ EH = lists:keysearch(error_handler, 1, B),
- ?line P = lists:keysearch(priority, 1, A),
- ?line P = lists:keysearch(priority, 1, B),
+ P = lists:keysearch(priority, 1, A),
+ P = lists:keysearch(priority, 1, B),
- ?line GL = lists:keysearch(group_leader, 1, A),
- ?line GL = lists:keysearch(group_leader, 1, B),
+ GL = lists:keysearch(group_leader, 1, A),
+ GL = lists:keysearch(group_leader, 1, B),
- ?line GC = lists:keysearch(garbage_collection, 1, A),
- ?line GC = lists:keysearch(garbage_collection, 1, B),
+ GC = lists:keysearch(garbage_collection, 1, A),
+ GC = lists:keysearch(garbage_collection, 1, B),
- ?line ok.
+ ok.
%% Dummies.
@@ -928,18 +914,18 @@ stop_spawner() ->
%% Tests erlang:bump_reductions/1.
bump_reductions(Config) when is_list(Config) ->
- ?line erlang:garbage_collect(),
- ?line receive after 1 -> ok end, % Clear reductions.
- ?line {reductions,R1} = process_info(self(), reductions),
- ?line true = erlang:bump_reductions(100),
- ?line {reductions,R2} = process_info(self(), reductions),
- ?line case R2-R1 of
+ erlang:garbage_collect(),
+ receive after 1 -> ok end, % Clear reductions.
+ {reductions,R1} = process_info(self(), reductions),
+ true = erlang:bump_reductions(100),
+ {reductions,R2} = process_info(self(), reductions),
+ case R2-R1 of
Diff when Diff < 100 ->
- ?line ok = io:format("R1 = ~w, R2 = ~w", [R1, R2]),
- ?line test_server:fail({small_diff, Diff});
+ ok = io:format("R1 = ~w, R2 = ~w", [R1, R2]),
+ test_server:fail({small_diff, Diff});
Diff when Diff > 110 ->
- ?line ok = io:format("R1 = ~w, R2 = ~w", [R1, R2]),
- ?line test_server:fail({big_diff, Diff});
+ ok = io:format("R1 = ~w, R2 = ~w", [R1, R2]),
+ test_server:fail({big_diff, Diff});
Diff ->
io:format("~p\n", [Diff]),
ok
@@ -949,11 +935,11 @@ bump_reductions(Config) when is_list(Config) ->
bump_big(R2, 16#08000000).
bump_big(Prev, Limit) ->
- ?line true = erlang:bump_reductions(100000), %Limited to CONTEXT_REDUCTIONS.
- ?line case process_info(self(), reductions) of
+ true = erlang:bump_reductions(100000), %Limited to CONTEXT_REDUCTIONS.
+ case process_info(self(), reductions) of
{reductions,Big} when is_integer(Big), Big > Limit ->
- ?line erlang:garbage_collect(),
- ?line io:format("~p\n", [Big]);
+ erlang:garbage_collect(),
+ io:format("~p\n", [Big]);
{reductions,R} when is_integer(R), R > Prev ->
bump_big(R, Limit)
end,
@@ -964,34 +950,34 @@ bump_big(Prev, Limit) ->
low_prio(Config) when is_list(Config) ->
case erlang:system_info(schedulers_online) of
1 ->
- ?line ok = low_prio_test(Config);
+ ok = low_prio_test(Config);
_ ->
- ?line erlang:system_flag(multi_scheduling, block),
- ?line ok = low_prio_test(Config),
- ?line erlang:system_flag(multi_scheduling, unblock),
- ?line {comment,
+ erlang:system_flag(multi_scheduling, block),
+ ok = low_prio_test(Config),
+ erlang:system_flag(multi_scheduling, unblock),
+ {comment,
"Test not written for SMP runtime system. "
"Multi scheduling blocked during test."}
end.
low_prio_test(Config) when is_list(Config) ->
- ?line process_flag(trap_exit, true),
- ?line S = spawn_link(?MODULE, prio_server, [0, 0]),
- ?line PCs = spawn_prio_clients(S, erlang:system_info(schedulers_online)),
- ?line timer:sleep(2000),
- ?line lists:foreach(fun (P) -> exit(P, kill) end, PCs),
- ?line S ! exit,
- ?line receive {'EXIT', S, {A, B}} -> check_prio(A, B) end,
+ process_flag(trap_exit, true),
+ S = spawn_link(?MODULE, prio_server, [0, 0]),
+ PCs = spawn_prio_clients(S, erlang:system_info(schedulers_online)),
+ timer:sleep(2000),
+ lists:foreach(fun (P) -> exit(P, kill) end, PCs),
+ S ! exit,
+ receive {'EXIT', S, {A, B}} -> check_prio(A, B) end,
ok.
check_prio(A, B) ->
- ?line Prop = A/B,
- ?line ok = io:format("Low=~p, High=~p, Prop=~p\n", [A, B, Prop]),
+ Prop = A/B,
+ ok = io:format("Low=~p, High=~p, Prop=~p\n", [A, B, Prop]),
%% It isn't 1/8, it's more like 0.3, but let's check that
%% the low-prio processes get some little chance to run at all.
- ?line true = (Prop < 1.0),
- ?line true = (Prop > 1/32).
+ true = (Prop < 1.0),
+ true = (Prop > 1/32).
prio_server(A, B) ->
receive
@@ -1051,25 +1037,25 @@ yield(Config) when is_list(Config) ->
end.
yield_test() ->
- ?line erlang:garbage_collect(),
- ?line receive after 1 -> ok end, % Clear reductions.
- ?line SC = schedcnt(start),
- ?line {reductions, R1} = process_info(self(), reductions),
- ?line {ok, true} = call_yield(middle),
- ?line true = call_yield(final),
- ?line true = call_yield(),
- ?line true = apply(erlang, yield, []),
- ?line {reductions, R2} = process_info(self(), reductions),
- ?line Schedcnt = schedcnt(stop, SC),
- ?line case {R2-R1, Schedcnt} of
- {Diff, 4} when Diff < 30 ->
- ?line ok = io:format("R1 = ~w, R2 = ~w, Schedcnt = ~w",
- [R1, R2, Schedcnt]);
- {Diff, _} ->
- ?line ok = io:format("R1 = ~w, R2 = ~w, Schedcnt = ~w",
- [R1, R2, Schedcnt]),
- ?line test_server:fail({measurement_error, Diff, Schedcnt})
- end.
+ erlang:garbage_collect(),
+ receive after 1 -> ok end, % Clear reductions.
+ SC = schedcnt(start),
+ {reductions, R1} = process_info(self(), reductions),
+ {ok, true} = call_yield(middle),
+ true = call_yield(final),
+ true = call_yield(),
+ true = apply(erlang, yield, []),
+ {reductions, R2} = process_info(self(), reductions),
+ Schedcnt = schedcnt(stop, SC),
+ case {R2-R1, Schedcnt} of
+ {Diff, 4} when Diff < 30 ->
+ ok = io:format("R1 = ~w, R2 = ~w, Schedcnt = ~w",
+ [R1, R2, Schedcnt]);
+ {Diff, _} ->
+ ok = io:format("R1 = ~w, R2 = ~w, Schedcnt = ~w",
+ [R1, R2, Schedcnt]),
+ test_server:fail({measurement_error, Diff, Schedcnt})
+ end.
call_yield() ->
erlang:yield().
@@ -1108,61 +1094,61 @@ schedcnt(stop, {Ref, Pid}) when is_reference(Ref), is_pid(Pid) ->
yield2(doc) -> [];
yield2(suite) -> [];
yield2(Config) when is_list(Config) ->
- ?line Me = self(),
- ?line Go = make_ref(),
- ?line RedDiff = make_ref(),
- ?line Done = make_ref(),
- ?line P = spawn(fun () ->
- receive Go -> ok end,
- {reductions, R1} = process_info(self(), reductions),
- {ok, true} = call_yield(middle),
- true = call_yield(final),
- true = call_yield(),
- true = apply(erlang, yield, []),
- {reductions, R2} = process_info(self(), reductions),
- Me ! {RedDiff, R2 - R1},
- exit(Done)
- end),
- ?line erlang:yield(),
-
- ?line 1 = erlang:trace(P, true, [running, procs, {tracer, self()}]),
-
- ?line P ! Go,
+ Me = self(),
+ Go = make_ref(),
+ RedDiff = make_ref(),
+ Done = make_ref(),
+ P = spawn(fun () ->
+ receive Go -> ok end,
+ {reductions, R1} = process_info(self(), reductions),
+ {ok, true} = call_yield(middle),
+ true = call_yield(final),
+ true = call_yield(),
+ true = apply(erlang, yield, []),
+ {reductions, R2} = process_info(self(), reductions),
+ Me ! {RedDiff, R2 - R1},
+ exit(Done)
+ end),
+ erlang:yield(),
+
+ 1 = erlang:trace(P, true, [running, procs, {tracer, self()}]),
+
+ P ! Go,
%% receive Go -> ok end,
- ?line {trace, P, in, _} = next_tmsg(P),
+ {trace, P, in, _} = next_tmsg(P),
%% {ok, true} = call_yield(middle),
- ?line {trace, P, out, _} = next_tmsg(P),
- ?line {trace, P, in, _} = next_tmsg(P),
+ {trace, P, out, _} = next_tmsg(P),
+ {trace, P, in, _} = next_tmsg(P),
%% true = call_yield(final),
- ?line {trace, P, out, _} = next_tmsg(P),
- ?line {trace, P, in, _} = next_tmsg(P),
+ {trace, P, out, _} = next_tmsg(P),
+ {trace, P, in, _} = next_tmsg(P),
%% true = call_yield(),
- ?line {trace, P, out, _} = next_tmsg(P),
- ?line {trace, P, in, _} = next_tmsg(P),
+ {trace, P, out, _} = next_tmsg(P),
+ {trace, P, in, _} = next_tmsg(P),
%% true = apply(erlang, yield, []),
- ?line {trace, P, out, _} = next_tmsg(P),
- ?line {trace, P, in, _} = next_tmsg(P),
+ {trace, P, out, _} = next_tmsg(P),
+ {trace, P, in, _} = next_tmsg(P),
%% exit(Done)
- ?line {trace, P, exit, Done} = next_tmsg(P),
+ {trace, P, exit, Done} = next_tmsg(P),
- ?line receive
+ receive
{RedDiff, Reductions} when Reductions < 30, Reductions > 0 ->
io:format("Reductions = ~p~n", [Reductions]),
- ?line ok;
+ ok;
{RedDiff, Reductions} ->
- ?line ?t:fail({unexpected_reduction_count, Reductions})
+ ?t:fail({unexpected_reduction_count, Reductions})
end,
- ?line none = next_tmsg(P),
+ none = next_tmsg(P),
- ?line ok.
+ ok.
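
A minimal sketch (running_trace_sketch/0 is an assumed name) of the running trace flag relied on above: the tracer receives {trace, Pid, in, MFA} when Pid is scheduled in and {trace, Pid, out, MFA} when it is scheduled out.

    running_trace_sketch() ->
        P = spawn(fun () -> receive go -> erlang:yield() end end),
        1 = erlang:trace(P, true, [running, {tracer, self()}]),
        P ! go,
        receive {trace, P, in, _MFA1} -> ok end,   % scheduled in to handle 'go'
        receive {trace, P, out, _MFA2} -> ok end.  % scheduled out again
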
next_tmsg(Pid) ->
receive
@@ -1178,19 +1164,19 @@ next_tmsg(Pid) ->
bad_register(Config) when is_list(Config) ->
Name = a_long_and_unused_name,
- ?line {'EXIT',{badarg,_}} = (catch register({bad,name}, self())),
- ?line fail_register(undefined, self()),
- ?line fail_register([bad,name], self()),
+ {'EXIT',{badarg,_}} = (catch register({bad,name}, self())),
+ fail_register(undefined, self()),
+ fail_register([bad,name], self()),
- ?line {Dead,Mref} = spawn_monitor(fun() -> true end),
+ {Dead,Mref} = spawn_monitor(fun() -> true end),
receive
{'DOWN',Mref,process,Dead,_} -> ok
end,
- ?line fail_register(Name, Dead),
- ?line fail_register(Name, make_ref()),
- ?line fail_register(Name, []),
- ?line fail_register(Name, {bad,process}),
- ?line fail_register(Name, <<>>),
+ fail_register(Name, Dead),
+ fail_register(Name, make_ref()),
+ fail_register(Name, []),
+ fail_register(Name, {bad,process}),
+ fail_register(Name, <<>>),
ok.
fail_register(Name, Process) ->
@@ -1201,50 +1187,50 @@ fail_register(Name, Process) ->
garbage_collect(doc) -> [];
garbage_collect(suite) -> [];
garbage_collect(Config) when is_list(Config) ->
- ?line Prio = process_flag(priority, high),
- ?line true = erlang:garbage_collect(),
- ?line TokLoopers = lists:map(fun (_) ->
- spawn_opt(fun tok_loop/0,
- [{priority, low}, link])
- end,
- lists:seq(1, 10)),
- ?line lists:foreach(fun (Pid) ->
- ?line Mon = erlang:monitor(process, Pid),
- ?line DownBefore = receive
- {'DOWN', Mon, _, _, _} ->
- ?line true
- after 0 ->
- ?line false
- end,
- ?line GC = erlang:garbage_collect(Pid),
- ?line DownAfter = receive
- {'DOWN', Mon, _, _, _} ->
- ?line true
- after 0 ->
- ?line false
- end,
- ?line true = erlang:demonitor(Mon),
- ?line case {DownBefore, DownAfter} of
- {true, _} -> ?line false = GC;
- {false, false} -> ?line true = GC;
- _ -> ?line GC
- end
- end,
- processes()),
- ?line lists:foreach(fun (Pid) ->
- unlink(Pid),
- exit(Pid, bang)
- end, TokLoopers),
- ?line process_flag(priority, Prio),
- ?line ok.
+ Prio = process_flag(priority, high),
+ true = erlang:garbage_collect(),
+
+ TokLoopers = lists:map(fun (_) ->
+ spawn_opt(fun tok_loop/0, [{priority, low}, link])
+ end, lists:seq(1, 10)),
+
+ lists:foreach(fun (Pid) ->
+ Mon = erlang:monitor(process, Pid),
+ DownBefore = receive
+ {'DOWN', Mon, _, _, _} ->
+ true
+ after 0 ->
+ false
+ end,
+ GC = erlang:garbage_collect(Pid),
+ DownAfter = receive
+ {'DOWN', Mon, _, _, _} ->
+ true
+ after 0 ->
+ false
+ end,
+ true = erlang:demonitor(Mon),
+ case {DownBefore, DownAfter} of
+ {true, _} -> false = GC;
+ {false, false} -> true = GC;
+ _ -> GC
+ end
+ end, processes()),
+
+ lists:foreach(fun (Pid) ->
+ unlink(Pid),
+ exit(Pid, bang)
+ end, TokLoopers),
+ process_flag(priority, Prio),
+ ok.
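
A hedged sketch (gc_other_sketch/0 assumed) of the property the loop above asserts: erlang:garbage_collect/1 returns true for a live process and false once the process is gone.

    gc_other_sketch() ->
        {Pid, Mon} = spawn_monitor(fun () -> receive stop -> ok end end),
        true = erlang:garbage_collect(Pid),                  % alive: collected
        Pid ! stop,
        receive {'DOWN', Mon, process, Pid, normal} -> ok end,
        false = erlang:garbage_collect(Pid).                 % already dead
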
process_info_messages(doc) ->
["This used to cause the nofrag emulator to dump core"];
process_info_messages(suite) ->
[];
process_info_messages(Config) when is_list(Config) ->
- ?line process_info_messages_test(),
- ?line ok.
+ process_info_messages_test(),
+ ok.
process_info_messages_loop(0) -> ok;
process_info_messages_loop(N) -> process_info_messages_loop(N-1).
@@ -1259,43 +1245,42 @@ process_info_messages_send_my_msgs_to(Rcvr) ->
end.
process_info_messages_test() ->
- ?line Go = make_ref(),
- ?line Done = make_ref(),
- ?line Rcvr = self(),
- ?line Rcvr2 = spawn_link(fun () ->
- receive {Go, Rcvr} -> ok end,
- garbage_collect(),
- Rcvr ! {Done, self()}
- end),
- ?line Sndrs = lists:map(
- fun (_) ->
- spawn_link(fun () ->
- Rcvr ! {Go, self()},
- receive {Go, Rcvr} -> ok end,
- BigData = lists:seq(1, 1000),
- Rcvr ! BigData,
- Rcvr ! BigData,
- Rcvr ! BigData,
- Rcvr ! {Done, self()}
- end)
- end,
- lists:seq(1, 10)),
- ?line lists:foreach(fun (Sndr) -> receive {Go, Sndr} -> ok end end,
+ Go = make_ref(),
+ Done = make_ref(),
+ Rcvr = self(),
+ Rcvr2 = spawn_link(fun () ->
+ receive {Go, Rcvr} -> ok end,
+ garbage_collect(),
+ Rcvr ! {Done, self()}
+ end),
+ Sndrs = lists:map(
+ fun (_) ->
+ spawn_link(fun () ->
+ Rcvr ! {Go, self()},
+ receive {Go, Rcvr} -> ok end,
+ BigData = lists:seq(1, 1000),
+ Rcvr ! BigData,
+ Rcvr ! BigData,
+ Rcvr ! BigData,
+ Rcvr ! {Done, self()}
+ end)
+ end, lists:seq(1, 10)),
+ lists:foreach(fun (Sndr) -> receive {Go, Sndr} -> ok end end,
Sndrs),
- ?line garbage_collect(),
- ?line erlang:yield(),
- ?line lists:foreach(fun (Sndr) -> Sndr ! {Go, self()} end, Sndrs),
- ?line process_info_messages_loop(100000000),
- ?line Msgs = process_info(self(), messages),
- ?line lists:foreach(fun (Sndr) -> receive {Done, Sndr} -> ok end end,
+ garbage_collect(),
+ erlang:yield(),
+ lists:foreach(fun (Sndr) -> Sndr ! {Go, self()} end, Sndrs),
+ process_info_messages_loop(100000000),
+ Msgs = process_info(self(), messages),
+ lists:foreach(fun (Sndr) -> receive {Done, Sndr} -> ok end end,
Sndrs),
- ?line garbage_collect(),
- ?line Rcvr2 ! Msgs,
- ?line process_info_messages_send_my_msgs_to(Rcvr2),
- ?line Rcvr2 ! {Go, self()},
- ?line garbage_collect(),
- ?line receive {Done, Rcvr2} -> ok end,
- ?line Msgs.
+ garbage_collect(),
+ Rcvr2 ! Msgs,
+ process_info_messages_send_my_msgs_to(Rcvr2),
+ Rcvr2 ! {Go, self()},
+ garbage_collect(),
+ receive {Done, Rcvr2} -> ok end,
+ Msgs.
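
A short sketch (messages_sketch/0 assumed) of the process_info item used above: 'messages' returns a snapshot of the message queue without consuming it.

    messages_sketch() ->
        self() ! first,
        self() ! second,
        {messages, [first, second]} = process_info(self(), messages), % snapshot only
        receive first -> ok end,
        receive second -> ok end,
        {messages, []} = process_info(self(), messages).
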
chk_badarg(Fun) ->
try Fun(), exit(no_badarg) catch error:badarg -> ok end.
@@ -1305,76 +1290,72 @@ process_flag_badarg(doc) ->
process_flag_badarg(suite) ->
[];
process_flag_badarg(Config) when is_list(Config) ->
- ?line chk_badarg(fun () -> process_flag(gurka, banan) end),
- ?line chk_badarg(fun () -> process_flag(trap_exit, gurka) end),
- ?line chk_badarg(fun () -> process_flag(error_handler, 1) end),
- ?line chk_badarg(fun () -> process_flag(min_heap_size, gurka) end),
- ?line chk_badarg(fun () -> process_flag(min_bin_vheap_size, gurka) end),
- ?line chk_badarg(fun () -> process_flag(min_bin_vheap_size, -1) end),
- ?line chk_badarg(fun () -> process_flag(priority, 4711) end),
- ?line chk_badarg(fun () -> process_flag(save_calls, hmmm) end),
- ?line P= spawn_link(fun () -> receive die -> ok end end),
- ?line chk_badarg(fun () -> process_flag(P, save_calls, hmmm) end),
- ?line chk_badarg(fun () -> process_flag(gurka, save_calls, hmmm) end),
- ?line P ! die,
- ?line ok.
+ chk_badarg(fun () -> process_flag(gurka, banan) end),
+ chk_badarg(fun () -> process_flag(trap_exit, gurka) end),
+ chk_badarg(fun () -> process_flag(error_handler, 1) end),
+ chk_badarg(fun () -> process_flag(min_heap_size, gurka) end),
+ chk_badarg(fun () -> process_flag(min_bin_vheap_size, gurka) end),
+ chk_badarg(fun () -> process_flag(min_bin_vheap_size, -1) end),
+ chk_badarg(fun () -> process_flag(priority, 4711) end),
+ chk_badarg(fun () -> process_flag(save_calls, hmmm) end),
+    P = spawn_link(fun () -> receive die -> ok end end),
+ chk_badarg(fun () -> process_flag(P, save_calls, hmmm) end),
+ chk_badarg(fun () -> process_flag(gurka, save_calls, hmmm) end),
+ P ! die,
+ ok.
-include_lib("stdlib/include/ms_transform.hrl").
otp_6237(doc) -> [];
otp_6237(suite) -> [];
otp_6237(Config) when is_list(Config) ->
- ?line Slctrs = lists:map(fun (_) ->
- spawn_link(fun () ->
- otp_6237_select_loop()
- end)
- end,
- lists:seq(1,5)),
- ?line lists:foreach(fun (_) -> otp_6237_test() end, lists:seq(1, 100)),
- ?line lists:foreach(fun (S) -> unlink(S),exit(S, kill) end, Slctrs),
- ?line ok.
+ Slctrs = lists:map(fun (_) ->
+ spawn_link(fun () ->
+ otp_6237_select_loop()
+ end)
+ end,
+ lists:seq(1,5)),
+ lists:foreach(fun (_) -> otp_6237_test() end, lists:seq(1, 100)),
+ lists:foreach(fun (S) -> unlink(S),exit(S, kill) end, Slctrs),
+ ok.
otp_6237_test() ->
- ?line Parent = self(),
- ?line Inited = make_ref(),
- ?line Die = make_ref(),
- ?line Pid = spawn_link(fun () ->
- register(otp_6237,self()),
- otp_6237 = ets:new(otp_6237,
- [named_table,
- ordered_set]),
- ets:insert(otp_6237,
- [{I,I}
- || I <- lists:seq(1, 100)]),
- %% Inserting a lot of bif timers
- %% increase the possibility that
- %% the test will fail when the
- %% original cleanup order is used
- lists:foreach(
- fun (_) ->
- erlang:send_after(1000000,
- self(),
- {a,b,c})
- end,
- lists:seq(1,1000)),
- Parent ! Inited,
- receive Die -> bye end
- end),
- ?line receive
- Inited -> ?line ok
- end,
- ?line Pid ! Die,
+ Parent = self(),
+ Inited = make_ref(),
+ Die = make_ref(),
+ Pid = spawn_link(fun () ->
+ register(otp_6237,self()),
+ otp_6237 = ets:new(otp_6237,
+ [named_table,
+ ordered_set]),
+ ets:insert(otp_6237,
+ [{I,I}
+ || I <- lists:seq(1, 100)]),
+			     %% Inserting a lot of BIF timers
+			     %% increases the probability that
+			     %% the test fails when the
+			     %% original cleanup order is used
+ lists:foreach( fun (_) ->
+ erlang:send_after(1000000, self(), {a,b,c})
+ end, lists:seq(1,1000)),
+ Parent ! Inited,
+ receive Die -> bye end
+ end),
+ receive
+ Inited -> ok
+ end,
+ Pid ! Die,
otp_6237_whereis_loop().
otp_6237_whereis_loop() ->
- ?line case whereis(otp_6237) of
+ case whereis(otp_6237) of
undefined ->
- ?line otp_6237 = ets:new(otp_6237,
+ otp_6237 = ets:new(otp_6237,
[named_table,ordered_set]),
- ?line ets:delete(otp_6237),
- ?line ok;
+ ets:delete(otp_6237),
+ ok;
_ ->
- ?line otp_6237_whereis_loop()
+ otp_6237_whereis_loop()
end.
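
A hedged sketch of the cleanup property otp_6237 probes (owner_cleanup_sketch/0, sketch_owner and sketch_tab are assumed names; the waits are generous): when a process dies, its registered name and its named ETS table are released again.

    owner_cleanup_sketch() ->
        Pid = spawn(fun () ->
                            register(sketch_owner, self()),
                            sketch_tab = ets:new(sketch_tab, [named_table, ordered_set]),
                            receive stop -> ok end
                    end),
        receive after 100 -> ok end,      % let it register and create the table
        Pid ! stop,
        receive after 100 -> ok end,      % let the runtime finish the cleanup
        undefined = whereis(sketch_owner),
        sketch_tab = ets:new(sketch_tab, [named_table, ordered_set]),
        ets:delete(sketch_tab),
        ok.
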
otp_6237_select_loop() ->
@@ -1382,7 +1363,6 @@ otp_6237_select_loop() ->
otp_6237_select_loop().
-
-define(NoTestProcs, 10000).
-record(ptab_list_bif_info, {min_start_reds,
tab_chunks,
@@ -1399,42 +1379,42 @@ processes_large_tab(doc) ->
processes_large_tab(suite) ->
[];
processes_large_tab(Config) when is_list(Config) ->
- ?line enable_internal_state(),
- ?line MaxDbgLvl = 20,
- ?line MinProcTabSize = 2*(1 bsl 15),
- ?line ProcTabSize0 = 1000000,
- ?line ProcTabSize1 = case {erlang:system_info(schedulers_online),
- erlang:system_info(logical_processors)} of
- {Schdlrs, Cpus} when is_integer(Cpus),
- Schdlrs =< Cpus ->
- ProcTabSize0;
- _ ->
- ProcTabSize0 div 4
- end,
- ?line ProcTabSize2 = case erlang:system_info(debug_compiled) of
- true -> ProcTabSize1 - 500000;
- false -> ProcTabSize1
- end,
+ enable_internal_state(),
+ MaxDbgLvl = 20,
+ MinProcTabSize = 2*(1 bsl 15),
+ ProcTabSize0 = 1000000,
+ ProcTabSize1 = case {erlang:system_info(schedulers_online),
+ erlang:system_info(logical_processors)} of
+ {Schdlrs, Cpus} when is_integer(Cpus),
+ Schdlrs =< Cpus ->
+ ProcTabSize0;
+ _ ->
+ ProcTabSize0 div 4
+ end,
+ ProcTabSize2 = case erlang:system_info(debug_compiled) of
+ true -> ProcTabSize1 - 500000;
+ false -> ProcTabSize1
+ end,
    %% With high debug levels this test takes so long that the
    %% connection times out; therefore, shrink the test on high
    %% debug levels.
- ?line DbgLvl = case erts_debug:get_internal_state(processes_bif_info) of
+ DbgLvl = case erts_debug:get_internal_state(processes_bif_info) of
#ptab_list_bif_info{debug_level = Lvl} when Lvl > MaxDbgLvl ->
20;
#ptab_list_bif_info{debug_level = Lvl} when Lvl < 0 ->
- ?line ?t:fail({debug_level, Lvl});
+ ?t:fail({debug_level, Lvl});
#ptab_list_bif_info{debug_level = Lvl} ->
Lvl
end,
- ?line ProcTabSize3 = ProcTabSize2 - (1300000 * DbgLvl div MaxDbgLvl),
- ?line ProcTabSize = case ProcTabSize3 < MinProcTabSize of
+ ProcTabSize3 = ProcTabSize2 - (1300000 * DbgLvl div MaxDbgLvl),
+ ProcTabSize = case ProcTabSize3 < MinProcTabSize of
true -> MinProcTabSize;
false -> ProcTabSize3
end,
- ?line {ok, LargeNode} = start_node(Config,
+ {ok, LargeNode} = start_node(Config,
"+P " ++ integer_to_list(ProcTabSize)),
- ?line Res = rpc:call(LargeNode, ?MODULE, processes_bif_test, []),
- ?line case rpc:call(LargeNode,
+ Res = rpc:call(LargeNode, ?MODULE, processes_bif_test, []),
+ case rpc:call(LargeNode,
erts_debug,
get_internal_state,
[processes_bif_info]) of
@@ -1442,40 +1422,37 @@ processes_large_tab(Config) when is_list(Config) ->
Chunks > 1 -> ok;
PBInfo -> ?t:fail(PBInfo)
end,
- ?line stop_node(LargeNode),
- ?line chk_processes_bif_test_res(Res).
+ stop_node(LargeNode),
+ chk_processes_bif_test_res(Res).
processes_default_tab(doc) ->
[];
processes_default_tab(suite) ->
[];
processes_default_tab(Config) when is_list(Config) ->
- ?line {ok, DefaultNode} = start_node(Config, ""),
- ?line Res = rpc:call(DefaultNode, ?MODULE, processes_bif_test, []),
- ?line stop_node(DefaultNode),
- ?line chk_processes_bif_test_res(Res).
+ {ok, DefaultNode} = start_node(Config, ""),
+ Res = rpc:call(DefaultNode, ?MODULE, processes_bif_test, []),
+ stop_node(DefaultNode),
+ chk_processes_bif_test_res(Res).
processes_small_tab(doc) ->
[];
processes_small_tab(suite) ->
[];
processes_small_tab(Config) when is_list(Config) ->
- ?line {ok, SmallNode} = start_node(Config, "+P 1024"),
- ?line Res = rpc:call(SmallNode, ?MODULE, processes_bif_test, []),
- ?line PBInfo = rpc:call(SmallNode,
- erts_debug,
- get_internal_state,
- [processes_bif_info]),
- ?line stop_node(SmallNode),
- ?line true = PBInfo#ptab_list_bif_info.tab_chunks < 10,
- ?line chk_processes_bif_test_res(Res).
+ {ok, SmallNode} = start_node(Config, "+P 1024"),
+ Res = rpc:call(SmallNode, ?MODULE, processes_bif_test, []),
+ PBInfo = rpc:call(SmallNode, erts_debug, get_internal_state, [processes_bif_info]),
+ stop_node(SmallNode),
+ true = PBInfo#ptab_list_bif_info.tab_chunks < 10,
+ chk_processes_bif_test_res(Res).
processes_this_tab(doc) ->
[];
processes_this_tab(suite) ->
[];
processes_this_tab(Config) when is_list(Config) ->
- ?line chk_processes_bif_test_res(processes_bif_test()).
+ chk_processes_bif_test_res(processes_bif_test()).
chk_processes_bif_test_res(ok) -> ok;
chk_processes_bif_test_res({comment, _} = Comment) -> Comment;
@@ -1575,26 +1552,26 @@ do_processes(WantReds) ->
processes().
processes_bif_test() ->
- ?line Tester = self(),
- ?line enable_internal_state(),
- ?line PBInfo = erts_debug:get_internal_state(processes_bif_info),
- ?line print_processes_bif_info(PBInfo),
- ?line WantReds = PBInfo#ptab_list_bif_info.min_start_reds + 10,
- ?line WillTrap = case PBInfo of
- #ptab_list_bif_info{tab_chunks = Chunks} when Chunks < 10 ->
- false; %% Skip for small tables
- #ptab_list_bif_info{tab_chunks = Chunks,
- tab_chunks_size = ChunksSize,
- tab_indices_per_red = IndiciesPerRed
- } ->
- Chunks*ChunksSize >= IndiciesPerRed*WantReds
- end,
- ?line Processes = fun () ->
- erts_debug:set_internal_state(reds_left,WantReds),
- processes()
- end,
+ Tester = self(),
+ enable_internal_state(),
+ PBInfo = erts_debug:get_internal_state(processes_bif_info),
+ print_processes_bif_info(PBInfo),
+ WantReds = PBInfo#ptab_list_bif_info.min_start_reds + 10,
+ WillTrap = case PBInfo of
+ #ptab_list_bif_info{tab_chunks = Chunks} when Chunks < 10 ->
+ false; %% Skip for small tables
+ #ptab_list_bif_info{tab_chunks = Chunks,
+ tab_chunks_size = ChunksSize,
+ tab_indices_per_red = IndiciesPerRed
+ } ->
+ Chunks*ChunksSize >= IndiciesPerRed*WantReds
+ end,
+ Processes = fun () ->
+ erts_debug:set_internal_state(reds_left,WantReds),
+ processes()
+ end,
- ?line ok = do_processes_bif_test(WantReds, WillTrap, Processes),
+ ok = do_processes_bif_test(WantReds, WillTrap, Processes),
case WillTrap of
false ->
@@ -1602,8 +1579,8 @@ processes_bif_test() ->
true ->
%% Do it again with a process suspended while
%% in the processes/0 bif.
- ?line erlang:system_flag(multi_scheduling, block),
- ?line Suspendee = spawn_link(fun () ->
+ erlang:system_flag(multi_scheduling, block),
+ Suspendee = spawn_link(fun () ->
Tester ! {suspend_me, self()},
Tester ! {self(),
done,
@@ -1613,175 +1590,156 @@ processes_bif_test() ->
ok
end
end),
- ?line receive {suspend_me, Suspendee} -> ok end,
- ?line erlang:suspend_process(Suspendee),
- ?line erlang:system_flag(multi_scheduling, unblock),
+ receive {suspend_me, Suspendee} -> ok end,
+ erlang:suspend_process(Suspendee),
+ erlang:system_flag(multi_scheduling, unblock),
- ?line [{status,suspended},
- {current_function,{erlang,ptab_list_continue,2}}]
- = process_info(Suspendee, [status, current_function]),
+ [{status,suspended},{current_function,{erlang,ptab_list_continue,2}}] =
+ process_info(Suspendee, [status, current_function]),
- ?line ok = do_processes_bif_test(WantReds, WillTrap, Processes),
+ ok = do_processes_bif_test(WantReds, WillTrap, Processes),
- ?line erlang:resume_process(Suspendee),
- ?line receive {Suspendee, done, _} -> ok end,
- ?line unlink(Suspendee),
- ?line exit(Suspendee, bang)
+ erlang:resume_process(Suspendee),
+ receive {Suspendee, done, _} -> ok end,
+ unlink(Suspendee),
+ exit(Suspendee, bang)
end,
case get(processes_bif_testcase_comment) of
- undefined -> ?line ok;
- Comment -> ?line {comment, Comment}
+ undefined -> ok;
+ Comment -> {comment, Comment}
end.
do_processes_bif_test(WantReds, DieTest, Processes) ->
- ?line Tester = self(),
- ?line SpawnProcesses = fun (Prio) ->
- spawn_opt(?MODULE,
- do_processes,
- [WantReds],
- [link, {priority, Prio}])
- end,
- ?line Cleaner = spawn_link(fun () ->
- process_flag(trap_exit, true),
- Tester ! {cleaner_alive, self()},
- processes_bif_cleaner()
- end),
- ?line receive {cleaner_alive, Cleaner} -> ok end,
+ Tester = self(),
+ SpawnProcesses = fun (Prio) ->
+ spawn_opt(?MODULE, do_processes, [WantReds], [link, {priority, Prio}])
+ end,
+ Cleaner = spawn_link(fun () ->
+ process_flag(trap_exit, true),
+ Tester ! {cleaner_alive, self()},
+ processes_bif_cleaner()
+ end),
+ receive {cleaner_alive, Cleaner} -> ok end,
try
- ?line DoIt = make_ref(),
- ?line GetGoing = make_ref(),
- ?line {NoTestProcs, TestProcs} = spawn_initial_hangarounds(Cleaner),
- ?line ?t:format("Testing with ~p processes~n", [NoTestProcs]),
- ?line SpawnHangAround = fun () ->
- spawn(?MODULE,
- hangaround,
- [Cleaner, new_hangaround])
- end,
- ?line Killer = spawn_opt(fun () ->
- Splt = NoTestProcs div 10,
- {TP1, TP23} = lists:split(Splt,
- TestProcs),
- {TP2, TP3} = lists:split(Splt, TP23),
- erlang:system_flag(multi_scheduling,
- block),
- Tester ! DoIt,
- receive GetGoing -> ok end,
- erlang:system_flag(multi_scheduling,
- unblock),
- SpawnProcesses(high),
- lists:foreach(
- fun (P) ->
- SpawnHangAround(),
- exit(P, bang)
- end,
- TP1),
- SpawnProcesses(high),
- erlang:yield(),
- lists:foreach(
- fun (P) ->
- SpawnHangAround(),
- exit(P, bang)
- end,
- TP2),
- SpawnProcesses(high),
- lists:foreach(
- fun (P) ->
- SpawnHangAround(),
- exit(P, bang)
- end,
- TP3)
- end,
- [{priority, high}, link]),
- ?line receive DoIt -> ok end,
- ?line process_flag(priority, low),
- ?line SpawnProcesses(low),
- ?line erlang:yield(),
- ?line process_flag(priority, normal),
- ?line CorrectProcs0 = erts_debug:get_internal_state(processes),
- ?line Killer ! GetGoing,
- ?line erts_debug:set_internal_state(reds_left, WantReds),
- ?line Procs0 = processes(),
- ?line Procs = lists:sort(Procs0),
- ?line CorrectProcs = lists:sort(CorrectProcs0),
- ?line LengthCorrectProcs = length(CorrectProcs),
- ?line ?t:format("~p = length(CorrectProcs)~n", [LengthCorrectProcs]),
- ?line true = LengthCorrectProcs > NoTestProcs,
- ?line case CorrectProcs =:= Procs of
- true ->
- ?line ok;
- false ->
- ?line processes_unexpected_result(CorrectProcs, Procs)
- end,
- ?line unlink(Killer),
- ?line exit(Killer, bang)
+ DoIt = make_ref(),
+ GetGoing = make_ref(),
+ {NoTestProcs, TestProcs} = spawn_initial_hangarounds(Cleaner),
+ ?t:format("Testing with ~p processes~n", [NoTestProcs]),
+ SpawnHangAround = fun () ->
+ spawn(?MODULE, hangaround, [Cleaner, new_hangaround])
+ end,
+ Killer = spawn_opt(fun () ->
+ Splt = NoTestProcs div 10,
+ {TP1, TP23} = lists:split(Splt, TestProcs),
+ {TP2, TP3} = lists:split(Splt, TP23),
+ erlang:system_flag(multi_scheduling, block),
+ Tester ! DoIt,
+ receive GetGoing -> ok end,
+ erlang:system_flag(multi_scheduling, unblock),
+ SpawnProcesses(high),
+ lists:foreach( fun (P) ->
+ SpawnHangAround(),
+ exit(P, bang)
+ end, TP1),
+ SpawnProcesses(high),
+ erlang:yield(),
+ lists:foreach( fun (P) ->
+ SpawnHangAround(),
+ exit(P, bang)
+ end, TP2),
+ SpawnProcesses(high),
+ lists:foreach(
+ fun (P) ->
+ SpawnHangAround(),
+ exit(P, bang)
+ end, TP3)
+ end, [{priority, high}, link]),
+ receive DoIt -> ok end,
+ process_flag(priority, low),
+ SpawnProcesses(low),
+ erlang:yield(),
+ process_flag(priority, normal),
+ CorrectProcs0 = erts_debug:get_internal_state(processes),
+ Killer ! GetGoing,
+ erts_debug:set_internal_state(reds_left, WantReds),
+ Procs0 = processes(),
+ Procs = lists:sort(Procs0),
+ CorrectProcs = lists:sort(CorrectProcs0),
+ LengthCorrectProcs = length(CorrectProcs),
+ ?t:format("~p = length(CorrectProcs)~n", [LengthCorrectProcs]),
+ true = LengthCorrectProcs > NoTestProcs,
+ case CorrectProcs =:= Procs of
+ true ->
+ ok;
+ false ->
+ processes_unexpected_result(CorrectProcs, Procs)
+ end,
+ unlink(Killer),
+ exit(Killer, bang)
after
unlink(Cleaner),
exit(Cleaner, kill),
%% Wait for the system to recover to a normal state...
wait_until_system_recover()
end,
- ?line do_processes_bif_die_test(DieTest, Processes),
- ?line ok.
+ do_processes_bif_die_test(DieTest, Processes),
+ ok.
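
A minimal sketch (block_sketch/1 assumed) of the multi_scheduling flag used above: while blocked, normal processes are scheduled on a single scheduler thread until it is unblocked again.

    block_sketch(Fun) ->
        erlang:system_flag(multi_scheduling, block),
        try Fun() after erlang:system_flag(multi_scheduling, unblock) end.
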
do_processes_bif_die_test(false, _Processes) ->
- ?line ?t:format("Skipping test killing process executing processes/0~n",[]),
- ?line ok;
+ ?t:format("Skipping test killing process executing processes/0~n",[]),
+ ok;
do_processes_bif_die_test(true, Processes) ->
- ?line do_processes_bif_die_test(5, Processes);
+ do_processes_bif_die_test(5, Processes);
do_processes_bif_die_test(N, Processes) ->
- ?line ?t:format("Doing test killing process executing processes/0~n",[]),
+ ?t:format("Doing test killing process executing processes/0~n",[]),
try
- ?line Tester = self(),
- ?line Oooh_Nooooooo = make_ref(),
- ?line {_, DieWhileDoingMon} = erlang:spawn_monitor(
- fun () ->
- Victim = self(),
- spawn_opt(
- fun () ->
- exit(Victim, got_him)
- end,
- [link,
- {priority, max}]),
- Tester ! {Oooh_Nooooooo,
- hd(Processes())},
- exit(ohhhh_nooooo)
- end),
- ?line receive
- {'DOWN', DieWhileDoingMon, _, _, Reason} ->
- case Reason of
- got_him -> ok;
- _ -> throw({kill_in_trap, Reason})
- end
- end,
- ?line receive
- {Oooh_Nooooooo, _} ->
- ?line throw({kill_in_trap, 'Oooh_Nooooooo'})
- after 0 ->
- ?line ok
- end,
- ?line PrcsCllrsSeqLen = 2*erlang:system_info(schedulers_online),
- ?line PrcsCllrsSeq = lists:seq(1, PrcsCllrsSeqLen),
- ?line ProcsCallers = lists:map(
- fun (_) ->
- spawn_link(
- fun () ->
- Tester ! hd(Processes())
- end)
- end,
- PrcsCllrsSeq),
- ?line erlang:yield(),
+ Tester = self(),
+ Oooh_Nooooooo = make_ref(),
+ {_, DieWhileDoingMon} = erlang:spawn_monitor( fun () ->
+ Victim = self(),
+ spawn_opt(
+ fun () ->
+ exit(Victim, got_him)
+ end,
+ [link, {priority, max}]),
+ Tester ! {Oooh_Nooooooo,
+ hd(Processes())},
+ exit(ohhhh_nooooo)
+ end),
+ receive
+ {'DOWN', DieWhileDoingMon, _, _, Reason} ->
+ case Reason of
+ got_him -> ok;
+ _ -> throw({kill_in_trap, Reason})
+ end
+ end,
+ receive
+ {Oooh_Nooooooo, _} ->
+ throw({kill_in_trap, 'Oooh_Nooooooo'})
+ after 0 ->
+ ok
+ end,
+ PrcsCllrsSeqLen = 2*erlang:system_info(schedulers_online),
+ PrcsCllrsSeq = lists:seq(1, PrcsCllrsSeqLen),
+ ProcsCallers = lists:map( fun (_) ->
+ spawn_link(
+ fun () ->
+ Tester ! hd(Processes())
+ end)
+ end, PrcsCllrsSeq),
+ erlang:yield(),
{ProcsCallers1, ProcsCallers2} = lists:split(PrcsCllrsSeqLen div 2,
ProcsCallers),
- ?line process_flag(priority, high),
- ?line lists:foreach(
+ process_flag(priority, high),
+ lists:foreach(
fun (P) ->
unlink(P),
exit(P, bang)
end,
lists:reverse(ProcsCallers2) ++ ProcsCallers1),
- ?line process_flag(priority, normal),
- ?line ok
+ process_flag(priority, normal),
+ ok
catch
throw:{kill_in_trap, R} when N > 0 ->
?t:format("Failed to kill in trap: ~p~n", [R]),
@@ -1844,23 +1802,23 @@ processes_last_call_trap(doc) ->
processes_last_call_trap(suite) ->
[];
processes_last_call_trap(Config) when is_list(Config) ->
- ?line enable_internal_state(),
- ?line Processes = fun () -> processes() end,
- ?line PBInfo = erts_debug:get_internal_state(processes_bif_info),
- ?line print_processes_bif_info(PBInfo),
- ?line WantReds = case PBInfo#ptab_list_bif_info.min_start_reds of
- R when R > 10 -> R - 1;
- _R -> 9
- end,
- ?line lists:foreach(fun (_) ->
- ?line erts_debug:set_internal_state(reds_left,
- WantReds),
- Processes(),
- ?line erts_debug:set_internal_state(reds_left,
- WantReds),
- my_processes()
- end,
- lists:seq(1,100)).
+ enable_internal_state(),
+ Processes = fun () -> processes() end,
+ PBInfo = erts_debug:get_internal_state(processes_bif_info),
+ print_processes_bif_info(PBInfo),
+ WantReds = case PBInfo#ptab_list_bif_info.min_start_reds of
+ R when R > 10 -> R - 1;
+ _R -> 9
+ end,
+ lists:foreach(fun (_) ->
+ erts_debug:set_internal_state(reds_left,
+ WantReds),
+ Processes(),
+ erts_debug:set_internal_state(reds_left,
+ WantReds),
+ my_processes()
+ end,
+ lists:seq(1,100)).
my_processes() ->
processes().
@@ -1870,108 +1828,106 @@ processes_apply_trap(doc) ->
processes_apply_trap(suite) ->
[];
processes_apply_trap(Config) when is_list(Config) ->
- ?line enable_internal_state(),
- ?line PBInfo = erts_debug:get_internal_state(processes_bif_info),
- ?line print_processes_bif_info(PBInfo),
- ?line WantReds = case PBInfo#ptab_list_bif_info.min_start_reds of
- R when R > 10 -> R - 1;
- _R -> 9
- end,
- ?line lists:foreach(fun (_) ->
- ?line erts_debug:set_internal_state(reds_left,
- WantReds),
- ?line apply(erlang, processes, [])
- end,
- lists:seq(1,100)).
+ enable_internal_state(),
+ PBInfo = erts_debug:get_internal_state(processes_bif_info),
+ print_processes_bif_info(PBInfo),
+ WantReds = case PBInfo#ptab_list_bif_info.min_start_reds of
+ R when R > 10 -> R - 1;
+ _R -> 9
+ end,
+ lists:foreach(fun (_) ->
+ erts_debug:set_internal_state(reds_left,
+ WantReds),
+ apply(erlang, processes, [])
+ end, lists:seq(1,100)).
processes_gc_trap(doc) ->
[];
processes_gc_trap(suite) ->
[];
processes_gc_trap(Config) when is_list(Config) ->
- ?line Tester = self(),
- ?line enable_internal_state(),
- ?line PBInfo = erts_debug:get_internal_state(processes_bif_info),
- ?line print_processes_bif_info(PBInfo),
- ?line WantReds = PBInfo#ptab_list_bif_info.min_start_reds + 10,
- ?line Processes = fun () ->
- erts_debug:set_internal_state(reds_left,WantReds),
- processes()
- end,
+ Tester = self(),
+ enable_internal_state(),
+ PBInfo = erts_debug:get_internal_state(processes_bif_info),
+ print_processes_bif_info(PBInfo),
+ WantReds = PBInfo#ptab_list_bif_info.min_start_reds + 10,
+ Processes = fun () ->
+ erts_debug:set_internal_state(reds_left,WantReds),
+ processes()
+ end,
- ?line erlang:system_flag(multi_scheduling, block),
- ?line Suspendee = spawn_link(fun () ->
+ erlang:system_flag(multi_scheduling, block),
+ Suspendee = spawn_link(fun () ->
Tester ! {suspend_me, self()},
Tester ! {self(),
done,
hd(Processes())},
receive after infinity -> ok end
end),
- ?line receive {suspend_me, Suspendee} -> ok end,
- ?line erlang:suspend_process(Suspendee),
- ?line erlang:system_flag(multi_scheduling, unblock),
+ receive {suspend_me, Suspendee} -> ok end,
+ erlang:suspend_process(Suspendee),
+ erlang:system_flag(multi_scheduling, unblock),
- ?line [{status,suspended}, {current_function,{erlang,ptab_list_continue,2}}]
+ [{status,suspended}, {current_function,{erlang,ptab_list_continue,2}}]
= process_info(Suspendee, [status, current_function]),
- ?line erlang:garbage_collect(Suspendee),
- ?line erlang:garbage_collect(Suspendee),
+ erlang:garbage_collect(Suspendee),
+ erlang:garbage_collect(Suspendee),
- ?line erlang:resume_process(Suspendee),
- ?line receive {Suspendee, done, _} -> ok end,
- ?line erlang:garbage_collect(Suspendee),
- ?line erlang:garbage_collect(Suspendee),
+ erlang:resume_process(Suspendee),
+ receive {Suspendee, done, _} -> ok end,
+ erlang:garbage_collect(Suspendee),
+ erlang:garbage_collect(Suspendee),
- ?line unlink(Suspendee),
- ?line exit(Suspendee, bang),
- ?line ok.
+ unlink(Suspendee),
+ exit(Suspendee, bang),
+ ok.
process_flag_heap_size(doc) ->
[];
process_flag_heap_size(suite) ->
[];
process_flag_heap_size(Config) when is_list(Config) ->
- HSize = 2584, % must be gc fib number
- VHSize = 317811, % must be gc fib number
- ?line OldHmin = erlang:process_flag(min_heap_size, HSize),
- ?line {min_heap_size, HSize} = erlang:process_info(self(), min_heap_size),
- ?line OldVHmin = erlang:process_flag(min_bin_vheap_size, VHSize),
- ?line {min_bin_vheap_size, VHSize} = erlang:process_info(self(), min_bin_vheap_size),
- ?line HSize = erlang:process_flag(min_heap_size, OldHmin),
- ?line VHSize = erlang:process_flag(min_bin_vheap_size, OldVHmin),
- ?line ok.
+ HSize = 2586, % must be gc fib+ number
+ VHSize = 318187, % must be gc fib+ number
+ OldHmin = erlang:process_flag(min_heap_size, HSize),
+ {min_heap_size, HSize} = erlang:process_info(self(), min_heap_size),
+ OldVHmin = erlang:process_flag(min_bin_vheap_size, VHSize),
+ {min_bin_vheap_size, VHSize} = erlang:process_info(self(), min_bin_vheap_size),
+ HSize = erlang:process_flag(min_heap_size, OldHmin),
+ VHSize = erlang:process_flag(min_bin_vheap_size, OldVHmin),
+ ok.
spawn_opt_heap_size(doc) ->
[];
spawn_opt_heap_size(suite) ->
[];
spawn_opt_heap_size(Config) when is_list(Config) ->
- HSize = 987, % must be gc fib number
- VHSize = 46368, % must be gc fib number
- ?line Pid = spawn_opt(fun () -> receive stop -> ok end end,
+ HSize = 987, % must be gc fib+ number
+ VHSize = 46422, % must be gc fib+ number
+ Pid = spawn_opt(fun () -> receive stop -> ok end end,
[{min_heap_size, HSize},{ min_bin_vheap_size, VHSize}]),
- ?line {min_heap_size, HSize} = process_info(Pid, min_heap_size),
- ?line {min_bin_vheap_size, VHSize} = process_info(Pid, min_bin_vheap_size),
- ?line Pid ! stop,
- ?line ok.
+ {min_heap_size, HSize} = process_info(Pid, min_heap_size),
+ {min_bin_vheap_size, VHSize} = process_info(Pid, min_bin_vheap_size),
+ Pid ! stop,
+ ok.
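
A hedged sketch (heap_size_sketch/0 assumed; the sizes are the same heap-sequence values as above) of setting per-process heap limits at spawn time and reading them back:

    heap_size_sketch() ->
        Pid = spawn_opt(fun () -> receive stop -> ok end end,
                        [{min_heap_size, 987}, {min_bin_vheap_size, 46422}]),
        {min_heap_size, 987} = process_info(Pid, min_heap_size),
        {min_bin_vheap_size, 46422} = process_info(Pid, min_bin_vheap_size),
        Pid ! stop,
        ok.
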
processes_term_proc_list(doc) ->
[];
processes_term_proc_list(suite) ->
[];
processes_term_proc_list(Config) when is_list(Config) ->
- ?line Tester = self(),
- ?line as_expected = processes_term_proc_list_test(false),
- ?line {ok, Node} = start_node(Config, "+Mis true"),
- ?line RT = spawn_link(Node,
- fun () ->
- receive after 1000 -> ok end,
- processes_term_proc_list_test(false),
- Tester ! {it_worked, self()}
- end),
- ?line receive {it_worked, RT} -> ok end,
- ?line stop_node(Node),
- ?line ok.
+ Tester = self(),
+ as_expected = processes_term_proc_list_test(false),
+ {ok, Node} = start_node(Config, "+Mis true"),
+ RT = spawn_link(Node, fun () ->
+ receive after 1000 -> ok end,
+ processes_term_proc_list_test(false),
+ Tester ! {it_worked, self()}
+ end),
+ receive {it_worked, RT} -> ok end,
+ stop_node(Node),
+ ok.
-define(CHK_TERM_PROC_LIST(MC, XB),
chk_term_proc_list(?LINE, MC, XB)).
@@ -1997,35 +1953,34 @@ chk_term_proc_list(Line, MustChk, ExpectBlks) ->
ok.
processes_term_proc_list_test(MustChk) ->
- ?line Tester = self(),
- ?line enable_internal_state(),
- ?line PBInfo = erts_debug:get_internal_state(processes_bif_info),
- ?line print_processes_bif_info(PBInfo),
- ?line WantReds = PBInfo#ptab_list_bif_info.min_start_reds + 10,
- ?line #ptab_list_bif_info{tab_chunks = Chunks,
- tab_chunks_size = ChunksSize,
- tab_indices_per_red = IndiciesPerRed
- } = PBInfo,
- ?line true = Chunks > 1,
- ?line true = Chunks*ChunksSize >= IndiciesPerRed*WantReds,
- ?line Processes = fun () ->
- erts_debug:set_internal_state(reds_left,
- WantReds),
- processes()
- end,
- ?line Exit = fun (P) ->
- unlink(P),
- exit(P, bang),
- wait_until(
- fun () ->
- not lists:member(
- P,
- erts_debug:get_internal_state(
- processes))
- end)
- end,
- ?line SpawnSuspendProcessesProc
- = fun () ->
+ Tester = self(),
+ enable_internal_state(),
+ PBInfo = erts_debug:get_internal_state(processes_bif_info),
+ print_processes_bif_info(PBInfo),
+ WantReds = PBInfo#ptab_list_bif_info.min_start_reds + 10,
+ #ptab_list_bif_info{tab_chunks = Chunks,
+ tab_chunks_size = ChunksSize,
+ tab_indices_per_red = IndiciesPerRed
+ } = PBInfo,
+ true = Chunks > 1,
+ true = Chunks*ChunksSize >= IndiciesPerRed*WantReds,
+ Processes = fun () ->
+ erts_debug:set_internal_state(reds_left,
+ WantReds),
+ processes()
+ end,
+ Exit = fun (P) ->
+ unlink(P),
+ exit(P, bang),
+ wait_until(
+ fun () ->
+ not lists:member(
+ P,
+ erts_debug:get_internal_state(
+ processes))
+ end)
+ end,
+ SpawnSuspendProcessesProc = fun () ->
erlang:system_flag(multi_scheduling, block),
P = spawn_link(fun () ->
Tester ! {suspend_me, self()},
@@ -2042,72 +1997,72 @@ processes_term_proc_list_test(MustChk) ->
= process_info(P, [status, current_function]),
P
end,
- ?line ResumeProcessesProc = fun (P) ->
+ ResumeProcessesProc = fun (P) ->
erlang:resume_process(P),
receive {P, done, _} -> ok end
end,
- ?line ?CHK_TERM_PROC_LIST(MustChk, 0),
- ?line HangAround = fun () -> receive after infinity -> ok end end,
- ?line HA1 = spawn_link(HangAround),
- ?line HA2 = spawn_link(HangAround),
- ?line HA3 = spawn_link(HangAround),
- ?line S1 = SpawnSuspendProcessesProc(),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 1),
- ?line Exit(HA1),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 2),
- ?line S2 = SpawnSuspendProcessesProc(),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 3),
- ?line S3 = SpawnSuspendProcessesProc(),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 4),
- ?line Exit(HA2),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 5),
- ?line S4 = SpawnSuspendProcessesProc(),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 6),
- ?line Exit(HA3),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 7),
- ?line ResumeProcessesProc(S1),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 5),
- ?line ResumeProcessesProc(S3),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 4),
- ?line ResumeProcessesProc(S4),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 3),
- ?line ResumeProcessesProc(S2),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 0),
- ?line Exit(S1),
- ?line Exit(S2),
- ?line Exit(S3),
- ?line Exit(S4),
-
-
- ?line HA4 = spawn_link(HangAround),
- ?line HA5 = spawn_link(HangAround),
- ?line HA6 = spawn_link(HangAround),
- ?line S5 = SpawnSuspendProcessesProc(),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 1),
- ?line Exit(HA4),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 2),
- ?line S6 = SpawnSuspendProcessesProc(),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 3),
- ?line Exit(HA5),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 4),
- ?line S7 = SpawnSuspendProcessesProc(),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 5),
- ?line Exit(HA6),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 6),
- ?line S8 = SpawnSuspendProcessesProc(),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 7),
-
- ?line erlang:system_flag(multi_scheduling, block),
- ?line Exit(S8),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 7),
- ?line Exit(S5),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 6),
- ?line Exit(S7),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 6),
- ?line Exit(S6),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 0),
- ?line erlang:system_flag(multi_scheduling, unblock),
- ?line as_expected.
+ ?CHK_TERM_PROC_LIST(MustChk, 0),
+ HangAround = fun () -> receive after infinity -> ok end end,
+ HA1 = spawn_link(HangAround),
+ HA2 = spawn_link(HangAround),
+ HA3 = spawn_link(HangAround),
+ S1 = SpawnSuspendProcessesProc(),
+ ?CHK_TERM_PROC_LIST(MustChk, 1),
+ Exit(HA1),
+ ?CHK_TERM_PROC_LIST(MustChk, 2),
+ S2 = SpawnSuspendProcessesProc(),
+ ?CHK_TERM_PROC_LIST(MustChk, 3),
+ S3 = SpawnSuspendProcessesProc(),
+ ?CHK_TERM_PROC_LIST(MustChk, 4),
+ Exit(HA2),
+ ?CHK_TERM_PROC_LIST(MustChk, 5),
+ S4 = SpawnSuspendProcessesProc(),
+ ?CHK_TERM_PROC_LIST(MustChk, 6),
+ Exit(HA3),
+ ?CHK_TERM_PROC_LIST(MustChk, 7),
+ ResumeProcessesProc(S1),
+ ?CHK_TERM_PROC_LIST(MustChk, 5),
+ ResumeProcessesProc(S3),
+ ?CHK_TERM_PROC_LIST(MustChk, 4),
+ ResumeProcessesProc(S4),
+ ?CHK_TERM_PROC_LIST(MustChk, 3),
+ ResumeProcessesProc(S2),
+ ?CHK_TERM_PROC_LIST(MustChk, 0),
+ Exit(S1),
+ Exit(S2),
+ Exit(S3),
+ Exit(S4),
+
+
+ HA4 = spawn_link(HangAround),
+ HA5 = spawn_link(HangAround),
+ HA6 = spawn_link(HangAround),
+ S5 = SpawnSuspendProcessesProc(),
+ ?CHK_TERM_PROC_LIST(MustChk, 1),
+ Exit(HA4),
+ ?CHK_TERM_PROC_LIST(MustChk, 2),
+ S6 = SpawnSuspendProcessesProc(),
+ ?CHK_TERM_PROC_LIST(MustChk, 3),
+ Exit(HA5),
+ ?CHK_TERM_PROC_LIST(MustChk, 4),
+ S7 = SpawnSuspendProcessesProc(),
+ ?CHK_TERM_PROC_LIST(MustChk, 5),
+ Exit(HA6),
+ ?CHK_TERM_PROC_LIST(MustChk, 6),
+ S8 = SpawnSuspendProcessesProc(),
+ ?CHK_TERM_PROC_LIST(MustChk, 7),
+
+ erlang:system_flag(multi_scheduling, block),
+ Exit(S8),
+ ?CHK_TERM_PROC_LIST(MustChk, 7),
+ Exit(S5),
+ ?CHK_TERM_PROC_LIST(MustChk, 6),
+ Exit(S7),
+ ?CHK_TERM_PROC_LIST(MustChk, 6),
+ Exit(S6),
+ ?CHK_TERM_PROC_LIST(MustChk, 0),
+ erlang:system_flag(multi_scheduling, unblock),
+ as_expected.
otp_7738_waiting(doc) ->
@@ -2115,88 +2070,88 @@ otp_7738_waiting(doc) ->
otp_7738_waiting(suite) ->
[];
otp_7738_waiting(Config) when is_list(Config) ->
- ?line otp_7738_test(waiting).
+ otp_7738_test(waiting).
otp_7738_suspended(doc) ->
[];
otp_7738_suspended(suite) ->
[];
otp_7738_suspended(Config) when is_list(Config) ->
- ?line otp_7738_test(suspended).
+ otp_7738_test(suspended).
otp_7738_resume(doc) ->
[];
otp_7738_resume(suite) ->
[];
otp_7738_resume(Config) when is_list(Config) ->
- ?line otp_7738_test(resume).
+ otp_7738_test(resume).
otp_7738_test(Type) ->
- ?line T = self(),
- ?line S = spawn_link(fun () ->
- receive
- {suspend, Suspendee} ->
- erlang:suspend_process(Suspendee),
- T ! {suspended, Suspendee},
- receive
- after 10 ->
- erlang:resume_process(Suspendee),
- Suspendee ! wake_up
- end;
- {send, To, Msg} ->
- receive after 10 -> ok end,
- To ! Msg
- end
- end),
- ?line R = spawn_link(fun () ->
- X = lists:seq(1, 20000000),
- T ! {initialized, self()},
- ?line case Type of
- _ when Type == suspended;
- Type == waiting ->
- receive _ -> ok end;
- _ when Type == resume ->
- Receive = fun (F) ->
- receive
- _ ->
- ok
- after 0 ->
- F(F)
- end
- end,
- Receive(Receive)
- end,
- T ! {woke_up, self()},
- id(X)
- end),
- ?line receive {initialized, R} -> ok end,
- ?line receive after 10 -> ok end,
- ?line case Type of
+ T = self(),
+ S = spawn_link(fun () ->
+ receive
+ {suspend, Suspendee} ->
+ erlang:suspend_process(Suspendee),
+ T ! {suspended, Suspendee},
+ receive
+ after 10 ->
+ erlang:resume_process(Suspendee),
+ Suspendee ! wake_up
+ end;
+ {send, To, Msg} ->
+ receive after 10 -> ok end,
+ To ! Msg
+ end
+ end),
+ R = spawn_link(fun () ->
+ X = lists:seq(1, 20000000),
+ T ! {initialized, self()},
+ case Type of
+ _ when Type == suspended;
+ Type == waiting ->
+ receive _ -> ok end;
+ _ when Type == resume ->
+ Receive = fun (F) ->
+ receive
+ _ ->
+ ok
+ after 0 ->
+ F(F)
+ end
+ end,
+ Receive(Receive)
+ end,
+ T ! {woke_up, self()},
+ id(X)
+ end),
+ receive {initialized, R} -> ok end,
+ receive after 10 -> ok end,
+ case Type of
suspended ->
- ?line erlang:suspend_process(R),
- ?line S ! {send, R, wake_up};
+ erlang:suspend_process(R),
+ S ! {send, R, wake_up};
waiting ->
- ?line S ! {send, R, wake_up};
+ S ! {send, R, wake_up};
resume ->
- ?line S ! {suspend, R},
- ?line receive {suspended, R} -> ok end
+ S ! {suspend, R},
+ receive {suspended, R} -> ok end
end,
- ?line erlang:garbage_collect(R),
- ?line case Type of
+ erlang:garbage_collect(R),
+ case Type of
suspended ->
- ?line erlang:resume_process(R);
+ erlang:resume_process(R);
_ ->
- ?line ok
+ ok
end,
- ?line receive
+ receive
{woke_up, R} ->
- ?line ok
+ ok
after 2000 ->
- ?line I = process_info(R, [status, message_queue_len]),
- ?line ?t:format("~p~n", [I]),
- ?line ?t:fail(no_progress)
+ I = process_info(R, [status, message_queue_len]),
+ ?t:format("~p~n", [I]),
+ ?t:fail(no_progress)
end,
- ?line ok.
+ ok.
gor(Reds, Stop) ->
receive
@@ -2210,28 +2165,28 @@ gor(Reds, Stop) ->
end.
garb_other_running(Config) when is_list(Config) ->
- ?line Stop = make_ref(),
- ?line {Pid, Mon} = spawn_monitor(fun () -> gor(0, Stop) end),
- ?line Reds = lists:foldl(fun (_, OldReds) ->
- ?line erlang:garbage_collect(Pid),
- ?line receive after 1 -> ok end,
- ?line Pid ! {self(), reds},
- ?line receive
+ Stop = make_ref(),
+ {Pid, Mon} = spawn_monitor(fun () -> gor(0, Stop) end),
+ Reds = lists:foldl(fun (_, OldReds) ->
+ erlang:garbage_collect(Pid),
+ receive after 1 -> ok end,
+ Pid ! {self(), reds},
+ receive
{reds, NewReds, Pid} ->
- ?line true = (NewReds > OldReds),
- ?line NewReds
+ true = (NewReds > OldReds),
+ NewReds
end
end,
0,
lists:seq(1, 10000)),
- ?line receive after 1 -> ok end,
- ?line Pid ! {self(), Stop},
- ?line receive
+ receive after 1 -> ok end,
+ Pid ! {self(), Stop},
+ receive
{stopped, Stop, StopReds, Pid} ->
- ?line true = (StopReds > Reds)
+ true = (StopReds > Reds)
end,
- ?line receive {'DOWN', Mon, process, Pid, normal} -> ok end,
- ?line ok.
+ receive {'DOWN', Mon, process, Pid, normal} -> ok end,
+ ok.
%% Internal functions
@@ -2255,9 +2210,9 @@ start_node(Config) ->
start_node(Config, "").
start_node(Config, Args) when is_list(Config) ->
- ?line Pa = filename:dirname(code:which(?MODULE)),
- ?line {A, B, C} = now(),
- ?line Name = list_to_atom(atom_to_list(?MODULE)
+ Pa = filename:dirname(code:which(?MODULE)),
+ {A, B, C} = now(),
+ Name = list_to_atom(atom_to_list(?MODULE)
++ "-"
++ atom_to_list(?config(testcase, Config))
++ "-"
@@ -2266,7 +2221,7 @@ start_node(Config, Args) when is_list(Config) ->
++ integer_to_list(B)
++ "-"
++ integer_to_list(C)),
- ?line ?t:start_node(Name, slave, [{args, "-pa "++Pa++" "++Args}]).
+ ?t:start_node(Name, slave, [{args, "-pa "++Pa++" "++Args}]).
stop_node(Node) ->
?t:stop_node(Node).