Diffstat (limited to 'erts/emulator')
40 files changed, 1169 insertions, 725 deletions
| diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c index 9633de2021..b33aab7eee 100644 --- a/erts/emulator/beam/beam_debug.c +++ b/erts/emulator/beam/beam_debug.c @@ -786,8 +786,8 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr)  	    }  	}  	break; -    case op_i_put_tuple_xI: -    case op_i_put_tuple_yI: +    case op_put_tuple2_xI: +    case op_put_tuple2_yI:      case op_new_map_dtI:      case op_update_map_assoc_sdtI:      case op_update_map_exact_jsdtI: diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c index ab5920a67e..aa61a2d7f9 100644 --- a/erts/emulator/beam/beam_emu.c +++ b/erts/emulator/beam/beam_emu.c @@ -3061,12 +3061,14 @@ erts_gc_update_map_exact(Process* p, Eterm* reg, Uint live, Uint n, Eterm* new_p      Uint need;      flatmap_t *old_mp, *mp;      Eterm res; +    Eterm* old_hp;      Eterm* hp;      Eterm* E;      Eterm* old_keys;      Eterm* old_vals;      Eterm new_key;      Eterm map; +    int changed = 0;      n /= 2;		/* Number of values to be updated */      ASSERT(n > 0); @@ -3133,6 +3135,7 @@ erts_gc_update_map_exact(Process* p, Eterm* reg, Uint live, Uint n, Eterm* new_p       * Update map, keeping the old key tuple.       */ +    old_hp = p->htop;      hp = p->htop;      E = p->stop; @@ -3155,20 +3158,26 @@ erts_gc_update_map_exact(Process* p, Eterm* reg, Uint live, Uint n, Eterm* new_p  	    /* Not same keys */  	    *hp++ = *old_vals;  	} else { -	    GET_TERM(new_p[1], *hp); -	    hp++; -	    n--; +            GET_TERM(new_p[1], *hp); +            if(*hp != *old_vals) changed = 1; +            hp++; +            n--;  	    if (n == 0) { -		/* -		 * All updates done. Copy remaining values -		 * and return the result. -		 */ -		for (i++, old_vals++; i < num_old; i++) { -		    *hp++ = *old_vals++; -		} -		ASSERT(hp == p->htop + need); -		p->htop = hp; -		return res; +                /* +                * All updates done. Copy remaining values +                * if any changed or return the original one. +                */ +                if(changed) { +		    for (i++, old_vals++; i < num_old; i++) { +		        *hp++ = *old_vals++; +		    } +		    ASSERT(hp == p->htop + need); +		    p->htop = hp; +		    return res; +                } else { +                    p->htop = old_hp; +                    return map; +                }  	    } else {  		new_p += 2;  		GET_TERM(*new_p, new_key); diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c index e61199a8fd..50cbb37f3e 100644 --- a/erts/emulator/beam/beam_load.c +++ b/erts/emulator/beam/beam_load.c @@ -4245,21 +4245,55 @@ gen_make_fun2(LoaderState* stp, GenOpArg idx)  {      ErlFunEntry* fe;      GenOp* op; +    Uint arity, num_free;      if (idx.val >= stp->num_lambdas) { -	stp->lambda_error = "missing or short chunk 'FunT'"; -	fe = 0; +        stp->lambda_error = "missing or short chunk 'FunT'"; +        fe = 0; +        num_free = 0; +        arity = 0;      } else { -	fe = stp->lambdas[idx.val].fe; +        fe = stp->lambdas[idx.val].fe; +        num_free = stp->lambdas[idx.val].num_free; +        arity = fe->arity;      }      NEW_GENOP(stp, op); -    op->op = genop_i_make_fun_2; -    op->arity = 2; -    op->a[0].type = TAG_u; -    op->a[0].val = (BeamInstr) fe; -    op->a[1].type = TAG_u; -    op->a[1].val = stp->lambdas[idx.val].num_free; + +    /* +     * It's possible this is called before init process is started, +     * skip the optimisation in such case. 
+     */ +    if (num_free == 0 && erts_init_process_id != ERTS_INVALID_PID) { +        Uint lit; +        Eterm* hp; +        ErlFunThing* funp; + +        lit = new_literal(stp, &hp, ERL_FUN_SIZE); +        funp = (ErlFunThing *) hp; +        erts_refc_inc(&fe->refc, 2); +        funp->thing_word = HEADER_FUN; +        funp->next = NULL; +        funp->fe = fe; +        funp->num_free = 0; +        funp->creator = erts_init_process_id; +        funp->arity = arity; + +        op->op = genop_move_2; +        op->arity = 2; +        op->a[0].type = TAG_q; +        op->a[0].val = lit; +        op->a[1].type = TAG_x; +        op->a[1].val = 0; +    } else { +        op->op = genop_i_make_fun_2; +        op->arity = 2; +        op->a[0].type = TAG_u; +        op->a[0].val = (BeamInstr) fe; +        op->a[1].type = TAG_u; +        op->a[1].val = num_free; +    } +      op->next = NULL;      return op;  } diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c index f18af8bcd7..56ac072449 100644 --- a/erts/emulator/beam/bif.c +++ b/erts/emulator/beam/bif.c @@ -1803,6 +1803,7 @@ ebif_bang_2(BIF_ALIST_2)  #define SEND_INTERNAL_ERROR	(-6)  #define SEND_AWAIT_RESULT	(-7)  #define SEND_YIELD_CONTINUE     (-8) +#define SEND_SYSTEM_LIMIT		(-9)  static Sint remote_send(Process *p, DistEntry *dep, @@ -1842,6 +1843,8 @@ static Sint remote_send(Process *p, DistEntry *dep,  	    res = SEND_YIELD_RETURN;  	else if (code == ERTS_DSIG_SEND_CONTINUE)  	    res = SEND_YIELD_CONTINUE; +	else if (code == ERTS_DSIG_SEND_TOO_LRG) +	    res = SEND_SYSTEM_LIMIT;  	else  	    res = 0;  	break; @@ -2162,6 +2165,9 @@ BIF_RETTYPE send_3(BIF_ALIST_3)      case SEND_BADARG:  	ERTS_BIF_PREP_ERROR(retval, p, BADARG);  	break; +    case SEND_SYSTEM_LIMIT: +	ERTS_BIF_PREP_ERROR(retval, p, SYSTEM_LIMIT); +	break;      case SEND_USER_ERROR:  	ERTS_BIF_PREP_ERROR(retval, p, EXC_ERROR);  	break; @@ -2218,6 +2224,10 @@ static BIF_RETTYPE dsend_continue_trap_1(BIF_ALIST_1)  	BUMP_ALL_REDS(BIF_P);  	BIF_TRAP1(&dsend_continue_trap_export, BIF_P, BIF_ARG_1);      } +    case ERTS_DSIG_SEND_TOO_LRG: { /*SEND_SYSTEM_LIMIT*/ +	erts_set_gc_state(BIF_P, 1); +	BIF_ERROR(BIF_P, SYSTEM_LIMIT); +    }      default:  	erts_exit(ERTS_ABORT_EXIT, "dsend_continue_trap invalid result %d\n", (int)result);  	break; @@ -2275,6 +2285,9 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg)      case SEND_BADARG:  	ERTS_BIF_PREP_ERROR(retval, p, BADARG);  	break; +    case SEND_SYSTEM_LIMIT: +	ERTS_BIF_PREP_ERROR(retval, p, SYSTEM_LIMIT); +	break;      case SEND_USER_ERROR:  	ERTS_BIF_PREP_ERROR(retval, p, EXC_ERROR);  	break; diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab index 7548924178..a770524221 100644 --- a/erts/emulator/beam/bif.tab +++ b/erts/emulator/beam/bif.tab @@ -629,7 +629,6 @@ bif maps:from_list/1  bif maps:is_key/2  bif maps:keys/1  bif maps:merge/2 -bif maps:new/0  bif maps:put/3  bif maps:remove/2  bif maps:update/3 diff --git a/erts/emulator/beam/big.h b/erts/emulator/beam/big.h index 7556205063..a1ad75708c 100644 --- a/erts/emulator/beam/big.h +++ b/erts/emulator/beam/big.h @@ -42,7 +42,7 @@ typedef Uint16   ErtsHalfDigit;  #undef  BIG_HAVE_DOUBLE_DIGIT  typedef Uint32   ErtsHalfDigit;  #else -#error "can not determine machine size" +#error "cannot determine machine size"  #endif  typedef Uint  dsize_t;	 /* Vector size type */ diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c index 9ff52c92b8..81531f6cc8 100644 --- a/erts/emulator/beam/break.c +++ b/erts/emulator/beam/break.c @@ -108,6 
+108,7 @@ process_killer(void)  		    erts_exit(0, "");  		switch(j) {  		case 'k': +                    ASSERT(erts_init_process_id != ERTS_INVALID_PID);                      /* Send a 'kill' exit signal from init process */                      erts_proc_sig_send_exit(NULL, erts_init_process_id,                                              rp->common.id, am_kill, NIL, diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c index ceb89a6910..e7c0ee65dc 100644 --- a/erts/emulator/beam/dist.c +++ b/erts/emulator/beam/dist.c @@ -1924,6 +1924,12 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx)  	    ASSERT(ctx->obuf->ext_endp <= &ctx->obuf->data[0] + ctx->data_size);  	    ctx->data_size = ctx->obuf->ext_endp - ctx->obuf->extp; +	    if (ctx->data_size > (Uint) INT_MAX) { +		free_dist_obuf(ctx->obuf); +                ctx->obuf = NULL; +		retval = ERTS_DSIG_SEND_TOO_LRG; +		goto done; +	    }              ctx->obuf->hopefull_flags = ctx->u.ec.hopefull_flags;  	    /* diff --git a/erts/emulator/beam/dist.h b/erts/emulator/beam/dist.h index 30b4b35c20..c8d0407456 100644 --- a/erts/emulator/beam/dist.h +++ b/erts/emulator/beam/dist.h @@ -376,6 +376,7 @@ typedef struct {  #define ERTS_DSIG_SEND_OK	0  #define ERTS_DSIG_SEND_YIELD	1  #define ERTS_DSIG_SEND_CONTINUE 2 +#define ERTS_DSIG_SEND_TOO_LRG  3  extern int erts_dsig_send_link(ErtsDSigData *, Eterm, Eterm);  extern int erts_dsig_send_msg(Eterm, Eterm, ErtsSendContext*); diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c index 8fe1ccb758..36c46fd7aa 100644 --- a/erts/emulator/beam/erl_alloc.c +++ b/erts/emulator/beam/erl_alloc.c @@ -64,9 +64,6 @@  #  error "Too many schedulers; cannot create that many pref alloc instances"  #endif -#define ERTS_ALC_FIX_TYPE_IX(T) \ -  (ERTS_ALC_T2N((T)) - ERTS_ALC_N_MIN_A_FIXED_SIZE) -  #define ERTS_ALC_DEFAULT_MAX_THR_PREF ERTS_MAX_NO_OF_SCHEDULERS  #if defined(SMALL_MEMORY) || defined(PURIFY) || defined(VALGRIND) @@ -156,20 +153,13 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(aireq,  ErtsAlcType_t erts_fix_core_allocator_ix; -enum allctr_type { -    GOODFIT, -    BESTFIT, -    AFIT, -    FIRSTFIT -}; -  struct au_init {      int enable;      int thr_spec;      int disable_allowed;      int thr_spec_allowed;      int carrier_migration_allowed; -    enum allctr_type	atype; +    ErtsAlcStrat_t	astrat;      struct {  	AllctrInit_t	util;  	GFAllctrInit_t	gf; @@ -219,7 +209,9 @@ typedef struct {      struct au_init test_alloc;  } erts_alc_hndl_args_init_t; -#define ERTS_AU_INIT__ {0, 0, 1, 1, 1, GOODFIT, DEFAULT_ALLCTR_INIT, {1,1,1,1}} +#define ERTS_AU_INIT__ {0, 0, 1, 1, 1, \ +                        ERTS_ALC_S_GOODFIT, DEFAULT_ALLCTR_INIT, \ +                        {1,1,1,1}}  #define SET_DEFAULT_ALLOC_OPTS(IP)					\  do {									\ @@ -233,7 +225,7 @@ set_default_sl_alloc_opts(struct au_init *ip)      SET_DEFAULT_ALLOC_OPTS(ip);      ip->enable			= AU_ALLOC_DEFAULT_ENABLE(1);      ip->thr_spec		= 1; -    ip->atype			= GOODFIT; +    ip->astrat			= ERTS_ALC_S_GOODFIT;      ip->init.util.name_prefix	= "sl_";      ip->init.util.alloc_no	= ERTS_ALC_A_SHORT_LIVED;  #ifndef SMALL_MEMORY @@ -252,7 +244,7 @@ set_default_std_alloc_opts(struct au_init *ip)      SET_DEFAULT_ALLOC_OPTS(ip);      ip->enable			= AU_ALLOC_DEFAULT_ENABLE(1);      ip->thr_spec		= 1; -    ip->atype			= BESTFIT; +    ip->astrat			= ERTS_ALC_S_BESTFIT;      ip->init.util.name_prefix	= "std_";      ip->init.util.alloc_no	= ERTS_ALC_A_STANDARD;  #ifndef SMALL_MEMORY @@ -270,7 +262,7 @@ 
set_default_ll_alloc_opts(struct au_init *ip)      SET_DEFAULT_ALLOC_OPTS(ip);      ip->enable			= AU_ALLOC_DEFAULT_ENABLE(1);      ip->thr_spec		= 0; -    ip->atype			= BESTFIT; +    ip->astrat			= ERTS_ALC_S_BESTFIT;      ip->init.bf.ao		= 1;      ip->init.util.ramv		= 0;      ip->init.util.mmsbc		= 0; @@ -299,7 +291,7 @@ set_default_literal_alloc_opts(struct au_init *ip)      ip->disable_allowed         = 0;      ip->thr_spec_allowed        = 0;      ip->carrier_migration_allowed = 0; -    ip->atype			= BESTFIT; +    ip->astrat			= ERTS_ALC_S_BESTFIT;      ip->init.bf.ao		= 1;      ip->init.util.ramv		= 0;      ip->init.util.mmsbc		= 0; @@ -349,7 +341,7 @@ set_default_exec_alloc_opts(struct au_init *ip)      ip->disable_allowed         = 0;      ip->thr_spec_allowed        = 0;      ip->carrier_migration_allowed = 0; -    ip->atype			= BESTFIT; +    ip->astrat			= ERTS_ALC_S_BESTFIT;      ip->init.bf.ao		= 1;      ip->init.util.ramv		= 0;      ip->init.util.mmsbc		= 0; @@ -378,7 +370,7 @@ set_default_temp_alloc_opts(struct au_init *ip)      ip->thr_spec		= 1;      ip->disable_allowed         = 0;      ip->carrier_migration_allowed = 0; -    ip->atype			= AFIT; +    ip->astrat			= ERTS_ALC_S_AFIT;      ip->init.util.name_prefix	= "temp_";      ip->init.util.alloc_no	= ERTS_ALC_A_TEMPORARY;  #ifndef SMALL_MEMORY @@ -397,7 +389,7 @@ set_default_eheap_alloc_opts(struct au_init *ip)      SET_DEFAULT_ALLOC_OPTS(ip);      ip->enable			= AU_ALLOC_DEFAULT_ENABLE(1);      ip->thr_spec		= 1; -    ip->atype			= GOODFIT; +    ip->astrat			= ERTS_ALC_S_GOODFIT;      ip->init.util.name_prefix	= "eheap_";      ip->init.util.alloc_no	= ERTS_ALC_A_EHEAP;  #ifndef SMALL_MEMORY @@ -416,7 +408,7 @@ set_default_binary_alloc_opts(struct au_init *ip)      SET_DEFAULT_ALLOC_OPTS(ip);      ip->enable			= AU_ALLOC_DEFAULT_ENABLE(1);      ip->thr_spec		= 1; -    ip->atype			= BESTFIT; +    ip->astrat			= ERTS_ALC_S_BESTFIT;      ip->init.util.name_prefix	= "binary_";      ip->init.util.alloc_no	= ERTS_ALC_A_BINARY;  #ifndef SMALL_MEMORY @@ -435,7 +427,7 @@ set_default_ets_alloc_opts(struct au_init *ip)      SET_DEFAULT_ALLOC_OPTS(ip);      ip->enable			= AU_ALLOC_DEFAULT_ENABLE(1);      ip->thr_spec		= 1; -    ip->atype			= BESTFIT; +    ip->astrat			= ERTS_ALC_S_BESTFIT;      ip->init.util.name_prefix	= "ets_";      ip->init.util.alloc_no	= ERTS_ALC_A_ETS;  #ifndef SMALL_MEMORY @@ -453,7 +445,7 @@ set_default_driver_alloc_opts(struct au_init *ip)      SET_DEFAULT_ALLOC_OPTS(ip);      ip->enable			= AU_ALLOC_DEFAULT_ENABLE(1);      ip->thr_spec		= 1; -    ip->atype			= BESTFIT; +    ip->astrat			= ERTS_ALC_S_BESTFIT;      ip->init.util.name_prefix	= "driver_";      ip->init.util.alloc_no	= ERTS_ALC_A_DRIVER;  #ifndef SMALL_MEMORY @@ -473,7 +465,7 @@ set_default_fix_alloc_opts(struct au_init *ip,      SET_DEFAULT_ALLOC_OPTS(ip);      ip->enable			= AU_ALLOC_DEFAULT_ENABLE(1);      ip->thr_spec		= 1; -    ip->atype			= BESTFIT; +    ip->astrat			= ERTS_ALC_S_BESTFIT;      ip->init.bf.ao = 1;      ip->init.util.name_prefix	= "fix_";      ip->init.util.fix_type_size	= fix_type_sizes; @@ -493,7 +485,7 @@ set_default_test_alloc_opts(struct au_init *ip)      SET_DEFAULT_ALLOC_OPTS(ip);      ip->enable			= 0; /* Disabled by default */      ip->thr_spec		= -1 * erts_no_schedulers; -    ip->atype			= FIRSTFIT; +    ip->astrat			= ERTS_ALC_S_FIRSTFIT;      ip->init.aoff.crr_order     = FF_AOFF;      ip->init.aoff.blk_order     = FF_BF;      ip->init.util.name_prefix	= "test_"; @@ -552,8 +544,8 @@ 
start_au_allocator(ErtsAlcType_t alctr_n,  static void  refuse_af_strategy(struct au_init *init)  { -    if (init->atype == AFIT) -	init->atype = GOODFIT; +    if (init->astrat == ERTS_ALC_S_AFIT) +	init->astrat = ERTS_ALC_S_GOODFIT;  }  #ifdef HARD_DEBUG @@ -576,7 +568,10 @@ static void adjust_fix_alloc_sizes(UWord extra_block_size)  	    for (i=0; i < tspec->size; i++) {  		Allctr_t* allctr = tspec->allctr[i];  		for (j=0; j < ERTS_ALC_NO_FIXED_SIZES; ++j) { -		    allctr->fix[j].type_size += extra_block_size; +                    size_t size = allctr->fix[j].type_size; +                    size = MAX(size + extra_block_size, +                               sizeof(ErtsAllctrDDBlock_t)); +		    allctr->fix[j].type_size = size;  		}  	    }  	} @@ -584,8 +579,11 @@ static void adjust_fix_alloc_sizes(UWord extra_block_size)  	{  	    Allctr_t* allctr = erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra;  	    for (j=0; j < ERTS_ALC_NO_FIXED_SIZES; ++j) { -		allctr->fix[j].type_size += extra_block_size; -	    }	 +                size_t size = allctr->fix[j].type_size; +                size = MAX(size + extra_block_size, +                           sizeof(ErtsAllctrDDBlock_t)); +                allctr->fix[j].type_size = size; +	    }  	}      }  } @@ -597,7 +595,7 @@ strategy_support_carrier_migration(struct au_init *auip)       * Currently only aoff* and ageff* support carrier       * migration, i.e, type AOFIRSTFIT.       */ -    return auip->atype == FIRSTFIT; +    return auip->astrat == ERTS_ALC_S_FIRSTFIT;  }  static ERTS_INLINE void @@ -612,7 +610,7 @@ adjust_carrier_migration_support(struct au_init *auip)  	 */  	if (!strategy_support_carrier_migration(auip)) {  	    /* Default to aoffcbf */ -	    auip->atype = FIRSTFIT; +	    auip->astrat = ERTS_ALC_S_FIRSTFIT;  	    auip->init.aoff.crr_order = FF_AOFF;  	    auip->init.aoff.blk_order = FF_BF;  	} @@ -1018,7 +1016,7 @@ start_au_allocator(ErtsAlcType_t alctr_n,      int i;      int size = 1;      void *as0; -    enum allctr_type atype; +    ErtsAlcStrat_t astrat;      ErtsAllocatorFunctions_t *af = &erts_allctrs[alctr_n];      ErtsAllocatorInfo_t *ai = &erts_allctrs_info[alctr_n];      ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[alctr_n]; @@ -1077,7 +1075,7 @@ start_au_allocator(ErtsAlcType_t alctr_n,      for (i = 0; i < size; i++) {  	Allctr_t *as; -	atype = init->atype; +	astrat = init->astrat;  	if (!init->thr_spec)  	    as0 = state; @@ -1094,8 +1092,8 @@ start_au_allocator(ErtsAlcType_t alctr_n,  		if (i != 0)  		    init->init.util.ts = 0;  		else { -		    if (atype == AFIT) -			atype = GOODFIT; +		    if (astrat == ERTS_ALC_S_AFIT) +			astrat = ERTS_ALC_S_GOODFIT;  		    init->init.util.ts = 1;  		}  		init->init.util.tspec = init->thr_spec + 1; @@ -1109,25 +1107,26 @@ start_au_allocator(ErtsAlcType_t alctr_n,  			 (((char *) fix_lists) + fix_list_size));  	} +        init->init.util.alloc_strat = astrat;  	init->init.util.ix = i; -	switch (atype) { -	case GOODFIT: +	switch (astrat) { +	case ERTS_ALC_S_GOODFIT:  	    as = erts_gfalc_start((GFAllctr_t *) as0,  					   &init->init.gf,  					   &init->init.util);  	    break; -	case BESTFIT: +	case ERTS_ALC_S_BESTFIT:  	    as = erts_bfalc_start((BFAllctr_t *) as0,  					   &init->init.bf,  					   &init->init.util);  	    break; -	case AFIT: +	case ERTS_ALC_S_AFIT:  	    as = erts_afalc_start((AFAllctr_t *) as0,  					   &init->init.af,  					   &init->init.util);  	    break; -	case FIRSTFIT: +	case ERTS_ALC_S_FIRSTFIT:  	    as = erts_aoffalc_start((AOFFAllctr_t *) as0,  
					     &init->init.aoff,  					     &init->init.util); @@ -1363,51 +1362,59 @@ handle_au_arg(struct au_init *auip,  	else if(has_prefix("as", sub_param)) {  	    char *alg = get_value(sub_param + 2, argv, ip);  	    if (sys_strcmp("bf", alg) == 0) { -		auip->atype = BESTFIT; +		auip->astrat = ERTS_ALC_S_BESTFIT;  		auip->init.bf.ao = 0;  	    }  	    else if (sys_strcmp("aobf", alg) == 0) { -		auip->atype = BESTFIT; +		auip->astrat = ERTS_ALC_S_BESTFIT;  		auip->init.bf.ao = 1;  	    }  	    else if (sys_strcmp("gf", alg) == 0) { -		auip->atype = GOODFIT; +		auip->astrat = ERTS_ALC_S_GOODFIT;  	    }  	    else if (sys_strcmp("af", alg) == 0) { -		auip->atype = AFIT; +		auip->astrat = ERTS_ALC_S_AFIT;  	    }  	    else if (sys_strcmp("aoff", alg) == 0) { -		auip->atype = FIRSTFIT; +		auip->astrat = ERTS_ALC_S_FIRSTFIT;  		auip->init.aoff.crr_order = FF_AOFF;  		auip->init.aoff.blk_order = FF_AOFF;  	    }  	    else if (sys_strcmp("aoffcbf", alg) == 0) { -		auip->atype = FIRSTFIT; +		auip->astrat = ERTS_ALC_S_FIRSTFIT;  		auip->init.aoff.crr_order = FF_AOFF;  		auip->init.aoff.blk_order = FF_BF;  	    }  	    else if (sys_strcmp("aoffcaobf", alg) == 0) { -		auip->atype = FIRSTFIT; +		auip->astrat = ERTS_ALC_S_FIRSTFIT;  		auip->init.aoff.crr_order = FF_AOFF;  		auip->init.aoff.blk_order = FF_AOBF;  	    }              else if (sys_strcmp("ageffcaoff", alg) == 0) { -                auip->atype = FIRSTFIT; +                auip->astrat = ERTS_ALC_S_FIRSTFIT;  		auip->init.aoff.crr_order = FF_AGEFF;  		auip->init.aoff.blk_order = FF_AOFF;              }              else if (sys_strcmp("ageffcbf", alg) == 0) { -                auip->atype = FIRSTFIT; +                auip->astrat = ERTS_ALC_S_FIRSTFIT;  		auip->init.aoff.crr_order = FF_AGEFF;  		auip->init.aoff.blk_order = FF_BF;              }              else if (sys_strcmp("ageffcaobf", alg) == 0) { -                auip->atype = FIRSTFIT; +                auip->astrat = ERTS_ALC_S_FIRSTFIT;  		auip->init.aoff.crr_order = FF_AGEFF;  		auip->init.aoff.blk_order = FF_AOBF;              }  	    else { -		bad_value(param, sub_param + 1, alg); +                if (auip->init.util.alloc_no == ERTS_ALC_A_TEST +                    && sys_strcmp("chaosff", alg) == 0) { +                    auip->astrat = ERTS_ALC_S_FIRSTFIT; +                    auip->init.aoff.crr_order = FF_CHAOS; +                    auip->init.aoff.blk_order = FF_CHAOS; +                } +                else { +                    bad_value(param, sub_param + 1, alg); +                }  	    }  	    if (!strategy_support_carrier_migration(auip))  		auip->init.util.acul = 0; @@ -2030,33 +2037,55 @@ erts_realloc_n_enomem(ErtsAlcType_t n, void *ptr, Uint size)  }  static ERTS_INLINE UWord -alcu_size(ErtsAlcType_t ai, ErtsAlcUFixInfo_t *fi, int fisz) +alcu_size(ErtsAlcType_t alloc_no, ErtsAlcUFixInfo_t *fi, int fisz)  { -    UWord res = 0; +    UWord res; +    int ai; -    ASSERT(erts_allctrs_info[ai].enabled); -    ASSERT(erts_allctrs_info[ai].alloc_util); +    if (!erts_allctrs_info[alloc_no].thr_spec) { +        AllctrSize_t size; +        Allctr_t *allctr; -    if (!erts_allctrs_info[ai].thr_spec) { -	Allctr_t *allctr = erts_allctrs_info[ai].extra; -	AllctrSize_t asize; -	erts_alcu_current_size(allctr, &asize, fi, fisz); -	res += asize.blocks; +        allctr = erts_allctrs_info[alloc_no].extra; +        erts_alcu_current_size(allctr, &size, fi, fisz); + +        return size.blocks;      } -    else { -	ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[ai]; -	int i; 
-	ASSERT(tspec->enabled); +    res = 0; -	for (i = tspec->size - 1; i >= 0; i--) { -	    Allctr_t *allctr = tspec->allctr[i]; -	    AllctrSize_t asize; -	    if (allctr) { -		erts_alcu_current_size(allctr, &asize, fi, fisz); -		res += asize.blocks; -	    } -	} +    /* Thread-specific allocators can migrate carriers across types, so we have +     * to visit every allocator type to gather information on blocks that were +     * allocated by us. */ +    for (ai = ERTS_ALC_A_MIN; ai < ERTS_ALC_A_MAX; ai++) { +        ErtsAllocatorThrSpec_t *tspec; +        Allctr_t *allctr; +        int i; + +        if (!erts_allctrs_info[ai].thr_spec) { +            continue; +        } + +        tspec = &erts_allctr_thr_spec[ai]; +        ASSERT(tspec->enabled); + +        for (i = tspec->size - 1; i >= 0; i--) { +            allctr = tspec->allctr[i]; + +            if (allctr) { +                AllctrSize_t size; + +                if (ai == alloc_no) { +                    erts_alcu_current_size(allctr, &size, fi, fisz); +                } else { +                    erts_alcu_foreign_size(allctr, alloc_no, &size); +                } + +                ASSERT(((SWord)size.blocks) >= 0); + +                res += size.blocks; +            } +        }      }      return res; @@ -2400,6 +2429,7 @@ erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg)      }      if (want_tot_or_sys) { +        ASSERT(size.total >= size.processes);  	size.system = size.total - size.processes;      } @@ -3459,29 +3489,29 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)  	switch (op) {  	case 0xf00:  	    if (((Allctr_t *) a1)->thread_safe) -		return (UWord) erts_alcu_alloc_ts(ERTS_ALC_T_UNDEF, +		return (UWord) erts_alcu_alloc_ts(ERTS_ALC_T_TEST,  							  (void *) a1,  							  (Uint) a2);  	    else -		return (UWord) erts_alcu_alloc(ERTS_ALC_T_UNDEF, +		return (UWord) erts_alcu_alloc(ERTS_ALC_T_TEST,  						       (void *) a1,  						       (Uint) a2);  	case 0xf01:  	    if (((Allctr_t *) a1)->thread_safe) -		return (UWord) erts_alcu_realloc_ts(ERTS_ALC_T_UNDEF, +		return (UWord) erts_alcu_realloc_ts(ERTS_ALC_T_TEST,  							    (void *) a1,  							    (void *) a2,  							    (Uint) a3);  	    else -		return (UWord) erts_alcu_realloc(ERTS_ALC_T_UNDEF, +		return (UWord) erts_alcu_realloc(ERTS_ALC_T_TEST,  							 (void *) a1,  							 (void *) a2,  							 (Uint) a3);  	case 0xf02:  	    if (((Allctr_t *) a1)->thread_safe) -		erts_alcu_free_ts(ERTS_ALC_T_UNDEF, (void *) a1, (void *) a2); +		erts_alcu_free_ts(ERTS_ALC_T_TEST, (void *) a1, (void *) a2);  	    else -		erts_alcu_free(ERTS_ALC_T_UNDEF, (void *) a1, (void *) a2); +		erts_alcu_free(ERTS_ALC_T_TEST, (void *) a1, (void *) a2);  	    return 0;  	case 0xf03: {  	    Allctr_t *allctr; @@ -3489,8 +3519,10 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)  	    SET_DEFAULT_ALLOC_OPTS(&init);  	    init.enable = 1; -	    init.atype = GOODFIT; +	    init.astrat = ERTS_ALC_S_GOODFIT;  	    init.init.util.name_prefix = (char *) a1; +	    init.init.util.alloc_no = ERTS_ALC_A_TEST; +	    init.init.util.alloc_strat = init.astrat;  	    init.init.util.ts = 1;  	    if ((char **) a3) {  		char **argv = (char **) a3; @@ -3504,31 +3536,31 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)  		}  	    } -	    switch (init.atype) { -	    case GOODFIT: +	    switch (init.astrat) { +	    case ERTS_ALC_S_GOODFIT:  		allctr = erts_gfalc_start((GFAllctr_t *) -					  erts_alloc(ERTS_ALC_T_UNDEF, +					  
erts_alloc(ERTS_ALC_T_TEST,  						     sizeof(GFAllctr_t)),  					  &init.init.gf,  					  &init.init.util);  		break; -	    case BESTFIT: +	    case ERTS_ALC_S_BESTFIT:  		allctr = erts_bfalc_start((BFAllctr_t *) -					  erts_alloc(ERTS_ALC_T_UNDEF, +					  erts_alloc(ERTS_ALC_T_TEST,  						     sizeof(BFAllctr_t)),  					  &init.init.bf,  					  &init.init.util);  		break; -	    case AFIT: +	    case ERTS_ALC_S_AFIT:  		allctr = erts_afalc_start((AFAllctr_t *) -					  erts_alloc(ERTS_ALC_T_UNDEF, +					  erts_alloc(ERTS_ALC_T_TEST,  							    sizeof(AFAllctr_t)),  					  &init.init.af,  					  &init.init.util);  		break; -	    case FIRSTFIT: +	    case ERTS_ALC_S_FIRSTFIT:  		allctr = erts_aoffalc_start((AOFFAllctr_t *) -					  erts_alloc(ERTS_ALC_T_UNDEF, +					  erts_alloc(ERTS_ALC_T_TEST,  						     sizeof(AOFFAllctr_t)),  					  &init.init.aoff,  					  &init.init.util); @@ -3544,7 +3576,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)  	}  	case 0xf04:  	    erts_alcu_stop((Allctr_t *) a1); -	    erts_free(ERTS_ALC_T_UNDEF, (void *) a1); +	    erts_free(ERTS_ALC_T_TEST, (void *) a1);  	    break;  	case 0xf05: return (UWord) 1;  	case 0xf06: return (UWord) ((Allctr_t *) a1)->thread_safe; @@ -3554,7 +3586,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)  	case 0xf07: return (UWord) ((Allctr_t *) a1)->thread_safe;  #endif  	case 0xf08: { -	    ethr_mutex *mtx = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(ethr_mutex)); +	    ethr_mutex *mtx = erts_alloc(ERTS_ALC_T_TEST, sizeof(ethr_mutex));  	    if (ethr_mutex_init(mtx) != 0)  		ERTS_ALC_TEST_ABORT;  	    return (UWord) mtx; @@ -3563,7 +3595,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)  	    ethr_mutex *mtx = (ethr_mutex *) a1;  	    if (ethr_mutex_destroy(mtx) != 0)  		ERTS_ALC_TEST_ABORT; -	    erts_free(ERTS_ALC_T_UNDEF, (void *) mtx); +	    erts_free(ERTS_ALC_T_TEST, (void *) mtx);  	    break;  	}  	case 0xf0a: @@ -3573,7 +3605,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)  	    ethr_mutex_unlock((ethr_mutex *) a1);  	    break;  	case 0xf0c: { -	    ethr_cond *cnd = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(ethr_cond)); +	    ethr_cond *cnd = erts_alloc(ERTS_ALC_T_TEST, sizeof(ethr_cond));  	    if (ethr_cond_init(cnd) != 0)  		ERTS_ALC_TEST_ABORT;  	    return (UWord) cnd; @@ -3582,7 +3614,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)  	    ethr_cond *cnd = (ethr_cond *) a1;  	    if (ethr_cond_destroy(cnd) != 0)  		ERTS_ALC_TEST_ABORT; -	    erts_free(ERTS_ALC_T_UNDEF, (void *) cnd); +	    erts_free(ERTS_ALC_T_TEST, (void *) cnd);  	    break;  	}  	case 0xf0e: @@ -3596,7 +3628,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)  	    break;  	}  	case 0xf10: { -	    ethr_tid *tid = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(ethr_tid)); +	    ethr_tid *tid = erts_alloc(ERTS_ALC_T_TEST, sizeof(ethr_tid));  	    if (ethr_thr_create(tid,  				(void * (*)(void *)) a1,  				(void *) a2, @@ -3608,7 +3640,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)  	    ethr_tid *tid = (ethr_tid *) a1;  	    if (ethr_thr_join(*tid, NULL) != 0)  		ERTS_ALC_TEST_ABORT; -	    erts_free(ERTS_ALC_T_UNDEF, (void *) tid); +	    erts_free(ERTS_ALC_T_TEST, (void *) tid);  	    break;  	}  	case 0xf12: @@ -3960,9 +3992,10 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)  static ErtsAllocatorFunctions_t real_allctrs[ERTS_ALC_A_MAX+1];  static void * -debug_alloc(ErtsAlcType_t n, void *extra, Uint size) +debug_alloc(ErtsAlcType_t 
type, void *extra, Uint size)  {      ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra; +    ErtsAlcType_t n;      Uint dsize;      void *res; @@ -3970,9 +4003,11 @@ debug_alloc(ErtsAlcType_t n, void *extra, Uint size)      erts_hdbg_chk_blks();  #endif +    n = ERTS_ALC_T2N(type); +      ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);      dsize = size + FENCE_SZ; -    res = (*real_af->alloc)(n, real_af->extra, dsize); +    res = (*real_af->alloc)(type, real_af->extra, dsize);      res = set_memory_fence(res, size, n); @@ -3986,14 +4021,17 @@ debug_alloc(ErtsAlcType_t n, void *extra, Uint size)  static void * -debug_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size) +debug_realloc(ErtsAlcType_t type, void *extra, void *ptr, Uint size)  {      ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra; +    ErtsAlcType_t n;      Uint dsize;      Uint old_size;      void *dptr;      void *res; +    n = ERTS_ALC_T2N(type); +      ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);      dsize = size + FENCE_SZ; @@ -4008,7 +4046,7 @@ debug_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)  		   0xf,  		   sizeof(Uint) + old_size - size); -    res = (*real_af->realloc)(n, real_af->extra, dptr, dsize); +    res = (*real_af->realloc)(type, real_af->extra, dptr, dsize);      res = set_memory_fence(res, size, n); @@ -4021,12 +4059,16 @@ debug_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)  }  static void -debug_free(ErtsAlcType_t n, void *extra, void *ptr) +debug_free(ErtsAlcType_t type, void *extra, void *ptr)  {      ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra; +    ErtsAlcType_t n;      void *dptr;      Uint size; -    int free_pattern = n; +    int free_pattern; + +    n = ERTS_ALC_T2N(type); +    free_pattern = n;      ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX); @@ -4041,7 +4083,7 @@ debug_free(ErtsAlcType_t n, void *extra, void *ptr)  #endif      sys_memset((void *) dptr, free_pattern, size + FENCE_SZ); -    (*real_af->free)(n, real_af->extra, dptr); +    (*real_af->free)(type, real_af->extra, dptr);  #ifdef PRINT_OPS      fprintf(stderr, "free(%s, 0x%lx)\r\n", ERTS_ALC_N2TD(n), (Uint) ptr); diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h index fcb58ff58a..c13cf3f5b0 100644 --- a/erts/emulator/beam/erl_alloc.h +++ b/erts/emulator/beam/erl_alloc.h @@ -26,10 +26,23 @@  #define ERL_THR_PROGRESS_TSD_TYPE_ONLY  #include "erl_thr_progress.h"  #undef ERL_THR_PROGRESS_TSD_TYPE_ONLY -#include "erl_alloc_util.h"  #include "erl_threads.h"  #include "erl_mmap.h" +typedef enum { +    ERTS_ALC_S_INVALID = 0, + +    ERTS_ALC_S_GOODFIT, +    ERTS_ALC_S_BESTFIT, +    ERTS_ALC_S_AFIT, +    ERTS_ALC_S_FIRSTFIT, + +    ERTS_ALC_S_MIN = ERTS_ALC_S_GOODFIT, +    ERTS_ALC_S_MAX = ERTS_ALC_S_FIRSTFIT +} ErtsAlcStrat_t; + +#include "erl_alloc_util.h" +  #ifdef DEBUG  #  undef ERTS_ALC_WANT_INLINE  #  define ERTS_ALC_WANT_INLINE 0 @@ -52,6 +65,14 @@  #define ERTS_ALC_NO_FIXED_SIZES \    (ERTS_ALC_N_MAX_A_FIXED_SIZE - ERTS_ALC_N_MIN_A_FIXED_SIZE + 1) +#define ERTS_ALC_IS_FIX_TYPE(T) \ +    (ERTS_ALC_T2N(T) >= ERTS_ALC_N_MIN_A_FIXED_SIZE && \ +     ERTS_ALC_T2N(T) <= ERTS_ALC_N_MAX_A_FIXED_SIZE) + +#define ERTS_ALC_FIX_TYPE_IX(T) \ +  (ASSERT(ERTS_ALC_IS_FIX_TYPE(T)), \ +   ERTS_ALC_T2N((T)) - ERTS_ALC_N_MIN_A_FIXED_SIZE) +  void erts_sys_alloc_init(void);  void *erts_sys_alloc(ErtsAlcType_t, void *, Uint);  void *erts_sys_realloc(ErtsAlcType_t, void *, void *, Uint); @@ -228,7 +249,7 @@ 
void *erts_alloc(ErtsAlcType_t type, Uint size)      void *res;      ERTS_MSACC_PUSH_AND_SET_STATE_X(ERTS_MSACC_STATE_ALLOC);      res = (*erts_allctrs[ERTS_ALC_T2A(type)].alloc)( -            ERTS_ALC_T2N(type), +            type,              erts_allctrs[ERTS_ALC_T2A(type)].extra,              size);      if (!res) @@ -243,7 +264,7 @@ void *erts_realloc(ErtsAlcType_t type, void *ptr, Uint size)      void *res;      ERTS_MSACC_PUSH_AND_SET_STATE_X(ERTS_MSACC_STATE_ALLOC);      res = (*erts_allctrs[ERTS_ALC_T2A(type)].realloc)( -	ERTS_ALC_T2N(type), +	type,  	erts_allctrs[ERTS_ALC_T2A(type)].extra,  	ptr,  	size); @@ -258,7 +279,7 @@ void erts_free(ErtsAlcType_t type, void *ptr)  {      ERTS_MSACC_PUSH_AND_SET_STATE_X(ERTS_MSACC_STATE_ALLOC);      (*erts_allctrs[ERTS_ALC_T2A(type)].free)( -	ERTS_ALC_T2N(type), +	type,  	erts_allctrs[ERTS_ALC_T2A(type)].extra,  	ptr);      ERTS_MSACC_POP_STATE_X(); @@ -271,7 +292,7 @@ void *erts_alloc_fnf(ErtsAlcType_t type, Uint size)      void *res;      ERTS_MSACC_PUSH_AND_SET_STATE_X(ERTS_MSACC_STATE_ALLOC);      res = (*erts_allctrs[ERTS_ALC_T2A(type)].alloc)( -	ERTS_ALC_T2N(type), +	type,  	erts_allctrs[ERTS_ALC_T2A(type)].extra,  	size);      ERTS_MSACC_POP_STATE_X(); @@ -285,7 +306,7 @@ void *erts_realloc_fnf(ErtsAlcType_t type, void *ptr, Uint size)      void *res;      ERTS_MSACC_PUSH_AND_SET_STATE_X(ERTS_MSACC_STATE_ALLOC);      res = (*erts_allctrs[ERTS_ALC_T2A(type)].realloc)( -	ERTS_ALC_T2N(type), +	type,  	erts_allctrs[ERTS_ALC_T2A(type)].extra,  	ptr,  	size); diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types index 5409b89bab..f1e99820af 100644 --- a/erts/emulator/beam/erl_alloc.types +++ b/erts/emulator/beam/erl_alloc.types @@ -30,10 +30,10 @@  #   name space).  # * Types, allocators, classes, and descriptions have different name  #   spaces. -# * The type, allocator, and class names INVALID are reserved and can -#   not be used. +# * The type, allocator, and class names INVALID are reserved and +#   cannot be used.  # * The descriptions invalid_allocator, invalid_class, and invalid_type -#   are reserved and can not be used. +#   are reserved and cannot be used.  # * Declarations can be done conditionally by use of a  #     +if <boolean_variable>  # diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c index a5740a08cf..b7a8b9c2d0 100644 --- a/erts/emulator/beam/erl_alloc_util.c +++ b/erts/emulator/beam/erl_alloc_util.c @@ -96,7 +96,6 @@ static int initialized = 0;  #define MBC_REALLOC_ALWAYS_MOVES  #endif -  /* alloc_util global parameters */  static Uint sys_alloc_carrier_size;  #if HAVE_ERTS_MSEG @@ -113,32 +112,38 @@ static int allow_sys_alloc_carriers;  #define DEC_CC(CC) ((CC)--) -/* Multi block carrier (MBC) memory layout in R16:  +/* Multi block carrier (MBC) memory layout in OTP 22:  Empty MBC: -[Carrier_t|pad|Block_t L0T|fhdr| free... ] +[Carrier_t|pad|Block_t L0T0|fhdr| free... ]  MBC after allocating first block: -[Carrier_t|pad|Block_t 000|        udata        |pad|Block_t L0T|fhdr| free... ] +[Carrier_t|pad|Block_t 0000|        udata        |pad|Block_t L0T0|fhdr| free... ]  MBC after allocating second block: -[Carrier_t|pad|Block_t 000|        udata        |pad|Block_t 000|   udata   |pad|Block_t L0T|fhdr| free... ] +[Carrier_t|pad|Block_t 0000|        udata        |pad|Block_t 0000|   udata   |pad|Block_t L0T0|fhdr| free... 
]  MBC after deallocating first block: -[Carrier_t|pad|Block_t 00T|fhdr| free  |FreeBlkFtr_t|Block_t 0P0|   udata   |pad|Block_t L0T|fhdr| free... ] +[Carrier_t|pad|Block_t 00T0|fhdr| free  |FreeBlkFtr_t|Block_t 0P00|   udata   |pad|Block_t L0T0|fhdr| free... ] +MBC after allocating first block, with allocation tagging enabled: +[Carrier_t|pad|Block_t 000A|        udata        |atag|pad|Block_t L0T0|fhdr| free... ]      udata = Allocated user data +    atag  = A tag with basic metadata about this allocation      pad   = Padding to ensure correct alignment for user data      fhdr  = Allocator specific header to keep track of free block      free  = Unused free memory      T     = This block is free (THIS_FREE_BLK_HDR_FLG)      P     = Previous block is free (PREV_FREE_BLK_HDR_FLG)      L     = Last block in carrier (LAST_BLK_HDR_FLG) +    A     = Block has an allocation tag footer, only valid for allocated blocks +            (ATAG_BLK_HDR_FLG)  */  /* Single block carrier (SBC): -[Carrier_t|pad|Block_t 111| udata... ] +[Carrier_t|pad|Block_t 1110| udata... ] +[Carrier_t|pad|Block_t 111A| udata | atag]  */  /* Allocation tags ... @@ -154,20 +159,20 @@ MBC after deallocating first block:  typedef UWord alcu_atag_t; -#define MAKE_ATAG(IdAtom, Type) \ -    (ASSERT((Type) >= ERTS_ALC_N_MIN && (Type) <= ERTS_ALC_N_MAX), \ +#define MAKE_ATAG(IdAtom, TypeNum) \ +    (ASSERT((TypeNum) >= ERTS_ALC_N_MIN && (TypeNum) <= ERTS_ALC_N_MAX), \       ASSERT(atom_val(IdAtom) <= MAX_ATAG_ATOM_ID), \ -     (atom_val(IdAtom) << ERTS_ALC_N_BITS) | (Type)) +     (atom_val(IdAtom) << ERTS_ALC_N_BITS) | (TypeNum))  #define ATAG_ID(AT) (make_atom((AT) >> ERTS_ALC_N_BITS))  #define ATAG_TYPE(AT) ((AT) & ERTS_ALC_N_MASK)  #define MAX_ATAG_ATOM_ID (ERTS_UWORD_MAX >> ERTS_ALC_N_BITS) -#define DBG_IS_VALID_ATAG(Allocator, AT) \ +#define DBG_IS_VALID_ATAG(AT) \      (ATAG_TYPE(AT) >= ERTS_ALC_N_MIN && \       ATAG_TYPE(AT) <= ERTS_ALC_N_MAX && \ -     (Allocator)->alloc_no == ERTS_ALC_T2A(ERTS_ALC_N2T(ATAG_TYPE(AT)))) +     ATAG_ID(AT) <= MAX_ATAG_ATOM_ID)  /* Blocks ... */ @@ -182,10 +187,15 @@ typedef UWord alcu_atag_t;  #endif  #define FBLK_FTR_SZ (sizeof(FreeBlkFtr_t)) +#define BLK_HAS_ATAG(B) \ +    (!!((B)->bhdr & ATAG_BLK_HDR_FLG)) +  #define GET_BLK_ATAG(B) \ -    (((alcu_atag_t *) (((char *) (B)) + (BLK_SZ(B))))[-1]) +    (ASSERT(BLK_HAS_ATAG(B)), \ +     ((alcu_atag_t *) (((char *) (B)) + (BLK_SZ(B))))[-1])  #define SET_BLK_ATAG(B, T) \ -    (((alcu_atag_t *) (((char *) (B)) + (BLK_SZ(B))))[-1] = (T)) +    ((B)->bhdr |= ATAG_BLK_HDR_FLG, \ +     ((alcu_atag_t *) (((char *) (B)) + (BLK_SZ(B))))[-1] = (T))  #define BLK_ATAG_SZ(AP) ((AP)->atags ? 
sizeof(alcu_atag_t) : 0) @@ -203,13 +213,13 @@ typedef UWord alcu_atag_t;    (((FreeBlkFtr_t *) (((char *) (B)) + (SZ)))[-1] = (SZ))  #define SET_MBC_ABLK_SZ(B, SZ) \ -  (ASSERT(((SZ) & FLG_MASK) == 0), \ +  (ASSERT(((SZ) & BLK_FLG_MASK) == 0), \     (B)->bhdr = (((B)->bhdr) & ~MBC_ABLK_SZ_MASK) | (SZ))  #define SET_MBC_FBLK_SZ(B, SZ) \ -  (ASSERT(((SZ) & FLG_MASK) == 0), \ +  (ASSERT(((SZ) & BLK_FLG_MASK) == 0), \     (B)->bhdr = (((B)->bhdr) & ~MBC_FBLK_SZ_MASK) | (SZ))  #define SET_SBC_BLK_SZ(B, SZ) \ -  (ASSERT(((SZ) & FLG_MASK) == 0), \ +  (ASSERT(((SZ) & BLK_FLG_MASK) == 0), \     (B)->bhdr = (((B)->bhdr) & ~SBC_BLK_SZ_MASK) | (SZ))  #define SET_PREV_BLK_FREE(AP,B) \    (ASSERT(!IS_MBC_FIRST_BLK(AP,B)), \ @@ -235,12 +245,12 @@ typedef UWord alcu_atag_t;  #  define SET_MBC_ABLK_HDR(B, Sz, F, C) \      (ASSERT(((Sz) & ~MBC_ABLK_SZ_MASK) == 0), \ -     ASSERT(!((UWord)(F) & (~FLG_MASK|THIS_FREE_BLK_HDR_FLG))), \ +     ASSERT(!((UWord)(F) & (~BLK_FLG_MASK|THIS_FREE_BLK_HDR_FLG))), \       (B)->bhdr = ((Sz) | (F) | (BLK_CARRIER_OFFSET(B,C) << MBC_ABLK_OFFSET_SHIFT)))  #  define SET_MBC_FBLK_HDR(B, Sz, F, C) \      (ASSERT(((Sz) & ~MBC_FBLK_SZ_MASK) == 0), \ -     ASSERT(((UWord)(F) & (~FLG_MASK|THIS_FREE_BLK_HDR_FLG|PREV_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \ +     ASSERT(((UWord)(F) & (~BLK_FLG_MASK|THIS_FREE_BLK_HDR_FLG|PREV_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \       (B)->bhdr = ((Sz) | (F)), \       (B)->u.carrier = (C)) @@ -257,8 +267,8 @@ typedef UWord alcu_atag_t;  #  define SET_BLK_FREE(B) \    (ASSERT(!IS_PREV_BLK_FREE(B)), \     (B)->u.carrier = ABLK_TO_MBC(B), \ -   (B)->bhdr |= THIS_FREE_BLK_HDR_FLG, \ -   (B)->bhdr &= (MBC_ABLK_SZ_MASK|FLG_MASK)) +   (B)->bhdr &= (MBC_ABLK_SZ_MASK|LAST_BLK_HDR_FLG), \ +   (B)->bhdr |= THIS_FREE_BLK_HDR_FLG)  #  define SET_BLK_ALLOCED(B) \    (ASSERT(((B)->bhdr & (MBC_ABLK_OFFSET_MASK|THIS_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \ @@ -270,15 +280,16 @@ typedef UWord alcu_atag_t;  #  define MBC_SZ_MAX_LIMIT ((UWord)~0)  #  define SET_MBC_ABLK_HDR(B, Sz, F, C) \ -    (ASSERT(((Sz) & FLG_MASK) == 0), \ -     ASSERT(!((UWord)(F) & (~FLG_MASK|THIS_FREE_BLK_HDR_FLG))), \ -     ASSERT((UWord)(F) < SBC_BLK_HDR_FLG), \ +    (ASSERT(((Sz) & BLK_FLG_MASK) == 0), \ +     ASSERT(((F) & ~BLK_FLG_MASK) == 0), \ +     ASSERT(!((UWord)(F) & (~BLK_FLG_MASK|THIS_FREE_BLK_HDR_FLG))), \       (B)->bhdr = ((Sz) | (F)), \       (B)->carrier = (C))  #  define SET_MBC_FBLK_HDR(B, Sz, F, C) \ -    (ASSERT(((Sz) & FLG_MASK) == 0), \ -     ASSERT(((UWord)(F) & (~FLG_MASK|THIS_FREE_BLK_HDR_FLG|PREV_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \ +    (ASSERT(((Sz) & BLK_FLG_MASK) == 0), \ +     ASSERT(((F) & ~BLK_FLG_MASK) == 0), \ +     ASSERT(((UWord)(F) & (~BLK_FLG_MASK|THIS_FREE_BLK_HDR_FLG|PREV_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \       (B)->bhdr = ((Sz) | (F)), \       (B)->carrier = (C)) @@ -297,7 +308,7 @@ typedef UWord alcu_atag_t;  #endif /* !MBC_ABLK_OFFSET_BITS */  #define SET_SBC_BLK_HDR(B, Sz) \ -  (ASSERT(((Sz) & FLG_MASK) == 0), (B)->bhdr = ((Sz) | (SBC_BLK_HDR_FLG))) +  (ASSERT(((Sz) & BLK_FLG_MASK) == 0), (B)->bhdr = ((Sz) | (SBC_BLK_HDR_FLG)))  #define BLK_UMEM_SZ(B) \ @@ -320,7 +331,7 @@ typedef UWord alcu_atag_t;  #define GET_PREV_FREE_BLK_HDR_FLG(B) \    ((B)->bhdr & PREV_FREE_BLK_HDR_FLG)  #define GET_BLK_HDR_FLGS(B) \ -  ((B)->bhdr & FLG_MASK) +  ((B)->bhdr & BLK_FLG_MASK)  #define NXT_BLK(B) \    (ASSERT(IS_MBC_BLK(B)), \ @@ -419,7 +430,7 @@ do {										\  #define SCH_SBC				SBC_CARRIER_HDR_FLAG  #define 
SET_CARRIER_HDR(C, Sz, F, AP) \ -  (ASSERT(((Sz) & FLG_MASK) == 0), (C)->chdr = ((Sz) | (F)), \ +  (ASSERT(((Sz) & CRR_FLG_MASK) == 0), (C)->chdr = ((Sz) | (F)), \     erts_atomic_init_nob(&(C)->allctr, (erts_aint_t) (AP)))  #define BLK_TO_SBC(B) \ @@ -444,8 +455,8 @@ do {										\    (!IS_SB_CARRIER((C)))  #define SET_CARRIER_SZ(C, SZ) \ -  (ASSERT(((SZ) & FLG_MASK) == 0), \ -   ((C)->chdr = ((C)->chdr & FLG_MASK) | (SZ))) +  (ASSERT(((SZ) & CRR_FLG_MASK) == 0), \ +   ((C)->chdr = ((C)->chdr & CRR_FLG_MASK) | (SZ)))  #define CFLG_SBC				(1 << 0)  #define CFLG_MBC				(1 << 1) @@ -575,10 +586,12 @@ do {									\  	STAT_MSEG_MBC_ALLOC((AP), csz__);				\      else								\  	STAT_SYS_ALLOC_MBC_ALLOC((AP), csz__);				\ -    (AP)->mbcs.blocks.curr.no += (CRR)->cpool.blocks;			\ +    set_new_allctr_abandon_limit(AP);                                   \ +    (AP)->mbcs.blocks.curr.no += (CRR)->cpool.blocks[(AP)->alloc_no];   \      if ((AP)->mbcs.blocks.max.no < (AP)->mbcs.blocks.curr.no)		\  	(AP)->mbcs.blocks.max.no = (AP)->mbcs.blocks.curr.no;		\ -    (AP)->mbcs.blocks.curr.size += (CRR)->cpool.blocks_size;		\ +    (AP)->mbcs.blocks.curr.size +=                                      \ +       (CRR)->cpool.blocks_size[(AP)->alloc_no];                        \      if ((AP)->mbcs.blocks.max.size < (AP)->mbcs.blocks.curr.size)	\  	(AP)->mbcs.blocks.max.size = (AP)->mbcs.blocks.curr.size;	\  } while (0) @@ -601,25 +614,33 @@ do {									\      DEBUG_CHECK_CARRIER_NO_SZ((AP));					\  } while (0) -#define STAT_MBC_ABANDON(AP, CRR)					\ -do {									\ -    UWord csz__ = CARRIER_SZ((CRR));					\ -    if (IS_MSEG_CARRIER((CRR)))						\ -	STAT_MSEG_MBC_FREE((AP), csz__);				\ -    else								\ -	STAT_SYS_ALLOC_MBC_FREE((AP), csz__);				\ -    ERTS_ALC_CPOOL_ASSERT((AP)->mbcs.blocks.curr.no			\ -			  >= (CRR)->cpool.blocks);			\ -    (AP)->mbcs.blocks.curr.no -= (CRR)->cpool.blocks;			\ -    ERTS_ALC_CPOOL_ASSERT((AP)->mbcs.blocks.curr.size 			\ -			  >= (CRR)->cpool.blocks_size);			\ -    (AP)->mbcs.blocks.curr.size -= (CRR)->cpool.blocks_size;		\ +#define STAT_MBC_FREE(AP, CRR)                                               \ +do {                                                                         \ +    UWord csz__ = CARRIER_SZ((CRR));                                         \ +    if (IS_MSEG_CARRIER((CRR))) {                                            \ +        STAT_MSEG_MBC_FREE((AP), csz__);                                     \ +    } else {                                                                 \ +        STAT_SYS_ALLOC_MBC_FREE((AP), csz__);                                \ +    }                                                                        \ +    set_new_allctr_abandon_limit(AP);                                        \  } while (0) -#define STAT_MBC_BLK_ALLOC_CRR(CRR, BSZ)				\ +#define STAT_MBC_ABANDON(AP, CRR)                                            \ +do {                                                                         \ +    STAT_MBC_FREE(AP, CRR);                                                  \ +    ERTS_ALC_CPOOL_ASSERT((AP)->mbcs.blocks.curr.no                          \ +                          >= (CRR)->cpool.blocks[(AP)->alloc_no]);           \ +    (AP)->mbcs.blocks.curr.no -= (CRR)->cpool.blocks[(AP)->alloc_no];        \ +    ERTS_ALC_CPOOL_ASSERT((AP)->mbcs.blocks.curr.size                        \ +                          >= (CRR)->cpool.blocks_size[(AP)->alloc_no]);      \ +    (AP)->mbcs.blocks.curr.size -= 
(CRR)->cpool.blocks_size[(AP)->alloc_no]; \ +} while (0) + +#define STAT_MBC_BLK_ALLOC_CRR(AP, CRR, BSZ)				\  do {									\ -    (CRR)->cpool.blocks++;						\ -    (CRR)->cpool.blocks_size += (BSZ);					\ +    (CRR)->cpool.blocks[(AP)->alloc_no]++;				\ +    (CRR)->cpool.blocks_size[(AP)->alloc_no] += (BSZ);			\ +    (CRR)->cpool.total_blocks_size += (BSZ);				\  } while (0)  #define STAT_MBC_BLK_ALLOC(AP, CRR, BSZ, FLGS)	       			\ @@ -631,50 +652,67 @@ do {									\      cstats__->blocks.curr.size += (BSZ);				\      if (cstats__->blocks.max.size < cstats__->blocks.curr.size)		\  	cstats__->blocks.max.size = cstats__->blocks.curr.size;		\ -    STAT_MBC_BLK_ALLOC_CRR((CRR), (BSZ));				\ +    STAT_MBC_BLK_ALLOC_CRR((AP), (CRR), (BSZ));				\  } while (0)  static ERTS_INLINE int  stat_cpool_mbc_blk_free(Allctr_t *allctr, +                        ErtsAlcType_t type,  			Carrier_t *crr,  			Carrier_t **busy_pcrr_pp,  			UWord blksz)  { +    Allctr_t *orig_allctr; +    int alloc_no; -    ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks > 0); -    crr->cpool.blocks--; -    ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks_size >= blksz); -    crr->cpool.blocks_size -= blksz; +    alloc_no = ERTS_ALC_T2A(type); -    if (!busy_pcrr_pp || !*busy_pcrr_pp) -	return 0; +    ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks[alloc_no] > 0); +    crr->cpool.blocks[alloc_no]--; +    ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks_size[alloc_no] >= blksz); +    crr->cpool.blocks_size[alloc_no] -= blksz; +    ERTS_ALC_CPOOL_ASSERT(crr->cpool.total_blocks_size >= blksz); +    crr->cpool.total_blocks_size -= blksz; -    ERTS_ALC_CPOOL_ASSERT(crr == *busy_pcrr_pp); +    if (allctr->alloc_no == alloc_no && (!busy_pcrr_pp || !*busy_pcrr_pp)) { +        /* This is a local block, so we should not update the pool +         * statistics. */ +        return 0; +    } + +    /* This is either a foreign block that's been fetched from the pool, or any +     * block that's in the pool. The carrier's owner keeps the statistics for +     * both pooled and foreign blocks. 
*/ + +    orig_allctr = crr->cpool.orig_allctr; + +    ERTS_ALC_CPOOL_ASSERT(alloc_no != allctr->alloc_no || +        (crr == *busy_pcrr_pp && allctr == orig_allctr));  #ifdef ERTS_ALC_CPOOL_DEBUG      ERTS_ALC_CPOOL_ASSERT( -	erts_atomic_dec_read_nob(&allctr->cpool.stat.no_blocks) >= 0); +	erts_atomic_dec_read_nob(&orig_allctr->cpool.stat.no_blocks[alloc_no]) >= 0);      ERTS_ALC_CPOOL_ASSERT( -	erts_atomic_add_read_nob(&allctr->cpool.stat.blocks_size, +	erts_atomic_add_read_nob(&orig_allctr->cpool.stat.blocks_size[alloc_no],  				 -((erts_aint_t) blksz)) >= 0);  #else -    erts_atomic_dec_nob(&allctr->cpool.stat.no_blocks); -    erts_atomic_add_nob(&allctr->cpool.stat.blocks_size, +    erts_atomic_dec_nob(&orig_allctr->cpool.stat.no_blocks[alloc_no]); +    erts_atomic_add_nob(&orig_allctr->cpool.stat.blocks_size[alloc_no],  			-((erts_aint_t) blksz));  #endif      return 1;  } -#define STAT_MBC_BLK_FREE(AP, CRR, BPCRRPP, BSZ, FLGS)			\ -do {									\ -    if (!stat_cpool_mbc_blk_free((AP), (CRR), (BPCRRPP), (BSZ))) {	\ -	CarriersStats_t *cstats__ = &(AP)->mbcs;			\ -	ASSERT(cstats__->blocks.curr.no > 0);				\ -	cstats__->blocks.curr.no--;					\ -	ASSERT(cstats__->blocks.curr.size >= (BSZ));			\ -	cstats__->blocks.curr.size -= (BSZ);				\ -    }									\ +#define STAT_MBC_BLK_FREE(AP, TYPE, CRR, BPCRRPP, BSZ, FLGS)               \ +do {                                                                       \ +    if (!stat_cpool_mbc_blk_free((AP), (TYPE), (CRR), (BPCRRPP), (BSZ))) { \ +        CarriersStats_t *cstats__ = &(AP)->mbcs;                           \ +        ASSERT(cstats__->blocks.curr.no > 0);                              \ +        cstats__->blocks.curr.no--;                                        \ +        ASSERT(cstats__->blocks.curr.size >= (BSZ));                       \ +        cstats__->blocks.curr.size -= (BSZ);                               \ +    }                                                                      \  } while (0)  /* Debug stuff... 
*/ @@ -721,8 +759,8 @@ static void make_name_atoms(Allctr_t *allctr);  static Block_t *create_carrier(Allctr_t *, Uint, UWord);  static void destroy_carrier(Allctr_t *, Block_t *, Carrier_t **); -static void mbc_free(Allctr_t *allctr, void *p, Carrier_t **busy_pcrr_pp); -static void dealloc_block(Allctr_t *, void *, ErtsAlcFixList_t *, int); +static void mbc_free(Allctr_t *allctr, ErtsAlcType_t type, void *p, Carrier_t **busy_pcrr_pp); +static void dealloc_block(Allctr_t *, ErtsAlcType_t, Uint32, void *, ErtsAlcFixList_t *);  static alcu_atag_t determine_alloc_tag(Allctr_t *allocator, ErtsAlcType_t type)  { @@ -764,14 +802,14 @@ static alcu_atag_t determine_alloc_tag(Allctr_t *allocator, ErtsAlcType_t type)          }      } -    return MAKE_ATAG(id, type); +    return MAKE_ATAG(id, ERTS_ALC_T2N(type));  }  static void set_alloc_tag(Allctr_t *allocator, void *p, alcu_atag_t tag)  {      Block_t *block; -    ASSERT(DBG_IS_VALID_ATAG(allocator, tag)); +    ASSERT(DBG_IS_VALID_ATAG(tag));      ASSERT(allocator->atags && p);      (void)allocator; @@ -1308,28 +1346,9 @@ chk_fix_list(Allctr_t *allctr, ErtsAlcFixList_t *fix, int ix, int before)  #define ERTS_DBG_CHK_FIX_LIST(A, FIX, IX, B)  #endif +static ERTS_INLINE Allctr_t *get_pref_allctr(void *extra);  static void *mbc_alloc(Allctr_t *allctr, Uint size); -typedef struct { -    ErtsAllctrDDBlock_t ddblock__; /* must be first */ -    ErtsAlcType_t fix_type; -} ErtsAllctrFixDDBlock_t; - -#define ERTS_ALC_FIX_NO_UNUSE (((ErtsAlcType_t) 1) << ERTS_ALC_N_BITS) - -static ERTS_INLINE void -dealloc_fix_block(Allctr_t *allctr, -		  ErtsAlcType_t type, -		  void *ptr, -		  ErtsAlcFixList_t *fix, -		  int dec_cc_on_redirect) -{ -    /* May be redirected... */ -    ASSERT((type & ERTS_ALC_FIX_NO_UNUSE) == 0); -    ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type = type | ERTS_ALC_FIX_NO_UNUSE; -    dealloc_block(allctr, ptr, fix, dec_cc_on_redirect); -} -  static ERTS_INLINE void  sched_fix_shrink(Allctr_t *allctr, int on)  { @@ -1371,7 +1390,7 @@ fix_cpool_check_shrink(Allctr_t *allctr,  	    if (fix->u.cpool.min_list_size > fix->list_size)  		fix->u.cpool.min_list_size = fix->list_size; -	    dealloc_fix_block(allctr, type, p, fix, 0); +	    dealloc_block(allctr, type, DEALLOC_FLG_FIX_SHRINK, p, fix);  	}      }  } @@ -1382,11 +1401,9 @@ fix_cpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size)      void *res;      ErtsAlcFixList_t *fix; -    ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= type -	   && type <= ERTS_ALC_N_MAX_A_FIXED_SIZE); - -    fix = &allctr->fix[type - ERTS_ALC_N_MIN_A_FIXED_SIZE]; -    ASSERT(size == fix->type_size); +    fix = &allctr->fix[ERTS_ALC_FIX_TYPE_IX(type)]; +    ASSERT(type == fix->type && size == fix->type_size); +    ASSERT(size >= sizeof(ErtsAllctrDDBlock_t));      res = fix->list;      if (res) { @@ -1415,21 +1432,39 @@ fix_cpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size)  static ERTS_INLINE void  fix_cpool_free(Allctr_t *allctr,  	       ErtsAlcType_t type, +               Uint32 flags,  	       void *p, -	       Carrier_t **busy_pcrr_pp, -	       int unuse) +	       Carrier_t **busy_pcrr_pp)  {      ErtsAlcFixList_t *fix; +    Allctr_t *fix_allctr; + +    /* If this isn't a fix allocator we need to update the fix list of our +     * neighboring fix_alloc to keep the statistics consistent. 
*/ +    if (!allctr->fix) { +        ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[ERTS_ALC_A_FIXED_SIZE]; +        fix_allctr = get_pref_allctr(tspec); +        ASSERT(!fix_allctr->thread_safe); +        ASSERT(allctr != fix_allctr); +    } +    else { +        fix_allctr = allctr; +    } -    ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= type -	   && type <= ERTS_ALC_N_MAX_A_FIXED_SIZE); +    ASSERT(ERTS_ALC_IS_CPOOL_ENABLED(fix_allctr)); +    ASSERT(ERTS_ALC_IS_CPOOL_ENABLED(allctr)); -    fix = &allctr->fix[type - ERTS_ALC_N_MIN_A_FIXED_SIZE]; +    fix = &fix_allctr->fix[ERTS_ALC_FIX_TYPE_IX(type)]; +    ASSERT(type == fix->type); -    if (unuse) -	fix->u.cpool.used--; +    if (!(flags & DEALLOC_FLG_FIX_SHRINK)) { +        fix->u.cpool.used--; +    } -    if ((!busy_pcrr_pp || !*busy_pcrr_pp) +    /* We don't want foreign blocks to be long-lived, so we skip recycling if +     * allctr != fix_allctr. */ +    if (allctr == fix_allctr +        && (!busy_pcrr_pp || !*busy_pcrr_pp)  	&& !fix->u.cpool.shrink_list  	&& fix->list_size < ERTS_ALCU_FIX_MAX_LIST_SZ) {  	*((void **) p) = fix->list; @@ -1442,7 +1477,7 @@ fix_cpool_free(Allctr_t *allctr,  	if (IS_SBC_BLK(blk))  	    destroy_carrier(allctr, blk, NULL);  	else -	    mbc_free(allctr, p, busy_pcrr_pp); +	    mbc_free(allctr, type, p, busy_pcrr_pp);  	fix->u.cpool.allocated--;  	fix_cpool_check_shrink(allctr, type, fix, busy_pcrr_pp);      } @@ -1469,7 +1504,7 @@ fix_cpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)  	    fix->u.cpool.shrink_list = fix->u.cpool.min_list_size;  	    fix->u.cpool.min_list_size = fix->list_size;  	} -	type = (ErtsAlcType_t) (ix + ERTS_ALC_N_MIN_A_FIXED_SIZE); +	type = ERTS_ALC_N2T((ErtsAlcType_t) (ix + ERTS_ALC_N_MIN_A_FIXED_SIZE));  	for (o = 0; o < ERTS_ALC_FIX_MAX_SHRINK_OPS || flush; o++) {  	    void *ptr; @@ -1483,7 +1518,7 @@ fix_cpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)  	    fix->list = *((void **) ptr);  	    fix->list_size--;  	    fix->u.cpool.shrink_list--; -	    dealloc_fix_block(allctr, type, ptr, fix, 0); +	    dealloc_block(allctr, type, DEALLOC_FLG_FIX_SHRINK, ptr, fix);  	}  	if (fix->u.cpool.min_list_size > fix->list_size)  	    fix->u.cpool.min_list_size = fix->list_size; @@ -1509,11 +1544,9 @@ fix_nocpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size)      ErtsAlcFixList_t *fix;      void *res; -    ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= type -	   && type <= ERTS_ALC_N_MAX_A_FIXED_SIZE); - -    fix = &allctr->fix[type - ERTS_ALC_N_MIN_A_FIXED_SIZE]; -    ASSERT(size == fix->type_size); +    fix = &allctr->fix[ERTS_ALC_FIX_TYPE_IX(type)]; +    ASSERT(type == fix->type && size == fix->type_size); +    ASSERT(size >= sizeof(ErtsAllctrDDBlock_t));      ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);      fix->u.nocpool.used++; @@ -1530,7 +1563,7 @@ fix_nocpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size)  	    if (IS_SBC_BLK(blk))  		destroy_carrier(allctr, blk, NULL);  	    else -		mbc_free(allctr, p, NULL); +		mbc_free(allctr, type, p, NULL);  	    fix->u.nocpool.allocated--;  	}  	ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0); @@ -1565,10 +1598,8 @@ fix_nocpool_free(Allctr_t *allctr,      Block_t *blk;      ErtsAlcFixList_t *fix; -    ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= type -	   && type <= ERTS_ALC_N_MAX_A_FIXED_SIZE); - -    fix = &allctr->fix[type - ERTS_ALC_N_MIN_A_FIXED_SIZE]; +    fix = &allctr->fix[ERTS_ALC_T2N(type) - ERTS_ALC_N_MIN_A_FIXED_SIZE]; +    ASSERT(fix->type == type);      ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);      
fix->u.nocpool.used--; @@ -1587,7 +1618,7 @@ fix_nocpool_free(Allctr_t *allctr,  	if (IS_SBC_BLK(blk))  	    destroy_carrier(allctr, blk, NULL);  	else -	    mbc_free(allctr, p, NULL); +	    mbc_free(allctr, type, p, NULL);  	p = fix->list;  	fix->list = *((void **) p);  	fix->list_size--; @@ -1598,7 +1629,7 @@ fix_nocpool_free(Allctr_t *allctr,      if (IS_SBC_BLK(blk))  	destroy_carrier(allctr, blk, NULL);      else -	mbc_free(allctr, p, NULL); +	mbc_free(allctr, type, p, NULL);      ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);  } @@ -1639,7 +1670,7 @@ fix_nocpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)  	    ptr = fix->list;  	    fix->list = *((void **) ptr);  	    fix->list_size--; -	    dealloc_block(allctr, ptr, NULL, 0); +	    dealloc_block(allctr, fix->type, 0, ptr, NULL);  	    fix->u.nocpool.allocated--;  	}  	if (fix->list_size != 0) { @@ -1681,6 +1712,7 @@ dealloc_mbc(Allctr_t *allctr, Carrier_t *crr)  } +static UWord allctr_abandon_limit(Allctr_t *allctr);  static void set_new_allctr_abandon_limit(Allctr_t*);  static void abandon_carrier(Allctr_t*, Carrier_t*);  static void poolify_my_carrier(Allctr_t*, Carrier_t*); @@ -1802,7 +1834,7 @@ get_used_allctr(Allctr_t *pref_allctr, int pref_lock, void *p, UWord *sizep,  static void  init_dd_queue(ErtsAllctrDDQueue_t *ddq)  { -    erts_atomic_init_nob(&ddq->tail.data.marker.atmc_next, ERTS_AINT_NULL); +    erts_atomic_init_nob(&ddq->tail.data.marker.u.atmc_next, ERTS_AINT_NULL);      erts_atomic_init_nob(&ddq->tail.data.last,  			 (erts_aint_t) &ddq->tail.data.marker);      erts_atomic_init_nob(&ddq->tail.data.um_refc[0], 0); @@ -1823,17 +1855,17 @@ ddq_managed_thread_enqueue(ErtsAllctrDDQueue_t *ddq, void *ptr, int cinit)      erts_aint_t itmp;      ErtsAllctrDDBlock_t *enq, *this = ptr; -    erts_atomic_init_nob(&this->atmc_next, ERTS_AINT_NULL); +    erts_atomic_init_nob(&this->u.atmc_next, ERTS_AINT_NULL);      /* Enqueue at end of list... 
*/      enq = (ErtsAllctrDDBlock_t *) erts_atomic_read_nob(&ddq->tail.data.last); -    itmp = erts_atomic_cmpxchg_relb(&enq->atmc_next, +    itmp = erts_atomic_cmpxchg_relb(&enq->u.atmc_next,  				    (erts_aint_t) this,  				    ERTS_AINT_NULL);      if (itmp == ERTS_AINT_NULL) {  	/* We are required to move last pointer */  #ifdef DEBUG -	ASSERT(ERTS_AINT_NULL == erts_atomic_read_nob(&this->atmc_next)); +	ASSERT(ERTS_AINT_NULL == erts_atomic_read_nob(&this->u.atmc_next));  	ASSERT(((erts_aint_t) enq)  	       == erts_atomic_xchg_relb(&ddq->tail.data.last,  					(erts_aint_t) this)); @@ -1851,8 +1883,8 @@ ddq_managed_thread_enqueue(ErtsAllctrDDQueue_t *ddq, void *ptr, int cinit)  	while (1) {  	    erts_aint_t itmp2; -	    erts_atomic_set_nob(&this->atmc_next, itmp); -	    itmp2 = erts_atomic_cmpxchg_relb(&enq->atmc_next, +	    erts_atomic_set_nob(&this->u.atmc_next, itmp); +	    itmp2 = erts_atomic_cmpxchg_relb(&enq->u.atmc_next,  					     (erts_aint_t) this,  					     itmp);  	    if (itmp == itmp2) @@ -1861,7 +1893,7 @@ ddq_managed_thread_enqueue(ErtsAllctrDDQueue_t *ddq, void *ptr, int cinit)  		itmp = itmp2;  	    else {  		enq = (ErtsAllctrDDBlock_t *) itmp2; -		itmp = erts_atomic_read_acqb(&enq->atmc_next); +		itmp = erts_atomic_read_acqb(&enq->u.atmc_next);  		ASSERT(itmp != ERTS_AINT_NULL);  	    }  	    i++; @@ -1877,8 +1909,8 @@ check_insert_marker(ErtsAllctrDDQueue_t *ddq, erts_aint_t ilast)  	erts_aint_t itmp;  	ErtsAllctrDDBlock_t *last = (ErtsAllctrDDBlock_t *) ilast; -	erts_atomic_init_nob(&ddq->tail.data.marker.atmc_next, ERTS_AINT_NULL); -	itmp = erts_atomic_cmpxchg_relb(&last->atmc_next, +	erts_atomic_init_nob(&ddq->tail.data.marker.u.atmc_next, ERTS_AINT_NULL); +	itmp = erts_atomic_cmpxchg_relb(&last->u.atmc_next,  					(erts_aint_t) &ddq->tail.data.marker,  					ERTS_AINT_NULL);  	if (itmp == ERTS_AINT_NULL) { @@ -1929,7 +1961,7 @@ ddq_dequeue(ErtsAllctrDDQueue_t *ddq)  	ASSERT(ddq->head.used_marker);  	ddq->head.used_marker = 0;  	blk = ((ErtsAllctrDDBlock_t *) -	       erts_atomic_read_nob(&blk->atmc_next)); +	       erts_atomic_read_nob(&blk->u.atmc_next));  	if (blk == ddq->head.unref_end) {  	    ddq->head.first = blk;  	    return NULL; @@ -1937,7 +1969,7 @@ ddq_dequeue(ErtsAllctrDDQueue_t *ddq)      }      ddq->head.first = ((ErtsAllctrDDBlock_t *) -		       erts_atomic_read_nob(&blk->atmc_next)); +		       erts_atomic_read_nob(&blk->u.atmc_next));      ASSERT(ddq->head.first); @@ -1999,19 +2031,13 @@ check_pending_dealloc_carrier(Allctr_t *allctr,  			      int *need_more_work);  static void -handle_delayed_fix_dealloc(Allctr_t *allctr, void *ptr) +handle_delayed_fix_dealloc(Allctr_t *allctr, ErtsAlcType_t type, Uint32 flags, +                           void *ptr)  { -    ErtsAlcType_t type; - -    type = ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type; - -    ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE -	   <= (type & ~ERTS_ALC_FIX_NO_UNUSE)); -    ASSERT((type & ~ERTS_ALC_FIX_NO_UNUSE) -	   <= ERTS_ALC_N_MAX_A_FIXED_SIZE); +    ASSERT(ERTS_ALC_IS_FIX_TYPE(type));      if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr)) -	fix_nocpool_free(allctr, (type & ~ERTS_ALC_FIX_NO_UNUSE), ptr); +	fix_nocpool_free(allctr, type, ptr);      else {  	Block_t *blk = UMEM2BLK(ptr);  	Carrier_t *busy_pcrr_p; @@ -2026,20 +2052,24 @@ handle_delayed_fix_dealloc(Allctr_t *allctr, void *ptr)  				      NULL, &busy_pcrr_p);  	if (used_allctr == allctr) {  	doit: -	    fix_cpool_free(allctr, (type & ~ERTS_ALC_FIX_NO_UNUSE), -			   ptr, &busy_pcrr_p, -			   !(type & ERTS_ALC_FIX_NO_UNUSE)); +	    
fix_cpool_free(allctr, type, flags, ptr, &busy_pcrr_p);  	    clear_busy_pool_carrier(allctr, busy_pcrr_p);  	}  	else {  	    /* Carrier migrated; need to redirect block to new owner... */ -	    int cinit = used_allctr->dd.ix - allctr->dd.ix; +            ErtsAllctrDDBlock_t *dd_block; +            int cinit; + +            dd_block = (ErtsAllctrDDBlock_t*)ptr; +            dd_block->flags = flags; +            dd_block->type = type;              ERTS_ALC_CPOOL_ASSERT(!busy_pcrr_p);              DEC_CC(allctr->calls.this_free); -	    ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type = type; +            cinit = used_allctr->dd.ix - allctr->dd.ix; +  	    if (ddq_enqueue(&used_allctr->dd.q, ptr, cinit))  		erts_alloc_notify_delayed_dealloc(used_allctr->ix);  	} @@ -2063,7 +2093,6 @@ handle_delayed_dealloc(Allctr_t *allctr,      int need_mr_wrk = 0;      int have_checked_incoming = 0;      int ops = 0; -    ErtsAlcFixList_t *fix;      int res;      ErtsAllctrDDQueue_t *ddq; @@ -2072,8 +2101,6 @@ handle_delayed_dealloc(Allctr_t *allctr,      ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr); -    fix = allctr->fix; -      ddq = &allctr->dd.q;      res = 0; @@ -2162,16 +2189,27 @@ handle_delayed_dealloc(Allctr_t *allctr,              }  	}  	else { +            ErtsAllctrDDBlock_t *dd_block; +            ErtsAlcType_t type; +            Uint32 flags; +         +            dd_block = (ErtsAllctrDDBlock_t*)ptr; +            flags = dd_block->flags; +            type = dd_block->type; + +            flags |= DEALLOC_FLG_REDIRECTED; +              ASSERT(IS_SBC_BLK(blk) || (ABLK_TO_MBC(blk) !=                                         ErtsContainerStruct(blk, Carrier_t,                                                             cpool.homecoming_dd.blk)));  	    INC_CC(allctr->calls.this_free); -	    if (fix) -		handle_delayed_fix_dealloc(allctr, ptr); -	    else -		dealloc_block(allctr, ptr, NULL, 1); +	    if (ERTS_ALC_IS_FIX_TYPE(type)) { +		handle_delayed_fix_dealloc(allctr, type, flags, ptr); +	    } else { +		dealloc_block(allctr, type, flags, ptr, NULL); +            }  	}      } @@ -2199,8 +2237,10 @@ enqueue_dealloc_other_instance(ErtsAlcType_t type,  			       void *ptr,  			       int cinit)  { -    if (allctr->fix) -	((ErtsAllctrFixDDBlock_t*) ptr)->fix_type = type; +    ErtsAllctrDDBlock_t *dd_block = ((ErtsAllctrDDBlock_t*)ptr); + +    dd_block->type = type; +    dd_block->flags = 0;      if (ddq_enqueue(&allctr->dd.q, ptr, cinit))  	erts_alloc_notify_delayed_dealloc(allctr->ix); @@ -2230,10 +2270,7 @@ check_abandon_carrier(Allctr_t *allctr, Block_t *fblk, Carrier_t **busy_pcrr_pp)      if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr))  	return; -    allctr->cpool.check_limit_count--; -    if (--allctr->cpool.check_limit_count <= 0) -	set_new_allctr_abandon_limit(allctr); - +    ASSERT(allctr->cpool.abandon_limit == allctr_abandon_limit(allctr));      ASSERT(erts_thr_progress_is_managed_thread());      if (allctr->cpool.disable_abandon) @@ -2251,7 +2288,7 @@ check_abandon_carrier(Allctr_t *allctr, Block_t *fblk, Carrier_t **busy_pcrr_pp)      if (allctr->main_carrier == crr)  	return; -    if (crr->cpool.blocks_size > crr->cpool.abandon_limit) +    if (crr->cpool.total_blocks_size > crr->cpool.abandon_limit)  	return;      if (crr->cpool.thr_prgr != ERTS_THR_PRGR_INVALID @@ -2287,24 +2324,26 @@ erts_alcu_check_delayed_dealloc(Allctr_t *allctr,  			   ERTS_ALCU_DD_OPS_LIM_LOW, NULL, NULL, NULL)  static void -dealloc_block(Allctr_t *allctr, void *ptr, ErtsAlcFixList_t *fix, int dec_cc_on_redirect) 
+dealloc_block(Allctr_t *allctr, ErtsAlcType_t type, Uint32 flags, void *ptr, +              ErtsAlcFixList_t *fix)  {      Block_t *blk = UMEM2BLK(ptr); +    ASSERT(!fix || type == fix->type); +      ERTS_LC_ASSERT(!allctr->thread_safe  		       || erts_lc_mtx_is_locked(&allctr->mutex));      if (IS_SBC_BLK(blk)) {  	destroy_carrier(allctr, blk, NULL);  	if (fix && ERTS_ALC_IS_CPOOL_ENABLED(allctr)) { -	    ErtsAlcType_t type = ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type; -	    if (!(type & ERTS_ALC_FIX_NO_UNUSE)) +	    if (!(flags & DEALLOC_FLG_FIX_SHRINK))  		fix->u.cpool.used--;  	    fix->u.cpool.allocated--;  	}      }      else if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr)) -	mbc_free(allctr, ptr, NULL); +	mbc_free(allctr, type, ptr, NULL);      else {  	Carrier_t *busy_pcrr_p;  	Allctr_t *used_allctr; @@ -2313,22 +2352,29 @@ dealloc_block(Allctr_t *allctr, void *ptr, ErtsAlcFixList_t *fix, int dec_cc_on_  				      NULL, &busy_pcrr_p);  	if (used_allctr == allctr) {  	    if (fix) { -		ErtsAlcType_t type = ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type; -		if (!(type & ERTS_ALC_FIX_NO_UNUSE)) +	        if (!(flags & DEALLOC_FLG_FIX_SHRINK))  		    fix->u.cpool.used--;  		fix->u.cpool.allocated--;  	    } -	    mbc_free(allctr, ptr, &busy_pcrr_p); +	    mbc_free(allctr, type, ptr, &busy_pcrr_p);  	    clear_busy_pool_carrier(allctr, busy_pcrr_p);  	}  	else {  	    /* Carrier migrated; need to redirect block to new owner... */ -	    int cinit = used_allctr->dd.ix - allctr->dd.ix; +            ErtsAllctrDDBlock_t *dd_block; +            int cinit; + +            dd_block = (ErtsAllctrDDBlock_t*)ptr; +            dd_block->flags = flags; +            dd_block->type = type;              ERTS_ALC_CPOOL_ASSERT(!busy_pcrr_p); -            if (dec_cc_on_redirect) +            if (flags & DEALLOC_FLG_REDIRECTED)                  DEC_CC(allctr->calls.this_free); + +            cinit = used_allctr->dd.ix - allctr->dd.ix; +  	    if (ddq_enqueue(&used_allctr->dd.q, ptr, cinit))  		erts_alloc_notify_delayed_dealloc(used_allctr->ix);  	} @@ -2495,7 +2541,7 @@ mbc_alloc(Allctr_t *allctr, Uint size)  }  static void -mbc_free(Allctr_t *allctr, void *p, Carrier_t **busy_pcrr_pp) +mbc_free(Allctr_t *allctr, ErtsAlcType_t type, void *p, Carrier_t **busy_pcrr_pp)  {      Uint is_first_blk;      Uint is_last_blk; @@ -2517,7 +2563,8 @@ mbc_free(Allctr_t *allctr, void *p, Carrier_t **busy_pcrr_pp)      crr = ABLK_TO_MBC(blk);      ERTS_ALC_CPOOL_FREE_OP(allctr); -    STAT_MBC_BLK_FREE(allctr, crr, busy_pcrr_pp, blk_sz, alcu_flgs); + +    STAT_MBC_BLK_FREE(allctr, type, crr, busy_pcrr_pp, blk_sz, alcu_flgs);      is_first_blk = IS_MBC_FIRST_ABLK(allctr, blk);      is_last_blk = IS_LAST_BLK(blk); @@ -2586,8 +2633,8 @@ mbc_free(Allctr_t *allctr, void *p, Carrier_t **busy_pcrr_pp)  }  static void * -mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs, -	    Carrier_t **busy_pcrr_pp) +mbc_realloc(Allctr_t *allctr, ErtsAlcType_t type, void *p, Uint size, +            Uint32 alcu_flgs, Carrier_t **busy_pcrr_pp)  {      void *new_p;      Uint old_blk_sz; @@ -2625,7 +2672,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,          new_blk = UMEM2BLK(new_p);          ASSERT(!(IS_MBC_BLK(new_blk) && ABLK_TO_MBC(new_blk) == *busy_pcrr_pp));          sys_memcpy(new_p, p, MIN(size, old_blk_sz - ABLK_HDR_SZ)); -        mbc_free(allctr, p, busy_pcrr_pp); +        mbc_free(allctr, type, p, busy_pcrr_pp);          return new_p;      } @@ -2702,7 +2749,7 @@ mbc_realloc(Allctr_t *allctr, void *p, 
Uint size, Uint32 alcu_flgs,  	crr = ABLK_TO_MBC(blk);  	ERTS_ALC_CPOOL_REALLOC_OP(allctr); -	STAT_MBC_BLK_FREE(allctr, crr, NULL, old_blk_sz, alcu_flgs); +	STAT_MBC_BLK_FREE(allctr, type, crr, NULL, old_blk_sz, alcu_flgs);  	STAT_MBC_BLK_ALLOC(allctr, crr, blk_sz, alcu_flgs);  	ASSERT(MBC_BLK_SZ(blk) >= allctr->min_block_size); @@ -2806,7 +2853,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,  	    }  	    ERTS_ALC_CPOOL_REALLOC_OP(allctr); -	    STAT_MBC_BLK_FREE(allctr, crr, NULL, old_blk_sz, alcu_flgs); +	    STAT_MBC_BLK_FREE(allctr, type, crr, NULL, old_blk_sz, alcu_flgs);  	    STAT_MBC_BLK_ALLOC(allctr, crr, blk_sz, alcu_flgs);  	    ASSERT(IS_ALLOCED_BLK(blk)); @@ -2867,7 +2914,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,  	if (!new_p)  	    return NULL;  	sys_memcpy(new_p, p, MIN(size, old_blk_sz - ABLK_HDR_SZ)); -	mbc_free(allctr, p, busy_pcrr_pp); +	mbc_free(allctr, type, p, busy_pcrr_pp);  	return new_p; @@ -2897,7 +2944,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,  			       1);  	    new_p = BLK2UMEM(new_blk);  	    sys_memcpy(new_p, p, MIN(size, old_blk_sz - ABLK_HDR_SZ)); -	    mbc_free(allctr, p, NULL); +	    mbc_free(allctr, type, p, NULL);  	    return new_p;  	}  	else { @@ -2954,7 +3001,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,  			       0);  	    ERTS_ALC_CPOOL_FREE_OP(allctr); -	    STAT_MBC_BLK_FREE(allctr, crr, NULL, old_blk_sz, alcu_flgs); +	    STAT_MBC_BLK_FREE(allctr, type, crr, NULL, old_blk_sz, alcu_flgs);  	    return new_p;  	} @@ -2965,7 +3012,6 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,  #define ERTS_ALC_MAX_DEALLOC_CARRIER		10  #define ERTS_ALC_CPOOL_MAX_FETCH_INSPECT	100 -#define ERTS_ALC_CPOOL_CHECK_LIMIT_COUNT	100  #define ERTS_ALC_CPOOL_MAX_FAILED_STAT_READS	3  #define ERTS_ALC_CPOOL_PTR_MOD_MRK		(((erts_aint_t) 1) << 0) @@ -2992,14 +3038,11 @@ typedef union {  #  error "Carrier pool implementation assumes ERTS_ALC_A_MIN > ERTS_ALC_A_INVALID"  #endif -/* - * The pool is only allowed to be manipulated by managed - * threads except in the alloc_SUITE:cpool case. In this - * test case carrier_pool[ERTS_ALC_A_INVALID] will be - * used. - */ +/* The pools are only allowed to be manipulated by managed threads except in + * the alloc_SUITE:cpool test, where only test_carrier_pool is used. 
*/ -static ErtsAlcCrrPool_t carrier_pool[ERTS_ALC_A_MAX+1] erts_align_attribute(ERTS_CACHE_LINE_SIZE); +static ErtsAlcCrrPool_t firstfit_carrier_pool; +static ErtsAlcCrrPool_t test_carrier_pool;  #define ERTS_ALC_CPOOL_MAX_BACKOFF (1 << 8) @@ -3020,12 +3063,12 @@ backoff(int n)  static int  cpool_dbg_is_in_pool(Allctr_t *allctr, Carrier_t *crr)  { -    ErtsAlcCPoolData_t *sentinel = &carrier_pool[allctr->alloc_no].sentinel; +    ErtsAlcCPoolData_t *sentinel = allctr->cpool.sentinel;      ErtsAlcCPoolData_t *cpdp = sentinel;      Carrier_t *tmp_crr;      while (1) { -	cpdp = (ErtsAlcCPoolData_t *) (erts_atomic_read_ddrb(&cpdp->next) & ~FLG_MASK); +	cpdp = (ErtsAlcCPoolData_t *) (erts_atomic_read_ddrb(&cpdp->next) & ~CRR_FLG_MASK);  	if (cpdp == sentinel)  	    return 0;  	tmp_crr = (Carrier_t *) (((char *) cpdp) - offsetof(Carrier_t, cpool)); @@ -3037,7 +3080,7 @@ cpool_dbg_is_in_pool(Allctr_t *allctr, Carrier_t *crr)  static int  cpool_is_empty(Allctr_t *allctr)  { -    ErtsAlcCPoolData_t *sentinel = &carrier_pool[allctr->alloc_no].sentinel; +    ErtsAlcCPoolData_t *sentinel = allctr->cpool.sentinel;      return ((erts_atomic_read_rb(&sentinel->next) == (erts_aint_t) sentinel)  	    && (erts_atomic_read_rb(&sentinel->prev) == (erts_aint_t) sentinel));  } @@ -3127,16 +3170,31 @@ cpool_insert(Allctr_t *allctr, Carrier_t *crr)  {      ErtsAlcCPoolData_t *cpd1p, *cpd2p;      erts_aint_t val; -    ErtsAlcCPoolData_t *sentinel = &carrier_pool[allctr->alloc_no].sentinel; +    ErtsAlcCPoolData_t *sentinel = allctr->cpool.sentinel;      Allctr_t *orig_allctr = crr->cpool.orig_allctr; -    ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_INVALID /* testcase */ +    ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_TEST /* testcase */  			  || erts_thr_progress_is_managed_thread()); -    erts_atomic_add_nob(&orig_allctr->cpool.stat.blocks_size, -			(erts_aint_t) crr->cpool.blocks_size); -    erts_atomic_add_nob(&orig_allctr->cpool.stat.no_blocks, -			(erts_aint_t) crr->cpool.blocks); +    { +        int alloc_no = allctr->alloc_no; + +        ERTS_ALC_CPOOL_ASSERT( +            erts_atomic_read_nob(&orig_allctr->cpool.stat.blocks_size[alloc_no]) >= 0 && +            crr->cpool.blocks_size[alloc_no] >= 0); + +        ERTS_ALC_CPOOL_ASSERT( +            erts_atomic_read_nob(&orig_allctr->cpool.stat.no_blocks[alloc_no]) >= 0 && +            crr->cpool.blocks[alloc_no] >= 0); + +        /* We only modify the counter for our current type since the others are +         * conceptually still in the pool. 
*/ +        erts_atomic_add_nob(&orig_allctr->cpool.stat.blocks_size[alloc_no], +                            ((erts_aint_t) crr->cpool.blocks_size[alloc_no])); +        erts_atomic_add_nob(&orig_allctr->cpool.stat.no_blocks[alloc_no], +                            ((erts_aint_t) crr->cpool.blocks[alloc_no])); +    } +      erts_atomic_add_nob(&orig_allctr->cpool.stat.carriers_size,  			(erts_aint_t) CARRIER_SZ(crr));      erts_atomic_inc_nob(&orig_allctr->cpool.stat.no_carriers); @@ -3209,10 +3267,10 @@ cpool_delete(Allctr_t *allctr, Allctr_t *prev_allctr, Carrier_t *crr)      ErtsAlcCPoolData_t *cpd1p, *cpd2p;      erts_aint_t val;  #ifdef ERTS_ALC_CPOOL_DEBUG -    ErtsAlcCPoolData_t *sentinel = &carrier_pool[allctr->alloc_no].sentinel; +    ErtsAlcCPoolData_t *sentinel = allctr->cpool.sentinel;  #endif -    ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_INVALID /* testcase */ +    ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_TEST /* testcase */  			  || erts_thr_progress_is_managed_thread());      ERTS_ALC_CPOOL_ASSERT(sentinel != &crr->cpool); @@ -3288,28 +3346,43 @@ cpool_delete(Allctr_t *allctr, Allctr_t *prev_allctr, Carrier_t *crr)      crr->cpool.thr_prgr = erts_thr_progress_later(NULL); -    erts_atomic_add_nob(&prev_allctr->cpool.stat.blocks_size, -			-((erts_aint_t) crr->cpool.blocks_size)); -    erts_atomic_add_nob(&prev_allctr->cpool.stat.no_blocks, -			-((erts_aint_t) crr->cpool.blocks)); -    erts_atomic_add_nob(&prev_allctr->cpool.stat.carriers_size, +    { +        Allctr_t *orig_allctr = crr->cpool.orig_allctr; +        int alloc_no = allctr->alloc_no; + +        ERTS_ALC_CPOOL_ASSERT(orig_allctr == prev_allctr); + +        ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks_size[alloc_no] <= +            erts_atomic_read_nob(&orig_allctr->cpool.stat.blocks_size[alloc_no])); + +        ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks[alloc_no] <= +            erts_atomic_read_nob(&orig_allctr->cpool.stat.no_blocks[alloc_no])); + +        /* We only modify the counters for our current type since the others +         * were, conceptually, never taken out of the pool. 
*/ +        erts_atomic_add_nob(&orig_allctr->cpool.stat.blocks_size[alloc_no], +                            -((erts_aint_t) crr->cpool.blocks_size[alloc_no])); +        erts_atomic_add_nob(&orig_allctr->cpool.stat.no_blocks[alloc_no], +                            -((erts_aint_t) crr->cpool.blocks[alloc_no])); + +        erts_atomic_add_nob(&orig_allctr->cpool.stat.carriers_size,  			-((erts_aint_t) CARRIER_SZ(crr))); -    erts_atomic_dec_wb(&prev_allctr->cpool.stat.no_carriers); +        erts_atomic_dec_wb(&orig_allctr->cpool.stat.no_carriers); +    }  }  static Carrier_t *  cpool_fetch(Allctr_t *allctr, UWord size)  { -    enum { IGNORANT, HAS_SEEN_SENTINEL, THE_LAST_ONE } loop_state; -    int i; +    int i, seen_sentinel;      Carrier_t *crr;      Carrier_t *reinsert_crr = NULL;      ErtsAlcCPoolData_t *cpdp;      ErtsAlcCPoolData_t *cpool_entrance = NULL;      ErtsAlcCPoolData_t *sentinel; -    ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_INVALID /* testcase */ +    ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_TEST /* testcase */  			  || erts_thr_progress_is_managed_thread());      i = ERTS_ALC_CPOOL_MAX_FETCH_INSPECT; @@ -3411,48 +3484,39 @@ cpool_fetch(Allctr_t *allctr, UWord size)      /*       * Finally search the shared pool and try employ foreign carriers       */ -    sentinel = &carrier_pool[allctr->alloc_no].sentinel; +    sentinel = allctr->cpool.sentinel;      if (cpool_entrance) {          /*           * We saw a pooled carried above, use it as entrance into the pool  	 */ -	cpdp = cpool_entrance;      }      else {          /* -         * No pooled carried seen above. Start search at cpool sentinel, +         * No pooled carrier seen above. Start search at cpool sentinel,  	 * but begin by passing one element before trying to fetch.  	 * This in order to avoid contention with threads inserting elements.  	 */ -	cpool_entrance = sentinel; -	cpdp = cpool_aint2cpd(cpool_read(&cpool_entrance->prev)); -	if (cpdp == sentinel) +        cpool_entrance = cpool_aint2cpd(cpool_read(&sentinel->prev)); +	if (cpool_entrance == sentinel)  	    goto check_dc_list;      } -    loop_state = IGNORANT; +    cpdp = cpool_entrance; +    seen_sentinel = 0;      do {  	erts_aint_t exp;  	cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev)); -	if (cpdp == cpool_entrance) { -	    if (cpool_entrance == sentinel) { -		cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev)); -		if (cpdp == sentinel) -		    break; -	    } -            loop_state = THE_LAST_ONE; -	} -	else if (cpdp == sentinel) { -	    if (loop_state == HAS_SEEN_SENTINEL) { +        if (cpdp == sentinel) { +	    if (seen_sentinel) {  		/* We been here before. cpool_entrance must have been removed */                  INC_CC(allctr->cpool.stat.entrance_removed);  		break;  	    } -	    cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev)); -	    if (cpdp == sentinel) -                break; -            loop_state = HAS_SEEN_SENTINEL; +            seen_sentinel = 1; +            continue;  	} +        ASSERT(cpdp != cpool_entrance || seen_sentinel); +  	crr = ErtsContainerStruct(cpdp, Carrier_t, cpool);  	exp = erts_atomic_read_rb(&crr->allctr); @@ -3485,7 +3549,7 @@ cpool_fetch(Allctr_t *allctr, UWord size)              INC_CC(allctr->cpool.stat.fail_shared);  	    return NULL;          } -    }while (loop_state != THE_LAST_ONE); +    }while (cpdp != cpool_entrance);  check_dc_list:      /* Last; check our own pending dealloc carrier list... 
*/ @@ -3664,8 +3728,9 @@ cpool_init_carrier_data(Allctr_t *allctr, Carrier_t *crr)      crr->cpool.orig_allctr = allctr;      crr->cpool.thr_prgr = ERTS_THR_PRGR_INVALID;      erts_atomic_init_nob(&crr->cpool.max_size, 0); -    crr->cpool.blocks = 0; -    crr->cpool.blocks_size = 0; +    sys_memset(&crr->cpool.blocks_size, 0, sizeof(crr->cpool.blocks_size)); +    sys_memset(&crr->cpool.blocks, 0, sizeof(crr->cpool.blocks)); +    crr->cpool.total_blocks_size = 0;      if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr))  	crr->cpool.abandon_limit = 0;      else { @@ -3680,14 +3745,14 @@ cpool_init_carrier_data(Allctr_t *allctr, Carrier_t *crr)      crr->cpool.state = ERTS_MBC_IS_HOME;  } -static void -set_new_allctr_abandon_limit(Allctr_t *allctr) + + +static UWord +allctr_abandon_limit(Allctr_t *allctr)  {      UWord limit;      UWord csz; -    allctr->cpool.check_limit_count = ERTS_ALC_CPOOL_CHECK_LIMIT_COUNT; -      csz = allctr->mbcs.curr.norm.mseg.size;      csz += allctr->mbcs.curr.norm.sys_alloc.size; @@ -3697,7 +3762,13 @@ set_new_allctr_abandon_limit(Allctr_t *allctr)      else  	limit = (csz/100)*allctr->cpool.util_limit; -    allctr->cpool.abandon_limit = limit; +    return limit; +} + +static void ERTS_INLINE +set_new_allctr_abandon_limit(Allctr_t *allctr) +{ +    allctr->cpool.abandon_limit = allctr_abandon_limit(allctr);  }  static void @@ -3709,7 +3780,6 @@ abandon_carrier(Allctr_t *allctr, Carrier_t *crr)      unlink_carrier(&allctr->mbc_list, crr);      allctr->remove_mbc(allctr, crr); -    set_new_allctr_abandon_limit(allctr);      cpool_insert(allctr, crr); @@ -3762,7 +3832,8 @@ poolify_my_carrier(Allctr_t *allctr, Carrier_t *crr)  }  static void -cpool_read_stat(Allctr_t *allctr, UWord *nocp, UWord *cszp, UWord *nobp, UWord *bszp) +cpool_read_stat(Allctr_t *allctr, int alloc_no, +                UWord *nocp, UWord *cszp, UWord *nobp, UWord *bszp)  {      int i;      UWord noc = 0, csz = 0, nob = 0, bsz = 0; @@ -3782,10 +3853,10 @@ cpool_read_stat(Allctr_t *allctr, UWord *nocp, UWord *cszp, UWord *nobp, UWord *  			? erts_atomic_read_nob(&allctr->cpool.stat.carriers_size)  			: 0);  	tnob = (UWord) (nobp -			? erts_atomic_read_nob(&allctr->cpool.stat.no_blocks) +			? erts_atomic_read_nob(&allctr->cpool.stat.no_blocks[alloc_no])  			: 0);  	tbsz = (UWord) (bszp -			? erts_atomic_read_nob(&allctr->cpool.stat.blocks_size) +			? 
erts_atomic_read_nob(&allctr->cpool.stat.blocks_size[alloc_no])  			: 0);  	if (tnoc == noc && tcsz == csz && tnob == nob && tbsz == bsz)  	    break; @@ -4040,6 +4111,7 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)  #if HAVE_ERTS_MSEG      mbc_final_touch:  #endif +        set_new_allctr_abandon_limit(allctr);  	blk = MBC_TO_FIRST_BLK(allctr, crr); @@ -4258,7 +4330,6 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp)      else {  	ASSERT(IS_MBC_FIRST_FBLK(allctr, blk));  	crr = FIRST_BLK_TO_MBC(allctr, blk); -	crr_sz = CARRIER_SZ(crr);  #ifdef DEBUG  	if (!allctr->stopped) { @@ -4290,15 +4361,7 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp)  	else  	{  	    unlink_carrier(&allctr->mbc_list, crr); -#if HAVE_ERTS_MSEG -	    if (IS_MSEG_CARRIER(crr)) { -		ASSERT(crr_sz % ERTS_SACRR_UNIT_SZ == 0); -		STAT_MSEG_MBC_FREE(allctr, crr_sz); -	    } -	    else -#endif -		STAT_SYS_ALLOC_MBC_FREE(allctr, crr_sz); - +            STAT_MBC_FREE(allctr, crr);              if (allctr->remove_mbc)                  allctr->remove_mbc(allctr, crr);  	} @@ -4312,7 +4375,7 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp)              LTTNG5(carrier_destroy,                  ERTS_ALC_A2AD(allctr->alloc_no),                  allctr->ix, -                crr_sz, +                CARRIER_SZ(crr),                  mbc_stats,                  sbc_stats);          } @@ -4390,6 +4453,8 @@ static struct {      Eterm blocks_size;      Eterm blocks; +    Eterm foreign_blocks; +      Eterm calls;      Eterm sys_alloc;      Eterm sys_free; @@ -4490,6 +4555,7 @@ init_atoms(Allctr_t *allctr)  	AM_INIT(carriers);  	AM_INIT(blocks_size);  	AM_INIT(blocks); +	AM_INIT(foreign_blocks);  	AM_INIT(calls);  	AM_INIT(sys_alloc); @@ -4625,7 +4691,6 @@ sz_info_fix(Allctr_t *allctr,  		ErtsAlcFixList_t *fix = &allctr->fix[ix];  		UWord alloced = fix->type_size * fix->u.cpool.allocated;  		UWord used = fix->type_size * fix->u.cpool.used; -                ErtsAlcType_t n = ERTS_ALC_N_MIN_A_FIXED_SIZE + ix;  		if (print_to_p) {  		    fmtfn_t to = *print_to_p; @@ -4633,14 +4698,14 @@ sz_info_fix(Allctr_t *allctr,  		    erts_print(to,  			       arg,  			       "fix type internal: %s %bpu %bpu\n", -			       (char *) ERTS_ALC_N2TD(n), +			       (char *) ERTS_ALC_T2TD(fix->type),  			       alloced,  			       used);  		}  		if (hpp || szp) {  		    add_3tup(hpp, szp, &res, -			     alloc_type_atoms[n], +			     alloc_type_atoms[ERTS_ALC_T2N(fix->type)],  			     bld_unstable_uint(hpp, szp, alloced),  			     bld_unstable_uint(hpp, szp, used));  		} @@ -4653,7 +4718,6 @@ sz_info_fix(Allctr_t *allctr,  	    ErtsAlcFixList_t *fix = &allctr->fix[ix];  	    UWord alloced = fix->type_size * fix->u.nocpool.allocated;  	    UWord used = fix->type_size*fix->u.nocpool.used; -            ErtsAlcType_t n = ERTS_ALC_N_MIN_A_FIXED_SIZE + ix;  	    if (print_to_p) {  		fmtfn_t to = *print_to_p; @@ -4661,14 +4725,14 @@ sz_info_fix(Allctr_t *allctr,  		erts_print(to,  			   arg,  			   "fix type: %s %bpu %bpu\n", -			   (char *) ERTS_ALC_N2TD(n), +			   (char *) ERTS_ALC_T2TD(fix->type),  			   alloced,  			   used);  	    }  	    if (hpp || szp) {  		add_3tup(hpp, szp, &res, -			 alloc_type_atoms[n], +			 alloc_type_atoms[ERTS_ALC_T2N(fix->type)],  			 bld_unstable_uint(hpp, szp, alloced),  			 bld_unstable_uint(hpp, szp, used));  	    } @@ -4741,9 +4805,9 @@ info_cpool(Allctr_t *allctr,      noc = csz = nob = bsz = ~0;      if (print_to_p || hpp) {  	if 
(sz_only) -	    cpool_read_stat(allctr, NULL, &csz, NULL, &bsz); +	    cpool_read_stat(allctr, allctr->alloc_no, NULL, &csz, NULL, &bsz);  	else -	    cpool_read_stat(allctr, &noc, &csz, &nob, &bsz); +	    cpool_read_stat(allctr, allctr->alloc_no, &noc, &csz, &nob, &bsz);      }      if (print_to_p) { @@ -4758,6 +4822,10 @@ info_cpool(Allctr_t *allctr,      }      if (hpp || szp) { +        Eterm foreign_blocks; +        int i; + +        foreign_blocks = NIL;  	res = NIL;        if (!sz_only) { @@ -4804,22 +4872,61 @@ info_cpool(Allctr_t *allctr,          add_3tup(hpp, szp, &res, am.entrance_removed,                   bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.entrance_removed)),                   bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.entrance_removed))); +       }  	add_2tup(hpp, szp, &res,  		 am.carriers_size,  		 bld_unstable_uint(hpp, szp, csz)); -      } -	if (!sz_only) -	    add_2tup(hpp, szp, &res, -		     am.carriers, -		     bld_unstable_uint(hpp, szp, noc)); + +        if (!sz_only) { +            add_2tup(hpp, szp, &res, +                     am.carriers, +                     bld_unstable_uint(hpp, szp, noc)); +        } +  	add_2tup(hpp, szp, &res,  		 am.blocks_size,  		 bld_unstable_uint(hpp, szp, bsz)); -	if (!sz_only) + +	if (!sz_only) {  	    add_2tup(hpp, szp, &res,  		     am.blocks,  		     bld_unstable_uint(hpp, szp, nob)); +        } + +        for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) { +            const char *name_str; +            Eterm name, info; + +            if (i == allctr->alloc_no) { +                continue; +            } + +            cpool_read_stat(allctr, i, NULL, NULL, &nob, &bsz); + +            if (bsz == 0 && (nob == 0 || sz_only)) { +                continue; +            } + +            name_str = ERTS_ALC_A2AD(i); +            info = NIL; + +            add_2tup(hpp, szp, &info, +                     am.blocks_size, +                     bld_unstable_uint(hpp, szp, bsz)); + +            if (!sz_only) { +                add_2tup(hpp, szp, &info, +                     am.blocks, +                     bld_unstable_uint(hpp, szp, nob)); +            } + +            name = am_atom_put(name_str, sys_strlen(name_str)); + +            add_2tup(hpp, szp, &foreign_blocks, name, info); +        } + +        add_2tup(hpp, szp, &res, am.foreign_blocks, foreign_blocks);      }      return res; @@ -5455,6 +5562,19 @@ erts_alcu_info(Allctr_t *allctr,      return res;  } +void +erts_alcu_foreign_size(Allctr_t *allctr, ErtsAlcType_t alloc_no, AllctrSize_t *size) +{ +    if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) { +        UWord csz, bsz; +        cpool_read_stat(allctr, alloc_no, NULL, &csz, NULL, &bsz); +        size->carriers = csz; +        size->blocks = bsz; +    } else { +        size->carriers = 0; +        size->blocks = 0; +    } +}  void  erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size, ErtsAlcUFixInfo_t *fi, int fisz) @@ -5473,7 +5593,7 @@ erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size, ErtsAlcUFixInfo_t *      if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {  	UWord csz, bsz; -	cpool_read_stat(allctr, NULL, &csz, NULL, &bsz); +	cpool_read_stat(allctr, allctr->alloc_no, NULL, &csz, NULL, &bsz);  	size->blocks += bsz;  	size->carriers += csz;      } @@ -5518,6 +5638,11 @@ do_erts_alcu_alloc(ErtsAlcType_t type, Allctr_t *allctr, Uint size)      ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr); +    /* Reject sizes that can't fit into the header word. 
*/ +    if (size > ~BLK_FLG_MASK) { +        return NULL; +    } +  #if ALLOC_ZERO_EQ_NULL      if (!size)  	return NULL; @@ -5684,12 +5809,11 @@ do_erts_alcu_free(ErtsAlcType_t type, Allctr_t *allctr, void *p,      ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);      if (p) { -  	INC_CC(allctr->calls.this_free); -	if (allctr->fix) { +        if (ERTS_ALC_IS_FIX_TYPE(type)) {  	    if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) -		fix_cpool_free(allctr, type, p, busy_pcrr_pp, 1); +		fix_cpool_free(allctr, type, 0, p, busy_pcrr_pp);  	    else  		fix_nocpool_free(allctr, type, p);  	} @@ -5698,7 +5822,7 @@ do_erts_alcu_free(ErtsAlcType_t type, Allctr_t *allctr, void *p,  	    if (IS_SBC_BLK(blk))  		destroy_carrier(allctr, blk, NULL);  	    else -		mbc_free(allctr, p, busy_pcrr_pp); +		mbc_free(allctr, type, p, busy_pcrr_pp);  	}      }  } @@ -5800,6 +5924,11 @@ do_erts_alcu_realloc(ErtsAlcType_t type,  	return res;      } +    /* Reject sizes that can't fit into the header word. */ +    if (size > ~BLK_FLG_MASK) { +        return NULL; +    } +  #if ALLOC_ZERO_EQ_NULL      if (!size) {  	ASSERT(p); @@ -5816,7 +5945,7 @@ do_erts_alcu_realloc(ErtsAlcType_t type,      if (size < allctr->sbc_threshold) {  	if (IS_MBC_BLK(blk)) -	    res = mbc_realloc(allctr, p, size, alcu_flgs, busy_pcrr_pp); +	    res = mbc_realloc(allctr, type, p, size, alcu_flgs, busy_pcrr_pp);  	else {  	    Uint used_sz = SBC_HEADER_SIZE + ABLK_HDR_SZ + size;  	    Uint crr_sz; @@ -5875,7 +6004,7 @@ do_erts_alcu_realloc(ErtsAlcType_t type,  		sys_memcpy((void *) res,  			   (void *) p,  			   MIN(MBC_ABLK_SZ(blk) - ABLK_HDR_SZ, size)); -		mbc_free(allctr, p, busy_pcrr_pp); +		mbc_free(allctr, type, p, busy_pcrr_pp);  	    }  	    else  		res = NULL; @@ -6243,6 +6372,7 @@ int  erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)  {      /* erts_alcu_start assumes that allctr has been zeroed */ +    int i;      if (((UWord)allctr & ERTS_CRR_ALCTR_FLG_MASK) != 0) {          erts_exit(ERTS_ABORT_EXIT, "%s:%d:erts_alcu_start: Alignment error\n", @@ -6266,6 +6396,11 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)      allctr->ix				= init->ix;      allctr->alloc_no			= init->alloc_no; +    allctr->alloc_strat			= init->alloc_strat; + +    ASSERT(allctr->alloc_no >= ERTS_ALC_A_MIN && +           allctr->alloc_no <= ERTS_ALC_A_MAX); +      if (allctr->alloc_no < ERTS_ALC_A_MIN  	|| ERTS_ALC_A_MAX < allctr->alloc_no)  	allctr->alloc_no = ERTS_ALC_A_INVALID; @@ -6318,8 +6453,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)  						       + sizeof(FreeBlkFtr_t));      if (init->tpref) {  	Uint sz = ABLK_HDR_SZ; -	sz += (init->fix ?  
-	       sizeof(ErtsAllctrFixDDBlock_t) : sizeof(ErtsAllctrDDBlock_t)); +	sz += sizeof(ErtsAllctrDDBlock_t);  	sz = UNIT_CEILING(sz);  	if (sz > allctr->min_block_size)  	    allctr->min_block_size = sz; @@ -6330,15 +6464,23 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)      allctr->cpool.dc_list.last = NULL;      allctr->cpool.abandon_limit = 0;      allctr->cpool.disable_abandon = 0; -    erts_atomic_init_nob(&allctr->cpool.stat.blocks_size, 0); -    erts_atomic_init_nob(&allctr->cpool.stat.no_blocks, 0); +    for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) { +        erts_atomic_init_nob(&allctr->cpool.stat.blocks_size[i], 0); +        erts_atomic_init_nob(&allctr->cpool.stat.no_blocks[i], 0); +    }      erts_atomic_init_nob(&allctr->cpool.stat.carriers_size, 0);      erts_atomic_init_nob(&allctr->cpool.stat.no_carriers, 0); -    allctr->cpool.check_limit_count = ERTS_ALC_CPOOL_CHECK_LIMIT_COUNT;      if (!init->ts && init->acul && init->acnl) {          allctr->cpool.util_limit = init->acul;          allctr->cpool.in_pool_limit = init->acnl;          allctr->cpool.fblk_min_limit = init->acfml; + +        if (allctr->alloc_strat == ERTS_ALC_S_FIRSTFIT) { +            allctr->cpool.sentinel = &firstfit_carrier_pool.sentinel; +        } +        else if (allctr->alloc_no != ERTS_ALC_A_TEST) { +            ERTS_INTERNAL_ERROR("Impossible carrier migration config."); +        }      }      else {          allctr->cpool.util_limit = 0; @@ -6346,6 +6488,12 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)          allctr->cpool.fblk_min_limit = 0;      } +    /* The invasive tests don't really care whether the pool is enabled or not, +     * so we need to set this unconditionally for this allocator type. */ +    if (allctr->alloc_no == ERTS_ALC_A_TEST) { +        allctr->cpool.sentinel = &test_carrier_pool.sentinel; +    } +      allctr->sbc_threshold = adjust_sbct(allctr, init->sbct);  #if HAVE_ERTS_MSEG @@ -6457,9 +6605,9 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)  	allctr->fix_shrink_scheduled = 0;  	for (i = 0; i < ERTS_ALC_NO_FIXED_SIZES; i++) {  	    allctr->fix[i].type_size = init->fix_type_size[i]; +	    allctr->fix[i].type = ERTS_ALC_N2T(i + ERTS_ALC_N_MIN_A_FIXED_SIZE);  	    allctr->fix[i].list_size = 0;  	    allctr->fix[i].list = NULL; -	    ASSERT(allctr->fix[i].type_size >= sizeof(ErtsAllctrFixDDBlock_t));  	    if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {  		allctr->fix[i].u.cpool.min_list_size = 0;  		allctr->fix[i].u.cpool.shrink_list = 0; @@ -6508,12 +6656,16 @@ erts_alcu_stop(Allctr_t *allctr)  void  erts_alcu_init(AlcUInit_t *init)  { -    int i; -    for (i = 0; i <= ERTS_ALC_A_MAX; i++) { -	ErtsAlcCPoolData_t *sentinel = &carrier_pool[i].sentinel; -	erts_atomic_init_nob(&sentinel->next, (erts_aint_t) sentinel); -	erts_atomic_init_nob(&sentinel->prev, (erts_aint_t) sentinel); -    } +    ErtsAlcCPoolData_t *sentinel; + +    sentinel = &firstfit_carrier_pool.sentinel; +    erts_atomic_init_nob(&sentinel->next, (erts_aint_t) sentinel); +    erts_atomic_init_nob(&sentinel->prev, (erts_aint_t) sentinel); + +    sentinel = &test_carrier_pool.sentinel; +    erts_atomic_init_nob(&sentinel->next, (erts_aint_t) sentinel); +    erts_atomic_init_nob(&sentinel->prev, (erts_aint_t) sentinel); +      ERTS_CT_ASSERT(SBC_BLK_SZ_MASK == MBC_FBLK_SZ_MASK); /* see BLK_SZ */  #if HAVE_ERTS_MSEG      ASSERT(erts_mseg_unit_size() == ERTS_SACRR_UNIT_SZ); @@ -6695,7 +6847,7 @@ static int blockscan_cpool_yielding(blockscan_t *state)  {      ErtsAlcCPoolData_t 
*sentinel, *cursor; -    sentinel = &carrier_pool[(state->allocator)->alloc_no].sentinel; +    sentinel = (state->allocator)->cpool.sentinel;      cursor = blockscan_restore_cpool_cursor(state);      if (ERTS_PROC_IS_EXITING(state->process)) { @@ -6827,11 +6979,8 @@ static int blockscan_sweep_mbcs(blockscan_t *state)  static int blockscan_sweep_cpool(blockscan_t *state)  {      if (state->current_op != blockscan_sweep_cpool) { -        ErtsAlcCPoolData_t *sentinel; -          SET_CARRIER_HDR(&state->dummy_carrier, 0, SCH_MBC, state->allocator); -        sentinel = &carrier_pool[(state->allocator)->alloc_no].sentinel; -        state->cpool_cursor = sentinel; +        state->cpool_cursor = (state->allocator)->cpool.sentinel;      }      state->current_op = blockscan_sweep_cpool; @@ -7115,11 +7264,14 @@ static int gather_ahist_scan(Allctr_t *allocator,          alcu_atag_t tag;          block = SBC2BLK(allocator, carrier); -        tag = GET_BLK_ATAG(block); -        ASSERT(DBG_IS_VALID_ATAG(allocator, tag)); +        if (BLK_HAS_ATAG(block)) { +            tag = GET_BLK_ATAG(block); -        gather_ahist_update(state, tag, SBC_BLK_SZ(block)); +            ASSERT(DBG_IS_VALID_ATAG(tag)); + +            gather_ahist_update(state, tag, SBC_BLK_SZ(block)); +        }      } else {          UWord scanned_bytes = MBC_HEADER_SIZE(allocator); @@ -7130,10 +7282,10 @@ static int gather_ahist_scan(Allctr_t *allocator,          while (1) {              UWord block_size = MBC_BLK_SZ(block); -            if (IS_ALLOCED_BLK(block)) { +            if (IS_ALLOCED_BLK(block) && BLK_HAS_ATAG(block)) {                  alcu_atag_t tag = GET_BLK_ATAG(block); -                ASSERT(DBG_IS_VALID_ATAG(allocator, tag)); +                ASSERT(DBG_IS_VALID_ATAG(tag));                  gather_ahist_update(state, tag, block_size);              } @@ -7293,8 +7445,6 @@ int erts_alcu_gather_alloc_histograms(Process *p, int allocator_num,                                            sched_id,                                            &allocator)) {          return 0; -    } else if (!allocator->atags) { -        return 0;      }      ensure_atoms_initialized(allocator); diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h index f26ace1534..9ab8589bf3 100644 --- a/erts/emulator/beam/erl_alloc_util.h +++ b/erts/emulator/beam/erl_alloc_util.h @@ -24,6 +24,7 @@  #define ERTS_ALCU_VSN_STR "3.0"  #include "erl_alloc_types.h" +#include "erl_alloc.h"  #define ERL_THREADS_EMU_INTERNAL__  #include "erl_threads.h" @@ -44,6 +45,7 @@ typedef struct {  typedef struct {      char *name_prefix;      ErtsAlcType_t alloc_no; +    ErtsAlcStrat_t alloc_strat;      int force;      int ix;      int ts; @@ -101,6 +103,7 @@ typedef struct {  #define ERTS_DEFAULT_ALLCTR_INIT {                                         \      NULL,                                                                  \      ERTS_ALC_A_INVALID,	/* (number) alloc_no: allocator number           */\ +    ERTS_ALC_S_INVALID,	/* (number) alloc_strat: allocator strategy      */\      0,			/* (bool)   force:  force enabled                */\      0,			/* (number) ix: instance index                   */\      1,			/* (bool)   ts:     thread safe                  */\ @@ -138,6 +141,7 @@ typedef struct {  #define ERTS_DEFAULT_ALLCTR_INIT {                                         \      NULL,                                                                  \      ERTS_ALC_A_INVALID,	/* (number) alloc_no: allocator number           */\ +    
ERTS_ALC_S_INVALID,	/* (number) alloc_strat: allocator strategy      */\      0,			/* (bool)   force:  force enabled                */\      0,			/* (number) ix: instance index                   */\      1,			/* (bool)   ts:     thread safe                  */\ @@ -188,6 +192,7 @@ Eterm	erts_alcu_info(Allctr_t *, int, int, fmtfn_t *, void *, Uint **, Uint *);  void	erts_alcu_init(AlcUInit_t *);  void    erts_alcu_current_size(Allctr_t *, AllctrSize_t *,  			       ErtsAlcUFixInfo_t *, int); +void    erts_alcu_foreign_size(Allctr_t *, ErtsAlcType_t, AllctrSize_t *);  void    erts_alcu_check_delayed_dealloc(Allctr_t *, int, int *, ErtsThrPrgrVal *, int *);  erts_aint32_t erts_alcu_fix_alloc_shrink(Allctr_t *, erts_aint32_t); @@ -286,10 +291,18 @@ void erts_alcu_sched_spec_data_init(struct ErtsSchedulerData_ *esdp);  #define UNIT_FLOOR(X)	((X) & UNIT_MASK)  #define UNIT_CEILING(X)	UNIT_FLOOR((X) + INV_UNIT_MASK) -#define FLG_MASK		INV_UNIT_MASK -#define SBC_BLK_SZ_MASK         UNIT_MASK -#define MBC_FBLK_SZ_MASK        UNIT_MASK -#define CARRIER_SZ_MASK         UNIT_MASK +/* We store flags in the bits that no one will ever use. Generally these are + * the bits below the alignment size, but for blocks we also steal the highest + * bit since the header's a size and no one can expect to be able to allocate + * objects that large. */ +#define HIGHEST_WORD_BIT        (((UWord) 1) << (sizeof(UWord) * CHAR_BIT - 1)) + +#define BLK_FLG_MASK            (INV_UNIT_MASK | HIGHEST_WORD_BIT) +#define SBC_BLK_SZ_MASK         (~BLK_FLG_MASK) +#define MBC_FBLK_SZ_MASK        (~BLK_FLG_MASK) + +#define CRR_FLG_MASK        INV_UNIT_MASK +#define CRR_SZ_MASK         UNIT_MASK  #if ERTS_HAVE_MSEG_SUPER_ALIGNED \      || (!HAVE_ERTS_MSEG && ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC) @@ -299,9 +312,9 @@ void erts_alcu_sched_spec_data_init(struct ErtsSchedulerData_ *esdp);  #    define ERTS_SUPER_ALIGN_BITS 18  #  endif  #  ifdef ARCH_64  -#    define MBC_ABLK_OFFSET_BITS   24 +#    define MBC_ABLK_OFFSET_BITS   23  #  else -#    define MBC_ABLK_OFFSET_BITS   9 +#    define MBC_ABLK_OFFSET_BITS   8       /* Affects hard limits for sbct and lmbcs documented in erts_alloc.xml */  #  endif  #  define ERTS_SACRR_UNIT_SHIFT		ERTS_SUPER_ALIGN_BITS @@ -322,18 +335,17 @@ void erts_alcu_sched_spec_data_init(struct ErtsSchedulerData_ *esdp);  #if MBC_ABLK_OFFSET_BITS  #  define MBC_ABLK_OFFSET_SHIFT  (sizeof(UWord)*8 - MBC_ABLK_OFFSET_BITS) -#  define MBC_ABLK_OFFSET_MASK   (~((UWord)0) << MBC_ABLK_OFFSET_SHIFT) -#  define MBC_ABLK_SZ_MASK	(~MBC_ABLK_OFFSET_MASK & ~FLG_MASK) +#  define MBC_ABLK_OFFSET_MASK   ((~((UWord)0) << MBC_ABLK_OFFSET_SHIFT) & ~BLK_FLG_MASK) +#  define MBC_ABLK_SZ_MASK	(~MBC_ABLK_OFFSET_MASK & ~BLK_FLG_MASK)  #else -#  define MBC_ABLK_SZ_MASK	(~FLG_MASK) +#  define MBC_ABLK_SZ_MASK	(~BLK_FLG_MASK)  #endif  #define MBC_ABLK_SZ(B) (ASSERT(!is_sbc_blk(B)), (B)->bhdr & MBC_ABLK_SZ_MASK)  #define MBC_FBLK_SZ(B) (ASSERT(!is_sbc_blk(B)), (B)->bhdr & MBC_FBLK_SZ_MASK)  #define SBC_BLK_SZ(B) (ASSERT(is_sbc_blk(B)), (B)->bhdr & SBC_BLK_SZ_MASK) -#define CARRIER_SZ(C) \ -  ((C)->chdr & CARRIER_SZ_MASK) +#define CARRIER_SZ(C) ((C)->chdr & CRR_SZ_MASK)  typedef union {char c[ERTS_ALLOC_ALIGN_BYTES]; long l; double d;} Unit_t; @@ -351,12 +363,20 @@ typedef struct {  #endif  } Block_t; -typedef union ErtsAllctrDDBlock_t_ ErtsAllctrDDBlock_t; +typedef struct ErtsAllctrDDBlock__ { +    union  { +        struct ErtsAllctrDDBlock__ *ptr_next; +        erts_atomic_t atmc_next; +    } u; +    ErtsAlcType_t type; +    Uint32 flags; 
+} ErtsAllctrDDBlock_t; -union ErtsAllctrDDBlock_t_ { -    erts_atomic_t atmc_next; -    ErtsAllctrDDBlock_t *ptr_next; -}; +/* Deallocation was caused by shrinking a fix-list, so usage statistics has + * already been updated. */ +#define DEALLOC_FLG_FIX_SHRINK    (1 << 0) +/* Deallocation was redirected to another instance. */ +#define DEALLOC_FLG_REDIRECTED    (1 << 1)  typedef struct {      Block_t blk; @@ -365,11 +385,10 @@ typedef struct {  #endif  } ErtsFakeDDBlock_t; - -  #define THIS_FREE_BLK_HDR_FLG 	(((UWord) 1) << 0)  #define PREV_FREE_BLK_HDR_FLG 	(((UWord) 1) << 1)  #define LAST_BLK_HDR_FLG 	(((UWord) 1) << 2) +#define ATAG_BLK_HDR_FLG 	HIGHEST_WORD_BIT  #define SBC_BLK_HDR_FLG /* Special flag combo for (allocated) SBC blocks */\      (THIS_FREE_BLK_HDR_FLG | PREV_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG) @@ -381,9 +400,9 @@ typedef struct {  #define HOMECOMING_MBC_BLK_HDR (THIS_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG)  #define IS_FREE_LAST_MBC_BLK(B) \ -    (((B)->bhdr & FLG_MASK) == (THIS_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG)) +    (((B)->bhdr & BLK_FLG_MASK) == (THIS_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG)) -#define IS_SBC_BLK(B) (((B)->bhdr & FLG_MASK) == SBC_BLK_HDR_FLG) +#define IS_SBC_BLK(B) (((B)->bhdr & SBC_BLK_HDR_FLG) == SBC_BLK_HDR_FLG)  #define IS_MBC_BLK(B) (!IS_SBC_BLK((B)))  #define IS_FREE_BLK(B) (ASSERT(IS_MBC_BLK(B)), \  			(B)->bhdr & THIS_FREE_BLK_HDR_FLG) @@ -394,7 +413,8 @@ typedef struct {  #  define ABLK_TO_MBC(B) \      (ASSERT(IS_MBC_BLK(B) && !IS_FREE_BLK(B)), \       (Carrier_t*)((ERTS_SACRR_UNIT_FLOOR((UWord)(B)) - \ -		  (((B)->bhdr >> MBC_ABLK_OFFSET_SHIFT) << ERTS_SACRR_UNIT_SHIFT)))) +		  ((((B)->bhdr & ~BLK_FLG_MASK) >> MBC_ABLK_OFFSET_SHIFT) \ +                      << ERTS_SACRR_UNIT_SHIFT))))  #  define BLK_TO_MBC(B) (IS_FREE_BLK(B) ? 
FBLK_TO_MBC(B) : ABLK_TO_MBC(B))  #else  #  define FBLK_TO_MBC(B) ((B)->carrier) @@ -433,8 +453,9 @@ typedef struct {      ErtsThrPrgrVal thr_prgr;      erts_atomic_t max_size;      UWord abandon_limit; -    UWord blocks; -    UWord blocks_size; +    UWord blocks[ERTS_ALC_A_MAX + 1]; +    UWord blocks_size[ERTS_ALC_A_MAX + 1]; +    UWord total_blocks_size;      enum {          ERTS_MBC_IS_HOME,          ERTS_MBC_WAS_POOLED, @@ -452,7 +473,7 @@ struct Carrier_t_ {  };  #define ERTS_ALC_CARRIER_TO_ALLCTR(C) \ -  ((Allctr_t *) (erts_atomic_read_nob(&(C)->allctr) & ~FLG_MASK)) +  ((Allctr_t *) (erts_atomic_read_nob(&(C)->allctr) & ~CRR_FLG_MASK))  typedef struct {      Carrier_t *first; @@ -530,7 +551,6 @@ typedef struct {      } head;  } ErtsAllctrDDQueue_t; -  typedef struct {      size_t type_size;      SWord list_size; @@ -549,6 +569,7 @@ typedef struct {  	    UWord used;  	} cpool;      } u; +    ErtsAlcType_t type;  } ErtsAlcFixList_t;  struct Allctr_t_ { @@ -569,6 +590,9 @@ struct Allctr_t_ {      /* Allocator number */      ErtsAlcType_t	alloc_no; +    /* Allocator strategy */ +    ErtsAlcStrat_t	alloc_strat; +      /* Instance index */      int			ix; @@ -617,6 +641,9 @@ struct Allctr_t_ {  	AOFF_RBTree_t*   pooled_tree;  	CarrierList_t	 dc_list; +        /* the sentinel of the cpool we're attached to */ +        ErtsAlcCPoolData_t  *sentinel; +  	UWord		abandon_limit;  	int		disable_abandon;  	int		check_limit_count; @@ -624,8 +651,8 @@ struct Allctr_t_ {          UWord           in_pool_limit;    /* acnl */          UWord           fblk_min_limit;   /* acmfl */  	struct { -	    erts_atomic_t	blocks_size; -	    erts_atomic_t	no_blocks; +	    erts_atomic_t	blocks_size[ERTS_ALC_A_MAX + 1]; +	    erts_atomic_t	no_blocks[ERTS_ALC_A_MAX + 1];  	    erts_atomic_t	carriers_size;  	    erts_atomic_t	no_carriers;              CallCounter_t       fail_pooled; diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.c b/erts/emulator/beam/erl_ao_firstfit_alloc.c index 3f0ab33597..0e3e4c890a 100644 --- a/erts/emulator/beam/erl_ao_firstfit_alloc.c +++ b/erts/emulator/beam/erl_ao_firstfit_alloc.c @@ -107,9 +107,11 @@ typedef struct AOFF_Carrier_t_ AOFF_Carrier_t;  struct AOFF_Carrier_t_ {      Carrier_t crr; -    AOFF_RBTree_t rbt_node;     /* My node in the carrier tree */ -    AOFF_RBTree_t* root;        /* Root of my block tree */ +    AOFF_RBTree_t rbt_node;        /* My node in the carrier tree */ +    AOFF_RBTree_t* root;           /* Root of my block tree */ +    enum AOFFSortOrder blk_order;  }; +  #define RBT_NODE_TO_MBC(PTR) ErtsContainerStruct((PTR), AOFF_Carrier_t, rbt_node)  /*  @@ -281,15 +283,28 @@ erts_aoffalc_start(AOFFAllctr_t *alc,      sys_memcpy((void *) alc, (void *) &zero.allctr, sizeof(AOFFAllctr_t)); +    if (aoffinit->blk_order == FF_CHAOS) { +        const enum AOFFSortOrder orders[3] = {FF_AOFF, FF_AOBF, FF_BF}; +        int index = init->ix % (sizeof(orders) / sizeof(orders[0])); + +        ASSERT(init->alloc_no == ERTS_ALC_A_TEST); +        aoffinit->blk_order = orders[index]; +    } + +    if (aoffinit->crr_order == FF_CHAOS) { +        const enum AOFFSortOrder orders[2] = {FF_AGEFF, FF_AOFF}; +        int index = init->ix % (sizeof(orders) / sizeof(orders[0])); + +        ASSERT(init->alloc_no == ERTS_ALC_A_TEST); +        aoffinit->crr_order = orders[index]; +    } +      alc->blk_order                      = aoffinit->blk_order;      alc->crr_order                      = aoffinit->crr_order;      allctr->mbc_header_size		= sizeof(AOFF_Carrier_t);      
allctr->min_mbc_size		= MIN_MBC_SZ;      allctr->min_mbc_first_free_size	= MIN_MBC_FIRST_FREE_SZ; -    allctr->min_block_size = (aoffinit->blk_order == FF_BF -                              ? (offsetof(AOFF_RBTree_t, u.next) -                                 + ErtsSizeofMember(AOFF_RBTree_t, u.next)) -                              : offsetof(AOFF_RBTree_t, u)); +    allctr->min_block_size              = sizeof(AOFF_RBTree_t);      allctr->vsn_str			= ERTS_ALC_AOFF_ALLOC_VSN_STR; @@ -512,14 +527,15 @@ tree_insert_fixup(AOFF_RBTree_t** root, AOFF_RBTree_t *blk)  static void  aoff_unlink_free_block(Allctr_t *allctr, Block_t *blk)  { -    AOFFAllctr_t* alc = (AOFFAllctr_t*)allctr;      AOFF_RBTree_t* del = (AOFF_RBTree_t*)blk;      AOFF_Carrier_t *crr = (AOFF_Carrier_t*) FBLK_TO_MBC(&del->hdr); +    (void)allctr; +      ASSERT(crr->rbt_node.hdr.bhdr == crr->root->max_sz); -    HARD_CHECK_TREE(&crr->crr, alc->blk_order, crr->root, 0); +    HARD_CHECK_TREE(&crr->crr, crr->blk_order, crr->root, 0); -    if (alc->blk_order == FF_BF) { +    if (crr->blk_order == FF_BF) {  	ASSERT(del->flags & IS_BF_FLG);  	if (IS_LIST_ELEM(del)) {  	    /* Remove from list */ @@ -540,14 +556,14 @@ aoff_unlink_free_block(Allctr_t *allctr, Block_t *blk)  	    replace(&crr->root, (AOFF_RBTree_t*)del, LIST_NEXT(del)); -	    HARD_CHECK_TREE(&crr->crr, alc->blk_order, crr->root, 0); +	    HARD_CHECK_TREE(&crr->crr, crr->blk_order, crr->root, 0);  	    return;  	}      }      rbt_delete(&crr->root, (AOFF_RBTree_t*)del); -    HARD_CHECK_TREE(&crr->crr, alc->blk_order, crr->root, 0); +    HARD_CHECK_TREE(&crr->crr, crr->blk_order, crr->root, 0);      /* Update the carrier tree with a potentially new (lower) max_sz       */     @@ -737,17 +753,18 @@ rbt_delete(AOFF_RBTree_t** root, AOFF_RBTree_t* del)  static void  aoff_link_free_block(Allctr_t *allctr, Block_t *block)  { -    AOFFAllctr_t* alc = (AOFFAllctr_t*) allctr;      AOFF_RBTree_t *blk = (AOFF_RBTree_t *) block;      AOFF_RBTree_t *crr_node;      AOFF_Carrier_t *blk_crr = (AOFF_Carrier_t*) FBLK_TO_MBC(block);      Uint blk_sz = AOFF_BLK_SZ(blk); +    (void)allctr; +      ASSERT(allctr == ERTS_ALC_CARRIER_TO_ALLCTR(&blk_crr->crr));      ASSERT(blk_crr->rbt_node.hdr.bhdr == (blk_crr->root ? 
blk_crr->root->max_sz : 0)); -    HARD_CHECK_TREE(&blk_crr->crr, alc->blk_order, blk_crr->root, 0); +    HARD_CHECK_TREE(&blk_crr->crr, blk_crr->blk_order, blk_crr->root, 0); -    rbt_insert(alc->blk_order, &blk_crr->root, blk); +    rbt_insert(blk_crr->blk_order, &blk_crr->root, blk);      /*       * Update carrier tree with a potentially new (larger) max_sz @@ -891,7 +908,7 @@ aoff_get_free_block(Allctr_t *allctr, Uint size,      /* Get block within carrier tree       */  #ifdef HARD_DEBUG -    dbg_blk = HARD_CHECK_TREE(&crr->crr, alc->blk_order, crr->root, size); +    dbg_blk = HARD_CHECK_TREE(&crr->crr, crr->blk_order, crr->root, size);  #endif      blk = rbt_search(crr->root, size); @@ -904,7 +921,7 @@ aoff_get_free_block(Allctr_t *allctr, Uint size,      if (!blk)  	return NULL; -    if (cand_blk && cmp_cand_blk(alc->blk_order, cand_blk, blk) < 0) { +    if (cand_blk && cmp_cand_blk(crr->blk_order, cand_blk, blk) < 0) {  	return NULL; /* cand_blk was better */      } @@ -927,21 +944,28 @@ static void aoff_creating_mbc(Allctr_t *allctr, Carrier_t *carrier)      AOFFAllctr_t *alc = (AOFFAllctr_t *) allctr;      AOFF_Carrier_t *crr = (AOFF_Carrier_t*) carrier;      AOFF_RBTree_t **root = &alc->mbc_root; +    Sint64 bt = get_birth_time();      HARD_CHECK_TREE(NULL, alc->crr_order, *root, 0);      crr->rbt_node.hdr.bhdr = 0; -    if (alc->crr_order == FF_AGEFF || IS_DEBUG) { -        Sint64 bt = get_birth_time(); -        crr->rbt_node.u.birth_time = bt; -        crr->crr.cpool.pooled.u.birth_time = bt; -    } + +    /* While birth time is only used for FF_AGEFF, we have to set it for all +     * types as we can be migrated to an instance that uses it and we don't +     * want to mess its order up. */ +    crr->rbt_node.u.birth_time = bt; +    crr->crr.cpool.pooled.u.birth_time = bt; +      rbt_insert(alc->crr_order, root, &crr->rbt_node);      /* aoff_link_free_block will add free block later */      crr->root = NULL;      HARD_CHECK_TREE(NULL, alc->crr_order, *root, 0); + +    /* When a carrier has been migrated, its block order may differ from that +     * of the allocator it's been migrated to. */ +    crr->blk_order = alc->blk_order;  }  #define IS_CRR_IN_TREE(CRR,ROOT) \ diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.h b/erts/emulator/beam/erl_ao_firstfit_alloc.h index 68df9e0a49..9c9b98da86 100644 --- a/erts/emulator/beam/erl_ao_firstfit_alloc.h +++ b/erts/emulator/beam/erl_ao_firstfit_alloc.h @@ -32,7 +32,12 @@ enum AOFFSortOrder {      FF_AGEFF = 0,    /* carrier trees only */      FF_AOFF  = 1,      FF_AOBF  = 2,    /* block trees only */ -    FF_BF    = 3     /* block trees only */ +    FF_BF    = 3,    /* block trees only */ + +    FF_CHAOS = -1    /* A test-specific sort order that picks any of the above +                      * after instance id. Used to test that carriers created +                      * under one order will work fine after being migrated +                      * to another. 
*/  };  typedef struct { diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c index a2610bf2e1..ff919082c3 100644 --- a/erts/emulator/beam/erl_bif_binary.c +++ b/erts/emulator/beam/erl_bif_binary.c @@ -208,8 +208,8 @@ typedef struct _ac_trie {  typedef struct _bm_data {      byte *x;      Sint len; +    Sint *badshift;      Sint *goodshift; -    Sint badshift[ALPHABET_SIZE];  } BMData;  typedef struct _ac_find_all_state { @@ -319,16 +319,104 @@ static void dump_ac_node(ACNode *node, int indent, int ch);   * The needed size of binary data for a search structure - given the   * accumulated string lengths.   */ -#define BM_SIZE(StrLen) 	      /* StrLen: length of searchstring */ \ -((MYALIGN(sizeof(Sint) * (StrLen))) + /* goodshift array */                \ - MYALIGN(StrLen) +                    /* searchstring saved */             \ - (MYALIGN(sizeof(BMData))))           /* Structure */ +#define BM_SIZE_SINGLE()    /* Single byte search string */ \ +(MYALIGN(1) +               /* searchstring saved */        \ + (MYALIGN(sizeof(BMData)))) /* Structure */ + +#define BM_SIZE_MULTI(StrLen) 	           /* StrLen: length of searchstring */ \ +((MYALIGN(sizeof(Uint) * (StrLen))) +      /* goodshift array */                \ + (MYALIGN(sizeof(Uint) * ALPHABET_SIZE)) + /* badshift array */                 \ + MYALIGN(StrLen) +                         /* searchstring saved */             \ + (MYALIGN(sizeof(BMData))))                /* Structure */  #define AC_SIZE(StrLens)       /* StrLens: sum of all searchstring lengths */ \  ((MYALIGN(sizeof(ACNode)) *                                                   \  ((StrLens)+1)) + 	       /* The actual nodes (including rootnode) */    \   MYALIGN(sizeof(ACTrie)))      /* Structure */ +/* + * Boyer Moore - most obviously implemented more or less exactly as + * Christian Charras and Thierry Lecroq describe it in "Handbook of + * Exact String-Matching Algorithms" + * http://www-igm.univ-mlv.fr/~lecroq/string/ + */ + +/* + * Call this to compute badshifts array + */ +static void compute_badshifts(BMData *bmd) +{ +    Sint i; +    Sint m = bmd->len; + +    for (i = 0; i < ALPHABET_SIZE; ++i) { +	bmd->badshift[i] = m; +    } +    for (i = 0; i < m - 1; ++i) { +	bmd->badshift[bmd->x[i]] = m - i - 1; +    } +} + +/* Helper for "compute_goodshifts" */ +static void compute_suffixes(byte *x, Sint m, Sint *suffixes) +{ +    int f,g,i; + +    suffixes[m - 1] = m; + +    f = 0; /* To avoid use before set warning */ + +    g = m - 1; + +    for (i = m - 2; i >= 0; --i) { +	if (i > g && suffixes[i + m - 1 - f] < i - g) { +	    suffixes[i] = suffixes[i + m - 1 - f]; +	} else { +	    if (i < g) { +		g = i; +	    } +	    f = i; +	    while ( g >= 0 && x[g] == x[g + m - 1 - f] ) { +		--g; +	    } +	    suffixes[i] = f - g; +	} +    } +} + +/* + * Call this to compute goodshift array + */ +static void compute_goodshifts(BMData *bmd) +{ +    Sint m = bmd->len; +    byte *x = bmd->x; +    Sint i, j; +    Sint *suffixes = erts_alloc(ERTS_ALC_T_TMP, m * sizeof(Sint)); + +    compute_suffixes(x, m, suffixes); + +    for (i = 0; i < m; ++i) { +	bmd->goodshift[i] = m; +    } + +    j = 0; + +    for (i = m - 1; i >= -1; --i) { +	if (i == -1 || suffixes[i] == i + 1) { +	    while (j < m - 1 - i) { +		if (bmd->goodshift[j] == m) { +		    bmd->goodshift[j] = m - 1 - i; +		} +		++j; +	    } +	} +    } +    for (i = 0; i <= m - 2; ++i) { +	bmd->goodshift[m - 1 - suffixes[i]] = m - 1 - i; +    } +    erts_free(ERTS_ALC_T_TMP, suffixes); +}  /*   * Callback 
for the magic binary @@ -377,11 +465,19 @@ static ACTrie *create_acdata(MyAllocator *my, Uint len,  /*   * The same initialization of allocator and basic data for Boyer-Moore. + * For single byte, we don't use goodshift and badshift, only memchr.   */  static BMData *create_bmdata(MyAllocator *my, byte *x, Uint len,  			     Binary **the_bin /* out */)  { -    Uint datasize = BM_SIZE(len); +    Uint datasize; + +    if(len > 1) { +	datasize = BM_SIZE_MULTI(len); +    } else { +	datasize = BM_SIZE_SINGLE(); +    } +      BMData *bmd;      Binary *mb = erts_create_magic_binary(datasize,cleanup_my_data_bm);      byte *data = ERTS_MAGIC_BIN_DATA(mb); @@ -390,7 +486,14 @@ static BMData *create_bmdata(MyAllocator *my, byte *x, Uint len,      bmd->x = my_alloc(my,len);      sys_memcpy(bmd->x,x,len);      bmd->len = len; -    bmd->goodshift = my_alloc(my,sizeof(Uint) * len); + +    if(len > 1) { +	bmd->goodshift = my_alloc(my, sizeof(Uint) * len); +	bmd->badshift = my_alloc(my, sizeof(Uint) * ALPHABET_SIZE); +	compute_badshifts(bmd); +	compute_goodshifts(bmd); +    } +      *the_bin = mb;      return bmd;  } @@ -711,91 +814,8 @@ static BFReturn ac_find_all_non_overlapping(BinaryFindContext *ctx, byte *haysta      return (m == 0) ? BF_NOT_FOUND : BF_OK;  } -/* - * Boyer Moore - most obviously implemented more or less exactly as - * Christian Charras and Thierry Lecroq describe it in "Handbook of - * Exact String-Matching Algorithms" - * http://www-igm.univ-mlv.fr/~lecroq/string/ - */ - -/* - * Call this to compute badshifts array - */ -static void compute_badshifts(BMData *bmd) -{ -    Sint i; -    Sint m = bmd->len; - -    for (i = 0; i < ALPHABET_SIZE; ++i) { -	bmd->badshift[i] = m; -    } -    for (i = 0; i < m - 1; ++i) { -	bmd->badshift[bmd->x[i]] = m - i - 1; -    } -} - -/* Helper for "compute_goodshifts" */ -static void compute_suffixes(byte *x, Sint m, Sint *suffixes) -{ -    int f,g,i; - -    suffixes[m - 1] = m; - -    f = 0; /* To avoid use before set warning */ - -    g = m - 1; - -    for (i = m - 2; i >= 0; --i) { -	if (i > g && suffixes[i + m - 1 - f] < i - g) { -	    suffixes[i] = suffixes[i + m - 1 - f]; -	} else { -	    if (i < g) { -		g = i; -	    } -	    f = i; -	    while ( g >= 0 && x[g] == x[g + m - 1 - f] ) { -		--g; -	    } -	    suffixes[i] = f - g; -	} -    } -} - -/* - * Call this to compute goodshift array - */ -static void compute_goodshifts(BMData *bmd) -{ -    Sint m = bmd->len; -    byte *x = bmd->x; -    Sint i, j; -    Sint *suffixes = erts_alloc(ERTS_ALC_T_TMP, m * sizeof(Sint)); - -    compute_suffixes(x, m, suffixes); - -    for (i = 0; i < m; ++i) { -	bmd->goodshift[i] = m; -    } - -    j = 0; - -    for (i = m - 1; i >= -1; --i) { -	if (i == -1 || suffixes[i] == i + 1) { -	    while (j < m - 1 - i) { -		if (bmd->goodshift[j] == m) { -		    bmd->goodshift[j] = m - 1 - i; -		} -		++j; -	    } -	} -    } -    for (i = 0; i <= m - 2; ++i) { -	bmd->goodshift[m - 1 - suffixes[i]] = m - 1 - i; -    } -    erts_free(ERTS_ALC_T_TMP, suffixes); -} -  #define BM_LOOP_FACTOR 10 /* Should we have a higher value? 
*/ +#define MC_LOOP_FACTOR 8  static void bm_init_find_first_match(BinaryFindContext *ctx)  { @@ -819,13 +839,38 @@ static BFReturn bm_find_first_match(BinaryFindContext *ctx, byte *haystack)      Sint i;      Sint j = state->pos;      register Uint reds = *reductions; +    byte *pos_pointer; +    Sint needle_last = blen - 1; +    Sint mem_read = len - needle_last - j; -    while (j <= len - blen) { +    if (mem_read <= 0) { +	return BF_NOT_FOUND; +    } +    mem_read = MIN(mem_read, reds * MC_LOOP_FACTOR); +    ASSERT(mem_read > 0); + +    pos_pointer = memchr(&haystack[j + needle_last], needle[needle_last], mem_read); +    if (pos_pointer == NULL) { +	reds -= mem_read / MC_LOOP_FACTOR; +	j += mem_read; +    } else { +	reds -= (pos_pointer - &haystack[j]) / MC_LOOP_FACTOR; +	j = pos_pointer - haystack - needle_last; +    } + +    // Ensure we have at least one reduction before entering the loop +    ++reds; + +    for(;;) { +	if (j > len - blen) { +	    *reductions = reds; +	    return BF_NOT_FOUND; +	}  	if (--reds == 0) {  	    state->pos = j;  	    return BF_RESTART;  	} -	for (i = blen - 1; i >= 0 && needle[i] == haystack[i + j]; --i) +	for (i = needle_last; i >= 0 && needle[i] == haystack[i + j]; --i)  	    ;  	if (i < 0) { /* found */  	    *reductions = reds; @@ -835,8 +880,6 @@ static BFReturn bm_find_first_match(BinaryFindContext *ctx, byte *haystack)  	}  	j += MAX(gs[i],bs[haystack[i+j]] - blen + 1 + i);      } -    *reductions = reds; -    return BF_NOT_FOUND;  }  static void bm_init_find_all(BinaryFindContext *ctx) @@ -875,14 +918,38 @@ static BFReturn bm_find_all_non_overlapping(BinaryFindContext *ctx, byte *haysta      Sint *gs = bmd->goodshift;      Sint *bs = bmd->badshift;      byte *needle = bmd->x; -    Sint i; +    Sint i = -1; /* Use memchr on start and on every match */      Sint j = state->pos;      Uint m = state->m;      Uint allocated = state->allocated;      FindallData *out = state->out;      register Uint reds = *reductions; +    byte *pos_pointer; +    Sint needle_last = blen - 1; +    Sint mem_read; -    while (j <= len - blen) { +    for(;;) { +	if (i < 0) { +	    mem_read = len - needle_last - j; +	    if(mem_read <= 0) { +		goto done; +	    } +	    mem_read = MIN(mem_read, reds * MC_LOOP_FACTOR); +	    ASSERT(mem_read > 0); +	    pos_pointer = memchr(&haystack[j + needle_last], needle[needle_last], mem_read); +	    if (pos_pointer == NULL) { +		reds -= mem_read / MC_LOOP_FACTOR; +		j += mem_read; +	    } else { +		reds -= (pos_pointer - &haystack[j]) / MC_LOOP_FACTOR; +		j = pos_pointer - haystack - needle_last; +	    } +	    // Ensure we have at least one reduction when resuming the loop +	    ++reds; +	} +	if (j > len - blen) { +	    goto done; +	}  	if (--reds == 0) {  	    state->pos = j;  	    state->m = m; @@ -890,7 +957,7 @@ static BFReturn bm_find_all_non_overlapping(BinaryFindContext *ctx, byte *haysta  	    state->out = out;  	    return BF_RESTART;  	} -	for (i = blen - 1; i >= 0 && needle[i] == haystack[i + j]; --i) +	for (i = needle_last; i >= 0 && needle[i] == haystack[i + j]; --i)  	    ;  	if (i < 0) { /* found */  	    if (m >= allocated) { @@ -912,6 +979,7 @@ static BFReturn bm_find_all_non_overlapping(BinaryFindContext *ctx, byte *haysta  	    j += MAX(gs[i],bs[haystack[i+j]] - blen + 1 + i);  	}      } + done:      state->m = m;      state->out = out;      *reductions = reds; @@ -931,6 +999,7 @@ static int do_binary_match_compile(Eterm argument, Eterm *tag, Binary **binp)      Eterm t, b, comp_term = NIL;      Uint characters;      
Uint words; +    Uint size;      characters = 0;      words = 0; @@ -946,11 +1015,12 @@ static int do_binary_match_compile(Eterm argument, Eterm *tag, Binary **binp)  	    if (binary_bitsize(b) != 0) {  		goto badarg;  	    } -	    if (binary_size(b) == 0) { +	    size = binary_size(b); +	    if (size == 0) {  		goto badarg;  	    }  	    ++words; -	    characters += binary_size(b); +	    characters += size;  	}  	if (is_not_nil(t)) {  	    goto badarg; @@ -987,8 +1057,6 @@ static int do_binary_match_compile(Eterm argument, Eterm *tag, Binary **binp)  	    bytes = erts_get_aligned_binary_bytes(comp_term, &temp_alloc);  	}  	bmd = create_bmdata(&my, bytes, characters, &bin); -	compute_badshifts(bmd); -	compute_goodshifts(bmd);  	erts_free_aligned_binary_bytes(temp_alloc);  	CHECK_ALLOCATOR(my);  	*tag = am_bm; @@ -3012,17 +3080,19 @@ static void dump_bm_data(BMData *bm)  	}      }      erts_printf(">>\n"); -    erts_printf("GoodShift array:\n"); -    for (i = 0; i < bm->len; ++i) { -	erts_printf("GoodShift[%d]: %ld\n", i, bm->goodshift[i]); -    } -    erts_printf("BadShift array:\n"); -    j = 0; -    for (i = 0; i < ALPHABET_SIZE; i += j) { -	for (j = 0; i + j < ALPHABET_SIZE && j < 6; ++j) { -	    erts_printf("BS[%03d]:%02ld, ", i+j, bm->badshift[i+j]); +    if(bm->len > 1) { +	erts_printf("GoodShift array:\n"); +	for (i = 0; i < bm->len; ++i) { +	    erts_printf("GoodShift[%d]: %ld\n", i, bm->goodshift[i]); +	} +	erts_printf("BadShift array:\n"); +	j = 0; +	for (i = 0; i < ALPHABET_SIZE; i += j) { +	    for (j = 0; i + j < ALPHABET_SIZE && j < 6; ++j) { +		erts_printf("BS[%03d]:%02ld, ", i+j, bm->badshift[i+j]); +	    } +	    erts_printf("\n");  	} -	erts_printf("\n");      }  } diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c index 4cda0948a0..639aee29dc 100644 --- a/erts/emulator/beam/erl_bif_ddll.c +++ b/erts/emulator/beam/erl_bif_ddll.c @@ -829,7 +829,7 @@ BIF_RETTYPE erl_ddll_format_error_int_1(BIF_ALIST_1)  	    "cannot be loaded/unloaded";  	break;      case am_permanent: -	errstring = "DDLL driver is permanent an can not be unloaded/loaded"; +	errstring = "DDLL driver is permanent an cannot be unloaded/loaded";  	break;      case am_not_loaded:  	errstring = "DDLL driver is not loaded"; diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c index 788718ab09..2eb874b005 100644 --- a/erts/emulator/beam/erl_db_tree.c +++ b/erts/emulator/beam/erl_db_tree.c @@ -3126,7 +3126,7 @@ static int partly_bound_can_match_lesser(Eterm partly_bound_1,      if (ret)  	erts_fprintf(stderr," can match lesser than ");      else -	erts_fprintf(stderr," can not match lesser than "); +	erts_fprintf(stderr," cannot match lesser than ");      erts_fprintf(stderr,"%T\n",partly_bound_2);  #endif      return ret; @@ -3144,7 +3144,7 @@ static int partly_bound_can_match_greater(Eterm partly_bound_1,      if (ret)  	erts_fprintf(stderr," can match greater than ");      else -	erts_fprintf(stderr," can not match greater than "); +	erts_fprintf(stderr," cannot match greater than ");      erts_fprintf(stderr,"%T\n",partly_bound_2);  #endif      return ret; diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c index 57c6c10c7f..5da7b43b9e 100644 --- a/erts/emulator/beam/erl_init.c +++ b/erts/emulator/beam/erl_init.c @@ -128,7 +128,7 @@ const Eterm etp_hole_marker = 0;  static int modified_sched_thread_suggested_stack_size = 0; -Eterm erts_init_process_id; +Eterm erts_init_process_id = ERTS_INVALID_PID;  /*   * Note about VxWorks: All 
variables must be initialized by executable code, @@ -2258,6 +2258,7 @@ erl_start(int argc, char **argv)      erts_init_process_id = erl_first_process_otp("otp_ring0", NULL, 0,                                                   boot_argc, boot_argv); +	ASSERT(erts_init_process_id != ERTS_INVALID_PID);      {  	/* diff --git a/erts/emulator/beam/erl_lock_count.h b/erts/emulator/beam/erl_lock_count.h index 89d95a73cf..0d47b16e0b 100644 --- a/erts/emulator/beam/erl_lock_count.h +++ b/erts/emulator/beam/erl_lock_count.h @@ -532,7 +532,7 @@ ERTS_GLB_INLINE  void lcnt_dec_lock_state__(ethr_atomic_t *l_state) {      ethr_sint_t state = ethr_atomic_dec_read_acqb(l_state); -    /* We can not assume that state is >= -1 here; unlock and unacquire might +    /* We cannot assume that state is >= -1 here; unlock and unacquire might       * bring it below -1 and race to increment it back. */      if(state < 0) { diff --git a/erts/emulator/beam/erl_map.c b/erts/emulator/beam/erl_map.c index cba17d3e6a..3d6c9eb43f 100644 --- a/erts/emulator/beam/erl_map.c +++ b/erts/emulator/beam/erl_map.c @@ -1505,25 +1505,6 @@ int hashmap_key_hash_cmp(Eterm* ap, Eterm* bp)      return ap ? -1 : 1;  } -/* maps:new/0 */ - -BIF_RETTYPE maps_new_0(BIF_ALIST_0) { -    Eterm* hp; -    Eterm tup; -    flatmap_t *mp; - -    hp    = HAlloc(BIF_P, (MAP_HEADER_FLATMAP_SZ + 1)); -    tup   = make_tuple(hp); -    *hp++ = make_arityval(0); - -    mp    = (flatmap_t*)hp; -    mp->thing_word = MAP_HEADER_FLATMAP; -    mp->size = 0; -    mp->keys = tup; - -    BIF_RET(make_flatmap(mp)); -} -  /* maps:put/3 */  BIF_RETTYPE maps_put_3(BIF_ALIST_3) { @@ -1707,11 +1688,16 @@ int erts_maps_update(Process *p, Eterm key, Eterm value, Eterm map, Eterm *res)  	return 0;  found_key: -	*hp++ = value; -	vs++; -	if (++i < n) -	    sys_memcpy(hp, vs, (n - i)*sizeof(Eterm)); -	*res = make_flatmap(shp); +        if(*vs == value) { +            HRelease(p, shp + MAP_HEADER_FLATMAP_SZ + n, shp); +            *res = map; +        } else { +	    *hp++ = value; +	    vs++; +	    if (++i < n) +	       sys_memcpy(hp, vs, (n - i)*sizeof(Eterm)); +	    *res = make_flatmap(shp); +        }  	return 1;      } @@ -1767,9 +1753,7 @@ Eterm erts_maps_put(Process *p, Eterm key, Eterm value, Eterm map) {  	if (is_immed(key)) {  	    for( i = 0; i < n; i ++) {  		if (ks[i] == key) { -		    *hp++ = value; -		    vs++; -		    c = 1; +                    goto found_key;  		} else {  		    *hp++ = *vs++;  		} @@ -1777,18 +1761,13 @@ Eterm erts_maps_put(Process *p, Eterm key, Eterm value, Eterm map) {  	} else {  	    for( i = 0; i < n; i ++) {  		if (EQ(ks[i], key)) { -		    *hp++ = value; -		    vs++; -		    c = 1; +		    goto found_key;  		} else {  		    *hp++ = *vs++;  		}  	    }  	} -	if (c) -	    return res; -  	/* the map will grow */  	if (n >= MAP_SMALL_MAP_LIMIT) { @@ -1843,6 +1822,18 @@ Eterm erts_maps_put(Process *p, Eterm key, Eterm value, Eterm map) {  	 */  	*shp = make_pos_bignum_header(0);  	return res; + +found_key: +        if(*vs == value) { +            HRelease(p, shp + MAP_HEADER_FLATMAP_SZ + n, shp); +            return map; +        } else { +            *hp++ = value; +            vs++; +            if (++i < n) +               sys_memcpy(hp, vs, (n - i)*sizeof(Eterm)); +            return res; +        }      }      ASSERT(is_hashmap(map)); diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c index ee6e6085b6..7339aa8874 100644 --- a/erts/emulator/beam/erl_nif.c +++ b/erts/emulator/beam/erl_nif.c @@ -1040,7 +1040,7 @@ 
ERL_NIF_TERM enif_make_copy(ErlNifEnv* dst_env, ERL_NIF_TERM src_term)      Eterm* hp;      /*       * No preserved sharing allowed as long as literals are also preserved. -     * Process independent environment can not be reached by purge. +     * Process independent environment cannot be reached by purge.       */      sz = size_object(src_term);      hp = alloc_heap(dst_env, sz); diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c index 243db4c734..706530023b 100644 --- a/erts/emulator/beam/erl_process_dump.c +++ b/erts/emulator/beam/erl_process_dump.c @@ -963,12 +963,16 @@ dump_module_literals(fmtfn_t to, void *to_arg, ErtsLiteralArea* lit_area)                      }                      erts_putc(to, to_arg, '\n');                  } -            } else if (is_export_header(w)) { +            } else if (is_export_header(w) || is_fun_header(w)) {                  dump_externally(to, to_arg, term);                  erts_putc(to, to_arg, '\n');              }              size = 1 + header_arity(w);              switch (w & _HEADER_SUBTAG_MASK) { +            case FUN_SUBTAG: +                ASSERT(((ErlFunThing*)(htop))->num_free == 0); +                size += 1; +                break;              case MAP_SUBTAG:                  if (is_flatmap_header(w)) {                      size += 1 + flatmap_get_size(htop); diff --git a/erts/emulator/beam/instrs.tab b/erts/emulator/beam/instrs.tab index 42c1168f85..da1dd3dc45 100644 --- a/erts/emulator/beam/instrs.tab +++ b/erts/emulator/beam/instrs.tab @@ -559,17 +559,19 @@ update_list(Hd, Dst) {      HTOP += 2;  } -i_put_tuple := i_put_tuple.make.fill; - -i_put_tuple.make(Dst) { -    $Dst = make_tuple(HTOP); -} - -i_put_tuple.fill(Arity) { +put_tuple2(Dst, Arity) {      Eterm* hp = HTOP;      Eterm arity = $Arity; +    /* +     * If operands are not packed (in the 32-bit VM), +     * is is not safe to use $Dst directly after I +     * has been updated. +     */ +    Eterm* dst_ptr = &($Dst); +      //| -no_next +    ASSERT(arity != 0);      *hp++ = make_arityval(arity);      I = $NEXT_INSTRUCTION;      do { @@ -586,6 +588,7 @@ i_put_tuple.fill(Arity) {              break;          }      } while (--arity != 0); +    *dst_ptr = make_tuple(HTOP);      HTOP = hp;      ASSERT(VALID_INSTR(* (Eterm *)I));      Goto(*I); diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab index e76d896ffc..d859c4bb24 100644 --- a/erts/emulator/beam/ops.tab +++ b/erts/emulator/beam/ops.tab @@ -483,9 +483,16 @@ is_eq f? s s  is_ne f? s s  # -# Putting things. +# Putting tuples. +# +# Code compiled with OTP 22 and later uses put_tuple2 to +# to construct a tuple. +# +# Code compiled before OTP 22 uses put_tuple + one put instruction +# per element. Translate to put_tuple2.  # +i_put_tuple/2  put_tuple Arity Dst => i_put_tuple Dst u  i_put_tuple Dst Arity Puts=* | put S1 | put S2 | \ @@ -495,11 +502,13 @@ i_put_tuple Dst Arity Puts=* | put S1 | put S2 | \  i_put_tuple Dst Arity Puts=* | put S => \  	    tuple_append_put(Arity, Dst, Puts, S) -i_put_tuple/2 +i_put_tuple Dst Arity Puts=* => put_tuple2 Dst Arity Puts -i_put_tuple xy I +put_tuple2 xy I  # +# Putting lists. +#  # The instruction "put_list Const [] Dst" were generated in rare  # circumstances up to and including OTP 18. Starting with OTP 19,  # AFAIK, it should never be generated. 
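
[Editor's note: a minimal sketch, not part of the patch.] The ops.tab rules above translate the pre-OTP 22 put_tuple + put sequence into the new put_tuple2 instruction, as the comment in the hunk explains. The following Erlang sketch (module and function names are invented for illustration) shows code whose tuple construction exercises the new instruction; erts_debug:df/1, which also appears in erts_debug_SUITE below, dumps the loaded code so the translation can be observed. The exact specialized operation name in the dump depends on the operand types.

    -module(put_tuple2_demo).          %% hypothetical demo module
    -export([make/2, dump/0]).

    %% A compiler from OTP 22 or later emits a single put_tuple2 for this
    %% 3-tuple; a BEAM file compiled earlier uses put_tuple plus one put
    %% per element and is rewritten to put_tuple2 by the loader rules above.
    make(A, B) ->
        {A, B, A + B}.

    %% Writes "put_tuple2_demo.dis" containing the disassembled loaded code.
    dump() ->
        erts_debug:df(?MODULE).
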
@@ -1097,23 +1106,29 @@ i_bs_match_string x f W W  bs_get_integer2 Fail=f Ms=x Live=u Sz=sq Unit=u Flags=u Dst=d => \  			gen_get_integer2(Fail, Ms, Live, Sz, Unit, Flags, Dst) +i_bs_get_integer_small_imm Ms Bits Fail Flags Y=y => \ +   i_bs_get_integer_small_imm Ms Bits Fail Flags x | move x Y + +i_bs_get_integer_imm Ms Bits Live Fail Flags Y=y => \ +   i_bs_get_integer_imm Ms Bits Live Fail Flags x | move x Y +  i_bs_get_integer_small_imm x W f? t x  i_bs_get_integer_imm x W t f? t x -i_bs_get_integer f? t t x s x -i_bs_get_integer_8 x f? x -i_bs_get_integer_16 x f? x +i_bs_get_integer f? t t x s xy +i_bs_get_integer_8 x f? xy +i_bs_get_integer_16 x f? xy  %if ARCH_64 -i_bs_get_integer_32 x f? x +i_bs_get_integer_32 x f? xy  %endif  # Fetching binaries from binaries.  bs_get_binary2 Fail=f Ms=x Live=u Sz=sq Unit=u Flags=u Dst=d => \  			gen_get_binary2(Fail, Ms, Live, Sz, Unit, Flags, Dst) -i_bs_get_binary_imm2 f? x t W t x -i_bs_get_binary2 f x t? s t x -i_bs_get_binary_all2 f? x t t x +i_bs_get_binary_imm2 f? x t W t xy +i_bs_get_binary2 f x t? s t xy +i_bs_get_binary_all2 f? x t t xy  i_bs_get_binary_all_reuse x f? t  # Fetching float from binaries. @@ -1122,7 +1137,7 @@ bs_get_float2 Fail=f Ms=x Live=u Sz=s Unit=u Flags=u Dst=d => \  bs_get_float2 Fail=f Ms=x Live=u Sz=q Unit=u Flags=u Dst=d => jump Fail -i_bs_get_float2 f? x t s t x +i_bs_get_float2 f? x t s t xy  # Miscellanous @@ -1156,14 +1171,14 @@ bs_context_to_binary x  # Utf8/utf16/utf32 support. (R12B-5)  #  bs_get_utf8 Fail=f Ms=x u u Dst=d => i_bs_get_utf8 Ms Fail Dst -i_bs_get_utf8 x f? x +i_bs_get_utf8 x f? xy  bs_skip_utf8 Fail=f Ms=x u u => i_bs_get_utf8 Ms Fail x  bs_get_utf16 Fail=f Ms=x u Flags=u Dst=d => i_bs_get_utf16 Ms Fail Flags Dst  bs_skip_utf16 Fail=f Ms=x u Flags=u => i_bs_get_utf16 Ms Fail Flags x -i_bs_get_utf16 x f? t x +i_bs_get_utf16 x f? t xy  bs_get_utf32 Fail=f Ms=x Live=u Flags=u Dst=d => \  	bs_get_integer2 Fail Ms Live i=32 u=1 Flags Dst | \ @@ -1182,6 +1197,9 @@ i_bs_validate_unicode_retract j s S  bs_init2 Fail Sz Words Regs Flags Dst | binary_too_big(Sz) => system_limit Fail +bs_init2 Fail Sz Words Regs Flags Dst=y => \ +   bs_init2 Fail Sz Words Regs Flags x | move x Dst +  bs_init2 Fail Sz=u Words=u==0 Regs Flags Dst => i_bs_init Sz Regs Dst  bs_init2 Fail Sz=u Words Regs Flags Dst => \ @@ -1202,6 +1220,8 @@ i_bs_init_heap W I t? x  bs_init_bits Fail Sz=o Words Regs Flags Dst => system_limit Fail +bs_init_bits Fail Sz Words Regs Flags Dst=y => \ +   bs_init_bits Fail Sz Words Regs Flags x | move x Dst  bs_init_bits Fail Sz=u Words=u==0 Regs Flags Dst => i_bs_init_bits Sz Regs Dst  bs_init_bits Fail Sz=u Words Regs Flags Dst =>  i_bs_init_bits_heap Sz Words Regs Dst @@ -1230,7 +1250,7 @@ bs_private_append Fail Size Unit Bin Flags Dst => \  bs_init_writable -i_bs_append j? I t? t s x +i_bs_append j? I t? t s xy  i_bs_private_append j? t s S x  # diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c index 08f8ca9788..996757ef43 100644 --- a/erts/emulator/beam/utils.c +++ b/erts/emulator/beam/utils.c @@ -1569,7 +1569,7 @@ make_hash2(Eterm term)   * MUST BE USED AS INPUT FOR THE HASH. Two different terms must always have a   * chance of hashing different when salted: hash([Salt|A]) vs hash([Salt|B]).   * - * This is why we can not use cached hash values for atoms for example. + * This is why we cannot use cached hash values for atoms for example.   
*   */ diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c index 2048d0f625..91381bd60d 100644 --- a/erts/emulator/drivers/common/inet_drv.c +++ b/erts/emulator/drivers/common/inet_drv.c @@ -4393,7 +4393,7 @@ static void desc_close_read(inet_descriptor* desc)  {      if (desc->s != INVALID_SOCKET) {  #ifdef __WIN32__ -	/* This call can not be right??? +	/* This call cannot be right???  	 * We want to turn off read events but keep any write events.  	 * But on windows driver_select(...,READ,1) is only used as a  	 * way to hook into the pollset. sock_select is used to control diff --git a/erts/emulator/drivers/unix/ttsl_drv.c b/erts/emulator/drivers/unix/ttsl_drv.c index 28c6cc0f94..11bb4373d8 100644 --- a/erts/emulator/drivers/unix/ttsl_drv.c +++ b/erts/emulator/drivers/unix/ttsl_drv.c @@ -31,7 +31,7 @@  static int ttysl_init(void);  static ErlDrvData ttysl_start(ErlDrvPort, char*); -#ifdef HAVE_TERMCAP  /* else make an empty driver that can not be opened */ +#ifdef HAVE_TERMCAP  /* else make an empty driver that cannot be opened */  #ifndef WANT_NONBLOCKING  #define WANT_NONBLOCKING diff --git a/erts/emulator/internal_doc/CarrierMigration.md b/erts/emulator/internal_doc/CarrierMigration.md index 3a796d11b7..bb3d8aac28 100644 --- a/erts/emulator/internal_doc/CarrierMigration.md +++ b/erts/emulator/internal_doc/CarrierMigration.md @@ -34,8 +34,7 @@ Solution  --------  In order to prevent scenarios like this we've implemented support for -migration of multi-block carriers between allocator instances of the -same type. +migration of multi-block carriers between allocator instances.  ### Management of Free Blocks ### @@ -130,10 +129,6 @@ threads may have references to it via the pool.  ### Migration ### -There exists one pool for each allocator type enabling migration of -carriers between scheduler specific allocator instances of the same -allocator type. -  Each allocator instance keeps track of the current utilization of its  multi-block carriers. When the total utilization falls below the "abandon  carrier utilization limit" it starts to inspect the utilization of the @@ -208,8 +203,8 @@ limited. We only inspect a limited number of carriers. If none of  those carriers had a free block large enough to satisfy the allocation  request, the search will fail. A carrier in the pool can also be BUSY  if another thread is currently doing block deallocation work on the -carrier. A BUSY carrier will also be skipped by the search as it can -not satisfy the request. The pool is lock-free and we do not want to +carrier. A BUSY carrier will also be skipped by the search as it cannot +satisfy the request. The pool is lock-free and we do not want to  block, waiting for the other thread to finish.  ### The bad cluster problem ### @@ -287,11 +282,3 @@ reduced using the `aoffcbf` strategy. A trade off between memory  consumption and performance is however inevitable, and it is up to  the user to decide what is most important.  -Further work ------------- - -It would be quite easy to extend this to allow migration of multi-block -carriers between all allocator types. More or less the only obstacle -is maintenance of the statistics information. 
- - diff --git a/erts/emulator/test/alloc_SUITE.erl b/erts/emulator/test/alloc_SUITE.erl index 343afe85e6..4e0243c1cd 100644 --- a/erts/emulator/test/alloc_SUITE.erl +++ b/erts/emulator/test/alloc_SUITE.erl @@ -71,7 +71,8 @@ migration(Cfg) ->      %% Disable driver_alloc to avoid recursive alloc_util calls      %% through enif_mutex_create() in my_creating_mbc().      drv_case(Cfg, concurrent, "+MZe true +MRe false"), -    drv_case(Cfg, concurrent, "+MZe true +MRe false +MZas ageffcbf"). +    drv_case(Cfg, concurrent, "+MZe true +MRe false +MZas ageffcbf"), +    drv_case(Cfg, concurrent, "+MZe true +MRe false +MZas chaosff").  erts_mmap(Config) when is_list(Config) ->      case {os:type(), mmsc_flags()} of diff --git a/erts/emulator/test/call_trace_SUITE.erl b/erts/emulator/test/call_trace_SUITE.erl index d19f7f81ad..742592f88e 100644 --- a/erts/emulator/test/call_trace_SUITE.erl +++ b/erts/emulator/test/call_trace_SUITE.erl @@ -1395,7 +1395,7 @@ seq(M, N, R) when M =< N ->      seq(M, N-1, [N|R]);  seq(_, _, R) -> R. -%% lists:reverse can not be called since it is traced +%% lists:reverse cannot be called since it is traced  reverse(L) ->      reverse(L, []).  %% diff --git a/erts/emulator/test/code_SUITE.erl b/erts/emulator/test/code_SUITE.erl index 9c6dc3ff83..7e690fd870 100644 --- a/erts/emulator/test/code_SUITE.erl +++ b/erts/emulator/test/code_SUITE.erl @@ -332,6 +332,7 @@ constant_pools(Config) when is_list(Config) ->      A = literals:a(),      B = literals:b(),      C = literals:huge_bignum(), +    D = literals:funs(),      process_flag(trap_exit, true),      Self = self(), @@ -345,7 +346,7 @@ constant_pools(Config) when is_list(Config) ->      true = erlang:purge_module(literals),      NoOldHeap ! done,      receive -        {'EXIT',NoOldHeap,{A,B,C}} -> +        {'EXIT',NoOldHeap,{A,B,C,D}} ->              ok;          Other ->              ct:fail({unexpected,Other}) @@ -362,7 +363,7 @@ constant_pools(Config) when is_list(Config) ->      erlang:purge_module(literals),      OldHeap ! done,      receive -	{'EXIT',OldHeap,{A,B,C,[1,2,3|_]=Seq}} when length(Seq) =:= 16 -> +	{'EXIT',OldHeap,{A,B,C,D,[1,2,3|_]=Seq}} when length(Seq) =:= 16 ->  	    ok      end, @@ -390,7 +391,7 @@ constant_pools(Config) when is_list(Config) ->  	{'DOWN', Mon, process, Hib, Reason} ->  	    {undef, [{no_module,  		      no_function, -		      [{A,B,C,[1,2,3|_]=Seq}], _}]} = Reason, +		      [{A,B,C,D,[1,2,3|_]=Seq}], _}]} = Reason,  	    16 = length(Seq)      end,      HeapSz = TotHeapSz, %% Ensure restored to hibernated state... @@ -400,7 +401,9 @@ constant_pools(Config) when is_list(Config) ->  no_old_heap(Parent) ->      A = literals:a(),      B = literals:b(), -    Res = {A,B,literals:huge_bignum()}, +    C = literals:huge_bignum(), +    D = literals:funs(), +    Res = {A,B,C,D},      Parent ! go,      receive          done -> @@ -410,7 +413,9 @@ no_old_heap(Parent) ->  old_heap(Parent) ->      A = literals:a(),      B = literals:b(), -    Res = {A,B,literals:huge_bignum(),lists:seq(1, 16)}, +    C = literals:huge_bignum(), +    D = literals:funs(), +    Res = {A,B,C,D,lists:seq(1, 16)},      create_old_heap(),      Parent ! go,      receive @@ -421,7 +426,9 @@ old_heap(Parent) ->  hibernated(Parent) ->      A = literals:a(),      B = literals:b(), -    Res = {A,B,literals:huge_bignum(),lists:seq(1, 16)}, +    C = literals:huge_bignum(), +    D = literals:funs(), +    Res = {A,B,C,D,lists:seq(1, 16)},      Parent ! go,      erlang:hibernate(no_module, no_function, [Res]). 
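
[Editor's note: a minimal sketch, not part of the patch.] The constant_pools changes above make the test processes hold literal funs (literals:funs()) across deletion and purge of the literals module. The stand-alone sketch below mirrors that pattern; literal_fun_holder/0 is an invented name and it assumes a loaded 'literals' module like the one under code_SUITE_data. The point is that the funs must still be intact terms after the old literal area is reclaimed.

    %% Spawn a process that captures the literal funs, purge the module,
    %% then verify the funs arrive back intact in the exit reason.
    literal_fun_holder() ->
        process_flag(trap_exit, true),
        Self = self(),
        Pid = spawn_link(fun() ->
                                 Funs = literals:funs(),   %% literal funs
                                 Self ! ready,
                                 receive purged -> exit(Funs) end
                         end),
        receive ready -> ok end,
        true = erlang:delete_module(literals),
        true = erlang:purge_module(literals),
        Pid ! purged,
        %% Two funs are expected: an external fun reference and an
        %% environment-free local fun, as defined in literals:funs/0.
        receive {'EXIT', Pid, [_, _] = Funs} -> {ok, Funs} end.
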
@@ -755,7 +762,8 @@ t_copy_literals_frags(Config) when is_list(Config) ->                                            0, 1, 2, 3, 4, 5, 6, 7,                                            8, 9,10,11,12,13,14,15,                                            0, 1, 2, 3, 4, 5, 6, 7, -                                          8, 9,10,11,12,13,14,15>>}]), +                                          8, 9,10,11,12,13,14,15>>}, +                        {f, fun ?MODULE:all/0}]),      {module, ?mod} = erlang:load_module(?mod, Bin),      N = 6000, @@ -796,6 +804,7 @@ literal_receiver() ->              C = ?mod:c(),              D = ?mod:d(),              E = ?mod:e(), +            F = ?mod:f(),              literal_receiver();          {Pid, sender_confirm} ->              io:format("sender confirm ~w~n", [Pid]), @@ -811,7 +820,8 @@ literal_sender(N, Recv) ->                            ?mod:b(),                            ?mod:c(),                            ?mod:d(), -                          ?mod:e()]}, +                          ?mod:e(), +                          ?mod:f()]},      literal_sender(N - 1, Recv).  literal_switcher() -> diff --git a/erts/emulator/test/code_SUITE_data/literals.erl b/erts/emulator/test/code_SUITE_data/literals.erl index 7c3b0ebe73..13c8b412b0 100644 --- a/erts/emulator/test/code_SUITE_data/literals.erl +++ b/erts/emulator/test/code_SUITE_data/literals.erl @@ -19,7 +19,8 @@  %%  -module(literals). --export([a/0,b/0,huge_bignum/0,binary/0,unused_binaries/0,bits/0]). +-export([a/0,b/0,huge_bignum/0,funs/0, +         binary/0,unused_binaries/0,bits/0]).  -export([msg1/0,msg2/0,msg3/0,msg4/0,msg5/0]).  a() -> @@ -108,3 +109,8 @@ msg2() -> {"hello","world"}.  msg3() -> <<"halloj">>.  msg4() -> #{ 1=> "hello", b => "world"}.  msg5() -> {1,2,3,4,5,6}. + +funs() -> +    %% Literal funs (in a non-literal list). +    [fun ?MODULE:a/0, +     fun() -> ok end].                          %No environment. diff --git a/erts/emulator/test/driver_SUITE.erl b/erts/emulator/test/driver_SUITE.erl index 6f5d639d04..9ffb484eb4 100644 --- a/erts/emulator/test/driver_SUITE.erl +++ b/erts/emulator/test/driver_SUITE.erl @@ -2645,24 +2645,7 @@ wait_deallocations() ->  driver_alloc_size() ->      wait_deallocations(), -    case erlang:system_info({allocator_sizes, driver_alloc}) of -        false -> -            undefined; -        MemInfo -> -            CS = lists:foldl( -                   fun ({instance, _, L}, Acc) -> -                           {value,{_,MBCS}} = lists:keysearch(mbcs, 1, L), -                           {value,{_,SBCS}} = lists:keysearch(sbcs, 1, L), -                           [MBCS,SBCS | Acc] -                   end, -                   [], -                   MemInfo), -            lists:foldl( -              fun(L, Sz0) -> -                      {value,{_,Sz,_,_}} = lists:keysearch(blocks_size, 1, L), -                      Sz0+Sz -              end, 0, CS) -    end. +    erts_debug:alloc_blocks_size(driver_alloc).  rpc(Config, Fun) ->      case proplists:get_value(node, Config) of diff --git a/erts/emulator/test/erts_debug_SUITE.erl b/erts/emulator/test/erts_debug_SUITE.erl index 6aa7a445b5..f39dbedd8f 100644 --- a/erts/emulator/test/erts_debug_SUITE.erl +++ b/erts/emulator/test/erts_debug_SUITE.erl @@ -22,8 +22,10 @@  -include_lib("common_test/include/ct.hrl").  -export([all/0, suite/0, -	 test_size/1,flat_size_big/1,df/1,term_type/1, -	 instructions/1, stack_check/1]). 
+         test_size/1,flat_size_big/1,df/1,term_type/1, +         instructions/1, stack_check/1, alloc_blocks_size/1]). + +-export([do_alloc_blocks_size/0]).  suite() ->      [{ct_hooks,[ts_install_cth]}, @@ -31,7 +33,7 @@ suite() ->  all() ->       [test_size, flat_size_big, df, instructions, term_type, -     stack_check]. +     stack_check, alloc_blocks_size].  test_size(Config) when is_list(Config) ->      ConsCell1 = id([a|b]), @@ -210,5 +212,28 @@ instructions(Config) when is_list(Config) ->      _ = [list_to_atom(I) || I <- Is],      ok. +alloc_blocks_size(Config) when is_list(Config) -> +    F = fun(Args) -> +                Node = start_slave(Args), +                ok = rpc:call(Node, ?MODULE, do_alloc_blocks_size, []), +                true = test_server:stop_node(Node) +        end, +    F("+Meamax"), +    F("+Meamin"), +    F(""), +    ok. + +do_alloc_blocks_size() -> +    _ = erts_debug:alloc_blocks_size(binary_alloc), +    ok. + +start_slave(Args) -> +    Name = ?MODULE_STRING ++ "_slave", +    Pa = filename:dirname(code:which(?MODULE)), +    {ok, Node} = test_server:start_node(list_to_atom(Name), +                                        slave, +                                        [{args, "-pa " ++ Pa ++ " " ++ Args}]), +    Node. +  id(I) ->      I. diff --git a/erts/emulator/test/fun_SUITE.erl b/erts/emulator/test/fun_SUITE.erl index 73fe9b0d8f..f8a879182e 100644 --- a/erts/emulator/test/fun_SUITE.erl +++ b/erts/emulator/test/fun_SUITE.erl @@ -576,7 +576,7 @@ refc_dist(Config) when is_list(Config) ->      process_flag(trap_exit, true),      Pid = spawn_link(Node, fun() -> receive                                          Fun when is_function(Fun) -> -                                            2 = fun_refc(Fun), +                                            3 = fun_refc(Fun),                                              exit({normal,Fun}) end                             end),      F = fun() -> 42 end, @@ -598,7 +598,7 @@ refc_dist_send(Node, F) ->      Pid = spawn_link(Node, fun() -> receive                                          {To,Fun} when is_function(Fun) ->                                              wait_until(fun () -> -                                                               2 =:= fun_refc(Fun) +                                                               3 =:= fun_refc(Fun)                                                         end),                                              To ! Fun                                      end @@ -626,7 +626,7 @@ refc_dist_reg_send(Node, F) ->                                     Me ! Ref,                                     receive                                         {Me,Fun} when is_function(Fun) -> -                                           2 = fun_refc(Fun), +                                           3 = fun_refc(Fun),                                             Me ! Fun                                     end                             end), @@ -806,11 +806,13 @@ verify_not_undef(Fun, Tag) ->  	    ct:fail("tag ~w not defined in fun_info", [Tag]);  	{Tag,_} -> ok      end. -	     +  id(X) ->      X.  spawn_call(Node, AFun) -> +    Parent = self(), +    Init = erlang:whereis(init),      Pid = spawn_link(Node,  		     fun() ->  			     receive @@ -821,8 +823,10 @@ spawn_call(Node, AFun) ->  						_ -> lists:seq(0, Arity-1)  					    end,  				     Res = apply(Fun, Args), -				     {pid,Creator} = erlang:fun_info(Fun, pid), -				     Creator ! 
{result,Res} +                     case erlang:fun_info(Fun, pid) of +                        {pid,Init} -> Parent ! {result,Res}; +                        {pid,Creator} -> Creator ! {result,Res} +                     end  			     end  		     end),      Pid ! {AFun,AFun,AFun}, diff --git a/erts/emulator/test/process_SUITE.erl b/erts/emulator/test/process_SUITE.erl index 57eb082d64..f4b1d885fe 100644 --- a/erts/emulator/test/process_SUITE.erl +++ b/erts/emulator/test/process_SUITE.erl @@ -2233,8 +2233,8 @@ processes_term_proc_list(Config) when is_list(Config) ->      %% We have to run this test case with +S1 since instrument:allocations()      %% will report a free()'d block as present until it's actually deallocated      %% by its employer. -    Run("+MSe true +MSatags false +S1"), -    Run("+MSe true +MSatags true +S1"), +    Run("+MSe true +Muatags false +S1"), +    Run("+MSe true +Muatags true +S1"),      ok. @@ -2242,10 +2242,12 @@ processes_term_proc_list(Config) when is_list(Config) ->  	chk_term_proc_list(?LINE, MC, XB)).  chk_term_proc_list(Line, MustChk, ExpectBlks) -> -    Allocs = instrument:allocations(#{ allocator_types => [sl_alloc] }), +    Allocs = instrument:allocations(),      case {MustChk, Allocs} of  	{false, {error, not_enabled}} ->  	    not_enabled; +	{false, {ok, {_Shift, _Unscanned, ByOrigin}}} when ByOrigin =:= #{} -> +	    not_enabled;  	{_, {ok, {_Shift, _Unscanned, ByOrigin}}} ->              ByType = maps:get(system, ByOrigin, #{}),              Hist = maps:get(ptab_list_deleted_el, ByType, {}), diff --git a/erts/emulator/test/system_info_SUITE.erl b/erts/emulator/test/system_info_SUITE.erl index 21ab6b378a..8ea2d88ec4 100644 --- a/erts/emulator/test/system_info_SUITE.erl +++ b/erts/emulator/test/system_info_SUITE.erl @@ -457,11 +457,16 @@ cmp_memory(MWs, Str) ->      %% Total, processes, processes_used, and system will seldom      %% give us exactly the same result since the two readings      %% aren't taken atomically. +    %% +    %% Torerance is scaled according to the number of schedulers +    %% to match spawn_mem_workers. + +    Tolerance = 1.05 + 0.01 * erlang:system_info(schedulers_online), -    cmp_memory(total, EM, EDM, 1.05), -    cmp_memory(processes, EM, EDM, 1.05), -    cmp_memory(processes_used, EM, EDM, 1.05), -    cmp_memory(system, EM, EDM, 1.05), +    cmp_memory(total, EM, EDM, Tolerance), +    cmp_memory(processes, EM, EDM, Tolerance), +    cmp_memory(processes_used, EM, EDM, Tolerance), +    cmp_memory(system, EM, EDM, Tolerance),      ok. diff --git a/erts/emulator/test/timer_bif_SUITE.erl b/erts/emulator/test/timer_bif_SUITE.erl index fc11a04a31..15fe13c8c0 100644 --- a/erts/emulator/test/timer_bif_SUITE.erl +++ b/erts/emulator/test/timer_bif_SUITE.erl @@ -361,7 +361,7 @@ evil_timers(Config) when is_list(Config) ->      %%      %% 1. A timer started with erlang:start_timer(Time, Receiver, Msg),      %%    where Msg is a composite term, expires, and the receivers main -    %%    lock *can not* be acquired immediately (typically when the +    %%    lock *cannot* be acquired immediately (typically when the      %%    receiver *is* running).      %%      %%    The wrap tuple ({timeout, TRef, Msg}) will in this case @@ -372,7 +372,7 @@ evil_timers(Config) when is_list(Config) ->      RecvTimeOutMsgs0 = evil_recv_timeouts(200),      %% 2. 
A timer started with erlang:start_timer(Time, Receiver, Msg),      %%    where Msg is an immediate term, expires, and the receivers main -    %%    lock *can not* be acquired immediately (typically when the +    %%    lock *cannot* be acquired immediately (typically when the      %%    receiver *is* running).      %%      %%    The wrap tuple will in this case be allocated in a new | 
