Diffstat (limited to 'erts/emulator')
34 files changed, 3288 insertions, 1160 deletions
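The bulk of the changes below remove the old sbmbc allocator and introduce carrier migration: a scheduler-specific allocator instance can abandon a poorly utilized multiblock carrier into a shared pool, from which another instance may later fetch it. The feature is controlled by the new acul ("abandon carrier utilization limit") setting parsed by get_acul_value()/handle_au_arg() in erl_alloc.c. The sketch below only illustrates the utilization test implied by that setting; the helper names are hypothetical, and the defaults quoted in the comment are the ERTS_ALC_DEFAULT_ENABLED_ACUL_* values defined in the diff.

/*
 * Illustrative sketch only (hypothetical helper names): a carrier is a
 * candidate for abandonment when the bytes still allocated in it fall to
 * or below acul percent of its size.  acul is 0..100; "de" selects the
 * built-in defaults (85 for ll_alloc, 45 for eheap_alloc, 60 otherwise).
 */
#include <stdio.h>

typedef unsigned long UWord;

static UWord abandon_limit(UWord carrier_size, UWord acul)
{
    return (carrier_size / 100) * acul;      /* acul% of the carrier size */
}

static int should_abandon(UWord blocks_size, UWord carrier_size, UWord acul)
{
    return acul != 0 && blocks_size <= abandon_limit(carrier_size, acul);
}

int main(void)
{
    UWord csz = 1024 * 1024;                              /* 1 MB carrier */
    printf("%d\n", should_abandon(200 * 1024, csz, 60));  /* ~19% used -> 1 */
    printf("%d\n", should_abandon(800 * 1024, csz, 60));  /* ~78% used -> 0 */
    return 0;
}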
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in index fb4cde0e76..58e83540e1 100644 --- a/erts/emulator/Makefile.in +++ b/erts/emulator/Makefile.in @@ -559,13 +559,14 @@ GENERATE += $(TTF_DIR)/driver_tab.c # Preloaded code. # # This list must be consistent with PRE_LOADED_MODULES in -# lib/kernel/src/Makefile. +# erts/preloaded/src/Makefile. ifeq ($(TARGET),win32) # On windows the preloaded objects are in a resource object. PRELOAD_OBJ = $(OBJDIR)/beams.$(RES_EXT) PRELOAD_SRC = $(TARGET)/beams.rc $(PRELOAD_SRC): $(ERL_TOP)/erts/preloaded/ebin/otp_ring0.beam \ $(ERL_TOP)/erts/preloaded/ebin/init.beam \ + $(ERL_TOP)/erts/preloaded/ebin/prim_eval.beam \ $(ERL_TOP)/erts/preloaded/ebin/prim_inet.beam \ $(ERL_TOP)/erts/preloaded/ebin/prim_file.beam \ $(ERL_TOP)/erts/preloaded/ebin/zlib.beam \ @@ -579,6 +580,7 @@ PRELOAD_OBJ = $(OBJDIR)/preload.o PRELOAD_SRC = $(TARGET)/preload.c $(PRELOAD_SRC): $(ERL_TOP)/erts/preloaded/ebin/otp_ring0.beam \ $(ERL_TOP)/erts/preloaded/ebin/init.beam \ + $(ERL_TOP)/erts/preloaded/ebin/prim_eval.beam \ $(ERL_TOP)/erts/preloaded/ebin/prim_inet.beam \ $(ERL_TOP)/erts/preloaded/ebin/prim_file.beam \ $(ERL_TOP)/erts/preloaded/ebin/zlib.beam \ diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names index ce60bb9bbc..7d86e486f1 100644 --- a/erts/emulator/beam/atom.names +++ b/erts/emulator/beam/atom.names @@ -248,6 +248,7 @@ atom get_data atom get_seq_token atom get_tcw atom getenv +atom gather_gc_info_result atom gather_sched_wall_time_result atom getting_linked atom getting_unlinked @@ -322,6 +323,7 @@ atom maximum atom max_tables max_processes atom mbuf_size atom memory +atom memory_internal atom memory_types atom message atom message_binary diff --git a/erts/emulator/beam/erl_afit_alloc.c b/erts/emulator/beam/erl_afit_alloc.c index b45201af42..eca4e3b3bb 100644 --- a/erts/emulator/beam/erl_afit_alloc.c +++ b/erts/emulator/beam/erl_afit_alloc.c @@ -48,10 +48,9 @@ struct AFFreeBlock_t_ { #define MIN_MBC_FIRST_FREE_SZ (4*1024) /* Prototypes of callback functions */ -static Block_t * get_free_block (Allctr_t *, Uint, - Block_t *, Uint, Uint32); -static void link_free_block (Allctr_t *, Block_t *, Uint32); -static void unlink_free_block (Allctr_t *, Block_t *, Uint32); +static Block_t * get_free_block (Allctr_t *, Uint, Block_t *, Uint); +static void link_free_block (Allctr_t *, Block_t *); +static void unlink_free_block (Allctr_t *, Block_t *); static Eterm info_options (Allctr_t *, char *, int *, @@ -84,8 +83,7 @@ erts_afalc_start(AFAllctr_t *afallctr, sys_memcpy((void *) afallctr, (void *) &zero.allctr, sizeof(AFAllctr_t)); - init->sbmbct = 0; /* Small mbc not supported by afit */ - + allctr->mbc_header_size = sizeof(Carrier_t); allctr->min_mbc_size = MIN_MBC_SZ; allctr->min_mbc_first_free_size = MIN_MBC_FIRST_FREE_SZ; allctr->min_block_size = sizeof(AFFreeBlock_t); @@ -100,6 +98,9 @@ erts_afalc_start(AFAllctr_t *afallctr, allctr->get_next_mbc_size = NULL; allctr->creating_mbc = NULL; allctr->destroying_mbc = NULL; + allctr->add_mbc = NULL; + allctr->remove_mbc = NULL; + allctr->largest_fblk_in_mbc = NULL; allctr->init_atoms = init_atoms; #ifdef ERTS_ALLOC_UTIL_HARD_DEBUG @@ -116,8 +117,7 @@ erts_afalc_start(AFAllctr_t *afallctr, } static Block_t * -get_free_block(Allctr_t *allctr, Uint size, Block_t *cand_blk, Uint cand_size, - Uint32 flags) +get_free_block(Allctr_t *allctr, Uint size, Block_t *cand_blk, Uint cand_size) { AFAllctr_t *afallctr = (AFAllctr_t *) allctr; @@ -135,7 +135,7 @@ get_free_block(Allctr_t *allctr, Uint size, 
Block_t *cand_blk, Uint cand_size, } static void -link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags) +link_free_block(Allctr_t *allctr, Block_t *block) { AFFreeBlock_t *blk = (AFFreeBlock_t *) block; AFAllctr_t *afallctr = (AFAllctr_t *) allctr; @@ -156,7 +156,7 @@ link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags) } static void -unlink_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags) +unlink_free_block(Allctr_t *allctr, Block_t *block) { AFFreeBlock_t *blk = (AFFreeBlock_t *) block; AFAllctr_t *afallctr = (AFAllctr_t *) allctr; diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c index 403776aade..a547191d6d 100644 --- a/erts/emulator/beam/erl_alloc.c +++ b/erts/emulator/beam/erl_alloc.c @@ -71,6 +71,23 @@ #define AU_ALLOC_DEFAULT_ENABLE(X) (X) #endif +#define ERTS_ALC_DEFAULT_ENABLED_ACUL 60 +#define ERTS_ALC_DEFAULT_ENABLED_ACUL_EHEAP_ALLOC 45 +#define ERTS_ALC_DEFAULT_ENABLED_ACUL_LL_ALLOC 85 + +#define ERTS_ALC_DEFAULT_ACUL 0 +#define ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC 0 +#define ERTS_ALC_DEFAULT_ACUL_LL_ALLOC 0 + +#ifndef ERTS_SMP +# undef ERTS_ALC_DEFAULT_ACUL +# define ERTS_ALC_DEFAULT_ACUL 0 +# undef ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC +# define ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC 0 +# undef ERTS_ALC_DEFAULT_ACUL_LL_ALLOC +# define ERTS_ALC_DEFAULT_ACUL_LL_ALLOC 0 +#endif + #ifdef DEBUG static Uint install_debug_functions(void); #if 0 @@ -101,11 +118,9 @@ typedef union { char align_aoffa[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(AOFFAllctr_t))]; } ErtsAllocatorState_t; -static ErtsAllocatorState_t sbmbc_alloc_state; static ErtsAllocatorState_t std_alloc_state; static ErtsAllocatorState_t ll_alloc_state; #if HALFWORD_HEAP -static ErtsAllocatorState_t sbmbc_low_alloc_state; static ErtsAllocatorState_t std_low_alloc_state; static ErtsAllocatorState_t ll_low_alloc_state; #endif @@ -120,6 +135,7 @@ static ErtsAllocatorState_t fix_alloc_state; typedef struct { erts_smp_atomic32_t refc; int only_sz; + int internal; Uint req_sched; Process *proc; Eterm ref; @@ -162,6 +178,7 @@ enum allctr_type { struct au_init { int enable; int thr_spec; + int carrier_migration_allowed; enum allctr_type atype; struct { AllctrInit_t util; @@ -200,7 +217,6 @@ typedef struct { char *mtrace; char *nodename; } instr; - struct au_init sbmbc_alloc; struct au_init sl_alloc; struct au_init std_alloc; struct au_init ll_alloc; @@ -211,13 +227,12 @@ typedef struct { struct au_init driver_alloc; struct au_init fix_alloc; #if HALFWORD_HEAP - struct au_init sbmbc_low_alloc; struct au_init std_low_alloc; struct au_init ll_low_alloc; #endif } erts_alc_hndl_args_init_t; -#define ERTS_AU_INIT__ {0, 0, GOODFIT, DEFAULT_ALLCTR_INIT, {1,1,1,1}} +#define ERTS_AU_INIT__ {0, 0, 1, GOODFIT, DEFAULT_ALLCTR_INIT, {1,1,1,1}} #define SET_DEFAULT_ALLOC_OPTS(IP) \ do { \ @@ -225,32 +240,13 @@ do { \ sys_memcpy((void *) (IP), (void *) &aui__, sizeof(struct au_init)); \ } while (0) -static void -set_default_sbmbc_alloc_opts(struct au_init *ip) +static ERTS_INLINE void +set_default_acul(struct au_init *ip, int acul) { - SET_DEFAULT_ALLOC_OPTS(ip); - ip->enable = 0; - ip->thr_spec = 0; - ip->atype = BESTFIT; - ip->init.bf.ao = 1; - ip->init.util.ramv = 0; - ip->init.util.mmsbc = 0; - ip->init.util.mmmbc = 500; - ip->init.util.sbct = ~((UWord) 0); - ip->init.util.name_prefix = "sbmbc_"; - ip->init.util.alloc_no = ERTS_ALC_A_SBMBC; -#ifndef SMALL_MEMORY - ip->init.util.mmbcs = 2*1024*1024; /* Main carrier size */ -#else - ip->init.util.mmbcs = 1*1024*1024; /* Main carrier size */ -#endif - 
ip->init.util.ts = ERTS_ALC_MTA_SBMBC; - ip->init.util.asbcst = 0; - ip->init.util.rsbcst = 0; - ip->init.util.rsbcmt = 0; - ip->init.util.rmbcmt = 0; - ip->init.util.sbmbct = 0; - ip->init.util.sbmbcs = 0; + ip->thr_spec = 1; + ip->atype = AOFIRSTFIT; + ip->init.aoff.bf_within_carrier = 1; + ip->init.util.acul = acul; } static void @@ -258,8 +254,12 @@ set_default_sl_alloc_opts(struct au_init *ip) { SET_DEFAULT_ALLOC_OPTS(ip); ip->enable = AU_ALLOC_DEFAULT_ENABLE(1); +#if ERTS_ALC_DEFAULT_ACUL + set_default_acul(ip, ERTS_ALC_DEFAULT_ACUL); +#else ip->thr_spec = 1; ip->atype = GOODFIT; +#endif ip->init.util.name_prefix = "sl_"; ip->init.util.mmmbc = 5; ip->init.util.alloc_no = ERTS_ALC_A_SHORT_LIVED; @@ -282,8 +282,12 @@ set_default_std_alloc_opts(struct au_init *ip) { SET_DEFAULT_ALLOC_OPTS(ip); ip->enable = AU_ALLOC_DEFAULT_ENABLE(1); +#if ERTS_ALC_DEFAULT_ACUL + set_default_acul(ip, ERTS_ALC_DEFAULT_ACUL); +#else ip->thr_spec = 1; ip->atype = BESTFIT; +#endif ip->init.util.name_prefix = "std_"; ip->init.util.mmmbc = 5; ip->init.util.alloc_no = ERTS_ALC_A_STANDARD; @@ -300,9 +304,13 @@ set_default_ll_alloc_opts(struct au_init *ip) { SET_DEFAULT_ALLOC_OPTS(ip); ip->enable = AU_ALLOC_DEFAULT_ENABLE(1); +#if ERTS_ALC_DEFAULT_ACUL_LL_ALLOC + set_default_acul(ip, ERTS_ALC_DEFAULT_ACUL_LL_ALLOC); +#else ip->thr_spec = 0; ip->atype = BESTFIT; ip->init.bf.ao = 1; +#endif ip->init.util.ramv = 0; ip->init.util.mmsbc = 0; ip->init.util.mmmbc = 0; @@ -319,8 +327,6 @@ set_default_ll_alloc_opts(struct au_init *ip) ip->init.util.rsbcst = 0; ip->init.util.rsbcmt = 0; ip->init.util.rmbcmt = 0; - ip->init.util.sbmbct = 0; - ip->init.util.sbmbcs = 0; } static void @@ -329,6 +335,7 @@ set_default_temp_alloc_opts(struct au_init *ip) SET_DEFAULT_ALLOC_OPTS(ip); ip->enable = AU_ALLOC_DEFAULT_ENABLE(1); ip->thr_spec = 1; + ip->carrier_migration_allowed = 0; ip->atype = AFIT; ip->init.util.name_prefix = "temp_"; ip->init.util.alloc_no = ERTS_ALC_A_TEMPORARY; @@ -351,8 +358,12 @@ set_default_eheap_alloc_opts(struct au_init *ip) { SET_DEFAULT_ALLOC_OPTS(ip); ip->enable = AU_ALLOC_DEFAULT_ENABLE(1); +#if ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC + set_default_acul(ip, ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC); +#else ip->thr_spec = 1; ip->atype = GOODFIT; +#endif ip->init.util.mmmbc = 100; ip->init.util.name_prefix = "eheap_"; ip->init.util.alloc_no = ERTS_ALC_A_EHEAP; @@ -374,8 +385,12 @@ set_default_binary_alloc_opts(struct au_init *ip) { SET_DEFAULT_ALLOC_OPTS(ip); ip->enable = AU_ALLOC_DEFAULT_ENABLE(1); +#if ERTS_ALC_DEFAULT_ACUL + set_default_acul(ip, ERTS_ALC_DEFAULT_ACUL); +#else ip->thr_spec = 1; ip->atype = BESTFIT; +#endif ip->init.util.mmmbc = 50; ip->init.util.name_prefix = "binary_"; ip->init.util.alloc_no = ERTS_ALC_A_BINARY; @@ -392,8 +407,12 @@ set_default_ets_alloc_opts(struct au_init *ip) { SET_DEFAULT_ALLOC_OPTS(ip); ip->enable = AU_ALLOC_DEFAULT_ENABLE(1); +#if ERTS_ALC_DEFAULT_ACUL + set_default_acul(ip, ERTS_ALC_DEFAULT_ACUL); +#else ip->thr_spec = 1; ip->atype = BESTFIT; +#endif ip->init.util.mmmbc = 100; ip->init.util.name_prefix = "ets_"; ip->init.util.alloc_no = ERTS_ALC_A_ETS; @@ -410,8 +429,12 @@ set_default_driver_alloc_opts(struct au_init *ip) { SET_DEFAULT_ALLOC_OPTS(ip); ip->enable = AU_ALLOC_DEFAULT_ENABLE(1); +#if ERTS_ALC_DEFAULT_ACUL + set_default_acul(ip, ERTS_ALC_DEFAULT_ACUL); +#else ip->thr_spec = 1; ip->atype = BESTFIT; +#endif ip->init.util.name_prefix = "driver_"; ip->init.util.alloc_no = ERTS_ALC_A_DRIVER; #ifndef SMALL_MEMORY @@ -428,8 +451,12 @@ set_default_fix_alloc_opts(struct 
au_init *ip, { SET_DEFAULT_ALLOC_OPTS(ip); ip->enable = AU_ALLOC_DEFAULT_ENABLE(1); +#if ERTS_ALC_DEFAULT_ACUL + set_default_acul(ip, ERTS_ALC_DEFAULT_ACUL); +#else ip->thr_spec = 1; ip->atype = BESTFIT; +#endif ip->init.bf.ao = 1; ip->init.util.name_prefix = "fix_"; ip->init.util.fix_type_size = fix_type_sizes; @@ -527,6 +554,39 @@ static void adjust_fix_alloc_sizes(UWord extra_block_size) } } +static ERTS_INLINE int +strategy_support_carrier_migration(struct au_init *auip) +{ + /* + * Currently only aoff and aoffcaobf support carrier + * migration, i.e, type AOFIRSTFIT. + */ + return auip->atype == AOFIRSTFIT; +} + +static ERTS_INLINE void +check_disable_carrier_migration(struct au_init *auip) +{ + if (!strategy_support_carrier_migration(auip) || !auip->thr_spec) + auip->init.util.acul = 0; +} + +static ERTS_INLINE void +ensure_carrier_migration_support(struct au_init *auip) +{ + auip->thr_spec = 1; /* Need thread preferred */ + + /* + * If strategy cannot handle carrier migration, + * default to a strategy that can... + */ + if (!strategy_support_carrier_migration(auip)) { + /* Default to aoffcaobf */ + auip->atype = AOFIRSTFIT; + auip->init.aoff.bf_within_carrier = 1; + } +} + void erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop) { @@ -565,7 +625,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop) hdbg_init(); #endif - erts_have_sbmbc_alloc = 0; ncpu = eaiop->ncpu; if (ncpu < 1) ncpu = 1; @@ -575,7 +634,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop) erts_sys_alloc_init(); erts_init_utils_mem(); - set_default_sbmbc_alloc_opts(&init.sbmbc_alloc); set_default_sl_alloc_opts(&init.sl_alloc); set_default_std_alloc_opts(&init.std_alloc); set_default_ll_alloc_opts(&init.ll_alloc); @@ -591,7 +649,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop) handle_args(argc, argv, &init); #ifndef ERTS_SMP - init.sbmbc_alloc.thr_spec = 0; init.sl_alloc.thr_spec = 0; init.std_alloc.thr_spec = 0; init.ll_alloc.thr_spec = 0; @@ -604,7 +661,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop) if (init.erts_alloc_config) { /* Adjust flags that erts_alloc_config won't like */ - init.sbmbc_alloc.thr_spec = 0; init.temp_alloc.thr_spec = 0; init.sl_alloc.thr_spec = 0; init.std_alloc.thr_spec = 0; @@ -616,13 +672,22 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop) init.fix_alloc.thr_spec = 0; } + check_disable_carrier_migration(&init.sl_alloc); + check_disable_carrier_migration(&init.std_alloc); + check_disable_carrier_migration(&init.ll_alloc); + check_disable_carrier_migration(&init.eheap_alloc); + check_disable_carrier_migration(&init.binary_alloc); + check_disable_carrier_migration(&init.ets_alloc); + check_disable_carrier_migration(&init.driver_alloc); + check_disable_carrier_migration(&init.fix_alloc); + + #ifdef ERTS_SMP /* Only temp_alloc can use thread specific interface */ if (init.temp_alloc.thr_spec) init.temp_alloc.thr_spec = erts_no_schedulers; /* Others must use thread preferred interface */ - adjust_tpref(&init.sbmbc_alloc, erts_no_schedulers); adjust_tpref(&init.sl_alloc, erts_no_schedulers); adjust_tpref(&init.std_alloc, erts_no_schedulers); adjust_tpref(&init.ll_alloc, erts_no_schedulers); @@ -641,7 +706,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop) * The following allocators cannot be run with afit strategy. * Make sure they don't... 
*/ - refuse_af_strategy(&init.sbmbc_alloc); refuse_af_strategy(&init.sl_alloc); refuse_af_strategy(&init.std_alloc); refuse_af_strategy(&init.ll_alloc); @@ -685,11 +749,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop) #if HALFWORD_HEAP /* Init low memory variants by cloning */ - init.sbmbc_low_alloc = init.sbmbc_alloc; - init.sbmbc_low_alloc.init.util.name_prefix = "sbmbc_low_"; - init.sbmbc_low_alloc.init.util.alloc_no = ERTS_ALC_A_SBMBC_LOW; - init.sbmbc_low_alloc.init.util.low_mem = 1; - init.std_low_alloc = init.std_alloc; init.std_low_alloc.init.util.name_prefix = "std_low_"; init.std_low_alloc.init.util.alloc_no = ERTS_ALC_A_STANDARD_LOW; @@ -702,13 +761,11 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop) init.ll_low_alloc.init.util.force = 1; init.ll_low_alloc.init.util.low_mem = 1; - set_au_allocator(ERTS_ALC_A_SBMBC_LOW, &init.sbmbc_low_alloc, ncpu); set_au_allocator(ERTS_ALC_A_STANDARD_LOW, &init.std_low_alloc, ncpu); set_au_allocator(ERTS_ALC_A_LONG_LIVED_LOW, &init.ll_low_alloc, ncpu); #endif /* HALFWORD */ set_au_allocator(ERTS_ALC_A_TEMPORARY, &init.temp_alloc, ncpu); - set_au_allocator(ERTS_ALC_A_SBMBC, &init.sbmbc_alloc, ncpu); set_au_allocator(ERTS_ALC_A_SHORT_LIVED, &init.sl_alloc, ncpu); set_au_allocator(ERTS_ALC_A_STANDARD, &init.std_alloc, ncpu); set_au_allocator(ERTS_ALC_A_LONG_LIVED, &init.ll_alloc, ncpu); @@ -735,20 +792,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop) erts_mtrace_init(init.instr.mtrace, init.instr.nodename); - /* sbmbc_alloc() needs to be started first */ - start_au_allocator(ERTS_ALC_A_SBMBC, - &init.sbmbc_alloc, - &sbmbc_alloc_state); -#if HALFWORD_HEAP - start_au_allocator(ERTS_ALC_A_SBMBC_LOW, - &init.sbmbc_low_alloc, - &sbmbc_low_alloc_state); - erts_have_sbmbc_alloc = (init.sbmbc_alloc.enable - && init.sbmbc_low_alloc.enable); -#else - erts_have_sbmbc_alloc = init.sbmbc_alloc.enable; -#endif - start_au_allocator(ERTS_ALC_A_TEMPORARY, &init.temp_alloc, &temp_alloc_state); @@ -1138,6 +1181,7 @@ get_kb_value(char *param_end, char** argv, int* ip) return ((Uint) tmp)*1024; } +#if 0 static Uint get_byte_value(char *param_end, char** argv, int* ip) { @@ -1151,6 +1195,7 @@ get_byte_value(char *param_end, char** argv, int* ip) bad_value(param, param_end, value); return (Uint) tmp; } +#endif static Uint get_amount_value(char *param_end, char** argv, int* ip) @@ -1166,17 +1211,59 @@ get_amount_value(char *param_end, char** argv, int* ip) return (Uint) tmp; } +static Uint +get_acul_value(struct au_init *auip, char *param_end, char** argv, int* ip) +{ + Sint tmp; + char *rest; + char *param = argv[*ip]+1; + char *value = get_value(param_end, argv, ip); + if (sys_strcmp(value, "de") == 0) { + switch (auip->init.util.alloc_no) { + case ERTS_ALC_A_LONG_LIVED: +#if HALFWORD_HEAP + case ERTS_ALC_A_LONG_LIVED_LOW: +#endif + return ERTS_ALC_DEFAULT_ENABLED_ACUL_LL_ALLOC; + case ERTS_ALC_A_EHEAP: + return ERTS_ALC_DEFAULT_ENABLED_ACUL_EHEAP_ALLOC; + default: + return ERTS_ALC_DEFAULT_ENABLED_ACUL; + } + } + errno = 0; + tmp = (Sint) ErtsStrToSint(value, &rest, 10); + if (errno != 0 || rest == value || tmp < 0 || 100 < tmp) + bad_value(param, param_end, value); + return (Uint) tmp; +} + static void handle_au_arg(struct au_init *auip, char* sub_param, char** argv, - int* ip) + int* ip, + int u_switch) { char *param = argv[*ip]+1; switch (sub_param[0]) { case 'a': - if(has_prefix("asbcst", sub_param)) { + if (has_prefix("acul", sub_param)) { + if (!auip->carrier_migration_allowed) { + if (!u_switch) + 
goto bad_switch; + else { + /* ignore */ + (void) get_acul_value(auip, sub_param + 4, argv, ip); + break; + } + } + ensure_carrier_migration_support(auip); + + auip->init.util.acul = get_acul_value(auip, sub_param + 4, argv, ip); + } + else if(has_prefix("asbcst", sub_param)) { auip->init.util.asbcst = get_kb_value(sub_param + 6, argv, ip); } else if(has_prefix("as", sub_param)) { @@ -1197,21 +1284,22 @@ handle_au_arg(struct au_init *auip, } else if (strcmp("aoff", alg) == 0) { auip->atype = AOFIRSTFIT; + auip->init.aoff.bf_within_carrier = 0; + } + else if (strcmp("aoffcaobf", alg) == 0) { + auip->atype = AOFIRSTFIT; + auip->init.aoff.bf_within_carrier = 1; } else { bad_value(param, sub_param + 1, alg); } + check_disable_carrier_migration(auip); } else goto bad_switch; break; case 'e': auip->enable = get_bool_value(sub_param+1, argv, ip); -#if !HAVE_ERTS_SBMBC - if (auip->init.util.alloc_no == ERTS_ALC_A_SBMBC) { - auip->enable = 0; - } -#endif break; case 'l': if (has_prefix("lmbcs", sub_param)) { @@ -1271,18 +1359,6 @@ handle_au_arg(struct au_init *auip, if(has_prefix("sbct", sub_param)) { auip->init.util.sbct = get_kb_value(sub_param + 4, argv, ip); } - else if (has_prefix("sbmbcs", sub_param)) { -#if HAVE_ERTS_SBMBC - auip->init.util.sbmbcs = -#endif - get_byte_value(sub_param + 6, argv, ip); - } - else if (has_prefix("sbmbct", sub_param)) { -#if HAVE_ERTS_SBMBC - auip->init.util.sbmbct = -#endif - get_byte_value(sub_param + 6, argv, ip); - } else if (has_prefix("smbcs", sub_param)) { auip->default_.smbcs = 0; auip->init.util.smbcs = get_kb_value(sub_param + 5, argv, ip); @@ -1298,6 +1374,7 @@ handle_au_arg(struct au_init *auip, } else if (res == 0) { auip->thr_spec = 0; + check_disable_carrier_migration(auip); break; } goto bad_switch; @@ -1312,7 +1389,6 @@ static void handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init) { struct au_init *aui[] = { - &init->sbmbc_alloc, &init->binary_alloc, &init->std_alloc, &init->ets_alloc, @@ -1339,25 +1415,22 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init) case 'M': switch (argv[i][2]) { case 'B': - handle_au_arg(&init->binary_alloc, &argv[i][3], argv, &i); - break; - case 'C': - handle_au_arg(&init->sbmbc_alloc, &argv[i][3], argv, &i); + handle_au_arg(&init->binary_alloc, &argv[i][3], argv, &i, 0); break; case 'D': - handle_au_arg(&init->std_alloc, &argv[i][3], argv, &i); + handle_au_arg(&init->std_alloc, &argv[i][3], argv, &i, 0); break; case 'E': - handle_au_arg(&init->ets_alloc, &argv[i][3], argv, &i); + handle_au_arg(&init->ets_alloc, &argv[i][3], argv, &i, 0); break; case 'F': - handle_au_arg(&init->fix_alloc, &argv[i][3], argv, &i); + handle_au_arg(&init->fix_alloc, &argv[i][3], argv, &i, 0); break; case 'H': - handle_au_arg(&init->eheap_alloc, &argv[i][3], argv, &i); + handle_au_arg(&init->eheap_alloc, &argv[i][3], argv, &i, 0); break; case 'L': - handle_au_arg(&init->ll_alloc, &argv[i][3], argv, &i); + handle_au_arg(&init->ll_alloc, &argv[i][3], argv, &i, 0); break; case 'M': if (has_prefix("amcbf", argv[i]+3)) { @@ -1383,13 +1456,13 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init) } break; case 'R': - handle_au_arg(&init->driver_alloc, &argv[i][3], argv, &i); + handle_au_arg(&init->driver_alloc, &argv[i][3], argv, &i, 0); break; case 'S': - handle_au_arg(&init->sl_alloc, &argv[i][3], argv, &i); + handle_au_arg(&init->sl_alloc, &argv[i][3], argv, &i, 0); break; case 'T': - handle_au_arg(&init->temp_alloc, &argv[i][3], argv, &i); + handle_au_arg(&init->temp_alloc, &argv[i][3], 
argv, &i, 0); break; case 'Y': { /* sys_alloc */ if (has_prefix("tt", param+2)) { @@ -1448,9 +1521,6 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init) else if (strcmp("max", arg) == 0) { for (a = 0; a < aui_sz; a++) aui[a]->enable = 1; -#if !HAVE_ERTS_SBMBC - init->sbmbc_alloc.enable = 0; -#endif } else if (strcmp("config", arg) == 0) { init->erts_alloc_config = 1; @@ -1478,6 +1548,7 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init) for (a = 0; a < aui_sz; a++) { aui[a]->thr_spec = 0; + check_disable_carrier_migration(aui[a]); aui[a]->init.util.ramv = 0; aui[a]->init.util.mmmbc = 10; aui[a]->init.util.lmbcs = 5*1024*1024; @@ -1542,7 +1613,7 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init) argv[start + 1] = val; i = start; } - handle_au_arg(aui[a], &argv[i][3], argv, &i); + handle_au_arg(aui[a], &argv[i][3], argv, &i, 1); } } break; @@ -2088,15 +2159,11 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg) return am_badarg; } - /* All alloc_util allocators except sbmbc_alloc *have* to be enabled */ + /* All alloc_util allocators *have* to be enabled */ for (ai = ERTS_ALC_A_MIN; ai <= ERTS_ALC_A_MAX; ai++) { switch (ai) { case ERTS_ALC_A_SYSTEM: - case ERTS_ALC_A_SBMBC: -#if HALFWORD_HEAP - case ERTS_ALC_A_SBMBC_LOW: -#endif break; default: if (!erts_allctrs_info[ai].enabled @@ -2136,12 +2203,6 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg) * Often not thread safe and usually never * contain any allocated memory. */ - case ERTS_ALC_A_SBMBC: - /* Included in other allocators */ -#if HALFWORD_HEAP - case ERTS_ALC_A_SBMBC_LOW: - /* Included in other allocators */ -#endif continue; case ERTS_ALC_A_EHEAP: save = &size.processes; @@ -2585,7 +2646,7 @@ erts_allocator_info(int to, void *arg) as = erts_allctr_thr_spec[a].allctr[ai]; } /* Binary alloc has its own thread safety... */ - erts_alcu_info(as, 0, &to, arg, NULL, NULL); + erts_alcu_info(as, 0, 0, &to, arg, NULL, NULL); } else { switch (a) { @@ -2847,6 +2908,7 @@ reply_alloc_info(void *vair) int i; Eterm (*info_func)(Allctr_t *, int, + int, int *, void *, Uint **, @@ -2975,8 +3037,8 @@ reply_alloc_info(void *vair) allctr = erts_allctr_thr_spec[ai].allctr[0]; else allctr = erts_allctrs_info[ai].extra; - ainfo = info_func(allctr, hpp != NULL, NULL, - NULL, hpp, szp); + ainfo = info_func(allctr, air->internal, hpp != NULL, + NULL, NULL, hpp, szp); ainfo = erts_bld_tuple(hpp, szp, 3, alloc_atom, make_small(0), ainfo); } @@ -3011,7 +3073,7 @@ reply_alloc_info(void *vair) alloc_atom = erts_bld_atom(hpp, szp, (char *) ERTS_ALC_A2AD(ai)); allctr = erts_allctr_thr_spec[ai].allctr[sched_id]; - ainfo = info_func(allctr, hpp != NULL, NULL, + ainfo = info_func(allctr, air->internal, hpp != NULL, NULL, NULL, hpp, szp); ai_list = erts_bld_cons(hpp, szp, erts_bld_tuple( @@ -3067,7 +3129,8 @@ int erts_request_alloc_info(struct process *c_p, Eterm ref, Eterm allocs, - int only_sz) + int only_sz, + int internal) { ErtsAllocInfoReq *air = aireq_alloc(); Eterm req_ai[ERTS_ALC_A_MAX+1+2] = {0}; @@ -3079,6 +3142,8 @@ erts_request_alloc_info(struct process *c_p, air->only_sz = only_sz; + air->internal = internal; + air->proc = c_p; if (is_not_internal_ref(ref)) @@ -3273,14 +3338,13 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3) init.atype = GOODFIT; init.init.util.name_prefix = (char *) a1; init.init.util.ts = a2 ? 
1 : 0; - init.init.util.sbmbct = 0; if ((char **) a3) { char **argv = (char **) a3; int i = 0; while (argv[i]) { if (argv[i][0] == '-' && argv[i][1] == 't') - handle_au_arg(&init, &argv[i][2], argv, &i); + handle_au_arg(&init, &argv[i][2], argv, &i, 0); else return (UWord) NULL; i++; @@ -3400,6 +3464,11 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3) ERTS_ALC_TEST_ABORT; break; #endif /* #ifdef USE_THREADS */ +#ifdef ERTS_SMP + case 0xf13: return (UWord) 1; +#else + case 0xf13: return (UWord) 0; +#endif default: break; } diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h index 9cafd8ddc8..d9fdfc6f58 100644 --- a/erts/emulator/beam/erl_alloc.h +++ b/erts/emulator/beam/erl_alloc.h @@ -65,7 +65,7 @@ Eterm erts_allocator_options(void *proc); struct process; int erts_request_alloc_info(struct process *c_p, Eterm ref, Eterm allocs, - int only_sz); + int only_sz, int internal); #define ERTS_ALLOC_INIT_DEF_OPTS_INITER {0} typedef struct { @@ -100,14 +100,6 @@ UWord erts_alc_test(UWord, #define ERTS_ALC_MIN_LONG_LIVED_TIME (10*60*1000) -#if HALFWORD_HEAP -#define ERTS_IS_SBMBC_ALLOCATOR_NO__(NO) \ - ((NO) == ERTS_ALC_A_SBMBC || (NO) == ERTS_ALC_A_SBMBC_LOW) -#else -#define ERTS_IS_SBMBC_ALLOCATOR_NO__(NO) \ - ((NO) == ERTS_ALC_A_SBMBC) -#endif - typedef struct { int alloc_util; int enabled; diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types index 5a92ab7f24..095ad24387 100644 --- a/erts/emulator/beam/erl_alloc.types +++ b/erts/emulator/beam/erl_alloc.types @@ -76,11 +76,6 @@ allocator SYSTEM true sys_alloc -allocator SBMBC true sbmbc_alloc -+if halfword -allocator SBMBC_LOW true sbmbc_low_alloc -+endif - +if smp allocator TEMPORARY true temp_alloc @@ -146,7 +141,6 @@ class SYSTEM system_data # # <TYPE> <ALLOCATOR> <CLASS> <DESCRIPTION> -type SBMBC SBMBC SYSTEM small_block_mbc type PROC FIXED_SIZE PROCESSES proc type PORT DRIVER SYSTEM port type ATOM LONG_LIVED ATOM atom_entry @@ -351,7 +345,6 @@ type SSB SHORT_LIVED PROCESSES ssb +if halfword -type SBMBC_LOW SBMBC_LOW SYSTEM small_block_mbc_low type DDLL_PROCESS STANDARD_LOW SYSTEM ddll_processes type MONITOR_LH STANDARD_LOW PROCESSES monitor_lh type NLINK_LH STANDARD_LOW PROCESSES nlink_lh @@ -366,6 +359,7 @@ type MONITOR_SH STANDARD_LOW PROCESSES monitor_sh type NLINK_SH STANDARD_LOW PROCESSES nlink_sh type AINFO_REQ STANDARD_LOW SYSTEM alloc_info_request type SCHED_WTIME_REQ STANDARD_LOW SYSTEM sched_wall_time_request +type GC_INFO_REQ STANDARD_LOW SYSTEM gc_info_request +else # "fullword" @@ -383,6 +377,7 @@ type MONITOR_SH FIXED_SIZE PROCESSES monitor_sh type NLINK_SH FIXED_SIZE PROCESSES nlink_sh type AINFO_REQ SHORT_LIVED SYSTEM alloc_info_request type SCHED_WTIME_REQ SHORT_LIVED SYSTEM sched_wall_time_request +type GC_INFO_REQ SHORT_LIVED SYSTEM gc_info_request +endif diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c index 53062979a6..b19e603a5f 100644 --- a/erts/emulator/beam/erl_alloc_util.c +++ b/erts/emulator/beam/erl_alloc_util.c @@ -71,21 +71,16 @@ #define ALLOC_ZERO_EQ_NULL 0 +#ifndef ERTS_MSEG_FLG_2POW +# define ERTS_MSEG_FLG_2POW 0 +#endif +#ifndef ERTS_MSEG_FLG_NONE +# define ERTS_MSEG_FLG_NONE 0 +#endif + static int atoms_initialized = 0; static int initialized = 0; -int erts_have_sbmbc_alloc; - -#if HAVE_ERTS_MSEG - -#define MSEG_UNIT_SHIFT MSEG_ALIGN_BITS -#define MSEG_UNIT_SZ (1 << MSEG_UNIT_SHIFT) -#define MSEG_UNIT_MASK ((~(UWord)0) << MSEG_UNIT_SHIFT) - -#define MSEG_UNIT_FLOOR(X) ((X) & MSEG_UNIT_MASK) -#define 
MSEG_UNIT_CEILING(X) MSEG_UNIT_FLOOR((X) + ~MSEG_UNIT_MASK) - -#endif #define INV_SYS_ALLOC_CARRIER_MASK ((UWord) (sys_alloc_carrier_size - 1)) #define SYS_ALLOC_CARRIER_MASK (~INV_SYS_ALLOC_CARRIER_MASK) @@ -110,13 +105,12 @@ static Uint max_mseg_carriers; #define ONE_GIGA (1000000000) -#define INC_CC(CC) ((CC).no == ONE_GIGA - 1 \ - ? ((CC).giga_no++, (CC).no = 0) \ - : (CC).no++) +#define ERTS_ALC_CC_GIGA_VAL(CC) ((CC) / ONE_GIGA) +#define ERTS_ALC_CC_VAL(CC) ((CC) % ONE_GIGA) + +#define INC_CC(CC) ((CC)++) -#define DEC_CC(CC) ((CC).no == 0 \ - ? ((CC).giga_no--, (CC).no = ONE_GIGA - 1) \ - : (CC).no--) +#define DEC_CC(CC) ((CC)--) /* Multi block carrier (MBC) memory layout in R16: @@ -173,14 +167,6 @@ MBC after deallocating first block: #define SET_BLK_SZ_FTR(B, SZ) \ (((FreeBlkFtr_t *) (((char *) (B)) + (SZ)))[-1] = (SZ)) -#define THIS_FREE_BLK_HDR_FLG (((UWord) 1) << 0) -#define PREV_FREE_BLK_HDR_FLG (((UWord) 1) << 1) -#define LAST_BLK_HDR_FLG (((UWord) 1) << 2) - -/* Special flag combo for (allocated) SBC blocks -*/ -#define SBC_BLK_HDR_FLG (THIS_FREE_BLK_HDR_FLG | PREV_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG) - #define SET_MBC_ABLK_SZ(B, SZ) \ (ASSERT(((SZ) & FLG_MASK) == 0), \ (B)->bhdr = (((B)->bhdr) & ~MBC_ABLK_SZ_MASK) | (SZ)) @@ -222,18 +208,7 @@ MBC after deallocating first block: ASSERT(((UWord)(F) & (~FLG_MASK|THIS_FREE_BLK_HDR_FLG|PREV_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \ (B)->bhdr = ((Sz) | (F)), \ (B)->u.carrier = (C)) - -# define ABLK_TO_MBC(B) \ - (ASSERT(IS_MBC_BLK(B) && IS_ALLOCED_BLK(B)), \ - (Carrier_t*)((MSEG_UNIT_FLOOR((UWord)(B)) - \ - (((B)->bhdr >> MBC_ABLK_OFFSET_SHIFT) << MSEG_UNIT_SHIFT)))) - -# define FBLK_TO_MBC(B) \ - (ASSERT(IS_MBC_BLK(B) && IS_FREE_BLK(B)), \ - (B)->u.carrier) -# define BLK_TO_MBC(B) (IS_FREE_BLK(B) ? FBLK_TO_MBC(B) : ABLK_TO_MBC(B)) - # define IS_MBC_FIRST_ABLK(AP,B) \ ((((UWord)(B) & ~MSEG_UNIT_MASK) == MBC_HEADER_SIZE(AP)) \ && ((B)->bhdr & MBC_ABLK_OFFSET_MASK) == 0) @@ -272,10 +247,6 @@ MBC after deallocating first block: (B)->bhdr = ((Sz) | (F)), \ (B)->carrier = (C)) -# define BLK_TO_MBC(B) ((B)->carrier) -# define ABLK_TO_MBC(B) BLK_TO_MBC(B) -# define FBLK_TO_MBC(B) BLK_TO_MBC(B) - # define IS_MBC_FIRST_BLK(AP,B) \ ((char*)(B) == (char*)((B)->carrier) + MBC_HEADER_SIZE(AP)) # define IS_MBC_FIRST_ABLK(AP,B) IS_MBC_FIRST_BLK(AP,B) @@ -300,8 +271,6 @@ MBC after deallocating first block: ((B)->bhdr & PREV_FREE_BLK_HDR_FLG) #define IS_PREV_BLK_ALLOCED(B) \ (!IS_PREV_BLK_FREE((B))) -#define IS_FREE_BLK(B) \ - (ASSERT(!IS_SBC_BLK(B)), (B)->bhdr & THIS_FREE_BLK_HDR_FLG) #define IS_ALLOCED_BLK(B) \ (!IS_FREE_BLK((B))) #define IS_LAST_BLK(B) \ @@ -318,13 +287,6 @@ MBC after deallocating first block: #define GET_BLK_HDR_FLGS(B) \ ((B)->bhdr & FLG_MASK) -#define IS_SBC_BLK(B) \ - (((B)->bhdr & FLG_MASK) == SBC_BLK_HDR_FLG) -#define IS_MBC_BLK(B) \ - (!IS_SBC_BLK((B))) - -#define MBC_BLK_SZ(B) (IS_FREE_BLK(B) ? MBC_FBLK_SZ(B) : MBC_ABLK_SZ(B)) - #define NXT_BLK(B) \ (ASSERT(IS_MBC_BLK(B)), \ (Block_t *) (((char *) (B)) + MBC_BLK_SZ((B)))) @@ -338,9 +300,96 @@ MBC after deallocating first block: /* Carriers ... */ -#define SBC_HEADER_SIZE (UNIT_CEILING(sizeof(Carrier_t) + ABLK_HDR_SZ) \ - - ABLK_HDR_SZ) -#define MBC_HEADER_SIZE(AP) SBC_HEADER_SIZE +/* #define ERTS_ALC_CPOOL_DEBUG */ + +#if defined(DEBUG) && !defined(ERTS_ALC_CPOOL_DEBUG) +# define ERTS_ALC_CPOOL_DEBUG +#endif + +#ifndef ERTS_SMP +# undef ERTS_ALC_CPOOL_DEBUG +#endif + +#ifdef ERTS_ALC_CPOOL_DEBUG +# define ERTS_ALC_CPOOL_ASSERT(A) \ + ((void) ((A) \ + ? 
1 \ + : (erts_alcu_assert_failed(#A, \ + (char *) __FILE__, \ + __LINE__, \ + (char *) __func__), \ + 0))) +#else +# define ERTS_ALC_CPOOL_ASSERT(A) ((void) 1) +#endif + +#ifdef ERTS_SMP +#define ERTS_ALC_IS_CPOOL_ENABLED(A) ((A)->cpool.util_limit) +#else +#define ERTS_ALC_IS_CPOOL_ENABLED(A) (0) +#endif + +#ifdef ERTS_SMP + +#define ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON 1000 +#define ERTS_ALC_CPOOL_ALLOC_OP_INC 8 +#define ERTS_ALC_CPOOL_FREE_OP_DEC 10 + +#define ERTS_ALC_CPOOL_ALLOC_OP(A) \ +do { \ + if ((A)->cpool.disable_abandon < ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON) { \ + (A)->cpool.disable_abandon += ERTS_ALC_CPOOL_ALLOC_OP_INC; \ + if ((A)->cpool.disable_abandon > ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON) \ + (A)->cpool.disable_abandon = ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON; \ + } \ +} while (0) + + +#if ERTS_ALC_CPOOL_ALLOC_OP_INC >= ERTS_ALC_CPOOL_FREE_OP_DEC +# error "Implementation assume ERTS_ALC_CPOOL_ALLOC_OP_INC < ERTS_ALC_CPOOL_FREE_OP_DEC" +#endif + +#define ERTS_ALC_CPOOL_REALLOC_OP(A) \ +do { \ + if ((A)->cpool.disable_abandon) { \ + (A)->cpool.disable_abandon -= (ERTS_ALC_CPOOL_FREE_OP_DEC \ + - ERTS_ALC_CPOOL_ALLOC_OP_INC); \ + if ((A)->cpool.disable_abandon < 0) \ + (A)->cpool.disable_abandon = 0; \ + } \ +} while (0) + +#define ERTS_ALC_CPOOL_FREE_OP(A) \ +do { \ + if ((A)->cpool.disable_abandon) { \ + (A)->cpool.disable_abandon -= ERTS_ALC_CPOOL_FREE_OP_DEC; \ + if ((A)->cpool.disable_abandon < 0) \ + (A)->cpool.disable_abandon = 0; \ + } \ +} while (0) + +#else +#define ERTS_ALC_CPOOL_ALLOC_OP(A) +#define ERTS_ALC_CPOOL_REALLOC_OP(A) +#define ERTS_ALC_CPOOL_FREE_OP(A) +#endif + +#define ERTS_CRR_ALCTR_FLG_IN_POOL (((erts_aint_t) 1) << 0) +#define ERTS_CRR_ALCTR_FLG_BUSY (((erts_aint_t) 1) << 1) + +#ifdef ERTS_SMP +#define SBC_HEADER_SIZE \ + (UNIT_CEILING(sizeof(Carrier_t) \ + - sizeof(ErtsAlcCPoolData_t) \ + + ABLK_HDR_SZ) \ + - ABLK_HDR_SZ) +#else +#define SBC_HEADER_SIZE \ + (UNIT_CEILING(sizeof(Carrier_t) \ + + ABLK_HDR_SZ) \ + - ABLK_HDR_SZ) +#endif +#define MBC_HEADER_SIZE(AP) ((AP)->mbc_header_size) #define MSEG_CARRIER_HDR_FLAG (((UWord) 1) << 0) @@ -352,7 +401,8 @@ MBC after deallocating first block: #define SCH_SBC SBC_CARRIER_HDR_FLAG #define SET_CARRIER_HDR(C, Sz, F, AP) \ - (ASSERT(((Sz) & FLG_MASK) == 0), (C)->chdr = ((Sz) | (F)), (C)->allctr = (AP)) + (ASSERT(((Sz) & FLG_MASK) == 0), (C)->chdr = ((Sz) | (F)), \ + erts_smp_atomic_init_nob(&(C)->allctr, (erts_aint_t) (AP))) #define BLK_TO_SBC(B) \ ((Carrier_t *) (((char *) (B)) - SBC_HEADER_SIZE)) @@ -385,6 +435,7 @@ MBC after deallocating first block: #define CFLG_FORCE_SYS_ALLOC (1 << 3) #define CFLG_FORCE_SIZE (1 << 4) #define CFLG_MAIN_CARRIER (1 << 5) +#define CFLG_NO_CPOOL (1 << 6) #ifdef ERTS_ALLOC_UTIL_HARD_DEBUG static void check_blk_carrier(Allctr_t *, Block_t *); @@ -412,11 +463,7 @@ static void check_blk_carrier(Allctr_t *, Block_t *); ASSERT(((AP)->mbcs.curr.norm.sys_alloc.no \ && (AP)->mbcs.curr.norm.sys_alloc.size) \ || (!(AP)->mbcs.curr.norm.sys_alloc.no \ - && !(AP)->mbcs.curr.norm.sys_alloc.size)); \ - ASSERT(((AP)->sbmbcs.curr.small_block.no \ - && (AP)->sbmbcs.curr.small_block.size) \ - || (!(AP)->sbmbcs.curr.small_block.no \ - && !(AP)->sbmbcs.curr.small_block.size)) + && !(AP)->mbcs.curr.norm.sys_alloc.size)); #else #define DEBUG_CHECK_CARRIER_NO_SZ(AP) @@ -487,17 +534,6 @@ do { \ + (AP)->mbcs.curr.norm.sys_alloc.size) -#define STAT_SBMBC_ALLOC(AP, CSZ) \ -do { \ - (AP)->sbmbcs.curr.small_block.no++; \ - (AP)->sbmbcs.curr.small_block.size += (CSZ); \ - if ((AP)->sbmbcs.max.no < 
(AP)->sbmbcs.curr.small_block.no) \ - (AP)->sbmbcs.max.no = (AP)->sbmbcs.curr.small_block.no; \ - if ((AP)->sbmbcs.max.size < (AP)->sbmbcs.curr.small_block.size) \ - (AP)->sbmbcs.max.size = (AP)->sbmbcs.curr.small_block.size; \ - DEBUG_CHECK_CARRIER_NO_SZ((AP)); \ -} while (0) - #define STAT_MSEG_MBC_ALLOC(AP, CSZ) \ do { \ (AP)->mbcs.curr.norm.mseg.no++; \ @@ -514,13 +550,19 @@ do { \ DEBUG_CHECK_CARRIER_NO_SZ((AP)); \ } while (0) -#define STAT_SBMBC_FREE(AP, CSZ) \ +#define STAT_MBC_CPOOL_FETCH(AP, CRR) \ do { \ - ASSERT((AP)->sbmbcs.curr.small_block.no > 0); \ - (AP)->sbmbcs.curr.small_block.no--; \ - ASSERT((AP)->sbmbcs.curr.small_block.size >= (CSZ)); \ - (AP)->sbmbcs.curr.small_block.size -= (CSZ); \ - DEBUG_CHECK_CARRIER_NO_SZ((AP)); \ + UWord csz__ = CARRIER_SZ((CRR)); \ + if (IS_MSEG_CARRIER((CRR))) \ + STAT_MSEG_MBC_ALLOC((AP), csz__); \ + else \ + STAT_SYS_ALLOC_MBC_ALLOC((AP), csz__); \ + (AP)->mbcs.blocks.curr.no += (CRR)->cpool.blocks; \ + if ((AP)->mbcs.blocks.max.no < (AP)->mbcs.blocks.curr.no) \ + (AP)->mbcs.blocks.max.no = (AP)->mbcs.blocks.curr.no; \ + (AP)->mbcs.blocks.curr.size += (CRR)->cpool.blocks_size; \ + if ((AP)->mbcs.blocks.max.size < (AP)->mbcs.blocks.curr.size) \ + (AP)->mbcs.blocks.max.size = (AP)->mbcs.blocks.curr.size; \ } while (0) #define STAT_MSEG_MBC_FREE(AP, CSZ) \ @@ -541,28 +583,88 @@ do { \ DEBUG_CHECK_CARRIER_NO_SZ((AP)); \ } while (0) -#define STAT_MBC_BLK_ALLOC(AP, BSZ, FLGS) \ +#define STAT_MBC_CPOOL_INSERT(AP, CRR) \ +do { \ + UWord csz__ = CARRIER_SZ((CRR)); \ + if (IS_MSEG_CARRIER((CRR))) \ + STAT_MSEG_MBC_FREE((AP), csz__); \ + else \ + STAT_SYS_ALLOC_MBC_FREE((AP), csz__); \ + ERTS_ALC_CPOOL_ASSERT((AP)->mbcs.blocks.curr.no \ + >= (CRR)->cpool.blocks); \ + (AP)->mbcs.blocks.curr.no -= (CRR)->cpool.blocks; \ + ERTS_ALC_CPOOL_ASSERT((AP)->mbcs.blocks.curr.size \ + >= (CRR)->cpool.blocks_size); \ + (AP)->mbcs.blocks.curr.size -= (CRR)->cpool.blocks_size; \ +} while (0) + +#ifdef ERTS_SMP +#define STAT_MBC_BLK_ALLOC_CRR(CRR, BSZ) \ +do { \ + (CRR)->cpool.blocks++; \ + (CRR)->cpool.blocks_size += (BSZ); \ +} while (0) +#else +#define STAT_MBC_BLK_ALLOC_CRR(CRR, BSZ) ((void) (CRR)) /* Get rid of warning */ +#endif + +#define STAT_MBC_BLK_ALLOC(AP, CRR, BSZ, FLGS) \ do { \ - CarriersStats_t *cstats__ = (((FLGS) & ERTS_ALCU_FLG_SBMBC) \ - ? 
&(AP)->sbmbcs \ - : &(AP)->mbcs); \ + CarriersStats_t *cstats__ = &(AP)->mbcs; \ cstats__->blocks.curr.no++; \ if (cstats__->blocks.max.no < cstats__->blocks.curr.no) \ cstats__->blocks.max.no = cstats__->blocks.curr.no; \ cstats__->blocks.curr.size += (BSZ); \ if (cstats__->blocks.max.size < cstats__->blocks.curr.size) \ cstats__->blocks.max.size = cstats__->blocks.curr.size; \ + STAT_MBC_BLK_ALLOC_CRR((CRR), (BSZ)); \ } while (0) -#define STAT_MBC_BLK_FREE(AP, BSZ, FLGS) \ +static ERTS_INLINE int +stat_cpool_mbc_blk_free(Allctr_t *allctr, + Carrier_t *crr, + Carrier_t **busy_pcrr_pp, + UWord blksz) +{ +#ifdef ERTS_SMP + + ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks > 0); + crr->cpool.blocks--; + ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks_size >= blksz); + crr->cpool.blocks_size -= blksz; + + if (!busy_pcrr_pp || !*busy_pcrr_pp) + return 0; + + ERTS_ALC_CPOOL_ASSERT(crr == *busy_pcrr_pp); + +#ifdef ERTS_ALC_CPOOL_DEBUG + ERTS_ALC_CPOOL_ASSERT( + erts_atomic_dec_read_nob(&allctr->cpool.stat.no_blocks) >= 0); + ERTS_ALC_CPOOL_ASSERT( + erts_atomic_add_read_nob(&allctr->cpool.stat.blocks_size, + -((erts_aint_t) blksz)) >= 0); +#else + erts_atomic_dec_nob(&allctr->cpool.stat.no_blocks); + erts_atomic_add_nob(&allctr->cpool.stat.blocks_size, + -((erts_aint_t) blksz)); +#endif + + return 1; +#else + return 0; +#endif +} + +#define STAT_MBC_BLK_FREE(AP, CRR, BPCRRPP, BSZ, FLGS) \ do { \ - CarriersStats_t *cstats__ = (((FLGS) & ERTS_ALCU_FLG_SBMBC) \ - ? &(AP)->sbmbcs \ - : &(AP)->mbcs); \ - ASSERT(cstats__->blocks.curr.no > 0); \ - cstats__->blocks.curr.no--; \ - ASSERT(cstats__->blocks.curr.size >= (BSZ)); \ - cstats__->blocks.curr.size -= (BSZ); \ + if (!stat_cpool_mbc_blk_free((AP), (CRR), (BPCRRPP), (BSZ))) { \ + CarriersStats_t *cstats__ = &(AP)->mbcs; \ + ASSERT(cstats__->blocks.curr.no > 0); \ + cstats__->blocks.curr.no--; \ + ASSERT(cstats__->blocks.curr.size >= (BSZ)); \ + cstats__->blocks.curr.size -= (BSZ); \ + } \ } while (0) /* Debug stuff... */ @@ -613,13 +715,42 @@ do { \ #define ERTS_ALCU_DBG_CHK_THR_ACCESS(A) #endif - static void make_name_atoms(Allctr_t *allctr); static Block_t *create_carrier(Allctr_t *, Uint, UWord); -static void destroy_carrier(Allctr_t *, Block_t *); -static void mbc_free(Allctr_t *allctr, void *p); +static void destroy_carrier(Allctr_t *, Block_t *, Carrier_t **); +static void mbc_free(Allctr_t *allctr, void *p, Carrier_t **busy_pcrr_pp); +static void dealloc_block(Allctr_t *, void *, int); + +/* internal data... */ + +#if 0 + +static ERTS_INLINE void * +internal_alloc(UWord size) +{ + void *res = erts_sys_alloc(0, NULL, size); + if (!res) + erts_alloc_enomem(ERTS_ALC_T_UNDEF, size); + return res; +} +static ERTS_INLINE void * +internal_realloc(void *ptr, UWord size) +{ + void *res = erts_sys_realloc(0, NULL, ptr, size); + if (!res) + erts_alloc_enomem(ERTS_ALC_T_UNDEF, size); + return res; +} + +static ERTS_INLINE void +internal_free(void *ptr) +{ + erts_sys_free(0, NULL, ptr); +} + +#endif /* mseg ... 
*/ @@ -781,10 +912,35 @@ unlink_carrier(CarrierList_t *cl, Carrier_t *crr) } } -static Block_t *create_sbmbc(Allctr_t *allctr, Uint umem_sz); -static void destroy_sbmbc(Allctr_t *allctr, Block_t *blk); -static Block_t *create_carrier(Allctr_t *, Uint, UWord); -static void destroy_carrier(Allctr_t *, Block_t *); +#ifdef ERTS_SMP + +static ERTS_INLINE void +clear_busy_pool_carrier(Allctr_t *allctr, Carrier_t *crr) +{ + if (crr) { + erts_aint_t max_size; + erts_aint_t new_val; + + max_size = (erts_aint_t) allctr->largest_fblk_in_mbc(allctr, crr); + erts_atomic_set_nob(&crr->cpool.max_size, max_size); + + new_val = (((erts_aint_t) allctr)|ERTS_CRR_ALCTR_FLG_IN_POOL); + +#ifdef ERTS_ALC_CPOOL_DEBUG + { + erts_aint_t old_val = new_val|ERTS_CRR_ALCTR_FLG_BUSY; + + ERTS_ALC_CPOOL_ASSERT(old_val + == erts_smp_atomic_xchg_relb(&crr->allctr, + new_val)); + } +#else + erts_smp_atomic_set_relb(&crr->allctr, new_val); +#endif + } +} + +#endif #if 0 #define ERTS_DBG_CHK_FIX_LIST(A, FIX, IX, B) \ @@ -806,13 +962,154 @@ chk_fix_list(Allctr_t *allctr, ErtsAlcFixList_t *fix, int ix, int before) #define ERTS_DBG_CHK_FIX_LIST(A, FIX, IX, B) #endif -erts_aint32_t -erts_alcu_fix_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs) +static void *mbc_alloc(Allctr_t *allctr, Uint size); + +#ifdef ERTS_SMP +typedef struct { + ErtsAllctrDDBlock_t ddblock__; /* must be first */ + ErtsAlcType_t fix_type; +} ErtsAllctrFixDDBlock_t; +#endif + +static ERTS_INLINE void +dealloc_fix_block(Allctr_t *allctr, + ErtsAlcType_t type, + void *ptr, + int dec_cc_on_redirect) +{ +#ifdef ERTS_SMP + /* May be redirected... */ + ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type = type; +#endif + dealloc_block(allctr, ptr, dec_cc_on_redirect); +} + +static ERTS_INLINE void +sched_fix_shrink(Allctr_t *allctr, int on) +{ + if (on && !allctr->fix_shrink_scheduled) { + allctr->fix_shrink_scheduled = 1; + erts_set_aux_work_timeout(allctr->ix, + (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM + | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC), + 1); + } + else if (!on && allctr->fix_shrink_scheduled) { + allctr->fix_shrink_scheduled = 0; + erts_set_aux_work_timeout(allctr->ix, + (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM + | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC), + 0); + } +} + +static ERTS_INLINE void +fix_cpool_check_shrink(Allctr_t *allctr, + ErtsAlcType_t type, + ErtsAlcFixList_t *fix, + Carrier_t **busy_pcrr_pp) +{ + if (fix->u.cpool.shrink_list > 0) { + if (fix->list_size == 0) + fix->u.cpool.shrink_list = 0; + else { + void *p; +#ifdef ERTS_SMP + if (busy_pcrr_pp) { + clear_busy_pool_carrier(allctr, *busy_pcrr_pp); + *busy_pcrr_pp = NULL; + } +#endif + fix->u.cpool.shrink_list--; + p = fix->list; + fix->list = *((void **) p); + fix->list_size--; + if (fix->u.cpool.min_list_size > fix->list_size) + fix->u.cpool.min_list_size = fix->list_size; + + fix->u.cpool.allocated--; + dealloc_fix_block(allctr, type, p, 0); + } + } +} + +static ERTS_INLINE void * +fix_cpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size) +{ + void *res; + ErtsAlcFixList_t *fix; + + ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= type + && type <= ERTS_ALC_N_MAX_A_FIXED_SIZE); + + fix = &allctr->fix[type - ERTS_ALC_N_MIN_A_FIXED_SIZE]; + + res = fix->list; + if (res) { + fix->list = *((void **) res); + fix->list_size--; + if (fix->u.cpool.min_list_size > fix->list_size) + fix->u.cpool.min_list_size = fix->list_size; + fix->u.cpool.used++; + fix_cpool_check_shrink(allctr, type, fix, NULL); + return res; + } + if (size < 2*sizeof(UWord)) + size += sizeof(UWord); + if (size >= allctr->sbc_threshold) { + 
Block_t *blk; + blk = create_carrier(allctr, size, CFLG_SBC); + res = blk ? BLK2UMEM(blk) : NULL; + } + else + res = mbc_alloc(allctr, size); + if (res) { + fix->u.cpool.used++; + fix->u.cpool.allocated++; + } + return res; +} + +static ERTS_INLINE void +fix_cpool_free(Allctr_t *allctr, + ErtsAlcType_t type, + void *p, + Carrier_t **busy_pcrr_pp) +{ + ErtsAlcFixList_t *fix; + + ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= type + && type <= ERTS_ALC_N_MAX_A_FIXED_SIZE); + + fix = &allctr->fix[type - ERTS_ALC_N_MIN_A_FIXED_SIZE]; + + fix->u.cpool.used--; + + if ((!busy_pcrr_pp || !*busy_pcrr_pp) + && !fix->u.cpool.shrink_list + && fix->list_size < ERTS_ALCU_FIX_MAX_LIST_SZ) { + *((void **) p) = fix->list; + fix->list = p; + fix->list_size++; + sched_fix_shrink(allctr, 1); + } + else { + Block_t *blk = UMEM2BLK(p); + if (IS_SBC_BLK(blk)) + destroy_carrier(allctr, blk, NULL); + else + mbc_free(allctr, p, busy_pcrr_pp); + fix->u.cpool.allocated--; + fix_cpool_check_shrink(allctr, type, fix, busy_pcrr_pp); + } +} + +static ERTS_INLINE erts_aint32_t +fix_cpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs) { int all_empty = 1; erts_aint32_t res = 0; int ix, o; - ErtsAlcFixList_t *fix = allctr->fix; int flush = flgs == 0; #ifdef USE_THREADS @@ -821,56 +1118,204 @@ erts_alcu_fix_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs) #endif for (ix = 0; ix < ERTS_ALC_NO_FIXED_SIZES; ix++) { + ErtsAlcFixList_t *fix = &allctr->fix[ix]; + ErtsAlcType_t type; ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1); - if (flgs & ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM) { - fix[ix].limit = fix[ix].max_used; - if (fix[ix].limit < fix[ix].used) - fix[ix].limit = fix[ix].used; - fix[ix].max_used = fix[ix].used; - ASSERT(fix[ix].limit >= 0); - - } - if (flush) { - fix[ix].limit = 0; - fix[ix].max_used = fix[ix].used; - ASSERT(fix[ix].limit >= 0); + if (flush) + fix->u.cpool.shrink_list = fix->list_size; + else if (flgs & ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM) { + fix->u.cpool.shrink_list = fix->u.cpool.min_list_size; + fix->u.cpool.min_list_size = fix->list_size; } + type = (ErtsAlcType_t) (ix + ERTS_ALC_N_MIN_A_FIXED_SIZE); for (o = 0; o < ERTS_ALC_FIX_MAX_SHRINK_OPS || flush; o++) { - Block_t *blk; void *ptr; - if (!flush && fix[ix].limit >= fix[ix].allocated) + if (fix->u.cpool.shrink_list == 0) break; - if (fix[ix].list_size == 0) + if (fix->list_size == 0) { + fix->u.cpool.shrink_list = 0; break; - ptr = fix[ix].list; - fix[ix].list = *((void **) ptr); - fix[ix].list_size--; + } + ptr = fix->list; + fix->list = *((void **) ptr); + fix->list_size--; + fix->u.cpool.shrink_list--; + fix->u.cpool.allocated--; + dealloc_fix_block(allctr, type, ptr, 0); + } + if (fix->u.cpool.min_list_size > fix->list_size) + fix->u.cpool.min_list_size = fix->list_size; + if (fix->list_size != 0) { + if (fix->u.cpool.shrink_list > 0) + res |= ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC; + all_empty = 0; + } + } + + if (all_empty) + sched_fix_shrink(allctr, 0); + +#ifdef USE_THREADS + if (allctr->thread_safe) + erts_mtx_unlock(&allctr->mutex); +#endif - blk = UMEM2BLK(ptr); + return res; +} +static ERTS_INLINE void * +fix_nocpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size) +{ + ErtsAlcFixList_t *fix; + void *res; + + ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= type + && type <= ERTS_ALC_N_MAX_A_FIXED_SIZE); + + fix = &allctr->fix[type - ERTS_ALC_N_MIN_A_FIXED_SIZE]; + + ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1); + fix->u.nocpool.used++; + res = fix->list; + if (res) { + fix->list_size--; + fix->list = *((void **) res); + if (fix->list && 
fix->u.nocpool.allocated > fix->u.nocpool.limit) { + Block_t *blk; + void *p = fix->list; + fix->list = *((void **) p); + fix->list_size--; + blk = UMEM2BLK(p); if (IS_SBC_BLK(blk)) - destroy_carrier(allctr, blk); + destroy_carrier(allctr, blk, NULL); else - mbc_free(allctr, ptr); + mbc_free(allctr, p, NULL); + fix->u.nocpool.allocated--; + } + ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0); + return res; + } + if (size < 2*sizeof(UWord)) + size += sizeof(UWord); + if (fix->u.nocpool.limit < fix->u.nocpool.used) + fix->u.nocpool.limit = fix->u.nocpool.used; + if (fix->u.nocpool.max_used < fix->u.nocpool.used) + fix->u.nocpool.max_used = fix->u.nocpool.used; + fix->u.nocpool.allocated++; - fix[ix].allocated--; + if (size >= allctr->sbc_threshold) { + Block_t *blk; + blk = create_carrier(allctr, size, CFLG_SBC); + res = blk ? BLK2UMEM(blk) : NULL; + } + else + res = mbc_alloc(allctr, size); + + if (!res) { + fix->u.nocpool.allocated--; + fix->u.nocpool.used--; + } + return res; +} + +static ERTS_INLINE void +fix_nocpool_free(Allctr_t *allctr, + ErtsAlcType_t type, + void *p) +{ + Block_t *blk; + ErtsAlcFixList_t *fix; + + ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= type + && type <= ERTS_ALC_N_MAX_A_FIXED_SIZE); + + fix = &allctr->fix[type - ERTS_ALC_N_MIN_A_FIXED_SIZE]; + + ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1); + fix->u.nocpool.used--; + if (fix->u.nocpool.allocated < fix->u.nocpool.limit + && fix->list_size < ERTS_ALCU_FIX_MAX_LIST_SZ) { + *((void **) p) = fix->list; + fix->list = p; + fix->list_size++; + sched_fix_shrink(allctr, 1); + ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0); + return; + } + fix->u.nocpool.allocated--; + if (fix->list && fix->u.nocpool.allocated > fix->u.nocpool.limit) { + blk = UMEM2BLK(p); + if (IS_SBC_BLK(blk)) + destroy_carrier(allctr, blk, NULL); + else + mbc_free(allctr, p, NULL); + p = fix->list; + fix->list = *((void **) p); + fix->list_size--; + fix->u.nocpool.allocated--; + } + + blk = UMEM2BLK(p); + if (IS_SBC_BLK(blk)) + destroy_carrier(allctr, blk, NULL); + else + mbc_free(allctr, p, NULL); + ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0); +} + +static ERTS_INLINE erts_aint32_t +fix_nocpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs) +{ + int all_empty = 1; + erts_aint32_t res = 0; + int ix, o; + int flush = flgs == 0; + +#ifdef USE_THREADS + if (allctr->thread_safe) + erts_mtx_lock(&allctr->mutex); +#endif + + for (ix = 0; ix < ERTS_ALC_NO_FIXED_SIZES; ix++) { + ErtsAlcFixList_t *fix = &allctr->fix[ix]; + ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1); + if (flgs & ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM) { + fix->u.nocpool.limit = fix->u.nocpool.max_used; + if (fix->u.nocpool.limit < fix->u.nocpool.used) + fix->u.nocpool.limit = fix->u.nocpool.used; + fix->u.nocpool.max_used = fix->u.nocpool.used; + ASSERT(fix->u.nocpool.limit >= 0); + + } + if (flush) { + fix->u.nocpool.limit = 0; + fix->u.nocpool.max_used = fix->u.nocpool.used; + ASSERT(fix->u.nocpool.limit >= 0); } - if (fix[ix].list_size != 0) { - if (fix[ix].limit < fix[ix].allocated) + for (o = 0; o < ERTS_ALC_FIX_MAX_SHRINK_OPS || flush; o++) { + void *ptr; + + if (!flush && fix->u.nocpool.limit >= fix->u.nocpool.allocated) + break; + if (fix->list_size == 0) + break; + ptr = fix->list; + fix->list = *((void **) ptr); + fix->list_size--; + dealloc_block(allctr, ptr, 0); + fix->u.nocpool.allocated--; + } + if (fix->list_size != 0) { + if (fix->u.nocpool.limit < fix->u.nocpool.allocated) res |= ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC; all_empty = 0; } ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0); } - if (all_empty && 
allctr->fix_shrink_scheduled) { - allctr->fix_shrink_scheduled = 0; - erts_set_aux_work_timeout(allctr->ix, - (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM - | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC), - 0); - } + if (all_empty) + sched_fix_shrink(allctr, 0); #ifdef USE_THREADS if (allctr->thread_safe) @@ -880,13 +1325,18 @@ erts_alcu_fix_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs) return res; } -#ifdef ERTS_SMP +erts_aint32_t +erts_alcu_fix_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs) +{ + if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) + return fix_cpool_alloc_shrink(allctr, flgs); + else + return fix_nocpool_alloc_shrink(allctr, flgs); +} -typedef struct { - ErtsAllctrDDBlock_t ddblock__; /* must be first */ - ErtsAlcType_t fix_type; -}ErtsAllctrFixDDBlock_t; +static void dealloc_carrier(Allctr_t *allctr, Carrier_t *crr, Uint mseg_flags); +#ifdef ERTS_SMP static ERTS_INLINE Allctr_t* get_pref_allctr(void *extra) @@ -902,6 +1352,9 @@ get_pref_allctr(void *extra) return tspec->allctr[pref_ix]; } +#define ERTS_ALC_TS_PREF_LOCK_IF_USED (1) +#define ERTS_ALC_TS_PREF_LOCK_NO (0) + /* SMP note: * get_used_allctr() must be safe WITHOUT locking the allocator while * concurrent threads may be updating adjacent blocks. @@ -910,22 +1363,85 @@ get_pref_allctr(void *extra) * the "PREV_FREE" flag bit. */ static ERTS_INLINE Allctr_t* -get_used_allctr(void *extra, void *p, UWord *sizep) +get_used_allctr(Allctr_t *pref_allctr, int pref_lock, void *p, UWord *sizep, + Carrier_t **busy_pcrr_pp) { Block_t* blk = UMEM2BLK(p); - Carrier_t* crr; + Carrier_t *crr; + erts_aint_t iallctr; + Allctr_t *used_allctr; + + *busy_pcrr_pp = NULL; if (IS_SBC_BLK(blk)) { crr = BLK_TO_SBC(blk); if (sizep) *sizep = SBC_BLK_SZ(blk) - ABLK_HDR_SZ; + iallctr = erts_smp_atomic_read_dirty(&crr->allctr); } else { crr = ABLK_TO_MBC(blk); + if (sizep) *sizep = MBC_ABLK_SZ(blk) - ABLK_HDR_SZ; + if (!ERTS_ALC_IS_CPOOL_ENABLED(pref_allctr)) + iallctr = erts_smp_atomic_read_dirty(&crr->allctr); + else { + int locked_pref_allctr = 0; + iallctr = erts_smp_atomic_read_ddrb(&crr->allctr); + + if (ERTS_ALC_TS_PREF_LOCK_IF_USED == pref_lock + && pref_allctr->thread_safe) { + used_allctr = (Allctr_t *) (iallctr & ~FLG_MASK); + if (pref_allctr == used_allctr) { + erts_mtx_lock(&pref_allctr->mutex); + locked_pref_allctr = 1; + } + } + + while ((iallctr & ((~FLG_MASK)|ERTS_CRR_ALCTR_FLG_IN_POOL)) + == (((erts_aint_t) pref_allctr)|ERTS_CRR_ALCTR_FLG_IN_POOL)) { + erts_aint_t act; + + ERTS_ALC_CPOOL_ASSERT(!(iallctr & ERTS_CRR_ALCTR_FLG_BUSY)); + act = erts_smp_atomic_cmpxchg_ddrb(&crr->allctr, + iallctr|ERTS_CRR_ALCTR_FLG_BUSY, + iallctr); + if (act == iallctr) { + *busy_pcrr_pp = crr; + break; + } + iallctr = act; + } + + used_allctr = (Allctr_t *) (iallctr & ~FLG_MASK); + + if (ERTS_ALC_TS_PREF_LOCK_IF_USED == pref_lock) { + if (locked_pref_allctr && used_allctr != pref_allctr) { + /* Was taken out of pool; now owned by someone else */ + erts_mtx_unlock(&pref_allctr->mutex); + } + } + + ERTS_ALC_CPOOL_ASSERT( + (((iallctr & ~FLG_MASK) == (erts_aint_t) pref_allctr) + ? 
(((iallctr & FLG_MASK) == ERTS_CRR_ALCTR_FLG_IN_POOL) + || ((iallctr & FLG_MASK) == 0)) + : 1)); + + return used_allctr; + } + } + + used_allctr = (Allctr_t *) (iallctr & ~FLG_MASK); + + if (ERTS_ALC_TS_PREF_LOCK_IF_USED == pref_lock + && used_allctr == pref_allctr + && pref_allctr->thread_safe) { + erts_mtx_lock(&pref_allctr->mutex); } - return crr->allctr; + + return used_allctr; } static void @@ -1020,7 +1536,7 @@ check_insert_marker(ErtsAllctrDDQueue_t *ddq, erts_aint_t ilast) } static ERTS_INLINE int -ddq_enqueue(ErtsAlcType_t type, ErtsAllctrDDQueue_t *ddq, void *ptr, int cinit) +ddq_enqueue(ErtsAllctrDDQueue_t *ddq, void *ptr, int cinit) { int last_elem; int um_refc_ix = 0; @@ -1121,6 +1637,59 @@ store_earliest_thr_prgr(ErtsThrPrgrVal *prev_val, ErtsAllctrDDQueue_t *ddq) } } +static void +check_pending_dealloc_carrier(Allctr_t *allctr, + int *need_thr_progress, + ErtsThrPrgrVal *thr_prgr_p, + int *need_more_work); + +static void +handle_delayed_fix_dealloc(Allctr_t *allctr, void *ptr) +{ + ErtsAlcType_t type; + + type = ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type; + + ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= type + && type <= ERTS_ALC_N_MAX_A_FIXED_SIZE); + + if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr)) + fix_nocpool_free(allctr, type, ptr); + else { + Block_t *blk = UMEM2BLK(ptr); + Carrier_t *busy_pcrr_p; + Allctr_t *used_allctr; + + if (IS_SBC_BLK(blk)) { + busy_pcrr_p = NULL; + goto doit; + } + + used_allctr = get_used_allctr(allctr, ERTS_ALC_TS_PREF_LOCK_NO, ptr, + NULL, &busy_pcrr_p); + if (used_allctr == allctr) { + doit: + fix_cpool_free(allctr, type, ptr, &busy_pcrr_p); + clear_busy_pool_carrier(allctr, busy_pcrr_p); + } + else { + /* Carrier migrated; need to redirect block to new owner... */ + int cinit = used_allctr->dd.ix - allctr->dd.ix; + + ERTS_ALC_CPOOL_ASSERT(!busy_pcrr_p); + + DEC_CC(allctr->calls.this_free); + + ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type = type; + if (ddq_enqueue(&used_allctr->dd.q, ptr, cinit)) + erts_alloc_notify_delayed_dealloc(used_allctr->ix); + } + } +} + +static void +schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr); + static ERTS_INLINE int handle_delayed_dealloc(Allctr_t *allctr, int allctr_locked, @@ -1152,7 +1721,6 @@ handle_delayed_dealloc(Allctr_t *allctr, while (1) { Block_t *blk; void *ptr; - int ix; if (use_limit && ++ops > ops_limit) { if (ddq->head.first != ddq->head.unref_end) { @@ -1181,52 +1749,29 @@ handle_delayed_dealloc(Allctr_t *allctr, res = 1; - INC_CC(allctr->calls.this_free); - - if (fix) { - ErtsAlcType_t type; - - type = ((ErtsAllctrFixDDBlock_t*) ptr)->fix_type; - ix = type - ERTS_ALC_N_MIN_A_FIXED_SIZE; - ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1); - fix[ix].used--; - if (fix[ix].allocated < fix[ix].limit - && fix[ix].list_size < ERTS_ALCU_FIX_MAX_LIST_SZ) { - *((void **) ptr) = fix[ix].list; - fix[ix].list = ptr; - fix[ix].list_size++; - if (!allctr->fix_shrink_scheduled) { - allctr->fix_shrink_scheduled = 1; - erts_set_aux_work_timeout( - allctr->ix, - (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM - | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC), - 1); - } - ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0); - continue; - } - fix[ix].allocated--; - if (fix[ix].list && fix[ix].allocated > fix[ix].limit) { - blk = UMEM2BLK(ptr); - if (IS_SBC_BLK(blk)) - destroy_carrier(allctr, blk); - else - mbc_free(allctr, ptr); - ptr = fix[ix].list; - fix[ix].list = *((void **) ptr); - fix[ix].list_size--; - fix[ix].allocated--; - } + blk = UMEM2BLK(ptr); + if (IS_FREE_LAST_MBC_BLK(blk)) { + /* + * A multiblock carrier that previously has been 
migrated away + * from us and now is back to be deallocated... + * + * Note that we cannot use FBLK_TO_MBC(blk) since it + * data has been overwritten by the queue. + */ + Carrier_t *crr = FIRST_BLK_TO_MBC(allctr, blk); + ERTS_ALC_CPOOL_ASSERT(ERTS_ALC_IS_CPOOL_ENABLED(allctr)); + ERTS_ALC_CPOOL_ASSERT(allctr == crr->cpool.orig_allctr); + schedule_dealloc_carrier(allctr, crr); } + else { - blk = UMEM2BLK(ptr); + INC_CC(allctr->calls.this_free); - if (IS_SBC_BLK(blk)) - destroy_carrier(allctr, blk); - else - mbc_free(allctr, ptr); - ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0); + if (fix) + handle_delayed_fix_dealloc(allctr, ptr); + else + dealloc_block(allctr, ptr, 1); + } } if (need_thr_progress && !(need_thr_prgr | need_mr_wrk)) { @@ -1236,6 +1781,12 @@ handle_delayed_dealloc(Allctr_t *allctr, store_earliest_thr_prgr(thr_prgr_p, ddq); } + if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) + check_pending_dealloc_carrier(allctr, + need_thr_progress, + thr_prgr_p, + need_more_work); + if (allctr->thread_safe && !allctr_locked) erts_mtx_unlock(&allctr->mutex); return res; @@ -1250,13 +1801,59 @@ enqueue_dealloc_other_instance(ErtsAlcType_t type, if (allctr->fix) ((ErtsAllctrFixDDBlock_t*) ptr)->fix_type = type; - if (ddq_enqueue(type, &allctr->dd.q, ptr, cinit)) + if (ddq_enqueue(&allctr->dd.q, ptr, cinit)) erts_alloc_notify_delayed_dealloc(allctr->ix); } #endif #ifdef ERTS_SMP +static void +set_new_allctr_abandon_limit(Allctr_t *allctr); +static void +abandon_carrier(Allctr_t *allctr, Carrier_t *crr); + + +static ERTS_INLINE void +check_abandon_carrier(Allctr_t *allctr, Block_t *fblk, Carrier_t **busy_pcrr_pp) +{ + Carrier_t *crr; + + if (busy_pcrr_pp && *busy_pcrr_pp) + return; + + if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr)) + return; + + allctr->cpool.check_limit_count--; + if (--allctr->cpool.check_limit_count <= 0) + set_new_allctr_abandon_limit(allctr); + + if (!erts_thr_progress_is_managed_thread()) + return; + + if (allctr->cpool.disable_abandon) + return; + + if (allctr->mbcs.blocks.curr.size > allctr->cpool.abandon_limit) + return; + + + crr = FBLK_TO_MBC(fblk); + + if (allctr->main_carrier == crr) + return; + + if (crr->cpool.blocks_size > crr->cpool.abandon_limit) + return; + + if (crr->cpool.thr_prgr != ERTS_THR_PRGR_INVALID + && !erts_thr_progress_has_reached(crr->cpool.thr_prgr)) + return; + + abandon_carrier(allctr, crr); +} + void erts_alcu_check_delayed_dealloc(Allctr_t *allctr, int limit, @@ -1274,78 +1871,88 @@ erts_alcu_check_delayed_dealloc(Allctr_t *allctr, } #endif -#define ERTS_ALCU_HANDLE_DD_IN_OP(Allctr, Locked) \ - handle_delayed_dealloc((Allctr), (Locked), 1, \ +#define ERTS_ALCU_HANDLE_DD_IN_OP(Allctr, Locked) \ + handle_delayed_dealloc((Allctr), (Locked), 1, \ ERTS_ALCU_DD_OPS_LIM_LOW, NULL, NULL, NULL) +static void +dealloc_block(Allctr_t *allctr, void *ptr, int dec_cc_on_redirect) +{ + Block_t *blk = UMEM2BLK(ptr); + + ERTS_SMP_LC_ASSERT(!allctr->thread_safe + || erts_lc_mtx_is_locked(&allctr->mutex)); + + if (IS_SBC_BLK(blk)) + destroy_carrier(allctr, blk, NULL); +#ifndef ERTS_SMP + else + mbc_free(allctr, ptr, NULL); +#else + else if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr)) + mbc_free(allctr, ptr, NULL); + else { + Carrier_t *busy_pcrr_p; + Allctr_t *used_allctr; + used_allctr = get_used_allctr(allctr, ERTS_ALC_TS_PREF_LOCK_NO, ptr, + NULL, &busy_pcrr_p); + if (used_allctr == allctr) { + mbc_free(allctr, ptr, &busy_pcrr_p); + clear_busy_pool_carrier(allctr, busy_pcrr_p); + } + else { + /* Carrier migrated; need to redirect block to new owner... 
*/ + int cinit = used_allctr->dd.ix - allctr->dd.ix; + + ERTS_ALC_CPOOL_ASSERT(!busy_pcrr_p); + + if (dec_cc_on_redirect) + DEC_CC(allctr->calls.this_free); + if (ddq_enqueue(&used_allctr->dd.q, ptr, cinit)) + erts_alloc_notify_delayed_dealloc(used_allctr->ix); + } + } +#endif +} + /* Multi block carrier alloc/realloc/free ... */ /* NOTE! mbc_alloc() may in case of memory shortage place the requested * block in a sbc. */ static ERTS_INLINE void * -mbc_alloc_block(Allctr_t *allctr, Uint size, Uint *blk_szp, Uint32 *alcu_flgsp) +mbc_alloc_block(Allctr_t *allctr, Uint size, Uint *blk_szp) { Block_t *blk; Uint get_blk_sz; - Uint sbmbct; ASSERT(size); ASSERT(size < allctr->sbc_threshold); *blk_szp = get_blk_sz = UMEMSZ2BLKSZ(allctr, size); - sbmbct = allctr->sbmbc_threshold; - if (sbmbct) { - if (get_blk_sz < sbmbct) { - *alcu_flgsp |= ERTS_ALCU_FLG_SBMBC; - if (get_blk_sz + allctr->min_block_size > sbmbct) { - /* Since we use block size to determine if blocks are - located in sbmbc or not... */ - get_blk_sz += allctr->min_block_size; - } - } - } - -#ifdef ERTS_SMP - if (allctr->dd.use) - ERTS_ALCU_HANDLE_DD_IN_OP(allctr, 1); -#endif - - blk = (*allctr->get_free_block)(allctr, get_blk_sz, NULL, 0, *alcu_flgsp); - -#ifdef ERTS_SMP - if (!blk && allctr->dd.use) { - if (ERTS_ALCU_HANDLE_DD_IN_OP(allctr, 1)) - blk = (*allctr->get_free_block)(allctr, get_blk_sz, NULL, 0, - *alcu_flgsp); - } -#endif + blk = (*allctr->get_free_block)(allctr, get_blk_sz, NULL, 0); if (!blk) { - if ((*alcu_flgsp) & ERTS_ALCU_FLG_SBMBC) - blk = create_sbmbc(allctr, get_blk_sz); - else { - blk = create_carrier(allctr, get_blk_sz, CFLG_MBC); + blk = create_carrier(allctr, get_blk_sz, CFLG_MBC); #if !HALFWORD_HEAP && !HAVE_SUPER_ALIGNED_MB_CARRIERS - if (!blk) { - /* Emergency! We couldn't create the carrier as we wanted. - Try to place it in a sys_alloced sbc. */ - blk = create_carrier(allctr, - size, - (CFLG_SBC - | CFLG_FORCE_SIZE - | CFLG_FORCE_SYS_ALLOC)); - } -#endif + if (!blk) { + /* Emergency! We couldn't create the carrier as we wanted. + Try to place it in a sys_alloced sbc. 
*/ + blk = create_carrier(allctr, + size, + (CFLG_SBC + | CFLG_FORCE_SIZE + | CFLG_FORCE_SYS_ALLOC)); } +#endif } #ifdef ERTS_ALLOC_UTIL_HARD_DEBUG if (IS_MBC_BLK(blk)) { - (*allctr->link_free_block)(allctr, blk, *alcu_flgsp); + (*allctr->link_free_block)(allctr, blk); HARD_CHECK_BLK_CARRIER(allctr, blk); - (*allctr->unlink_free_block)(allctr, blk, *alcu_flgsp); + (*allctr->unlink_free_block)(allctr, blk); } #endif @@ -1359,8 +1966,7 @@ mbc_alloc_finalize(Allctr_t *allctr, UWord flags, Carrier_t *crr, Uint want_blk_sz, - int valid_blk_info, - Uint32 alcu_flgs) + int valid_blk_info) { Uint blk_sz; Uint nxt_blk_sz; @@ -1392,7 +1998,7 @@ mbc_alloc_finalize(Allctr_t *allctr, SET_PREV_BLK_FREE(allctr, nxt_nxt_blk); } } - (*allctr->link_free_block)(allctr, nxt_blk, alcu_flgs); + (*allctr->link_free_block)(allctr, nxt_blk); ASSERT(IS_NOT_LAST_BLK(blk)); ASSERT(IS_FREE_BLK(nxt_blk)); @@ -1433,7 +2039,8 @@ mbc_alloc_finalize(Allctr_t *allctr, ASSERT(ABLK_TO_MBC(blk) == crr); } - STAT_MBC_BLK_ALLOC(allctr, blk_sz, alcu_flgs); + ERTS_ALC_CPOOL_ALLOC_OP(allctr); + STAT_MBC_BLK_ALLOC(allctr, crr, blk_sz, alcu_flgs); ASSERT(IS_ALLOCED_BLK(blk)); ASSERT(blk_sz == MBC_BLK_SZ(blk)); @@ -1453,8 +2060,7 @@ mbc_alloc(Allctr_t *allctr, Uint size) { Block_t *blk; Uint blk_sz; - Uint32 alcu_flgs = 0; - blk = mbc_alloc_block(allctr, size, &blk_sz, &alcu_flgs); + blk = mbc_alloc_block(allctr, size, &blk_sz); if (!blk) return NULL; if (IS_MBC_BLK(blk)) @@ -1464,35 +2070,34 @@ mbc_alloc(Allctr_t *allctr, Uint size) GET_BLK_HDR_FLGS(blk), FBLK_TO_MBC(blk), blk_sz, - 1, - alcu_flgs); + 1); return BLK2UMEM(blk); } static void -mbc_free(Allctr_t *allctr, void *p) +mbc_free(Allctr_t *allctr, void *p, Carrier_t **busy_pcrr_pp) { Uint is_first_blk; Uint is_last_blk; - Uint32 alcu_flgs = 0; Uint blk_sz; Block_t *blk; Block_t *nxt_blk; - + Carrier_t *crr; ASSERT(p); blk = UMEM2BLK(p); blk_sz = MBC_ABLK_SZ(blk); - if (blk_sz < allctr->sbmbc_threshold) - alcu_flgs |= ERTS_ALCU_FLG_SBMBC; ASSERT(IS_MBC_BLK(blk)); ASSERT(blk_sz >= allctr->min_block_size); HARD_CHECK_BLK_CARRIER(allctr, blk); - STAT_MBC_BLK_FREE(allctr, blk_sz, alcu_flgs); + crr = ABLK_TO_MBC(blk); + + ERTS_ALC_CPOOL_FREE_OP(allctr); + STAT_MBC_BLK_FREE(allctr, crr, busy_pcrr_pp, blk_sz, alcu_flgs); is_first_blk = IS_MBC_FIRST_ABLK(allctr, blk); is_last_blk = IS_LAST_BLK(blk); @@ -1501,7 +2106,7 @@ mbc_free(Allctr_t *allctr, void *p) ASSERT(!is_first_blk); /* Coalesce with previous block... */ blk = PREV_BLK(blk); - (*allctr->unlink_free_block)(allctr, blk, alcu_flgs); + (*allctr->unlink_free_block)(allctr, blk); blk_sz += MBC_FBLK_SZ(blk); is_first_blk = IS_MBC_FIRST_FBLK(allctr, blk); @@ -1517,7 +2122,7 @@ mbc_free(Allctr_t *allctr, void *p) nxt_blk = BLK_AFTER(blk, blk_sz); if (IS_FREE_BLK(nxt_blk)) { /* Coalesce with next block... 
*/ - (*allctr->unlink_free_block)(allctr, nxt_blk, alcu_flgs); + (*allctr->unlink_free_block)(allctr, nxt_blk); blk_sz += MBC_FBLK_SZ(nxt_blk); SET_MBC_FBLK_SZ(blk, blk_sz); @@ -1550,19 +2155,20 @@ mbc_free(Allctr_t *allctr, void *p) if (is_first_blk && is_last_blk && allctr->main_carrier != FIRST_BLK_TO_MBC(allctr, blk)) { - if (alcu_flgs & ERTS_ALCU_FLG_SBMBC) - destroy_sbmbc(allctr, blk); - else - destroy_carrier(allctr, blk); + destroy_carrier(allctr, blk, busy_pcrr_pp); } else { - (*allctr->link_free_block)(allctr, blk, alcu_flgs); + (*allctr->link_free_block)(allctr, blk); HARD_CHECK_BLK_CARRIER(allctr, blk); +#ifdef ERTS_SMP + check_abandon_carrier(allctr, blk, busy_pcrr_pp); +#endif } } static void * -mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) +mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs, + Carrier_t **busy_pcrr_pp) { void *new_p; Uint old_blk_sz; @@ -1576,11 +2182,6 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) Uint is_last_blk; #endif /* #ifndef MBC_REALLOC_ALWAYS_MOVES */ -#ifdef ERTS_SMP - if (allctr->dd.use) - ERTS_ALCU_HANDLE_DD_IN_OP(allctr, 1); -#endif - ASSERT(p); ASSERT(size); ASSERT(size < allctr->sbc_threshold); @@ -1594,13 +2195,13 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) if (alcu_flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE) return NULL; #else /* !MBC_REALLOC_ALWAYS_MOVES */ + +#ifdef ERTS_SMP + if (busy_pcrr_pp && *busy_pcrr_pp) + goto realloc_move; /* Don't want to use carrier in pool */ +#endif + get_blk_sz = blk_sz = UMEMSZ2BLKSZ(allctr, size); - if ((alcu_flgs & ERTS_ALCU_FLG_SBMBC) - && (blk_sz + allctr->min_block_size > allctr->sbmbc_threshold)) { - /* Since we use block size to determine if blocks are - located in sbmbc or not... */ - get_blk_sz = blk_sz + allctr->min_block_size; - } ASSERT(IS_ALLOCED_BLK(blk)); ASSERT(IS_MBC_BLK(blk)); @@ -1648,8 +2249,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) new_blk = (*allctr->get_free_block)(allctr, get_blk_sz, cand_blk, - cand_blk_sz, - alcu_flgs); + cand_blk_sz); if (new_blk || cand_blk != blk) goto move_into_new_blk; } @@ -1671,8 +2271,11 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) nxt_blk = BLK_AFTER(blk, blk_sz); - STAT_MBC_BLK_FREE(allctr, old_blk_sz, alcu_flgs); - STAT_MBC_BLK_ALLOC(allctr, blk_sz, alcu_flgs); + crr = ABLK_TO_MBC(blk); + + ERTS_ALC_CPOOL_REALLOC_OP(allctr); + STAT_MBC_BLK_FREE(allctr, crr, NULL, old_blk_sz, alcu_flgs); + STAT_MBC_BLK_ALLOC(allctr, crr, blk_sz, alcu_flgs); ASSERT(MBC_BLK_SZ(blk) >= allctr->min_block_size); @@ -1680,7 +2283,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) if (IS_FREE_BLK(nxt_nxt_blk)) { /* Coalesce with next free block... */ nxt_blk_sz += MBC_FBLK_SZ(nxt_nxt_blk); - (*allctr->unlink_free_block)(allctr, nxt_nxt_blk, alcu_flgs); + (*allctr->unlink_free_block)(allctr, nxt_nxt_blk); is_last_blk = GET_LAST_BLK_HDR_FLG(nxt_nxt_blk); } @@ -1690,12 +2293,11 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) SET_BLK_SZ_FTR(nxt_blk, nxt_blk_sz); } - crr = ABLK_TO_MBC(blk); SET_MBC_FBLK_HDR(nxt_blk, nxt_blk_sz, SBH_THIS_FREE | (is_last_blk ? 
SBH_LAST_BLK : 0), crr); - (*allctr->link_free_block)(allctr, nxt_blk, alcu_flgs); + (*allctr->link_free_block)(allctr, nxt_blk); ASSERT(IS_ALLOCED_BLK(blk)); @@ -1718,6 +2320,10 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) HARD_CHECK_BLK_CARRIER(allctr, blk); +#ifdef ERTS_SMP + check_abandon_carrier(allctr, nxt_blk, NULL); +#endif + return p; } @@ -1727,11 +2333,12 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) nxt_blk = BLK_AFTER(blk, old_blk_sz); nxt_blk_sz = MBC_BLK_SZ(nxt_blk); if (IS_FREE_BLK(nxt_blk) && get_blk_sz <= old_blk_sz + nxt_blk_sz) { + Carrier_t* crr = ABLK_TO_MBC(blk); /* Grow into next block... */ HARD_CHECK_BLK_CARRIER(allctr, blk); - (*allctr->unlink_free_block)(allctr, nxt_blk, alcu_flgs); + (*allctr->unlink_free_block)(allctr, nxt_blk); nxt_blk_sz -= blk_sz - old_blk_sz; is_last_blk = IS_LAST_BLK(nxt_blk); @@ -1756,7 +2363,6 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) } } else { - Carrier_t* crr = ABLK_TO_MBC(blk); SET_MBC_ABLK_SZ(blk, blk_sz); nxt_blk = BLK_AFTER(blk, blk_sz); @@ -1767,15 +2373,15 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) else SET_BLK_SZ_FTR(nxt_blk, nxt_blk_sz); - (*allctr->link_free_block)(allctr, nxt_blk, alcu_flgs); + (*allctr->link_free_block)(allctr, nxt_blk); ASSERT(IS_FREE_BLK(nxt_blk)); ASSERT(FBLK_TO_MBC(nxt_blk) == crr); } - STAT_MBC_BLK_FREE(allctr, old_blk_sz, alcu_flgs); - STAT_MBC_BLK_ALLOC(allctr, blk_sz, alcu_flgs); - + ERTS_ALC_CPOOL_REALLOC_OP(allctr); + STAT_MBC_BLK_FREE(allctr, crr, NULL, old_blk_sz, alcu_flgs); + STAT_MBC_BLK_ALLOC(allctr, crr, blk_sz, alcu_flgs); ASSERT(IS_ALLOCED_BLK(blk)); ASSERT(blk_sz == MBC_BLK_SZ(blk)); @@ -1828,13 +2434,16 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) if (cand_blk_sz < get_blk_sz) { /* We wont fit in cand_blk get a new one */ +#ifdef ERTS_SMP + realloc_move: +#endif #endif /* !MBC_REALLOC_ALWAYS_MOVES */ new_p = mbc_alloc(allctr, size); if (!new_p) return NULL; sys_memcpy(new_p, p, MIN(size, old_blk_sz - ABLK_HDR_SZ)); - mbc_free(allctr, p); + mbc_free(allctr, p, busy_pcrr_pp); return new_p; @@ -1847,8 +2456,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) new_blk = (*allctr->get_free_block)(allctr, get_blk_sz, cand_blk, - cand_blk_sz, - alcu_flgs); + cand_blk_sz); move_into_new_blk: /* * new_blk, and cand_blk have to be correctly set @@ -1862,11 +2470,10 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) GET_BLK_HDR_FLGS(new_blk), FBLK_TO_MBC(new_blk), blk_sz, - 1, - alcu_flgs); + 1); new_p = BLK2UMEM(new_blk); sys_memcpy(new_p, p, MIN(size, old_blk_sz - ABLK_HDR_SZ)); - mbc_free(allctr, p); + mbc_free(allctr, p, NULL); return new_p; } else { @@ -1886,7 +2493,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) HARD_CHECK_BLK_CARRIER(allctr, blk); - (*allctr->unlink_free_block)(allctr, new_blk, alcu_flgs); /* prev */ + (*allctr->unlink_free_block)(allctr, new_blk); /* prev */ if (is_last_blk) new_blk_flgs |= LAST_BLK_HDR_FLG; @@ -1895,7 +2502,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) if (IS_FREE_BLK(nxt_blk)) { new_blk_flgs |= GET_LAST_BLK_HDR_FLG(nxt_blk); new_blk_sz += MBC_FBLK_SZ(nxt_blk); - (*allctr->unlink_free_block)(allctr, nxt_blk, alcu_flgs); + (*allctr->unlink_free_block)(allctr, nxt_blk); } } @@ -1920,10 +2527,10 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) new_blk_flgs, crr, blk_sz, - 0, - alcu_flgs); + 0); - 
STAT_MBC_BLK_FREE(allctr, old_blk_sz, alcu_flgs); + ERTS_ALC_CPOOL_FREE_OP(allctr); + STAT_MBC_BLK_FREE(allctr, crr, NULL, old_blk_sz, alcu_flgs); return new_p; } @@ -1931,6 +2538,635 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) #endif /* !MBC_REALLOC_ALWAYS_MOVES */ } +#ifdef ERTS_SMP + +#define ERTS_ALC_MAX_DEALLOC_CARRIER 10 +#define ERTS_ALC_CPOOL_MAX_FETCH_INSPECT 10 +#define ERTS_ALC_CPOOL_CHECK_LIMIT_COUNT 100 +#define ERTS_ALC_CPOOL_MAX_NO_CARRIERS 5 +#define ERTS_ALC_CPOOL_INSERT_ALLOWED_OFFSET 100 +#define ERTS_ALC_CPOOL_MAX_FAILED_STAT_READS 3 + +#define ERTS_ALC_CPOOL_PTR_MOD_MRK (((erts_aint_t) 1) << 0) +#define ERTS_ALC_CPOOL_PTR_DEL_MRK (((erts_aint_t) 1) << 1) + +#define ERTS_ALC_CPOOL_PTR_MRKS \ + (ERTS_ALC_CPOOL_PTR_MOD_MRK | ERTS_ALC_CPOOL_PTR_DEL_MRK) + +/* + * When setting multiple mod markers we always + * set mod markers in pointer order and always + * on next pointers before prev pointers. + */ + +typedef union { + ErtsAlcCPoolData_t sentinel; + char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAlcCPoolData_t))]; +} ErtsAlcCrrPool_t; + +#if ERTS_ALC_A_INVALID != 0 +# error "Carrier pool implementation assumes ERTS_ALC_A_INVALID == 0" +#endif +#if ERTS_ALC_A_MIN <= ERTS_ALC_A_INVALID +# error "Carrier pool implementation assumes ERTS_ALC_A_MIN > ERTS_ALC_A_INVALID" +#endif + +/* + * The pool is only allowed to be manipulated by managed + * threads except in the alloc_SUITE:cpool case. In this + * test case carrier_pool[ERTS_ALC_A_INVALID] will be + * used. + */ + +static ErtsAlcCrrPool_t carrier_pool[ERTS_ALC_A_MAX+1] erts_align_attribute(ERTS_CACHE_LINE_SIZE); + +#define ERTS_ALC_CPOOL_MAX_BACKOFF (1 << 8) + +static int +backoff(int n) +{ + int i; + + for (i = 0; i < n; i++) + ERTS_SPIN_BODY; + + if (n >= ERTS_ALC_CPOOL_MAX_BACKOFF) + return ERTS_ALC_CPOOL_MAX_BACKOFF; + else + return n << 1; +} + +static int +cpool_dbg_is_in_pool(Allctr_t *allctr, Carrier_t *crr) +{ + ErtsAlcCPoolData_t *sentinel = &carrier_pool[allctr->alloc_no].sentinel; + ErtsAlcCPoolData_t *cpdp = sentinel; + Carrier_t *tmp_crr; + + while (1) { + cpdp = (ErtsAlcCPoolData_t *) (erts_atomic_read_ddrb(&cpdp->next) & ~FLG_MASK); + if (cpdp == sentinel) + return 0; + tmp_crr = (Carrier_t *) (((char *) cpdp) - offsetof(Carrier_t, cpool)); + if (tmp_crr == crr) + return 1; + } +} + +static int +cpool_is_empty(Allctr_t *allctr) +{ + ErtsAlcCPoolData_t *sentinel = &carrier_pool[allctr->alloc_no].sentinel; + return ((erts_atomic_read_rb(&sentinel->next) == (erts_aint_t) sentinel) + && (erts_atomic_read_rb(&sentinel->prev) == (erts_aint_t) sentinel)); +} + +static ERTS_INLINE ErtsAlcCPoolData_t * +cpool_aint2cpd(erts_aint_t aint) +{ + return (ErtsAlcCPoolData_t *) (aint & ~ERTS_ALC_CPOOL_PTR_MRKS); +} + +static ERTS_INLINE erts_aint_t +cpool_read(erts_atomic_t *aptr) +{ + return erts_atomic_read_acqb(aptr); +} + +static ERTS_INLINE void +cpool_init(erts_atomic_t *aptr, erts_aint_t val) +{ + erts_atomic_set_nob(aptr, val); +} + +static ERTS_INLINE void +cpool_set_mod_marked(erts_atomic_t *aptr, erts_aint_t new, erts_aint_t old) +{ +#ifdef ERTS_ALC_CPOOL_DEBUG + erts_aint_t act = erts_atomic_xchg_relb(aptr, new); + ERTS_ALC_CPOOL_ASSERT(act == (old | ERTS_ALC_CPOOL_PTR_MOD_MRK)); +#else + erts_atomic_set_relb(aptr, new); +#endif +} + + +static ERTS_INLINE erts_aint_t +cpool_try_mod_mark_exp(erts_atomic_t *aptr, erts_aint_t exp) +{ + ERTS_ALC_CPOOL_ASSERT((exp & ERTS_ALC_CPOOL_PTR_MOD_MRK) == 0); + return erts_atomic_cmpxchg_nob(aptr, exp | ERTS_ALC_CPOOL_PTR_MOD_MRK, exp); +} 
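The lock-free carrier-pool list above protects each link pointer with a "mod" marker bit that is claimed via compare-and-swap before the pointer is rewritten, backing off exponentially while another thread holds the mark. Below is a minimal stand-alone sketch of that claim/backoff pattern, not the ERTS implementation: it uses C11 atomics instead of the erts_atomic API, and the names, the MAX_BACKOFF value, and the empty spin loop are illustrative only.

    /*
     * Sketch of the mod-mark claim used per link pointer, in the spirit of
     * cpool_try_mod_mark_exp()/cpool_mod_mark() in the patch above.
     */
    #include <stdatomic.h>
    #include <stdint.h>

    #define MOD_MRK     ((uintptr_t) 1)   /* bit 0: pointer is being modified */
    #define MAX_BACKOFF (1 << 8)          /* illustrative cap */

    static int backoff_spin(int n)
    {
        for (int i = 0; i < n; i++)
            ;                             /* a cpu pause/relax would go here */
        return n >= MAX_BACKOFF ? MAX_BACKOFF : n << 1;
    }

    /* Try once: succeeds only if *aptr still equals 'exp' and is unmarked.
     * Returns 'exp' on success, otherwise the value actually observed.     */
    static uintptr_t try_mod_mark_exp(_Atomic uintptr_t *aptr, uintptr_t exp)
    {
        uintptr_t old = exp;              /* caller guarantees exp is unmarked */
        atomic_compare_exchange_strong(aptr, &old, exp | MOD_MRK);
        return old;
    }

    /* Spin with backoff until this thread owns the mod mark on *aptr;
     * returns the unmarked pointer value that was current when claimed.    */
    static uintptr_t mod_mark(_Atomic uintptr_t *aptr)
    {
        uintptr_t act = atomic_load(aptr);
        for (;;) {
            int b = 1;
            while (act & MOD_MRK) {       /* someone else holds the mark */
                b = backoff_spin(b);
                act = atomic_load(aptr);
            }
            uintptr_t exp = act;
            if (atomic_compare_exchange_strong(aptr, &act, exp | MOD_MRK))
                return exp;               /* owned until the link is rewritten */
        }
    }

As the comment in the patch states, when several links must be marked they are always claimed in pointer order and on next pointers before prev pointers, which keeps concurrent inserters and deleters from deadlocking on each other's marks; the per-allocator-type sentinel element keeps the ring non-empty so no special empty-list case is needed.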
+ +static ERTS_INLINE erts_aint_t +cpool_mod_mark_exp(erts_atomic_t *aptr, erts_aint_t exp) +{ + int b; + erts_aint_t act; + ERTS_ALC_CPOOL_ASSERT((exp & ERTS_ALC_CPOOL_PTR_MOD_MRK) == 0); + while (1) { + act = erts_atomic_cmpxchg_nob(aptr, + exp | ERTS_ALC_CPOOL_PTR_MOD_MRK, + exp); + if (act == exp) + return exp; + b = 1; + do { + if ((act & ~ERTS_ALC_CPOOL_PTR_MOD_MRK) != exp) + return act; + b = backoff(b); + act = erts_atomic_read_nob(aptr); + } while (act != exp); + } +} + +static ERTS_INLINE erts_aint_t +cpool_mod_mark(erts_atomic_t *aptr) +{ + int b; + erts_aint_t act, exp; + act = cpool_read(aptr); + while (1) { + b = 1; + while (act & ERTS_ALC_CPOOL_PTR_MOD_MRK) { + b = backoff(b); + act = erts_atomic_read_nob(aptr); + } + exp = act; + act = erts_atomic_cmpxchg_acqb(aptr, + exp | ERTS_ALC_CPOOL_PTR_MOD_MRK, + exp); + if (act == exp) + return exp; + } +} + +static void +cpool_insert(Allctr_t *allctr, Carrier_t *crr) +{ + ErtsAlcCPoolData_t *cpd1p, *cpd2p; + erts_aint_t val; + ErtsAlcCPoolData_t *sentinel = &carrier_pool[allctr->alloc_no].sentinel; + + ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_INVALID /* testcase */ + || erts_thr_progress_is_managed_thread()); + ERTS_ALC_CPOOL_ASSERT(erts_smp_atomic_read_nob(&crr->allctr) + == (erts_aint_t) allctr); + + erts_atomic_add_nob(&allctr->cpool.stat.blocks_size, + (erts_aint_t) crr->cpool.blocks_size); + erts_atomic_add_nob(&allctr->cpool.stat.no_blocks, + (erts_aint_t) crr->cpool.blocks); + erts_atomic_add_nob(&allctr->cpool.stat.carriers_size, + (erts_aint_t) CARRIER_SZ(crr)); + erts_atomic_inc_nob(&allctr->cpool.stat.no_carriers); + + erts_smp_atomic_set_nob(&crr->allctr, + ((erts_aint_t) allctr)|ERTS_CRR_ALCTR_FLG_IN_POOL); + + /* + * We search in 'next' direction and begin by passing + * one element before trying to insert. This in order to + * avoid contention with threads fetching elements. 
+ */ + + val = cpool_read(&sentinel->next); + + /* Find a predecessor to be, and set mod marker on its next ptr */ + + while (1) { + cpd1p = cpool_aint2cpd(val); + if (cpd1p == sentinel) { + val = cpool_mod_mark(&cpd1p->next); + break; + } + val = cpool_read(&cpd1p->next); + if (!(val & ERTS_ALC_CPOOL_PTR_MRKS)) { + erts_aint_t tmp = cpool_try_mod_mark_exp(&cpd1p->next, val); + if (tmp == val) { + val = tmp; + break; + } + val = tmp; + } + } + + /* Set mod marker on prev ptr of the to be successor */ + + cpd2p = cpool_aint2cpd(val); + + cpool_init(&crr->cpool.next, (erts_aint_t) cpd2p); + cpool_init(&crr->cpool.prev, (erts_aint_t) cpd1p); + + val = (erts_aint_t) cpd1p; + + while (1) { + int b; + erts_aint_t tmp; + + tmp = cpool_mod_mark_exp(&cpd2p->prev, val); + if (tmp == val) + break; + b = 1; + do { + b = backoff(b); + tmp = cpool_read(&cpd2p->prev); + } while (tmp != val); + } + + /* Write pointers to this element in successor and predecessor */ + + cpool_set_mod_marked(&cpd1p->next, + (erts_aint_t) &crr->cpool, + (erts_aint_t) cpd2p); + cpool_set_mod_marked(&cpd2p->prev, + (erts_aint_t) &crr->cpool, + (erts_aint_t) cpd1p); +} + +static void +cpool_delete(Allctr_t *allctr, Allctr_t *prev_allctr, Carrier_t *crr) +{ + ErtsAlcCPoolData_t *cpd1p, *cpd2p; + erts_aint_t val; +#ifdef ERTS_ALC_CPOOL_DEBUG + ErtsAlcCPoolData_t *sentinel = &carrier_pool[allctr->alloc_no].sentinel; +#endif + + ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_INVALID /* testcase */ + || erts_thr_progress_is_managed_thread()); + ERTS_ALC_CPOOL_ASSERT(sentinel != &crr->cpool); + + /* Set mod marker on next ptr of our predecessor */ + + val = (erts_aint_t) &crr->cpool; + while (1) { + erts_aint_t tmp; + cpd1p = cpool_aint2cpd(cpool_read(&crr->cpool.prev)); + tmp = cpool_mod_mark_exp(&cpd1p->next, val); + if (tmp == val) + break; + } + + /* Set mod marker on our next ptr */ + + val = cpool_mod_mark(&crr->cpool.next); + + /* Set mod marker on the prev ptr of our successor */ + + cpd2p = cpool_aint2cpd(val); + + val = (erts_aint_t) &crr->cpool; + + while (1) { + int b; + erts_aint_t tmp; + + tmp = cpool_mod_mark_exp(&cpd2p->prev, val); + if (tmp == val) + break; + b = 1; + do { + b = backoff(b); + tmp = cpool_read(&cpd2p->prev); + } while (tmp != val); + } + + /* Set mod marker on our prev ptr */ + + val = (erts_aint_t) cpd1p; + + while (1) { + int b; + erts_aint_t tmp; + + tmp = cpool_mod_mark_exp(&crr->cpool.prev, val); + if (tmp == val) + break; + b = 1; + do { + b = backoff(b); + tmp = cpool_read(&cpd2p->prev); + } while (tmp != val); + } + + /* Write pointers past this element in predecessor and successor */ + + cpool_set_mod_marked(&cpd1p->next, + (erts_aint_t) cpd2p, + (erts_aint_t) &crr->cpool); + cpool_set_mod_marked(&cpd2p->prev, + (erts_aint_t) cpd1p, + (erts_aint_t) &crr->cpool); + + /* Repleace mod markers with delete markers on this element */ + cpool_set_mod_marked(&crr->cpool.next, + ((erts_aint_t) cpd2p) | ERTS_ALC_CPOOL_PTR_DEL_MRK, + ((erts_aint_t) cpd2p) | ERTS_ALC_CPOOL_PTR_MOD_MRK); + cpool_set_mod_marked(&crr->cpool.prev, + ((erts_aint_t) cpd1p) | ERTS_ALC_CPOOL_PTR_DEL_MRK, + ((erts_aint_t) cpd1p) | ERTS_ALC_CPOOL_PTR_MOD_MRK); + + crr->cpool.thr_prgr = erts_thr_progress_later(NULL); + + erts_atomic_add_nob(&prev_allctr->cpool.stat.blocks_size, + -((erts_aint_t) crr->cpool.blocks_size)); + erts_atomic_add_nob(&prev_allctr->cpool.stat.no_blocks, + -((erts_aint_t) crr->cpool.blocks)); + erts_atomic_add_nob(&prev_allctr->cpool.stat.carriers_size, + -((erts_aint_t) CARRIER_SZ(crr))); + 
erts_atomic_dec_wb(&prev_allctr->cpool.stat.no_carriers); + +} + +static Carrier_t * +cpool_fetch(Allctr_t *allctr, UWord size) +{ + int i; + Carrier_t *crr; + ErtsAlcCPoolData_t *cpdp; + ErtsAlcCPoolData_t *sentinel = &carrier_pool[allctr->alloc_no].sentinel; + + ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_INVALID /* testcase */ + || erts_thr_progress_is_managed_thread()); + + i = 0; + + /* First; check our own pending dealloc carrier list... */ + crr = allctr->cpool.dc_list.last; + while (crr && i < ERTS_ALC_CPOOL_MAX_FETCH_INSPECT) { + if (erts_atomic_read_nob(&crr->cpool.max_size) >= size) { + unlink_carrier(&allctr->cpool.dc_list, crr); +#ifdef ERTS_ALC_CPOOL_DEBUG + ERTS_ALC_CPOOL_ASSERT(erts_smp_atomic_xchg_nob(&crr->allctr, + ((erts_aint_t) allctr)) + == (((erts_aint_t) allctr) & ~FLG_MASK)); +#else + erts_smp_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr)); +#endif + return crr; + } + crr = crr->prev; + i++; + } + + /* ... then the pool ... */ + + /* + * We search in 'prev' direction and begin by passing + * one element before trying to fetch. This in order to + * avoid contention with threads inserting elements. + */ + + cpdp = cpool_aint2cpd(cpool_read(&sentinel->prev)); + if (cpdp == sentinel) + return NULL; + + while (i < ERTS_ALC_CPOOL_MAX_FETCH_INSPECT) { + erts_aint_t exp; + cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev)); + if (cpdp == sentinel) { + cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev)); + if (cpdp == sentinel) + return NULL; + i = ERTS_ALC_CPOOL_MAX_FETCH_INSPECT; /* Last one to inspect */ + } + crr = (Carrier_t *) (((char *) cpdp) - offsetof(Carrier_t, cpool)); + exp = erts_smp_atomic_read_rb(&crr->allctr); + if (((exp & (ERTS_CRR_ALCTR_FLG_IN_POOL|ERTS_CRR_ALCTR_FLG_BUSY)) + == ERTS_CRR_ALCTR_FLG_IN_POOL) + && (erts_atomic_read_nob(&cpdp->max_size) >= size)) { + erts_aint_t act; + /* Try to fetch it... 
*/ + act = erts_smp_atomic_cmpxchg_mb(&crr->allctr, + (erts_aint_t) allctr, + exp); + if (act == exp) { + cpool_delete(allctr, ((Allctr_t *) (act & ~FLG_MASK)), crr); + return crr; + } + } + i++; + } + return NULL; +} + +static void +check_pending_dealloc_carrier(Allctr_t *allctr, + int *need_thr_progress, + ErtsThrPrgrVal *thr_prgr_p, + int *need_more_work) +{ + Carrier_t *crr = allctr->cpool.dc_list.first; + + if (crr) { + ErtsThrPrgrVal current = erts_thr_progress_current(); + int i = 0; + + do { + Carrier_t *dcrr; + + if (!erts_thr_progress_has_reached_this(current, crr->cpool.thr_prgr)) + break; + + dcrr = crr; + crr = crr->next; + dealloc_carrier(allctr, dcrr, ERTS_MSEG_FLG_2POW); + i++; + } while (crr && i < ERTS_ALC_MAX_DEALLOC_CARRIER); + + allctr->cpool.dc_list.first = crr; + if (!crr) + allctr->cpool.dc_list.last = NULL; + else { + crr->prev = NULL; + + if (need_more_work) { + ERTS_ALC_CPOOL_ASSERT(need_thr_progress && thr_prgr_p); + if (erts_thr_progress_has_reached_this(current, crr->cpool.thr_prgr)) + *need_more_work = 1; + else { + *need_thr_progress = 1; + if (*thr_prgr_p == ERTS_THR_PRGR_INVALID + || erts_thr_progress_cmp(crr->cpool.thr_prgr, + *thr_prgr_p) < 0) { + *thr_prgr_p = crr->cpool.thr_prgr; + } + } + } + } + } +} + +static void +schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr) +{ + Allctr_t *used_allctr; + int check_pending_dealloc; + erts_aint_t max_size; + + if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr)) { + dealloc_carrier(allctr, crr, ERTS_MSEG_FLG_2POW); + return; + } + + used_allctr = crr->cpool.orig_allctr; + + if (allctr != used_allctr) { + Block_t *blk = MBC_TO_FIRST_BLK(allctr, crr); + int cinit = used_allctr->dd.ix - allctr->dd.ix; + + /* + * Receiver will recognize that this is a carrier to + * deallocate since the block is an mbc block that + * is free and last in carrier... 
+ */ + ERTS_ALC_CPOOL_ASSERT(IS_FREE_LAST_MBC_BLK(blk)); + + ERTS_ALC_CPOOL_ASSERT(IS_MBC_FIRST_ABLK(allctr, blk)); + ERTS_ALC_CPOOL_ASSERT(crr == FBLK_TO_MBC(blk)); + ERTS_ALC_CPOOL_ASSERT(crr == FIRST_BLK_TO_MBC(used_allctr, blk)); + + if (ddq_enqueue(&used_allctr->dd.q, BLK2UMEM(blk), cinit)) + erts_alloc_notify_delayed_dealloc(used_allctr->ix); + return; + } + + if (crr->cpool.thr_prgr == ERTS_THR_PRGR_INVALID + || erts_thr_progress_has_reached(crr->cpool.thr_prgr)) { + dealloc_carrier(allctr, crr, ERTS_MSEG_FLG_2POW); + return; + } + + max_size = (erts_aint_t) allctr->largest_fblk_in_mbc(allctr, crr); + erts_atomic_set_nob(&crr->cpool.max_size, max_size); + + crr->next = NULL; + crr->prev = allctr->cpool.dc_list.last; + if (allctr->cpool.dc_list.last) { + check_pending_dealloc = 1; + allctr->cpool.dc_list.last->next = crr; + } + else { + check_pending_dealloc = 0; + allctr->cpool.dc_list.first = crr; + } + allctr->cpool.dc_list.last = crr; + if (check_pending_dealloc) + check_pending_dealloc_carrier(allctr, NULL, NULL, NULL); + erts_alloc_ensure_handle_delayed_dealloc_call(allctr->ix); +} + +static ERTS_INLINE void +cpool_init_carrier_data(Allctr_t *allctr, Carrier_t *crr) +{ + erts_atomic_init_nob(&crr->cpool.next, ERTS_AINT_NULL); + erts_atomic_init_nob(&crr->cpool.prev, ERTS_AINT_NULL); + crr->cpool.orig_allctr = allctr; + crr->cpool.thr_prgr = ERTS_THR_PRGR_INVALID; + erts_atomic_init_nob(&crr->cpool.max_size, 0); + crr->cpool.blocks = 0; + crr->cpool.blocks_size = 0; + if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr)) + crr->cpool.abandon_limit = 0; + else { + UWord csz = CARRIER_SZ(crr); + UWord limit = csz*allctr->cpool.util_limit; + if (limit > csz) + limit /= 100; + else + limit = (csz/100)*allctr->cpool.util_limit; + crr->cpool.abandon_limit = limit; + } +} + +static void +set_new_allctr_abandon_limit(Allctr_t *allctr) +{ + UWord limit; + UWord csz; + + allctr->cpool.check_limit_count = ERTS_ALC_CPOOL_CHECK_LIMIT_COUNT; + + csz = allctr->mbcs.curr.norm.mseg.size; + csz += allctr->mbcs.curr.norm.sys_alloc.size; + + limit = csz*allctr->cpool.util_limit; + if (limit > csz) + limit /= 100; + else + limit = (csz/100)*allctr->cpool.util_limit; + + allctr->cpool.abandon_limit = limit; +} + +static void +abandon_carrier(Allctr_t *allctr, Carrier_t *crr) +{ + erts_aint_t max_size; + + STAT_MBC_CPOOL_INSERT(allctr, crr); + + unlink_carrier(&allctr->mbc_list, crr); + + allctr->remove_mbc(allctr, crr); + + max_size = (erts_aint_t) allctr->largest_fblk_in_mbc(allctr, crr); + erts_atomic_set_nob(&crr->cpool.max_size, max_size); + + cpool_insert(allctr, crr); + + set_new_allctr_abandon_limit(allctr); +} + +static void +cpool_read_stat(Allctr_t *allctr, UWord *nocp, UWord *cszp, UWord *nobp, UWord *bszp) +{ + int i; + UWord noc = 0, csz = 0, nob = 0, bsz = 0; + + /* + * We try to get consistent values, but after + * ERTS_ALC_CPOOL_MAX_FAILED_STAT_READS failed + * tries we give up and present what we got... + */ + for (i = 0; i <= ERTS_ALC_CPOOL_MAX_FAILED_STAT_READS; i++) { + UWord tnoc, tcsz, tnob, tbsz; + + tnoc = (UWord) (nocp + ? erts_atomic_read_nob(&allctr->cpool.stat.no_carriers) + : 0); + tcsz = (UWord) (cszp + ? erts_atomic_read_nob(&allctr->cpool.stat.carriers_size) + : 0); + tnob = (UWord) (nobp + ? erts_atomic_read_nob(&allctr->cpool.stat.no_blocks) + : 0); + tbsz = (UWord) (bszp + ? 
erts_atomic_read_nob(&allctr->cpool.stat.blocks_size) + : 0); + if (tnoc == noc && tcsz == csz && tnob == nob && tbsz == bsz) + break; + noc = tnoc; + csz = tcsz; + nob = tnob; + bsz = tbsz; + ERTS_THR_READ_MEMORY_BARRIER; + } + + if (nocp) + *nocp = noc; + if (cszp) + *cszp = csz; + if (nobp) + *nobp = nob; + if (bszp) + *bszp = bsz; +} + + +#endif /* ERTS_SMP */ + #ifdef DEBUG #if HAVE_ERTS_MSEG @@ -1974,81 +3210,6 @@ static void CHECK_1BLK_CARRIER(Allctr_t* A, int SBC, int MSEGED, Carrier_t* C, #endif static Block_t * -create_sbmbc(Allctr_t *allctr, Uint umem_sz) -{ - Block_t *blk; - Uint blk_sz; - Uint crr_sz = allctr->sbmbc_size; - Carrier_t *crr; - -#if HALFWORD_HEAP - if (allctr->mseg_opt.low_mem) - crr = erts_alloc(ERTS_ALC_T_SBMBC_LOW, crr_sz); - else -#endif - crr = erts_alloc(ERTS_ALC_T_SBMBC, crr_sz); - - INC_CC(allctr->calls.sbmbc_alloc); - SET_CARRIER_HDR(crr, crr_sz, SCH_SYS_ALLOC|SCH_MBC, allctr); - - blk = MBC_TO_FIRST_BLK(allctr, crr); - - blk_sz = UNIT_FLOOR(crr_sz - MBC_HEADER_SIZE(allctr)); - - SET_MBC_FBLK_HDR(blk, blk_sz, SBH_THIS_FREE|SBH_LAST_BLK, crr); - - link_carrier(&allctr->sbmbc_list, crr); - - STAT_SBMBC_ALLOC(allctr, crr_sz); - CHECK_1BLK_CARRIER(allctr, 0, 0, crr, crr_sz, blk, blk_sz); - if (allctr->creating_mbc) - (*allctr->creating_mbc)(allctr, crr, ERTS_ALCU_FLG_SBMBC); - - DEBUG_SAVE_ALIGNMENT(crr); - return blk; -} - -static void -destroy_sbmbc(Allctr_t *allctr, Block_t *blk) -{ - Uint crr_sz; - Carrier_t *crr; - - ASSERT(IS_MBC_BLK(blk)); - ASSERT(IS_MBC_FIRST_FBLK(allctr, blk)); - - crr = FIRST_BLK_TO_MBC(allctr, blk); - crr_sz = CARRIER_SZ(crr); - -#ifdef DEBUG - if (!allctr->stopped) { - ASSERT(IS_LAST_BLK(blk)); - -#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG - (*allctr->link_free_block)(allctr, blk, ERTS_ALCU_FLG_SBMBC); - HARD_CHECK_BLK_CARRIER(allctr, blk); - (*allctr->unlink_free_block)(allctr, blk, ERTS_ALCU_FLG_SBMBC); -#endif - } -#endif - - STAT_SBMBC_FREE(allctr, crr_sz); - - unlink_carrier(&allctr->sbmbc_list, crr); - if (allctr->destroying_mbc) - (*allctr->destroying_mbc)(allctr, crr, ERTS_ALCU_FLG_SBMBC); - - INC_CC(allctr->calls.sbmbc_free); - -#if HALFWORD_HEAP - if (allctr->mseg_opt.low_mem) - erts_free(ERTS_ALC_T_SBMBC_LOW, crr); - else -#endif - erts_free(ERTS_ALC_T_SBMBC, crr); -} - -static Block_t * create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags) { Block_t *blk; @@ -2077,6 +3238,24 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags) blk_sz = UMEMSZ2BLKSZ(allctr, umem_sz); +#ifdef ERTS_SMP + allctr->cpool.disable_abandon = ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON; + + if ((flags & (CFLG_MBC|CFLG_NO_CPOOL)) == CFLG_MBC + && ERTS_ALC_IS_CPOOL_ENABLED(allctr) + && erts_thr_progress_is_managed_thread()) { + crr = cpool_fetch(allctr, blk_sz); + if (crr) { + STAT_MBC_CPOOL_FETCH(allctr, crr); + link_carrier(&allctr->mbc_list, crr); + (*allctr->add_mbc)(allctr, crr); + blk = (*allctr->get_free_block)(allctr, blk_sz, NULL, 0); + ASSERT(blk); + return blk; + } + } +#endif + #if HAVE_ERTS_MSEG if (flags & CFLG_FORCE_SYS_ALLOC) @@ -2204,11 +3383,15 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags) allctr->main_carrier = crr; } +#ifdef ERTS_SMP + cpool_init_carrier_data(allctr, crr); +#endif + link_carrier(&allctr->mbc_list, crr); CHECK_1BLK_CARRIER(allctr, 0, is_mseg, crr, crr_sz, blk, blk_sz); if (allctr->creating_mbc) - (*allctr->creating_mbc)(allctr, crr, 0); + (*allctr->creating_mbc)(allctr, crr); } @@ -2352,14 +3535,21 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags) } static void 
-destroy_carrier(Allctr_t *allctr, Block_t *blk) +dealloc_carrier(Allctr_t *allctr, Carrier_t *crr, Uint mseg_flags) { - Uint crr_sz; - Carrier_t *crr; #if HAVE_ERTS_MSEG - Uint is_mseg = 0; - Uint mseg_flags = ERTS_MSEG_FLG_NONE; + if (IS_MSEG_CARRIER(crr)) + alcu_mseg_dealloc(allctr, crr, CARRIER_SZ(crr), mseg_flags); + else #endif + alcu_sys_free(allctr, crr); +} + +static void +destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp) +{ + Uint crr_sz; + Carrier_t *crr; if (IS_SBC_BLK(blk)) { Uint blk_sz = SBC_BLK_SZ(blk); @@ -2372,7 +3562,6 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk) #if HAVE_ERTS_MSEG if (IS_MSEG_CARRIER(crr)) { - is_mseg++; ASSERT(crr_sz % MSEG_UNIT_SZ == 0); STAT_MSEG_SBC_FREE(allctr, crr_sz, blk_sz); } @@ -2382,6 +3571,7 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk) unlink_carrier(&allctr->sbc_list, crr); + dealloc_carrier(allctr, crr, ERTS_MSEG_FLG_NONE); } else { ASSERT(IS_MBC_FIRST_FBLK(allctr, blk)); @@ -2400,30 +3590,36 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk) } #endif -#if HAVE_ERTS_MSEG - if (IS_MSEG_CARRIER(crr)) { - is_mseg++; - ASSERT(crr_sz % MSEG_UNIT_SZ == 0); - STAT_MSEG_MBC_FREE(allctr, crr_sz); - mseg_flags = ERTS_MSEG_FLG_2POW; + if (allctr->destroying_mbc) + (*allctr->destroying_mbc)(allctr, crr); + +#ifdef ERTS_SMP + if (busy_pcrr_pp && *busy_pcrr_pp) { + ERTS_ALC_CPOOL_ASSERT(*busy_pcrr_pp == crr); + *busy_pcrr_pp = NULL; + cpool_delete(allctr, allctr, crr); } else #endif - STAT_SYS_ALLOC_MBC_FREE(allctr, crr_sz); + { + unlink_carrier(&allctr->mbc_list, crr); +#if HAVE_ERTS_MSEG + if (IS_MSEG_CARRIER(crr)) { + ASSERT(crr_sz % MSEG_UNIT_SZ == 0); + STAT_MSEG_MBC_FREE(allctr, crr_sz); + } + else +#endif + STAT_SYS_ALLOC_MBC_FREE(allctr, crr_sz); + } - unlink_carrier(&allctr->mbc_list, crr); - if (allctr->destroying_mbc) - (*allctr->destroying_mbc)(allctr, crr, 0); +#ifdef ERTS_SMP + schedule_dealloc_carrier(allctr, crr); +#else + dealloc_carrier(allctr, crr, ERTS_MSEG_FLG_2POW); +#endif } - -#if HAVE_ERTS_MSEG - if (is_mseg) { - alcu_mseg_dealloc(allctr, crr, crr_sz, mseg_flags); - } - else -#endif - alcu_sys_free(allctr, crr); } @@ -2457,19 +3653,19 @@ static struct { Eterm lmbcs; Eterm smbcs; Eterm mbcgs; - Eterm sbmbcs; - Eterm sbmbct; + Eterm acul; #if HAVE_ERTS_MSEG Eterm mmc; #endif Eterm ycs; - /* Eterm sbmbcs; */ - Eterm fix_types; Eterm mbcs; +#ifdef ERTS_SMP + Eterm mbcs_pool; +#endif Eterm sbcs; Eterm sys_alloc_carriers_size; @@ -2494,8 +3690,6 @@ static struct { Eterm mseg_dealloc; Eterm mseg_realloc; #endif - Eterm sbmbc_alloc; - Eterm sbmbc_free; #ifdef DEBUG Eterm end_of_atoms; #endif @@ -2551,19 +3745,19 @@ init_atoms(Allctr_t *allctr) AM_INIT(lmbcs); AM_INIT(smbcs); AM_INIT(mbcgs); - AM_INIT(sbmbcs); - AM_INIT(sbmbct); + AM_INIT(acul); #if HAVE_ERTS_MSEG AM_INIT(mmc); #endif AM_INIT(ycs); - /*AM_INIT(sbmbcs);*/ - AM_INIT(fix_types); AM_INIT(mbcs); +#ifdef ERTS_SMP + AM_INIT(mbcs_pool); +#endif AM_INIT(sbcs); AM_INIT(sys_alloc_carriers_size); @@ -2588,8 +3782,6 @@ init_atoms(Allctr_t *allctr) AM_INIT(mseg_dealloc); AM_INIT(mseg_realloc); #endif - AM_INIT(sbmbc_free); - AM_INIT(sbmbc_alloc); #ifdef DEBUG for (atom = (Eterm *) &am; atom < &am.end_of_atoms; atom++) { @@ -2637,19 +3829,22 @@ ensure_atoms_initialized(Allctr_t *allctr) * that would fit a small when size check is done may need to be built * as a big when the actual build is performed. Caller is required to * HRelease after build. 
+ * + * Note, bld_unstable_uint() should have been called bld_unstable_uword() + * but we do not want to rename it... */ static ERTS_INLINE Eterm -bld_unstable_uint(Uint **hpp, Uint *szp, Uint ui) +bld_unstable_uint(Uint **hpp, Uint *szp, UWord ui) { Eterm res = THE_NON_VALUE; if (szp) - *szp += BIG_UINT_HEAP_SIZE; + *szp += BIG_UWORD_HEAP_SIZE(~((UWord) 0)); if (hpp) { if (IS_USMALL(0, ui)) res = make_small(ui); else { - res = uint_to_big(ui, *hpp); - *hpp += BIG_UINT_HEAP_SIZE; + res = uword_to_big(ui, *hpp); + *hpp += BIG_UWORD_HEAP_SIZE(ui); } } return res; @@ -2675,8 +3870,24 @@ add_4tup(Uint **hpp, Uint *szp, Eterm *lp, bld_cons(hpp, szp, bld_tuple(hpp, szp, 4, el1, el2, el3, el4), *lp); } +static ERTS_INLINE void +add_fix_types(Allctr_t *allctr, int internal, Uint **hpp, Uint *szp, + Eterm *lp, Eterm fix) +{ + if (allctr->fix) { + if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr)) + add_2tup(hpp, szp, lp, am.fix_types, fix); + else if (internal) + add_3tup(hpp, szp, lp, + am.fix_types, + erts_bld_uword(hpp, szp, ~((UWord) 0)), + fix); + } +} + static Eterm sz_info_fix(Allctr_t *allctr, + int internal, int *print_to_p, void *print_to_arg, Uint **hpp, @@ -2684,36 +3895,67 @@ sz_info_fix(Allctr_t *allctr, { Eterm res; int ix; - ErtsAlcFixList_t *fix = allctr->fix; - ASSERT(fix); + ASSERT(allctr->fix); res = NIL; - for (ix = ERTS_ALC_NO_FIXED_SIZES-1; ix >= 0; ix--) { - ErtsAlcType_t n = ix + ERTS_ALC_N_MIN_A_FIXED_SIZE; - Uint alloced = (fix[ix].type_size * fix[ix].allocated); - Uint used = fix[ix].type_size*fix[ix].used; - - if (print_to_p) { - int to = *print_to_p; - void *arg = print_to_arg; - erts_print(to, - arg, - "fix type: %s %bpu %bpu\n", - (char *) ERTS_ALC_N2TD(n), - alloced, - used); - } + if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) { + + if (internal) { + for (ix = ERTS_ALC_NO_FIXED_SIZES-1; ix >= 0; ix--) { + ErtsAlcFixList_t *fix = &allctr->fix[ix]; + UWord alloced = fix->type_size * fix->u.cpool.allocated; + UWord used = fix->type_size * fix->u.cpool.used; + + if (print_to_p) { + int to = *print_to_p; + void *arg = print_to_arg; + erts_print(to, + arg, + "fix type internal: %s %bpu %bpu\n", + (char *) ERTS_ALC_N2TD(ERTS_ALC_N_MIN_A_FIXED_SIZE + + ix), + alloced, + used); + } - if (hpp || szp) { - add_3tup(hpp, szp, &res, - fix_type_atoms[ix], - bld_unstable_uint(hpp, szp, alloced), - bld_unstable_uint(hpp, szp, used)); + if (hpp || szp) { + add_3tup(hpp, szp, &res, + fix_type_atoms[ix], + bld_unstable_uint(hpp, szp, alloced), + bld_unstable_uint(hpp, szp, used)); + } + } } } + else { + for (ix = ERTS_ALC_NO_FIXED_SIZES-1; ix >= 0; ix--) { + ErtsAlcFixList_t *fix = &allctr->fix[ix]; + UWord alloced = fix->type_size * fix->u.nocpool.allocated; + UWord used = fix->type_size*fix->u.nocpool.used; + + if (print_to_p) { + int to = *print_to_p; + void *arg = print_to_arg; + erts_print(to, + arg, + "fix type: %s %bpu %bpu\n", + (char *) ERTS_ALC_N2TD(ERTS_ALC_N_MIN_A_FIXED_SIZE + + ix), + alloced, + used); + } + + if (hpp || szp) { + add_3tup(hpp, szp, &res, + fix_type_atoms[ix], + bld_unstable_uint(hpp, szp, alloced), + bld_unstable_uint(hpp, szp, used)); + } + } + } return res; } @@ -2727,9 +3969,7 @@ sz_info_carriers(Allctr_t *allctr, Uint *szp) { Eterm res = THE_NON_VALUE; - Uint curr_size = (cs == &allctr->sbmbcs - ? 
cs->curr.small_block.size - : cs->curr.norm.mseg.size + cs->curr.norm.sys_alloc.size); + UWord curr_size = cs->curr.norm.mseg.size + cs->curr.norm.sys_alloc.size; if (print_to_p) { int to = *print_to_p; @@ -2743,7 +3983,7 @@ sz_info_carriers(Allctr_t *allctr, cs->blocks.max_ever.size); erts_print(to, arg, - "%scarriers size: %beu %bpu %bpu\n", + "%scarriers size: %bpu %bpu %bpu\n", prefix, curr_size, cs->max.size, @@ -2767,6 +4007,62 @@ sz_info_carriers(Allctr_t *allctr, return res; } +#ifdef ERTS_SMP + +static Eterm +info_cpool(Allctr_t *allctr, + int sz_only, + char *prefix, + int *print_to_p, + void *print_to_arg, + Uint **hpp, + Uint *szp) +{ + Eterm res = THE_NON_VALUE; + UWord noc, csz, nob, bsz; + + noc = csz = nob = bsz = ~0; + if (print_to_p || hpp) { + if (sz_only) + cpool_read_stat(allctr, NULL, &csz, NULL, &bsz); + else + cpool_read_stat(allctr, &noc, &csz, &nob, &bsz); + } + + if (print_to_p) { + int to = *print_to_p; + void *arg = print_to_arg; + if (!sz_only) + erts_print(to, arg, "%sblocks: %bpu\n", prefix, nob); + erts_print(to, arg, "%sblocks size: %bpu\n", prefix, bsz); + if (!sz_only) + erts_print(to, arg, "%scarriers: %bpu\n", prefix, noc); + erts_print(to, arg, "%scarriers size: %bpu\n", prefix, csz); + } + + if (hpp || szp) { + res = NIL; + add_2tup(hpp, szp, &res, + am.carriers_size, + bld_unstable_uint(hpp, szp, csz)); + if (!sz_only) + add_2tup(hpp, szp, &res, + am.carriers, + bld_unstable_uint(hpp, szp, noc)); + add_2tup(hpp, szp, &res, + am.blocks_size, + bld_unstable_uint(hpp, szp, bsz)); + if (!sz_only) + add_2tup(hpp, szp, &res, + am.blocks, + bld_unstable_uint(hpp, szp, nob)); + } + + return res; +} + +#endif /* ERTS_SMP */ + static Eterm info_carriers(Allctr_t *allctr, CarriersStats_t *cs, @@ -2777,17 +4073,10 @@ info_carriers(Allctr_t *allctr, Uint *szp) { Eterm res = THE_NON_VALUE; - Uint curr_no, curr_size; - int small_block = cs == &allctr->sbmbcs; - - if (small_block) { - curr_no = cs->curr.small_block.no; - curr_size = cs->curr.small_block.size; - } - else { - curr_no = cs->curr.norm.mseg.no + cs->curr.norm.sys_alloc.no; - curr_size = cs->curr.norm.mseg.size + cs->curr.norm.sys_alloc.size; - } + UWord curr_no, curr_size; + + curr_no = cs->curr.norm.mseg.no + cs->curr.norm.sys_alloc.no; + curr_size = cs->curr.norm.mseg.size + cs->curr.norm.sys_alloc.size; if (print_to_p) { int to = *print_to_p; @@ -2808,75 +4097,67 @@ info_carriers(Allctr_t *allctr, cs->blocks.max_ever.size); erts_print(to, arg, - "%scarriers: %beu %bpu %bpu\n", + "%scarriers: %bpu %bpu %bpu\n", prefix, curr_no, cs->max.no, cs->max_ever.no); - if (!small_block) { #if HAVE_ERTS_MSEG - erts_print(to, - arg, - "%smseg carriers: %bpu\n", - prefix, - cs->curr.norm.mseg.no); -#endif - erts_print(to, - arg, - "%ssys_alloc carriers: %bpu\n", - prefix, - cs->curr.norm.sys_alloc.no); - } erts_print(to, arg, - "%scarriers size: %beu %bpu %bpu\n", + "%smseg carriers: %bpu\n", + prefix, + cs->curr.norm.mseg.no); +#endif + erts_print(to, + arg, + "%ssys_alloc carriers: %bpu\n", + prefix, + cs->curr.norm.sys_alloc.no); + erts_print(to, + arg, + "%scarriers size: %bpu %bpu %bpu\n", prefix, curr_size, cs->max.size, cs->max_ever.size); - if (!small_block) { #if HAVE_ERTS_MSEG - erts_print(to, - arg, - "%smseg carriers size: %bpu\n", - prefix, - cs->curr.norm.mseg.size); -#endif - erts_print(to, - arg, - "%ssys_alloc carriers size: %bpu\n", - prefix, - cs->curr.norm.sys_alloc.size); - } + erts_print(to, + arg, + "%smseg carriers size: %bpu\n", + prefix, + cs->curr.norm.mseg.size); +#endif + 
erts_print(to, + arg, + "%ssys_alloc carriers size: %bpu\n", + prefix, + cs->curr.norm.sys_alloc.size); } if (hpp || szp) { res = NIL; - if (!small_block) { - add_2tup(hpp, szp, &res, - am.sys_alloc_carriers_size, - bld_unstable_uint(hpp, szp, cs->curr.norm.sys_alloc.size)); + add_2tup(hpp, szp, &res, + am.sys_alloc_carriers_size, + bld_unstable_uint(hpp, szp, cs->curr.norm.sys_alloc.size)); #if HAVE_ERTS_MSEG - add_2tup(hpp, szp, &res, - am.mseg_alloc_carriers_size, - bld_unstable_uint(hpp, szp, cs->curr.norm.mseg.size)); + add_2tup(hpp, szp, &res, + am.mseg_alloc_carriers_size, + bld_unstable_uint(hpp, szp, cs->curr.norm.mseg.size)); #endif - } add_4tup(hpp, szp, &res, am.carriers_size, bld_unstable_uint(hpp, szp, curr_size), bld_unstable_uint(hpp, szp, cs->max.size), bld_unstable_uint(hpp, szp, cs->max_ever.size)); - if (!small_block) { - add_2tup(hpp, szp, &res, - am.sys_alloc_carriers, - bld_unstable_uint(hpp, szp, cs->curr.norm.sys_alloc.no)); + add_2tup(hpp, szp, &res, + am.sys_alloc_carriers, + bld_unstable_uint(hpp, szp, cs->curr.norm.sys_alloc.no)); #if HAVE_ERTS_MSEG - add_2tup(hpp, szp, &res, - am.mseg_alloc_carriers, - bld_unstable_uint(hpp, szp, cs->curr.norm.mseg.no)); + add_2tup(hpp, szp, &res, + am.mseg_alloc_carriers, + bld_unstable_uint(hpp, szp, cs->curr.norm.mseg.no)); #endif - } add_4tup(hpp, szp, &res, am.carriers, bld_unstable_uint(hpp, szp, curr_no), @@ -2935,16 +4216,10 @@ info_calls(Allctr_t *allctr, if (print_to_p) { #define PRINT_CC_4(TO, TOA, NAME, CC) \ - if ((CC).giga_no == 0) \ - erts_print(TO, TOA, "%s calls: %b32u\n", NAME, CC.no); \ - else \ - erts_print(TO, TOA, "%s calls: %b32u%09lu\n", NAME, CC.giga_no, CC.no) + erts_print(TO, TOA, "%s calls: %b64u\n", NAME, CC) #define PRINT_CC_5(TO, TOA, PRFX, NAME, CC) \ - if ((CC).giga_no == 0) \ - erts_print(TO, TOA, "%s%s calls: %b32u\n",PRFX,NAME,CC.no); \ - else \ - erts_print(TO, TOA, "%s%s calls: %b32u%09lu\n",PRFX,NAME,CC.giga_no,CC.no) + erts_print(TO, TOA, "%s%s calls: %b64u\n",PRFX,NAME,CC) char *prefix = allctr->name_prefix; int to = *print_to_p; @@ -2954,9 +4229,6 @@ info_calls(Allctr_t *allctr, PRINT_CC_5(to, arg, prefix, "free", allctr->calls.this_free); PRINT_CC_5(to, arg, prefix, "realloc", allctr->calls.this_realloc); - PRINT_CC_4(to, arg, "sbmbc_alloc", allctr->calls.sbmbc_alloc); - PRINT_CC_4(to, arg, "sbmbc_free", allctr->calls.sbmbc_free); - #if HAVE_ERTS_MSEG PRINT_CC_4(to, arg, "mseg_alloc", allctr->calls.mseg_alloc); PRINT_CC_4(to, arg, "mseg_dealloc", allctr->calls.mseg_dealloc); @@ -2983,50 +4255,42 @@ info_calls(Allctr_t *allctr, add_3tup(hpp, szp, &res, am.sys_realloc, - bld_unstable_uint(hpp, szp, allctr->calls.sys_realloc.giga_no), - bld_unstable_uint(hpp, szp, allctr->calls.sys_realloc.no)); + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.sys_realloc)), + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.sys_realloc))); add_3tup(hpp, szp, &res, am.sys_free, - bld_unstable_uint(hpp, szp, allctr->calls.sys_free.giga_no), - bld_unstable_uint(hpp, szp, allctr->calls.sys_free.no)); + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.sys_free)), + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.sys_free))); add_3tup(hpp, szp, &res, am.sys_alloc, - bld_unstable_uint(hpp, szp, allctr->calls.sys_alloc.giga_no), - bld_unstable_uint(hpp, szp, allctr->calls.sys_alloc.no)); + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.sys_alloc)), + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.sys_alloc))); #if HAVE_ERTS_MSEG 
add_3tup(hpp, szp, &res, am.mseg_realloc, - bld_unstable_uint(hpp, szp, allctr->calls.mseg_realloc.giga_no), - bld_unstable_uint(hpp, szp, allctr->calls.mseg_realloc.no)); + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.mseg_realloc)), + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.mseg_realloc))); add_3tup(hpp, szp, &res, am.mseg_dealloc, - bld_unstable_uint(hpp, szp, allctr->calls.mseg_dealloc.giga_no), - bld_unstable_uint(hpp, szp, allctr->calls.mseg_dealloc.no)); + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.mseg_dealloc)), + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.mseg_dealloc))); add_3tup(hpp, szp, &res, am.mseg_alloc, - bld_unstable_uint(hpp, szp, allctr->calls.mseg_alloc.giga_no), - bld_unstable_uint(hpp, szp, allctr->calls.mseg_alloc.no)); + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.mseg_alloc)), + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.mseg_alloc))); #endif add_3tup(hpp, szp, &res, - am.sbmbc_free, - bld_unstable_uint(hpp, szp, allctr->calls.sbmbc_free.giga_no), - bld_unstable_uint(hpp, szp, allctr->calls.sbmbc_free.no)); - add_3tup(hpp, szp, &res, - am.sbmbc_alloc, - bld_unstable_uint(hpp, szp, allctr->calls.sbmbc_alloc.giga_no), - bld_unstable_uint(hpp, szp, allctr->calls.sbmbc_alloc.no)); - add_3tup(hpp, szp, &res, allctr->name.realloc, - bld_unstable_uint(hpp, szp, allctr->calls.this_realloc.giga_no), - bld_unstable_uint(hpp, szp, allctr->calls.this_realloc.no)); + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.this_realloc)), + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.this_realloc))); add_3tup(hpp, szp, &res, allctr->name.free, - bld_unstable_uint(hpp, szp, allctr->calls.this_free.giga_no), - bld_unstable_uint(hpp, szp, allctr->calls.this_free.no)); + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.this_free)), + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.this_free))); add_3tup(hpp, szp, &res, allctr->name.alloc, - bld_unstable_uint(hpp, szp, allctr->calls.this_alloc.giga_no), - bld_unstable_uint(hpp, szp, allctr->calls.this_alloc.no)); + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.this_alloc)), + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.this_alloc))); } return res; @@ -3040,6 +4304,7 @@ info_options(Allctr_t *allctr, Uint *szp) { Eterm res = THE_NON_VALUE; + int acul; if (!allctr) { if (print_to_p) @@ -3051,6 +4316,12 @@ info_options(Allctr_t *allctr, return res; } +#ifdef ERTS_SMP + acul = allctr->cpool.util_limit; +#else + acul = 0; +#endif + if (print_to_p) { char topt[21]; /* Enough for any 64-bit integer */ if (allctr->t) @@ -3079,9 +4350,8 @@ info_options(Allctr_t *allctr, #endif "option lmbcs: %beu\n" "option smbcs: %beu\n" - "option mbcgs: %beu\n" - "option sbmbcs: %beu\n" - "option sbmbct: %beu\n", + "option mbcgs: %beu\n", + "option acul: %d\n", topt, allctr->ramv ? 
"true" : "false", #if HALFWORD_HEAP @@ -3102,8 +4372,7 @@ info_options(Allctr_t *allctr, allctr->largest_mbc_size, allctr->smallest_mbc_size, allctr->mbc_growth_stages, - allctr->sbmbc_size, - allctr->sbmbc_threshold); + acul); } res = (*allctr->info_options)(allctr, "option ", print_to_p, print_to_arg, @@ -3111,11 +4380,8 @@ info_options(Allctr_t *allctr, if (hpp || szp) { add_2tup(hpp, szp, &res, - am.sbmbct, - bld_uint(hpp, szp, allctr->sbmbc_threshold)); - add_2tup(hpp, szp, &res, - am.sbmbcs, - bld_uint(hpp, szp, allctr->sbmbc_size)); + am.acul, + bld_uint(hpp, szp, (UWord) acul)); add_2tup(hpp, szp, &res, am.mbcgs, bld_uint(hpp, szp, allctr->mbc_growth_stages)); @@ -3261,13 +4527,17 @@ erts_alcu_info_options(Allctr_t *allctr, Eterm erts_alcu_sz_info(Allctr_t *allctr, + int internal, int begin_max_period, int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp) { - Eterm res, sbmbcs, mbcs, sbcs, fix = THE_NON_VALUE; + Eterm res, mbcs, sbcs, fix = THE_NON_VALUE; +#ifdef ERTS_SMP + Eterm mbcs_pool; +#endif res = THE_NON_VALUE; @@ -3296,30 +4566,35 @@ erts_alcu_sz_info(Allctr_t *allctr, = allctr->sbcs.curr.norm.mseg.no + allctr->sbcs.curr.norm.sys_alloc.no; allctr->sbcs.blocks.max.no = allctr->sbcs.max.no; - update_max_ever_values(&allctr->sbmbcs); update_max_ever_values(&allctr->mbcs); update_max_ever_values(&allctr->sbcs); if (allctr->fix) - fix = sz_info_fix(allctr, print_to_p, print_to_arg, hpp, szp); - sbmbcs = sz_info_carriers(allctr, &allctr->sbmbcs, "sbmbcs ", print_to_p, - print_to_arg, hpp, szp); + fix = sz_info_fix(allctr, internal, print_to_p, print_to_arg, hpp, szp); mbcs = sz_info_carriers(allctr, &allctr->mbcs, "mbcs ", print_to_p, print_to_arg, hpp, szp); +#ifdef ERTS_SMP + if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) + mbcs_pool = info_cpool(allctr, 1, "mbcs_pool ", print_to_p, + print_to_arg, hpp, szp); + else + mbcs_pool = THE_NON_VALUE; /* shut up annoying warning... 
*/ +#endif sbcs = sz_info_carriers(allctr, &allctr->sbcs, "sbcs ", print_to_p, print_to_arg, hpp, szp); if (hpp || szp) { res = NIL; add_2tup(hpp, szp, &res, am.sbcs, sbcs); +#ifdef ERTS_SMP + if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) + add_2tup(hpp, szp, &res, am.mbcs_pool, mbcs_pool); +#endif add_2tup(hpp, szp, &res, am.mbcs, mbcs); - add_2tup(hpp, szp, &res, am.sbmbcs, sbmbcs); - if (allctr->fix) - add_2tup(hpp, szp, &res, am.fix_types, fix); + add_fix_types(allctr, internal, hpp, szp, &res, fix); } if (begin_max_period) { - reset_max_values(&allctr->sbmbcs); reset_max_values(&allctr->mbcs); reset_max_values(&allctr->sbcs); } @@ -3338,13 +4613,17 @@ erts_alcu_sz_info(Allctr_t *allctr, Eterm erts_alcu_info(Allctr_t *allctr, + int internal, int begin_max_period, int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp) { - Eterm res, sett, sbmbcs, mbcs, sbcs, calls, fix = THE_NON_VALUE; + Eterm res, sett, mbcs, sbcs, calls, fix = THE_NON_VALUE; +#ifdef ERTS_SMP + Eterm mbcs_pool; +#endif res = THE_NON_VALUE; @@ -3373,7 +4652,6 @@ erts_alcu_info(Allctr_t *allctr, = allctr->sbcs.curr.norm.mseg.no + allctr->sbcs.curr.norm.sys_alloc.no; allctr->sbcs.blocks.max.no = allctr->sbcs.max.no; - update_max_ever_values(&allctr->sbmbcs); update_max_ever_values(&allctr->mbcs); update_max_ever_values(&allctr->sbcs); @@ -3387,11 +4665,16 @@ erts_alcu_info(Allctr_t *allctr, sett = info_options(allctr, print_to_p, print_to_arg, hpp, szp); if (allctr->fix) - fix = sz_info_fix(allctr, print_to_p, print_to_arg, hpp, szp); - sbmbcs = info_carriers(allctr, &allctr->sbmbcs, "sbmbcs ", print_to_p, - print_to_arg, hpp, szp); + fix = sz_info_fix(allctr, internal, print_to_p, print_to_arg, hpp, szp); mbcs = info_carriers(allctr, &allctr->mbcs, "mbcs ", print_to_p, print_to_arg, hpp, szp); +#ifdef ERTS_SMP + if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) + mbcs_pool = info_cpool(allctr, 0, "mbcs_pool ", print_to_p, + print_to_arg, hpp, szp); + else + mbcs_pool = THE_NON_VALUE; /* shut up annoying warning... 
*/ +#endif sbcs = info_carriers(allctr, &allctr->sbcs, "sbcs ", print_to_p, print_to_arg, hpp, szp); calls = info_calls(allctr, print_to_p, print_to_arg, hpp, szp); @@ -3401,10 +4684,12 @@ erts_alcu_info(Allctr_t *allctr, add_2tup(hpp, szp, &res, am.calls, calls); add_2tup(hpp, szp, &res, am.sbcs, sbcs); +#ifdef ERTS_SMP + if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) + add_2tup(hpp, szp, &res, am.mbcs_pool, mbcs_pool); +#endif add_2tup(hpp, szp, &res, am.mbcs, mbcs); - add_2tup(hpp, szp, &res, am.sbmbcs, sbmbcs); - if (allctr->fix) - add_2tup(hpp, szp, &res, am.fix_types, fix); + add_fix_types(allctr, internal, hpp, szp, &res, fix); add_2tup(hpp, szp, &res, am.options, sett); add_3tup(hpp, szp, &res, am.versions, @@ -3413,7 +4698,6 @@ erts_alcu_info(Allctr_t *allctr, } if (begin_max_period) { - reset_max_values(&allctr->sbmbcs); reset_max_values(&allctr->mbcs); reset_max_values(&allctr->sbcs); } @@ -3441,22 +4725,37 @@ erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size, ErtsAlcUFixInfo_t * size->carriers = allctr->mbcs.curr.norm.mseg.size; size->carriers += allctr->mbcs.curr.norm.sys_alloc.size; - size->carriers += allctr->sbmbcs.curr.small_block.size; size->carriers += allctr->sbcs.curr.norm.mseg.size; size->carriers += allctr->sbcs.curr.norm.sys_alloc.size; size->blocks = allctr->mbcs.blocks.curr.size; - size->blocks += allctr->sbmbcs.blocks.curr.size; size->blocks += allctr->sbcs.blocks.curr.size; +#ifdef ERTS_SMP + if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) { + UWord csz, bsz; + cpool_read_stat(allctr, NULL, &csz, NULL, &bsz); + size->blocks += bsz; + size->carriers += csz; + } +#endif + if (fi) { int ix; for (ix = 0; ix < fisz; ix++) { if (allctr->fix) { - fi[ix].allocated += (allctr->fix[ix].type_size - * allctr->fix[ix].allocated); - fi[ix].used += (allctr->fix[ix].type_size - * allctr->fix[ix].used); + if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) { + fi[ix].allocated += (allctr->fix[ix].type_size + * allctr->fix[ix].u.cpool.allocated); + fi[ix].used += (allctr->fix[ix].type_size + * allctr->fix[ix].u.cpool.used); + } + else { + fi[ix].allocated += (allctr->fix[ix].type_size + * allctr->fix[ix].u.nocpool.allocated); + fi[ix].used += (allctr->fix[ix].type_size + * allctr->fix[ix].u.nocpool.used); + } } } } @@ -3474,7 +4773,6 @@ do_erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size) { Allctr_t *allctr = (Allctr_t *) extra; void *res; - ErtsAlcFixList_t *fix; ASSERT(initialized); @@ -3492,57 +4790,21 @@ do_erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size) INC_CC(allctr->calls.this_alloc); - fix = allctr->fix; - if (fix) { - int ix = type - ERTS_ALC_N_MIN_A_FIXED_SIZE; - ASSERT((unsigned)ix < ERTS_ALC_NO_FIXED_SIZES); - ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1); - ASSERT(size <= fix[ix].type_size); - fix[ix].used++; - res = fix[ix].list; - if (res) { - fix[ix].list_size--; - fix[ix].list = *((void **) res); - if (fix[ix].list && fix[ix].allocated > fix[ix].limit) { - void *p = fix[ix].list; - Block_t *blk; - fix[ix].list = *((void **) p); - fix[ix].list_size--; - blk = UMEM2BLK(p); - if (IS_SBC_BLK(blk)) - destroy_carrier(allctr, blk); - else - mbc_free(allctr, p); - fix[ix].allocated--; - } - ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0); - return res; - } - size = fix[ix].type_size; - if (fix[ix].limit < fix[ix].used) - fix[ix].limit = fix[ix].used; - if (fix[ix].max_used < fix[ix].used) - fix[ix].max_used = fix[ix].used; - fix[ix].allocated++; + if (allctr->fix) { + if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) + return fix_cpool_alloc(allctr, type, size); + else + return 
fix_nocpool_alloc(allctr, type, size); } if (size >= allctr->sbc_threshold) { Block_t *blk; -#ifdef ERTS_SMP - if (allctr->dd.use) - ERTS_ALCU_HANDLE_DD_IN_OP(allctr, 1); -#endif blk = create_carrier(allctr, size, CFLG_SBC); res = blk ? BLK2UMEM(blk) : NULL; } else res = mbc_alloc(allctr, size); - if (!res && fix) { - int ix = type - ERTS_ALC_N_MIN_A_FIXED_SIZE; - fix[ix].allocated--; - fix[ix].used--; - } return res; } @@ -3611,9 +4873,22 @@ erts_alcu_alloc_thr_pref(ErtsAlcType_t type, void *extra, Uint size) if (pref_allctr->thread_safe) erts_mtx_lock(&pref_allctr->mutex); +#ifdef ERTS_SMP + ASSERT(pref_allctr->dd.use); + ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1); +#endif + ERTS_ALCU_DBG_CHK_THR_ACCESS(pref_allctr); res = do_erts_alcu_alloc(type, pref_allctr, size); + +#ifdef ERTS_SMP + if (!res && ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1)) { + /* Cleaned up a bit more; try one more time... */ + res = do_erts_alcu_alloc(type, pref_allctr, size); + } +#endif + if (pref_allctr->thread_safe) erts_mtx_unlock(&pref_allctr->mutex); @@ -3630,9 +4905,9 @@ erts_alcu_alloc_thr_pref(ErtsAlcType_t type, void *extra, Uint size) /* ------------------------------------------------------------------------- */ static ERTS_INLINE void -do_erts_alcu_free(ErtsAlcType_t type, void *extra, void *p) +do_erts_alcu_free(ErtsAlcType_t type, void *extra, void *p, + Carrier_t **busy_pcrr_pp) { - int ix; Allctr_t *allctr = (Allctr_t *) extra; ASSERT(initialized); @@ -3644,57 +4919,28 @@ do_erts_alcu_free(ErtsAlcType_t type, void *extra, void *p) ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr); if (p) { - ErtsAlcFixList_t *fix = allctr->fix; - Block_t *blk; INC_CC(allctr->calls.this_free); - if (fix) { - ix = type - ERTS_ALC_N_MIN_A_FIXED_SIZE; - ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1); - fix[ix].used--; - if (fix[ix].allocated < fix[ix].limit - && fix[ix].list_size < ERTS_ALCU_FIX_MAX_LIST_SZ) { - *((void **) p) = fix[ix].list; - fix[ix].list = p; - fix[ix].list_size++; - if (!allctr->fix_shrink_scheduled) { - allctr->fix_shrink_scheduled = 1; - erts_set_aux_work_timeout( - allctr->ix, - (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM - | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC), - 1); - } - ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0); - return; - } - fix[ix].allocated--; - if (fix[ix].list && fix[ix].allocated > fix[ix].limit) { - blk = UMEM2BLK(p); - if (IS_SBC_BLK(blk)) - destroy_carrier(allctr, blk); - else - mbc_free(allctr, p); - p = fix[ix].list; - fix[ix].list = *((void **) p); - fix[ix].list_size--; - fix[ix].allocated--; - } + if (allctr->fix) { + if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) + fix_cpool_free(allctr, type, p, busy_pcrr_pp); + else + fix_nocpool_free(allctr, type, p); + } + else { + Block_t *blk = UMEM2BLK(p); + if (IS_SBC_BLK(blk)) + destroy_carrier(allctr, blk, NULL); + else + mbc_free(allctr, p, busy_pcrr_pp); } - - blk = UMEM2BLK(p); - if (IS_SBC_BLK(blk)) - destroy_carrier(allctr, blk); - else - mbc_free(allctr, p); - ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0); } } void erts_alcu_free(ErtsAlcType_t type, void *extra, void *p) { - do_erts_alcu_free(type, extra, p); + do_erts_alcu_free(type, extra, p, NULL); } #ifdef USE_THREADS @@ -3704,7 +4950,7 @@ erts_alcu_free_ts(ErtsAlcType_t type, void *extra, void *p) { Allctr_t *allctr = (Allctr_t *) extra; erts_mtx_lock(&allctr->mutex); - do_erts_alcu_free(type, extra, p); + do_erts_alcu_free(type, extra, p, NULL); erts_mtx_unlock(&allctr->mutex); } @@ -3726,7 +4972,7 @@ erts_alcu_free_thr_spec(ErtsAlcType_t type, void *extra, void *p) if (allctr->thread_safe) 
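
/*
 * In erts_alcu_alloc_thr_pref() above, a failed allocation now triggers
 * ERTS_ALCU_HANDLE_DD_IN_OP() -- processing of the delayed-dealloc queue --
 * and, if that released anything, exactly one retry.  A minimal sketch of
 * this "drain deferred frees, then retry once" pattern follows; the names
 * are illustrative stand-ins, not erts functions.
 */
#include <stdlib.h>

static void *pending_free;              /* one deferred block, kept small for the sketch */

static void defer_free(void *p)         /* a free arriving from the "wrong" thread would be queued here */
{
    pending_free = p;
}

static int drain_deferred(void)         /* returns nonzero if memory was released */
{
    if (pending_free) {
        free(pending_free);
        pending_free = NULL;
        return 1;
    }
    return 0;
}

static void *alloc_with_retry(size_t size)
{
    void *p = malloc(size);
    if (!p && drain_deferred()) {
        /* Deferred frees released some memory; try one more time. */
        p = malloc(size);
    }
    return p;
}
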
erts_mtx_lock(&allctr->mutex); - do_erts_alcu_free(type, allctr, p); + do_erts_alcu_free(type, allctr, p, NULL); if (allctr->thread_safe) erts_mtx_unlock(&allctr->mutex); @@ -3736,10 +4982,12 @@ void erts_alcu_free_thr_pref(ErtsAlcType_t type, void *extra, void *p) { if (p) { + Carrier_t *busy_pcrr_p; Allctr_t *pref_allctr, *used_allctr; pref_allctr = get_pref_allctr(extra); - used_allctr = get_used_allctr(extra, p, NULL); + used_allctr = get_used_allctr(pref_allctr, ERTS_ALC_TS_PREF_LOCK_IF_USED, + p, NULL, &busy_pcrr_p); if (pref_allctr != used_allctr) enqueue_dealloc_other_instance(type, used_allctr, @@ -3747,12 +4995,11 @@ erts_alcu_free_thr_pref(ErtsAlcType_t type, void *extra, void *p) (used_allctr->dd.ix - pref_allctr->dd.ix)); else { - if (used_allctr->thread_safe) - erts_mtx_lock(&used_allctr->mutex); ERTS_ALCU_DBG_CHK_THR_ACCESS(used_allctr); - do_erts_alcu_free(type, used_allctr, p); - if (used_allctr->thread_safe) - erts_mtx_unlock(&used_allctr->mutex); + do_erts_alcu_free(type, used_allctr, p, &busy_pcrr_p); + clear_busy_pool_carrier(used_allctr, busy_pcrr_p); + if (pref_allctr->thread_safe) + erts_mtx_unlock(&pref_allctr->mutex); } } } @@ -3768,7 +5015,8 @@ do_erts_alcu_realloc(ErtsAlcType_t type, void *extra, void *p, Uint size, - Uint32 alcu_flgs) + Uint32 alcu_flgs, + Carrier_t **busy_pcrr_pp) { Allctr_t *allctr = (Allctr_t *) extra; Block_t *blk; @@ -3793,7 +5041,7 @@ do_erts_alcu_realloc(ErtsAlcType_t type, #if ALLOC_ZERO_EQ_NULL if (!size) { ASSERT(p); - do_erts_alcu_free(type, extra, p); + do_erts_alcu_free(type, extra, p, busy_pcrr_pp); INC_CC(allctr->calls.this_realloc); DEC_CC(allctr->calls.this_free); return NULL; @@ -3804,32 +5052,9 @@ do_erts_alcu_realloc(ErtsAlcType_t type, blk = UMEM2BLK(p); - if (allctr->sbmbc_threshold > 0) { - Uint old_sz, new_sz, lim; - lim = allctr->sbmbc_threshold; - old_sz = BLK_SZ(blk); - new_sz = UMEMSZ2BLKSZ(allctr, size); - if ((old_sz < lim && lim <= new_sz) - || (new_sz < lim && lim <= old_sz)) { - /* *Need* to move it... 
*/ - - INC_CC(allctr->calls.this_realloc); - res = do_erts_alcu_alloc(type, extra, size); - DEC_CC(allctr->calls.this_alloc); - - sys_memcpy(res, p, MIN(size, old_sz - ABLK_HDR_SZ)); - - do_erts_alcu_free(type, extra, p); - DEC_CC(allctr->calls.this_free); - return res; - } - if (old_sz < lim) - alcu_flgs |= ERTS_ALCU_FLG_SBMBC; - } - if (size < allctr->sbc_threshold) { if (IS_MBC_BLK(blk)) - res = mbc_realloc(allctr, p, size, alcu_flgs); + res = mbc_realloc(allctr, p, size, alcu_flgs, busy_pcrr_pp); else { Uint used_sz = SBC_HEADER_SIZE + ABLK_HDR_SZ + size; Uint crr_sz; @@ -3868,16 +5093,12 @@ do_erts_alcu_realloc(ErtsAlcType_t type, sys_memcpy((void*) res, (void*) p, MIN(SBC_BLK_SZ(blk) - ABLK_HDR_SZ, size)); - destroy_carrier(allctr, blk); + destroy_carrier(allctr, blk, NULL); } } } else { Block_t *new_blk; -#ifdef ERTS_SMP - if (allctr->dd.use) - ERTS_ALCU_HANDLE_DD_IN_OP(allctr, 1); -#endif if(IS_SBC_BLK(blk)) { do_carrier_resize: #if HALFWORD_HEAP @@ -3896,7 +5117,7 @@ do_erts_alcu_realloc(ErtsAlcType_t type, sys_memcpy((void *) res, (void *) p, MIN(MBC_ABLK_SZ(blk) - ABLK_HDR_SZ, size)); - mbc_free(allctr, p); + mbc_free(allctr, p, busy_pcrr_pp); } else res = NULL; @@ -3910,7 +5131,7 @@ void * erts_alcu_realloc(ErtsAlcType_t type, void *extra, void *p, Uint size) { void *res; - res = do_erts_alcu_realloc(type, extra, p, size, 0); + res = do_erts_alcu_realloc(type, extra, p, size, 0, NULL); DEBUG_CHECK_ALIGNMENT(res); return res; } @@ -3931,7 +5152,7 @@ erts_alcu_realloc_mv(ErtsAlcType_t type, void *extra, void *p, Uint size) if (cpy_size > size) cpy_size = size; sys_memcpy(res, p, cpy_size); - do_erts_alcu_free(type, extra, p); + do_erts_alcu_free(type, extra, p, NULL); } DEBUG_CHECK_ALIGNMENT(res); return res; @@ -3946,7 +5167,7 @@ erts_alcu_realloc_ts(ErtsAlcType_t type, void *extra, void *ptr, Uint size) Allctr_t *allctr = (Allctr_t *) extra; void *res; erts_mtx_lock(&allctr->mutex); - res = do_erts_alcu_realloc(type, extra, ptr, size, 0); + res = do_erts_alcu_realloc(type, extra, ptr, size, 0, NULL); erts_mtx_unlock(&allctr->mutex); DEBUG_CHECK_ALIGNMENT(res); return res; @@ -3970,7 +5191,7 @@ erts_alcu_realloc_mv_ts(ErtsAlcType_t type, void *extra, void *p, Uint size) if (cpy_size > size) cpy_size = size; sys_memcpy(res, p, cpy_size); - do_erts_alcu_free(type, extra, p); + do_erts_alcu_free(type, extra, p, NULL); } erts_mtx_unlock(&allctr->mutex); DEBUG_CHECK_ALIGNMENT(res); @@ -3997,7 +5218,7 @@ erts_alcu_realloc_thr_spec(ErtsAlcType_t type, void *extra, if (allctr->thread_safe) erts_mtx_lock(&allctr->mutex); - res = do_erts_alcu_realloc(type, allctr, ptr, size, 0); + res = do_erts_alcu_realloc(type, allctr, ptr, size, 0, NULL); if (allctr->thread_safe) erts_mtx_unlock(&allctr->mutex); @@ -4040,7 +5261,7 @@ erts_alcu_realloc_mv_thr_spec(ErtsAlcType_t type, void *extra, if (cpy_size > size) cpy_size = size; sys_memcpy(res, ptr, cpy_size); - do_erts_alcu_free(type, allctr, ptr); + do_erts_alcu_free(type, allctr, ptr, NULL); if (allctr->thread_safe) erts_mtx_unlock(&allctr->mutex); } @@ -4057,40 +5278,64 @@ realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size, void *res; Allctr_t *pref_allctr, *used_allctr; UWord old_user_size; + Carrier_t *busy_pcrr_p; +#ifdef ERTS_SMP + int retried; +#endif if (!p) return erts_alcu_alloc_thr_pref(type, extra, size); pref_allctr = get_pref_allctr(extra); - used_allctr = get_used_allctr(extra, p, &old_user_size); + + if (pref_allctr->thread_safe) + erts_mtx_lock(&pref_allctr->mutex); + +#ifdef ERTS_SMP + 
ASSERT(pref_allctr->dd.use); + ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1); + retried = 0; +restart: +#endif + + used_allctr = get_used_allctr(pref_allctr, ERTS_ALC_TS_PREF_LOCK_NO, + p, &old_user_size, &busy_pcrr_p); ASSERT(used_allctr && pref_allctr); if (!force_move && used_allctr == pref_allctr) { - if (used_allctr->thread_safe) - erts_mtx_lock(&used_allctr->mutex); ERTS_ALCU_DBG_CHK_THR_ACCESS(used_allctr); res = do_erts_alcu_realloc(type, used_allctr, p, size, - 0); - if (used_allctr->thread_safe) - erts_mtx_unlock(&used_allctr->mutex); + 0, + &busy_pcrr_p); + clear_busy_pool_carrier(used_allctr, busy_pcrr_p); +#ifdef ERTS_SMP + if (!res && !retried && ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1)) { + /* Cleaned up a bit more; try one more time... */ + retried = 1; + goto restart; + } +#endif + if (pref_allctr->thread_safe) + erts_mtx_unlock(&pref_allctr->mutex); } else { - if (pref_allctr->thread_safe) - erts_mtx_lock(&pref_allctr->mutex); res = do_erts_alcu_alloc(type, pref_allctr, size); - if (pref_allctr->thread_safe && used_allctr != pref_allctr) { - erts_mtx_unlock(&pref_allctr->mutex); - } - if (res) { - DEBUG_CHECK_ALIGNMENT(res); + if (!res) + goto unlock_ts_return; + else { - sys_memcpy(res, p, MIN(size,old_user_size)); + DEBUG_CHECK_ALIGNMENT(res); if (used_allctr != pref_allctr) { + if (pref_allctr->thread_safe) + erts_mtx_unlock(&pref_allctr->mutex); + + sys_memcpy(res, p, MIN(size, old_user_size)); + enqueue_dealloc_other_instance(type, used_allctr, p, @@ -4098,8 +5343,14 @@ realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size, - pref_allctr->dd.ix)); } else { - do_erts_alcu_free(type, used_allctr, p); + + sys_memcpy(res, p, MIN(size, old_user_size)); + + do_erts_alcu_free(type, used_allctr, p, &busy_pcrr_p); ASSERT(pref_allctr == used_allctr); + clear_busy_pool_carrier(used_allctr, busy_pcrr_p); + + unlock_ts_return: if (pref_allctr->thread_safe) erts_mtx_unlock(&pref_allctr->mutex); } @@ -4213,6 +5464,17 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) if (sz > allctr->min_block_size) allctr->min_block_size = sz; } + + allctr->cpool.dc_list.first = NULL; + allctr->cpool.dc_list.last = NULL; + allctr->cpool.abandon_limit = 0; + allctr->cpool.disable_abandon = 0; + erts_atomic_init_nob(&allctr->cpool.stat.blocks_size, 0); + erts_atomic_init_nob(&allctr->cpool.stat.no_blocks, 0); + erts_atomic_init_nob(&allctr->cpool.stat.carriers_size, 0); + erts_atomic_init_nob(&allctr->cpool.stat.no_carriers, 0); + allctr->cpool.check_limit_count = ERTS_ALC_CPOOL_CHECK_LIMIT_COUNT; + allctr->cpool.util_limit = init->acul; #endif allctr->sbc_threshold = init->sbct; @@ -4232,27 +5494,6 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) } #endif - - - allctr->sbmbc_threshold = init->sbmbct; - - if (!erts_have_sbmbc_alloc - || ERTS_IS_SBMBC_ALLOCATOR_NO__(allctr->alloc_no)) - allctr->sbmbc_threshold = 0; - - if (!allctr->sbmbc_threshold) - allctr->sbmbc_size = 0; - else { - Uint min_size; - allctr->sbmbc_size = init->sbmbcs; - min_size = allctr->sbmbc_threshold; - min_size += allctr->min_block_size; - min_size += MBC_HEADER_SIZE(allctr); - if (allctr->sbmbc_size < min_size) - allctr->sbmbc_size = min_size; - } - - #if HAVE_ERTS_MSEG if (allctr->mseg_opt.abs_shrink_th > ~((UWord) 0) / 100) allctr->mseg_opt.abs_shrink_th = ~((UWord) 0) / 100; @@ -4264,16 +5505,12 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) #ifdef ERTS_ENABLE_LOCK_COUNT erts_mtx_init_x_opt(&allctr->mutex, - ERTS_IS_SBMBC_ALLOCATOR_NO__(allctr->alloc_no) - ? 
"sbmbc_alloc" - : "alcu_allocator", + "alcu_allocator", make_small(allctr->alloc_no), ERTS_LCNT_LT_ALLOC); #else erts_mtx_init_x(&allctr->mutex, - ERTS_IS_SBMBC_ALLOCATOR_NO__(allctr->alloc_no) - ? "sbmbc_alloc" - : "alcu_allocator", + "alcu_allocator", make_small(allctr->alloc_no)); #endif /*ERTS_ENABLE_LOCK_COUNT*/ @@ -4292,6 +5529,8 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) if (!allctr->get_next_mbc_size) allctr->get_next_mbc_size = get_next_mbc_size; + if (allctr->mbc_header_size < sizeof(Carrier_t)) + goto error; #ifdef ERTS_SMP allctr->dd.use = 0; if (init->tpref) { @@ -4300,6 +5539,9 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) allctr->dd.ix = init->ix; } #endif + allctr->mbc_header_size = (UNIT_CEILING(allctr->mbc_header_size + + ABLK_HDR_SZ) + - ABLK_HDR_SZ); if (allctr->main_carrier_size) { Block_t *blk; @@ -4308,6 +5550,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) allctr->main_carrier_size, CFLG_MBC | CFLG_FORCE_SIZE + | CFLG_NO_CPOOL #if !HALFWORD_HEAP && !HAVE_SUPER_ALIGNED_MB_CARRIERS | CFLG_FORCE_SYS_ALLOC #endif @@ -4315,7 +5558,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) if (!blk) goto error; - (*allctr->link_free_block)(allctr, blk, 0); + (*allctr->link_free_block)(allctr, blk); HARD_CHECK_BLK_CARRIER(allctr, blk); @@ -4326,16 +5569,24 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) allctr->fix = init->fix; allctr->fix_shrink_scheduled = 0; for (i = 0; i < ERTS_ALC_NO_FIXED_SIZES; i++) { - allctr->fix[i].max_used = 0; - allctr->fix[i].limit = 0; allctr->fix[i].type_size = init->fix_type_size[i]; allctr->fix[i].list_size = 0; allctr->fix[i].list = NULL; - allctr->fix[i].allocated = 0; - allctr->fix[i].used = 0; #ifdef ERTS_SMP ASSERT(allctr->fix[i].type_size >= sizeof(ErtsAllctrFixDDBlock_t)); #endif + if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) { + allctr->fix[i].u.cpool.min_list_size = 0; + allctr->fix[i].u.cpool.shrink_list = 0; + allctr->fix[i].u.cpool.allocated = 0; + allctr->fix[i].u.cpool.used = 0; + } + else { + allctr->fix[i].u.nocpool.max_used = 0; + allctr->fix[i].u.nocpool.limit = 0; + allctr->fix[i].u.nocpool.allocated = 0; + allctr->fix[i].u.nocpool.used = 0; + } } } @@ -4360,11 +5611,9 @@ erts_alcu_stop(Allctr_t *allctr) allctr->stopped = 1; while (allctr->sbc_list.first) - destroy_carrier(allctr, SBC2BLK(allctr, allctr->sbc_list.first)); + destroy_carrier(allctr, SBC2BLK(allctr, allctr->sbc_list.first), NULL); while (allctr->mbc_list.first) - destroy_carrier(allctr, MBC_TO_FIRST_BLK(allctr, allctr->mbc_list.first)); - while (allctr->sbmbc_list.first) - destroy_sbmbc(allctr, MBC_TO_FIRST_BLK(allctr, allctr->sbmbc_list.first)); + destroy_carrier(allctr, MBC_TO_FIRST_BLK(allctr, allctr->mbc_list.first), NULL); #ifdef USE_THREADS if (allctr->thread_safe) @@ -4378,6 +5627,14 @@ erts_alcu_stop(Allctr_t *allctr) void erts_alcu_init(AlcUInit_t *init) { +#ifdef ERTS_SMP + int i; + for (i = 0; i <= ERTS_ALC_A_MAX; i++) { + ErtsAlcCPoolData_t *sentinel = &carrier_pool[i].sentinel; + erts_atomic_init_nob(&sentinel->next, (erts_aint_t) sentinel); + erts_atomic_init_nob(&sentinel->prev, (erts_aint_t) sentinel); + } +#endif ASSERT(SBC_BLK_SZ_MASK == MBC_FBLK_SZ_MASK); /* see BLK_SZ */ #if HAVE_ERTS_MSEG ASSERT(erts_mseg_unit_size() == MSEG_UNIT_SZ); @@ -4440,8 +5697,35 @@ erts_alcu_test(UWord op, UWord a1, UWord a2) case 0x019: return (UWord) PREV_BLK((Block_t *) a1); case 0x01a: return (UWord) IS_MBC_FIRST_BLK((Allctr_t*)a1, (Block_t *) a2); case 0x01b: return (UWord) sizeof(Unit_t); + case 0x01c: return 
(unsigned long) BLK_TO_MBC((Block_t*) a1); + case 0x01d: ((Allctr_t*) a1)->add_mbc((Allctr_t*)a1, (Carrier_t*)a2); break; + case 0x01e: ((Allctr_t*) a1)->remove_mbc((Allctr_t*)a1, (Carrier_t*)a2); break; +#ifdef ERTS_SMP + case 0x01f: return (UWord) sizeof(ErtsAlcCrrPool_t); + case 0x020: + SET_CARRIER_HDR((Carrier_t *) a2, 0, SCH_SYS_ALLOC|SCH_MBC, (Allctr_t *) a1); + cpool_init_carrier_data((Allctr_t *) a1, (Carrier_t *) a2); + return (UWord) a2; + case 0x021: + cpool_insert((Allctr_t *) a1, (Carrier_t *) a2); + return (UWord) a2; + case 0x022: + cpool_delete((Allctr_t *) a1, (Allctr_t *) a1, (Carrier_t *) a2); + return (UWord) a2; + case 0x023: return (UWord) cpool_is_empty((Allctr_t *) a1); + case 0x024: return (UWord) cpool_dbg_is_in_pool((Allctr_t *) a1, (Carrier_t *) a2); +#else + case 0x01f: return (UWord) 0; + case 0x020: return (UWord) 0; + case 0x021: return (UWord) 0; + case 0x022: return (UWord) 0; + case 0x023: return (UWord) 0; + case 0x024: return (UWord) 0; +#endif + default: ASSERT(0); return ~((UWord) 0); } + return 0; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\ @@ -4449,6 +5733,20 @@ erts_alcu_test(UWord op, UWord a1, UWord a2) \* */ void +erts_alcu_assert_failed(char* expr, char* file, int line, char *func) +{ + fflush(stdout); + fprintf(stderr, "%s:%d:%s(): Assertion failed: %s\n", + file, line, func, expr); + fflush(stderr); +#if defined(__WIN__) || defined(__WIN32__) + DebugBreak(); +#else + abort(); +#endif +} + +void erts_alcu_verify_unused(Allctr_t *allctr) { UWord no; @@ -4456,12 +5754,10 @@ erts_alcu_verify_unused(Allctr_t *allctr) no = allctr->sbcs.curr.norm.mseg.no; no += allctr->sbcs.curr.norm.sys_alloc.no; no += allctr->mbcs.blocks.curr.no; - no += allctr->sbmbcs.blocks.curr.no; if (no) { UWord sz = allctr->sbcs.blocks.curr.size; sz += allctr->mbcs.blocks.curr.size; - sz += allctr->sbmbcs.blocks.curr.size; erl_exit(ERTS_ABORT_EXIT, "%salloc() used when expected to be unused!\n" "Total amount of blocks allocated: %bpu\n" @@ -4595,7 +5891,7 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk) cl = &allctr->mbc_list; } -#if 0 /* FIXIT sbmbc */ +#ifdef DEBUG if (cl->first == crr) { ASSERT(!crr->prev); } diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h index 4773598561..5e52b9b733 100644 --- a/erts/emulator/beam/erl_alloc_util.h +++ b/erts/emulator/beam/erl_alloc_util.h @@ -55,8 +55,7 @@ typedef struct { UWord lmbcs; UWord smbcs; UWord mbcgs; - UWord sbmbct; - UWord sbmbcs; + int acul; void *fix; size_t *fix_type_size; @@ -100,8 +99,7 @@ typedef struct { 10*1024*1024, /* (bytes) lmbcs: largest mbc size */\ 1024*1024, /* (bytes) smbcs: smallest mbc size */\ 10, /* (amount) mbcgs: mbc growth stages */\ - 256, /* (bytes) sbmbct: small block mbc threshold */\ - 8*1024, /* (bytes) sbmbcs: small block mbc size */ \ + 0, /* (%) acul: abandon carrier utilization limit */\ /* --- Data not options -------------------------------------------- */\ NULL, /* (ptr) fix */\ NULL /* (ptr) fix_type_size */\ @@ -134,8 +132,7 @@ typedef struct { 1024*1024, /* (bytes) lmbcs: largest mbc size */\ 128*1024, /* (bytes) smbcs: smallest mbc size */\ 10, /* (amount) mbcgs: mbc growth stages */\ - 256, /* (bytes) sbmbct: small block mbc threshold */\ - 8*1024, /* (bytes) sbmbcs: small block mbc size */ \ + 0, /* (%) acul: abandon carrier utilization limit */\ /* --- Data not options -------------------------------------------- */\ NULL, /* (ptr) fix */\ NULL /* (ptr) fix_type_size */\ @@ -165,8 +162,8 @@ void 
erts_alcu_free_thr_pref(ErtsAlcType_t, void *, void *); #endif Eterm erts_alcu_au_info_options(int *, void *, Uint **, Uint *); Eterm erts_alcu_info_options(Allctr_t *, int *, void *, Uint **, Uint *); -Eterm erts_alcu_sz_info(Allctr_t *, int, int *, void *, Uint **, Uint *); -Eterm erts_alcu_info(Allctr_t *, int, int *, void *, Uint **, Uint *); +Eterm erts_alcu_sz_info(Allctr_t *, int, int, int *, void *, Uint **, Uint *); +Eterm erts_alcu_info(Allctr_t *, int, int, int *, void *, Uint **, Uint *); void erts_alcu_init(AlcUInit_t *); void erts_alcu_current_size(Allctr_t *, AllctrSize_t *, ErtsAlcUFixInfo_t *, int); @@ -181,7 +178,6 @@ erts_aint32_t erts_alcu_fix_alloc_shrink(Allctr_t *, erts_aint32_t); #define ERL_ALLOC_UTIL_IMPL__ #define ERTS_ALCU_FLG_FAIL_REALLOC_MOVE (((Uint32) 1) << 0) -#define ERTS_ALCU_FLG_SBMBC (((Uint32) 1) << 1) #ifdef USE_THREADS #define ERL_THREADS_EMU_INTERNAL__ @@ -221,8 +217,15 @@ erts_aint32_t erts_alcu_fix_alloc_shrink(Allctr_t *, erts_aint32_t); #define MBC_FBLK_SZ_MASK UNIT_MASK #define CARRIER_SZ_MASK UNIT_MASK - #if HAVE_ERTS_MSEG + +# define MSEG_UNIT_SHIFT MSEG_ALIGN_BITS +# define MSEG_UNIT_SZ (1 << MSEG_UNIT_SHIFT) +# define MSEG_UNIT_MASK ((~(UWord)0) << MSEG_UNIT_SHIFT) + +# define MSEG_UNIT_FLOOR(X) ((X) & MSEG_UNIT_MASK) +# define MSEG_UNIT_CEILING(X) MSEG_UNIT_FLOOR((X) + ~MSEG_UNIT_MASK) + # ifdef ARCH_64 # define MBC_ABLK_OFFSET_BITS 24 # elif HAVE_SUPER_ALIGNED_MB_CARRIERS @@ -238,10 +241,8 @@ erts_aint32_t erts_alcu_fix_alloc_shrink(Allctr_t *, erts_aint32_t); # define MBC_ABLK_OFFSET_SHIFT (sizeof(UWord)*8 - MBC_ABLK_OFFSET_BITS) # define MBC_ABLK_OFFSET_MASK (~((UWord)0) << MBC_ABLK_OFFSET_SHIFT) # define MBC_ABLK_SZ_MASK (~MBC_ABLK_OFFSET_MASK & ~FLG_MASK) -# define HAVE_ERTS_SBMBC 0 #else # define MBC_ABLK_SZ_MASK (~FLG_MASK) -# define HAVE_ERTS_SBMBC 1 #endif #define MBC_ABLK_SZ(B) (ASSERT_EXPR(!is_sbc_blk(B)), (B)->bhdr & MBC_ABLK_SZ_MASK) @@ -251,18 +252,37 @@ erts_aint32_t erts_alcu_fix_alloc_shrink(Allctr_t *, erts_aint32_t); #define CARRIER_SZ(C) \ ((C)->chdr & CARRIER_SZ_MASK) -extern int erts_have_sbmbc_alloc; - typedef union {char c[ERTS_ALLOC_ALIGN_BYTES]; long l; double d;} Unit_t; +#ifdef ERTS_SMP + +typedef struct { + erts_atomic_t next; + erts_atomic_t prev; + Allctr_t *orig_allctr; + ErtsThrPrgrVal thr_prgr; + erts_atomic_t max_size; + UWord abandon_limit; + UWord blocks; + UWord blocks_size; +} ErtsAlcCPoolData_t; + +#endif + typedef struct Carrier_t_ Carrier_t; struct Carrier_t_ { UWord chdr; Carrier_t *next; Carrier_t *prev; - Allctr_t *allctr; + erts_smp_atomic_t allctr; +#ifdef ERTS_SMP + ErtsAlcCPoolData_t cpool; /* Overwritten by block if sbc */ +#endif }; +#define ERTS_ALC_CARRIER_TO_ALLCTR(C) \ + ((Allctr_t *) (erts_smp_atomic_read_nob(&(C)->allctr) & ~FLG_MASK)) + typedef struct { Carrier_t *first; Carrier_t *last; @@ -280,12 +300,46 @@ typedef struct { #endif } Block_t; +#define THIS_FREE_BLK_HDR_FLG (((UWord) 1) << 0) +#define PREV_FREE_BLK_HDR_FLG (((UWord) 1) << 1) +#define LAST_BLK_HDR_FLG (((UWord) 1) << 2) + +#define SBC_BLK_HDR_FLG /* Special flag combo for (allocated) SBC blocks */\ + (THIS_FREE_BLK_HDR_FLG | PREV_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG) + +/* + * FREE_LAST_MBC_BLK_HDR_FLGS is a special flag combo used for + * distinguishing empty mbc's from allocated blocks in + * handle_delayed_dealloc(). 
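
/*
 * Two idioms from the header hunks above: MSEG_UNIT_FLOOR/CEILING round an
 * address down or up to a power-of-two unit, and ERTS_ALC_CARRIER_TO_ALLCTR
 * masks flag bits out of a pointer that is stored together with those flags
 * in a single word.  A generic sketch follows; the unit size and flag mask
 * are chosen arbitrarily for illustration.
 */
#include <stdint.h>

#define UNIT_SHIFT 18u                               /* e.g. 256 KiB units            */
#define UNIT_MASK  (~(uintptr_t)0 << UNIT_SHIFT)
#define FLAG_MASK  ((uintptr_t)0x7)                  /* low bits free due to alignment */

#define UNIT_FLOOR(x)   ((x) & UNIT_MASK)
#define UNIT_CEILING(x) UNIT_FLOOR((x) + ~UNIT_MASK)

/* A pointer to a sufficiently aligned object can carry flags in its low bits. */
static void     *untag_ptr(uintptr_t word)          { return (void *)(word & ~FLAG_MASK); }
static uintptr_t tag_ptr(void *p, uintptr_t flags)  { return (uintptr_t)p | flags; }
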
+ */ +#define FREE_LAST_MBC_BLK_HDR_FLGS (THIS_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG) + +#define IS_FREE_LAST_MBC_BLK(B) \ + (((B)->bhdr & FLG_MASK) == FREE_LAST_MBC_BLK_HDR_FLGS) + +#define IS_SBC_BLK(B) (((B)->bhdr & FLG_MASK) == SBC_BLK_HDR_FLG) +#define IS_MBC_BLK(B) (!IS_SBC_BLK((B))) +#define IS_FREE_BLK(B) (ASSERT(IS_MBC_BLK(B)), \ + (B)->bhdr & THIS_FREE_BLK_HDR_FLG) + +#if MBC_ABLK_OFFSET_BITS +# define FBLK_TO_MBC(B) (ASSERT(IS_MBC_BLK(B) && IS_FREE_BLK(B)), \ + (B)->u.carrier) +# define ABLK_TO_MBC(B) \ + (ASSERT(IS_MBC_BLK(B) && !IS_FREE_BLK(B)), \ + (Carrier_t*)((MSEG_UNIT_FLOOR((UWord)(B)) - \ + (((B)->bhdr >> MBC_ABLK_OFFSET_SHIFT) << MSEG_UNIT_SHIFT)))) +# define BLK_TO_MBC(B) (IS_FREE_BLK(B) ? FBLK_TO_MBC(B) : ABLK_TO_MBC(B)) +#else +# define FBLK_TO_MBC(B) ((B)->carrier) +# define ABLK_TO_MBC(B) ((B)->carrier) +# define BLK_TO_MBC(B) ((B)->carrier) +#endif +#define MBC_BLK_SZ(B) (IS_FREE_BLK(B) ? MBC_FBLK_SZ(B) : MBC_ABLK_SZ(B)) + typedef UWord FreeBlkFtr_t; /* Footer of a free block */ -typedef struct { - UWord giga_no; - UWord no; -} CallCounter_t; +typedef Uint64 CallCounter_t; typedef struct { UWord no; @@ -298,7 +352,6 @@ typedef struct { StatValues_t mseg; StatValues_t sys_alloc; } norm; - StatValues_t small_block; } curr; StatValues_t max; StatValues_t max_ever; @@ -358,10 +411,20 @@ typedef struct { size_t type_size; SWord list_size; void *list; - SWord max_used; - SWord limit; - SWord allocated; - SWord used; + union { + struct { + SWord max_used; + SWord limit; + SWord allocated; + SWord used; + } nocpool; + struct { + int min_list_size; + int shrink_list; + UWord allocated; + UWord used; + } cpool; + } u; } ErtsAlcFixList_t; struct Allctr_t_ { @@ -409,37 +472,56 @@ struct Allctr_t_ { Uint largest_mbc_size; Uint smallest_mbc_size; Uint mbc_growth_stages; - Uint sbmbc_threshold; - Uint sbmbc_size; #if HAVE_ERTS_MSEG ErtsMsegOpt_t mseg_opt; #endif /* */ + Uint mbc_header_size; Uint min_mbc_size; Uint min_mbc_first_free_size; Uint min_block_size; /* Carriers */ - CarrierList_t sbmbc_list; CarrierList_t mbc_list; CarrierList_t sbc_list; +#ifdef ERTS_SMP + struct { + CarrierList_t dc_list; + UWord abandon_limit; + int disable_abandon; + int check_limit_count; + int util_limit; + struct { + erts_atomic_t blocks_size; + erts_atomic_t no_blocks; + erts_atomic_t carriers_size; + erts_atomic_t no_carriers; + } stat; + } cpool; +#endif /* Main carrier (if there is one) */ Carrier_t * main_carrier; /* Callback functions (first 4 are mandatory) */ Block_t * (*get_free_block) (Allctr_t *, Uint, - Block_t *, Uint, Uint32); - void (*link_free_block) (Allctr_t *, Block_t *, Uint32); - void (*unlink_free_block) (Allctr_t *, Block_t *, Uint32); + Block_t *, Uint); + void (*link_free_block) (Allctr_t *, Block_t *); + void (*unlink_free_block) (Allctr_t *, Block_t *); Eterm (*info_options) (Allctr_t *, char *, int *, void *, Uint **, Uint *); Uint (*get_next_mbc_size) (Allctr_t *); - void (*creating_mbc) (Allctr_t *, Carrier_t *, Uint32); - void (*destroying_mbc) (Allctr_t *, Carrier_t *, Uint32); + void (*creating_mbc) (Allctr_t *, Carrier_t *); + void (*destroying_mbc) (Allctr_t *, Carrier_t *); + + /* The three callbacks below are needed to support carrier migration */ + void (*add_mbc) (Allctr_t *, Carrier_t *); + void (*remove_mbc) (Allctr_t *, Carrier_t *); + UWord (*largest_fblk_in_mbc) (Allctr_t *, Carrier_t *); + void (*init_atoms) (void); #ifdef ERTS_ALLOC_UTIL_HARD_DEBUG @@ -471,8 +553,6 @@ struct Allctr_t_ { CallCounter_t this_alloc; CallCounter_t this_free; CallCounter_t 
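
/*
 * ABLK_TO_MBC() above recovers a block's multiblock carrier without a back
 * pointer: carriers are aligned on mseg units, and the top bits of the block
 * header store how many whole units lie between the block's unit and the
 * carrier start.  A simplified sketch, using an invented 64-bit header layout
 * rather than the real bhdr format, is shown below.
 */
#include <stdint.h>

#define UNIT_SHIFT   18u                       /* illustrative unit size       */
#define OFFSET_BITS  24u                       /* illustrative field width     */
#define OFFSET_SHIFT (64u - OFFSET_BITS)

/* Encode: store the unit distance from the carrier to the block in the top bits. */
static uint64_t encode_offset(uintptr_t carrier, uintptr_t block)
{
    uint64_t units = (uint64_t)((block - carrier) >> UNIT_SHIFT);
    return units << OFFSET_SHIFT;              /* size and flag bits omitted   */
}

/* Decode: floor the block address to a unit boundary, then step back. */
static uintptr_t block_to_carrier(uintptr_t block, uint64_t hdr)
{
    uintptr_t unit_floor = block & (~(uintptr_t)0 << UNIT_SHIFT);
    return unit_floor - (uintptr_t)((hdr >> OFFSET_SHIFT) << UNIT_SHIFT);
}
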
this_realloc; - CallCounter_t sbmbc_alloc; - CallCounter_t sbmbc_free; CallCounter_t mseg_alloc; CallCounter_t mseg_dealloc; CallCounter_t mseg_realloc; @@ -483,8 +563,7 @@ struct Allctr_t_ { CarriersStats_t sbcs; CarriersStats_t mbcs; - CarriersStats_t sbmbcs; - + #ifdef DEBUG #ifdef USE_THREADS struct { @@ -503,6 +582,8 @@ void erts_alcu_verify_unused_ts(Allctr_t *allctr); UWord erts_alcu_test(UWord, UWord, UWord); +void erts_alcu_assert_failed(char* expr, char* file, int line, char *func); + #ifdef DEBUG int is_sbc_blk(Block_t*); #endif diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.c b/erts/emulator/beam/erl_ao_firstfit_alloc.c index b916cb6198..f73ac2eb6e 100644 --- a/erts/emulator/beam/erl_ao_firstfit_alloc.c +++ b/erts/emulator/beam/erl_ao_firstfit_alloc.c @@ -28,11 +28,16 @@ * * This module is a callback-module for erl_alloc_util.c * - * Algorithm: The tree nodes are free-blocks ordered in address order. + * AOFF Algorithm: + * The tree nodes are ordered in address order. * Every node also keeps the size of the largest block in its - * sub-tree ('max_size'). By that we can start from root and keep + * sub-tree ('max_sz'). By that we can start from root and keep * left (for low addresses) while dismissing entire sub-trees with * too small blocks. + * AOFFCBF: + * The only difference for "bestfit within carrier" is the tree + * sorting order. Blocks within the same carrier are sorted + * wrt size instead of address. * * Authors: Rickard Green/Sverker Eriksson */ @@ -85,21 +90,45 @@ typedef struct AOFF_RBTree_t_ AOFF_RBTree_t; struct AOFF_RBTree_t_ { Block_t hdr; - Uint flags; AOFF_RBTree_t *parent; AOFF_RBTree_t *left; AOFF_RBTree_t *right; - Uint max_sz; /* of all blocks in this sub-tree */ + Uint32 flags; + Uint32 max_sz; /* of all blocks in this sub-tree */ }; #define AOFF_BLK_SZ(B) MBC_FBLK_SZ(&(B)->hdr) +typedef struct AOFF_Carrier_t_ AOFF_Carrier_t; + +struct AOFF_Carrier_t_ { + Carrier_t crr; + AOFF_RBTree_t rbt_node; /* My node in the carrier tree */ + AOFF_RBTree_t* root; /* Root of my block tree */ +}; +#define RBT_NODE_TO_MBC(PTR) ((AOFF_Carrier_t*)((char*)(PTR) - offsetof(AOFF_Carrier_t, rbt_node))) + +/* + To support carrier migration we keep two kinds of rb-trees: + 1. One tree of carriers for each allocator instance. + 2. One tree of free blocks for each carrier. + Both trees use the same node structure AOFF_RBTree_t and implementation. + Carrier nodes thus contain a phony Block_t header 'rbt_node.hdr'. + The size value of such a phony block is the size of the largest free block in + that carrier, i.e same as 'max_sz' of the root node of its block tree. +*/ + #ifdef HARD_DEBUG -static AOFF_RBTree_t * check_tree(AOFF_RBTree_t* root, Uint); +# define HARD_CHECK_IS_MEMBER(ROOT,NODE) rbt_assert_is_member(ROOT,NODE) +# define HARD_CHECK_TREE(CRR,BF,ROOT,SZ) check_tree(CRR, BF, ROOT, SZ) +static AOFF_RBTree_t * check_tree(Carrier_t* within_crr, int bestfit, AOFF_RBTree_t* root, Uint); +#else +# define HARD_CHECK_IS_MEMBER(ROOT,NODE) +# define HARD_CHECK_TREE(CRR,BF,ROOT,SZ) #endif -/* Calculate 'max_size' of tree node x by only looking at the direct children - * of x and x itself. +/* Calculate 'max_sz' of tree node x by only looking at 'max_sz' of the + * direct children of x and the size x itself. 
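
/*
 * The comment above describes the two-level scheme: one tree of carriers per
 * allocator instance and, inside each carrier, one tree of free blocks, all
 * sharing a node type whose 'max_sz' holds the largest block size in its
 * subtree.  The sketch below shows the core pruning search -- the same shape
 * as rbt_search() in this patch -- on a simplified, non-balanced node type.
 */
#include <stddef.h>

typedef struct node {
    struct node *left, *right;
    size_t size;      /* size represented by this node           */
    size_t max_sz;    /* largest size anywhere in this subtree   */
} node;

/* Address-ordered first fit: go left whenever the left subtree can possibly
 * satisfy the request, otherwise take this node if it fits, otherwise
 * descend right. */
static node *search(node *x, size_t want)
{
    while (x) {
        if (x->left && x->left->max_sz >= want)
            x = x->left;
        else if (x->size >= want)
            return x;
        else
            x = x->right;
    }
    return NULL;
}
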
*/ static ERTS_INLINE Uint node_max_size(AOFF_RBTree_t *x) { @@ -113,7 +142,7 @@ static ERTS_INLINE Uint node_max_size(AOFF_RBTree_t *x) return sz; } -/* Set new possibly lower 'max_size' of node and propagate change toward root +/* Set new possibly lower 'max_sz' of node and propagate change toward root */ static ERTS_INLINE void lower_max_size(AOFF_RBTree_t *node, AOFF_RBTree_t* stop_at) @@ -132,32 +161,53 @@ static ERTS_INLINE void lower_max_size(AOFF_RBTree_t *node, else ASSERT(new_max == old_max); } +static ERTS_INLINE SWord cmp_blocks(int bestfit, + AOFF_RBTree_t* lhs, AOFF_RBTree_t* rhs) +{ + ASSERT(lhs != rhs); + ASSERT(!bestfit || FBLK_TO_MBC(&lhs->hdr) == FBLK_TO_MBC(&rhs->hdr)); + if (bestfit) { + SWord diff = (SWord)AOFF_BLK_SZ(lhs) - (SWord)AOFF_BLK_SZ(rhs); + if (diff) return diff; + } + return (char*)lhs - (char*)rhs; +} + +static ERTS_INLINE SWord cmp_cand_blk(int bestfit, + Block_t* cand_blk, AOFF_RBTree_t* rhs) +{ + if (bestfit) { + if (BLK_TO_MBC(cand_blk) == FBLK_TO_MBC(&rhs->hdr)) { + SWord diff = (SWord)MBC_BLK_SZ(cand_blk) - (SWord)MBC_FBLK_SZ(&rhs->hdr); + if (diff) return diff; + } + } + return (char*)cand_blk - (char*)rhs; +} + /* Prototypes of callback functions */ -static Block_t* aoff_get_free_block(Allctr_t *, Uint, Block_t *, Uint, Uint32 flags); -static void aoff_link_free_block(Allctr_t *, Block_t*, Uint32 flags); -static void aoff_unlink_free_block(Allctr_t *allctr, Block_t *del, Uint32 flags); +static Block_t* aoff_get_free_block(Allctr_t *, Uint, Block_t *, Uint); +static void aoff_link_free_block(Allctr_t *, Block_t*); +static void aoff_unlink_free_block(Allctr_t *allctr, Block_t *del); +static void aoff_creating_mbc(Allctr_t*, Carrier_t*); +static void aoff_destroying_mbc(Allctr_t*, Carrier_t*); +static void aoff_add_mbc(Allctr_t*, Carrier_t*); +static void aoff_remove_mbc(Allctr_t*, Carrier_t*); +static UWord aoff_largest_fblk_in_mbc(Allctr_t*, Carrier_t*); + +/* Generic tree functions used by both carrier and block trees. */ +static void rbt_delete(AOFF_RBTree_t** root, AOFF_RBTree_t* del); +static void rbt_insert(int bestfit, AOFF_RBTree_t** root, AOFF_RBTree_t* blk); +static AOFF_RBTree_t* rbt_search(AOFF_RBTree_t* root, Uint size); +#ifdef HARD_DEBUG +static int rbt_assert_is_member(AOFF_RBTree_t* root, AOFF_RBTree_t* node); +#endif static Eterm info_options(Allctr_t *, char *, int *, void *, Uint **, Uint *); static void init_atoms(void); - -#ifdef DEBUG - -/* Destroy all tree fields */ -#define DESTROY_TREE_NODE(N) \ - sys_memset((void *) (((Block_t *) (N)) + 1), \ - 0xff, \ - (sizeof(AOFF_RBTree_t) - sizeof(Block_t))) - -#else - -#define DESTROY_TREE_NODE(N) - -#endif - - static int atoms_initialized = 0; void @@ -184,11 +234,14 @@ erts_aoffalc_start(AOFFAllctr_t *alc, sys_memcpy((void *) alc, (void *) &zero.allctr, sizeof(AOFFAllctr_t)); + alc->bf_within_carrier = aoffinit->bf_within_carrier; + allctr->mbc_header_size = sizeof(AOFF_Carrier_t); allctr->min_mbc_size = MIN_MBC_SZ; allctr->min_mbc_first_free_size = MIN_MBC_FIRST_FREE_SZ; allctr->min_block_size = sizeof(AOFF_RBTree_t); - allctr->vsn_str = ERTS_ALC_AOFF_ALLOC_VSN_STR; + allctr->vsn_str = aoffinit->bf_within_carrier ? 
+ ERTS_ALC_AOFF_CBF_ALLOC_VSN_STR : ERTS_ALC_AOFF_ALLOC_VSN_STR; /* Callback functions */ @@ -199,8 +252,11 @@ erts_aoffalc_start(AOFFAllctr_t *alc, allctr->info_options = info_options; allctr->get_next_mbc_size = NULL; - allctr->creating_mbc = NULL; - allctr->destroying_mbc = NULL; + allctr->creating_mbc = aoff_creating_mbc; + allctr->destroying_mbc = aoff_destroying_mbc; + allctr->add_mbc = aoff_add_mbc; + allctr->remove_mbc = aoff_remove_mbc; + allctr->largest_fblk_in_mbc = aoff_largest_fblk_in_mbc; allctr->init_atoms = init_atoms; #ifdef ERTS_ALLOC_UTIL_HARD_DEBUG @@ -306,7 +362,6 @@ replace(AOFF_RBTree_t **root, AOFF_RBTree_t *x, AOFF_RBTree_t *y) y->max_sz = x->max_sz; lower_max_size(y, NULL); - DESTROY_TREE_NODE(x); } static void @@ -403,21 +458,47 @@ tree_insert_fixup(AOFF_RBTree_t** root, AOFF_RBTree_t *blk) } static void -aoff_unlink_free_block(Allctr_t *allctr, Block_t *del, Uint32 flags) +aoff_unlink_free_block(Allctr_t *allctr, Block_t *del) +{ +#ifdef HARD_DEBUG + AOFFAllctr_t* alc = (AOFFAllctr_t*)allctr; +#endif + AOFF_Carrier_t *crr = (AOFF_Carrier_t*) FBLK_TO_MBC(del); + + ASSERT(crr->rbt_node.hdr.bhdr == crr->root->max_sz); + HARD_CHECK_IS_MEMBER(alc->mbc_root, &crr->rbt_node); + HARD_CHECK_TREE(&crr->crr, alc->bf_within_carrier, crr->root, 0); + + rbt_delete(&crr->root, (AOFF_RBTree_t*)del); + + HARD_CHECK_TREE(&crr->crr, alc->bf_within_carrier, crr->root, 0); + + /* Update the carrier tree with a potentially new (lower) max_sz + */ + if (crr->root) { + if (crr->rbt_node.hdr.bhdr == crr->root->max_sz) { + return; + } + ASSERT(crr->rbt_node.hdr.bhdr > crr->root->max_sz); + crr->rbt_node.hdr.bhdr = crr->root->max_sz; + } + else { + crr->rbt_node.hdr.bhdr = 0; + } + lower_max_size(&crr->rbt_node, NULL); +} + +static void +rbt_delete(AOFF_RBTree_t** root, AOFF_RBTree_t* del) { - AOFFAllctr_t *alc = (AOFFAllctr_t *) allctr; - AOFF_RBTree_t **root = ((flags & ERTS_ALCU_FLG_SBMBC) - ? &alc->sbmbc_root : &alc->mbc_root); Uint spliced_is_black; - AOFF_RBTree_t *x, *y, *z = (AOFF_RBTree_t *) del; + AOFF_RBTree_t *x, *y, *z = del; AOFF_RBTree_t null_x; /* null_x is used to get the fixup started when we splice out a node without children. */ - null_x.parent = NULL; + HARD_CHECK_IS_MEMBER(*root, del); -#ifdef HARD_DEBUG - check_tree(*root, 0); -#endif + null_x.parent = NULL; /* Remove node from tree... */ @@ -572,26 +653,43 @@ aoff_unlink_free_block(Allctr_t *allctr, Block_t *del, Uint32 flags) RBT_ASSERT(!null_x.right); } } - - DESTROY_TREE_NODE(del); - -#ifdef HARD_DEBUG - check_tree(*root, 0); -#endif } static void -aoff_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags) +aoff_link_free_block(Allctr_t *allctr, Block_t *block) { - AOFFAllctr_t *alc = (AOFFAllctr_t *) allctr; + AOFFAllctr_t* alc = (AOFFAllctr_t*) allctr; AOFF_RBTree_t *blk = (AOFF_RBTree_t *) block; - AOFF_RBTree_t **root = ((flags & ERTS_ALCU_FLG_SBMBC) - ? &alc->sbmbc_root : &alc->mbc_root); + AOFF_RBTree_t *crr_node; + AOFF_Carrier_t *blk_crr = (AOFF_Carrier_t*) FBLK_TO_MBC(block); Uint blk_sz = AOFF_BLK_SZ(blk); -#ifdef HARD_DEBUG - check_tree(*root, 0); -#endif + ASSERT(allctr == ERTS_ALC_CARRIER_TO_ALLCTR(&blk_crr->crr)); + ASSERT(blk_crr->rbt_node.hdr.bhdr == (blk_crr->root ? 
blk_crr->root->max_sz : 0)); + HARD_CHECK_IS_MEMBER(alc->mbc_root, &blk_crr->rbt_node); + HARD_CHECK_TREE(&blk_crr->crr, alc->bf_within_carrier, blk_crr->root, 0); + + rbt_insert(alc->bf_within_carrier, &blk_crr->root, blk); + + /* Update the carrier tree with a potential new (larger) max_sz + */ + crr_node = &blk_crr->rbt_node; + if (blk_sz > crr_node->hdr.bhdr) { + ASSERT(blk_sz == blk_crr->root->max_sz); + crr_node->hdr.bhdr = blk_sz; + while (blk_sz > crr_node->max_sz) { + crr_node->max_sz = blk_sz; + crr_node = crr_node->parent; + if (!crr_node) break; + } + } + HARD_CHECK_TREE(&blk_crr->crr, alc->bf_within_carrier, blk_crr->root, 0); +} + +static void +rbt_insert(int bestfit, AOFF_RBTree_t** root, AOFF_RBTree_t* blk) +{ + Uint blk_sz = AOFF_BLK_SZ(blk); blk->flags = 0; blk->left = NULL; @@ -609,7 +707,7 @@ aoff_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags) if (x->max_sz < blk_sz) { x->max_sz = blk_sz; } - if (blk < x) { + if (cmp_blocks(bestfit, blk, x) < 0) { if (!x->left) { blk->parent = x; x->left = blk; @@ -625,7 +723,6 @@ aoff_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags) } x = x->right; } - } /* Insert block into size tree */ @@ -635,38 +732,59 @@ aoff_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags) if (IS_RED(blk->parent)) tree_insert_fixup(root, blk); } - -#ifdef HARD_DEBUG - check_tree(*root, 0); -#endif } -static Block_t * -aoff_get_free_block(Allctr_t *allctr, Uint size, - Block_t *cand_blk, Uint cand_size, Uint32 flags) +static AOFF_RBTree_t* +rbt_search(AOFF_RBTree_t* root, Uint size) { - AOFFAllctr_t *alc = (AOFFAllctr_t *) allctr; - AOFF_RBTree_t *x = ((flags & ERTS_ALCU_FLG_SBMBC) - ? alc->sbmbc_root : alc->mbc_root); - AOFF_RBTree_t *blk = NULL; -#ifdef HARD_DEBUG - AOFF_RBTree_t* dbg_blk = check_tree(x, size); -#endif + AOFF_RBTree_t* x = root; - ASSERT(!cand_blk || cand_size >= size); - - while (x) { + ASSERT(x); + for (;;) { if (x->left && x->left->max_sz >= size) { x = x->left; } else if (AOFF_BLK_SZ(x) >= size) { - blk = x; - break; + return x; } else { x = x->right; + if (!x) { + return NULL; + } } } +} + +static Block_t * +aoff_get_free_block(Allctr_t *allctr, Uint size, + Block_t *cand_blk, Uint cand_size) +{ + AOFFAllctr_t *alc = (AOFFAllctr_t *) allctr; + AOFF_RBTree_t *crr_node = alc->mbc_root; + AOFF_Carrier_t* crr; + AOFF_RBTree_t *blk = NULL; +#ifdef HARD_DEBUG + AOFF_RBTree_t* dbg_blk; +#endif + + ASSERT(!cand_blk || cand_size >= size); + + /* Get first-fit carrier + */ + if (!crr_node || !(blk=rbt_search(crr_node, size))) { + return NULL; + } + crr = RBT_NODE_TO_MBC(blk); + + /* Get block within carrier tree + */ +#ifdef HARD_DEBUG + dbg_blk = HARD_CHECK_TREE(&crr->crr, alc->bf_within_carrier, crr->root, size); +#endif + + blk = rbt_search(crr->root, size); + ASSERT(blk); #ifdef HARD_DEBUG ASSERT(blk == dbg_blk); @@ -675,15 +793,87 @@ aoff_get_free_block(Allctr_t *allctr, Uint size, if (!blk) return NULL; - if (cand_blk && cand_blk < &blk->hdr) { + if (cand_blk && cmp_cand_blk(alc->bf_within_carrier, cand_blk, blk) < 0) { return NULL; /* cand_blk was better */ } - aoff_unlink_free_block(allctr, (Block_t *) blk, flags); + aoff_unlink_free_block(allctr, (Block_t *) blk); return (Block_t *) blk; } +static void aoff_creating_mbc(Allctr_t *allctr, Carrier_t *carrier) +{ + AOFFAllctr_t *alc = (AOFFAllctr_t *) allctr; + AOFF_Carrier_t *crr = (AOFF_Carrier_t*) carrier; + AOFF_RBTree_t **root = &alc->mbc_root; + + HARD_CHECK_TREE(NULL, 0, *root, 0); + + /* Link carrier in address order tree + */ + 
crr->rbt_node.hdr.bhdr = 0; + rbt_insert(0, root, &crr->rbt_node); + + /* aoff_link_free_block will add free block later */ + crr->root = NULL; + + HARD_CHECK_TREE(NULL, 0, *root, 0); +} + +static void aoff_destroying_mbc(Allctr_t *allctr, Carrier_t *carrier) +{ + AOFFAllctr_t *alc = (AOFFAllctr_t *) allctr; + AOFF_Carrier_t *crr = (AOFF_Carrier_t*) carrier; + AOFF_RBTree_t *root = alc->mbc_root; + + if (crr->rbt_node.parent || &crr->rbt_node == root) { + aoff_remove_mbc(allctr, carrier); + } + /*else already removed */ +} + +static void aoff_add_mbc(Allctr_t *allctr, Carrier_t *carrier) +{ + AOFFAllctr_t *alc = (AOFFAllctr_t *) allctr; + AOFF_Carrier_t *crr = (AOFF_Carrier_t*) carrier; + AOFF_RBTree_t **root = &alc->mbc_root; + + HARD_CHECK_TREE(NULL, 0, *root, 0); + + /* Link carrier in address order tree + */ + rbt_insert(0, root, &crr->rbt_node); + + HARD_CHECK_TREE(NULL, 0, *root, 0); +} + +static void aoff_remove_mbc(Allctr_t *allctr, Carrier_t *carrier) +{ + AOFFAllctr_t *alc = (AOFFAllctr_t *) allctr; + AOFF_Carrier_t *crr = (AOFF_Carrier_t*) carrier; + AOFF_RBTree_t **root = &alc->mbc_root; + + ASSERT(allctr == ERTS_ALC_CARRIER_TO_ALLCTR(carrier)); + HARD_CHECK_TREE(NULL, 0, *root, 0); + + rbt_delete(root, &crr->rbt_node); + crr->rbt_node.parent = NULL; + crr->rbt_node.left = NULL; + crr->rbt_node.right = NULL; + crr->rbt_node.max_sz = crr->rbt_node.hdr.bhdr; + + HARD_CHECK_TREE(NULL, 0, *root, 0); +} + +static UWord aoff_largest_fblk_in_mbc(Allctr_t* allctr, Carrier_t* carrier) +{ + AOFF_Carrier_t *crr = (AOFF_Carrier_t*) carrier; + + ASSERT(allctr == ERTS_ALC_CARRIER_TO_ALLCTR(carrier)); + ASSERT(crr->rbt_node.hdr.bhdr == (crr->root ? crr->root->max_sz : 0)); + return crr->rbt_node.hdr.bhdr; +} /* * info_options() @@ -692,6 +882,7 @@ aoff_get_free_block(Allctr_t *allctr, Uint size, static struct { Eterm as; Eterm aoff; + Eterm aoffcaobf; #ifdef DEBUG Eterm end_of_atoms; #endif @@ -720,6 +911,7 @@ init_atoms(void) #endif AM_INIT(as); AM_INIT(aoff); + AM_INIT(aoffcaobf); #ifdef DEBUG for (atom = (Eterm *) &am; atom < &am.end_of_atoms; atom++) { @@ -749,6 +941,7 @@ info_options(Allctr_t *allctr, Uint **hpp, Uint *szp) { + AOFFAllctr_t* alc = (AOFFAllctr_t*) allctr; Eterm res = THE_NON_VALUE; if (print_to_p) { @@ -756,7 +949,7 @@ info_options(Allctr_t *allctr, print_to_arg, "%sas: %s\n", prefix, - "aoff"); + alc->bf_within_carrier ? "aoffcaobf" : "aoff"); } if (hpp || szp) { @@ -766,7 +959,8 @@ info_options(Allctr_t *allctr, __FILE__, __LINE__);; res = NIL; - add_2tup(hpp, szp, &res, am.as, am.aoff); + add_2tup(hpp, szp, &res, am.as, + alc->bf_within_carrier ? am.aoffcaobf : am.aoff); } return res; @@ -784,14 +978,19 @@ UWord erts_aoffalc_test(UWord op, UWord a1, UWord a2) { switch (op) { - case 0x500: return (UWord) 0; /* IS_AOBF */ - case 0x501: return (UWord) ((AOFFAllctr_t *) a1)->mbc_root; + case 0x501: { + AOFF_RBTree_t *node = ((AOFFAllctr_t *) a1)->mbc_root; + Uint size = (Uint) a2; + node = node ? rbt_search(node, size) : NULL; + return (UWord) (node ? 
RBT_NODE_TO_MBC(node)->root : NULL); + } case 0x502: return (UWord) ((AOFF_RBTree_t *) a1)->parent; case 0x503: return (UWord) ((AOFF_RBTree_t *) a1)->left; case 0x504: return (UWord) ((AOFF_RBTree_t *) a1)->right; case 0x506: return (UWord) IS_BLACK((AOFF_RBTree_t *) a1); - case 0x508: return (UWord) 1; /* IS_AOFF */ + case 0x508: return (UWord) 0; /* IS_BF_ALGO */ case 0x509: return (UWord) ((AOFF_RBTree_t *) a1)->max_sz; + case 0x50a: return (UWord) ((AOFFAllctr_t *) a1)->bf_within_carrier; default: ASSERT(0); return ~((UWord) 0); } } @@ -804,6 +1003,16 @@ erts_aoffalc_test(UWord op, UWord a1, UWord a2) #ifdef HARD_DEBUG +static int rbt_assert_is_member(AOFF_RBTree_t* root, AOFF_RBTree_t* node) +{ + while (node != root) { + ASSERT(node->parent); + ASSERT(node->parent->left == node || node->parent->right == node); + node = node->parent; + } + return 1; +} + #define IS_LEFT_VISITED(FB) ((FB)->flags & LEFT_VISITED_FLG) #define IS_RIGHT_VISITED(FB) ((FB)->flags & RIGHT_VISITED_FLG) @@ -840,12 +1049,14 @@ static void print_tree(AOFF_RBTree_t*); */ static AOFF_RBTree_t * -check_tree(AOFF_RBTree_t* root, Uint size) +check_tree(Carrier_t* within_crr, int bestfit, AOFF_RBTree_t* root, Uint size) { AOFF_RBTree_t *res = NULL; Sint blacks; Sint curr_blacks; AOFF_RBTree_t *x; + Carrier_t* crr; + Uint depth, max_depth, node_cnt; #ifdef PRINT_TREE print_tree(root); @@ -859,12 +1070,16 @@ check_tree(AOFF_RBTree_t* root, Uint size) ASSERT(!x->parent); curr_blacks = 1; blacks = -1; + depth = 1; + max_depth = 0; + node_cnt = 0; while (x) { if (!IS_LEFT_VISITED(x)) { SET_LEFT_VISITED(x); if (x->left) { x = x->left; + ++depth; if (IS_BLACK(x)) curr_blacks++; continue; @@ -880,6 +1095,7 @@ check_tree(AOFF_RBTree_t* root, Uint size) SET_RIGHT_VISITED(x); if (x->right) { x = x->right; + ++depth; if (IS_BLACK(x)) curr_blacks++; continue; @@ -891,6 +1107,16 @@ check_tree(AOFF_RBTree_t* root, Uint size) } } + ++node_cnt; + if (depth > max_depth) + max_depth = depth; + + if (within_crr) { + crr = FBLK_TO_MBC(&x->hdr); + ASSERT(crr == within_crr); + ASSERT((char*)x > (char*)crr); + ASSERT(((char*)x + AOFF_BLK_SZ(x)) <= ((char*)crr + CARRIER_SZ(crr))); + } if (IS_RED(x)) { ASSERT(IS_BLACK(x->right)); @@ -901,13 +1127,13 @@ check_tree(AOFF_RBTree_t* root, Uint size) if (x->left) { ASSERT(x->left->parent == x); - ASSERT(x->left < x); + ASSERT(cmp_blocks(bestfit, x->left, x) < 0); ASSERT(x->left->max_sz <= x->max_sz); } if (x->right) { ASSERT(x->right->parent == x); - ASSERT(x->right > x); + ASSERT(cmp_blocks(bestfit, x->right, x) > 0); ASSERT(x->right->max_sz <= x->max_sz); } ASSERT(x->max_sz >= AOFF_BLK_SZ(x)); @@ -916,7 +1142,7 @@ check_tree(AOFF_RBTree_t* root, Uint size) || x->max_sz == (x->right ? x->right->max_sz : 0)); if (size && AOFF_BLK_SZ(x) >= size) { - if (!res || x < res) { + if (!res || cmp_blocks(bestfit, x, res) < 0) { res = x; } } @@ -926,10 +1152,11 @@ check_tree(AOFF_RBTree_t* root, Uint size) if (IS_BLACK(x)) curr_blacks--; x = x->parent; - + --depth; } - + ASSERT(depth == 0 || (!root && depth==1)); ASSERT(curr_blacks == 0); + ASSERT((1 << (max_depth/2)) <= node_cnt); UNSET_LEFT_VISITED(root); UNSET_RIGHT_VISITED(root); @@ -954,9 +1181,9 @@ print_tree_aux(AOFF_RBTree_t *x, int indent) for (i = 0; i < indent; i++) { putc(' ', stderr); } - fprintf(stderr, "%s: sz=%lu addr=0x%lx max_size=%lu\r\n", + fprintf(stderr, "%s: sz=%lu addr=0x%lx max_size=%u\r\n", IS_BLACK(x) ? 
"BLACK" : "RED", - AOFF_BLK_SZ(x), (Uint)x, x->max_sz); + AOFF_BLK_SZ(x), (Uint)x, (unsigned)x->max_sz); print_tree_aux(x->left, indent + INDENT_STEP); } } diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.h b/erts/emulator/beam/erl_ao_firstfit_alloc.h index 21c36c6654..87427e8e62 100644 --- a/erts/emulator/beam/erl_ao_firstfit_alloc.h +++ b/erts/emulator/beam/erl_ao_firstfit_alloc.h @@ -24,11 +24,12 @@ #include "erl_alloc_util.h" #define ERTS_ALC_AOFF_ALLOC_VSN_STR "0.9" +#define ERTS_ALC_AOFF_CBF_ALLOC_VSN_STR "0.9" typedef struct AOFFAllctr_t_ AOFFAllctr_t; typedef struct { - int dummy; + int bf_within_carrier; } AOFFAllctrInit_t; #define ERTS_DEFAULT_AOFF_ALLCTR_INIT {0/*dummy*/} @@ -51,7 +52,7 @@ struct AOFFAllctr_t_ { Allctr_t allctr; /* Has to be first! */ struct AOFF_RBTree_t_* mbc_root; - struct AOFF_RBTree_t_* sbmbc_root; + int bf_within_carrier; }; UWord erts_aoffalc_test(UWord, UWord, UWord); diff --git a/erts/emulator/beam/erl_bestfit_alloc.c b/erts/emulator/beam/erl_bestfit_alloc.c index ed843a51fb..58e53c3d00 100644 --- a/erts/emulator/beam/erl_bestfit_alloc.c +++ b/erts/emulator/beam/erl_bestfit_alloc.c @@ -89,21 +89,21 @@ static RBTree_t * check_tree(RBTree_t, int, Uint); #endif -static void tree_delete(Allctr_t *allctr, Block_t *del, Uint32 flags); +static void tree_delete(Allctr_t *allctr, Block_t *del); /* Prototypes of callback functions */ /* "address order best fit" specific callback functions */ static Block_t * aobf_get_free_block (Allctr_t *, Uint, - Block_t *, Uint, Uint32); -static void aobf_link_free_block (Allctr_t *, Block_t *, Uint32); + Block_t *, Uint); +static void aobf_link_free_block (Allctr_t *, Block_t *); #define aobf_unlink_free_block tree_delete /* "best fit" specific callback functions */ static Block_t * bf_get_free_block (Allctr_t *, Uint, - Block_t *, Uint, Uint32); -static void bf_link_free_block (Allctr_t *, Block_t *, Uint32); -static ERTS_INLINE void bf_unlink_free_block (Allctr_t *, Block_t *, Uint32); + Block_t *, Uint); +static void bf_link_free_block (Allctr_t *, Block_t *); +static ERTS_INLINE void bf_unlink_free_block (Allctr_t *, Block_t *); static Eterm info_options (Allctr_t *, char *, int *, @@ -179,6 +179,7 @@ erts_bfalc_start(BFAllctr_t *bfallctr, bfallctr->address_order = bfinit->ao; + allctr->mbc_header_size = sizeof(Carrier_t); allctr->min_mbc_size = MIN_MBC_SZ; allctr->min_mbc_first_free_size = MIN_MBC_FIRST_FREE_SZ; allctr->min_block_size = (bfinit->ao @@ -207,6 +208,9 @@ erts_bfalc_start(BFAllctr_t *bfallctr, allctr->get_next_mbc_size = NULL; allctr->creating_mbc = NULL; allctr->destroying_mbc = NULL; + allctr->add_mbc = NULL; + allctr->remove_mbc = NULL; + allctr->largest_fblk_in_mbc = NULL; allctr->init_atoms = init_atoms; #ifdef ERTS_ALLOC_UTIL_HARD_DEBUG @@ -407,13 +411,11 @@ tree_insert_fixup(RBTree_t **root, RBTree_t *blk) * callback function in the address order case. */ static void -tree_delete(Allctr_t *allctr, Block_t *del, Uint32 flags) +tree_delete(Allctr_t *allctr, Block_t *del) { BFAllctr_t *bfallctr = (BFAllctr_t *) allctr; Uint spliced_is_black; - RBTree_t **root = ((flags & ERTS_ALCU_FLG_SBMBC) - ? &bfallctr->sbmbc_root - : &bfallctr->mbc_root); + RBTree_t **root = &bfallctr->mbc_root; RBTree_t *x, *y, *z = (RBTree_t *) del; RBTree_t null_x; /* null_x is used to get the fixup started when we splice out a node without children. 
*/ @@ -586,12 +588,10 @@ tree_delete(Allctr_t *allctr, Block_t *del, Uint32 flags) \* */ static void -aobf_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags) +aobf_link_free_block(Allctr_t *allctr, Block_t *block) { BFAllctr_t *bfallctr = (BFAllctr_t *) allctr; - RBTree_t **root = ((flags & ERTS_ALCU_FLG_SBMBC) - ? &bfallctr->sbmbc_root - : &bfallctr->mbc_root); + RBTree_t **root = &bfallctr->mbc_root; RBTree_t *blk = (RBTree_t *) block; Uint blk_sz = BF_BLK_SZ(blk); @@ -647,21 +647,18 @@ aobf_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags) #if 0 /* tree_delete() is directly used instead */ static void -aobf_unlink_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags) +aobf_unlink_free_block(Allctr_t *allctr, Block_t *block) { - tree_delete(allctr, block, flags); + tree_delete(allctr, block); } #endif static Block_t * aobf_get_free_block(Allctr_t *allctr, Uint size, - Block_t *cand_blk, Uint cand_size, - Uint32 flags) + Block_t *cand_blk, Uint cand_size) { BFAllctr_t *bfallctr = (BFAllctr_t *) allctr; - RBTree_t **root = ((flags & ERTS_ALCU_FLG_SBMBC) - ? &bfallctr->sbmbc_root - : &bfallctr->mbc_root); + RBTree_t **root = &bfallctr->mbc_root; RBTree_t *x = *root; RBTree_t *blk = NULL; Uint blk_sz; @@ -694,7 +691,7 @@ aobf_get_free_block(Allctr_t *allctr, Uint size, return NULL; /* cand_blk was better */ } - aobf_unlink_free_block(allctr, (Block_t *) blk, flags); + aobf_unlink_free_block(allctr, (Block_t *) blk); return (Block_t *) blk; } @@ -705,12 +702,10 @@ aobf_get_free_block(Allctr_t *allctr, Uint size, \* */ static void -bf_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags) +bf_link_free_block(Allctr_t *allctr, Block_t *block) { BFAllctr_t *bfallctr = (BFAllctr_t *) allctr; - RBTree_t **root = ((flags & ERTS_ALCU_FLG_SBMBC) - ? &bfallctr->sbmbc_root - : &bfallctr->mbc_root); + RBTree_t **root = &bfallctr->mbc_root; RBTree_t *blk = (RBTree_t *) block; Uint blk_sz = BF_BLK_SZ(blk); @@ -779,12 +774,10 @@ bf_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags) } static ERTS_INLINE void -bf_unlink_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags) +bf_unlink_free_block(Allctr_t *allctr, Block_t *block) { BFAllctr_t *bfallctr = (BFAllctr_t *) allctr; - RBTree_t **root = ((flags & ERTS_ALCU_FLG_SBMBC) - ? &bfallctr->sbmbc_root - : &bfallctr->mbc_root); + RBTree_t **root = &bfallctr->mbc_root; RBTree_t *x = (RBTree_t *) block; if (IS_LIST_ELEM(x)) { @@ -812,7 +805,7 @@ bf_unlink_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags) } else { /* Remove from tree */ - tree_delete(allctr, block, flags); + tree_delete(allctr, block); } DESTROY_LIST_ELEM(x); @@ -821,13 +814,10 @@ bf_unlink_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags) static Block_t * bf_get_free_block(Allctr_t *allctr, Uint size, - Block_t *cand_blk, Uint cand_size, - Uint32 flags) + Block_t *cand_blk, Uint cand_size) { BFAllctr_t *bfallctr = (BFAllctr_t *) allctr; - RBTree_t **root = ((flags & ERTS_ALCU_FLG_SBMBC) - ? &bfallctr->sbmbc_root - : &bfallctr->mbc_root); + RBTree_t **root = &bfallctr->mbc_root; RBTree_t *x = *root; RBTree_t *blk = NULL; Uint blk_sz; @@ -867,7 +857,7 @@ bf_get_free_block(Allctr_t *allctr, Uint size, the tree node */ blk = LIST_NEXT(blk) ? 
LIST_NEXT(blk) : blk; - bf_unlink_free_block(allctr, (Block_t *) blk, flags); + bf_unlink_free_block(allctr, (Block_t *) blk); return (Block_t *) blk; } @@ -984,7 +974,7 @@ erts_bfalc_test(UWord op, UWord a1, UWord a2) case 0x205: return (UWord) ((RBTreeList_t *) a1)->next; case 0x206: return (UWord) IS_BLACK((RBTree_t *) a1); case 0x207: return (UWord) IS_TREE_NODE((RBTree_t *) a1); - case 0x208: return (UWord) 0; /* IS_AOFF */ + case 0x208: return (UWord) 1; /* IS_BF_ALGO */ default: ASSERT(0); return ~((UWord) 0); } } diff --git a/erts/emulator/beam/erl_bestfit_alloc.h b/erts/emulator/beam/erl_bestfit_alloc.h index f2d2f07d7a..be8b2b871d 100644 --- a/erts/emulator/beam/erl_bestfit_alloc.h +++ b/erts/emulator/beam/erl_bestfit_alloc.h @@ -55,7 +55,6 @@ struct BFAllctr_t_ { Allctr_t allctr; /* Has to be first! */ RBTree_t * mbc_root; - RBTree_t * sbmbc_root; int address_order; }; diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c index 54eefe8d12..74a37f374d 100755 --- a/erts/emulator/beam/erl_bif_info.c +++ b/erts/emulator/beam/erl_bif_info.c @@ -59,6 +59,7 @@ static Export* alloc_info_trap = NULL; static Export* alloc_sizes_trap = NULL; static Export *gather_sched_wall_time_res_trap; +static Export *gather_gc_info_res_trap; #define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1) @@ -1670,13 +1671,22 @@ info_1_tuple(Process* BIF_P, /* Pointer to current process. */ sel = *tp++; - if (sel == am_allocator_sizes) { + if (sel == am_memory_internal) { + switch (arity) { + case 3: + if (erts_request_alloc_info(BIF_P, tp[0], tp[1], 1, 1)) + return am_true; + default: + goto badarg; + } + } + else if (sel == am_allocator_sizes) { switch (arity) { case 2: ERTS_BIF_PREP_TRAP1(ret, alloc_sizes_trap, BIF_P, *tp); return ret; case 3: - if (erts_request_alloc_info(BIF_P, tp[0], tp[1], 1)) + if (erts_request_alloc_info(BIF_P, tp[0], tp[1], 1, 0)) return am_true; default: goto badarg; @@ -1735,7 +1745,7 @@ info_1_tuple(Process* BIF_P, /* Pointer to current process. 
*/ ERTS_BIF_PREP_TRAP1(ret, alloc_info_trap, BIF_P, *tp); return ret; case 3: - if (erts_request_alloc_info(BIF_P, tp[0], tp[1], 0)) + if (erts_request_alloc_info(BIF_P, tp[0], tp[1], 0, 0)) return am_true; default: goto badarg; @@ -3103,18 +3113,10 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1) res = TUPLE2(hp, cs, SMALL_ZERO); BIF_RET(res); } else if (BIF_ARG_1 == am_garbage_collection) { - Uint hsz = 4; - ErtsGCInfo gc_info; - Eterm gcs; - Eterm recl; - erts_gc_info(&gc_info); - (void) erts_bld_uint(NULL, &hsz, gc_info.garbage_collections); - (void) erts_bld_uint(NULL, &hsz, gc_info.reclaimed); - hp = HAlloc(BIF_P, hsz); - gcs = erts_bld_uint(&hp, NULL, gc_info.garbage_collections); - recl = erts_bld_uint(&hp, NULL, gc_info.reclaimed); - res = TUPLE3(hp, gcs, recl, SMALL_ZERO); - BIF_RET(res); + res = erts_gc_info_request(BIF_P); + if (is_non_value(res)) + BIF_RET(am_undefined); + BIF_TRAP1(gather_gc_info_res_trap, BIF_P, res); } else if (BIF_ARG_1 == am_reductions) { Uint reds; Uint diff; @@ -4082,6 +4084,8 @@ erts_bif_info_init(void) alloc_sizes_trap = erts_export_put(am_erlang, am_alloc_sizes, 1); gather_sched_wall_time_res_trap = erts_export_put(am_erlang, am_gather_sched_wall_time_result, 1); + gather_gc_info_res_trap + = erts_export_put(am_erlang, am_gather_gc_info_result, 1); process_info_init(); os_info_init(); } diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c index 298909c921..0d12e658d9 100644 --- a/erts/emulator/beam/erl_gc.c +++ b/erts/emulator/beam/erl_gc.c @@ -47,10 +47,6 @@ */ #define ALENGTH(a) (sizeof(a)/sizeof(a[0])) -static erts_smp_spinlock_t info_lck; -static Uint garbage_cols; /* no of garbage collections */ -static Uint reclaimed; /* no of words reclaimed in GCs */ - # define STACK_SZ_ON_HEAP(p) ((p)->hend - (p)->stop) # define OverRunCheck(P) \ if ((P)->stop < (P)->htop) { \ @@ -120,6 +116,8 @@ static void offset_rootset(Process *p, Sint offs, char* area, Uint area_size, static void offset_off_heap(Process* p, Sint offs, char* area, Uint area_size); static void offset_mqueue(Process *p, Sint offs, char* area, Uint area_size); +static void init_gc_info(ErtsGCInfo *gcip); + #ifdef HARDDEBUG static void disallow_heap_frag_ref_in_heap(Process* p); static void disallow_heap_frag_ref_in_old_heap(Process* p); @@ -137,13 +135,41 @@ static int num_heap_sizes; /* Number of heap sizes. */ Uint erts_test_long_gc_sleep; /* Only used for testing... */ +typedef struct { + Process *proc; + Eterm ref; + Eterm ref_heap[REF_THING_SIZE]; + Uint req_sched; + erts_smp_atomic32_t refc; +} ErtsGCInfoReq; + +#if !HALFWORD_HEAP +ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(gcireq, + ErtsGCInfoReq, + 5, + ERTS_ALC_T_GC_INFO_REQ) +#else +static ERTS_INLINE ErtsGCInfoReq * +gcireq_alloc(void) +{ + return erts_alloc(ERTS_ALC_T_GC_INFO_REQ, + sizeof(ErtsGCInfoReq)); +} + +static ERTS_INLINE void +gcireq_free(ErtsGCInfoReq *ptr) +{ + erts_free(ERTS_ALC_T_GC_INFO_REQ, ptr); +} +#endif + /* * Initialize GC global data. 
*/ void erts_init_gc(void) { - int i = 0; + int i = 0, ix; Sint max_heap_size = 0; ASSERT(offsetof(ProcBin,thing_word) == offsetof(struct erl_off_heap_header,thing_word)); @@ -156,9 +182,6 @@ erts_init_gc(void) ASSERT(offsetof(ProcBin,next) == offsetof(ErlFunThing,next)); ASSERT(offsetof(ProcBin,next) == offsetof(ExternalThing,next)); - erts_smp_spinlock_init(&info_lck, "gc_info"); - garbage_cols = 0; - reclaimed = 0; erts_test_long_gc_sleep = 0; /* @@ -199,6 +222,16 @@ erts_init_gc(void) } } num_heap_sizes = i; + + for (ix = 0; ix < erts_no_schedulers; ix++) { + ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(ix); + init_gc_info(&esdp->gc_info); + } + +#if !HALFWORD_HEAP + init_gcireq_alloc(); +#endif + } /* @@ -287,17 +320,6 @@ erts_heap_sizes(Process* p) return res; } -void -erts_gc_info(ErtsGCInfo *gcip) -{ - if (gcip) { - erts_smp_spin_lock(&info_lck); - gcip->garbage_collections = garbage_cols; - gcip->reclaimed = reclaimed; - erts_smp_spin_unlock(&info_lck); - } -} - void erts_offset_heap(Eterm* hp, Uint sz, Sint offs, Eterm* low, Eterm* high) { @@ -378,6 +400,7 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj) Uint reclaimed_now = 0; int done = 0; Uint ms1, s1, us1; + ErtsSchedulerData *esdp = erts_get_scheduler_data(); #ifdef USE_VM_PROBES DTRACE_CHARBUF(pidbuf, DTRACE_TERM_BUF_SIZE); #endif @@ -455,11 +478,9 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj) monitor_large_heap(p); } - erts_smp_spin_lock(&info_lck); - garbage_cols++; - reclaimed += reclaimed_now; - erts_smp_spin_unlock(&info_lck); - + esdp->gc_info.garbage_cols++; + esdp->gc_info.reclaimed += reclaimed_now; + FLAGS(p) &= ~F_FORCE_GC; #ifdef CHECK_FOR_HOLES @@ -2543,6 +2564,110 @@ offset_rootset(Process *p, Sint offs, char* area, Uint area_size, offset_one_rootset(p, offs, area, area_size, objv, nobj); } +static void +init_gc_info(ErtsGCInfo *gcip) +{ + gcip->reclaimed = 0; + gcip->garbage_cols = 0; +} + +static void +reply_gc_info(void *vgcirp) +{ + Uint64 reclaimed = 0, garbage_cols = 0; + ErtsSchedulerData *esdp = erts_get_scheduler_data(); + ErtsGCInfoReq *gcirp = (ErtsGCInfoReq *) vgcirp; + ErtsProcLocks rp_locks = (gcirp->req_sched == esdp->no + ? 
ERTS_PROC_LOCK_MAIN + : 0); + Process *rp = gcirp->proc; + Eterm ref_copy = NIL, msg; + Eterm *hp = NULL; + Eterm **hpp; + Uint sz, *szp; + ErlOffHeap *ohp = NULL; + ErlHeapFragment *bp = NULL; + + ASSERT(esdp); + + reclaimed = esdp->gc_info.reclaimed; + garbage_cols = esdp->gc_info.garbage_cols; + + sz = 0; + hpp = NULL; + szp = &sz; + + while (1) { + if (hpp) + ref_copy = STORE_NC(hpp, ohp, gcirp->ref); + else + *szp += REF_THING_SIZE; + + msg = erts_bld_tuple(hpp, szp, 3, + make_small(esdp->no), + erts_bld_uint64(hpp, szp, garbage_cols), + erts_bld_uint64(hpp, szp, reclaimed)); + + msg = erts_bld_tuple(hpp, szp, 2, ref_copy, msg); + if (hpp) + break; + + hp = erts_alloc_message_heap(sz, &bp, &ohp, rp, &rp_locks); + szp = NULL; + hpp = &hp; + } + + erts_queue_message(rp, &rp_locks, bp, msg, NIL +#ifdef USE_VM_PROBES + , NIL +#endif + ); + + if (gcirp->req_sched == esdp->no) + rp_locks &= ~ERTS_PROC_LOCK_MAIN; + + if (rp_locks) + erts_smp_proc_unlock(rp, rp_locks); + + erts_smp_proc_dec_refc(rp); + + if (erts_smp_atomic32_dec_read_nob(&gcirp->refc) == 0) + gcireq_free(vgcirp); +} + +Eterm +erts_gc_info_request(Process *c_p) +{ + ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p); + Eterm ref; + ErtsGCInfoReq *gcirp; + Eterm *hp; + + gcirp = gcireq_alloc(); + ref = erts_make_ref(c_p); + hp = &gcirp->ref_heap[0]; + + gcirp->proc = c_p; + gcirp->ref = STORE_NC(&hp, NULL, ref); + gcirp->req_sched = esdp->no; + erts_smp_atomic32_init_nob(&gcirp->refc, + (erts_aint32_t) erts_no_schedulers); + + erts_smp_proc_add_refc(c_p, (Sint32) erts_no_schedulers); + +#ifdef ERTS_SMP + if (erts_no_schedulers > 1) + erts_schedule_multi_misc_aux_work(1, + erts_no_schedulers, + reply_gc_info, + (void *) gcirp); +#endif + + reply_gc_info((void *) gcirp); + + return ref; +} + #if defined(DEBUG) || defined(ERTS_OFFHEAP_DEBUG) static int diff --git a/erts/emulator/beam/erl_goodfit_alloc.c b/erts/emulator/beam/erl_goodfit_alloc.c index 79d8b40f8c..e9d8249ee1 100644 --- a/erts/emulator/beam/erl_goodfit_alloc.c +++ b/erts/emulator/beam/erl_goodfit_alloc.c @@ -163,10 +163,10 @@ BKT_MIN_SZ(GFAllctr_t *gfallctr, int ix) /* Prototypes of callback functions */ static Block_t * get_free_block (Allctr_t *, Uint, - Block_t *, Uint, Uint32); -static void link_free_block (Allctr_t *, Block_t *, Uint32); -static void unlink_free_block (Allctr_t *, Block_t *, Uint32); -static void update_last_aux_mbc (Allctr_t *, Carrier_t *, Uint32); + Block_t *, Uint); +static void link_free_block (Allctr_t *, Block_t *); +static void unlink_free_block (Allctr_t *, Block_t *); +static void update_last_aux_mbc (Allctr_t *, Carrier_t *); static Eterm info_options (Allctr_t *, char *, int *, void *, Uint **, Uint *); static void init_atoms (void); @@ -203,8 +203,7 @@ erts_gfalc_start(GFAllctr_t *gfallctr, sys_memcpy((void *) gfallctr, (void *) &zero.allctr, sizeof(GFAllctr_t)); - init->sbmbct = 0; /* Small mbc not yet supported by goodfit */ - + allctr->mbc_header_size = sizeof(Carrier_t); allctr->min_mbc_size = MIN_MBC_SZ; allctr->min_mbc_first_free_size = MIN_MBC_FIRST_FREE_SZ; allctr->min_block_size = sizeof(GFFreeBlock_t); @@ -222,7 +221,9 @@ erts_gfalc_start(GFAllctr_t *gfallctr, allctr->get_next_mbc_size = NULL; allctr->creating_mbc = update_last_aux_mbc; allctr->destroying_mbc = update_last_aux_mbc; - + allctr->add_mbc = NULL; + allctr->remove_mbc = NULL; + allctr->largest_fblk_in_mbc = NULL; allctr->init_atoms = init_atoms; #ifdef ERTS_ALLOC_UTIL_HARD_DEBUG @@ -384,7 +385,7 @@ search_bucket(Allctr_t *allctr, int ix, Uint size) static 
Block_t * get_free_block(Allctr_t *allctr, Uint size, - Block_t *cand_blk, Uint cand_size, Uint32 flags) + Block_t *cand_blk, Uint cand_size) { GFAllctr_t *gfallctr = (GFAllctr_t *) allctr; int unsafe_bi, min_bi; @@ -403,7 +404,7 @@ get_free_block(Allctr_t *allctr, Uint size, if (blk) { if (cand_blk && cand_size <= MBC_FBLK_SZ(blk)) return NULL; /* cand_blk was better */ - unlink_free_block(allctr, blk, flags); + unlink_free_block(allctr, blk); return blk; } if (min_bi < NO_OF_BKTS - 1) { @@ -423,14 +424,14 @@ get_free_block(Allctr_t *allctr, Uint size, ASSERT(blk); if (cand_blk && cand_size <= MBC_FBLK_SZ(blk)) return NULL; /* cand_blk was better */ - unlink_free_block(allctr, blk, flags); + unlink_free_block(allctr, blk); return blk; } static void -link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags) +link_free_block(Allctr_t *allctr, Block_t *block) { GFAllctr_t *gfallctr = (GFAllctr_t *) allctr; GFFreeBlock_t *blk = (GFFreeBlock_t *) block; @@ -451,7 +452,7 @@ link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags) } static void -unlink_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags) +unlink_free_block(Allctr_t *allctr, Block_t *block) { GFAllctr_t *gfallctr = (GFAllctr_t *) allctr; GFFreeBlock_t *blk = (GFFreeBlock_t *) block; @@ -472,7 +473,7 @@ unlink_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags) } static void -update_last_aux_mbc(Allctr_t *allctr, Carrier_t *mbc, Uint32 flags) +update_last_aux_mbc(Allctr_t *allctr, Carrier_t *mbc) { GFAllctr_t *gfallctr = (GFAllctr_t *) allctr; diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c index b5bac2151d..b3a3c3d403 100644 --- a/erts/emulator/beam/erl_init.c +++ b/erts/emulator/beam/erl_init.c @@ -300,13 +300,13 @@ erl_init(int ncpu, init_benchmarking(); erts_init_monitors(); - erts_init_gc(); erts_init_time(); erts_init_sys_common_misc(); erts_init_process(ncpu, proc_tab_sz, legacy_proc_tab); erts_init_scheduling(no_schedulers, no_schedulers_online); erts_init_cpu_topology(); /* Must be after init_scheduling */ + erts_init_gc(); /* Must be after init_scheduling */ erts_alloc_late_init(); H_MIN_SIZE = erts_next_heap_size(H_MIN_SIZE, 0); diff --git a/erts/emulator/beam/erl_instrument.c b/erts/emulator/beam/erl_instrument.c index 1aea3f65bc..df7c443387 100644 --- a/erts/emulator/beam/erl_instrument.c +++ b/erts/emulator/beam/erl_instrument.c @@ -1236,8 +1236,6 @@ erts_instr_init(int stat, int map_stat) sys_memzero((void *) stats->n, sizeof(Stat_t)*(ERTS_ALC_N_MAX+1)); for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) { - if (ERTS_IS_SBMBC_ALLOCATOR_NO__(i)) - continue; if (erts_allctrs_info[i].enabled) stats->ap[i] = &stats->a[i]; else @@ -1251,8 +1249,6 @@ erts_instr_init(int stat, int map_stat) erts_instr_memory_map = 1; erts_instr_stat = 1; for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) { - if (ERTS_IS_SBMBC_ALLOCATOR_NO__(i)) - continue; erts_allctrs[i].alloc = map_stat_alloc; erts_allctrs[i].realloc = map_stat_realloc; erts_allctrs[i].free = map_stat_free; @@ -1265,8 +1261,6 @@ erts_instr_init(int stat, int map_stat) else { erts_instr_stat = 1; for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) { - if (ERTS_IS_SBMBC_ALLOCATOR_NO__(i)) - continue; erts_allctrs[i].alloc = stat_alloc; erts_allctrs[i].realloc = stat_realloc; erts_allctrs[i].free = stat_free; diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c index a1d270adba..bc59fe55d4 100644 --- a/erts/emulator/beam/erl_lock_check.c +++ b/erts/emulator/beam/erl_lock_check.c @@ -150,7 
+150,6 @@ static erts_lc_lock_order_t erts_lock_order[] = { { "instr_x", NULL }, { "instr", NULL }, { "alcu_allocator", "index" }, - { "sbmbc_alloc", "index" }, { "mseg", NULL }, #if HALFWORD_HEAP { "pmmap", NULL }, diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c index 53cb01a8c6..f753de8f52 100644 --- a/erts/emulator/beam/erl_port_task.c +++ b/erts/emulator/beam/erl_port_task.c @@ -1928,18 +1928,21 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p) break; case ERTS_PORT_TASK_INPUT: erts_stale_drv_select(pp->common.id, + ERTS_Port2ErlDrvPort(pp), ptp->u.alive.td.io.event, DO_READ, 1); break; case ERTS_PORT_TASK_OUTPUT: erts_stale_drv_select(pp->common.id, + ERTS_Port2ErlDrvPort(pp), ptp->u.alive.td.io.event, DO_WRITE, 1); break; case ERTS_PORT_TASK_EVENT: erts_stale_drv_select(pp->common.id, + ERTS_Port2ErlDrvPort(pp), ptp->u.alive.td.io.event, 0, 1); diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index bb3e787401..88eb224f84 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -1327,6 +1327,17 @@ erts_alloc_notify_delayed_dealloc(int ix) ERTS_SSI_AUX_WORK_DD); } +void +erts_alloc_ensure_handle_delayed_dealloc_call(int ix) +{ +#ifdef DEBUG + ErtsSchedulerData *esdp = erts_get_scheduler_data(); + ASSERT(!esdp || ix == (int) esdp->no); +#endif + set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(ix-1), + ERTS_SSI_AUX_WORK_DD); +} + static ERTS_INLINE erts_aint32_t handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) { @@ -4671,8 +4682,8 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online) esdp->reductions = 0; init_sched_wall_time(&esdp->sched_wall_time); - erts_port_task_handle_init(&esdp->nosuspend_port_task_handle); + } init_misc_aux_work(); diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h index 6ef1a0073d..865ac6c43f 100644 --- a/erts/emulator/beam/erl_process.h +++ b/erts/emulator/beam/erl_process.h @@ -420,6 +420,11 @@ typedef struct { } ErtsSchedWallTime; typedef struct { + Uint64 reclaimed; + Uint64 garbage_cols; +} ErtsGCInfo; + +typedef struct { int sched; erts_aint32_t aux_work; } ErtsDelayedAuxWorkWakeupJob; @@ -507,6 +512,7 @@ struct ErtsSchedulerData_ { Uint64 reductions; ErtsSchedWallTime sched_wall_time; + ErtsGCInfo gc_info; ErtsPortTaskHandle nosuspend_port_task_handle; #ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC @@ -1126,6 +1132,7 @@ void erts_early_init_scheduling(int); void erts_init_scheduling(int, int); Eterm erts_sched_wall_time_request(Process *c_p, int set, int enable); +Eterm erts_gc_info_request(Process *c_p); Uint64 erts_get_proc_interval(void); Uint64 erts_ensure_later_proc_interval(Uint64); Uint64 erts_step_proc_interval(void); @@ -1333,6 +1340,7 @@ int erts_is_multi_scheduling_blocked(void); Eterm erts_multi_scheduling_blockers(Process *); void erts_start_schedulers(void); void erts_alloc_notify_delayed_dealloc(int); +void erts_alloc_ensure_handle_delayed_dealloc_call(int); void erts_smp_notify_check_children_needed(void); #endif #if ERTS_USE_ASYNC_READY_Q diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h index c900cdca39..d5e727bcba 100755 --- a/erts/emulator/beam/global.h +++ b/erts/emulator/beam/global.h @@ -688,12 +688,6 @@ void MD5Final(unsigned char [16], MD5_CTX *); /* ggc.c */ - -typedef struct { - Uint garbage_collections; - Uint reclaimed; -} ErtsGCInfo; - void erts_gc_info(ErtsGCInfo *gcip); void erts_init_gc(void); 
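Taken together, the erl_gc.c, erl_process.h and global.h hunks move the GC statistics from one spinlock-protected global pair into a per-scheduler ErtsGCInfo slot, and statistics(garbage_collection) now traps to erlang:gather_gc_info_result/1, which awaits one {Ref, {SchedulerId, GarbageCols, Reclaimed}} reply per scheduler (built in reply_gc_info() above). The gather step itself lives in the preloaded erlang module, which is outside this diff, so the following is only an illustrative sketch; the function and variable names are assumptions, not the actual preloaded code:

    %% Illustrative sketch only: await one reply per scheduler for Ref and
    %% sum the per-scheduler counters into the documented 3-tuple.
    gather_gc_info_result_sketch(Ref) ->
        gather(Ref, erlang:system_info(schedulers), 0, 0).

    gather(_Ref, 0, GCs, Reclaimed) ->
        {GCs, Reclaimed, 0};
    gather(Ref, N, GCs, Reclaimed) ->
        receive
            {Ref, {_SchedulerId, SchedGCs, SchedReclaimed}} ->
                gather(Ref, N - 1, GCs + SchedGCs, Reclaimed + SchedReclaimed)
        end.

The trailing 0 keeps the historical {GCs, WordsReclaimed, 0} shape that statistics(garbage_collection) returned before this change.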
int erts_garbage_collect(Process*, int, Eterm*, int); @@ -729,10 +723,10 @@ void erts_raw_port_command(Port*, byte*, Uint); void driver_report_exit(ErlDrvPort, int); LineBuf* allocate_linebuf(int); int async_ready(Port *, void*); -ErtsPortNames *erts_get_port_names(Eterm); +ErtsPortNames *erts_get_port_names(Eterm, ErlDrvPort); void erts_free_port_names(ErtsPortNames *); Uint erts_port_ioq_size(Port *pp); -void erts_stale_drv_select(Eterm, ErlDrvEvent, int, int); +void erts_stale_drv_select(Eterm, ErlDrvPort, ErlDrvEvent, int, int); Port *erts_get_heart_port(void); diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c index 13cff24b95..01e130bd64 100644 --- a/erts/emulator/beam/io.c +++ b/erts/emulator/beam/io.c @@ -4814,7 +4814,8 @@ int async_ready(Port *p, void* data) static void report_missing_drv_callback(Port *p, char *drv_type, char *callback) { - ErtsPortNames *pnp = erts_get_port_names(p->common.id); + ErtsPortNames *pnp = erts_get_port_names(p->common.id, + ERTS_Port2ErlDrvPort(p)); char *unknown = "<unknown>"; char *drv_name = pnp->driver_name ? pnp->driver_name : unknown; char *prt_name = pnp->name ? pnp->name : unknown; @@ -4829,15 +4830,25 @@ report_missing_drv_callback(Port *p, char *drv_type, char *callback) void erts_stale_drv_select(Eterm port, + ErlDrvPort drv_port, ErlDrvEvent hndl, int mode, int deselect) { char *type; - ErlDrvPort drv_port = ERTS_Port2ErlDrvPort(erts_port_lookup_raw(port)); - ErtsPortNames *pnp = erts_get_port_names(port); + ErtsPortNames *pnp; erts_dsprintf_buf_t *dsbufp; + if (drv_port == ERTS_INVALID_ERL_DRV_PORT) { + Port *prt = erts_port_lookup_raw(port); + if (prt) + drv_port = ERTS_Port2ErlDrvPort(prt); + else + drv_port = ERTS_INVALID_ERL_DRV_PORT; + } + + pnp = erts_get_port_names(port, drv_port); + switch (mode) { case ERL_DRV_READ | ERL_DRV_WRITE: type = "Input/Output"; @@ -4872,12 +4883,16 @@ erts_stale_drv_select(Eterm port, } ErtsPortNames * -erts_get_port_names(Eterm id) +erts_get_port_names(Eterm id, ErlDrvPort drv_port) { - Port *prt = erts_port_lookup_raw(id); + Port *prt; ErtsPortNames *pnp; ASSERT(is_nil(id) || is_internal_port(id)); + prt = ERTS_ErlDrvPort2Port(drv_port); + if (prt == ERTS_INVALID_ERL_DRV_PORT) + prt = erts_port_lookup_raw(id); + if (!prt) { pnp = erts_alloc(ERTS_ALC_T_PORT_NAMES, sizeof(ErtsPortNames)); pnp->name = NULL; @@ -4889,6 +4904,7 @@ erts_get_port_names(Eterm id) size_t pnp_len = sizeof(ErtsPortNames); #ifndef DEBUG pnp_len += 100; /* In most cases 100 characters will be enough... */ + ASSERT(prt->common.id == id); #endif pnp = erts_alloc(ERTS_ALC_T_PORT_NAMES, pnp_len); do { diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c index 853dd90c34..7035dc77df 100644 --- a/erts/emulator/sys/common/erl_check_io.c +++ b/erts/emulator/sys/common/erl_check_io.c @@ -874,7 +874,7 @@ need2steal(ErtsDrvEventState *state, int mode) static void print_driver_name(erts_dsprintf_buf_t *dsbufp, Eterm id) { - ErtsPortNames *pnp = erts_get_port_names(id); + ErtsPortNames *pnp = erts_get_port_names(id, ERTS_INVALID_ERL_DRV_PORT); if (!pnp->name && !pnp->driver_name) erts_dsprintf(dsbufp, "%s ", "<unknown>"); else { @@ -1357,8 +1357,8 @@ bad_fd_in_pollset(ErtsDrvEventState *state, Eterm inport, "Bad %s fd in erts_poll()!
fd=%d, ", io_str, (int) state->fd); if (is_nil(port)) { - ErtsPortNames *ipnp = erts_get_port_names(inport); - ErtsPortNames *opnp = erts_get_port_names(outport); + ErtsPortNames *ipnp = erts_get_port_names(inport, ERTS_INVALID_ERL_DRV_PORT); + ErtsPortNames *opnp = erts_get_port_names(outport, ERTS_INVALID_ERL_DRV_PORT); erts_dsprintf(dsbufp, "ports=%T/%T, drivers=%s/%s, names=%s/%s\n", is_nil(inport) ? am_undefined : inport, is_nil(outport) ? am_undefined : outport, @@ -1370,7 +1370,7 @@ bad_fd_in_pollset(ErtsDrvEventState *state, Eterm inport, erts_free_port_names(opnp); } else { - ErtsPortNames *pnp = erts_get_port_names(port); + ErtsPortNames *pnp = erts_get_port_names(port, ERTS_INVALID_ERL_DRV_PORT); erts_dsprintf(dsbufp, "port=%T, driver=%s, name=%s\n", is_nil(port) ? am_undefined : port, pnp->driver_name ? pnp->driver_name : "<unknown>", @@ -1390,7 +1390,7 @@ bad_fd_in_pollset(ErtsDrvEventState *state, Eterm inport, static void stale_drv_select(Eterm id, ErtsDrvEventState *state, int mode) { - erts_stale_drv_select(id, (ErlDrvEvent) state->fd, mode, 0); + erts_stale_drv_select(id, ERTS_INVALID_ERL_DRV_PORT, (ErlDrvEvent) state->fd, mode, 0); deselect(state, mode); } @@ -1774,7 +1774,7 @@ static void doit_erts_check_io_debug(void *vstate, void *vcounters) err = 1; } else { - ErtsPortNames *pnp = erts_get_port_names(id); + ErtsPortNames *pnp = erts_get_port_names(id, ERTS_INVALID_ERL_DRV_PORT); erts_printf(" inport=%T inname=%s indrv=%s ", id, pnp->name ? pnp->name : "unknown", @@ -1791,7 +1791,7 @@ static void doit_erts_check_io_debug(void *vstate, void *vcounters) err = 1; } else { - ErtsPortNames *pnp = erts_get_port_names(id); + ErtsPortNames *pnp = erts_get_port_names(id, ERTS_INVALID_ERL_DRV_PORT); erts_printf(" outport=%T outname=%s outdrv=%s ", id, pnp->name ? pnp->name : "unknown", @@ -1827,7 +1827,7 @@ static void doit_erts_check_io_debug(void *vstate, void *vcounters) err = 1; } else { - ErtsPortNames *pnp = erts_get_port_names(id); + ErtsPortNames *pnp = erts_get_port_names(id, ERTS_INVALID_ERL_DRV_PORT); erts_printf(" port=%T name=%s drv=%s ", id, pnp->name ? pnp->name : "unknown", diff --git a/erts/emulator/test/alloc_SUITE.erl b/erts/emulator/test/alloc_SUITE.erl index 22b5d93983..33abd45982 100644 --- a/erts/emulator/test/alloc_SUITE.erl +++ b/erts/emulator/test/alloc_SUITE.erl @@ -28,7 +28,8 @@ bucket_index/1, bucket_mask/1, rbtree/1, - mseg_clear_cache/1]). + mseg_clear_cache/1, + cpool/1]). -export([init_per_testcase/2, end_per_testcase/2]). @@ -40,7 +41,7 @@ suite() -> [{ct_hooks,[ts_install_cth]}]. all() -> [basic, coalesce, threads, realloc_copy, bucket_index, - bucket_mask, rbtree, mseg_clear_cache]. + bucket_mask, rbtree, mseg_clear_cache, cpool]. groups() -> []. @@ -105,6 +106,10 @@ mseg_clear_cache(suite) -> []; mseg_clear_cache(doc) -> []; mseg_clear_cache(Cfg) -> ?line drv_case(Cfg). +cpool(suite) -> []; +cpool(doc) -> []; +cpool(Cfg) -> ?line drv_case(Cfg). 
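The new cpool case reuses the suite's drv_case/1 harness: the Erlang side only loads the C test driver added further below (cpool.c), which hammers the carrier pool from TEST_NO_THREADS threads. Per thread the driver publishes carrier I, retires carrier I-TEST_CARRIERS_OFFSET, and spot-checks membership on every eighth step. Below is a plain-Erlang stand-in for that access pattern, using a shared ets table instead of the real lock-free pool; it is illustrative only and not part of the patch, and the helper name cpool_worker is an assumption:

    %% Illustrative stand-in: Pool is a public ets set created by the caller,
    %% e.g. Pool = ets:new(pool, [set, public, {write_concurrency, true}]).
    cpool_worker(Pool, NCarriers, Offset) ->
        lists:foreach(
          fun(I) ->
                  case I < NCarriers of
                      true ->
                          ets:insert(Pool, {{self(), I}}),   % publish carrier I
                          case I band 7 of
                              0 -> true = ets:member(Pool, {self(), I});
                              _ -> ok
                          end;
                      false -> ok
                  end,
                  D = I - Offset,
                  case D >= 0 of
                      true ->
                          ets:delete(Pool, {self(), D}),     % retire carrier I-Offset
                          case D band 7 of
                              0 -> false = ets:member(Pool, {self(), D});
                              _ -> ok
                          end;
                      false -> ok
                  end
          end,
          lists:seq(0, NCarriers + Offset - 1)).

Like the real driver, each worker touches only its own carriers, so the membership checks cannot be raced by another thread; the concurrency pressure is on the shared pool structure itself.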
+ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% %% %% Internal functions %% diff --git a/erts/emulator/test/alloc_SUITE_data/Makefile.src b/erts/emulator/test/alloc_SUITE_data/Makefile.src index 035415d73e..1d4286e671 100644 --- a/erts/emulator/test/alloc_SUITE_data/Makefile.src +++ b/erts/emulator/test/alloc_SUITE_data/Makefile.src @@ -23,7 +23,8 @@ TEST_DRVS = basic@dll@ \ bucket_index@dll@ \ bucket_mask@dll@ \ rbtree@dll@ \ - mseg_clear_cache@dll@ + mseg_clear_cache@dll@ \ + cpool@dll@ CC = @CC@ LD = @LD@ diff --git a/erts/emulator/test/alloc_SUITE_data/allocator_test.h b/erts/emulator/test/alloc_SUITE_data/allocator_test.h index c0396ddb61..c37b074f93 100644 --- a/erts/emulator/test/alloc_SUITE_data/allocator_test.h +++ b/erts/emulator/test/alloc_SUITE_data/allocator_test.h @@ -75,6 +75,15 @@ typedef void* erts_cond; #define PREV_BLK(B) ((Block_t *) ALC_TEST1(0x019, (B))) #define IS_MBC_FIRST_BLK(A,B) ((Ulong) ALC_TEST2(0x01a, (A), (B))) #define UNIT_SZ ((Ulong) ALC_TEST0(0x01b)) +#define BLK_TO_MBC(B) ((Carrier_t *) ALC_TEST1(0x01c, (B))) +#define ADD_MBC(A, C) ((void) ALC_TEST2(0x01d, (A), (C))) +#define REMOVE_MBC(A, C) ((void) ALC_TEST2(0x01e, (A), (C))) +#define ZERO_CRR_SIZE ((Ulong) ALC_TEST0(0x01f)) +#define ZERO_CRR_INIT(A,B) ((Carrier_t *) ALC_TEST2(0x020, (A), (B))) +#define CPOOL_INSERT(A,B) ((Carrier_t *) ALC_TEST2(0x021, (A), (B))) +#define CPOOL_DELETE(A,B) ((Carrier_t *) ALC_TEST2(0x022, (A), (B))) +#define CPOOL_IS_EMPTY(A) ((int) ALC_TEST1(0x023, (A))) +#define CPOOL_IS_IN_POOL(A,B) ((int) ALC_TEST2(0x024, (A), (B))) /* From erl_goodfit_alloc.c */ #define BKT_IX(A, S) ((Ulong) ALC_TEST2(0x100, (A), (S))) @@ -84,15 +93,16 @@ typedef void* erts_cond; /* From erl_bestfit_alloc.c and erl_ao_firstfit_alloc.c */ #define IS_AOBF(A) ((Ulong) ALC_TEST1(RBT_OP(0), (A))) -#define RBT_ROOT(A) ((RBT_t *) ALC_TEST1(RBT_OP(1), (A))) +#define RBT_ROOT(A,SZ) ((RBT_t *) ALC_TEST2(RBT_OP(1), (A), (SZ))) #define RBT_PARENT(T) ((RBT_t *) ALC_TEST1(RBT_OP(2), (T))) #define RBT_LEFT(T) ((RBT_t *) ALC_TEST1(RBT_OP(3), (T))) #define RBT_RIGHT(T) ((RBT_t *) ALC_TEST1(RBT_OP(4), (T))) #define RBT_NEXT(T) ((RBTL_t *) ALC_TEST1(RBT_OP(5), (T))) #define RBT_IS_BLACK(T) ((Ulong) ALC_TEST1(RBT_OP(6), (T))) #define RBT_IS_TREE(T) ((Ulong) ALC_TEST1(RBT_OP(7), (T))) -#define IS_AOFF(A) ((Ulong) ALC_TEST1(RBT_OP(8), (A))) +#define IS_BF_ALGO(A) ((Ulong) ALC_TEST1(RBT_OP(8), (A))) #define RBT_MAX_SZ(T) ((Ulong) ALC_TEST1(RBT_OP(9), (T))) +#define IS_CBF(A) ((Ulong) ALC_TEST1(RBT_OP(0xa), (A))) /* From erl_mseg.c */ #define HAVE_MSEG() ((int) ALC_TEST0(0x400)) @@ -129,5 +139,6 @@ typedef void* erts_cond; #define THR_CREATE(F, A) ((erts_thread) ALC_TEST2(0xf10, (F), (A))) #define THR_JOIN(T) ((void) ALC_TEST1(0xf11, (T))) #define THR_EXIT(R) ((void) ALC_TEST1(0xf12, (R))) +#define IS_SMP_ENABLED ((int) ALC_TEST0(0xf13)) #endif diff --git a/erts/emulator/test/alloc_SUITE_data/coalesce.c b/erts/emulator/test/alloc_SUITE_data/coalesce.c index 981fa6d43e..36710bf7b5 100644 --- a/erts/emulator/test/alloc_SUITE_data/coalesce.c +++ b/erts/emulator/test/alloc_SUITE_data/coalesce.c @@ -267,7 +267,7 @@ void testcase_run(TestCaseState_t *tcs) { char *argv_org[] = {"-tsmbcs511","-tmmbcs511", "-tsbct512", "-trmbcmt100", "-tas", NULL, NULL}; - char *alg[] = {"af", "gf", "bf", "aobf", "aoff", NULL}; + char *alg[] = {"af", "gf", "bf", "aobf", "aoff", "aoffcaobf", NULL}; int i; for (i = 0; alg[i]; i++) { diff --git a/erts/emulator/test/alloc_SUITE_data/cpool.c 
b/erts/emulator/test/alloc_SUITE_data/cpool.c new file mode 100644 index 0000000000..276ed7be04 --- /dev/null +++ b/erts/emulator/test/alloc_SUITE_data/cpool.c @@ -0,0 +1,157 @@ +/* + * %CopyrightBegin% + * + * Copyright Ericsson AB 2013. All Rights Reserved. + * + * The contents of this file are subject to the Erlang Public License, + * Version 1.1, (the "License"); you may not use this file except in + * compliance with the License. You should have received a copy of the + * Erlang Public License along with this software. If not, it can be + * retrieved online at http://www.erlang.org/. + * + * Software distributed under the License is distributed on an "AS IS" + * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + * the License for the specific language governing rights and limitations + * under the License. + * + * %CopyrightEnd% + */ + + +#ifndef __WIN32__ +#include <sys/types.h> +#include <unistd.h> +#include <errno.h> +#endif +#include <stdio.h> +#include <stdarg.h> +#include <string.h> +#include "testcase_driver.h" +#include "allocator_test.h" + +#define FATAL_ASSERT(A) \ + ((void) ((A) \ + ? 1 \ + : (fatal_assert_failed(#A, \ + (char *) __FILE__, \ + __LINE__), \ + 0))) + +static void +fatal_assert_failed(char* expr, char* file, int line) +{ + fflush(stdout); + fprintf(stderr, "%s:%d: Assertion failed: %s\n", + file, line, expr); + fflush(stderr); + abort(); +} + +#define TEST_NO_THREADS 10 +#define TEST_NO_CARRIERS_PER_THREAD 100000 +#define TEST_CARRIERS_OFFSET 5 +
+static Allctr_t *alloc = NULL; + +static void stop_allocator(void) +{ + if (alloc) { + STOP_ALC(alloc); + alloc = NULL; + } +} + + +void *thread_func(void *arg); + +char * +testcase_name(void) +{ + return "cpool"; +} + +void +testcase_cleanup(TestCaseState_t *tcs) +{ + stop_allocator(); +} + +void * +thread_func(void *arg) +{ + Carrier_t **crr = (Carrier_t **) arg; + int i; + + for (i = 0; i < (TEST_NO_CARRIERS_PER_THREAD+TEST_CARRIERS_OFFSET); i++) { + int d; + if (i < TEST_NO_CARRIERS_PER_THREAD) { + CPOOL_INSERT(alloc, crr[i]); + if ((i & 0x7) == 0) + FATAL_ASSERT(CPOOL_IS_IN_POOL(alloc, crr[i])); + } + d = i-TEST_CARRIERS_OFFSET; + if (d >= 0) { + CPOOL_DELETE(alloc, crr[d]); + if ((d & 0x7) == 0) + FATAL_ASSERT(!CPOOL_IS_IN_POOL(alloc, crr[d])); + } + } + for (i = 0; i < TEST_NO_CARRIERS_PER_THREAD; i++) + FATAL_ASSERT(!CPOOL_IS_IN_POOL(alloc, crr[i])); + return NULL; +} + +static struct { + erts_thread tid; + Carrier_t *crr[TEST_NO_CARRIERS_PER_THREAD]; +} threads[TEST_NO_THREADS] = {{0}}; + +void +testcase_run(TestCaseState_t *tcs) +{ + int no_threads, t, c; + char *block, *p; + Ulong zcrr_sz; + + if (!IS_SMP_ENABLED) + testcase_skipped(tcs, "No SMP support"); + + alloc = START_ALC("Zero carrier allocator", 1, NULL); + + zcrr_sz = ZERO_CRR_SIZE; + + block = p = ALLOC(alloc, zcrr_sz*TEST_NO_THREADS*TEST_NO_CARRIERS_PER_THREAD); + + ASSERT(tcs, block != NULL); + + for (t = 0; t < TEST_NO_THREADS; t++) { + for (c = 0; c < TEST_NO_CARRIERS_PER_THREAD; c++) { + Carrier_t *crr = (Carrier_t *) p; + p += zcrr_sz; + ZERO_CRR_INIT(alloc, crr); + threads[t].crr[c] = crr; + } + } + + no_threads = 0; + for (t = 0; t < TEST_NO_THREADS; t++) { + threads[t].tid = THR_CREATE(thread_func, (void *) threads[t].crr); + if (threads[t].tid) { + testcase_printf(tcs, "Successfully created thread %d\n", t); + no_threads++; + } + else { + testcase_printf(tcs, "Failed to create thread %d\n", t); + break; + } + } + + for (t = 0; t < no_threads; t++) + THR_JOIN(threads[t].tid); + + FATAL_ASSERT(CPOOL_IS_EMPTY(alloc)); + +
FREE(alloc, block); + + ASSERT(tcs, no_threads == TEST_NO_THREADS); +} diff --git a/erts/emulator/test/alloc_SUITE_data/rbtree.c b/erts/emulator/test/alloc_SUITE_data/rbtree.c index 4e7f821baf..702f075304 100644 --- a/erts/emulator/test/alloc_SUITE_data/rbtree.c +++ b/erts/emulator/test/alloc_SUITE_data/rbtree.c @@ -85,7 +85,7 @@ print_tree(TestCaseState_t *tcs, RBT_t *root) static RBT_t * check_tree(TestCaseState_t *tcs, Allctr_t *alc, Ulong size) { - enum { BF, AOBF, AOFF }type; + enum { BF, AOBF, AOFF, AOFFCAOBF }type; int i, max_i; char stk[128]; RBT_t *root, *x, *y, *res; @@ -94,11 +94,16 @@ check_tree(TestCaseState_t *tcs, Allctr_t *alc, Ulong size) res = NULL; - if (IS_AOBF(alc)) type = AOBF; - else if (IS_AOFF(alc)) type = AOFF; - else type = BF; + if (IS_BF_ALGO(alc)) { + if (IS_AOBF(alc)) type = AOBF; + else type = BF; + } + else { /* AOFF_ALGO */ + if (IS_CBF(alc)) type = AOFFCAOBF; + else type = AOFF; + } - root = RBT_ROOT(alc); + root = RBT_ROOT(alc, size); #ifdef PRINT_TREE print_tree(tcs, root); @@ -188,6 +193,15 @@ check_tree(TestCaseState_t *tcs, Allctr_t *alc, Ulong size) ASSERT(tcs, y < x); ASSERT(tcs, RBT_MAX_SZ(y) <= RBT_MAX_SZ(x)); break; + case AOFFCAOBF: + { + void* x_crr = BLK_TO_MBC(x); + void* y_crr = BLK_TO_MBC(y); + ASSERT(tcs, (y < x && (x_crr != y_crr || x_sz == y_sz)) + || (y_sz < x_sz && x_crr == y_crr)); + ASSERT(tcs, RBT_MAX_SZ(y) <= RBT_MAX_SZ(x)); + break; + } } } @@ -207,6 +221,15 @@ check_tree(TestCaseState_t *tcs, Allctr_t *alc, Ulong size) ASSERT(tcs, y > x); ASSERT(tcs, RBT_MAX_SZ(y) <= RBT_MAX_SZ(x)); break; + case AOFFCAOBF: + { + void* x_crr = BLK_TO_MBC(x); + void* y_crr = BLK_TO_MBC(y); + ASSERT(tcs, (y > x && (x_crr != y_crr || x_sz == y_sz)) + || (y_sz > x_sz && x_crr == y_crr)); + ASSERT(tcs, RBT_MAX_SZ(y) <= RBT_MAX_SZ(x)); + break; + } } } @@ -239,7 +262,18 @@ check_tree(TestCaseState_t *tcs, Allctr_t *alc, Ulong size) res = x; } break; + case AOFFCAOBF: + if (BLK_TO_MBC(x) != BLK_TO_MBC(res) || x_sz == y_sz) { + if (x < res) { + res = x; + } + } + else if (x_sz < y_sz) { + res = x; + } + break; } + } } @@ -263,17 +297,20 @@ check_tree(TestCaseState_t *tcs, Allctr_t *alc, Ulong size) } static void -do_check(TestCaseState_t *tcs, Allctr_t *a, Ulong size) +do_check(TestCaseState_t *tcs, Allctr_t *a, Ulong size, int ignore_null) { Ulong sz = ((size + 7) / 8)*8; void *tmp; Block_t *x, *y; x = (Block_t *) check_tree(tcs, a, sz); + if (!x && ignore_null) + return; + tmp = ALLOC(a, sz - ABLK_HDR_SZ); ASSERT(tcs, tmp); y = UMEM2BLK(tmp); - if (IS_AOBF(a)) { + if (!(IS_BF_ALGO(a) && !IS_AOBF(a))) { ASSERT(tcs, x == y); } else { @@ -306,7 +343,7 @@ test_it(TestCaseState_t *tcs) blk[i] = NULL; } if (i % (NO_BLOCKS/2) == 0) - do_check(tcs, a, 50); + do_check(tcs, a, 50, 0); } for (i = 0; i < NO_BLOCKS; i++) { @@ -315,7 +352,7 @@ test_it(TestCaseState_t *tcs) blk[i] = NULL; } if (i % (NO_BLOCKS/2) == 0) - do_check(tcs, a, 200); + do_check(tcs, a, 200, 0); } for (i = 0; i < NO_BLOCKS; i++) { @@ -324,20 +361,101 @@ test_it(TestCaseState_t *tcs) blk[i] = NULL; } if (i % (NO_BLOCKS/2) == 0) - do_check(tcs, a, 100); + do_check(tcs, a, 100, 0); } - do_check(tcs, a, 250); + do_check(tcs, a, 250, 0); for (i = 0; i < NO_BLOCKS; i++) { FREE(a, fence[i]); if (i % (NO_BLOCKS/3) == 0) - do_check(tcs, a, 300); + do_check(tcs, a, 300, 0); + } + + ASSERT(tcs, RBT_ROOT(a,0)); + ASSERT(tcs, !RBT_LEFT(RBT_ROOT(a,0))); + ASSERT(tcs, !RBT_RIGHT(RBT_ROOT(a,0))); +} + + +static int is_single_ablk_in_mbc(Allctr_t* a, void* ptr, void* crr) +{ + Block_t* blk = UMEM2BLK(ptr); + 
if (crr == BLK_TO_MBC(blk)) { + Block_t* first = MBC_TO_FIRST_BLK(a, crr); + if (blk == first || (IS_FREE_BLK(first) && blk == NXT_BLK(first))) { + Block_t* nxt; + if (IS_LAST_BLK(blk)) { + return 1; + } + nxt = NXT_BLK(blk); + return IS_FREE_BLK(nxt) && IS_LAST_BLK(nxt); + } + } + return 0; +} + +static void +test_carrier_migration(TestCaseState_t *tcs) +{ + int i, j; + Allctr_t* a = ((rbtree_test_data *) tcs->extra)->allocator; + void **blk = ((rbtree_test_data *) tcs->extra)->blk; + void **fence = ((rbtree_test_data *) tcs->extra)->fence; + void *crr, *p, *free_crr; + Ulong min_blk_sz; + + min_blk_sz = MIN_BLK_SZ(a); + + for (i = 0; i < NO_BLOCKS; i++) { + blk[i] = ALLOC(a, min_blk_sz + i % 500); + fence[i] = ALLOC(a, 1); + ASSERT(tcs, blk[i] && fence[i]); + } + + for (j = 0; j < NO_BLOCKS; j += 997) { + crr = BLK_TO_MBC(UMEM2BLK(blk[j])); + REMOVE_MBC(a, crr); + + for (i = 0; i < NO_BLOCKS; i++) { + if (i % 3 == 0) { + if (is_single_ablk_in_mbc(a, blk[i], crr)) { + crr = NULL; /* about to destroy the removed mbc */ + } + FREE(a, blk[i]); + blk[i] = NULL; + } + if (i % (NO_BLOCKS/2) == 0) + do_check(tcs, a, 50, 1); + } + + for (i = 0; i < NO_BLOCKS; i++) { + if (i % 3 == 0) { + ASSERT(tcs, !blk[i]); + blk[i] = ALLOC(a, min_blk_sz + i % 500); + ASSERT(tcs, BLK_TO_MBC(UMEM2BLK(blk[i])) != crr); + } + if (i % (NO_BLOCKS/2) == 0) + do_check(tcs, a, 50, 1); + } + if (crr) { + ADD_MBC(a, crr); + } + } + + for (crr = FIRST_MBC(a); crr; crr = NEXT_C(crr)) { + REMOVE_MBC(a, crr); } - ASSERT(tcs, RBT_ROOT(a)); - ASSERT(tcs, !RBT_LEFT(RBT_ROOT(a))); - ASSERT(tcs, !RBT_RIGHT(RBT_ROOT(a))); + p = ALLOC(a, 1); + free_crr = BLK_TO_MBC(UMEM2BLK(p)); + FREE(a, p); + + for (crr = FIRST_MBC(a); crr; crr = NEXT_C(crr)) { + ASSERT(tcs, free_crr != crr); + } + + ASSERT(tcs, !RBT_ROOT(a,0)); } @@ -369,6 +487,7 @@ testcase_run(TestCaseState_t *tcs) char *argv1[] = {"-tasbf", NULL}; char *argv2[] = {"-tasaobf", NULL}; char *argv3[] = {"-tasaoff", NULL}; + char *argv4[] = {"-tasaoffcaobf", NULL}; Allctr_t *a; rbtree_test_data *td; @@ -390,6 +509,7 @@ testcase_run(TestCaseState_t *tcs) td->allocator = a = START_ALC("rbtree_bf_", 0, argv1); ASSERT(tcs, a); + ASSERT(tcs, IS_BF_ALGO(a)); ASSERT(tcs, !IS_AOBF(a)); test_it(tcs); @@ -407,6 +527,7 @@ testcase_run(TestCaseState_t *tcs) td->allocator = a = START_ALC("rbtree_aobf_", 0, argv2); ASSERT(tcs, a); + ASSERT(tcs, IS_BF_ALGO(a)); ASSERT(tcs, IS_AOBF(a)); test_it(tcs); @@ -424,11 +545,34 @@ testcase_run(TestCaseState_t *tcs) td->allocator = a = START_ALC("rbtree_aoff_", 0, argv3); ASSERT(tcs, a); + ASSERT(tcs, !IS_BF_ALGO(a)); + ASSERT(tcs, !IS_CBF(a)); test_it(tcs); + test_carrier_migration(tcs); STOP_ALC(a); td->allocator = NULL; testcase_printf(tcs, "Address order first fit test succeeded!\n"); + + /* Address order first fit, best fit within carrier */ + + testcase_printf(tcs, "Starting test of aoffcaobf...\n"); + + current_rbt_type_op_base = AO_FIRSTFIT_OP_BASE; + td->allocator = a = START_ALC("rbtree_aoffcaobf_", 0, argv4); + + ASSERT(tcs, a); + ASSERT(tcs, !IS_BF_ALGO(a)); + ASSERT(tcs, IS_CBF(a)); + + test_it(tcs); + test_carrier_migration(tcs); + + STOP_ALC(a); + td->allocator = NULL; + + testcase_printf(tcs, "aoffcaobf test succeeded!\n"); + } diff --git a/erts/emulator/test/driver_SUITE.erl b/erts/emulator/test/driver_SUITE.erl index dfba7d098f..104bdf8aec 100644 --- a/erts/emulator/test/driver_SUITE.erl +++ b/erts/emulator/test/driver_SUITE.erl @@ -2582,10 +2582,9 @@ driver_alloc_size() -> MemInfo -> CS = lists:foldl( fun ({instance, _, L}, Acc) -> - 
{value,{_,SBMBCS}} = lists:keysearch(sbmbcs, 1, L), {value,{_,MBCS}} = lists:keysearch(mbcs, 1, L), {value,{_,SBCS}} = lists:keysearch(sbcs, 1, L), - [SBMBCS,MBCS,SBCS | Acc] + [MBCS,SBCS | Acc] end, [], MemInfo), diff --git a/erts/emulator/test/nif_SUITE.erl b/erts/emulator/test/nif_SUITE.erl index dcf58fa474..9a70e8646a 100644 --- a/erts/emulator/test/nif_SUITE.erl +++ b/erts/emulator/test/nif_SUITE.erl @@ -1368,10 +1368,9 @@ tmpmem() -> MemInfo -> MSBCS = lists:foldl( fun ({instance, _, L}, Acc) -> - {value,{_,SBMBCS}} = lists:keysearch(sbmbcs, 1, L), {value,{_,MBCS}} = lists:keysearch(mbcs, 1, L), {value,{_,SBCS}} = lists:keysearch(sbcs, 1, L), - [SBMBCS,MBCS,SBCS | Acc] + [MBCS,SBCS | Acc] end, [], MemInfo), diff --git a/erts/emulator/test/port_SUITE.erl b/erts/emulator/test/port_SUITE.erl index 13aa0f4c00..e467e844b3 100644 --- a/erts/emulator/test/port_SUITE.erl +++ b/erts/emulator/test/port_SUITE.erl @@ -92,7 +92,7 @@ spawn_driver/1, spawn_executable/1, close_deaf_port/1, unregister_name/1, parallelism_option/1]). --export([]). +-export([do_iter_max_ports/2]). %% Internal exports. -export([tps/3]). @@ -635,9 +635,16 @@ iter_max_ports_test(Config) -> {win32,_} -> 4; _ -> 10 end, - L = do_iter_max_ports(Iters, Command), + %% Run on a different node in order to limit the effect if this test fails. + Dir = filename:dirname(code:which(?MODULE)), + {ok,Node} = test_server:start_node(test_iter_max_socks,slave, + [{args,"+Q 2048 -pa " ++ Dir}]), + L = rpc:call(Node,?MODULE,do_iter_max_ports,[Iters, Command]), + test_server:stop_node(Node), + io:format("Result: ~p",[L]), all_equal(L), + all_equal(L), test_server:timetrap_cancel(Dog), {comment, "Max ports: " ++ integer_to_list(hd(L))}. @@ -670,7 +677,7 @@ close_ports([]) -> ok. open_ports(Name, Settings) -> - test_server:sleep(50), + test_server:sleep(5), case catch open_port(Name, Settings) of P when is_port(P) -> [P| open_ports(Name, Settings)]; diff --git a/erts/emulator/test/send_term_SUITE.erl b/erts/emulator/test/send_term_SUITE.erl index ba0ba804ca..6615873392 100644 --- a/erts/emulator/test/send_term_SUITE.erl +++ b/erts/emulator/test/send_term_SUITE.erl @@ -175,10 +175,6 @@ chk_temp_alloc() -> %% Verify that we havn't got anything allocated by temp_alloc lists:foreach( fun ({instance, _, TI}) -> - ?line {value, {sbmbcs, SBMBCInfo}} - = lists:keysearch(sbmbcs, 1, TI), - ?line {value, {blocks, 0, _, _}} - = lists:keysearch(blocks, 1, SBMBCInfo), ?line {value, {mbcs, MBCInfo}} = lists:keysearch(mbcs, 1, TI), ?line {value, {blocks, 0, _, _}} |
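A knock-on effect of dropping the sbmbc allocator shows up in the test suites above: code that walks erlang:system_info({allocator, A}) instances can no longer expect an sbmbcs section and should sum only the mbcs and sbcs sections. A hedged sketch of that pattern follows; the helper names alloc_blocks and blocks are illustrative, not part of the patch:

    %% Illustrative helper: total current block count for an allocator,
    %% summing only the mbcs and sbcs sections of each instance.
    alloc_blocks(Alloc) ->
        case erlang:system_info({allocator, Alloc}) of
            false ->
                undefined;
            MemInfo when is_list(MemInfo) ->
                lists:foldl(
                  fun({instance, _, L}, Acc) ->
                          {value, {mbcs, MBCS}} = lists:keysearch(mbcs, 1, L),
                          {value, {sbcs, SBCS}} = lists:keysearch(sbcs, 1, L),
                          Acc + blocks(MBCS) + blocks(SBCS)
                  end, 0, MemInfo)
        end.

    blocks(CarrierInfo) ->
        {value, {blocks, N, _, _}} = lists:keysearch(blocks, 1, CarrierInfo),
        N.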