Diffstat (limited to 'erts/emulator')
68 files changed, 4259 insertions, 1238 deletions
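Reviewer note: the bif.c hunks below repeat the same port-lookup change inline several times (in link_1, exit_2 and do_send): which port state flags make the lookup fail now depends on erts_port_synchronous_ops. The sketch below only restates that pattern for readability; the helper name is hypothetical, while erts_port_synchronous_ops, erts_port_lookup() and the two flag masks are taken from the diff itself. erl_bif_port.c, further down, factors the same choice into real helpers, sig_lookup_port() and data_lookup_port().

/* Sketch only; assumes the usual erts emulator headers (global.h, erl_port.h). */
static Port *
lookup_port_for_signal(Eterm id) /* hypothetical name */
{
    /* Pick the invalid-state mask the same way the bif.c hunks do inline. */
    Uint32 invalid_flags = (erts_port_synchronous_ops
                            ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
                            : ERTS_PORT_SFLGS_INVALID_LOOKUP);
    return erts_port_lookup(id, invalid_flags);
}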
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in index 9751982103..f442540f49 100644 --- a/erts/emulator/Makefile.in +++ b/erts/emulator/Makefile.in @@ -808,6 +808,7 @@ OS_OBJS += $(OBJDIR)/erl_poll.o \ endif OS_OBJS += $(OBJDIR)/erl_mseg.o \ + $(OBJDIR)/erl_mmap.o \ $(OBJDIR)/erl_$(ERLANG_OSTYPE)_sys_ddll.o \ $(OBJDIR)/erl_mtrace_sys_wrap.o \ $(OBJDIR)/erl_sys_common_misc.o diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c index da36c4437e..78ab6fa30f 100644 --- a/erts/emulator/beam/beam_emu.c +++ b/erts/emulator/beam/beam_emu.c @@ -5654,7 +5654,6 @@ build_stacktrace(Process* c_p, Eterm exc) { return res; } - static BeamInstr* call_error_handler(Process* p, BeamInstr* fi, Eterm* reg, Eterm func) { @@ -5702,7 +5701,6 @@ call_error_handler(Process* p, BeamInstr* fi, Eterm* reg, Eterm func) return ep->addressv[erts_active_code_ix()]; } - static Export* apply_setup_error_handler(Process* p, Eterm module, Eterm function, Uint arity, Eterm* reg) { @@ -6208,7 +6206,6 @@ new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free) return make_fun(funp); } - int catchlevel(Process *p) { diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c index 4193eb4f3f..938fd8f2c9 100644 --- a/erts/emulator/beam/beam_load.c +++ b/erts/emulator/beam/beam_load.c @@ -535,7 +535,6 @@ static int must_swap_floats; Uint erts_total_code_size; /**********************************************************************/ - void init_load(void) { FloatDef f; @@ -1209,7 +1208,6 @@ verify_chunks(LoaderState* stp) return 0; } - static int load_atom_table(LoaderState* stp) { @@ -1255,7 +1253,6 @@ load_atom_table(LoaderState* stp) return 0; } - static int load_import_table(LoaderState* stp) { @@ -1308,7 +1305,6 @@ load_import_table(LoaderState* stp) return 0; } - static int read_export_table(LoaderState* stp) { @@ -1641,7 +1637,6 @@ read_line_table(LoaderState* stp) return 0; } - static int read_code_header(LoaderState* stp) { @@ -1711,7 +1706,6 @@ read_code_header(LoaderState* stp) return 0; } - #define VerifyTag(Stp, Actual, Expected) \ if (Actual != Expected) { \ LoadError2(Stp, "bad tag %d; expected %d", Actual, Expected); \ @@ -1730,7 +1724,6 @@ read_code_header(LoaderState* stp) #define TermWords(t) (((t) / (sizeof(BeamInstr)/sizeof(Eterm))) + !!((t) % (sizeof(BeamInstr)/sizeof(Eterm)))) - static int load_code(LoaderState* stp) { @@ -2512,7 +2505,6 @@ load_code(LoaderState* stp) return retval; } - #define succ(St, X, Y) ((X).type == (Y).type && (X).val + 1 == (Y).val) #define succ2(St, X, Y) ((X).type == (Y).type && (X).val + 2 == (Y).val) #define succ3(St, X, Y) ((X).type == (Y).type && (X).val + 3 == (Y).val) @@ -3958,7 +3950,6 @@ tuple_append_put(LoaderState* stp, GenOpArg Arity, GenOpArg Dst, } - /* * Freeze the code in memory, move the string table into place, * resolve all labels. @@ -4276,7 +4267,6 @@ freeze_code(LoaderState* stp) return 0; } - static void final_touch(LoaderState* stp) { @@ -4378,7 +4368,6 @@ final_touch(LoaderState* stp) } } - static int transform_engine(LoaderState* st) { @@ -4716,7 +4705,6 @@ transform_engine(LoaderState* st) return rval; } - static void short_file(int line, LoaderState* stp, unsigned needed) { @@ -4724,7 +4712,6 @@ short_file(int line, LoaderState* stp, unsigned needed) stp->file_name, needed); } - static void load_printf(int line, LoaderState* context, char *fmt,...) 
{ @@ -5190,7 +5177,6 @@ native_addresses(Process* p, Eterm mod) return result; } - /* * Builds a list of all exported functions in the given module: * [{Name, Arity},...] @@ -5240,7 +5226,6 @@ exported_from_module(Process* p, /* Process whose heap to use. */ return result; } - /* * Returns a list of all attributes for the module. * @@ -5281,7 +5266,6 @@ attributes_for_module(Process* p, /* Process whose heap to use. */ return result; } - /* * Returns a list containing compilation information. * diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c index 58bd6aac0b..96666d98ed 100644 --- a/erts/emulator/beam/bif.c +++ b/erts/emulator/beam/bif.c @@ -160,7 +160,10 @@ BIF_RETTYPE link_1(BIF_ALIST_1) if (is_internal_port(BIF_ARG_1)) { int send_link_signal = 0; - Port *prt = erts_port_lookup(BIF_ARG_1, ERTS_PORT_SFLGS_INVALID_LOOKUP); + Port *prt = erts_port_lookup(BIF_ARG_1, + (erts_port_synchronous_ops + ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP + : ERTS_PORT_SFLGS_INVALID_LOOKUP)); if (!prt) { goto res_no_proc; } @@ -1363,11 +1366,22 @@ BIF_RETTYPE exit_2(BIF_ALIST_2) */ if (is_internal_port(BIF_ARG_1)) { - Port *prt = erts_port_lookup(BIF_ARG_1, ERTS_PORT_SFLGS_INVALID_LOOKUP); + Eterm ref, *refp; + Uint32 invalid_flags; + Port *prt; + + if (erts_port_synchronous_ops) { + refp = &ref; + invalid_flags = ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP; + } + else { + refp = NULL; + invalid_flags = ERTS_PORT_SFLGS_INVALID_LOOKUP; + } + + prt = erts_port_lookup(BIF_ARG_1, invalid_flags); if (prt) { - Eterm ref; - Eterm *refp = erts_port_synchronous_ops ? &ref : NULL; ErtsPortOpResult res; #ifdef DEBUG @@ -1875,7 +1889,10 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) { if (rp) goto send_message; - pt = erts_port_lookup(id, ERTS_PORT_SFLGS_INVALID_LOOKUP); + pt = erts_port_lookup(id, + (erts_port_synchronous_ops + ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP + : ERTS_PORT_SFLGS_INVALID_LOOKUP)); if (pt) { portid = id; goto port_common; @@ -1905,7 +1922,10 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) { int ret_val; portid = to; - pt = erts_port_lookup(portid, ERTS_PORT_SFLGS_INVALID_LOOKUP); + pt = erts_port_lookup(portid, + (erts_port_synchronous_ops + ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP + : ERTS_PORT_SFLGS_INVALID_LOOKUP)); port_common: ret_val = 0; @@ -1994,7 +2014,10 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) { rp = erts_proc_lookup_raw(id); if (rp) goto send_message; - pt = erts_port_lookup(id, ERTS_PORT_SFLGS_INVALID_LOOKUP); + pt = erts_port_lookup(id, + (erts_port_synchronous_ops + ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP + : ERTS_PORT_SFLGS_INVALID_LOOKUP)); if (pt) { portid = id; goto port_common; diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab index 9f3de5f780..6c5fd17902 100644 --- a/erts/emulator/beam/bif.tab +++ b/erts/emulator/beam/bif.tab @@ -570,6 +570,7 @@ bif erlang:float_to_binary/2 bif erlang:binary_to_float/1 bif io:printable_range/0 +bif os:unsetenv/1 # # Obsolete diff --git a/erts/emulator/beam/big.c b/erts/emulator/beam/big.c index 6b43c53985..2b27b111d8 100644 --- a/erts/emulator/beam/big.c +++ b/erts/emulator/beam/big.c @@ -1325,9 +1325,9 @@ static dsize_t I_lshift(ErtsDigit* x, dsize_t xl, Sint y, return 1; } else { - SWord ay = (y < 0) ? -y : y; - int bw = ay / D_EXP; - int sw = ay % D_EXP; + Uint ay = (y < 0) ? 
-y : y; + Uint bw = ay / D_EXP; + Uint sw = ay % D_EXP; dsize_t rl; ErtsDigit a1=0; ErtsDigit a0=0; @@ -1368,7 +1368,7 @@ static dsize_t I_lshift(ErtsDigit* x, dsize_t xl, Sint y, } if (sign) { - int zl = bw; + Uint zl = bw; ErtsDigit* z = x; while(zl--) { diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c index acbd700cc4..7d4f52ee23 100644 --- a/erts/emulator/beam/break.c +++ b/erts/emulator/beam/break.c @@ -76,7 +76,10 @@ process_info(int to, void *to_arg) for (i = 0; i < max; i++) { Process *p = erts_pix2proc(i); if (p && p->i != ENULL) { - if (!ERTS_PROC_IS_EXITING(p)) + /* Do not include processes with no heap, + * they are most likely just created and has invalid data + */ + if (!ERTS_PROC_IS_EXITING(p) && p->heap != NULL) print_process_info(to, to_arg, p); } } @@ -756,7 +759,7 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args) return; /* Can't create the crash dump, skip it */ time(&now); - erts_fdprintf(fd, "=erl_crash_dump:0.2\n%s", ctime(&now)); + erts_fdprintf(fd, "=erl_crash_dump:0.3\n%s", ctime(&now)); if (file != NULL) erts_fdprintf(fd, "The error occurred in file %s, line %d\n", file, line); diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c index 44f4eb9d43..6ecf3f0722 100644 --- a/erts/emulator/beam/dist.c +++ b/erts/emulator/beam/dist.c @@ -353,7 +353,7 @@ static void doit_link_net_exits_sub(ErtsLink *sublnk, void *vlnecp) static void doit_link_net_exits(ErtsLink *lnk, void *vnecp) { LinkNetExitsContext lnec = {(NetExitsContext *) vnecp, lnk}; - ASSERT(lnk->type == LINK_PID) + ASSERT(lnk->type == LINK_PID); erts_sweep_links(ERTS_LINK_ROOT(lnk), &doit_link_net_exits_sub, (void *) &lnec); #ifdef DEBUG ERTS_LINK_ROOT(lnk) = NULL; @@ -369,7 +369,7 @@ static void doit_node_link_net_exits(ErtsLink *lnk, void *vnecp) Process *rp; ErtsLink *rlnk; Uint i,n; - ASSERT(lnk->type == LINK_NODE) + ASSERT(lnk->type == LINK_NODE); if (is_internal_pid(lnk->pid)) { ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK; rp = erts_pid2proc(NULL, 0, lnk->pid, rp_locks); @@ -1509,12 +1509,12 @@ int erts_net_message(Port *prt, break; } rp = erts_pid2proc(NULL, 0, mon->pid, rp_locks); + + erts_destroy_monitor(mon); if (rp == NULL) { break; } - erts_destroy_monitor(mon); - mon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref); if (mon == NULL) { diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c index 5eacff8829..b5ba9bb94a 100644 --- a/erts/emulator/beam/erl_alloc.c +++ b/erts/emulator/beam/erl_alloc.c @@ -100,6 +100,8 @@ static Uint install_debug_functions(void); #endif #endif +static int lock_all_physical_memory = 0; + ErtsAllocatorFunctions_t erts_allctrs[ERTS_ALC_A_MAX+1]; ErtsAllocatorInfo_t erts_allctrs_info[ERTS_ALC_A_MAX+1]; ErtsAllocatorThrSpec_t erts_allctr_thr_spec[ERTS_ALC_A_MAX+1]; @@ -267,7 +269,6 @@ set_default_sl_alloc_opts(struct au_init *ip) ip->atype = GOODFIT; #endif ip->init.util.name_prefix = "sl_"; - ip->init.util.mmmbc = 5; ip->init.util.alloc_no = ERTS_ALC_A_SHORT_LIVED; #ifndef SMALL_MEMORY ip->init.util.mmbcs = 128*1024; /* Main carrier size */ @@ -295,7 +296,6 @@ set_default_std_alloc_opts(struct au_init *ip) ip->atype = BESTFIT; #endif ip->init.util.name_prefix = "std_"; - ip->init.util.mmmbc = 5; ip->init.util.alloc_no = ERTS_ALC_A_STANDARD; #ifndef SMALL_MEMORY ip->init.util.mmbcs = 128*1024; /* Main carrier size */ @@ -319,7 +319,6 @@ set_default_ll_alloc_opts(struct au_init *ip) #endif ip->init.util.ramv = 0; ip->init.util.mmsbc = 0; - ip->init.util.mmmbc = 0; ip->init.util.sbct = ~((UWord) 0); 
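/* Reviewer note: this diff removes every explicit init.util.mmmbc
 * assignment in erl_alloc.c (the sl/std/ll defaults above, the
 * eheap/binary/ets defaults below, the adjust_tpref() shrink logic,
 * and one handle_args() case). The value presumably falls back to the
 * generic default in ERTS_DEFAULT_ALLCTR_INIT (erl_alloc_util.h),
 * which the same diff raises from 10 to ~((UWord) 0), i.e. effectively
 * no cap on the number of mseg multiblock carriers. */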
ip->init.util.name_prefix = "ll_"; ip->init.util.alloc_no = ERTS_ALC_A_LONG_LIVED; @@ -370,7 +369,6 @@ set_default_eheap_alloc_opts(struct au_init *ip) ip->thr_spec = 1; ip->atype = GOODFIT; #endif - ip->init.util.mmmbc = 100; ip->init.util.name_prefix = "eheap_"; ip->init.util.alloc_no = ERTS_ALC_A_EHEAP; #ifndef SMALL_MEMORY @@ -397,7 +395,6 @@ set_default_binary_alloc_opts(struct au_init *ip) ip->thr_spec = 1; ip->atype = BESTFIT; #endif - ip->init.util.mmmbc = 50; ip->init.util.name_prefix = "binary_"; ip->init.util.alloc_no = ERTS_ALC_A_BINARY; #ifndef SMALL_MEMORY @@ -419,7 +416,6 @@ set_default_ets_alloc_opts(struct au_init *ip) ip->thr_spec = 1; ip->atype = BESTFIT; #endif - ip->init.util.mmmbc = 100; ip->init.util.name_prefix = "ets_"; ip->init.util.alloc_no = ERTS_ALC_A_ETS; #ifndef SMALL_MEMORY @@ -495,13 +491,6 @@ adjust_tpref(struct au_init *ip, int no_sched) /* ... shrink smallest multi-block carrier size */ if (ip->default_.smbcs) ip->init.util.smbcs /= ERTS_MIN(4, no_sched); - /* ... and more than three allocators shrink - max mseg multi-block carriers */ - if (ip->default_.mmmbc && no_sched > 2) { - ip->init.util.mmmbc /= ERTS_MIN(4, no_sched - 1); - if (ip->init.util.mmmbc < 3) - ip->init.util.mmmbc = 3; - } } } @@ -631,6 +620,8 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop) hdbg_init(); #endif + lock_all_physical_memory = 0; + ncpu = eaiop->ncpu; if (ncpu < 1) ncpu = 1; @@ -654,6 +645,20 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop) if (argc && argv) handle_args(argc, argv, &init); + if (lock_all_physical_memory) { +#ifdef HAVE_MLOCKALL + errno = 0; + if (mlockall(MCL_CURRENT|MCL_FUTURE) != 0) { + int err = errno; + char *errstr = err ? strerror(err) : "unknown"; + erl_exit(-1, "Failed to lock physical memory: %s (%d)\n", + errstr, err); + } +#else + erl_exit(-1, "Failed to lock physical memory: Not supported\n"); +#endif + } + #ifndef ERTS_SMP init.sl_alloc.thr_spec = 0; init.std_alloc.thr_spec = 0; @@ -731,6 +736,7 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop) init.mseg.nos = erts_no_schedulers; erts_mseg_init(&init.mseg); #endif + erts_alcu_init(&init.alloc_util); erts_afalc_init(); erts_bfalc_init(); @@ -1187,6 +1193,25 @@ get_kb_value(char *param_end, char** argv, int* ip) return ((Uint) tmp)*1024; } +static UWord +get_mb_value(char *param_end, char** argv, int* ip) +{ + SWord tmp; + UWord max = ((~((UWord) 0))/(1024*1024)) + 1; + char *rest; + char *param = argv[*ip]+1; + char *value = get_value(param_end, argv, ip); + errno = 0; + tmp = (SWord) ErtsStrToSint(value, &rest, 10); + if (errno != 0 || rest == value || tmp < 0 || max < ((UWord) tmp)) + bad_value(param, param_end, value); + if (max == (UWord) tmp) + return ~((UWord) 0); + else + return ((UWord) tmp)*1024*1024; +} + + #if 0 static Uint get_byte_value(char *param_end, char** argv, int* ip) @@ -1461,6 +1486,30 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init) #endif get_amount_value(argv[i]+6, argv, &i); } + else if (has_prefix("scs", argv[i]+3)) { +#if HAVE_ERTS_MSEG + init->mseg.mmap.scs = +#endif + get_mb_value(argv[i]+6, argv, &i); + } + else if (has_prefix("sco", argv[i]+3)) { +#if HAVE_ERTS_MSEG + init->mseg.mmap.sco = +#endif + get_bool_value(argv[i]+6, argv, &i); + } + else if (has_prefix("scrpm", argv[i]+3)) { +#if HAVE_ERTS_MSEG + init->mseg.mmap.scrpm = +#endif + get_bool_value(argv[i]+8, argv, &i); + } + else if (has_prefix("scrfsd", argv[i]+3)) { +#if HAVE_ERTS_MSEG + init->mseg.mmap.scrfsd = +#endif + 
get_amount_value(argv[i]+9, argv, &i); + } else { bad_param(param, param+2); } @@ -1560,7 +1609,6 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init) aui[a]->thr_spec = 0; check_disable_carrier_migration(aui[a]); aui[a]->init.util.ramv = 0; - aui[a]->init.util.mmmbc = 10; aui[a]->init.util.lmbcs = 5*1024*1024; } } @@ -1600,6 +1648,19 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init) bad_param(param, param+2); } break; + case 'l': + if (has_prefix("pm", param+2)) { + arg = get_value(argv[i]+5, argv, &i); + if (strcmp("all", arg) == 0) + lock_all_physical_memory = 1; + else if (strcmp("no", arg) == 0) + lock_all_physical_memory = 0; + else + bad_value(param, param+4, arg); + break; + } + bad_param(param, param+2); + break; case 'u': if (has_prefix("ycs", argv[i]+3)) { init->alloc_util.ycs @@ -1609,6 +1670,10 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init) init->alloc_util.mmc = get_amount_value(argv[i]+6, argv, &i); } + else if (has_prefix("sac", argv[i]+3)) { + init->alloc_util.sac + = get_bool_value(argv[i]+6, argv, &i); + } else { int a; int start = i; @@ -2682,6 +2747,7 @@ erts_allocator_info(int to, void *arg) #if HAVE_ERTS_MSEG { + struct erts_mmap_info_struct emis; #ifdef ERTS_SMP int max = (int) erts_no_schedulers; #else @@ -2692,6 +2758,8 @@ erts_allocator_info(int to, void *arg) erts_print(to, arg, "=allocator:mseg_alloc[%d]\n", i); erts_mseg_info(i, &to, arg, 0, NULL, NULL); } + erts_print(to, arg, "=allocator:mseg_alloc.erts_mmap\n"); + erts_mmap_info(&to, arg, NULL, NULL, &emis); } #endif @@ -2716,8 +2784,8 @@ erts_allocator_options(void *proc) #endif Uint sz, *szp, *hp, **hpp; Eterm res, features, settings; - Eterm atoms[ERTS_ALC_A_MAX-ERTS_ALC_A_MIN+5]; - Uint terms[ERTS_ALC_A_MAX-ERTS_ALC_A_MIN+5]; + Eterm atoms[ERTS_ALC_A_MAX-ERTS_ALC_A_MIN+7]; + Uint terms[ERTS_ALC_A_MAX-ERTS_ALC_A_MIN+7]; int a, length; SysAllocStat sas; Uint *endp = NULL; @@ -2815,6 +2883,11 @@ erts_allocator_options(void *proc) terms[length++] = erts_bld_2tup_list(hpp, szp, 3, o, v); } + atoms[length] = am_atom_put("lock_physical_memory", 20); + terms[length++] = (lock_all_physical_memory + ? am_atom_put("all", 3) + : am_atom_put("no", 2)); + settings = erts_bld_2tup_list(hpp, szp, length, atoms, terms); length = 0; @@ -2830,6 +2903,9 @@ erts_allocator_options(void *proc) if (use_mseg) terms[length++] = am_atom_put("mseg_alloc", 10); #endif +#if ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC + terms[length++] = am_atom_put("sys_aligned_alloc", 17); +#endif features = length ? erts_bld_list(hpp, szp, length, terms) : NIL; @@ -2915,6 +2991,7 @@ reply_alloc_info(void *vair) Uint sz, *szp; ErlOffHeap *ohp = NULL; ErlHeapFragment *bp = NULL; + struct erts_mmap_info_struct emis; int i; Eterm (*info_func)(Allctr_t *, int, @@ -3027,15 +3104,23 @@ reply_alloc_info(void *vair) ? NIL : erts_mseg_info(0, NULL, NULL, hpp != NULL, hpp, szp)); - ainfo = erts_bld_tuple(hpp, szp, 3, - alloc_atom, - make_small(0), - ainfo); + ainfo = erts_bld_tuple3(hpp, szp, + alloc_atom, + make_small(0), + ainfo); + + ai_list = erts_bld_cons(hpp, szp, + ainfo, ai_list); + ainfo = (air->only_sz ? 
NIL : erts_mmap_info(NULL, NULL, hpp, szp, &emis)); + ainfo = erts_bld_tuple3(hpp, szp, + alloc_atom, + erts_bld_atom(hpp,szp,"erts_mmap"), + ainfo); #else - ainfo = erts_bld_tuple(hpp, szp, 2, alloc_atom, - am_false); + ainfo = erts_bld_tuple2(hpp, szp, alloc_atom, + am_false); #endif - break; + break; default: alloc_atom = erts_bld_atom(hpp, szp, (char *) ERTS_ALC_A2AD(ai)); diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h index b5975c6c32..f83f6b39cf 100644 --- a/erts/emulator/beam/erl_alloc.h +++ b/erts/emulator/beam/erl_alloc.h @@ -54,6 +54,16 @@ void erts_sys_alloc_init(void); void *erts_sys_alloc(ErtsAlcType_t, void *, Uint); void *erts_sys_realloc(ErtsAlcType_t, void *, void *, Uint); void erts_sys_free(ErtsAlcType_t, void *, void *); +#if ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC +/* + * Note 'alignment' must remain the same in calls to + * 'erts_sys_aligned_realloc()' and 'erts_sys_aligned_free()' + * as in the initial call to 'erts_sys_aligned_alloc()'. + */ +void *erts_sys_aligned_alloc(UWord alignment, UWord size); +void *erts_sys_aligned_realloc(UWord alignment, void *ptr, UWord size, UWord old_size); +void erts_sys_aligned_free(UWord alignment, void *ptr); +#endif Eterm erts_memory(int *, void *, void *, Eterm); Eterm erts_allocated_areas(int *, void *, void *); diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c index e6d9f83aed..c6cea0185f 100644 --- a/erts/emulator/beam/erl_alloc_util.c +++ b/erts/emulator/beam/erl_alloc_util.c @@ -81,16 +81,12 @@ static int atoms_initialized = 0; static int initialized = 0; - #define INV_SYS_ALLOC_CARRIER_MASK ((UWord) (sys_alloc_carrier_size - 1)) #define SYS_ALLOC_CARRIER_MASK (~INV_SYS_ALLOC_CARRIER_MASK) #define SYS_ALLOC_CARRIER_FLOOR(X) ((X) & SYS_ALLOC_CARRIER_MASK) #define SYS_ALLOC_CARRIER_CEILING(X) \ SYS_ALLOC_CARRIER_FLOOR((X) + INV_SYS_ALLOC_CARRIER_MASK) -#undef ASSERT -#define ASSERT ASSERT_EXPR - #if 0 /* Can be useful for debugging */ #define MBC_REALLOC_ALWAYS_MOVES @@ -102,6 +98,7 @@ static Uint sys_alloc_carrier_size; #if HAVE_ERTS_MSEG static Uint max_mseg_carriers; #endif +static int allow_sys_alloc_carriers; #define ONE_GIGA (1000000000) @@ -194,9 +191,9 @@ MBC after deallocating first block: #if MBC_ABLK_OFFSET_BITS -# define MBC_SZ_MAX_LIMIT ((((UWord)1 << MBC_ABLK_OFFSET_BITS) - 1) << MSEG_ALIGN_BITS) +# define MBC_SZ_MAX_LIMIT ((((UWord)1 << MBC_ABLK_OFFSET_BITS) - 1) << ERTS_SUPER_ALIGN_BITS) -# define BLK_CARRIER_OFFSET(B, C) (((char*)(B) - (char*)(C)) >> MSEG_UNIT_SHIFT) +# define BLK_CARRIER_OFFSET(B, C) (((char*)(B) - (char*)(C)) >> ERTS_SACRR_UNIT_SHIFT) # define SET_MBC_ABLK_HDR(B, Sz, F, C) \ (ASSERT(((Sz) & ~MBC_ABLK_SZ_MASK) == 0), \ @@ -210,7 +207,7 @@ MBC after deallocating first block: (B)->u.carrier = (C)) # define IS_MBC_FIRST_ABLK(AP,B) \ - ((((UWord)(B) & ~MSEG_UNIT_MASK) == MBC_HEADER_SIZE(AP)) \ + ((((UWord)(B) & ~ERTS_SACRR_UNIT_MASK) == MBC_HEADER_SIZE(AP)) \ && ((B)->bhdr & MBC_ABLK_OFFSET_MASK) == 0) # define IS_MBC_FIRST_FBLK(AP,B) \ @@ -376,6 +373,8 @@ do { \ #define ERTS_CRR_ALCTR_FLG_IN_POOL (((erts_aint_t) 1) << 0) #define ERTS_CRR_ALCTR_FLG_BUSY (((erts_aint_t) 1) << 1) +#define ERTS_CRR_ALCTR_FLG_MASK (ERTS_CRR_ALCTR_FLG_IN_POOL | \ + ERTS_CRR_ALCTR_FLG_BUSY) #ifdef ERTS_SMP #define SBC_HEADER_SIZE \ @@ -760,8 +759,9 @@ static ERTS_INLINE void * alcu_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags) { void *res; - - res = erts_mseg_alloc_opt(allctr->alloc_no, size_p, flags, &allctr->mseg_opt); + UWord size = (UWord) 
*size_p; + res = erts_mseg_alloc_opt(allctr->alloc_no, &size, flags, &allctr->mseg_opt); + *size_p = (Uint) size; INC_CC(allctr->calls.mseg_alloc); return res; } @@ -770,9 +770,10 @@ static ERTS_INLINE void * alcu_mseg_realloc(Allctr_t *allctr, void *seg, Uint old_size, Uint *new_size_p) { void *res; - - res = erts_mseg_realloc_opt(allctr->alloc_no, seg, old_size, new_size_p, + UWord new_size = (UWord) *new_size_p; + res = erts_mseg_realloc_opt(allctr->alloc_no, seg, (UWord) old_size, &new_size, ERTS_MSEG_FLG_NONE, &allctr->mseg_opt); + *new_size_p = (Uint) new_size; INC_CC(allctr->calls.mseg_realloc); return res; } @@ -780,18 +781,22 @@ alcu_mseg_realloc(Allctr_t *allctr, void *seg, Uint old_size, Uint *new_size_p) static ERTS_INLINE void alcu_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size, Uint flags) { - erts_mseg_dealloc_opt(allctr->alloc_no, seg, size, flags, &allctr->mseg_opt); + erts_mseg_dealloc_opt(allctr->alloc_no, seg, (UWord) size, flags, &allctr->mseg_opt); INC_CC(allctr->calls.mseg_dealloc); } #endif static ERTS_INLINE void * -alcu_sys_alloc(Allctr_t *allctr, Uint size) +alcu_sys_alloc(Allctr_t *allctr, Uint size, int superalign) { void *res; - - res = erts_sys_alloc(0, NULL, size); +#if ERTS_SA_MB_CARRIERS && ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC + if (superalign) + res = erts_sys_aligned_alloc(ERTS_SACRR_UNIT_SZ, size); + else +#endif + res = erts_sys_alloc(0, NULL, size); INC_CC(allctr->calls.sys_alloc); if (erts_mtrace_enabled) erts_mtrace_crr_alloc(res, allctr->alloc_no, ERTS_ALC_A_SYSTEM, size); @@ -799,11 +804,16 @@ alcu_sys_alloc(Allctr_t *allctr, Uint size) } static ERTS_INLINE void * -alcu_sys_realloc(Allctr_t *allctr, void *ptr, Uint size) +alcu_sys_realloc(Allctr_t *allctr, void *ptr, Uint size, Uint old_size, int superalign) { void *res; - res = erts_sys_realloc(0, NULL, ptr, size); +#if ERTS_SA_MB_CARRIERS && ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC + if (superalign) + res = erts_sys_aligned_realloc(ERTS_SACRR_UNIT_SZ, ptr, size, old_size); + else +#endif + res = erts_sys_realloc(0, NULL, ptr, size); INC_CC(allctr->calls.sys_realloc); if (erts_mtrace_enabled) erts_mtrace_crr_realloc(res, @@ -815,9 +825,14 @@ alcu_sys_realloc(Allctr_t *allctr, void *ptr, Uint size) } static ERTS_INLINE void -alcu_sys_free(Allctr_t *allctr, void *ptr) +alcu_sys_free(Allctr_t *allctr, void *ptr, int superalign) { - erts_sys_free(0, NULL, ptr); +#if ERTS_SA_MB_CARRIERS && ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC + if (superalign) + erts_sys_aligned_free(ERTS_SACRR_UNIT_SZ, ptr); + else +#endif + erts_sys_free(0, NULL, ptr); INC_CC(allctr->calls.sys_free); if (erts_mtrace_enabled) erts_mtrace_crr_free(allctr->alloc_no, ERTS_ALC_A_SYSTEM, ptr); @@ -1334,7 +1349,7 @@ erts_alcu_fix_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs) return fix_nocpool_alloc_shrink(allctr, flgs); } -static void dealloc_carrier(Allctr_t *allctr, Carrier_t *crr, Uint mseg_flags); +static void dealloc_carrier(Allctr_t *allctr, Carrier_t *crr, int superaligned); #ifdef ERTS_SMP @@ -1392,14 +1407,14 @@ get_used_allctr(Allctr_t *pref_allctr, int pref_lock, void *p, UWord *sizep, if (ERTS_ALC_TS_PREF_LOCK_IF_USED == pref_lock && pref_allctr->thread_safe) { - used_allctr = (Allctr_t *) (iallctr & ~FLG_MASK); + used_allctr = (Allctr_t *) (iallctr & ~ERTS_CRR_ALCTR_FLG_MASK); if (pref_allctr == used_allctr) { erts_mtx_lock(&pref_allctr->mutex); locked_pref_allctr = 1; } } - while ((iallctr & ((~FLG_MASK)|ERTS_CRR_ALCTR_FLG_IN_POOL)) + while ((iallctr & ((~ERTS_CRR_ALCTR_FLG_MASK)|ERTS_CRR_ALCTR_FLG_IN_POOL)) == (((erts_aint_t) 
pref_allctr)|ERTS_CRR_ALCTR_FLG_IN_POOL)) { erts_aint_t act; @@ -1414,7 +1429,7 @@ get_used_allctr(Allctr_t *pref_allctr, int pref_lock, void *p, UWord *sizep, iallctr = act; } - used_allctr = (Allctr_t *) (iallctr & ~FLG_MASK); + used_allctr = (Allctr_t *) (iallctr & ~ERTS_CRR_ALCTR_FLG_MASK); if (ERTS_ALC_TS_PREF_LOCK_IF_USED == pref_lock) { if (locked_pref_allctr && used_allctr != pref_allctr) { @@ -1424,16 +1439,16 @@ get_used_allctr(Allctr_t *pref_allctr, int pref_lock, void *p, UWord *sizep, } ERTS_ALC_CPOOL_ASSERT( - (((iallctr & ~FLG_MASK) == (erts_aint_t) pref_allctr) - ? (((iallctr & FLG_MASK) == ERTS_CRR_ALCTR_FLG_IN_POOL) - || ((iallctr & FLG_MASK) == 0)) + (((iallctr & ~ERTS_CRR_ALCTR_FLG_MASK) == (erts_aint_t) pref_allctr) + ? (((iallctr & ERTS_CRR_ALCTR_FLG_MASK) == ERTS_CRR_ALCTR_FLG_IN_POOL) + || ((iallctr & ERTS_CRR_ALCTR_FLG_MASK) == 0)) : 1)); return used_allctr; } } - used_allctr = (Allctr_t *) (iallctr & ~FLG_MASK); + used_allctr = (Allctr_t *) (iallctr & ~ERTS_CRR_ALCTR_FLG_MASK); if (ERTS_ALC_TS_PREF_LOCK_IF_USED == pref_lock && used_allctr == pref_allctr @@ -1764,7 +1779,7 @@ handle_delayed_dealloc(Allctr_t *allctr, ERTS_ALC_CPOOL_ASSERT(allctr == crr->cpool.orig_allctr); ERTS_ALC_CPOOL_ASSERT(((erts_aint_t) allctr) != (erts_smp_atomic_read_nob(&crr->allctr) - & ~FLG_MASK)); + & ~ERTS_CRR_ALCTR_FLG_MASK)); erts_smp_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr)); @@ -1942,7 +1957,7 @@ mbc_alloc_block(Allctr_t *allctr, Uint size, Uint *blk_szp) if (!blk) { blk = create_carrier(allctr, get_blk_sz, CFLG_MBC); -#if !HALFWORD_HEAP && !HAVE_SUPER_ALIGNED_MB_CARRIERS +#if !HALFWORD_HEAP && !ERTS_SUPER_ALIGNED_MSEG_ONLY if (!blk) { /* Emergency! We couldn't create the carrier as we wanted. Try to place it in a sys_alloced sbc. 
*/ @@ -2907,7 +2922,7 @@ cpool_fetch(Allctr_t *allctr, UWord size) #ifdef ERTS_ALC_CPOOL_DEBUG ERTS_ALC_CPOOL_ASSERT(erts_smp_atomic_xchg_nob(&crr->allctr, ((erts_aint_t) allctr)) - == (((erts_aint_t) allctr) & ~FLG_MASK)); + == (((erts_aint_t) allctr) & ~ERTS_CRR_ALCTR_FLG_MASK)); #else erts_smp_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr)); #endif @@ -2949,7 +2964,7 @@ cpool_fetch(Allctr_t *allctr, UWord size) (erts_aint_t) allctr, exp); if (act == exp) { - cpool_delete(allctr, ((Allctr_t *) (act & ~FLG_MASK)), crr); + cpool_delete(allctr, ((Allctr_t *) (act & ~ERTS_CRR_ALCTR_FLG_MASK)), crr); return crr; } } @@ -2978,7 +2993,7 @@ check_pending_dealloc_carrier(Allctr_t *allctr, dcrr = crr; crr = crr->next; - dealloc_carrier(allctr, dcrr, ERTS_MSEG_FLG_2POW); + dealloc_carrier(allctr, dcrr, 1); i++; } while (crr && i < ERTS_ALC_MAX_DEALLOC_CARRIER); @@ -3013,7 +3028,7 @@ schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr) erts_aint_t max_size; if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr)) { - dealloc_carrier(allctr, crr, ERTS_MSEG_FLG_2POW); + dealloc_carrier(allctr, crr, 1); return; } @@ -3044,7 +3059,7 @@ schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr) ERTS_ALC_CPOOL_ASSERT(crr == FIRST_BLK_TO_MBC(allctr, blk)); ERTS_ALC_CPOOL_ASSERT(((erts_aint_t) allctr) == (erts_smp_atomic_read_nob(&crr->allctr) - & ~FLG_MASK)); + & ~ERTS_CRR_ALCTR_FLG_MASK)); if (ddq_enqueue(&orig_allctr->dd.q, BLK2UMEM(blk), cinit)) erts_alloc_notify_delayed_dealloc(orig_allctr->ix); @@ -3053,7 +3068,7 @@ schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr) if (crr->cpool.thr_prgr == ERTS_THR_PRGR_INVALID || erts_thr_progress_has_reached(crr->cpool.thr_prgr)) { - dealloc_carrier(allctr, crr, ERTS_MSEG_FLG_2POW); + dealloc_carrier(allctr, crr, 1); return; } @@ -3188,10 +3203,10 @@ cpool_read_stat(Allctr_t *allctr, UWord *nocp, UWord *cszp, UWord *nobp, UWord * #ifdef DEBUG -#if HAVE_ERTS_MSEG -#define ASSERT_MSEG_UNIT_SIZE_MULTIPLE(CSZ) ASSERT((CSZ) % MSEG_UNIT_SZ == 0) +#if ERTS_SA_MB_CARRIERS +#define ASSERT_ERTS_SACRR_UNIT_SIZE_MULTIPLE(CSZ) ASSERT((CSZ) % ERTS_SACRR_UNIT_SZ == 0) #else -#define ASSERT_MSEG_UNIT_SIZE_MULTIPLE(CSZ) +#define ASSERT_ERTS_SACRR_UNIT_SIZE_MULTIPLE(CSZ) #endif static void CHECK_1BLK_CARRIER(Allctr_t* A, int SBC, int MSEGED, Carrier_t* C, @@ -3213,10 +3228,12 @@ static void CHECK_1BLK_CARRIER(Allctr_t* A, int SBC, int MSEGED, Carrier_t* C, ASSERT(IS_MBC_BLK((B))); ASSERT(IS_MB_CARRIER((C))); ASSERT(FBLK_TO_MBC(B) == (C)); + if ((MSEGED)) { + ASSERT_ERTS_SACRR_UNIT_SIZE_MULTIPLE((CSZ)); + } } if ((MSEGED)) { ASSERT(IS_MSEG_CARRIER((C))); - ASSERT_MSEG_UNIT_SIZE_MULTIPLE((CSZ)); } else { ASSERT(IS_SYS_ALLOC_CARRIER((C))); @@ -3242,13 +3259,15 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags) int is_mseg = 0; #endif -#if HALFWORD_HEAP - flags |= CFLG_FORCE_MSEG; -#elif HAVE_SUPER_ALIGNED_MB_CARRIERS - if (flags & CFLG_MBC) { + if (HALFWORD_HEAP + || (ERTS_SUPER_ALIGNED_MSEG_ONLY && (flags & CFLG_MBC)) + || !allow_sys_alloc_carriers) { flags |= CFLG_FORCE_MSEG; - } + flags &= ~CFLG_FORCE_SYS_ALLOC; +#if !HAVE_ERTS_MSEG + return NULL; #endif + } ASSERT((flags & CFLG_SBC && !(flags & CFLG_MBC)) || (flags & CFLG_MBC && !(flags & CFLG_SBC))); @@ -3287,7 +3306,7 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags) if (allctr->sbcs.curr.norm.mseg.no >= allctr->max_mseg_sbcs) goto try_sys_alloc; } -#if !HAVE_SUPER_ALIGNED_MB_CARRIERS +#if !ERTS_SUPER_ALIGNED_MSEG_ONLY else { if (allctr->mbcs.curr.norm.mseg.no >= allctr->max_mseg_mbcs) goto 
try_sys_alloc; @@ -3350,12 +3369,12 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags) ? UNIT_CEILING(bcrr_sz) : SYS_ALLOC_CARRIER_CEILING(bcrr_sz)); - crr = (Carrier_t *) alcu_sys_alloc(allctr, crr_sz); + crr = (Carrier_t *) alcu_sys_alloc(allctr, crr_sz, flags & CFLG_MBC); if (!crr) { if (crr_sz > UNIT_CEILING(bcrr_sz)) { crr_sz = UNIT_CEILING(bcrr_sz); - crr = (Carrier_t *) alcu_sys_alloc(allctr, crr_sz); + crr = (Carrier_t *) alcu_sys_alloc(allctr, crr_sz, flags & CFLG_MBC); } if (!crr) { #if HAVE_ERTS_MSEG @@ -3453,7 +3472,7 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags) if (!(flags & CFLG_FORCE_SYS_ALLOC)) { new_crr_sz = new_blk_sz + SBC_HEADER_SIZE; - new_crr_sz = MSEG_UNIT_CEILING(new_crr_sz); + new_crr_sz = ERTS_SACRR_UNIT_CEILING(new_crr_sz); new_crr = (Carrier_t *) alcu_mseg_realloc(allctr, old_crr, old_crr_sz, @@ -3503,7 +3522,9 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags) new_crr = (Carrier_t *) alcu_sys_realloc(allctr, (void *) old_crr, - new_crr_sz); + new_crr_sz, + old_crr_sz, + 0); if (new_crr) { sys_realloc_success: SET_CARRIER_SZ(new_crr, new_crr_sz); @@ -3522,7 +3543,9 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags) new_crr_sz = UNIT_CEILING(new_crr_sz); new_crr = (Carrier_t *) alcu_sys_realloc(allctr, (void *) old_crr, - new_crr_sz); + new_crr_sz, + old_crr_sz, + 0); if (new_crr) goto sys_realloc_success; } @@ -3541,27 +3564,29 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags) (void *) BLK2UMEM(old_blk), MIN(new_blk_sz, old_blk_sz) - ABLK_HDR_SZ); unlink_carrier(&allctr->sbc_list, old_crr); - alcu_sys_free(allctr, old_crr); + alcu_sys_free(allctr, old_crr, 0); } else { /* Old carrier unchanged; restore... */ STAT_SYS_ALLOC_SBC_ALLOC(allctr, old_crr_sz, old_blk_sz); } - DEBUG_SAVE_ALIGNMENT(new_crr); return new_blk; } #endif } static void -dealloc_carrier(Allctr_t *allctr, Carrier_t *crr, Uint mseg_flags) +dealloc_carrier(Allctr_t *allctr, Carrier_t *crr, int superaligned) { #if HAVE_ERTS_MSEG if (IS_MSEG_CARRIER(crr)) - alcu_mseg_dealloc(allctr, crr, CARRIER_SZ(crr), mseg_flags); + alcu_mseg_dealloc(allctr, crr, CARRIER_SZ(crr), + (superaligned + ? 
ERTS_MSEG_FLG_2POW + : ERTS_MSEG_FLG_NONE)); else #endif - alcu_sys_free(allctr, crr); + alcu_sys_free(allctr, crr, superaligned); } static void @@ -3581,7 +3606,6 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp) #if HAVE_ERTS_MSEG if (IS_MSEG_CARRIER(crr)) { - ASSERT(crr_sz % MSEG_UNIT_SZ == 0); STAT_MSEG_SBC_FREE(allctr, crr_sz, blk_sz); } else @@ -3590,7 +3614,7 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp) unlink_carrier(&allctr->sbc_list, crr); - dealloc_carrier(allctr, crr, ERTS_MSEG_FLG_NONE); + dealloc_carrier(allctr, crr, 0); } else { ASSERT(IS_MBC_FIRST_FBLK(allctr, blk)); @@ -3624,7 +3648,7 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp) unlink_carrier(&allctr->mbc_list, crr); #if HAVE_ERTS_MSEG if (IS_MSEG_CARRIER(crr)) { - ASSERT(crr_sz % MSEG_UNIT_SZ == 0); + ASSERT(crr_sz % ERTS_SACRR_UNIT_SZ == 0); STAT_MSEG_MBC_FREE(allctr, crr_sz); } else @@ -3635,10 +3659,9 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp) #ifdef ERTS_SMP schedule_dealloc_carrier(allctr, crr); #else - dealloc_carrier(allctr, crr, ERTS_MSEG_FLG_2POW); + dealloc_carrier(allctr, crr, 1); #endif } - } @@ -3678,6 +3701,7 @@ static struct { Eterm mmc; #endif Eterm ycs; + Eterm sac; Eterm fix_types; @@ -3770,6 +3794,7 @@ init_atoms(Allctr_t *allctr) AM_INIT(mmc); #endif AM_INIT(ycs); + AM_INIT(sac); AM_INIT(fix_types); @@ -4490,17 +4515,22 @@ erts_alcu_au_info_options(int *print_to_p, void *print_to_arg, #if HAVE_ERTS_MSEG "option mmc: %beu\n" #endif - "option ycs: %beu\n", + "option ycs: %beu\n" + "option sac: %s\n", #if HAVE_ERTS_MSEG max_mseg_carriers, #endif - sys_alloc_carrier_size); + sys_alloc_carrier_size, + allow_sys_alloc_carriers ? "true" : "false"); } if (hpp || szp) { res = NIL; ensure_atoms_initialized(NULL); add_2tup(hpp, szp, &res, + am.sac, + allow_sys_alloc_carriers ? am_true : am_false); + add_2tup(hpp, szp, &res, am.ycs, bld_uint(hpp, szp, sys_alloc_carrier_size)); #if HAVE_ERTS_MSEG @@ -5086,7 +5116,7 @@ do_erts_alcu_realloc(ErtsAlcType_t type, crr_sz = SYS_ALLOC_CARRIER_CEILING(used_sz); #if HAVE_ERTS_MSEG else - crr_sz = MSEG_UNIT_CEILING(used_sz); + crr_sz = ERTS_SACRR_UNIT_CEILING(used_sz); #endif diff_sz_val = crr_sz - used_sz; if (diff_sz_val < (~((Uint) 0) / 100)) @@ -5403,6 +5433,11 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) { /* erts_alcu_start assumes that allctr has been zeroed */ + if (((UWord)allctr & ERTS_CRR_ALCTR_FLG_MASK) != 0) { + erl_exit(ERTS_ABORT_EXIT, "%s:%d:erts_alcu_start: Alignment error\n", + __FILE__, __LINE__); + } + if (!initialized) goto error; @@ -5454,7 +5489,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) allctr->mbc_move_threshold = init->rmbcmt; #if HAVE_ERTS_MSEG allctr->max_mseg_sbcs = init->mmsbc; -# if HAVE_SUPER_ALIGNED_MB_CARRIERS +# if ERTS_SUPER_ALIGNED_MSEG_ONLY allctr->max_mseg_mbcs = ~(Uint)0; # else allctr->max_mseg_mbcs = init->mmmbc; @@ -5493,7 +5528,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) erts_atomic_init_nob(&allctr->cpool.stat.carriers_size, 0); erts_atomic_init_nob(&allctr->cpool.stat.no_carriers, 0); allctr->cpool.check_limit_count = ERTS_ALC_CPOOL_CHECK_LIMIT_COUNT; - allctr->cpool.util_limit = init->acul; + allctr->cpool.util_limit = init->ts ? 
0 : init->acul; #endif allctr->sbc_threshold = init->sbct; @@ -5570,12 +5605,19 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) CFLG_MBC | CFLG_FORCE_SIZE | CFLG_NO_CPOOL -#if !HALFWORD_HEAP && !HAVE_SUPER_ALIGNED_MB_CARRIERS +#if !HALFWORD_HEAP && !ERTS_SUPER_ALIGNED_MSEG_ONLY | CFLG_FORCE_SYS_ALLOC #endif | CFLG_MAIN_CARRIER); - if (!blk) - goto error; + if (!blk) { +#ifdef USE_THREADS + if (allctr->thread_safe) + erts_mtx_destroy(&allctr->mutex); +#endif + erl_exit(ERTS_ABORT_EXIT, + "Failed to create main carrier for %salloc\n", + init->name_prefix); + } (*allctr->link_free_block)(allctr, blk); @@ -5656,12 +5698,13 @@ erts_alcu_init(AlcUInit_t *init) #endif ASSERT(SBC_BLK_SZ_MASK == MBC_FBLK_SZ_MASK); /* see BLK_SZ */ #if HAVE_ERTS_MSEG - ASSERT(erts_mseg_unit_size() == MSEG_UNIT_SZ); + ASSERT(erts_mseg_unit_size() == ERTS_SACRR_UNIT_SZ); max_mseg_carriers = init->mmc; - sys_alloc_carrier_size = MSEG_UNIT_CEILING(init->ycs); + sys_alloc_carrier_size = ERTS_SACRR_UNIT_CEILING(init->ycs); #else /* #if HAVE_ERTS_MSEG */ sys_alloc_carrier_size = ((init->ycs + 4095) / 4096) * 4096; #endif + allow_sys_alloc_carriers = init->sac; #ifdef DEBUG carrier_alignment = sizeof(Unit_t); @@ -5819,7 +5862,7 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk) ASSERT(CARRIER_SZ(sbc) - SBC_HEADER_SIZE >= SBC_BLK_SZ(iblk)); #if HAVE_ERTS_MSEG if (IS_MSEG_CARRIER(sbc)) { - ASSERT(CARRIER_SZ(sbc) % MSEG_UNIT_SZ == 0); + ASSERT(CARRIER_SZ(sbc) % ERTS_SACRR_UNIT_SZ == 0); } #endif crr = sbc; @@ -5904,7 +5947,7 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk) #if HAVE_ERTS_MSEG if (IS_MSEG_CARRIER(crr)) { - ASSERT(CARRIER_SZ(crr) % MSEG_UNIT_SZ == 0); + ASSERT(CARRIER_SZ(crr) % ERTS_SACRR_UNIT_SZ == 0); } #endif cl = &allctr->mbc_list; diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h index 02cbe5c5d0..7be6b1ed9d 100644 --- a/erts/emulator/beam/erl_alloc_util.h +++ b/erts/emulator/beam/erl_alloc_util.h @@ -32,6 +32,7 @@ typedef struct Allctr_t_ Allctr_t; typedef struct { UWord ycs; UWord mmc; + int sac; } AlcUInit_t; typedef struct { @@ -75,7 +76,8 @@ typedef struct { #define ERTS_DEFAULT_ALCU_INIT { \ 1024*1024, /* (bytes) ycs: sys_alloc carrier size */\ - 1024 /* (amount) mmc: max mseg carriers */\ + ~((UWord) 0), /* (amount) mmc: max mseg carriers */\ + 1 /* (bool) sac: sys_alloc carriers */\ } #define ERTS_DEFAULT_ALLCTR_INIT { \ @@ -95,7 +97,7 @@ typedef struct { 50, /* (%) rmbcmt: rel mbc move threshold */\ 1024*1024, /* (bytes) mmbcs: main multiblock carrier size */\ 256, /* (amount) mmsbc: max mseg sbcs */\ - 10, /* (amount) mmmbc: max mseg mbcs */\ + ~((UWord) 0), /* (amount) mmmbc: max mseg mbcs */ \ 10*1024*1024, /* (bytes) lmbcs: largest mbc size */\ 1024*1024, /* (bytes) smbcs: smallest mbc size */\ 10, /* (amount) mbcgs: mbc growth stages */\ @@ -109,7 +111,8 @@ typedef struct { #define ERTS_DEFAULT_ALCU_INIT { \ 128*1024, /* (bytes) ycs: sys_alloc carrier size */\ - 1024 /* (amount) mmc: max mseg carriers */\ + 1024, /* (amount) mmc: max mseg carriers */\ + 1 /* (bool) sac: sys_alloc carriers */\ } #define ERTS_DEFAULT_ALLCTR_INIT { \ @@ -128,7 +131,7 @@ typedef struct { 80, /* (%) rsbcmt: rel sbc move threshold */\ 128*1024, /* (bytes) mmbcs: main multiblock carrier size */\ 256, /* (amount) mmsbc: max mseg sbcs */\ - 10, /* (amount) mmmbc: max mseg mbcs */\ + ~((UWord) 0), /* (amount) mmmbc: max mseg mbcs */ \ 1024*1024, /* (bytes) lmbcs: largest mbc size */\ 128*1024, /* (bytes) smbcs: smallest mbc size */\ 10, /* (amount) mbcgs: mbc growth 
stages */\ @@ -217,25 +220,34 @@ erts_aint32_t erts_alcu_fix_alloc_shrink(Allctr_t *, erts_aint32_t); #define MBC_FBLK_SZ_MASK UNIT_MASK #define CARRIER_SZ_MASK UNIT_MASK -#if HAVE_ERTS_MSEG - -# define MSEG_UNIT_SHIFT MSEG_ALIGN_BITS -# define MSEG_UNIT_SZ (1 << MSEG_UNIT_SHIFT) -# define MSEG_UNIT_MASK ((~(UWord)0) << MSEG_UNIT_SHIFT) - -# define MSEG_UNIT_FLOOR(X) ((X) & MSEG_UNIT_MASK) -# define MSEG_UNIT_CEILING(X) MSEG_UNIT_FLOOR((X) + ~MSEG_UNIT_MASK) - +#if ERTS_HAVE_MSEG_SUPER_ALIGNED \ + || (!HAVE_ERTS_MSEG && ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC) +# ifndef MSEG_ALIGN_BITS +# define ERTS_SUPER_ALIGN_BITS MSEG_ALIGN_BITS +# else +# define ERTS_SUPER_ALIGN_BITS 18 +# endif # ifdef ARCH_64 # define MBC_ABLK_OFFSET_BITS 24 -# elif HAVE_SUPER_ALIGNED_MB_CARRIERS +# else # define MBC_ABLK_OFFSET_BITS 9 /* Affects hard limits for sbct and lmbcs documented in erts_alloc.xml */ # endif -#endif -#ifndef MBC_ABLK_OFFSET_BITS +# define ERTS_SACRR_UNIT_SHIFT ERTS_SUPER_ALIGN_BITS +# define ERTS_SACRR_UNIT_SZ (1 << ERTS_SACRR_UNIT_SHIFT) +# define ERTS_SACRR_UNIT_MASK ((~(UWord)0) << ERTS_SACRR_UNIT_SHIFT) +# define ERTS_SACRR_UNIT_FLOOR(X) ((X) & ERTS_SACRR_UNIT_MASK) +# define ERTS_SACRR_UNIT_CEILING(X) ERTS_SACRR_UNIT_FLOOR((X) + ~ERTS_SACRR_UNIT_MASK) +# define ERTS_SA_MB_CARRIERS 1 +#else +# define ERTS_SA_MB_CARRIERS 0 # define MBC_ABLK_OFFSET_BITS 0 /* no carrier offset in block header */ #endif +#if ERTS_HAVE_MSEG_SUPER_ALIGNED && !ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC +# define ERTS_SUPER_ALIGNED_MSEG_ONLY 1 +#else +# define ERTS_SUPER_ALIGNED_MSEG_ONLY 0 +#endif #if MBC_ABLK_OFFSET_BITS # define MBC_ABLK_OFFSET_SHIFT (sizeof(UWord)*8 - MBC_ABLK_OFFSET_BITS) @@ -245,9 +257,9 @@ erts_aint32_t erts_alcu_fix_alloc_shrink(Allctr_t *, erts_aint32_t); # define MBC_ABLK_SZ_MASK (~FLG_MASK) #endif -#define MBC_ABLK_SZ(B) (ASSERT_EXPR(!is_sbc_blk(B)), (B)->bhdr & MBC_ABLK_SZ_MASK) -#define MBC_FBLK_SZ(B) (ASSERT_EXPR(!is_sbc_blk(B)), (B)->bhdr & MBC_FBLK_SZ_MASK) -#define SBC_BLK_SZ(B) (ASSERT_EXPR(is_sbc_blk(B)), (B)->bhdr & SBC_BLK_SZ_MASK) +#define MBC_ABLK_SZ(B) (ASSERT(!is_sbc_blk(B)), (B)->bhdr & MBC_ABLK_SZ_MASK) +#define MBC_FBLK_SZ(B) (ASSERT(!is_sbc_blk(B)), (B)->bhdr & MBC_FBLK_SZ_MASK) +#define SBC_BLK_SZ(B) (ASSERT(is_sbc_blk(B)), (B)->bhdr & SBC_BLK_SZ_MASK) #define CARRIER_SZ(C) \ ((C)->chdr & CARRIER_SZ_MASK) @@ -327,8 +339,8 @@ typedef struct { (B)->u.carrier) # define ABLK_TO_MBC(B) \ (ASSERT(IS_MBC_BLK(B) && !IS_FREE_BLK(B)), \ - (Carrier_t*)((MSEG_UNIT_FLOOR((UWord)(B)) - \ - (((B)->bhdr >> MBC_ABLK_OFFSET_SHIFT) << MSEG_UNIT_SHIFT)))) + (Carrier_t*)((ERTS_SACRR_UNIT_FLOOR((UWord)(B)) - \ + (((B)->bhdr >> MBC_ABLK_OFFSET_SHIFT) << ERTS_SACRR_UNIT_SHIFT)))) # define BLK_TO_MBC(B) (IS_FREE_BLK(B) ? 
FBLK_TO_MBC(B) : ABLK_TO_MBC(B)) #else # define FBLK_TO_MBC(B) ((B)->carrier) diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.c b/erts/emulator/beam/erl_ao_firstfit_alloc.c index 4e6c8b317e..396aa88e0b 100644 --- a/erts/emulator/beam/erl_ao_firstfit_alloc.c +++ b/erts/emulator/beam/erl_ao_firstfit_alloc.c @@ -85,9 +85,6 @@ #define SET_RED(N) (((AOFF_RBTree_t *) (N))->flags |= RED_FLG) #define SET_BLACK(N) (((AOFF_RBTree_t *) (N))->flags &= ~RED_FLG) -#undef ASSERT -#define ASSERT ASSERT_EXPR - #if 1 #define RBT_ASSERT ASSERT #else diff --git a/erts/emulator/beam/erl_bestfit_alloc.c b/erts/emulator/beam/erl_bestfit_alloc.c index 41f449bb28..59c14899a2 100644 --- a/erts/emulator/beam/erl_bestfit_alloc.c +++ b/erts/emulator/beam/erl_bestfit_alloc.c @@ -75,9 +75,6 @@ #define BF_BLK_SZ(B) MBC_FBLK_SZ(&(B)->hdr) -#undef ASSERT -#define ASSERT ASSERT_EXPR - #if 1 #define RBT_ASSERT ASSERT #else diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c index 0db19a1ee6..ff775691b3 100644 --- a/erts/emulator/beam/erl_bif_binary.c +++ b/erts/emulator/beam/erl_bif_binary.c @@ -927,6 +927,9 @@ static int do_binary_match_compile(Eterm argument, Eterm *tag, Binary **binp) if (binary_bitsize(b) != 0) { goto badarg; } + if (binary_size(b) == 0) { + goto badarg; + } ++words; characters += binary_size(b); } diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c index 39bbd6b182..1da9358334 100755 --- a/erts/emulator/beam/erl_bif_info.c +++ b/erts/emulator/beam/erl_bif_info.c @@ -1771,7 +1771,11 @@ info_1_tuple(Process* BIF_P, /* Pointer to current process. */ #if defined(PURIFY) BIF_RET(erts_make_integer(purify_new_leaks(), BIF_P)); #elif defined(VALGRIND) +# ifdef VALGRIND_DO_ADDED_LEAK_CHECK + VALGRIND_DO_ADDED_LEAK_CHECK; +# else VALGRIND_DO_LEAK_CHECK; +# endif BIF_RET(make_small(0)); #endif } else if (*tp == am_fd) { @@ -2091,7 +2095,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) BIF_RET(res); } else if (BIF_ARG_1 == am_sequential_tracer) { val = erts_get_system_seq_tracer(); - ASSERT(is_internal_pid(val) || is_internal_port(val) || val==am_false) + ASSERT(is_internal_pid(val) || is_internal_port(val) || val==am_false); hp = HAlloc(BIF_P, 3); res = TUPLE2(hp, am_sequential_tracer, val); BIF_RET(res); @@ -2636,6 +2640,9 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) BIF_RET(res); } + else if (ERTS_IS_ATOM_STR("ets_limit",BIF_ARG_1)) { + BIF_RET(make_small(erts_db_get_max_tabs())); + } BIF_ERROR(BIF_P, BADARG); } @@ -3286,6 +3293,9 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) erts_smp_thr_progress_unblock(); BIF_RET(res); } + else if (ERTS_IS_ATOM_STR("mmap", BIF_ARG_1)) { + BIF_RET(erts_mmap_debug_info(BIF_P)); + } } else if (is_tuple(BIF_ARG_1)) { Eterm* tp = tuple_val(BIF_ARG_1); diff --git a/erts/emulator/beam/erl_bif_os.c b/erts/emulator/beam/erl_bif_os.c index 1062d4379b..e07c622928 100644 --- a/erts/emulator/beam/erl_bif_os.c +++ b/erts/emulator/beam/erl_bif_os.c @@ -180,3 +180,25 @@ BIF_RETTYPE os_putenv_2(BIF_ALIST_2) BIF_RET(am_true); } +BIF_RETTYPE os_unsetenv_1(BIF_ALIST_1) +{ + char *key_buf; + char buf[STATIC_BUF_SIZE]; + + key_buf = erts_convert_filename_to_native(BIF_ARG_1,buf,STATIC_BUF_SIZE, + ERTS_ALC_T_TMP,0,0,NULL); + if (!key_buf) { + BIF_ERROR(BIF_P, BADARG); + } + + if (erts_sys_unsetenv(key_buf)) { + if (key_buf != buf) { + erts_free(ERTS_ALC_T_TMP, key_buf); + } + BIF_ERROR(BIF_P, BADARG); + } + if (key_buf != buf) { + erts_free(ERTS_ALC_T_TMP, key_buf); + } + BIF_RET(am_true); +} diff --git 
a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c index 109c54fd7f..3cd53ef65d 100644 --- a/erts/emulator/beam/erl_bif_port.c +++ b/erts/emulator/beam/erl_bif_port.c @@ -84,7 +84,7 @@ BIF_RETTYPE open_port_2(BIF_ALIST_2) } static ERTS_INLINE Port * -lookup_port(Process *c_p, Eterm id_or_name) +lookup_port(Process *c_p, Eterm id_or_name, Uint32 invalid_flags) { /* TODO: Implement nicer lookup in register... */ Eterm id; @@ -92,7 +92,19 @@ lookup_port(Process *c_p, Eterm id_or_name) id = erts_whereis_name_to_id(c_p, id_or_name); else id = id_or_name; - return erts_port_lookup(id, ERTS_PORT_SFLGS_INVALID_LOOKUP); + return erts_port_lookup(id, invalid_flags); +} + +static ERTS_INLINE Port * +sig_lookup_port(Process *c_p, Eterm id_or_name) +{ + return lookup_port(c_p, id_or_name, ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP); +} + +static ERTS_INLINE Port * +data_lookup_port(Process *c_p, Eterm id_or_name) +{ + return lookup_port(c_p, id_or_name, ERTS_PORT_SFLGS_INVALID_LOOKUP); } /* @@ -125,7 +137,7 @@ BIF_RETTYPE erts_internal_port_command_3(BIF_ALIST_3) BIF_RET(am_badarg); } - prt = lookup_port(BIF_P, BIF_ARG_1); + prt = sig_lookup_port(BIF_P, BIF_ARG_1); if (!prt) BIF_RET(am_badarg); @@ -185,7 +197,7 @@ BIF_RETTYPE erts_internal_port_call_3(BIF_ALIST_3) unsigned int op; erts_aint32_t state; - prt = lookup_port(BIF_P, BIF_ARG_1); + prt = sig_lookup_port(BIF_P, BIF_ARG_1); if (!prt) BIF_RET(am_badarg); @@ -235,7 +247,7 @@ BIF_RETTYPE erts_internal_port_control_3(BIF_ALIST_3) unsigned int op; erts_aint32_t state; - prt = lookup_port(BIF_P, BIF_ARG_1); + prt = sig_lookup_port(BIF_P, BIF_ARG_1); if (!prt) BIF_RET(am_badarg); @@ -290,7 +302,7 @@ BIF_RETTYPE erts_internal_port_close_1(BIF_ALIST_1) ref = NIL; #endif - prt = lookup_port(BIF_P, BIF_ARG_1); + prt = sig_lookup_port(BIF_P, BIF_ARG_1); if (!prt) BIF_RET(am_badarg); @@ -320,7 +332,7 @@ BIF_RETTYPE erts_internal_port_connect_2(BIF_ALIST_2) Eterm ref; Port* prt; - prt = lookup_port(BIF_P, BIF_ARG_1); + prt = sig_lookup_port(BIF_P, BIF_ARG_1); if (!prt) BIF_RET(am_badarg); @@ -352,7 +364,7 @@ BIF_RETTYPE erts_internal_port_info_1(BIF_ALIST_1) Port* prt; if (is_internal_port(BIF_ARG_1) || is_atom(BIF_ARG_1)) { - prt = lookup_port(BIF_P, BIF_ARG_1); + prt = sig_lookup_port(BIF_P, BIF_ARG_1); if (!prt) BIF_RET(am_undefined); } @@ -391,7 +403,7 @@ BIF_RETTYPE erts_internal_port_info_2(BIF_ALIST_2) Port* prt; if (is_internal_port(BIF_ARG_1) || is_atom(BIF_ARG_1)) { - prt = lookup_port(BIF_P, BIF_ARG_1); + prt = sig_lookup_port(BIF_P, BIF_ARG_1); if (!prt) BIF_RET(am_undefined); } @@ -523,7 +535,7 @@ BIF_RETTYPE port_set_data_2(BIF_ALIST_2) erts_aint_t data; Port* prt; - prt = lookup_port(BIF_P, BIF_ARG_1); + prt = data_lookup_port(BIF_P, BIF_ARG_1); if (!prt) BIF_ERROR(BIF_P, BADARG); @@ -564,7 +576,7 @@ BIF_RETTYPE port_get_data_1(BIF_ALIST_1) erts_aint_t data; Port* prt; - prt = lookup_port(BIF_P, BIF_ARG_1); + prt = data_lookup_port(BIF_P, BIF_ARG_1); if (!prt) BIF_ERROR(BIF_P, BADARG); diff --git a/erts/emulator/beam/erl_binary.h b/erts/emulator/beam/erl_binary.h index 506c4813fa..819b19e566 100644 --- a/erts/emulator/beam/erl_binary.h +++ b/erts/emulator/beam/erl_binary.h @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2000-2011. All Rights Reserved. + * Copyright Ericsson AB 2000-2013. All Rights Reserved. 
* * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -153,7 +153,7 @@ do { \ #define binary_bytes(Bin) \ (*binary_val(Bin) == HEADER_PROC_BIN ? \ ((ProcBin *) binary_val(Bin))->bytes : \ - (ASSERT_EXPR(thing_subtag(*binary_val(Bin)) == HEAP_BINARY_SUBTAG), \ + (ASSERT(thing_subtag(*binary_val(Bin)) == HEAP_BINARY_SUBTAG), \ (byte *)(&(((ErlHeapBin *) binary_val(Bin))->data)))) void erts_init_binary(void); @@ -183,7 +183,7 @@ BIF_RETTYPE erts_binary_part(Process *p, Eterm binary, Eterm epos, Eterm elen); #endif #define ERTS_CHK_BIN_ALIGNMENT(B) \ - do { ASSERT(!(B) || (((UWord) &((Binary *)(B))->orig_bytes[0]) & ERTS_BIN_ALIGNMENT_MASK) == ((UWord) 0)) } while(0) + do { ASSERT(!(B) || (((UWord) &((Binary *)(B))->orig_bytes[0]) & ERTS_BIN_ALIGNMENT_MASK) == ((UWord) 0)); } while(0) ERTS_GLB_INLINE byte* erts_get_aligned_binary_bytes(Eterm bin, byte** base_ptr); ERTS_GLB_INLINE void erts_free_aligned_binary_bytes(byte* buf); @@ -225,7 +225,7 @@ erts_free_aligned_binary_bytes(byte* buf) ** These extra bytes where earlier (< R13B04) added by an alignment-bug ** in this code. Do we dare remove this in some major release (R14?) maybe? */ -#ifdef DEBUG +#if defined(DEBUG) || defined(VALGRIND) # define CHICKEN_PAD 0 #else # define CHICKEN_PAD (sizeof(void*) - 1) diff --git a/erts/emulator/beam/erl_bits.c b/erts/emulator/beam/erl_bits.c index 43eb691338..73765772c8 100644 --- a/erts/emulator/beam/erl_bits.c +++ b/erts/emulator/beam/erl_bits.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 1999-2012. All Rights Reserved. + * Copyright Ericsson AB 1999-2013. All Rights Reserved. * * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -1491,7 +1491,7 @@ erts_bs_private_append(Process* p, Eterm bin, Eterm build_size_term, Uint unit) bptr->flags = 0; bptr->orig_size = new_size; erts_refc_init(&bptr->refc, 1); - sys_memcpy(bptr->orig_bytes, binp->orig_bytes, pb->size); + sys_memcpy(bptr->orig_bytes, binp->orig_bytes, binp->orig_size); pb->flags |= PB_IS_WRITABLE | PB_ACTIVE_WRITER; pb->val = bptr; pb->bytes = (byte *) bptr->orig_bytes; @@ -1810,6 +1810,11 @@ erts_cmp_bits(byte* a_ptr, size_t a_offs, byte* b_ptr, size_t b_offs, size_t siz Uint rshift; int cmp; + ASSERT(a_offs < 8 && b_offs < 8); + + if (size == 0) + return 0; + if (((a_offs | b_offs | size) & 7) == 0) { int byte_size = size >> 3; return sys_memcmp(a_ptr, b_ptr, byte_size); @@ -1818,58 +1823,72 @@ erts_cmp_bits(byte* a_ptr, size_t a_offs, byte* b_ptr, size_t b_offs, size_t siz /* Compare bit by bit until a_ptr is aligned on byte boundary */ a = *a_ptr++; b = *b_ptr++; - while (size > 0) { - a_bit = get_bit(a, a_offs); - b_bit = get_bit(b, b_offs); - if ((cmp = (a_bit-b_bit)) != 0) { - return cmp; - } - size--; - b_offs++; - if (b_offs == 8) { - b_offs = 0; - b = *b_ptr++; - } - a_offs++; - if (a_offs == 8) { - a_offs = 0; - a = *a_ptr++; - break; + if (a_offs) { + for (;;) { + a_bit = get_bit(a, a_offs); + b_bit = get_bit(b, b_offs); + if ((cmp = (a_bit-b_bit)) != 0) { + return cmp; + } + if (--size == 0) + return 0; + + b_offs++; + if (b_offs == 8) { + b_offs = 0; + b = *b_ptr++; + } + a_offs++; + if (a_offs == 8) { + a_offs = 0; + a = *a_ptr++; + break; + } } } /* Compare byte by byte as long as at least 8 bits remain */ - lshift = b_offs; - rshift = 8 - lshift; - while (size >= 8) { - byte b_cmp = (b << lshift); - b = *b_ptr++; - b_cmp |= b >> 
rshift; - if ((cmp = (a - b_cmp)) != 0) { - return cmp; - } + if (size >= 8) { + lshift = b_offs; + rshift = 8 - lshift; + for (;;) { + byte b_cmp = (b << lshift); + b = *b_ptr++; + b_cmp |= b >> rshift; + if ((cmp = (a - b_cmp)) != 0) { + return cmp; + } + size -= 8; + if (size < 8) + break; + a = *a_ptr++; + } + + if (size == 0) + return 0; a = *a_ptr++; - size -= 8; } /* Compare the remaining bits bit by bit */ - while (size > 0) { - a_bit = get_bit(a, a_offs); - b_bit = get_bit(b, b_offs); - if ((cmp = (a_bit-b_bit)) != 0) { - return cmp; - } - a_offs++; - if (a_offs == 8) { - a_offs = 0; - a = *a_ptr++; - } - b_offs++; - if (b_offs == 8) { - b_offs = 0; - b = *b_ptr++; - } - size--; + if (size > 0) { + for (;;) { + a_bit = get_bit(a, a_offs); + b_bit = get_bit(b, b_offs); + if ((cmp = (a_bit-b_bit)) != 0) { + return cmp; + } + if (--size == 0) + return 0; + + a_offs++; + ASSERT(a_offs < 8); + + b_offs++; + if (b_offs == 8) { + b_offs = 0; + b = *b_ptr++; + } + } } return 0; diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c index 98c2988323..41e64fcd4f 100644 --- a/erts/emulator/beam/erl_db.c +++ b/erts/emulator/beam/erl_db.c @@ -2236,7 +2236,7 @@ static BIF_RETTYPE ets_select_trap_1(BIF_ALIST_1) CHECK_TABLES(); tptr = tuple_val(a1); - ASSERT(arityval(*tptr) >= 1) + ASSERT(arityval(*tptr) >= 1); if ((tb = db_get_table(p, tptr[1], DB_READ, kind)) == NULL) { BIF_ERROR(p, BADARG); @@ -2403,7 +2403,7 @@ static BIF_RETTYPE ets_select_count_1(BIF_ALIST_1) CHECK_TABLES(); tptr = tuple_val(a1); - ASSERT(arityval(*tptr) >= 1) + ASSERT(arityval(*tptr) >= 1); if ((tb = db_get_table(p, tptr[1], DB_READ, kind)) == NULL) { BIF_ERROR(p, BADARG); } @@ -3811,6 +3811,13 @@ erts_db_foreach_offheap(DbTable *tb, tb->common.meth->db_foreach_offheap(tb, func, arg); } +/* retrieve max number of ets tables */ +Uint +erts_db_get_max_tabs() +{ + return db_max_tabs; +} + /* * For testing of meta tables only. 
* diff --git a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h index 6b62e10eb7..5b4681fc90 100644 --- a/erts/emulator/beam/erl_db.h +++ b/erts/emulator/beam/erl_db.h @@ -79,6 +79,8 @@ extern erts_smp_atomic_t erts_ets_misc_mem_size; Eterm erts_ets_colliding_names(Process*, Eterm name, Uint cnt); +Uint erts_db_get_max_tabs(void); + #endif #if defined(ERTS_WANT_DB_INTERNAL__) && !defined(ERTS_HAVE_DB_INTERNAL__) diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h index 90b79e6044..328b19dfc9 100644 --- a/erts/emulator/beam/erl_db_util.h +++ b/erts/emulator/beam/erl_db_util.h @@ -457,7 +457,7 @@ int erts_db_is_compiled_ms(Eterm term); && ERTS_MAGIC_BIN_DESTRUCTOR((BP)) == erts_db_match_prog_destructor) #define Binary2MatchProg(BP) \ - (ASSERT_EXPR(IsMatchProgBinary((BP))), \ + (ASSERT(IsMatchProgBinary((BP))), \ ((MatchProg *) ERTS_MAGIC_BIN_DATA((BP)))) /* ** Debugging diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c index da254286c4..c5585d39e8 100644 --- a/erts/emulator/beam/erl_gc.c +++ b/erts/emulator/beam/erl_gc.c @@ -2781,7 +2781,7 @@ erts_check_off_heap2(Process *p, Eterm *htop) refc = erts_refc_read(&u.ext->node->refc, 1); break; default: - ASSERT(!!"erts_check_off_heap2: Invalid thing_word"); + ASSERT(!"erts_check_off_heap2: Invalid thing_word"); } ERTS_CHK_OFFHEAP_ASSERT(refc >= 1); #ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_LIST diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c index 2114d0c001..0dd83fa6ed 100644 --- a/erts/emulator/beam/erl_lock_check.c +++ b/erts/emulator/beam/erl_lock_check.c @@ -132,6 +132,7 @@ static erts_lc_lock_order_t erts_lock_order[] = { #endif /* __WIN32__ */ { "alcu_init_atoms", NULL }, { "mseg_init_atoms", NULL }, + { "mmap_init_atoms", NULL }, { "drv_tsd", NULL }, { "async_enq_mtx", NULL }, #ifdef ERTS_SMP @@ -185,7 +186,9 @@ static erts_lc_lock_order_t erts_lock_order[] = { { "sys_gethrtime", NULL }, #endif #endif - { "erts_alloc_hard_debug", NULL } + { "erts_alloc_hard_debug", NULL }, + { "hard_dbg_mseg", NULL }, + { "erts_mmap", NULL } }; #define ERTS_LOCK_ORDER_SIZE \ diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c index 325d77e911..6a9030fd99 100644 --- a/erts/emulator/beam/erl_message.c +++ b/erts/emulator/beam/erl_message.c @@ -46,10 +46,12 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(message, +#ifdef DEBUG static ERTS_INLINE int in_heapfrag(const Eterm* ptr, const ErlHeapFragment *bp) { return ((unsigned)(ptr - bp->mem) < bp->used_size); } +#endif void diff --git a/erts/emulator/beam/erl_node_container_utils.h b/erts/emulator/beam/erl_node_container_utils.h index 0f93a3a9f0..17f6b32bb1 100644 --- a/erts/emulator/beam/erl_node_container_utils.h +++ b/erts/emulator/beam/erl_node_container_utils.h @@ -106,7 +106,7 @@ #define dist_entry_channel_no(x) \ ((x) == erts_this_dist_entry \ ? 
((Uint) 0) \ - : (ASSERT_EXPR(is_atom((x)->sysname)), \ + : (ASSERT(is_atom((x)->sysname)), \ (Uint) atom_val((x)->sysname))) #define internal_channel_no(x) ((Uint) ERST_INTERNAL_CHANNEL_NO) #define external_channel_no(x) \ @@ -122,10 +122,10 @@ extern ErtsPTab erts_proc; (D), \ _TAG_IMMED1_PID) -#define internal_pid_index(PID) (ASSERT_EXPR(is_internal_pid((PID))), \ +#define internal_pid_index(PID) (ASSERT(is_internal_pid((PID))), \ erts_ptab_id2pix(&erts_proc, (PID))) -#define internal_pid_data(PID) (ASSERT_EXPR(is_internal_pid((PID))), \ +#define internal_pid_data(PID) (ASSERT(is_internal_pid((PID))), \ erts_ptab_id2data(&erts_proc, (PID))) #define internal_pid_number(x) _GET_PID_NUM(internal_pid_data((x))) @@ -193,10 +193,10 @@ extern ErtsPTab erts_port; (D), \ _TAG_IMMED1_PORT) -#define internal_port_index(PRT) (ASSERT_EXPR(is_internal_port((PRT))), \ +#define internal_port_index(PRT) (ASSERT(is_internal_port((PRT))), \ erts_ptab_id2pix(&erts_port, (PRT))) -#define internal_port_data(PRT) (ASSERT_EXPR(is_internal_port((PRT))), \ +#define internal_port_data(PRT) (ASSERT(is_internal_port((PRT))), \ erts_ptab_id2data(&erts_port, (PRT))) #define internal_port_number(x) _GET_PORT_NUM(internal_port_data((x))) diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h index e4d964146e..123253a057 100644 --- a/erts/emulator/beam/erl_port_task.h +++ b/erts/emulator/beam/erl_port_task.h @@ -77,6 +77,7 @@ extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks; #define ERTS_PTS_FLG_HAVE_NS_TASKS (((erts_aint32_t) 1) << 8) #define ERTS_PTS_FLG_PARALLELISM (((erts_aint32_t) 1) << 9) #define ERTS_PTS_FLG_FORCE_SCHED (((erts_aint32_t) 1) << 10) +#define ERTS_PTS_FLG_EXITING (((erts_aint32_t) 1) << 11) #define ERTS_PTS_FLGS_BUSY \ (ERTS_PTS_FLG_BUSY_PORT | ERTS_PTS_FLG_BUSY_PORT_Q) @@ -86,7 +87,8 @@ extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks; | ERTS_PTS_FLG_HAVE_BUSY_TASKS \ | ERTS_PTS_FLG_HAVE_TASKS \ | ERTS_PTS_FLG_EXEC \ - | ERTS_PTS_FLG_FORCE_SCHED) + | ERTS_PTS_FLG_FORCE_SCHED \ + | ERTS_PTS_FLG_EXITING) #define ERTS_PORT_TASK_DEFAULT_BUSY_PORT_Q_HIGH 8192 #define ERTS_PORT_TASK_DEFAULT_BUSY_PORT_Q_LOW 4096 @@ -135,6 +137,7 @@ ERTS_GLB_INLINE void erts_port_task_fini_sched(ErtsPortTaskSched *ptsp); ERTS_GLB_INLINE void erts_port_task_sched_lock(ErtsPortTaskSched *ptsp); ERTS_GLB_INLINE void erts_port_task_sched_unlock(ErtsPortTaskSched *ptsp); ERTS_GLB_INLINE int erts_port_task_sched_lock_is_locked(ErtsPortTaskSched *ptsp); +ERTS_GLB_INLINE void erts_port_task_sched_enter_exiting_state(ErtsPortTaskSched *ptsp); #ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS ERTS_GLB_INLINE int erts_port_task_have_outstanding_io_tasks(void); @@ -225,6 +228,12 @@ erts_port_task_fini_sched(ErtsPortTaskSched *ptsp) #endif } +ERTS_GLB_INLINE void +erts_port_task_sched_enter_exiting_state(ErtsPortTaskSched *ptsp) +{ + erts_smp_atomic32_read_bor_nob(&ptsp->flags, ERTS_PTS_FLG_EXITING); +} + #ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS ERTS_GLB_INLINE int diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index b88c871ffc..21fd8dd50a 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -328,7 +328,7 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(proclist, ERTS_ALC_T_PROC_LIST) #define ERTS_SCHED_SLEEP_INFO_IX(IX) \ - (ASSERT_EXPR(-1 <= ((int) (IX)) \ + (ASSERT(-1 <= ((int) (IX)) \ && ((int) (IX)) < ((int) erts_no_schedulers)), \ &aligned_sched_sleep_info[(IX)].ssi) @@ -8633,6 +8633,7 @@ alloc_process(ErtsRunQueue *rq, 
erts_aint32_t state) p->approx_started = erts_get_approx_time(); p->rcount = 0; + p->heap = NULL; ASSERT(p == (Process *) (erts_ptab_pix2intptr_nob( @@ -8746,7 +8747,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). hipe_init_process_smp(&p->hipe_smp); #endif #endif - p->heap = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP, sizeof(Eterm)*sz); p->old_hend = p->old_htop = p->old_heap = NULL; p->high_water = p->heap; diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h index 0b8d2976ed..043621125c 100644 --- a/erts/emulator/beam/erl_process.h +++ b/erts/emulator/beam/erl_process.h @@ -1188,10 +1188,10 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags; } while (0) #define ERTS_RUNQ_IX(IX) \ - (ASSERT_EXPR(0 <= (IX) && (IX) < erts_no_run_queues), \ + (ASSERT(0 <= (IX) && (IX) < erts_no_run_queues), \ &erts_aligned_run_queues[(IX)].runq) #define ERTS_SCHEDULER_IX(IX) \ - (ASSERT_EXPR(0 <= (IX) && (IX) < erts_no_schedulers), \ + (ASSERT(0 <= (IX) && (IX) < erts_no_schedulers), \ &erts_aligned_scheduler_data[(IX)].esd) void erts_pre_init_process(void); diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c index 6cd0d23b97..2f3cf23b00 100644 --- a/erts/emulator/beam/erl_process_dump.c +++ b/erts/emulator/beam/erl_process_dump.c @@ -34,8 +34,8 @@ #define ERTS_WANT_EXTERNAL_TAGS #include "external.h" -#define WORD_FMT "%X" -#define ADDR_FMT "%X" +#define PTR_FMT "%bpX" +#define ETERM_FMT "%beX" #define OUR_NIL _make_header(0,_TAG_HEADER_FLOAT) @@ -210,9 +210,9 @@ static void dump_element(int to, void *to_arg, Eterm x) { if (is_list(x)) { - erts_print(to, to_arg, "H" WORD_FMT, list_val(x)); + erts_print(to, to_arg, "H" PTR_FMT, list_val(x)); } else if (is_boxed(x)) { - erts_print(to, to_arg, "H" WORD_FMT, boxed_val(x)); + erts_print(to, to_arg, "H" PTR_FMT, boxed_val(x)); } else if (is_immed(x)) { if (is_atom(x)) { unsigned char* s = atom_tab(atom_val(x))->name; @@ -311,7 +311,7 @@ heap_dump(int to, void *to_arg, Eterm x) } else if (is_list(x)) { ptr = list_val(x); if (ptr[0] != OUR_NIL) { - erts_print(to, to_arg, ADDR_FMT ":l", ptr); + erts_print(to, to_arg, PTR_FMT ":l", ptr); dump_element(to, to_arg, ptr[0]); erts_putc(to, to_arg, '|'); dump_element(to, to_arg, ptr[1]); @@ -330,12 +330,12 @@ heap_dump(int to, void *to_arg, Eterm x) ptr = boxed_val(x); hdr = *ptr; if (hdr != OUR_NIL) { /* If not visited */ - erts_print(to, to_arg, ADDR_FMT ":", ptr); + erts_print(to, to_arg, PTR_FMT ":", ptr); if (is_arity_value(hdr)) { Uint i; Uint arity = arityval(hdr); - erts_print(to, to_arg, "t" WORD_FMT ":", arity); + erts_print(to, to_arg, "t" ETERM_FMT ":", arity); for (i = 1; i <= arity; i++) { dump_element(to, to_arg, ptr[i]); if (is_immed(ptr[i])) { @@ -388,21 +388,43 @@ heap_dump(int to, void *to_arg, Eterm x) val->flags = (UWord) all_binaries; all_binaries = val; } - erts_print(to, to_arg, "Yc%X:%X:%X", val, + erts_print(to, to_arg, + "Yc" PTR_FMT ":" PTR_FMT ":" PTR_FMT, + val, pb->bytes - (byte *)val->orig_bytes, size); } else if (tag == SUB_BINARY_SUBTAG) { ErlSubBin* Sb = (ErlSubBin *) binary_val(x); - Eterm* real_bin = binary_val(Sb->orig); + Eterm* real_bin; void* val; + /* + * Must use boxed_val() here, because the original + * binary may have been visited and have had its + * header word changed to OUR_NIL (in which case + * binary_val() will cause an assertion failure in + * the DEBUG emulator). 
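The comment above motivates reading the original binary's header through boxed_val() rather than binary_val(). A minimal sketch of the visited-marking idea it refers to, with purely hypothetical names and a stand-in sentinel (the dump code itself overwrites headers with OUR_NIL):

    /* Illustration only: a dumper can mark a boxed term as visited by
     * overwriting its header word with a sentinel, so a tag-checking
     * accessor used on a second visit no longer sees a valid header and
     * may assert in a debug build. */
    #define VISITED_MARK ((unsigned long) 0)   /* stand-in for OUR_NIL */

    static int is_visited(unsigned long *boxed)
    {
        return boxed[0] == VISITED_MARK;
    }

    static void mark_visited(unsigned long *boxed)
    {
        boxed[0] = VISITED_MARK;   /* original header word is lost here */
    }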
+ */ + + real_bin = boxed_val(Sb->orig); + if (thing_subtag(*real_bin) == REFC_BINARY_SUBTAG) { + /* + * Unvisited REFC_BINARY: Point directly to + * the binary. + */ ProcBin* pb = (ProcBin *) real_bin; val = pb->val; - } else { /* Heap binary */ + } else { + /* + * Heap binary or visited REFC binary: Point + * to heap binary or ProcBin on the heap. + */ val = real_bin; } - erts_print(to, to_arg, "Ys%X:%X:%X", val, Sb->offs, size); + erts_print(to, to_arg, + "Ys" PTR_FMT ":" PTR_FMT ":" PTR_FMT, + val, Sb->offs, size); } erts_putc(to, to_arg, '\n'); *ptr = OUR_NIL; @@ -438,7 +460,7 @@ dump_binaries(int to, void *to_arg, Binary* current) long size = current->orig_size; byte* bytes = (byte*) current->orig_bytes; - erts_print(to, to_arg, "=binary:%X\n", current); + erts_print(to, to_arg, "=binary:" PTR_FMT "\n", current); erts_print(to, to_arg, "%X:", size); for (i = 0; i < size; i++) { erts_print(to, to_arg, "%02X", bytes[i]); diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c index fa015ee4b9..ff7fdfcfca 100644 --- a/erts/emulator/beam/erl_trace.c +++ b/erts/emulator/beam/erl_trace.c @@ -2184,7 +2184,7 @@ trace_gc(Process *p, Eterm what) AM_bin_old_vheap_block_size }; - Uint values[] = { + UWord values[] = { OLD_HEAP(p) ? OLD_HEND(p) - OLD_HEAP(p) : 0, HEAP_SIZE(p), MBUF_SIZE(p), @@ -2198,7 +2198,7 @@ trace_gc(Process *p, Eterm what) BIN_OLD_VHEAP_SZ(p) }; #define LOCAL_HEAP_SIZE \ - (sizeof(values)/sizeof(Eterm)) * \ + (sizeof(values)/sizeof(*values)) * \ (2/*cons*/ + 3/*2-tuple*/ + BIG_UINT_HEAP_SIZE) + \ 5/*4-tuple */ + TS_HEAP_WORDS DeclareTmpHeap(local_heap,LOCAL_HEAP_SIZE,p); @@ -2206,7 +2206,7 @@ trace_gc(Process *p, Eterm what) Eterm* limit; #endif - ASSERT(sizeof(values)/sizeof(Uint) == sizeof(tags)/sizeof(Eterm)); + ASSERT(sizeof(values)/sizeof(*values) == sizeof(tags)/sizeof(Eterm)); UseTmpHeap(LOCAL_HEAP_SIZE,p); @@ -2214,9 +2214,9 @@ trace_gc(Process *p, Eterm what) hp = local_heap; #ifdef DEBUG size = 0; - (void) erts_bld_atom_uint_2tup_list(NULL, + (void) erts_bld_atom_uword_2tup_list(NULL, &size, - sizeof(values)/sizeof(Uint), + sizeof(values)/sizeof(*values), tags, values); size += 5/*4-tuple*/ + TS_SIZE(p); @@ -2229,9 +2229,9 @@ trace_gc(Process *p, Eterm what) ERTS_TRACE_FLAGS(p)); size = 0; - (void) erts_bld_atom_uint_2tup_list(NULL, + (void) erts_bld_atom_uword_2tup_list(NULL, &size, - sizeof(values)/sizeof(Uint), + sizeof(values)/sizeof(*values), tags, values); size += 5/*4-tuple*/ + TS_SIZE(p); @@ -2244,9 +2244,9 @@ trace_gc(Process *p, Eterm what) ASSERT(size <= LOCAL_HEAP_SIZE); #endif - msg = erts_bld_atom_uint_2tup_list(&hp, + msg = erts_bld_atom_uword_2tup_list(&hp, NULL, - sizeof(values)/sizeof(Uint), + sizeof(values)/sizeof(*values), tags, values); @@ -2415,7 +2415,7 @@ monitor_long_gc(Process *p, Uint time) { am_old_heap_size, am_heap_size }; - Eterm values[] = { + UWord values[] = { time, OLD_HEAP(p) ? 
OLD_HEND(p) - OLD_HEAP(p) : 0, HEAP_SIZE(p), @@ -2436,9 +2436,9 @@ monitor_long_gc(Process *p, Uint time) { #endif hsz = 0; - (void) erts_bld_atom_uint_2tup_list(NULL, + (void) erts_bld_atom_uword_2tup_list(NULL, &hsz, - sizeof(values)/sizeof(Uint), + sizeof(values)/sizeof(*values), tags, values); hsz += 5 /* 4-tuple */; @@ -2449,9 +2449,9 @@ monitor_long_gc(Process *p, Uint time) { hp_end = hp + hsz; #endif - list = erts_bld_atom_uint_2tup_list(&hp, + list = erts_bld_atom_uword_2tup_list(&hp, NULL, - sizeof(values)/sizeof(Uint), + sizeof(values)/sizeof(*values), tags, values); msg = TUPLE4(hp, am_monitor, p->common.id, am_long_gc, list); @@ -2489,7 +2489,7 @@ monitor_large_heap(Process *p) { am_old_heap_size, am_heap_size }; - Uint values[] = { + UWord values[] = { OLD_HEAP(p) ? OLD_HEND(p) - OLD_HEAP(p) : 0, HEAP_SIZE(p), MBUF_SIZE(p), @@ -2511,9 +2511,9 @@ monitor_large_heap(Process *p) { #endif hsz = 0; - (void) erts_bld_atom_uint_2tup_list(NULL, + (void) erts_bld_atom_uword_2tup_list(NULL, &hsz, - sizeof(values)/sizeof(Uint), + sizeof(values)/sizeof(*values), tags, values); hsz += 5 /* 4-tuple */; @@ -2524,9 +2524,9 @@ monitor_large_heap(Process *p) { hp_end = hp + hsz; #endif - list = erts_bld_atom_uint_2tup_list(&hp, + list = erts_bld_atom_uword_2tup_list(&hp, NULL, - sizeof(values)/sizeof(Uint), + sizeof(values)/sizeof(*values), tags, values); msg = TUPLE4(hp, am_monitor, p->common.id, am_large_heap, list); diff --git a/erts/emulator/beam/erl_unicode.c b/erts/emulator/beam/erl_unicode.c index e00440b905..569c0a7d31 100644 --- a/erts/emulator/beam/erl_unicode.c +++ b/erts/emulator/beam/erl_unicode.c @@ -1476,6 +1476,9 @@ static Eterm do_utf8_to_list_normalize(Process *p, Uint num, byte *bytes, Uint s Uint16 savepoints[4]; int numpoints = 0; + if (num == 0) + return NIL; + ASSERT(num > 0); hp = HAlloc(p,num * 2); /* May be to much */ diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h index 80d29d554a..292d135946 100644 --- a/erts/emulator/beam/erl_utils.h +++ b/erts/emulator/beam/erl_utils.h @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2012. All Rights Reserved. + * Copyright Ericsson AB 2012-2013. All Rights Reserved. 
* * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -168,6 +168,10 @@ Eterm erts_bld_uint64(Uint **hpp, Uint *szp, Uint64 ui64); Eterm erts_bld_sint64(Uint **hpp, Uint *szp, Sint64 si64); Eterm erts_bld_cons(Uint **hpp, Uint *szp, Eterm car, Eterm cdr); Eterm erts_bld_tuple(Uint **hpp, Uint *szp, Uint arity, ...); +#define erts_bld_tuple2(H,S,E1,E2) erts_bld_tuple(H,S,2,E1,E2) +#define erts_bld_tuple3(H,S,E1,E2,E3) erts_bld_tuple(H,S,3,E1,E2,E3) +#define erts_bld_tuple4(H,S,E1,E2,E3,E4) erts_bld_tuple(H,S,4,E1,E2,E3,E4) +#define erts_bld_tuple5(H,S,E1,E2,E3,E4,E5) erts_bld_tuple(H,S,5,E1,E2,E3,E4,E5) Eterm erts_bld_tuplev(Uint **hpp, Uint *szp, Uint arity, Eterm terms[]); Eterm erts_bld_string_n(Uint **hpp, Uint *szp, const char *str, Sint len); #define erts_bld_string(hpp,szp,str) erts_bld_string_n(hpp,szp,str,strlen(str)) @@ -175,8 +179,8 @@ Eterm erts_bld_list(Uint **hpp, Uint *szp, Sint length, Eterm terms[]); Eterm erts_bld_2tup_list(Uint **hpp, Uint *szp, Sint length, Eterm terms1[], Uint terms2[]); Eterm -erts_bld_atom_uint_2tup_list(Uint **hpp, Uint *szp, - Sint length, Eterm atoms[], Uint uints[]); +erts_bld_atom_uword_2tup_list(Uint **hpp, Uint *szp, + Sint length, Eterm atoms[], UWord uints[]); Eterm erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length, Eterm atoms[], Uint uints1[], Uint uints2[]); diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h index c962955de9..337422eead 100644 --- a/erts/emulator/beam/erl_vm.h +++ b/erts/emulator/beam/erl_vm.h @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 1996-2012. All Rights Reserved. + * Copyright Ericsson AB 1996-2013. All Rights Reserved. * * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -80,7 +80,7 @@ # ifdef CHECK_FOR_HOLES # define INIT_HEAP_MEM(p,sz) erts_set_hole_marker(HEAP_TOP(p), (sz)) # else -# define INIT_HEAP_MEM(p,sz) memset(HEAP_TOP(p),DEBUG_BAD_BYTE,(sz)*sizeof(Eterm*)) +# define INIT_HEAP_MEM(p,sz) memset(HEAP_TOP(p),0x01,(sz)*sizeof(Eterm*)) # endif #else # define INIT_HEAP_MEM(p,sz) ((void)0) @@ -98,7 +98,7 @@ * failing that, in a heap fragment. */ #define HAllocX(p, sz, xtra) \ - (ASSERT_EXPR((sz) >= 0), \ + (ASSERT((sz) >= 0), \ ErtsHAllocLockCheck(p), \ (IS_FORCE_HEAP_FRAGS || (((HEAP_LIMIT(p) - HEAP_TOP(p)) < (sz))) \ ? 
erts_heap_alloc((p),(sz),(xtra)) \ @@ -135,14 +135,14 @@ */ #ifdef CHECK_FOR_HOLES # define HeapOnlyAlloc(p, sz) \ - (ASSERT_EXPR((sz) >= 0), \ - (ASSERT_EXPR(((HEAP_LIMIT(p) - HEAP_TOP(p)) >= (sz))), \ + (ASSERT((sz) >= 0), \ + (ASSERT(((HEAP_LIMIT(p) - HEAP_TOP(p)) >= (sz))), \ (erts_set_hole_marker(HEAP_TOP(p), (sz)), \ (HEAP_TOP(p) = HEAP_TOP(p) + (sz), HEAP_TOP(p) - (sz))))) #else # define HeapOnlyAlloc(p, sz) \ - (ASSERT_EXPR((sz) >= 0), \ - (ASSERT_EXPR(((HEAP_LIMIT(p) - HEAP_TOP(p)) >= (sz))), \ + (ASSERT((sz) >= 0), \ + (ASSERT(((HEAP_LIMIT(p) - HEAP_TOP(p)) >= (sz))), \ (HEAP_TOP(p) = HEAP_TOP(p) + (sz), HEAP_TOP(p) - (sz)))) #endif diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c index 4600d691c7..2cb44a5b64 100644 --- a/erts/emulator/beam/external.c +++ b/erts/emulator/beam/external.c @@ -3330,7 +3330,7 @@ dec_term_atom_common: n = get_int32(ep); ep += 4; - if (n <= ERL_ONHEAP_BIN_LIMIT) { + if ((unsigned)n <= ERL_ONHEAP_BIN_LIMIT) { ErlHeapBin* hb = (ErlHeapBin *) hp; hb->thing_word = header_heap_bin(n); @@ -3379,8 +3379,10 @@ dec_term_atom_common: n = get_int32(ep); bitsize = ep[4]; - ep += 5; - if (n <= ERL_ONHEAP_BIN_LIMIT) { + if (((bitsize==0) != (n==0)) || bitsize > 8) + goto error; + ep += 5; + if ((unsigned)n <= ERL_ONHEAP_BIN_LIMIT) { ErlHeapBin* hb = (ErlHeapBin *) hp; hb->thing_word = header_heap_bin(n); @@ -3423,10 +3425,10 @@ dec_term_atom_common: n = pb->size; } - if (bitsize == 0) { + if (bitsize == 8 || n == 0) { *objp = bin; } else { - sb = (ErlSubBin *) hp; + sb = (ErlSubBin *)hp; sb->thing_word = HEADER_SUB_BIN; sb->orig = bin; sb->size = n - 1; diff --git a/erts/emulator/beam/external.h b/erts/emulator/beam/external.h index 9afdb52329..83001b2c7e 100644 --- a/erts/emulator/beam/external.h +++ b/erts/emulator/beam/external.h @@ -138,8 +138,8 @@ typedef struct { #define ERTS_DIST_EXT_SIZE(EDEP) \ (sizeof(ErtsDistExternal) \ - (((EDEP)->flags & ERTS_DIST_EXT_ATOM_TRANS_TAB) \ - ? (ASSERT_EXPR(0 <= (EDEP)->attab.size \ - && (EDEP)->attab.size <= ERTS_ATOM_CACHE_SIZE), \ + ? (ASSERT(0 <= (EDEP)->attab.size \ + && (EDEP)->attab.size <= ERTS_ATOM_CACHE_SIZE), \ sizeof(Eterm)*(ERTS_ATOM_CACHE_SIZE - (EDEP)->attab.size)) \ : sizeof(ErtsAtomTranslationTable))) diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h index 6b720b53d8..bfab25d2bd 100755 --- a/erts/emulator/beam/global.h +++ b/erts/emulator/beam/global.h @@ -870,13 +870,13 @@ Eterm store_external_or_ref_in_proc_(Process *, Eterm); Eterm store_external_or_ref_(Uint **, ErlOffHeap*, Eterm); #define NC_HEAP_SIZE(NC) \ - (ASSERT_EXPR(is_node_container((NC))), \ + (ASSERT(is_node_container((NC))), \ IS_CONST((NC)) ? 0 : (thing_arityval(*boxed_val((NC))) + 1)) #define STORE_NC(Hpp, ETpp, NC) \ - (ASSERT_EXPR(is_node_container((NC))), \ + (ASSERT(is_node_container((NC))), \ IS_CONST((NC)) ? (NC) : store_external_or_ref_((Hpp), (ETpp), (NC))) #define STORE_NC_IN_PROC(Pp, NC) \ - (ASSERT_EXPR(is_node_container((NC))), \ + (ASSERT(is_node_container((NC))), \ IS_CONST((NC)) ? 
(NC) : store_external_or_ref_in_proc_((Pp), (NC))) /* duplicates from big.h */ diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c index c1e66b59af..9076bbe73c 100644 --- a/erts/emulator/beam/io.c +++ b/erts/emulator/beam/io.c @@ -1330,7 +1330,7 @@ force_imm_drv_call(ErtsTryImmDrvCallState *sp) erts_aint32_t invalid_state; Port *prt = sp->port; - ASSERT(ERTS_IS_CRASH_DUMPING) + ASSERT(ERTS_IS_CRASH_DUMPING); ASSERT(is_atom(sp->port_op)); invalid_state = sp->state; @@ -3601,6 +3601,8 @@ erts_deliver_port_exit(Port *p, Eterm from, Eterm reason, int send_closed) if (send_closed) set_state_flags |= ERTS_PORT_SFLG_SEND_CLOSED; + erts_port_task_sched_enter_exiting_state(&p->sched); + state = erts_atomic32_read_bor_mb(&p->state, set_state_flags); state |= set_state_flags; @@ -4078,7 +4080,7 @@ erts_port_control(Process* c_p, copy = 1; else { binp = ((ProcBin *) ebinp)->val; - ASSERT(bufp < bufp + size); + ASSERT(bufp <= bufp + size); ASSERT(binp->orig_bytes <= bufp && bufp + size <= binp->orig_bytes + binp->orig_size); erts_refc_inc(&binp->refc, 1); diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h index 0f4169c81d..d0ccf8a2fc 100644 --- a/erts/emulator/beam/sys.h +++ b/erts/emulator/beam/sys.h @@ -149,19 +149,33 @@ typedef ERTS_SYS_FD_TYPE ErtsSysFdType; # define ERTS_EXIT_AFTER_DUMP exit #endif +/* In VC++, noreturn is a declspec that has to be before the types, + * but in GNUC it is an att ribute to be placed between return type + * and function name, hence __decl_noreturn <types> __noreturn <function name> + */ +#if __GNUC__ +# define __decl_noreturn +# define __noreturn __attribute__((noreturn)) +#else +# if defined(__WIN32__) && defined(_MSC_VER) +# define __noreturn +# define __decl_noreturn __declspec(noreturn) +# else +# define __noreturn +# define __decl_noreturn +# endif +#endif + +#define ERTS_ASSERT(e) \ + ((void) ((e) ? 1 : (erl_assert_error(#e, __func__, __FILE__, __LINE__), 0))) + +__decl_noreturn void __noreturn erl_assert_error(const char* expr, const char *func, + const char* file, int line); + #ifdef DEBUG -# define ASSERT(e) \ - if (e) { \ - ; \ - } else { \ - erl_assert_error(#e, __FILE__, __LINE__); \ - } -# define ASSERT_EXPR(e) \ - ((void) ((e) ? 1 : (erl_assert_error(#e, __FILE__, __LINE__), 0))) -void erl_assert_error(char* expr, char* file, int line); +# define ASSERT(e) ERTS_ASSERT(e) #else -# define ASSERT(e) -# define ASSERT_EXPR(e) ((void) 1) +# define ASSERT(e) ((void) 1) #endif /* @@ -197,23 +211,6 @@ void erl_assert_error(char* expr, char* file, int line); # define erts_align_attribute(SZ) #endif -/* In VC++, noreturn is a declspec that has to be before the types, - * but in GNUC it is an att ribute to be placed between return type - * and function name, hence __decl_noreturn <types> __noreturn <function name> - */ -#if __GNUC__ -# define __decl_noreturn -# define __noreturn __attribute__((noreturn)) -#else -# if defined(__WIN32__) && defined(_MSC_VER) -# define __noreturn -# define __decl_noreturn __declspec(noreturn) -# else -# define __noreturn -# define __decl_noreturn -# endif -#endif - /* ** Data types: ** @@ -761,6 +758,8 @@ int erts_sys_getenv(char *key, char *value, size_t *size); int erts_sys_getenv_raw(char *key, char *value, size_t *size); /* erts_sys_getenv__() is only allowed to be used in early init phase */ int erts_sys_getenv__(char *key, char *value, size_t *size); +/* erst_sys_unsetenv() returns 0 on success and a value != 0 on failure. 
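The new erts_sys_unsetenv() contract is stated in the comment above (0 on success, a value != 0 on failure), backing the new os:unsetenv/1 BIF. A minimal sketch of a Unix-side implementation, assuming it can simply wrap POSIX unsetenv(3); the actual sys-specific code may differ:

    #include <stdlib.h>

    /* Sketch only: 0 on success, != 0 on failure, as documented above. */
    int erts_sys_unsetenv(char *key)
    {
        return unsetenv(key) == 0 ? 0 : 1;
    }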
*/ +int erts_sys_unsetenv(char *key); /* Easier to use, but not as efficient, environment functions */ char *erts_read_env(char *key); @@ -1024,6 +1023,9 @@ void erl_bin_write(unsigned char *, int, int); #define ERTS_SMALL_ABS(Small) labs(Small) #endif +#ifndef ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC +# define ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC 0 +#endif #ifdef __WIN32__ void call_break_handler(void); diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c index a5b2cc589c..5a889f7fce 100644 --- a/erts/emulator/beam/utils.c +++ b/erts/emulator/beam/utils.c @@ -576,8 +576,8 @@ erts_bld_2tup_list(Uint **hpp, Uint *szp, } Eterm -erts_bld_atom_uint_2tup_list(Uint **hpp, Uint *szp, - Sint length, Eterm atoms[], Uint uints[]) +erts_bld_atom_uword_2tup_list(Uint **hpp, Uint *szp, + Sint length, Eterm atoms[], UWord uints[]) { Sint i; Eterm res = THE_NON_VALUE; diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c index 60db50e80a..45fac69303 100644 --- a/erts/emulator/drivers/common/inet_drv.c +++ b/erts/emulator/drivers/common/inet_drv.c @@ -417,13 +417,44 @@ static unsigned long one_value = 1; # define sctp_adaptation_layer_event sctp_adaption_layer_event #endif -#ifdef __GNUC__ +#if defined(__GNUC__) && defined(HAVE_SCTP_BINDX) static typeof(sctp_bindx) *p_sctp_bindx = NULL; +#else +static int (*p_sctp_bindx) + (int sd, struct sockaddr *addrs, int addrcnt, int flags) = NULL; +#endif + +#if defined(__GNUC__) && defined(HAVE_SCTP_PEELOFF) static typeof(sctp_peeloff) *p_sctp_peeloff = NULL; #else -static int (*p_sctp_bindx)(int sd, struct sockaddr *addrs, - int addrcnt, int flags) = NULL; -static int (*p_sctp_peeloff)(int sd, sctp_assoc_t assoc_id) = NULL; +static int (*p_sctp_peeloff) + (int sd, sctp_assoc_t assoc_id) = NULL; +#endif + +#if defined(__GNUC__) && defined(HAVE_SCTP_GETLADDRS) +static typeof(sctp_getladdrs) *p_sctp_getladdrs = NULL; +#else +static int (*p_sctp_getladdrs) + (int sd, sctp_assoc_t assoc_id, struct sockaddr **ss) = NULL; +#endif + +#if defined(__GNUC__) && defined(HAVE_SCTP_FREELADDRS) +static typeof(sctp_freeladdrs) *p_sctp_freeladdrs = NULL; +#else +static void (*p_sctp_freeladdrs)(struct sockaddr *addrs) = NULL; +#endif + +#if defined(__GNUC__) && defined(HAVE_SCTP_GETPADDRS) +static typeof(sctp_getpaddrs) *p_sctp_getpaddrs = NULL; +#else +static int (*p_sctp_getpaddrs) + (int sd, sctp_assoc_t assoc_id, struct sockaddr **ss) = NULL; +#endif + +#if defined(__GNUC__) && defined(HAVE_SCTP_FREEPADDRS) +static typeof(sctp_freepaddrs) *p_sctp_freepaddrs = NULL; +#else +static void (*p_sctp_freepaddrs)(struct sockaddr *addrs) = NULL; #endif #endif /* #if defined(HAVE_SCTP_H) */ @@ -593,7 +624,7 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n) #define INET_F_BUSY 0x0080 #define INET_F_MULTI_CLIENT 0x0100 /* Multiple clients for one descriptor, i.e. multi-accept */ -/* One numberspace for *_REC_* so if an e.g UDP request is issued +/* One numberspace for *_REQ_* so if an e.g UDP request is issued ** for a TCP socket, the driver can protest. 
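Each optional libsctp symbol above is kept behind a static function pointer whose type is derived from the declaration with GCC's typeof, falling back to a hand-written signature elsewhere. A small sketch of that pattern with a hypothetical symbol (some_lib_fn and HAVE_SOME_LIB_FN are assumptions, standing in for e.g. sctp_getladdrs):

    /* Hypothetical declaration standing in for an optional library symbol. */
    int some_lib_fn(int sd, void **out);

    #if defined(__GNUC__) && defined(HAVE_SOME_LIB_FN)
    static typeof(some_lib_fn) *p_some_lib_fn = NULL;       /* type follows decl */
    #else
    static int (*p_some_lib_fn)(int sd, void **out) = NULL; /* spelled out */
    #endif

    /* The pointer is filled in at init time only if the symbol is present
     * (statically or via a dlsym-style lookup), and callers test it for
     * NULL before use. */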
*/ #define INET_REQ_OPEN 1 @@ -624,6 +655,8 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n) #define INET_REQ_ACCEPT 26 #define INET_REQ_LISTEN 27 #define INET_REQ_IGNOREFD 28 +#define INET_REQ_GETLADDRS 29 +#define INET_REQ_GETPADDRS 30 /* TCP requests */ /* #define TCP_REQ_ACCEPT 40 MOVED */ @@ -1417,8 +1450,7 @@ static int load_ip_address(ErlDrvTermData* spec, int i, int family, char* buf) #ifdef HAVE_SCTP /* For SCTP, we often need to return {IP, Port} tuples: */ -static int inet_get_address - (int family, char* dst, inet_address* src, unsigned int* len); +static int inet_get_address(char* dst, inet_address* src, unsigned int* len); #define LOAD_IP_AND_PORT_CNT \ (8*LOAD_INT_CNT + LOAD_TUPLE_CNT + LOAD_INT_CNT + LOAD_TUPLE_CNT) @@ -1433,8 +1465,7 @@ static int load_ip_and_port unsigned int len = sizeof(struct sockaddr_storage); unsigned int alen = len; char abuf [len]; - int res = - inet_get_address(desc->sfamily, abuf, (inet_address*) addr, &alen); + int res = inet_get_address(abuf, (inet_address*) addr, &alen); ASSERT(res==0); res = 0; /* Now "abuf" contains: Family(1b), Port(2b), IP(4|16b) */ @@ -3658,9 +3689,27 @@ static int inet_init() /* Check the size of SCTP AssocID -- currently both this driver and the Erlang part require 32 bit: */ ASSERT(sizeof(sctp_assoc_t)==ASSOC_ID_LEN); -# if defined(HAVE_SCTP_BINDX) && defined (HAVE_SCTP_PEELOFF) +# if defined(HAVE_SCTP_BINDX) p_sctp_bindx = sctp_bindx; +# if defined(HAVE_SCTP_PEELOFF) p_sctp_peeloff = sctp_peeloff; +# else + p_sctp_peeloff = NULL; +# endif +# if defined(HAVE_SCTP_GETLADDRS) && defined(HAVE_SCTP_FREELADDRS) + p_sctp_getladdrs = sctp_getladdrs; + p_sctp_freeladdrs = sctp_freeladdrs; +# else + p_sctp_getladdrs = NULL; + p_sctp_freeladdrs = NULL; +# endif +# if defined(HAVE_SCTP_GETPADDRS) && defined(HAVE_SCTP_FREEPADDRS) + p_sctp_getpaddrs = sctp_getpaddrs; + p_sctp_freepaddrs = sctp_freepaddrs; +# else + p_sctp_getpaddrs = NULL; + p_sctp_freepaddrs = NULL; +# endif inet_init_sctp(); add_driver_entry(&sctp_inet_driver_entry); # else @@ -3675,12 +3724,36 @@ static int inet_init() void *ptr; if (erts_sys_ddll_sym(h_libsctp, "sctp_bindx", &ptr) == 0) { p_sctp_bindx = ptr; - inet_init_sctp(); - add_driver_entry(&sctp_inet_driver_entry); if (erts_sys_ddll_sym(h_libsctp, "sctp_peeloff", &ptr) == 0) { p_sctp_peeloff = ptr; } + else p_sctp_peeloff = NULL; + if (erts_sys_ddll_sym(h_libsctp, "sctp_getladdrs", &ptr) == 0) { + p_sctp_getladdrs = ptr; + } + else p_sctp_getladdrs = NULL; + if (erts_sys_ddll_sym(h_libsctp, "sctp_freeladdrs", &ptr) == 0) { + p_sctp_freeladdrs = ptr; + } + else { + p_sctp_freeladdrs = NULL; + p_sctp_getladdrs = NULL; + } + if (erts_sys_ddll_sym(h_libsctp, "sctp_getpaddrs", &ptr) == 0) { + p_sctp_getpaddrs = ptr; + } + else p_sctp_getpaddrs = NULL; + if (erts_sys_ddll_sym(h_libsctp, "sctp_freepaddrs", &ptr) == 0) { + p_sctp_freepaddrs = ptr; + } + else { + p_sctp_freepaddrs = NULL; + p_sctp_getpaddrs = NULL; + } + inet_init_sctp(); + add_driver_entry(&sctp_inet_driver_entry); } + else p_sctp_bindx = NULL; } } # endif @@ -3835,10 +3908,12 @@ static char *inet_set_faddress(int family, inet_address* dst, ** and *len is the length of dst on return ** (suitable to deliver to erlang) */ -static int inet_get_address(int family, char* dst, inet_address* src, unsigned int* len) +static int inet_get_address(char* dst, inet_address* src, unsigned int* len) { + int family; short port; + family = src->sa.sa_family; if ((family == AF_INET) && (*len >= sizeof(struct sockaddr_in))) { dst[0] = 
INET_AF_INET; port = sock_ntohs(src->sai.sin_port); @@ -3860,6 +3935,75 @@ static int inet_get_address(int family, char* dst, inet_address* src, unsigned i return -1; } +/* Same as the above, but take family from the address structure, +** and advance the address pointer to the next address +** according to the size of the current, +** and return the resulting encoded size +*/ +static int inet_address_to_erlang(char *dst, inet_address **src) { + short port; + + switch ((*src)->sa.sa_family) { + case AF_INET: + if (dst) { + dst[0] = INET_AF_INET; + port = sock_ntohs((*src)->sai.sin_port); + put_int16(port, dst+1); + sys_memcpy(dst+1+2, (char *) &(*src)->sai.sin_addr, 4); + } + (*src) = (inet_address *) (&(*src)->sai + 1); + return 1 + 2 + 4; +#if defined(HAVE_IN6) && defined(AF_INET6) + case AF_INET6: + if (dst) { + dst[0] = INET_AF_INET6; + port = sock_ntohs((*src)->sai6.sin6_port); + put_int16(port, dst+1); + VALGRIND_MAKE_MEM_DEFINED(&(*src)->sai6.sin6_addr,16); /* false undefs from syscall sctp_get[lp]addrs */ + sys_memcpy(dst+1+2, (char *) &(*src)->sai6.sin6_addr, 16); + } + (*src) = (inet_address *) (&(*src)->sai6 + 1); + return 1 + 2 + 16; +#endif + default: + return -1; + } +} + +/* Encode n encoded addresses from addrs in the result buffer +*/ +static ErlDrvSizeT reply_inet_addrs +(int n, inet_address *addrs, char **rbuf, ErlDrvSizeT rsize) { + inet_address *ia; + int i, s; + ErlDrvSizeT rlen; + + if (IS_SOCKET_ERROR(n)) return ctl_error(sock_errno(), rbuf, rsize); + if (n == 0) return ctl_reply(INET_REP_OK, NULL, 0, rbuf, rsize); + + /* Calculate result length */ + rlen = 1; + ia = addrs; + for (i = 0; i < n; i++) { + s = inet_address_to_erlang(NULL, &ia); + if (s < 0) break; + rlen += s; + } + + if (rlen > rsize) (*rbuf) = ALLOC(rlen); + + (*rbuf)[0] = INET_REP_OK; + rlen = 1; + ia = addrs; + for (i = 0; i < n; i++) { + s = inet_address_to_erlang((*rbuf)+rlen, &ia); + if (s < 0) break; + rlen += s; + } + + return rlen; +} + static void desc_close(inet_descriptor* desc) { if (desc->s != INVALID_SOCKET) { @@ -4433,7 +4577,7 @@ static ErlDrvSSizeT inet_ctl_getiflist(inet_descriptor* desc, case AF_INET6: #endif case AF_INET: - ASSERT(sp+IFNAMSIZ+1 < sbuf+ifc.ifc_len+1) + ASSERT(sp+IFNAMSIZ+1 < sbuf+ifc.ifc_len+1); strncpy(sp, ifrp->ifr_name, IFNAMSIZ); sp[IFNAMSIZ] = '\0'; sp += strlen(sp), ++sp; @@ -7879,6 +8023,39 @@ static ErlDrvSSizeT inet_ctl(inet_descriptor* desc, int cmd, char* buf, return ctl_reply(INET_REP_OK, tbuf, strlen(tbuf), rbuf, rsize); } + case INET_REQ_GETPADDRS: { + DEBUGF(("inet_ctl(%ld): INET_GETPADDRS\r\n", (long)desc->port)); + + if (len != 4) return ctl_error(EINVAL, rbuf, rsize); + + if (! IS_OPEN(desc)) return ctl_xerror(EXBADPORT, rbuf, rsize); + if (! IS_BOUND(desc)) return ctl_xerror(EXBADSEQ, rbuf, rsize); + +#ifdef HAVE_SCTP + if (IS_SCTP(desc) && p_sctp_getpaddrs) { + struct sockaddr *sa; + Uint32 assoc_id; + int n; + ErlDrvSizeT rlen; + + assoc_id = get_int32(buf); + n = p_sctp_getpaddrs(desc->s, assoc_id, &sa); + rlen = reply_inet_addrs(n, (inet_address *) sa, rbuf, rsize); + if (n > 0) p_sctp_freepaddrs(sa); + return rlen; + } +#endif + { /* Fallback to sock_peer */ + inet_address addr; + unsigned int sz; + int i; + + sz = sizeof(addr); + i = sock_peer(desc->s, (struct sockaddr *) &addr, &sz); + return reply_inet_addrs(i >= 0 ? 
1 : i, &addr, rbuf, rsize); + } + } + case INET_REQ_PEER: { /* get peername */ char tbuf[sizeof(inet_address)]; inet_address peer; @@ -7894,7 +8071,7 @@ static ErlDrvSSizeT inet_ctl(inet_descriptor* desc, int cmd, char* buf, if (IS_SOCKET_ERROR(sock_peer(desc->s, (struct sockaddr*)ptr,&sz))) return ctl_error(sock_errno(), rbuf, rsize); } - if (inet_get_address(desc->sfamily, tbuf, ptr, &sz) < 0) + if (inet_get_address(tbuf, ptr, &sz) < 0) return ctl_error(EINVAL, rbuf, rsize); return ctl_reply(INET_REP_OK, tbuf, sz, rbuf, rsize); } @@ -7915,6 +8092,39 @@ static ErlDrvSSizeT inet_ctl(inet_descriptor* desc, int cmd, char* buf, } } + case INET_REQ_GETLADDRS: { + DEBUGF(("inet_ctl(%ld): INET_GETLADDRS\r\n", (long)desc->port)); + + if (len != 4) return ctl_error(EINVAL, rbuf, rsize); + + if (! IS_OPEN(desc)) return ctl_xerror(EXBADPORT, rbuf, rsize); + if (! IS_BOUND(desc)) return ctl_xerror(EXBADSEQ, rbuf, rsize); + +#ifdef HAVE_SCTP + if (IS_SCTP(desc) && p_sctp_getladdrs) { + struct sockaddr *sa; + Uint32 assoc_id; + int n; + ErlDrvSizeT rlen; + + assoc_id = get_int32(buf); + n = p_sctp_getladdrs(desc->s, assoc_id, &sa); + rlen = reply_inet_addrs(n, (inet_address *) sa, rbuf, rsize); + if (n > 0) p_sctp_freeladdrs(sa); + return rlen; + } +#endif + { /* Fallback to sock_name */ + inet_address addr; + unsigned int sz; + int i; + + sz = sizeof(addr); + i = sock_name(desc->s, (struct sockaddr *) &addr, &sz); + return reply_inet_addrs(i >= 0 ? 1 : i, &addr, rbuf, rsize); + } + } + case INET_REQ_NAME: { /* get sockname */ char tbuf[sizeof(inet_address)]; inet_address name; @@ -7931,7 +8141,7 @@ static ErlDrvSSizeT inet_ctl(inet_descriptor* desc, int cmd, char* buf, if (IS_SOCKET_ERROR(sock_name(desc->s, (struct sockaddr*)ptr, &sz))) return ctl_error(sock_errno(), rbuf, rsize); } - if (inet_get_address(desc->sfamily, tbuf, ptr, &sz) < 0) + if (inet_get_address(tbuf, ptr, &sz) < 0) return ctl_error(EINVAL, rbuf, rsize); return ctl_reply(INET_REP_OK, tbuf, sz, rbuf, rsize); } @@ -10770,7 +10980,7 @@ static int packet_inet_input(udp_descriptor* udesc, HANDLE event) inet_input_count(desc, n); udesc->i_ptr += n; - inet_get_address(desc->sfamily, abuf, &other, &len); + inet_get_address(abuf, &other, &len); /* Copy formatted address to the buffer allocated; "len" is the actual length which must be <= than the original reserved. This means that the addr + data in the buffer are contiguous, diff --git a/erts/emulator/drivers/unix/ttsl_drv.c b/erts/emulator/drivers/unix/ttsl_drv.c index 1e436830e7..491e0a090e 100644 --- a/erts/emulator/drivers/unix/ttsl_drv.c +++ b/erts/emulator/drivers/unix/ttsl_drv.c @@ -745,7 +745,7 @@ static Sint16 get_sint16(char *s) { return ((*s << 8) | ((byte*)s)[1]); } - + static int start_lbuf(void) { if (!lbuf && !(lbuf = ( Uint32*) driver_alloc(lbuf_size * sizeof(Uint32)))) @@ -1091,7 +1091,7 @@ static int move_cursor(int from, int to) move_left(-dc); return TRUE; } - + static int start_termcap(void) { int eres; @@ -1187,7 +1187,7 @@ static int move_down(int n) tputs(down, 1, outc); return TRUE; } - + /* * Updates cols if terminal has resized (SIGWINCH). Should be called @@ -1209,7 +1209,7 @@ static void update_cols(void) cols = width; } } - + /* * Put a terminal device into non-canonical mode with ECHO off. 
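The comment above introduces putting the terminal into non-canonical mode with ECHO off. A minimal POSIX termios sketch of that idea (an illustration, not the driver's actual code):

    #include <termios.h>

    static int tty_noncanon(int fd, struct termios *saved)
    {
        struct termios t;

        if (tcgetattr(fd, saved) < 0)
            return -1;
        t = *saved;
        t.c_lflag &= ~(ICANON | ECHO);   /* no line editing, no echo */
        t.c_cc[VMIN] = 1;                /* read(2) returns per byte  */
        t.c_cc[VTIME] = 0;               /* no inter-byte timeout     */
        return tcsetattr(fd, TCSANOW, &t);
    }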
diff --git a/erts/emulator/drivers/win32/win_efile.c b/erts/emulator/drivers/win32/win_efile.c index b36a103f8e..319065f57b 100644 --- a/erts/emulator/drivers/win32/win_efile.c +++ b/erts/emulator/drivers/win32/win_efile.c @@ -1216,7 +1216,7 @@ int flags; return 1; } - + /* * is_root_unc_name - returns TRUE if the argument is a UNC name specifying * a root share. That is, if it is of the form \\server\share\. diff --git a/erts/emulator/drivers/win32/winsock_func.h b/erts/emulator/drivers/win32/winsock_func.h deleted file mode 100644 index 9d2c099c4d..0000000000 --- a/erts/emulator/drivers/win32/winsock_func.h +++ /dev/null @@ -1,102 +0,0 @@ -/* - * %CopyrightBegin% - * - * Copyright Ericsson AB 1997-2009. All Rights Reserved. - * - * The contents of this file are subject to the Erlang Public License, - * Version 1.1, (the "License"); you may not use this file except in - * compliance with the License. You should have received a copy of the - * Erlang Public License along with this software. If not, it can be - * retrieved online at http://www.erlang.org/. - * - * Software distributed under the License is distributed on an "AS IS" - * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See - * the License for the specific language governing rights and limitations - * under the License. - * - * %CopyrightEnd% - */ - -typedef struct _WinSockFuncs { - int (WSAAPI *WSAStartup)(WORD wVersionRequired, LPWSADATA lpWSAData); - int (WSAAPI *WSACleanup)(void); - int (WSAAPI *WSAGetLastError)(void); - DWORD (WSAAPI *WSAWaitForMultipleEvents) (DWORD cEvents, - const WSAEVENT FAR * lphEvents, - BOOL fWaitAll, - DWORD dwTimeout, - BOOL fAlertable); - WSAEVENT (WSAAPI *WSACreateEvent)(void); - BOOL (WSAAPI *WSACloseEvent)(WSAEVENT hEvent); - - BOOL (WSAAPI *WSASetEvent)(WSAEVENT hEvent); - BOOL (WSAAPI *WSAResetEvent)(WSAEVENT hEvent); - int (WSAAPI *WSAEventSelect)(SOCKET s, WSAEVENT hEventObject, - long lNetworkEvents); - int (WSAAPI *WSAEnumNetworkEvents)(SOCKET s, - WSAEVENT hEventObject, - LPWSANETWORKEVENTS lpNetworkEvents); - int (WSAAPI *WSAIoctl)(SOCKET s, - DWORD dwIoControlCode, - LPVOID lpvInBuffer, - DWORD cbInBuffer, - LPVOID lpvOUTBuffer, - DWORD cbOUTBuffer, - LPDWORD lpcbBytesReturned, - LPWSAOVERLAPPED lpOverlapped, - LPWSAOVERLAPPED_COMPLETION_ROUTINE lpCompletionROUTINE - ); - SOCKET (WSAAPI *accept)(SOCKET s, struct sockaddr FAR *addr, - int FAR *addrlen); - int (WSAAPI *bind)(SOCKET s, const struct sockaddr FAR *addr, - int namelen); - int (WSAAPI *closesocket)(SOCKET s); - int (WSAAPI *connect)(SOCKET s, const struct sockaddr FAR *name, - int namelen); - int (WSAAPI *ioctlsocket)(SOCKET s, long cmd, u_long FAR *argp); - int (WSAAPI *getsockopt)(SOCKET s, int level, int optname, - char FAR * optval, int FAR *optlen); - u_long (WSAAPI *htonl)(u_long hostlong); - u_short (WSAAPI *htons)(u_short hostshort); - unsigned long (WSAAPI *inet_addr)(const char FAR * cp); - char FAR * (WSAAPI *inet_ntoa)(struct in_addr in); - int (WSAAPI *listen)(SOCKET s, int backlog); - u_short (WSAAPI *ntohs)(u_short netshort); - int (WSAAPI *recv)(SOCKET s, char FAR * buf, int len, int flags); - int (WSAAPI *send)(SOCKET s, const char FAR * buf, int len, int flags); - int (WSAAPI *setsockopt)(SOCKET s, int level, int optname, - const char FAR * optval, int optlen); - int (WSAAPI *shutdown)(SOCKET s, int how); - SOCKET (WSAAPI *socket)(int af, int type, int protocol); - struct hostent FAR * (WSAAPI *gethostbyname)(const char FAR * name); - struct hostent FAR * (WSAAPI *gethostbyaddr)(const char 
FAR *addr, - int addrlen, int addrtype); - int (WSAAPI *gethostname)(char FAR * name, int namelen); - struct servent FAR * (WSAAPI *getservbyname)(const char FAR * name, - const char FAR * proto); - struct servent FAR * (WSAAPI *getservbyport)(int port, - const char FAR * proto); - int (WSAAPI *getsockname)(SOCKET sock, struct sockaddr FAR *name, - int FAR *namelen); - - /* - * New, added for inet_drv. - */ - - int (WSAAPI *getpeername)(SOCKET s, struct sockaddr FAR * name, - int FAR * namelen); - u_long (WSAAPI *ntohl)(u_long netlong); - int (WSAAPI *WSASend)(SOCKET s, LPWSABUF lpBuffers, DWORD dwBufferCount, - LPDWORD lpNumberOfBytesSent, DWORD dwFlags, - LPWSAOVERLAPPED lpOverlapped, - LPWSAOVERLAPPED_COMPLETION_ROUTINE lpCompletionRoutine); - int (WSAAPI *sendto)(SOCKET s, const char FAR * buf, int len, - int flags, const struct sockaddr FAR * to, int tolen); - int (WSAAPI *recvfrom)(SOCKET s, char FAR * buf, int len, int flags, - struct sockaddr FAR * from, int FAR * fromlen); -} WinSockFuncs; - - -extern WinSockFuncs winSock; - -extern int tcp_lookup_functions(void); diff --git a/erts/emulator/sys/common/erl_mmap.c b/erts/emulator/sys/common/erl_mmap.c new file mode 100644 index 0000000000..3f6813e1a5 --- /dev/null +++ b/erts/emulator/sys/common/erl_mmap.c @@ -0,0 +1,2832 @@ +/* + * %CopyrightBegin% + * + * Copyright Ericsson AB 2002-2013. All Rights Reserved. + * + * The contents of this file are subject to the Erlang Public License, + * Version 1.1, (the "License"); you may not use this file except in + * compliance with the License. You should have received a copy of the + * Erlang Public License along with this software. If not, it can be + * retrieved online at http://www.erlang.org/. + * + * Software distributed under the License is distributed on an "AS IS" + * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + * the License for the specific language governing rights and limitations + * under the License. + * + * %CopyrightEnd% + */ +#ifdef HAVE_CONFIG_H +# include "config.h" +#endif + +#include "sys.h" +#include "erl_process.h" +#include "erl_smp.h" +#include "atom.h" +#include "erl_mmap.h" +#include <stddef.h> + +/* #define ERTS_MMAP_OP_RINGBUF_SZ 100 */ + +#if defined(DEBUG) || 0 +# undef ERTS_MMAP_DEBUG +# define ERTS_MMAP_DEBUG +# ifndef ERTS_MMAP_OP_RINGBUF_SZ +# define ERTS_MMAP_OP_RINGBUF_SZ 100 +# endif +#endif + +#ifndef ERTS_MMAP_OP_RINGBUF_SZ +# define ERTS_MMAP_OP_RINGBUF_SZ 0 +#endif + +/* #define ERTS_MMAP_DEBUG_FILL_AREAS */ + +#ifdef ERTS_MMAP_DEBUG +# define ERTS_MMAP_ASSERT ERTS_ASSERT +#else +# define ERTS_MMAP_ASSERT(A) ((void) 1) +#endif + +/* + * `mmap_state.sa.bot` and `mmap_state.sua.top` are read only after + * initialization, but the other pointers are not; i.e., only + * ERTS_MMAP_IN_SUPERCARRIER() is allowed without the mutex held. 
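The macros defined just below test supercarrier membership with a single unsigned comparison. A hedged sketch of why that works, using uintptr_t in place of the ERTS UWord type:

    #include <stdint.h>

    /* For bot <= top, the unsigned difference (p - bot) is smaller than
     * (top - bot) exactly when bot <= p < top; a pointer below bot wraps
     * around to a huge unsigned value and fails the comparison. */
    static int in_range(const char *p, const char *bot, const char *top)
    {
        return ((uintptr_t) p - (uintptr_t) bot)
                 < ((uintptr_t) top - (uintptr_t) bot);
    }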
+ */ +#define ERTS_MMAP_IN_SUPERCARRIER(PTR) \ + (((UWord) (PTR)) - ((UWord) mmap_state.sa.bot) \ + < ((UWord) mmap_state.sua.top) - ((UWord) mmap_state.sa.bot)) +#define ERTS_MMAP_IN_SUPERALIGNED_AREA(PTR) \ + (ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&mmap_state.mtx)), \ + (((UWord) (PTR)) - ((UWord) mmap_state.sa.bot) \ + < ((UWord) mmap_state.sa.top) - ((UWord) mmap_state.sa.bot))) +#define ERTS_MMAP_IN_SUPERUNALIGNED_AREA(PTR) \ + (ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&mmap_state.mtx)), \ + (((UWord) (PTR)) - ((UWord) mmap_state.sua.bot) \ + < ((UWord) mmap_state.sua.top) - ((UWord) mmap_state.sua.bot))) + +int erts_have_erts_mmap; +UWord erts_page_inv_mask; + +#if defined(DEBUG) || defined(ERTS_MMAP_DEBUG) +# undef RBT_DEBUG +# define RBT_DEBUG +#endif +#ifdef RBT_DEBUG +# define RBT_ASSERT ERTS_ASSERT +# define IF_RBT_DEBUG(C) C +#else +# define RBT_ASSERT(x) +# define IF_RBT_DEBUG(C) +#endif + +typedef struct RBTNode_ RBTNode; +struct RBTNode_ { + UWord parent_and_color; /* color in bit 0 of parent ptr */ + RBTNode *left; + RBTNode *right; +}; + +#define RED_FLG (1) +#define IS_RED(N) ((N) && ((N)->parent_and_color & RED_FLG)) +#define IS_BLACK(N) (!IS_RED(N)) +#define SET_RED(N) ((N)->parent_and_color |= RED_FLG) +#define SET_BLACK(N) ((N)->parent_and_color &= ~RED_FLG) + +static ERTS_INLINE RBTNode* parent(RBTNode* node) +{ + return (RBTNode*) (node->parent_and_color & ~RED_FLG); +} + +static ERTS_INLINE void set_parent(RBTNode* node, RBTNode* parent) +{ + RBT_ASSERT(!((UWord)parent & RED_FLG)); + node->parent_and_color = ((UWord)parent) | (node->parent_and_color & RED_FLG); +} + +static ERTS_INLINE UWord parent_and_color(RBTNode* parent, int color) +{ + RBT_ASSERT(!((UWord)parent & RED_FLG)); + RBT_ASSERT(!(color & ~RED_FLG)); + return ((UWord)parent) | color; +} + + +enum SortOrder { + ADDR_ORDER, /* only address order */ + SA_SZ_ADDR_ORDER, /* first super-aligned size then address order */ + SZ_REVERSE_ADDR_ORDER /* first size then reverse address order */ +}; +#ifdef HARD_DEBUG +static const char* sort_order_names[] = {"Address","SuperAlignedSize-Address","Size-RevAddress"}; +#endif + +typedef struct { + RBTNode* root; + enum SortOrder order; +}RBTree; + +#ifdef HARD_DEBUG +# define HARD_CHECK_IS_MEMBER(ROOT,NODE) rbt_assert_is_member(ROOT,NODE) +# define HARD_CHECK_TREE(TREE,SZ) check_tree(TREE, SZ) +static int rbt_assert_is_member(RBTNode* root, RBTNode* node); +static RBTNode* check_tree(RBTree* tree, Uint); +#else +# define HARD_CHECK_IS_MEMBER(ROOT,NODE) +# define HARD_CHECK_TREE(TREE,SZ) +#endif + +#if ERTS_MMAP_OP_RINGBUF_SZ + +static int mmap_op_ix; + +typedef enum { + ERTS_OP_TYPE_NONE, + ERTS_OP_TYPE_MMAP, + ERTS_OP_TYPE_MUNMAP, + ERTS_OP_TYPE_MREMAP +} ErtsMMapOpType; + +typedef struct { + ErtsMMapOpType type; + void *result; + UWord in_size; + UWord out_size; + void *old_ptr; + UWord old_size; +} ErtsMMapOp; + +static ErtsMMapOp mmap_ops[ERTS_MMAP_OP_RINGBUF_SZ]; + +#define ERTS_MMAP_OP_RINGBUF_INIT() \ + do { \ + int ix__; \ + for (ix__ = 0; ix__ < ERTS_MMAP_OP_RINGBUF_SZ; ix__++) {\ + mmap_ops[ix__].type = ERTS_OP_TYPE_NONE; \ + mmap_ops[ix__].result = NULL; \ + mmap_ops[ix__].in_size = 0; \ + mmap_ops[ix__].out_size = 0; \ + mmap_ops[ix__].old_ptr = NULL; \ + mmap_ops[ix__].old_size = 0; \ + } \ + mmap_op_ix = ERTS_MMAP_OP_RINGBUF_SZ-1; \ + } while (0) + +#define ERTS_MMAP_OP_START(SZ) \ + do { \ + int ix__; \ + if (++mmap_op_ix >= ERTS_MMAP_OP_RINGBUF_SZ) \ + mmap_op_ix = 0; \ + ix__ = mmap_op_ix; \ + mmap_ops[ix__].type = ERTS_OP_TYPE_MMAP; \ + 
mmap_ops[ix__].result = NULL; \ + mmap_ops[ix__].in_size = (SZ); \ + mmap_ops[ix__].out_size = 0; \ + mmap_ops[ix__].old_ptr = NULL; \ + mmap_ops[ix__].old_size = 0; \ + } while (0) + +#define ERTS_MMAP_OP_END(PTR, SZ) \ + do { \ + int ix__ = mmap_op_ix; \ + mmap_ops[ix__].result = (PTR); \ + mmap_ops[ix__].out_size = (SZ); \ + } while (0) + +#define ERTS_MMAP_OP_LCK(RES, IN_SZ, OUT_SZ) \ + do { \ + erts_smp_mtx_lock(&mmap_state.mtx); \ + ERTS_MMAP_OP_START((IN_SZ)); \ + ERTS_MMAP_OP_END((RES), (OUT_SZ)); \ + erts_smp_mtx_unlock(&mmap_state.mtx); \ + } while (0) + +#define ERTS_MUNMAP_OP(PTR, SZ) \ + do { \ + int ix__; \ + if (++mmap_op_ix >= ERTS_MMAP_OP_RINGBUF_SZ) \ + mmap_op_ix = 0; \ + ix__ = mmap_op_ix; \ + mmap_ops[ix__].type = ERTS_OP_TYPE_MUNMAP; \ + mmap_ops[ix__].result = NULL; \ + mmap_ops[ix__].in_size = 0; \ + mmap_ops[ix__].out_size = 0; \ + mmap_ops[ix__].old_ptr = (PTR); \ + mmap_ops[ix__].old_size = (SZ); \ + } while (0) + +#define ERTS_MUNMAP_OP_LCK(PTR, SZ) \ + do { \ + erts_smp_mtx_lock(&mmap_state.mtx); \ + ERTS_MUNMAP_OP((PTR), (SZ)); \ + erts_smp_mtx_unlock(&mmap_state.mtx); \ + } while (0) + +#define ERTS_MREMAP_OP_START(OLD_PTR, OLD_SZ, IN_SZ) \ + do { \ + int ix__; \ + if (++mmap_op_ix >= ERTS_MMAP_OP_RINGBUF_SZ) \ + mmap_op_ix = 0; \ + ix__ = mmap_op_ix; \ + mmap_ops[ix__].type = ERTS_OP_TYPE_MREMAP; \ + mmap_ops[ix__].result = NULL; \ + mmap_ops[ix__].in_size = (IN_SZ); \ + mmap_ops[ix__].out_size = (OLD_SZ); \ + mmap_ops[ix__].old_ptr = (OLD_PTR); \ + mmap_ops[ix__].old_size = (OLD_SZ); \ + } while (0) + +#define ERTS_MREMAP_OP_END(PTR, SZ) \ + do { \ + int ix__ = mmap_op_ix; \ + mmap_ops[ix__].result = (PTR); \ + mmap_ops[mmap_op_ix].out_size = (SZ); \ + } while (0) + +#define ERTS_MREMAP_OP_LCK(RES, OLD_PTR, OLD_SZ, IN_SZ, OUT_SZ) \ + do { \ + erts_smp_mtx_lock(&mmap_state.mtx); \ + ERTS_MREMAP_OP_START((OLD_PTR), (OLD_SZ), (IN_SZ)); \ + ERTS_MREMAP_OP_END((RES), (OUT_SZ)); \ + erts_smp_mtx_unlock(&mmap_state.mtx); \ + } while (0) + +#define ERTS_MMAP_OP_ABORT() \ + do { \ + int ix__ = mmap_op_ix; \ + mmap_ops[ix__].type = ERTS_OP_TYPE_NONE; \ + mmap_ops[ix__].result = NULL; \ + mmap_ops[ix__].in_size = 0; \ + mmap_ops[ix__].out_size = 0; \ + mmap_ops[ix__].old_ptr = NULL; \ + mmap_ops[ix__].old_size = 0; \ + if (--mmap_op_ix < 0) \ + mmap_op_ix = ERTS_MMAP_OP_RINGBUF_SZ-1; \ + } while (0) + +#else + +#define ERTS_MMAP_OP_RINGBUF_INIT() +#define ERTS_MMAP_OP_START(SZ) +#define ERTS_MMAP_OP_END(PTR, SZ) +#define ERTS_MMAP_OP_LCK(RES, IN_SZ, OUT_SZ) +#define ERTS_MUNMAP_OP(PTR, SZ) +#define ERTS_MUNMAP_OP_LCK(PTR, SZ) +#define ERTS_MREMAP_OP_START(OLD_PTR, OLD_SZ, IN_SZ) +#define ERTS_MREMAP_OP_END(PTR, SZ) +#define ERTS_MREMAP_OP_LCK(RES, OLD_PTR, OLD_SZ, IN_SZ, OUT_SZ) +#define ERTS_MMAP_OP_ABORT() + +#endif + +typedef struct { + RBTNode snode; /* node in 'stree' */ + RBTNode anode; /* node in 'atree' */ + char* start; + char* end; +}ErtsFreeSegDesc; + +typedef struct { + RBTree stree; /* size ordered tree */ + RBTree atree; /* address ordered tree */ + Uint nseg; +}ErtsFreeSegMap; + +static struct { + int (*reserve_physical)(char *, UWord); + void (*unreserve_physical)(char *, UWord); + int supercarrier; + int no_os_mmap; + /* + * Super unaligend area is located above super aligned + * area. That is, `sa.bot` is beginning of the super + * carrier, `sua.top` is the end of the super carrier, + * and sa.top and sua.bot moves towards eachother. 
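A rough sketch of that layout (illustration only, not part of the patch):

    /*
     *  sa.bot            sa.top          sua.bot            sua.top
     *    |  super aligned   |    unused     |  super unaligned  |
     *    |  area (grows ->) |    middle     |  (<- grows) area  |
     *    +------------------+---------------+-------------------+
     *    |<------------------- super carrier ------------------>|
     */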
+ */ + struct { + char *top; + char *bot; + ErtsFreeSegMap map; + } sua; + struct { + char *top; + char *bot; + ErtsFreeSegMap map; + } sa; +#if HAVE_MMAP && (!defined(MAP_ANON) && !defined(MAP_ANONYMOUS)) + int mmap_fd; +#endif + erts_smp_mtx_t mtx; + struct { + char *free_list; + char *unused_start; + char *unused_end; + char *new_area_hint; + Uint reserved; + } desc; + struct { + UWord free_seg_descs; + struct { + UWord curr; + UWord max; + } free_segs; + } no; + struct { + struct { + UWord total; + struct { + UWord total; + UWord sa; + UWord sua; + } used; + } supercarrier; + struct { + UWord used; + } os; + } size; +} mmap_state; + +#define ERTS_MMAP_SIZE_SC_SA_INC(SZ) \ + do { \ + mmap_state.size.supercarrier.used.total += (SZ); \ + mmap_state.size.supercarrier.used.sa += (SZ); \ + ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.total \ + <= mmap_state.size.supercarrier.total); \ + ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.sa \ + <= mmap_state.size.supercarrier.used.total); \ + } while (0) +#define ERTS_MMAP_SIZE_SC_SA_DEC(SZ) \ + do { \ + ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.total >= (SZ)); \ + mmap_state.size.supercarrier.used.total -= (SZ); \ + ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.sa >= (SZ)); \ + mmap_state.size.supercarrier.used.sa -= (SZ); \ + } while (0) +#define ERTS_MMAP_SIZE_SC_SUA_INC(SZ) \ + do { \ + mmap_state.size.supercarrier.used.total += (SZ); \ + mmap_state.size.supercarrier.used.sua += (SZ); \ + ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.total \ + <= mmap_state.size.supercarrier.total); \ + ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.sua \ + <= mmap_state.size.supercarrier.used.total); \ + } while (0) +#define ERTS_MMAP_SIZE_SC_SUA_DEC(SZ) \ + do { \ + ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.total >= (SZ)); \ + mmap_state.size.supercarrier.used.total -= (SZ); \ + ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.sua >= (SZ)); \ + mmap_state.size.supercarrier.used.sua -= (SZ); \ + } while (0) +#define ERTS_MMAP_SIZE_OS_INC(SZ) \ + do { \ + ERTS_MMAP_ASSERT(mmap_state.size.os.used + (SZ) >= (SZ)); \ + mmap_state.size.os.used += (SZ); \ + } while (0) +#define ERTS_MMAP_SIZE_OS_DEC(SZ) \ + do { \ + ERTS_MMAP_ASSERT(mmap_state.size.os.used >= (SZ)); \ + mmap_state.size.os.used -= (SZ); \ + } while (0) + + +static void +add_free_desc_area(char *start, char *end) +{ + ERTS_MMAP_ASSERT(end == (void *) 0 || end > start); + if (sizeof(ErtsFreeSegDesc) <= ((UWord) end) - ((UWord) start)) { + UWord no; + ErtsFreeSegDesc *prev_desc, *desc; + char *desc_end; + + no = 1; + prev_desc = (ErtsFreeSegDesc *) start; + prev_desc->start = mmap_state.desc.free_list; + desc = (ErtsFreeSegDesc *) (start + sizeof(ErtsFreeSegDesc)); + desc_end = start + 2*sizeof(ErtsFreeSegDesc); + + while (desc_end <= end) { + desc->start = (char *) prev_desc; + prev_desc = desc; + desc = (ErtsFreeSegDesc *) desc_end; + desc_end += sizeof(ErtsFreeSegDesc); + no++; + } + mmap_state.desc.free_list = (char *) prev_desc; + mmap_state.no.free_seg_descs += no; + } +} + +static ErtsFreeSegDesc * +add_unused_free_desc_area(void) +{ + char *ptr; + if (!mmap_state.desc.unused_start) + return NULL; + + ERTS_MMAP_ASSERT(mmap_state.desc.unused_end); + ERTS_MMAP_ASSERT(ERTS_PAGEALIGNED_SIZE + <= mmap_state.desc.unused_end - mmap_state.desc.unused_start); + + ptr = mmap_state.desc.unused_start + ERTS_PAGEALIGNED_SIZE; + add_free_desc_area(mmap_state.desc.unused_start, ptr); + + if ((mmap_state.desc.unused_end - ptr) >= ERTS_PAGEALIGNED_SIZE) + 
mmap_state.desc.unused_start = ptr; + else + mmap_state.desc.unused_end = mmap_state.desc.unused_start = NULL; + + ERTS_MMAP_ASSERT(mmap_state.desc.free_list); + return (ErtsFreeSegDesc *) mmap_state.desc.free_list; +} + +static ERTS_INLINE ErtsFreeSegDesc * +alloc_desc(void) +{ + ErtsFreeSegDesc *res; + res = (ErtsFreeSegDesc *) mmap_state.desc.free_list; + if (!res) { + res = add_unused_free_desc_area(); + if (!res) + return NULL; + } + mmap_state.desc.free_list = res->start; + ASSERT(mmap_state.no.free_segs.curr < mmap_state.no.free_seg_descs); + mmap_state.no.free_segs.curr++; + if (mmap_state.no.free_segs.max < mmap_state.no.free_segs.curr) + mmap_state.no.free_segs.max = mmap_state.no.free_segs.curr; + return res; +} + +static ERTS_INLINE void +free_desc(ErtsFreeSegDesc *desc) +{ + desc->start = mmap_state.desc.free_list; + mmap_state.desc.free_list = (char *) desc; + ERTS_MMAP_ASSERT(mmap_state.no.free_segs.curr > 0); + mmap_state.no.free_segs.curr--; +} + +static ERTS_INLINE ErtsFreeSegDesc* anode_to_desc(RBTNode* anode) +{ + return (ErtsFreeSegDesc*) ((char*)anode - offsetof(ErtsFreeSegDesc, anode)); +} + +static ERTS_INLINE ErtsFreeSegDesc* snode_to_desc(RBTNode* snode) +{ + return (ErtsFreeSegDesc*) ((char*)snode - offsetof(ErtsFreeSegDesc, snode)); +} + +static ERTS_INLINE ErtsFreeSegDesc* node_to_desc(enum SortOrder order, RBTNode* node) +{ + return order==ADDR_ORDER ? anode_to_desc(node) : snode_to_desc(node); +} + +static ERTS_INLINE SWord usable_size(enum SortOrder order, + ErtsFreeSegDesc* desc) +{ + return ((order == SA_SZ_ADDR_ORDER) ? + ERTS_SUPERALIGNED_FLOOR(desc->end) - ERTS_SUPERALIGNED_CEILING(desc->start) + : desc->end - desc->start); +} + +#ifdef HARD_DEBUG +static ERTS_INLINE SWord cmp_nodes(enum SortOrder order, + RBTNode* lhs, RBTNode* rhs) +{ + ErtsFreeSegDesc* ldesc = node_to_desc(order, lhs); + ErtsFreeSegDesc* rdesc = node_to_desc(order, rhs); + RBT_ASSERT(lhs != rhs); + if (order != ADDR_ORDER) { + SWord diff = usable_size(order, ldesc) - usable_size(order, rdesc); + if (diff) return diff; + } + if (order != SZ_REVERSE_ADDR_ORDER) { + return (char*)ldesc->start - (char*)rdesc->start; + } + else { + return (char*)rdesc->start - (char*)ldesc->start; + } +} +#endif /* HARD_DEBUG */ + +static ERTS_INLINE SWord cmp_with_node(enum SortOrder order, + SWord sz, char* addr, RBTNode* rhs) +{ + ErtsFreeSegDesc* rdesc; + if (order != ADDR_ORDER) { + SWord diff; + rdesc = snode_to_desc(rhs); + diff = sz - usable_size(order, rdesc); + if (diff) return diff; + } + else + rdesc = anode_to_desc(rhs); + + if (order != SZ_REVERSE_ADDR_ORDER) + return addr - (char*)rdesc->start; + else + return (char*)rdesc->start - addr; +} + + +static ERTS_INLINE void +left_rotate(RBTNode **root, RBTNode *x) +{ + RBTNode *y = x->right; + x->right = y->left; + if (y->left) + set_parent(y->left, x); + set_parent(y, parent(x)); + if (!parent(y)) { + RBT_ASSERT(*root == x); + *root = y; + } + else if (x == parent(x)->left) + parent(x)->left = y; + else { + RBT_ASSERT(x == parent(x)->right); + parent(x)->right = y; + } + y->left = x; + set_parent(x, y); +} + +static ERTS_INLINE void +right_rotate(RBTNode **root, RBTNode *x) +{ + RBTNode *y = x->left; + x->left = y->right; + if (y->right) + set_parent(y->right, x); + set_parent(y, parent(x)); + if (!parent(y)) { + RBT_ASSERT(*root == x); + *root = y; + } + else if (x == parent(x)->right) + parent(x)->right = y; + else { + RBT_ASSERT(x == parent(x)->left); + parent(x)->left = y; + } + y->right = x; + set_parent(x, y); +} + +/* + * Replace node x 
with node y + * NOTE: segment descriptor of y is not changed + */ +static ERTS_INLINE void +replace(RBTNode **root, RBTNode *x, RBTNode *y) +{ + + if (!parent(x)) { + RBT_ASSERT(*root == x); + *root = y; + } + else if (x == parent(x)->left) + parent(x)->left = y; + else { + RBT_ASSERT(x == parent(x)->right); + parent(x)->right = y; + } + if (x->left) { + RBT_ASSERT(parent(x->left) == x); + set_parent(x->left, y); + } + if (x->right) { + RBT_ASSERT(parent(x->right) == x); + set_parent(x->right, y); + } + + y->parent_and_color = x->parent_and_color; + y->right = x->right; + y->left = x->left; +} + +static void +tree_insert_fixup(RBTNode** root, RBTNode *node) +{ + RBTNode *x = node, *y, *papa_x, *granpa_x; + + /* + * Rearrange the tree so that it satisfies the Red-Black Tree properties + */ + + papa_x = parent(x); + RBT_ASSERT(x != *root && IS_RED(papa_x)); + do { + + /* + * x and its parent are both red. Move the red pair up the tree + * until we get to the root or until we can separate them. + */ + + granpa_x = parent(papa_x); + RBT_ASSERT(IS_RED(x)); + RBT_ASSERT(IS_BLACK(granpa_x)); + RBT_ASSERT(granpa_x); + + if (papa_x == granpa_x->left) { + y = granpa_x->right; + if (IS_RED(y)) { + SET_BLACK(y); + SET_BLACK(papa_x); + SET_RED(granpa_x); + x = granpa_x; + } + else { + + if (x == papa_x->right) { + left_rotate(root, papa_x); + papa_x = x; + x = papa_x->left; + } + + RBT_ASSERT(x == granpa_x->left->left); + RBT_ASSERT(IS_RED(x)); + RBT_ASSERT(IS_RED(papa_x)); + RBT_ASSERT(IS_BLACK(granpa_x)); + RBT_ASSERT(IS_BLACK(y)); + + SET_BLACK(papa_x); + SET_RED(granpa_x); + right_rotate(root, granpa_x); + + RBT_ASSERT(x == parent(x)->left); + RBT_ASSERT(IS_RED(x)); + RBT_ASSERT(IS_RED(parent(x)->right)); + RBT_ASSERT(IS_BLACK(parent(x))); + break; + } + } + else { + RBT_ASSERT(papa_x == granpa_x->right); + y = granpa_x->left; + if (IS_RED(y)) { + SET_BLACK(y); + SET_BLACK(papa_x); + SET_RED(granpa_x); + x = granpa_x; + } + else { + + if (x == papa_x->left) { + right_rotate(root, papa_x); + papa_x = x; + x = papa_x->right; + } + + RBT_ASSERT(x == granpa_x->right->right); + RBT_ASSERT(IS_RED(x)); + RBT_ASSERT(IS_RED(papa_x)); + RBT_ASSERT(IS_BLACK(granpa_x)); + RBT_ASSERT(IS_BLACK(y)); + + SET_BLACK(papa_x); + SET_RED(granpa_x); + left_rotate(root, granpa_x); + + RBT_ASSERT(x == parent(x)->right); + RBT_ASSERT(IS_RED(x)); + RBT_ASSERT(IS_RED(parent(x)->left)); + RBT_ASSERT(IS_BLACK(parent(x))); + break; + } + } + } while (x != *root && (papa_x=parent(x), IS_RED(papa_x))); + + SET_BLACK(*root); +} + +static void +rbt_delete(RBTree* tree, RBTNode* del) +{ + Uint spliced_is_black; + RBTNode *x, *y, *z = del, *papa_y; + RBTNode null_x; /* null_x is used to get the fixup started when we + splice out a node without children. */ + + HARD_CHECK_IS_MEMBER(tree->root, del); + HARD_CHECK_TREE(tree, 0); + + null_x.parent_and_color = parent_and_color(NULL, !RED_FLG); + + /* Remove node from tree... */ + + /* Find node to splice out */ + if (!z->left || !z->right) + y = z; + else + /* Set y to z:s successor */ + for(y = z->right; y->left; y = y->left); + /* splice out y */ + x = y->left ? 
y->left : y->right; + spliced_is_black = IS_BLACK(y); + papa_y = parent(y); + if (x) { + set_parent(x, papa_y); + } + else if (spliced_is_black) { + x = &null_x; + x->right = x->left = NULL; + x->parent_and_color = parent_and_color(papa_y, !RED_FLG); + y->left = x; + } + + if (!papa_y) { + RBT_ASSERT(tree->root == y); + tree->root = x; + } + else { + if (y == papa_y->left) { + papa_y->left = x; + } + else { + RBT_ASSERT(y == papa_y->right); + papa_y->right = x; + } + } + if (y != z) { + /* We spliced out the successor of z; replace z by the successor */ + RBT_ASSERT(z != &null_x); + replace(&tree->root, z, y); + } + + if (spliced_is_black) { + RBTNode* papa_x; + /* We removed a black node which makes the resulting tree + violate the Red-Black Tree properties. Fixup tree... */ + + papa_x = parent(x); + while (IS_BLACK(x) && papa_x) { + + /* + * x has an "extra black" which we move up the tree + * until we reach the root or until we can get rid of it. + * + * y is the sibbling of x + */ + + if (x == papa_x->left) { + y = papa_x->right; + RBT_ASSERT(y); + if (IS_RED(y)) { + RBT_ASSERT(y->right); + RBT_ASSERT(y->left); + SET_BLACK(y); + RBT_ASSERT(IS_BLACK(papa_x)); + SET_RED(papa_x); + left_rotate(&tree->root, papa_x); + RBT_ASSERT(papa_x == parent(x)); + y = papa_x->right; + } + RBT_ASSERT(y); + RBT_ASSERT(IS_BLACK(y)); + if (IS_BLACK(y->left) && IS_BLACK(y->right)) { + SET_RED(y); + } + else { + if (IS_BLACK(y->right)) { + SET_BLACK(y->left); + SET_RED(y); + right_rotate(&tree->root, y); + RBT_ASSERT(papa_x == parent(x)); + y = papa_x->right; + } + RBT_ASSERT(y); + if (IS_RED(papa_x)) { + + SET_BLACK(papa_x); + SET_RED(y); + } + RBT_ASSERT(y->right); + SET_BLACK(y->right); + left_rotate(&tree->root, papa_x); + x = tree->root; + break; + } + } + else { + RBT_ASSERT(x == papa_x->right); + y = papa_x->left; + RBT_ASSERT(y); + if (IS_RED(y)) { + RBT_ASSERT(y->right); + RBT_ASSERT(y->left); + SET_BLACK(y); + RBT_ASSERT(IS_BLACK(papa_x)); + SET_RED(papa_x); + right_rotate(&tree->root, papa_x); + RBT_ASSERT(papa_x == parent(x)); + y = papa_x->left; + } + RBT_ASSERT(y); + RBT_ASSERT(IS_BLACK(y)); + if (IS_BLACK(y->right) && IS_BLACK(y->left)) { + SET_RED(y); + } + else { + if (IS_BLACK(y->left)) { + SET_BLACK(y->right); + SET_RED(y); + left_rotate(&tree->root, y); + RBT_ASSERT(papa_x == parent(x)); + y = papa_x->left; + } + RBT_ASSERT(y); + if (IS_RED(papa_x)) { + SET_BLACK(papa_x); + SET_RED(y); + } + RBT_ASSERT(y->left); + SET_BLACK(y->left); + right_rotate(&tree->root, papa_x); + x = tree->root; + break; + } + } + x = papa_x; + papa_x = parent(x); + } + SET_BLACK(x); + + papa_x = parent(&null_x); + if (papa_x) { + if (papa_x->left == &null_x) + papa_x->left = NULL; + else { + RBT_ASSERT(papa_x->right == &null_x); + papa_x->right = NULL; + } + RBT_ASSERT(!null_x.left); + RBT_ASSERT(!null_x.right); + } + else if (tree->root == &null_x) { + tree->root = NULL; + RBT_ASSERT(!null_x.left); + RBT_ASSERT(!null_x.right); + } + } + HARD_CHECK_TREE(tree, 0); +} + + +static void +rbt_insert(RBTree* tree, RBTNode* node) +{ +#ifdef RBT_DEBUG + ErtsFreeSegDesc *dbg_under=NULL, *dbg_over=NULL; +#endif + ErtsFreeSegDesc* desc = node_to_desc(tree->order, node); + char* seg_addr = desc->start; + SWord seg_sz = desc->end - desc->start; + + HARD_CHECK_TREE(tree, 0); + + node->left = NULL; + node->right = NULL; + + if (!tree->root) { + node->parent_and_color = parent_and_color(NULL, !RED_FLG); + tree->root = node; + } + else { + RBTNode *x = tree->root; + while (1) { + SWord diff = cmp_with_node(tree->order, seg_sz, 
seg_addr, x); + if (diff < 0) { + IF_RBT_DEBUG(dbg_over = node_to_desc(tree->order, x)); + if (!x->left) { + node->parent_and_color = parent_and_color(x, RED_FLG); + x->left = node; + break; + } + x = x->left; + } + else { + RBT_ASSERT(diff > 0); + IF_RBT_DEBUG(dbg_under = node_to_desc(tree->order, x)); + if (!x->right) { + node->parent_and_color = parent_and_color(x, RED_FLG); + x->right = node; + break; + } + x = x->right; + } + } + + RBT_ASSERT(parent(node)); +#ifdef RBT_DEBUG + if (tree->order == ADDR_ORDER) { + RBT_ASSERT(!dbg_under || dbg_under->end < desc->start); + RBT_ASSERT(!dbg_over || dbg_over->start > desc->end); + } +#endif + RBT_ASSERT(IS_RED(node)); + if (IS_RED(parent(node))) + tree_insert_fixup(&tree->root, node); + } + HARD_CHECK_TREE(tree, 0); +} + +/* + * Traverse tree in (reverse) sorting order + */ +static void +rbt_foreach_node(RBTree* tree, + void (*fn)(RBTNode*,void*), + void* arg, int reverse) +{ +#ifdef HARD_DEBUG + Sint blacks = -1; + Sint curr_blacks = 1; + Uint depth = 1; + Uint max_depth = 0; + Uint node_cnt = 0; +#endif + enum { RECURSE_LEFT, DO_NODE, RECURSE_RIGHT, RETURN_TO_PARENT }state; + RBTNode *x = tree->root; + + RBT_ASSERT(!x || !parent(x)); + + state = reverse ? RECURSE_RIGHT : RECURSE_LEFT; + while (x) { + switch (state) { + case RECURSE_LEFT: + if (x->left) { + RBT_ASSERT(parent(x->left) == x); + #ifdef HARD_DEBUG + ++depth; + if (IS_BLACK(x->left)) + curr_blacks++; + #endif + x = x->left; + state = reverse ? RECURSE_RIGHT : RECURSE_LEFT; + } + else { + #ifdef HARD_DEBUG + if (blacks < 0) + blacks = curr_blacks; + RBT_ASSERT(blacks == curr_blacks); + #endif + state = reverse ? RETURN_TO_PARENT : DO_NODE; + } + break; + + case DO_NODE: + #ifdef HARD_DEBUG + ++node_cnt; + if (depth > max_depth) + max_depth = depth; + #endif + (*fn) (x, arg); /* Do it! */ + state = reverse ? RECURSE_LEFT : RECURSE_RIGHT; + break; + + case RECURSE_RIGHT: + if (x->right) { + RBT_ASSERT(parent(x->right) == x); + #ifdef HARD_DEBUG + ++depth; + if (IS_BLACK(x->right)) + curr_blacks++; + #endif + x = x->right; + state = reverse ? RECURSE_RIGHT : RECURSE_LEFT; + } + else { + #ifdef HARD_DEBUG + if (blacks < 0) + blacks = curr_blacks; + RBT_ASSERT(blacks == curr_blacks); + #endif + state = reverse ? DO_NODE : RETURN_TO_PARENT; + } + break; + + case RETURN_TO_PARENT: + #ifdef HARD_DEBUG + if (IS_BLACK(x)) + curr_blacks--; + --depth; + #endif + if (parent(x)) { + if (x == parent(x)->left) { + state = reverse ? RETURN_TO_PARENT : DO_NODE; + } + else { + RBT_ASSERT(x == parent(x)->right); + state = reverse ? DO_NODE : RETURN_TO_PARENT; + } + } + x = parent(x); + break; + } + } +#ifdef HARD_DEBUG + RBT_ASSERT(depth == 0 || (!tree->root && depth==1)); + RBT_ASSERT(curr_blacks == 0); + RBT_ASSERT((1 << (max_depth/2)) <= node_cnt); +#endif +} + +#if defined(RBT_DEBUG) || defined(HARD_DEBUG_MSEG) +static RBTNode* rbt_prev_node(RBTNode* node) +{ + RBTNode* x; + if (node->left) { + for (x=node->left; x->right; x=x->right) + ; + return x; + } + for (x=node; parent(x); x=parent(x)) { + if (parent(x)->right == x) + return parent(x); + } + return NULL; +} +static RBTNode* rbt_next_node(RBTNode* node) +{ + RBTNode* x; + if (node->right) { + for (x=node->right; x->left; x=x->left) + ; + return x; + } + for (x=node; parent(x); x=parent(x)) { + if (parent(x)->left == x) + return parent(x); + } + return NULL; +} +#endif /* RBT_DEBUG || HARD_DEBUG_MSEG */ + + +/* The API to keep track of a bunch of separated (free) segments + (non-overlapping and non-adjacent). 
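+
+ Each free segment is entered into two red-black trees at once:
+ 'atree', sorted on address and used to find the adjacent neighbours
+ of an area when coalescing, and 'stree', sorted on size and used for
+ best-fit lookup. Both trees refer to the same ErtsFreeSegDesc.
+
+ A rough usage sketch (using the functions declared below):
+
+   desc = lookup_free_seg(&map, size);              -- best fit
+   ... use [desc->start, desc->start + size) ...
+   resize_free_seg(&map, desc, desc->start + size, desc->end);
+   -- or delete_free_seg(&map, desc) if the segment fits exactly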
+ */ +static void init_free_seg_map(ErtsFreeSegMap*, enum SortOrder); +static void adjacent_free_seg(ErtsFreeSegMap*, char* start, char* end, + ErtsFreeSegDesc** under, ErtsFreeSegDesc** over); +static void insert_free_seg(ErtsFreeSegMap*, ErtsFreeSegDesc*, char* start, char* end); +static void resize_free_seg(ErtsFreeSegMap*, ErtsFreeSegDesc*, char* start, char* end); +static void delete_free_seg(ErtsFreeSegMap*, ErtsFreeSegDesc*); +static ErtsFreeSegDesc* lookup_free_seg(ErtsFreeSegMap*, SWord sz); + + +static void init_free_seg_map(ErtsFreeSegMap* map, enum SortOrder order) +{ + map->atree.root = NULL; + map->atree.order = ADDR_ORDER; + map->stree.root = NULL; + map->stree.order = order; + map->nseg = 0; +} + +/* Lookup directly adjacent free segments to the given area [start->end]. + * The given area must not contain any free segments. + */ +static void adjacent_free_seg(ErtsFreeSegMap* map, char* start, char* end, + ErtsFreeSegDesc** under, ErtsFreeSegDesc** over) +{ + RBTNode* x = map->atree.root; + + *under = NULL; + *over = NULL; + while (x) { + if (start < anode_to_desc(x)->start) { + RBT_ASSERT(end <= anode_to_desc(x)->start); + if (end == anode_to_desc(x)->start) { + RBT_ASSERT(!*over); + *over = anode_to_desc(x); + } + x = x->left; + } + else { + RBT_ASSERT(start >= anode_to_desc(x)->end); + if (start == anode_to_desc(x)->end) { + RBT_ASSERT(!*under); + *under = anode_to_desc(x); + } + x = x->right; + } + } +} + +/* Initialize 'desc' and insert as new free segment [start->end]. + * The new segment must not contain or be adjacent to any free segment in 'map'. + */ +static void insert_free_seg(ErtsFreeSegMap* map, ErtsFreeSegDesc* desc, + char* start, char* end) +{ + desc->start = start; + desc->end = end; + rbt_insert(&map->atree, &desc->anode); + rbt_insert(&map->stree, &desc->snode); + map->nseg++; +} + +/* Resize existing free segment 'desc' to [start->end]. + * The new segment location must overlap the old location and + * it must not contain or be adjacent to any other free segment in 'map'. + */ +static void resize_free_seg(ErtsFreeSegMap* map, ErtsFreeSegDesc* desc, + char* start, char* end) +{ +#ifdef RBT_DEBUG + RBTNode* prev = rbt_prev_node(&desc->anode); + RBTNode* next = rbt_next_node(&desc->anode); + RBT_ASSERT(!prev || anode_to_desc(prev)->end < start); + RBT_ASSERT(!next || anode_to_desc(next)->start > end); +#endif + rbt_delete(&map->stree, &desc->snode); + desc->start = start; + desc->end = end; + rbt_insert(&map->stree, &desc->snode); +} + +/* Delete existing free segment 'desc' from 'map'. + */ +static void delete_free_seg(ErtsFreeSegMap* map, ErtsFreeSegDesc* desc) +{ + rbt_delete(&map->atree, &desc->anode); + rbt_delete(&map->stree, &desc->snode); + map->nseg--; +} + +/* Lookup a free segment in 'map' with a size of at least 'need_sz' usable bytes. 
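+ * The size-ordered tree is searched top down: a segment that is too
+ * small sends the search to the right, otherwise the segment is
+ * remembered as the best candidate so far and the search continues to
+ * the left. The result is the smallest fitting segment according to
+ * the tree's sort order, or NULL if no free segment is large enough.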
+ */ +static ErtsFreeSegDesc* lookup_free_seg(ErtsFreeSegMap* map, SWord need_sz) +{ + RBTNode* x = map->stree.root; + ErtsFreeSegDesc* best_desc = NULL; + const enum SortOrder order = map->stree.order; + + while (x) { + ErtsFreeSegDesc* desc = snode_to_desc(x); + SWord seg_sz = usable_size(order, desc); + + if (seg_sz < need_sz) { + x = x->right; + } + else { + best_desc = desc; + x = x->left; + } + } + return best_desc; +} + +struct build_arg_t +{ + Process* p; + Eterm* hp; + Eterm acc; +}; + +static void build_free_seg_tuple(RBTNode* node, void* arg) +{ + struct build_arg_t* a = (struct build_arg_t*)arg; + ErtsFreeSegDesc* desc = anode_to_desc(node); + Eterm start= erts_bld_uword(&a->hp, NULL, (UWord)desc->start); + Eterm end = erts_bld_uword(&a->hp, NULL, (UWord)desc->end); + Eterm tpl = TUPLE2(a->hp, start, end); + + a->hp += 3; + a->acc = CONS(a->hp, tpl, a->acc); + a->hp += 2; +} + +static +Eterm build_free_seg_list(Process* p, ErtsFreeSegMap* map) +{ + struct build_arg_t barg; + Eterm* hp_end; + const Uint may_need = map->nseg * (2 + 3 + 2*2); /* cons + tuple + bigs */ + + barg.p = p; + barg.hp = HAlloc(p, may_need); + hp_end = barg.hp + may_need; + barg.acc = NIL; + rbt_foreach_node(&map->atree, build_free_seg_tuple, &barg, 1); + + RBT_ASSERT(barg.hp <= hp_end); + HRelease(p, hp_end, barg.hp); + return barg.acc; +} + +#if ERTS_HAVE_OS_MMAP +/* Implementation of os_mmap()/os_munmap()/os_mremap()... */ + +#if HAVE_MMAP +# define ERTS_MMAP_PROT (PROT_READ|PROT_WRITE) +# if defined(MAP_ANONYMOUS) +# define ERTS_MMAP_FLAGS (MAP_ANON|MAP_PRIVATE) +# define ERTS_MMAP_FD (-1) +# elif defined(MAP_ANON) +# define ERTS_MMAP_FLAGS (MAP_ANON|MAP_PRIVATE) +# define ERTS_MMAP_FD (-1) +# else +# define ERTS_MMAP_FLAGS (MAP_PRIVATE) +# define ERTS_MMAP_FD mmap_state.mmap_fd +# endif +#endif + +static ERTS_INLINE void * +os_mmap(void *hint_ptr, UWord size, int try_superalign) +{ +#if HAVE_MMAP + void *res; +#ifdef MAP_ALIGN + if (try_superalign) + res = mmap((void *) ERTS_SUPERALIGNED_SIZE, size, ERTS_MMAP_PROT, + ERTS_MMAP_FLAGS|MAP_ALIGN, ERTS_MMAP_FD, 0); + else +#endif + res = mmap((void *) hint_ptr, size, ERTS_MMAP_PROT, + ERTS_MMAP_FLAGS, ERTS_MMAP_FD, 0); + if (res == MAP_FAILED) + return NULL; + return res; +#elif HAVE_VIRTUALALLOC + return (void *) VirtualAlloc(NULL, (SIZE_T) size, + MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE); +#else +# error "missing mmap() or similar" +#endif +} + +static ERTS_INLINE void +os_munmap(void *ptr, UWord size) +{ +#if HAVE_MMAP +#ifdef ERTS_MMAP_DEBUG + int res = +#endif + munmap(ptr, size); + ERTS_MMAP_ASSERT(res == 0); +#elif HAVE_VIRTUALALLOC +#ifdef DEBUG + BOOL res = +#endif + VirtualFree((LPVOID) ptr, (SIZE_T) 0, MEM_RELEASE); + ERTS_MMAP_ASSERT(res != 0); +#else +# error "missing munmap() or similar" +#endif +} + +#ifdef ERTS_HAVE_OS_MREMAP +# if HAVE_MREMAP +# if defined(__NetBSD__) +# define ERTS_MREMAP_FLAGS (0) +# else +# define ERTS_MREMAP_FLAGS (MREMAP_MAYMOVE) +# endif +# endif +static ERTS_INLINE void * +os_mremap(void *ptr, UWord old_size, UWord new_size, int try_superalign) +{ + void *new_seg; +#if HAVE_MREMAP + new_seg = mremap(ptr, (size_t) old_size, +# if defined(__NetBSD__) + NULL, +# endif + (size_t) new_size, ERTS_MREMAP_FLAGS); + if (new_seg == (void *) MAP_FAILED) + return NULL; + return new_seg; +#else +# error "missing mremap() or similar" +#endif +} +#endif + +#ifdef ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION +#if HAVE_MMAP + +#define ERTS_MMAP_RESERVE_PROT (ERTS_MMAP_PROT) +#define ERTS_MMAP_RESERVE_FLAGS (ERTS_MMAP_FLAGS|MAP_FIXED) 
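+/* Unreserving (and the initial "virtual" mapping) combines PROT_NONE
+ * with MAP_NORESERVE: the address range stays reserved in the process,
+ * but it is inaccessible and no physical memory or swap space is
+ * committed for it until it is reserved again with the flags above.
+ */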
+#define ERTS_MMAP_UNRESERVE_PROT (PROT_NONE) +#define ERTS_MMAP_UNRESERVE_FLAGS (ERTS_MMAP_FLAGS|MAP_NORESERVE|MAP_FIXED) +#define ERTS_MMAP_VIRTUAL_PROT (PROT_NONE) +#define ERTS_MMAP_VIRTUAL_FLAGS (ERTS_MMAP_FLAGS|MAP_NORESERVE) + +static int +os_reserve_physical(char *ptr, UWord size) +{ + void *res = mmap((void *) ptr, (size_t) size, ERTS_MMAP_RESERVE_PROT, + ERTS_MMAP_RESERVE_FLAGS, ERTS_MMAP_FD, 0); + if (res == (void *) MAP_FAILED) + return 0; + return 1; +} + +static void +os_unreserve_physical(char *ptr, UWord size) +{ + void *res = mmap((void *) ptr, (size_t) size, ERTS_MMAP_UNRESERVE_PROT, + ERTS_MMAP_UNRESERVE_FLAGS, ERTS_MMAP_FD, 0); + if (res == (void *) MAP_FAILED) + erl_exit(ERTS_ABORT_EXIT, "Failed to unreserve memory"); +} + +static void * +os_mmap_virtual(char *ptr, UWord size) +{ + void *res = mmap((void *) ptr, (size_t) size, ERTS_MMAP_VIRTUAL_PROT, + ERTS_MMAP_VIRTUAL_FLAGS, ERTS_MMAP_FD, 0); + if (res == (void *) MAP_FAILED) + return NULL; + return res; +} + +#else +#error "Missing reserve/unreserve physical memory implementation" +#endif +#endif /* ERTS_HAVE_OS_RESERVE_PHYSICAL_MEMORY */ + +#endif /* ERTS_HAVE_OS_MMAP */ + +static int reserve_noop(char *ptr, UWord size) +{ +#ifdef ERTS_MMAP_DEBUG_FILL_AREAS + Uint32 *uip, *end = (Uint32 *) (ptr + size); + + for (uip = (Uint32 *) ptr; uip < end; uip++) + ERTS_MMAP_ASSERT(*uip == (Uint32) 0xdeadbeef); + for (uip = (Uint32 *) ptr; uip < end; uip++) + *uip = (Uint32) 0xfeedfeed; +#endif + return 1; +} + +static void unreserve_noop(char *ptr, UWord size) +{ +#ifdef ERTS_MMAP_DEBUG_FILL_AREAS + Uint32 *uip, *end = (Uint32 *) (ptr + size); + + for (uip = (Uint32 *) ptr; uip < end; uip++) + *uip = (Uint32) 0xdeadbeef; +#endif +} + +static UWord +alloc_desc_insert_free_seg(ErtsFreeSegMap *map, char* start, char* end) +{ + char *ptr; + ErtsFreeSegMap *da_map; + ErtsFreeSegDesc *desc = alloc_desc(); + if (desc) { + insert_free_seg(map, desc, start, end); + return 0; + } + + /* + * Ahh; ran out of free segment descriptors. + * + * First try to map a new page... + */ + +#if ERTS_HAVE_OS_MMAP + if (!mmap_state.no_os_mmap) { + ptr = os_mmap(mmap_state.desc.new_area_hint, ERTS_PAGEALIGNED_SIZE, 0); + if (ptr) { + mmap_state.desc.new_area_hint = ptr+ERTS_PAGEALIGNED_SIZE; + ERTS_MMAP_SIZE_OS_INC(ERTS_PAGEALIGNED_SIZE); + add_free_desc_area(ptr, ptr+ERTS_PAGEALIGNED_SIZE); + desc = alloc_desc(); + ERTS_MMAP_ASSERT(desc); + insert_free_seg(map, desc, start, end); + return 0; + } + } +#endif + + /* + * ...then try to find a good place in the supercarrier... + */ + da_map = &mmap_state.sua.map; + desc = lookup_free_seg(da_map, ERTS_PAGEALIGNED_SIZE); + if (desc) { + if (mmap_state.reserve_physical(desc->start, ERTS_PAGEALIGNED_SIZE)) + ERTS_MMAP_SIZE_SC_SUA_INC(ERTS_PAGEALIGNED_SIZE); + else + desc = NULL; + + } + else { + da_map = &mmap_state.sa.map; + desc = lookup_free_seg(da_map, ERTS_PAGEALIGNED_SIZE); + if (desc) { + if (mmap_state.reserve_physical(desc->start, ERTS_PAGEALIGNED_SIZE)) + ERTS_MMAP_SIZE_SC_SA_INC(ERTS_PAGEALIGNED_SIZE); + else + desc = NULL; + } + } + if (desc) { + char *da_end = desc->start + ERTS_PAGEALIGNED_SIZE; + add_free_desc_area(desc->start, da_end); + if (da_end != desc->end) + resize_free_seg(da_map, desc, da_end, desc->end); + else { + delete_free_seg(da_map, desc); + free_desc(desc); + } + + desc = alloc_desc(); + ERTS_MMAP_ASSERT(desc); + insert_free_seg(map, desc, start, end); + return 0; + } + + /* + * ... 
and then as last resort use the first page of the + * free segment we are trying to insert for free descriptors. + */ + ptr = start + ERTS_PAGEALIGNED_SIZE; + ERTS_MMAP_ASSERT(ptr <= end); + + add_free_desc_area(start, ptr); + + if (ptr != end) { + desc = alloc_desc(); + ERTS_MMAP_ASSERT(desc); + insert_free_seg(map, desc, ptr, end); + } + + return ERTS_PAGEALIGNED_SIZE; +} + +void * +erts_mmap(Uint32 flags, UWord *sizep) +{ + char *seg; + UWord asize = ERTS_PAGEALIGNED_CEILING(*sizep); + + /* Map in premapped supercarrier */ + if (mmap_state.supercarrier && !(ERTS_MMAPFLG_OS_ONLY & flags)) { + char *end; + ErtsFreeSegDesc *desc; + Uint32 superaligned = (ERTS_MMAPFLG_SUPERALIGNED & flags); + + erts_smp_mtx_lock(&mmap_state.mtx); + + ERTS_MMAP_OP_START(*sizep); + + if (!superaligned) { + desc = lookup_free_seg(&mmap_state.sua.map, asize); + if (desc) { + seg = desc->start; + end = seg+asize; + if (!mmap_state.reserve_physical(seg, asize)) + goto supercarrier_reserve_failure; + if (desc->end == end) { + delete_free_seg(&mmap_state.sua.map, desc); + free_desc(desc); + } + else { + ERTS_MMAP_ASSERT(end < desc->end); + resize_free_seg(&mmap_state.sua.map, desc, end, desc->end); + } + ERTS_MMAP_SIZE_SC_SUA_INC(asize); + goto supercarrier_success; + } + + if (asize <= mmap_state.sua.bot - mmap_state.sa.top) { + if (!mmap_state.reserve_physical(mmap_state.sua.bot - asize, + asize)) + goto supercarrier_reserve_failure; + mmap_state.sua.bot -= asize; + seg = mmap_state.sua.bot; + ERTS_MMAP_SIZE_SC_SUA_INC(asize); + goto supercarrier_success; + } + } + + asize = ERTS_SUPERALIGNED_CEILING(asize); + + desc = lookup_free_seg(&mmap_state.sa.map, asize); + if (desc) { + char *start = seg = desc->start; + seg = (char *) ERTS_SUPERALIGNED_CEILING(seg); + end = seg+asize; + if (!mmap_state.reserve_physical(start, (UWord) (end - start))) + goto supercarrier_reserve_failure; + ERTS_MMAP_SIZE_SC_SA_INC(asize); + if (desc->end == end) { + if (start != seg) + resize_free_seg(&mmap_state.sa.map, desc, start, seg); + else { + delete_free_seg(&mmap_state.sa.map, desc); + free_desc(desc); + } + } + else { + ERTS_MMAP_ASSERT(end < desc->end); + resize_free_seg(&mmap_state.sa.map, desc, end, desc->end); + if (start != seg) { + UWord ad_sz; + ad_sz = alloc_desc_insert_free_seg(&mmap_state.sua.map, + start, seg); + start += ad_sz; + if (start != seg) + mmap_state.unreserve_physical(start, (UWord) (seg - start)); + } + } + goto supercarrier_success; + } + + if (superaligned) { + char *start = mmap_state.sa.top; + seg = (char *) ERTS_SUPERALIGNED_CEILING(start); + + if (asize + (seg - start) <= mmap_state.sua.bot - start) { + end = seg + asize; + if (!mmap_state.reserve_physical(start, (UWord) (end - start))) + goto supercarrier_reserve_failure; + mmap_state.sa.top = end; + ERTS_MMAP_SIZE_SC_SA_INC(asize); + if (start != seg) { + UWord ad_sz; + ad_sz = alloc_desc_insert_free_seg(&mmap_state.sua.map, + start, seg); + start += ad_sz; + if (start != seg) + mmap_state.unreserve_physical(start, (UWord) (seg - start)); + } + goto supercarrier_success; + } + + desc = lookup_free_seg(&mmap_state.sua.map, asize + ERTS_SUPERALIGNED_SIZE); + if (desc) { + char *org_start = desc->start; + char *org_end = desc->end; + + seg = (char *) ERTS_SUPERALIGNED_CEILING(org_start); + end = seg + asize; + if (!mmap_state.reserve_physical(seg, (UWord) (org_end - seg))) + goto supercarrier_reserve_failure; + ERTS_MMAP_SIZE_SC_SUA_INC(asize); + if (org_start != seg) { + ERTS_MMAP_ASSERT(org_start < seg); + resize_free_seg(&mmap_state.sua.map, desc, 
org_start, seg); + desc = NULL; + } + if (end != org_end) { + UWord ad_sz = 0; + ERTS_MMAP_ASSERT(end < org_end); + if (desc) + resize_free_seg(&mmap_state.sua.map, desc, end, org_end); + else + ad_sz = alloc_desc_insert_free_seg(&mmap_state.sua.map, + end, org_end); + end += ad_sz; + if (end != org_end) + mmap_state.unreserve_physical(end, + (UWord) (org_end - end)); + } + goto supercarrier_success; + } + } + + ERTS_MMAP_OP_ABORT(); + erts_smp_mtx_unlock(&mmap_state.mtx); + } + +#if ERTS_HAVE_OS_MMAP + /* Map using OS primitives */ + if (!(ERTS_MMAPFLG_SUPERCARRIER_ONLY & flags) && !mmap_state.no_os_mmap) { + if (!(ERTS_MMAPFLG_SUPERALIGNED & flags)) { + seg = os_mmap(NULL, asize, 0); + if (!seg) + goto failure; + } + else { + asize = ERTS_SUPERALIGNED_CEILING(*sizep); + seg = os_mmap(NULL, asize, 1); + if (!seg) + goto failure; + + if (!ERTS_IS_SUPERALIGNED(seg)) { + char *ptr; + UWord sz; + + os_munmap(seg, asize); + + ptr = os_mmap(NULL, asize + ERTS_SUPERALIGNED_SIZE, 1); + if (!ptr) + goto failure; + + seg = (char *) ERTS_SUPERALIGNED_CEILING(ptr); + sz = (UWord) (seg - ptr); + ERTS_MMAP_ASSERT(sz <= ERTS_SUPERALIGNED_SIZE); + if (sz) + os_munmap(ptr, sz); + sz = ERTS_SUPERALIGNED_SIZE - sz; + if (sz) + os_munmap(seg+asize, sz); + } + } + + ERTS_MMAP_OP_LCK(seg, *sizep, asize); + ERTS_MMAP_SIZE_OS_INC(asize); + *sizep = asize; + return (void *) seg; + } +failure: +#endif + ERTS_MMAP_OP_LCK(NULL, *sizep, 0); + *sizep = 0; + return NULL; + +supercarrier_success: + +#ifdef ERTS_MMAP_DEBUG + if (ERTS_MMAPFLG_SUPERALIGNED & flags) { + ERTS_MMAP_ASSERT(ERTS_IS_SUPERALIGNED(seg)); + ERTS_MMAP_ASSERT(ERTS_IS_SUPERALIGNED(asize)); + } + else { + ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(seg)); + ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(asize)); + } +#endif + + ERTS_MMAP_OP_END(seg, asize); + erts_smp_mtx_unlock(&mmap_state.mtx); + + *sizep = asize; + return (void *) seg; + +supercarrier_reserve_failure: + erts_smp_mtx_unlock(&mmap_state.mtx); + *sizep = 0; + return NULL; +} + +void +erts_munmap(Uint32 flags, void *ptr, UWord size) +{ + ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(ptr)); + ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(size)); + + if (!ERTS_MMAP_IN_SUPERCARRIER(ptr)) { + ERTS_MMAP_ASSERT(!mmap_state.no_os_mmap); +#if ERTS_HAVE_OS_MMAP + ERTS_MUNMAP_OP_LCK(ptr, size); + ERTS_MMAP_SIZE_OS_DEC(size); + os_munmap(ptr, size); +#endif + } + else { + char *start, *end; + ErtsFreeSegMap *map; + ErtsFreeSegDesc *prev, *next, *desc; + UWord ad_sz = 0; + + ERTS_MMAP_ASSERT(mmap_state.supercarrier); + + start = (char *) ptr; + end = start + size; + + erts_smp_mtx_lock(&mmap_state.mtx); + + ERTS_MUNMAP_OP(ptr, size); + + if (ERTS_MMAP_IN_SUPERALIGNED_AREA(ptr)) { + + map = &mmap_state.sa.map; + adjacent_free_seg(map, start, end, &prev, &next); + + ERTS_MMAP_SIZE_SC_SA_DEC(size); + if (end == mmap_state.sa.top) { + ERTS_MMAP_ASSERT(!next); + if (prev) { + start = prev->start; + delete_free_seg(map, prev); + free_desc(prev); + } + mmap_state.sa.top = start; + goto supercarrier_success; + } + } + else { + map = &mmap_state.sua.map; + adjacent_free_seg(map, start, end, &prev, &next); + + ERTS_MMAP_SIZE_SC_SUA_DEC(size); + if (start == mmap_state.sua.bot) { + ERTS_MMAP_ASSERT(!prev); + if (next) { + end = next->end; + delete_free_seg(map, next); + free_desc(next); + } + mmap_state.sua.bot = end; + goto supercarrier_success; + } + } + + desc = NULL; + + if (next) { + ERTS_MMAP_ASSERT(end < next->end); + end = next->end; + if (prev) { + delete_free_seg(map, next); + free_desc(next); + goto save_prev; + } + desc = next; + } 
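+ /* The freed range adjoins a preceding free segment only (or both
+ * neighbours, in which case 'next' was merged away above): grow
+ * that segment over the freed range. */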
else if (prev) { + save_prev: + ERTS_MMAP_ASSERT(prev->start < start); + start = prev->start; + desc = prev; + } + + if (desc) + resize_free_seg(map, desc, start, end); + else + ad_sz = alloc_desc_insert_free_seg(map, start, end); + + supercarrier_success: { + UWord unres_sz; + + ERTS_MMAP_ASSERT(size >= ad_sz); + unres_sz = size - ad_sz; + if (unres_sz) + mmap_state.unreserve_physical(((char *) ptr) + ad_sz, unres_sz); + + erts_smp_mtx_unlock(&mmap_state.mtx); + } + } +} + +static void * +remap_move(Uint32 flags, void *ptr, UWord old_size, UWord *sizep) +{ + UWord size = *sizep; + void *new_ptr = erts_mmap(flags, &size); + if (!new_ptr) + return NULL; + *sizep = size; + if (old_size < size) + size = old_size; + sys_memcpy(new_ptr, ptr, (size_t) size); + erts_munmap(flags, ptr, old_size); + return new_ptr; +} + +void * +erts_mremap(Uint32 flags, void *ptr, UWord old_size, UWord *sizep) +{ + void *new_ptr; + Uint32 superaligned; + UWord asize; + + ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(ptr)); + ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(old_size)); + ERTS_MMAP_ASSERT(sizep && ERTS_IS_PAGEALIGNED(*sizep)); + + if (!ERTS_MMAP_IN_SUPERCARRIER(ptr)) { + + ERTS_MMAP_ASSERT(!mmap_state.no_os_mmap); + + if (!(ERTS_MMAPFLG_OS_ONLY & flags) && mmap_state.supercarrier) { + new_ptr = remap_move(ERTS_MMAPFLG_SUPERCARRIER_ONLY|flags, ptr, + old_size, sizep); + if (new_ptr) + return new_ptr; + } + + if (ERTS_MMAPFLG_SUPERCARRIER_ONLY & flags) { + ERTS_MREMAP_OP_LCK(NULL, ptr, old_size, *sizep, old_size); + return NULL; + } + +#if ERTS_HAVE_OS_MREMAP || ERTS_HAVE_GENUINE_OS_MMAP + superaligned = (ERTS_MMAPFLG_SUPERALIGNED & flags); + + if (superaligned) { + asize = ERTS_SUPERALIGNED_CEILING(*sizep); + if (asize == old_size && ERTS_IS_SUPERALIGNED(ptr)) { + ERTS_MREMAP_OP_LCK(ptr, ptr, old_size, *sizep, asize); + *sizep = asize; + return ptr; + } + } + else { + asize = ERTS_PAGEALIGNED_CEILING(*sizep); + if (asize == old_size) { + ERTS_MREMAP_OP_LCK(ptr, ptr, old_size, *sizep, asize); + *sizep = asize; + return ptr; + } + } + +#if ERTS_HAVE_GENUINE_OS_MMAP + if (asize < old_size + && (!superaligned + || ERTS_IS_SUPERALIGNED(ptr))) { + UWord um_sz; + new_ptr = ((char *) ptr) + asize; + ERTS_MMAP_ASSERT((((char *)ptr) + old_size) > (char *) new_ptr); + um_sz = (UWord) ((((char *) ptr) + old_size) - (char *) new_ptr); + ERTS_MMAP_SIZE_OS_DEC(um_sz); + os_munmap(new_ptr, um_sz); + ERTS_MREMAP_OP_LCK(ptr, ptr, old_size, *sizep, asize); + *sizep = asize; + return ptr; + } +#endif +#if ERTS_HAVE_OS_MREMAP + if (superaligned) + return remap_move(flags, new_ptr, old_size, sizep); + else { + new_ptr = os_mremap(ptr, old_size, asize, 0); + if (!new_ptr) + return NULL; + if (asize > old_size) + ERTS_MMAP_SIZE_OS_INC(asize - old_size); + else + ERTS_MMAP_SIZE_OS_DEC(old_size - asize); + ERTS_MREMAP_OP_LCK(new_ptr, ptr, old_size, *sizep, asize); + *sizep = asize; + return new_ptr; + } +#endif +#endif + } + else { /* In super carrier */ + char *start, *end, *new_end; + ErtsFreeSegMap *map; + ErtsFreeSegDesc *prev, *next; + UWord ad_sz = 0; + + ERTS_MMAP_ASSERT(mmap_state.supercarrier); + + if (ERTS_MMAPFLG_OS_ONLY & flags) + return remap_move(flags, ptr, old_size, sizep); + + superaligned = (ERTS_MMAPFLG_SUPERALIGNED & flags); + + asize = (superaligned + ? ERTS_SUPERALIGNED_CEILING(*sizep) + : ERTS_PAGEALIGNED_CEILING(*sizep)); + + erts_smp_mtx_lock(&mmap_state.mtx); + + if (ERTS_MMAP_IN_SUPERALIGNED_AREA(ptr) + ? 
(!superaligned && lookup_free_seg(&mmap_state.sua.map, asize)) + : (superaligned && lookup_free_seg(&mmap_state.sa.map, asize))) { + erts_smp_mtx_unlock(&mmap_state.mtx); + /* + * Segment currently in wrong area (due to a previous memory + * shortage), move it to the right area. + * (remap_move() will succeed) + */ + return remap_move(ERTS_MMAPFLG_SUPERCARRIER_ONLY|flags, ptr, + old_size, sizep); + } + + ERTS_MREMAP_OP_START(ptr, old_size, *sizep); + + if (asize == old_size) { + new_ptr = ptr; + goto supercarrier_resize_success; + } + + start = (char *) ptr; + end = start + old_size; + new_end = start+asize; + + ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(ptr)); + ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(old_size)); + ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(asize)); + + if (asize < old_size) { + UWord unres_sz; + new_ptr = ptr; + if (!ERTS_MMAP_IN_SUPERALIGNED_AREA(ptr)) { + map = &mmap_state.sua.map; + ERTS_MMAP_SIZE_SC_SUA_DEC(old_size - asize); + } + else { + if (end == mmap_state.sa.top) { + mmap_state.sa.top = new_end; + mmap_state.unreserve_physical(((char *) ptr) + asize, + old_size - asize); + goto supercarrier_resize_success; + } + ERTS_MMAP_SIZE_SC_SA_DEC(old_size - asize); + map = &mmap_state.sa.map; + } + + adjacent_free_seg(map, start, end, &prev, &next); + + if (next) + resize_free_seg(map, next, new_end, next->end); + else + ad_sz = alloc_desc_insert_free_seg(map, new_end, end); + ERTS_MMAP_ASSERT(old_size - asize >= ad_sz); + unres_sz = old_size - asize - ad_sz; + if (unres_sz) + mmap_state.unreserve_physical(((char *) ptr) + asize + ad_sz, + unres_sz); + goto supercarrier_resize_success; + } + + if (!ERTS_MMAP_IN_SUPERALIGNED_AREA(ptr)) { + ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(ptr)); + ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(old_size)); + ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(asize)); + + adjacent_free_seg(&mmap_state.sua.map, start, end, &prev, &next); + + if (next && new_end <= next->end) { + if (!mmap_state.reserve_physical(((char *) ptr) + old_size, + asize - old_size)) + goto supercarrier_reserve_failure; + if (new_end < next->end) + resize_free_seg(&mmap_state.sua.map, next, new_end, next->end); + else { + delete_free_seg(&mmap_state.sua.map, next); + free_desc(next); + } + new_ptr = ptr; + ERTS_MMAP_SIZE_SC_SUA_INC(asize - old_size); + goto supercarrier_resize_success; + } + } + else { /* Superaligned area */ + + if (end == mmap_state.sa.top) { + if (new_end <= mmap_state.sua.bot) { + if (!mmap_state.reserve_physical(((char *) ptr) + old_size, + asize - old_size)) + goto supercarrier_reserve_failure; + mmap_state.sa.top = new_end; + new_ptr = ptr; + ERTS_MMAP_SIZE_SC_SA_INC(asize - old_size); + goto supercarrier_resize_success; + } + } + else { + adjacent_free_seg(&mmap_state.sa.map, start, end, &prev, &next); + if (next && new_end <= next->end) { + if (!mmap_state.reserve_physical(((char *) ptr) + old_size, + asize - old_size)) + goto supercarrier_reserve_failure; + if (new_end < next->end) + resize_free_seg(&mmap_state.sa.map, next, new_end, next->end); + else { + delete_free_seg(&mmap_state.sa.map, next); + free_desc(next); + } + new_ptr = ptr; + ERTS_MMAP_SIZE_SC_SA_INC(asize - old_size); + goto supercarrier_resize_success; + } + } + } + + ERTS_MMAP_OP_ABORT(); + erts_smp_mtx_unlock(&mmap_state.mtx); + + /* Failed to resize... 
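+ * Fall back to moving the segment: erts_mmap() a new area, copy the
+ * contents and erts_munmap() the old one (see remap_move()).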
*/ + } + + return remap_move(flags, ptr, old_size, sizep); + +supercarrier_resize_success: + +#ifdef ERTS_MMAP_DEBUG + if ((ERTS_MMAPFLG_SUPERALIGNED & flags) + || ERTS_MMAP_IN_SUPERALIGNED_AREA(new_ptr)) { + ERTS_MMAP_ASSERT(ERTS_IS_SUPERALIGNED(new_ptr)); + ERTS_MMAP_ASSERT(ERTS_IS_SUPERALIGNED(asize)); + } + else { + ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(new_ptr)); + ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(asize)); + } +#endif + + ERTS_MREMAP_OP_END(new_ptr, asize); + erts_smp_mtx_unlock(&mmap_state.mtx); + + *sizep = asize; + return new_ptr; + +supercarrier_reserve_failure: + ERTS_MREMAP_OP_END(NULL, old_size); + erts_smp_mtx_unlock(&mmap_state.mtx); + *sizep = old_size; + return NULL; + +} + +int erts_mmap_in_supercarrier(void *ptr) +{ + return ERTS_MMAP_IN_SUPERCARRIER(ptr); +} + + +static struct { + Eterm total; + Eterm total_sa; + Eterm total_sua; + Eterm used; + Eterm used_sa; + Eterm used_sua; + Eterm max; + Eterm allocated; + Eterm reserved; + Eterm sizes; + Eterm free_segs; + Eterm supercarrier; + Eterm os; + Eterm scs; + Eterm sco; + Eterm scrpm; + Eterm scrfsd; + + int is_initialized; + erts_mtx_t init_mutex; +}am; + +static void ERTS_INLINE atom_init(Eterm *atom, char *name) +{ + *atom = am_atom_put(name, strlen(name)); +} +#define AM_INIT(AM) atom_init(&am.AM, #AM) + +static void init_atoms(void) +{ + erts_mtx_lock(&am.init_mutex); + + if (!am.is_initialized) { + AM_INIT(total); + AM_INIT(total_sa); + AM_INIT(total_sua); + AM_INIT(used); + AM_INIT(used_sa); + AM_INIT(used_sua); + AM_INIT(max); + AM_INIT(allocated); + AM_INIT(reserved); + AM_INIT(sizes); + AM_INIT(free_segs); + AM_INIT(supercarrier); + AM_INIT(os); + AM_INIT(scs); + AM_INIT(sco); + AM_INIT(scrpm); + AM_INIT(scrfsd); + am.is_initialized = 1; + } + erts_mtx_unlock(&am.init_mutex); +}; + + +#ifdef HARD_DEBUG_MSEG +static void hard_dbg_mseg_init(void); +#endif + +void +erts_mmap_init(ErtsMMapInit *init) +{ + int virtual_map = 0; + char *start = NULL, *end = NULL; + UWord pagesize; +#if defined(__WIN32__) + SYSTEM_INFO sysinfo; + GetSystemInfo(&sysinfo); + pagesize = (UWord) sysinfo.dwPageSize; +#elif defined(_SC_PAGESIZE) + pagesize = (UWord) sysconf(_SC_PAGESIZE); +#elif defined(HAVE_GETPAGESIZE) + pagesize = (UWord) getpagesize(); +#else +# error "Do not know how to get page size" +#endif +#if defined(HARD_DEBUG) || 0 + erts_fprintf(stderr, "erts_mmap: scs = %bpu\n", init->scs); + erts_fprintf(stderr, "erts_mmap: sco = %i\n", init->sco); + erts_fprintf(stderr, "erts_mmap: scrfsd = %i\n", init->scrfsd); +#endif + erts_page_inv_mask = pagesize - 1; + if (pagesize & erts_page_inv_mask) + erl_exit(-1, "erts_mmap: Invalid pagesize: %bpu\n", + pagesize); + + ERTS_MMAP_OP_RINGBUF_INIT(); + + erts_have_erts_mmap = 0; + + mmap_state.supercarrier = 0; + mmap_state.reserve_physical = reserve_noop; + mmap_state.unreserve_physical = unreserve_noop; + +#if HAVE_MMAP && !defined(MAP_ANON) + mmap_state.mmap_fd = open("/dev/zero", O_RDWR); + if (mmap_state.mmap_fd < 0) + erl_exit(-1, "erts_mmap: Failed to open /dev/zero\n"); +#endif + + erts_smp_mtx_init(&mmap_state.mtx, "erts_mmap"); + erts_mtx_init(&am.init_mutex, "mmap_init_atoms"); + +#ifdef ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION + if (init->virtual_range.start) { + char *ptr; + UWord sz; + ptr = (char *) ERTS_PAGEALIGNED_CEILING(init->virtual_range.start); + end = (char *) ERTS_PAGEALIGNED_FLOOR(init->virtual_range.end); + sz = end - ptr; + start = os_mmap_virtual(ptr, sz); + if (!start || start > ptr || start >= end) + erl_exit(-1, + "erts_mmap: Failed to create virtual 
range for super carrier\n"); + sz = start - ptr; + if (sz) + os_munmap(end, sz); + mmap_state.reserve_physical = os_reserve_physical; + mmap_state.unreserve_physical = os_unreserve_physical; + virtual_map = 1; + } + else +#endif + if (init->predefined_area.start) { + start = init->predefined_area.start; + end = init->predefined_area.end; + if (end != (void *) 0 && end < start) + end = start; + } +#if ERTS_HAVE_OS_MMAP + else if (init->scs) { + UWord sz; + sz = ERTS_PAGEALIGNED_CEILING(init->scs); +#ifdef ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION + if (!init->scrpm) { + start = os_mmap_virtual(NULL, sz); + mmap_state.reserve_physical = os_reserve_physical; + mmap_state.unreserve_physical = os_unreserve_physical; + virtual_map = 1; + } + else +#endif + { + /* + * The whole supercarrier will by physically + * reserved all the time. + */ + start = os_mmap(NULL, sz, 1); + } + if (!start) + erl_exit(-1, + "erts_mmap: Failed to create super carrier of size %bpu MB\n", + init->scs/1024/1024); + end = start + sz; +#ifdef ERTS_MMAP_DEBUG_FILL_AREAS + if (!virtual_map) { + Uint32 *uip; + + for (uip = (Uint32 *) start; uip < (Uint32 *) end; uip++) + *uip = (Uint32) 0xdeadbeef; + } +#endif + } + if (!mmap_state.no_os_mmap) + erts_have_erts_mmap |= ERTS_HAVE_ERTS_OS_MMAP; +#endif + + mmap_state.no.free_seg_descs = 0; + mmap_state.no.free_segs.curr = 0; + mmap_state.no.free_segs.max = 0; + + mmap_state.size.supercarrier.total = 0; + mmap_state.size.supercarrier.used.total = 0; + mmap_state.size.supercarrier.used.sa = 0; + mmap_state.size.supercarrier.used.sua = 0; + mmap_state.size.os.used = 0; + + mmap_state.desc.new_area_hint = NULL; + + if (!start) { + mmap_state.sa.bot = NULL; + mmap_state.sua.top = NULL; + mmap_state.sa.bot = NULL; + mmap_state.sua.top = NULL; + mmap_state.no_os_mmap = 0; + mmap_state.supercarrier = 0; + } + else { + size_t desc_size; + + mmap_state.no_os_mmap = init->sco; + + desc_size = init->scrfsd; + if (desc_size < 100) + desc_size = 100; + desc_size *= sizeof(ErtsFreeSegDesc); + if ((desc_size + + ERTS_SUPERALIGNED_SIZE + + ERTS_PAGEALIGNED_SIZE) > end - start) + erl_exit(-1, "erts_mmap: No space for segments in super carrier\n"); + + mmap_state.sa.bot = start; + mmap_state.sa.bot += desc_size; + mmap_state.sa.bot = (char *) ERTS_SUPERALIGNED_CEILING(mmap_state.sa.bot); + mmap_state.sa.top = mmap_state.sa.bot; + mmap_state.sua.top = end; + mmap_state.sua.bot = mmap_state.sua.top; + + mmap_state.size.supercarrier.used.total += (UWord) (mmap_state.sa.bot - start); + + mmap_state.desc.free_list = NULL; + mmap_state.desc.reserved = 0; + + if (end == (void *) 0) { + /* + * Very unlikely, but we need a guarantee + * that `mmap_state.sua.top` always will + * compare as larger than all segment pointers + * into the super carrier... + */ + mmap_state.sua.top -= ERTS_PAGEALIGNED_SIZE; + mmap_state.size.supercarrier.used.total += ERTS_PAGEALIGNED_SIZE; +#ifdef ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION + if (!virtual_map || os_reserve_physical(mmap_state.sua.top, ERTS_PAGEALIGNED_SIZE)) +#endif + add_free_desc_area(mmap_state.sua.top, end); + mmap_state.desc.reserved += (end - mmap_state.sua.top) / sizeof(ErtsFreeSegDesc); + } + + mmap_state.size.supercarrier.total = (UWord) (mmap_state.sua.top - start); + + /* + * Area before (and after) super carrier + * will be used for free segment descritors. 
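+ * The size of this area is determined by the 'scrfsd' setting
+ * (number of reserved free segment descriptors), with a lower
+ * bound of 100 descriptors.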
+ */ +#ifdef ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION + if (virtual_map && !os_reserve_physical(start, mmap_state.sa.bot - start)) + erl_exit(-1, "erts_mmap: Failed to reserve physical memory for descriptors\n"); +#endif + mmap_state.desc.unused_start = start; + mmap_state.desc.unused_end = mmap_state.sa.bot; + mmap_state.desc.reserved += ((mmap_state.desc.unused_end - start) + / sizeof(ErtsFreeSegDesc)); + + init_free_seg_map(&mmap_state.sa.map, SA_SZ_ADDR_ORDER); + init_free_seg_map(&mmap_state.sua.map, SZ_REVERSE_ADDR_ORDER); + + mmap_state.supercarrier = 1; + erts_have_erts_mmap |= ERTS_HAVE_ERTS_SUPERCARRIER_MMAP; + + mmap_state.desc.new_area_hint = end; + + } + +#if !ERTS_HAVE_OS_MMAP + mmap_state.no_os_mmap = 1; +#endif + +#ifdef HARD_DEBUG_MSEG + hard_dbg_mseg_init(); +#endif +} + + +static ERTS_INLINE void +add_2tup(Uint **hpp, Uint *szp, Eterm *lp, Eterm el1, Eterm el2) +{ + *lp = erts_bld_cons(hpp, szp, erts_bld_tuple(hpp, szp, 2, el1, el2), *lp); +} + +Eterm erts_mmap_info(int *print_to_p, + void *print_to_arg, + Eterm** hpp, Uint* szp, + struct erts_mmap_info_struct* emis) +{ + Eterm size_tags[] = { am.total, am.total_sa, am.total_sua, am.used, am.used_sa, am.used_sua }; + Eterm seg_tags[] = { am.used, am.max, am.allocated, am.reserved, am.used_sa, am.used_sua }; + Eterm group[2]; + Eterm group_tags[] = { am.sizes, am.free_segs }; + Eterm list[2]; + Eterm list_tags[2]; /* { am.supercarrier, am.os } */ + int lix; + Eterm res = THE_NON_VALUE; + + if (!hpp) { + erts_smp_mtx_lock(&mmap_state.mtx); + emis->sizes[0] = mmap_state.size.supercarrier.total; + emis->sizes[1] = mmap_state.sa.top - mmap_state.sa.bot; + emis->sizes[2] = mmap_state.sua.top - mmap_state.sua.bot; + emis->sizes[3] = mmap_state.size.supercarrier.used.total; + emis->sizes[4] = mmap_state.size.supercarrier.used.sa; + emis->sizes[5] = mmap_state.size.supercarrier.used.sua; + + emis->segs[0] = mmap_state.no.free_segs.curr; + emis->segs[1] = mmap_state.no.free_segs.max; + emis->segs[2] = mmap_state.no.free_seg_descs; + emis->segs[3] = mmap_state.desc.reserved; + emis->segs[4] = mmap_state.sa.map.nseg; + emis->segs[5] = mmap_state.sua.map.nseg; + + emis->os_used = mmap_state.size.os.used; + erts_smp_mtx_unlock(&mmap_state.mtx); + } + + if (print_to_p) { + int to = *print_to_p; + void *arg = print_to_arg; + if (mmap_state.supercarrier) { + const char* prefix = "supercarrier "; + erts_print(to, arg, "%stotal size: %bpu\n", prefix, emis->sizes[0]); + erts_print(to, arg, "%stotal sa size: %bpu\n", prefix, emis->sizes[1]); + erts_print(to, arg, "%stotal sua size: %bpu\n", prefix, emis->sizes[2]); + erts_print(to, arg, "%sused size: %bpu\n", prefix, emis->sizes[3]); + erts_print(to, arg, "%sused sa size: %bpu\n", prefix, emis->sizes[4]); + erts_print(to, arg, "%sused sua size: %bpu\n", prefix, emis->sizes[5]); + erts_print(to, arg, "%sused free segs: %bpu\n", prefix, emis->segs[0]); + erts_print(to, arg, "%smax free segs: %bpu\n", prefix, emis->segs[1]); + erts_print(to, arg, "%sallocated free segs: %bpu\n", prefix, emis->segs[2]); + erts_print(to, arg, "%sreserved free segs: %bpu\n", prefix, emis->segs[3]); + erts_print(to, arg, "%ssa free segs: %bpu\n", prefix, emis->segs[4]); + erts_print(to, arg, "%ssua free segs: %bpu\n", prefix, emis->segs[5]); + } + if (!mmap_state.no_os_mmap) { + erts_print(to, arg, "os mmap size used: %bpu\n", emis->os_used); + } + } + + + if (hpp || szp) { + if (!am.is_initialized) { + init_atoms(); + } + + lix = 0; + if (mmap_state.supercarrier) { + group[0] = erts_bld_atom_uword_2tup_list(hpp, 
szp, + sizeof(size_tags)/sizeof(Eterm), + size_tags, emis->sizes); + group[1] = erts_bld_atom_uword_2tup_list(hpp, szp, + sizeof(seg_tags)/sizeof(Eterm), + seg_tags, emis->segs); + list[lix] = erts_bld_2tup_list(hpp, szp, 2, group_tags, group); + list_tags[lix] = am.supercarrier; + lix++; + } + + if (!mmap_state.no_os_mmap) { + group[0] = erts_bld_atom_uword_2tup_list(hpp, szp, + 1, &am.used, &emis->os_used); + list[lix] = erts_bld_2tup_list(hpp, szp, 1, group_tags, group); + list_tags[lix] = am.os; + lix++; + } + res = erts_bld_2tup_list(hpp, szp, lix, list_tags, list); + } + return res; +} + +Eterm erts_mmap_info_options(char *prefix, + int *print_to_p, + void *print_to_arg, + Uint **hpp, + Uint *szp) +{ + const UWord scs = mmap_state.sua.top - mmap_state.sa.bot; + const Eterm sco = mmap_state.no_os_mmap ? am_true : am_false; + const Eterm scrpm = (mmap_state.reserve_physical == reserve_noop) ? am_true : am_false; + Eterm res = THE_NON_VALUE; + + if (print_to_p) { + int to = *print_to_p; + void *arg = print_to_arg; + erts_print(to, arg, "%sscs: %bpu\n", prefix, scs); + if (mmap_state.supercarrier) { + erts_print(to, arg, "%ssco: %T\n", prefix, sco); + erts_print(to, arg, "%sscrpm: %T\n", prefix, scrpm); + erts_print(to, arg, "%sscrfsd: %beu\n", prefix, mmap_state.desc.reserved); + } + } + + if (hpp || szp) { + if (!am.is_initialized) { + init_atoms(); + } + + res = NIL; + if (mmap_state.supercarrier) { + add_2tup(hpp, szp, &res, am.scrfsd, + erts_bld_uint(hpp,szp, mmap_state.desc.reserved)); + add_2tup(hpp, szp, &res, am.scrpm, scrpm); + add_2tup(hpp, szp, &res, am.sco, sco); + } + add_2tup(hpp, szp, &res, am.scs, erts_bld_uword(hpp, szp, scs)); + } + return res; +} + + +Eterm erts_mmap_debug_info(Process* p) +{ + if (mmap_state.supercarrier) { + ERTS_DECL_AM(sabot); + ERTS_DECL_AM(satop); + ERTS_DECL_AM(suabot); + ERTS_DECL_AM(suatop); + Eterm sa_list, sua_list, list; + Eterm tags[] = { AM_sabot, AM_satop, AM_suabot, AM_suatop }; + UWord values[4]; + Eterm *hp, *hp_end; + Uint may_need; + const Uint PTR_BIG_SZ = HALFWORD_HEAP ? 
3 : 2;
+
+ erts_smp_mtx_lock(&mmap_state.mtx);
+ values[0] = (UWord)mmap_state.sa.bot;
+ values[1] = (UWord)mmap_state.sa.top;
+ values[2] = (UWord)mmap_state.sua.bot;
+ values[3] = (UWord)mmap_state.sua.top;
+ sa_list = build_free_seg_list(p, &mmap_state.sa.map);
+ sua_list = build_free_seg_list(p, &mmap_state.sua.map);
+ erts_smp_mtx_unlock(&mmap_state.mtx);
+
+ may_need = 4*(2+3+PTR_BIG_SZ) + 2*(2+3);
+ hp = HAlloc(p, may_need);
+ hp_end = hp + may_need;
+
+ list = erts_bld_atom_uword_2tup_list(&hp, NULL,
+ sizeof(values)/sizeof(*values),
+ tags, values);
+
+ sa_list = TUPLE2(hp, am_atom_put("sa_free_segs",12), sa_list); hp+=3;
+ sua_list = TUPLE2(hp, am_atom_put("sua_free_segs",13), sua_list); hp+=3;
+ list = CONS(hp, sua_list, list); hp+=2;
+ list = CONS(hp, sa_list, list); hp+=2;
+
+ ASSERT(hp <= hp_end);
+ HRelease(p, hp_end, hp);
+ return list;
+ }
+ else {
+ return am_undefined;
+ }
+}
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Debug functions *
+\* */
+
+
+#ifdef HARD_DEBUG
+
+static int rbt_assert_is_member(RBTNode* root, RBTNode* node)
+{
+ while (node != root) {
+ RBT_ASSERT(parent(node));
+ RBT_ASSERT(parent(node)->left == node || parent(node)->right == node);
+ node = parent(node);
+ }
+ return 1;
+}
+
+
+#if 0
+# define PRINT_TREE
+#else
+# undef PRINT_TREE
+#endif
+
+#ifdef PRINT_TREE
+static void print_tree(enum SortOrder order, RBTNode*);
+#endif
+
+/*
+ * Checks that the order between parent and children is correct,
+ * and that the Red-Black Tree properties are satisfied. If size > 0,
+ * check_tree() returns the node that satisfies "address order first fit".
+ *
+ * The Red-Black Tree properties are:
+ * 1. Every node is either red or black.
+ * 2. Every leaf (NIL) is black.
+ * 3. If a node is red, then both its children are black.
+ * 4. Every simple path from a node to a descendant leaf
+ * contains the same number of black nodes.
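+ * 5. The root is black (checked explicitly by check_tree() below).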
+ * + */ + +struct check_arg_t { + RBTree* tree; + ErtsFreeSegDesc* prev_seg; + Uint size; + RBTNode *res; +}; +static void check_node_callback(RBTNode* x, void* arg); + + +static RBTNode * +check_tree(RBTree* tree, Uint size) +{ + struct check_arg_t carg; + carg.tree = tree; + carg.prev_seg = NULL; + carg.size = size; + carg.res = NULL; + +#ifdef PRINT_TREE + print_tree(tree->order, tree->root); +#endif + + if (!tree->root) + return NULL; + + RBT_ASSERT(IS_BLACK(tree->root)); + RBT_ASSERT(!parent(tree->root)); + + rbt_foreach_node(tree, check_node_callback, &carg, 0); + + return carg.res; +} + +static void check_node_callback(RBTNode* x, void* arg) +{ + struct check_arg_t* a = (struct check_arg_t*) arg; + ErtsFreeSegDesc* seg; + + if (IS_RED(x)) { + RBT_ASSERT(IS_BLACK(x->right)); + RBT_ASSERT(IS_BLACK(x->left)); + } + + RBT_ASSERT(parent(x) || x == a->tree->root); + + if (x->left) { + RBT_ASSERT(cmp_nodes(a->tree->order, x->left, x) < 0); + } + if (x->right) { + RBT_ASSERT(cmp_nodes(a->tree->order, x->right, x) > 0); + } + + seg = node_to_desc(a->tree->order, x); + RBT_ASSERT(seg->start < seg->end); + if (a->size && (seg->end - seg->start) >= a->size) { + if (!a->res || cmp_nodes(a->tree->order, x, a->res) < 0) { + a->res = x; + } + } + if (a->tree->order == ADDR_ORDER) { + RBT_ASSERT(!a->prev_seg || a->prev_seg->end < seg->start); + a->prev_seg = seg; + } +} + +#endif /* HARD_DEBUG */ + + +#ifdef PRINT_TREE +#define INDENT_STEP 2 + +#include <stdio.h> + +static void +print_tree_aux(enum SortOrder order, RBTNode *x, int indent) +{ + int i; + + if (x) { + ErtsFreeSegDesc* desc = node_to_desc(order, x); + print_tree_aux(order, x->right, indent + INDENT_STEP); + for (i = 0; i < indent; i++) { + putc(' ', stderr); + } + fprintf(stderr, "%s: sz=%lx [%p - %p] desc=%p\r\n", + IS_BLACK(x) ? 
"BLACK" : "RED", + desc->end - desc->start, desc->start, desc->end, desc); + print_tree_aux(order, x->left, indent + INDENT_STEP); + } +} + + +static void +print_tree(enum SortOrder order, RBTNode* root) +{ + fprintf(stderr, " --- %s ordered tree begin ---\r\n", sort_order_names[order]); + print_tree_aux(order, root, 0); + fprintf(stderr, " --- %s ordered tree end ---\r\n", sort_order_names[order]); +} + +#endif /* PRINT_TREE */ + + +#ifdef FREE_SEG_API_SMOKE_TEST + +void test_it(void) +{ + ErtsFreeSegMap map; + ErtsFreeSegDesc *desc, *under, *over, *d1, *d2; + const int i = 1; /* reverse addr order */ + + { + init_free_seg_map(&map, SZ_REVERSE_ADDR_ORDER); + + insert_free_seg(&map, alloc_desc(), (char*)0x11000, (char*)0x12000); + HARD_CHECK_TREE(&map.atree, 0); HARD_CHECK_TREE(&map.stree, 0); + insert_free_seg(&map, alloc_desc(), (char*)0x13000, (char*)0x14000); + HARD_CHECK_TREE(&map.atree, 0); HARD_CHECK_TREE(&map.stree, 0); + insert_free_seg(&map, alloc_desc(), (char*)0x15000, (char*)0x17000); + HARD_CHECK_TREE(&map.atree, 0); HARD_CHECK_TREE(&map.stree, 0); + insert_free_seg(&map, alloc_desc(), (char*)0x8000, (char*)0x10000); + HARD_CHECK_TREE(&map.atree, 0); HARD_CHECK_TREE(&map.stree, 0); + + desc = lookup_free_seg(&map, 0x500); + ERTS_ASSERT(desc->start == (char*)(i?0x13000L:0x11000L)); + + desc = lookup_free_seg(&map, 0x1500); + ERTS_ASSERT(desc->start == (char*)0x15000); + + adjacent_free_seg(&map, (char*)0x6666, (char*)0x7777, &under, &over); + ERTS_ASSERT(!under && !over); + + adjacent_free_seg(&map, (char*)0x6666, (char*)0x8000, &under, &over); + ERTS_ASSERT(!under && over->start == (char*)0x8000); + + adjacent_free_seg(&map, (char*)0x10000, (char*)0x10500, &under, &over); + ERTS_ASSERT(under->end == (char*)0x10000 && !over); + + adjacent_free_seg(&map, (char*)0x10100, (char*)0x10500, &under, &over); + ERTS_ASSERT(!under && !over); + + adjacent_free_seg(&map, (char*)0x10100, (char*)0x11000, &under, &over); + ERTS_ASSERT(!under && over && over->start == (char*)0x11000); + + adjacent_free_seg(&map, (char*)0x12000, (char*)0x12500, &under, &over); + ERTS_ASSERT(under && under->end == (char*)0x12000 && !over); + + adjacent_free_seg(&map, (char*)0x12000, (char*)0x13000, &under, &over); + ERTS_ASSERT(under && under->end == (char*)0x12000 && + over && over->start == (char*)0x13000); + + adjacent_free_seg(&map, (char*)0x12500, (char*)0x13000, &under, &over); + ERTS_ASSERT(!under && over && over->start == (char*)0x13000); + + d1 = lookup_free_seg(&map, 0x500); + ERTS_ASSERT(d1->start == (char*)(i?0x13000L:0x11000L)); + + resize_free_seg(&map, d1, d1->start - 0x800, (char*)d1->end); + HARD_CHECK_TREE(&map.atree, 0); HARD_CHECK_TREE(&map.stree, 0); + + d2 = lookup_free_seg(&map, 0x1200); + ERTS_ASSERT(d2 == d1); + + delete_free_seg(&map, d1); + HARD_CHECK_TREE(&map.atree, 0); HARD_CHECK_TREE(&map.stree, 0); + + d1 = lookup_free_seg(&map, 0x1200); + ERTS_ASSERT(d1->start == (char*)0x15000); + } +} + +#endif /* FREE_SEG_API_SMOKE_TEST */ + + +#ifdef HARD_DEBUG_MSEG + +/* + * Debug stuff used by erl_mseg to check that it does the right thing. + * The reason for keeping it here is that we (ab)use the rb-tree code + * for keeping track of *allocated* segments. 
+ */ + +typedef struct ErtsFreeSegDesc_fake_ { + /*RBTNode snode; Save memory by skipping unused size tree node */ + RBTNode anode; /* node in 'atree' */ + union { + char* start; + struct ErtsFreeSegDesc_fake_* next_free; + }u; + char* end; +}ErtsFreeSegDesc_fake; + +static ErtsFreeSegDesc_fake hard_dbg_mseg_desc_pool[10000]; +static ErtsFreeSegDesc_fake* hard_dbg_mseg_desc_first; +RBTree hard_dbg_mseg_tree; + +static erts_mtx_t hard_dbg_mseg_mtx; + +static void hard_dbg_mseg_init(void) +{ + ErtsFreeSegDesc_fake* p; + + erts_mtx_init(&hard_dbg_mseg_mtx, "hard_dbg_mseg"); + hard_dbg_mseg_tree.root = NULL; + hard_dbg_mseg_tree.order = ADDR_ORDER; + + p = &hard_dbg_mseg_desc_pool[(sizeof(hard_dbg_mseg_desc_pool) / + sizeof(*hard_dbg_mseg_desc_pool)) - 1]; + p->u.next_free = NULL; + while (--p >= hard_dbg_mseg_desc_pool) { + p->u.next_free = (p+1); + } + hard_dbg_mseg_desc_first = &hard_dbg_mseg_desc_pool[0]; +} + +static ErtsFreeSegDesc* hard_dbg_alloc_desc(void) +{ + ErtsFreeSegDesc_fake* p = hard_dbg_mseg_desc_first; + ERTS_ASSERT(p || !"HARD_DEBUG_MSEG: Out of mseg descriptors"); + hard_dbg_mseg_desc_first = p->u.next_free; + + /* Creative pointer arithmetic to return something that looks like + * a ErtsFreeSegDesc as long as we don't use the absent 'snode'. + */ + return (ErtsFreeSegDesc*) ((char*)p - offsetof(ErtsFreeSegDesc,anode)); +} + +static void hard_dbg_free_desc(ErtsFreeSegDesc* desc) +{ + ErtsFreeSegDesc_fake* p = (ErtsFreeSegDesc_fake*) &desc->anode; + memset(p, 0xfe, sizeof(*p)); + p->u.next_free = hard_dbg_mseg_desc_first; + hard_dbg_mseg_desc_first = p; +} + +static void check_seg_writable(void* seg, UWord sz) +{ + UWord* seg_end = (UWord*)((char*)seg + sz); + volatile UWord* p; + ERTS_ASSERT(ERTS_IS_PAGEALIGNED(seg)); + ERTS_ASSERT(ERTS_IS_PAGEALIGNED(sz)); + for (p=(UWord*)seg; p<seg_end; p += (ERTS_INV_PAGEALIGNED_MASK+1)/sizeof(UWord)) { + UWord write_back = *p; + *p = 0xfade2b1acc; + *p = write_back; + } +} + +void hard_dbg_insert_mseg(void* seg, UWord sz) +{ + check_seg_writable(seg, sz); + erts_mtx_lock(&hard_dbg_mseg_mtx); + { + ErtsFreeSegDesc *desc = hard_dbg_alloc_desc(); + RBTNode *prev, *next; + desc->start = (char*)seg; + desc->end = desc->start + sz - 1; /* -1 to allow adjacent segments in tree */ + rbt_insert(&hard_dbg_mseg_tree, &desc->anode); + prev = rbt_prev_node(&desc->anode); + next = rbt_next_node(&desc->anode); + ERTS_ASSERT(!prev || anode_to_desc(prev)->end < desc->start); + ERTS_ASSERT(!next || anode_to_desc(next)->start > desc->end); + } + erts_mtx_unlock(&hard_dbg_mseg_mtx); +} + +static ErtsFreeSegDesc* hard_dbg_lookup_seg_at(RBTree* tree, char* start) +{ + RBTNode* x = tree->root; + + while (x) { + ErtsFreeSegDesc* desc = anode_to_desc(x); + if (start < desc->start) { + x = x->left; + } + else if (start > desc->start) { + ERTS_ASSERT(start > desc->end); + x = x->right; + } + else + return desc; + } + return NULL; +} + +void hard_dbg_remove_mseg(void* seg, UWord sz) +{ + check_seg_writable(seg, sz); + erts_mtx_lock(&hard_dbg_mseg_mtx); + { + ErtsFreeSegDesc* desc = hard_dbg_lookup_seg_at(&hard_dbg_mseg_tree, (char*)seg); + ERTS_ASSERT(desc); + ERTS_ASSERT(desc->start == (char*)seg); + ERTS_ASSERT(desc->end == (char*)seg + sz - 1); + + rbt_delete(&hard_dbg_mseg_tree, &desc->anode); + hard_dbg_free_desc(desc); + } + erts_mtx_unlock(&hard_dbg_mseg_mtx); +} + +#endif /* HARD_DEBUG_MSEG */ diff --git a/erts/emulator/sys/common/erl_mmap.h b/erts/emulator/sys/common/erl_mmap.h new file mode 100644 index 0000000000..778a8e0e80 --- /dev/null +++ 
b/erts/emulator/sys/common/erl_mmap.h @@ -0,0 +1,134 @@ +/* + * %CopyrightBegin% + * + * Copyright Ericsson AB 2013. All Rights Reserved. + * + * The contents of this file are subject to the Erlang Public License, + * Version 1.1, (the "License"); you may not use this file except in + * compliance with the License. You should have received a copy of the + * Erlang Public License along with this software. If not, it can be + * retrieved online at http://www.erlang.org/. + * + * Software distributed under the License is distributed on an "AS IS" + * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + * the License for the specific language governing rights and limitations + * under the License. + * + * %CopyrightEnd% + */ + +#ifndef ERL_MMAP_H__ +#define ERL_MMAP_H__ + +#include "sys.h" + +#define ERTS_MMAP_SUPERALIGNED_BITS (18) +/* Affects hard limits for sbct and lmbcs documented in erts_alloc.xml */ + +#define ERTS_MMAPFLG_OS_ONLY (((Uint32) 1) << 0) +#define ERTS_MMAPFLG_SUPERCARRIER_ONLY (((Uint32) 1) << 1) +#define ERTS_MMAPFLG_SUPERALIGNED (((Uint32) 1) << 2) + +#define ERTS_HAVE_ERTS_OS_MMAP (1 << 0) +#define ERTS_HAVE_ERTS_SUPERCARRIER_MMAP (1 << 1) +extern int erts_have_erts_mmap; +extern UWord erts_page_inv_mask; + +typedef struct { + struct { + char *start; + char *end; + } virtual_range; + struct { + char *start; + char *end; + } predefined_area; + UWord scs; /* super carrier size */ + int sco; /* super carrier only? */ + UWord scrfsd; /* super carrier reserved free segment descriptors */ + int scrpm; /* super carrier reserve physical memory */ +}ErtsMMapInit; + +#define ERTS_MMAP_INIT_DEFAULT_INITER \ + {{NULL, NULL}, {NULL, NULL}, 0, 1, (1 << 16), 1} + +void *erts_mmap(Uint32 flags, UWord *sizep); +void erts_munmap(Uint32 flags, void *ptr, UWord size); +void *erts_mremap(Uint32 flags, void *ptr, UWord old_size, UWord *sizep); +int erts_mmap_in_supercarrier(void *ptr); +void erts_mmap_init(ErtsMMapInit*); +struct erts_mmap_info_struct +{ + UWord sizes[6]; + UWord segs[6]; + UWord os_used; +}; +Eterm erts_mmap_info(int *print_to_p, void *print_to_arg, + Eterm** hpp, Uint* szp, struct erts_mmap_info_struct*); +Eterm erts_mmap_info_options(char *prefix, int *print_to_p, void *print_to_arg, + Uint **hpp, Uint *szp); +struct process; +Eterm erts_mmap_debug_info(struct process*); + +#define ERTS_SUPERALIGNED_SIZE \ + (1 << ERTS_MMAP_SUPERALIGNED_BITS) +#define ERTS_INV_SUPERALIGNED_MASK \ + ((UWord) (ERTS_SUPERALIGNED_SIZE - 1)) +#define ERTS_SUPERALIGNED_MASK \ + (~ERTS_INV_SUPERALIGNED_MASK) +#define ERTS_SUPERALIGNED_FLOOR(X) \ + (((UWord) (X)) & ERTS_SUPERALIGNED_MASK) +#define ERTS_SUPERALIGNED_CEILING(X) \ + ERTS_SUPERALIGNED_FLOOR((X) + ERTS_INV_SUPERALIGNED_MASK) +#define ERTS_IS_SUPERALIGNED(X) \ + (((UWord) (X) & ERTS_INV_SUPERALIGNED_MASK) == 0) + +#define ERTS_INV_PAGEALIGNED_MASK \ + (erts_page_inv_mask) +#define ERTS_PAGEALIGNED_MASK \ + (~ERTS_INV_PAGEALIGNED_MASK) +#define ERTS_PAGEALIGNED_FLOOR(X) \ + (((UWord) (X)) & ERTS_PAGEALIGNED_MASK) +#define ERTS_PAGEALIGNED_CEILING(X) \ + ERTS_PAGEALIGNED_FLOOR((X) + ERTS_INV_PAGEALIGNED_MASK) +#define ERTS_IS_PAGEALIGNED(X) \ + (((UWord) (X) & ERTS_INV_PAGEALIGNED_MASK) == 0) +#define ERTS_PAGEALIGNED_SIZE \ + (ERTS_INV_PAGEALIGNED_MASK + 1) + +#ifndef HAVE_MMAP +# define HAVE_MMAP 0 +#endif +#ifndef HAVE_MREMAP +# define HAVE_MREMAP 0 +#endif +#if HAVE_MMAP +# define ERTS_HAVE_OS_MMAP 1 +# define ERTS_HAVE_GENUINE_OS_MMAP 1 +# if HAVE_MREMAP +# define ERTS_HAVE_OS_MREMAP 1 +# endif +# if 
defined(MAP_FIXED) && defined(MAP_NORESERVE) +# define ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION 1 +# endif +#endif + +#ifndef HAVE_VIRTUALALLOC +# define HAVE_VIRTUALALLOC 0 +#endif +#if HAVE_VIRTUALALLOC +# define ERTS_HAVE_OS_MMAP 1 +#endif + +/*#define HARD_DEBUG_MSEG*/ +#ifdef HARD_DEBUG_MSEG +# define HARD_DBG_INSERT_MSEG hard_dbg_insert_mseg +# define HARD_DBG_REMOVE_MSEG hard_dbg_remove_mseg +void hard_dbg_insert_mseg(void* seg, UWord sz); +void hard_dbg_remove_mseg(void* seg, UWord sz); +#else +# define HARD_DBG_INSERT_MSEG(SEG,SZ) +# define HARD_DBG_REMOVE_MSEG(SEG,SZ) +#endif + +#endif /* ERL_MMAP_H__ */ diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c index 2748edba02..94a381e168 100644 --- a/erts/emulator/sys/common/erl_mseg.c +++ b/erts/emulator/sys/common/erl_mseg.c @@ -100,45 +100,6 @@ static int atoms_initialized; typedef struct mem_kind_t MemKind; -#if HALFWORD_HEAP -static int initialize_pmmap(void); -static void *pmmap(size_t size); -static int pmunmap(void *p, size_t size); -static void *pmremap(void *old_address, size_t old_size, - size_t new_size); -#endif - -#if HAVE_MMAP -/* Mmap ... */ - -#define MMAP_PROT (PROT_READ|PROT_WRITE) - - -#ifdef MAP_ANON -# define MMAP_FLAGS (MAP_ANON|MAP_PRIVATE) -# define MMAP_FD (-1) -#else -# define MMAP_FLAGS (MAP_PRIVATE) -# define MMAP_FD mmap_fd -static int mmap_fd; -#endif - -#if HAVE_MREMAP -# define HAVE_MSEG_RECREATE 1 -#else -# define HAVE_MSEG_RECREATE 0 -#endif - -#if HALFWORD_HEAP -#define CAN_PARTLY_DESTROY 0 -#else -#define CAN_PARTLY_DESTROY 1 -#endif -#else /* #if HAVE_MMAP */ -#define CAN_PARTLY_DESTROY 0 -#error "Not supported" -#endif /* #if HAVE_MMAP */ - const ErtsMsegOpt_t erts_mseg_default_opt = { 1, /* Use cache */ 1, /* Preserv data */ @@ -163,9 +124,7 @@ typedef struct { CallCounter create; CallCounter create_resize; CallCounter destroy; -#if HAVE_MSEG_RECREATE CallCounter recreate; -#endif CallCounter clear_cache; CallCounter check_cache; } ErtsMsegCalls; @@ -173,7 +132,7 @@ typedef struct { typedef struct cache_t_ cache_t; struct cache_t_ { - Uint size; + UWord size; void *seg; cache_t *next; cache_t *prev; @@ -236,11 +195,6 @@ struct ErtsMsegAllctr_t_ { Uint rel_max_cache_bad_fit; ErtsMsegCalls calls; - -#if CAN_PARTLY_DESTROY - Uint min_seg_size; -#endif - }; typedef union { @@ -344,69 +298,31 @@ schedule_cache_check(ErtsMsegAllctr_t *ma) { } } -/* remove ErtsMsegAllctr_t from arguments? 
- * only used for statistics - */ -static ERTS_INLINE void * -mmap_align(ErtsMsegAllctr_t *ma, void *addr, size_t length, int prot, int flags, int fd, off_t offset) { - - char *p, *q; - UWord d; - - p = mmap(addr, length, prot, flags, fd, offset); - - if (MAP_IS_ALIGNED(p) || p == MAP_FAILED) - return p; - - if (ma) - INC_CC(ma, create_resize); - - munmap(p, length); - - if ((p = mmap(addr, length + MSEG_ALIGNED_SIZE, prot, flags, fd, offset)) == MAP_FAILED) - return MAP_FAILED; - - q = (void *)ALIGNED_CEILING((char *)p); - d = (UWord)(q - p); - - if (d > 0) - munmap(p, d); - - if (MSEG_ALIGNED_SIZE - d > 0) - munmap((void *)(q + length), MSEG_ALIGNED_SIZE - d); - - return q; -} +/* #define ERTS_PRINT_ERTS_MMAP */ static ERTS_INLINE void * -mseg_create(ErtsMsegAllctr_t *ma, MemKind* mk, Uint size) +mseg_create(ErtsMsegAllctr_t *ma, Uint flags, MemKind* mk, UWord *sizep) { +#ifdef ERTS_PRINT_ERTS_MMAP + UWord req_size = *sizep; +#endif void *seg; - ASSERT(size % MSEG_ALIGNED_SIZE == 0); - + Uint32 mmap_flags = 0; #if HALFWORD_HEAP - if (mk == &ma->low_mem) { - seg = pmmap(size); - if ((unsigned long) seg & CHECK_POINTER_MASK) { - erts_fprintf(stderr,"Pointer mask failure (0x%08lx)\n",(unsigned long) seg); - return NULL; - } - } else + mmap_flags |= ((mk == &ma->low_mem) + ? ERTS_MMAPFLG_SUPERCARRIER_ONLY + : ERTS_MMAPFLG_OS_ONLY); #endif - { -#if HAVE_MMAP - { - seg = (void *) mmap_align(ma, (void *) 0, (size_t) size, - MMAP_PROT, MMAP_FLAGS, MMAP_FD, 0); - if (seg == (void *) MAP_FAILED) - seg = NULL; - - ASSERT(MAP_IS_ALIGNED(seg) || !seg); - } -#else -# error "Missing mseg_create() implementation" + if (MSEG_FLG_IS_2POW(flags)) + mmap_flags |= ERTS_MMAPFLG_SUPERALIGNED; + + seg = erts_mmap(mmap_flags, sizep); + +#ifdef ERTS_PRINT_ERTS_MMAP + erts_fprintf(stderr, "%p = erts_mmap(%s, {%bpu, %bpu});\n", seg, + (mmap_flags & ERTS_MMAPFLG_SUPERALIGNED) ? "sa" : "sua", + req_size, *sizep); #endif - } INC_CC(ma, create); @@ -414,91 +330,55 @@ mseg_create(ErtsMsegAllctr_t *ma, MemKind* mk, Uint size) } static ERTS_INLINE void -mseg_destroy(ErtsMsegAllctr_t *ma, MemKind* mk, void *seg, Uint size) { - ERTS_DECLARE_DUMMY(int res); - +mseg_destroy(ErtsMsegAllctr_t *ma, Uint flags, MemKind* mk, void *seg_p, UWord size) { + + Uint32 mmap_flags = 0; #if HALFWORD_HEAP - if (mk == &ma->low_mem) { - res = pmunmap((void *) seg, size); - } - else + mmap_flags |= ((mk == &ma->low_mem) + ? ERTS_MMAPFLG_SUPERCARRIER_ONLY + : ERTS_MMAPFLG_OS_ONLY); #endif - { -#ifdef HAVE_MMAP - res = munmap((void *) seg, size); -#else -# error "Missing mseg_destroy() implementation" + if (MSEG_FLG_IS_2POW(flags)) + mmap_flags |= ERTS_MMAPFLG_SUPERALIGNED; + + erts_munmap(mmap_flags, seg_p, size); +#ifdef ERTS_PRINT_ERTS_MMAP + erts_fprintf(stderr, "erts_munmap(%s, %p, %bpu);\n", + (mmap_flags & ERTS_MMAPFLG_SUPERALIGNED) ? "sa" : "sua", + seg_p, *size); #endif - } - - ASSERT(size % MSEG_ALIGNED_SIZE == 0); - ASSERT(res == 0); - INC_CC(ma, destroy); } -#if HAVE_MSEG_RECREATE -#if defined(__NetBSD__) -#define MREMAP_FLAGS (0) -#else -#define MREMAP_FLAGS (MREMAP_MAYMOVE) -#endif - - -/* mseg_recreate - * May return *unaligned* segments as in address not aligned to MSEG_ALIGNMENT - * it is still page aligned - * - * This is fine for single block carriers as long as we don't cache misaligned - * segments (since multiblock carriers may use them) - * - * For multiblock carriers we *need* MSEG_ALIGNMENT but mbc's will never be - * reallocated. 
- * - * This should probably be fixed the following way: - * 1) Use an option to segment allocation - NEED_ALIGNMENT - * 2) Add mremap_align which takes care of aligning a new a mremaped area - * 3) Fix the cache to handle of aligned and unaligned segments - */ - static ERTS_INLINE void * -mseg_recreate(ErtsMsegAllctr_t *ma, MemKind* mk, void *old_seg, Uint old_size, Uint new_size) +mseg_recreate(ErtsMsegAllctr_t *ma, Uint flags, MemKind* mk, void *old_seg, UWord old_size, UWord *sizep) { +#ifdef ERTS_PRINT_ERTS_MMAP + UWord req_size = *sizep; +#endif void *new_seg; - - ASSERT(old_size % MSEG_ALIGNED_SIZE == 0); - ASSERT(new_size % MSEG_ALIGNED_SIZE == 0); - + Uint32 mmap_flags = 0; #if HALFWORD_HEAP - if (mk == &ma->low_mem) { - new_seg = (void *) pmremap((void *) old_seg, - (size_t) old_size, - (size_t) new_size); - } - else + mmap_flags |= ((mk == &ma->low_mem) + ? ERTS_MMAPFLG_SUPERCARRIER_ONLY + : ERTS_MMAPFLG_OS_ONLY); #endif - { -#if HAVE_MREMAP -#if defined(__NetBSD__) - new_seg = mremap(old_seg, (size_t)old_size, NULL, new_size, MREMAP_FLAGS); -#else - new_seg = mremap(old_seg, (size_t)old_size, (size_t)new_size, MREMAP_FLAGS); -#endif - if (new_seg == (void *) MAP_FAILED) - new_seg = NULL; -#else -#error "Missing mseg_recreate() implementation" -#endif - } + if (MSEG_FLG_IS_2POW(flags)) + mmap_flags |= ERTS_MMAPFLG_SUPERALIGNED; + new_seg = erts_mremap(mmap_flags, old_seg, old_size, sizep); + +#ifdef ERTS_PRINT_ERTS_MMAP + erts_fprintf(stderr, "%p = erts_mremap(%s, %p, %bpu, {%bpu, %bpu});\n", + new_seg, (mmap_flags & ERTS_MMAPFLG_SUPERALIGNED) ? "sa" : "sua", + old_seg, old_size, req_size, *sizep); +#endif INC_CC(ma, recreate); return new_seg; } -#endif /* #if HAVE_MSEG_RECREATE */ - #ifdef DEBUG #define ERTS_DBG_MA_CHK_THR_ACCESS(MA) \ do { \ @@ -528,7 +408,7 @@ static ERTS_INLINE void mseg_cache_clear_node(cache_t *c) { c->prev = c; } -static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, Uint size, Uint flags) { +static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, UWord size, Uint flags) { cache_t *c; ERTS_DBG_MK_CHK_THR_ACCESS(mk); @@ -566,14 +446,13 @@ static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, Uint size, Ui return 1; } else if (!MSEG_FLG_IS_2POW(flags) && !erts_circleq_is_empty(&(mk->cache_unpowered_node))) { - /* No free slots. 
* Evict oldest slot from unpowered cache so we can cache an unpowered (sbc) segment */ c = erts_circleq_tail(&(mk->cache_unpowered_node)); erts_circleq_remove(c); - mseg_destroy(mk->ma, mk, c->seg, c->size); + mseg_destroy(mk->ma, ERTS_MSEG_FLG_NONE, mk, c->seg, c->size); mseg_cache_clear_node(c); c->seg = seg; @@ -599,7 +478,8 @@ static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, Uint size, Ui c = erts_circleq_tail(&(mk->cache_powered_node[i])); erts_circleq_remove(c); - mseg_destroy(mk->ma, mk, c->seg, c->size); + mseg_destroy(mk->ma, ERTS_MSEG_FLG_2POW, mk, c->seg, c->size); + mseg_cache_clear_node(c); c->seg = seg; @@ -614,18 +494,18 @@ static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, Uint size, Ui return 0; } -static ERTS_INLINE void *cache_get_segment(MemKind *mk, Uint *size_p, Uint flags) { +static ERTS_INLINE void *cache_get_segment(MemKind *mk, UWord *size_p, Uint flags) { - Uint size = *size_p; + UWord size = *size_p; ERTS_DBG_MK_CHK_THR_ACCESS(mk); if (MSEG_FLG_IS_2POW(flags)) { int i, ix = SIZE_TO_CACHE_AREA_IDX(size); - void *seg; + char *seg; cache_t *c; - Uint csize; + UWord csize; ASSERT(IS_2POW(size)); @@ -641,7 +521,7 @@ static ERTS_INLINE void *cache_get_segment(MemKind *mk, Uint *size_p, Uint flags ASSERT(MAP_IS_ALIGNED(c->seg)); csize = c->size; - seg = c->seg; + seg = (char*) c->seg; mk->cache_size--; mk->cache_hits++; @@ -652,9 +532,8 @@ static ERTS_INLINE void *cache_get_segment(MemKind *mk, Uint *size_p, Uint flags ASSERT(!(mk->cache_size < 0)); - if (csize != size) { - mseg_destroy(mk->ma, mk, (char *)seg + size, csize - size); - } + if (csize != size) + mseg_destroy(mk->ma, ERTS_MSEG_FLG_2POW, mk, seg + size, csize - size); return seg; } @@ -663,10 +542,10 @@ static ERTS_INLINE void *cache_get_segment(MemKind *mk, Uint *size_p, Uint flags void *seg; cache_t *c; cache_t *best = NULL; - Uint bdiff = 0; - Uint csize; - Uint bad_max_abs = mk->ma->abs_max_cache_bad_fit; - Uint bad_max_rel = mk->ma->rel_max_cache_bad_fit; + UWord bdiff = 0; + UWord csize; + UWord bad_max_abs = mk->ma->abs_max_cache_bad_fit; + UWord bad_max_rel = mk->ma->rel_max_cache_bad_fit; erts_circleq_foreach(c, &(mk->cache_unpowered_node)) { csize = c->size; @@ -728,7 +607,7 @@ static ERTS_INLINE void *cache_get_segment(MemKind *mk, Uint *size_p, Uint flags * using callbacks from aux-work in the scheduler. 
*/ -static ERTS_INLINE Uint mseg_drop_one_memkind_cache_size(MemKind *mk, cache_t *head) { +static ERTS_INLINE Uint mseg_drop_one_memkind_cache_size(MemKind *mk, Uint flags, cache_t *head) { cache_t *c = NULL; c = erts_circleq_tail(head); @@ -737,7 +616,7 @@ static ERTS_INLINE Uint mseg_drop_one_memkind_cache_size(MemKind *mk, cache_t *h if (erts_mtrace_enabled) erts_mtrace_crr_free(SEGTYPE, SEGTYPE, c->seg); - mseg_destroy(mk->ma, mk, c->seg, c->size); + mseg_destroy(mk->ma, flags, mk, c->seg, c->size); mseg_cache_clear_node(c); erts_circleq_push_head(&(mk->cache_free), c); @@ -749,7 +628,7 @@ static ERTS_INLINE Uint mseg_drop_one_memkind_cache_size(MemKind *mk, cache_t *h return mk->cache_size; } -static ERTS_INLINE Uint mseg_drop_memkind_cache_size(MemKind *mk, cache_t *head) { +static ERTS_INLINE Uint mseg_drop_memkind_cache_size(MemKind *mk, Uint flags, cache_t *head) { cache_t *c = NULL; while (!erts_circleq_is_empty(head)) { @@ -760,7 +639,7 @@ static ERTS_INLINE Uint mseg_drop_memkind_cache_size(MemKind *mk, cache_t *head) if (erts_mtrace_enabled) erts_mtrace_crr_free(SEGTYPE, SEGTYPE, c->seg); - mseg_destroy(mk->ma, mk, c->seg, c->size); + mseg_destroy(mk->ma, flags, mk, c->seg, c->size); mseg_cache_clear_node(c); erts_circleq_push_head(&(mk->cache_free), c); @@ -788,11 +667,11 @@ static Uint mseg_check_memkind_cache(MemKind *mk) { for (i = 0; i < CACHE_AREAS; i++) { if (!erts_circleq_is_empty(&(mk->cache_powered_node[i]))) - return mseg_drop_one_memkind_cache_size(mk, &(mk->cache_powered_node[i])); + return mseg_drop_one_memkind_cache_size(mk, ERTS_MSEG_FLG_2POW, &(mk->cache_powered_node[i])); } if (!erts_circleq_is_empty(&(mk->cache_unpowered_node))) - return mseg_drop_one_memkind_cache_size(mk, &(mk->cache_unpowered_node)); + return mseg_drop_one_memkind_cache_size(mk, ERTS_MSEG_FLG_NONE, &(mk->cache_unpowered_node)); return 0; } @@ -851,12 +730,12 @@ static void mseg_clear_memkind_cache(MemKind *mk) { if (erts_circleq_is_empty(&(mk->cache_powered_node[i]))) continue; - mseg_drop_memkind_cache_size(mk, &(mk->cache_powered_node[i])); + mseg_drop_memkind_cache_size(mk, ERTS_MSEG_FLG_2POW, &(mk->cache_powered_node[i])); ASSERT(erts_circleq_is_empty(&(mk->cache_powered_node[i]))); } /* drop varied caches */ if (!erts_circleq_is_empty(&(mk->cache_unpowered_node))) - mseg_drop_memkind_cache_size(mk, &(mk->cache_unpowered_node)); + mseg_drop_memkind_cache_size(mk, ERTS_MSEG_FLG_NONE, &(mk->cache_unpowered_node)); ASSERT(erts_circleq_is_empty(&(mk->cache_unpowered_node))); ASSERT(mk->cache_size == 0); @@ -896,36 +775,35 @@ static ERTS_INLINE MemKind* memkind(ErtsMsegAllctr_t *ma, } static void * -mseg_alloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, Uint *size_p, +mseg_alloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, UWord *size_p, Uint flags, const ErtsMsegOpt_t *opt) { - Uint size; + UWord size; void *seg; MemKind* mk = memkind(ma, opt); INC_CC(ma, alloc); - /* Carrier align */ - size = ALIGNED_CEILING(*size_p); - - /* Cache optim (if applicable) */ - if (MSEG_FLG_IS_2POW(flags) && !IS_2POW(size)) - size = ceil_2pow(size); + if (!MSEG_FLG_IS_2POW(flags)) + size = ERTS_PAGEALIGNED_CEILING(*size_p); + else { + size = ALIGNED_CEILING(*size_p); + if (!IS_2POW(size)) { + /* Cache optim (if applicable) */ + size = ceil_2pow(size); + } + } -#if CAN_PARTLY_DESTROY - if (size < ma->min_seg_size) - ma->min_seg_size = size; -#endif - if (opt->cache && mk->cache_size > 0 && (seg = cache_get_segment(mk, &size, flags)) != NULL) goto done; - if ((seg = mseg_create(ma, mk, size)) == NULL) - size = 
0; + seg = mseg_create(ma, flags, mk, &size); + if (!seg) + *size_p = 0; + else { done: - *size_p = size; - if (seg) { + *size_p = size; if (erts_mtrace_enabled) erts_mtrace_crr_alloc(seg, atype, ERTS_MTRACE_SEGMENT_ID, size); @@ -937,7 +815,7 @@ done: static void -mseg_dealloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, Uint size, +mseg_dealloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, UWord size, Uint flags, const ErtsMsegOpt_t *opt) { MemKind* mk = memkind(ma, opt); @@ -952,7 +830,7 @@ mseg_dealloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, Uint size, if (erts_mtrace_enabled) erts_mtrace_crr_free(atype, SEGTYPE, seg); - mseg_destroy(ma, mk, seg, size); + mseg_destroy(ma, flags, mk, seg, size); done: @@ -961,11 +839,11 @@ done: static void * mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, - Uint old_size, Uint *new_size_p, Uint flags, const ErtsMsegOpt_t *opt) + UWord old_size, UWord *new_size_p, Uint flags, const ErtsMsegOpt_t *opt) { MemKind* mk; void *new_seg; - Uint new_size; + UWord new_size; /* Just allocate a new segment if we didn't have one before */ if (!seg || !old_size) { @@ -985,90 +863,47 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, mk = memkind(ma, opt); new_seg = seg; - /* Carrier align */ - new_size = ALIGNED_CEILING(*new_size_p); - - /* Cache optim (if applicable) */ - if (MSEG_FLG_IS_2POW(flags) && !IS_2POW(new_size)) - new_size = ceil_2pow(new_size); - - if (new_size == old_size) - ; - else if (new_size < old_size) { - Uint shrink_sz = old_size - new_size; + if (!MSEG_FLG_IS_2POW(flags)) + new_size = ERTS_PAGEALIGNED_CEILING(*new_size_p); + else { + new_size = ALIGNED_CEILING(*new_size_p); + if (!IS_2POW(new_size)) { + /* Cache optim (if applicable) */ + new_size = ceil_2pow(new_size); + } + } -#if CAN_PARTLY_DESTROY - if (new_size < ma->min_seg_size) - ma->min_seg_size = new_size; -#endif - /* +M<S>rsbcst <ratio> */ - if (shrink_sz < opt->abs_shrink_th - && 100*shrink_sz < opt->rel_shrink_th*old_size) { - new_size = old_size; + if (new_size > old_size) { + if (opt->preserv) { + new_seg = mseg_recreate(ma, flags, mk, (void *) seg, old_size, &new_size); + if (!new_seg) + new_size = old_size; } else { - -#if CAN_PARTLY_DESTROY - - if (erts_mtrace_enabled) - erts_mtrace_crr_realloc(new_seg, atype, SEGTYPE, seg, new_size); - - mseg_destroy(ma, mk, ((char *) seg) + new_size, shrink_sz); - -#elif HAVE_MSEG_RECREATE - goto do_recreate; -#else + mseg_dealloc(ma, atype, seg, old_size, flags, opt); new_seg = mseg_alloc(ma, atype, &new_size, flags, opt); - - ASSERT(MAP_IS_ALIGNED(new_seg) || !new_seg); - if (!new_seg) - new_size = old_size; - else { - sys_memcpy(((char *) new_seg), - ((char *) seg), - MIN(new_size, old_size)); - mseg_dealloc(ma, atype, seg, old_size, flags, opt); - } -#endif + new_size = 0; } } - else { + else if (new_size < old_size) { + UWord shrink_sz = old_size - new_size; - if (!opt->preserv) { - mseg_dealloc(ma, atype, seg, old_size, flags, opt); - new_seg = mseg_alloc(ma, atype, &new_size, flags, opt); - ASSERT(MAP_IS_ALIGNED(new_seg) || !new_seg); + /* +M<S>rsbcst <ratio> */ + if (shrink_sz < opt->abs_shrink_th + && 100*shrink_sz < opt->rel_shrink_th*old_size) { + new_size = old_size; } else { -#if HAVE_MSEG_RECREATE -#if !CAN_PARTLY_DESTROY - do_recreate: -#endif - new_seg = mseg_recreate(ma, mk, (void *) seg, old_size, new_size); - /* ASSERT(MAP_IS_ALIGNED(new_seg) || !new_seg); - * will not always be aligned and it ok for now - */ - - if (erts_mtrace_enabled) - 
erts_mtrace_crr_realloc(new_seg, atype, SEGTYPE, seg, new_size); + new_seg = mseg_recreate(ma, flags, mk, (void *) seg, old_size, &new_size); if (!new_seg) new_size = old_size; -#else - new_seg = mseg_alloc(ma, atype, &new_size, flags, opt); - - ASSERT(MAP_IS_ALIGNED(new_seg) || !new_seg); - - if (!new_seg) - new_size = old_size; - else { - sys_memcpy(((char *) new_seg), ((char *) seg), MIN(new_size, old_size)); - mseg_dealloc(ma, atype, seg, old_size, flags, opt); - } -#endif } } + if (erts_mtrace_enabled) + erts_mtrace_crr_realloc(new_seg, atype, SEGTYPE, seg, new_size); + INC_CC(ma, realloc); ASSERT(!MSEG_FLG_IS_2POW(flags) || IS_2POW(new_size)); @@ -1106,9 +941,7 @@ static struct { Eterm mseg_create; Eterm mseg_create_resize; Eterm mseg_destroy; -#if HAVE_MSEG_RECREATE Eterm mseg_recreate; -#endif Eterm mseg_clear_cache; Eterm mseg_check_cache; @@ -1129,8 +962,6 @@ init_atoms(ErtsMsegAllctr_t *ma) #ifdef DEBUG Eterm *atom; #endif - - ERTS_MSEG_UNLOCK(ma); erts_mtx_lock(&init_atoms_mutex); if (!atoms_initialized) { @@ -1163,9 +994,7 @@ init_atoms(ErtsMsegAllctr_t *ma) AM_INIT(mseg_create); AM_INIT(mseg_create_resize); AM_INIT(mseg_destroy); -#if HAVE_MSEG_RECREATE AM_INIT(mseg_recreate); -#endif AM_INIT(mseg_clear_cache); AM_INIT(mseg_check_cache); @@ -1176,7 +1005,6 @@ init_atoms(ErtsMsegAllctr_t *ma) #endif } - ERTS_MSEG_LOCK(ma); atoms_initialized = 1; erts_mtx_unlock(&init_atoms_mutex); } @@ -1239,7 +1067,9 @@ info_options(ErtsMsegAllctr_t *ma, Uint **hpp, Uint *szp) { - Eterm res = THE_NON_VALUE; + Eterm res; + + res = erts_mmap_info_options(prefix, print_to_p, print_to_arg, hpp, szp); if (print_to_p) { int to = *print_to_p; @@ -1254,7 +1084,6 @@ info_options(ErtsMsegAllctr_t *ma, if (!atoms_initialized) init_atoms(ma); - res = NIL; add_2tup(hpp, szp, &res, am.mcs, bld_uint(hpp, szp, ma->max_cache_size)); @@ -1293,9 +1122,7 @@ info_calls(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, Uint **hpp PRINT_CC(to, arg, create); PRINT_CC(to, arg, create_resize); PRINT_CC(to, arg, destroy); -#if HAVE_MSEG_RECREATE PRINT_CC(to, arg, recreate); -#endif PRINT_CC(to, arg, clear_cache); PRINT_CC(to, arg, check_cache); @@ -1316,12 +1143,10 @@ info_calls(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, Uint **hpp bld_unstable_uint(hpp, szp, ma->calls.clear_cache.giga_no), bld_unstable_uint(hpp, szp, ma->calls.clear_cache.no)); -#if HAVE_MSEG_RECREATE add_3tup(hpp, szp, &res, am.mseg_recreate, bld_unstable_uint(hpp, szp, ma->calls.recreate.giga_no), bld_unstable_uint(hpp, szp, ma->calls.recreate.no)); -#endif add_3tup(hpp, szp, &res, am.mseg_destroy, bld_unstable_uint(hpp, szp, ma->calls.destroy.giga_no), @@ -1465,14 +1290,8 @@ erts_mseg_info_options(int ix, ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_IX(ix); Eterm res; - ERTS_MSEG_LOCK(ma); - - ERTS_DBG_MA_CHK_THR_ACCESS(ma); - res = info_options(ma, "option ", print_to_p, print_to_arg, hpp, szp); - ERTS_MSEG_UNLOCK(ma); - return res; } @@ -1490,10 +1309,6 @@ erts_mseg_info(int ix, Eterm values[4]; Uint n = 0; - ERTS_MSEG_LOCK(ma); - - ERTS_DBG_MA_CHK_THR_ACCESS(ma); - if (hpp || szp) { if (!atoms_initialized) @@ -1506,6 +1321,10 @@ erts_mseg_info(int ix, } values[n++] = info_version(ma, print_to_p, print_to_arg, hpp, szp); values[n++] = info_options(ma, "option ", print_to_p, print_to_arg, hpp, szp); + + ERTS_MSEG_LOCK(ma); + ERTS_DBG_MA_CHK_THR_ACCESS(ma); + #if HALFWORD_HEAP values[n++] = info_memkind(ma, &ma->low_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp); values[n++] = info_memkind(ma, &ma->hi_mem, print_to_p, 
print_to_arg, begin_max_per, hpp, szp); @@ -1521,7 +1340,7 @@ erts_mseg_info(int ix, } void * -erts_mseg_alloc_opt(ErtsAlcType_t atype, Uint *size_p, Uint flags, const ErtsMsegOpt_t *opt) +erts_mseg_alloc_opt(ErtsAlcType_t atype, UWord *size_p, Uint flags, const ErtsMsegOpt_t *opt) { ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt); void *seg; @@ -1529,20 +1348,23 @@ erts_mseg_alloc_opt(ErtsAlcType_t atype, Uint *size_p, Uint flags, const ErtsMse ERTS_DBG_MA_CHK_THR_ACCESS(ma); seg = mseg_alloc(ma, atype, size_p, flags, opt); ERTS_MSEG_UNLOCK(ma); + HARD_DBG_INSERT_MSEG(seg, *size_p); return seg; } void * -erts_mseg_alloc(ErtsAlcType_t atype, Uint *size_p, Uint flags) +erts_mseg_alloc(ErtsAlcType_t atype, UWord *size_p, Uint flags) { return erts_mseg_alloc_opt(atype, size_p, flags, &erts_mseg_default_opt); } void erts_mseg_dealloc_opt(ErtsAlcType_t atype, void *seg, - Uint size, Uint flags, const ErtsMsegOpt_t *opt) + UWord size, Uint flags, const ErtsMsegOpt_t *opt) { ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt); + + HARD_DBG_REMOVE_MSEG(seg, size); ERTS_MSEG_LOCK(ma); ERTS_DBG_MA_CHK_THR_ACCESS(ma); mseg_dealloc(ma, atype, seg, size, flags, opt); @@ -1550,29 +1372,32 @@ erts_mseg_dealloc_opt(ErtsAlcType_t atype, void *seg, } void -erts_mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size, Uint flags) +erts_mseg_dealloc(ErtsAlcType_t atype, void *seg, UWord size, Uint flags) { erts_mseg_dealloc_opt(atype, seg, size, flags, &erts_mseg_default_opt); } void * erts_mseg_realloc_opt(ErtsAlcType_t atype, void *seg, - Uint old_size, Uint *new_size_p, + UWord old_size, UWord *new_size_p, Uint flags, const ErtsMsegOpt_t *opt) { ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt); void *new_seg; + + HARD_DBG_REMOVE_MSEG(seg, old_size); ERTS_MSEG_LOCK(ma); ERTS_DBG_MA_CHK_THR_ACCESS(ma); new_seg = mseg_realloc(ma, atype, seg, old_size, new_size_p, flags, opt); ERTS_MSEG_UNLOCK(ma); + HARD_DBG_INSERT_MSEG(new_seg, *new_size_p); return new_seg; } void * erts_mseg_realloc(ErtsAlcType_t atype, void *seg, - Uint old_size, Uint *new_size_p, Uint flags) + UWord old_size, UWord *new_size_p, Uint flags) { return erts_mseg_realloc_opt(atype, seg, old_size, new_size_p, flags, &erts_mseg_default_opt); @@ -1662,16 +1487,17 @@ erts_mseg_init(ErtsMsegInit_t *init) erts_mtx_init(&init_atoms_mutex, "mseg_init_atoms"); -#if HAVE_MMAP && !defined(MAP_ANON) - mmap_fd = open("/dev/zero", O_RDWR); - if (mmap_fd < 0) - erl_exit(ERTS_ABORT_EXIT, "erts_mseg: unable to open /dev/zero\n"); -#endif +#if HALFWORD_HEAP + if (sizeof(void *) != 8) + erl_exit(-1,"Halfword emulator cannot be run in 32bit mode"); -#if HAVE_MMAP && HALFWORD_HEAP - initialize_pmmap(); + init->mmap.virtual_range.start = (char *) sbrk(0); + init->mmap.virtual_range.end = (char *) 0x100000000UL; + init->mmap.sco = 0; #endif + erts_mmap_init(&init->mmap); + if (!IS_2POW(GET_PAGE_SIZE)) erl_exit(ERTS_ABORT_EXIT, "erts_mseg: Unexpected page_size %beu\n", GET_PAGE_SIZE); @@ -1712,10 +1538,6 @@ erts_mseg_init(ErtsMsegInit_t *init) #endif sys_memzero((void *) &ma->calls, sizeof(ErtsMsegCalls)); - -#if CAN_PARTLY_DESTROY - ma->min_seg_size = ~((Uint) 0); -#endif } } @@ -1757,7 +1579,7 @@ erts_mseg_test(UWord op, UWord a1, UWord a2, UWord a3) case 0x400: /* Have erts_mseg */ return (UWord) 1; case 0x401: - return (UWord) erts_mseg_alloc(ERTS_ALC_A_INVALID, (Uint *) a1, (Uint) 0); + return (UWord) erts_mseg_alloc(ERTS_ALC_A_INVALID, (UWord *) a1, (Uint) 0); case 0x402: erts_mseg_dealloc(ERTS_ALC_A_INVALID, (void *) a1, (Uint) a2, (Uint) 0); return (UWord) 0; @@ 
-1765,7 +1587,7 @@ erts_mseg_test(UWord op, UWord a1, UWord a2, UWord a3) return (UWord) erts_mseg_realloc(ERTS_ALC_A_INVALID, (void *) a1, (Uint) a2, - (Uint *) a3, + (UWord *) a3, (Uint) 0); case 0x404: erts_mseg_clear_cache(); @@ -1788,405 +1610,3 @@ erts_mseg_test(UWord op, UWord a1, UWord a2, UWord a3) } } - - -#if HALFWORD_HEAP -/* - * Very simple page oriented mmap replacer. Works in the lower - * 32 bit address range of a 64bit program. - * Implements anonymous mmap mremap and munmap with address order first fit. - * The free list is expected to be very short... - * To be used for compressed pointers in Erlang halfword emulator - * implementation. The MacOS X version is more of a toy, it's not really - * for production as the halfword erlang VM relies on Linux specific memory - * mapping tricks. - */ - -/* #define HARDDEBUG 1 */ - -#ifdef HARDDEBUG -static void dump_freelist(void) -{ - FreeBlock *p = first; - - while (p) { - fprintf(stderr, "p = %p\r\np->num = %ld\r\np->next = %p\r\n\r\n", - (void *) p, (unsigned long) p->num, (void *) p->next); - p = p->next; - } -} - -#define HARDDEBUG_HW_INCOMPLETE_ALIGNMENT(PTR, SZ) \ - fprintf(stderr,"Mapping of address %p with size %ld " \ - "does not map complete pages (%s:%d)\r\n", \ - (void *) (PTR), (unsigned long) (SZ),__FILE__, __LINE__) - -#define HARDDEBUG_HW_UNALIGNED_ALIGNMENT(PTR, SZ) \ - fprintf(stderr,"Mapping of address %p with size %ld " \ - "is not page aligned (%s:%d)\r\n", \ - (void *) (PTR), (unsigned long) (SZ),__FILE__, __LINE__) - -#define HARDDEBUG_MAP_FAILED(PTR, SZ) \ - fprintf(stderr, "Could not actually map memory " \ - "at address %p with size %ld (%s:%d) ..\r\n", \ - (void *) (PTR), (unsigned long) (SZ),__FILE__, __LINE__) -#else -#define HARDDEBUG_HW_INCOMPLETE_ALIGNMENT(PTR, SZ) do{}while(0) -#define HARDDEBUG_HW_UNALIGNED_ALIGNMENT(PTR, SZ) do{}while(0) -#define HARDDEBUG_MAP_FAILED(PTR, SZ) do{}while(0) -#endif - - -#ifdef __APPLE__ -#define MAP_ANONYMOUS MAP_ANON -#endif - -#define INIT_LOCK() do {erts_mtx_init(&pmmap_mutex, "pmmap");} while(0) - -#define TAKE_LOCK() do {erts_mtx_lock(&pmmap_mutex);} while(0) - -#define RELEASE_LOCK() do {erts_mtx_unlock(&pmmap_mutex);} while(0) - -static erts_mtx_t pmmap_mutex; /* Also needed when !USE_THREADS */ - -typedef struct _free_block { - unsigned long num; /*pages*/ - struct _free_block *next; -} FreeBlock; - -/* Protect with lock */ -static FreeBlock *first; - -static void *do_map(void *ptr, size_t sz) -{ - void *res; - - if (ALIGNED_CEILING(sz) != sz) { - HARDDEBUG_HW_INCOMPLETE_ALIGNMENT(ptr, sz); - return NULL; - } - - if (((unsigned long) ptr) % MSEG_ALIGNED_SIZE) { - HARDDEBUG_HW_UNALIGNED_ALIGNMENT(ptr, sz); - return NULL; - } - -#if HAVE_MMAP - res = mmap(ptr, sz, - PROT_READ | PROT_WRITE, MAP_PRIVATE | - MAP_ANONYMOUS | MAP_FIXED, - -1 , 0); -#else -# error "Missing mmap support" -#endif - - if (res == MAP_FAILED) { - HARDDEBUG_MAP_FAILED(ptr, sz); - return NULL; - } - - return res; -} - -static int do_unmap(void *ptr, size_t sz) -{ - void *res; - - if (ALIGNED_CEILING(sz) != sz) { - HARDDEBUG_HW_INCOMPLETE_ALIGNMENT(ptr, sz); - return 1; - } - - if (((unsigned long) ptr) % MSEG_ALIGNED_SIZE) { - HARDDEBUG_HW_UNALIGNED_ALIGNMENT(ptr, sz); - return 1; - } - - res = mmap(ptr, sz, - PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, - -1 , 0); - - if (res == MAP_FAILED) { - HARDDEBUG_MAP_FAILED(ptr, sz); - return 1; - } - - return 0; -} - -#ifdef __APPLE__ -/* - * The first 4 gig's are protected on Macos X for 64bit processes :( - * The range 
0x1000000000 - 0x10FFFFFFFF is selected as an arbitrary - * value of a normally unused range... Real MMAP's will avoid - * it and all 32bit compressed pointers can be in that range... - * More expensive than on Linux where expansion of compressed - * poiters involves no masking (as they are in the first 4 gig's). - * It's also very uncertain if the MAP_NORESERVE flag really has - * any effect in MacOS X. Swap space may always be allocated... - */ -#define SET_RANGE_MIN() /* nothing */ -#define RANGE_MIN 0x1000000000UL -#define RANGE_MAX 0x1100000000UL -#define RANGE_MASK (RANGE_MIN) -#define EXTRA_MAP_FLAGS (MAP_FIXED) -#else -static size_t range_min; -#define SET_RANGE_MIN() do { range_min = (size_t) sbrk(0); } while (0) -#define RANGE_MIN range_min -#define RANGE_MAX 0x100000000UL -#define RANGE_MASK 0UL -#define EXTRA_MAP_FLAGS (0) -#endif - -static int initialize_pmmap(void) -{ - char *p,*q,*rptr; - size_t rsz; - FreeBlock *initial; - - SET_RANGE_MIN(); - if (sizeof(void *) != 8) { - erl_exit(1,"Halfword emulator cannot be run in 32bit mode"); - } - - p = (char *) RANGE_MIN; - q = (char *) RANGE_MAX; - - rsz = ALIGNED_FLOOR(q - p); - - rptr = mmap_align(NULL, (void *) p, rsz, - PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | - MAP_NORESERVE | EXTRA_MAP_FLAGS, - -1 , 0); -#ifdef HARDDEBUG - printf("p=%p, rsz = %ld, pages = %ld, got range = %p -> %p\r\n", - p, (unsigned long) rsz, (unsigned long) (rsz / MSEG_ALIGNED_SIZE), - (void *) rptr, (void*)(rptr + rsz)); -#endif - if ((UWord)(rptr + rsz) > RANGE_MAX) { - size_t rsz_trunc = RANGE_MAX - (UWord)rptr; -#ifdef HARDDEBUG - printf("Reducing mmap'ed memory from %lu to %lu Mb, reduced range = %p -> %p\r\n", - rsz/(1024*1024), rsz_trunc/(1024*1024), rptr, rptr+rsz_trunc); -#endif - munmap((void*)RANGE_MAX, rsz - rsz_trunc); - rsz = rsz_trunc; - } - if (!do_map(rptr, MSEG_ALIGNED_SIZE)) { - erl_exit(1,"Could not actually mmap first page for halfword emulator...\n"); - } - initial = (FreeBlock *) rptr; - initial->num = (rsz / MSEG_ALIGNED_SIZE); - initial->next = NULL; - first = initial; - INIT_LOCK(); - return 0; -} - -static void *pmmap(size_t size) -{ - size_t real_size = ALIGNED_CEILING(size); - size_t num_pages = real_size / MSEG_ALIGNED_SIZE; - FreeBlock **block; - FreeBlock *tail; - FreeBlock *res; - - TAKE_LOCK(); - - for (block = &first; - *block != NULL && (*block)->num < num_pages; - block = &((*block)->next)) - ; - if (!(*block)) { - RELEASE_LOCK(); - return NULL; - } - if ((*block)->num == num_pages) { - /* nice, perfect fit */ - res = *block; - *block = (*block)->next; - } else { - tail = (FreeBlock *) (((char *) ((void *) (*block))) + real_size); - if (!do_map(tail, MSEG_ALIGNED_SIZE)) { - HARDDEBUG_MAP_FAILED(tail, MSEG_ALIGNED_SIZE); - RELEASE_LOCK(); - return NULL; - } - tail->num = (*block)->num - num_pages; - tail->next = (*block)->next; - res = *block; - *block = tail; - } - - RELEASE_LOCK(); - - if (!do_map(res, real_size)) { - HARDDEBUG_MAP_FAILED(res, real_size); - return NULL; - } - - return (void *) res; -} - -static int pmunmap(void *p, size_t size) -{ - size_t real_size = ALIGNED_CEILING(size); - size_t num_pages = real_size / MSEG_ALIGNED_SIZE; - - FreeBlock *block; - FreeBlock *last; - FreeBlock *nb = (FreeBlock *) p; - - ASSERT(((unsigned long)p & CHECK_POINTER_MASK)==0); - - if (real_size > MSEG_ALIGNED_SIZE) { - if (do_unmap(((char *) p) + MSEG_ALIGNED_SIZE, real_size - MSEG_ALIGNED_SIZE)) { - return 1; - } - } - - TAKE_LOCK(); - - last = NULL; - block = first; - while(block != NULL && ((void *) block) < p) { - last 
= block; - block = block->next; - } - - if (block != NULL && - ((void *) block) == ((void *) (((char *) p) + real_size))) { - /* Merge new free block with following */ - nb->num = block->num + num_pages; - nb->next = block->next; - if (do_unmap(block, MSEG_ALIGNED_SIZE)) { - RELEASE_LOCK(); - return 1; - } - } else { - /* just link in */ - nb->num = num_pages; - nb->next = block; - } - if (last != NULL) { - if (p == ((void *) (((char *) last) + (last->num * MSEG_ALIGNED_SIZE)))) { - /* Merge with previous */ - last->num += nb->num; - last->next = nb->next; - if (do_unmap(nb, MSEG_ALIGNED_SIZE)) { - RELEASE_LOCK(); - return 1; - } - } else { - last->next = nb; - } - } else { - first = nb; - } - RELEASE_LOCK(); - return 0; -} - -static void *pmremap(void *old_address, size_t old_size, - size_t new_size) -{ - size_t new_real_size = ALIGNED_CEILING(new_size); - size_t new_num_pages = new_real_size / MSEG_ALIGNED_SIZE; - size_t old_real_size = ALIGNED_CEILING(old_size); - size_t old_num_pages = old_real_size / MSEG_ALIGNED_SIZE; - if (new_num_pages == old_num_pages) { - return old_address; - } else if (new_num_pages < old_num_pages) { /* Shrink */ - size_t nfb_pages = old_num_pages - new_num_pages; - size_t nfb_real_size = old_real_size - new_real_size; - void *vnfb = (void *) (((char *)old_address) + new_real_size); - FreeBlock *nfb = (FreeBlock *) vnfb; - FreeBlock **block; - TAKE_LOCK(); - for (block = &first; - *block != NULL && (*block) < nfb; - block = &((*block)->next)) - ; - if (!(*block) || - (*block) > ((FreeBlock *)(((char *) vnfb) + nfb_real_size))) { - /* Normal link in */ - if (nfb_pages > 1) { - if (do_unmap((void *)(((char *) vnfb) + MSEG_ALIGNED_SIZE), - (nfb_pages - 1)*MSEG_ALIGNED_SIZE)) { - return NULL; - } - } - nfb->next = (*block); - nfb->num = nfb_pages; - (*block) = nfb; - } else { /* block merge */ - nfb->next = (*block)->next; - nfb->num = nfb_pages + (*block)->num; - /* unmap also the first page of the next freeblock */ - (*block) = nfb; - if (do_unmap((void *)(((char *) vnfb) + MSEG_ALIGNED_SIZE), - nfb_pages*MSEG_ALIGNED_SIZE)) { - return NULL; - } - } - RELEASE_LOCK(); - return old_address; - } else { /* Enlarge */ - FreeBlock **block; - void *old_end = (void *) (((char *)old_address) + old_real_size); - TAKE_LOCK(); - for (block = &first; - *block != NULL && (*block) < (FreeBlock *) old_address; - block = &((*block)->next)) - ; - if ((*block) == NULL || old_end > ((void *) RANGE_MAX) || - (*block) != old_end || - (*block)->num < (new_num_pages - old_num_pages)) { - /* cannot extend */ - void *result; - RELEASE_LOCK(); - result = pmmap(new_size); - if (result == NULL) { - return NULL; - } - memcpy(result,old_address,old_size); - if (pmunmap(old_address,old_size)) { - /* Oups... 
*/ - pmunmap(result,new_size); - return NULL; - } - return result; - } else { /* extend */ - size_t remaining_pages = (*block)->num - - (new_num_pages - old_num_pages); - if (!remaining_pages) { - void *p = (void *) (((char *) (*block)) + MSEG_ALIGNED_SIZE); - void *n = (*block)->next; - size_t x = ((*block)->num - 1) * MSEG_ALIGNED_SIZE; - if (x > 0) { - if (do_map(p,x) == NULL) { - RELEASE_LOCK(); - return NULL; - } - } - (*block) = n; - } else { - FreeBlock *nfb = (FreeBlock *) ((void *) - (((char *) old_address) + - new_real_size)); - void *p = (void *) (((char *) (*block)) + MSEG_ALIGNED_SIZE); - if (do_map(p,new_real_size - old_real_size) == NULL) { - RELEASE_LOCK(); - return NULL; - } - nfb->num = remaining_pages; - nfb->next = (*block)->next; - (*block) = nfb; - } - RELEASE_LOCK(); - return old_address; - } - } -} -#endif /* HALFWORD_HEAP */ diff --git a/erts/emulator/sys/common/erl_mseg.h b/erts/emulator/sys/common/erl_mseg.h index a4f250ceab..2284b3f8f1 100644 --- a/erts/emulator/sys/common/erl_mseg.h +++ b/erts/emulator/sys/common/erl_mseg.h @@ -22,25 +22,25 @@ #include "sys.h" #include "erl_alloc_types.h" +#include "erl_mmap.h" -#ifndef HAVE_MMAP -# define HAVE_MMAP 0 -#endif -#ifndef HAVE_MREMAP -# define HAVE_MREMAP 0 -#endif - -#if HAVE_MMAP +/* + * We currently only enable mseg_alloc if we got + * a genuine mmap()/munmap() primitive. It is possible + * to utilize erts_mmap() withiout a mmap support but + * alloc_util needs to be prepared before we can do + * that. + */ +#ifdef ERTS_HAVE_GENUINE_OS_MMAP # define HAVE_ERTS_MSEG 1 -# define HAVE_SUPER_ALIGNED_MB_CARRIERS 1 +# define ERTS_HAVE_MSEG_SUPER_ALIGNED 1 #else # define HAVE_ERTS_MSEG 0 -# define HAVE_SUPER_ALIGNED_MB_CARRIERS 0 +# define ERTS_HAVE_MSEG_SUPER_ALIGNED 0 #endif -#if HAVE_SUPER_ALIGNED_MB_CARRIERS -# define MSEG_ALIGN_BITS (18) - /* Affects hard limits for sbct and lmbcs documented in erts_alloc.xml */ +#if ERTS_HAVE_MSEG_SUPER_ALIGNED +# define MSEG_ALIGN_BITS ERTS_MMAP_SUPERALIGNED_BITS #else /* If we don't use super aligned multiblock carriers * we will mmap with page size alignment (and thus use corresponding @@ -68,6 +68,7 @@ typedef struct { Uint rmcbf; Uint mcs; Uint nos; + ErtsMMapInit mmap; } ErtsMsegInit_t; #define ERTS_MSEG_INIT_DEFAULT_INITIALIZER \ @@ -75,7 +76,8 @@ typedef struct { 4*1024*1024, /* amcbf: Absolute max cache bad fit */ \ 20, /* rmcbf: Relative max cache bad fit */ \ 10, /* mcs: Max cache size */ \ - 1000 /* cci: Cache check interval */ \ + 1000, /* cci: Cache check interval */ \ + ERTS_MMAP_INIT_DEFAULT_INITER \ } typedef struct { @@ -91,12 +93,12 @@ typedef struct { extern const ErtsMsegOpt_t erts_mseg_default_opt; -void *erts_mseg_alloc(ErtsAlcType_t, Uint *, Uint); -void *erts_mseg_alloc_opt(ErtsAlcType_t, Uint *, Uint, const ErtsMsegOpt_t *); -void erts_mseg_dealloc(ErtsAlcType_t, void *, Uint, Uint); -void erts_mseg_dealloc_opt(ErtsAlcType_t, void *, Uint, Uint, const ErtsMsegOpt_t *); -void *erts_mseg_realloc(ErtsAlcType_t, void *, Uint, Uint *, Uint); -void *erts_mseg_realloc_opt(ErtsAlcType_t, void *, Uint, Uint *, Uint, const ErtsMsegOpt_t *); +void *erts_mseg_alloc(ErtsAlcType_t, UWord *, Uint); +void *erts_mseg_alloc_opt(ErtsAlcType_t, UWord *, Uint, const ErtsMsegOpt_t *); +void erts_mseg_dealloc(ErtsAlcType_t, void *, UWord, Uint); +void erts_mseg_dealloc_opt(ErtsAlcType_t, void *, UWord, Uint, const ErtsMsegOpt_t *); +void *erts_mseg_realloc(ErtsAlcType_t, void *, UWord, UWord *, Uint); +void *erts_mseg_realloc_opt(ErtsAlcType_t, void *, UWord, UWord *, Uint, 
const ErtsMsegOpt_t *); void erts_mseg_clear_cache(void); void erts_mseg_cache_check(void); Uint erts_mseg_no( const ErtsMsegOpt_t *); diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c index 7676d8872a..0a58a625b2 100644 --- a/erts/emulator/sys/common/erl_poll.c +++ b/erts/emulator/sys/common/erl_poll.c @@ -2510,7 +2510,8 @@ ERTS_POLL_EXPORT(erts_poll_destroy_pollset)(ErtsPollSet ps) pollsets = pollsets->next; else { ErtsPollSet prev_ps; - for (prev_ps = pollsets; ps != prev_ps->next; prev_ps = prev_ps->next); + for (prev_ps = pollsets; ps != prev_ps->next; prev_ps = prev_ps->next) + ; ASSERT(ps == prev_ps->next); prev_ps->next = ps->next; } diff --git a/erts/emulator/sys/unix/erl_unix_sys.h b/erts/emulator/sys/unix/erl_unix_sys.h index c8fcec8547..2c47aa06c2 100644 --- a/erts/emulator/sys/unix/erl_unix_sys.h +++ b/erts/emulator/sys/unix/erl_unix_sys.h @@ -107,6 +107,10 @@ #endif #include <netdb.h> +#ifdef HAVE_POSIX_MEMALIGN +# define ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC 1 +#endif + /* * Make sure that MAXPATHLEN is defined. */ diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c index a7ea4b2490..61f9f6a59a 100644 --- a/erts/emulator/sys/unix/sys.c +++ b/erts/emulator/sys/unix/sys.c @@ -2489,6 +2489,16 @@ erts_sys_getenv(char *key, char *value, size_t *size) return res; } +int +erts_sys_unsetenv(char *key) +{ + int res; + erts_smp_rwmtx_rwlock(&environ_rwmtx); + res = unsetenv(key); + erts_smp_rwmtx_rwunlock(&environ_rwmtx); + return res; +} + void sys_init_io(void) { @@ -2524,6 +2534,52 @@ void erts_sys_alloc_init(void) { } +#if ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC +void *erts_sys_aligned_alloc(UWord alignment, UWord size) +{ +#ifdef HAVE_POSIX_MEMALIGN + void *ptr = NULL; + int error; + ASSERT(alignment && (alignment & ~alignment) == 0); /* power of 2 */ + error = posix_memalign(&ptr, (size_t) alignment, (size_t) size); +#if HAVE_ERTS_MSEG + if (error || !ptr) { + erts_mseg_clear_cache(); + error = posix_memalign(&ptr, (size_t) alignment, (size_t) size); + } +#endif + if (error) { + errno = error; + return NULL; + } + if (!ptr) + errno = ENOMEM; + ASSERT(!ptr || (((UWord) ptr) & (alignment - 1)) == 0); + return ptr; +#else +# error "Missing erts_sys_aligned_alloc() implementation" +#endif +} + +void erts_sys_aligned_free(UWord alignment, void *ptr) +{ + ASSERT(alignment && (alignment & ~alignment) == 0); /* power of 2 */ + free(ptr); +} + +void *erts_sys_aligned_realloc(UWord alignment, void *ptr, UWord size, UWord old_size) +{ + void *new_ptr = erts_sys_aligned_alloc(alignment, size); + if (new_ptr) { + UWord copy_size = old_size < size ? old_size : size; + sys_memcpy(new_ptr, ptr, (size_t) copy_size); + erts_sys_aligned_free(alignment, ptr); + } + return new_ptr; +} + +#endif + void *erts_sys_alloc(ErtsAlcType_t t, void *x, Uint sz) { void *res = malloc((size_t) sz); @@ -2592,15 +2648,13 @@ int fd; } -#ifdef DEBUG - extern int erts_initialized; void -erl_assert_error(char* expr, char* file, int line) +erl_assert_error(const char* expr, const char* func, const char* file, int line) { fflush(stdout); - fprintf(stderr, "Assertion failed: %s in %s, line %d\n", - expr, file, line); + fprintf(stderr, "%s:%d:%s() Assertion failed: %s\n", + file, line, func, expr); fflush(stderr); #if !defined(ERTS_SMP) && 0 /* Writing a crashdump from a failed assertion when smp support @@ -2615,6 +2669,8 @@ erl_assert_error(char* expr, char* file, int line) abort(); } +#ifdef DEBUG + void erl_debug(char* fmt, ...) 
{ diff --git a/erts/emulator/sys/win32/erl_win_sys.h b/erts/emulator/sys/win32/erl_win_sys.h index 5ce1a61303..0deb097b1a 100644 --- a/erts/emulator/sys/win32/erl_win_sys.h +++ b/erts/emulator/sys/win32/erl_win_sys.h @@ -82,7 +82,6 @@ #define NO_ERF #define NO_ERFC -#define NO_SYSLOG #define NO_SYSCONF #define NO_DAEMON #define NO_PWD @@ -95,6 +94,8 @@ # define ERTS_I64_LITERAL(X) X##i64 #endif +#define ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC 1 + /* * Practial Windows specific macros. */ diff --git a/erts/emulator/sys/win32/sys.c b/erts/emulator/sys/win32/sys.c index 922967c698..99951bb45e 100755 --- a/erts/emulator/sys/win32/sys.c +++ b/erts/emulator/sys/win32/sys.c @@ -32,7 +32,7 @@ #include "erl_threads.h" #include "../../drivers/win32/win_con.h" #include "erl_cpu_topology.h" - +#include <malloc.h> void erts_sys_init_float(void); @@ -2912,6 +2912,30 @@ void erts_sys_free(ErtsAlcType_t t, void *x, void *p) free(p); } +void *erts_sys_aligned_alloc(UWord alignment, UWord size) +{ + void *ptr; + ASSERT(alignment && (alignment & ~alignment) == 0); /* power of 2 */ + ptr = _aligned_malloc((size_t) size, (size_t) alignment); + ASSERT(!ptr || (((UWord) ptr) & (alignment - 1)) == 0); + return ptr; +} + +void erts_sys_aligned_free(UWord alignment, void *ptr) +{ + ASSERT(alignment && (alignment & ~alignment) == 0); /* power of 2 */ + _aligned_free(ptr); +} + +void *erts_sys_aligned_realloc(UWord alignment, void *ptr, UWord size, UWord old_size) +{ + void *new_ptr; + ASSERT(alignment && (alignment & ~alignment) == 0); /* power of 2 */ + new_ptr = _aligned_realloc(ptr, (size_t) size, (size_t) alignment); + ASSERT(!new_ptr || (((UWord) new_ptr) & (alignment - 1)) == 0); + return new_ptr; +} + static Preload* preloaded = NULL; static unsigned* res_name = NULL; static int num_preloaded = 0; @@ -3200,7 +3224,7 @@ erl_bin_write(buf, sz, max) } void -erl_assert_error(char* expr, char* file, int line) +erl_assert_error(const char* expr, const char* func, const char* file, int line) { char message[1024]; diff --git a/erts/emulator/sys/win32/sys_env.c b/erts/emulator/sys/win32/sys_env.c index 754f4c6e4c..9f977ad6c8 100644 --- a/erts/emulator/sys/win32/sys_env.c +++ b/erts/emulator/sys/win32/sys_env.c @@ -141,6 +141,24 @@ void fini_getenv_state(GETENV_STATE *state) erts_smp_rwmtx_runlock(&environ_rwmtx);
}
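The sys/unix/sys.c hunks above add erts_sys_aligned_alloc(), erts_sys_aligned_realloc() and erts_sys_aligned_free() on top of posix_memalign(), with realloc implemented as allocate-copy-free (the Windows port, also above, maps the same trio onto _aligned_malloc/_aligned_realloc/_aligned_free). A minimal standalone sketch of that Unix pattern follows; the demo_* names and the fixed alignment are illustrative only, and the mseg cache-flush retry of the real wrapper is omitted.

#define _POSIX_C_SOURCE 200112L
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdio.h>

/* posix_memalign-based aligned allocation, as in the Unix wrapper.
 * alignment must be a power of two and a multiple of sizeof(void *). */
static void *demo_aligned_alloc(size_t alignment, size_t size)
{
    void *ptr = NULL;
    int error = posix_memalign(&ptr, alignment, size);
    if (error) {
        errno = error;
        return NULL;
    }
    return ptr;
}

/* Aligned realloc done as alloc + copy + free, mirroring the Unix wrapper. */
static void *demo_aligned_realloc(size_t alignment, void *ptr,
                                  size_t size, size_t old_size)
{
    void *new_ptr = demo_aligned_alloc(alignment, size);
    if (new_ptr) {
        memcpy(new_ptr, ptr, old_size < size ? old_size : size);
        free(ptr);
    }
    return new_ptr;
}

int main(void)
{
    void *p = demo_aligned_alloc(1 << 18, 4096);   /* 256 kB alignment, e.g. super-aligned */
    void *q;
    if (!p)
        return 1;
    q = demo_aligned_realloc(1 << 18, p, 8192, 4096);
    if (q)
        p = q;                                     /* on failure the old block is still valid */
    printf("aligned block at %p\n", p);
    free(p);
    return 0;
}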
+int erts_sys_unsetenv(char *key)
+{
+ int res = 0;
+ WCHAR *wkey = (WCHAR *) key;
+
+ SetLastError(0);
+ erts_smp_rwmtx_rlock(&environ_rwmtx);
+ GetEnvironmentVariableW(wkey,
+ NULL,
+ 0);
+ if (GetLastError() != ERROR_ENVVAR_NOT_FOUND) {
+ res = (SetEnvironmentVariableW(wkey,
+ NULL) ? 0 : 1);
+ }
+ erts_smp_rwmtx_runlock(&environ_rwmtx);
+ return res;
+}
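For reference, the Win32 pattern used just above — probe for the variable with GetEnvironmentVariableW() and GetLastError(), then delete it by passing NULL as the value to SetEnvironmentVariableW() — can be exercised on its own with a sketch like the following (the Unix side of the same change simply wraps unsetenv(3) under environ_rwmtx). demo_unsetenv is an illustrative name, and the emulator's rwlock around the environment is left out.

#include <windows.h>
#include <stdio.h>

/* Returns 0 on success or if the variable did not exist, 1 on failure,
 * following the contract of the function added above. */
static int demo_unsetenv(const WCHAR *key)
{
    int res = 0;

    SetLastError(0);
    /* A zero-length query leaves ERROR_ENVVAR_NOT_FOUND if the variable is absent. */
    GetEnvironmentVariableW(key, NULL, 0);
    if (GetLastError() != ERROR_ENVVAR_NOT_FOUND) {
        /* Passing NULL as the value deletes the variable. */
        res = SetEnvironmentVariableW(key, NULL) ? 0 : 1;
    }
    return res;
}

int main(void)
{
    SetEnvironmentVariableW(L"DEMO_VAR", L"mors");
    printf("unset existing: %d\n", demo_unsetenv(L"DEMO_VAR")); /* 0 */
    printf("unset missing:  %d\n", demo_unsetenv(L"DEMO_VAR")); /* still 0 */
    return 0;
}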
+
char*
win_build_environment(char* new_env)
{
diff --git a/erts/emulator/test/alloc_SUITE.erl b/erts/emulator/test/alloc_SUITE.erl index 801ed0f85a..35c44c229a 100644 --- a/erts/emulator/test/alloc_SUITE.erl +++ b/erts/emulator/test/alloc_SUITE.erl @@ -29,6 +29,7 @@ bucket_mask/1, rbtree/1, mseg_clear_cache/1, + erts_mmap/1, cpool/1]). -export([init_per_testcase/2, end_per_testcase/2]). @@ -41,7 +42,7 @@ suite() -> [{ct_hooks,[ts_install_cth]}]. all() -> [basic, coalesce, threads, realloc_copy, bucket_index, - bucket_mask, rbtree, mseg_clear_cache, cpool]. + bucket_mask, rbtree, mseg_clear_cache, erts_mmap, cpool]. groups() -> []. @@ -110,6 +111,64 @@ cpool(suite) -> []; cpool(doc) -> []; cpool(Cfg) -> ?line drv_case(Cfg). +erts_mmap(Config) when is_list(Config) -> + case {?t:os_type(), is_halfword_vm()} of + {{unix, _}, false} -> + [erts_mmap_do(Config, SCO, SCRPM, SCRFSD) + || SCO <-[true,false], SCRFSD <-[1234,0], SCRPM <- [true,false]]; + + {_,true} -> + {skipped, "No supercarrier support on halfword vm"}; + {SkipOs,_} -> + ?line {skipped, + lists:flatten(["Not run on " + | io_lib:format("~p",[SkipOs])])} + end. + + +erts_mmap_do(Config, SCO, SCRPM, SCRFSD) -> + %% We use the number of schedulers + 1 * approx main carriers size + %% to calculate how large the super carrier has to be + %% and then use a minimum of 100 for systems with a low amount of + %% schedulers + Schldr = erlang:system_info(schedulers_online)+1, + SCS = max(round((262144 * 6 + 3 * 1048576) * Schldr / 1024 / 1024),100), + O1 = "+MMscs" ++ integer_to_list(SCS) + ++ " +MMsco" ++ atom_to_list(SCO) + ++ " +MMscrpm" ++ atom_to_list(SCRPM), + Opts = case SCRFSD of + 0 -> O1; + _ -> O1 ++ " +MMscrfsd"++integer_to_list(SCRFSD) + end, + {ok, Node} = start_node(Config, Opts), + Self = self(), + Ref = make_ref(), + F = fun () -> + SI = erlang:system_info({allocator,mseg_alloc}), + {erts_mmap,EM} = lists:keyfind(erts_mmap, 1, SI), + {supercarrier,SC} = lists:keyfind(supercarrier, 1, EM), + {sizes,Sizes} = lists:keyfind(sizes, 1, SC), + {free_segs,Segs} = lists:keyfind(free_segs,1,SC), + {total,Total} = lists:keyfind(total,1,Sizes), + Total = SCS*1024*1024, + + {reserved,Reserved} = lists:keyfind(reserved,1,Segs), + true = (Reserved >= SCRFSD), + + case {SCO,lists:keyfind(os,1,EM)} of + {true, false} -> ok; + {false, {os,_}} -> ok + end, + + Self ! {Ref, ok} + end, + + spawn_link(Node, F), + Result = receive {Ref, Rslt} -> Rslt end, + stop_node(Node), + Result. + + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% %% %% Internal functions %% @@ -179,7 +238,9 @@ receive_drv_result(Port, CaseName) -> ?line {comment, Comment} end. -start_node(Config) when is_list(Config) -> +start_node(Config) -> + start_node(Config, []). +start_node(Config, Opts) when is_list(Config), is_list(Opts) -> ?line Pa = filename:dirname(code:which(?MODULE)), ?line {A, B, C} = now(), ?line Name = list_to_atom(atom_to_list(?MODULE) @@ -191,7 +252,14 @@ start_node(Config) when is_list(Config) -> ++ integer_to_list(B) ++ "-" ++ integer_to_list(C)), - ?line ?t:start_node(Name, slave, [{args, "-pa "++Pa}]). + ?line ?t:start_node(Name, slave, [{args, Opts++" -pa "++Pa}]). stop_node(Node) -> ?t:stop_node(Node). + +is_halfword_vm() -> + case {erlang:system_info({wordsize, internal}), + erlang:system_info({wordsize, external})} of + {4, 8} -> true; + {WS, WS} -> false + end. 
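The new alloc_SUITE:erts_mmap/1 case above sizes the super carrier from the number of online schedulers and then builds the +MMscs, +MMsco, +MMscrpm and (optionally) +MMscrfsd emulator flags it passes to the slave node. The same sizing arithmetic, restated as a small C sketch for reference; the scheduler count and the particular flag combination are assumed values, not part of the test.

#include <stdio.h>
#include <math.h>

int main(void)
{
    /* SCS in MB = max(round((262144*6 + 3*1048576) * (schedulers_online + 1) / 2^20), 100) */
    int schedulers_online = 8;                    /* assumed for the demo */
    double per_sched_bytes = 262144.0 * 6 + 3 * 1048576.0;
    long scs = lround(per_sched_bytes * (schedulers_online + 1) / 1024.0 / 1024.0);
    if (scs < 100)
        scs = 100;                                /* floor used by the test */

    int sco = 1, scrpm = 0, scrfsd = 1234;        /* one of the tested combinations */
    char opts[128];
    int n = snprintf(opts, sizeof(opts), "+MMscs%ld +MMsco%s +MMscrpm%s",
                     scs, sco ? "true" : "false", scrpm ? "true" : "false");
    if (scrfsd != 0 && n > 0 && (size_t) n < sizeof(opts))
        snprintf(opts + n, sizeof(opts) - n, " +MMscrfsd%d", scrfsd);

    printf("%s\n", opts);   /* e.g. "+MMscs100 +MMscotrue +MMscrpmfalse +MMscrfsd1234" */
    return 0;
}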
diff --git a/erts/emulator/test/bif_SUITE.erl b/erts/emulator/test/bif_SUITE.erl index 02c6de8cb1..fbc229bc53 100644 --- a/erts/emulator/test/bif_SUITE.erl +++ b/erts/emulator/test/bif_SUITE.erl @@ -388,8 +388,12 @@ os_env(Config) when is_list(Config) -> false -> ?line ok; BadVal -> ?line ?t:fail(BadVal) end, - %% os:putenv and os:getenv currently uses a temp buf of size 1024 - %% for storing key+value + true = os:putenv(EnvVar1, "mors"), + true = os:unsetenv(EnvVar1), + false = os:getenv(EnvVar1), + true = os:unsetenv(EnvVar1), % unset unset variable + %% os:putenv, os:getenv and os:unsetenv currently use a temp + %% buffer of size 1024 for storing key+value ?line os_env_long(1010, 1030, "hej hopp"). os_env_long(Min, Max, _Value) when Min > Max -> @@ -398,7 +402,7 @@ os_env_long(Min, Max, Value) -> ?line EnvVar = lists:duplicate(Min, $X), ?line true = os:putenv(EnvVar, Value), ?line Value = os:getenv(EnvVar), - ?line true = os:putenv(EnvVar, ""), + true = os:unsetenv(EnvVar), ?line os_env_long(Min+1, Max, Value). otp_7526(doc) -> diff --git a/erts/emulator/test/big_SUITE_data/eq_big.dat b/erts/emulator/test/big_SUITE_data/eq_big.dat index 5511d1bf10..4ccb33d182 100644 --- a/erts/emulator/test/big_SUITE_data/eq_big.dat +++ b/erts/emulator/test/big_SUITE_data/eq_big.dat @@ -13001,4 +13001,5 @@ 0 = 7153697524993 bsr 475833444444444444444444444444444444444444444444. -1 = -83987348 bsr 475833444444444444444444444444444444444444444444. +0 = 1183140560213014108063589658350 bsr 146783911423364576743092537299333564210980159306769991919205685720763064069663027716481187399048043939495935. diff --git a/erts/emulator/test/binary_SUITE.erl b/erts/emulator/test/binary_SUITE.erl index 2e5f8ce64d..6b5c542b8a 100644 --- a/erts/emulator/test/binary_SUITE.erl +++ b/erts/emulator/test/binary_SUITE.erl @@ -631,7 +631,13 @@ safe_binary_to_term2(Config) when is_list(Config) -> bad_terms(suite) -> []; bad_terms(Config) when is_list(Config) -> - ?line test_terms(fun corrupter/1). + ?line test_terms(fun corrupter/1), + {'EXIT',{badarg,_}} = (catch binary_to_term(<<131,$M,3:32,0,11,22,33>>)), + {'EXIT',{badarg,_}} = (catch binary_to_term(<<131,$M,3:32,9,11,22,33>>)), + {'EXIT',{badarg,_}} = (catch binary_to_term(<<131,$M,0:32,1,11,22,33>>)), + {'EXIT',{badarg,_}} = (catch binary_to_term(<<131,$M,-1:32,1,11,22,33>>)), + ok. + corrupter(Term) when is_function(Term); is_function(hd(Term)); @@ -1221,14 +1227,9 @@ gc() -> gc1() -> ok. bit_sized_binary_sizes(Config) when is_list(Config) -> - ?line [bsbs_1(A) || A <- lists:seq(0, 7)], + ?line [bsbs_1(A) || A <- lists:seq(1, 8)], ok. -bsbs_1(0) -> - BinSize = 32+8, - io:format("A: ~p BinSize: ~p", [0,BinSize]), - Bin = binary_to_term_stress(<<131,$M,5:32,0,0,0,0,0,0>>), - BinSize = bit_size(Bin); bsbs_1(A) -> BinSize = 32+A, io:format("A: ~p BinSize: ~p", [A,BinSize]), diff --git a/erts/emulator/test/call_trace_SUITE.erl b/erts/emulator/test/call_trace_SUITE.erl index eaecd32f95..ef1f2aa04c 100644 --- a/erts/emulator/test/call_trace_SUITE.erl +++ b/erts/emulator/test/call_trace_SUITE.erl @@ -1193,7 +1193,7 @@ bs_sum_b(Acc, <<>>) -> Acc. - + %%% Help functions. expect() -> diff --git a/erts/emulator/test/driver_SUITE.erl b/erts/emulator/test/driver_SUITE.erl index 104bdf8aec..7087542899 100644 --- a/erts/emulator/test/driver_SUITE.erl +++ b/erts/emulator/test/driver_SUITE.erl @@ -367,7 +367,7 @@ compare(Got, Expected) -> ?t:fail(got_bad_data) end. 
- + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% Driver timer test suites %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% @@ -515,7 +515,7 @@ try_change_timer(Port, Timeout) -> ?line test_server:fail("driver failed to timeout") end. - + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% Queue test suites %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% @@ -719,7 +719,7 @@ deq(Port, Size) -> read_head(Port, Size) -> erlang:port_control(Port, ?READ_HEAD, <<Size:32>>). - + driver_unloaded(doc) -> []; driver_unloaded(suite) -> diff --git a/erts/emulator/test/hash_SUITE.erl b/erts/emulator/test/hash_SUITE.erl index 43c9d64af7..738c9c8b16 100644 --- a/erts/emulator/test/hash_SUITE.erl +++ b/erts/emulator/test/hash_SUITE.erl @@ -32,7 +32,7 @@ %% -module(hash_SUITE). -export([basic_test/0,cmp_test/1,range_test/0,spread_test/1, - phash2_test/0, otp_5292_test/0, bit_level_binaries/0, + phash2_test/0, otp_5292_test/0, otp_7127_test/0]). -compile({nowarn_deprecated_function, {erlang,hash,2}}). @@ -152,7 +152,7 @@ otp_5292(Config) when is_list(Config) -> %% Test hashing bit-level binaries. bit_level_binaries(Config) when is_list(Config) -> - bit_level_binaries(). + bit_level_binaries_do(). otp_7127(suite) -> []; @@ -537,7 +537,7 @@ hash_int(Start, End, F) -> md5(T) -> erlang:md5(term_to_binary(T)). -bit_level_binaries() -> +bit_level_binaries_do() -> [3511317,7022633,14044578,28087749,56173436,112344123,90467083|_] = bit_level_all_different(fun erlang:hash/2), [3511317,7022633,14044578,28087749,56173436,112344123,90467083|_] = diff --git a/erts/emulator/test/match_spec_SUITE.erl b/erts/emulator/test/match_spec_SUITE.erl index b56b7ce525..bcc46d78ba 100644 --- a/erts/emulator/test/match_spec_SUITE.erl +++ b/erts/emulator/test/match_spec_SUITE.erl @@ -213,7 +213,7 @@ test_3(Config) when is_list(Config) -> otp_9422(doc) -> []; otp_9422(Config) when is_list(Config) -> - Laps = 1000, + Laps = 10000, ?line Fun1 = fun() -> otp_9422_tracee() end, ?line P1 = spawn_link(?MODULE, loop_runner, [self(), Fun1, Laps]), io:format("spawned ~p as tracee\n", [P1]), @@ -230,7 +230,7 @@ otp_9422(Config) when is_list(Config) -> %%receive after 10*1000 -> ok end, stop_collect(P1), - stop_collect(P2), + stop_collect(P2, abort), ok. otp_9422_tracee() -> @@ -975,7 +975,9 @@ start_collect(P) -> P ! {go, self()}. stop_collect(P) -> - P ! {done, self()}, + stop_collect(P, done). +stop_collect(P, Order) -> + P ! {Order, self()}, receive {gone, P} -> ok @@ -1008,7 +1010,13 @@ loop_runner_cont(_Collector, _Fun, Laps, Laps) -> end; loop_runner_cont(Collector, Fun, N, Laps) -> Fun(), - loop_runner_cont(Collector, Fun, N+1, Laps). + receive + {abort, Collector} -> + io:format("loop_runner ~p aborted after ~p of ~p laps\n", [self(), N+1, Laps]), + Collector ! {gone, self()} + after 0 -> + loop_runner_cont(Collector, Fun, N+1, Laps) + end. f1(X) -> diff --git a/erts/emulator/test/statistics_SUITE.erl b/erts/emulator/test/statistics_SUITE.erl index a93dd309c1..c428be6c5a 100644 --- a/erts/emulator/test/statistics_SUITE.erl +++ b/erts/emulator/test/statistics_SUITE.erl @@ -75,7 +75,7 @@ end_per_group(_GroupName, Config) -> Config. - + %%% Testing statistics(wall_clock). @@ -121,7 +121,7 @@ wall_clock_update1(N) when N > 0 -> wall_clock_update1(0) -> ok. - + %%% Test statistics(runtime). @@ -199,7 +199,7 @@ do_much(N) -> _ = 4784728478274827 * 72874284728472, do_much(N-1). 
- + reductions(doc) -> "Test that statistics(reductions) is callable, and that " "Total_Reductions and Reductions_Since_Last_Call make sense. " @@ -246,7 +246,7 @@ reductions_big_loop() -> reductions_big_loop() end. - + %%% Tests of statistics(run_queue). @@ -295,7 +295,7 @@ hog_iter(N, Mon) when N > 0 -> end; hog_iter(0, Mon) -> ?line hog_iter(10000, Mon). - + %%% Tests of statistics(scheduler_wall_time). scheduler_wall_time(doc) -> @@ -363,7 +363,7 @@ load_percentage([{Id, WN, TN}|Ss], [{Id, WP, TP}|Ps]) -> [100*(WN-WP) div (TN-TP)|load_percentage(Ss, Ps)]; load_percentage([], []) -> []. - + garbage_collection(doc) -> "Tests that statistics(garbage_collection) is callable. " "It is not clear how to test anything more."; diff --git a/erts/emulator/test/system_info_SUITE.erl b/erts/emulator/test/system_info_SUITE.erl index 0350eb671d..ceb4afb5cf 100644 --- a/erts/emulator/test/system_info_SUITE.erl +++ b/erts/emulator/test/system_info_SUITE.erl @@ -37,7 +37,8 @@ init_per_group/2,end_per_group/2, init_per_testcase/2, end_per_testcase/2]). --export([process_count/1, system_version/1, misc_smoke_tests/1, heap_size/1, wordsize/1, memory/1]). +-export([process_count/1, system_version/1, misc_smoke_tests/1, heap_size/1, wordsize/1, memory/1, + ets_limit/1]). -define(DEFAULT_TIMEOUT, ?t:minutes(2)). @@ -45,7 +46,7 @@ suite() -> [{ct_hooks,[ts_install_cth]}]. all() -> [process_count, system_version, misc_smoke_tests, - heap_size, wordsize, memory]. + heap_size, wordsize, memory, ets_limit]. groups() -> []. @@ -496,3 +497,52 @@ mapn(_Fun, 0) -> []; mapn(Fun, N) -> [Fun(N) | mapn(Fun, N-1)]. + +ets_limit(doc) -> + "Verify system_info(ets_limit) reflects max ETS table settings."; +ets_limit(suite) -> []; +ets_limit(Config0) when is_list(Config0) -> + Config = [{testcase,ets_limit}|Config0], + true = is_integer(get_ets_limit(Config)), + 12345 = get_ets_limit(Config, 12345), + ok. + +get_ets_limit(Config) -> + get_ets_limit(Config, 0). +get_ets_limit(Config, EtsMax) -> + Envs = case EtsMax of + 0 -> []; + _ -> [{"ERL_MAX_ETS_TABLES", integer_to_list(EtsMax)}] + end, + {ok, Node} = start_node(Config, Envs), + Me = self(), + Ref = make_ref(), + spawn_link(Node, + fun() -> + Res = erlang:system_info(ets_limit), + unlink(Me), + Me ! {Ref, Res} + end), + receive + {Ref, Res} -> + Res + end, + stop_node(Node), + Res. + +start_node(Config, Envs) when is_list(Config) -> + Pa = filename:dirname(code:which(?MODULE)), + {A, B, C} = now(), + Name = list_to_atom(atom_to_list(?MODULE) + ++ "-" + ++ atom_to_list(?config(testcase, Config)) + ++ "-" + ++ integer_to_list(A) + ++ "-" + ++ integer_to_list(B) + ++ "-" + ++ integer_to_list(C)), + ?t:start_node(Name, peer, [{args, "-pa "++Pa}, {env, Envs}]). + +stop_node(Node) -> + ?t:stop_node(Node). diff --git a/erts/emulator/test/time_SUITE.erl b/erts/emulator/test/time_SUITE.erl index 4d12e3449c..a0a8a9c42c 100644 --- a/erts/emulator/test/time_SUITE.erl +++ b/erts/emulator/test/time_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2011. All Rights Reserved. +%% Copyright Ericsson AB 1997-2013. All Rights Reserved. %% %% The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in @@ -241,14 +241,26 @@ compare(Utc0, Local) -> %% Two linear times can be subtracted to give their difference %% in seconds. 
%% -%% XXX Limitations: The length of months and leap years are not -%% taken into account; thus a comparision of dates is only -%% valid if they are in the SAME month. +%% XXX Limitations: Simplified leap year calc will fail for 2100 :-) linear_time({{Year, Mon, Day}, {Hour, Min, Sec}}) -> - 86400*(366*Year + 31*(Mon-1) + (Day-1)) + + 86400*(year_to_days(Year) + month_to_days(Year,Mon) + (Day-1)) + 3600*Hour + 60*Min + Sec. +year_to_days(Year) -> + Year * 365 + (Year-1) div 4. + +month_to_days(Year, Mon) -> + DoM = [31,days_in_february(Year),31,30,31,30,31,31,30,31,30,31], + {PastMonths,_} = lists:split(Mon-1, DoM), + lists:sum(PastMonths). + +days_in_february(Year) -> + case (Year rem 4) of + 0 -> 29; + _ -> 28 + end. + %% This functions returns either the normal timezone or the %% the DST timezone, depending on the given UTC time. %% diff --git a/erts/emulator/test/trace_SUITE.erl b/erts/emulator/test/trace_SUITE.erl index 0f513f0dcb..2251575e5a 100644 --- a/erts/emulator/test/trace_SUITE.erl +++ b/erts/emulator/test/trace_SUITE.erl @@ -1427,7 +1427,7 @@ receive_nothing() -> ok end. - + %%% Models for various kinds of processes. process(Dest) -> diff --git a/erts/emulator/test/trace_port_SUITE.erl b/erts/emulator/test/trace_port_SUITE.erl index cc2eadafbc..99df8da107 100644 --- a/erts/emulator/test/trace_port_SUITE.erl +++ b/erts/emulator/test/trace_port_SUITE.erl @@ -648,7 +648,7 @@ fun_spawn(Fun, Opts) -> % [] % end. - + %%% Models for various kinds of processes. %% Sends messages when ordered to. |
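The time_SUITE rewrite above drops the old 366-days-per-year, 31-days-per-month approximation in linear_time/1 and instead counts real days: year_to_days/1 adds one leap day every fourth year, month_to_days/2 sums the preceding months, and days_in_february/1 uses the simplified Year rem 4 rule that the comment itself flags as wrong for 2100. A C restatement of that accounting, for reference only:

#include <stdio.h>

/* Simplified leap rule from the test: every 4th year, no century correction. */
static int days_in_february(int year)
{
    return (year % 4 == 0) ? 29 : 28;
}

static long year_to_days(int year)
{
    return (long) year * 365 + (year - 1) / 4;
}

static long month_to_days(int year, int mon)
{
    int dom[12] = { 31, days_in_february(year), 31, 30, 31, 30,
                    31, 31, 30, 31, 30, 31 };
    long days = 0;
    int i;
    for (i = 0; i < mon - 1; i++)       /* sum the months before 'mon' */
        days += dom[i];
    return days;
}

/* Seconds on the same artificial linear scale as linear_time/1. */
static long long linear_time(int y, int mo, int d, int h, int mi, int s)
{
    return 86400LL * (year_to_days(y) + month_to_days(y, mo) + (d - 1))
         + 3600LL * h + 60LL * mi + s;
}

int main(void)
{
    /* A month boundary now differs by exactly one day (86400 s). */
    printf("%lld\n", linear_time(2013, 3, 1, 0, 0, 0)
                   - linear_time(2013, 2, 28, 0, 0, 0));
    return 0;
}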