Diffstat (limited to 'erts/emulator')
-rw-r--r--  erts/emulator/Makefile.in | 1
-rw-r--r--  erts/emulator/beam/beam_emu.c | 3
-rw-r--r--  erts/emulator/beam/beam_load.c | 16
-rw-r--r--  erts/emulator/beam/big.c | 8
-rw-r--r--  erts/emulator/beam/dist.c | 4
-rw-r--r--  erts/emulator/beam/erl_alloc.c | 91
-rw-r--r--  erts/emulator/beam/erl_alloc.h | 10
-rw-r--r--  erts/emulator/beam/erl_alloc_util.c | 121
-rw-r--r--  erts/emulator/beam/erl_alloc_util.h | 49
-rw-r--r--  erts/emulator/beam/erl_ao_firstfit_alloc.c | 3
-rw-r--r--  erts/emulator/beam/erl_bestfit_alloc.c | 3
-rw-r--r--  erts/emulator/beam/erl_bif_binary.c | 3
-rwxr-xr-x  erts/emulator/beam/erl_bif_info.c | 8
-rw-r--r--  erts/emulator/beam/erl_binary.h | 6
-rw-r--r--  erts/emulator/beam/erl_db.c | 11
-rw-r--r--  erts/emulator/beam/erl_db.h | 2
-rw-r--r--  erts/emulator/beam/erl_db_util.h | 2
-rw-r--r--  erts/emulator/beam/erl_driver.h | 8
-rw-r--r--  erts/emulator/beam/erl_drv_thread.c | 42
-rw-r--r--  erts/emulator/beam/erl_init.c | 173
-rw-r--r--  erts/emulator/beam/erl_lock_check.c | 5
-rw-r--r--  erts/emulator/beam/erl_node_container_utils.h | 10
-rw-r--r--  erts/emulator/beam/erl_port_task.c | 23
-rw-r--r--  erts/emulator/beam/erl_process.c | 4
-rw-r--r--  erts/emulator/beam/erl_process.h | 4
-rw-r--r--  erts/emulator/beam/erl_trace.c | 38
-rw-r--r--  erts/emulator/beam/erl_unicode.c | 3
-rw-r--r--  erts/emulator/beam/erl_utils.h | 10
-rw-r--r--  erts/emulator/beam/erl_vm.h | 14
-rw-r--r--  erts/emulator/beam/external.h | 4
-rwxr-xr-x  erts/emulator/beam/global.h | 6
-rw-r--r--  erts/emulator/beam/io.c | 2
-rw-r--r--  erts/emulator/beam/sys.h | 26
-rw-r--r--  erts/emulator/beam/utils.c | 4
-rw-r--r--  erts/emulator/drivers/common/efile_drv.c | 119
-rw-r--r--  erts/emulator/drivers/common/inet_drv.c | 288
-rw-r--r--  erts/emulator/drivers/unix/ttsl_drv.c | 8
-rw-r--r--  erts/emulator/drivers/win32/win_efile.c | 2
-rw-r--r--  erts/emulator/sys/common/erl_mmap.c | 2832
-rw-r--r--  erts/emulator/sys/common/erl_mmap.h | 134
-rw-r--r--  erts/emulator/sys/common/erl_mseg.c | 876
-rw-r--r--  erts/emulator/sys/common/erl_mseg.h | 42
-rw-r--r--  erts/emulator/sys/common/erl_poll.c | 4
-rw-r--r--  erts/emulator/sys/unix/erl_unix_sys.h | 4
-rw-r--r--  erts/emulator/sys/unix/sys.c | 56
-rw-r--r--  erts/emulator/sys/win32/erl_win_sys.h | 3
-rwxr-xr-x  erts/emulator/sys/win32/sys.c | 28
-rw-r--r--  erts/emulator/test/alloc_SUITE.erl | 69
-rw-r--r--  erts/emulator/test/big_SUITE_data/eq_big.dat | 1
-rw-r--r--  erts/emulator/test/call_trace_SUITE.erl | 2
-rw-r--r--  erts/emulator/test/driver_SUITE.erl | 6
-rw-r--r--  erts/emulator/test/num_bif_SUITE.erl | 22
-rw-r--r--  erts/emulator/test/scheduler_SUITE.erl | 64
-rw-r--r--  erts/emulator/test/statistics_SUITE.erl | 12
-rw-r--r--  erts/emulator/test/system_info_SUITE.erl | 54
-rw-r--r--  erts/emulator/test/time_SUITE.erl | 22
-rw-r--r--  erts/emulator/test/trace_SUITE.erl | 2
-rw-r--r--  erts/emulator/test/trace_port_SUITE.erl | 2
58 files changed, 4260 insertions(+), 1109 deletions(-)
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index 048e92baad..5638683f88 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -852,6 +852,7 @@ OS_OBJS += $(OBJDIR)/erl_poll.o \
endif
OS_OBJS += $(OBJDIR)/erl_mseg.o \
+ $(OBJDIR)/erl_mmap.o \
$(OBJDIR)/erl_$(ERLANG_OSTYPE)_sys_ddll.o \
$(OBJDIR)/erl_mtrace_sys_wrap.o \
$(OBJDIR)/erl_sys_common_misc.o
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index da36c4437e..78ab6fa30f 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -5654,7 +5654,6 @@ build_stacktrace(Process* c_p, Eterm exc) {
return res;
}
-
static BeamInstr*
call_error_handler(Process* p, BeamInstr* fi, Eterm* reg, Eterm func)
{
@@ -5702,7 +5701,6 @@ call_error_handler(Process* p, BeamInstr* fi, Eterm* reg, Eterm func)
return ep->addressv[erts_active_code_ix()];
}
-
static Export*
apply_setup_error_handler(Process* p, Eterm module, Eterm function, Uint arity, Eterm* reg)
{
@@ -6208,7 +6206,6 @@ new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free)
return make_fun(funp);
}
-
int catchlevel(Process *p)
{
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index 4193eb4f3f..938fd8f2c9 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -535,7 +535,6 @@ static int must_swap_floats;
Uint erts_total_code_size;
/**********************************************************************/
-
void init_load(void)
{
FloatDef f;
@@ -1209,7 +1208,6 @@ verify_chunks(LoaderState* stp)
return 0;
}
-
static int
load_atom_table(LoaderState* stp)
{
@@ -1255,7 +1253,6 @@ load_atom_table(LoaderState* stp)
return 0;
}
-
static int
load_import_table(LoaderState* stp)
{
@@ -1308,7 +1305,6 @@ load_import_table(LoaderState* stp)
return 0;
}
-
static int
read_export_table(LoaderState* stp)
{
@@ -1641,7 +1637,6 @@ read_line_table(LoaderState* stp)
return 0;
}
-
static int
read_code_header(LoaderState* stp)
{
@@ -1711,7 +1706,6 @@ read_code_header(LoaderState* stp)
return 0;
}
-
#define VerifyTag(Stp, Actual, Expected) \
if (Actual != Expected) { \
LoadError2(Stp, "bad tag %d; expected %d", Actual, Expected); \
@@ -1730,7 +1724,6 @@ read_code_header(LoaderState* stp)
#define TermWords(t) (((t) / (sizeof(BeamInstr)/sizeof(Eterm))) + !!((t) % (sizeof(BeamInstr)/sizeof(Eterm))))
-
static int
load_code(LoaderState* stp)
{
@@ -2512,7 +2505,6 @@ load_code(LoaderState* stp)
return retval;
}
-
#define succ(St, X, Y) ((X).type == (Y).type && (X).val + 1 == (Y).val)
#define succ2(St, X, Y) ((X).type == (Y).type && (X).val + 2 == (Y).val)
#define succ3(St, X, Y) ((X).type == (Y).type && (X).val + 3 == (Y).val)
@@ -3958,7 +3950,6 @@ tuple_append_put(LoaderState* stp, GenOpArg Arity, GenOpArg Dst,
}
-
/*
* Freeze the code in memory, move the string table into place,
* resolve all labels.
@@ -4276,7 +4267,6 @@ freeze_code(LoaderState* stp)
return 0;
}
-
static void
final_touch(LoaderState* stp)
{
@@ -4378,7 +4368,6 @@ final_touch(LoaderState* stp)
}
}
-
static int
transform_engine(LoaderState* st)
{
@@ -4716,7 +4705,6 @@ transform_engine(LoaderState* st)
return rval;
}
-
static void
short_file(int line, LoaderState* stp, unsigned needed)
{
@@ -4724,7 +4712,6 @@ short_file(int line, LoaderState* stp, unsigned needed)
stp->file_name, needed);
}
-
static void
load_printf(int line, LoaderState* context, char *fmt,...)
{
@@ -5190,7 +5177,6 @@ native_addresses(Process* p, Eterm mod)
return result;
}
-
/*
* Builds a list of all exported functions in the given module:
* [{Name, Arity},...]
@@ -5240,7 +5226,6 @@ exported_from_module(Process* p, /* Process whose heap to use. */
return result;
}
-
/*
* Returns a list of all attributes for the module.
*
@@ -5281,7 +5266,6 @@ attributes_for_module(Process* p, /* Process whose heap to use. */
return result;
}
-
/*
* Returns a list containing compilation information.
*
diff --git a/erts/emulator/beam/big.c b/erts/emulator/beam/big.c
index 6b43c53985..2b27b111d8 100644
--- a/erts/emulator/beam/big.c
+++ b/erts/emulator/beam/big.c
@@ -1325,9 +1325,9 @@ static dsize_t I_lshift(ErtsDigit* x, dsize_t xl, Sint y,
return 1;
}
else {
- SWord ay = (y < 0) ? -y : y;
- int bw = ay / D_EXP;
- int sw = ay % D_EXP;
+ Uint ay = (y < 0) ? -y : y;
+ Uint bw = ay / D_EXP;
+ Uint sw = ay % D_EXP;
dsize_t rl;
ErtsDigit a1=0;
ErtsDigit a0=0;
@@ -1368,7 +1368,7 @@ static dsize_t I_lshift(ErtsDigit* x, dsize_t xl, Sint y,
}
if (sign) {
- int zl = bw;
+ Uint zl = bw;
ErtsDigit* z = x;
while(zl--) {
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index 44f4eb9d43..aabccac822 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -353,7 +353,7 @@ static void doit_link_net_exits_sub(ErtsLink *sublnk, void *vlnecp)
static void doit_link_net_exits(ErtsLink *lnk, void *vnecp)
{
LinkNetExitsContext lnec = {(NetExitsContext *) vnecp, lnk};
- ASSERT(lnk->type == LINK_PID)
+ ASSERT(lnk->type == LINK_PID);
erts_sweep_links(ERTS_LINK_ROOT(lnk), &doit_link_net_exits_sub, (void *) &lnec);
#ifdef DEBUG
ERTS_LINK_ROOT(lnk) = NULL;
@@ -369,7 +369,7 @@ static void doit_node_link_net_exits(ErtsLink *lnk, void *vnecp)
Process *rp;
ErtsLink *rlnk;
Uint i,n;
- ASSERT(lnk->type == LINK_NODE)
+ ASSERT(lnk->type == LINK_NODE);
if (is_internal_pid(lnk->pid)) {
ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK;
rp = erts_pid2proc(NULL, 0, lnk->pid, rp_locks);
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 5eacff8829..d8da616d05 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -267,7 +267,6 @@ set_default_sl_alloc_opts(struct au_init *ip)
ip->atype = GOODFIT;
#endif
ip->init.util.name_prefix = "sl_";
- ip->init.util.mmmbc = 5;
ip->init.util.alloc_no = ERTS_ALC_A_SHORT_LIVED;
#ifndef SMALL_MEMORY
ip->init.util.mmbcs = 128*1024; /* Main carrier size */
@@ -295,7 +294,6 @@ set_default_std_alloc_opts(struct au_init *ip)
ip->atype = BESTFIT;
#endif
ip->init.util.name_prefix = "std_";
- ip->init.util.mmmbc = 5;
ip->init.util.alloc_no = ERTS_ALC_A_STANDARD;
#ifndef SMALL_MEMORY
ip->init.util.mmbcs = 128*1024; /* Main carrier size */
@@ -319,7 +317,6 @@ set_default_ll_alloc_opts(struct au_init *ip)
#endif
ip->init.util.ramv = 0;
ip->init.util.mmsbc = 0;
- ip->init.util.mmmbc = 0;
ip->init.util.sbct = ~((UWord) 0);
ip->init.util.name_prefix = "ll_";
ip->init.util.alloc_no = ERTS_ALC_A_LONG_LIVED;
@@ -370,7 +367,6 @@ set_default_eheap_alloc_opts(struct au_init *ip)
ip->thr_spec = 1;
ip->atype = GOODFIT;
#endif
- ip->init.util.mmmbc = 100;
ip->init.util.name_prefix = "eheap_";
ip->init.util.alloc_no = ERTS_ALC_A_EHEAP;
#ifndef SMALL_MEMORY
@@ -397,7 +393,6 @@ set_default_binary_alloc_opts(struct au_init *ip)
ip->thr_spec = 1;
ip->atype = BESTFIT;
#endif
- ip->init.util.mmmbc = 50;
ip->init.util.name_prefix = "binary_";
ip->init.util.alloc_no = ERTS_ALC_A_BINARY;
#ifndef SMALL_MEMORY
@@ -419,7 +414,6 @@ set_default_ets_alloc_opts(struct au_init *ip)
ip->thr_spec = 1;
ip->atype = BESTFIT;
#endif
- ip->init.util.mmmbc = 100;
ip->init.util.name_prefix = "ets_";
ip->init.util.alloc_no = ERTS_ALC_A_ETS;
#ifndef SMALL_MEMORY
@@ -495,13 +489,6 @@ adjust_tpref(struct au_init *ip, int no_sched)
/* ... shrink smallest multi-block carrier size */
if (ip->default_.smbcs)
ip->init.util.smbcs /= ERTS_MIN(4, no_sched);
- /* ... and more than three allocators shrink
- max mseg multi-block carriers */
- if (ip->default_.mmmbc && no_sched > 2) {
- ip->init.util.mmmbc /= ERTS_MIN(4, no_sched - 1);
- if (ip->init.util.mmmbc < 3)
- ip->init.util.mmmbc = 3;
- }
}
}
@@ -731,6 +718,7 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
init.mseg.nos = erts_no_schedulers;
erts_mseg_init(&init.mseg);
#endif
+
erts_alcu_init(&init.alloc_util);
erts_afalc_init();
erts_bfalc_init();
@@ -1187,6 +1175,25 @@ get_kb_value(char *param_end, char** argv, int* ip)
return ((Uint) tmp)*1024;
}
+static UWord
+get_mb_value(char *param_end, char** argv, int* ip)
+{
+ SWord tmp;
+ UWord max = ((~((UWord) 0))/(1024*1024)) + 1;
+ char *rest;
+ char *param = argv[*ip]+1;
+ char *value = get_value(param_end, argv, ip);
+ errno = 0;
+ tmp = (SWord) ErtsStrToSint(value, &rest, 10);
+ if (errno != 0 || rest == value || tmp < 0 || max < ((UWord) tmp))
+ bad_value(param, param_end, value);
+ if (max == (UWord) tmp)
+ return ~((UWord) 0);
+ else
+ return ((UWord) tmp)*1024*1024;
+}
+
+
#if 0
static Uint
get_byte_value(char *param_end, char** argv, int* ip)
@@ -1461,6 +1468,30 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
#endif
get_amount_value(argv[i]+6, argv, &i);
}
+ else if (has_prefix("scs", argv[i]+3)) {
+#if HAVE_ERTS_MSEG
+ init->mseg.mmap.scs =
+#endif
+ get_mb_value(argv[i]+6, argv, &i);
+ }
+ else if (has_prefix("sco", argv[i]+3)) {
+#if HAVE_ERTS_MSEG
+ init->mseg.mmap.sco =
+#endif
+ get_bool_value(argv[i]+6, argv, &i);
+ }
+ else if (has_prefix("scrpm", argv[i]+3)) {
+#if HAVE_ERTS_MSEG
+ init->mseg.mmap.scrpm =
+#endif
+ get_bool_value(argv[i]+8, argv, &i);
+ }
+ else if (has_prefix("scmgc", argv[i]+3)) {
+#if HAVE_ERTS_MSEG
+ init->mseg.mmap.scmgc =
+#endif
+ get_amount_value(argv[i]+8, argv, &i);
+ }
else {
bad_param(param, param+2);
}
@@ -1560,7 +1591,6 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
aui[a]->thr_spec = 0;
check_disable_carrier_migration(aui[a]);
aui[a]->init.util.ramv = 0;
- aui[a]->init.util.mmmbc = 10;
aui[a]->init.util.lmbcs = 5*1024*1024;
}
}
@@ -2682,6 +2712,7 @@ erts_allocator_info(int to, void *arg)
#if HAVE_ERTS_MSEG
{
+ struct erts_mmap_info_struct emis;
#ifdef ERTS_SMP
int max = (int) erts_no_schedulers;
#else
@@ -2692,6 +2723,8 @@ erts_allocator_info(int to, void *arg)
erts_print(to, arg, "=allocator:mseg_alloc[%d]\n", i);
erts_mseg_info(i, &to, arg, 0, NULL, NULL);
}
+ erts_print(to, arg, "=allocator:mseg_alloc.erts_mmap\n");
+ erts_mmap_info(&to, arg, NULL, NULL, &emis);
}
#endif
@@ -2716,8 +2749,8 @@ erts_allocator_options(void *proc)
#endif
Uint sz, *szp, *hp, **hpp;
Eterm res, features, settings;
- Eterm atoms[ERTS_ALC_A_MAX-ERTS_ALC_A_MIN+5];
- Uint terms[ERTS_ALC_A_MAX-ERTS_ALC_A_MIN+5];
+ Eterm atoms[ERTS_ALC_A_MAX-ERTS_ALC_A_MIN+6];
+ Uint terms[ERTS_ALC_A_MAX-ERTS_ALC_A_MIN+6];
int a, length;
SysAllocStat sas;
Uint *endp = NULL;
@@ -2830,6 +2863,9 @@ erts_allocator_options(void *proc)
if (use_mseg)
terms[length++] = am_atom_put("mseg_alloc", 10);
#endif
+#if ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
+ terms[length++] = am_atom_put("sys_aligned_alloc", 17);
+#endif
features = length ? erts_bld_list(hpp, szp, length, terms) : NIL;
@@ -2915,6 +2951,7 @@ reply_alloc_info(void *vair)
Uint sz, *szp;
ErlOffHeap *ohp = NULL;
ErlHeapFragment *bp = NULL;
+ struct erts_mmap_info_struct emis;
int i;
Eterm (*info_func)(Allctr_t *,
int,
@@ -3027,15 +3064,23 @@ reply_alloc_info(void *vair)
? NIL
: erts_mseg_info(0, NULL, NULL, hpp != NULL,
hpp, szp));
- ainfo = erts_bld_tuple(hpp, szp, 3,
- alloc_atom,
- make_small(0),
- ainfo);
+ ainfo = erts_bld_tuple3(hpp, szp,
+ alloc_atom,
+ make_small(0),
+ ainfo);
+
+ ai_list = erts_bld_cons(hpp, szp,
+ ainfo, ai_list);
+ ainfo = (air->only_sz ? NIL : erts_mmap_info(NULL, NULL, hpp, szp, &emis));
+ ainfo = erts_bld_tuple3(hpp, szp,
+ alloc_atom,
+ erts_bld_atom(hpp,szp,"erts_mmap"),
+ ainfo);
#else
- ainfo = erts_bld_tuple(hpp, szp, 2, alloc_atom,
- am_false);
+ ainfo = erts_bld_tuple2(hpp, szp, alloc_atom,
+ am_false);
#endif
- break;
+ break;
default:
alloc_atom = erts_bld_atom(hpp, szp,
(char *) ERTS_ALC_A2AD(ai));
diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h
index b5975c6c32..f83f6b39cf 100644
--- a/erts/emulator/beam/erl_alloc.h
+++ b/erts/emulator/beam/erl_alloc.h
@@ -54,6 +54,16 @@ void erts_sys_alloc_init(void);
void *erts_sys_alloc(ErtsAlcType_t, void *, Uint);
void *erts_sys_realloc(ErtsAlcType_t, void *, void *, Uint);
void erts_sys_free(ErtsAlcType_t, void *, void *);
+#if ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
+/*
+ * Note 'alignment' must remain the same in calls to
+ * 'erts_sys_aligned_realloc()' and 'erts_sys_aligned_free()'
+ * as in the initial call to 'erts_sys_aligned_alloc()'.
+ */
+void *erts_sys_aligned_alloc(UWord alignment, UWord size);
+void *erts_sys_aligned_realloc(UWord alignment, void *ptr, UWord size, UWord old_size);
+void erts_sys_aligned_free(UWord alignment, void *ptr);
+#endif
Eterm erts_memory(int *, void *, void *, Eterm);
Eterm erts_allocated_areas(int *, void *, void *);
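
The comment added to erl_alloc.h above stresses that the alignment passed to erts_sys_aligned_realloc() and erts_sys_aligned_free() must match the one used in the original erts_sys_aligned_alloc() call. A minimal sketch of that contract, with an arbitrary power-of-two alignment and no error handling:

    /* Illustrative only -- not part of the patch. Shows the invariant
     * documented above: keep 'align' identical across the three calls. */
    #if ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
    static void aligned_alloc_sketch(void)
    {
        UWord align = ((UWord) 1) << 18;       /* some power of two */
        void *p = erts_sys_aligned_alloc(align, 2*align);
        if (p) {
            p = erts_sys_aligned_realloc(align, p, 4*align, 2*align);
            if (p)
                erts_sys_aligned_free(align, p);   /* same 'align' as the alloc */
        }
    }
    #endif
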
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index e6d9f83aed..1fdee4db2c 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -81,16 +81,12 @@
static int atoms_initialized = 0;
static int initialized = 0;
-
#define INV_SYS_ALLOC_CARRIER_MASK ((UWord) (sys_alloc_carrier_size - 1))
#define SYS_ALLOC_CARRIER_MASK (~INV_SYS_ALLOC_CARRIER_MASK)
#define SYS_ALLOC_CARRIER_FLOOR(X) ((X) & SYS_ALLOC_CARRIER_MASK)
#define SYS_ALLOC_CARRIER_CEILING(X) \
SYS_ALLOC_CARRIER_FLOOR((X) + INV_SYS_ALLOC_CARRIER_MASK)
-#undef ASSERT
-#define ASSERT ASSERT_EXPR
-
#if 0
/* Can be useful for debugging */
#define MBC_REALLOC_ALWAYS_MOVES
@@ -194,9 +190,9 @@ MBC after deallocating first block:
#if MBC_ABLK_OFFSET_BITS
-# define MBC_SZ_MAX_LIMIT ((((UWord)1 << MBC_ABLK_OFFSET_BITS) - 1) << MSEG_ALIGN_BITS)
+# define MBC_SZ_MAX_LIMIT ((((UWord)1 << MBC_ABLK_OFFSET_BITS) - 1) << ERTS_SUPER_ALIGN_BITS)
-# define BLK_CARRIER_OFFSET(B, C) (((char*)(B) - (char*)(C)) >> MSEG_UNIT_SHIFT)
+# define BLK_CARRIER_OFFSET(B, C) (((char*)(B) - (char*)(C)) >> ERTS_SACRR_UNIT_SHIFT)
# define SET_MBC_ABLK_HDR(B, Sz, F, C) \
(ASSERT(((Sz) & ~MBC_ABLK_SZ_MASK) == 0), \
@@ -210,7 +206,7 @@ MBC after deallocating first block:
(B)->u.carrier = (C))
# define IS_MBC_FIRST_ABLK(AP,B) \
- ((((UWord)(B) & ~MSEG_UNIT_MASK) == MBC_HEADER_SIZE(AP)) \
+ ((((UWord)(B) & ~ERTS_SACRR_UNIT_MASK) == MBC_HEADER_SIZE(AP)) \
&& ((B)->bhdr & MBC_ABLK_OFFSET_MASK) == 0)
# define IS_MBC_FIRST_FBLK(AP,B) \
@@ -760,8 +756,9 @@ static ERTS_INLINE void *
alcu_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags)
{
void *res;
-
- res = erts_mseg_alloc_opt(allctr->alloc_no, size_p, flags, &allctr->mseg_opt);
+ UWord size = (UWord) *size_p;
+ res = erts_mseg_alloc_opt(allctr->alloc_no, &size, flags, &allctr->mseg_opt);
+ *size_p = (Uint) size;
INC_CC(allctr->calls.mseg_alloc);
return res;
}
@@ -770,9 +767,10 @@ static ERTS_INLINE void *
alcu_mseg_realloc(Allctr_t *allctr, void *seg, Uint old_size, Uint *new_size_p)
{
void *res;
-
- res = erts_mseg_realloc_opt(allctr->alloc_no, seg, old_size, new_size_p,
+ UWord new_size = (UWord) *new_size_p;
+ res = erts_mseg_realloc_opt(allctr->alloc_no, seg, (UWord) old_size, &new_size,
ERTS_MSEG_FLG_NONE, &allctr->mseg_opt);
+ *new_size_p = (Uint) new_size;
INC_CC(allctr->calls.mseg_realloc);
return res;
}
@@ -780,18 +778,22 @@ alcu_mseg_realloc(Allctr_t *allctr, void *seg, Uint old_size, Uint *new_size_p)
static ERTS_INLINE void
alcu_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size, Uint flags)
{
- erts_mseg_dealloc_opt(allctr->alloc_no, seg, size, flags, &allctr->mseg_opt);
+ erts_mseg_dealloc_opt(allctr->alloc_no, seg, (UWord) size, flags, &allctr->mseg_opt);
INC_CC(allctr->calls.mseg_dealloc);
}
#endif
static ERTS_INLINE void *
-alcu_sys_alloc(Allctr_t *allctr, Uint size)
+alcu_sys_alloc(Allctr_t *allctr, Uint size, int superalign)
{
void *res;
-
- res = erts_sys_alloc(0, NULL, size);
+#if ERTS_SA_MB_CARRIERS && ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
+ if (superalign)
+ res = erts_sys_aligned_alloc(ERTS_SACRR_UNIT_SZ, size);
+ else
+#endif
+ res = erts_sys_alloc(0, NULL, size);
INC_CC(allctr->calls.sys_alloc);
if (erts_mtrace_enabled)
erts_mtrace_crr_alloc(res, allctr->alloc_no, ERTS_ALC_A_SYSTEM, size);
@@ -799,11 +801,16 @@ alcu_sys_alloc(Allctr_t *allctr, Uint size)
}
static ERTS_INLINE void *
-alcu_sys_realloc(Allctr_t *allctr, void *ptr, Uint size)
+alcu_sys_realloc(Allctr_t *allctr, void *ptr, Uint size, Uint old_size, int superalign)
{
void *res;
- res = erts_sys_realloc(0, NULL, ptr, size);
+#if ERTS_SA_MB_CARRIERS && ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
+ if (superalign)
+ res = erts_sys_aligned_realloc(ERTS_SACRR_UNIT_SZ, ptr, size, old_size);
+ else
+#endif
+ res = erts_sys_realloc(0, NULL, ptr, size);
INC_CC(allctr->calls.sys_realloc);
if (erts_mtrace_enabled)
erts_mtrace_crr_realloc(res,
@@ -815,9 +822,14 @@ alcu_sys_realloc(Allctr_t *allctr, void *ptr, Uint size)
}
static ERTS_INLINE void
-alcu_sys_free(Allctr_t *allctr, void *ptr)
+alcu_sys_free(Allctr_t *allctr, void *ptr, int superalign)
{
- erts_sys_free(0, NULL, ptr);
+#if ERTS_SA_MB_CARRIERS && ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
+ if (superalign)
+ erts_sys_aligned_free(ERTS_SACRR_UNIT_SZ, ptr);
+ else
+#endif
+ erts_sys_free(0, NULL, ptr);
INC_CC(allctr->calls.sys_free);
if (erts_mtrace_enabled)
erts_mtrace_crr_free(allctr->alloc_no, ERTS_ALC_A_SYSTEM, ptr);
@@ -1334,7 +1346,7 @@ erts_alcu_fix_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
return fix_nocpool_alloc_shrink(allctr, flgs);
}
-static void dealloc_carrier(Allctr_t *allctr, Carrier_t *crr, Uint mseg_flags);
+static void dealloc_carrier(Allctr_t *allctr, Carrier_t *crr, int superaligned);
#ifdef ERTS_SMP
@@ -1942,7 +1954,7 @@ mbc_alloc_block(Allctr_t *allctr, Uint size, Uint *blk_szp)
if (!blk) {
blk = create_carrier(allctr, get_blk_sz, CFLG_MBC);
-#if !HALFWORD_HEAP && !HAVE_SUPER_ALIGNED_MB_CARRIERS
+#if !HALFWORD_HEAP && !ERTS_SUPER_ALIGNED_MSEG_ONLY
if (!blk) {
/* Emergency! We couldn't create the carrier as we wanted.
Try to place it in a sys_alloced sbc. */
@@ -2978,7 +2990,7 @@ check_pending_dealloc_carrier(Allctr_t *allctr,
dcrr = crr;
crr = crr->next;
- dealloc_carrier(allctr, dcrr, ERTS_MSEG_FLG_2POW);
+ dealloc_carrier(allctr, dcrr, 1);
i++;
} while (crr && i < ERTS_ALC_MAX_DEALLOC_CARRIER);
@@ -3013,7 +3025,7 @@ schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr)
erts_aint_t max_size;
if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
- dealloc_carrier(allctr, crr, ERTS_MSEG_FLG_2POW);
+ dealloc_carrier(allctr, crr, 1);
return;
}
@@ -3053,7 +3065,7 @@ schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr)
if (crr->cpool.thr_prgr == ERTS_THR_PRGR_INVALID
|| erts_thr_progress_has_reached(crr->cpool.thr_prgr)) {
- dealloc_carrier(allctr, crr, ERTS_MSEG_FLG_2POW);
+ dealloc_carrier(allctr, crr, 1);
return;
}
@@ -3188,10 +3200,10 @@ cpool_read_stat(Allctr_t *allctr, UWord *nocp, UWord *cszp, UWord *nobp, UWord *
#ifdef DEBUG
-#if HAVE_ERTS_MSEG
-#define ASSERT_MSEG_UNIT_SIZE_MULTIPLE(CSZ) ASSERT((CSZ) % MSEG_UNIT_SZ == 0)
+#if ERTS_SA_MB_CARRIERS
+#define ASSERT_ERTS_SACRR_UNIT_SIZE_MULTIPLE(CSZ) ASSERT((CSZ) % ERTS_SACRR_UNIT_SZ == 0)
#else
-#define ASSERT_MSEG_UNIT_SIZE_MULTIPLE(CSZ)
+#define ASSERT_ERTS_SACRR_UNIT_SIZE_MULTIPLE(CSZ)
#endif
static void CHECK_1BLK_CARRIER(Allctr_t* A, int SBC, int MSEGED, Carrier_t* C,
@@ -3213,10 +3225,12 @@ static void CHECK_1BLK_CARRIER(Allctr_t* A, int SBC, int MSEGED, Carrier_t* C,
ASSERT(IS_MBC_BLK((B)));
ASSERT(IS_MB_CARRIER((C)));
ASSERT(FBLK_TO_MBC(B) == (C));
+ if ((MSEGED)) {
+ ASSERT_ERTS_SACRR_UNIT_SIZE_MULTIPLE((CSZ));
+ }
}
if ((MSEGED)) {
ASSERT(IS_MSEG_CARRIER((C)));
- ASSERT_MSEG_UNIT_SIZE_MULTIPLE((CSZ));
}
else {
ASSERT(IS_SYS_ALLOC_CARRIER((C)));
@@ -3244,7 +3258,7 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
#if HALFWORD_HEAP
flags |= CFLG_FORCE_MSEG;
-#elif HAVE_SUPER_ALIGNED_MB_CARRIERS
+#elif ERTS_SUPER_ALIGNED_MSEG_ONLY
if (flags & CFLG_MBC) {
flags |= CFLG_FORCE_MSEG;
}
@@ -3287,7 +3301,7 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
if (allctr->sbcs.curr.norm.mseg.no >= allctr->max_mseg_sbcs)
goto try_sys_alloc;
}
-#if !HAVE_SUPER_ALIGNED_MB_CARRIERS
+#if !ERTS_SUPER_ALIGNED_MSEG_ONLY
else {
if (allctr->mbcs.curr.norm.mseg.no >= allctr->max_mseg_mbcs)
goto try_sys_alloc;
@@ -3350,12 +3364,12 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
? UNIT_CEILING(bcrr_sz)
: SYS_ALLOC_CARRIER_CEILING(bcrr_sz));
- crr = (Carrier_t *) alcu_sys_alloc(allctr, crr_sz);
+ crr = (Carrier_t *) alcu_sys_alloc(allctr, crr_sz, flags & CFLG_MBC);
if (!crr) {
if (crr_sz > UNIT_CEILING(bcrr_sz)) {
crr_sz = UNIT_CEILING(bcrr_sz);
- crr = (Carrier_t *) alcu_sys_alloc(allctr, crr_sz);
+ crr = (Carrier_t *) alcu_sys_alloc(allctr, crr_sz, flags & CFLG_MBC);
}
if (!crr) {
#if HAVE_ERTS_MSEG
@@ -3453,7 +3467,7 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
if (!(flags & CFLG_FORCE_SYS_ALLOC)) {
new_crr_sz = new_blk_sz + SBC_HEADER_SIZE;
- new_crr_sz = MSEG_UNIT_CEILING(new_crr_sz);
+ new_crr_sz = ERTS_SACRR_UNIT_CEILING(new_crr_sz);
new_crr = (Carrier_t *) alcu_mseg_realloc(allctr,
old_crr,
old_crr_sz,
@@ -3503,7 +3517,9 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
new_crr = (Carrier_t *) alcu_sys_realloc(allctr,
(void *) old_crr,
- new_crr_sz);
+ new_crr_sz,
+ old_crr_sz,
+ 0);
if (new_crr) {
sys_realloc_success:
SET_CARRIER_SZ(new_crr, new_crr_sz);
@@ -3522,7 +3538,9 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
new_crr_sz = UNIT_CEILING(new_crr_sz);
new_crr = (Carrier_t *) alcu_sys_realloc(allctr,
(void *) old_crr,
- new_crr_sz);
+ new_crr_sz,
+ old_crr_sz,
+ 0);
if (new_crr)
goto sys_realloc_success;
}
@@ -3541,7 +3559,7 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
(void *) BLK2UMEM(old_blk),
MIN(new_blk_sz, old_blk_sz) - ABLK_HDR_SZ);
unlink_carrier(&allctr->sbc_list, old_crr);
- alcu_sys_free(allctr, old_crr);
+ alcu_sys_free(allctr, old_crr, 0);
}
else {
/* Old carrier unchanged; restore... */
@@ -3554,14 +3572,17 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
}
static void
-dealloc_carrier(Allctr_t *allctr, Carrier_t *crr, Uint mseg_flags)
+dealloc_carrier(Allctr_t *allctr, Carrier_t *crr, int superaligned)
{
#if HAVE_ERTS_MSEG
if (IS_MSEG_CARRIER(crr))
- alcu_mseg_dealloc(allctr, crr, CARRIER_SZ(crr), mseg_flags);
+ alcu_mseg_dealloc(allctr, crr, CARRIER_SZ(crr),
+ (superaligned
+ ? ERTS_MSEG_FLG_2POW
+ : ERTS_MSEG_FLG_NONE));
else
#endif
- alcu_sys_free(allctr, crr);
+ alcu_sys_free(allctr, crr, superaligned);
}
static void
@@ -3581,7 +3602,6 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp)
#if HAVE_ERTS_MSEG
if (IS_MSEG_CARRIER(crr)) {
- ASSERT(crr_sz % MSEG_UNIT_SZ == 0);
STAT_MSEG_SBC_FREE(allctr, crr_sz, blk_sz);
}
else
@@ -3590,7 +3610,7 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp)
unlink_carrier(&allctr->sbc_list, crr);
- dealloc_carrier(allctr, crr, ERTS_MSEG_FLG_NONE);
+ dealloc_carrier(allctr, crr, 0);
}
else {
ASSERT(IS_MBC_FIRST_FBLK(allctr, blk));
@@ -3624,7 +3644,7 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp)
unlink_carrier(&allctr->mbc_list, crr);
#if HAVE_ERTS_MSEG
if (IS_MSEG_CARRIER(crr)) {
- ASSERT(crr_sz % MSEG_UNIT_SZ == 0);
+ ASSERT(crr_sz % ERTS_SACRR_UNIT_SZ == 0);
STAT_MSEG_MBC_FREE(allctr, crr_sz);
}
else
@@ -3635,10 +3655,9 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp)
#ifdef ERTS_SMP
schedule_dealloc_carrier(allctr, crr);
#else
- dealloc_carrier(allctr, crr, ERTS_MSEG_FLG_2POW);
+ dealloc_carrier(allctr, crr, 1);
#endif
}
-
}
@@ -5086,7 +5105,7 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
crr_sz = SYS_ALLOC_CARRIER_CEILING(used_sz);
#if HAVE_ERTS_MSEG
else
- crr_sz = MSEG_UNIT_CEILING(used_sz);
+ crr_sz = ERTS_SACRR_UNIT_CEILING(used_sz);
#endif
diff_sz_val = crr_sz - used_sz;
if (diff_sz_val < (~((Uint) 0) / 100))
@@ -5454,7 +5473,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->mbc_move_threshold = init->rmbcmt;
#if HAVE_ERTS_MSEG
allctr->max_mseg_sbcs = init->mmsbc;
-# if HAVE_SUPER_ALIGNED_MB_CARRIERS
+# if ERTS_SUPER_ALIGNED_MSEG_ONLY
allctr->max_mseg_mbcs = ~(Uint)0;
# else
allctr->max_mseg_mbcs = init->mmmbc;
@@ -5570,7 +5589,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
CFLG_MBC
| CFLG_FORCE_SIZE
| CFLG_NO_CPOOL
-#if !HALFWORD_HEAP && !HAVE_SUPER_ALIGNED_MB_CARRIERS
+#if !HALFWORD_HEAP && !ERTS_SUPER_ALIGNED_MSEG_ONLY
| CFLG_FORCE_SYS_ALLOC
#endif
| CFLG_MAIN_CARRIER);
@@ -5656,9 +5675,9 @@ erts_alcu_init(AlcUInit_t *init)
#endif
ASSERT(SBC_BLK_SZ_MASK == MBC_FBLK_SZ_MASK); /* see BLK_SZ */
#if HAVE_ERTS_MSEG
- ASSERT(erts_mseg_unit_size() == MSEG_UNIT_SZ);
+ ASSERT(erts_mseg_unit_size() == ERTS_SACRR_UNIT_SZ);
max_mseg_carriers = init->mmc;
- sys_alloc_carrier_size = MSEG_UNIT_CEILING(init->ycs);
+ sys_alloc_carrier_size = ERTS_SACRR_UNIT_CEILING(init->ycs);
#else /* #if HAVE_ERTS_MSEG */
sys_alloc_carrier_size = ((init->ycs + 4095) / 4096) * 4096;
#endif
@@ -5819,7 +5838,7 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk)
ASSERT(CARRIER_SZ(sbc) - SBC_HEADER_SIZE >= SBC_BLK_SZ(iblk));
#if HAVE_ERTS_MSEG
if (IS_MSEG_CARRIER(sbc)) {
- ASSERT(CARRIER_SZ(sbc) % MSEG_UNIT_SZ == 0);
+ ASSERT(CARRIER_SZ(sbc) % ERTS_SACRR_UNIT_SZ == 0);
}
#endif
crr = sbc;
@@ -5904,7 +5923,7 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk)
#if HAVE_ERTS_MSEG
if (IS_MSEG_CARRIER(crr)) {
- ASSERT(CARRIER_SZ(crr) % MSEG_UNIT_SZ == 0);
+ ASSERT(CARRIER_SZ(crr) % ERTS_SACRR_UNIT_SZ == 0);
}
#endif
cl = &allctr->mbc_list;
diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h
index 02cbe5c5d0..222f137024 100644
--- a/erts/emulator/beam/erl_alloc_util.h
+++ b/erts/emulator/beam/erl_alloc_util.h
@@ -75,7 +75,7 @@ typedef struct {
#define ERTS_DEFAULT_ALCU_INIT { \
1024*1024, /* (bytes) ycs: sys_alloc carrier size */\
- 1024 /* (amount) mmc: max mseg carriers */\
+ ~((UWord) 0) /* (amount) mmc: max mseg carriers */ \
}
#define ERTS_DEFAULT_ALLCTR_INIT { \
@@ -95,7 +95,7 @@ typedef struct {
50, /* (%) rmbcmt: rel mbc move threshold */\
1024*1024, /* (bytes) mmbcs: main multiblock carrier size */\
256, /* (amount) mmsbc: max mseg sbcs */\
- 10, /* (amount) mmmbc: max mseg mbcs */\
+ ~((UWord) 0), /* (amount) mmmbc: max mseg mbcs */ \
10*1024*1024, /* (bytes) lmbcs: largest mbc size */\
1024*1024, /* (bytes) smbcs: smallest mbc size */\
10, /* (amount) mbcgs: mbc growth stages */\
@@ -128,7 +128,7 @@ typedef struct {
80, /* (%) rsbcmt: rel sbc move threshold */\
128*1024, /* (bytes) mmbcs: main multiblock carrier size */\
256, /* (amount) mmsbc: max mseg sbcs */\
- 10, /* (amount) mmmbc: max mseg mbcs */\
+ ~((UWord) 0), /* (amount) mmmbc: max mseg mbcs */ \
1024*1024, /* (bytes) lmbcs: largest mbc size */\
128*1024, /* (bytes) smbcs: smallest mbc size */\
10, /* (amount) mbcgs: mbc growth stages */\
@@ -217,25 +217,34 @@ erts_aint32_t erts_alcu_fix_alloc_shrink(Allctr_t *, erts_aint32_t);
#define MBC_FBLK_SZ_MASK UNIT_MASK
#define CARRIER_SZ_MASK UNIT_MASK
-#if HAVE_ERTS_MSEG
-
-# define MSEG_UNIT_SHIFT MSEG_ALIGN_BITS
-# define MSEG_UNIT_SZ (1 << MSEG_UNIT_SHIFT)
-# define MSEG_UNIT_MASK ((~(UWord)0) << MSEG_UNIT_SHIFT)
-
-# define MSEG_UNIT_FLOOR(X) ((X) & MSEG_UNIT_MASK)
-# define MSEG_UNIT_CEILING(X) MSEG_UNIT_FLOOR((X) + ~MSEG_UNIT_MASK)
-
+#if ERTS_HAVE_MSEG_SUPER_ALIGNED \
+ || (!HAVE_ERTS_MSEG && ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC)
+# ifndef MSEG_ALIGN_BITS
+# define ERTS_SUPER_ALIGN_BITS MSEG_ALIGN_BITS
+# else
+# define ERTS_SUPER_ALIGN_BITS 18
+# endif
# ifdef ARCH_64
# define MBC_ABLK_OFFSET_BITS 24
-# elif HAVE_SUPER_ALIGNED_MB_CARRIERS
+# else
# define MBC_ABLK_OFFSET_BITS 9
/* Affects hard limits for sbct and lmbcs documented in erts_alloc.xml */
# endif
-#endif
-#ifndef MBC_ABLK_OFFSET_BITS
+# define ERTS_SACRR_UNIT_SHIFT ERTS_SUPER_ALIGN_BITS
+# define ERTS_SACRR_UNIT_SZ (1 << ERTS_SACRR_UNIT_SHIFT)
+# define ERTS_SACRR_UNIT_MASK ((~(UWord)0) << ERTS_SACRR_UNIT_SHIFT)
+# define ERTS_SACRR_UNIT_FLOOR(X) ((X) & ERTS_SACRR_UNIT_MASK)
+# define ERTS_SACRR_UNIT_CEILING(X) ERTS_SACRR_UNIT_FLOOR((X) + ~ERTS_SACRR_UNIT_MASK)
+# define ERTS_SA_MB_CARRIERS 1
+#else
+# define ERTS_SA_MB_CARRIERS 0
# define MBC_ABLK_OFFSET_BITS 0 /* no carrier offset in block header */
#endif
+#if ERTS_HAVE_MSEG_SUPER_ALIGNED && !ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
+# define ERTS_SUPER_ALIGNED_MSEG_ONLY 1
+#else
+# define ERTS_SUPER_ALIGNED_MSEG_ONLY 0
+#endif
#if MBC_ABLK_OFFSET_BITS
# define MBC_ABLK_OFFSET_SHIFT (sizeof(UWord)*8 - MBC_ABLK_OFFSET_BITS)
@@ -245,9 +254,9 @@ erts_aint32_t erts_alcu_fix_alloc_shrink(Allctr_t *, erts_aint32_t);
# define MBC_ABLK_SZ_MASK (~FLG_MASK)
#endif
-#define MBC_ABLK_SZ(B) (ASSERT_EXPR(!is_sbc_blk(B)), (B)->bhdr & MBC_ABLK_SZ_MASK)
-#define MBC_FBLK_SZ(B) (ASSERT_EXPR(!is_sbc_blk(B)), (B)->bhdr & MBC_FBLK_SZ_MASK)
-#define SBC_BLK_SZ(B) (ASSERT_EXPR(is_sbc_blk(B)), (B)->bhdr & SBC_BLK_SZ_MASK)
+#define MBC_ABLK_SZ(B) (ASSERT(!is_sbc_blk(B)), (B)->bhdr & MBC_ABLK_SZ_MASK)
+#define MBC_FBLK_SZ(B) (ASSERT(!is_sbc_blk(B)), (B)->bhdr & MBC_FBLK_SZ_MASK)
+#define SBC_BLK_SZ(B) (ASSERT(is_sbc_blk(B)), (B)->bhdr & SBC_BLK_SZ_MASK)
#define CARRIER_SZ(C) \
((C)->chdr & CARRIER_SZ_MASK)
@@ -327,8 +336,8 @@ typedef struct {
(B)->u.carrier)
# define ABLK_TO_MBC(B) \
(ASSERT(IS_MBC_BLK(B) && !IS_FREE_BLK(B)), \
- (Carrier_t*)((MSEG_UNIT_FLOOR((UWord)(B)) - \
- (((B)->bhdr >> MBC_ABLK_OFFSET_SHIFT) << MSEG_UNIT_SHIFT))))
+ (Carrier_t*)((ERTS_SACRR_UNIT_FLOOR((UWord)(B)) - \
+ (((B)->bhdr >> MBC_ABLK_OFFSET_SHIFT) << ERTS_SACRR_UNIT_SHIFT))))
# define BLK_TO_MBC(B) (IS_FREE_BLK(B) ? FBLK_TO_MBC(B) : ABLK_TO_MBC(B))
#else
# define FBLK_TO_MBC(B) ((B)->carrier)
diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.c b/erts/emulator/beam/erl_ao_firstfit_alloc.c
index 4e6c8b317e..396aa88e0b 100644
--- a/erts/emulator/beam/erl_ao_firstfit_alloc.c
+++ b/erts/emulator/beam/erl_ao_firstfit_alloc.c
@@ -85,9 +85,6 @@
#define SET_RED(N) (((AOFF_RBTree_t *) (N))->flags |= RED_FLG)
#define SET_BLACK(N) (((AOFF_RBTree_t *) (N))->flags &= ~RED_FLG)
-#undef ASSERT
-#define ASSERT ASSERT_EXPR
-
#if 1
#define RBT_ASSERT ASSERT
#else
diff --git a/erts/emulator/beam/erl_bestfit_alloc.c b/erts/emulator/beam/erl_bestfit_alloc.c
index 41f449bb28..59c14899a2 100644
--- a/erts/emulator/beam/erl_bestfit_alloc.c
+++ b/erts/emulator/beam/erl_bestfit_alloc.c
@@ -75,9 +75,6 @@
#define BF_BLK_SZ(B) MBC_FBLK_SZ(&(B)->hdr)
-#undef ASSERT
-#define ASSERT ASSERT_EXPR
-
#if 1
#define RBT_ASSERT ASSERT
#else
diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c
index 0db19a1ee6..ff775691b3 100644
--- a/erts/emulator/beam/erl_bif_binary.c
+++ b/erts/emulator/beam/erl_bif_binary.c
@@ -927,6 +927,9 @@ static int do_binary_match_compile(Eterm argument, Eterm *tag, Binary **binp)
if (binary_bitsize(b) != 0) {
goto badarg;
}
+ if (binary_size(b) == 0) {
+ goto badarg;
+ }
++words;
characters += binary_size(b);
}
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 673dfc658c..5fbcbbe250 100755
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -2091,7 +2091,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(res);
} else if (BIF_ARG_1 == am_sequential_tracer) {
val = erts_get_system_seq_tracer();
- ASSERT(is_internal_pid(val) || is_internal_port(val) || val==am_false)
+ ASSERT(is_internal_pid(val) || is_internal_port(val) || val==am_false);
hp = HAlloc(BIF_P, 3);
res = TUPLE2(hp, am_sequential_tracer, val);
BIF_RET(res);
@@ -2636,6 +2636,9 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(res);
}
+ else if (ERTS_IS_ATOM_STR("ets_limit",BIF_ARG_1)) {
+ BIF_RET(make_small(erts_db_get_max_tabs()));
+ }
BIF_ERROR(BIF_P, BADARG);
}
@@ -3286,6 +3289,9 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
erts_smp_thr_progress_unblock();
BIF_RET(res);
}
+ else if (ERTS_IS_ATOM_STR("mmap", BIF_ARG_1)) {
+ BIF_RET(erts_mmap_debug_info(BIF_P));
+ }
}
else if (is_tuple(BIF_ARG_1)) {
Eterm* tp = tuple_val(BIF_ARG_1);
diff --git a/erts/emulator/beam/erl_binary.h b/erts/emulator/beam/erl_binary.h
index 506c4813fa..f7dc20f5e6 100644
--- a/erts/emulator/beam/erl_binary.h
+++ b/erts/emulator/beam/erl_binary.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2013. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -153,7 +153,7 @@ do { \
#define binary_bytes(Bin) \
(*binary_val(Bin) == HEADER_PROC_BIN ? \
((ProcBin *) binary_val(Bin))->bytes : \
- (ASSERT_EXPR(thing_subtag(*binary_val(Bin)) == HEAP_BINARY_SUBTAG), \
+ (ASSERT(thing_subtag(*binary_val(Bin)) == HEAP_BINARY_SUBTAG), \
(byte *)(&(((ErlHeapBin *) binary_val(Bin))->data))))
void erts_init_binary(void);
@@ -183,7 +183,7 @@ BIF_RETTYPE erts_binary_part(Process *p, Eterm binary, Eterm epos, Eterm elen);
#endif
#define ERTS_CHK_BIN_ALIGNMENT(B) \
- do { ASSERT(!(B) || (((UWord) &((Binary *)(B))->orig_bytes[0]) & ERTS_BIN_ALIGNMENT_MASK) == ((UWord) 0)) } while(0)
+ do { ASSERT(!(B) || (((UWord) &((Binary *)(B))->orig_bytes[0]) & ERTS_BIN_ALIGNMENT_MASK) == ((UWord) 0)); } while(0)
ERTS_GLB_INLINE byte* erts_get_aligned_binary_bytes(Eterm bin, byte** base_ptr);
ERTS_GLB_INLINE void erts_free_aligned_binary_bytes(byte* buf);
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 98c2988323..41e64fcd4f 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -2236,7 +2236,7 @@ static BIF_RETTYPE ets_select_trap_1(BIF_ALIST_1)
CHECK_TABLES();
tptr = tuple_val(a1);
- ASSERT(arityval(*tptr) >= 1)
+ ASSERT(arityval(*tptr) >= 1);
if ((tb = db_get_table(p, tptr[1], DB_READ, kind)) == NULL) {
BIF_ERROR(p, BADARG);
@@ -2403,7 +2403,7 @@ static BIF_RETTYPE ets_select_count_1(BIF_ALIST_1)
CHECK_TABLES();
tptr = tuple_val(a1);
- ASSERT(arityval(*tptr) >= 1)
+ ASSERT(arityval(*tptr) >= 1);
if ((tb = db_get_table(p, tptr[1], DB_READ, kind)) == NULL) {
BIF_ERROR(p, BADARG);
}
@@ -3811,6 +3811,13 @@ erts_db_foreach_offheap(DbTable *tb,
tb->common.meth->db_foreach_offheap(tb, func, arg);
}
+/* retrieve max number of ets tables */
+Uint
+erts_db_get_max_tabs()
+{
+ return db_max_tabs;
+}
+
/*
* For testing of meta tables only.
*
diff --git a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h
index 6b62e10eb7..5b4681fc90 100644
--- a/erts/emulator/beam/erl_db.h
+++ b/erts/emulator/beam/erl_db.h
@@ -79,6 +79,8 @@ extern erts_smp_atomic_t erts_ets_misc_mem_size;
Eterm erts_ets_colliding_names(Process*, Eterm name, Uint cnt);
+Uint erts_db_get_max_tabs(void);
+
#endif
#if defined(ERTS_WANT_DB_INTERNAL__) && !defined(ERTS_HAVE_DB_INTERNAL__)
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index 90b79e6044..328b19dfc9 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -457,7 +457,7 @@ int erts_db_is_compiled_ms(Eterm term);
&& ERTS_MAGIC_BIN_DESTRUCTOR((BP)) == erts_db_match_prog_destructor)
#define Binary2MatchProg(BP) \
- (ASSERT_EXPR(IsMatchProgBinary((BP))), \
+ (ASSERT(IsMatchProgBinary((BP))), \
((MatchProg *) ERTS_MAGIC_BIN_DATA((BP))))
/*
** Debugging
diff --git a/erts/emulator/beam/erl_driver.h b/erts/emulator/beam/erl_driver.h
index d2a11cce05..5cffae92be 100644
--- a/erts/emulator/beam/erl_driver.h
+++ b/erts/emulator/beam/erl_driver.h
@@ -552,6 +552,11 @@ EXTERN int erl_drv_equal_tids(ErlDrvTid tid1, ErlDrvTid tid2);
EXTERN void erl_drv_thread_exit(void *resp);
EXTERN int erl_drv_thread_join(ErlDrvTid, void **respp);
+EXTERN char* erl_drv_mutex_name(ErlDrvMutex *mtx);
+EXTERN char* erl_drv_cond_name(ErlDrvCond *cnd);
+EXTERN char* erl_drv_rwlock_name(ErlDrvRWLock *rwlck);
+EXTERN char* erl_drv_thread_name(ErlDrvTid tid);
+
/*
* Misc.
*/
@@ -689,6 +694,3 @@ EXTERN int erl_drv_getenv(char *key, char *value, size_t *value_size);
/* also in global.h, but driver's can't include global.h */
void dtrace_drvport_str(ErlDrvPort port, char *port_buf);
-
-
-
diff --git a/erts/emulator/beam/erl_drv_thread.c b/erts/emulator/beam/erl_drv_thread.c
index a49a155701..4f1bba8657 100644
--- a/erts/emulator/beam/erl_drv_thread.c
+++ b/erts/emulator/beam/erl_drv_thread.c
@@ -188,6 +188,17 @@ erl_drv_mutex_destroy(ErlDrvMutex *dmtx)
#endif
}
+
+char *
+erl_drv_mutex_name(ErlDrvMutex *dmtx)
+{
+#ifdef USE_THREADS
+ return dmtx ? dmtx->name : NULL;
+#else
+ return NULL;
+#endif
+}
+
int
erl_drv_mutex_trylock(ErlDrvMutex *dmtx)
{
@@ -258,6 +269,15 @@ erl_drv_cond_destroy(ErlDrvCond *dcnd)
#endif
}
+char *
+erl_drv_cond_name(ErlDrvCond *dcnd)
+{
+#ifdef USE_THREADS
+ return dcnd ? dcnd->name : NULL;
+#else
+ return NULL;
+#endif
+}
void
erl_drv_cond_signal(ErlDrvCond *dcnd)
@@ -331,6 +351,16 @@ erl_drv_rwlock_destroy(ErlDrvRWLock *drwlck)
#endif
}
+char *
+erl_drv_rwlock_name(ErlDrvRWLock *drwlck)
+{
+#ifdef USE_THREADS
+ return drwlck ? drwlck->name : NULL;
+#else
+ return NULL;
+#endif
+}
+
int
erl_drv_rwlock_tryrlock(ErlDrvRWLock *drwlck)
{
@@ -617,6 +647,18 @@ erl_drv_thread_create(char *name,
#endif
}
+char *
+erl_drv_thread_name(ErlDrvTid tid)
+{
+#ifdef USE_THREADS
+ struct ErlDrvTid_ *dtid = (struct ErlDrvTid_ *) tid;
+ return dtid ? dtid->name : NULL;
+#else
+ return NULL;
+#endif
+}
+
+
ErlDrvTid
erl_drv_thread_self(void)
{
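
The erl_drv_*_name() additions above let a driver read back the name it supplied when the lock, condition variable, rwlock or thread was created; they return NULL when the emulator is built without thread support or when the handle is NULL. A minimal sketch, assuming a mutex created elsewhere with erl_drv_mutex_create("my_drv_lock"):

    /* Illustrative driver snippet -- not part of the patch. 'lock' is assumed
     * to have been created with erl_drv_mutex_create("my_drv_lock"). */
    #include <stdio.h>
    #include "erl_driver.h"

    static ErlDrvMutex *lock;

    static void log_lock_name(void)
    {
        char *name = erl_drv_mutex_name(lock);  /* NULL without thread support */
        fprintf(stderr, "driver mutex: %s\n", name ? name : "(no name)");
    }
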
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 8d137df7ae..8c4fffa75b 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -549,9 +549,12 @@ void erts_usage(void)
ERTS_SCHED_THREAD_MAX_STACK_SIZE);
erts_fprintf(stderr, "-spp Bool set port parallelism scheduling hint\n");
erts_fprintf(stderr, "-S n1:n2 set number of schedulers (n1), and number of\n");
- erts_fprintf(stderr, " schedulers online (n2), valid range for both\n");
- erts_fprintf(stderr, " numbers are [1-%d]\n",
+ erts_fprintf(stderr, " schedulers online (n2), maximum for both\n");
+ erts_fprintf(stderr, " numbers is %d\n",
ERTS_MAX_NO_OF_SCHEDULERS);
+ erts_fprintf(stderr, "-SP p1:p2 specify schedulers (p1) and schedulers online (p2)\n");
+ erts_fprintf(stderr, " as percentages of logical processors configured and logical\n");
+ erts_fprintf(stderr, " processors available, respectively\n");
erts_fprintf(stderr, "-t size set the maximum number of atoms the "
"emulator can handle\n");
erts_fprintf(stderr, " valid range is [%d-%d]\n",
@@ -631,6 +634,8 @@ early_init(int *argc, char **argv) /*
int ncpuavail;
int schdlrs;
int schdlrs_onln;
+ int schdlrs_percentage = 100;
+ int schdlrs_onln_percentage = 100;
int max_main_threads;
int max_reader_groups;
int reader_groups;
@@ -758,63 +763,132 @@ early_init(int *argc, char **argv) /*
}
break;
}
- case 'S' : {
- int tot, onln;
- char *arg = get_arg(argv[i]+2, argv[i+1], &i);
- switch (sscanf(arg, "%d:%d", &tot, &onln)) {
- case 0:
- switch (sscanf(arg, ":%d", &onln)) {
+ case 'S' :
+ if (argv[i][2] == 'P') {
+ int ptot, ponln;
+ char *arg = get_arg(argv[i]+3, argv[i+1], &i);
+ switch (sscanf(arg, "%d:%d", &ptot, &ponln)) {
+ case 0:
+ switch (sscanf(arg, ":%d", &ponln)) {
+ case 1:
+ if (ponln < 0)
+ goto bad_SP;
+ ptot = 100;
+ goto chk_SP;
+ default:
+ goto bad_SP;
+ }
case 1:
- tot = no_schedulers;
- goto chk_S;
+ if (ptot < 0)
+ goto bad_SP;
+ ponln = ptot < 100 ? ptot : 100;
+ goto chk_SP;
+ case 2:
+ if (ptot < 0 || ponln < 0)
+ goto bad_SP;
+ chk_SP:
+ schdlrs_percentage = ptot;
+ schdlrs_onln_percentage = ponln;
+ break;
default:
- goto bad_S;
- }
- case 1:
- onln = tot < schdlrs_onln ? tot : schdlrs_onln;
- case 2:
- chk_S:
- if (tot > 0)
- schdlrs = tot;
- else
- schdlrs = no_schedulers + tot;
- if (onln > 0)
- schdlrs_onln = onln;
- else
- schdlrs_onln = no_schedulers_online + onln;
- if (schdlrs < 1 || ERTS_MAX_NO_OF_SCHEDULERS < schdlrs) {
- erts_fprintf(stderr,
- "bad amount of schedulers %d\n",
- tot);
- erts_usage();
- }
- if (schdlrs_onln < 1 || schdlrs < schdlrs_onln) {
+ bad_SP:
+ erts_fprintf(stderr,
+ "bad schedulers percentage specifier %s\n",
+ arg);
+ erts_usage();
+ break;
+ }
+
+ VERBOSE(DEBUG_SYSTEM,
+ ("using %d:%d scheduler percentages\n",
+ schdlrs_percentage, schdlrs_onln_percentage));
+ } else {
+ int tot, onln;
+ char *arg = get_arg(argv[i]+2, argv[i+1], &i);
+ switch (sscanf(arg, "%d:%d", &tot, &onln)) {
+ case 0:
+ switch (sscanf(arg, ":%d", &onln)) {
+ case 1:
+ tot = no_schedulers;
+ goto chk_S;
+ default:
+ goto bad_S;
+ }
+ case 1:
+ onln = tot < schdlrs_onln ? tot : schdlrs_onln;
+ case 2:
+ chk_S:
+ if (tot > 0)
+ schdlrs = tot;
+ else
+ schdlrs = no_schedulers + tot;
+ if (onln > 0)
+ schdlrs_onln = onln;
+ else
+ schdlrs_onln = no_schedulers_online + onln;
+ if (schdlrs < 1 || ERTS_MAX_NO_OF_SCHEDULERS < schdlrs) {
+ erts_fprintf(stderr,
+ "bad amount of schedulers %d\n",
+ tot);
+ erts_usage();
+ }
+ if (schdlrs_onln < 1 || schdlrs < schdlrs_onln) {
+ erts_fprintf(stderr,
+ "bad amount of schedulers online %d "
+ "(total amount of schedulers %d)\n",
+ schdlrs_onln, schdlrs);
+ erts_usage();
+ }
+ break;
+ default:
+ bad_S:
erts_fprintf(stderr,
- "bad amount of schedulers online %d "
- "(total amount of schedulers %d)\n",
- schdlrs_onln, schdlrs);
+ "bad amount of schedulers %s\n",
+ arg);
erts_usage();
+ break;
}
- break;
- default:
- bad_S:
- erts_fprintf(stderr,
- "bad amount of schedulers %s\n",
- arg);
- erts_usage();
- break;
- }
- VERBOSE(DEBUG_SYSTEM,
- ("using %d:%d scheduler(s)\n", tot, onln));
- break;
- }
+ VERBOSE(DEBUG_SYSTEM,
+ ("using %d:%d scheduler(s)\n", tot, onln));
+ }
+ break;
default:
break;
}
}
i++;
}
+
+#ifdef ERTS_SMP
+ /* apply any scheduler percentages */
+ if (schdlrs_percentage != 100 || schdlrs_onln_percentage != 100) {
+ schdlrs = schdlrs * schdlrs_percentage / 100;
+ schdlrs_onln = schdlrs_onln * schdlrs_onln_percentage / 100;
+ if (schdlrs < 1)
+ schdlrs = 1;
+ if (ERTS_MAX_NO_OF_SCHEDULERS < schdlrs) {
+ erts_fprintf(stderr,
+ "bad schedulers percentage %d "
+ "(total amount of schedulers %d)\n",
+ schdlrs_percentage, schdlrs);
+ erts_usage();
+ }
+ if (schdlrs_onln < 1)
+ schdlrs_onln = 1;
+ if (schdlrs < schdlrs_onln) {
+ erts_fprintf(stderr,
+ "bad schedulers online percentage %d "
+ "(total amount of schedulers %d, online %d)\n",
+ schdlrs_onln_percentage, schdlrs, schdlrs_onln);
+ erts_usage();
+ }
+ }
+#else
+ /* Silence gcc warnings */
+ (void)schdlrs_percentage;
+ (void)schdlrs_onln_percentage;
+#endif
}
#ifndef USE_THREADS
@@ -1312,7 +1386,10 @@ erl_start(int argc, char **argv)
break;
case 'S' : /* Was handled in early_init() just read past it */
- (void) get_arg(argv[i]+2, argv[i+1], &i);
+ if (argv[i][2] == 'P')
+ (void) get_arg(argv[i]+3, argv[i+1], &i);
+ else
+ (void) get_arg(argv[i]+2, argv[i+1], &i);
break;
case 's' : {
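
For a concrete feel of the new -SP handling above: the percentages are applied to the default scheduler counts (derived from the logical processors configured and available) and the results are clamped to at least one scheduler. A small worked example with hypothetical numbers:

    /* Illustrative arithmetic only. On a hypothetical host with 8 logical
     * processors configured and 8 available, "-SP 75:50" gives: */
    int no_schedulers = 8, no_schedulers_online = 8;
    int schdlrs      = no_schedulers        * 75 / 100;   /* 6 schedulers        */
    int schdlrs_onln = no_schedulers_online * 50 / 100;   /* 4 schedulers online */
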
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 2114d0c001..0dd83fa6ed 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -132,6 +132,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
#endif /* __WIN32__ */
{ "alcu_init_atoms", NULL },
{ "mseg_init_atoms", NULL },
+ { "mmap_init_atoms", NULL },
{ "drv_tsd", NULL },
{ "async_enq_mtx", NULL },
#ifdef ERTS_SMP
@@ -185,7 +186,9 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "sys_gethrtime", NULL },
#endif
#endif
- { "erts_alloc_hard_debug", NULL }
+ { "erts_alloc_hard_debug", NULL },
+ { "hard_dbg_mseg", NULL },
+ { "erts_mmap", NULL }
};
#define ERTS_LOCK_ORDER_SIZE \
diff --git a/erts/emulator/beam/erl_node_container_utils.h b/erts/emulator/beam/erl_node_container_utils.h
index 0f93a3a9f0..17f6b32bb1 100644
--- a/erts/emulator/beam/erl_node_container_utils.h
+++ b/erts/emulator/beam/erl_node_container_utils.h
@@ -106,7 +106,7 @@
#define dist_entry_channel_no(x) \
((x) == erts_this_dist_entry \
? ((Uint) 0) \
- : (ASSERT_EXPR(is_atom((x)->sysname)), \
+ : (ASSERT(is_atom((x)->sysname)), \
(Uint) atom_val((x)->sysname)))
#define internal_channel_no(x) ((Uint) ERST_INTERNAL_CHANNEL_NO)
#define external_channel_no(x) \
@@ -122,10 +122,10 @@ extern ErtsPTab erts_proc;
(D), \
_TAG_IMMED1_PID)
-#define internal_pid_index(PID) (ASSERT_EXPR(is_internal_pid((PID))), \
+#define internal_pid_index(PID) (ASSERT(is_internal_pid((PID))), \
erts_ptab_id2pix(&erts_proc, (PID)))
-#define internal_pid_data(PID) (ASSERT_EXPR(is_internal_pid((PID))), \
+#define internal_pid_data(PID) (ASSERT(is_internal_pid((PID))), \
erts_ptab_id2data(&erts_proc, (PID)))
#define internal_pid_number(x) _GET_PID_NUM(internal_pid_data((x)))
@@ -193,10 +193,10 @@ extern ErtsPTab erts_port;
(D), \
_TAG_IMMED1_PORT)
-#define internal_port_index(PRT) (ASSERT_EXPR(is_internal_port((PRT))), \
+#define internal_port_index(PRT) (ASSERT(is_internal_port((PRT))), \
erts_ptab_id2pix(&erts_port, (PRT)))
-#define internal_port_data(PRT) (ASSERT_EXPR(is_internal_port((PRT))), \
+#define internal_port_data(PRT) (ASSERT(is_internal_port((PRT))), \
erts_ptab_id2data(&erts_port, (PRT)))
#define internal_port_number(x) _GET_PORT_NUM(internal_port_data((x)))
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 7d53ce7152..547a42beb2 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -1838,6 +1838,16 @@ release_port(void *vport)
{
erts_port_dec_refc((Port *) vport);
}
+
+static void
+schedule_release_port(void *vport) {
+ Port *pp = (Port*)vport;
+ /* This is only used when a port release was ordered from a non-scheduler */
+ erts_schedule_thr_prgr_later_op(release_port,
+ (void *) pp,
+ &pp->common.u.release);
+}
+
#endif
static void
@@ -2033,10 +2043,15 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
* Schedule cleanup of port structure...
*/
#ifdef ERTS_SMP
- /* Has to be more or less immediate to release any driver */
- erts_schedule_thr_prgr_later_op(release_port,
- (void *) pp,
- &pp->common.u.release);
+ /* We might not be a scheduler, eg. traceing to port we are sys_msg_dispatcher */
+ if (!erts_get_scheduler_data()) {
+ erts_schedule_misc_aux_work(1, schedule_release_port, (void*)pp);
+ } else {
+ /* Has to be more or less immediate to release any driver */
+ erts_schedule_thr_prgr_later_op(release_port,
+ (void *) pp,
+ &pp->common.u.release);
+ }
#else
pp->cleanup = 1;
#endif
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 2439a46ab6..79f382674a 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -294,7 +294,7 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(proclist,
ERTS_ALC_T_PROC_LIST)
#define ERTS_SCHED_SLEEP_INFO_IX(IX) \
- (ASSERT_EXPR(-1 <= ((int) (IX)) \
+ (ASSERT(-1 <= ((int) (IX)) \
&& ((int) (IX)) < ((int) erts_no_schedulers)), \
&aligned_sched_sleep_info[(IX)].ssi)
@@ -7237,7 +7237,7 @@ erts_sched_stat_modify(int what)
break;
case ERTS_SCHED_STAT_MODIFY_DISABLE:
erts_smp_thr_progress_block();
- erts_sched_stat.enabled = 1;
+ erts_sched_stat.enabled = 0;
erts_smp_thr_progress_unblock();
break;
case ERTS_SCHED_STAT_MODIFY_CLEAR:
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 8e5467f196..8d136f6e8b 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -1135,10 +1135,10 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags;
} while (0)
#define ERTS_RUNQ_IX(IX) \
- (ASSERT_EXPR(0 <= (IX) && (IX) < erts_no_run_queues), \
+ (ASSERT(0 <= (IX) && (IX) < erts_no_run_queues), \
&erts_aligned_run_queues[(IX)].runq)
#define ERTS_SCHEDULER_IX(IX) \
- (ASSERT_EXPR(0 <= (IX) && (IX) < erts_no_schedulers), \
+ (ASSERT(0 <= (IX) && (IX) < erts_no_schedulers), \
&erts_aligned_scheduler_data[(IX)].esd)
void erts_pre_init_process(void);
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index fa015ee4b9..ff7fdfcfca 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -2184,7 +2184,7 @@ trace_gc(Process *p, Eterm what)
AM_bin_old_vheap_block_size
};
- Uint values[] = {
+ UWord values[] = {
OLD_HEAP(p) ? OLD_HEND(p) - OLD_HEAP(p) : 0,
HEAP_SIZE(p),
MBUF_SIZE(p),
@@ -2198,7 +2198,7 @@ trace_gc(Process *p, Eterm what)
BIN_OLD_VHEAP_SZ(p)
};
#define LOCAL_HEAP_SIZE \
- (sizeof(values)/sizeof(Eterm)) * \
+ (sizeof(values)/sizeof(*values)) * \
(2/*cons*/ + 3/*2-tuple*/ + BIG_UINT_HEAP_SIZE) + \
5/*4-tuple */ + TS_HEAP_WORDS
DeclareTmpHeap(local_heap,LOCAL_HEAP_SIZE,p);
@@ -2206,7 +2206,7 @@ trace_gc(Process *p, Eterm what)
Eterm* limit;
#endif
- ASSERT(sizeof(values)/sizeof(Uint) == sizeof(tags)/sizeof(Eterm));
+ ASSERT(sizeof(values)/sizeof(*values) == sizeof(tags)/sizeof(Eterm));
UseTmpHeap(LOCAL_HEAP_SIZE,p);
@@ -2214,9 +2214,9 @@ trace_gc(Process *p, Eterm what)
hp = local_heap;
#ifdef DEBUG
size = 0;
- (void) erts_bld_atom_uint_2tup_list(NULL,
+ (void) erts_bld_atom_uword_2tup_list(NULL,
&size,
- sizeof(values)/sizeof(Uint),
+ sizeof(values)/sizeof(*values),
tags,
values);
size += 5/*4-tuple*/ + TS_SIZE(p);
@@ -2229,9 +2229,9 @@ trace_gc(Process *p, Eterm what)
ERTS_TRACE_FLAGS(p));
size = 0;
- (void) erts_bld_atom_uint_2tup_list(NULL,
+ (void) erts_bld_atom_uword_2tup_list(NULL,
&size,
- sizeof(values)/sizeof(Uint),
+ sizeof(values)/sizeof(*values),
tags,
values);
size += 5/*4-tuple*/ + TS_SIZE(p);
@@ -2244,9 +2244,9 @@ trace_gc(Process *p, Eterm what)
ASSERT(size <= LOCAL_HEAP_SIZE);
#endif
- msg = erts_bld_atom_uint_2tup_list(&hp,
+ msg = erts_bld_atom_uword_2tup_list(&hp,
NULL,
- sizeof(values)/sizeof(Uint),
+ sizeof(values)/sizeof(*values),
tags,
values);
@@ -2415,7 +2415,7 @@ monitor_long_gc(Process *p, Uint time) {
am_old_heap_size,
am_heap_size
};
- Eterm values[] = {
+ UWord values[] = {
time,
OLD_HEAP(p) ? OLD_HEND(p) - OLD_HEAP(p) : 0,
HEAP_SIZE(p),
@@ -2436,9 +2436,9 @@ monitor_long_gc(Process *p, Uint time) {
#endif
hsz = 0;
- (void) erts_bld_atom_uint_2tup_list(NULL,
+ (void) erts_bld_atom_uword_2tup_list(NULL,
&hsz,
- sizeof(values)/sizeof(Uint),
+ sizeof(values)/sizeof(*values),
tags,
values);
hsz += 5 /* 4-tuple */;
@@ -2449,9 +2449,9 @@ monitor_long_gc(Process *p, Uint time) {
hp_end = hp + hsz;
#endif
- list = erts_bld_atom_uint_2tup_list(&hp,
+ list = erts_bld_atom_uword_2tup_list(&hp,
NULL,
- sizeof(values)/sizeof(Uint),
+ sizeof(values)/sizeof(*values),
tags,
values);
msg = TUPLE4(hp, am_monitor, p->common.id, am_long_gc, list);
@@ -2489,7 +2489,7 @@ monitor_large_heap(Process *p) {
am_old_heap_size,
am_heap_size
};
- Uint values[] = {
+ UWord values[] = {
OLD_HEAP(p) ? OLD_HEND(p) - OLD_HEAP(p) : 0,
HEAP_SIZE(p),
MBUF_SIZE(p),
@@ -2511,9 +2511,9 @@ monitor_large_heap(Process *p) {
#endif
hsz = 0;
- (void) erts_bld_atom_uint_2tup_list(NULL,
+ (void) erts_bld_atom_uword_2tup_list(NULL,
&hsz,
- sizeof(values)/sizeof(Uint),
+ sizeof(values)/sizeof(*values),
tags,
values);
hsz += 5 /* 4-tuple */;
@@ -2524,9 +2524,9 @@ monitor_large_heap(Process *p) {
hp_end = hp + hsz;
#endif
- list = erts_bld_atom_uint_2tup_list(&hp,
+ list = erts_bld_atom_uword_2tup_list(&hp,
NULL,
- sizeof(values)/sizeof(Uint),
+ sizeof(values)/sizeof(*values),
tags,
values);
msg = TUPLE4(hp, am_monitor, p->common.id, am_large_heap, list);
diff --git a/erts/emulator/beam/erl_unicode.c b/erts/emulator/beam/erl_unicode.c
index ec8ea5f044..7e3c6681d9 100644
--- a/erts/emulator/beam/erl_unicode.c
+++ b/erts/emulator/beam/erl_unicode.c
@@ -1476,6 +1476,9 @@ static Eterm do_utf8_to_list_normalize(Process *p, Uint num, byte *bytes, Uint s
Uint16 savepoints[4];
int numpoints = 0;
+ if (num == 0)
+ return NIL;
+
ASSERT(num > 0);
hp = HAlloc(p,num * 2); /* May be to much */
diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h
index 80d29d554a..292d135946 100644
--- a/erts/emulator/beam/erl_utils.h
+++ b/erts/emulator/beam/erl_utils.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2012. All Rights Reserved.
+ * Copyright Ericsson AB 2012-2013. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -168,6 +168,10 @@ Eterm erts_bld_uint64(Uint **hpp, Uint *szp, Uint64 ui64);
Eterm erts_bld_sint64(Uint **hpp, Uint *szp, Sint64 si64);
Eterm erts_bld_cons(Uint **hpp, Uint *szp, Eterm car, Eterm cdr);
Eterm erts_bld_tuple(Uint **hpp, Uint *szp, Uint arity, ...);
+#define erts_bld_tuple2(H,S,E1,E2) erts_bld_tuple(H,S,2,E1,E2)
+#define erts_bld_tuple3(H,S,E1,E2,E3) erts_bld_tuple(H,S,3,E1,E2,E3)
+#define erts_bld_tuple4(H,S,E1,E2,E3,E4) erts_bld_tuple(H,S,4,E1,E2,E3,E4)
+#define erts_bld_tuple5(H,S,E1,E2,E3,E4,E5) erts_bld_tuple(H,S,5,E1,E2,E3,E4,E5)
Eterm erts_bld_tuplev(Uint **hpp, Uint *szp, Uint arity, Eterm terms[]);
Eterm erts_bld_string_n(Uint **hpp, Uint *szp, const char *str, Sint len);
#define erts_bld_string(hpp,szp,str) erts_bld_string_n(hpp,szp,str,strlen(str))
@@ -175,8 +179,8 @@ Eterm erts_bld_list(Uint **hpp, Uint *szp, Sint length, Eterm terms[]);
Eterm erts_bld_2tup_list(Uint **hpp, Uint *szp,
Sint length, Eterm terms1[], Uint terms2[]);
Eterm
-erts_bld_atom_uint_2tup_list(Uint **hpp, Uint *szp,
- Sint length, Eterm atoms[], Uint uints[]);
+erts_bld_atom_uword_2tup_list(Uint **hpp, Uint *szp,
+ Sint length, Eterm atoms[], UWord uints[]);
Eterm
erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length,
Eterm atoms[], Uint uints1[], Uint uints2[]);
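
The erts_bld_tupleN() shorthands above follow the usual two-pass erts_bld_* convention, also visible in the reply_alloc_info() change earlier: call once with a size pointer to count heap words, allocate, then call again with a heap pointer to build the term. A minimal sketch (heap allocation elided):

    /* Illustrative only: two-pass use of the new erts_bld_tuple3() shorthand. */
    Uint sz = 0;
    Uint *hp;     /* assumed to point at 'sz' free heap words before the build pass */
    Eterm res;

    (void) erts_bld_tuple3(NULL, &sz, am_ok, make_small(1), make_small(2)); /* size pass */
    /* ... allocate 'sz' words on a process heap or heap fragment, set 'hp' ... */
    res = erts_bld_tuple3(&hp, NULL, am_ok, make_small(1), make_small(2));  /* build pass */
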
diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h
index c962955de9..337422eead 100644
--- a/erts/emulator/beam/erl_vm.h
+++ b/erts/emulator/beam/erl_vm.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2012. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2013. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -80,7 +80,7 @@
# ifdef CHECK_FOR_HOLES
# define INIT_HEAP_MEM(p,sz) erts_set_hole_marker(HEAP_TOP(p), (sz))
# else
-# define INIT_HEAP_MEM(p,sz) memset(HEAP_TOP(p),DEBUG_BAD_BYTE,(sz)*sizeof(Eterm*))
+# define INIT_HEAP_MEM(p,sz) memset(HEAP_TOP(p),0x01,(sz)*sizeof(Eterm*))
# endif
#else
# define INIT_HEAP_MEM(p,sz) ((void)0)
@@ -98,7 +98,7 @@
* failing that, in a heap fragment.
*/
#define HAllocX(p, sz, xtra) \
- (ASSERT_EXPR((sz) >= 0), \
+ (ASSERT((sz) >= 0), \
ErtsHAllocLockCheck(p), \
(IS_FORCE_HEAP_FRAGS || (((HEAP_LIMIT(p) - HEAP_TOP(p)) < (sz))) \
? erts_heap_alloc((p),(sz),(xtra)) \
@@ -135,14 +135,14 @@
*/
#ifdef CHECK_FOR_HOLES
# define HeapOnlyAlloc(p, sz) \
- (ASSERT_EXPR((sz) >= 0), \
- (ASSERT_EXPR(((HEAP_LIMIT(p) - HEAP_TOP(p)) >= (sz))), \
+ (ASSERT((sz) >= 0), \
+ (ASSERT(((HEAP_LIMIT(p) - HEAP_TOP(p)) >= (sz))), \
(erts_set_hole_marker(HEAP_TOP(p), (sz)), \
(HEAP_TOP(p) = HEAP_TOP(p) + (sz), HEAP_TOP(p) - (sz)))))
#else
# define HeapOnlyAlloc(p, sz) \
- (ASSERT_EXPR((sz) >= 0), \
- (ASSERT_EXPR(((HEAP_LIMIT(p) - HEAP_TOP(p)) >= (sz))), \
+ (ASSERT((sz) >= 0), \
+ (ASSERT(((HEAP_LIMIT(p) - HEAP_TOP(p)) >= (sz))), \
(HEAP_TOP(p) = HEAP_TOP(p) + (sz), HEAP_TOP(p) - (sz))))
#endif
diff --git a/erts/emulator/beam/external.h b/erts/emulator/beam/external.h
index e37d47919e..ff29e84972 100644
--- a/erts/emulator/beam/external.h
+++ b/erts/emulator/beam/external.h
@@ -138,8 +138,8 @@ typedef struct {
#define ERTS_DIST_EXT_SIZE(EDEP) \
(sizeof(ErtsDistExternal) \
- (((EDEP)->flags & ERTS_DIST_EXT_ATOM_TRANS_TAB) \
- ? (ASSERT_EXPR(0 <= (EDEP)->attab.size \
- && (EDEP)->attab.size <= ERTS_ATOM_CACHE_SIZE), \
+ ? (ASSERT(0 <= (EDEP)->attab.size \
+ && (EDEP)->attab.size <= ERTS_ATOM_CACHE_SIZE), \
sizeof(Eterm)*(ERTS_ATOM_CACHE_SIZE - (EDEP)->attab.size)) \
: sizeof(ErtsAtomTranslationTable)))
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index c040b259a0..c1fda3f96c 100755
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -867,13 +867,13 @@ Eterm store_external_or_ref_in_proc_(Process *, Eterm);
Eterm store_external_or_ref_(Uint **, ErlOffHeap*, Eterm);
#define NC_HEAP_SIZE(NC) \
- (ASSERT_EXPR(is_node_container((NC))), \
+ (ASSERT(is_node_container((NC))), \
IS_CONST((NC)) ? 0 : (thing_arityval(*boxed_val((NC))) + 1))
#define STORE_NC(Hpp, ETpp, NC) \
- (ASSERT_EXPR(is_node_container((NC))), \
+ (ASSERT(is_node_container((NC))), \
IS_CONST((NC)) ? (NC) : store_external_or_ref_((Hpp), (ETpp), (NC)))
#define STORE_NC_IN_PROC(Pp, NC) \
- (ASSERT_EXPR(is_node_container((NC))), \
+ (ASSERT(is_node_container((NC))), \
IS_CONST((NC)) ? (NC) : store_external_or_ref_in_proc_((Pp), (NC)))
/* duplicates from big.h */
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index fb39c18d9d..b2803747eb 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -1330,7 +1330,7 @@ force_imm_drv_call(ErtsTryImmDrvCallState *sp)
erts_aint32_t invalid_state;
Port *prt = sp->port;
- ASSERT(ERTS_IS_CRASH_DUMPING)
+ ASSERT(ERTS_IS_CRASH_DUMPING);
ASSERT(is_atom(sp->port_op));
invalid_state = sp->state;
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index 05bff430e3..9561c0be96 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -149,19 +149,14 @@ typedef ERTS_SYS_FD_TYPE ErtsSysFdType;
# define ERTS_EXIT_AFTER_DUMP exit
#endif
+#define ERTS_ASSERT(e) \
+ ((void) ((e) ? 1 : (erl_assert_error(#e, __func__, __FILE__, __LINE__), 0)))
+void erl_assert_error(const char* expr, const char *func, const char* file, int line);
+
#ifdef DEBUG
-# define ASSERT(e) \
- if (e) { \
- ; \
- } else { \
- erl_assert_error(#e, __FILE__, __LINE__); \
- }
-# define ASSERT_EXPR(e) \
- ((void) ((e) ? 1 : (erl_assert_error(#e, __FILE__, __LINE__), 0)))
-void erl_assert_error(char* expr, char* file, int line);
+# define ASSERT(e) ERTS_ASSERT(e)
#else
-# define ASSERT(e)
-# define ASSERT_EXPR(e) ((void) 1)
+# define ASSERT(e) ((void) 1)
#endif
/*
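ERTS_ASSERT replaces both the old statement-style ASSERT and ASSERT_EXPR with a single expression form (ternary plus comma), so it can be used inside the comma chains of macros such as HAllocX and NC_HEAP_SIZE. A standalone sketch of the technique, with a hypothetical error handler standing in for erl_assert_error():

/* Sketch of an expression-form assert in the style of ERTS_ASSERT:
 * usable inside comma expressions, unlike an if/else statement form.
 * my_assert_error() is a hypothetical stand-in for erl_assert_error(). */
#include <stdio.h>
#include <stdlib.h>

static void my_assert_error(const char *expr, const char *func,
                            const char *file, int line)
{
    fprintf(stderr, "%s:%d: %s: assertion failed: %s\n",
            file, line, func, expr);
    abort();
}

#define MY_ASSERT(e) \
    ((void) ((e) ? 1 : (my_assert_error(#e, __func__, __FILE__, __LINE__), 0)))

/* Because MY_ASSERT is an expression it can sit inside another expression: */
#define CHECKED_DIV(a, b) (MY_ASSERT((b) != 0), (a) / (b))

int main(void)
{
    printf("%d\n", CHECKED_DIV(10, 2)); /* 5 */
    return 0;
}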
@@ -282,16 +277,19 @@ typedef unsigned long UWord;
typedef long SWord;
#define SWORD_CONSTANT(Const) Const##L
#define UWORD_CONSTANT(Const) Const##UL
+#define ERTS_SWORD_MAX LONG_MAX
#elif SIZEOF_VOID_P == SIZEOF_INT
typedef unsigned int UWord;
typedef int SWord;
#define SWORD_CONSTANT(Const) Const
#define UWORD_CONSTANT(Const) Const##U
+#define ERTS_SWORD_MAX INT_MAX
#elif SIZEOF_VOID_P == SIZEOF_LONG_LONG
typedef unsigned long long UWord;
typedef long long SWord;
#define SWORD_CONSTANT(Const) Const##LL
#define UWORD_CONSTANT(Const) Const##ULL
+#define ERTS_SWORD_MAX LLONG_MAX
#else
#error Found no appropriate type to use for 'Eterm', 'Uint' and 'Sint'
#endif
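Each SWord definition is now paired with an ERTS_SWORD_MAX taken from <limits.h>. A standalone analogue using intptr_t/INTPTR_MAX instead of the configure-provided SIZEOF_* macros; this only illustrates the pairing, not the actual ERTS selection logic:

/* Sketch of pairing a pointer-sized signed type with its maximum value,
 * as ERTS_SWORD_MAX does. Uses intptr_t/INTPTR_MAX as an analogue. */
#include <stdint.h>
#include <stdio.h>

typedef intptr_t MySWord;
#define MY_SWORD_MAX INTPTR_MAX

/* compile-time guarantee that MySWord really is pointer-sized */
typedef char my_sword_size_check[sizeof(MySWord) == sizeof(void *) ? 1 : -1];

int main(void)
{
    printf("sizeof(MySWord) = %zu, MY_SWORD_MAX = %jd\n",
           sizeof(MySWord), (intmax_t) MY_SWORD_MAX);
    return 0;
}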
@@ -304,6 +302,7 @@ typedef unsigned long Uint;
typedef long Sint;
#define SWORD_CONSTANT(Const) Const##L
#define UWORD_CONSTANT(Const) Const##UL
+#define ERTS_SWORD_MAX LONG_MAX
#define ERTS_SIZEOF_ETERM SIZEOF_LONG
#define ErtsStrToSint strtol
#elif SIZEOF_VOID_P == SIZEOF_INT
@@ -312,6 +311,7 @@ typedef unsigned int Uint;
typedef int Sint;
#define SWORD_CONSTANT(Const) Const
#define UWORD_CONSTANT(Const) Const##U
+#define ERTS_SWORD_MAX INT_MAX
#define ERTS_SIZEOF_ETERM SIZEOF_INT
#define ErtsStrToSint strtol
#elif SIZEOF_VOID_P == SIZEOF_LONG_LONG
@@ -320,6 +320,7 @@ typedef unsigned long long Uint;
typedef long long Sint;
#define SWORD_CONSTANT(Const) Const##LL
#define UWORD_CONSTANT(Const) Const##ULL
+#define ERTS_SWORD_MAX LLONG_MAX
#define ERTS_SIZEOF_ETERM SIZEOF_LONG_LONG
#if defined(__WIN32__)
#define ErtsStrToSint _strtoi64
@@ -1012,6 +1013,9 @@ void erl_bin_write(unsigned char *, int, int);
#define ERTS_SMALL_ABS(Small) labs(Small)
#endif
+#ifndef ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
+# define ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC 0
+#endif
#ifdef __WIN32__
void call_break_handler(void);
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 43720084d1..605a625282 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -576,8 +576,8 @@ erts_bld_2tup_list(Uint **hpp, Uint *szp,
}
Eterm
-erts_bld_atom_uint_2tup_list(Uint **hpp, Uint *szp,
- Sint length, Eterm atoms[], Uint uints[])
+erts_bld_atom_uword_2tup_list(Uint **hpp, Uint *szp,
+ Sint length, Eterm atoms[], UWord uints[])
{
Sint i;
Eterm res = THE_NON_VALUE;
diff --git a/erts/emulator/drivers/common/efile_drv.c b/erts/emulator/drivers/common/efile_drv.c
index c997fe1bf9..8de578d8b7 100644
--- a/erts/emulator/drivers/common/efile_drv.c
+++ b/erts/emulator/drivers/common/efile_drv.c
@@ -542,84 +542,84 @@ static void *ef_safe_realloc(void *op, Uint s)
*/
/* char EV_CHAR_P(ErlIOVec *ev, int p, int q) */
-#define EV_CHAR_P(ev, p, q) \
- (((char *)(ev)->iov[(q)].iov_base) + (p))
+#define EV_CHAR_P(ev, p, q) \
+ (((char *)(ev)->iov[q].iov_base) + (p))
/* int EV_GET_CHAR(ErlIOVec *ev, char *p, int *pp, int *qp) */
#define EV_GET_CHAR(ev, p, pp, qp) efile_ev_get_char(ev, p ,pp, qp)
static int
-efile_ev_get_char(ErlIOVec *ev, char *p, int *pp, int *qp) {
- if (*(pp)+1 <= (ev)->iov[*(qp)].iov_len) {
- *(p) = *EV_CHAR_P(ev, *(pp), *(qp));
- if (*(pp)+1 < (ev)->iov[*(qp)].iov_len)
- *(pp) = *(pp)+1;
- else {
- (*(qp))++;
- *pp = 0;
+efile_ev_get_char(ErlIOVec *ev, char *p, size_t *pp, size_t *qp) {
+ if (*pp + 1 <= ev->iov[*qp].iov_len) {
+ *p = *EV_CHAR_P(ev, *pp, *qp);
+ if (*pp + 1 < ev->iov[*qp].iov_len)
+ *pp += 1;
+ else {
+ *qp += 1;
+ *pp = 0;
+ }
+ return !0;
}
- return !0;
- }
- return 0;
+ return 0;
}
/* Uint32 EV_UINT32(ErlIOVec *ev, int p, int q)*/
-#define EV_UINT32(ev, p, q) \
- ((Uint32) *(((unsigned char *)(ev)->iov[(q)].iov_base) + (p)))
+#define EV_UINT32(ev, p, q) \
+ ((Uint32) ((unsigned char *)(ev)->iov[q].iov_base)[p])
/* int EV_GET_UINT32(ErlIOVec *ev, Uint32 *p, int *pp, int *qp) */
-#define EV_GET_UINT32(ev, p, pp, qp) efile_ev_get_uint32(ev,p,pp,qp)
+#define EV_GET_UINT32(ev, p, pp, qp) efile_ev_get_uint32(ev, p, pp, qp)
static int
-efile_ev_get_uint32(ErlIOVec *ev, Uint32 *p, int *pp, int *qp) {
- if (*(pp)+4 <= (ev)->iov[*(qp)].iov_len) {
- *(p) = (EV_UINT32(ev, *(pp), *(qp)) << 24)
- | (EV_UINT32(ev, *(pp)+1, *(qp)) << 16)
- | (EV_UINT32(ev, *(pp)+2, *(qp)) << 8)
- | (EV_UINT32(ev, *(pp)+3, *(qp)));
- if (*(pp)+4 < (ev)->iov[*(qp)].iov_len)
- *(pp) = *(pp)+4;
- else {
- (*(qp))++;
- *pp = 0;
+efile_ev_get_uint32(ErlIOVec *ev, Uint32 *p, size_t *pp, size_t *qp) {
+ if (*pp + 4 <= ev->iov[*qp].iov_len) {
+ *p = (EV_UINT32(ev, *pp, *qp) << 24)
+ | (EV_UINT32(ev, *pp + 1, *qp) << 16)
+ | (EV_UINT32(ev, *pp + 2, *qp) << 8)
+ | (EV_UINT32(ev, *pp + 3, *qp));
+ if (*pp + 4 < ev->iov[*qp].iov_len)
+ *pp += 4;
+ else {
+ *qp += 1;
+ *pp = 0;
+ }
+ return !0;
}
- return !0;
- }
- return 0;
+ return 0;
}
/* Uint64 EV_UINT64(ErlIOVec *ev, int p, int q)*/
-#define EV_UINT64(ev, p, q) \
- ((Uint64) *(((unsigned char *)(ev)->iov[(q)].iov_base) + (p)))
+#define EV_UINT64(ev, p, q) \
+ ((Uint64) ((unsigned char *)(ev)->iov[q].iov_base)[p])
/* int EV_GET_UINT64(ErlIOVec *ev, Uint64 *p, int *pp, int *qp) */
-#define EV_GET_UINT64(ev, p, pp, qp) efile_ev_get_uint64(ev,p,pp,qp)
+#define EV_GET_UINT64(ev, p, pp, qp) efile_ev_get_uint64(ev, p, pp, qp)
static int
-efile_ev_get_uint64(ErlIOVec *ev, Uint64 *p, int *pp, int *qp) {
- if (*(pp)+8 <= (ev)->iov[*(qp)].iov_len) {
- *(p) = (EV_UINT64(ev, *(pp), *(qp)) << 56)
- | (EV_UINT64(ev, *(pp)+1, *(qp)) << 48)
- | (EV_UINT64(ev, *(pp)+2, *(qp)) << 40)
- | (EV_UINT64(ev, *(pp)+3, *(qp)) << 32)
- | (EV_UINT64(ev, *(pp)+4, *(qp)) << 24)
- | (EV_UINT64(ev, *(pp)+5, *(qp)) << 16)
- | (EV_UINT64(ev, *(pp)+6, *(qp)) << 8)
- | (EV_UINT64(ev, *(pp)+7, *(qp)));
- if (*(pp)+8 < (ev)->iov[*(qp)].iov_len)
- *(pp) = *(pp)+8;
- else {
- (*(qp))++;
- *pp = 0;
+efile_ev_get_uint64(ErlIOVec *ev, Uint64 *p, size_t *pp, size_t *qp) {
+ if (*pp + 8 <= ev->iov[*qp].iov_len) {
+ *p = (EV_UINT64(ev, *pp, *qp) << 56)
+ | (EV_UINT64(ev, *pp + 1, *qp) << 48)
+ | (EV_UINT64(ev, *pp + 2, *qp) << 40)
+ | (EV_UINT64(ev, *pp + 3, *qp) << 32)
+ | (EV_UINT64(ev, *pp + 4, *qp) << 24)
+ | (EV_UINT64(ev, *pp + 5, *qp) << 16)
+ | (EV_UINT64(ev, *pp + 6, *qp) << 8)
+ | (EV_UINT64(ev, *pp + 7, *qp));
+ if (*pp + 8 < ev->iov[*qp].iov_len)
+ *pp += 8;
+ else {
+ *qp += 1;
+ *pp = 0;
+ }
+ return !0;
}
- return !0;
- }
- return 0;
+ return 0;
}
/* int EV_GET_SINT64(ErlIOVec *ev, Uint64 *p, int *pp, int *qp) */
-#define EV_GET_SINT64(ev, p, pp, qp) efile_ev_get_sint64(ev,p,pp,qp)
+#define EV_GET_SINT64(ev, p, pp, qp) efile_ev_get_sint64(ev, p, pp, qp)
static int
-efile_ev_get_sint64(ErlIOVec *ev, Sint64 *p, int *pp, int *qp) {
- Uint64 *tmp = (Uint64*)p;
- return EV_GET_UINT64(ev,tmp,pp,qp);
+efile_ev_get_sint64(ErlIOVec *ev, Sint64 *p, size_t *pp, size_t *qp) {
+ Uint64 *tmp = (Uint64*)p;
+ return EV_GET_UINT64(ev, tmp, pp, qp);
}
#if 0
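The rewritten efile_ev_get_uint32()/efile_ev_get_uint64() helpers decode big-endian integers from an ErlIOVec while maintaining a (vector index, byte offset) cursor, now typed as size_t. A standalone sketch of the same decoding over a local iovec-like struct; as in the driver, a field is assumed to fit within one vector entry:

/* Standalone sketch of big-endian decoding with a (vector, offset) cursor,
 * in the spirit of efile_ev_get_uint32(). The iovec-like struct and names
 * are local to this example. */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct vec { const unsigned char *base; size_t len; };

/* Read a 32-bit big-endian value at (*qp, *pp); advance the cursor.
 * Returns 1 on success, 0 if the current vector entry is too short. */
static int get_be32(const struct vec *iov, size_t *pp, size_t *qp, uint32_t *out)
{
    const struct vec *v = &iov[*qp];
    if (*pp + 4 > v->len)
        return 0;
    *out = ((uint32_t) v->base[*pp] << 24)
         | ((uint32_t) v->base[*pp + 1] << 16)
         | ((uint32_t) v->base[*pp + 2] << 8)
         |  (uint32_t) v->base[*pp + 3];
    if (*pp + 4 < v->len)
        *pp += 4;
    else {            /* consumed the entry: move to the next one */
        *qp += 1;
        *pp = 0;
    }
    return 1;
}

int main(void)
{
    unsigned char buf[] = { 0x00, 0x00, 0x01, 0x02 };
    struct vec iov[1] = { { buf, sizeof(buf) } };
    size_t p = 0, q = 0;
    uint32_t val;
    if (get_be32(iov, &p, &q, &val))
        printf("%u (q=%zu p=%zu)\n", val, q, p); /* 258 (q=1 p=0) */
    return 0;
}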
@@ -1139,7 +1139,7 @@ static void invoke_read(void *data)
read_size = erts_gzread((gzFile)d->fd,
d->c.read.binp->orig_bytes + d->c.read.bin_offset,
size);
- status = (read_size != -1);
+ status = (read_size != (size_t) -1);
if (!status) {
d->errInfo.posix_errno = EIO;
}
@@ -1213,7 +1213,7 @@ static void invoke_read_line(void *data)
d->c.read_line.binp->orig_bytes +
d->c.read_line.read_offset + d->c.read_line.read_size,
size);
- status = (read_size != -1);
+ status = (read_size != (size_t) -1);
if (!status) {
d->errInfo.posix_errno = EIO;
}
@@ -1707,8 +1707,9 @@ static void invoke_pwritev(void *data) {
ASSERT(written == size);
d->again = 0;
}
- } else
+ } else {
ASSERT(written >= FILE_SEGMENT_WRITE);
+ }
MUTEX_LOCK(d->c.writev.q_mtx);
driver_deq(d->c.pwritev.port, written);
@@ -3205,7 +3206,7 @@ static void
file_outputv(ErlDrvData e, ErlIOVec *ev) {
file_descriptor* desc = (file_descriptor*)e;
char command;
- int p, q;
+ size_t p, q;
int err;
struct t_data *d = NULL;
#ifdef USE_VM_PROBES
diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c
index 301ce2d0e2..4fb20bf1b0 100644
--- a/erts/emulator/drivers/common/inet_drv.c
+++ b/erts/emulator/drivers/common/inet_drv.c
@@ -86,6 +86,13 @@
#endif
typedef unsigned long long llu_t;
+#ifndef INT16_MIN
+#define INT16_MIN (-32768)
+#endif
+#ifndef INT16_MAX
+#define INT16_MAX (32767)
+#endif
+
#ifdef __WIN32__
#define STRNCASECMP strncasecmp
@@ -282,7 +289,7 @@ static BOOL (WINAPI *fpSetHandleInformation)(HANDLE,DWORD,DWORD);
static unsigned long zero_value = 0;
static unsigned long one_value = 1;
-#else
+#else /* #ifdef __WIN32__ */
#include <sys/time.h>
#ifdef NETDB_H_NEEDS_IN_H
@@ -315,9 +322,17 @@ static unsigned long one_value = 1;
#include <net/if.h>
+#ifdef HAVE_SCHED_H
+#include <sched.h>
+#endif
+
+#ifdef HAVE_SETNS_H
+#include <setns.h>
+#endif
+
/* SCTP support -- currently for UNIX platforms only: */
#undef HAVE_SCTP
-#if (!defined(__WIN32__) && defined(HAVE_SCTP_H))
+#if defined(HAVE_SCTP_H)
#include <netinet/sctp.h>
@@ -418,7 +433,7 @@ static int (*p_sctp_bindx)(int sd, struct sockaddr *addrs,
static int (*p_sctp_peeloff)(int sd, sctp_assoc_t assoc_id) = NULL;
#endif
-#endif /* SCTP supported */
+#endif /* #if defined(HAVE_SCTP_H) */
#ifndef WANT_NONBLOCKING
#define WANT_NONBLOCKING
@@ -512,7 +527,7 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n)
} while(0)
-#endif /* __WIN32__ */
+#endif /* #ifdef __WIN32__ #else */
#ifdef HAVE_SOCKLEN_T
# define SOCKLEN_T socklen_t
@@ -573,6 +588,7 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n)
#define INET_PASSIVE 0 /* false */
#define INET_ACTIVE 1 /* true */
#define INET_ONCE 2 /* true; active once then passive */
+#define INET_MULTI 3 /* true; active N then passive */
/* INET_REQ_GETSTATUS enumeration */
#define INET_F_OPEN 0x0001
@@ -680,6 +696,7 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n)
#define INET_LOPT_TCP_SEND_TIMEOUT_CLOSE 35 /* auto-close on send timeout or not */
#define INET_LOPT_MSGQ_HIWTRMRK 36 /* set local msgq high watermark */
#define INET_LOPT_MSGQ_LOWTRMRK 37 /* set local msgq low watermark */
+#define INET_LOPT_NETNS 38 /* Network namespace pathname */
/* SCTP options: a separate range, from 100: */
#define SCTP_OPT_RTOINFO 100
#define SCTP_OPT_ASSOCINFO 101
@@ -916,6 +933,7 @@ typedef struct {
inet_async_op op_queue[INET_MAX_ASYNC]; /* call queue */
int active; /* 0 = passive, 1 = active, 2 = active once */
+ Sint16 active_count; /* counter for {active,N} */
int stype; /* socket type:
SOCK_STREAM/SOCK_DGRAM/SOCK_SEQPACKET */
int sprotocol; /* socket protocol:
@@ -955,6 +973,10 @@ typedef struct {
int is_ignored; /* if a fd is ignored by the inet_drv.
This flag should be set to true when
the fd is used outside of inet_drv. */
+#ifdef HAVE_SETNS
+ char *netns; /* Socket network namespace name
+ as full file path */
+#endif
} inet_descriptor;
@@ -1147,22 +1169,36 @@ static int packet_inet_output(udp_descriptor* udesc, HANDLE event);
static int async_ref = 0; /* async reference id generator */
#define NEW_ASYNC_ID() ((async_ref++) & 0xffff)
+/* check for transition from active to passive */
+#define INET_CHECK_ACTIVE_TO_PASSIVE(inet) \
+ do { \
+ if ((inet)->active == INET_ONCE) \
+ (inet)->active = INET_PASSIVE; \
+ else if ((inet)->active == INET_MULTI && --((inet)->active_count) == 0) { \
+ (inet)->active = INET_PASSIVE; \
+ packet_passive_message(inet); \
+ } \
+ } while (0)
static ErlDrvTermData am_ok;
static ErlDrvTermData am_tcp;
static ErlDrvTermData am_udp;
static ErlDrvTermData am_error;
+static ErlDrvTermData am_einval;
static ErlDrvTermData am_inet_async;
static ErlDrvTermData am_inet_reply;
static ErlDrvTermData am_timeout;
static ErlDrvTermData am_closed;
+static ErlDrvTermData am_tcp_passive;
static ErlDrvTermData am_tcp_closed;
static ErlDrvTermData am_tcp_error;
+static ErlDrvTermData am_udp_passive;
static ErlDrvTermData am_udp_error;
static ErlDrvTermData am_empty_out_q;
static ErlDrvTermData am_ssl_tls;
#ifdef HAVE_SCTP
static ErlDrvTermData am_sctp;
+static ErlDrvTermData am_sctp_passive;
static ErlDrvTermData am_sctp_error;
static ErlDrvTermData am_true;
static ErlDrvTermData am_false;
@@ -1172,6 +1208,7 @@ static ErlDrvTermData am_list;
static ErlDrvTermData am_binary;
static ErlDrvTermData am_active;
static ErlDrvTermData am_once;
+static ErlDrvTermData am_multi;
static ErlDrvTermData am_buffer;
static ErlDrvTermData am_linger;
static ErlDrvTermData am_recbuf;
@@ -1181,6 +1218,7 @@ static ErlDrvTermData am_dontroute;
static ErlDrvTermData am_priority;
static ErlDrvTermData am_tos;
static ErlDrvTermData am_ipv6_v6only;
+static ErlDrvTermData am_netns;
#endif
/* special errors for bad ports and sequences */
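INET_CHECK_ACTIVE_TO_PASSIVE captures the delivery-side bookkeeping for {active,once} and the new {active,N} mode: each delivered packet either flips an active-once socket back to passive or decrements the counter, and when the counter reaches zero the socket goes passive and a *_passive message is emitted. A standalone sketch of that state machine; names and the notify stand-in are illustrative:

/* Sketch of the active-mode state machine behind
 * INET_CHECK_ACTIVE_TO_PASSIVE: passive / active / active-once /
 * active-N with a countdown. Names and the notify callback are
 * illustrative, not the driver's API. */
#include <stdio.h>

enum active_mode { PASSIVE = 0, ACTIVE = 1, ONCE = 2, MULTI = 3 };

struct sock_state {
    enum active_mode active;
    int active_count;             /* only meaningful in MULTI mode */
};

static void notify_passive(struct sock_state *s)
{
    (void) s;
    printf("-> {tcp_passive, Socket}\n"); /* stand-in for packet_passive_message() */
}

/* Called after each packet delivered to the owner in active mode. */
static void after_delivery(struct sock_state *s)
{
    if (s->active == ONCE)
        s->active = PASSIVE;
    else if (s->active == MULTI && --s->active_count == 0) {
        s->active = PASSIVE;
        notify_passive(s);
    }
}

int main(void)
{
    struct sock_state s = { MULTI, 3 };
    int pkt;
    for (pkt = 0; pkt < 3; pkt++)
        after_delivery(&s);       /* third call sends the passive message */
    printf("mode now %d\n", s.active); /* 0 == PASSIVE */
    return 0;
}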
@@ -3323,6 +3361,34 @@ static int packet_binary_message
}
/*
+** active mode message: send active-to-passive transition message
+** {tcp_passive, S} or
+** {udp_passive, S} or
+** {sctp_passive, S}
+*/
+ static int packet_passive_message(inet_descriptor* desc)
+ {
+ ErlDrvTermData spec[6];
+ int i = 0;
+
+ DEBUGF(("packet_passive_message(%ld):\r\n", (long)desc->port));
+
+ if (desc->sprotocol == IPPROTO_TCP)
+ i = LOAD_ATOM(spec, i, am_tcp_passive);
+ else {
+#ifdef HAVE_SCTP
+ i = LOAD_ATOM(spec, i, IS_SCTP(desc) ? am_sctp_passive : am_udp_passive);
+#else
+ i = LOAD_ATOM(spec, i, am_udp_passive);
+#endif
+ }
+ i = LOAD_PORT(spec, i, desc->dport);
+ i = LOAD_TUPLE(spec, i, 2);
+ ASSERT(i <= 6);
+ return erl_drv_output_term(desc->dport, spec, i);
+ }
+
+/*
** send active message {udp_error|sctp_error, S, Error}
*/
static int packet_error_message(udp_descriptor* udesc, int err)
@@ -3364,7 +3430,7 @@ static int tcp_reply_data(tcp_descriptor* desc, char* buf, int len)
int code;
const char* body = buf;
int bodylen = len;
-
+
packet_get_body(desc->inet.htype, &body, &bodylen);
if (desc->inet.deliver == INET_DELIVER_PORT) {
@@ -3382,8 +3448,7 @@ static int tcp_reply_data(tcp_descriptor* desc, char* buf, int len)
if (code < 0)
return code;
- if (desc->inet.active == INET_ONCE)
- desc->inet.active = INET_PASSIVE;
+ INET_CHECK_ACTIVE_TO_PASSIVE(INETP(desc));
return code;
}
@@ -3410,8 +3475,7 @@ tcp_reply_binary_data(tcp_descriptor* desc, ErlDrvBinary* bin, int offs, int len
}
if (code < 0)
return code;
- if (desc->inet.active == INET_ONCE)
- desc->inet.active = INET_PASSIVE;
+ INET_CHECK_ACTIVE_TO_PASSIVE(INETP(desc));
return code;
}
@@ -3434,8 +3498,7 @@ packet_reply_binary_data(inet_descriptor* desc, unsigned int hsz,
code = packet_binary_message(desc, bin, offs, len, extra);
if (code < 0)
return code;
- if (desc->active == INET_ONCE)
- desc->active = INET_PASSIVE;
+ INET_CHECK_ACTIVE_TO_PASSIVE(desc);
return code;
}
}
@@ -3480,6 +3543,7 @@ sock_init(void) /* May be called multiple times. */
#ifdef HAVE_SCTP
static void inet_init_sctp(void) {
INIT_ATOM(sctp);
+ INIT_ATOM(sctp_passive);
INIT_ATOM(sctp_error);
INIT_ATOM(true);
INIT_ATOM(false);
@@ -3489,6 +3553,7 @@ static void inet_init_sctp(void) {
INIT_ATOM(binary);
INIT_ATOM(active);
INIT_ATOM(once);
+ INIT_ATOM(multi);
INIT_ATOM(buffer);
INIT_ATOM(linger);
INIT_ATOM(recbuf);
@@ -3498,6 +3563,7 @@ static void inet_init_sctp(void) {
INIT_ATOM(priority);
INIT_ATOM(tos);
INIT_ATOM(ipv6_v6only);
+ INIT_ATOM(netns);
/* Option names */
INIT_ATOM(sctp_rtoinfo);
@@ -3613,12 +3679,15 @@ static int inet_init()
INIT_ATOM(tcp);
INIT_ATOM(udp);
INIT_ATOM(error);
+ INIT_ATOM(einval);
INIT_ATOM(inet_async);
INIT_ATOM(inet_reply);
INIT_ATOM(timeout);
INIT_ATOM(closed);
+ INIT_ATOM(tcp_passive);
INIT_ATOM(tcp_closed);
INIT_ATOM(tcp_error);
+ INIT_ATOM(udp_passive);
INIT_ATOM(udp_error);
INIT_ATOM(empty_out_q);
INIT_ATOM(ssl_tls);
@@ -3908,12 +3977,81 @@ static int erl_inet_close(inet_descriptor* desc)
static ErlDrvSSizeT inet_ctl_open(inet_descriptor* desc, int domain, int type,
char** rbuf, ErlDrvSizeT rsize)
{
+ int save_errno;
+#ifdef HAVE_SETNS
+ int current_ns, new_ns;
+ current_ns = new_ns = 0;
+#endif
+ save_errno = 0;
+
if (desc->state != INET_STATE_CLOSED)
return ctl_xerror(EXBADSEQ, rbuf, rsize);
+
+#ifdef HAVE_SETNS
+ if (desc->netns != NULL) {
+ /* Temporarily change network namespace for this thread
+ * while creating the socket
+ */
+ current_ns = open("/proc/self/ns/net", O_RDONLY);
+ if (current_ns == INVALID_SOCKET)
+ return ctl_error(sock_errno(), rbuf, rsize);
+ new_ns = open(desc->netns, O_RDONLY);
+ if (new_ns == INVALID_SOCKET) {
+ save_errno = sock_errno();
+ while (close(current_ns) == INVALID_SOCKET &&
+ sock_errno() == EINTR);
+ return ctl_error(save_errno, rbuf, rsize);
+ }
+ if (setns(new_ns, CLONE_NEWNET) != 0) {
+ save_errno = sock_errno();
+ while (close(new_ns) == INVALID_SOCKET &&
+ sock_errno() == EINTR);
+ while (close(current_ns) == INVALID_SOCKET &&
+ sock_errno() == EINTR);
+ return ctl_error(save_errno, rbuf, rsize);
+ }
+ else {
+ while (close(new_ns) == INVALID_SOCKET &&
+ sock_errno() == EINTR);
+ }
+ }
+#endif
if ((desc->s = sock_open(domain, type, desc->sprotocol)) == INVALID_SOCKET)
- return ctl_error(sock_errno(), rbuf, rsize);
- if ((desc->event = sock_create_event(desc)) == INVALID_EVENT)
- return ctl_error(sock_errno(), rbuf, rsize);
+ save_errno = sock_errno();
+#ifdef HAVE_SETNS
+ if (desc->netns != NULL) {
+ /* Restore network namespace */
+ if (setns(current_ns, CLONE_NEWNET) != 0) {
+ /* XXX Failed to restore network namespace.
+ * What to do? Tidy up and return an error...
+ * Note that the thread now might still be in the namespace.
+ * Can this even happen? Should the emulator be aborted?
+ */
+ if (desc->s != INVALID_SOCKET)
+ save_errno = sock_errno();
+ while (close(desc->s) == INVALID_SOCKET &&
+ sock_errno() == EINTR);
+ desc->s = INVALID_SOCKET;
+ while (close(current_ns) == INVALID_SOCKET &&
+ sock_errno() == EINTR);
+ return ctl_error(save_errno, rbuf, rsize);
+ }
+ else {
+ while (close(current_ns) == INVALID_SOCKET &&
+ sock_errno() == EINTR);
+ }
+ }
+#endif
+ if (desc->s == INVALID_SOCKET)
+ return ctl_error(save_errno, rbuf, rsize);
+
+ if ((desc->event = sock_create_event(desc)) == INVALID_EVENT) {
+ save_errno = sock_errno();
+ while (close(desc->s) == INVALID_SOCKET &&
+ sock_errno() == EINTR);
+ desc->s = INVALID_SOCKET;
+ return ctl_error(save_errno, rbuf, rsize);
+ }
SET_NONBLOCKING(desc->s);
#ifdef __WIN32__
driver_select(desc->port, desc->event, ERL_DRV_READ, 1);
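inet_ctl_open() temporarily moves the calling thread into the configured network namespace via /proc/self/ns/net and setns(2), creates the socket there, then restores the original namespace, closing the namespace fds with EINTR-retrying close loops. A Linux-only standalone sketch of that pattern (usually requires CAP_SYS_ADMIN; the netns path is an example value):

/* Linux-only sketch of creating a socket inside another network namespace
 * and then restoring the caller's namespace, mirroring the flow in
 * inet_ctl_open(). Error handling is condensed. */
#define _GNU_SOURCE
#include <sched.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>

static void close_retry(int fd)
{
    while (close(fd) == -1 && errno == EINTR)
        ;                         /* EINTR-retrying close, as in the driver */
}

static int socket_in_netns(const char *netns_path, int domain, int type)
{
    int cur, tgt, sock, saved;

    cur = open("/proc/self/ns/net", O_RDONLY);
    if (cur < 0)
        return -1;
    tgt = open(netns_path, O_RDONLY);
    if (tgt < 0) {
        saved = errno; close_retry(cur); errno = saved; return -1;
    }
    if (setns(tgt, CLONE_NEWNET) != 0) {     /* enter target namespace */
        saved = errno; close_retry(tgt); close_retry(cur); errno = saved;
        return -1;
    }
    close_retry(tgt);

    sock = socket(domain, type, 0);          /* created in the target netns */
    saved = errno;

    if (setns(cur, CLONE_NEWNET) != 0) {     /* restore original namespace */
        close_retry(cur);
        if (sock >= 0) close_retry(sock);
        return -1;
    }
    close_retry(cur);
    errno = saved;
    return sock;
}

int main(void)
{
    int s = socket_in_netns("/var/run/netns/example", AF_INET, SOCK_STREAM);
    if (s < 0)
        perror("socket_in_netns");
    else
        close_retry(s);
    return 0;
}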
@@ -4349,7 +4487,7 @@ static ErlDrvSSizeT inet_ctl_getiflist(inet_descriptor* desc,
case AF_INET6:
#endif
case AF_INET:
- ASSERT(sp+IFNAMSIZ+1 < sbuf+ifc.ifc_len+1)
+ ASSERT(sp+IFNAMSIZ+1 < sbuf+ifc.ifc_len+1);
strncpy(sp, ifrp->ifr_name, IFNAMSIZ);
sp[IFNAMSIZ] = '\0';
sp += strlen(sp), ++sp;
@@ -5424,8 +5562,25 @@ static int inet_set_opts(inet_descriptor* desc, char* ptr, int len)
case INET_LOPT_ACTIVE:
DEBUGF(("inet_set_opts(%ld): s=%d, ACTIVE=%d\r\n",
- (long)desc->port, desc->s,ival));
+ (long)desc->port, desc->s, ival));
desc->active = ival;
+ if (desc->active == INET_MULTI) {
+ long ac = desc->active_count;
+ Sint16 nval = get_int16(ptr);
+ ptr += 2;
+ len -= 2;
+ ac += nval;
+ if (ac > INT16_MAX || ac < INT16_MIN)
+ return -1;
+ desc->active_count += nval;
+ if (desc->active_count < 0)
+ desc->active_count = 0;
+ if (desc->active_count == 0) {
+ desc->active = INET_PASSIVE;
+ packet_passive_message(desc);
+ }
+ } else
+ desc->active_count = 0;
if ((desc->stype == SOCK_STREAM) && (desc->active != INET_PASSIVE) &&
(desc->state == INET_STATE_CLOSED)) {
tcp_closed_message((tcp_descriptor *) desc);
@@ -5529,6 +5684,20 @@ static int inet_set_opts(inet_descriptor* desc, char* ptr, int len)
}
continue;
+#ifdef HAVE_SETNS
+ case INET_LOPT_NETNS:
+ /* It is annoying that ival and len are both (signed) int */
+ if (ival < 0) return -1;
+ if (len < ival) return -1;
+ if (desc->netns != NULL) FREE(desc->netns);
+ desc->netns = ALLOC(((unsigned int) ival) + 1);
+ memcpy(desc->netns, ptr, ival);
+ desc->netns[ival] = '\0';
+ ptr += ival;
+ len -= ival;
+ continue;
+#endif
+
case INET_OPT_REUSEADDR:
#ifdef __WIN32__
continue; /* Bjorn says */
@@ -5722,7 +5891,8 @@ static int inet_set_opts(inet_descriptor* desc, char* ptr, int len)
/* XXX fprintf(stderr,"desc->htype == %d, old_htype == %d,
desc->active == %d, old_active == %d\r\n",(int)desc->htype,
(int) old_htype, (int) desc->active, (int) old_active );*/
- return 1+(desc->htype == old_htype && desc->active == INET_ONCE);
+ return 1+(desc->htype == old_htype &&
+ (desc->active == INET_ONCE || desc->active == INET_MULTI));
}
return 0;
}
@@ -5855,9 +6025,39 @@ static int sctp_set_opts(inet_descriptor* desc, char* ptr, int len)
case INET_LOPT_ACTIVE:
desc->active = get_int32(curr); curr += 4;
+ if (desc->active == INET_MULTI) {
+ long ac = desc->active_count;
+ Sint16 nval = get_int16(curr); curr += 2;
+ ac += nval;
+ if (ac > INT16_MAX || ac < INT16_MIN)
+ return -1;
+ desc->active_count += nval;
+ if (desc->active_count < 0)
+ desc->active_count = 0;
+ if (desc->active_count == 0) {
+ desc->active = INET_PASSIVE;
+ packet_passive_message(desc);
+ }
+ } else
+ desc->active_count = 0;
res = 0;
continue;
+#ifdef HAVE_SETNS
+ case INET_LOPT_NETNS:
+ {
+ size_t ns_len;
+ ns_len = get_int32(curr); curr += 4;
+ CHKLEN(curr, ns_len);
+ if (desc->netns != NULL) FREE(desc->netns);
+ desc->netns = ALLOC(ns_len + 1);
+ memcpy(desc->netns, curr, ns_len);
+ desc->netns[ns_len] = '\0';
+ curr += ns_len;
+ }
+ continue;
+#endif
+
/* SCTP options and applicable generic INET options: */
case SCTP_OPT_RTOINFO:
@@ -6362,6 +6562,11 @@ static ErlDrvSSizeT inet_fill_opts(inet_descriptor* desc,
case INET_LOPT_ACTIVE:
*ptr++ = opt;
put_int32(desc->active, ptr);
+ if (desc->active == INET_MULTI) {
+ PLACE_FOR(2,ptr);
+ put_int16(desc->active_count, ptr);
+ ptr += 2;
+ }
continue;
case INET_LOPT_PACKET:
*ptr++ = opt;
@@ -6454,6 +6659,22 @@ static ErlDrvSSizeT inet_fill_opts(inet_descriptor* desc,
}
continue;
+#ifdef HAVE_SETNS
+ case INET_LOPT_NETNS:
+ if (desc->netns != NULL) {
+ size_t netns_len;
+ netns_len = strlen(desc->netns);
+ *ptr++ = opt;
+ put_int32(netns_len, ptr);
+ PLACE_FOR(netns_len, ptr);
+ memcpy(ptr, desc->netns, netns_len);
+ ptr += netns_len;
+ } else {
+ TRUNCATE_TO(0,ptr);
+ }
+ continue;
+#endif
+
case INET_OPT_PRIORITY:
#ifdef SO_PRIORITY
type = SO_PRIORITY;
@@ -6718,7 +6939,10 @@ static ErlDrvSSizeT sctp_fill_opts(inet_descriptor* desc,
}
case INET_LOPT_ACTIVE:
{
- PLACE_FOR(spec, i, 2*LOAD_ATOM_CNT + LOAD_TUPLE_CNT);
+ if (desc->active == INET_MULTI)
+ PLACE_FOR(spec, i, LOAD_ATOM_CNT + LOAD_INT_CNT + LOAD_TUPLE_CNT);
+ else
+ PLACE_FOR(spec, i, 2*LOAD_ATOM_CNT + LOAD_TUPLE_CNT);
i = LOAD_ATOM (spec, i, am_active);
switch (desc->active)
{
@@ -6731,12 +6955,31 @@ static ErlDrvSSizeT sctp_fill_opts(inet_descriptor* desc,
case INET_ONCE :
{ i = LOAD_ATOM (spec, i, am_once); break; }
+ case INET_MULTI :
+ { i = LOAD_INT(spec, i, desc->active_count); break; }
+
default: ASSERT (0);
}
i = LOAD_TUPLE (spec, i, 2);
break;
}
+#ifdef HAVE_SETNS
+ case INET_LOPT_NETNS:
+ if (desc->netns != NULL) {
+ PLACE_FOR
+ (spec, i,
+ LOAD_ATOM_CNT + LOAD_BUF2BINARY_CNT + LOAD_TUPLE_CNT);
+ i = LOAD_ATOM (spec, i, am_netns);
+ i = LOAD_BUF2BINARY
+ (spec, i, desc->netns, strlen(desc->netns));
+ i = LOAD_TUPLE (spec, i, 2);
+ break;
+ }
+ else
+ continue; /* Ignore */
+#endif
+
/* SCTP and generic INET options: */
case SCTP_OPT_RTOINFO:
@@ -7458,6 +7701,10 @@ static ErlDrvSSizeT inet_subscribe(inet_descriptor* desc,
static void inet_stop(inet_descriptor* desc)
{
erl_inet_close(desc);
+#ifdef HAVE_SETNS
+ if (desc->netns != NULL)
+ FREE(desc->netns);
+#endif
FREE(desc);
}
@@ -7507,6 +7754,7 @@ static ErlDrvData inet_start(ErlDrvPort port, int size, int protocol)
socket */
desc->deliver = INET_DELIVER_TERM; /* standard term format */
desc->active = INET_PASSIVE; /* start passive */
+ desc->active_count = 0;
desc->oph = NULL;
desc->opt = NULL;
@@ -7537,6 +7785,10 @@ static ErlDrvData inet_start(ErlDrvPort port, int size, int protocol)
desc->is_ignored = 0;
+#ifdef HAVE_SETNS
+ desc->netns = NULL;
+#endif
+
return (ErlDrvData)desc;
}
diff --git a/erts/emulator/drivers/unix/ttsl_drv.c b/erts/emulator/drivers/unix/ttsl_drv.c
index 1e436830e7..491e0a090e 100644
--- a/erts/emulator/drivers/unix/ttsl_drv.c
+++ b/erts/emulator/drivers/unix/ttsl_drv.c
@@ -745,7 +745,7 @@ static Sint16 get_sint16(char *s)
{
return ((*s << 8) | ((byte*)s)[1]);
}
-
+
static int start_lbuf(void)
{
if (!lbuf && !(lbuf = ( Uint32*) driver_alloc(lbuf_size * sizeof(Uint32))))
@@ -1091,7 +1091,7 @@ static int move_cursor(int from, int to)
move_left(-dc);
return TRUE;
}
-
+
static int start_termcap(void)
{
int eres;
@@ -1187,7 +1187,7 @@ static int move_down(int n)
tputs(down, 1, outc);
return TRUE;
}
-
+
/*
* Updates cols if terminal has resized (SIGWINCH). Should be called
@@ -1209,7 +1209,7 @@ static void update_cols(void)
cols = width;
}
}
-
+
/*
* Put a terminal device into non-canonical mode with ECHO off.
diff --git a/erts/emulator/drivers/win32/win_efile.c b/erts/emulator/drivers/win32/win_efile.c
index b36a103f8e..319065f57b 100644
--- a/erts/emulator/drivers/win32/win_efile.c
+++ b/erts/emulator/drivers/win32/win_efile.c
@@ -1216,7 +1216,7 @@ int flags;
return 1;
}
-
+
/*
* is_root_unc_name - returns TRUE if the argument is a UNC name specifying
* a root share. That is, if it is of the form \\server\share\.
diff --git a/erts/emulator/sys/common/erl_mmap.c b/erts/emulator/sys/common/erl_mmap.c
new file mode 100644
index 0000000000..a9da7430fb
--- /dev/null
+++ b/erts/emulator/sys/common/erl_mmap.c
@@ -0,0 +1,2832 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2002-2013. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_process.h"
+#include "erl_smp.h"
+#include "atom.h"
+#include "erl_mmap.h"
+#include <stddef.h>
+
+/* #define ERTS_MMAP_OP_RINGBUF_SZ 100 */
+
+#if defined(DEBUG) || 0
+# undef ERTS_MMAP_DEBUG
+# define ERTS_MMAP_DEBUG
+# ifndef ERTS_MMAP_OP_RINGBUF_SZ
+# define ERTS_MMAP_OP_RINGBUF_SZ 100
+# endif
+#endif
+
+#ifndef ERTS_MMAP_OP_RINGBUF_SZ
+# define ERTS_MMAP_OP_RINGBUF_SZ 0
+#endif
+
+/* #define ERTS_MMAP_DEBUG_FILL_AREAS */
+
+#ifdef ERTS_MMAP_DEBUG
+# define ERTS_MMAP_ASSERT ERTS_ASSERT
+#else
+# define ERTS_MMAP_ASSERT(A) ((void) 1)
+#endif
+
+/*
+ * `mmap_state.sa.bot` and `mmap_state.sua.top` are read only after
+ * initialization, but the other pointers are not; i.e., only
+ * ERTS_MMAP_IN_SUPERCARRIER() is allowed without the mutex held.
+ */
+#define ERTS_MMAP_IN_SUPERCARRIER(PTR) \
+ (((UWord) (PTR)) - ((UWord) mmap_state.sa.bot) \
+ < ((UWord) mmap_state.sua.top) - ((UWord) mmap_state.sa.bot))
+#define ERTS_MMAP_IN_SUPERALIGNED_AREA(PTR) \
+ (ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&mmap_state.mtx)), \
+ (((UWord) (PTR)) - ((UWord) mmap_state.sa.bot) \
+ < ((UWord) mmap_state.sa.top) - ((UWord) mmap_state.sa.bot)))
+#define ERTS_MMAP_IN_SUPERUNALIGNED_AREA(PTR) \
+ (ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&mmap_state.mtx)), \
+ (((UWord) (PTR)) - ((UWord) mmap_state.sua.bot) \
+ < ((UWord) mmap_state.sua.top) - ((UWord) mmap_state.sua.bot)))
+
+int erts_have_erts_mmap;
+UWord erts_page_inv_mask;
+
+#if defined(DEBUG) || defined(ERTS_MMAP_DEBUG)
+# undef RBT_DEBUG
+# define RBT_DEBUG
+#endif
+#ifdef RBT_DEBUG
+# define RBT_ASSERT ERTS_ASSERT
+# define IF_RBT_DEBUG(C) C
+#else
+# define RBT_ASSERT(x)
+# define IF_RBT_DEBUG(C)
+#endif
+
+typedef struct RBTNode_ RBTNode;
+struct RBTNode_ {
+ UWord parent_and_color; /* color in bit 0 of parent ptr */
+ RBTNode *left;
+ RBTNode *right;
+};
+
+#define RED_FLG (1)
+#define IS_RED(N) ((N) && ((N)->parent_and_color & RED_FLG))
+#define IS_BLACK(N) (!IS_RED(N))
+#define SET_RED(N) ((N)->parent_and_color |= RED_FLG)
+#define SET_BLACK(N) ((N)->parent_and_color &= ~RED_FLG)
+
+static ERTS_INLINE RBTNode* parent(RBTNode* node)
+{
+ return (RBTNode*) (node->parent_and_color & ~RED_FLG);
+}
+
+static ERTS_INLINE void set_parent(RBTNode* node, RBTNode* parent)
+{
+ RBT_ASSERT(!((UWord)parent & RED_FLG));
+ node->parent_and_color = ((UWord)parent) | (node->parent_and_color & RED_FLG);
+}
+
+static ERTS_INLINE UWord parent_and_color(RBTNode* parent, int color)
+{
+ RBT_ASSERT(!((UWord)parent & RED_FLG));
+ RBT_ASSERT(!(color & ~RED_FLG));
+ return ((UWord)parent) | color;
+}
+
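RBTNode keeps the node color in bit 0 of the parent pointer, relying on node alignment to keep that bit otherwise clear; parent()/set_parent() mask it off and SET_RED/SET_BLACK toggle it. A standalone sketch of the low-bit flag packing trick:

/* Sketch of packing a one-bit flag into the low bit of an aligned pointer,
 * as RBTNode does with its color. Works because word-aligned node
 * addresses always have bit 0 clear. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG ((uintptr_t) 1)

struct node { uintptr_t parent_and_flag; };

static struct node *get_parent(const struct node *n)
{
    return (struct node *) (n->parent_and_flag & ~FLAG);
}

static void set_parent(struct node *n, struct node *p)
{
    assert(((uintptr_t) p & FLAG) == 0);      /* alignment assumption */
    n->parent_and_flag = (uintptr_t) p | (n->parent_and_flag & FLAG);
}

static int is_flagged(const struct node *n) { return (n->parent_and_flag & FLAG) != 0; }
static void set_flag(struct node *n)   { n->parent_and_flag |= FLAG; }
static void clear_flag(struct node *n) { n->parent_and_flag &= ~FLAG; }

int main(void)
{
    struct node parent = {0}, child = {0};
    set_parent(&child, &parent);
    set_flag(&child);
    printf("parent ok: %d, flagged: %d\n",
           get_parent(&child) == &parent, is_flagged(&child)); /* 1, 1 */
    clear_flag(&child);
    return 0;
}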
+
+enum SortOrder {
+ ADDR_ORDER, /* only address order */
+ SA_SZ_ADDR_ORDER, /* first super-aligned size then address order */
+ SZ_REVERSE_ADDR_ORDER /* first size then reverse address order */
+};
+#ifdef HARD_DEBUG
+static const char* sort_order_names[] = {"Address","SuperAlignedSize-Address","Size-RevAddress"};
+#endif
+
+typedef struct {
+ RBTNode* root;
+ enum SortOrder order;
+}RBTree;
+
+#ifdef HARD_DEBUG
+# define HARD_CHECK_IS_MEMBER(ROOT,NODE) rbt_assert_is_member(ROOT,NODE)
+# define HARD_CHECK_TREE(TREE,SZ) check_tree(TREE, SZ)
+static int rbt_assert_is_member(RBTNode* root, RBTNode* node);
+static RBTNode* check_tree(RBTree* tree, Uint);
+#else
+# define HARD_CHECK_IS_MEMBER(ROOT,NODE)
+# define HARD_CHECK_TREE(TREE,SZ)
+#endif
+
+#if ERTS_MMAP_OP_RINGBUF_SZ
+
+static int mmap_op_ix;
+
+typedef enum {
+ ERTS_OP_TYPE_NONE,
+ ERTS_OP_TYPE_MMAP,
+ ERTS_OP_TYPE_MUNMAP,
+ ERTS_OP_TYPE_MREMAP
+} ErtsMMapOpType;
+
+typedef struct {
+ ErtsMMapOpType type;
+ void *result;
+ UWord in_size;
+ UWord out_size;
+ void *old_ptr;
+ UWord old_size;
+} ErtsMMapOp;
+
+static ErtsMMapOp mmap_ops[ERTS_MMAP_OP_RINGBUF_SZ];
+
+#define ERTS_MMAP_OP_RINGBUF_INIT() \
+ do { \
+ int ix__; \
+ for (ix__ = 0; ix__ < ERTS_MMAP_OP_RINGBUF_SZ; ix__++) {\
+ mmap_ops[ix__].type = ERTS_OP_TYPE_NONE; \
+ mmap_ops[ix__].result = NULL; \
+ mmap_ops[ix__].in_size = 0; \
+ mmap_ops[ix__].out_size = 0; \
+ mmap_ops[ix__].old_ptr = NULL; \
+ mmap_ops[ix__].old_size = 0; \
+ } \
+ mmap_op_ix = ERTS_MMAP_OP_RINGBUF_SZ-1; \
+ } while (0)
+
+#define ERTS_MMAP_OP_START(SZ) \
+ do { \
+ int ix__; \
+ if (++mmap_op_ix >= ERTS_MMAP_OP_RINGBUF_SZ) \
+ mmap_op_ix = 0; \
+ ix__ = mmap_op_ix; \
+ mmap_ops[ix__].type = ERTS_OP_TYPE_MMAP; \
+ mmap_ops[ix__].result = NULL; \
+ mmap_ops[ix__].in_size = (SZ); \
+ mmap_ops[ix__].out_size = 0; \
+ mmap_ops[ix__].old_ptr = NULL; \
+ mmap_ops[ix__].old_size = 0; \
+ } while (0)
+
+#define ERTS_MMAP_OP_END(PTR, SZ) \
+ do { \
+ int ix__ = mmap_op_ix; \
+ mmap_ops[ix__].result = (PTR); \
+ mmap_ops[ix__].out_size = (SZ); \
+ } while (0)
+
+#define ERTS_MMAP_OP_LCK(RES, IN_SZ, OUT_SZ) \
+ do { \
+ erts_smp_mtx_lock(&mmap_state.mtx); \
+ ERTS_MMAP_OP_START((IN_SZ)); \
+ ERTS_MMAP_OP_END((RES), (OUT_SZ)); \
+ erts_smp_mtx_unlock(&mmap_state.mtx); \
+ } while (0)
+
+#define ERTS_MUNMAP_OP(PTR, SZ) \
+ do { \
+ int ix__; \
+ if (++mmap_op_ix >= ERTS_MMAP_OP_RINGBUF_SZ) \
+ mmap_op_ix = 0; \
+ ix__ = mmap_op_ix; \
+ mmap_ops[ix__].type = ERTS_OP_TYPE_MUNMAP; \
+ mmap_ops[ix__].result = NULL; \
+ mmap_ops[ix__].in_size = 0; \
+ mmap_ops[ix__].out_size = 0; \
+ mmap_ops[ix__].old_ptr = (PTR); \
+ mmap_ops[ix__].old_size = (SZ); \
+ } while (0)
+
+#define ERTS_MUNMAP_OP_LCK(PTR, SZ) \
+ do { \
+ erts_smp_mtx_lock(&mmap_state.mtx); \
+ ERTS_MUNMAP_OP((PTR), (SZ)); \
+ erts_smp_mtx_unlock(&mmap_state.mtx); \
+ } while (0)
+
+#define ERTS_MREMAP_OP_START(OLD_PTR, OLD_SZ, IN_SZ) \
+ do { \
+ int ix__; \
+ if (++mmap_op_ix >= ERTS_MMAP_OP_RINGBUF_SZ) \
+ mmap_op_ix = 0; \
+ ix__ = mmap_op_ix; \
+ mmap_ops[ix__].type = ERTS_OP_TYPE_MREMAP; \
+ mmap_ops[ix__].result = NULL; \
+ mmap_ops[ix__].in_size = (IN_SZ); \
+ mmap_ops[ix__].out_size = (OLD_SZ); \
+ mmap_ops[ix__].old_ptr = (OLD_PTR); \
+ mmap_ops[ix__].old_size = (OLD_SZ); \
+ } while (0)
+
+#define ERTS_MREMAP_OP_END(PTR, SZ) \
+ do { \
+ int ix__ = mmap_op_ix; \
+ mmap_ops[ix__].result = (PTR); \
+ mmap_ops[mmap_op_ix].out_size = (SZ); \
+ } while (0)
+
+#define ERTS_MREMAP_OP_LCK(RES, OLD_PTR, OLD_SZ, IN_SZ, OUT_SZ) \
+ do { \
+ erts_smp_mtx_lock(&mmap_state.mtx); \
+ ERTS_MREMAP_OP_START((OLD_PTR), (OLD_SZ), (IN_SZ)); \
+ ERTS_MREMAP_OP_END((RES), (OUT_SZ)); \
+ erts_smp_mtx_unlock(&mmap_state.mtx); \
+ } while (0)
+
+#define ERTS_MMAP_OP_ABORT() \
+ do { \
+ int ix__ = mmap_op_ix; \
+ mmap_ops[ix__].type = ERTS_OP_TYPE_NONE; \
+ mmap_ops[ix__].result = NULL; \
+ mmap_ops[ix__].in_size = 0; \
+ mmap_ops[ix__].out_size = 0; \
+ mmap_ops[ix__].old_ptr = NULL; \
+ mmap_ops[ix__].old_size = 0; \
+ if (--mmap_op_ix < 0) \
+ mmap_op_ix = ERTS_MMAP_OP_RINGBUF_SZ-1; \
+ } while (0)
+
+#else
+
+#define ERTS_MMAP_OP_RINGBUF_INIT()
+#define ERTS_MMAP_OP_START(SZ)
+#define ERTS_MMAP_OP_END(PTR, SZ)
+#define ERTS_MMAP_OP_LCK(RES, IN_SZ, OUT_SZ)
+#define ERTS_MUNMAP_OP(PTR, SZ)
+#define ERTS_MUNMAP_OP_LCK(PTR, SZ)
+#define ERTS_MREMAP_OP_START(OLD_PTR, OLD_SZ, IN_SZ)
+#define ERTS_MREMAP_OP_END(PTR, SZ)
+#define ERTS_MREMAP_OP_LCK(RES, OLD_PTR, OLD_SZ, IN_SZ, OUT_SZ)
+#define ERTS_MMAP_OP_ABORT()
+
+#endif
+
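When ERTS_MMAP_OP_RINGBUF_SZ is non-zero, the macros above record the most recent mmap/munmap/mremap operations in a fixed-size ring buffer for post-mortem debugging; otherwise they compile away. A standalone sketch of such an operation ring buffer, with illustrative sizes and fields:

/* Sketch of a fixed-size ring buffer of recent operations, in the spirit
 * of the ERTS_MMAP_OP_* debug macros. Sizes and fields are illustrative. */
#include <stdio.h>

#define OP_RINGBUF_SZ 4

enum op_type { OP_NONE, OP_MAP, OP_UNMAP, OP_REMAP };

struct op_rec { enum op_type type; void *ptr; unsigned long size; };

static struct op_rec ops[OP_RINGBUF_SZ];
static int op_ix = OP_RINGBUF_SZ - 1;

static void record_op(enum op_type type, void *ptr, unsigned long size)
{
    if (++op_ix >= OP_RINGBUF_SZ)      /* wrap around, overwriting the oldest */
        op_ix = 0;
    ops[op_ix].type = type;
    ops[op_ix].ptr = ptr;
    ops[op_ix].size = size;
}

int main(void)
{
    int i;
    for (i = 0; i < 6; i++)
        record_op(OP_MAP, (void *) 0, (unsigned long) (i + 1) * 4096);
    for (i = 0; i < OP_RINGBUF_SZ; i++)      /* only the last 4 ops survive */
        printf("slot %d: size %lu\n", i, ops[i].size);
    return 0;
}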
+typedef struct {
+ RBTNode snode; /* node in 'stree' */
+ RBTNode anode; /* node in 'atree' */
+ char* start;
+ char* end;
+}ErtsFreeSegDesc;
+
+typedef struct {
+ RBTree stree; /* size ordered tree */
+ RBTree atree; /* address ordered tree */
+ Uint nseg;
+}ErtsFreeSegMap;
+
+static struct {
+ int (*reserve_physical)(char *, UWord);
+ void (*unreserve_physical)(char *, UWord);
+ int supercarrier;
+ int no_os_mmap;
+ /*
+     * Super unaligned area is located above the super aligned
+     * area. That is, `sa.bot` is the beginning of the super
+     * carrier, `sua.top` is the end of the super carrier,
+     * and sa.top and sua.bot move towards each other.
+ */
+ struct {
+ char *top;
+ char *bot;
+ ErtsFreeSegMap map;
+ } sua;
+ struct {
+ char *top;
+ char *bot;
+ ErtsFreeSegMap map;
+ } sa;
+#if HAVE_MMAP && (!defined(MAP_ANON) && !defined(MAP_ANONYMOUS))
+ int mmap_fd;
+#endif
+ erts_smp_mtx_t mtx;
+ struct {
+ char *free_list;
+ char *unused_start;
+ char *unused_end;
+ char *new_area_hint;
+ Uint reserved;
+ } desc;
+ struct {
+ UWord free_seg_descs;
+ struct {
+ UWord curr;
+ UWord max;
+ } free_segs;
+ } no;
+ struct {
+ struct {
+ UWord total;
+ struct {
+ UWord total;
+ UWord sa;
+ UWord sua;
+ } used;
+ } supercarrier;
+ struct {
+ UWord used;
+ } os;
+ } size;
+} mmap_state;
+
+#define ERTS_MMAP_SIZE_SC_SA_INC(SZ) \
+ do { \
+ mmap_state.size.supercarrier.used.total += (SZ); \
+ mmap_state.size.supercarrier.used.sa += (SZ); \
+ ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.total \
+ <= mmap_state.size.supercarrier.total); \
+ ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.sa \
+ <= mmap_state.size.supercarrier.used.total); \
+ } while (0)
+#define ERTS_MMAP_SIZE_SC_SA_DEC(SZ) \
+ do { \
+ ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.total >= (SZ)); \
+ mmap_state.size.supercarrier.used.total -= (SZ); \
+ ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.sa >= (SZ)); \
+ mmap_state.size.supercarrier.used.sa -= (SZ); \
+ } while (0)
+#define ERTS_MMAP_SIZE_SC_SUA_INC(SZ) \
+ do { \
+ mmap_state.size.supercarrier.used.total += (SZ); \
+ mmap_state.size.supercarrier.used.sua += (SZ); \
+ ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.total \
+ <= mmap_state.size.supercarrier.total); \
+ ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.sua \
+ <= mmap_state.size.supercarrier.used.total); \
+ } while (0)
+#define ERTS_MMAP_SIZE_SC_SUA_DEC(SZ) \
+ do { \
+ ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.total >= (SZ)); \
+ mmap_state.size.supercarrier.used.total -= (SZ); \
+ ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.sua >= (SZ)); \
+ mmap_state.size.supercarrier.used.sua -= (SZ); \
+ } while (0)
+#define ERTS_MMAP_SIZE_OS_INC(SZ) \
+ do { \
+ ERTS_MMAP_ASSERT(mmap_state.size.os.used + (SZ) >= (SZ)); \
+ mmap_state.size.os.used += (SZ); \
+ } while (0)
+#define ERTS_MMAP_SIZE_OS_DEC(SZ) \
+ do { \
+ ERTS_MMAP_ASSERT(mmap_state.size.os.used >= (SZ)); \
+ mmap_state.size.os.used -= (SZ); \
+ } while (0)
+
+
+static void
+add_free_desc_area(char *start, char *end)
+{
+ ERTS_MMAP_ASSERT(end == (void *) 0 || end > start);
+ if (sizeof(ErtsFreeSegDesc) <= ((UWord) end) - ((UWord) start)) {
+ UWord no;
+ ErtsFreeSegDesc *prev_desc, *desc;
+ char *desc_end;
+
+ no = 1;
+ prev_desc = (ErtsFreeSegDesc *) start;
+ prev_desc->start = mmap_state.desc.free_list;
+ desc = (ErtsFreeSegDesc *) (start + sizeof(ErtsFreeSegDesc));
+ desc_end = start + 2*sizeof(ErtsFreeSegDesc);
+
+ while (desc_end <= end) {
+ desc->start = (char *) prev_desc;
+ prev_desc = desc;
+ desc = (ErtsFreeSegDesc *) desc_end;
+ desc_end += sizeof(ErtsFreeSegDesc);
+ no++;
+ }
+ mmap_state.desc.free_list = (char *) prev_desc;
+ mmap_state.no.free_seg_descs += no;
+ }
+}
+
+static ErtsFreeSegDesc *
+add_unused_free_desc_area(void)
+{
+ char *ptr;
+ if (!mmap_state.desc.unused_start)
+ return NULL;
+
+ ERTS_MMAP_ASSERT(mmap_state.desc.unused_end);
+ ERTS_MMAP_ASSERT(ERTS_PAGEALIGNED_SIZE
+ <= mmap_state.desc.unused_end - mmap_state.desc.unused_start);
+
+ ptr = mmap_state.desc.unused_start + ERTS_PAGEALIGNED_SIZE;
+ add_free_desc_area(mmap_state.desc.unused_start, ptr);
+
+ if ((mmap_state.desc.unused_end - ptr) >= ERTS_PAGEALIGNED_SIZE)
+ mmap_state.desc.unused_start = ptr;
+ else
+ mmap_state.desc.unused_end = mmap_state.desc.unused_start = NULL;
+
+ ERTS_MMAP_ASSERT(mmap_state.desc.free_list);
+ return (ErtsFreeSegDesc *) mmap_state.desc.free_list;
+}
+
+static ERTS_INLINE ErtsFreeSegDesc *
+alloc_desc(void)
+{
+ ErtsFreeSegDesc *res;
+ res = (ErtsFreeSegDesc *) mmap_state.desc.free_list;
+ if (!res) {
+ res = add_unused_free_desc_area();
+ if (!res)
+ return NULL;
+ }
+ mmap_state.desc.free_list = res->start;
+ ASSERT(mmap_state.no.free_segs.curr < mmap_state.no.free_seg_descs);
+ mmap_state.no.free_segs.curr++;
+ if (mmap_state.no.free_segs.max < mmap_state.no.free_segs.curr)
+ mmap_state.no.free_segs.max = mmap_state.no.free_segs.curr;
+ return res;
+}
+
+static ERTS_INLINE void
+free_desc(ErtsFreeSegDesc *desc)
+{
+ desc->start = mmap_state.desc.free_list;
+ mmap_state.desc.free_list = (char *) desc;
+ ERTS_MMAP_ASSERT(mmap_state.no.free_segs.curr > 0);
+ mmap_state.no.free_segs.curr--;
+}
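Free-segment descriptors are carved out of reserved pages and threaded onto an intrusive free list through their start field: alloc_desc() pops from the list and free_desc() pushes back. A standalone sketch of carving a fixed pool into descriptors and recycling them through such a list, with local names:

/* Sketch of an intrusive free list of fixed-size descriptors carved out of
 * a raw memory area, mirroring add_free_desc_area()/alloc_desc()/free_desc().
 * Names are local to the example. */
#include <stdio.h>
#include <stddef.h>

struct desc { char *start; char *end; };   /* `start` doubles as the link */

static char *free_list = NULL;

static void add_desc_area(char *start, char *end)
{
    while (start + sizeof(struct desc) <= end) {
        struct desc *d = (struct desc *) start;
        d->start = free_list;              /* push onto the free list */
        free_list = (char *) d;
        start += sizeof(struct desc);
    }
}

static struct desc *alloc_desc(void)
{
    struct desc *d = (struct desc *) free_list;
    if (d)
        free_list = d->start;              /* pop */
    return d;
}

static void free_desc(struct desc *d)
{
    d->start = free_list;                  /* push back */
    free_list = (char *) d;
}

int main(void)
{
    static struct desc pool[8];            /* properly aligned backing area */
    struct desc *d1, *d2;
    add_desc_area((char *) pool, (char *) (pool + 8));
    d1 = alloc_desc();
    free_desc(d1);
    d2 = alloc_desc();
    printf("recycled same descriptor: %d\n", d1 == d2); /* 1 */
    return 0;
}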
+
+static ERTS_INLINE ErtsFreeSegDesc* anode_to_desc(RBTNode* anode)
+{
+ return (ErtsFreeSegDesc*) ((char*)anode - offsetof(ErtsFreeSegDesc, anode));
+}
+
+static ERTS_INLINE ErtsFreeSegDesc* snode_to_desc(RBTNode* snode)
+{
+ return (ErtsFreeSegDesc*) ((char*)snode - offsetof(ErtsFreeSegDesc, snode));
+}
+
+static ERTS_INLINE ErtsFreeSegDesc* node_to_desc(enum SortOrder order, RBTNode* node)
+{
+ return order==ADDR_ORDER ? anode_to_desc(node) : snode_to_desc(node);
+}
+
+static ERTS_INLINE SWord usable_size(enum SortOrder order,
+ ErtsFreeSegDesc* desc)
+{
+ return ((order == SA_SZ_ADDR_ORDER) ?
+ ERTS_SUPERALIGNED_FLOOR(desc->end) - ERTS_SUPERALIGNED_CEILING(desc->start)
+ : desc->end - desc->start);
+}
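For SA_SZ_ADDR_ORDER trees, usable_size() counts only the bytes left after rounding the segment start up and the segment end down to the super alignment. A standalone sketch of that computation; the 256 KB alignment is an example value, not the actual ERTS setting:

/* Sketch of the aligned-usable-size computation in usable_size(): how many
 * bytes of a free segment remain once its start is rounded up and its end
 * rounded down to a power-of-two "super alignment". */
#include <stdio.h>
#include <stdint.h>

#define SUPERALIGN ((uintptr_t) 256 * 1024)          /* power of two */
#define ALIGN_CEIL(p)  (((uintptr_t)(p) + SUPERALIGN - 1) & ~(SUPERALIGN - 1))
#define ALIGN_FLOOR(p) ((uintptr_t)(p) & ~(SUPERALIGN - 1))

/* Usable size for a superaligned allocation inside [start,end); may be
 * negative (as a signed value) if no aligned block fits. */
static intptr_t usable_superaligned(uintptr_t start, uintptr_t end)
{
    return (intptr_t) (ALIGN_FLOOR(end) - ALIGN_CEIL(start));
}

int main(void)
{
    uintptr_t start = 0x10000, end = 0xA0000;        /* 64 KB .. 640 KB */
    printf("raw %ld, superaligned-usable %ld\n",
           (long) (end - start), (long) usable_superaligned(start, end));
    /* raw 589824, superaligned-usable 262144 (one 256 KB block) */
    return 0;
}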
+
+#ifdef HARD_DEBUG
+static ERTS_INLINE SWord cmp_nodes(enum SortOrder order,
+ RBTNode* lhs, RBTNode* rhs)
+{
+ ErtsFreeSegDesc* ldesc = node_to_desc(order, lhs);
+ ErtsFreeSegDesc* rdesc = node_to_desc(order, rhs);
+ RBT_ASSERT(lhs != rhs);
+ if (order != ADDR_ORDER) {
+ SWord diff = usable_size(order, ldesc) - usable_size(order, rdesc);
+ if (diff) return diff;
+ }
+ if (order != SZ_REVERSE_ADDR_ORDER) {
+ return (char*)ldesc->start - (char*)rdesc->start;
+ }
+ else {
+ return (char*)rdesc->start - (char*)ldesc->start;
+ }
+}
+#endif /* HARD_DEBUG */
+
+static ERTS_INLINE SWord cmp_with_node(enum SortOrder order,
+ SWord sz, char* addr, RBTNode* rhs)
+{
+ ErtsFreeSegDesc* rdesc;
+ if (order != ADDR_ORDER) {
+ SWord diff;
+ rdesc = snode_to_desc(rhs);
+ diff = sz - usable_size(order, rdesc);
+ if (diff) return diff;
+ }
+ else
+ rdesc = anode_to_desc(rhs);
+
+ if (order != SZ_REVERSE_ADDR_ORDER)
+ return addr - (char*)rdesc->start;
+ else
+ return (char*)rdesc->start - addr;
+}
+
+
+static ERTS_INLINE void
+left_rotate(RBTNode **root, RBTNode *x)
+{
+ RBTNode *y = x->right;
+ x->right = y->left;
+ if (y->left)
+ set_parent(y->left, x);
+ set_parent(y, parent(x));
+ if (!parent(y)) {
+ RBT_ASSERT(*root == x);
+ *root = y;
+ }
+ else if (x == parent(x)->left)
+ parent(x)->left = y;
+ else {
+ RBT_ASSERT(x == parent(x)->right);
+ parent(x)->right = y;
+ }
+ y->left = x;
+ set_parent(x, y);
+}
+
+static ERTS_INLINE void
+right_rotate(RBTNode **root, RBTNode *x)
+{
+ RBTNode *y = x->left;
+ x->left = y->right;
+ if (y->right)
+ set_parent(y->right, x);
+ set_parent(y, parent(x));
+ if (!parent(y)) {
+ RBT_ASSERT(*root == x);
+ *root = y;
+ }
+ else if (x == parent(x)->right)
+ parent(x)->right = y;
+ else {
+ RBT_ASSERT(x == parent(x)->left);
+ parent(x)->left = y;
+ }
+ y->right = x;
+ set_parent(x, y);
+}
+
+/*
+ * Replace node x with node y
+ * NOTE: segment descriptor of y is not changed
+ */
+static ERTS_INLINE void
+replace(RBTNode **root, RBTNode *x, RBTNode *y)
+{
+
+ if (!parent(x)) {
+ RBT_ASSERT(*root == x);
+ *root = y;
+ }
+ else if (x == parent(x)->left)
+ parent(x)->left = y;
+ else {
+ RBT_ASSERT(x == parent(x)->right);
+ parent(x)->right = y;
+ }
+ if (x->left) {
+ RBT_ASSERT(parent(x->left) == x);
+ set_parent(x->left, y);
+ }
+ if (x->right) {
+ RBT_ASSERT(parent(x->right) == x);
+ set_parent(x->right, y);
+ }
+
+ y->parent_and_color = x->parent_and_color;
+ y->right = x->right;
+ y->left = x->left;
+}
+
+static void
+tree_insert_fixup(RBTNode** root, RBTNode *node)
+{
+ RBTNode *x = node, *y, *papa_x, *granpa_x;
+
+ /*
+ * Rearrange the tree so that it satisfies the Red-Black Tree properties
+ */
+
+ papa_x = parent(x);
+ RBT_ASSERT(x != *root && IS_RED(papa_x));
+ do {
+
+ /*
+ * x and its parent are both red. Move the red pair up the tree
+ * until we get to the root or until we can separate them.
+ */
+
+ granpa_x = parent(papa_x);
+ RBT_ASSERT(IS_RED(x));
+ RBT_ASSERT(IS_BLACK(granpa_x));
+ RBT_ASSERT(granpa_x);
+
+ if (papa_x == granpa_x->left) {
+ y = granpa_x->right;
+ if (IS_RED(y)) {
+ SET_BLACK(y);
+ SET_BLACK(papa_x);
+ SET_RED(granpa_x);
+ x = granpa_x;
+ }
+ else {
+
+ if (x == papa_x->right) {
+ left_rotate(root, papa_x);
+ papa_x = x;
+ x = papa_x->left;
+ }
+
+ RBT_ASSERT(x == granpa_x->left->left);
+ RBT_ASSERT(IS_RED(x));
+ RBT_ASSERT(IS_RED(papa_x));
+ RBT_ASSERT(IS_BLACK(granpa_x));
+ RBT_ASSERT(IS_BLACK(y));
+
+ SET_BLACK(papa_x);
+ SET_RED(granpa_x);
+ right_rotate(root, granpa_x);
+
+ RBT_ASSERT(x == parent(x)->left);
+ RBT_ASSERT(IS_RED(x));
+ RBT_ASSERT(IS_RED(parent(x)->right));
+ RBT_ASSERT(IS_BLACK(parent(x)));
+ break;
+ }
+ }
+ else {
+ RBT_ASSERT(papa_x == granpa_x->right);
+ y = granpa_x->left;
+ if (IS_RED(y)) {
+ SET_BLACK(y);
+ SET_BLACK(papa_x);
+ SET_RED(granpa_x);
+ x = granpa_x;
+ }
+ else {
+
+ if (x == papa_x->left) {
+ right_rotate(root, papa_x);
+ papa_x = x;
+ x = papa_x->right;
+ }
+
+ RBT_ASSERT(x == granpa_x->right->right);
+ RBT_ASSERT(IS_RED(x));
+ RBT_ASSERT(IS_RED(papa_x));
+ RBT_ASSERT(IS_BLACK(granpa_x));
+ RBT_ASSERT(IS_BLACK(y));
+
+ SET_BLACK(papa_x);
+ SET_RED(granpa_x);
+ left_rotate(root, granpa_x);
+
+ RBT_ASSERT(x == parent(x)->right);
+ RBT_ASSERT(IS_RED(x));
+ RBT_ASSERT(IS_RED(parent(x)->left));
+ RBT_ASSERT(IS_BLACK(parent(x)));
+ break;
+ }
+ }
+ } while (x != *root && (papa_x=parent(x), IS_RED(papa_x)));
+
+ SET_BLACK(*root);
+}
+
+static void
+rbt_delete(RBTree* tree, RBTNode* del)
+{
+ Uint spliced_is_black;
+ RBTNode *x, *y, *z = del, *papa_y;
+ RBTNode null_x; /* null_x is used to get the fixup started when we
+ splice out a node without children. */
+
+ HARD_CHECK_IS_MEMBER(tree->root, del);
+ HARD_CHECK_TREE(tree, 0);
+
+ null_x.parent_and_color = parent_and_color(NULL, !RED_FLG);
+
+ /* Remove node from tree... */
+
+ /* Find node to splice out */
+ if (!z->left || !z->right)
+ y = z;
+ else
+	/* Set y to z's successor */
+ for(y = z->right; y->left; y = y->left);
+ /* splice out y */
+ x = y->left ? y->left : y->right;
+ spliced_is_black = IS_BLACK(y);
+ papa_y = parent(y);
+ if (x) {
+ set_parent(x, papa_y);
+ }
+ else if (spliced_is_black) {
+ x = &null_x;
+ x->right = x->left = NULL;
+ x->parent_and_color = parent_and_color(papa_y, !RED_FLG);
+ y->left = x;
+ }
+
+ if (!papa_y) {
+ RBT_ASSERT(tree->root == y);
+ tree->root = x;
+ }
+ else {
+ if (y == papa_y->left) {
+ papa_y->left = x;
+ }
+ else {
+ RBT_ASSERT(y == papa_y->right);
+ papa_y->right = x;
+ }
+ }
+ if (y != z) {
+ /* We spliced out the successor of z; replace z by the successor */
+ RBT_ASSERT(z != &null_x);
+ replace(&tree->root, z, y);
+ }
+
+ if (spliced_is_black) {
+ RBTNode* papa_x;
+ /* We removed a black node which makes the resulting tree
+ violate the Red-Black Tree properties. Fixup tree... */
+
+ papa_x = parent(x);
+ while (IS_BLACK(x) && papa_x) {
+
+ /*
+ * x has an "extra black" which we move up the tree
+ * until we reach the root or until we can get rid of it.
+ *
+	     * y is the sibling of x
+ */
+
+ if (x == papa_x->left) {
+ y = papa_x->right;
+ RBT_ASSERT(y);
+ if (IS_RED(y)) {
+ RBT_ASSERT(y->right);
+ RBT_ASSERT(y->left);
+ SET_BLACK(y);
+ RBT_ASSERT(IS_BLACK(papa_x));
+ SET_RED(papa_x);
+ left_rotate(&tree->root, papa_x);
+ RBT_ASSERT(papa_x == parent(x));
+ y = papa_x->right;
+ }
+ RBT_ASSERT(y);
+ RBT_ASSERT(IS_BLACK(y));
+ if (IS_BLACK(y->left) && IS_BLACK(y->right)) {
+ SET_RED(y);
+ }
+ else {
+ if (IS_BLACK(y->right)) {
+ SET_BLACK(y->left);
+ SET_RED(y);
+ right_rotate(&tree->root, y);
+ RBT_ASSERT(papa_x == parent(x));
+ y = papa_x->right;
+ }
+ RBT_ASSERT(y);
+ if (IS_RED(papa_x)) {
+
+ SET_BLACK(papa_x);
+ SET_RED(y);
+ }
+ RBT_ASSERT(y->right);
+ SET_BLACK(y->right);
+ left_rotate(&tree->root, papa_x);
+ x = tree->root;
+ break;
+ }
+ }
+ else {
+ RBT_ASSERT(x == papa_x->right);
+ y = papa_x->left;
+ RBT_ASSERT(y);
+ if (IS_RED(y)) {
+ RBT_ASSERT(y->right);
+ RBT_ASSERT(y->left);
+ SET_BLACK(y);
+ RBT_ASSERT(IS_BLACK(papa_x));
+ SET_RED(papa_x);
+ right_rotate(&tree->root, papa_x);
+ RBT_ASSERT(papa_x == parent(x));
+ y = papa_x->left;
+ }
+ RBT_ASSERT(y);
+ RBT_ASSERT(IS_BLACK(y));
+ if (IS_BLACK(y->right) && IS_BLACK(y->left)) {
+ SET_RED(y);
+ }
+ else {
+ if (IS_BLACK(y->left)) {
+ SET_BLACK(y->right);
+ SET_RED(y);
+ left_rotate(&tree->root, y);
+ RBT_ASSERT(papa_x == parent(x));
+ y = papa_x->left;
+ }
+ RBT_ASSERT(y);
+ if (IS_RED(papa_x)) {
+ SET_BLACK(papa_x);
+ SET_RED(y);
+ }
+ RBT_ASSERT(y->left);
+ SET_BLACK(y->left);
+ right_rotate(&tree->root, papa_x);
+ x = tree->root;
+ break;
+ }
+ }
+ x = papa_x;
+ papa_x = parent(x);
+ }
+ SET_BLACK(x);
+
+ papa_x = parent(&null_x);
+ if (papa_x) {
+ if (papa_x->left == &null_x)
+ papa_x->left = NULL;
+ else {
+ RBT_ASSERT(papa_x->right == &null_x);
+ papa_x->right = NULL;
+ }
+ RBT_ASSERT(!null_x.left);
+ RBT_ASSERT(!null_x.right);
+ }
+ else if (tree->root == &null_x) {
+ tree->root = NULL;
+ RBT_ASSERT(!null_x.left);
+ RBT_ASSERT(!null_x.right);
+ }
+ }
+ HARD_CHECK_TREE(tree, 0);
+}
+
+
+static void
+rbt_insert(RBTree* tree, RBTNode* node)
+{
+#ifdef RBT_DEBUG
+ ErtsFreeSegDesc *dbg_under=NULL, *dbg_over=NULL;
+#endif
+ ErtsFreeSegDesc* desc = node_to_desc(tree->order, node);
+ char* seg_addr = desc->start;
+ SWord seg_sz = desc->end - desc->start;
+
+ HARD_CHECK_TREE(tree, 0);
+
+ node->left = NULL;
+ node->right = NULL;
+
+ if (!tree->root) {
+ node->parent_and_color = parent_and_color(NULL, !RED_FLG);
+ tree->root = node;
+ }
+ else {
+ RBTNode *x = tree->root;
+ while (1) {
+ SWord diff = cmp_with_node(tree->order, seg_sz, seg_addr, x);
+ if (diff < 0) {
+ IF_RBT_DEBUG(dbg_over = node_to_desc(tree->order, x));
+ if (!x->left) {
+ node->parent_and_color = parent_and_color(x, RED_FLG);
+ x->left = node;
+ break;
+ }
+ x = x->left;
+ }
+ else {
+ RBT_ASSERT(diff > 0);
+ IF_RBT_DEBUG(dbg_under = node_to_desc(tree->order, x));
+ if (!x->right) {
+ node->parent_and_color = parent_and_color(x, RED_FLG);
+ x->right = node;
+ break;
+ }
+ x = x->right;
+ }
+ }
+
+ RBT_ASSERT(parent(node));
+#ifdef RBT_DEBUG
+ if (tree->order == ADDR_ORDER) {
+ RBT_ASSERT(!dbg_under || dbg_under->end < desc->start);
+ RBT_ASSERT(!dbg_over || dbg_over->start > desc->end);
+ }
+#endif
+ RBT_ASSERT(IS_RED(node));
+ if (IS_RED(parent(node)))
+ tree_insert_fixup(&tree->root, node);
+ }
+ HARD_CHECK_TREE(tree, 0);
+}
+
+/*
+ * Traverse tree in (reverse) sorting order
+ */
+static void
+rbt_foreach_node(RBTree* tree,
+ void (*fn)(RBTNode*,void*),
+ void* arg, int reverse)
+{
+#ifdef HARD_DEBUG
+ Sint blacks = -1;
+ Sint curr_blacks = 1;
+ Uint depth = 1;
+ Uint max_depth = 0;
+ Uint node_cnt = 0;
+#endif
+ enum { RECURSE_LEFT, DO_NODE, RECURSE_RIGHT, RETURN_TO_PARENT }state;
+ RBTNode *x = tree->root;
+
+ RBT_ASSERT(!x || !parent(x));
+
+ state = reverse ? RECURSE_RIGHT : RECURSE_LEFT;
+ while (x) {
+ switch (state) {
+ case RECURSE_LEFT:
+ if (x->left) {
+ RBT_ASSERT(parent(x->left) == x);
+ #ifdef HARD_DEBUG
+ ++depth;
+ if (IS_BLACK(x->left))
+ curr_blacks++;
+ #endif
+ x = x->left;
+ state = reverse ? RECURSE_RIGHT : RECURSE_LEFT;
+ }
+ else {
+ #ifdef HARD_DEBUG
+ if (blacks < 0)
+ blacks = curr_blacks;
+ RBT_ASSERT(blacks == curr_blacks);
+ #endif
+ state = reverse ? RETURN_TO_PARENT : DO_NODE;
+ }
+ break;
+
+ case DO_NODE:
+ #ifdef HARD_DEBUG
+ ++node_cnt;
+ if (depth > max_depth)
+ max_depth = depth;
+ #endif
+ (*fn) (x, arg); /* Do it! */
+ state = reverse ? RECURSE_LEFT : RECURSE_RIGHT;
+ break;
+
+ case RECURSE_RIGHT:
+ if (x->right) {
+ RBT_ASSERT(parent(x->right) == x);
+ #ifdef HARD_DEBUG
+ ++depth;
+ if (IS_BLACK(x->right))
+ curr_blacks++;
+ #endif
+ x = x->right;
+ state = reverse ? RECURSE_RIGHT : RECURSE_LEFT;
+ }
+ else {
+ #ifdef HARD_DEBUG
+ if (blacks < 0)
+ blacks = curr_blacks;
+ RBT_ASSERT(blacks == curr_blacks);
+ #endif
+ state = reverse ? DO_NODE : RETURN_TO_PARENT;
+ }
+ break;
+
+ case RETURN_TO_PARENT:
+ #ifdef HARD_DEBUG
+ if (IS_BLACK(x))
+ curr_blacks--;
+ --depth;
+ #endif
+ if (parent(x)) {
+ if (x == parent(x)->left) {
+ state = reverse ? RETURN_TO_PARENT : DO_NODE;
+ }
+ else {
+ RBT_ASSERT(x == parent(x)->right);
+ state = reverse ? DO_NODE : RETURN_TO_PARENT;
+ }
+ }
+ x = parent(x);
+ break;
+ }
+ }
+#ifdef HARD_DEBUG
+ RBT_ASSERT(depth == 0 || (!tree->root && depth==1));
+ RBT_ASSERT(curr_blacks == 0);
+ RBT_ASSERT((1 << (max_depth/2)) <= node_cnt);
+#endif
+}
+
+#if defined(RBT_DEBUG) || defined(HARD_DEBUG_MSEG)
+static RBTNode* rbt_prev_node(RBTNode* node)
+{
+ RBTNode* x;
+ if (node->left) {
+ for (x=node->left; x->right; x=x->right)
+ ;
+ return x;
+ }
+ for (x=node; parent(x); x=parent(x)) {
+ if (parent(x)->right == x)
+ return parent(x);
+ }
+ return NULL;
+}
+static RBTNode* rbt_next_node(RBTNode* node)
+{
+ RBTNode* x;
+ if (node->right) {
+ for (x=node->right; x->left; x=x->left)
+ ;
+ return x;
+ }
+ for (x=node; parent(x); x=parent(x)) {
+ if (parent(x)->left == x)
+ return parent(x);
+ }
+ return NULL;
+}
+#endif /* RBT_DEBUG || HARD_DEBUG_MSEG */
+
+
+/* The API to keep track of a bunch of separated (free) segments
+ (non-overlapping and non-adjacent).
+ */
+static void init_free_seg_map(ErtsFreeSegMap*, enum SortOrder);
+static void adjacent_free_seg(ErtsFreeSegMap*, char* start, char* end,
+ ErtsFreeSegDesc** under, ErtsFreeSegDesc** over);
+static void insert_free_seg(ErtsFreeSegMap*, ErtsFreeSegDesc*, char* start, char* end);
+static void resize_free_seg(ErtsFreeSegMap*, ErtsFreeSegDesc*, char* start, char* end);
+static void delete_free_seg(ErtsFreeSegMap*, ErtsFreeSegDesc*);
+static ErtsFreeSegDesc* lookup_free_seg(ErtsFreeSegMap*, SWord sz);
+
+
+static void init_free_seg_map(ErtsFreeSegMap* map, enum SortOrder order)
+{
+ map->atree.root = NULL;
+ map->atree.order = ADDR_ORDER;
+ map->stree.root = NULL;
+ map->stree.order = order;
+ map->nseg = 0;
+}
+
+/* Lookup directly adjacent free segments to the given area [start->end].
+ * The given area must not contain any free segments.
+ */
+static void adjacent_free_seg(ErtsFreeSegMap* map, char* start, char* end,
+ ErtsFreeSegDesc** under, ErtsFreeSegDesc** over)
+{
+ RBTNode* x = map->atree.root;
+
+ *under = NULL;
+ *over = NULL;
+ while (x) {
+ if (start < anode_to_desc(x)->start) {
+ RBT_ASSERT(end <= anode_to_desc(x)->start);
+ if (end == anode_to_desc(x)->start) {
+ RBT_ASSERT(!*over);
+ *over = anode_to_desc(x);
+ }
+ x = x->left;
+ }
+ else {
+ RBT_ASSERT(start >= anode_to_desc(x)->end);
+ if (start == anode_to_desc(x)->end) {
+ RBT_ASSERT(!*under);
+ *under = anode_to_desc(x);
+ }
+ x = x->right;
+ }
+ }
+}
+
+/* Initialize 'desc' and insert as new free segment [start->end].
+ * The new segment must not contain or be adjacent to any free segment in 'map'.
+ */
+static void insert_free_seg(ErtsFreeSegMap* map, ErtsFreeSegDesc* desc,
+ char* start, char* end)
+{
+ desc->start = start;
+ desc->end = end;
+ rbt_insert(&map->atree, &desc->anode);
+ rbt_insert(&map->stree, &desc->snode);
+ map->nseg++;
+}
+
+/* Resize existing free segment 'desc' to [start->end].
+ * The new segment location must overlap the old location and
+ * it must not contain or be adjacent to any other free segment in 'map'.
+ */
+static void resize_free_seg(ErtsFreeSegMap* map, ErtsFreeSegDesc* desc,
+ char* start, char* end)
+{
+#ifdef RBT_DEBUG
+ RBTNode* prev = rbt_prev_node(&desc->anode);
+ RBTNode* next = rbt_next_node(&desc->anode);
+ RBT_ASSERT(!prev || anode_to_desc(prev)->end < start);
+ RBT_ASSERT(!next || anode_to_desc(next)->start > end);
+#endif
+ rbt_delete(&map->stree, &desc->snode);
+ desc->start = start;
+ desc->end = end;
+ rbt_insert(&map->stree, &desc->snode);
+}
+
+/* Delete existing free segment 'desc' from 'map'.
+ */
+static void delete_free_seg(ErtsFreeSegMap* map, ErtsFreeSegDesc* desc)
+{
+ rbt_delete(&map->atree, &desc->anode);
+ rbt_delete(&map->stree, &desc->snode);
+ map->nseg--;
+}
+
+/* Look up a free segment in 'map' with at least 'need_sz' usable bytes.
+ */
+static ErtsFreeSegDesc* lookup_free_seg(ErtsFreeSegMap* map, SWord need_sz)
+{
+ RBTNode* x = map->stree.root;
+ ErtsFreeSegDesc* best_desc = NULL;
+ const enum SortOrder order = map->stree.order;
+
+ while (x) {
+ ErtsFreeSegDesc* desc = snode_to_desc(x);
+ SWord seg_sz = usable_size(order, desc);
+
+ if (seg_sz < need_sz) {
+ x = x->right;
+ }
+ else {
+ best_desc = desc;
+ x = x->left;
+ }
+ }
+ return best_desc;
+}
+
+struct build_arg_t
+{
+ Process* p;
+ Eterm* hp;
+ Eterm acc;
+};
+
+static void build_free_seg_tuple(RBTNode* node, void* arg)
+{
+ struct build_arg_t* a = (struct build_arg_t*)arg;
+ ErtsFreeSegDesc* desc = anode_to_desc(node);
+ Eterm start= erts_bld_uword(&a->hp, NULL, (UWord)desc->start);
+ Eterm end = erts_bld_uword(&a->hp, NULL, (UWord)desc->end);
+ Eterm tpl = TUPLE2(a->hp, start, end);
+
+ a->hp += 3;
+ a->acc = CONS(a->hp, tpl, a->acc);
+ a->hp += 2;
+}
+
+static
+Eterm build_free_seg_list(Process* p, ErtsFreeSegMap* map)
+{
+ struct build_arg_t barg;
+ Eterm* hp_end;
+ const Uint may_need = map->nseg * (2 + 3 + 2*2); /* cons + tuple + bigs */
+
+ barg.p = p;
+ barg.hp = HAlloc(p, may_need);
+ hp_end = barg.hp + may_need;
+ barg.acc = NIL;
+ rbt_foreach_node(&map->atree, build_free_seg_tuple, &barg, 1);
+
+ RBT_ASSERT(barg.hp <= hp_end);
+ HRelease(p, hp_end, barg.hp);
+ return barg.acc;
+}
+
+#if ERTS_HAVE_OS_MMAP
+/* Implementation of os_mmap()/os_munmap()/os_mremap()... */
+
+#if HAVE_MMAP
+# define ERTS_MMAP_PROT (PROT_READ|PROT_WRITE)
+# if defined(MAP_ANONYMOUS)
+#    define ERTS_MMAP_FLAGS (MAP_ANONYMOUS|MAP_PRIVATE)
+# define ERTS_MMAP_FD (-1)
+# elif defined(MAP_ANON)
+# define ERTS_MMAP_FLAGS (MAP_ANON|MAP_PRIVATE)
+# define ERTS_MMAP_FD (-1)
+# else
+# define ERTS_MMAP_FLAGS (MAP_PRIVATE)
+# define ERTS_MMAP_FD mmap_state.mmap_fd
+# endif
+#endif
+
+static ERTS_INLINE void *
+os_mmap(void *hint_ptr, UWord size, int try_superalign)
+{
+#if HAVE_MMAP
+ void *res;
+#ifdef MAP_ALIGN
+ if (try_superalign)
+ res = mmap((void *) ERTS_SUPERALIGNED_SIZE, size, ERTS_MMAP_PROT,
+ ERTS_MMAP_FLAGS|MAP_ALIGN, ERTS_MMAP_FD, 0);
+ else
+#endif
+ res = mmap((void *) hint_ptr, size, ERTS_MMAP_PROT,
+ ERTS_MMAP_FLAGS, ERTS_MMAP_FD, 0);
+ if (res == MAP_FAILED)
+ return NULL;
+ return res;
+#elif HAVE_VIRTUALALLOC
+ return (void *) VirtualAlloc(NULL, (SIZE_T) size,
+ MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE);
+#else
+# error "missing mmap() or similar"
+#endif
+}
+
+static ERTS_INLINE void
+os_munmap(void *ptr, UWord size)
+{
+#if HAVE_MMAP
+#ifdef ERTS_MMAP_DEBUG
+ int res =
+#endif
+ munmap(ptr, size);
+ ERTS_MMAP_ASSERT(res == 0);
+#elif HAVE_VIRTUALALLOC
+#ifdef DEBUG
+ BOOL res =
+#endif
+ VirtualFree((LPVOID) ptr, (SIZE_T) 0, MEM_RELEASE);
+ ERTS_MMAP_ASSERT(res != 0);
+#else
+# error "missing munmap() or similar"
+#endif
+}
+
+#ifdef ERTS_HAVE_OS_MREMAP
+# if HAVE_MREMAP
+# if defined(__NetBSD__)
+# define ERTS_MREMAP_FLAGS (0)
+# else
+# define ERTS_MREMAP_FLAGS (MREMAP_MAYMOVE)
+# endif
+# endif
+static ERTS_INLINE void *
+os_mremap(void *ptr, UWord old_size, UWord new_size, int try_superalign)
+{
+ void *new_seg;
+#if HAVE_MREMAP
+ new_seg = mremap(ptr, (size_t) old_size,
+# if defined(__NetBSD__)
+ NULL,
+# endif
+ (size_t) new_size, ERTS_MREMAP_FLAGS);
+ if (new_seg == (void *) MAP_FAILED)
+ return NULL;
+ return new_seg;
+#else
+# error "missing mremap() or similar"
+#endif
+}
+#endif
+
+#ifdef ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION
+#if HAVE_MMAP
+
+#define ERTS_MMAP_RESERVE_PROT (ERTS_MMAP_PROT)
+#define ERTS_MMAP_RESERVE_FLAGS (ERTS_MMAP_FLAGS|MAP_FIXED)
+#define ERTS_MMAP_UNRESERVE_PROT (PROT_NONE)
+#define ERTS_MMAP_UNRESERVE_FLAGS (ERTS_MMAP_FLAGS|MAP_NORESERVE|MAP_FIXED)
+#define ERTS_MMAP_VIRTUAL_PROT (PROT_NONE)
+#define ERTS_MMAP_VIRTUAL_FLAGS (ERTS_MMAP_FLAGS|MAP_NORESERVE)
+
+static int
+os_reserve_physical(char *ptr, UWord size)
+{
+ void *res = mmap((void *) ptr, (size_t) size, ERTS_MMAP_RESERVE_PROT,
+ ERTS_MMAP_RESERVE_FLAGS, ERTS_MMAP_FD, 0);
+ if (res == (void *) MAP_FAILED)
+ return 0;
+ return 1;
+}
+
+static void
+os_unreserve_physical(char *ptr, UWord size)
+{
+ void *res = mmap((void *) ptr, (size_t) size, ERTS_MMAP_UNRESERVE_PROT,
+ ERTS_MMAP_UNRESERVE_FLAGS, ERTS_MMAP_FD, 0);
+ if (res == (void *) MAP_FAILED)
+ erl_exit(ERTS_ABORT_EXIT, "Failed to unreserve memory");
+}
+
+static void *
+os_mmap_virtual(char *ptr, UWord size)
+{
+ void *res = mmap((void *) ptr, (size_t) size, ERTS_MMAP_VIRTUAL_PROT,
+ ERTS_MMAP_VIRTUAL_FLAGS, ERTS_MMAP_FD, 0);
+ if (res == (void *) MAP_FAILED)
+ return NULL;
+ return res;
+}
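+
+/* Together these implement on-demand physical memory for the supercarrier:
+ * os_mmap_virtual() reserves address space only (PROT_NONE, MAP_NORESERVE),
+ * os_reserve_physical() makes a subrange readable/writable with MAP_FIXED,
+ * and os_unreserve_physical() hands it back to the kernel by remapping it
+ * as PROT_NONE|MAP_NORESERVE again. */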
+
+#else
+#error "Missing reserve/unreserve physical memory implementation"
+#endif
+#endif /* ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION */
+
+#endif /* ERTS_HAVE_OS_MMAP */
+
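+/* reserve_noop()/unreserve_noop() are used as the reserve_physical/
+ * unreserve_physical callbacks when physical memory is not reserved on
+ * demand (no supercarrier, or a supercarrier that is fully reserved up
+ * front); with ERTS_MMAP_DEBUG_FILL_AREAS they also verify and refresh
+ * the debug fill pattern. */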
+static int reserve_noop(char *ptr, UWord size)
+{
+#ifdef ERTS_MMAP_DEBUG_FILL_AREAS
+ Uint32 *uip, *end = (Uint32 *) (ptr + size);
+
+ for (uip = (Uint32 *) ptr; uip < end; uip++)
+ ERTS_MMAP_ASSERT(*uip == (Uint32) 0xdeadbeef);
+ for (uip = (Uint32 *) ptr; uip < end; uip++)
+ *uip = (Uint32) 0xfeedfeed;
+#endif
+ return 1;
+}
+
+static void unreserve_noop(char *ptr, UWord size)
+{
+#ifdef ERTS_MMAP_DEBUG_FILL_AREAS
+ Uint32 *uip, *end = (Uint32 *) (ptr + size);
+
+ for (uip = (Uint32 *) ptr; uip < end; uip++)
+ *uip = (Uint32) 0xdeadbeef;
+#endif
+}
+
+static UWord
+alloc_desc_insert_free_seg(ErtsFreeSegMap *map, char* start, char* end)
+{
+ char *ptr;
+ ErtsFreeSegMap *da_map;
+ ErtsFreeSegDesc *desc = alloc_desc();
+ if (desc) {
+ insert_free_seg(map, desc, start, end);
+ return 0;
+ }
+
+ /*
+ * Ahh; ran out of free segment descriptors.
+ *
+ * First try to map a new page...
+ */
+
+#if ERTS_HAVE_OS_MMAP
+ if (!mmap_state.no_os_mmap) {
+ ptr = os_mmap(mmap_state.desc.new_area_hint, ERTS_PAGEALIGNED_SIZE, 0);
+ if (ptr) {
+ mmap_state.desc.new_area_hint = ptr+ERTS_PAGEALIGNED_SIZE;
+ ERTS_MMAP_SIZE_OS_INC(ERTS_PAGEALIGNED_SIZE);
+ add_free_desc_area(ptr, ptr+ERTS_PAGEALIGNED_SIZE);
+ desc = alloc_desc();
+ ERTS_MMAP_ASSERT(desc);
+ insert_free_seg(map, desc, start, end);
+ return 0;
+ }
+ }
+#endif
+
+ /*
+ * ...then try to find a good place in the supercarrier...
+ */
+ da_map = &mmap_state.sua.map;
+ desc = lookup_free_seg(da_map, ERTS_PAGEALIGNED_SIZE);
+ if (desc) {
+ if (mmap_state.reserve_physical(desc->start, ERTS_PAGEALIGNED_SIZE))
+ ERTS_MMAP_SIZE_SC_SUA_INC(ERTS_PAGEALIGNED_SIZE);
+ else
+ desc = NULL;
+
+ }
+ else {
+ da_map = &mmap_state.sa.map;
+ desc = lookup_free_seg(da_map, ERTS_PAGEALIGNED_SIZE);
+ if (desc) {
+ if (mmap_state.reserve_physical(desc->start, ERTS_PAGEALIGNED_SIZE))
+ ERTS_MMAP_SIZE_SC_SA_INC(ERTS_PAGEALIGNED_SIZE);
+ else
+ desc = NULL;
+ }
+ }
+ if (desc) {
+ char *da_end = desc->start + ERTS_PAGEALIGNED_SIZE;
+ add_free_desc_area(desc->start, da_end);
+ if (da_end != desc->end)
+ resize_free_seg(da_map, desc, da_end, desc->end);
+ else {
+ delete_free_seg(da_map, desc);
+ free_desc(desc);
+ }
+
+ desc = alloc_desc();
+ ERTS_MMAP_ASSERT(desc);
+ insert_free_seg(map, desc, start, end);
+ return 0;
+ }
+
+ /*
+     * ... and then, as a last resort, use the first page of the
+ * free segment we are trying to insert for free descriptors.
+ */
+ ptr = start + ERTS_PAGEALIGNED_SIZE;
+ ERTS_MMAP_ASSERT(ptr <= end);
+
+ add_free_desc_area(start, ptr);
+
+ if (ptr != end) {
+ desc = alloc_desc();
+ ERTS_MMAP_ASSERT(desc);
+ insert_free_seg(map, desc, ptr, end);
+ }
+
+ return ERTS_PAGEALIGNED_SIZE;
+}
+
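+/* Rough address layout of the supercarrier, as set up by erts_mmap_init()
+ * and maintained by erts_mmap()/erts_munmap()/erts_mremap() below:
+ *
+ *   [start, sa.bot)     initial free segment descriptor area
+ *   [sa.bot, sa.top)    superaligned allocations (sa), growing upwards
+ *   [sa.top, sua.bot)   unused middle, consumed from both ends
+ *   [sua.bot, sua.top)  page aligned allocations (sua), growing downwards
+ */
+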
+void *
+erts_mmap(Uint32 flags, UWord *sizep)
+{
+ char *seg;
+ UWord asize = ERTS_PAGEALIGNED_CEILING(*sizep);
+
+ /* Map in premapped supercarrier */
+ if (mmap_state.supercarrier && !(ERTS_MMAPFLG_OS_ONLY & flags)) {
+ char *end;
+ ErtsFreeSegDesc *desc;
+ Uint32 superaligned = (ERTS_MMAPFLG_SUPERALIGNED & flags);
+
+ erts_smp_mtx_lock(&mmap_state.mtx);
+
+ ERTS_MMAP_OP_START(*sizep);
+
+ if (!superaligned) {
+ desc = lookup_free_seg(&mmap_state.sua.map, asize);
+ if (desc) {
+ seg = desc->start;
+ end = seg+asize;
+ if (!mmap_state.reserve_physical(seg, asize))
+ goto supercarrier_reserve_failure;
+ if (desc->end == end) {
+ delete_free_seg(&mmap_state.sua.map, desc);
+ free_desc(desc);
+ }
+ else {
+ ERTS_MMAP_ASSERT(end < desc->end);
+ resize_free_seg(&mmap_state.sua.map, desc, end, desc->end);
+ }
+ ERTS_MMAP_SIZE_SC_SUA_INC(asize);
+ goto supercarrier_success;
+ }
+
+ if (asize <= mmap_state.sua.bot - mmap_state.sa.top) {
+ if (!mmap_state.reserve_physical(mmap_state.sua.bot - asize,
+ asize))
+ goto supercarrier_reserve_failure;
+ mmap_state.sua.bot -= asize;
+ seg = mmap_state.sua.bot;
+ ERTS_MMAP_SIZE_SC_SUA_INC(asize);
+ goto supercarrier_success;
+ }
+ }
+
+ asize = ERTS_SUPERALIGNED_CEILING(asize);
+
+ desc = lookup_free_seg(&mmap_state.sa.map, asize);
+ if (desc) {
+ char *start = seg = desc->start;
+ seg = (char *) ERTS_SUPERALIGNED_CEILING(seg);
+ end = seg+asize;
+ if (!mmap_state.reserve_physical(start, (UWord) (end - start)))
+ goto supercarrier_reserve_failure;
+ ERTS_MMAP_SIZE_SC_SA_INC(asize);
+ if (desc->end == end) {
+ if (start != seg)
+ resize_free_seg(&mmap_state.sa.map, desc, start, seg);
+ else {
+ delete_free_seg(&mmap_state.sa.map, desc);
+ free_desc(desc);
+ }
+ }
+ else {
+ ERTS_MMAP_ASSERT(end < desc->end);
+ resize_free_seg(&mmap_state.sa.map, desc, end, desc->end);
+ if (start != seg) {
+ UWord ad_sz;
+ ad_sz = alloc_desc_insert_free_seg(&mmap_state.sua.map,
+ start, seg);
+ start += ad_sz;
+ if (start != seg)
+ mmap_state.unreserve_physical(start, (UWord) (seg - start));
+ }
+ }
+ goto supercarrier_success;
+ }
+
+ if (superaligned) {
+ char *start = mmap_state.sa.top;
+ seg = (char *) ERTS_SUPERALIGNED_CEILING(start);
+
+ if (asize + (seg - start) <= mmap_state.sua.bot - start) {
+ end = seg + asize;
+ if (!mmap_state.reserve_physical(start, (UWord) (end - start)))
+ goto supercarrier_reserve_failure;
+ mmap_state.sa.top = end;
+ ERTS_MMAP_SIZE_SC_SA_INC(asize);
+ if (start != seg) {
+ UWord ad_sz;
+ ad_sz = alloc_desc_insert_free_seg(&mmap_state.sua.map,
+ start, seg);
+ start += ad_sz;
+ if (start != seg)
+ mmap_state.unreserve_physical(start, (UWord) (seg - start));
+ }
+ goto supercarrier_success;
+ }
+
+ desc = lookup_free_seg(&mmap_state.sua.map, asize + ERTS_SUPERALIGNED_SIZE);
+ if (desc) {
+ char *org_start = desc->start;
+ char *org_end = desc->end;
+
+ seg = (char *) ERTS_SUPERALIGNED_CEILING(org_start);
+ end = seg + asize;
+ if (!mmap_state.reserve_physical(seg, (UWord) (org_end - seg)))
+ goto supercarrier_reserve_failure;
+ ERTS_MMAP_SIZE_SC_SUA_INC(asize);
+ if (org_start != seg) {
+ ERTS_MMAP_ASSERT(org_start < seg);
+ resize_free_seg(&mmap_state.sua.map, desc, org_start, seg);
+ desc = NULL;
+ }
+ if (end != org_end) {
+ UWord ad_sz = 0;
+ ERTS_MMAP_ASSERT(end < org_end);
+ if (desc)
+ resize_free_seg(&mmap_state.sua.map, desc, end, org_end);
+ else
+ ad_sz = alloc_desc_insert_free_seg(&mmap_state.sua.map,
+ end, org_end);
+ end += ad_sz;
+ if (end != org_end)
+ mmap_state.unreserve_physical(end,
+ (UWord) (org_end - end));
+ }
+ goto supercarrier_success;
+ }
+ }
+
+ ERTS_MMAP_OP_ABORT();
+ erts_smp_mtx_unlock(&mmap_state.mtx);
+ }
+
+#if ERTS_HAVE_OS_MMAP
+ /* Map using OS primitives */
+ if (!(ERTS_MMAPFLG_SUPERCARRIER_ONLY & flags) && !mmap_state.no_os_mmap) {
+ if (!(ERTS_MMAPFLG_SUPERALIGNED & flags)) {
+ seg = os_mmap(NULL, asize, 0);
+ if (!seg)
+ goto failure;
+ }
+ else {
+ asize = ERTS_SUPERALIGNED_CEILING(*sizep);
+ seg = os_mmap(NULL, asize, 1);
+ if (!seg)
+ goto failure;
+
+ if (!ERTS_IS_SUPERALIGNED(seg)) {
+ char *ptr;
+ UWord sz;
+
+ os_munmap(seg, asize);
+
+ ptr = os_mmap(NULL, asize + ERTS_SUPERALIGNED_SIZE, 1);
+ if (!ptr)
+ goto failure;
+
+ seg = (char *) ERTS_SUPERALIGNED_CEILING(ptr);
+ sz = (UWord) (seg - ptr);
+ ERTS_MMAP_ASSERT(sz <= ERTS_SUPERALIGNED_SIZE);
+ if (sz)
+ os_munmap(ptr, sz);
+ sz = ERTS_SUPERALIGNED_SIZE - sz;
+ if (sz)
+ os_munmap(seg+asize, sz);
+ }
+ }
+
+ ERTS_MMAP_OP_LCK(seg, *sizep, asize);
+ ERTS_MMAP_SIZE_OS_INC(asize);
+ *sizep = asize;
+ return (void *) seg;
+ }
+failure:
+#endif
+ ERTS_MMAP_OP_LCK(NULL, *sizep, 0);
+ *sizep = 0;
+ return NULL;
+
+supercarrier_success:
+
+#ifdef ERTS_MMAP_DEBUG
+ if (ERTS_MMAPFLG_SUPERALIGNED & flags) {
+ ERTS_MMAP_ASSERT(ERTS_IS_SUPERALIGNED(seg));
+ ERTS_MMAP_ASSERT(ERTS_IS_SUPERALIGNED(asize));
+ }
+ else {
+ ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(seg));
+ ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(asize));
+ }
+#endif
+
+ ERTS_MMAP_OP_END(seg, asize);
+ erts_smp_mtx_unlock(&mmap_state.mtx);
+
+ *sizep = asize;
+ return (void *) seg;
+
+supercarrier_reserve_failure:
+ erts_smp_mtx_unlock(&mmap_state.mtx);
+ *sizep = 0;
+ return NULL;
+}
+
+void
+erts_munmap(Uint32 flags, void *ptr, UWord size)
+{
+ ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(ptr));
+ ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(size));
+
+ if (!ERTS_MMAP_IN_SUPERCARRIER(ptr)) {
+ ERTS_MMAP_ASSERT(!mmap_state.no_os_mmap);
+#if ERTS_HAVE_OS_MMAP
+ ERTS_MUNMAP_OP_LCK(ptr, size);
+ ERTS_MMAP_SIZE_OS_DEC(size);
+ os_munmap(ptr, size);
+#endif
+ }
+ else {
+ char *start, *end;
+ ErtsFreeSegMap *map;
+ ErtsFreeSegDesc *prev, *next, *desc;
+ UWord ad_sz = 0;
+
+ ERTS_MMAP_ASSERT(mmap_state.supercarrier);
+
+ start = (char *) ptr;
+ end = start + size;
+
+ erts_smp_mtx_lock(&mmap_state.mtx);
+
+ ERTS_MUNMAP_OP(ptr, size);
+
+ if (ERTS_MMAP_IN_SUPERALIGNED_AREA(ptr)) {
+
+ map = &mmap_state.sa.map;
+ adjacent_free_seg(map, start, end, &prev, &next);
+
+ ERTS_MMAP_SIZE_SC_SA_DEC(size);
+ if (end == mmap_state.sa.top) {
+ ERTS_MMAP_ASSERT(!next);
+ if (prev) {
+ start = prev->start;
+ delete_free_seg(map, prev);
+ free_desc(prev);
+ }
+ mmap_state.sa.top = start;
+ goto supercarrier_success;
+ }
+ }
+ else {
+ map = &mmap_state.sua.map;
+ adjacent_free_seg(map, start, end, &prev, &next);
+
+ ERTS_MMAP_SIZE_SC_SUA_DEC(size);
+ if (start == mmap_state.sua.bot) {
+ ERTS_MMAP_ASSERT(!prev);
+ if (next) {
+ end = next->end;
+ delete_free_seg(map, next);
+ free_desc(next);
+ }
+ mmap_state.sua.bot = end;
+ goto supercarrier_success;
+ }
+ }
+
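+	/* The freed area borders neither sa.top nor sua.bot; coalesce it
+	 * with any adjacent free segment(s), reusing one of their
+	 * descriptors, or insert it as a new free segment. */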
+ desc = NULL;
+
+ if (next) {
+ ERTS_MMAP_ASSERT(end < next->end);
+ end = next->end;
+ if (prev) {
+ delete_free_seg(map, next);
+ free_desc(next);
+ goto save_prev;
+ }
+ desc = next;
+ } else if (prev) {
+ save_prev:
+ ERTS_MMAP_ASSERT(prev->start < start);
+ start = prev->start;
+ desc = prev;
+ }
+
+ if (desc)
+ resize_free_seg(map, desc, start, end);
+ else
+ ad_sz = alloc_desc_insert_free_seg(map, start, end);
+
+ supercarrier_success: {
+ UWord unres_sz;
+
+ ERTS_MMAP_ASSERT(size >= ad_sz);
+ unres_sz = size - ad_sz;
+ if (unres_sz)
+ mmap_state.unreserve_physical(((char *) ptr) + ad_sz, unres_sz);
+
+ erts_smp_mtx_unlock(&mmap_state.mtx);
+ }
+ }
+}
+
+static void *
+remap_move(Uint32 flags, void *ptr, UWord old_size, UWord *sizep)
+{
+ UWord size = *sizep;
+ void *new_ptr = erts_mmap(flags, &size);
+ if (!new_ptr)
+ return NULL;
+ *sizep = size;
+ if (old_size < size)
+ size = old_size;
+ sys_memcpy(new_ptr, ptr, (size_t) size);
+ erts_munmap(flags, ptr, old_size);
+ return new_ptr;
+}
+
+void *
+erts_mremap(Uint32 flags, void *ptr, UWord old_size, UWord *sizep)
+{
+ void *new_ptr;
+ Uint32 superaligned;
+ UWord asize;
+
+ ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(ptr));
+ ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(old_size));
+ ERTS_MMAP_ASSERT(sizep && ERTS_IS_PAGEALIGNED(*sizep));
+
+ if (!ERTS_MMAP_IN_SUPERCARRIER(ptr)) {
+
+ ERTS_MMAP_ASSERT(!mmap_state.no_os_mmap);
+
+ if (!(ERTS_MMAPFLG_OS_ONLY & flags) && mmap_state.supercarrier) {
+ new_ptr = remap_move(ERTS_MMAPFLG_SUPERCARRIER_ONLY|flags, ptr,
+ old_size, sizep);
+ if (new_ptr)
+ return new_ptr;
+ }
+
+ if (ERTS_MMAPFLG_SUPERCARRIER_ONLY & flags) {
+ ERTS_MREMAP_OP_LCK(NULL, ptr, old_size, *sizep, old_size);
+ return NULL;
+ }
+
+#if ERTS_HAVE_OS_MREMAP || ERTS_HAVE_GENUINE_OS_MMAP
+ superaligned = (ERTS_MMAPFLG_SUPERALIGNED & flags);
+
+ if (superaligned) {
+ asize = ERTS_SUPERALIGNED_CEILING(*sizep);
+ if (asize == old_size && ERTS_IS_SUPERALIGNED(ptr)) {
+ ERTS_MREMAP_OP_LCK(ptr, ptr, old_size, *sizep, asize);
+ *sizep = asize;
+ return ptr;
+ }
+ }
+ else {
+ asize = ERTS_PAGEALIGNED_CEILING(*sizep);
+ if (asize == old_size) {
+ ERTS_MREMAP_OP_LCK(ptr, ptr, old_size, *sizep, asize);
+ *sizep = asize;
+ return ptr;
+ }
+ }
+
+#if ERTS_HAVE_GENUINE_OS_MMAP
+ if (asize < old_size
+ && (!superaligned
+ || ERTS_IS_SUPERALIGNED(ptr))) {
+ UWord um_sz;
+ new_ptr = ((char *) ptr) + asize;
+ ERTS_MMAP_ASSERT((((char *)ptr) + old_size) > (char *) new_ptr);
+ um_sz = (UWord) ((((char *) ptr) + old_size) - (char *) new_ptr);
+ ERTS_MMAP_SIZE_OS_DEC(um_sz);
+ os_munmap(new_ptr, um_sz);
+ ERTS_MREMAP_OP_LCK(ptr, ptr, old_size, *sizep, asize);
+ *sizep = asize;
+ return ptr;
+ }
+#endif
+#if ERTS_HAVE_OS_MREMAP
+ if (superaligned)
+	    return remap_move(flags, ptr, old_size, sizep);
+ else {
+ new_ptr = os_mremap(ptr, old_size, asize, 0);
+ if (!new_ptr)
+ return NULL;
+ if (asize > old_size)
+ ERTS_MMAP_SIZE_OS_INC(asize - old_size);
+ else
+ ERTS_MMAP_SIZE_OS_DEC(old_size - asize);
+ ERTS_MREMAP_OP_LCK(new_ptr, ptr, old_size, *sizep, asize);
+ *sizep = asize;
+ return new_ptr;
+ }
+#endif
+#endif
+ }
+ else { /* In super carrier */
+ char *start, *end, *new_end;
+ ErtsFreeSegMap *map;
+ ErtsFreeSegDesc *prev, *next;
+ UWord ad_sz = 0;
+
+ ERTS_MMAP_ASSERT(mmap_state.supercarrier);
+
+ if (ERTS_MMAPFLG_OS_ONLY & flags)
+ return remap_move(flags, ptr, old_size, sizep);
+
+ superaligned = (ERTS_MMAPFLG_SUPERALIGNED & flags);
+
+ asize = (superaligned
+ ? ERTS_SUPERALIGNED_CEILING(*sizep)
+ : ERTS_PAGEALIGNED_CEILING(*sizep));
+
+ erts_smp_mtx_lock(&mmap_state.mtx);
+
+ if (ERTS_MMAP_IN_SUPERALIGNED_AREA(ptr)
+ ? (!superaligned && lookup_free_seg(&mmap_state.sua.map, asize))
+ : (superaligned && lookup_free_seg(&mmap_state.sa.map, asize))) {
+ erts_smp_mtx_unlock(&mmap_state.mtx);
+ /*
+	     * The segment is currently in the wrong area (due to a previous
+	     * memory shortage); move it to the right area.
+	     * (remap_move() will succeed.)
+ */
+ return remap_move(ERTS_MMAPFLG_SUPERCARRIER_ONLY|flags, ptr,
+ old_size, sizep);
+ }
+
+ ERTS_MREMAP_OP_START(ptr, old_size, *sizep);
+
+ if (asize == old_size) {
+ new_ptr = ptr;
+ goto supercarrier_resize_success;
+ }
+
+ start = (char *) ptr;
+ end = start + old_size;
+ new_end = start+asize;
+
+ ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(ptr));
+ ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(old_size));
+ ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(asize));
+
+ if (asize < old_size) {
+ UWord unres_sz;
+ new_ptr = ptr;
+ if (!ERTS_MMAP_IN_SUPERALIGNED_AREA(ptr)) {
+ map = &mmap_state.sua.map;
+ ERTS_MMAP_SIZE_SC_SUA_DEC(old_size - asize);
+ }
+ else {
+ if (end == mmap_state.sa.top) {
+ mmap_state.sa.top = new_end;
+ mmap_state.unreserve_physical(((char *) ptr) + asize,
+ old_size - asize);
+ goto supercarrier_resize_success;
+ }
+ ERTS_MMAP_SIZE_SC_SA_DEC(old_size - asize);
+ map = &mmap_state.sa.map;
+ }
+
+ adjacent_free_seg(map, start, end, &prev, &next);
+
+ if (next)
+ resize_free_seg(map, next, new_end, next->end);
+ else
+ ad_sz = alloc_desc_insert_free_seg(map, new_end, end);
+ ERTS_MMAP_ASSERT(old_size - asize >= ad_sz);
+ unres_sz = old_size - asize - ad_sz;
+ if (unres_sz)
+ mmap_state.unreserve_physical(((char *) ptr) + asize + ad_sz,
+ unres_sz);
+ goto supercarrier_resize_success;
+ }
+
+ if (!ERTS_MMAP_IN_SUPERALIGNED_AREA(ptr)) {
+ ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(ptr));
+ ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(old_size));
+ ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(asize));
+
+ adjacent_free_seg(&mmap_state.sua.map, start, end, &prev, &next);
+
+ if (next && new_end <= next->end) {
+ if (!mmap_state.reserve_physical(((char *) ptr) + old_size,
+ asize - old_size))
+ goto supercarrier_reserve_failure;
+ if (new_end < next->end)
+ resize_free_seg(&mmap_state.sua.map, next, new_end, next->end);
+ else {
+ delete_free_seg(&mmap_state.sua.map, next);
+ free_desc(next);
+ }
+ new_ptr = ptr;
+ ERTS_MMAP_SIZE_SC_SUA_INC(asize - old_size);
+ goto supercarrier_resize_success;
+ }
+ }
+ else { /* Superaligned area */
+
+ if (end == mmap_state.sa.top) {
+ if (new_end <= mmap_state.sua.bot) {
+ if (!mmap_state.reserve_physical(((char *) ptr) + old_size,
+ asize - old_size))
+ goto supercarrier_reserve_failure;
+ mmap_state.sa.top = new_end;
+ new_ptr = ptr;
+ ERTS_MMAP_SIZE_SC_SA_INC(asize - old_size);
+ goto supercarrier_resize_success;
+ }
+ }
+ else {
+ adjacent_free_seg(&mmap_state.sa.map, start, end, &prev, &next);
+ if (next && new_end <= next->end) {
+ if (!mmap_state.reserve_physical(((char *) ptr) + old_size,
+ asize - old_size))
+ goto supercarrier_reserve_failure;
+ if (new_end < next->end)
+ resize_free_seg(&mmap_state.sa.map, next, new_end, next->end);
+ else {
+ delete_free_seg(&mmap_state.sa.map, next);
+ free_desc(next);
+ }
+ new_ptr = ptr;
+ ERTS_MMAP_SIZE_SC_SA_INC(asize - old_size);
+ goto supercarrier_resize_success;
+ }
+ }
+ }
+
+ ERTS_MMAP_OP_ABORT();
+ erts_smp_mtx_unlock(&mmap_state.mtx);
+
+ /* Failed to resize... */
+ }
+
+ return remap_move(flags, ptr, old_size, sizep);
+
+supercarrier_resize_success:
+
+#ifdef ERTS_MMAP_DEBUG
+ if ((ERTS_MMAPFLG_SUPERALIGNED & flags)
+ || ERTS_MMAP_IN_SUPERALIGNED_AREA(new_ptr)) {
+ ERTS_MMAP_ASSERT(ERTS_IS_SUPERALIGNED(new_ptr));
+ ERTS_MMAP_ASSERT(ERTS_IS_SUPERALIGNED(asize));
+ }
+ else {
+ ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(new_ptr));
+ ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(asize));
+ }
+#endif
+
+ ERTS_MREMAP_OP_END(new_ptr, asize);
+ erts_smp_mtx_unlock(&mmap_state.mtx);
+
+ *sizep = asize;
+ return new_ptr;
+
+supercarrier_reserve_failure:
+ ERTS_MREMAP_OP_END(NULL, old_size);
+ erts_smp_mtx_unlock(&mmap_state.mtx);
+ *sizep = old_size;
+ return NULL;
+
+}
+
+int erts_mmap_in_supercarrier(void *ptr)
+{
+ return ERTS_MMAP_IN_SUPERCARRIER(ptr);
+}
+
+
+static struct {
+ Eterm total;
+ Eterm total_sa;
+ Eterm total_sua;
+ Eterm used;
+ Eterm used_sa;
+ Eterm used_sua;
+ Eterm max;
+ Eterm allocated;
+ Eterm reserved;
+ Eterm sizes;
+ Eterm free_segs;
+ Eterm supercarrier;
+ Eterm os;
+ Eterm scs;
+ Eterm sco;
+ Eterm scrpm;
+ Eterm scmgc;
+
+ int is_initialized;
+ erts_mtx_t init_mutex;
+}am;
+
+static void ERTS_INLINE atom_init(Eterm *atom, char *name)
+{
+ *atom = am_atom_put(name, strlen(name));
+}
+#define AM_INIT(AM) atom_init(&am.AM, #AM)
+
+static void init_atoms(void)
+{
+ erts_mtx_lock(&am.init_mutex);
+
+ if (!am.is_initialized) {
+ AM_INIT(total);
+ AM_INIT(total_sa);
+ AM_INIT(total_sua);
+ AM_INIT(used);
+ AM_INIT(used_sa);
+ AM_INIT(used_sua);
+ AM_INIT(max);
+ AM_INIT(allocated);
+ AM_INIT(reserved);
+ AM_INIT(sizes);
+ AM_INIT(free_segs);
+ AM_INIT(supercarrier);
+ AM_INIT(os);
+ AM_INIT(scs);
+ AM_INIT(sco);
+ AM_INIT(scrpm);
+ AM_INIT(scmgc);
+ am.is_initialized = 1;
+ }
+ erts_mtx_unlock(&am.init_mutex);
+}
+
+
+#ifdef HARD_DEBUG_MSEG
+static void hard_dbg_mseg_init(void);
+#endif
+
+void
+erts_mmap_init(ErtsMMapInit *init)
+{
+ int virtual_map = 0;
+ char *start = NULL, *end = NULL;
+ UWord pagesize;
+#if defined(__WIN32__)
+ SYSTEM_INFO sysinfo;
+ GetSystemInfo(&sysinfo);
+ pagesize = (UWord) sysinfo.dwPageSize;
+#elif defined(_SC_PAGESIZE)
+ pagesize = (UWord) sysconf(_SC_PAGESIZE);
+#elif defined(HAVE_GETPAGESIZE)
+ pagesize = (UWord) getpagesize();
+#else
+# error "Do not know how to get page size"
+#endif
+#if defined(HARD_DEBUG) || 0
+ erts_fprintf(stderr, "erts_mmap: scs = %bpu\n", init->scs);
+ erts_fprintf(stderr, "erts_mmap: sco = %i\n", init->sco);
+ erts_fprintf(stderr, "erts_mmap: scmgc = %i\n", init->scmgc);
+#endif
+ erts_page_inv_mask = pagesize - 1;
+ if (pagesize & erts_page_inv_mask)
+ erl_exit(-1, "erts_mmap: Invalid pagesize: %bpu\n",
+ pagesize);
+
+ ERTS_MMAP_OP_RINGBUF_INIT();
+
+ erts_have_erts_mmap = 0;
+
+ mmap_state.supercarrier = 0;
+ mmap_state.reserve_physical = reserve_noop;
+ mmap_state.unreserve_physical = unreserve_noop;
+
+#if HAVE_MMAP && !defined(MAP_ANON)
+ mmap_state.mmap_fd = open("/dev/zero", O_RDWR);
+ if (mmap_state.mmap_fd < 0)
+ erl_exit(-1, "erts_mmap: Failed to open /dev/zero\n");
+#endif
+
+ erts_smp_mtx_init(&mmap_state.mtx, "erts_mmap");
+ erts_mtx_init(&am.init_mutex, "mmap_init_atoms");
+
+#ifdef ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION
+ if (init->virtual_range.start) {
+ char *ptr;
+ UWord sz;
+ ptr = (char *) ERTS_PAGEALIGNED_CEILING(init->virtual_range.start);
+ end = (char *) ERTS_PAGEALIGNED_FLOOR(init->virtual_range.end);
+ sz = end - ptr;
+ start = os_mmap_virtual(ptr, sz);
+ if (!start || start > ptr || start >= end)
+ erl_exit(-1,
+ "erts_mmap: Failed to create virtual range for super carrier\n");
+ sz = start - ptr;
+ if (sz)
+ os_munmap(end, sz);
+ mmap_state.reserve_physical = os_reserve_physical;
+ mmap_state.unreserve_physical = os_unreserve_physical;
+ virtual_map = 1;
+ }
+ else
+#endif
+ if (init->predefined_area.start) {
+ start = init->predefined_area.start;
+ end = init->predefined_area.end;
+ if (end != (void *) 0 && end < start)
+ end = start;
+ }
+#if ERTS_HAVE_OS_MMAP
+ else if (init->scs) {
+ UWord sz;
+ sz = ERTS_PAGEALIGNED_CEILING(init->scs);
+#ifdef ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION
+ if (!init->scrpm) {
+ start = os_mmap_virtual(NULL, sz);
+ mmap_state.reserve_physical = os_reserve_physical;
+ mmap_state.unreserve_physical = os_unreserve_physical;
+ virtual_map = 1;
+ }
+ else
+#endif
+ {
+ /*
+	     * The whole supercarrier will be physically
+	     * reserved all the time.
+ */
+ start = os_mmap(NULL, sz, 1);
+ }
+ if (!start)
+ erl_exit(-1,
+ "erts_mmap: Failed to create super carrier of size %bpu MB\n",
+ init->scs/1024/1024);
+ end = start + sz;
+#ifdef ERTS_MMAP_DEBUG_FILL_AREAS
+ if (!virtual_map) {
+ Uint32 *uip;
+
+ for (uip = (Uint32 *) start; uip < (Uint32 *) end; uip++)
+ *uip = (Uint32) 0xdeadbeef;
+ }
+#endif
+ }
+ if (!mmap_state.no_os_mmap)
+ erts_have_erts_mmap |= ERTS_HAVE_ERTS_OS_MMAP;
+#endif
+
+ mmap_state.no.free_seg_descs = 0;
+ mmap_state.no.free_segs.curr = 0;
+ mmap_state.no.free_segs.max = 0;
+
+ mmap_state.size.supercarrier.total = 0;
+ mmap_state.size.supercarrier.used.total = 0;
+ mmap_state.size.supercarrier.used.sa = 0;
+ mmap_state.size.supercarrier.used.sua = 0;
+ mmap_state.size.os.used = 0;
+
+ mmap_state.desc.new_area_hint = NULL;
+
+ if (!start) {
+	mmap_state.sa.bot = NULL;
+	mmap_state.sua.top = NULL;
+ mmap_state.no_os_mmap = 0;
+ mmap_state.supercarrier = 0;
+ }
+ else {
+ size_t desc_size;
+
+ mmap_state.no_os_mmap = init->sco;
+
+ desc_size = init->scmgc;
+ if (desc_size < 100)
+ desc_size = 100;
+ desc_size *= sizeof(ErtsFreeSegDesc);
+ if ((desc_size
+ + ERTS_SUPERALIGNED_SIZE
+ + ERTS_PAGEALIGNED_SIZE) > end - start)
+ erl_exit(-1, "erts_mmap: No space for segments in super carrier\n");
+
+ mmap_state.sa.bot = start;
+ mmap_state.sa.bot += desc_size;
+ mmap_state.sa.bot = (char *) ERTS_SUPERALIGNED_CEILING(mmap_state.sa.bot);
+ mmap_state.sa.top = mmap_state.sa.bot;
+ mmap_state.sua.top = end;
+ mmap_state.sua.bot = mmap_state.sua.top;
+
+ mmap_state.size.supercarrier.used.total += (UWord) (mmap_state.sa.bot - start);
+
+ mmap_state.desc.free_list = NULL;
+ mmap_state.desc.reserved = 0;
+
+ if (end == (void *) 0) {
+ /*
+	     * Very unlikely, but we need a guarantee that
+	     * `mmap_state.sua.top` will always compare as larger
+	     * than all segment pointers into the super carrier...
+ */
+ mmap_state.sua.top -= ERTS_PAGEALIGNED_SIZE;
+ mmap_state.size.supercarrier.used.total += ERTS_PAGEALIGNED_SIZE;
+#ifdef ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION
+ if (!virtual_map || os_reserve_physical(mmap_state.sua.top, ERTS_PAGEALIGNED_SIZE))
+#endif
+ add_free_desc_area(mmap_state.sua.top, end);
+ mmap_state.desc.reserved += (end - mmap_state.sua.top) / sizeof(ErtsFreeSegDesc);
+ }
+
+ mmap_state.size.supercarrier.total = (UWord) (mmap_state.sua.top - start);
+
+ /*
+	 * The area before (and after) the super carrier
+	 * will be used for free segment descriptors.
+ */
+#ifdef ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION
+ if (virtual_map && !os_reserve_physical(start, mmap_state.sa.bot - start))
+ erl_exit(-1, "erts_mmap: Failed to reserve physical memory for descriptors\n");
+#endif
+ mmap_state.desc.unused_start = start;
+ mmap_state.desc.unused_end = mmap_state.sa.bot;
+ mmap_state.desc.reserved += ((mmap_state.desc.unused_end - start)
+ / sizeof(ErtsFreeSegDesc));
+
+ init_free_seg_map(&mmap_state.sa.map, SA_SZ_ADDR_ORDER);
+ init_free_seg_map(&mmap_state.sua.map, SZ_REVERSE_ADDR_ORDER);
+
+ mmap_state.supercarrier = 1;
+ erts_have_erts_mmap |= ERTS_HAVE_ERTS_SUPERCARRIER_MMAP;
+
+ mmap_state.desc.new_area_hint = end;
+
+ }
+
+#if !ERTS_HAVE_OS_MMAP
+ mmap_state.no_os_mmap = 1;
+#endif
+
+#ifdef HARD_DEBUG_MSEG
+ hard_dbg_mseg_init();
+#endif
+}
+
+
+static ERTS_INLINE void
+add_2tup(Uint **hpp, Uint *szp, Eterm *lp, Eterm el1, Eterm el2)
+{
+ *lp = erts_bld_cons(hpp, szp, erts_bld_tuple(hpp, szp, 2, el1, el2), *lp);
+}
+
+Eterm erts_mmap_info(int *print_to_p,
+ void *print_to_arg,
+ Eterm** hpp, Uint* szp,
+ struct erts_mmap_info_struct* emis)
+{
+ Eterm size_tags[] = { am.total, am.total_sa, am.total_sua, am.used, am.used_sa, am.used_sua };
+ Eterm seg_tags[] = { am.used, am.max, am.allocated, am.reserved, am.used_sa, am.used_sua };
+ Eterm group[2];
+ Eterm group_tags[] = { am.sizes, am.free_segs };
+ Eterm list[2];
+ Eterm list_tags[2]; /* { am.supercarrier, am.os } */
+ int lix;
+ Eterm res = THE_NON_VALUE;
+
+ if (!hpp) {
+ erts_smp_mtx_lock(&mmap_state.mtx);
+ emis->sizes[0] = mmap_state.size.supercarrier.total;
+ emis->sizes[1] = mmap_state.sa.top - mmap_state.sa.bot;
+ emis->sizes[2] = mmap_state.sua.top - mmap_state.sua.bot;
+ emis->sizes[3] = mmap_state.size.supercarrier.used.total;
+ emis->sizes[4] = mmap_state.size.supercarrier.used.sa;
+ emis->sizes[5] = mmap_state.size.supercarrier.used.sua;
+
+ emis->segs[0] = mmap_state.no.free_segs.curr;
+ emis->segs[1] = mmap_state.no.free_segs.max;
+ emis->segs[2] = mmap_state.no.free_seg_descs;
+ emis->segs[3] = mmap_state.desc.reserved;
+ emis->segs[4] = mmap_state.sa.map.nseg;
+ emis->segs[5] = mmap_state.sua.map.nseg;
+
+ emis->os_used = mmap_state.size.os.used;
+ erts_smp_mtx_unlock(&mmap_state.mtx);
+ }
+
+ if (print_to_p) {
+ int to = *print_to_p;
+ void *arg = print_to_arg;
+ if (mmap_state.supercarrier) {
+ const char* prefix = "supercarrier ";
+ erts_print(to, arg, "%stotal size: %bpu\n", prefix, emis->sizes[0]);
+ erts_print(to, arg, "%stotal sa size: %bpu\n", prefix, emis->sizes[1]);
+ erts_print(to, arg, "%stotal sua size: %bpu\n", prefix, emis->sizes[2]);
+ erts_print(to, arg, "%sused size: %bpu\n", prefix, emis->sizes[3]);
+ erts_print(to, arg, "%sused sa size: %bpu\n", prefix, emis->sizes[4]);
+ erts_print(to, arg, "%sused sua size: %bpu\n", prefix, emis->sizes[5]);
+ erts_print(to, arg, "%sused free segs: %bpu\n", prefix, emis->segs[0]);
+ erts_print(to, arg, "%smax free segs: %bpu\n", prefix, emis->segs[1]);
+ erts_print(to, arg, "%sallocated free segs: %bpu\n", prefix, emis->segs[2]);
+ erts_print(to, arg, "%sreserved free segs: %bpu\n", prefix, emis->segs[3]);
+ erts_print(to, arg, "%ssa free segs: %bpu\n", prefix, emis->segs[4]);
+ erts_print(to, arg, "%ssua free segs: %bpu\n", prefix, emis->segs[5]);
+ }
+ if (!mmap_state.no_os_mmap) {
+ erts_print(to, arg, "os mmap size used: %bpu\n", emis->os_used);
+ }
+ }
+
+
+ if (hpp || szp) {
+ if (!am.is_initialized) {
+ init_atoms();
+ }
+
+ lix = 0;
+ if (mmap_state.supercarrier) {
+ group[0] = erts_bld_atom_uword_2tup_list(hpp, szp,
+ sizeof(size_tags)/sizeof(Eterm),
+ size_tags, emis->sizes);
+ group[1] = erts_bld_atom_uword_2tup_list(hpp, szp,
+ sizeof(seg_tags)/sizeof(Eterm),
+ seg_tags, emis->segs);
+ list[lix] = erts_bld_2tup_list(hpp, szp, 2, group_tags, group);
+ list_tags[lix] = am.supercarrier;
+ lix++;
+ }
+
+ if (!mmap_state.no_os_mmap) {
+ group[0] = erts_bld_atom_uword_2tup_list(hpp, szp,
+ 1, &am.used, &emis->os_used);
+ list[lix] = erts_bld_2tup_list(hpp, szp, 1, group_tags, group);
+ list_tags[lix] = am.os;
+ lix++;
+ }
+ res = erts_bld_2tup_list(hpp, szp, lix, list_tags, list);
+ }
+ return res;
+}
+
+Eterm erts_mmap_info_options(char *prefix,
+ int *print_to_p,
+ void *print_to_arg,
+ Uint **hpp,
+ Uint *szp)
+{
+ const UWord scs = mmap_state.sua.top - mmap_state.sa.bot;
+ const Eterm sco = mmap_state.no_os_mmap ? am_true : am_false;
+ const Eterm scrpm = (mmap_state.reserve_physical == reserve_noop) ? am_true : am_false;
+ Eterm res = THE_NON_VALUE;
+
+ if (print_to_p) {
+ int to = *print_to_p;
+ void *arg = print_to_arg;
+ erts_print(to, arg, "%sscs: %bpu\n", prefix, scs);
+ if (mmap_state.supercarrier) {
+ erts_print(to, arg, "%ssco: %T\n", prefix, sco);
+ erts_print(to, arg, "%sscrpm: %T\n", prefix, scrpm);
+ erts_print(to, arg, "%sscmgc: %beu\n", prefix, mmap_state.desc.reserved);
+ }
+ }
+
+ if (hpp || szp) {
+ if (!am.is_initialized) {
+ init_atoms();
+ }
+
+ res = NIL;
+ if (mmap_state.supercarrier) {
+ add_2tup(hpp, szp, &res, am.scmgc,
+ erts_bld_uint(hpp,szp, mmap_state.desc.reserved));
+ add_2tup(hpp, szp, &res, am.scrpm, scrpm);
+ add_2tup(hpp, szp, &res, am.sco, sco);
+ }
+ add_2tup(hpp, szp, &res, am.scs, erts_bld_uword(hpp, szp, scs));
+ }
+ return res;
+}
+
+
+Eterm erts_mmap_debug_info(Process* p)
+{
+ if (mmap_state.supercarrier) {
+ ERTS_DECL_AM(sabot);
+ ERTS_DECL_AM(satop);
+ ERTS_DECL_AM(suabot);
+ ERTS_DECL_AM(suatop);
+ Eterm sa_list, sua_list, list;
+ Eterm tags[] = { AM_sabot, AM_satop, AM_suabot, AM_suatop };
+ UWord values[4];
+ Eterm *hp, *hp_end;
+ Uint may_need;
+ const Uint PTR_BIG_SZ = HALFWORD_HEAP ? 3 : 2;
+
+ erts_smp_mtx_lock(&mmap_state.mtx);
+ values[0] = (UWord)mmap_state.sa.bot;
+ values[1] = (UWord)mmap_state.sa.top;
+ values[2] = (UWord)mmap_state.sua.bot;
+ values[3] = (UWord)mmap_state.sua.top;
+ sa_list = build_free_seg_list(p, &mmap_state.sa.map);
+ sua_list = build_free_seg_list(p, &mmap_state.sua.map);
+ erts_smp_mtx_unlock(&mmap_state.mtx);
+
+ may_need = 4*(2+3+PTR_BIG_SZ) + 2*(2+3);
+ hp = HAlloc(p, may_need);
+ hp_end = hp + may_need;
+
+ list = erts_bld_atom_uword_2tup_list(&hp, NULL,
+ sizeof(values)/sizeof(*values),
+ tags, values);
+
+ sa_list = TUPLE2(hp, am_atom_put("sa_free_segs",12), sa_list); hp+=3;
+ sua_list = TUPLE2(hp, am_atom_put("sua_free_segs",13), sua_list); hp+=3;
+ list = CONS(hp, sua_list, list); hp+=2;
+ list = CONS(hp, sa_list, list); hp+=2;
+
+ ASSERT(hp <= hp_end);
+ HRelease(p, hp_end, hp);
+ return list;
+ }
+ else {
+ return am_undefined;
+ }
+}
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ *                               Debug functions                              *
+\*                                                                           */
+
+
+#ifdef HARD_DEBUG
+
+static int rbt_assert_is_member(RBTNode* root, RBTNode* node)
+{
+ while (node != root) {
+ RBT_ASSERT(parent(node));
+ RBT_ASSERT(parent(node)->left == node || parent(node)->right == node);
+ node = parent(node);
+ }
+ return 1;
+}
+
+
+#if 0
+# define PRINT_TREE
+#else
+# undef PRINT_TREE
+#endif
+
+#ifdef PRINT_TREE
+static void print_tree(enum SortOrder order, RBTNode*);
+#endif
+
+/*
+ * Checks that the order between parent and children is correct,
+ * and that the Red-Black Tree properties are satisfied. If size > 0,
+ * check_tree() returns the node that satisfies "address order first fit".
+ *
+ * The Red-Black Tree properties are:
+ * 1. Every node is either red or black.
+ * 2. Every leaf (NIL) is black.
+ * 3. If a node is red, then both its children are black.
+ * 4. Every simple path from a node to a descendant leaf
+ * contains the same number of black nodes.
+ *
+ */
+
+struct check_arg_t {
+ RBTree* tree;
+ ErtsFreeSegDesc* prev_seg;
+ Uint size;
+ RBTNode *res;
+};
+static void check_node_callback(RBTNode* x, void* arg);
+
+
+static RBTNode *
+check_tree(RBTree* tree, Uint size)
+{
+ struct check_arg_t carg;
+ carg.tree = tree;
+ carg.prev_seg = NULL;
+ carg.size = size;
+ carg.res = NULL;
+
+#ifdef PRINT_TREE
+ print_tree(tree->order, tree->root);
+#endif
+
+ if (!tree->root)
+ return NULL;
+
+ RBT_ASSERT(IS_BLACK(tree->root));
+ RBT_ASSERT(!parent(tree->root));
+
+ rbt_foreach_node(tree, check_node_callback, &carg, 0);
+
+ return carg.res;
+}
+
+static void check_node_callback(RBTNode* x, void* arg)
+{
+ struct check_arg_t* a = (struct check_arg_t*) arg;
+ ErtsFreeSegDesc* seg;
+
+ if (IS_RED(x)) {
+ RBT_ASSERT(IS_BLACK(x->right));
+ RBT_ASSERT(IS_BLACK(x->left));
+ }
+
+ RBT_ASSERT(parent(x) || x == a->tree->root);
+
+ if (x->left) {
+ RBT_ASSERT(cmp_nodes(a->tree->order, x->left, x) < 0);
+ }
+ if (x->right) {
+ RBT_ASSERT(cmp_nodes(a->tree->order, x->right, x) > 0);
+ }
+
+ seg = node_to_desc(a->tree->order, x);
+ RBT_ASSERT(seg->start < seg->end);
+ if (a->size && (seg->end - seg->start) >= a->size) {
+ if (!a->res || cmp_nodes(a->tree->order, x, a->res) < 0) {
+ a->res = x;
+ }
+ }
+ if (a->tree->order == ADDR_ORDER) {
+ RBT_ASSERT(!a->prev_seg || a->prev_seg->end < seg->start);
+ a->prev_seg = seg;
+ }
+}
+
+#endif /* HARD_DEBUG */
+
+
+#ifdef PRINT_TREE
+#define INDENT_STEP 2
+
+#include <stdio.h>
+
+static void
+print_tree_aux(enum SortOrder order, RBTNode *x, int indent)
+{
+ int i;
+
+ if (x) {
+ ErtsFreeSegDesc* desc = node_to_desc(order, x);
+ print_tree_aux(order, x->right, indent + INDENT_STEP);
+ for (i = 0; i < indent; i++) {
+ putc(' ', stderr);
+ }
+ fprintf(stderr, "%s: sz=%lx [%p - %p] desc=%p\r\n",
+ IS_BLACK(x) ? "BLACK" : "RED",
+ desc->end - desc->start, desc->start, desc->end, desc);
+ print_tree_aux(order, x->left, indent + INDENT_STEP);
+ }
+}
+
+
+static void
+print_tree(enum SortOrder order, RBTNode* root)
+{
+ fprintf(stderr, " --- %s ordered tree begin ---\r\n", sort_order_names[order]);
+ print_tree_aux(order, root, 0);
+ fprintf(stderr, " --- %s ordered tree end ---\r\n", sort_order_names[order]);
+}
+
+#endif /* PRINT_TREE */
+
+
+#ifdef FREE_SEG_API_SMOKE_TEST
+
+void test_it(void)
+{
+ ErtsFreeSegMap map;
+ ErtsFreeSegDesc *desc, *under, *over, *d1, *d2;
+ const int i = 1; /* reverse addr order */
+
+ {
+ init_free_seg_map(&map, SZ_REVERSE_ADDR_ORDER);
+
+ insert_free_seg(&map, alloc_desc(), (char*)0x11000, (char*)0x12000);
+ HARD_CHECK_TREE(&map.atree, 0); HARD_CHECK_TREE(&map.stree, 0);
+ insert_free_seg(&map, alloc_desc(), (char*)0x13000, (char*)0x14000);
+ HARD_CHECK_TREE(&map.atree, 0); HARD_CHECK_TREE(&map.stree, 0);
+ insert_free_seg(&map, alloc_desc(), (char*)0x15000, (char*)0x17000);
+ HARD_CHECK_TREE(&map.atree, 0); HARD_CHECK_TREE(&map.stree, 0);
+ insert_free_seg(&map, alloc_desc(), (char*)0x8000, (char*)0x10000);
+ HARD_CHECK_TREE(&map.atree, 0); HARD_CHECK_TREE(&map.stree, 0);
+
+ desc = lookup_free_seg(&map, 0x500);
+ ERTS_ASSERT(desc->start == (char*)(i?0x13000L:0x11000L));
+
+ desc = lookup_free_seg(&map, 0x1500);
+ ERTS_ASSERT(desc->start == (char*)0x15000);
+
+ adjacent_free_seg(&map, (char*)0x6666, (char*)0x7777, &under, &over);
+ ERTS_ASSERT(!under && !over);
+
+ adjacent_free_seg(&map, (char*)0x6666, (char*)0x8000, &under, &over);
+ ERTS_ASSERT(!under && over->start == (char*)0x8000);
+
+ adjacent_free_seg(&map, (char*)0x10000, (char*)0x10500, &under, &over);
+ ERTS_ASSERT(under->end == (char*)0x10000 && !over);
+
+ adjacent_free_seg(&map, (char*)0x10100, (char*)0x10500, &under, &over);
+ ERTS_ASSERT(!under && !over);
+
+ adjacent_free_seg(&map, (char*)0x10100, (char*)0x11000, &under, &over);
+ ERTS_ASSERT(!under && over && over->start == (char*)0x11000);
+
+ adjacent_free_seg(&map, (char*)0x12000, (char*)0x12500, &under, &over);
+ ERTS_ASSERT(under && under->end == (char*)0x12000 && !over);
+
+ adjacent_free_seg(&map, (char*)0x12000, (char*)0x13000, &under, &over);
+ ERTS_ASSERT(under && under->end == (char*)0x12000 &&
+ over && over->start == (char*)0x13000);
+
+ adjacent_free_seg(&map, (char*)0x12500, (char*)0x13000, &under, &over);
+ ERTS_ASSERT(!under && over && over->start == (char*)0x13000);
+
+ d1 = lookup_free_seg(&map, 0x500);
+ ERTS_ASSERT(d1->start == (char*)(i?0x13000L:0x11000L));
+
+ resize_free_seg(&map, d1, d1->start - 0x800, (char*)d1->end);
+ HARD_CHECK_TREE(&map.atree, 0); HARD_CHECK_TREE(&map.stree, 0);
+
+ d2 = lookup_free_seg(&map, 0x1200);
+ ERTS_ASSERT(d2 == d1);
+
+ delete_free_seg(&map, d1);
+ HARD_CHECK_TREE(&map.atree, 0); HARD_CHECK_TREE(&map.stree, 0);
+
+ d1 = lookup_free_seg(&map, 0x1200);
+ ERTS_ASSERT(d1->start == (char*)0x15000);
+ }
+}
+
+#endif /* FREE_SEG_API_SMOKE_TEST */
+
+
+#ifdef HARD_DEBUG_MSEG
+
+/*
+ * Debug stuff used by erl_mseg to check that it does the right thing.
+ * The reason for keeping it here is that we (ab)use the rb-tree code
+ * for keeping track of *allocated* segments.
+ */
+
+typedef struct ErtsFreeSegDesc_fake_ {
+ /*RBTNode snode; Save memory by skipping unused size tree node */
+ RBTNode anode; /* node in 'atree' */
+ union {
+ char* start;
+ struct ErtsFreeSegDesc_fake_* next_free;
+ }u;
+ char* end;
+}ErtsFreeSegDesc_fake;
+
+static ErtsFreeSegDesc_fake hard_dbg_mseg_desc_pool[10000];
+static ErtsFreeSegDesc_fake* hard_dbg_mseg_desc_first;
+RBTree hard_dbg_mseg_tree;
+
+static erts_mtx_t hard_dbg_mseg_mtx;
+
+static void hard_dbg_mseg_init(void)
+{
+ ErtsFreeSegDesc_fake* p;
+
+ erts_mtx_init(&hard_dbg_mseg_mtx, "hard_dbg_mseg");
+ hard_dbg_mseg_tree.root = NULL;
+ hard_dbg_mseg_tree.order = ADDR_ORDER;
+
+ p = &hard_dbg_mseg_desc_pool[(sizeof(hard_dbg_mseg_desc_pool) /
+ sizeof(*hard_dbg_mseg_desc_pool)) - 1];
+ p->u.next_free = NULL;
+ while (--p >= hard_dbg_mseg_desc_pool) {
+ p->u.next_free = (p+1);
+ }
+ hard_dbg_mseg_desc_first = &hard_dbg_mseg_desc_pool[0];
+}
+
+static ErtsFreeSegDesc* hard_dbg_alloc_desc(void)
+{
+ ErtsFreeSegDesc_fake* p = hard_dbg_mseg_desc_first;
+ ERTS_ASSERT(p || !"HARD_DEBUG_MSEG: Out of mseg descriptors");
+ hard_dbg_mseg_desc_first = p->u.next_free;
+
+ /* Creative pointer arithmetic to return something that looks like
+     * an ErtsFreeSegDesc as long as we don't use the absent 'snode'.
+ */
+ return (ErtsFreeSegDesc*) ((char*)p - offsetof(ErtsFreeSegDesc,anode));
+}
+
+static void hard_dbg_free_desc(ErtsFreeSegDesc* desc)
+{
+ ErtsFreeSegDesc_fake* p = (ErtsFreeSegDesc_fake*) &desc->anode;
+ memset(p, 0xfe, sizeof(*p));
+ p->u.next_free = hard_dbg_mseg_desc_first;
+ hard_dbg_mseg_desc_first = p;
+}
+
+static void check_seg_writable(void* seg, UWord sz)
+{
+ UWord* seg_end = (UWord*)((char*)seg + sz);
+ volatile UWord* p;
+ ERTS_ASSERT(ERTS_IS_PAGEALIGNED(seg));
+ ERTS_ASSERT(ERTS_IS_PAGEALIGNED(sz));
+ for (p=(UWord*)seg; p<seg_end; p += (ERTS_INV_PAGEALIGNED_MASK+1)/sizeof(UWord)) {
+ UWord write_back = *p;
+ *p = 0xfade2b1acc;
+ *p = write_back;
+ }
+}
+
+void hard_dbg_insert_mseg(void* seg, UWord sz)
+{
+ check_seg_writable(seg, sz);
+ erts_mtx_lock(&hard_dbg_mseg_mtx);
+ {
+ ErtsFreeSegDesc *desc = hard_dbg_alloc_desc();
+ RBTNode *prev, *next;
+ desc->start = (char*)seg;
+ desc->end = desc->start + sz - 1; /* -1 to allow adjacent segments in tree */
+ rbt_insert(&hard_dbg_mseg_tree, &desc->anode);
+ prev = rbt_prev_node(&desc->anode);
+ next = rbt_next_node(&desc->anode);
+ ERTS_ASSERT(!prev || anode_to_desc(prev)->end < desc->start);
+ ERTS_ASSERT(!next || anode_to_desc(next)->start > desc->end);
+ }
+ erts_mtx_unlock(&hard_dbg_mseg_mtx);
+}
+
+static ErtsFreeSegDesc* hard_dbg_lookup_seg_at(RBTree* tree, char* start)
+{
+ RBTNode* x = tree->root;
+
+ while (x) {
+ ErtsFreeSegDesc* desc = anode_to_desc(x);
+ if (start < desc->start) {
+ x = x->left;
+ }
+ else if (start > desc->start) {
+ ERTS_ASSERT(start > desc->end);
+ x = x->right;
+ }
+ else
+ return desc;
+ }
+ return NULL;
+}
+
+void hard_dbg_remove_mseg(void* seg, UWord sz)
+{
+ check_seg_writable(seg, sz);
+ erts_mtx_lock(&hard_dbg_mseg_mtx);
+ {
+ ErtsFreeSegDesc* desc = hard_dbg_lookup_seg_at(&hard_dbg_mseg_tree, (char*)seg);
+ ERTS_ASSERT(desc);
+ ERTS_ASSERT(desc->start == (char*)seg);
+ ERTS_ASSERT(desc->end == (char*)seg + sz - 1);
+
+ rbt_delete(&hard_dbg_mseg_tree, &desc->anode);
+ hard_dbg_free_desc(desc);
+ }
+ erts_mtx_unlock(&hard_dbg_mseg_mtx);
+}
+
+#endif /* HARD_DEBUG_MSEG */
diff --git a/erts/emulator/sys/common/erl_mmap.h b/erts/emulator/sys/common/erl_mmap.h
new file mode 100644
index 0000000000..e6934dbb26
--- /dev/null
+++ b/erts/emulator/sys/common/erl_mmap.h
@@ -0,0 +1,134 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2013. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERL_MMAP_H__
+#define ERL_MMAP_H__
+
+#include "sys.h"
+
+#define ERTS_MMAP_SUPERALIGNED_BITS (18)
+/* Affects hard limits for sbct and lmbcs documented in erts_alloc.xml */
+
+#define ERTS_MMAPFLG_OS_ONLY (((Uint32) 1) << 0)
+#define ERTS_MMAPFLG_SUPERCARRIER_ONLY (((Uint32) 1) << 1)
+#define ERTS_MMAPFLG_SUPERALIGNED (((Uint32) 1) << 2)
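+/* ERTS_MMAPFLG_OS_ONLY: map/unmap via the OS only, bypassing the
+ * supercarrier; ERTS_MMAPFLG_SUPERCARRIER_ONLY: allocate within the
+ * supercarrier only; ERTS_MMAPFLG_SUPERALIGNED: request a superaligned
+ * mapping (size rounded up to a multiple of ERTS_SUPERALIGNED_SIZE). */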
+
+#define ERTS_HAVE_ERTS_OS_MMAP (1 << 0)
+#define ERTS_HAVE_ERTS_SUPERCARRIER_MMAP (1 << 1)
+extern int erts_have_erts_mmap;
+extern UWord erts_page_inv_mask;
+
+typedef struct {
+ struct {
+ char *start;
+ char *end;
+ } virtual_range;
+ struct {
+ char *start;
+ char *end;
+ } predefined_area;
+ UWord scs; /* super carrier size */
+ int sco; /* super carrier only? */
+ Uint scmgc; /* super carrier: max guaranteed (number of) carriers */
+    int scrpm;   /* super carrier: reserve physical memory */
+}ErtsMMapInit;
+
+#define ERTS_MMAP_INIT_DEFAULT_INITER \
+ {{NULL, NULL}, {NULL, NULL}, 0, 1, (1 << 16), 1}
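+/* Field order as in ErtsMMapInit above: no virtual range, no predefined
+ * area, scs = 0, sco = 1, scmgc = 1 << 16, scrpm = 1. */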
+
+void *erts_mmap(Uint32 flags, UWord *sizep);
+void erts_munmap(Uint32 flags, void *ptr, UWord size);
+void *erts_mremap(Uint32 flags, void *ptr, UWord old_size, UWord *sizep);
+int erts_mmap_in_supercarrier(void *ptr);
+void erts_mmap_init(ErtsMMapInit*);
+struct erts_mmap_info_struct
+{
+ UWord sizes[6];
+ UWord segs[6];
+ UWord os_used;
+};
+Eterm erts_mmap_info(int *print_to_p, void *print_to_arg,
+ Eterm** hpp, Uint* szp, struct erts_mmap_info_struct*);
+Eterm erts_mmap_info_options(char *prefix, int *print_to_p, void *print_to_arg,
+ Uint **hpp, Uint *szp);
+struct process;
+Eterm erts_mmap_debug_info(struct process*);
+
+#define ERTS_SUPERALIGNED_SIZE \
+ (1 << ERTS_MMAP_SUPERALIGNED_BITS)
+#define ERTS_INV_SUPERALIGNED_MASK \
+ ((UWord) (ERTS_SUPERALIGNED_SIZE - 1))
+#define ERTS_SUPERALIGNED_MASK \
+ (~ERTS_INV_SUPERALIGNED_MASK)
+#define ERTS_SUPERALIGNED_FLOOR(X) \
+ (((UWord) (X)) & ERTS_SUPERALIGNED_MASK)
+#define ERTS_SUPERALIGNED_CEILING(X) \
+ ERTS_SUPERALIGNED_FLOOR((X) + ERTS_INV_SUPERALIGNED_MASK)
+#define ERTS_IS_SUPERALIGNED(X) \
+ (((UWord) (X) & ERTS_INV_SUPERALIGNED_MASK) == 0)
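+/* Illustrative example: with ERTS_MMAP_SUPERALIGNED_BITS == 18 the
+ * superalignment is 256 kB (0x40000), so ERTS_SUPERALIGNED_CEILING(0x40001)
+ * is 0x80000 and ERTS_SUPERALIGNED_FLOOR(0x7ffff) is 0x40000. */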
+
+#define ERTS_INV_PAGEALIGNED_MASK \
+ (erts_page_inv_mask)
+#define ERTS_PAGEALIGNED_MASK \
+ (~ERTS_INV_PAGEALIGNED_MASK)
+#define ERTS_PAGEALIGNED_FLOOR(X) \
+ (((UWord) (X)) & ERTS_PAGEALIGNED_MASK)
+#define ERTS_PAGEALIGNED_CEILING(X) \
+ ERTS_PAGEALIGNED_FLOOR((X) + ERTS_INV_PAGEALIGNED_MASK)
+#define ERTS_IS_PAGEALIGNED(X) \
+ (((UWord) (X) & ERTS_INV_PAGEALIGNED_MASK) == 0)
+#define ERTS_PAGEALIGNED_SIZE \
+ (ERTS_INV_PAGEALIGNED_MASK + 1)
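+/* Unlike the superalignment macros, the page alignment macros depend on
+ * the runtime page size: erts_page_inv_mask is set to (pagesize - 1) by
+ * erts_mmap_init(). */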
+
+#ifndef HAVE_MMAP
+# define HAVE_MMAP 0
+#endif
+#ifndef HAVE_MREMAP
+# define HAVE_MREMAP 0
+#endif
+#if HAVE_MMAP
+# define ERTS_HAVE_OS_MMAP 1
+# define ERTS_HAVE_GENUINE_OS_MMAP 1
+# if HAVE_MREMAP
+# define ERTS_HAVE_OS_MREMAP 1
+# endif
+# if defined(MAP_FIXED) && defined(MAP_NORESERVE)
+# define ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION 1
+# endif
+#endif
+
+#ifndef HAVE_VIRTUALALLOC
+# define HAVE_VIRTUALALLOC 0
+#endif
+#if HAVE_VIRTUALALLOC
+# define ERTS_HAVE_OS_MMAP 1
+#endif
+
+/*#define HARD_DEBUG_MSEG*/
+#ifdef HARD_DEBUG_MSEG
+# define HARD_DBG_INSERT_MSEG hard_dbg_insert_mseg
+# define HARD_DBG_REMOVE_MSEG hard_dbg_remove_mseg
+void hard_dbg_insert_mseg(void* seg, UWord sz);
+void hard_dbg_remove_mseg(void* seg, UWord sz);
+#else
+# define HARD_DBG_INSERT_MSEG(SEG,SZ)
+# define HARD_DBG_REMOVE_MSEG(SEG,SZ)
+#endif
+
+#endif /* ERL_MMAP_H__ */
diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c
index 2748edba02..94a381e168 100644
--- a/erts/emulator/sys/common/erl_mseg.c
+++ b/erts/emulator/sys/common/erl_mseg.c
@@ -100,45 +100,6 @@ static int atoms_initialized;
typedef struct mem_kind_t MemKind;
-#if HALFWORD_HEAP
-static int initialize_pmmap(void);
-static void *pmmap(size_t size);
-static int pmunmap(void *p, size_t size);
-static void *pmremap(void *old_address, size_t old_size,
- size_t new_size);
-#endif
-
-#if HAVE_MMAP
-/* Mmap ... */
-
-#define MMAP_PROT (PROT_READ|PROT_WRITE)
-
-
-#ifdef MAP_ANON
-# define MMAP_FLAGS (MAP_ANON|MAP_PRIVATE)
-# define MMAP_FD (-1)
-#else
-# define MMAP_FLAGS (MAP_PRIVATE)
-# define MMAP_FD mmap_fd
-static int mmap_fd;
-#endif
-
-#if HAVE_MREMAP
-# define HAVE_MSEG_RECREATE 1
-#else
-# define HAVE_MSEG_RECREATE 0
-#endif
-
-#if HALFWORD_HEAP
-#define CAN_PARTLY_DESTROY 0
-#else
-#define CAN_PARTLY_DESTROY 1
-#endif
-#else /* #if HAVE_MMAP */
-#define CAN_PARTLY_DESTROY 0
-#error "Not supported"
-#endif /* #if HAVE_MMAP */
-
const ErtsMsegOpt_t erts_mseg_default_opt = {
1, /* Use cache */
1, /* Preserv data */
@@ -163,9 +124,7 @@ typedef struct {
CallCounter create;
CallCounter create_resize;
CallCounter destroy;
-#if HAVE_MSEG_RECREATE
CallCounter recreate;
-#endif
CallCounter clear_cache;
CallCounter check_cache;
} ErtsMsegCalls;
@@ -173,7 +132,7 @@ typedef struct {
typedef struct cache_t_ cache_t;
struct cache_t_ {
- Uint size;
+ UWord size;
void *seg;
cache_t *next;
cache_t *prev;
@@ -236,11 +195,6 @@ struct ErtsMsegAllctr_t_ {
Uint rel_max_cache_bad_fit;
ErtsMsegCalls calls;
-
-#if CAN_PARTLY_DESTROY
- Uint min_seg_size;
-#endif
-
};
typedef union {
@@ -344,69 +298,31 @@ schedule_cache_check(ErtsMsegAllctr_t *ma) {
}
}
-/* remove ErtsMsegAllctr_t from arguments?
- * only used for statistics
- */
-static ERTS_INLINE void *
-mmap_align(ErtsMsegAllctr_t *ma, void *addr, size_t length, int prot, int flags, int fd, off_t offset) {
-
- char *p, *q;
- UWord d;
-
- p = mmap(addr, length, prot, flags, fd, offset);
-
- if (MAP_IS_ALIGNED(p) || p == MAP_FAILED)
- return p;
-
- if (ma)
- INC_CC(ma, create_resize);
-
- munmap(p, length);
-
- if ((p = mmap(addr, length + MSEG_ALIGNED_SIZE, prot, flags, fd, offset)) == MAP_FAILED)
- return MAP_FAILED;
-
- q = (void *)ALIGNED_CEILING((char *)p);
- d = (UWord)(q - p);
-
- if (d > 0)
- munmap(p, d);
-
- if (MSEG_ALIGNED_SIZE - d > 0)
- munmap((void *)(q + length), MSEG_ALIGNED_SIZE - d);
-
- return q;
-}
+/* #define ERTS_PRINT_ERTS_MMAP */
static ERTS_INLINE void *
-mseg_create(ErtsMsegAllctr_t *ma, MemKind* mk, Uint size)
+mseg_create(ErtsMsegAllctr_t *ma, Uint flags, MemKind* mk, UWord *sizep)
{
+#ifdef ERTS_PRINT_ERTS_MMAP
+ UWord req_size = *sizep;
+#endif
void *seg;
- ASSERT(size % MSEG_ALIGNED_SIZE == 0);
-
+ Uint32 mmap_flags = 0;
#if HALFWORD_HEAP
- if (mk == &ma->low_mem) {
- seg = pmmap(size);
- if ((unsigned long) seg & CHECK_POINTER_MASK) {
- erts_fprintf(stderr,"Pointer mask failure (0x%08lx)\n",(unsigned long) seg);
- return NULL;
- }
- } else
+ mmap_flags |= ((mk == &ma->low_mem)
+ ? ERTS_MMAPFLG_SUPERCARRIER_ONLY
+ : ERTS_MMAPFLG_OS_ONLY);
#endif
- {
-#if HAVE_MMAP
- {
- seg = (void *) mmap_align(ma, (void *) 0, (size_t) size,
- MMAP_PROT, MMAP_FLAGS, MMAP_FD, 0);
- if (seg == (void *) MAP_FAILED)
- seg = NULL;
-
- ASSERT(MAP_IS_ALIGNED(seg) || !seg);
- }
-#else
-# error "Missing mseg_create() implementation"
+ if (MSEG_FLG_IS_2POW(flags))
+ mmap_flags |= ERTS_MMAPFLG_SUPERALIGNED;
+
+ seg = erts_mmap(mmap_flags, sizep);
+
+#ifdef ERTS_PRINT_ERTS_MMAP
+ erts_fprintf(stderr, "%p = erts_mmap(%s, {%bpu, %bpu});\n", seg,
+ (mmap_flags & ERTS_MMAPFLG_SUPERALIGNED) ? "sa" : "sua",
+ req_size, *sizep);
#endif
- }
INC_CC(ma, create);
@@ -414,91 +330,55 @@ mseg_create(ErtsMsegAllctr_t *ma, MemKind* mk, Uint size)
}
static ERTS_INLINE void
-mseg_destroy(ErtsMsegAllctr_t *ma, MemKind* mk, void *seg, Uint size) {
- ERTS_DECLARE_DUMMY(int res);
-
+mseg_destroy(ErtsMsegAllctr_t *ma, Uint flags, MemKind* mk, void *seg_p, UWord size) {
+
+ Uint32 mmap_flags = 0;
#if HALFWORD_HEAP
- if (mk == &ma->low_mem) {
- res = pmunmap((void *) seg, size);
- }
- else
+ mmap_flags |= ((mk == &ma->low_mem)
+ ? ERTS_MMAPFLG_SUPERCARRIER_ONLY
+ : ERTS_MMAPFLG_OS_ONLY);
#endif
- {
-#ifdef HAVE_MMAP
- res = munmap((void *) seg, size);
-#else
-# error "Missing mseg_destroy() implementation"
+ if (MSEG_FLG_IS_2POW(flags))
+ mmap_flags |= ERTS_MMAPFLG_SUPERALIGNED;
+
+ erts_munmap(mmap_flags, seg_p, size);
+#ifdef ERTS_PRINT_ERTS_MMAP
+ erts_fprintf(stderr, "erts_munmap(%s, %p, %bpu);\n",
+ (mmap_flags & ERTS_MMAPFLG_SUPERALIGNED) ? "sa" : "sua",
+		 seg_p, size);
#endif
- }
-
- ASSERT(size % MSEG_ALIGNED_SIZE == 0);
- ASSERT(res == 0);
-
INC_CC(ma, destroy);
}
-#if HAVE_MSEG_RECREATE
-#if defined(__NetBSD__)
-#define MREMAP_FLAGS (0)
-#else
-#define MREMAP_FLAGS (MREMAP_MAYMOVE)
-#endif
-
-
-/* mseg_recreate
- * May return *unaligned* segments as in address not aligned to MSEG_ALIGNMENT
- * it is still page aligned
- *
- * This is fine for single block carriers as long as we don't cache misaligned
- * segments (since multiblock carriers may use them)
- *
- * For multiblock carriers we *need* MSEG_ALIGNMENT but mbc's will never be
- * reallocated.
- *
- * This should probably be fixed the following way:
- * 1) Use an option to segment allocation - NEED_ALIGNMENT
- * 2) Add mremap_align which takes care of aligning a new a mremaped area
- * 3) Fix the cache to handle of aligned and unaligned segments
- */
-
static ERTS_INLINE void *
-mseg_recreate(ErtsMsegAllctr_t *ma, MemKind* mk, void *old_seg, Uint old_size, Uint new_size)
+mseg_recreate(ErtsMsegAllctr_t *ma, Uint flags, MemKind* mk, void *old_seg, UWord old_size, UWord *sizep)
{
+#ifdef ERTS_PRINT_ERTS_MMAP
+ UWord req_size = *sizep;
+#endif
void *new_seg;
-
- ASSERT(old_size % MSEG_ALIGNED_SIZE == 0);
- ASSERT(new_size % MSEG_ALIGNED_SIZE == 0);
-
+ Uint32 mmap_flags = 0;
#if HALFWORD_HEAP
- if (mk == &ma->low_mem) {
- new_seg = (void *) pmremap((void *) old_seg,
- (size_t) old_size,
- (size_t) new_size);
- }
- else
+ mmap_flags |= ((mk == &ma->low_mem)
+ ? ERTS_MMAPFLG_SUPERCARRIER_ONLY
+ : ERTS_MMAPFLG_OS_ONLY);
#endif
- {
-#if HAVE_MREMAP
-#if defined(__NetBSD__)
- new_seg = mremap(old_seg, (size_t)old_size, NULL, new_size, MREMAP_FLAGS);
-#else
- new_seg = mremap(old_seg, (size_t)old_size, (size_t)new_size, MREMAP_FLAGS);
-#endif
- if (new_seg == (void *) MAP_FAILED)
- new_seg = NULL;
-#else
-#error "Missing mseg_recreate() implementation"
-#endif
- }
+ if (MSEG_FLG_IS_2POW(flags))
+ mmap_flags |= ERTS_MMAPFLG_SUPERALIGNED;
+ new_seg = erts_mremap(mmap_flags, old_seg, old_size, sizep);
+
+#ifdef ERTS_PRINT_ERTS_MMAP
+ erts_fprintf(stderr, "%p = erts_mremap(%s, %p, %bpu, {%bpu, %bpu});\n",
+ new_seg, (mmap_flags & ERTS_MMAPFLG_SUPERALIGNED) ? "sa" : "sua",
+ old_seg, old_size, req_size, *sizep);
+#endif
INC_CC(ma, recreate);
return new_seg;
}
-#endif /* #if HAVE_MSEG_RECREATE */
-
#ifdef DEBUG
#define ERTS_DBG_MA_CHK_THR_ACCESS(MA) \
do { \
@@ -528,7 +408,7 @@ static ERTS_INLINE void mseg_cache_clear_node(cache_t *c) {
c->prev = c;
}
-static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, Uint size, Uint flags) {
+static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, UWord size, Uint flags) {
cache_t *c;
ERTS_DBG_MK_CHK_THR_ACCESS(mk);
@@ -566,14 +446,13 @@ static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, Uint size, Ui
return 1;
} else if (!MSEG_FLG_IS_2POW(flags) && !erts_circleq_is_empty(&(mk->cache_unpowered_node))) {
-
/* No free slots.
* Evict oldest slot from unpowered cache so we can cache an unpowered (sbc) segment */
c = erts_circleq_tail(&(mk->cache_unpowered_node));
erts_circleq_remove(c);
- mseg_destroy(mk->ma, mk, c->seg, c->size);
+ mseg_destroy(mk->ma, ERTS_MSEG_FLG_NONE, mk, c->seg, c->size);
mseg_cache_clear_node(c);
c->seg = seg;
@@ -599,7 +478,8 @@ static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, Uint size, Ui
c = erts_circleq_tail(&(mk->cache_powered_node[i]));
erts_circleq_remove(c);
- mseg_destroy(mk->ma, mk, c->seg, c->size);
+ mseg_destroy(mk->ma, ERTS_MSEG_FLG_2POW, mk, c->seg, c->size);
+
mseg_cache_clear_node(c);
c->seg = seg;
@@ -614,18 +494,18 @@ static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, Uint size, Ui
return 0;
}
-static ERTS_INLINE void *cache_get_segment(MemKind *mk, Uint *size_p, Uint flags) {
+static ERTS_INLINE void *cache_get_segment(MemKind *mk, UWord *size_p, Uint flags) {
- Uint size = *size_p;
+ UWord size = *size_p;
ERTS_DBG_MK_CHK_THR_ACCESS(mk);
if (MSEG_FLG_IS_2POW(flags)) {
int i, ix = SIZE_TO_CACHE_AREA_IDX(size);
- void *seg;
+ char *seg;
cache_t *c;
- Uint csize;
+ UWord csize;
ASSERT(IS_2POW(size));
@@ -641,7 +521,7 @@ static ERTS_INLINE void *cache_get_segment(MemKind *mk, Uint *size_p, Uint flags
ASSERT(MAP_IS_ALIGNED(c->seg));
csize = c->size;
- seg = c->seg;
+ seg = (char*) c->seg;
mk->cache_size--;
mk->cache_hits++;
@@ -652,9 +532,8 @@ static ERTS_INLINE void *cache_get_segment(MemKind *mk, Uint *size_p, Uint flags
ASSERT(!(mk->cache_size < 0));
- if (csize != size) {
- mseg_destroy(mk->ma, mk, (char *)seg + size, csize - size);
- }
+ if (csize != size)
+ mseg_destroy(mk->ma, ERTS_MSEG_FLG_2POW, mk, seg + size, csize - size);
return seg;
}
@@ -663,10 +542,10 @@ static ERTS_INLINE void *cache_get_segment(MemKind *mk, Uint *size_p, Uint flags
void *seg;
cache_t *c;
cache_t *best = NULL;
- Uint bdiff = 0;
- Uint csize;
- Uint bad_max_abs = mk->ma->abs_max_cache_bad_fit;
- Uint bad_max_rel = mk->ma->rel_max_cache_bad_fit;
+ UWord bdiff = 0;
+ UWord csize;
+ UWord bad_max_abs = mk->ma->abs_max_cache_bad_fit;
+ UWord bad_max_rel = mk->ma->rel_max_cache_bad_fit;
erts_circleq_foreach(c, &(mk->cache_unpowered_node)) {
csize = c->size;
@@ -728,7 +607,7 @@ static ERTS_INLINE void *cache_get_segment(MemKind *mk, Uint *size_p, Uint flags
* using callbacks from aux-work in the scheduler.
*/
-static ERTS_INLINE Uint mseg_drop_one_memkind_cache_size(MemKind *mk, cache_t *head) {
+static ERTS_INLINE Uint mseg_drop_one_memkind_cache_size(MemKind *mk, Uint flags, cache_t *head) {
cache_t *c = NULL;
c = erts_circleq_tail(head);
@@ -737,7 +616,7 @@ static ERTS_INLINE Uint mseg_drop_one_memkind_cache_size(MemKind *mk, cache_t *h
if (erts_mtrace_enabled)
erts_mtrace_crr_free(SEGTYPE, SEGTYPE, c->seg);
- mseg_destroy(mk->ma, mk, c->seg, c->size);
+ mseg_destroy(mk->ma, flags, mk, c->seg, c->size);
mseg_cache_clear_node(c);
erts_circleq_push_head(&(mk->cache_free), c);
@@ -749,7 +628,7 @@ static ERTS_INLINE Uint mseg_drop_one_memkind_cache_size(MemKind *mk, cache_t *h
return mk->cache_size;
}
-static ERTS_INLINE Uint mseg_drop_memkind_cache_size(MemKind *mk, cache_t *head) {
+static ERTS_INLINE Uint mseg_drop_memkind_cache_size(MemKind *mk, Uint flags, cache_t *head) {
cache_t *c = NULL;
while (!erts_circleq_is_empty(head)) {
@@ -760,7 +639,7 @@ static ERTS_INLINE Uint mseg_drop_memkind_cache_size(MemKind *mk, cache_t *head)
if (erts_mtrace_enabled)
erts_mtrace_crr_free(SEGTYPE, SEGTYPE, c->seg);
- mseg_destroy(mk->ma, mk, c->seg, c->size);
+ mseg_destroy(mk->ma, flags, mk, c->seg, c->size);
mseg_cache_clear_node(c);
erts_circleq_push_head(&(mk->cache_free), c);
@@ -788,11 +667,11 @@ static Uint mseg_check_memkind_cache(MemKind *mk) {
for (i = 0; i < CACHE_AREAS; i++) {
if (!erts_circleq_is_empty(&(mk->cache_powered_node[i])))
- return mseg_drop_one_memkind_cache_size(mk, &(mk->cache_powered_node[i]));
+ return mseg_drop_one_memkind_cache_size(mk, ERTS_MSEG_FLG_2POW, &(mk->cache_powered_node[i]));
}
if (!erts_circleq_is_empty(&(mk->cache_unpowered_node)))
- return mseg_drop_one_memkind_cache_size(mk, &(mk->cache_unpowered_node));
+ return mseg_drop_one_memkind_cache_size(mk, ERTS_MSEG_FLG_NONE, &(mk->cache_unpowered_node));
return 0;
}
@@ -851,12 +730,12 @@ static void mseg_clear_memkind_cache(MemKind *mk) {
if (erts_circleq_is_empty(&(mk->cache_powered_node[i])))
continue;
- mseg_drop_memkind_cache_size(mk, &(mk->cache_powered_node[i]));
+ mseg_drop_memkind_cache_size(mk, ERTS_MSEG_FLG_2POW, &(mk->cache_powered_node[i]));
ASSERT(erts_circleq_is_empty(&(mk->cache_powered_node[i])));
}
/* drop varied caches */
if (!erts_circleq_is_empty(&(mk->cache_unpowered_node)))
- mseg_drop_memkind_cache_size(mk, &(mk->cache_unpowered_node));
+ mseg_drop_memkind_cache_size(mk, ERTS_MSEG_FLG_NONE, &(mk->cache_unpowered_node));
ASSERT(erts_circleq_is_empty(&(mk->cache_unpowered_node)));
ASSERT(mk->cache_size == 0);
@@ -896,36 +775,35 @@ static ERTS_INLINE MemKind* memkind(ErtsMsegAllctr_t *ma,
}
static void *
-mseg_alloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, Uint *size_p,
+mseg_alloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, UWord *size_p,
Uint flags, const ErtsMsegOpt_t *opt)
{
- Uint size;
+ UWord size;
void *seg;
MemKind* mk = memkind(ma, opt);
INC_CC(ma, alloc);
- /* Carrier align */
- size = ALIGNED_CEILING(*size_p);
-
- /* Cache optim (if applicable) */
- if (MSEG_FLG_IS_2POW(flags) && !IS_2POW(size))
- size = ceil_2pow(size);
+ if (!MSEG_FLG_IS_2POW(flags))
+ size = ERTS_PAGEALIGNED_CEILING(*size_p);
+ else {
+ size = ALIGNED_CEILING(*size_p);
+ if (!IS_2POW(size)) {
+ /* Cache optim (if applicable) */
+ size = ceil_2pow(size);
+ }
+ }
-#if CAN_PARTLY_DESTROY
- if (size < ma->min_seg_size)
- ma->min_seg_size = size;
-#endif
-
if (opt->cache && mk->cache_size > 0 && (seg = cache_get_segment(mk, &size, flags)) != NULL)
goto done;
- if ((seg = mseg_create(ma, mk, size)) == NULL)
- size = 0;
+ seg = mseg_create(ma, flags, mk, &size);
+ if (!seg)
+ *size_p = 0;
+ else {
done:
- *size_p = size;
- if (seg) {
+ *size_p = size;
if (erts_mtrace_enabled)
erts_mtrace_crr_alloc(seg, atype, ERTS_MTRACE_SEGMENT_ID, size);
@@ -937,7 +815,7 @@ done:
static void
-mseg_dealloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, Uint size,
+mseg_dealloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, UWord size,
Uint flags, const ErtsMsegOpt_t *opt)
{
MemKind* mk = memkind(ma, opt);
@@ -952,7 +830,7 @@ mseg_dealloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, Uint size,
if (erts_mtrace_enabled)
erts_mtrace_crr_free(atype, SEGTYPE, seg);
- mseg_destroy(ma, mk, seg, size);
+ mseg_destroy(ma, flags, mk, seg, size);
done:
@@ -961,11 +839,11 @@ done:
static void *
mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg,
- Uint old_size, Uint *new_size_p, Uint flags, const ErtsMsegOpt_t *opt)
+ UWord old_size, UWord *new_size_p, Uint flags, const ErtsMsegOpt_t *opt)
{
MemKind* mk;
void *new_seg;
- Uint new_size;
+ UWord new_size;
/* Just allocate a new segment if we didn't have one before */
if (!seg || !old_size) {
@@ -985,90 +863,47 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg,
mk = memkind(ma, opt);
new_seg = seg;
- /* Carrier align */
- new_size = ALIGNED_CEILING(*new_size_p);
-
- /* Cache optim (if applicable) */
- if (MSEG_FLG_IS_2POW(flags) && !IS_2POW(new_size))
- new_size = ceil_2pow(new_size);
-
- if (new_size == old_size)
- ;
- else if (new_size < old_size) {
- Uint shrink_sz = old_size - new_size;
+ if (!MSEG_FLG_IS_2POW(flags))
+ new_size = ERTS_PAGEALIGNED_CEILING(*new_size_p);
+ else {
+ new_size = ALIGNED_CEILING(*new_size_p);
+ if (!IS_2POW(new_size)) {
+ /* Cache optim (if applicable) */
+ new_size = ceil_2pow(new_size);
+ }
+ }
-#if CAN_PARTLY_DESTROY
- if (new_size < ma->min_seg_size)
- ma->min_seg_size = new_size;
-#endif
- /* +M<S>rsbcst <ratio> */
- if (shrink_sz < opt->abs_shrink_th
- && 100*shrink_sz < opt->rel_shrink_th*old_size) {
- new_size = old_size;
+ if (new_size > old_size) {
+ if (opt->preserv) {
+ new_seg = mseg_recreate(ma, flags, mk, (void *) seg, old_size, &new_size);
+ if (!new_seg)
+ new_size = old_size;
}
else {
-
-#if CAN_PARTLY_DESTROY
-
- if (erts_mtrace_enabled)
- erts_mtrace_crr_realloc(new_seg, atype, SEGTYPE, seg, new_size);
-
- mseg_destroy(ma, mk, ((char *) seg) + new_size, shrink_sz);
-
-#elif HAVE_MSEG_RECREATE
- goto do_recreate;
-#else
+ mseg_dealloc(ma, atype, seg, old_size, flags, opt);
new_seg = mseg_alloc(ma, atype, &new_size, flags, opt);
-
- ASSERT(MAP_IS_ALIGNED(new_seg) || !new_seg);
-
if (!new_seg)
- new_size = old_size;
- else {
- sys_memcpy(((char *) new_seg),
- ((char *) seg),
- MIN(new_size, old_size));
- mseg_dealloc(ma, atype, seg, old_size, flags, opt);
- }
-#endif
+ new_size = 0;
}
}
- else {
+ else if (new_size < old_size) {
+ UWord shrink_sz = old_size - new_size;
- if (!opt->preserv) {
- mseg_dealloc(ma, atype, seg, old_size, flags, opt);
- new_seg = mseg_alloc(ma, atype, &new_size, flags, opt);
- ASSERT(MAP_IS_ALIGNED(new_seg) || !new_seg);
+ /* +M<S>rsbcst <ratio> */
+ if (shrink_sz < opt->abs_shrink_th
+ && 100*shrink_sz < opt->rel_shrink_th*old_size) {
+ new_size = old_size;
}
else {
-#if HAVE_MSEG_RECREATE
-#if !CAN_PARTLY_DESTROY
- do_recreate:
-#endif
- new_seg = mseg_recreate(ma, mk, (void *) seg, old_size, new_size);
- /* ASSERT(MAP_IS_ALIGNED(new_seg) || !new_seg);
- * will not always be aligned and it ok for now
- */
-
- if (erts_mtrace_enabled)
- erts_mtrace_crr_realloc(new_seg, atype, SEGTYPE, seg, new_size);
+ new_seg = mseg_recreate(ma, flags, mk, (void *) seg, old_size, &new_size);
if (!new_seg)
new_size = old_size;
-#else
- new_seg = mseg_alloc(ma, atype, &new_size, flags, opt);
-
- ASSERT(MAP_IS_ALIGNED(new_seg) || !new_seg);
-
- if (!new_seg)
- new_size = old_size;
- else {
- sys_memcpy(((char *) new_seg), ((char *) seg), MIN(new_size, old_size));
- mseg_dealloc(ma, atype, seg, old_size, flags, opt);
- }
-#endif
}
}
+ if (erts_mtrace_enabled)
+ erts_mtrace_crr_realloc(new_seg, atype, SEGTYPE, seg, new_size);
+
INC_CC(ma, realloc);
ASSERT(!MSEG_FLG_IS_2POW(flags) || IS_2POW(new_size));
@@ -1106,9 +941,7 @@ static struct {
Eterm mseg_create;
Eterm mseg_create_resize;
Eterm mseg_destroy;
-#if HAVE_MSEG_RECREATE
Eterm mseg_recreate;
-#endif
Eterm mseg_clear_cache;
Eterm mseg_check_cache;
@@ -1129,8 +962,6 @@ init_atoms(ErtsMsegAllctr_t *ma)
#ifdef DEBUG
Eterm *atom;
#endif
-
- ERTS_MSEG_UNLOCK(ma);
erts_mtx_lock(&init_atoms_mutex);
if (!atoms_initialized) {
@@ -1163,9 +994,7 @@ init_atoms(ErtsMsegAllctr_t *ma)
AM_INIT(mseg_create);
AM_INIT(mseg_create_resize);
AM_INIT(mseg_destroy);
-#if HAVE_MSEG_RECREATE
AM_INIT(mseg_recreate);
-#endif
AM_INIT(mseg_clear_cache);
AM_INIT(mseg_check_cache);
@@ -1176,7 +1005,6 @@ init_atoms(ErtsMsegAllctr_t *ma)
#endif
}
- ERTS_MSEG_LOCK(ma);
atoms_initialized = 1;
erts_mtx_unlock(&init_atoms_mutex);
}
@@ -1239,7 +1067,9 @@ info_options(ErtsMsegAllctr_t *ma,
Uint **hpp,
Uint *szp)
{
- Eterm res = THE_NON_VALUE;
+ Eterm res;
+
+ res = erts_mmap_info_options(prefix, print_to_p, print_to_arg, hpp, szp);
if (print_to_p) {
int to = *print_to_p;
@@ -1254,7 +1084,6 @@ info_options(ErtsMsegAllctr_t *ma,
if (!atoms_initialized)
init_atoms(ma);
- res = NIL;
add_2tup(hpp, szp, &res,
am.mcs,
bld_uint(hpp, szp, ma->max_cache_size));
@@ -1293,9 +1122,7 @@ info_calls(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, Uint **hpp
PRINT_CC(to, arg, create);
PRINT_CC(to, arg, create_resize);
PRINT_CC(to, arg, destroy);
-#if HAVE_MSEG_RECREATE
PRINT_CC(to, arg, recreate);
-#endif
PRINT_CC(to, arg, clear_cache);
PRINT_CC(to, arg, check_cache);
@@ -1316,12 +1143,10 @@ info_calls(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, Uint **hpp
bld_unstable_uint(hpp, szp, ma->calls.clear_cache.giga_no),
bld_unstable_uint(hpp, szp, ma->calls.clear_cache.no));
-#if HAVE_MSEG_RECREATE
add_3tup(hpp, szp, &res,
am.mseg_recreate,
bld_unstable_uint(hpp, szp, ma->calls.recreate.giga_no),
bld_unstable_uint(hpp, szp, ma->calls.recreate.no));
-#endif
add_3tup(hpp, szp, &res,
am.mseg_destroy,
bld_unstable_uint(hpp, szp, ma->calls.destroy.giga_no),
@@ -1465,14 +1290,8 @@ erts_mseg_info_options(int ix,
ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_IX(ix);
Eterm res;
- ERTS_MSEG_LOCK(ma);
-
- ERTS_DBG_MA_CHK_THR_ACCESS(ma);
-
res = info_options(ma, "option ", print_to_p, print_to_arg, hpp, szp);
- ERTS_MSEG_UNLOCK(ma);
-
return res;
}
@@ -1490,10 +1309,6 @@ erts_mseg_info(int ix,
Eterm values[4];
Uint n = 0;
- ERTS_MSEG_LOCK(ma);
-
- ERTS_DBG_MA_CHK_THR_ACCESS(ma);
-
if (hpp || szp) {
if (!atoms_initialized)
@@ -1506,6 +1321,10 @@ erts_mseg_info(int ix,
}
values[n++] = info_version(ma, print_to_p, print_to_arg, hpp, szp);
values[n++] = info_options(ma, "option ", print_to_p, print_to_arg, hpp, szp);
+
+ ERTS_MSEG_LOCK(ma);
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
+
#if HALFWORD_HEAP
values[n++] = info_memkind(ma, &ma->low_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp);
values[n++] = info_memkind(ma, &ma->hi_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp);
@@ -1521,7 +1340,7 @@ erts_mseg_info(int ix,
}
void *
-erts_mseg_alloc_opt(ErtsAlcType_t atype, Uint *size_p, Uint flags, const ErtsMsegOpt_t *opt)
+erts_mseg_alloc_opt(ErtsAlcType_t atype, UWord *size_p, Uint flags, const ErtsMsegOpt_t *opt)
{
ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt);
void *seg;
@@ -1529,20 +1348,23 @@ erts_mseg_alloc_opt(ErtsAlcType_t atype, Uint *size_p, Uint flags, const ErtsMse
ERTS_DBG_MA_CHK_THR_ACCESS(ma);
seg = mseg_alloc(ma, atype, size_p, flags, opt);
ERTS_MSEG_UNLOCK(ma);
+ HARD_DBG_INSERT_MSEG(seg, *size_p);
return seg;
}
void *
-erts_mseg_alloc(ErtsAlcType_t atype, Uint *size_p, Uint flags)
+erts_mseg_alloc(ErtsAlcType_t atype, UWord *size_p, Uint flags)
{
return erts_mseg_alloc_opt(atype, size_p, flags, &erts_mseg_default_opt);
}
void
erts_mseg_dealloc_opt(ErtsAlcType_t atype, void *seg,
- Uint size, Uint flags, const ErtsMsegOpt_t *opt)
+ UWord size, Uint flags, const ErtsMsegOpt_t *opt)
{
ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt);
+
+ HARD_DBG_REMOVE_MSEG(seg, size);
ERTS_MSEG_LOCK(ma);
ERTS_DBG_MA_CHK_THR_ACCESS(ma);
mseg_dealloc(ma, atype, seg, size, flags, opt);
@@ -1550,29 +1372,32 @@ erts_mseg_dealloc_opt(ErtsAlcType_t atype, void *seg,
}
void
-erts_mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size, Uint flags)
+erts_mseg_dealloc(ErtsAlcType_t atype, void *seg, UWord size, Uint flags)
{
erts_mseg_dealloc_opt(atype, seg, size, flags, &erts_mseg_default_opt);
}
void *
erts_mseg_realloc_opt(ErtsAlcType_t atype, void *seg,
- Uint old_size, Uint *new_size_p,
+ UWord old_size, UWord *new_size_p,
Uint flags,
const ErtsMsegOpt_t *opt)
{
ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt);
void *new_seg;
+
+ HARD_DBG_REMOVE_MSEG(seg, old_size);
ERTS_MSEG_LOCK(ma);
ERTS_DBG_MA_CHK_THR_ACCESS(ma);
new_seg = mseg_realloc(ma, atype, seg, old_size, new_size_p, flags, opt);
ERTS_MSEG_UNLOCK(ma);
+ HARD_DBG_INSERT_MSEG(new_seg, *new_size_p);
return new_seg;
}
void *
erts_mseg_realloc(ErtsAlcType_t atype, void *seg,
- Uint old_size, Uint *new_size_p, Uint flags)
+ UWord old_size, UWord *new_size_p, Uint flags)
{
return erts_mseg_realloc_opt(atype, seg, old_size, new_size_p,
flags, &erts_mseg_default_opt);
@@ -1662,16 +1487,17 @@ erts_mseg_init(ErtsMsegInit_t *init)
erts_mtx_init(&init_atoms_mutex, "mseg_init_atoms");
-#if HAVE_MMAP && !defined(MAP_ANON)
- mmap_fd = open("/dev/zero", O_RDWR);
- if (mmap_fd < 0)
- erl_exit(ERTS_ABORT_EXIT, "erts_mseg: unable to open /dev/zero\n");
-#endif
+#if HALFWORD_HEAP
+ if (sizeof(void *) != 8)
+ erl_exit(-1,"Halfword emulator cannot be run in 32bit mode");
-#if HAVE_MMAP && HALFWORD_HEAP
- initialize_pmmap();
+ init->mmap.virtual_range.start = (char *) sbrk(0);
+ init->mmap.virtual_range.end = (char *) 0x100000000UL;
+ init->mmap.sco = 0;
#endif
+ erts_mmap_init(&init->mmap);
+
if (!IS_2POW(GET_PAGE_SIZE))
erl_exit(ERTS_ABORT_EXIT, "erts_mseg: Unexpected page_size %beu\n", GET_PAGE_SIZE);
@@ -1712,10 +1538,6 @@ erts_mseg_init(ErtsMsegInit_t *init)
#endif
sys_memzero((void *) &ma->calls, sizeof(ErtsMsegCalls));
-
-#if CAN_PARTLY_DESTROY
- ma->min_seg_size = ~((Uint) 0);
-#endif
}
}
@@ -1757,7 +1579,7 @@ erts_mseg_test(UWord op, UWord a1, UWord a2, UWord a3)
case 0x400: /* Have erts_mseg */
return (UWord) 1;
case 0x401:
- return (UWord) erts_mseg_alloc(ERTS_ALC_A_INVALID, (Uint *) a1, (Uint) 0);
+ return (UWord) erts_mseg_alloc(ERTS_ALC_A_INVALID, (UWord *) a1, (Uint) 0);
case 0x402:
erts_mseg_dealloc(ERTS_ALC_A_INVALID, (void *) a1, (Uint) a2, (Uint) 0);
return (UWord) 0;
@@ -1765,7 +1587,7 @@ erts_mseg_test(UWord op, UWord a1, UWord a2, UWord a3)
return (UWord) erts_mseg_realloc(ERTS_ALC_A_INVALID,
(void *) a1,
(Uint) a2,
- (Uint *) a3,
+ (UWord *) a3,
(Uint) 0);
case 0x404:
erts_mseg_clear_cache();
@@ -1788,405 +1610,3 @@ erts_mseg_test(UWord op, UWord a1, UWord a2, UWord a3)
}
}
-
-
-#if HALFWORD_HEAP
-/*
- * Very simple page oriented mmap replacer. Works in the lower
- * 32 bit address range of a 64bit program.
- * Implements anonymous mmap mremap and munmap with address order first fit.
- * The free list is expected to be very short...
- * To be used for compressed pointers in Erlang halfword emulator
- * implementation. The MacOS X version is more of a toy, it's not really
- * for production as the halfword erlang VM relies on Linux specific memory
- * mapping tricks.
- */
-
-/* #define HARDDEBUG 1 */
-
-#ifdef HARDDEBUG
-static void dump_freelist(void)
-{
- FreeBlock *p = first;
-
- while (p) {
- fprintf(stderr, "p = %p\r\np->num = %ld\r\np->next = %p\r\n\r\n",
- (void *) p, (unsigned long) p->num, (void *) p->next);
- p = p->next;
- }
-}
-
-#define HARDDEBUG_HW_INCOMPLETE_ALIGNMENT(PTR, SZ) \
- fprintf(stderr,"Mapping of address %p with size %ld " \
- "does not map complete pages (%s:%d)\r\n", \
- (void *) (PTR), (unsigned long) (SZ),__FILE__, __LINE__)
-
-#define HARDDEBUG_HW_UNALIGNED_ALIGNMENT(PTR, SZ) \
- fprintf(stderr,"Mapping of address %p with size %ld " \
- "is not page aligned (%s:%d)\r\n", \
- (void *) (PTR), (unsigned long) (SZ),__FILE__, __LINE__)
-
-#define HARDDEBUG_MAP_FAILED(PTR, SZ) \
- fprintf(stderr, "Could not actually map memory " \
- "at address %p with size %ld (%s:%d) ..\r\n", \
- (void *) (PTR), (unsigned long) (SZ),__FILE__, __LINE__)
-#else
-#define HARDDEBUG_HW_INCOMPLETE_ALIGNMENT(PTR, SZ) do{}while(0)
-#define HARDDEBUG_HW_UNALIGNED_ALIGNMENT(PTR, SZ) do{}while(0)
-#define HARDDEBUG_MAP_FAILED(PTR, SZ) do{}while(0)
-#endif
-
-
-#ifdef __APPLE__
-#define MAP_ANONYMOUS MAP_ANON
-#endif
-
-#define INIT_LOCK() do {erts_mtx_init(&pmmap_mutex, "pmmap");} while(0)
-
-#define TAKE_LOCK() do {erts_mtx_lock(&pmmap_mutex);} while(0)
-
-#define RELEASE_LOCK() do {erts_mtx_unlock(&pmmap_mutex);} while(0)
-
-static erts_mtx_t pmmap_mutex; /* Also needed when !USE_THREADS */
-
-typedef struct _free_block {
- unsigned long num; /*pages*/
- struct _free_block *next;
-} FreeBlock;
-
-/* Protect with lock */
-static FreeBlock *first;
-
-static void *do_map(void *ptr, size_t sz)
-{
- void *res;
-
- if (ALIGNED_CEILING(sz) != sz) {
- HARDDEBUG_HW_INCOMPLETE_ALIGNMENT(ptr, sz);
- return NULL;
- }
-
- if (((unsigned long) ptr) % MSEG_ALIGNED_SIZE) {
- HARDDEBUG_HW_UNALIGNED_ALIGNMENT(ptr, sz);
- return NULL;
- }
-
-#if HAVE_MMAP
- res = mmap(ptr, sz,
- PROT_READ | PROT_WRITE, MAP_PRIVATE |
- MAP_ANONYMOUS | MAP_FIXED,
- -1 , 0);
-#else
-# error "Missing mmap support"
-#endif
-
- if (res == MAP_FAILED) {
- HARDDEBUG_MAP_FAILED(ptr, sz);
- return NULL;
- }
-
- return res;
-}
-
-static int do_unmap(void *ptr, size_t sz)
-{
- void *res;
-
- if (ALIGNED_CEILING(sz) != sz) {
- HARDDEBUG_HW_INCOMPLETE_ALIGNMENT(ptr, sz);
- return 1;
- }
-
- if (((unsigned long) ptr) % MSEG_ALIGNED_SIZE) {
- HARDDEBUG_HW_UNALIGNED_ALIGNMENT(ptr, sz);
- return 1;
- }
-
- res = mmap(ptr, sz,
- PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
- -1 , 0);
-
- if (res == MAP_FAILED) {
- HARDDEBUG_MAP_FAILED(ptr, sz);
- return 1;
- }
-
- return 0;
-}
-
-#ifdef __APPLE__
-/*
- * The first 4 gig's are protected on Macos X for 64bit processes :(
- * The range 0x1000000000 - 0x10FFFFFFFF is selected as an arbitrary
- * value of a normally unused range... Real MMAP's will avoid
- * it and all 32bit compressed pointers can be in that range...
- * More expensive than on Linux where expansion of compressed
- * poiters involves no masking (as they are in the first 4 gig's).
- * It's also very uncertain if the MAP_NORESERVE flag really has
- * any effect in MacOS X. Swap space may always be allocated...
- */
-#define SET_RANGE_MIN() /* nothing */
-#define RANGE_MIN 0x1000000000UL
-#define RANGE_MAX 0x1100000000UL
-#define RANGE_MASK (RANGE_MIN)
-#define EXTRA_MAP_FLAGS (MAP_FIXED)
-#else
-static size_t range_min;
-#define SET_RANGE_MIN() do { range_min = (size_t) sbrk(0); } while (0)
-#define RANGE_MIN range_min
-#define RANGE_MAX 0x100000000UL
-#define RANGE_MASK 0UL
-#define EXTRA_MAP_FLAGS (0)
-#endif
-
-static int initialize_pmmap(void)
-{
- char *p,*q,*rptr;
- size_t rsz;
- FreeBlock *initial;
-
- SET_RANGE_MIN();
- if (sizeof(void *) != 8) {
- erl_exit(1,"Halfword emulator cannot be run in 32bit mode");
- }
-
- p = (char *) RANGE_MIN;
- q = (char *) RANGE_MAX;
-
- rsz = ALIGNED_FLOOR(q - p);
-
- rptr = mmap_align(NULL, (void *) p, rsz,
- PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS |
- MAP_NORESERVE | EXTRA_MAP_FLAGS,
- -1 , 0);
-#ifdef HARDDEBUG
- printf("p=%p, rsz = %ld, pages = %ld, got range = %p -> %p\r\n",
- p, (unsigned long) rsz, (unsigned long) (rsz / MSEG_ALIGNED_SIZE),
- (void *) rptr, (void*)(rptr + rsz));
-#endif
- if ((UWord)(rptr + rsz) > RANGE_MAX) {
- size_t rsz_trunc = RANGE_MAX - (UWord)rptr;
-#ifdef HARDDEBUG
- printf("Reducing mmap'ed memory from %lu to %lu Mb, reduced range = %p -> %p\r\n",
- rsz/(1024*1024), rsz_trunc/(1024*1024), rptr, rptr+rsz_trunc);
-#endif
- munmap((void*)RANGE_MAX, rsz - rsz_trunc);
- rsz = rsz_trunc;
- }
- if (!do_map(rptr, MSEG_ALIGNED_SIZE)) {
- erl_exit(1,"Could not actually mmap first page for halfword emulator...\n");
- }
- initial = (FreeBlock *) rptr;
- initial->num = (rsz / MSEG_ALIGNED_SIZE);
- initial->next = NULL;
- first = initial;
- INIT_LOCK();
- return 0;
-}
-
-static void *pmmap(size_t size)
-{
- size_t real_size = ALIGNED_CEILING(size);
- size_t num_pages = real_size / MSEG_ALIGNED_SIZE;
- FreeBlock **block;
- FreeBlock *tail;
- FreeBlock *res;
-
- TAKE_LOCK();
-
- for (block = &first;
- *block != NULL && (*block)->num < num_pages;
- block = &((*block)->next))
- ;
- if (!(*block)) {
- RELEASE_LOCK();
- return NULL;
- }
- if ((*block)->num == num_pages) {
- /* nice, perfect fit */
- res = *block;
- *block = (*block)->next;
- } else {
- tail = (FreeBlock *) (((char *) ((void *) (*block))) + real_size);
- if (!do_map(tail, MSEG_ALIGNED_SIZE)) {
- HARDDEBUG_MAP_FAILED(tail, MSEG_ALIGNED_SIZE);
- RELEASE_LOCK();
- return NULL;
- }
- tail->num = (*block)->num - num_pages;
- tail->next = (*block)->next;
- res = *block;
- *block = tail;
- }
-
- RELEASE_LOCK();
-
- if (!do_map(res, real_size)) {
- HARDDEBUG_MAP_FAILED(res, real_size);
- return NULL;
- }
-
- return (void *) res;
-}
-
-static int pmunmap(void *p, size_t size)
-{
- size_t real_size = ALIGNED_CEILING(size);
- size_t num_pages = real_size / MSEG_ALIGNED_SIZE;
-
- FreeBlock *block;
- FreeBlock *last;
- FreeBlock *nb = (FreeBlock *) p;
-
- ASSERT(((unsigned long)p & CHECK_POINTER_MASK)==0);
-
- if (real_size > MSEG_ALIGNED_SIZE) {
- if (do_unmap(((char *) p) + MSEG_ALIGNED_SIZE, real_size - MSEG_ALIGNED_SIZE)) {
- return 1;
- }
- }
-
- TAKE_LOCK();
-
- last = NULL;
- block = first;
- while(block != NULL && ((void *) block) < p) {
- last = block;
- block = block->next;
- }
-
- if (block != NULL &&
- ((void *) block) == ((void *) (((char *) p) + real_size))) {
- /* Merge new free block with following */
- nb->num = block->num + num_pages;
- nb->next = block->next;
- if (do_unmap(block, MSEG_ALIGNED_SIZE)) {
- RELEASE_LOCK();
- return 1;
- }
- } else {
- /* just link in */
- nb->num = num_pages;
- nb->next = block;
- }
- if (last != NULL) {
- if (p == ((void *) (((char *) last) + (last->num * MSEG_ALIGNED_SIZE)))) {
- /* Merge with previous */
- last->num += nb->num;
- last->next = nb->next;
- if (do_unmap(nb, MSEG_ALIGNED_SIZE)) {
- RELEASE_LOCK();
- return 1;
- }
- } else {
- last->next = nb;
- }
- } else {
- first = nb;
- }
- RELEASE_LOCK();
- return 0;
-}
-
-static void *pmremap(void *old_address, size_t old_size,
- size_t new_size)
-{
- size_t new_real_size = ALIGNED_CEILING(new_size);
- size_t new_num_pages = new_real_size / MSEG_ALIGNED_SIZE;
- size_t old_real_size = ALIGNED_CEILING(old_size);
- size_t old_num_pages = old_real_size / MSEG_ALIGNED_SIZE;
- if (new_num_pages == old_num_pages) {
- return old_address;
- } else if (new_num_pages < old_num_pages) { /* Shrink */
- size_t nfb_pages = old_num_pages - new_num_pages;
- size_t nfb_real_size = old_real_size - new_real_size;
- void *vnfb = (void *) (((char *)old_address) + new_real_size);
- FreeBlock *nfb = (FreeBlock *) vnfb;
- FreeBlock **block;
- TAKE_LOCK();
- for (block = &first;
- *block != NULL && (*block) < nfb;
- block = &((*block)->next))
- ;
- if (!(*block) ||
- (*block) > ((FreeBlock *)(((char *) vnfb) + nfb_real_size))) {
- /* Normal link in */
- if (nfb_pages > 1) {
- if (do_unmap((void *)(((char *) vnfb) + MSEG_ALIGNED_SIZE),
- (nfb_pages - 1)*MSEG_ALIGNED_SIZE)) {
- return NULL;
- }
- }
- nfb->next = (*block);
- nfb->num = nfb_pages;
- (*block) = nfb;
- } else { /* block merge */
- nfb->next = (*block)->next;
- nfb->num = nfb_pages + (*block)->num;
- /* unmap also the first page of the next freeblock */
- (*block) = nfb;
- if (do_unmap((void *)(((char *) vnfb) + MSEG_ALIGNED_SIZE),
- nfb_pages*MSEG_ALIGNED_SIZE)) {
- return NULL;
- }
- }
- RELEASE_LOCK();
- return old_address;
- } else { /* Enlarge */
- FreeBlock **block;
- void *old_end = (void *) (((char *)old_address) + old_real_size);
- TAKE_LOCK();
- for (block = &first;
- *block != NULL && (*block) < (FreeBlock *) old_address;
- block = &((*block)->next))
- ;
- if ((*block) == NULL || old_end > ((void *) RANGE_MAX) ||
- (*block) != old_end ||
- (*block)->num < (new_num_pages - old_num_pages)) {
- /* cannot extend */
- void *result;
- RELEASE_LOCK();
- result = pmmap(new_size);
- if (result == NULL) {
- return NULL;
- }
- memcpy(result,old_address,old_size);
- if (pmunmap(old_address,old_size)) {
- /* Oups... */
- pmunmap(result,new_size);
- return NULL;
- }
- return result;
- } else { /* extend */
- size_t remaining_pages = (*block)->num -
- (new_num_pages - old_num_pages);
- if (!remaining_pages) {
- void *p = (void *) (((char *) (*block)) + MSEG_ALIGNED_SIZE);
- void *n = (*block)->next;
- size_t x = ((*block)->num - 1) * MSEG_ALIGNED_SIZE;
- if (x > 0) {
- if (do_map(p,x) == NULL) {
- RELEASE_LOCK();
- return NULL;
- }
- }
- (*block) = n;
- } else {
- FreeBlock *nfb = (FreeBlock *) ((void *)
- (((char *) old_address) +
- new_real_size));
- void *p = (void *) (((char *) (*block)) + MSEG_ALIGNED_SIZE);
- if (do_map(p,new_real_size - old_real_size) == NULL) {
- RELEASE_LOCK();
- return NULL;
- }
- nfb->num = remaining_pages;
- nfb->next = (*block)->next;
- (*block) = nfb;
- }
- RELEASE_LOCK();
- return old_address;
- }
- }
-}
-#endif /* HALFWORD_HEAP */
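Note on the sizing policy in the mseg_alloc()/mseg_realloc() hunks above: plain (non-2pow) carrier sizes are now only page aligned, while superaligned carriers are rounded up to a power of two via ceil_2pow(). A minimal standalone sketch of that round-up step, using an illustrative helper rather than the one from erl_mseg.c:

#include <assert.h>
#include <stddef.h>

/* Illustrative next-power-of-two round-up, analogous in spirit to the
 * ceil_2pow() used for superaligned (2-pow) carriers in the hunks above. */
static size_t round_up_2pow(size_t size)
{
    size_t p = 1;
    while (p < size)
        p <<= 1;
    return p;
}

int main(void)
{
    /* A 5 MB superaligned request is rounded up to 8 MB; a plain carrier of
     * the same size would only be rounded up to a page multiple. */
    assert(round_up_2pow(5u * 1024 * 1024) == 8u * 1024 * 1024);
    return 0;
}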
diff --git a/erts/emulator/sys/common/erl_mseg.h b/erts/emulator/sys/common/erl_mseg.h
index a4f250ceab..2284b3f8f1 100644
--- a/erts/emulator/sys/common/erl_mseg.h
+++ b/erts/emulator/sys/common/erl_mseg.h
@@ -22,25 +22,25 @@
#include "sys.h"
#include "erl_alloc_types.h"
+#include "erl_mmap.h"
-#ifndef HAVE_MMAP
-# define HAVE_MMAP 0
-#endif
-#ifndef HAVE_MREMAP
-# define HAVE_MREMAP 0
-#endif
-
-#if HAVE_MMAP
+/*
+ * We currently only enable mseg_alloc if we have
+ * a genuine mmap()/munmap() primitive. It is possible
+ * to utilize erts_mmap() without mmap support, but
+ * alloc_util needs to be prepared before we can do
+ * that.
+ */
+#ifdef ERTS_HAVE_GENUINE_OS_MMAP
# define HAVE_ERTS_MSEG 1
-# define HAVE_SUPER_ALIGNED_MB_CARRIERS 1
+# define ERTS_HAVE_MSEG_SUPER_ALIGNED 1
#else
# define HAVE_ERTS_MSEG 0
-# define HAVE_SUPER_ALIGNED_MB_CARRIERS 0
+# define ERTS_HAVE_MSEG_SUPER_ALIGNED 0
#endif
-#if HAVE_SUPER_ALIGNED_MB_CARRIERS
-# define MSEG_ALIGN_BITS (18)
- /* Affects hard limits for sbct and lmbcs documented in erts_alloc.xml */
+#if ERTS_HAVE_MSEG_SUPER_ALIGNED
+# define MSEG_ALIGN_BITS ERTS_MMAP_SUPERALIGNED_BITS
#else
/* If we don't use super aligned multiblock carriers
* we will mmap with page size alignment (and thus use corresponding
@@ -68,6 +68,7 @@ typedef struct {
Uint rmcbf;
Uint mcs;
Uint nos;
+ ErtsMMapInit mmap;
} ErtsMsegInit_t;
#define ERTS_MSEG_INIT_DEFAULT_INITIALIZER \
@@ -75,7 +76,8 @@ typedef struct {
4*1024*1024, /* amcbf: Absolute max cache bad fit */ \
20, /* rmcbf: Relative max cache bad fit */ \
10, /* mcs: Max cache size */ \
- 1000 /* cci: Cache check interval */ \
+ 1000, /* cci: Cache check interval */ \
+ ERTS_MMAP_INIT_DEFAULT_INITER \
}
typedef struct {
@@ -91,12 +93,12 @@ typedef struct {
extern const ErtsMsegOpt_t erts_mseg_default_opt;
-void *erts_mseg_alloc(ErtsAlcType_t, Uint *, Uint);
-void *erts_mseg_alloc_opt(ErtsAlcType_t, Uint *, Uint, const ErtsMsegOpt_t *);
-void erts_mseg_dealloc(ErtsAlcType_t, void *, Uint, Uint);
-void erts_mseg_dealloc_opt(ErtsAlcType_t, void *, Uint, Uint, const ErtsMsegOpt_t *);
-void *erts_mseg_realloc(ErtsAlcType_t, void *, Uint, Uint *, Uint);
-void *erts_mseg_realloc_opt(ErtsAlcType_t, void *, Uint, Uint *, Uint, const ErtsMsegOpt_t *);
+void *erts_mseg_alloc(ErtsAlcType_t, UWord *, Uint);
+void *erts_mseg_alloc_opt(ErtsAlcType_t, UWord *, Uint, const ErtsMsegOpt_t *);
+void erts_mseg_dealloc(ErtsAlcType_t, void *, UWord, Uint);
+void erts_mseg_dealloc_opt(ErtsAlcType_t, void *, UWord, Uint, const ErtsMsegOpt_t *);
+void *erts_mseg_realloc(ErtsAlcType_t, void *, UWord, UWord *, Uint);
+void *erts_mseg_realloc_opt(ErtsAlcType_t, void *, UWord, UWord *, Uint, const ErtsMsegOpt_t *);
void erts_mseg_clear_cache(void);
void erts_mseg_cache_check(void);
Uint erts_mseg_no( const ErtsMsegOpt_t *);
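A hypothetical caller sketch (not part of this patch) of the reworked UWord-based API above: the size argument is in/out, so the allocator may round the request up and then reports the size actually mapped back through the pointer. The allocator type and flag below are assumptions taken from identifiers appearing elsewhere in this diff.

/* Sketch only; requires the ERTS headers and is not standalone. */
UWord size = 1024 * 1024;
void *seg = erts_mseg_alloc(ERTS_ALC_A_INVALID, &size, ERTS_MSEG_FLG_2POW);
if (seg) {
    /* 'size' now holds the size actually mapped (>= the request) */
    erts_mseg_dealloc(ERTS_ALC_A_INVALID, seg, size, ERTS_MSEG_FLG_2POW);
}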
diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c
index 5861b30315..7676d8872a 100644
--- a/erts/emulator/sys/common/erl_poll.c
+++ b/erts/emulator/sys/common/erl_poll.c
@@ -123,8 +123,8 @@ static ERTS_INLINE
int ERTS_SELECT(int nfds, ERTS_fd_set *readfds, ERTS_fd_set *writefds,
ERTS_fd_set *exceptfds, struct timeval *timeout)
{
- ASSERT(!readfds || readfds->sz >= nfds);
- ASSERT(!writefds || writefds->sz >= nfds);
+ ASSERT(!readfds || readfds->sz >= ERTS_FD_SIZE(nfds));
+ ASSERT(!writefds || writefds->sz >= ERTS_FD_SIZE(nfds));
ASSERT(!exceptfds);
return select(nfds,
(readfds ? readfds->ptr : NULL ),
diff --git a/erts/emulator/sys/unix/erl_unix_sys.h b/erts/emulator/sys/unix/erl_unix_sys.h
index c8fcec8547..2c47aa06c2 100644
--- a/erts/emulator/sys/unix/erl_unix_sys.h
+++ b/erts/emulator/sys/unix/erl_unix_sys.h
@@ -107,6 +107,10 @@
#endif
#include <netdb.h>
+#ifdef HAVE_POSIX_MEMALIGN
+# define ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC 1
+#endif
+
/*
* Make sure that MAXPATHLEN is defined.
*/
diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c
index a7ea4b2490..401b37b9d2 100644
--- a/erts/emulator/sys/unix/sys.c
+++ b/erts/emulator/sys/unix/sys.c
@@ -2524,6 +2524,52 @@ void erts_sys_alloc_init(void)
{
}
+#if ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
+void *erts_sys_aligned_alloc(UWord alignment, UWord size)
+{
+#ifdef HAVE_POSIX_MEMALIGN
+ void *ptr = NULL;
+ int error;
+    ASSERT(alignment && (alignment & (alignment - 1)) == 0); /* power of 2 */
+ error = posix_memalign(&ptr, (size_t) alignment, (size_t) size);
+#if HAVE_ERTS_MSEG
+ if (error || !ptr) {
+ erts_mseg_clear_cache();
+ error = posix_memalign(&ptr, (size_t) alignment, (size_t) size);
+ }
+#endif
+ if (error) {
+ errno = error;
+ return NULL;
+ }
+ if (!ptr)
+ errno = ENOMEM;
+ ASSERT(!ptr || (((UWord) ptr) & (alignment - 1)) == 0);
+ return ptr;
+#else
+# error "Missing erts_sys_aligned_alloc() implementation"
+#endif
+}
+
+void erts_sys_aligned_free(UWord alignment, void *ptr)
+{
+    ASSERT(alignment && (alignment & (alignment - 1)) == 0); /* power of 2 */
+ free(ptr);
+}
+
+void *erts_sys_aligned_realloc(UWord alignment, void *ptr, UWord size, UWord old_size)
+{
+ void *new_ptr = erts_sys_aligned_alloc(alignment, size);
+ if (new_ptr) {
+ UWord copy_size = old_size < size ? old_size : size;
+ sys_memcpy(new_ptr, ptr, (size_t) copy_size);
+ erts_sys_aligned_free(alignment, ptr);
+ }
+ return new_ptr;
+}
+
+#endif
+
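For reference, a minimal standalone sketch of the posix_memalign() pattern the wrappers above are built on (plain libc, assuming a POSIX system; none of the ERTS types are needed):

#define _POSIX_C_SOURCE 200112L
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    void *p = NULL;
    /* Alignment must be a power of two and a multiple of sizeof(void *). */
    int err = posix_memalign(&p, 4096, 8192);
    if (err != 0 || p == NULL)
        return 1;
    memset(p, 0, 8192);
    printf("aligned: %d\n", (int) ((((uintptr_t) p) & 4095) == 0));
    free(p);
    return 0;
}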
void *erts_sys_alloc(ErtsAlcType_t t, void *x, Uint sz)
{
void *res = malloc((size_t) sz);
@@ -2592,15 +2638,13 @@ int fd;
}
-#ifdef DEBUG
-
extern int erts_initialized;
void
-erl_assert_error(char* expr, char* file, int line)
+erl_assert_error(const char* expr, const char* func, const char* file, int line)
{
fflush(stdout);
- fprintf(stderr, "Assertion failed: %s in %s, line %d\n",
- expr, file, line);
+ fprintf(stderr, "%s:%d:%s() Assertion failed: %s\n",
+ file, line, func, expr);
fflush(stderr);
#if !defined(ERTS_SMP) && 0
/* Writing a crashdump from a failed assertion when smp support
@@ -2615,6 +2659,8 @@ erl_assert_error(char* expr, char* file, int line)
abort();
}
+#ifdef DEBUG
+
void
erl_debug(char* fmt, ...)
{
diff --git a/erts/emulator/sys/win32/erl_win_sys.h b/erts/emulator/sys/win32/erl_win_sys.h
index 5ce1a61303..0deb097b1a 100644
--- a/erts/emulator/sys/win32/erl_win_sys.h
+++ b/erts/emulator/sys/win32/erl_win_sys.h
@@ -82,7 +82,6 @@
#define NO_ERF
#define NO_ERFC
-#define NO_SYSLOG
#define NO_SYSCONF
#define NO_DAEMON
#define NO_PWD
@@ -95,6 +94,8 @@
# define ERTS_I64_LITERAL(X) X##i64
#endif
+#define ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC 1
+
/*
* Practial Windows specific macros.
*/
diff --git a/erts/emulator/sys/win32/sys.c b/erts/emulator/sys/win32/sys.c
index 93bbae7dee..5ea4703a7a 100755
--- a/erts/emulator/sys/win32/sys.c
+++ b/erts/emulator/sys/win32/sys.c
@@ -32,7 +32,7 @@
#include "erl_threads.h"
#include "../../drivers/win32/win_con.h"
#include "erl_cpu_topology.h"
-
+#include <malloc.h>
void erts_sys_init_float(void);
@@ -2754,6 +2754,30 @@ void erts_sys_free(ErtsAlcType_t t, void *x, void *p)
free(p);
}
+void *erts_sys_aligned_alloc(UWord alignment, UWord size)
+{
+ void *ptr;
+    ASSERT(alignment && (alignment & (alignment - 1)) == 0); /* power of 2 */
+ ptr = _aligned_malloc((size_t) size, (size_t) alignment);
+ ASSERT(!ptr || (((UWord) ptr) & (alignment - 1)) == 0);
+ return ptr;
+}
+
+void erts_sys_aligned_free(UWord alignment, void *ptr)
+{
+    ASSERT(alignment && (alignment & (alignment - 1)) == 0); /* power of 2 */
+ _aligned_free(ptr);
+}
+
+void *erts_sys_aligned_realloc(UWord alignment, void *ptr, UWord size, UWord old_size)
+{
+ void *new_ptr;
+    ASSERT(alignment && (alignment & (alignment - 1)) == 0); /* power of 2 */
+ new_ptr = _aligned_realloc(ptr, (size_t) size, (size_t) alignment);
+ ASSERT(!new_ptr || (((UWord) new_ptr) & (alignment - 1)) == 0);
+ return new_ptr;
+}
+
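For reference, a minimal standalone sketch of the MSVC CRT calls the Windows wrappers above rely on; note the (size, alignment) argument order, which is the reverse of posix_memalign():

#include <malloc.h>
#include <string.h>

int main(void)
{
    void *p = _aligned_malloc(8192, 4096);    /* size, then alignment */
    void *q;
    if (!p)
        return 1;
    memset(p, 0, 8192);
    q = _aligned_realloc(p, 16384, 4096);     /* may move; contents preserved */
    if (!q) {
        _aligned_free(p);
        return 1;
    }
    _aligned_free(q);
    return 0;
}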
static Preload* preloaded = NULL;
static unsigned* res_name = NULL;
static int num_preloaded = 0;
@@ -3042,7 +3066,7 @@ erl_bin_write(buf, sz, max)
}
void
-erl_assert_error(char* expr, char* file, int line)
+erl_assert_error(const char* expr, const char* func, const char* file, int line)
{
char message[1024];
diff --git a/erts/emulator/test/alloc_SUITE.erl b/erts/emulator/test/alloc_SUITE.erl
index 801ed0f85a..f6ff6bb813 100644
--- a/erts/emulator/test/alloc_SUITE.erl
+++ b/erts/emulator/test/alloc_SUITE.erl
@@ -29,6 +29,7 @@
bucket_mask/1,
rbtree/1,
mseg_clear_cache/1,
+ erts_mmap/1,
cpool/1]).
-export([init_per_testcase/2, end_per_testcase/2]).
@@ -41,7 +42,7 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
[basic, coalesce, threads, realloc_copy, bucket_index,
- bucket_mask, rbtree, mseg_clear_cache, cpool].
+ bucket_mask, rbtree, mseg_clear_cache, erts_mmap, cpool].
groups() ->
[].
@@ -110,6 +111,59 @@ cpool(suite) -> [];
cpool(doc) -> [];
cpool(Cfg) -> ?line drv_case(Cfg).
+erts_mmap(Config) when is_list(Config) ->
+ case {?t:os_type(), is_halfword_vm()} of
+ {{unix, _}, false} ->
+ [erts_mmap_do(Config, SCO, SCRPM, SCMGC)
+ || SCO <-[true,false], SCMGC <-[1234,0], SCRPM <- [true,false]];
+
+ {_,true} ->
+ {skipped, "No supercarrier support on halfword vm"};
+ {SkipOs,_} ->
+ ?line {skipped,
+ lists:flatten(["Not run on "
+ | io_lib:format("~p",[SkipOs])])}
+ end.
+
+
+erts_mmap_do(Config, SCO, SCRPM, SCMGC) ->
+ SCS = 100, % Mb
+ O1 = "+MMscs" ++ integer_to_list(SCS)
+ ++ " +MMsco" ++ atom_to_list(SCO)
+ ++ " +MMscrpm" ++ atom_to_list(SCRPM),
+ Opts = case SCMGC of
+ 0 -> O1;
+ _ -> O1 ++ " +MMscmgc"++integer_to_list(SCMGC)
+ end,
+ {ok, Node} = start_node(Config, Opts),
+ Self = self(),
+ Ref = make_ref(),
+ F = fun () ->
+ SI = erlang:system_info({allocator,mseg_alloc}),
+ {erts_mmap,EM} = lists:keyfind(erts_mmap, 1, SI),
+ {supercarrier,SC} = lists:keyfind(supercarrier, 1, EM),
+ {sizes,Sizes} = lists:keyfind(sizes, 1, SC),
+ {free_segs,Segs} = lists:keyfind(free_segs,1,SC),
+ {total,Total} = lists:keyfind(total,1,Sizes),
+ Total = SCS*1024*1024,
+
+ {reserved,Reserved} = lists:keyfind(reserved,1,Segs),
+ true = (Reserved >= SCMGC),
+
+ case {SCO,lists:keyfind(os,1,EM)} of
+ {true, false} -> ok;
+ {false, {os,_}} -> ok
+ end,
+
+ Self ! {Ref, ok}
+ end,
+
+ spawn_link(Node, F),
+ Result = receive {Ref, Rslt} -> Rslt end,
+ stop_node(Node),
+ Result.
+
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% %%
%% Internal functions %%
@@ -179,7 +233,9 @@ receive_drv_result(Port, CaseName) ->
?line {comment, Comment}
end.
-start_node(Config) when is_list(Config) ->
+start_node(Config) ->
+ start_node(Config, []).
+start_node(Config, Opts) when is_list(Config), is_list(Opts) ->
?line Pa = filename:dirname(code:which(?MODULE)),
?line {A, B, C} = now(),
?line Name = list_to_atom(atom_to_list(?MODULE)
@@ -191,7 +247,14 @@ start_node(Config) when is_list(Config) ->
++ integer_to_list(B)
++ "-"
++ integer_to_list(C)),
- ?line ?t:start_node(Name, slave, [{args, "-pa "++Pa}]).
+ ?line ?t:start_node(Name, slave, [{args, Opts++" -pa "++Pa}]).
stop_node(Node) ->
?t:stop_node(Node).
+
+is_halfword_vm() ->
+ case {erlang:system_info({wordsize, internal}),
+ erlang:system_info({wordsize, external})} of
+ {4, 8} -> true;
+ {WS, WS} -> false
+ end.
diff --git a/erts/emulator/test/big_SUITE_data/eq_big.dat b/erts/emulator/test/big_SUITE_data/eq_big.dat
index 5511d1bf10..4ccb33d182 100644
--- a/erts/emulator/test/big_SUITE_data/eq_big.dat
+++ b/erts/emulator/test/big_SUITE_data/eq_big.dat
@@ -13001,4 +13001,5 @@
0 = 7153697524993 bsr 475833444444444444444444444444444444444444444444.
-1 = -83987348 bsr 475833444444444444444444444444444444444444444444.
+0 = 1183140560213014108063589658350 bsr 146783911423364576743092537299333564210980159306769991919205685720763064069663027716481187399048043939495935.
diff --git a/erts/emulator/test/call_trace_SUITE.erl b/erts/emulator/test/call_trace_SUITE.erl
index eaecd32f95..ef1f2aa04c 100644
--- a/erts/emulator/test/call_trace_SUITE.erl
+++ b/erts/emulator/test/call_trace_SUITE.erl
@@ -1193,7 +1193,7 @@ bs_sum_b(Acc, <<>>) -> Acc.
-
+
%%% Help functions.
expect() ->
diff --git a/erts/emulator/test/driver_SUITE.erl b/erts/emulator/test/driver_SUITE.erl
index 104bdf8aec..7087542899 100644
--- a/erts/emulator/test/driver_SUITE.erl
+++ b/erts/emulator/test/driver_SUITE.erl
@@ -367,7 +367,7 @@ compare(Got, Expected) ->
?t:fail(got_bad_data)
end.
-
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Driver timer test suites
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -515,7 +515,7 @@ try_change_timer(Port, Timeout) ->
?line test_server:fail("driver failed to timeout")
end.
-
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Queue test suites
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -719,7 +719,7 @@ deq(Port, Size) ->
read_head(Port, Size) ->
erlang:port_control(Port, ?READ_HEAD, <<Size:32>>).
-
+
driver_unloaded(doc) ->
[];
driver_unloaded(suite) ->
diff --git a/erts/emulator/test/num_bif_SUITE.erl b/erts/emulator/test/num_bif_SUITE.erl
index b92a0e2059..ff8d18eef8 100644
--- a/erts/emulator/test/num_bif_SUITE.erl
+++ b/erts/emulator/test/num_bif_SUITE.erl
@@ -350,12 +350,34 @@ t_integer_to_string(Config) when is_list(Config) ->
(catch erlang:integer_to_list(Value))
end,[atom,1.2,0.0,[$1,[$2]]]),
+ %% Base-2 integers
+ test_its("0", 0, 2),
+ test_its("1", 1, 2),
+ test_its("110110", 54, 2),
+ test_its("-1000000", -64, 2),
+ %% Base-16 integers
+ test_its("0", 0, 16),
+ test_its("A", 10, 16),
+ test_its("D4BE", 54462, 16),
+ test_its("-D4BE", -54462, 16),
+
+ lists:foreach(fun(Value) ->
+ {'EXIT', {badarg, _}} =
+ (catch erlang:integer_to_binary(Value, 8)),
+ {'EXIT', {badarg, _}} =
+ (catch erlang:integer_to_list(Value, 8))
+ end,[atom,1.2,0.0,[$1,[$2]]]),
+
ok.
test_its(List,Int) ->
Int = list_to_integer(List),
Int = binary_to_integer(list_to_binary(List)).
+test_its(List,Int,Base) ->
+ Int = list_to_integer(List, Base),
+ Int = binary_to_integer(list_to_binary(List), Base).
+
%% Tests binary_to_integer/1.
t_string_to_integer(Config) when is_list(Config) ->
diff --git a/erts/emulator/test/scheduler_SUITE.erl b/erts/emulator/test/scheduler_SUITE.erl
index 8931562828..81539faa09 100644
--- a/erts/emulator/test/scheduler_SUITE.erl
+++ b/erts/emulator/test/scheduler_SUITE.erl
@@ -52,6 +52,7 @@
update_cpu_info/1,
sct_cmd/1,
sbt_cmd/1,
+ scheduler_threads/1,
scheduler_suspend/1,
reader_groups/1]).
@@ -66,7 +67,7 @@ all() ->
equal_with_part_time_max,
equal_and_high_with_part_time_max, equal_with_high,
equal_with_high_max, bound_process,
- {group, scheduler_bind}, scheduler_suspend,
+ {group, scheduler_bind}, scheduler_threads, scheduler_suspend,
reader_groups].
groups() ->
@@ -1039,7 +1040,66 @@ sbt_test(Config, CpuTCmd, ClBt, Bt, LP) ->
tuple_to_list(SB)),
?line stop_node(Node),
?line ok.
-
+
+scheduler_threads(Config) when is_list(Config) ->
+ SmpSupport = erlang:system_info(smp_support),
+ {Sched, SchedOnln, _} = get_sstate(Config, ""),
+ %% Configure half the number of both the scheduler threads and
+ %% the scheduler threads online.
+ {HalfSched, HalfSchedOnln} = case SmpSupport of
+ false -> {1,1};
+ true ->
+ {Sched div 2,
+ SchedOnln div 2}
+ end,
+ {HalfSched, HalfSchedOnln, _} = get_sstate(Config, "+SP 50:50"),
+ %% Use +S to configure 4x the number of scheduler threads and
+ %% 4x the number of scheduler threads online, but alter that
+ %% setting using +SP to 50% scheduler threads and 25% scheduler
+ %% threads online. The result should be 2x scheduler threads and
+ %% 1x scheduler threads online.
+ TwiceSched = case SmpSupport of
+ false -> 1;
+ true -> Sched*2
+ end,
+ FourSched = integer_to_list(Sched*4),
+ FourSchedOnln = integer_to_list(SchedOnln*4),
+ CombinedCmd1 = "+S "++FourSched++":"++FourSchedOnln++" +SP50:25",
+ {TwiceSched, SchedOnln, _} = get_sstate(Config, CombinedCmd1),
+ %% Now do the same test but with the +S and +SP options in the
+ %% opposite order, since order shouldn't matter.
+ CombinedCmd2 = "+SP50:25 +S "++FourSched++":"++FourSchedOnln,
+ {TwiceSched, SchedOnln, _} = get_sstate(Config, CombinedCmd2),
+ %% Apply two +SP options to make sure the second overrides the first
+ TwoCmd = "+SP 25:25 +SP 100:100",
+ {Sched, SchedOnln, _} = get_sstate(Config, TwoCmd),
+ %% Configure 50% of scheduler threads online only
+ {Sched, HalfSchedOnln, _} = get_sstate(Config, "+SP:50"),
+ %% Configure 2x scheduler threads only
+ {TwiceSched, SchedOnln, _} = get_sstate(Config, "+SP 200"),
+ %% Test resetting the scheduler counts
+ ResetCmd = "+S "++FourSched++":"++FourSchedOnln++" +S 0:0",
+ {Sched, SchedOnln, _} = get_sstate(Config, ResetCmd),
+ %% Test negative +S settings, but only for SMP-enabled emulators
+ case SmpSupport of
+ false -> ok;
+ true ->
+ SchedMinus1 = Sched-1,
+ SchedOnlnMinus1 = SchedOnln-1,
+ {SchedMinus1, SchedOnlnMinus1, _} = get_sstate(Config, "+S -1"),
+ {Sched, SchedOnlnMinus1, _} = get_sstate(Config, "+S :-1"),
+ {SchedMinus1, SchedOnlnMinus1, _} = get_sstate(Config, "+S -1:-1")
+ end,
+ ok.
+
+get_sstate(Config, Cmd) ->
+ {ok, Node} = start_node(Config, Cmd),
+ [SState] = mcall(Node, [fun () ->
+ erlang:system_info(schedulers_state)
+ end]),
+ stop_node(Node),
+ SState.
+
scheduler_suspend(Config) when is_list(Config) ->
?line Dog = ?t:timetrap(?t:minutes(5)),
?line lists:foreach(fun (S) -> scheduler_suspend_test(Config, S) end,
diff --git a/erts/emulator/test/statistics_SUITE.erl b/erts/emulator/test/statistics_SUITE.erl
index a93dd309c1..c428be6c5a 100644
--- a/erts/emulator/test/statistics_SUITE.erl
+++ b/erts/emulator/test/statistics_SUITE.erl
@@ -75,7 +75,7 @@ end_per_group(_GroupName, Config) ->
Config.
-
+
%%% Testing statistics(wall_clock).
@@ -121,7 +121,7 @@ wall_clock_update1(N) when N > 0 ->
wall_clock_update1(0) ->
ok.
-
+
%%% Test statistics(runtime).
@@ -199,7 +199,7 @@ do_much(N) ->
_ = 4784728478274827 * 72874284728472,
do_much(N-1).
-
+
reductions(doc) ->
"Test that statistics(reductions) is callable, and that "
"Total_Reductions and Reductions_Since_Last_Call make sense. "
@@ -246,7 +246,7 @@ reductions_big_loop() ->
reductions_big_loop()
end.
-
+
%%% Tests of statistics(run_queue).
@@ -295,7 +295,7 @@ hog_iter(N, Mon) when N > 0 ->
end;
hog_iter(0, Mon) ->
?line hog_iter(10000, Mon).
-
+
%%% Tests of statistics(scheduler_wall_time).
scheduler_wall_time(doc) ->
@@ -363,7 +363,7 @@ load_percentage([{Id, WN, TN}|Ss], [{Id, WP, TP}|Ps]) ->
[100*(WN-WP) div (TN-TP)|load_percentage(Ss, Ps)];
load_percentage([], []) -> [].
-
+
garbage_collection(doc) ->
"Tests that statistics(garbage_collection) is callable. "
"It is not clear how to test anything more.";
diff --git a/erts/emulator/test/system_info_SUITE.erl b/erts/emulator/test/system_info_SUITE.erl
index 0350eb671d..ceb4afb5cf 100644
--- a/erts/emulator/test/system_info_SUITE.erl
+++ b/erts/emulator/test/system_info_SUITE.erl
@@ -37,7 +37,8 @@
init_per_group/2,end_per_group/2,
init_per_testcase/2, end_per_testcase/2]).
--export([process_count/1, system_version/1, misc_smoke_tests/1, heap_size/1, wordsize/1, memory/1]).
+-export([process_count/1, system_version/1, misc_smoke_tests/1, heap_size/1, wordsize/1, memory/1,
+ ets_limit/1]).
-define(DEFAULT_TIMEOUT, ?t:minutes(2)).
@@ -45,7 +46,7 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
[process_count, system_version, misc_smoke_tests,
- heap_size, wordsize, memory].
+ heap_size, wordsize, memory, ets_limit].
groups() ->
[].
@@ -496,3 +497,52 @@ mapn(_Fun, 0) ->
[];
mapn(Fun, N) ->
[Fun(N) | mapn(Fun, N-1)].
+
+ets_limit(doc) ->
+ "Verify system_info(ets_limit) reflects max ETS table settings.";
+ets_limit(suite) -> [];
+ets_limit(Config0) when is_list(Config0) ->
+ Config = [{testcase,ets_limit}|Config0],
+ true = is_integer(get_ets_limit(Config)),
+ 12345 = get_ets_limit(Config, 12345),
+ ok.
+
+get_ets_limit(Config) ->
+ get_ets_limit(Config, 0).
+get_ets_limit(Config, EtsMax) ->
+ Envs = case EtsMax of
+ 0 -> [];
+ _ -> [{"ERL_MAX_ETS_TABLES", integer_to_list(EtsMax)}]
+ end,
+ {ok, Node} = start_node(Config, Envs),
+ Me = self(),
+ Ref = make_ref(),
+ spawn_link(Node,
+ fun() ->
+ Res = erlang:system_info(ets_limit),
+ unlink(Me),
+ Me ! {Ref, Res}
+ end),
+ receive
+ {Ref, Res} ->
+ Res
+ end,
+ stop_node(Node),
+ Res.
+
+start_node(Config, Envs) when is_list(Config) ->
+ Pa = filename:dirname(code:which(?MODULE)),
+ {A, B, C} = now(),
+ Name = list_to_atom(atom_to_list(?MODULE)
+ ++ "-"
+ ++ atom_to_list(?config(testcase, Config))
+ ++ "-"
+ ++ integer_to_list(A)
+ ++ "-"
+ ++ integer_to_list(B)
+ ++ "-"
+ ++ integer_to_list(C)),
+ ?t:start_node(Name, peer, [{args, "-pa "++Pa}, {env, Envs}]).
+
+stop_node(Node) ->
+ ?t:stop_node(Node).
diff --git a/erts/emulator/test/time_SUITE.erl b/erts/emulator/test/time_SUITE.erl
index 4d12e3449c..a0a8a9c42c 100644
--- a/erts/emulator/test/time_SUITE.erl
+++ b/erts/emulator/test/time_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -241,14 +241,26 @@ compare(Utc0, Local) ->
%% Two linear times can be subtracted to give their difference
%% in seconds.
%%
-%% XXX Limitations: The length of months and leap years are not
-%% taken into account; thus a comparision of dates is only
-%% valid if they are in the SAME month.
+%% XXX Limitations: Simplified leap year calc will fail for 2100 :-)
linear_time({{Year, Mon, Day}, {Hour, Min, Sec}}) ->
- 86400*(366*Year + 31*(Mon-1) + (Day-1)) +
+ 86400*(year_to_days(Year) + month_to_days(Year,Mon) + (Day-1)) +
3600*Hour + 60*Min + Sec.
+year_to_days(Year) ->
+ Year * 365 + (Year-1) div 4.
+
+month_to_days(Year, Mon) ->
+ DoM = [31,days_in_february(Year),31,30,31,30,31,31,30,31,30,31],
+ {PastMonths,_} = lists:split(Mon-1, DoM),
+ lists:sum(PastMonths).
+
+days_in_february(Year) ->
+ case (Year rem 4) of
+ 0 -> 29;
+ _ -> 28
+ end.
+
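The helpers above use a deliberately simplified calendar: 365 days per year plus one leap day every fourth year, with no 100/400-year rule, which is why the comment flags year 2100. A small standalone C sketch of the same arithmetic, for illustration only:

#include <stdio.h>

/* Same simplification as the Erlang year_to_days/1 above: one leap day
 * every 4 years, ignoring the 100/400-year rules. */
static long year_to_days(long year)
{
    return year * 365 + (year - 1) / 4;
}

int main(void)
{
    /* The difference spans the leap year 2012, so it is 366 days. */
    printf("%ld\n", year_to_days(2013) - year_to_days(2012));
    return 0;
}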
%% This functions returns either the normal timezone or the
%% the DST timezone, depending on the given UTC time.
%%
diff --git a/erts/emulator/test/trace_SUITE.erl b/erts/emulator/test/trace_SUITE.erl
index 0f513f0dcb..2251575e5a 100644
--- a/erts/emulator/test/trace_SUITE.erl
+++ b/erts/emulator/test/trace_SUITE.erl
@@ -1427,7 +1427,7 @@ receive_nothing() ->
ok
end.
-
+
%%% Models for various kinds of processes.
process(Dest) ->
diff --git a/erts/emulator/test/trace_port_SUITE.erl b/erts/emulator/test/trace_port_SUITE.erl
index cc2eadafbc..99df8da107 100644
--- a/erts/emulator/test/trace_port_SUITE.erl
+++ b/erts/emulator/test/trace_port_SUITE.erl
@@ -648,7 +648,7 @@ fun_spawn(Fun, Opts) ->
% []
% end.
-
+
%%% Models for various kinds of processes.
%% Sends messages when ordered to.