Diffstat (limited to 'erts/emulator/sys/common')
 -rw-r--r--  erts/emulator/sys/common/erl_check_io.c                    |  29
 -rw-r--r--  erts/emulator/sys/common/erl_check_io.h                    |   2
 -rw-r--r--  erts/emulator/sys/common/erl_mmap.c                        | 723
 -rw-r--r--  erts/emulator/sys/common/erl_mmap.h                        | 136
 -rw-r--r--  erts/emulator/sys/common/erl_mseg.c                        | 490
 -rw-r--r--  erts/emulator/sys/common/erl_mseg.h                        |  25
 -rw-r--r--  erts/emulator/sys/common/erl_mtrace_sys_wrap.c             |   2
 -rw-r--r--  erts/emulator/sys/common/erl_os_monotonic_time_extender.c  |   4
 -rw-r--r--  erts/emulator/sys/common/erl_poll.c                        |  52
 -rw-r--r--  erts/emulator/sys/common/erl_poll.h                        |  24
 -rw-r--r--  erts/emulator/sys/common/erl_sys_common_misc.c             |   2
 -rw-r--r--  erts/emulator/sys/common/erl_util_queue.h                  |   2
 12 files changed, 760 insertions(+), 731 deletions(-)
diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c
index 105b129065..44a77f3ea5 100644
--- a/erts/emulator/sys/common/erl_check_io.c
+++ b/erts/emulator/sys/common/erl_check_io.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2016. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -39,6 +39,7 @@
#include "erl_check_io.h"
#include "erl_thr_progress.h"
#include "dtrace-wrapper.h"
+#include "lttng-wrapper.h"
#define ERTS_WANT_TIMER_WHEEL_API
#include "erl_time.h"
@@ -395,6 +396,7 @@ forget_removed(struct pollset_info* psi)
if (drv_ptr) {
int was_unmasked = erts_block_fpe();
DTRACE1(driver_stop_select, drv_ptr->name);
+ LTTNG1(driver_stop_select, drv_ptr->name);
(*drv_ptr->stop_select) ((ErlDrvEvent) fd, NULL);
erts_unblock_fpe(was_unmasked);
if (drv_ptr->handle) {
@@ -1055,6 +1057,7 @@ done_unknown:
if (stop_select_fn) {
int was_unmasked = erts_block_fpe();
DTRACE1(driver_stop_select, name);
+ LTTNG1(driver_stop_select, "unknown");
(*stop_select_fn)(e, NULL);
erts_unblock_fpe(was_unmasked);
}
@@ -1337,11 +1340,7 @@ print_select_op(erts_dsprintf_buf_t *dsbufp,
{
Port *pp = erts_drvport2port(ix);
erts_dsprintf(dsbufp,
-#ifdef __OSE__
- "driver_select(%p, %d,%s%s%s%s | %d, %d) "
-#else
"driver_select(%p, %d,%s%s%s%s, %d) "
-#endif
"by ",
ix,
(int) GET_FD(fd),
@@ -1861,25 +1860,6 @@ stale_drv_select(Eterm id, ErtsDrvEventState *state, int mode)
#ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS
-#ifdef __OSE__
-static SafeHashValue drv_ev_state_hash(void *des)
-{
- ErtsSysFdType fd = ((ErtsDrvEventState *) des)->fd;
- /* We use hash on signo ^ id in order for steal to happen when the
- same signo + fd is selected on by two different ports */
- SafeHashValue val = (SafeHashValue)(fd->signo ^ fd->id);
- return val ^ (val >> 8);
-}
-
-static int drv_ev_state_cmp(void *des1, void *des2)
-{
- ErtsSysFdType fd1 = ((ErtsDrvEventState *) des1)->fd;
- ErtsSysFdType fd2 = ((ErtsDrvEventState *) des2)->fd;
- if (fd1->signo == fd2->signo && fd1->id == fd2->id)
- return 0;
- return 1;
-}
-#else /* !__OSE__ && !ERTS_SYS_CONTINOUS_FD_NUMBERS i.e. probably windows */
static SafeHashValue drv_ev_state_hash(void *des)
{
SafeHashValue val = (SafeHashValue) ((ErtsDrvEventState *) des)->fd;
@@ -1891,7 +1871,6 @@ static int drv_ev_state_cmp(void *des1, void *des2)
return ( ((ErtsDrvEventState *) des1)->fd == ((ErtsDrvEventState *) des2)->fd
? 0 : 1);
}
-#endif
static void *drv_ev_state_alloc(void *des_tmpl)
{
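
The erl_check_io.c hunks above do two things: they add LTTng probes (LTTNG1) right next to the existing DTrace probes (DTRACE1), and they drop the OSE-specific fd hashing so only the generic hash/compare pair remains. The following is only a rough sketch of what such a one-argument wrapper typically looks like; it is not the contents of the real lttng-wrapper.h, and the provider name is an assumption:

    #ifdef USE_LTTNG
    #include <lttng/tracepoint.h>
    /* Expand to an lttng-ust tracepoint; "org_erlang_otp" is a made-up provider name. */
    #define LTTNG1(Name, Arg1) tracepoint(org_erlang_otp, Name, (Arg1))
    #else
    /* Tracing compiled out: the probe costs nothing. */
    #define LTTNG1(Name, Arg1) do {} while (0)
    #endif
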
diff --git a/erts/emulator/sys/common/erl_check_io.h b/erts/emulator/sys/common/erl_check_io.h
index c8675a7d9d..14f1ea3f43 100644
--- a/erts/emulator/sys/common/erl_check_io.h
+++ b/erts/emulator/sys/common/erl_check_io.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2016. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/erts/emulator/sys/common/erl_mmap.c b/erts/emulator/sys/common/erl_mmap.c
index 754047829f..7bbb406f29 100644
--- a/erts/emulator/sys/common/erl_mmap.c
+++ b/erts/emulator/sys/common/erl_mmap.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2016. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -28,6 +28,8 @@
#include "erl_mmap.h"
#include <stddef.h>
+#if HAVE_ERTS_MMAP
+
/* #define ERTS_MMAP_OP_RINGBUF_SZ 100 */
#if defined(DEBUG) || 0
@@ -51,23 +53,22 @@
#endif
/*
- * `mmap_state.sa.bot` and `mmap_state.sua.top` are read only after
+ * `mm->sa.bot` and `mm->sua.top` are read only after
* initialization, but the other pointers are not; i.e., only
* ERTS_MMAP_IN_SUPERCARRIER() is allowed without the mutex held.
*/
#define ERTS_MMAP_IN_SUPERCARRIER(PTR) \
- (((UWord) (PTR)) - ((UWord) mmap_state.sa.bot) \
- < ((UWord) mmap_state.sua.top) - ((UWord) mmap_state.sa.bot))
+ (((UWord) (PTR)) - ((UWord) mm->sa.bot) \
+ < ((UWord) mm->sua.top) - ((UWord) mm->sa.bot))
#define ERTS_MMAP_IN_SUPERALIGNED_AREA(PTR) \
- (ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&mmap_state.mtx)), \
- (((UWord) (PTR)) - ((UWord) mmap_state.sa.bot) \
- < ((UWord) mmap_state.sa.top) - ((UWord) mmap_state.sa.bot)))
+ (ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&mm->mtx)), \
+ (((UWord) (PTR)) - ((UWord) mm->sa.bot) \
+ < ((UWord) mm->sa.top) - ((UWord) mm->sa.bot)))
#define ERTS_MMAP_IN_SUPERUNALIGNED_AREA(PTR) \
- (ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&mmap_state.mtx)), \
- (((UWord) (PTR)) - ((UWord) mmap_state.sua.bot) \
- < ((UWord) mmap_state.sua.top) - ((UWord) mmap_state.sua.bot)))
+ (ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&mm->mtx)), \
+ (((UWord) (PTR)) - ((UWord) mm->sua.bot) \
+ < ((UWord) mm->sua.top) - ((UWord) mm->sua.bot)))
-int erts_have_erts_mmap;
UWord erts_page_inv_mask;
#if defined(DEBUG) || defined(ERTS_MMAP_DEBUG)
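
The macros above keep their single-comparison bounds check; the only change is that they now read the passed-in mapper (mm) instead of the file-scope mmap_state. A standalone illustration of the trick, with assumed names (not OTP code): because the subtraction is done on unsigned values, a pointer below bot wraps around to a huge number and fails the test, so one compare covers both ends of the range bot <= ptr < top.

    #include <stdint.h>

    /* Returns 1 if bot <= ptr < top, using a single unsigned comparison. */
    static int in_range(uintptr_t ptr, uintptr_t bot, uintptr_t top)
    {
        return (ptr - bot) < (top - bot);
    }
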
@@ -197,10 +198,10 @@ static ErtsMMapOp mmap_ops[ERTS_MMAP_OP_RINGBUF_SZ];
#define ERTS_MMAP_OP_LCK(RES, IN_SZ, OUT_SZ) \
do { \
- erts_smp_mtx_lock(&mmap_state.mtx); \
+ erts_smp_mtx_lock(&mm->mtx); \
ERTS_MMAP_OP_START((IN_SZ)); \
ERTS_MMAP_OP_END((RES), (OUT_SZ)); \
- erts_smp_mtx_unlock(&mmap_state.mtx); \
+ erts_smp_mtx_unlock(&mm->mtx); \
} while (0)
#define ERTS_MUNMAP_OP(PTR, SZ) \
@@ -219,9 +220,9 @@ static ErtsMMapOp mmap_ops[ERTS_MMAP_OP_RINGBUF_SZ];
#define ERTS_MUNMAP_OP_LCK(PTR, SZ) \
do { \
- erts_smp_mtx_lock(&mmap_state.mtx); \
+ erts_smp_mtx_lock(&mm->mtx); \
ERTS_MUNMAP_OP((PTR), (SZ)); \
- erts_smp_mtx_unlock(&mmap_state.mtx); \
+ erts_smp_mtx_unlock(&mm->mtx); \
} while (0)
#define ERTS_MREMAP_OP_START(OLD_PTR, OLD_SZ, IN_SZ) \
@@ -247,10 +248,10 @@ static ErtsMMapOp mmap_ops[ERTS_MMAP_OP_RINGBUF_SZ];
#define ERTS_MREMAP_OP_LCK(RES, OLD_PTR, OLD_SZ, IN_SZ, OUT_SZ) \
do { \
- erts_smp_mtx_lock(&mmap_state.mtx); \
+ erts_smp_mtx_lock(&mm->mtx); \
ERTS_MREMAP_OP_START((OLD_PTR), (OLD_SZ), (IN_SZ)); \
ERTS_MREMAP_OP_END((RES), (OUT_SZ)); \
- erts_smp_mtx_unlock(&mmap_state.mtx); \
+ erts_smp_mtx_unlock(&mm->mtx); \
} while (0)
#define ERTS_MMAP_OP_ABORT() \
@@ -294,13 +295,14 @@ typedef struct {
Uint nseg;
}ErtsFreeSegMap;
-static struct {
- int (*reserve_physical)(char *, UWord);
+struct ErtsMemMapper_ {
+ int (*reserve_physical)(char *, UWord, int exec);
void (*unreserve_physical)(char *, UWord);
int supercarrier;
int no_os_mmap;
+ int executable; /* is client a native code allocator? */
/*
- * Super unaligend area is located above super aligned
+ * Super unaligned area is located above super aligned
* area. That is, `sa.bot` is beginning of the super
* carrier, `sua.top` is the end of the super carrier,
* and sa.top and sua.bot moves towards eachother.
@@ -346,54 +348,68 @@ static struct {
UWord used;
} os;
} size;
-} mmap_state;
+};
+
+ErtsMemMapper erts_dflt_mmapper;
+
+#if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
+ErtsMemMapper erts_literal_mmapper;
+char* erts_literals_start;
+UWord erts_literals_size;
+#endif
+
+#ifdef ERTS_ALC_A_EXEC
+ErtsMemMapper erts_exec_mmapper;
+#endif
+
+
#define ERTS_MMAP_SIZE_SC_SA_INC(SZ) \
do { \
- mmap_state.size.supercarrier.used.total += (SZ); \
- mmap_state.size.supercarrier.used.sa += (SZ); \
- ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.total \
- <= mmap_state.size.supercarrier.total); \
- ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.sa \
- <= mmap_state.size.supercarrier.used.total); \
+ mm->size.supercarrier.used.total += (SZ); \
+ mm->size.supercarrier.used.sa += (SZ); \
+ ERTS_MMAP_ASSERT(mm->size.supercarrier.used.total \
+ <= mm->size.supercarrier.total); \
+ ERTS_MMAP_ASSERT(mm->size.supercarrier.used.sa \
+ <= mm->size.supercarrier.used.total); \
} while (0)
#define ERTS_MMAP_SIZE_SC_SA_DEC(SZ) \
do { \
- ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.total >= (SZ)); \
- mmap_state.size.supercarrier.used.total -= (SZ); \
- ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.sa >= (SZ)); \
- mmap_state.size.supercarrier.used.sa -= (SZ); \
+ ERTS_MMAP_ASSERT(mm->size.supercarrier.used.total >= (SZ)); \
+ mm->size.supercarrier.used.total -= (SZ); \
+ ERTS_MMAP_ASSERT(mm->size.supercarrier.used.sa >= (SZ)); \
+ mm->size.supercarrier.used.sa -= (SZ); \
} while (0)
#define ERTS_MMAP_SIZE_SC_SUA_INC(SZ) \
do { \
- mmap_state.size.supercarrier.used.total += (SZ); \
- mmap_state.size.supercarrier.used.sua += (SZ); \
- ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.total \
- <= mmap_state.size.supercarrier.total); \
- ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.sua \
- <= mmap_state.size.supercarrier.used.total); \
+ mm->size.supercarrier.used.total += (SZ); \
+ mm->size.supercarrier.used.sua += (SZ); \
+ ERTS_MMAP_ASSERT(mm->size.supercarrier.used.total \
+ <= mm->size.supercarrier.total); \
+ ERTS_MMAP_ASSERT(mm->size.supercarrier.used.sua \
+ <= mm->size.supercarrier.used.total); \
} while (0)
#define ERTS_MMAP_SIZE_SC_SUA_DEC(SZ) \
do { \
- ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.total >= (SZ)); \
- mmap_state.size.supercarrier.used.total -= (SZ); \
- ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.sua >= (SZ)); \
- mmap_state.size.supercarrier.used.sua -= (SZ); \
+ ERTS_MMAP_ASSERT(mm->size.supercarrier.used.total >= (SZ)); \
+ mm->size.supercarrier.used.total -= (SZ); \
+ ERTS_MMAP_ASSERT(mm->size.supercarrier.used.sua >= (SZ)); \
+ mm->size.supercarrier.used.sua -= (SZ); \
} while (0)
#define ERTS_MMAP_SIZE_OS_INC(SZ) \
do { \
- ERTS_MMAP_ASSERT(mmap_state.size.os.used + (SZ) >= (SZ)); \
- mmap_state.size.os.used += (SZ); \
+ ERTS_MMAP_ASSERT(mm->size.os.used + (SZ) >= (SZ)); \
+ mm->size.os.used += (SZ); \
} while (0)
#define ERTS_MMAP_SIZE_OS_DEC(SZ) \
do { \
- ERTS_MMAP_ASSERT(mmap_state.size.os.used >= (SZ)); \
- mmap_state.size.os.used -= (SZ); \
+ ERTS_MMAP_ASSERT(mm->size.os.used >= (SZ)); \
+ mm->size.os.used -= (SZ); \
} while (0)
static void
-add_free_desc_area(char *start, char *end)
+add_free_desc_area(ErtsMemMapper* mm, char *start, char *end)
{
ERTS_MMAP_ASSERT(end == (void *) 0 || end > start);
if (sizeof(ErtsFreeSegDesc) <= ((UWord) end) - ((UWord) start)) {
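
With the former mmap_state fields moved into struct ErtsMemMapper_ and every helper taking an ErtsMemMapper* argument, several independent mappers can coexist: the default one, the literal-area mapper on 64-bit builds, and the executable-code mapper when ERTS_ALC_A_EXEC is defined. A simplified call-site sketch based only on the signatures introduced in this patch; the request sizes and surrounding code are assumptions:

    UWord sz = 2 * ERTS_SUPERALIGNED_SIZE;   /* assumed request size */
    void *seg;

    /* Ordinary carrier memory comes from the default mapper. */
    seg = erts_mmap(&erts_dflt_mmapper, ERTS_MMAPFLG_SUPERALIGNED, &sz);

    #if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
    /* Literals go through their own mapper, confined to one reserved range. */
    seg = erts_mmap(&erts_literal_mmapper, ERTS_MMAPFLG_SUPERCARRIER_ONLY, &sz);
    #endif
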
@@ -403,7 +419,7 @@ add_free_desc_area(char *start, char *end)
no = 1;
prev_desc = (ErtsFreeSegDesc *) start;
- prev_desc->start = mmap_state.desc.free_list;
+ prev_desc->start = mm->desc.free_list;
desc = (ErtsFreeSegDesc *) (start + sizeof(ErtsFreeSegDesc));
desc_end = start + 2*sizeof(ErtsFreeSegDesc);
@@ -414,59 +430,59 @@ add_free_desc_area(char *start, char *end)
desc_end += sizeof(ErtsFreeSegDesc);
no++;
}
- mmap_state.desc.free_list = (char *) prev_desc;
- mmap_state.no.free_seg_descs += no;
+ mm->desc.free_list = (char *) prev_desc;
+ mm->no.free_seg_descs += no;
}
}
static ErtsFreeSegDesc *
-add_unused_free_desc_area(void)
+add_unused_free_desc_area(ErtsMemMapper* mm)
{
char *ptr;
- if (!mmap_state.desc.unused_start)
+ if (!mm->desc.unused_start)
return NULL;
- ERTS_MMAP_ASSERT(mmap_state.desc.unused_end);
+ ERTS_MMAP_ASSERT(mm->desc.unused_end);
ERTS_MMAP_ASSERT(ERTS_PAGEALIGNED_SIZE
- <= mmap_state.desc.unused_end - mmap_state.desc.unused_start);
+ <= mm->desc.unused_end - mm->desc.unused_start);
- ptr = mmap_state.desc.unused_start + ERTS_PAGEALIGNED_SIZE;
- add_free_desc_area(mmap_state.desc.unused_start, ptr);
+ ptr = mm->desc.unused_start + ERTS_PAGEALIGNED_SIZE;
+ add_free_desc_area(mm, mm->desc.unused_start, ptr);
- if ((mmap_state.desc.unused_end - ptr) >= ERTS_PAGEALIGNED_SIZE)
- mmap_state.desc.unused_start = ptr;
+ if ((mm->desc.unused_end - ptr) >= ERTS_PAGEALIGNED_SIZE)
+ mm->desc.unused_start = ptr;
else
- mmap_state.desc.unused_end = mmap_state.desc.unused_start = NULL;
+ mm->desc.unused_end = mm->desc.unused_start = NULL;
- ERTS_MMAP_ASSERT(mmap_state.desc.free_list);
- return (ErtsFreeSegDesc *) mmap_state.desc.free_list;
+ ERTS_MMAP_ASSERT(mm->desc.free_list);
+ return (ErtsFreeSegDesc *) mm->desc.free_list;
}
static ERTS_INLINE ErtsFreeSegDesc *
-alloc_desc(void)
+alloc_desc(ErtsMemMapper* mm)
{
ErtsFreeSegDesc *res;
- res = (ErtsFreeSegDesc *) mmap_state.desc.free_list;
+ res = (ErtsFreeSegDesc *) mm->desc.free_list;
if (!res) {
- res = add_unused_free_desc_area();
+ res = add_unused_free_desc_area(mm);
if (!res)
return NULL;
}
- mmap_state.desc.free_list = res->start;
- ASSERT(mmap_state.no.free_segs.curr < mmap_state.no.free_seg_descs);
- mmap_state.no.free_segs.curr++;
- if (mmap_state.no.free_segs.max < mmap_state.no.free_segs.curr)
- mmap_state.no.free_segs.max = mmap_state.no.free_segs.curr;
+ mm->desc.free_list = res->start;
+ ASSERT(mm->no.free_segs.curr < mm->no.free_seg_descs);
+ mm->no.free_segs.curr++;
+ if (mm->no.free_segs.max < mm->no.free_segs.curr)
+ mm->no.free_segs.max = mm->no.free_segs.curr;
return res;
}
static ERTS_INLINE void
-free_desc(ErtsFreeSegDesc *desc)
+free_desc(ErtsMemMapper* mm, ErtsFreeSegDesc *desc)
{
- desc->start = mmap_state.desc.free_list;
- mmap_state.desc.free_list = (char *) desc;
- ERTS_MMAP_ASSERT(mmap_state.no.free_segs.curr > 0);
- mmap_state.no.free_segs.curr--;
+ desc->start = mm->desc.free_list;
+ mm->desc.free_list = (char *) desc;
+ ERTS_MMAP_ASSERT(mm->no.free_segs.curr > 0);
+ mm->no.free_segs.curr--;
}
static ERTS_INLINE ErtsFreeSegDesc* anode_to_desc(RBTNode* anode)
@@ -1225,6 +1241,7 @@ Eterm build_free_seg_list(Process* p, ErtsFreeSegMap* map)
#if HAVE_MMAP
# define ERTS_MMAP_PROT (PROT_READ|PROT_WRITE)
+# define ERTS_MMAP_PROT_EXEC (PROT_READ|PROT_WRITE|PROT_EXEC)
# if defined(MAP_ANONYMOUS)
# define ERTS_MMAP_FLAGS (MAP_ANON|MAP_PRIVATE)
# define ERTS_MMAP_FD (-1)
@@ -1233,29 +1250,31 @@ Eterm build_free_seg_list(Process* p, ErtsFreeSegMap* map)
# define ERTS_MMAP_FD (-1)
# else
# define ERTS_MMAP_FLAGS (MAP_PRIVATE)
-# define ERTS_MMAP_FD mmap_state.mmap_fd
+# define ERTS_MMAP_FD mm->mmap_fd
# endif
#endif
static ERTS_INLINE void *
-os_mmap(void *hint_ptr, UWord size, int try_superalign)
+os_mmap(void *hint_ptr, UWord size, int try_superalign, int executable)
{
#if HAVE_MMAP
+ const int prot = executable ? ERTS_MMAP_PROT_EXEC : ERTS_MMAP_PROT;
void *res;
#ifdef MAP_ALIGN
if (try_superalign)
- res = mmap((void *) ERTS_SUPERALIGNED_SIZE, size, ERTS_MMAP_PROT,
+ res = mmap((void *) ERTS_SUPERALIGNED_SIZE, size, prot,
ERTS_MMAP_FLAGS|MAP_ALIGN, ERTS_MMAP_FD, 0);
else
#endif
- res = mmap((void *) hint_ptr, size, ERTS_MMAP_PROT,
+ res = mmap((void *) hint_ptr, size, prot,
ERTS_MMAP_FLAGS, ERTS_MMAP_FD, 0);
if (res == MAP_FAILED)
return NULL;
return res;
#elif HAVE_VIRTUALALLOC
+ const DWORD prot = executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
return (void *) VirtualAlloc(NULL, (SIZE_T) size,
- MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE);
+ MEM_COMMIT|MEM_RESERVE, prot);
#else
# error "missing mmap() or similar"
#endif
@@ -1312,16 +1331,26 @@ os_mremap(void *ptr, UWord old_size, UWord new_size, int try_superalign)
#if HAVE_MMAP
#define ERTS_MMAP_RESERVE_PROT (ERTS_MMAP_PROT)
+#define ERTS_MMAP_RESERVE_PROT_EXEC (ERTS_MMAP_PROT_EXEC)
#define ERTS_MMAP_RESERVE_FLAGS (ERTS_MMAP_FLAGS|MAP_FIXED)
#define ERTS_MMAP_UNRESERVE_PROT (PROT_NONE)
+#if defined(__FreeBSD__)
+#define ERTS_MMAP_UNRESERVE_FLAGS (ERTS_MMAP_FLAGS|MAP_FIXED)
+#else
#define ERTS_MMAP_UNRESERVE_FLAGS (ERTS_MMAP_FLAGS|MAP_NORESERVE|MAP_FIXED)
+#endif /* __FreeBSD__ */
#define ERTS_MMAP_VIRTUAL_PROT (PROT_NONE)
+#if defined(__FreeBSD__)
+#define ERTS_MMAP_VIRTUAL_FLAGS (ERTS_MMAP_FLAGS)
+#else
#define ERTS_MMAP_VIRTUAL_FLAGS (ERTS_MMAP_FLAGS|MAP_NORESERVE)
+#endif /* __FreeBSD__ */
static int
-os_reserve_physical(char *ptr, UWord size)
+os_reserve_physical(char *ptr, UWord size, int exec)
{
- void *res = mmap((void *) ptr, (size_t) size, ERTS_MMAP_RESERVE_PROT,
+ const int prot = exec ? ERTS_MMAP_RESERVE_PROT_EXEC : ERTS_MMAP_RESERVE_PROT;
+ void *res = mmap((void *) ptr, (size_t) size, prot,
ERTS_MMAP_RESERVE_FLAGS, ERTS_MMAP_FD, 0);
if (res == (void *) MAP_FAILED)
return 0;
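
The reserve/unreserve pair above implements commit and decommit by remapping the same fixed address range in place: PROT_NONE (with MAP_NORESERVE where it exists; the new FreeBSD branch omits it) keeps the range as a pure address-space reservation, and a MAP_FIXED remap with read/write, plus exec for native-code mappers, makes the pages usable again. A standalone sketch under those assumptions, not the actual ERTS code:

    #include <sys/mman.h>
    #include <stddef.h>

    /* Commit a previously reserved range; returns 1 on success. */
    static int commit(void *ptr, size_t size, int exec)
    {
        int prot = PROT_READ | PROT_WRITE | (exec ? PROT_EXEC : 0);
        return mmap(ptr, size, prot,
                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) != MAP_FAILED;
    }

    /* Give the physical pages back while keeping the address range reserved. */
    static void decommit(void *ptr, size_t size)
    {
        (void) mmap(ptr, size, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    }
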
@@ -1334,14 +1363,43 @@ os_unreserve_physical(char *ptr, UWord size)
void *res = mmap((void *) ptr, (size_t) size, ERTS_MMAP_UNRESERVE_PROT,
ERTS_MMAP_UNRESERVE_FLAGS, ERTS_MMAP_FD, 0);
if (res == (void *) MAP_FAILED)
- erl_exit(ERTS_ABORT_EXIT, "Failed to unreserve memory");
+ erts_exit(ERTS_ABORT_EXIT, "Failed to unreserve memory");
}
static void *
-os_mmap_virtual(char *ptr, UWord size)
+os_mmap_virtual(char *ptr, UWord size, int exec)
{
- void *res = mmap((void *) ptr, (size_t) size, ERTS_MMAP_VIRTUAL_PROT,
- ERTS_MMAP_VIRTUAL_FLAGS, ERTS_MMAP_FD, 0);
+ int flags = ERTS_MMAP_VIRTUAL_FLAGS;
+ void* res;
+
+#ifdef ERTS_ALC_A_EXEC
+ if (exec) {
+ ASSERT(!ptr);
+ /* OTP-19.0: Nice hack below cut-and-pasted from hipe_amd64.c */
+
+# ifdef MAP_32BIT
+ /* If we got MAP_32BIT (Linux), then use that to ask for low memory */
+ flags |= MAP_32BIT;
+# else
+ /* FreeBSD doesn't have MAP_32BIT, and it doesn't respect
+ a plain map_hint (returns high mappings even though the
+ hint refers to a free area), so we have to use both map_hint
+ and MAP_FIXED to get addresses below the 2GB boundary.
+ This is even worse than the Linux/ppc64 case.
+ Similarly, Solaris 10 doesn't have MAP_32BIT,
+ and it doesn't respect a plain map_hint. */
+ ptr = (char*)(512*1024*1024); /* 0.5GB */
+
+# if defined(__FreeBSD__) || defined(__sun__)
+ flags |= MAP_FIXED;
+# endif
+# endif /* !MAP_32BIT */
+ }
+#else /* !ERTS_ALC_A_EXEC */
+ ASSERT(!exec);
+#endif
+ res = mmap((void *) ptr, (size_t) size, ERTS_MMAP_VIRTUAL_PROT,
+ flags, ERTS_MMAP_FD, 0);
if (res == (void *) MAP_FAILED)
return NULL;
return res;
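
The exec branch above (taken from hipe_amd64.c according to the comment) is about placing the reservation below the 2GB boundary so that native code can use 32-bit addressing: Linux gets MAP_32BIT, while FreeBSD and Solaris get a fixed mapping at a 0.5GB hint because they ignore plain hints. A standalone sketch of the same idea; the function name and error handling are assumptions:

    #include <sys/mman.h>
    #include <stddef.h>

    static void *reserve_low_2gb(size_t size)
    {
        void *hint = NULL;
        int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    #ifdef MAP_32BIT
        flags |= MAP_32BIT;                    /* Linux: kernel picks a low address */
    #else
        hint = (void *)(512UL * 1024 * 1024);  /* 0.5GB hint... */
        flags |= MAP_FIXED;                    /* ...pinned, since the hint alone is ignored */
    #endif
        void *p = mmap(hint, size, PROT_NONE, flags, -1, 0);
        return p == MAP_FAILED ? NULL : p;
    }
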
@@ -1354,7 +1412,7 @@ os_mmap_virtual(char *ptr, UWord size)
#endif /* ERTS_HAVE_OS_MMAP */
-static int reserve_noop(char *ptr, UWord size)
+static int reserve_noop(char *ptr, UWord size, int exec)
{
#ifdef ERTS_MMAP_DEBUG_FILL_AREAS
Uint32 *uip, *end = (Uint32 *) (ptr + size);
@@ -1378,11 +1436,12 @@ static void unreserve_noop(char *ptr, UWord size)
}
static UWord
-alloc_desc_insert_free_seg(ErtsFreeSegMap *map, char* start, char* end)
+alloc_desc_insert_free_seg(ErtsMemMapper* mm,
+ ErtsFreeSegMap *map, char* start, char* end)
{
char *ptr;
ErtsFreeSegMap *da_map;
- ErtsFreeSegDesc *desc = alloc_desc();
+ ErtsFreeSegDesc *desc = alloc_desc(mm);
if (desc) {
insert_free_seg(map, desc, start, end);
return 0;
@@ -1395,13 +1454,13 @@ alloc_desc_insert_free_seg(ErtsFreeSegMap *map, char* start, char* end)
*/
#if ERTS_HAVE_OS_MMAP
- if (!mmap_state.no_os_mmap) {
- ptr = os_mmap(mmap_state.desc.new_area_hint, ERTS_PAGEALIGNED_SIZE, 0);
+ if (!mm->no_os_mmap) {
+ ptr = os_mmap(mm->desc.new_area_hint, ERTS_PAGEALIGNED_SIZE, 0, 0);
if (ptr) {
- mmap_state.desc.new_area_hint = ptr+ERTS_PAGEALIGNED_SIZE;
+ mm->desc.new_area_hint = ptr+ERTS_PAGEALIGNED_SIZE;
ERTS_MMAP_SIZE_OS_INC(ERTS_PAGEALIGNED_SIZE);
- add_free_desc_area(ptr, ptr+ERTS_PAGEALIGNED_SIZE);
- desc = alloc_desc();
+ add_free_desc_area(mm, ptr, ptr+ERTS_PAGEALIGNED_SIZE);
+ desc = alloc_desc(mm);
ERTS_MMAP_ASSERT(desc);
insert_free_seg(map, desc, start, end);
return 0;
@@ -1412,20 +1471,20 @@ alloc_desc_insert_free_seg(ErtsFreeSegMap *map, char* start, char* end)
/*
* ...then try to find a good place in the supercarrier...
*/
- da_map = &mmap_state.sua.map;
+ da_map = &mm->sua.map;
desc = lookup_free_seg(da_map, ERTS_PAGEALIGNED_SIZE);
if (desc) {
- if (mmap_state.reserve_physical(desc->start, ERTS_PAGEALIGNED_SIZE))
+ if (mm->reserve_physical(desc->start, ERTS_PAGEALIGNED_SIZE, 0))
ERTS_MMAP_SIZE_SC_SUA_INC(ERTS_PAGEALIGNED_SIZE);
else
desc = NULL;
}
else {
- da_map = &mmap_state.sa.map;
+ da_map = &mm->sa.map;
desc = lookup_free_seg(da_map, ERTS_PAGEALIGNED_SIZE);
if (desc) {
- if (mmap_state.reserve_physical(desc->start, ERTS_PAGEALIGNED_SIZE))
+ if (mm->reserve_physical(desc->start, ERTS_PAGEALIGNED_SIZE, 0))
ERTS_MMAP_SIZE_SC_SA_INC(ERTS_PAGEALIGNED_SIZE);
else
desc = NULL;
@@ -1433,15 +1492,15 @@ alloc_desc_insert_free_seg(ErtsFreeSegMap *map, char* start, char* end)
}
if (desc) {
char *da_end = desc->start + ERTS_PAGEALIGNED_SIZE;
- add_free_desc_area(desc->start, da_end);
+ add_free_desc_area(mm, desc->start, da_end);
if (da_end != desc->end)
resize_free_seg(da_map, desc, da_end, desc->end);
else {
delete_free_seg(da_map, desc);
- free_desc(desc);
+ free_desc(mm, desc);
}
- desc = alloc_desc();
+ desc = alloc_desc(mm);
ERTS_MMAP_ASSERT(desc);
insert_free_seg(map, desc, start, end);
return 0;
@@ -1454,10 +1513,10 @@ alloc_desc_insert_free_seg(ErtsFreeSegMap *map, char* start, char* end)
ptr = start + ERTS_PAGEALIGNED_SIZE;
ERTS_MMAP_ASSERT(ptr <= end);
- add_free_desc_area(start, ptr);
+ add_free_desc_area(mm, start, ptr);
if (ptr != end) {
- desc = alloc_desc();
+ desc = alloc_desc(mm);
ERTS_MMAP_ASSERT(desc);
insert_free_seg(map, desc, ptr, end);
}
@@ -1466,46 +1525,46 @@ alloc_desc_insert_free_seg(ErtsFreeSegMap *map, char* start, char* end)
}
void *
-erts_mmap(Uint32 flags, UWord *sizep)
+erts_mmap(ErtsMemMapper* mm, Uint32 flags, UWord *sizep)
{
char *seg;
UWord asize = ERTS_PAGEALIGNED_CEILING(*sizep);
/* Map in premapped supercarrier */
- if (mmap_state.supercarrier && !(ERTS_MMAPFLG_OS_ONLY & flags)) {
+ if (mm->supercarrier && !(ERTS_MMAPFLG_OS_ONLY & flags)) {
char *end;
ErtsFreeSegDesc *desc;
Uint32 superaligned = (ERTS_MMAPFLG_SUPERALIGNED & flags);
- erts_smp_mtx_lock(&mmap_state.mtx);
+ erts_smp_mtx_lock(&mm->mtx);
ERTS_MMAP_OP_START(*sizep);
if (!superaligned) {
- desc = lookup_free_seg(&mmap_state.sua.map, asize);
+ desc = lookup_free_seg(&mm->sua.map, asize);
if (desc) {
seg = desc->start;
end = seg+asize;
- if (!mmap_state.reserve_physical(seg, asize))
+ if (!mm->reserve_physical(seg, asize, mm->executable))
goto supercarrier_reserve_failure;
if (desc->end == end) {
- delete_free_seg(&mmap_state.sua.map, desc);
- free_desc(desc);
+ delete_free_seg(&mm->sua.map, desc);
+ free_desc(mm, desc);
}
else {
ERTS_MMAP_ASSERT(end < desc->end);
- resize_free_seg(&mmap_state.sua.map, desc, end, desc->end);
+ resize_free_seg(&mm->sua.map, desc, end, desc->end);
}
ERTS_MMAP_SIZE_SC_SUA_INC(asize);
goto supercarrier_success;
}
- if (asize <= mmap_state.sua.bot - mmap_state.sa.top) {
- if (!mmap_state.reserve_physical(mmap_state.sua.bot - asize,
- asize))
+ if (asize <= mm->sua.bot - mm->sa.top) {
+ if (!mm->reserve_physical(mm->sua.bot - asize, asize,
+ mm->executable))
goto supercarrier_reserve_failure;
- mmap_state.sua.bot -= asize;
- seg = mmap_state.sua.bot;
+ mm->sua.bot -= asize;
+ seg = mm->sua.bot;
ERTS_MMAP_SIZE_SC_SUA_INC(asize);
goto supercarrier_success;
}
@@ -1513,84 +1572,87 @@ erts_mmap(Uint32 flags, UWord *sizep)
asize = ERTS_SUPERALIGNED_CEILING(asize);
- desc = lookup_free_seg(&mmap_state.sa.map, asize);
+ desc = lookup_free_seg(&mm->sa.map, asize);
if (desc) {
char *start = seg = desc->start;
seg = (char *) ERTS_SUPERALIGNED_CEILING(seg);
end = seg+asize;
- if (!mmap_state.reserve_physical(start, (UWord) (end - start)))
+ if (!mm->reserve_physical(start, (UWord) (end - start),
+ mm->executable))
goto supercarrier_reserve_failure;
ERTS_MMAP_SIZE_SC_SA_INC(asize);
if (desc->end == end) {
if (start != seg)
- resize_free_seg(&mmap_state.sa.map, desc, start, seg);
+ resize_free_seg(&mm->sa.map, desc, start, seg);
else {
- delete_free_seg(&mmap_state.sa.map, desc);
- free_desc(desc);
+ delete_free_seg(&mm->sa.map, desc);
+ free_desc(mm, desc);
}
}
else {
ERTS_MMAP_ASSERT(end < desc->end);
- resize_free_seg(&mmap_state.sa.map, desc, end, desc->end);
+ resize_free_seg(&mm->sa.map, desc, end, desc->end);
if (start != seg) {
UWord ad_sz;
- ad_sz = alloc_desc_insert_free_seg(&mmap_state.sua.map,
+ ad_sz = alloc_desc_insert_free_seg(mm, &mm->sua.map,
start, seg);
start += ad_sz;
if (start != seg)
- mmap_state.unreserve_physical(start, (UWord) (seg - start));
+ mm->unreserve_physical(start, (UWord) (seg - start));
}
}
goto supercarrier_success;
}
if (superaligned) {
- char *start = mmap_state.sa.top;
+ char *start = mm->sa.top;
seg = (char *) ERTS_SUPERALIGNED_CEILING(start);
- if (asize + (seg - start) <= mmap_state.sua.bot - start) {
+ if (asize + (seg - start) <= mm->sua.bot - start) {
end = seg + asize;
- if (!mmap_state.reserve_physical(start, (UWord) (end - start)))
+ if (!mm->reserve_physical(start, (UWord) (end - start),
+ mm->executable))
goto supercarrier_reserve_failure;
- mmap_state.sa.top = end;
+ mm->sa.top = end;
ERTS_MMAP_SIZE_SC_SA_INC(asize);
if (start != seg) {
UWord ad_sz;
- ad_sz = alloc_desc_insert_free_seg(&mmap_state.sua.map,
+ ad_sz = alloc_desc_insert_free_seg(mm, &mm->sua.map,
start, seg);
start += ad_sz;
if (start != seg)
- mmap_state.unreserve_physical(start, (UWord) (seg - start));
+ mm->unreserve_physical(start, (UWord) (seg - start));
}
goto supercarrier_success;
}
- desc = lookup_free_seg(&mmap_state.sua.map, asize + ERTS_SUPERALIGNED_SIZE);
+ desc = lookup_free_seg(&mm->sua.map, asize + ERTS_SUPERALIGNED_SIZE);
if (desc) {
char *org_start = desc->start;
char *org_end = desc->end;
seg = (char *) ERTS_SUPERALIGNED_CEILING(org_start);
end = seg + asize;
- if (!mmap_state.reserve_physical(seg, (UWord) (org_end - seg)))
+ if (!mm->reserve_physical(seg, (UWord) (org_end - seg),
+ mm->executable))
goto supercarrier_reserve_failure;
ERTS_MMAP_SIZE_SC_SUA_INC(asize);
if (org_start != seg) {
ERTS_MMAP_ASSERT(org_start < seg);
- resize_free_seg(&mmap_state.sua.map, desc, org_start, seg);
+ resize_free_seg(&mm->sua.map, desc, org_start, seg);
desc = NULL;
}
if (end != org_end) {
UWord ad_sz = 0;
ERTS_MMAP_ASSERT(end < org_end);
if (desc)
- resize_free_seg(&mmap_state.sua.map, desc, end, org_end);
+ resize_free_seg(&mm->sua.map, desc, end, org_end);
else
- ad_sz = alloc_desc_insert_free_seg(&mmap_state.sua.map,
+ ad_sz = alloc_desc_insert_free_seg(mm, &mm->sua.map,
end, org_end);
end += ad_sz;
if (end != org_end)
- mmap_state.unreserve_physical(end,
+ mm->unreserve_physical(end,
(UWord) (org_end - end));
}
goto supercarrier_success;
@@ -1598,20 +1660,20 @@ erts_mmap(Uint32 flags, UWord *sizep)
}
ERTS_MMAP_OP_ABORT();
- erts_smp_mtx_unlock(&mmap_state.mtx);
+ erts_smp_mtx_unlock(&mm->mtx);
}
#if ERTS_HAVE_OS_MMAP
/* Map using OS primitives */
- if (!(ERTS_MMAPFLG_SUPERCARRIER_ONLY & flags) && !mmap_state.no_os_mmap) {
+ if (!(ERTS_MMAPFLG_SUPERCARRIER_ONLY & flags) && !mm->no_os_mmap) {
if (!(ERTS_MMAPFLG_SUPERALIGNED & flags)) {
- seg = os_mmap(NULL, asize, 0);
+ seg = os_mmap(NULL, asize, 0, mm->executable);
if (!seg)
goto failure;
}
else {
asize = ERTS_SUPERALIGNED_CEILING(*sizep);
- seg = os_mmap(NULL, asize, 1);
+ seg = os_mmap(NULL, asize, 1, mm->executable);
if (!seg)
goto failure;
@@ -1621,7 +1683,8 @@ erts_mmap(Uint32 flags, UWord *sizep)
os_munmap(seg, asize);
- ptr = os_mmap(NULL, asize + ERTS_SUPERALIGNED_SIZE, 1);
+ ptr = os_mmap(NULL, asize + ERTS_SUPERALIGNED_SIZE, 1,
+ mm->executable);
if (!ptr)
goto failure;
@@ -1661,25 +1724,25 @@ supercarrier_success:
#endif
ERTS_MMAP_OP_END(seg, asize);
- erts_smp_mtx_unlock(&mmap_state.mtx);
+ erts_smp_mtx_unlock(&mm->mtx);
*sizep = asize;
return (void *) seg;
supercarrier_reserve_failure:
- erts_smp_mtx_unlock(&mmap_state.mtx);
+ erts_smp_mtx_unlock(&mm->mtx);
*sizep = 0;
return NULL;
}
void
-erts_munmap(Uint32 flags, void *ptr, UWord size)
+erts_munmap(ErtsMemMapper* mm, Uint32 flags, void *ptr, UWord size)
{
ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(ptr));
ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(size));
if (!ERTS_MMAP_IN_SUPERCARRIER(ptr)) {
- ERTS_MMAP_ASSERT(!mmap_state.no_os_mmap);
+ ERTS_MMAP_ASSERT(!mm->no_os_mmap);
#if ERTS_HAVE_OS_MMAP
ERTS_MUNMAP_OP_LCK(ptr, size);
ERTS_MMAP_SIZE_OS_DEC(size);
@@ -1692,45 +1755,45 @@ erts_munmap(Uint32 flags, void *ptr, UWord size)
ErtsFreeSegDesc *prev, *next, *desc;
UWord ad_sz = 0;
- ERTS_MMAP_ASSERT(mmap_state.supercarrier);
+ ERTS_MMAP_ASSERT(mm->supercarrier);
start = (char *) ptr;
end = start + size;
- erts_smp_mtx_lock(&mmap_state.mtx);
+ erts_smp_mtx_lock(&mm->mtx);
ERTS_MUNMAP_OP(ptr, size);
if (ERTS_MMAP_IN_SUPERALIGNED_AREA(ptr)) {
- map = &mmap_state.sa.map;
+ map = &mm->sa.map;
adjacent_free_seg(map, start, end, &prev, &next);
ERTS_MMAP_SIZE_SC_SA_DEC(size);
- if (end == mmap_state.sa.top) {
+ if (end == mm->sa.top) {
ERTS_MMAP_ASSERT(!next);
if (prev) {
start = prev->start;
delete_free_seg(map, prev);
- free_desc(prev);
+ free_desc(mm, prev);
}
- mmap_state.sa.top = start;
+ mm->sa.top = start;
goto supercarrier_success;
}
}
else {
- map = &mmap_state.sua.map;
+ map = &mm->sua.map;
adjacent_free_seg(map, start, end, &prev, &next);
ERTS_MMAP_SIZE_SC_SUA_DEC(size);
- if (start == mmap_state.sua.bot) {
+ if (start == mm->sua.bot) {
ERTS_MMAP_ASSERT(!prev);
if (next) {
end = next->end;
delete_free_seg(map, next);
- free_desc(next);
+ free_desc(mm, next);
}
- mmap_state.sua.bot = end;
+ mm->sua.bot = end;
goto supercarrier_success;
}
}
@@ -1742,7 +1805,7 @@ erts_munmap(Uint32 flags, void *ptr, UWord size)
end = next->end;
if (prev) {
delete_free_seg(map, next);
- free_desc(next);
+ free_desc(mm, next);
goto save_prev;
}
desc = next;
@@ -1756,7 +1819,7 @@ erts_munmap(Uint32 flags, void *ptr, UWord size)
if (desc)
resize_free_seg(map, desc, start, end);
else
- ad_sz = alloc_desc_insert_free_seg(map, start, end);
+ ad_sz = alloc_desc_insert_free_seg(mm, map, start, end);
supercarrier_success: {
UWord unres_sz;
@@ -1764,30 +1827,32 @@ erts_munmap(Uint32 flags, void *ptr, UWord size)
ERTS_MMAP_ASSERT(size >= ad_sz);
unres_sz = size - ad_sz;
if (unres_sz)
- mmap_state.unreserve_physical(((char *) ptr) + ad_sz, unres_sz);
+ mm->unreserve_physical(((char *) ptr) + ad_sz, unres_sz);
- erts_smp_mtx_unlock(&mmap_state.mtx);
+ erts_smp_mtx_unlock(&mm->mtx);
}
}
}
static void *
-remap_move(Uint32 flags, void *ptr, UWord old_size, UWord *sizep)
+remap_move(ErtsMemMapper* mm,
+ Uint32 flags, void *ptr, UWord old_size, UWord *sizep)
{
UWord size = *sizep;
- void *new_ptr = erts_mmap(flags, &size);
+ void *new_ptr = erts_mmap(mm, flags, &size);
if (!new_ptr)
return NULL;
*sizep = size;
if (old_size < size)
size = old_size;
sys_memcpy(new_ptr, ptr, (size_t) size);
- erts_munmap(flags, ptr, old_size);
+ erts_munmap(mm, flags, ptr, old_size);
return new_ptr;
}
void *
-erts_mremap(Uint32 flags, void *ptr, UWord old_size, UWord *sizep)
+erts_mremap(ErtsMemMapper* mm,
+ Uint32 flags, void *ptr, UWord old_size, UWord *sizep)
{
void *new_ptr;
Uint32 superaligned;
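
remap_move() above is the fallback used whenever a mapping cannot be grown or shrunk in place: map a new area, copy min(old, new) bytes, then unmap the old one. The same pattern with plain POSIX calls, as a standalone sketch with assumed names:

    #include <sys/mman.h>
    #include <string.h>
    #include <stddef.h>

    static void *move_mapping(void *old, size_t old_size, size_t new_size)
    {
        void *new = mmap(NULL, new_size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (new == MAP_FAILED)
            return NULL;
        memcpy(new, old, old_size < new_size ? old_size : new_size);
        munmap(old, old_size);
        return new;
    }
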
@@ -1799,11 +1864,11 @@ erts_mremap(Uint32 flags, void *ptr, UWord old_size, UWord *sizep)
if (!ERTS_MMAP_IN_SUPERCARRIER(ptr)) {
- ERTS_MMAP_ASSERT(!mmap_state.no_os_mmap);
+ ERTS_MMAP_ASSERT(!mm->no_os_mmap);
- if (!(ERTS_MMAPFLG_OS_ONLY & flags) && mmap_state.supercarrier) {
- new_ptr = remap_move(ERTS_MMAPFLG_SUPERCARRIER_ONLY|flags, ptr,
- old_size, sizep);
+ if (!(ERTS_MMAPFLG_OS_ONLY & flags) && mm->supercarrier) {
+ new_ptr = remap_move(mm, ERTS_MMAPFLG_SUPERCARRIER_ONLY|flags,
+ ptr, old_size, sizep);
if (new_ptr)
return new_ptr;
}
@@ -1850,7 +1915,7 @@ erts_mremap(Uint32 flags, void *ptr, UWord old_size, UWord *sizep)
#endif
#if ERTS_HAVE_OS_MREMAP
if (superaligned)
- return remap_move(flags, new_ptr, old_size, sizep);
+ return remap_move(mm, flags, new_ptr, old_size, sizep);
else {
new_ptr = os_mremap(ptr, old_size, asize, 0);
if (!new_ptr)
@@ -1872,10 +1937,10 @@ erts_mremap(Uint32 flags, void *ptr, UWord old_size, UWord *sizep)
ErtsFreeSegDesc *prev, *next;
UWord ad_sz = 0;
- ERTS_MMAP_ASSERT(mmap_state.supercarrier);
+ ERTS_MMAP_ASSERT(mm->supercarrier);
if (ERTS_MMAPFLG_OS_ONLY & flags)
- return remap_move(flags, ptr, old_size, sizep);
+ return remap_move(mm, flags, ptr, old_size, sizep);
superaligned = (ERTS_MMAPFLG_SUPERALIGNED & flags);
@@ -1883,19 +1948,19 @@ erts_mremap(Uint32 flags, void *ptr, UWord old_size, UWord *sizep)
? ERTS_SUPERALIGNED_CEILING(*sizep)
: ERTS_PAGEALIGNED_CEILING(*sizep));
- erts_smp_mtx_lock(&mmap_state.mtx);
+ erts_smp_mtx_lock(&mm->mtx);
if (ERTS_MMAP_IN_SUPERALIGNED_AREA(ptr)
- ? (!superaligned && lookup_free_seg(&mmap_state.sua.map, asize))
- : (superaligned && lookup_free_seg(&mmap_state.sa.map, asize))) {
- erts_smp_mtx_unlock(&mmap_state.mtx);
+ ? (!superaligned && lookup_free_seg(&mm->sua.map, asize))
+ : (superaligned && lookup_free_seg(&mm->sa.map, asize))) {
+ erts_smp_mtx_unlock(&mm->mtx);
/*
* Segment currently in wrong area (due to a previous memory
* shortage), move it to the right area.
* (remap_move() will succeed)
*/
- return remap_move(ERTS_MMAPFLG_SUPERCARRIER_ONLY|flags, ptr,
- old_size, sizep);
+ return remap_move(mm, ERTS_MMAPFLG_SUPERCARRIER_ONLY|flags,
+ ptr, old_size, sizep);
}
ERTS_MREMAP_OP_START(ptr, old_size, *sizep);
@@ -1917,18 +1982,18 @@ erts_mremap(Uint32 flags, void *ptr, UWord old_size, UWord *sizep)
UWord unres_sz;
new_ptr = ptr;
if (!ERTS_MMAP_IN_SUPERALIGNED_AREA(ptr)) {
- map = &mmap_state.sua.map;
+ map = &mm->sua.map;
ERTS_MMAP_SIZE_SC_SUA_DEC(old_size - asize);
}
else {
- if (end == mmap_state.sa.top) {
- mmap_state.sa.top = new_end;
- mmap_state.unreserve_physical(((char *) ptr) + asize,
+ if (end == mm->sa.top) {
+ mm->sa.top = new_end;
+ mm->unreserve_physical(((char *) ptr) + asize,
old_size - asize);
goto supercarrier_resize_success;
}
ERTS_MMAP_SIZE_SC_SA_DEC(old_size - asize);
- map = &mmap_state.sa.map;
+ map = &mm->sa.map;
}
adjacent_free_seg(map, start, end, &prev, &next);
@@ -1936,11 +2001,11 @@ erts_mremap(Uint32 flags, void *ptr, UWord old_size, UWord *sizep)
if (next)
resize_free_seg(map, next, new_end, next->end);
else
- ad_sz = alloc_desc_insert_free_seg(map, new_end, end);
+ ad_sz = alloc_desc_insert_free_seg(mm, map, new_end, end);
ERTS_MMAP_ASSERT(old_size - asize >= ad_sz);
unres_sz = old_size - asize - ad_sz;
if (unres_sz)
- mmap_state.unreserve_physical(((char *) ptr) + asize + ad_sz,
+ mm->unreserve_physical(((char *) ptr) + asize + ad_sz,
unres_sz);
goto supercarrier_resize_success;
}
@@ -1950,17 +2015,18 @@ erts_mremap(Uint32 flags, void *ptr, UWord old_size, UWord *sizep)
ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(old_size));
ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(asize));
- adjacent_free_seg(&mmap_state.sua.map, start, end, &prev, &next);
+ adjacent_free_seg(&mm->sua.map, start, end, &prev, &next);
if (next && new_end <= next->end) {
- if (!mmap_state.reserve_physical(((char *) ptr) + old_size,
- asize - old_size))
+ if (!mm->reserve_physical(((char *) ptr) + old_size,
+ asize - old_size,
+ mm->executable))
goto supercarrier_reserve_failure;
if (new_end < next->end)
- resize_free_seg(&mmap_state.sua.map, next, new_end, next->end);
+ resize_free_seg(&mm->sua.map, next, new_end, next->end);
else {
- delete_free_seg(&mmap_state.sua.map, next);
- free_desc(next);
+ delete_free_seg(&mm->sua.map, next);
+ free_desc(mm, next);
}
new_ptr = ptr;
ERTS_MMAP_SIZE_SC_SUA_INC(asize - old_size);
@@ -1969,28 +2035,30 @@ erts_mremap(Uint32 flags, void *ptr, UWord old_size, UWord *sizep)
}
else { /* Superaligned area */
- if (end == mmap_state.sa.top) {
- if (new_end <= mmap_state.sua.bot) {
- if (!mmap_state.reserve_physical(((char *) ptr) + old_size,
- asize - old_size))
+ if (end == mm->sa.top) {
+ if (new_end <= mm->sua.bot) {
+ if (!mm->reserve_physical(((char *) ptr) + old_size,
+ asize - old_size,
+ mm->executable))
goto supercarrier_reserve_failure;
- mmap_state.sa.top = new_end;
+ mm->sa.top = new_end;
new_ptr = ptr;
ERTS_MMAP_SIZE_SC_SA_INC(asize - old_size);
goto supercarrier_resize_success;
}
}
else {
- adjacent_free_seg(&mmap_state.sa.map, start, end, &prev, &next);
+ adjacent_free_seg(&mm->sa.map, start, end, &prev, &next);
if (next && new_end <= next->end) {
- if (!mmap_state.reserve_physical(((char *) ptr) + old_size,
- asize - old_size))
+ if (!mm->reserve_physical(((char *) ptr) + old_size,
+ asize - old_size,
+ mm->executable))
goto supercarrier_reserve_failure;
if (new_end < next->end)
- resize_free_seg(&mmap_state.sa.map, next, new_end, next->end);
+ resize_free_seg(&mm->sa.map, next, new_end, next->end);
else {
- delete_free_seg(&mmap_state.sa.map, next);
- free_desc(next);
+ delete_free_seg(&mm->sa.map, next);
+ free_desc(mm, next);
}
new_ptr = ptr;
ERTS_MMAP_SIZE_SC_SA_INC(asize - old_size);
@@ -2000,12 +2068,12 @@ erts_mremap(Uint32 flags, void *ptr, UWord old_size, UWord *sizep)
}
ERTS_MMAP_OP_ABORT();
- erts_smp_mtx_unlock(&mmap_state.mtx);
+ erts_smp_mtx_unlock(&mm->mtx);
/* Failed to resize... */
}
- return remap_move(flags, ptr, old_size, sizep);
+ return remap_move(mm, flags, ptr, old_size, sizep);
supercarrier_resize_success:
@@ -2022,26 +2090,26 @@ supercarrier_resize_success:
#endif
ERTS_MREMAP_OP_END(new_ptr, asize);
- erts_smp_mtx_unlock(&mmap_state.mtx);
+ erts_smp_mtx_unlock(&mm->mtx);
*sizep = asize;
return new_ptr;
supercarrier_reserve_failure:
ERTS_MREMAP_OP_END(NULL, old_size);
- erts_smp_mtx_unlock(&mmap_state.mtx);
+ erts_smp_mtx_unlock(&mm->mtx);
*sizep = old_size;
return NULL;
}
-int erts_mmap_in_supercarrier(void *ptr)
+int erts_mmap_in_supercarrier(ErtsMemMapper* mm, void *ptr)
{
return ERTS_MMAP_IN_SUPERCARRIER(ptr);
}
-
static struct {
+ Eterm options;
Eterm total;
Eterm total_sa;
Eterm total_sua;
@@ -2075,6 +2143,7 @@ static void init_atoms(void)
erts_mtx_lock(&am.init_mutex);
if (!am.is_initialized) {
+ AM_INIT(options);
AM_INIT(total);
AM_INIT(total_sa);
AM_INIT(total_sua);
@@ -2103,8 +2172,9 @@ static void hard_dbg_mseg_init(void);
#endif
void
-erts_mmap_init(ErtsMMapInit *init)
+erts_mmap_init(ErtsMemMapper* mm, ErtsMMapInit *init, int executable)
{
+ static int is_first_call = 1;
int virtual_map = 0;
char *start = NULL, *end = NULL;
UWord pagesize;
@@ -2126,25 +2196,26 @@ erts_mmap_init(ErtsMMapInit *init)
#endif
erts_page_inv_mask = pagesize - 1;
if (pagesize & erts_page_inv_mask)
- erl_exit(-1, "erts_mmap: Invalid pagesize: %bpu\n",
+ erts_exit(1, "erts_mmap: Invalid pagesize: %bpu\n",
pagesize);
ERTS_MMAP_OP_RINGBUF_INIT();
- erts_have_erts_mmap = 0;
-
- mmap_state.supercarrier = 0;
- mmap_state.reserve_physical = reserve_noop;
- mmap_state.unreserve_physical = unreserve_noop;
+ mm->supercarrier = 0;
+ mm->reserve_physical = reserve_noop;
+ mm->unreserve_physical = unreserve_noop;
+ mm->executable = executable;
#if HAVE_MMAP && !defined(MAP_ANON)
- mmap_state.mmap_fd = open("/dev/zero", O_RDWR);
- if (mmap_state.mmap_fd < 0)
- erl_exit(-1, "erts_mmap: Failed to open /dev/zero\n");
+ mm->mmap_fd = open("/dev/zero", O_RDWR);
+ if (mm->mmap_fd < 0)
+ erts_exit(1, "erts_mmap: Failed to open /dev/zero\n");
#endif
- erts_smp_mtx_init(&mmap_state.mtx, "erts_mmap");
- erts_mtx_init(&am.init_mutex, "mmap_init_atoms");
+ erts_smp_mtx_init(&mm->mtx, "erts_mmap");
+ if (is_first_call) {
+ erts_mtx_init(&am.init_mutex, "mmap_init_atoms");
+ }
#ifdef ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION
if (init->virtual_range.start) {
@@ -2153,15 +2224,15 @@ erts_mmap_init(ErtsMMapInit *init)
ptr = (char *) ERTS_PAGEALIGNED_CEILING(init->virtual_range.start);
end = (char *) ERTS_PAGEALIGNED_FLOOR(init->virtual_range.end);
sz = end - ptr;
- start = os_mmap_virtual(ptr, sz);
+ start = os_mmap_virtual(ptr, sz, executable);
if (!start || start > ptr || start >= end)
- erl_exit(-1,
+ erts_exit(1,
"erts_mmap: Failed to create virtual range for super carrier\n");
sz = start - ptr;
if (sz)
os_munmap(end, sz);
- mmap_state.reserve_physical = os_reserve_physical;
- mmap_state.unreserve_physical = os_unreserve_physical;
+ mm->reserve_physical = os_reserve_physical;
+ mm->unreserve_physical = os_unreserve_physical;
virtual_map = 1;
}
else
@@ -2178,9 +2249,9 @@ erts_mmap_init(ErtsMMapInit *init)
sz = ERTS_PAGEALIGNED_CEILING(init->scs);
#ifdef ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION
if (!init->scrpm) {
- start = os_mmap_virtual(NULL, sz);
- mmap_state.reserve_physical = os_reserve_physical;
- mmap_state.unreserve_physical = os_unreserve_physical;
+ start = os_mmap_virtual(NULL, sz, executable);
+ mm->reserve_physical = os_reserve_physical;
+ mm->unreserve_physical = os_unreserve_physical;
virtual_map = 1;
}
else
@@ -2190,10 +2261,10 @@ erts_mmap_init(ErtsMMapInit *init)
* The whole supercarrier will by physically
* reserved all the time.
*/
- start = os_mmap(NULL, sz, 1);
+ start = os_mmap(NULL, sz, 1, executable);
}
if (!start)
- erl_exit(-1,
+ erts_exit(1,
"erts_mmap: Failed to create super carrier of size %bpu MB\n",
init->scs/1024/1024);
end = start + sz;
@@ -2206,34 +2277,32 @@ erts_mmap_init(ErtsMMapInit *init)
}
#endif
}
- if (!mmap_state.no_os_mmap)
- erts_have_erts_mmap |= ERTS_HAVE_ERTS_OS_MMAP;
#endif
- mmap_state.no.free_seg_descs = 0;
- mmap_state.no.free_segs.curr = 0;
- mmap_state.no.free_segs.max = 0;
+ mm->no.free_seg_descs = 0;
+ mm->no.free_segs.curr = 0;
+ mm->no.free_segs.max = 0;
- mmap_state.size.supercarrier.total = 0;
- mmap_state.size.supercarrier.used.total = 0;
- mmap_state.size.supercarrier.used.sa = 0;
- mmap_state.size.supercarrier.used.sua = 0;
- mmap_state.size.os.used = 0;
+ mm->size.supercarrier.total = 0;
+ mm->size.supercarrier.used.total = 0;
+ mm->size.supercarrier.used.sa = 0;
+ mm->size.supercarrier.used.sua = 0;
+ mm->size.os.used = 0;
- mmap_state.desc.new_area_hint = NULL;
+ mm->desc.new_area_hint = NULL;
if (!start) {
- mmap_state.sa.bot = NULL;
- mmap_state.sua.top = NULL;
- mmap_state.sa.bot = NULL;
- mmap_state.sua.top = NULL;
- mmap_state.no_os_mmap = 0;
- mmap_state.supercarrier = 0;
+ mm->sa.bot = NULL;
+ mm->sua.top = NULL;
+ mm->sa.bot = NULL;
+ mm->sua.top = NULL;
+ mm->no_os_mmap = 0;
+ mm->supercarrier = 0;
}
else {
size_t desc_size;
- mmap_state.no_os_mmap = init->sco;
+ mm->no_os_mmap = init->sco;
desc_size = init->scrfsd;
if (desc_size < 100)
@@ -2242,68 +2311,75 @@ erts_mmap_init(ErtsMMapInit *init)
if ((desc_size
+ ERTS_SUPERALIGNED_SIZE
+ ERTS_PAGEALIGNED_SIZE) > end - start)
- erl_exit(-1, "erts_mmap: No space for segments in super carrier\n");
+ erts_exit(1, "erts_mmap: No space for segments in super carrier\n");
- mmap_state.sa.bot = start;
- mmap_state.sa.bot += desc_size;
- mmap_state.sa.bot = (char *) ERTS_SUPERALIGNED_CEILING(mmap_state.sa.bot);
- mmap_state.sa.top = mmap_state.sa.bot;
- mmap_state.sua.top = end;
- mmap_state.sua.bot = mmap_state.sua.top;
+ mm->sa.bot = start;
+ mm->sa.bot += desc_size;
+ mm->sa.bot = (char *) ERTS_SUPERALIGNED_CEILING(mm->sa.bot);
+ mm->sa.top = mm->sa.bot;
+ mm->sua.top = end;
+ mm->sua.bot = mm->sua.top;
- mmap_state.size.supercarrier.used.total += (UWord) (mmap_state.sa.bot - start);
+ mm->size.supercarrier.used.total += (UWord) (mm->sa.bot - start);
- mmap_state.desc.free_list = NULL;
- mmap_state.desc.reserved = 0;
+ mm->desc.free_list = NULL;
+ mm->desc.reserved = 0;
if (end == (void *) 0) {
/*
* Very unlikely, but we need a guarantee
- * that `mmap_state.sua.top` always will
+ * that `mm->sua.top` always will
* compare as larger than all segment pointers
* into the super carrier...
*/
- mmap_state.sua.top -= ERTS_PAGEALIGNED_SIZE;
- mmap_state.size.supercarrier.used.total += ERTS_PAGEALIGNED_SIZE;
+ mm->sua.top -= ERTS_PAGEALIGNED_SIZE;
+ mm->size.supercarrier.used.total += ERTS_PAGEALIGNED_SIZE;
#ifdef ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION
- if (!virtual_map || os_reserve_physical(mmap_state.sua.top, ERTS_PAGEALIGNED_SIZE))
+ if (!virtual_map || os_reserve_physical(mm->sua.top, ERTS_PAGEALIGNED_SIZE, 0))
#endif
- add_free_desc_area(mmap_state.sua.top, end);
- mmap_state.desc.reserved += (end - mmap_state.sua.top) / sizeof(ErtsFreeSegDesc);
+ add_free_desc_area(mm, mm->sua.top, end);
+ mm->desc.reserved += (end - mm->sua.top) / sizeof(ErtsFreeSegDesc);
}
- mmap_state.size.supercarrier.total = (UWord) (mmap_state.sua.top - start);
+ mm->size.supercarrier.total = (UWord) (mm->sua.top - start);
/*
* Area before (and after) super carrier
* will be used for free segment descritors.
*/
#ifdef ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION
- if (virtual_map && !os_reserve_physical(start, mmap_state.sa.bot - start))
- erl_exit(-1, "erts_mmap: Failed to reserve physical memory for descriptors\n");
+ if (virtual_map && !os_reserve_physical(start, mm->sa.bot - start, 0))
+ erts_exit(1, "erts_mmap: Failed to reserve physical memory for descriptors\n");
#endif
- mmap_state.desc.unused_start = start;
- mmap_state.desc.unused_end = mmap_state.sa.bot;
- mmap_state.desc.reserved += ((mmap_state.desc.unused_end - start)
+ mm->desc.unused_start = start;
+ mm->desc.unused_end = mm->sa.bot;
+ mm->desc.reserved += ((mm->desc.unused_end - start)
/ sizeof(ErtsFreeSegDesc));
- init_free_seg_map(&mmap_state.sa.map, SA_SZ_ADDR_ORDER);
- init_free_seg_map(&mmap_state.sua.map, SZ_REVERSE_ADDR_ORDER);
+ init_free_seg_map(&mm->sa.map, SA_SZ_ADDR_ORDER);
+ init_free_seg_map(&mm->sua.map, SZ_REVERSE_ADDR_ORDER);
- mmap_state.supercarrier = 1;
- erts_have_erts_mmap |= ERTS_HAVE_ERTS_SUPERCARRIER_MMAP;
+ mm->supercarrier = 1;
- mmap_state.desc.new_area_hint = end;
+ mm->desc.new_area_hint = end;
}
#if !ERTS_HAVE_OS_MMAP
- mmap_state.no_os_mmap = 1;
+ mm->no_os_mmap = 1;
#endif
#ifdef HARD_DEBUG_MSEG
hard_dbg_mseg_init();
#endif
+
+#if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
+ if (mm == &erts_literal_mmapper) {
+ erts_literals_start = erts_literal_mmapper.sa.bot;
+ erts_literals_size = erts_literal_mmapper.sua.top - erts_literals_start;
+ }
+#endif
+ is_first_call = 0;
}
@@ -2313,7 +2389,8 @@ add_2tup(Uint **hpp, Uint *szp, Eterm *lp, Eterm el1, Eterm el2)
*lp = erts_bld_cons(hpp, szp, erts_bld_tuple(hpp, szp, 2, el1, el2), *lp);
}
-Eterm erts_mmap_info(int *print_to_p,
+Eterm erts_mmap_info(ErtsMemMapper* mm,
+ int *print_to_p,
void *print_to_arg,
Eterm** hpp, Uint* szp,
struct erts_mmap_info_struct* emis)
@@ -2322,35 +2399,41 @@ Eterm erts_mmap_info(int *print_to_p,
Eterm seg_tags[] = { am.used, am.max, am.allocated, am.reserved, am.used_sa, am.used_sua };
Eterm group[2];
Eterm group_tags[] = { am.sizes, am.free_segs };
- Eterm list[2];
- Eterm list_tags[2]; /* { am.supercarrier, am.os } */
- int lix;
+ Eterm list[3];
+ Eterm list_tags[3]; /* { am.options, am.supercarrier, am.os } */
+ int lix = 0;
Eterm res = THE_NON_VALUE;
if (!hpp) {
- erts_smp_mtx_lock(&mmap_state.mtx);
- emis->sizes[0] = mmap_state.size.supercarrier.total;
- emis->sizes[1] = mmap_state.sa.top - mmap_state.sa.bot;
- emis->sizes[2] = mmap_state.sua.top - mmap_state.sua.bot;
- emis->sizes[3] = mmap_state.size.supercarrier.used.total;
- emis->sizes[4] = mmap_state.size.supercarrier.used.sa;
- emis->sizes[5] = mmap_state.size.supercarrier.used.sua;
+ erts_smp_mtx_lock(&mm->mtx);
+ emis->sizes[0] = mm->size.supercarrier.total;
+ emis->sizes[1] = mm->sa.top - mm->sa.bot;
+ emis->sizes[2] = mm->sua.top - mm->sua.bot;
+ emis->sizes[3] = mm->size.supercarrier.used.total;
+ emis->sizes[4] = mm->size.supercarrier.used.sa;
+ emis->sizes[5] = mm->size.supercarrier.used.sua;
- emis->segs[0] = mmap_state.no.free_segs.curr;
- emis->segs[1] = mmap_state.no.free_segs.max;
- emis->segs[2] = mmap_state.no.free_seg_descs;
- emis->segs[3] = mmap_state.desc.reserved;
- emis->segs[4] = mmap_state.sa.map.nseg;
- emis->segs[5] = mmap_state.sua.map.nseg;
+ emis->segs[0] = mm->no.free_segs.curr;
+ emis->segs[1] = mm->no.free_segs.max;
+ emis->segs[2] = mm->no.free_seg_descs;
+ emis->segs[3] = mm->desc.reserved;
+ emis->segs[4] = mm->sa.map.nseg;
+ emis->segs[5] = mm->sua.map.nseg;
- emis->os_used = mmap_state.size.os.used;
- erts_smp_mtx_unlock(&mmap_state.mtx);
+ emis->os_used = mm->size.os.used;
+ erts_smp_mtx_unlock(&mm->mtx);
}
+ list[lix] = erts_mmap_info_options(mm, "option ", print_to_p, print_to_arg,
+ hpp, szp);
+ list_tags[lix] = am.options;
+ lix++;
+
+
if (print_to_p) {
int to = *print_to_p;
void *arg = print_to_arg;
- if (mmap_state.supercarrier) {
+ if (mm->supercarrier) {
const char* prefix = "supercarrier ";
erts_print(to, arg, "%stotal size: %bpu\n", prefix, emis->sizes[0]);
erts_print(to, arg, "%stotal sa size: %bpu\n", prefix, emis->sizes[1]);
@@ -2365,7 +2448,7 @@ Eterm erts_mmap_info(int *print_to_p,
erts_print(to, arg, "%ssa free segs: %bpu\n", prefix, emis->segs[4]);
erts_print(to, arg, "%ssua free segs: %bpu\n", prefix, emis->segs[5]);
}
- if (!mmap_state.no_os_mmap) {
+ if (!mm->no_os_mmap) {
erts_print(to, arg, "os mmap size used: %bpu\n", emis->os_used);
}
}
@@ -2376,8 +2459,7 @@ Eterm erts_mmap_info(int *print_to_p,
init_atoms();
}
- lix = 0;
- if (mmap_state.supercarrier) {
+ if (mm->supercarrier) {
group[0] = erts_bld_atom_uword_2tup_list(hpp, szp,
sizeof(size_tags)/sizeof(Eterm),
size_tags, emis->sizes);
@@ -2389,7 +2471,7 @@ Eterm erts_mmap_info(int *print_to_p,
lix++;
}
- if (!mmap_state.no_os_mmap) {
+ if (!mm->no_os_mmap) {
group[0] = erts_bld_atom_uword_2tup_list(hpp, szp,
1, &am.used, &emis->os_used);
list[lix] = erts_bld_2tup_list(hpp, szp, 1, group_tags, group);
@@ -2401,25 +2483,26 @@ Eterm erts_mmap_info(int *print_to_p,
return res;
}
-Eterm erts_mmap_info_options(char *prefix,
+Eterm erts_mmap_info_options(ErtsMemMapper* mm,
+ char *prefix,
int *print_to_p,
void *print_to_arg,
Uint **hpp,
Uint *szp)
{
- const UWord scs = mmap_state.sua.top - mmap_state.sa.bot;
- const Eterm sco = mmap_state.no_os_mmap ? am_true : am_false;
- const Eterm scrpm = (mmap_state.reserve_physical == reserve_noop) ? am_true : am_false;
+ const UWord scs = mm->sua.top - mm->sa.bot;
+ const Eterm sco = mm->no_os_mmap ? am_true : am_false;
+ const Eterm scrpm = (mm->reserve_physical == reserve_noop) ? am_true : am_false;
Eterm res = THE_NON_VALUE;
if (print_to_p) {
int to = *print_to_p;
void *arg = print_to_arg;
erts_print(to, arg, "%sscs: %bpu\n", prefix, scs);
- if (mmap_state.supercarrier) {
+ if (mm->supercarrier) {
erts_print(to, arg, "%ssco: %T\n", prefix, sco);
erts_print(to, arg, "%sscrpm: %T\n", prefix, scrpm);
- erts_print(to, arg, "%sscrfsd: %beu\n", prefix, mmap_state.desc.reserved);
+ erts_print(to, arg, "%sscrfsd: %beu\n", prefix, mm->desc.reserved);
}
}
@@ -2429,9 +2512,9 @@ Eterm erts_mmap_info_options(char *prefix,
}
res = NIL;
- if (mmap_state.supercarrier) {
+ if (mm->supercarrier) {
add_2tup(hpp, szp, &res, am.scrfsd,
- erts_bld_uint(hpp,szp, mmap_state.desc.reserved));
+ erts_bld_uint(hpp,szp, mm->desc.reserved));
add_2tup(hpp, szp, &res, am.scrpm, scrpm);
add_2tup(hpp, szp, &res, am.sco, sco);
}
@@ -2440,10 +2523,14 @@ Eterm erts_mmap_info_options(char *prefix,
return res;
}
+#endif /* HAVE_ERTS_MMAP */
Eterm erts_mmap_debug_info(Process* p)
{
- if (mmap_state.supercarrier) {
+#if HAVE_ERTS_MMAP
+ ErtsMemMapper* mm = &erts_dflt_mmapper;
+
+ if (mm->supercarrier) {
ERTS_DECL_AM(sabot);
ERTS_DECL_AM(satop);
ERTS_DECL_AM(suabot);
@@ -2453,18 +2540,17 @@ Eterm erts_mmap_debug_info(Process* p)
UWord values[4];
Eterm *hp, *hp_end;
Uint may_need;
- const Uint PTR_BIG_SZ = HALFWORD_HEAP ? 3 : 2;
-
- erts_smp_mtx_lock(&mmap_state.mtx);
- values[0] = (UWord)mmap_state.sa.bot;
- values[1] = (UWord)mmap_state.sa.top;
- values[2] = (UWord)mmap_state.sua.bot;
- values[3] = (UWord)mmap_state.sua.top;
- sa_list = build_free_seg_list(p, &mmap_state.sa.map);
- sua_list = build_free_seg_list(p, &mmap_state.sua.map);
- erts_smp_mtx_unlock(&mmap_state.mtx);
-
- may_need = 4*(2+3+PTR_BIG_SZ) + 2*(2+3);
+
+ erts_smp_mtx_lock(&mm->mtx);
+ values[0] = (UWord)mm->sa.bot;
+ values[1] = (UWord)mm->sa.top;
+ values[2] = (UWord)mm->sua.bot;
+ values[3] = (UWord)mm->sua.top;
+ sa_list = build_free_seg_list(p, &mm->sa.map);
+ sua_list = build_free_seg_list(p, &mm->sua.map);
+ erts_smp_mtx_unlock(&mm->mtx);
+
+ may_need = 4*(2+3+2) + 2*(2+3);
hp = HAlloc(p, may_need);
hp_end = hp + may_need;
@@ -2481,9 +2567,8 @@ Eterm erts_mmap_debug_info(Process* p)
HRelease(p, hp_end, hp);
return list;
}
- else {
- return am_undefined;
- }
+#endif
+ return am_undefined;
}
diff --git a/erts/emulator/sys/common/erl_mmap.h b/erts/emulator/sys/common/erl_mmap.h
index 66619c5161..fa51b663fa 100644
--- a/erts/emulator/sys/common/erl_mmap.h
+++ b/erts/emulator/sys/common/erl_mmap.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2013. All Rights Reserved.
+ * Copyright Ericsson AB 2013-2016. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -26,13 +26,47 @@
#define ERTS_MMAP_SUPERALIGNED_BITS (18)
/* Affects hard limits for sbct and lmbcs documented in erts_alloc.xml */
-#define ERTS_MMAPFLG_OS_ONLY (((Uint32) 1) << 0)
-#define ERTS_MMAPFLG_SUPERCARRIER_ONLY (((Uint32) 1) << 1)
-#define ERTS_MMAPFLG_SUPERALIGNED (((Uint32) 1) << 2)
+#ifndef HAVE_MMAP
+# define HAVE_MMAP 0
+#endif
+#ifndef HAVE_MREMAP
+# define HAVE_MREMAP 0
+#endif
+#if HAVE_MMAP
+# define ERTS_HAVE_OS_MMAP 1
+# define ERTS_HAVE_GENUINE_OS_MMAP 1
+# if HAVE_MREMAP
+# define ERTS_HAVE_OS_MREMAP 1
+# endif
+/*
+ * MAP_NORESERVE is undefined in FreeBSD 10.x and later.
+ * This is to enable 64bit HiPE experimentally on FreeBSD.
+ * Note that on FreeBSD MAP_NORESERVE was "never implemented"
+ * even before 11.x (and the flag does not exist in /usr/src/sys/vm/mmap.c
+ * of 10.3-STABLE r301478 either), and HiPE was working on OTP 18.3.3,
+ * so mandating MAP_NORESERVE on FreeBSD might not be needed.
+ * See the following message on how MAP_NORESERVE was treated on FreeBSD:
+ * <http://lists.llvm.org/pipermail/cfe-commits/Week-of-Mon-20150202/122958.html>
+ */
+# if defined(MAP_FIXED) && (defined(MAP_NORESERVE) || defined(__FreeBSD__))
+# define ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION 1
+# endif
+#endif
+
+#ifndef HAVE_VIRTUALALLOC
+# define HAVE_VIRTUALALLOC 0
+#endif
+#if HAVE_VIRTUALALLOC
+# define ERTS_HAVE_OS_MMAP 1
+#endif
+
+#ifdef ERTS_HAVE_GENUINE_OS_MMAP
+# define HAVE_ERTS_MMAP 1
+#else
+# define HAVE_ERTS_MMAP 0
+#endif
+
-#define ERTS_HAVE_ERTS_OS_MMAP (1 << 0)
-#define ERTS_HAVE_ERTS_SUPERCARRIER_MMAP (1 << 1)
-extern int erts_have_erts_mmap;
extern UWord erts_page_inv_mask;
typedef struct {
@@ -53,23 +87,16 @@ typedef struct {
#define ERTS_MMAP_INIT_DEFAULT_INITER \
{{NULL, NULL}, {NULL, NULL}, 0, 1, (1 << 16), 1}
-void *erts_mmap(Uint32 flags, UWord *sizep);
-void erts_munmap(Uint32 flags, void *ptr, UWord size);
-void *erts_mremap(Uint32 flags, void *ptr, UWord old_size, UWord *sizep);
-int erts_mmap_in_supercarrier(void *ptr);
-void erts_mmap_init(ErtsMMapInit*);
-struct erts_mmap_info_struct
-{
- UWord sizes[6];
- UWord segs[6];
- UWord os_used;
-};
-Eterm erts_mmap_info(int *print_to_p, void *print_to_arg,
- Eterm** hpp, Uint* szp, struct erts_mmap_info_struct*);
-Eterm erts_mmap_info_options(char *prefix, int *print_to_p, void *print_to_arg,
- Uint **hpp, Uint *szp);
-struct process;
-Eterm erts_mmap_debug_info(struct process*);
+#define ERTS_LITERAL_VIRTUAL_AREA_SIZE (UWORD_CONSTANT(1)*1024*1024*1024)
+
+#define ERTS_MMAP_INIT_LITERAL_INITER \
+ {{NULL, NULL}, {NULL, NULL}, ERTS_LITERAL_VIRTUAL_AREA_SIZE, 1, (1 << 10), 0}
+
+#define ERTS_HIPE_EXEC_VIRTUAL_AREA_SIZE (UWORD_CONSTANT(512)*1024*1024)
+
+#define ERTS_MMAP_INIT_HIPE_EXEC_INITER \
+ {{NULL, NULL}, {NULL, NULL}, ERTS_HIPE_EXEC_VIRTUAL_AREA_SIZE, 1, (1 << 10), 0}
+
#define ERTS_SUPERALIGNED_SIZE \
(1 << ERTS_MMAP_SUPERALIGNED_BITS)
@@ -97,29 +124,46 @@ Eterm erts_mmap_debug_info(struct process*);
#define ERTS_PAGEALIGNED_SIZE \
(ERTS_INV_PAGEALIGNED_MASK + 1)
-#ifndef HAVE_MMAP
-# define HAVE_MMAP 0
-#endif
-#ifndef HAVE_MREMAP
-# define HAVE_MREMAP 0
-#endif
-#if HAVE_MMAP
-# define ERTS_HAVE_OS_MMAP 1
-# define ERTS_HAVE_GENUINE_OS_MMAP 1
-# if HAVE_MREMAP
-# define ERTS_HAVE_OS_MREMAP 1
+struct process;
+Eterm erts_mmap_debug_info(struct process*);
+
+#if HAVE_ERTS_MMAP
+
+typedef struct ErtsMemMapper_ ErtsMemMapper;
+
+#define ERTS_MMAPFLG_OS_ONLY (((Uint32) 1) << 0)
+#define ERTS_MMAPFLG_SUPERCARRIER_ONLY (((Uint32) 1) << 1)
+#define ERTS_MMAPFLG_SUPERALIGNED (((Uint32) 1) << 2)
+
+void *erts_mmap(ErtsMemMapper*, Uint32 flags, UWord *sizep);
+void erts_munmap(ErtsMemMapper*, Uint32 flags, void *ptr, UWord size);
+void *erts_mremap(ErtsMemMapper*, Uint32 flags, void *ptr, UWord old_size, UWord *sizep);
+int erts_mmap_in_supercarrier(ErtsMemMapper*, void *ptr);
+void erts_mmap_init(ErtsMemMapper*, ErtsMMapInit*, int executable);
+struct erts_mmap_info_struct
+{
+ UWord sizes[6];
+ UWord segs[6];
+ UWord os_used;
+};
+Eterm erts_mmap_info(ErtsMemMapper*, int *print_to_p, void *print_to_arg,
+ Eterm** hpp, Uint* szp, struct erts_mmap_info_struct*);
+Eterm erts_mmap_info_options(ErtsMemMapper*,
+ char *prefix, int *print_to_p, void *print_to_arg,
+ Uint **hpp, Uint *szp);
+
+
+#ifdef ERTS_WANT_MEM_MAPPERS
+# include "erl_alloc_types.h"
+
+extern ErtsMemMapper erts_dflt_mmapper;
+# if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
+extern ErtsMemMapper erts_literal_mmapper;
# endif
-# if defined(MAP_FIXED) && defined(MAP_NORESERVE)
-# define ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION 1
+# ifdef ERTS_ALC_A_EXEC
+extern ErtsMemMapper erts_exec_mmapper;
# endif
-#endif
-
-#ifndef HAVE_VIRTUALALLOC
-# define HAVE_VIRTUALALLOC 0
-#endif
-#if HAVE_VIRTUALALLOC
-# define ERTS_HAVE_OS_MMAP 1
-#endif
+#endif /* ERTS_WANT_MEM_MAPPERS */
/*#define HARD_DEBUG_MSEG*/
#ifdef HARD_DEBUG_MSEG
@@ -132,4 +176,6 @@ void hard_dbg_remove_mseg(void* seg, UWord sz);
# define HARD_DBG_REMOVE_MSEG(SEG,SZ)
#endif
+#endif /* HAVE_ERTS_MMAP */
+
#endif /* ERL_MMAP_H__ */
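
As an illustration only (not part of this patch), here is a minimal sketch of how emulator-internal code could drive the reworked per-mapper API declared above. It assumes the ERTS build environment and the erts_dflt_mmapper instance exported under ERTS_WANT_MEM_MAPPERS; the helper names are made up for the sketch.

    /* Sketch only: relies on the erl_mmap.h declarations shown above. */
    #define ERTS_WANT_MEM_MAPPERS
    #include "erl_mmap.h"

    static void *get_superaligned(UWord *size_p)
    {
        /* Ask the default mapper for a superaligned segment; *size_p is an
         * in/out size, as in mseg_create() in erl_mseg.c. */
        return erts_mmap(&erts_dflt_mmapper, ERTS_MMAPFLG_SUPERALIGNED, size_p);
    }

    static void release_superaligned(void *seg, UWord size)
    {
        erts_munmap(&erts_dflt_mmapper, ERTS_MMAPFLG_SUPERALIGNED, seg, size);
    }
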
diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c
index 0d51aad863..f3306a888c 100644
--- a/erts/emulator/sys/common/erl_mseg.c
+++ b/erts/emulator/sys/common/erl_mseg.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2016. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -31,6 +31,7 @@
# include "config.h"
#endif
+#define ERTS_WANT_MEM_MAPPERS
#include "sys.h"
#include "erl_mseg.h"
#include "global.h"
@@ -99,17 +100,12 @@ static const int debruijn[32] = {
static int atoms_initialized;
-typedef struct mem_kind_t MemKind;
-
const ErtsMsegOpt_t erts_mseg_default_opt = {
1, /* Use cache */
1, /* Preserve data */
0, /* Absolute shrink threshold */
0, /* Relative shrink threshold */
0 /* Scheduler specific */
-#if HALFWORD_HEAP
- ,0 /* need low memory */
-#endif
};
@@ -142,7 +138,14 @@ struct cache_t_ {
typedef struct ErtsMsegAllctr_t_ ErtsMsegAllctr_t;
-struct mem_kind_t {
+struct ErtsMsegAllctr_t_ {
+ int ix;
+
+ int is_init_done;
+ int is_thread_safe;
+ erts_mtx_t mtx;
+
+ int is_cache_check_scheduled;
cache_t cache[MAX_CACHE_SIZE];
cache_t cache_unpowered_node;
@@ -168,29 +171,6 @@ struct mem_kind_t {
} max_ever;
} segments;
- ErtsMsegAllctr_t *ma;
- const char* name;
- MemKind* next;
-};/*MemKind*/
-
-struct ErtsMsegAllctr_t_ {
- int ix;
-
- int is_init_done;
- int is_thread_safe;
- erts_mtx_t mtx;
-
- int is_cache_check_scheduled;
-
- MemKind* mk_list;
-
-#if HALFWORD_HEAP
- MemKind low_mem;
- MemKind hi_mem;
-#else
- MemKind the_mem;
-#endif
-
Uint max_cache_size;
Uint abs_max_cache_bad_fit;
Uint rel_max_cache_bad_fit;
@@ -302,22 +282,17 @@ schedule_cache_check(ErtsMsegAllctr_t *ma) {
/* #define ERTS_PRINT_ERTS_MMAP */
static ERTS_INLINE void *
-mseg_create(ErtsMsegAllctr_t *ma, Uint flags, MemKind* mk, UWord *sizep)
+mseg_create(ErtsMsegAllctr_t *ma, Uint flags, UWord *sizep)
{
#ifdef ERTS_PRINT_ERTS_MMAP
UWord req_size = *sizep;
#endif
void *seg;
Uint32 mmap_flags = 0;
-#if HALFWORD_HEAP
- mmap_flags |= ((mk == &ma->low_mem)
- ? ERTS_MMAPFLG_SUPERCARRIER_ONLY
- : ERTS_MMAPFLG_OS_ONLY);
-#endif
if (MSEG_FLG_IS_2POW(flags))
mmap_flags |= ERTS_MMAPFLG_SUPERALIGNED;
- seg = erts_mmap(mmap_flags, sizep);
+ seg = erts_mmap(&erts_dflt_mmapper, mmap_flags, sizep);
#ifdef ERTS_PRINT_ERTS_MMAP
erts_fprintf(stderr, "%p = erts_mmap(%s, {%bpu, %bpu});\n", seg,
@@ -331,18 +306,13 @@ mseg_create(ErtsMsegAllctr_t *ma, Uint flags, MemKind* mk, UWord *sizep)
}
static ERTS_INLINE void
-mseg_destroy(ErtsMsegAllctr_t *ma, Uint flags, MemKind* mk, void *seg_p, UWord size) {
+mseg_destroy(ErtsMsegAllctr_t *ma, Uint flags, void *seg_p, UWord size) {
Uint32 mmap_flags = 0;
-#if HALFWORD_HEAP
- mmap_flags |= ((mk == &ma->low_mem)
- ? ERTS_MMAPFLG_SUPERCARRIER_ONLY
- : ERTS_MMAPFLG_OS_ONLY);
-#endif
if (MSEG_FLG_IS_2POW(flags))
mmap_flags |= ERTS_MMAPFLG_SUPERALIGNED;
- erts_munmap(mmap_flags, seg_p, size);
+ erts_munmap(&erts_dflt_mmapper, mmap_flags, seg_p, size);
#ifdef ERTS_PRINT_ERTS_MMAP
erts_fprintf(stderr, "erts_munmap(%s, %p, %bpu);\n",
(mmap_flags & ERTS_MMAPFLG_SUPERALIGNED) ? "sa" : "sua",
@@ -353,22 +323,17 @@ mseg_destroy(ErtsMsegAllctr_t *ma, Uint flags, MemKind* mk, void *seg_p, UWord s
}
static ERTS_INLINE void *
-mseg_recreate(ErtsMsegAllctr_t *ma, Uint flags, MemKind* mk, void *old_seg, UWord old_size, UWord *sizep)
+mseg_recreate(ErtsMsegAllctr_t *ma, Uint flags, void *old_seg, UWord old_size, UWord *sizep)
{
#ifdef ERTS_PRINT_ERTS_MMAP
UWord req_size = *sizep;
#endif
void *new_seg;
Uint32 mmap_flags = 0;
-#if HALFWORD_HEAP
- mmap_flags |= ((mk == &ma->low_mem)
- ? ERTS_MMAPFLG_SUPERCARRIER_ONLY
- : ERTS_MMAPFLG_OS_ONLY);
-#endif
if (MSEG_FLG_IS_2POW(flags))
mmap_flags |= ERTS_MMAPFLG_SUPERALIGNED;
- new_seg = erts_mremap(mmap_flags, old_seg, old_size, sizep);
+ new_seg = erts_mremap(&erts_dflt_mmapper, mmap_flags, old_seg, old_size, sizep);
#ifdef ERTS_PRINT_ERTS_MMAP
erts_fprintf(stderr, "%p = erts_mremap(%s, %p, %bpu, {%bpu, %bpu});\n",
@@ -392,11 +357,8 @@ do { \
|| erts_smp_thr_progress_is_blocking() \
|| ERTS_IS_CRASH_DUMPING); \
} while (0)
-#define ERTS_DBG_MK_CHK_THR_ACCESS(MK) \
- ERTS_DBG_MA_CHK_THR_ACCESS((MK)->ma)
#else
#define ERTS_DBG_MA_CHK_THR_ACCESS(MA)
-#define ERTS_DBG_MK_CHK_THR_ACCESS(MK)
#endif
/* Cache interface */
@@ -409,10 +371,10 @@ static ERTS_INLINE void mseg_cache_clear_node(cache_t *c) {
c->prev = c;
}
-static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, UWord size, Uint flags) {
+static ERTS_INLINE int cache_bless_segment(ErtsMsegAllctr_t *ma, void *seg, UWord size, Uint flags) {
cache_t *c;
- ERTS_DBG_MK_CHK_THR_ACCESS(mk);
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
ASSERT(!MSEG_FLG_IS_2POW(flags) || (MSEG_FLG_IS_2POW(flags) && MAP_IS_ALIGNED(seg) && IS_2POW(size)));
@@ -421,11 +383,11 @@ static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, UWord size, U
* Large blocks have no such cache and it is up to mseg to cache them to speed things up.
*/
- if (!erts_circleq_is_empty(&(mk->cache_free))) {
+ if (!erts_circleq_is_empty(&(ma->cache_free))) {
/* We have free slots, use one to cache the segment */
- c = erts_circleq_head(&(mk->cache_free));
+ c = erts_circleq_head(&(ma->cache_free));
erts_circleq_remove(c);
c->seg = seg;
@@ -437,29 +399,28 @@ static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, UWord size, U
ASSERT(ix < CACHE_AREAS);
ASSERT((1 << (ix + MSEG_ALIGN_BITS)) == size);
- erts_circleq_push_head(&(mk->cache_powered_node[ix]), c);
+ erts_circleq_push_head(&(ma->cache_powered_node[ix]), c);
} else
- erts_circleq_push_head(&(mk->cache_unpowered_node), c);
+ erts_circleq_push_head(&(ma->cache_unpowered_node), c);
- mk->cache_size++;
- ASSERT(mk->cache_size <= mk->ma->max_cache_size);
+ ma->cache_size++;
return 1;
- } else if (!MSEG_FLG_IS_2POW(flags) && !erts_circleq_is_empty(&(mk->cache_unpowered_node))) {
+ } else if (!MSEG_FLG_IS_2POW(flags) && !erts_circleq_is_empty(&(ma->cache_unpowered_node))) {
/* No free slots.
* Evict oldest slot from unpowered cache so we can cache an unpowered (sbc) segment */
- c = erts_circleq_tail(&(mk->cache_unpowered_node));
+ c = erts_circleq_tail(&(ma->cache_unpowered_node));
erts_circleq_remove(c);
- mseg_destroy(mk->ma, ERTS_MSEG_FLG_NONE, mk, c->seg, c->size);
+ mseg_destroy(ma, ERTS_MSEG_FLG_NONE, c->seg, c->size);
mseg_cache_clear_node(c);
c->seg = seg;
c->size = size;
- erts_circleq_push_head(&(mk->cache_unpowered_node), c);
+ erts_circleq_push_head(&(ma->cache_unpowered_node), c);
return 1;
} else if (!MSEG_FLG_IS_2POW(flags)) {
@@ -473,20 +434,20 @@ static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, UWord size, U
int i;
for( i = 0; i < CACHE_AREAS; i++) {
- if (erts_circleq_is_empty(&(mk->cache_powered_node[i])))
+ if (erts_circleq_is_empty(&(ma->cache_powered_node[i])))
continue;
- c = erts_circleq_tail(&(mk->cache_powered_node[i]));
+ c = erts_circleq_tail(&(ma->cache_powered_node[i]));
erts_circleq_remove(c);
- mseg_destroy(mk->ma, ERTS_MSEG_FLG_2POW, mk, c->seg, c->size);
+ mseg_destroy(ma, ERTS_MSEG_FLG_2POW, c->seg, c->size);
mseg_cache_clear_node(c);
c->seg = seg;
c->size = size;
- erts_circleq_push_head(&(mk->cache_unpowered_node), c);
+ erts_circleq_push_head(&(ma->cache_unpowered_node), c);
return 1;
}
@@ -495,11 +456,11 @@ static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, UWord size, U
return 0;
}
-static ERTS_INLINE void *cache_get_segment(MemKind *mk, UWord *size_p, Uint flags) {
+static ERTS_INLINE void *cache_get_segment(ErtsMsegAllctr_t *ma, UWord *size_p, Uint flags) {
UWord size = *size_p;
- ERTS_DBG_MK_CHK_THR_ACCESS(mk);
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
if (MSEG_FLG_IS_2POW(flags)) {
@@ -512,10 +473,10 @@ static ERTS_INLINE void *cache_get_segment(MemKind *mk, UWord *size_p, Uint flag
for( i = ix; i < CACHE_AREAS; i++) {
- if (erts_circleq_is_empty(&(mk->cache_powered_node[i])))
+ if (erts_circleq_is_empty(&(ma->cache_powered_node[i])))
continue;
- c = erts_circleq_head(&(mk->cache_powered_node[i]));
+ c = erts_circleq_head(&(ma->cache_powered_node[i]));
erts_circleq_remove(c);
ASSERT(IS_2POW(c->size));
@@ -524,31 +485,31 @@ static ERTS_INLINE void *cache_get_segment(MemKind *mk, UWord *size_p, Uint flag
csize = c->size;
seg = (char*) c->seg;
- mk->cache_size--;
- mk->cache_hits++;
+ ma->cache_size--;
+ ma->cache_hits++;
/* link to free cache list */
mseg_cache_clear_node(c);
- erts_circleq_push_head(&(mk->cache_free), c);
+ erts_circleq_push_head(&(ma->cache_free), c);
- ASSERT(!(mk->cache_size < 0));
+ ASSERT(!(ma->cache_size < 0));
if (csize != size)
- mseg_destroy(mk->ma, ERTS_MSEG_FLG_2POW, mk, seg + size, csize - size);
+ mseg_destroy(ma, ERTS_MSEG_FLG_2POW, seg + size, csize - size);
return seg;
}
}
- else if (!erts_circleq_is_empty(&(mk->cache_unpowered_node))) {
+ else if (!erts_circleq_is_empty(&(ma->cache_unpowered_node))) {
void *seg;
cache_t *c;
cache_t *best = NULL;
UWord bdiff = 0;
UWord csize;
- UWord bad_max_abs = mk->ma->abs_max_cache_bad_fit;
- UWord bad_max_rel = mk->ma->rel_max_cache_bad_fit;
+ UWord bad_max_abs = ma->abs_max_cache_bad_fit;
+ UWord bad_max_rel = ma->rel_max_cache_bad_fit;
- erts_circleq_foreach(c, &(mk->cache_unpowered_node)) {
+ erts_circleq_foreach(c, &(ma->cache_unpowered_node)) {
csize = c->size;
if (csize >= size) {
if (((csize - size)*100 < bad_max_rel*size) && (csize - size) < bad_max_abs ) {
@@ -557,11 +518,11 @@ static ERTS_INLINE void *cache_get_segment(MemKind *mk, UWord *size_p, Uint flag
erts_circleq_remove(c);
- mk->cache_size--;
- mk->cache_hits++;
+ ma->cache_size--;
+ ma->cache_hits++;
mseg_cache_clear_node(c);
- erts_circleq_push_head(&(mk->cache_free), c);
+ erts_circleq_push_head(&(ma->cache_free), c);
*size_p = csize;
@@ -584,7 +545,7 @@ static ERTS_INLINE void *cache_get_segment(MemKind *mk, UWord *size_p, Uint flag
ASSERT(best->seg);
ASSERT(best->size > 0);
- mk->cache_hits++;
+ ma->cache_hits++;
/* Use current cache placement for remaining segment space */
@@ -608,7 +569,7 @@ static ERTS_INLINE void *cache_get_segment(MemKind *mk, UWord *size_p, Uint flag
* using callbacks from aux-work in the scheduler.
*/
-static ERTS_INLINE Uint mseg_drop_one_memkind_cache_size(MemKind *mk, Uint flags, cache_t *head) {
+static ERTS_INLINE Uint mseg_drop_one_cache_size(ErtsMsegAllctr_t *ma, Uint flags, cache_t *head) {
cache_t *c = NULL;
c = erts_circleq_tail(head);
@@ -617,19 +578,19 @@ static ERTS_INLINE Uint mseg_drop_one_memkind_cache_size(MemKind *mk, Uint flags
if (erts_mtrace_enabled)
erts_mtrace_crr_free(SEGTYPE, SEGTYPE, c->seg);
- mseg_destroy(mk->ma, flags, mk, c->seg, c->size);
+ mseg_destroy(ma, flags, c->seg, c->size);
mseg_cache_clear_node(c);
- erts_circleq_push_head(&(mk->cache_free), c);
+ erts_circleq_push_head(&(ma->cache_free), c);
- mk->segments.current.watermark--;
- mk->cache_size--;
+ ma->segments.current.watermark--;
+ ma->cache_size--;
- ASSERT( mk->cache_size >= 0 );
+ ASSERT(ma->cache_size >= 0);
- return mk->cache_size;
+ return ma->cache_size;
}
-static ERTS_INLINE Uint mseg_drop_memkind_cache_size(MemKind *mk, Uint flags, cache_t *head) {
+static ERTS_INLINE Uint mseg_drop_cache_size(ErtsMsegAllctr_t *ma, Uint flags, cache_t *head) {
cache_t *c = NULL;
while (!erts_circleq_is_empty(head)) {
@@ -640,58 +601,52 @@ static ERTS_INLINE Uint mseg_drop_memkind_cache_size(MemKind *mk, Uint flags, ca
if (erts_mtrace_enabled)
erts_mtrace_crr_free(SEGTYPE, SEGTYPE, c->seg);
- mseg_destroy(mk->ma, flags, mk, c->seg, c->size);
+ mseg_destroy(ma, flags, c->seg, c->size);
mseg_cache_clear_node(c);
- erts_circleq_push_head(&(mk->cache_free), c);
-
- mk->segments.current.watermark--;
- mk->cache_size--;
+ erts_circleq_push_head(&(ma->cache_free), c);
+ ma->segments.current.watermark--;
+ ma->cache_size--;
}
- ASSERT( mk->cache_size >= 0 );
+ ASSERT(ma->cache_size >= 0);
- return mk->cache_size;
+ return ma->cache_size;
}
-/* mseg_check_memkind_cache
- * - Check if we can empty some cached segments in this
- * MemKind.
+/* mseg_check_cache
+ * - Check if we can empty some cached segments in this allocator
*/
-static Uint mseg_check_memkind_cache(MemKind *mk) {
+static Uint mseg_check_cache(ErtsMsegAllctr_t *ma) {
int i;
- ERTS_DBG_MK_CHK_THR_ACCESS(mk);
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
for (i = 0; i < CACHE_AREAS; i++) {
- if (!erts_circleq_is_empty(&(mk->cache_powered_node[i])))
- return mseg_drop_one_memkind_cache_size(mk, ERTS_MSEG_FLG_2POW, &(mk->cache_powered_node[i]));
+ if (!erts_circleq_is_empty(&(ma->cache_powered_node[i])))
+ return mseg_drop_one_cache_size(ma, ERTS_MSEG_FLG_2POW, &(ma->cache_powered_node[i]));
}
- if (!erts_circleq_is_empty(&(mk->cache_unpowered_node)))
- return mseg_drop_one_memkind_cache_size(mk, ERTS_MSEG_FLG_NONE, &(mk->cache_unpowered_node));
+ if (!erts_circleq_is_empty(&(ma->cache_unpowered_node)))
+ return mseg_drop_one_cache_size(ma, ERTS_MSEG_FLG_NONE, &(ma->cache_unpowered_node));
return 0;
}
/* mseg_cache_check
* - Check if we have some cache we can purge
- * in any of the memkinds.
*/
static void mseg_cache_check(ErtsMsegAllctr_t *ma) {
- MemKind* mk;
Uint empty_cache = 1;
ERTS_MSEG_LOCK(ma);
- for (mk = ma->mk_list; mk; mk = mk->next) {
- if (mseg_check_memkind_cache(mk))
- empty_cache = 0;
- }
+ if (mseg_check_cache(ma))
+ empty_cache = 0;
/* If all of the allocator's caches are empty,
* remove aux-work callback
@@ -709,7 +664,7 @@ static void mseg_cache_check(ErtsMsegAllctr_t *ma) {
/* erts_mseg_cache_check
* - This is a callback that is scheduled as aux-work from
* schedulers and is called at some interval if we have a cache
- * on this mseg-allocator and memkind.
+ * on this mseg-allocator.
* - Purpose: Empty cache slowly so we don't collect mapped areas
* and bloat memory.
*/
@@ -719,42 +674,32 @@ void erts_mseg_cache_check(void) {
}
-/* *_mseg_clear_*_cache
+/* mseg_clear_cache
* Remove cached segments from the allocator completely
*/
-static void mseg_clear_memkind_cache(MemKind *mk) {
+
+static void mseg_clear_cache(ErtsMsegAllctr_t *ma) {
int i;
+ ERTS_MSEG_LOCK(ma);
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
/* drop pow2 caches */
for (i = 0; i < CACHE_AREAS; i++) {
- if (erts_circleq_is_empty(&(mk->cache_powered_node[i])))
+ if (erts_circleq_is_empty(&(ma->cache_powered_node[i])))
continue;
- mseg_drop_memkind_cache_size(mk, ERTS_MSEG_FLG_2POW, &(mk->cache_powered_node[i]));
- ASSERT(erts_circleq_is_empty(&(mk->cache_powered_node[i])));
+ mseg_drop_cache_size(ma, ERTS_MSEG_FLG_2POW, &(ma->cache_powered_node[i]));
+ ASSERT(erts_circleq_is_empty(&(ma->cache_powered_node[i])));
}
/* drop varied caches */
- if (!erts_circleq_is_empty(&(mk->cache_unpowered_node)))
- mseg_drop_memkind_cache_size(mk, ERTS_MSEG_FLG_NONE, &(mk->cache_unpowered_node));
-
- ASSERT(erts_circleq_is_empty(&(mk->cache_unpowered_node)));
- ASSERT(mk->cache_size == 0);
-}
-
-static void mseg_clear_cache(ErtsMsegAllctr_t *ma) {
- MemKind* mk;
-
- ERTS_MSEG_LOCK(ma);
- ERTS_DBG_MA_CHK_THR_ACCESS(ma);
-
+ if (!erts_circleq_is_empty(&(ma->cache_unpowered_node)))
+ mseg_drop_cache_size(ma, ERTS_MSEG_FLG_NONE, &(ma->cache_unpowered_node));
- for (mk = ma->mk_list; mk; mk = mk->next) {
- mseg_clear_memkind_cache(mk);
- }
+ ASSERT(erts_circleq_is_empty(&(ma->cache_unpowered_node)));
+ ASSERT(ma->cache_size == 0);
INC_CC(ma, clear_cache);
-
ERTS_MSEG_UNLOCK(ma);
}
@@ -763,25 +708,12 @@ void erts_mseg_clear_cache(void) {
mseg_clear_cache(ERTS_MSEG_ALLCTR_IX(0));
}
-
-
-static ERTS_INLINE MemKind* memkind(ErtsMsegAllctr_t *ma,
- const ErtsMsegOpt_t *opt)
-{
-#if HALFWORD_HEAP
- return opt->low_mem ? &ma->low_mem : &ma->hi_mem;
-#else
- return &ma->the_mem;
-#endif
-}
-
static void *
mseg_alloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, UWord *size_p,
Uint flags, const ErtsMsegOpt_t *opt)
{
UWord size;
void *seg;
- MemKind* mk = memkind(ma, opt);
INC_CC(ma, alloc);
@@ -795,10 +727,10 @@ mseg_alloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, UWord *size_p,
}
}
- if (opt->cache && mk->cache_size > 0 && (seg = cache_get_segment(mk, &size, flags)) != NULL)
+ if (opt->cache && ma->cache_size > 0 && (seg = cache_get_segment(ma, &size, flags)) != NULL)
goto done;
- seg = mseg_create(ma, flags, mk, &size);
+ seg = mseg_create(ma, flags, &size);
if (!seg)
*size_p = 0;
@@ -808,7 +740,7 @@ done:
if (erts_mtrace_enabled)
erts_mtrace_crr_alloc(seg, atype, ERTS_MTRACE_SEGMENT_ID, size);
- ERTS_MSEG_ALLOC_STAT(mk,size);
+ ERTS_MSEG_ALLOC_STAT(ma,size);
}
return seg;
@@ -819,11 +751,9 @@ static void
mseg_dealloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, UWord size,
Uint flags, const ErtsMsegOpt_t *opt)
{
- MemKind* mk = memkind(ma, opt);
+ ERTS_MSEG_DEALLOC_STAT(ma,size);
- ERTS_MSEG_DEALLOC_STAT(mk,size);
-
- if (opt->cache && cache_bless_segment(mk, seg, size, flags)) {
+ if (opt->cache && cache_bless_segment(ma, seg, size, flags)) {
schedule_cache_check(ma);
goto done;
}
@@ -831,7 +761,7 @@ mseg_dealloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, UWord size,
if (erts_mtrace_enabled)
erts_mtrace_crr_free(atype, SEGTYPE, seg);
- mseg_destroy(ma, flags, mk, seg, size);
+ mseg_destroy(ma, flags, seg, size);
done:
@@ -842,7 +772,6 @@ static void *
mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg,
UWord old_size, UWord *new_size_p, Uint flags, const ErtsMsegOpt_t *opt)
{
- MemKind* mk;
void *new_seg;
UWord new_size;
@@ -861,7 +790,6 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg,
return NULL;
}
- mk = memkind(ma, opt);
new_seg = seg;
if (!MSEG_FLG_IS_2POW(flags))
@@ -876,7 +804,7 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg,
if (new_size > old_size) {
if (opt->preserv) {
- new_seg = mseg_recreate(ma, flags, mk, (void *) seg, old_size, &new_size);
+ new_seg = mseg_recreate(ma, flags, (void *) seg, old_size, &new_size);
if (!new_seg)
new_size = old_size;
}
@@ -896,7 +824,7 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg,
new_size = old_size;
}
else {
- new_seg = mseg_recreate(ma, flags, mk, (void *) seg, old_size, &new_size);
+ new_seg = mseg_recreate(ma, flags, (void *) seg, old_size, &new_size);
if (!new_seg)
new_size = old_size;
}
@@ -910,7 +838,7 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg,
ASSERT(!MSEG_FLG_IS_2POW(flags) || IS_2POW(new_size));
*new_size_p = new_size;
- ERTS_MSEG_REALLOC_STAT(mk, old_size, new_size);
+ ERTS_MSEG_REALLOC_STAT(ma, old_size, new_size);
return new_seg;
}
@@ -1068,9 +996,7 @@ info_options(ErtsMsegAllctr_t *ma,
Uint **hpp,
Uint *szp)
{
- Eterm res;
-
- res = erts_mmap_info_options(prefix, print_to_p, print_to_arg, hpp, szp);
+ Eterm res = NIL;
if (print_to_p) {
int to = *print_to_p;
@@ -1180,88 +1106,95 @@ info_calls(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, Uint **hpp
}
static Eterm
-info_status(ErtsMsegAllctr_t *ma, MemKind* mk, int *print_to_p, void *print_to_arg,
- int begin_new_max_period, Uint **hpp, Uint *szp)
+info_status(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg,
+ int begin_new_max_period, int only_sz, Uint **hpp, Uint *szp)
{
Eterm res = THE_NON_VALUE;
- if (mk->segments.max_ever.no < mk->segments.max.no)
- mk->segments.max_ever.no = mk->segments.max.no;
- if (mk->segments.max_ever.sz < mk->segments.max.sz)
- mk->segments.max_ever.sz = mk->segments.max.sz;
+ if (ma->segments.max_ever.no < ma->segments.max.no)
+ ma->segments.max_ever.no = ma->segments.max.no;
+ if (ma->segments.max_ever.sz < ma->segments.max.sz)
+ ma->segments.max_ever.sz = ma->segments.max.sz;
if (print_to_p) {
int to = *print_to_p;
void *arg = print_to_arg;
- erts_print(to, arg, "cached_segments: %beu\n", mk->cache_size);
- erts_print(to, arg, "cache_hits: %beu\n", mk->cache_hits);
- erts_print(to, arg, "segments: %beu %beu %beu\n",
- mk->segments.current.no, mk->segments.max.no, mk->segments.max_ever.no);
- erts_print(to, arg, "segments_size: %beu %beu %beu\n",
- mk->segments.current.sz, mk->segments.max.sz, mk->segments.max_ever.sz);
- erts_print(to, arg, "segments_watermark: %beu\n",
- mk->segments.current.watermark);
+ if (!only_sz) {
+ erts_print(to, arg, "cached_segments: %beu\n", ma->cache_size);
+ erts_print(to, arg, "cache_hits: %beu\n", ma->cache_hits);
+ erts_print(to, arg, "segments: %beu %beu %beu\n",
+ ma->segments.current.no, ma->segments.max.no, ma->segments.max_ever.no);
+ erts_print(to, arg, "segments_watermark: %beu\n",
+ ma->segments.current.watermark);
+ }
+ erts_print(to, arg, "segments_size: %beu %beu %beu\n",
+ ma->segments.current.sz, ma->segments.max.sz, ma->segments.max_ever.sz);
}
if (hpp || szp) {
res = NIL;
- add_2tup(hpp, szp, &res,
- am.segments_watermark,
- bld_unstable_uint(hpp, szp, mk->segments.current.watermark));
- add_4tup(hpp, szp, &res,
- am.segments_size,
- bld_unstable_uint(hpp, szp, mk->segments.current.sz),
- bld_unstable_uint(hpp, szp, mk->segments.max.sz),
- bld_unstable_uint(hpp, szp, mk->segments.max_ever.sz));
- add_4tup(hpp, szp, &res,
- am.segments,
- bld_unstable_uint(hpp, szp, mk->segments.current.no),
- bld_unstable_uint(hpp, szp, mk->segments.max.no),
- bld_unstable_uint(hpp, szp, mk->segments.max_ever.no));
- add_2tup(hpp, szp, &res,
- am.cache_hits,
- bld_unstable_uint(hpp, szp, mk->cache_hits));
- add_2tup(hpp, szp, &res,
- am.cached_segments,
- bld_unstable_uint(hpp, szp, mk->cache_size));
-
+ add_4tup(hpp, szp, &res,
+ am.segments_size,
+ bld_unstable_uint(hpp, szp, ma->segments.current.sz),
+ bld_unstable_uint(hpp, szp, ma->segments.max.sz),
+ bld_unstable_uint(hpp, szp, ma->segments.max_ever.sz));
+ if (!only_sz) {
+ add_2tup(hpp, szp, &res,
+ am.segments_watermark,
+ bld_unstable_uint(hpp, szp, ma->segments.current.watermark));
+ add_4tup(hpp, szp, &res,
+ am.segments,
+ bld_unstable_uint(hpp, szp, ma->segments.current.no),
+ bld_unstable_uint(hpp, szp, ma->segments.max.no),
+ bld_unstable_uint(hpp, szp, ma->segments.max_ever.no));
+ add_2tup(hpp, szp, &res,
+ am.cache_hits,
+ bld_unstable_uint(hpp, szp, ma->cache_hits));
+ add_2tup(hpp, szp, &res,
+ am.cached_segments,
+ bld_unstable_uint(hpp, szp, ma->cache_size));
+ }
}
if (begin_new_max_period) {
- mk->segments.max.no = mk->segments.current.no;
- mk->segments.max.sz = mk->segments.current.sz;
+ ma->segments.max.no = ma->segments.current.no;
+ ma->segments.max.sz = ma->segments.current.sz;
}
return res;
}
-static Eterm info_memkind(ErtsMsegAllctr_t *ma, MemKind* mk, int *print_to_p, void *print_to_arg,
- int begin_max_per, Uint **hpp, Uint *szp)
+static Eterm info_memkind(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg,
+ int begin_max_per, int only_sz, Uint **hpp, Uint *szp)
{
Eterm res = THE_NON_VALUE;
Eterm atoms[3];
Eterm values[3];
- if (print_to_p) {
- erts_print(*print_to_p, print_to_arg, "memory kind: %s\n", mk->name);
- }
- if (hpp || szp) {
- atoms[0] = am.name;
- atoms[1] = am.status;
- atoms[2] = am.calls;
- values[0] = erts_bld_string(hpp, szp, mk->name);
+ if (!only_sz) {
+ if (print_to_p) {
+ erts_print(*print_to_p, print_to_arg, "memory kind: %s\n", "all memory");
+ }
+ if (hpp || szp) {
+ atoms[0] = am.name;
+ atoms[1] = am.status;
+ atoms[2] = am.calls;
+ values[0] = erts_bld_string(hpp, szp, "all memory");
+ }
}
- values[1] = info_status(ma, mk, print_to_p, print_to_arg, begin_max_per, hpp, szp);
- values[2] = info_calls(ma, print_to_p, print_to_arg, hpp, szp);
+ res = info_status(ma, print_to_p, print_to_arg, begin_max_per, only_sz, hpp, szp);
+ if (!only_sz) {
+ values[1] = res;
+ values[2] = info_calls(ma, print_to_p, print_to_arg, hpp, szp);
- if (hpp || szp)
- res = bld_2tup_list(hpp, szp, 3, atoms, values);
+ if (hpp || szp)
+ res = bld_2tup_list(hpp, szp, 3, atoms, values);
+ }
return res;
}
-
static Eterm
info_version(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp)
{
@@ -1301,6 +1234,7 @@ erts_mseg_info(int ix,
int *print_to_p,
void *print_to_arg,
int begin_max_per,
+ int only_sz,
Uint **hpp,
Uint *szp)
{
@@ -1311,29 +1245,29 @@ erts_mseg_info(int ix,
Uint n = 0;
if (hpp || szp) {
-
- if (!atoms_initialized)
- init_atoms(ma);
-
- atoms[0] = am.version;
- atoms[1] = am.options;
- atoms[2] = am.memkind;
- atoms[3] = am.memkind;
+ if (!atoms_initialized)
+ init_atoms(ma);
+ }
+ if (!only_sz) {
+ if (hpp || szp) {
+ atoms[0] = am.version;
+ atoms[1] = am.options;
+ atoms[2] = am.memkind;
+ }
+ values[n++] = info_version(ma, print_to_p, print_to_arg, hpp, szp);
+ values[n++] = info_options(ma, "option ", print_to_p, print_to_arg, hpp, szp);
}
- values[n++] = info_version(ma, print_to_p, print_to_arg, hpp, szp);
- values[n++] = info_options(ma, "option ", print_to_p, print_to_arg, hpp, szp);
ERTS_MSEG_LOCK(ma);
ERTS_DBG_MA_CHK_THR_ACCESS(ma);
-#if HALFWORD_HEAP
- values[n++] = info_memkind(ma, &ma->low_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp);
- values[n++] = info_memkind(ma, &ma->hi_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp);
-#else
- values[n++] = info_memkind(ma, &ma->the_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp);
-#endif
- if (hpp || szp)
- res = bld_2tup_list(hpp, szp, n, atoms, values);
+ res = info_memkind(ma, print_to_p, print_to_arg, begin_max_per, only_sz, hpp, szp);
+
+ if (!only_sz) {
+ values[n++] = res;
+ if (hpp || szp)
+ res = bld_2tup_list(hpp, szp, n, atoms, values);
+ }
ERTS_MSEG_UNLOCK(ma);
@@ -1408,13 +1342,10 @@ Uint
erts_mseg_no(const ErtsMsegOpt_t *opt)
{
ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt);
- MemKind* mk;
- Uint n = 0;
+ Uint n;
ERTS_MSEG_LOCK(ma);
ERTS_DBG_MA_CHK_THR_ACCESS(ma);
- for (mk=ma->mk_list; mk; mk=mk->next) {
- n += mk->segments.current.no;
- }
+ n = ma->segments.current.no;
ERTS_MSEG_UNLOCK(ma);
return n;
}
@@ -1426,16 +1357,16 @@ erts_mseg_unit_size(void)
}
-static void mem_kind_init(ErtsMsegAllctr_t *ma, MemKind* mk, const char* name)
+static void mem_cache_init(ErtsMsegAllctr_t *ma)
{
int i;
/* Clear all cache headers */
- mseg_cache_clear_node(&(mk->cache_free));
- mseg_cache_clear_node(&(mk->cache_unpowered_node));
+ mseg_cache_clear_node(&(ma->cache_free));
+ mseg_cache_clear_node(&(ma->cache_unpowered_node));
for (i = 0; i < CACHE_AREAS; i++) {
- mseg_cache_clear_node(&(mk->cache_powered_node[i]));
+ mseg_cache_clear_node(&(ma->cache_powered_node[i]));
}
/* Populate cache free list */
@@ -1443,25 +1374,20 @@ static void mem_kind_init(ErtsMsegAllctr_t *ma, MemKind* mk, const char* name)
ASSERT(ma->max_cache_size <= MAX_CACHE_SIZE);
for (i = 0; i < ma->max_cache_size; i++) {
- mseg_cache_clear_node(&(mk->cache[i]));
- erts_circleq_push_head(&(mk->cache_free), &(mk->cache[i]));
+ mseg_cache_clear_node(&(ma->cache[i]));
+ erts_circleq_push_head(&(ma->cache_free), &(ma->cache[i]));
}
- mk->cache_size = 0;
- mk->cache_hits = 0;
-
- mk->segments.current.watermark = 0;
- mk->segments.current.no = 0;
- mk->segments.current.sz = 0;
- mk->segments.max.no = 0;
- mk->segments.max.sz = 0;
- mk->segments.max_ever.no = 0;
- mk->segments.max_ever.sz = 0;
-
- mk->ma = ma;
- mk->name = name;
- mk->next = ma->mk_list;
- ma->mk_list = mk;
+ ma->cache_size = 0;
+ ma->cache_hits = 0;
+
+ ma->segments.current.watermark = 0;
+ ma->segments.current.no = 0;
+ ma->segments.current.sz = 0;
+ ma->segments.max.no = 0;
+ ma->segments.max.sz = 0;
+ ma->segments.max_ever.no = 0;
+ ma->segments.max_ever.sz = 0;
}
void
@@ -1488,19 +1414,19 @@ erts_mseg_init(ErtsMsegInit_t *init)
erts_mtx_init(&init_atoms_mutex, "mseg_init_atoms");
-#if HALFWORD_HEAP
- if (sizeof(void *) != 8)
- erl_exit(-1,"Halfword emulator cannot be run in 32bit mode");
-
- init->mmap.virtual_range.start = (char *) sbrk(0);
- init->mmap.virtual_range.end = (char *) 0x100000000UL;
- init->mmap.sco = 0;
+#ifdef ERTS_ALC_A_EXEC
+ /* Initialize erts_exec_mmapper *FIRST*, to increase the probability
+ * of getting low memory for HiPE AMD64's small code model.
+ */
+ erts_mmap_init(&erts_exec_mmapper, &init->exec_mmap, 1);
+#endif
+ erts_mmap_init(&erts_dflt_mmapper, &init->dflt_mmap, 0);
+#if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
+ erts_mmap_init(&erts_literal_mmapper, &init->literal_mmap, 0);
#endif
-
- erts_mmap_init(&init->mmap);
if (!IS_2POW(GET_PAGE_SIZE))
- erl_exit(ERTS_ABORT_EXIT, "erts_mseg: Unexpected page_size %beu\n", GET_PAGE_SIZE);
+ erts_exit(ERTS_ABORT_EXIT, "erts_mseg: Unexpected page_size %beu\n", GET_PAGE_SIZE);
ASSERT((MSEG_ALIGNED_SIZE % GET_PAGE_SIZE) == 0);
@@ -1529,14 +1455,7 @@ erts_mseg_init(ErtsMsegInit_t *init)
if (ma->max_cache_size > MAX_CACHE_SIZE)
ma->max_cache_size = MAX_CACHE_SIZE;
- ma->mk_list = NULL;
-
-#if HALFWORD_HEAP
- mem_kind_init(ma, &ma->low_mem, "low memory");
- mem_kind_init(ma, &ma->hi_mem, "high memory");
-#else
- mem_kind_init(ma, &ma->the_mem, "all memory");
-#endif
+ mem_cache_init(ma);
sys_memzero((void *) &ma->calls, sizeof(ErtsMsegCalls));
}
@@ -1545,13 +1464,8 @@ erts_mseg_init(ErtsMsegInit_t *init)
static ERTS_INLINE Uint tot_cache_size(ErtsMsegAllctr_t *ma)
{
- MemKind* mk;
- Uint sz = 0;
ERTS_DBG_MA_CHK_THR_ACCESS(ma);
- for (mk=ma->mk_list; mk; mk=mk->next) {
- sz += mk->cache_size;
- }
- return sz;
+ return ma->cache_size;
}
/*
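
As a side note on the consolidated per-allocator cache above: cache_bless_segment()/cache_get_segment() keep superaligned segments in power-of-two buckets, where bucket ix holds segments of size 1 << (ix + MSEG_ALIGN_BITS) and ix < CACHE_AREAS. A generic, standalone illustration of that indexing follows; the constants are placeholders, not the ERTS values.

    #include <assert.h>

    #define ALIGN_BITS  18   /* placeholder for MSEG_ALIGN_BITS */
    #define CACHE_AREAS  8   /* placeholder; the real value lives in erl_mseg.c */

    /* Map a power-of-two segment size to its cache bucket index. */
    static int bucket_index(unsigned long size)
    {
        int ix = 0;
        assert(size != 0 && (size & (size - 1)) == 0);   /* power of two */
        while ((1UL << (ix + ALIGN_BITS)) != size)
            ix++;
        assert(ix < CACHE_AREAS);
        return ix;
    }
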
diff --git a/erts/emulator/sys/common/erl_mseg.h b/erts/emulator/sys/common/erl_mseg.h
index ba04e919fc..a43b409e94 100644
--- a/erts/emulator/sys/common/erl_mseg.h
+++ b/erts/emulator/sys/common/erl_mseg.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2016. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -42,16 +42,6 @@
#if ERTS_HAVE_MSEG_SUPER_ALIGNED
# define MSEG_ALIGN_BITS ERTS_MMAP_SUPERALIGNED_BITS
-#else
-/* If we don't use super aligned multiblock carriers
- * we will mmap with page size alignment (and thus use corresponding
- * align bits).
- *
- * Current implementation needs this to be a constant and
- * only uses this for user dev testing so setting page size
- * to 4096 (12 bits) is fine.
- */
-# define MSEG_ALIGN_BITS (12)
#endif
#if HAVE_ERTS_MSEG
@@ -69,7 +59,9 @@ typedef struct {
Uint rmcbf;
Uint mcs;
Uint nos;
- ErtsMMapInit mmap;
+ ErtsMMapInit dflt_mmap;
+ ErtsMMapInit literal_mmap;
+ ErtsMMapInit exec_mmap;
} ErtsMsegInit_t;
#define ERTS_MSEG_INIT_DEFAULT_INITIALIZER \
@@ -78,7 +70,9 @@ typedef struct {
20, /* rmcbf: Relative max cache bad fit */ \
10, /* mcs: Max cache size */ \
1000, /* cci: Cache check interval */ \
- ERTS_MMAP_INIT_DEFAULT_INITER \
+ ERTS_MMAP_INIT_DEFAULT_INITER, \
+ ERTS_MMAP_INIT_LITERAL_INITER, \
+ ERTS_MMAP_INIT_HIPE_EXEC_INITER \
}
typedef struct {
@@ -87,9 +81,6 @@ typedef struct {
UWord abs_shrink_th;
UWord rel_shrink_th;
int sched_spec;
-#if HALFWORD_HEAP
- int low_mem;
-#endif
} ErtsMsegOpt_t;
extern const ErtsMsegOpt_t erts_mseg_default_opt;
@@ -108,7 +99,7 @@ void erts_mseg_init(ErtsMsegInit_t *init);
void erts_mseg_late_init(void); /* Have to be called after all allocators,
threads and timers have been initialized. */
Eterm erts_mseg_info_options(int, int *, void*, Uint **, Uint *);
-Eterm erts_mseg_info(int, int *, void*, int, Uint **, Uint *);
+Eterm erts_mseg_info(int, int *, void*, int, int, Uint **, Uint *);
#endif /* #if HAVE_ERTS_MSEG */
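
For illustration only (not part of the patch): the init struct above now carries three separate ErtsMMapInit blocks, so a hypothetical caller of erts_mseg_init() could adjust them individually before handing the struct over.

    /* Sketch only: relies on the erl_mseg.h declarations shown above. */
    ErtsMsegInit_t init = ERTS_MSEG_INIT_DEFAULT_INITIALIZER;

    /* init.dflt_mmap, init.literal_mmap and init.exec_mmap may be tuned
     * here before the mseg allocator is brought up. */
    erts_mseg_init(&init);
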
diff --git a/erts/emulator/sys/common/erl_mtrace_sys_wrap.c b/erts/emulator/sys/common/erl_mtrace_sys_wrap.c
index a8c575835a..fc871f94f1 100644
--- a/erts/emulator/sys/common/erl_mtrace_sys_wrap.c
+++ b/erts/emulator/sys/common/erl_mtrace_sys_wrap.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2004-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2004-2016. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/erts/emulator/sys/common/erl_os_monotonic_time_extender.c b/erts/emulator/sys/common/erl_os_monotonic_time_extender.c
index b79485241f..d53190fdd5 100644
--- a/erts/emulator/sys/common/erl_os_monotonic_time_extender.c
+++ b/erts/emulator/sys/common/erl_os_monotonic_time_extender.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2015. All Rights Reserved.
+ * Copyright Ericsson AB 2015-2016. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -44,7 +44,7 @@ static void *os_monotonic_time_extender(void *vstatep)
erts_milli_sleep(sleep_time);
}
- erl_exit(ERTS_ABORT_EXIT, "os_monotonic_time_extender thread terminating");
+ erts_exit(ERTS_ABORT_EXIT, "os_monotonic_time_extender thread terminating");
return NULL;
}
diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c
index 21fa214364..b8a28bcc18 100644
--- a/erts/emulator/sys/common/erl_poll.c
+++ b/erts/emulator/sys/common/erl_poll.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2016. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -74,6 +74,8 @@
#include "erl_thr_progress.h"
#include "erl_driver.h"
#include "erl_alloc.h"
+#include "erl_msacc.h"
+#include "erl_misc_utils.h"
#if !defined(ERTS_POLL_USE_EPOLL) \
&& !defined(ERTS_POLL_USE_DEVPOLL) \
@@ -2131,16 +2133,19 @@ get_timeout(ErtsPollSet ps,
if (timeout > (ErtsMonotonicTime) INT_MAX)
timeout = (ErtsMonotonicTime) INT_MAX;
save_timeout_time += ERTS_MSEC_TO_MONOTONIC(timeout);
+ timeout -= ERTS_PREMATURE_TIMEOUT(timeout, 1000);
break;
case 1000000:
/* Round up to nearest even micro second */
timeout = ERTS_MONOTONIC_TO_USEC(diff_time - 1) + 1;
save_timeout_time += ERTS_USEC_TO_MONOTONIC(timeout);
+ timeout -= ERTS_PREMATURE_TIMEOUT(timeout, 1000*1000);
break;
case 1000000000:
/* Round up to nearest even nano second */
timeout = ERTS_MONOTONIC_TO_NSEC(diff_time - 1) + 1;
save_timeout_time += ERTS_NSEC_TO_MONOTONIC(timeout);
+ timeout -= ERTS_PREMATURE_TIMEOUT(timeout, 1000*1000*1000);
break;
default:
ERTS_INTERNAL_ERROR("Invalid resolution");
@@ -2238,6 +2243,7 @@ static ERTS_INLINE int
check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res)
{
int res;
+ ERTS_MSACC_PUSH_STATE_M();
if (erts_smp_atomic_read_nob(&ps->no_of_user_fds) == 0
&& timeout_time == ERTS_POLL_NO_TIMEOUT) {
/* Nothing to poll and zero timeout; done... */
@@ -2259,6 +2265,7 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res)
#ifdef ERTS_SMP
erts_thr_progress_prepare_wait(NULL);
#endif
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP);
timerfd_set(ps, &its);
res = epoll_wait(ps->kp_fd, ps->res_events, max_res, -1);
res = timerfd_clear(ps, res, max_res);
@@ -2268,10 +2275,12 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res)
}
#else /* !ERTS_POLL_USE_TIMERFD */
timeout = (int) get_timeout(ps, 1000, timeout_time);
+ if (timeout) {
#ifdef ERTS_SMP
- if (timeout)
erts_thr_progress_prepare_wait(NULL);
#endif
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP);
+ }
res = epoll_wait(ps->kp_fd, ps->res_events, max_res, timeout);
#endif /* !ERTS_POLL_USE_TIMERFD */
#elif ERTS_POLL_USE_KQUEUE /* --- kqueue ------------------------------ */
@@ -2279,10 +2288,12 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res)
if (max_res > ps->res_events_len)
grow_res_events(ps, max_res);
timeout = get_timeout_timespec(ps, &ts, timeout_time);
+ if (timeout) {
#ifdef ERTS_SMP
- if (timeout)
erts_thr_progress_prepare_wait(NULL);
#endif
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP);
+ }
res = kevent(ps->kp_fd, NULL, 0, ps->res_events, max_res, &ts);
#endif /* ----------------------------------------- */
}
@@ -2306,26 +2317,33 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res)
if (poll_res.dp_nfds > ps->res_events_len)
grow_res_events(ps, poll_res.dp_nfds);
poll_res.dp_fds = ps->res_events;
+ if (timeout) {
#ifdef ERTS_SMP
- if (timeout)
erts_thr_progress_prepare_wait(NULL);
#endif
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP);
+ }
poll_res.dp_timeout = timeout;
res = ioctl(ps->kp_fd, DP_POLL, &poll_res);
#elif ERTS_POLL_USE_POLL && defined(HAVE_PPOLL) /* --- ppoll ---------------- */
struct timespec ts;
timeout = get_timeout_timespec(ps, &ts, timeout_time);
+ if (timeout) {
#ifdef ERTS_SMP
- if (timeout)
erts_thr_progress_prepare_wait(NULL);
#endif
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP);
+ }
res = ppoll(ps->poll_fds, ps->no_poll_fds, &ts, NULL);
#elif ERTS_POLL_USE_POLL /* --- poll --------------------------------- */
timeout = (int) get_timeout(ps, 1000, timeout_time);
+
+ if (timeout) {
#ifdef ERTS_SMP
- if (timeout)
erts_thr_progress_prepare_wait(NULL);
#endif
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP);
+ }
res = poll(ps->poll_fds, ps->no_poll_fds, timeout);
#elif ERTS_POLL_USE_SELECT /* --- select ------------------------------ */
SysTimeval to;
@@ -2334,18 +2352,22 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res)
ERTS_FD_COPY(&ps->input_fds, &ps->res_input_fds);
ERTS_FD_COPY(&ps->output_fds, &ps->res_output_fds);
+ if (timeout) {
#ifdef ERTS_SMP
- if (timeout)
erts_thr_progress_prepare_wait(NULL);
#endif
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP);
+ }
res = ERTS_SELECT(ps->max_fd + 1,
&ps->res_input_fds,
&ps->res_output_fds,
NULL,
&to);
#ifdef ERTS_SMP
- if (timeout)
+ if (timeout) {
erts_thr_progress_finalize_wait(NULL);
+ ERTS_MSACC_POP_STATE_M();
+ }
if (res < 0
&& errno == EBADF
&& ERTS_POLLSET_HAVE_UPDATE_REQUESTS(ps)) {
@@ -2380,10 +2402,12 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res)
return res;
#endif /* ----------------------------------------- */
}
+ if (timeout) {
#ifdef ERTS_SMP
- if (timeout)
erts_thr_progress_finalize_wait(NULL);
#endif
+ ERTS_MSACC_POP_STATE_M();
+ }
return res;
}
}
@@ -2432,7 +2456,15 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
}
#endif
- res = check_fd_events(ps, to, no_fds);
+ while (1) {
+ res = check_fd_events(ps, to, no_fds);
+ if (res != 0)
+ break;
+ if (to == ERTS_POLL_NO_TIMEOUT)
+ break;
+ if (erts_get_monotonic_time(NULL) >= timeout_time)
+ break;
+ }
woke_up(ps);
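
For reference, the loop added above re-polls when check_fd_events() returns zero before the deadline; together with the ERTS_PREMATURE_TIMEOUT adjustments that shorten the timeout handed to the kernel, the poll now wakes slightly early and retries until erts_get_monotonic_time() reaches timeout_time. Below is a generic, self-contained sketch of the same re-poll-until-deadline pattern using plain poll(2); the names are illustrative and not from ERTS.

    #include <poll.h>
    #include <time.h>

    static long long now_ms(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (long long) ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
    }

    /* Poll until an fd is ready, an error occurs, or the deadline
     * (a CLOCK_MONOTONIC timestamp in milliseconds) has passed. */
    static int poll_until_deadline(struct pollfd *fds, nfds_t nfds,
                                   long long deadline_ms)
    {
        for (;;) {
            long long left = deadline_ms - now_ms();
            /* left fits in int for any realistic deadline */
            int res = poll(fds, nfds, left > 0 ? (int) left : 0);
            if (res != 0)      /* ready fds (>0) or error (<0): report it */
                return res;
            if (left <= 0)     /* deadline reached: genuine timeout */
                return 0;
            /* res == 0 with time remaining: premature wakeup, poll again. */
        }
    }
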
diff --git a/erts/emulator/sys/common/erl_poll.h b/erts/emulator/sys/common/erl_poll.h
index bd3a46ef0f..c16122610d 100644
--- a/erts/emulator/sys/common/erl_poll.h
+++ b/erts/emulator/sys/common/erl_poll.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2016. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -93,7 +93,7 @@
# if defined(ERTS_USE_POLL)
# undef ERTS_POLL_USE_POLL
# define ERTS_POLL_USE_POLL 1
-# elif !defined(__WIN32__) && !defined(__OSE__)
+# elif !defined(__WIN32__)
# undef ERTS_POLL_USE_SELECT
# define ERTS_POLL_USE_SELECT 1
# endif
@@ -104,31 +104,13 @@
typedef Uint32 ErtsPollEvents;
#undef ERTS_POLL_EV_E2N
-#if defined(__WIN32__) || defined(__OSE__) /* --- win32 or ose -------- */
+#if defined(__WIN32__) /* --- win32 --------------------------------------- */
#define ERTS_POLL_EV_IN 1
#define ERTS_POLL_EV_OUT 2
#define ERTS_POLL_EV_ERR 4
#define ERTS_POLL_EV_NVAL 8
-#ifdef __OSE__
-
-typedef struct ErtsPollOseMsgList_ {
- struct ErtsPollOseMsgList_ *next;
- union SIGNAL *data;
-} ErtsPollOseMsgList;
-
-struct erts_sys_fd_type {
- SIGSELECT signo;
- ErlDrvOseEventId id;
- ErtsPollOseMsgList *msgs;
- ErlDrvOseEventId (*resolve_signal)(union SIGNAL *sig);
- ethr_mutex mtx;
- void *extra;
-};
-
-#endif
-
#elif ERTS_POLL_USE_EPOLL /* --- epoll ------------------------------- */
#include <sys/epoll.h>
diff --git a/erts/emulator/sys/common/erl_sys_common_misc.c b/erts/emulator/sys/common/erl_sys_common_misc.c
index e292741bfa..79f87eb3a9 100644
--- a/erts/emulator/sys/common/erl_sys_common_misc.c
+++ b/erts/emulator/sys/common/erl_sys_common_misc.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2016. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/erts/emulator/sys/common/erl_util_queue.h b/erts/emulator/sys/common/erl_util_queue.h
index b50e062dd5..73293e0225 100644
--- a/erts/emulator/sys/common/erl_util_queue.h
+++ b/erts/emulator/sys/common/erl_util_queue.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2013. All Rights Reserved.
+ * Copyright Ericsson AB 2013-2016. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.