Diffstat (limited to 'erts/emulator/sys/common/erl_mseg.c'):
-rw-r--r--  erts/emulator/sys/common/erl_mseg.c | 1112
1 file changed, 856 insertions(+), 256 deletions(-)
diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c
index 5dfd66bd7c..eaef6680dd 100644
--- a/erts/emulator/sys/common/erl_mseg.c
+++ b/erts/emulator/sys/common/erl_mseg.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -35,6 +35,7 @@
#include "global.h"
#include "erl_threads.h"
#include "erl_mtrace.h"
+#include "erl_time.h"
#include "big.h"
#if HAVE_ERTS_MSEG
@@ -76,17 +77,29 @@ static int atoms_initialized;
static Uint cache_check_interval;
+typedef struct mem_kind_t MemKind;
+
static void check_cache(void *unused);
-static void mseg_clear_cache(void);
+static void mseg_clear_cache(MemKind*);
static int is_cache_check_scheduled;
#ifdef ERTS_THREADS_NO_SMP
static int is_cache_check_requested;
#endif
+#if HALFWORD_HEAP
+static int initialize_pmmap(void);
+static void *pmmap(size_t size);
+static int pmunmap(void *p, size_t size);
+static void *pmremap(void *old_address, size_t old_size,
+ size_t new_size);
+#endif
+
#if HAVE_MMAP
/* Mmap ... */
#define MMAP_PROT (PROT_READ|PROT_WRITE)
+
+
#ifdef MAP_ANON
# define MMAP_FLAGS (MAP_ANON|MAP_PRIVATE)
# define MMAP_FD (-1)
@@ -102,19 +115,35 @@ static int mmap_fd;
# define HAVE_MSEG_RECREATE 0
#endif
+#if HALFWORD_HEAP
+#define CAN_PARTLY_DESTROY 0
+#else
#define CAN_PARTLY_DESTROY 1
+#endif
#else /* #if HAVE_MMAP */
#define CAN_PARTLY_DESTROY 0
#error "Not supported"
#endif /* #if HAVE_MMAP */
+#if defined(ERTS_MSEG_FAKE_SEGMENTS) && HALFWORD_HEAP
+# warning "ERTS_MSEG_FAKE_SEGMENTS will only be used for high memory segments"
+#endif
#if defined(ERTS_MSEG_FAKE_SEGMENTS)
#undef CAN_PARTLY_DESTROY
#define CAN_PARTLY_DESTROY 0
#endif
-static const ErtsMsegOpt_t default_opt = ERTS_MSEG_DEFAULT_OPT_INITIALIZER;
+const ErtsMsegOpt_t erts_mseg_default_opt = {
+ 1, /* Use cache */
+ 1, /* Preserve data */
+ 0, /* Absolute shrink threshold */
+ 0 /* Relative shrink threshold */
+#if HALFWORD_HEAP
+ ,0 /* need low memory */
+#endif
+};
+
typedef struct cache_desc_t_ {
void *seg;
@@ -145,14 +174,43 @@ static struct {
CallCounter check_cache;
} calls;
-static cache_desc_t cache_descs[MAX_CACHE_SIZE];
-static cache_desc_t *free_cache_descs;
-static cache_desc_t *cache;
-static cache_desc_t *cache_end;
-static Uint cache_hits;
-static Uint cache_size;
-static Uint min_cached_seg_size;
-static Uint max_cached_seg_size;
+struct mem_kind_t {
+ cache_desc_t cache_descs[MAX_CACHE_SIZE];
+ cache_desc_t *free_cache_descs;
+ cache_desc_t *cache;
+ cache_desc_t *cache_end;
+
+ Uint cache_size;
+ Uint min_cached_seg_size;
+ Uint max_cached_seg_size;
+ Uint cache_hits;
+
+ struct {
+ struct {
+ Uint watermark;
+ Uint no;
+ Uint sz;
+ } current;
+ struct {
+ Uint no;
+ Uint sz;
+ } max;
+ struct {
+ Uint no;
+ Uint sz;
+ } max_ever;
+ } segments;
+
+ const char* name;
+ MemKind* next;
+};/*MemKind*/
+
+#if HALFWORD_HEAP
+static MemKind low_mem, hi_mem;
+#else
+static MemKind the_mem;
+#endif
+static MemKind* mk_list = NULL;
static Uint max_cache_size;
static Uint abs_max_cache_bad_fit;
@@ -162,47 +220,32 @@ static Uint rel_max_cache_bad_fit;
static Uint min_seg_size;
#endif
-struct {
- struct {
- Uint watermark;
- Uint no;
- Uint sz;
- } current;
- struct {
- Uint no;
- Uint sz;
- } max;
- struct {
- Uint no;
- Uint sz;
- } max_ever;
-} segments;
-#define ERTS_MSEG_ALLOC_STAT(SZ) \
+#define ERTS_MSEG_ALLOC_STAT(C,SZ) \
do { \
- segments.current.no++; \
- if (segments.max.no < segments.current.no) \
- segments.max.no = segments.current.no; \
- if (segments.current.watermark < segments.current.no) \
- segments.current.watermark = segments.current.no; \
- segments.current.sz += (SZ); \
- if (segments.max.sz < segments.current.sz) \
- segments.max.sz = segments.current.sz; \
+ C->segments.current.no++; \
+ if (C->segments.max.no < C->segments.current.no) \
+ C->segments.max.no = C->segments.current.no; \
+ if (C->segments.current.watermark < C->segments.current.no) \
+ C->segments.current.watermark = C->segments.current.no; \
+ C->segments.current.sz += (SZ); \
+ if (C->segments.max.sz < C->segments.current.sz) \
+ C->segments.max.sz = C->segments.current.sz; \
} while (0)
-#define ERTS_MSEG_DEALLOC_STAT(SZ) \
+#define ERTS_MSEG_DEALLOC_STAT(C,SZ) \
do { \
- ASSERT(segments.current.no > 0); \
- segments.current.no--; \
- ASSERT(segments.current.sz >= (SZ)); \
- segments.current.sz -= (SZ); \
+ ASSERT(C->segments.current.no > 0); \
+ C->segments.current.no--; \
+ ASSERT(C->segments.current.sz >= (SZ)); \
+ C->segments.current.sz -= (SZ); \
} while (0)
-#define ERTS_MSEG_REALLOC_STAT(OSZ, NSZ) \
+#define ERTS_MSEG_REALLOC_STAT(C,OSZ, NSZ) \
do { \
- ASSERT(segments.current.sz >= (OSZ)); \
- segments.current.sz -= (OSZ); \
- segments.current.sz += (NSZ); \
+ ASSERT(C->segments.current.sz >= (OSZ)); \
+ C->segments.current.sz -= (OSZ); \
+ C->segments.current.sz += (NSZ); \
} while (0)
#define ONE_GIGA (1000000000)
@@ -232,6 +275,7 @@ static void thread_safe_init(void)
{
erts_mtx_init(&init_atoms_mutex, "mseg_init_atoms");
erts_mtx_init(&mseg_mutex, "mseg");
+
#ifdef ERTS_THREADS_NO_SMP
main_tid = erts_thr_self();
#endif
@@ -256,7 +300,7 @@ schedule_cache_check(void)
#endif
{
cache_check_timer.active = 0;
- erl_set_timer(&cache_check_timer,
+ erts_set_timer(&cache_check_timer,
check_cache,
NULL,
NULL,
@@ -287,28 +331,45 @@ check_schedule_cache_check(void)
static void
mseg_shutdown(void)
{
+ MemKind* mk;
erts_mtx_lock(&mseg_mutex);
- mseg_clear_cache();
+ for (mk=mk_list; mk; mk=mk->next) {
+ mseg_clear_cache(mk);
+ }
erts_mtx_unlock(&mseg_mutex);
}
static ERTS_INLINE void *
-mseg_create(Uint size)
+mseg_create(MemKind* mk, Uint size)
{
void *seg;
ASSERT(size % page_size == 0);
+#if HALFWORD_HEAP
+ if (mk == &low_mem) {
+ seg = pmmap(size);
+ if ((unsigned long) seg & CHECK_POINTER_MASK) {
+ erts_fprintf(stderr,"Pointer mask failure (0x%08lx)\n",(unsigned long) seg);
+ return NULL;
+ }
+ }
+ else
+#endif
+ {
#if defined(ERTS_MSEG_FAKE_SEGMENTS)
- seg = erts_sys_alloc(ERTS_ALC_N_INVALID, NULL, size);
+ seg = erts_sys_alloc(ERTS_ALC_N_INVALID, NULL, size);
#elif HAVE_MMAP
- seg = (void *) mmap((void *) 0, (size_t) size,
- MMAP_PROT, MMAP_FLAGS, MMAP_FD, 0);
- if (seg == (void *) MAP_FAILED)
- seg = NULL;
+ {
+ seg = (void *) mmap((void *) 0, (size_t) size,
+ MMAP_PROT, MMAP_FLAGS, MMAP_FD, 0);
+ if (seg == (void *) MAP_FAILED)
+ seg = NULL;
+ }
#else
-#error "Missing mseg_create() implementation"
+# error "Missing mseg_create() implementation"
#endif
+ }
INC_CC(create);
@@ -316,23 +377,29 @@ mseg_create(Uint size)
}
static ERTS_INLINE void
-mseg_destroy(void *seg, Uint size)
+mseg_destroy(MemKind* mk, void *seg, Uint size)
{
-#if defined(ERTS_MSEG_FAKE_SEGMENTS)
- erts_sys_free(ERTS_ALC_N_INVALID, NULL, seg);
-#elif HAVE_MMAP
+ int res;
-#ifdef DEBUG
- int res =
+#if HALFWORD_HEAP
+ if (mk == &low_mem) {
+ res = pmunmap((void *) seg, size);
+ }
+ else
#endif
-
- munmap((void *) seg, size);
+ {
+#ifdef ERTS_MSEG_FAKE_SEGMENTS
+ erts_sys_free(ERTS_ALC_N_INVALID, NULL, seg);
+ res = 0;
+#elif HAVE_MMAP
+ res = munmap((void *) seg, size);
+#else
+# error "Missing mseg_destroy() implementation"
+#endif
+ }
ASSERT(size % page_size == 0);
ASSERT(res == 0);
-#else
-#error "Missing mseg_destroy() implementation"
-#endif
INC_CC(destroy);
@@ -341,25 +408,44 @@ mseg_destroy(void *seg, Uint size)
#if HAVE_MSEG_RECREATE
static ERTS_INLINE void *
-mseg_recreate(void *old_seg, Uint old_size, Uint new_size)
+mseg_recreate(MemKind* mk, void *old_seg, Uint old_size, Uint new_size)
{
void *new_seg;
ASSERT(old_size % page_size == 0);
ASSERT(new_size % page_size == 0);
+#if HALFWORD_HEAP
+ if (mk == &low_mem) {
+ new_seg = (void *) pmremap((void *) old_seg,
+ (size_t) old_size,
+ (size_t) new_size);
+ }
+ else
+#endif
+ {
#if defined(ERTS_MSEG_FAKE_SEGMENTS)
- new_seg = erts_sys_realloc(ERTS_ALC_N_INVALID, NULL, old_seg, new_size);
+ new_seg = erts_sys_realloc(ERTS_ALC_N_INVALID, NULL, old_seg, new_size);
#elif HAVE_MREMAP
- new_seg = (void *) mremap((void *) old_seg,
- (size_t) old_size,
- (size_t) new_size,
- MREMAP_MAYMOVE);
- if (new_seg == (void *) MAP_FAILED)
- new_seg = NULL;
+
+ #if defined(__NetBSD__)
+ new_seg = (void *) mremap((void *) old_seg,
+ (size_t) old_size,
+ NULL,
+ (size_t) new_size,
+ 0);
+ #else
+ new_seg = (void *) mremap((void *) old_seg,
+ (size_t) old_size,
+ (size_t) new_size,
+ MREMAP_MAYMOVE);
+ #endif
+ if (new_seg == (void *) MAP_FAILED)
+ new_seg = NULL;
#else
#error "Missing mseg_recreate() implementation"
#endif
+ }
INC_CC(recreate);
@@ -370,134 +456,142 @@ mseg_recreate(void *old_seg, Uint old_size, Uint new_size)
static ERTS_INLINE cache_desc_t *
-alloc_cd(void)
+alloc_cd(MemKind* mk)
{
- cache_desc_t *cd = free_cache_descs;
+ cache_desc_t *cd = mk->free_cache_descs;
ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
if (cd)
- free_cache_descs = cd->next;
+ mk->free_cache_descs = cd->next;
return cd;
}
static ERTS_INLINE void
-free_cd(cache_desc_t *cd)
+free_cd(MemKind* mk, cache_desc_t *cd)
{
ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
- cd->next = free_cache_descs;
- free_cache_descs = cd;
+ cd->next = mk->free_cache_descs;
+ mk->free_cache_descs = cd;
}
static ERTS_INLINE void
-link_cd(cache_desc_t *cd)
+link_cd(MemKind* mk, cache_desc_t *cd)
{
ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
- if (cache)
- cache->prev = cd;
- cd->next = cache;
+ if (mk->cache)
+ mk->cache->prev = cd;
+ cd->next = mk->cache;
cd->prev = NULL;
- cache = cd;
+ mk->cache = cd;
- if (!cache_end) {
+ if (!mk->cache_end) {
ASSERT(!cd->next);
- cache_end = cd;
+ mk->cache_end = cd;
}
- cache_size++;
+ mk->cache_size++;
}
+#if CAN_PARTLY_DESTROY
static ERTS_INLINE void
-end_link_cd(cache_desc_t *cd)
+end_link_cd(MemKind* mk, cache_desc_t *cd)
{
ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
- if (cache_end)
- cache_end->next = cd;
+ if (mk->cache_end)
+ mk->cache_end->next = cd;
cd->next = NULL;
- cd->prev = cache_end;
- cache_end = cd;
+ cd->prev = mk->cache_end;
+ mk->cache_end = cd;
- if (!cache) {
+ if (!mk->cache) {
ASSERT(!cd->prev);
- cache = cd;
+ mk->cache = cd;
}
- cache_size++;
+ mk->cache_size++;
}
+#endif
static ERTS_INLINE void
-unlink_cd(cache_desc_t *cd)
+unlink_cd(MemKind* mk, cache_desc_t *cd)
{
ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
if (cd->next)
cd->next->prev = cd->prev;
else
- cache_end = cd->prev;
+ mk->cache_end = cd->prev;
if (cd->prev)
cd->prev->next = cd->next;
else
- cache = cd->next;
- ASSERT(cache_size > 0);
- cache_size--;
+ mk->cache = cd->next;
+ ASSERT(mk->cache_size > 0);
+ mk->cache_size--;
}
static ERTS_INLINE void
-check_cache_limits(void)
+check_cache_limits(MemKind* mk)
{
cache_desc_t *cd;
ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
- max_cached_seg_size = 0;
- min_cached_seg_size = ~((Uint) 0);
- for (cd = cache; cd; cd = cd->next) {
- if (cd->size < min_cached_seg_size)
- min_cached_seg_size = cd->size;
- if (cd->size > max_cached_seg_size)
- max_cached_seg_size = cd->size;
+ mk->max_cached_seg_size = 0;
+ mk->min_cached_seg_size = ~((Uint) 0);
+ for (cd = mk->cache; cd; cd = cd->next) {
+ if (cd->size < mk->min_cached_seg_size)
+ mk->min_cached_seg_size = cd->size;
+ if (cd->size > mk->max_cached_seg_size)
+ mk->max_cached_seg_size = cd->size;
}
-
}
static ERTS_INLINE void
-adjust_cache_size(int force_check_limits)
+adjust_cache_size(MemKind* mk, int force_check_limits)
{
cache_desc_t *cd;
int check_limits = force_check_limits;
- Sint max_cached = ((Sint) segments.current.watermark
- - (Sint) segments.current.no);
+ Sint max_cached = ((Sint) mk->segments.current.watermark
+ - (Sint) mk->segments.current.no);
ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
- while (((Sint) cache_size) > max_cached && ((Sint) cache_size) > 0) {
- ASSERT(cache_end);
- cd = cache_end;
+ while (((Sint) mk->cache_size) > max_cached && ((Sint) mk->cache_size) > 0) {
+ ASSERT(mk->cache_end);
+ cd = mk->cache_end;
if (!check_limits &&
- !(min_cached_seg_size < cd->size
- && cd->size < max_cached_seg_size)) {
+ !(mk->min_cached_seg_size < cd->size
+ && cd->size < mk->max_cached_seg_size)) {
check_limits = 1;
}
if (erts_mtrace_enabled)
erts_mtrace_crr_free(SEGTYPE, SEGTYPE, cd->seg);
- mseg_destroy(cd->seg, cd->size);
- unlink_cd(cd);
- free_cd(cd);
+ mseg_destroy(mk, cd->seg, cd->size);
+ unlink_cd(mk,cd);
+ free_cd(mk,cd);
}
if (check_limits)
- check_cache_limits();
-
+ check_cache_limits(mk);
}
static void
-check_cache(void *unused)
+check_one_cache(MemKind* mk)
+{
+ if (mk->segments.current.watermark > mk->segments.current.no)
+ mk->segments.current.watermark--;
+ adjust_cache_size(mk, 0);
+
+ if (mk->cache_size)
+ schedule_cache_check();
+}
+
+static void check_cache(void* unused)
{
+ MemKind* mk;
erts_mtx_lock(&mseg_mutex);
is_cache_check_scheduled = 0;
- if (segments.current.watermark > segments.current.no)
- segments.current.watermark--;
- adjust_cache_size(0);
-
- if (cache_size)
- schedule_cache_check();
+ for (mk=mk_list; mk; mk=mk->next) {
+ check_one_cache(mk);
+ }
INC_CC(check_cache);
@@ -505,28 +599,37 @@ check_cache(void *unused)
}
static void
-mseg_clear_cache(void)
+mseg_clear_cache(MemKind* mk)
{
- segments.current.watermark = 0;
+ mk->segments.current.watermark = 0;
- adjust_cache_size(1);
+ adjust_cache_size(mk, 1);
- ASSERT(!cache);
- ASSERT(!cache_end);
- ASSERT(!cache_size);
+ ASSERT(!mk->cache);
+ ASSERT(!mk->cache_end);
+ ASSERT(!mk->cache_size);
- segments.current.watermark = segments.current.no;
+ mk->segments.current.watermark = mk->segments.current.no;
INC_CC(clear_cache);
}
+static ERTS_INLINE MemKind* memkind(const ErtsMsegOpt_t *opt)
+{
+#if HALFWORD_HEAP
+ return opt->low_mem ? &low_mem : &hi_mem;
+#else
+ return &the_mem;
+#endif
+}
+
static void *
mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
{
-
Uint max, min, diff_size, size;
cache_desc_t *cd, *cand_cd;
void *seg;
+ MemKind* mk = memkind(opt);
INC_CC(alloc);
@@ -539,11 +642,11 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
if (!opt->cache) {
create_seg:
- adjust_cache_size(0);
- seg = mseg_create(size);
+ adjust_cache_size(mk,0);
+ seg = mseg_create(mk, size);
if (!seg) {
- mseg_clear_cache();
- seg = mseg_create(size);
+ mseg_clear_cache(mk);
+ seg = mseg_create(mk, size);
if (!seg)
size = 0;
}
@@ -552,17 +655,17 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
if (seg) {
if (erts_mtrace_enabled)
erts_mtrace_crr_alloc(seg, atype, ERTS_MTRACE_SEGMENT_ID, size);
- ERTS_MSEG_ALLOC_STAT(size);
+ ERTS_MSEG_ALLOC_STAT(mk,size);
}
return seg;
}
- if (size > max_cached_seg_size)
+ if (size > mk->max_cached_seg_size)
goto create_seg;
- if (size < min_cached_seg_size) {
+ if (size < mk->min_cached_seg_size) {
- diff_size = min_cached_seg_size - size;
+ diff_size = mk->min_cached_seg_size - size;
if (diff_size > abs_max_cache_bad_fit)
goto create_seg;
@@ -576,7 +679,7 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
min = ~((Uint) 0);
cand_cd = NULL;
- for (cd = cache; cd; cd = cd->next) {
+ for (cd = mk->cache; cd; cd = cd->next) {
if (cd->size >= size) {
if (!cand_cd) {
cand_cd = cd;
@@ -597,8 +700,8 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
min = cd->size;
}
- min_cached_seg_size = min;
- max_cached_seg_size = max;
+ mk->min_cached_seg_size = min;
+ mk->max_cached_seg_size = max;
if (!cand_cd)
goto create_seg;
@@ -607,20 +710,20 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
if (diff_size > abs_max_cache_bad_fit
|| 100*PAGES(diff_size) > rel_max_cache_bad_fit*PAGES(size)) {
- if (max_cached_seg_size < cand_cd->size)
- max_cached_seg_size = cand_cd->size;
- if (min_cached_seg_size > cand_cd->size)
- min_cached_seg_size = cand_cd->size;
+ if (mk->max_cached_seg_size < cand_cd->size)
+ mk->max_cached_seg_size = cand_cd->size;
+ if (mk->min_cached_seg_size > cand_cd->size)
+ mk->min_cached_seg_size = cand_cd->size;
goto create_seg;
}
- cache_hits++;
+ mk->cache_hits++;
size = cand_cd->size;
seg = cand_cd->seg;
- unlink_cd(cand_cd);
- free_cd(cand_cd);
+ unlink_cd(mk,cand_cd);
+ free_cd(mk,cand_cd);
*size_p = size;
@@ -630,7 +733,8 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
}
if (seg)
- ERTS_MSEG_ALLOC_STAT(size);
+ ERTS_MSEG_ALLOC_STAT(mk,size);
+
return seg;
}
@@ -639,41 +743,42 @@ static void
mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size,
const ErtsMsegOpt_t *opt)
{
+ MemKind* mk = memkind(opt);
cache_desc_t *cd;
- ERTS_MSEG_DEALLOC_STAT(size);
+ ERTS_MSEG_DEALLOC_STAT(mk,size);
if (!opt->cache || max_cache_size == 0) {
if (erts_mtrace_enabled)
erts_mtrace_crr_free(atype, SEGTYPE, seg);
- mseg_destroy(seg, size);
+ mseg_destroy(mk, seg, size);
}
else {
int check_limits = 0;
- if (size < min_cached_seg_size)
- min_cached_seg_size = size;
- if (size > max_cached_seg_size)
- max_cached_seg_size = size;
-
- if (!free_cache_descs) {
- cd = cache_end;
- if (!(min_cached_seg_size < cd->size
- && cd->size < max_cached_seg_size)) {
+ if (size < mk->min_cached_seg_size)
+ mk->min_cached_seg_size = size;
+ if (size > mk->max_cached_seg_size)
+ mk->max_cached_seg_size = size;
+
+ if (!mk->free_cache_descs) {
+ cd = mk->cache_end;
+ if (!(mk->min_cached_seg_size < cd->size
+ && cd->size < mk->max_cached_seg_size)) {
check_limits = 1;
}
if (erts_mtrace_enabled)
erts_mtrace_crr_free(SEGTYPE, SEGTYPE, cd->seg);
- mseg_destroy(cd->seg, cd->size);
- unlink_cd(cd);
- free_cd(cd);
+ mseg_destroy(mk, cd->seg, cd->size);
+ unlink_cd(mk,cd);
+ free_cd(mk,cd);
}
- cd = alloc_cd();
+ cd = alloc_cd(mk);
ASSERT(cd);
cd->seg = seg;
cd->size = size;
- link_cd(cd);
+ link_cd(mk,cd);
if (erts_mtrace_enabled) {
erts_mtrace_crr_free(atype, SEGTYPE, seg);
@@ -683,7 +788,7 @@ mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size,
/* ASSERT(segments.current.watermark >= segments.current.no + cache_size); */
if (check_limits)
- check_cache_limits();
+ check_cache_limits(mk);
schedule_cache_check();
@@ -696,6 +801,7 @@ static void *
mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
const ErtsMsegOpt_t *opt)
{
+ MemKind* mk = memkind(opt);
void *new_seg;
Uint new_size;
@@ -733,15 +839,15 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
#if CAN_PARTLY_DESTROY
if (shrink_sz > min_seg_size
- && free_cache_descs
+ && mk->free_cache_descs
&& opt->cache) {
cache_desc_t *cd;
- cd = alloc_cd();
+ cd = alloc_cd(mk);
ASSERT(cd);
cd->seg = ((char *) seg) + new_size;
cd->size = shrink_sz;
- end_link_cd(cd);
+ end_link_cd(mk,cd);
if (erts_mtrace_enabled) {
erts_mtrace_crr_realloc(new_seg,
@@ -760,7 +866,7 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
SEGTYPE,
seg,
new_size);
- mseg_destroy(((char *) seg) + new_size, shrink_sz);
+ mseg_destroy(mk, ((char *) seg) + new_size, shrink_sz);
}
#elif HAVE_MSEG_RECREATE
@@ -794,7 +900,7 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
#if !CAN_PARTLY_DESTROY
do_recreate:
#endif
- new_seg = mseg_recreate((void *) seg, old_size, new_size);
+ new_seg = mseg_recreate(mk, (void *) seg, old_size, new_size);
if (erts_mtrace_enabled)
erts_mtrace_crr_realloc(new_seg, atype, SEGTYPE, seg, new_size);
if (!new_seg)
@@ -817,7 +923,7 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
*new_size_p = new_size;
- ERTS_MSEG_REALLOC_STAT(old_size, new_size);
+ ERTS_MSEG_REALLOC_STAT(mk, old_size, new_size);
return new_seg;
}
@@ -833,6 +939,8 @@ static struct {
Eterm mcs;
Eterm cci;
+ Eterm memkind;
+ Eterm name;
Eterm status;
Eterm cached_segments;
Eterm cache_hits;
@@ -882,6 +990,8 @@ init_atoms(void)
#endif
AM_INIT(version);
+ AM_INIT(memkind);
+ AM_INIT(name);
AM_INIT(options);
AM_INIT(amcbf);
@@ -983,10 +1093,10 @@ info_options(char *prefix,
if (print_to_p) {
int to = *print_to_p;
void *arg = print_to_arg;
- erts_print(to, arg, "%samcbf: %bpu\n", prefix, abs_max_cache_bad_fit);
- erts_print(to, arg, "%srmcbf: %bpu\n", prefix, rel_max_cache_bad_fit);
- erts_print(to, arg, "%smcs: %bpu\n", prefix, max_cache_size);
- erts_print(to, arg, "%scci: %bpu\n", prefix, cache_check_interval);
+ erts_print(to, arg, "%samcbf: %beu\n", prefix, abs_max_cache_bad_fit);
+ erts_print(to, arg, "%srmcbf: %beu\n", prefix, rel_max_cache_bad_fit);
+ erts_print(to, arg, "%smcs: %beu\n", prefix, max_cache_size);
+ erts_print(to, arg, "%scci: %beu\n", prefix, cache_check_interval);
}
if (hpp || szp) {
@@ -1022,9 +1132,9 @@ info_calls(int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp)
#define PRINT_CC(TO, TOA, CC) \
if (calls.CC.giga_no == 0) \
- erts_print(TO, TOA, "mseg_%s calls: %bpu\n", #CC, calls.CC.no); \
+ erts_print(TO, TOA, "mseg_%s calls: %b32u\n", #CC, calls.CC.no); \
else \
- erts_print(TO, TOA, "mseg_%s calls: %bpu%09bpu\n", #CC, \
+ erts_print(TO, TOA, "mseg_%s calls: %b32u%09b32u\n", #CC, \
calls.CC.giga_no, calls.CC.no)
int to = *print_to_p;
@@ -1092,65 +1202,88 @@ info_calls(int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp)
}
static Eterm
-info_status(int *print_to_p,
- void *print_to_arg,
- int begin_new_max_period,
- Uint **hpp,
- Uint *szp)
+info_status(MemKind* mk, int *print_to_p, void *print_to_arg,
+ int begin_new_max_period, Uint **hpp, Uint *szp)
{
Eterm res = THE_NON_VALUE;
- if (segments.max_ever.no < segments.max.no)
- segments.max_ever.no = segments.max.no;
- if (segments.max_ever.sz < segments.max.sz)
- segments.max_ever.sz = segments.max.sz;
+ if (mk->segments.max_ever.no < mk->segments.max.no)
+ mk->segments.max_ever.no = mk->segments.max.no;
+ if (mk->segments.max_ever.sz < mk->segments.max.sz)
+ mk->segments.max_ever.sz = mk->segments.max.sz;
if (print_to_p) {
int to = *print_to_p;
void *arg = print_to_arg;
- erts_print(to, arg, "cached_segments: %bpu\n", cache_size);
- erts_print(to, arg, "cache_hits: %bpu\n", cache_hits);
- erts_print(to, arg, "segments: %bpu %bpu %bpu\n",
- segments.current.no, segments.max.no, segments.max_ever.no);
- erts_print(to, arg, "segments_size: %bpu %bpu %bpu\n",
- segments.current.sz, segments.max.sz, segments.max_ever.sz);
- erts_print(to, arg, "segments_watermark: %bpu\n",
- segments.current.watermark);
+ erts_print(to, arg, "cached_segments: %beu\n", mk->cache_size);
+ erts_print(to, arg, "cache_hits: %beu\n", mk->cache_hits);
+ erts_print(to, arg, "segments: %beu %beu %beu\n",
+ mk->segments.current.no, mk->segments.max.no, mk->segments.max_ever.no);
+ erts_print(to, arg, "segments_size: %beu %beu %beu\n",
+ mk->segments.current.sz, mk->segments.max.sz, mk->segments.max_ever.sz);
+ erts_print(to, arg, "segments_watermark: %beu\n",
+ mk->segments.current.watermark);
}
if (hpp || szp) {
res = NIL;
add_2tup(hpp, szp, &res,
am.segments_watermark,
- bld_unstable_uint(hpp, szp, segments.current.watermark));
+ bld_unstable_uint(hpp, szp, mk->segments.current.watermark));
add_4tup(hpp, szp, &res,
am.segments_size,
- bld_unstable_uint(hpp, szp, segments.current.sz),
- bld_unstable_uint(hpp, szp, segments.max.sz),
- bld_unstable_uint(hpp, szp, segments.max_ever.sz));
+ bld_unstable_uint(hpp, szp, mk->segments.current.sz),
+ bld_unstable_uint(hpp, szp, mk->segments.max.sz),
+ bld_unstable_uint(hpp, szp, mk->segments.max_ever.sz));
add_4tup(hpp, szp, &res,
am.segments,
- bld_unstable_uint(hpp, szp, segments.current.no),
- bld_unstable_uint(hpp, szp, segments.max.no),
- bld_unstable_uint(hpp, szp, segments.max_ever.no));
+ bld_unstable_uint(hpp, szp, mk->segments.current.no),
+ bld_unstable_uint(hpp, szp, mk->segments.max.no),
+ bld_unstable_uint(hpp, szp, mk->segments.max_ever.no));
add_2tup(hpp, szp, &res,
am.cache_hits,
- bld_unstable_uint(hpp, szp, cache_hits));
+ bld_unstable_uint(hpp, szp, mk->cache_hits));
add_2tup(hpp, szp, &res,
am.cached_segments,
- bld_unstable_uint(hpp, szp, cache_size));
+ bld_unstable_uint(hpp, szp, mk->cache_size));
}
if (begin_new_max_period) {
- segments.max.no = segments.current.no;
- segments.max.sz = segments.current.sz;
+ mk->segments.max.no = mk->segments.current.no;
+ mk->segments.max.sz = mk->segments.current.sz;
}
return res;
}
+static Eterm info_memkind(MemKind* mk, int *print_to_p, void *print_to_arg,
+ int begin_max_per, Uint **hpp, Uint *szp)
+{
+ Eterm res = THE_NON_VALUE;
+ Eterm atoms[3];
+ Eterm values[3];
+
+ if (print_to_p) {
+ erts_print(*print_to_p, print_to_arg, "memory kind: %s\n", mk->name);
+ }
+ if (hpp || szp) {
+ atoms[0] = am.name;
+ atoms[1] = am.status;
+ atoms[2] = am.calls;
+ values[0] = erts_bld_string(hpp, szp, mk->name);
+ }
+ values[1] = info_status(mk, print_to_p, print_to_arg, begin_max_per, hpp, szp);
+ values[2] = info_calls(print_to_p, print_to_arg, hpp, szp);
+
+ if (hpp || szp)
+ res = bld_2tup_list(hpp, szp, 3, atoms, values);
+
+ return res;
+}
+
+
static Eterm
info_version(int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp)
{
@@ -1197,6 +1330,7 @@ erts_mseg_info(int *print_to_p,
Eterm res = THE_NON_VALUE;
Eterm atoms[4];
Eterm values[4];
+ Uint n = 0;
erts_mtx_lock(&mseg_mutex);
@@ -1207,17 +1341,19 @@ erts_mseg_info(int *print_to_p,
atoms[0] = am.version;
atoms[1] = am.options;
- atoms[2] = am.status;
- atoms[3] = am.calls;
+ atoms[2] = am.memkind;
+ atoms[3] = am.memkind;
}
-
- values[0] = info_version(print_to_p, print_to_arg, hpp, szp);
- values[1] = info_options("option ", print_to_p, print_to_arg, hpp, szp);
- values[2] = info_status(print_to_p, print_to_arg, begin_max_per, hpp, szp);
- values[3] = info_calls(print_to_p, print_to_arg, hpp, szp);
-
+ values[n++] = info_version(print_to_p, print_to_arg, hpp, szp);
+ values[n++] = info_options("option ", print_to_p, print_to_arg, hpp, szp);
+#if HALFWORD_HEAP
+ values[n++] = info_memkind(&low_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp);
+ values[n++] = info_memkind(&hi_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp);
+#else
+ values[n++] = info_memkind(&the_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp);
+#endif
if (hpp || szp)
- res = bld_2tup_list(hpp, szp, 4, atoms, values);
+ res = bld_2tup_list(hpp, szp, n, atoms, values);
erts_mtx_unlock(&mseg_mutex);
@@ -1237,7 +1373,7 @@ erts_mseg_alloc_opt(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
void *
erts_mseg_alloc(ErtsAlcType_t atype, Uint *size_p)
{
- return erts_mseg_alloc_opt(atype, size_p, &default_opt);
+ return erts_mseg_alloc_opt(atype, size_p, &erts_mseg_default_opt);
}
void
@@ -1252,7 +1388,7 @@ erts_mseg_dealloc_opt(ErtsAlcType_t atype, void *seg, Uint size,
void
erts_mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size)
{
- erts_mseg_dealloc_opt(atype, seg, size, &default_opt);
+ erts_mseg_dealloc_opt(atype, seg, size, &erts_mseg_default_opt);
}
void *
@@ -1270,23 +1406,29 @@ void *
erts_mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size,
Uint *new_size_p)
{
- return erts_mseg_realloc_opt(atype, seg, old_size, new_size_p, &default_opt);
+ return erts_mseg_realloc_opt(atype, seg, old_size, new_size_p, &erts_mseg_default_opt);
}
void
erts_mseg_clear_cache(void)
{
+ MemKind* mk;
erts_mtx_lock(&mseg_mutex);
- mseg_clear_cache();
+ for (mk=mk_list; mk; mk=mk->next) {
+ mseg_clear_cache(mk);
+ }
erts_mtx_unlock(&mseg_mutex);
}
Uint
erts_mseg_no(void)
{
- Uint n;
+ MemKind* mk;
+ Uint n = 0;
erts_mtx_lock(&mseg_mutex);
- n = segments.current.no;
+ for (mk=mk_list; mk; mk=mk->next) {
+ n += mk->segments.current.no;
+ }
erts_mtx_unlock(&mseg_mutex);
return n;
}
@@ -1297,11 +1439,43 @@ erts_mseg_unit_size(void)
return page_size;
}
-void
-erts_mseg_init(ErtsMsegInit_t *init)
+static void mem_kind_init(MemKind* mk, const char* name)
{
unsigned i;
+ mk->cache = NULL;
+ mk->cache_end = NULL;
+ mk->max_cached_seg_size = 0;
+ mk->min_cached_seg_size = ~((Uint) 0);
+ mk->cache_size = 0;
+ mk->cache_hits = 0;
+
+ if (max_cache_size > 0) {
+ for (i = 0; i < max_cache_size - 1; i++)
+ mk->cache_descs[i].next = &mk->cache_descs[i + 1];
+ mk->cache_descs[max_cache_size - 1].next = NULL;
+ mk->free_cache_descs = &mk->cache_descs[0];
+ }
+ else
+ mk->free_cache_descs = NULL;
+
+ mk->segments.current.watermark = 0;
+ mk->segments.current.no = 0;
+ mk->segments.current.sz = 0;
+ mk->segments.max.no = 0;
+ mk->segments.max.sz = 0;
+ mk->segments.max_ever.no = 0;
+ mk->segments.max_ever.sz = 0;
+
+ mk->name = name;
+ mk->next = mk_list;
+ mk_list = mk;
+}
+
+
+void
+erts_mseg_init(ErtsMsegInit_t *init)
+{
atoms_initialized = 0;
is_init_done = 0;
@@ -1324,13 +1498,17 @@ erts_mseg_init(ErtsMsegInit_t *init)
erl_exit(ERTS_ABORT_EXIT, "erts_mseg: unable to open /dev/zero\n");
#endif
+#if HAVE_MMAP && HALFWORD_HEAP
+ initialize_pmmap();
+#endif
+
page_size = GET_PAGE_SIZE;
page_shift = 1;
while ((page_size >> page_shift) != 1) {
if ((page_size & (1 << (page_shift - 1))) != 0)
erl_exit(ERTS_ABORT_EXIT,
- "erts_mseg: Unexpected page_size %bpu\n", page_size);
+ "erts_mseg: Unexpected page_size %beu\n", page_size);
page_shift++;
}
@@ -1340,40 +1518,33 @@ erts_mseg_init(ErtsMsegInit_t *init)
min_seg_size = ~((Uint) 0);
#endif
- cache = NULL;
- cache_end = NULL;
- cache_hits = 0;
- max_cached_seg_size = 0;
- min_cached_seg_size = ~((Uint) 0);
- cache_size = 0;
+ if (max_cache_size > MAX_CACHE_SIZE)
+ max_cache_size = MAX_CACHE_SIZE;
+
+#if HALFWORD_HEAP
+ mem_kind_init(&low_mem, "low memory");
+ mem_kind_init(&hi_mem, "high memory");
+#else
+ mem_kind_init(&the_mem, "all memory");
+#endif
is_cache_check_scheduled = 0;
#ifdef ERTS_THREADS_NO_SMP
is_cache_check_requested = 0;
#endif
+}
- if (max_cache_size > MAX_CACHE_SIZE)
- max_cache_size = MAX_CACHE_SIZE;
- if (max_cache_size > 0) {
- for (i = 0; i < max_cache_size - 1; i++)
- cache_descs[i].next = &cache_descs[i + 1];
- cache_descs[max_cache_size - 1].next = NULL;
- free_cache_descs = &cache_descs[0];
+static ERTS_INLINE Uint tot_cache_size(void)
+{
+ MemKind* mk;
+ Uint sz = 0;
+ for (mk=mk_list; mk; mk=mk->next) {
+ sz += mk->cache_size;
}
- else
- free_cache_descs = NULL;
-
- segments.current.watermark = 0;
- segments.current.no = 0;
- segments.current.sz = 0;
- segments.max.no = 0;
- segments.max.sz = 0;
- segments.max_ever.no = 0;
- segments.max_ever.sz = 0;
+ return sz;
}
-
/*
* erts_mseg_late_init() has to be called after all allocators,
* threads and timers have been initialized.
@@ -1391,7 +1562,7 @@ erts_mseg_late_init(void)
#ifdef ERTS_THREADS_NO_SMP
async_handle = handle;
#endif
- if (cache_size)
+ if (tot_cache_size())
schedule_cache_check();
erts_mtx_unlock(&mseg_mutex);
}
@@ -1432,7 +1603,7 @@ erts_mseg_test(unsigned long op,
case 0x406: {
unsigned long res;
erts_mtx_lock(&mseg_mutex);
- res = (unsigned long) cache_size;
+ res = (unsigned long) tot_cache_size();
erts_mtx_unlock(&mseg_mutex);
return res;
}
@@ -1446,3 +1617,432 @@ erts_mseg_test(unsigned long op,
}
+#if HALFWORD_HEAP
+/*
+ * Very simple page-oriented mmap replacement. Works in the lower
+ * 32-bit address range of a 64-bit program.
+ * Implements anonymous mmap, mremap and munmap with address-order first fit.
+ * The free list is expected to be very short...
+ * To be used for compressed pointers in the Erlang halfword emulator
+ * implementation. The Mac OS X version is more of a toy; it is not really
+ * for production, as the halfword Erlang VM relies on Linux-specific memory
+ * mapping tricks.
+ */
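
The comment above describes an address-ordered, first-fit free list keyed on page counts. As a standalone illustration (not part of this patch), here is a minimal sketch of that policy; every name in it (SimBlock, sim_alloc, NPAGES, pool) is invented for the example, and the real pmmap() below additionally maps and unmaps the pages that back the list nodes and serializes callers with pmmap_mutex.

    #include <stddef.h>
    #include <stdio.h>

    #define NPAGES 16

    typedef struct sim_block {
        size_t num;             /* length of this free run, in pages */
        struct sim_block *next; /* next free run, in address order */
    } SimBlock;

    static SimBlock pool[NPAGES]; /* one descriptor per simulated page */
    static SimBlock *sim_first;

    static void sim_init(void)
    {
        pool[0].num = NPAGES;   /* initially one run covering all pages */
        pool[0].next = NULL;
        sim_first = &pool[0];
    }

    /* Address-order first fit: take the first run that is large enough;
     * if it is larger than requested, its tail stays on the free list. */
    static SimBlock *sim_alloc(size_t num_pages)
    {
        SimBlock **blk, *res, *tail;

        for (blk = &sim_first; *blk && (*blk)->num < num_pages;
             blk = &(*blk)->next)
            ;
        if (!*blk)
            return NULL;               /* no run is big enough */
        res = *blk;
        if (res->num == num_pages) {   /* perfect fit */
            *blk = res->next;
        } else {                       /* split: the tail remains free */
            tail = res + num_pages;    /* descriptor of first left-over page */
            tail->num = res->num - num_pages;
            tail->next = res->next;
            *blk = tail;
        }
        return res;
    }

    int main(void)
    {
        sim_init();
        printf("a at page %td, ", sim_alloc(4) - pool); /* page 0 */
        printf("b at page %td\n", sim_alloc(8) - pool); /* page 4 */
        return 0;
    }
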
+
+/*#define HARDDEBUG 1*/
+
+#ifdef __APPLE__
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+#define INIT_LOCK() do {erts_mtx_init(&pmmap_mutex, "pmmap");} while(0)
+
+#define TAKE_LOCK() do {erts_mtx_lock(&pmmap_mutex);} while(0)
+
+#define RELEASE_LOCK() do {erts_mtx_unlock(&pmmap_mutex);} while(0)
+
+static erts_mtx_t pmmap_mutex; /* Also needed when !USE_THREADS */
+
+typedef struct _free_block {
+ unsigned long num; /*pages*/
+ struct _free_block *next;
+} FreeBlock;
+
+/* Assigned once and for all */
+static size_t pagsz;
+
+/* Protect with lock */
+static FreeBlock *first;
+
+static size_t round_up_to_pagesize(size_t size)
+{
+ size_t x = size / pagsz;
+
+ if ((size % pagsz)) {
+ ++x;
+ }
+
+ return pagsz * x;
+}
+
+static size_t round_down_to_pagesize(size_t size)
+{
+ size_t x = size / pagsz;
+
+ return pagsz * x;
+}
+
+static void *do_map(void *ptr, size_t sz)
+{
+ void *res;
+
+ if (round_up_to_pagesize(sz) != sz) {
+#ifdef HARDDEBUG
+ fprintf(stderr,"Mapping of address %p with size %ld "
+ "does not map complete pages\r\n",
+ (void *) ptr, (unsigned long) sz);
+#endif
+ return NULL;
+ }
+
+ if (((unsigned long) ptr) % pagsz) {
+#ifdef HARDDEBUG
+ fprintf(stderr,"Mapping of address %p with size %ld "
+ "is not page aligned\r\n",
+ (void *) ptr, (unsigned long) sz);
+#endif
+ return NULL;
+ }
+
+#if HAVE_MMAP
+ res = mmap(ptr, sz,
+ PROT_READ | PROT_WRITE, MAP_PRIVATE |
+ MAP_ANONYMOUS | MAP_FIXED,
+ -1 , 0);
+#else
+# error "Missing mmap support"
+#endif
+
+ if (res == MAP_FAILED) {
+#ifdef HARDDEBUG
+ fprintf(stderr,"Mapping of address %p with size %ld failed!\r\n",
+ (void *) ptr, (unsigned long) sz);
+#endif
+ return NULL;
+ }
+
+ return res;
+}
+
+static int do_unmap(void *ptr, size_t sz)
+{
+ void *res;
+
+ if (round_up_to_pagesize(sz) != sz) {
+#ifdef HARDDEBUG
+ fprintf(stderr,"Mapping of address %p with size %ld "
+ "does not map complete pages\r\n",
+ (void *) ptr, (unsigned long) sz);
+#endif
+ return 1;
+ }
+
+ if (((unsigned long) ptr) % pagsz) {
+#ifdef HARDDEBUG
+ fprintf(stderr,"Mapping of address %p with size %ld "
+ "is not page aligned\r\n",
+ (void *) ptr, (unsigned long) sz);
+#endif
+ return 1;
+ }
+
+
+ res = mmap(ptr, sz,
+ PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE
+ | MAP_FIXED,
+ -1 , 0);
+
+ if (res == MAP_FAILED) {
+#ifdef HARDDEBUG
+ fprintf(stderr,"Mapping of address %p with size %ld failed!\r\n",
+ (void *) ptr, (unsigned long) sz);
+#endif
+ return 1;
+ }
+
+ return 0;
+}
+
+#ifdef __APPLE__
+/*
+ * The first 4 GB are protected on Mac OS X for 64-bit processes :(
+ * The range 0x1000000000 - 0x10FFFFFFFF is selected as an arbitrary,
+ * normally unused range... Real mmaps will avoid it, and all 32-bit
+ * compressed pointers can live in that range...
+ * This is more expensive than on Linux, where expansion of compressed
+ * pointers involves no masking (as they are in the first 4 GB).
+ * It is also very uncertain whether the MAP_NORESERVE flag really has
+ * any effect on Mac OS X; swap space may always be allocated...
+ */
+#define SET_RANGE_MIN() /* nothing */
+#define RANGE_MIN 0x1000000000UL
+#define RANGE_MAX 0x1100000000UL
+#define RANGE_MASK (RANGE_MIN)
+#define EXTRA_MAP_FLAGS (MAP_FIXED)
+#else
+static size_t range_min;
+#define SET_RANGE_MIN() do { range_min = (size_t) sbrk(0); } while (0)
+#define RANGE_MIN range_min
+#define RANGE_MAX 0x100000000UL
+#define RANGE_MASK 0UL
+#define EXTRA_MAP_FLAGS (0)
+#endif
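
As a hedged aside on why RANGE_MASK is defined this way (this sketch is not from the patch, and compress32/expand32 are invented names): a halfword compressed pointer keeps only the low 32 bits, so expanding one must restore the high bits of the reserved range. With the Linux layout the range sits below 4 GB, the mask is 0, and expansion is a plain zero-extension; with the Mac OS X layout, expansion ORs in 0x1000000000, as the comment above notes. Assumes a 64-bit build.

    #include <assert.h>
    #include <stdint.h>

    /* 0UL in the Linux layout (range below 4 GB); RANGE_MIN on Mac OS X */
    #define SIM_RANGE_MASK 0x1000000000UL

    static uint32_t compress32(void *p)
    {
        return (uint32_t)(uintptr_t)p;   /* keep the low 32 bits */
    }

    static void *expand32(uint32_t c)
    {
        /* restore the high bits of the reserved range */
        return (void *)(uintptr_t)(SIM_RANGE_MASK | c);
    }

    int main(void)
    {
        void *p = (void *)(uintptr_t)0x10ABCD1000UL; /* inside the range */
        assert(expand32(compress32(p)) == p);
        return 0;
    }
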
+
+static int initialize_pmmap(void)
+{
+ char *p,*q,*rptr;
+ size_t rsz;
+ FreeBlock *initial;
+
+
+ pagsz = getpagesize();
+ SET_RANGE_MIN();
+ if (sizeof(void *) != 8) {
+ erl_exit(1,"Halfword emulator cannot be run in 32bit mode");
+ }
+
+ p = (char *) RANGE_MIN;
+ q = (char *) RANGE_MAX;
+
+ rsz = round_down_to_pagesize(q - p);
+
+ rptr = mmap((void *) p, rsz,
+ PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS |
+ MAP_NORESERVE | EXTRA_MAP_FLAGS,
+ -1 , 0);
+#ifdef HARDDEBUG
+ printf("p=%p, rsz = %ld, pages = %ld, got range = %p -> %p\r\n",
+ p, (unsigned long) rsz, (unsigned long) (rsz / pagsz),
+ (void *) rptr, (void*)(rptr + rsz));
+#endif
+ if ((UWord)(rptr + rsz) > RANGE_MAX) {
+ size_t rsz_trunc = RANGE_MAX - (UWord)rptr;
+#ifdef HARDDEBUG
+ printf("Reducing mmap'ed memory from %lu to %lu Mb, reduced range = %p -> %p\r\n",
+ rsz/(1024*1024), rsz_trunc/(1024*1024), rptr, rptr+rsz_trunc);
+#endif
+ munmap((void*)RANGE_MAX, rsz - rsz_trunc);
+ rsz = rsz_trunc;
+ }
+ if (!do_map(rptr,pagsz)) {
+ erl_exit(1,"Could not actually mmap first page for halfword emulator...\n");
+ }
+ initial = (FreeBlock *) rptr;
+ initial->num = (rsz / pagsz);
+ initial->next = NULL;
+ first = initial;
+ INIT_LOCK();
+ return 0;
+}
+
+#ifdef HARDDEBUG
+static void dump_freelist(void)
+{
+ FreeBlock *p = first;
+
+ while (p) {
+ printf("p = %p\r\np->num = %ld\r\np->next = %p\r\n\r\n",
+ (void *) p, (unsigned long) p->num, (void *) p->next);
+ p = p->next;
+ }
+}
+#endif
+
+
+static void *pmmap(size_t size)
+{
+ size_t real_size = round_up_to_pagesize(size);
+ size_t num_pages = real_size / pagsz;
+ FreeBlock **block;
+ FreeBlock *tail;
+ FreeBlock *res;
+ TAKE_LOCK();
+ for (block = &first;
+ *block != NULL && (*block)->num < num_pages;
+ block = &((*block)->next))
+ ;
+ if (!(*block)) {
+ RELEASE_LOCK();
+ return NULL;
+ }
+ if ((*block)->num == num_pages) {
+ /* nice, perfect fit */
+ res = *block;
+ *block = (*block)->next;
+ } else {
+ tail = (FreeBlock *) (((char *) ((void *) (*block))) + real_size);
+ if (!do_map(tail,pagsz)) {
+#ifdef HARDDEBUG
+ fprintf(stderr, "Could not actually allocate page at %p...\r\n",
+ (void *) tail);
+#endif
+ RELEASE_LOCK();
+ return NULL;
+ }
+ tail->num = (*block)->num - num_pages;
+ tail->next = (*block)->next;
+ res = *block;
+ *block = tail;
+ }
+ RELEASE_LOCK();
+ if (!do_map(res,real_size)) {
+#ifdef HARDDEBUG
+ fprintf(stderr, "Could not actually allocate %ld at %p...\r\n",
+ (unsigned long) real_size, (void *) res);
+#endif
+ return NULL;
+ }
+
+ return (void *) res;
+}
+
+static int pmunmap(void *p, size_t size)
+{
+ size_t real_size = round_up_to_pagesize(size);
+ size_t num_pages = real_size / pagsz;
+ FreeBlock *block;
+ FreeBlock *last;
+ FreeBlock *nb = (FreeBlock *) p;
+
+ ASSERT(((unsigned long)p & CHECK_POINTER_MASK)==0);
+ if (real_size > pagsz) {
+ if (do_unmap(((char *) p) + pagsz,real_size - pagsz)) {
+ return 1;
+ }
+ }
+
+ TAKE_LOCK();
+
+ last = NULL;
+ block = first;
+ while(block != NULL && ((void *) block) < p) {
+ last = block;
+ block = block->next;
+ }
+
+ if (block != NULL &&
+ ((void *) block) == ((void *) (((char *) p) + real_size))) {
+ /* Merge new free block with following */
+ nb->num = block->num + num_pages;
+ nb->next = block->next;
+ if (do_unmap(block,pagsz)) {
+ RELEASE_LOCK();
+ return 1;
+ }
+ } else {
+ /* just link in */
+ nb->num = num_pages;
+ nb->next = block;
+ }
+ if (last != NULL) {
+ if (p == ((void *) (((char *) last) + (last->num * pagsz)))) {
+ /* Merge with previous */
+ last->num += nb->num;
+ last->next = nb->next;
+ if (do_unmap(nb,pagsz)) {
+ RELEASE_LOCK();
+ return 1;
+ }
+ } else {
+ last->next = nb;
+ }
+ } else {
+ first = nb;
+ }
+ RELEASE_LOCK();
+ return 0;
+}
+
+static void *pmremap(void *old_address, size_t old_size,
+ size_t new_size)
+{
+ size_t new_real_size = round_up_to_pagesize(new_size);
+ size_t new_num_pages = new_real_size / pagsz;
+ size_t old_real_size = round_up_to_pagesize(old_size);
+ size_t old_num_pages = old_real_size / pagsz;
+ if (new_num_pages == old_num_pages) {
+ return old_address;
+ } else if (new_num_pages < old_num_pages) { /* Shrink */
+ size_t nfb_pages = old_num_pages - new_num_pages;
+ size_t nfb_real_size = old_real_size - new_real_size;
+ void *vnfb = (void *) (((char *)old_address) + new_real_size);
+ FreeBlock *nfb = (FreeBlock *) vnfb;
+ FreeBlock **block;
+ TAKE_LOCK();
+ for (block = &first;
+ *block != NULL && (*block) < nfb;
+ block = &((*block)->next))
+ ;
+ if (!(*block) ||
+ (*block) > ((FreeBlock *)(((char *) vnfb) + nfb_real_size))) {
+ /* Normal link in */
+ if (nfb_pages > 1) {
+ if (do_unmap((void *)(((char *) vnfb) + pagsz),
+ (nfb_pages - 1)*pagsz)) {
+ RELEASE_LOCK(); /* do not leak pmmap_mutex on failure */
+ return NULL;
+ }
+ }
+ nfb->next = (*block);
+ nfb->num = nfb_pages;
+ (*block) = nfb;
+ } else { /* block merge */
+ nfb->next = (*block)->next;
+ nfb->num = nfb_pages + (*block)->num;
+ /* unmap also the first page of the next freeblock */
+ (*block) = nfb;
+ if (do_unmap((void *)(((char *) vnfb) + pagsz),
+ nfb_pages*pagsz)) {
+ RELEASE_LOCK(); /* do not leak pmmap_mutex on failure */
+ return NULL;
+ }
+ }
+ RELEASE_LOCK();
+ return old_address;
+ } else { /* Enlarge */
+ FreeBlock **block;
+ void *old_end = (void *) (((char *)old_address) + old_real_size);
+ TAKE_LOCK();
+ for (block = &first;
+ *block != NULL && (*block) < (FreeBlock *) old_address;
+ block = &((*block)->next))
+ ;
+ if ((*block) == NULL || old_end > ((void *) RANGE_MAX) ||
+ (*block) != old_end ||
+ (*block)->num < (new_num_pages - old_num_pages)) {
+ /* cannot extend */
+ void *result;
+ RELEASE_LOCK();
+ result = pmmap(new_size);
+ if (result == NULL) {
+ return NULL;
+ }
+ memcpy(result,old_address,old_size);
+ if (pmunmap(old_address,old_size)) {
+ /* Oops... */
+ pmunmap(result,new_size);
+ return NULL;
+ }
+ return result;
+ } else { /* extend */
+ size_t remaining_pages = (*block)->num -
+ (new_num_pages - old_num_pages);
+ if (!remaining_pages) {
+ void *p = (void *) (((char *) (*block)) + pagsz);
+ void *n = (*block)->next;
+ size_t x = ((*block)->num - 1) * pagsz;
+ if (x > 0) {
+ if (do_map(p,x) == NULL) {
+ RELEASE_LOCK();
+ return NULL;
+ }
+ }
+ (*block) = n;
+ } else {
+ FreeBlock *nfb = (FreeBlock *) ((void *)
+ (((char *) old_address) +
+ new_real_size));
+ void *p = (void *) (((char *) (*block)) + pagsz);
+ if (do_map(p,new_real_size - old_real_size) == NULL) {
+ RELEASE_LOCK();
+ return NULL;
+ }
+ nfb->num = remaining_pages;
+ nfb->next = (*block)->next;
+ (*block) = nfb;
+ }
+ RELEASE_LOCK();
+ return old_address;
+ }
+ }
+}
+
+#endif /* HALFWORD_HEAP */
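
To recap the pmremap() control flow above: an unchanged page count returns the old address as-is; shrinking donates the tail pages to the free list (merging with an adjacent free block when one follows); growing first tries to extend into an adjacent free block and otherwise falls back to pmmap() plus memcpy() plus pmunmap(). A hedged, simplified restatement of that decision, with sim_remap_plan and its parameters invented for illustration:

    #include <stdio.h>

    typedef enum {
        KEEP,            /* same page count: nothing to do             */
        SHRINK_IN_PLACE, /* tail pages go back onto the free list      */
        EXTEND_IN_PLACE, /* grow into the adjacent free run            */
        ALLOC_COPY_FREE  /* pmmap() new block, memcpy(), pmunmap() old */
    } Plan;

    static Plan sim_remap_plan(size_t old_pages, size_t new_pages,
                               int next_run_is_free, size_t next_run_pages)
    {
        if (new_pages == old_pages)
            return KEEP;
        if (new_pages < old_pages)
            return SHRINK_IN_PLACE;
        if (next_run_is_free && next_run_pages >= new_pages - old_pages)
            return EXTEND_IN_PLACE;
        return ALLOC_COPY_FREE;
    }

    int main(void)
    {
        /* growing 3 -> 5 pages with a 4-page free run right after: extend */
        printf("%d\n", sim_remap_plan(3, 5, 1, 4) == EXTEND_IN_PLACE);
        return 0;
    }
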