path: root/erts/emulator/sys/common/erl_mseg.c
author     Sverker Eriksson <[email protected]>    2010-12-10 10:52:15 +0100
committer  Sverker Eriksson <[email protected]>    2011-02-03 17:33:43 +0100
commit     561f6fd2bc8367acd1bceea291303adb18ad0b93 (patch)
tree       5d2744f9c7809b1a7d94f3905ff488fef6281a35 /erts/emulator/sys/common/erl_mseg.c
parent     fff4ba0282e42e2942acebff9c10a274075c1c62 (diff)
HALFWORD first stab at high mem alloc
Diffstat (limited to 'erts/emulator/sys/common/erl_mseg.c')
-rw-r--r--  erts/emulator/sys/common/erl_mseg.c | 475
1 file changed, 272 insertions(+), 203 deletions(-)
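
A minimal sketch of the idea behind the change, assuming the usual halfword-emulator constraint: term pointers are stored in 32 bits, so segments they may point into have to come from the low address range, while data never referenced through such pointers (here only the ETS allocations) may be mapped anywhere. All names below (demo_*) are illustrative only, not part of the patch:

#include <stddef.h>

/* Two memory "kinds", mirroring the low_mem/hi_mem pair introduced below. */
typedef struct demo_kind {
    const char *name;
    size_t current_no;        /* segments currently allocated from this kind */
    struct demo_kind *next;   /* every kind is kept on a singly linked list  */
} DemoKind;

static DemoKind demo_low = { "low_mem", 0, NULL };
static DemoKind demo_hi  = { "hi_mem",  0, &demo_low };
static DemoKind *demo_list = &demo_hi;

/* Route by allocator type: only ETS-like data may go high; everything else
   must stay 32-bit addressable (compare type2mk() in the diff). */
static DemoKind *demo_kind_for(int is_ets)
{
    return is_ets ? &demo_hi : &demo_low;
}

/* Totals are obtained by walking the list, as erts_mseg_no() now does. */
static size_t demo_total_segments(void)
{
    size_t n = 0;
    for (DemoKind *k = demo_list; k; k = k->next)
        n += k->current_no;
    return n;
}
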
diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c
index bd931cc62b..00da31332a 100644
--- a/erts/emulator/sys/common/erl_mseg.c
+++ b/erts/emulator/sys/common/erl_mseg.c
@@ -77,8 +77,10 @@ static int atoms_initialized;
static Uint cache_check_interval;
+typedef struct mem_kind_t MemKind;
+
static void check_cache(void *unused);
-static void mseg_clear_cache(void);
+static void mseg_clear_cache(MemKind*);
static int is_cache_check_scheduled;
#ifdef ERTS_THREADS_NO_SMP
static int is_cache_check_requested;
@@ -160,15 +162,43 @@ static struct {
CallCounter check_cache;
} calls;
-static cache_desc_t cache_descs[MAX_CACHE_SIZE];
-static cache_desc_t *free_cache_descs;
-static cache_desc_t *cache;
-static cache_desc_t *cache_end;
-static Uint cache_hits;
-static Uint cache_size;
-static Uint min_cached_seg_size;
-static Uint max_cached_seg_size;
+struct mem_kind_t {
+ cache_desc_t cache_descs[MAX_CACHE_SIZE];
+ cache_desc_t *free_cache_descs;
+ cache_desc_t *cache;
+ cache_desc_t *cache_end;
+
+ Uint cache_size;
+ Uint min_cached_seg_size;
+ Uint max_cached_seg_size;
+
+ struct {
+ struct {
+ Uint watermark;
+ Uint no;
+ Uint sz;
+ } current;
+ struct {
+ Uint no;
+ Uint sz;
+ } max;
+ struct {
+ Uint no;
+ Uint sz;
+ } max_ever;
+ } segments;
+
+ MemKind* next;
+};/*MemKind*/
+
+#if HALFWORD_HEAP
+static MemKind low_mem, hi_mem;
+#else
+static MemKind the_mem;
+#endif
+static MemKind* mk_list = NULL;
+static Uint cache_hits;
static Uint max_cache_size;
static Uint abs_max_cache_bad_fit;
static Uint rel_max_cache_bad_fit;
@@ -177,47 +207,32 @@ static Uint rel_max_cache_bad_fit;
static Uint min_seg_size;
#endif
-struct {
- struct {
- Uint watermark;
- Uint no;
- Uint sz;
- } current;
- struct {
- Uint no;
- Uint sz;
- } max;
- struct {
- Uint no;
- Uint sz;
- } max_ever;
-} segments;
-#define ERTS_MSEG_ALLOC_STAT(SZ) \
+#define ERTS_MSEG_ALLOC_STAT(C,SZ) \
do { \
- segments.current.no++; \
- if (segments.max.no < segments.current.no) \
- segments.max.no = segments.current.no; \
- if (segments.current.watermark < segments.current.no) \
- segments.current.watermark = segments.current.no; \
- segments.current.sz += (SZ); \
- if (segments.max.sz < segments.current.sz) \
- segments.max.sz = segments.current.sz; \
+ C->segments.current.no++; \
+ if (C->segments.max.no < C->segments.current.no) \
+ C->segments.max.no = C->segments.current.no; \
+ if (C->segments.current.watermark < C->segments.current.no) \
+ C->segments.current.watermark = C->segments.current.no; \
+ C->segments.current.sz += (SZ); \
+ if (C->segments.max.sz < C->segments.current.sz) \
+ C->segments.max.sz = C->segments.current.sz; \
} while (0)
-#define ERTS_MSEG_DEALLOC_STAT(SZ) \
+#define ERTS_MSEG_DEALLOC_STAT(C,SZ) \
do { \
- ASSERT(segments.current.no > 0); \
- segments.current.no--; \
- ASSERT(segments.current.sz >= (SZ)); \
- segments.current.sz -= (SZ); \
+ ASSERT(C->segments.current.no > 0); \
+ C->segments.current.no--; \
+ ASSERT(C->segments.current.sz >= (SZ)); \
+ C->segments.current.sz -= (SZ); \
} while (0)
-#define ERTS_MSEG_REALLOC_STAT(OSZ, NSZ) \
+#define ERTS_MSEG_REALLOC_STAT(C,OSZ, NSZ) \
do { \
- ASSERT(segments.current.sz >= (OSZ)); \
- segments.current.sz -= (OSZ); \
- segments.current.sz += (NSZ); \
+ ASSERT(C->segments.current.sz >= (OSZ)); \
+ C->segments.current.sz -= (OSZ); \
+ C->segments.current.sz += (NSZ); \
} while (0)
#define ONE_GIGA (1000000000)
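
The statistics macros now take the owning MemKind as an explicit argument. Read together with check_one_cache() and adjust_cache_size() further down: current.no/current.sz count live segments, max.* record the peak for the current reporting period, and current.watermark is a decaying high-water mark that bounds how many segments may sit in the cache. A tiny illustration of that budget arithmetic, with made-up numbers and a hypothetical helper name:

/* Mirrors adjust_cache_size(): max_cached = watermark - current.no.
   With a watermark of 10 and 7 live segments, at most 3 stay cached;
   each check_cache() tick lowers the watermark toward the live count,
   so the cache drains when allocation pressure stays low. */
static long demo_cache_budget(long watermark, long live_segments)
{
    return watermark - live_segments;   /* demo_cache_budget(10, 7) == 3 */
}
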
@@ -303,13 +318,16 @@ check_schedule_cache_check(void)
static void
mseg_shutdown(void)
{
+ MemKind* mk;
erts_mtx_lock(&mseg_mutex);
- mseg_clear_cache();
+ for (mk=mk_list; mk; mk=mk->next) {
+ mseg_clear_cache(mk);
+ }
erts_mtx_unlock(&mseg_mutex);
}
static ERTS_INLINE void *
-mseg_create(Uint size)
+mseg_create(MemKind* mk, Uint size)
{
void *seg;
@@ -319,19 +337,21 @@ mseg_create(Uint size)
seg = erts_sys_alloc(ERTS_ALC_N_INVALID, NULL, size);
#elif HAVE_MMAP
#if HALFWORD_HEAP
- seg = pmmap(size);
-#else
- seg = (void *) mmap((void *) 0, (size_t) size,
- MMAP_PROT, MMAP_FLAGS, MMAP_FD, 0);
- if (seg == (void *) MAP_FAILED)
- seg = NULL;
-#endif
-#if HALFWORD_HEAP
- if ((unsigned long) seg & CHECK_POINTER_MASK) {
- erts_fprintf(stderr,"Pointer mask failure (0x%08lx)\n",(unsigned long) seg);
- return NULL;
+ if (mk == &low_mem) {
+ seg = pmmap(size);
+ if ((unsigned long) seg & CHECK_POINTER_MASK) {
+ erts_fprintf(stderr,"Pointer mask failure (0x%08lx)\n",(unsigned long) seg);
+ return NULL;
+ }
}
+ else
#endif
+ {
+ seg = (void *) mmap((void *) 0, (size_t) size,
+ MMAP_PROT, MMAP_FLAGS, MMAP_FD, 0);
+ if (seg == (void *) MAP_FAILED)
+ seg = NULL;
+ }
#else
#error "Missing mseg_create() implementation"
#endif
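
On the low-memory path the segment now comes from pmmap(), and the returned address is checked against the halfword pointer format before use. A minimal sketch of that kind of check, assuming a mask that covers every bit a 32-bit (compressed) pointer cannot carry; the real CHECK_POINTER_MASK is defined elsewhere in the emulator sources:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical mask: "anything above the low 4 GB". */
#define DEMO_POINTER_MASK (~(uintptr_t)0xFFFFFFFFu)

static void *demo_check_low(void *seg)
{
    if ((uintptr_t)seg & DEMO_POINTER_MASK) {
        fprintf(stderr, "Pointer mask failure (%p)\n", seg);
        return NULL;    /* same effect as the check in mseg_create() */
    }
    return seg;
}
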
@@ -412,136 +432,142 @@ mseg_recreate(void *old_seg, Uint old_size, Uint new_size)
static ERTS_INLINE cache_desc_t *
-alloc_cd(void)
+alloc_cd(MemKind* mk)
{
- cache_desc_t *cd = free_cache_descs;
+ cache_desc_t *cd = mk->free_cache_descs;
ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
if (cd)
- free_cache_descs = cd->next;
+ mk->free_cache_descs = cd->next;
return cd;
}
static ERTS_INLINE void
-free_cd(cache_desc_t *cd)
+free_cd(MemKind* mk, cache_desc_t *cd)
{
ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
- cd->next = free_cache_descs;
- free_cache_descs = cd;
+ cd->next = mk->free_cache_descs;
+ mk->free_cache_descs = cd;
}
static ERTS_INLINE void
-link_cd(cache_desc_t *cd)
+link_cd(MemKind* mk, cache_desc_t *cd)
{
ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
- if (cache)
- cache->prev = cd;
- cd->next = cache;
+ if (mk->cache)
+ mk->cache->prev = cd;
+ cd->next = mk->cache;
cd->prev = NULL;
- cache = cd;
+ mk->cache = cd;
- if (!cache_end) {
+ if (!mk->cache_end) {
ASSERT(!cd->next);
- cache_end = cd;
+ mk->cache_end = cd;
}
- cache_size++;
+ mk->cache_size++;
}
#if CAN_PARTLY_DESTROY
static ERTS_INLINE void
-end_link_cd(cache_desc_t *cd)
+end_link_cd(MemKind* mk, cache_desc_t *cd)
{
ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
- if (cache_end)
- cache_end->next = cd;
+ if (mk->cache_end)
+ mk->cache_end->next = cd;
cd->next = NULL;
- cd->prev = cache_end;
- cache_end = cd;
+ cd->prev = mk->cache_end;
+ mk->cache_end = cd;
- if (!cache) {
+ if (!mk->cache) {
ASSERT(!cd->prev);
- cache = cd;
+ mk->cache = cd;
}
- cache_size++;
+ mk->cache_size++;
}
#endif
static ERTS_INLINE void
-unlink_cd(cache_desc_t *cd)
+unlink_cd(MemKind* mk, cache_desc_t *cd)
{
ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
if (cd->next)
cd->next->prev = cd->prev;
else
- cache_end = cd->prev;
+ mk->cache_end = cd->prev;
if (cd->prev)
cd->prev->next = cd->next;
else
- cache = cd->next;
- ASSERT(cache_size > 0);
- cache_size--;
+ mk->cache = cd->next;
+ ASSERT(mk->cache_size > 0);
+ mk->cache_size--;
}
static ERTS_INLINE void
-check_cache_limits(void)
+check_cache_limits(MemKind* mk)
{
cache_desc_t *cd;
ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
- max_cached_seg_size = 0;
- min_cached_seg_size = ~((Uint) 0);
- for (cd = cache; cd; cd = cd->next) {
- if (cd->size < min_cached_seg_size)
- min_cached_seg_size = cd->size;
- if (cd->size > max_cached_seg_size)
- max_cached_seg_size = cd->size;
+ mk->max_cached_seg_size = 0;
+ mk->min_cached_seg_size = ~((Uint) 0);
+ for (cd = mk->cache; cd; cd = cd->next) {
+ if (cd->size < mk->min_cached_seg_size)
+ mk->min_cached_seg_size = cd->size;
+ if (cd->size > mk->max_cached_seg_size)
+ mk->max_cached_seg_size = cd->size;
}
-
}
static ERTS_INLINE void
-adjust_cache_size(int force_check_limits)
+adjust_cache_size(MemKind* mk, int force_check_limits)
{
cache_desc_t *cd;
int check_limits = force_check_limits;
- Sint max_cached = ((Sint) segments.current.watermark
- - (Sint) segments.current.no);
+ Sint max_cached = ((Sint) mk->segments.current.watermark
+ - (Sint) mk->segments.current.no);
ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
- while (((Sint) cache_size) > max_cached && ((Sint) cache_size) > 0) {
- ASSERT(cache_end);
- cd = cache_end;
+ while (((Sint) mk->cache_size) > max_cached && ((Sint) mk->cache_size) > 0) {
+ ASSERT(mk->cache_end);
+ cd = mk->cache_end;
if (!check_limits &&
- !(min_cached_seg_size < cd->size
- && cd->size < max_cached_seg_size)) {
+ !(mk->min_cached_seg_size < cd->size
+ && cd->size < mk->max_cached_seg_size)) {
check_limits = 1;
}
if (erts_mtrace_enabled)
erts_mtrace_crr_free(SEGTYPE, SEGTYPE, cd->seg);
mseg_destroy(cd->seg, cd->size);
- unlink_cd(cd);
- free_cd(cd);
+ unlink_cd(mk,cd);
+ free_cd(mk,cd);
}
if (check_limits)
- check_cache_limits();
-
+ check_cache_limits(mk);
}
static void
-check_cache(void *unused)
+check_one_cache(MemKind* mk)
{
+ if (mk->segments.current.watermark > mk->segments.current.no)
+ mk->segments.current.watermark--;
+ adjust_cache_size(mk, 0);
+
+ if (mk->cache_size)
+ schedule_cache_check();
+}
+
+static void check_cache(void* unused)
+{
+ MemKind* mk;
erts_mtx_lock(&mseg_mutex);
is_cache_check_scheduled = 0;
- if (segments.current.watermark > segments.current.no)
- segments.current.watermark--;
- adjust_cache_size(0);
-
- if (cache_size)
- schedule_cache_check();
+ for (mk=mk_list; mk; mk=mk->next) {
+ check_one_cache(mk);
+ }
INC_CC(check_cache);
@@ -549,28 +575,37 @@ check_cache(void *unused)
}
static void
-mseg_clear_cache(void)
+mseg_clear_cache(MemKind* mk)
{
- segments.current.watermark = 0;
+ mk->segments.current.watermark = 0;
- adjust_cache_size(1);
+ adjust_cache_size(mk, 1);
- ASSERT(!cache);
- ASSERT(!cache_end);
- ASSERT(!cache_size);
+ ASSERT(!mk->cache);
+ ASSERT(!mk->cache_end);
+ ASSERT(!mk->cache_size);
- segments.current.watermark = segments.current.no;
+ mk->segments.current.watermark = mk->segments.current.no;
INC_CC(clear_cache);
}
+static ERTS_INLINE MemKind* type2mk(ErtsAlcType_t atype)
+{
+#if HALFWORD_HEAP
+ return (atype == ERTS_ALC_A_ETS) ? &hi_mem : &low_mem;
+#else
+ return &the_mem;
+#endif
+}
+
static void *
mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
{
-
Uint max, min, diff_size, size;
cache_desc_t *cd, *cand_cd;
void *seg;
+ MemKind* mk = type2mk(atype);
INC_CC(alloc);
@@ -583,11 +618,11 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
if (!opt->cache) {
create_seg:
- adjust_cache_size(0);
- seg = mseg_create(size);
+ adjust_cache_size(mk,0);
+ seg = mseg_create(mk, size);
if (!seg) {
- mseg_clear_cache();
- seg = mseg_create(size);
+ mseg_clear_cache(mk);
+ seg = mseg_create(mk, size);
if (!seg)
size = 0;
}
@@ -596,17 +631,17 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
if (seg) {
if (erts_mtrace_enabled)
erts_mtrace_crr_alloc(seg, atype, ERTS_MTRACE_SEGMENT_ID, size);
- ERTS_MSEG_ALLOC_STAT(size);
+ ERTS_MSEG_ALLOC_STAT(mk,size);
}
return seg;
}
- if (size > max_cached_seg_size)
+ if (size > mk->max_cached_seg_size)
goto create_seg;
- if (size < min_cached_seg_size) {
+ if (size < mk->min_cached_seg_size) {
- diff_size = min_cached_seg_size - size;
+ diff_size = mk->min_cached_seg_size - size;
if (diff_size > abs_max_cache_bad_fit)
goto create_seg;
@@ -620,7 +655,7 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
min = ~((Uint) 0);
cand_cd = NULL;
- for (cd = cache; cd; cd = cd->next) {
+ for (cd = mk->cache; cd; cd = cd->next) {
if (cd->size >= size) {
if (!cand_cd) {
cand_cd = cd;
@@ -641,8 +676,8 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
min = cd->size;
}
- min_cached_seg_size = min;
- max_cached_seg_size = max;
+ mk->min_cached_seg_size = min;
+ mk->max_cached_seg_size = max;
if (!cand_cd)
goto create_seg;
@@ -651,10 +686,10 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
if (diff_size > abs_max_cache_bad_fit
|| 100*PAGES(diff_size) > rel_max_cache_bad_fit*PAGES(size)) {
- if (max_cached_seg_size < cand_cd->size)
- max_cached_seg_size = cand_cd->size;
- if (min_cached_seg_size > cand_cd->size)
- min_cached_seg_size = cand_cd->size;
+ if (mk->max_cached_seg_size < cand_cd->size)
+ mk->max_cached_seg_size = cand_cd->size;
+ if (mk->min_cached_seg_size > cand_cd->size)
+ mk->min_cached_seg_size = cand_cd->size;
goto create_seg;
}
@@ -663,8 +698,8 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
size = cand_cd->size;
seg = cand_cd->seg;
- unlink_cd(cand_cd);
- free_cd(cand_cd);
+ unlink_cd(mk,cand_cd);
+ free_cd(mk,cand_cd);
*size_p = size;
@@ -674,7 +709,7 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
}
if (seg)
- ERTS_MSEG_ALLOC_STAT(size);
+ ERTS_MSEG_ALLOC_STAT(mk,size);
return seg;
}
@@ -683,9 +718,10 @@ static void
mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size,
const ErtsMsegOpt_t *opt)
{
+ MemKind* mk = type2mk(atype);
cache_desc_t *cd;
- ERTS_MSEG_DEALLOC_STAT(size);
+ ERTS_MSEG_DEALLOC_STAT(mk,size);
if (!opt->cache || max_cache_size == 0) {
if (erts_mtrace_enabled)
@@ -695,29 +731,29 @@ mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size,
else {
int check_limits = 0;
- if (size < min_cached_seg_size)
- min_cached_seg_size = size;
- if (size > max_cached_seg_size)
- max_cached_seg_size = size;
-
- if (!free_cache_descs) {
- cd = cache_end;
- if (!(min_cached_seg_size < cd->size
- && cd->size < max_cached_seg_size)) {
+ if (size < mk->min_cached_seg_size)
+ mk->min_cached_seg_size = size;
+ if (size > mk->max_cached_seg_size)
+ mk->max_cached_seg_size = size;
+
+ if (!mk->free_cache_descs) {
+ cd = mk->cache_end;
+ if (!(mk->min_cached_seg_size < cd->size
+ && cd->size < mk->max_cached_seg_size)) {
check_limits = 1;
}
if (erts_mtrace_enabled)
erts_mtrace_crr_free(SEGTYPE, SEGTYPE, cd->seg);
mseg_destroy(cd->seg, cd->size);
- unlink_cd(cd);
- free_cd(cd);
+ unlink_cd(mk,cd);
+ free_cd(mk,cd);
}
- cd = alloc_cd();
+ cd = alloc_cd(mk);
ASSERT(cd);
cd->seg = seg;
cd->size = size;
- link_cd(cd);
+ link_cd(mk,cd);
if (erts_mtrace_enabled) {
erts_mtrace_crr_free(atype, SEGTYPE, seg);
@@ -727,7 +763,7 @@ mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size,
/* ASSERT(segments.current.watermark >= segments.current.no + cache_size); */
if (check_limits)
- check_cache_limits();
+ check_cache_limits(mk);
schedule_cache_check();
@@ -740,6 +776,7 @@ static void *
mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
const ErtsMsegOpt_t *opt)
{
+ MemKind* mk = type2mk(atype);
void *new_seg;
Uint new_size;
@@ -777,15 +814,15 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
#if CAN_PARTLY_DESTROY
if (shrink_sz > min_seg_size
- && free_cache_descs
+ && mk->free_cache_descs
&& opt->cache) {
cache_desc_t *cd;
- cd = alloc_cd();
+ cd = alloc_cd(mk);
ASSERT(cd);
cd->seg = ((char *) seg) + new_size;
cd->size = shrink_sz;
- end_link_cd(cd);
+ end_link_cd(mk,cd);
if (erts_mtrace_enabled) {
erts_mtrace_crr_realloc(new_seg,
@@ -861,7 +898,7 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
*new_size_p = new_size;
- ERTS_MSEG_REALLOC_STAT(old_size, new_size);
+ ERTS_MSEG_REALLOC_STAT(mk, old_size, new_size);
return new_seg;
}
@@ -1143,53 +1180,54 @@ info_status(int *print_to_p,
Uint *szp)
{
Eterm res = THE_NON_VALUE;
+ MemKind* mk = type2mk(0); // SVERK cheat
- if (segments.max_ever.no < segments.max.no)
- segments.max_ever.no = segments.max.no;
- if (segments.max_ever.sz < segments.max.sz)
- segments.max_ever.sz = segments.max.sz;
+ if (mk->segments.max_ever.no < mk->segments.max.no)
+ mk->segments.max_ever.no = mk->segments.max.no;
+ if (mk->segments.max_ever.sz < mk->segments.max.sz)
+ mk->segments.max_ever.sz = mk->segments.max.sz;
if (print_to_p) {
int to = *print_to_p;
void *arg = print_to_arg;
- erts_print(to, arg, "cached_segments: %bpu\n", cache_size);
+ erts_print(to, arg, "cached_segments: %bpu\n", mk->cache_size);
erts_print(to, arg, "cache_hits: %bpu\n", cache_hits);
erts_print(to, arg, "segments: %bpu %bpu %bpu\n",
- segments.current.no, segments.max.no, segments.max_ever.no);
+ mk->segments.current.no, mk->segments.max.no, mk->segments.max_ever.no);
erts_print(to, arg, "segments_size: %bpu %bpu %bpu\n",
- segments.current.sz, segments.max.sz, segments.max_ever.sz);
+ mk->segments.current.sz, mk->segments.max.sz, mk->segments.max_ever.sz);
erts_print(to, arg, "segments_watermark: %bpu\n",
- segments.current.watermark);
+ mk->segments.current.watermark);
}
if (hpp || szp) {
res = NIL;
add_2tup(hpp, szp, &res,
am.segments_watermark,
- bld_unstable_uint(hpp, szp, segments.current.watermark));
+ bld_unstable_uint(hpp, szp, mk->segments.current.watermark));
add_4tup(hpp, szp, &res,
am.segments_size,
- bld_unstable_uint(hpp, szp, segments.current.sz),
- bld_unstable_uint(hpp, szp, segments.max.sz),
- bld_unstable_uint(hpp, szp, segments.max_ever.sz));
+ bld_unstable_uint(hpp, szp, mk->segments.current.sz),
+ bld_unstable_uint(hpp, szp, mk->segments.max.sz),
+ bld_unstable_uint(hpp, szp, mk->segments.max_ever.sz));
add_4tup(hpp, szp, &res,
am.segments,
- bld_unstable_uint(hpp, szp, segments.current.no),
- bld_unstable_uint(hpp, szp, segments.max.no),
- bld_unstable_uint(hpp, szp, segments.max_ever.no));
+ bld_unstable_uint(hpp, szp, mk->segments.current.no),
+ bld_unstable_uint(hpp, szp, mk->segments.max.no),
+ bld_unstable_uint(hpp, szp, mk->segments.max_ever.no));
add_2tup(hpp, szp, &res,
am.cache_hits,
bld_unstable_uint(hpp, szp, cache_hits));
add_2tup(hpp, szp, &res,
am.cached_segments,
- bld_unstable_uint(hpp, szp, cache_size));
+ bld_unstable_uint(hpp, szp, mk->cache_size));
}
if (begin_new_max_period) {
- segments.max.no = segments.current.no;
- segments.max.sz = segments.current.sz;
+ mk->segments.max.no = mk->segments.current.no;
+ mk->segments.max.sz = mk->segments.current.sz;
}
return res;
@@ -1320,17 +1358,23 @@ erts_mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size,
void
erts_mseg_clear_cache(void)
{
+ MemKind* mk;
erts_mtx_lock(&mseg_mutex);
- mseg_clear_cache();
+ for (mk=mk_list; mk; mk=mk->next) {
+ mseg_clear_cache(mk);
+ }
erts_mtx_unlock(&mseg_mutex);
}
Uint
erts_mseg_no(void)
{
- Uint n;
+ MemKind* mk;
+ Uint n = 0;
erts_mtx_lock(&mseg_mutex);
- n = segments.current.no;
+ for (mk=mk_list; mk; mk=mk->next) {
+ n += mk->segments.current.no;
+ }
erts_mtx_unlock(&mseg_mutex);
return n;
}
@@ -1341,11 +1385,41 @@ erts_mseg_unit_size(void)
return page_size;
}
-void
-erts_mseg_init(ErtsMsegInit_t *init)
+static void mem_kind_init(MemKind* mk)
{
unsigned i;
+ mk->cache = NULL;
+ mk->cache_end = NULL;
+ mk->max_cached_seg_size = 0;
+ mk->min_cached_seg_size = ~((Uint) 0);
+ mk->cache_size = 0;
+
+ if (max_cache_size > 0) {
+ for (i = 0; i < max_cache_size - 1; i++)
+ mk->cache_descs[i].next = &mk->cache_descs[i + 1];
+ mk->cache_descs[max_cache_size - 1].next = NULL;
+ mk->free_cache_descs = &mk->cache_descs[0];
+ }
+ else
+ mk->free_cache_descs = NULL;
+
+ mk->segments.current.watermark = 0;
+ mk->segments.current.no = 0;
+ mk->segments.current.sz = 0;
+ mk->segments.max.no = 0;
+ mk->segments.max.sz = 0;
+ mk->segments.max_ever.no = 0;
+ mk->segments.max_ever.sz = 0;
+
+ mk->next = mk_list;
+ mk_list = mk;
+}
+
+
+void
+erts_mseg_init(ErtsMsegInit_t *init)
+{
atoms_initialized = 0;
is_init_done = 0;
@@ -1388,40 +1462,35 @@ erts_mseg_init(ErtsMsegInit_t *init)
min_seg_size = ~((Uint) 0);
#endif
- cache = NULL;
- cache_end = NULL;
+ if (max_cache_size > MAX_CACHE_SIZE)
+ max_cache_size = MAX_CACHE_SIZE;
+
cache_hits = 0;
- max_cached_seg_size = 0;
- min_cached_seg_size = ~((Uint) 0);
- cache_size = 0;
+
+#if HALFWORD_HEAP
+ mem_kind_init(&low_mem);
+ mem_kind_init(&hi_mem);
+#else
+ mem_kind_init(&the_mem);
+#endif
is_cache_check_scheduled = 0;
#ifdef ERTS_THREADS_NO_SMP
is_cache_check_requested = 0;
#endif
+}
- if (max_cache_size > MAX_CACHE_SIZE)
- max_cache_size = MAX_CACHE_SIZE;
- if (max_cache_size > 0) {
- for (i = 0; i < max_cache_size - 1; i++)
- cache_descs[i].next = &cache_descs[i + 1];
- cache_descs[max_cache_size - 1].next = NULL;
- free_cache_descs = &cache_descs[0];
+static ERTS_INLINE Uint tot_cache_size(void)
+{
+ MemKind* mk;
+ Uint sz = 0;
+ for (mk=mk_list; mk; mk=mk->next) {
+ sz += mk->cache_size;
}
- else
- free_cache_descs = NULL;
-
- segments.current.watermark = 0;
- segments.current.no = 0;
- segments.current.sz = 0;
- segments.max.no = 0;
- segments.max.sz = 0;
- segments.max_ever.no = 0;
- segments.max_ever.sz = 0;
+ return sz;
}
-
/*
* erts_mseg_late_init() have to be called after all allocators,
* threads and timers have been initialized.
@@ -1439,7 +1508,7 @@ erts_mseg_late_init(void)
#ifdef ERTS_THREADS_NO_SMP
async_handle = handle;
#endif
- if (cache_size)
+ if (tot_cache_size())
schedule_cache_check();
erts_mtx_unlock(&mseg_mutex);
}
@@ -1480,7 +1549,7 @@ erts_mseg_test(unsigned long op,
case 0x406: {
unsigned long res;
erts_mtx_lock(&mseg_mutex);
- res = (unsigned long) cache_size;
+ res = (unsigned long) tot_cache_size();
erts_mtx_unlock(&mseg_mutex);
return res;
}