author    Björn-Egil Dahlberg <[email protected]>  2012-12-03 16:03:01 +0100
committer Björn-Egil Dahlberg <[email protected]>  2012-12-14 15:12:59 +0100
commit    e3b44b75af1c1444136029687aca9b6051f3f39a (patch)
tree      d076721055fee422a42ce20565d5f91cd88c3644 /erts
parent    e976eb64736435b4c79bb53947a77d8bc04a0481 (diff)
erts: Add mseg cache for large sbc segments
* Cache segments whose sizes are not powers of two ("unpowered" segments)
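
The patch keeps non-power-of-two ("unpowered") single block carrier segments on a per-MemKind singly linked list: cache_bless_segment() pushes a freed segment onto mk->cache_unpowered, and cache_get_segment() scans that list first-fit, accepting a cached segment only if its overshoot stays below abs_max_cache_bad_fit, reporting the segment's real size back through the size pointer. The standalone C sketch below models that lookup policy under simplified, hypothetical names (seg_cache, cache_bless, cache_get, ABS_MAX_BAD_FIT standing in for ma->abs_max_cache_bad_fit); it illustrates the idea and is not the ERTS code itself.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define ABS_MAX_BAD_FIT (16 * 1024) /* illustrative bound, cf. abs_max_cache_bad_fit */

typedef struct seg_cache {
    void  *seg;               /* cached segment */
    size_t size;              /* its actual size in bytes */
    struct seg_cache *next;
} seg_cache;

/* Put a freed, non-power-of-two segment at the head of the cache list. */
static void cache_bless(seg_cache **head, void *seg, size_t size)
{
    seg_cache *c = malloc(sizeof(seg_cache));
    if (!c)
        return; /* no descriptor available: caller frees the segment instead */
    c->seg  = seg;
    c->size = size;
    c->next = *head;
    *head   = c;
}

/* First fit: return a cached segment of at least `*size_p` bytes whose
 * overshoot is below ABS_MAX_BAD_FIT, and report the segment's real size
 * back through `size_p`, as the patched cache_get_segment() does. */
static void *cache_get(seg_cache **head, size_t *size_p)
{
    seg_cache *c = *head, *pc = c;

    while (c) {
        if (c->size >= *size_p && (c->size - *size_p) < ABS_MAX_BAD_FIT) {
            void *seg = c->seg;
            if (pc == c)
                *head = c->next;    /* unlink list head */
            else
                pc->next = c->next; /* unlink interior node */
            *size_p = c->size;      /* caller must adopt the real size */
            free(c);
            return seg;
        }
        pc = c;
        c = c->next;
    }
    return NULL; /* miss: caller maps a fresh segment */
}

int main(void)
{
    seg_cache *cache = NULL;
    size_t want = 3000;

    cache_bless(&cache, malloc(5000), 5000);
    cache_bless(&cache, malloc(3000), 3000);

    void *seg = cache_get(&cache, &want);
    printf("got %p, actual size %zu\n", seg, want); /* exact 3000-byte fit wins */
    free(seg);

    while (cache) {               /* drop whatever is still cached */
        seg_cache *next = cache->next;
        free(cache->seg);
        free(cache);
        cache = next;
    }
    return 0;
}

Because a hit may hand back a segment larger than requested, the caller has to adopt the size reported through the pointer; that is why the patch changes cache_get_segment() and its caller in mseg_alloc() to pass Uint *size_p rather than Uint size.
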
Diffstat (limited to 'erts')
-rw-r--r--  erts/emulator/sys/common/erl_mseg.c | 115
1 file changed, 103 insertions(+), 12 deletions(-)
diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c
index c8dd50bf7a..7db9da6aba 100644
--- a/erts/emulator/sys/common/erl_mseg.c
+++ b/erts/emulator/sys/common/erl_mseg.c
@@ -183,6 +183,7 @@ typedef struct ErtsMsegAllctr_t_ ErtsMsegAllctr_t;
 struct mem_kind_t {
     cache_t cache[MAX_CACHE_SIZE];
+    cache_t *cache_unpowered;
     cache_t *cache_area[CACHE_AREAS];
     cache_t *cache_free;
@@ -529,13 +530,15 @@ static ERTS_INLINE void mseg_cache_free_descriptor(MemKind *mk, cache_t *c) {
 static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, Uint size) {
+    cache_t *c;
     ERTS_DBG_MK_CHK_THR_ACCESS(mk);
-    if (IS_2POW(size)) {
-        int ix = SIZE_TO_CACHE_AREA_IDX(size);
-        cache_t *c;
-        if (ix < CACHE_AREAS && mk->cache_free) {
-            ASSERT( (1 << (ix + MSEG_ALIGN_BITS)) == size);
+    if (mk->cache_free) {
+        if (IS_2POW(size)) {
+            int ix = SIZE_TO_CACHE_AREA_IDX(size);
+
+            ASSERT(ix < CACHE_AREAS);
+            ASSERT((1 << (ix + MSEG_ALIGN_BITS)) == size);

             /* unlink from free cache list */
             c = mseg_cache_alloc_descriptor(mk);
@@ -548,6 +551,23 @@ static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, Uint size) {
             mk->cache_area[ix] = c;
             mk->cache_size++;
+            ASSERT(mk->cache_size <= mk->ma->max_cache_size);
+
+            return 1;
+        } else {
+            /* unlink from free cache list */
+            c = mseg_cache_alloc_descriptor(mk);
+
+            /* link to cache area */
+            c->seg = seg;
+            c->size = size;
+            c->next = mk->cache_unpowered;
+
+            mk->cache_unpowered = c;
+            mk->cache_size++;
+
+            ASSERT(mk->cache_size <= mk->ma->max_cache_size);
+
             return 1;
         }
     }
@@ -555,7 +575,9 @@ static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, Uint size) {
     return 0;
 }

-static ERTS_INLINE void *cache_get_segment(MemKind *mk, Uint size) {
+static ERTS_INLINE void *cache_get_segment(MemKind *mk, Uint *size_p) {
+
+    Uint size = *size_p;
     ERTS_DBG_MK_CHK_THR_ACCESS(mk);

     if (IS_2POW(size)) {
@@ -598,8 +620,46 @@ static ERTS_INLINE void *cache_get_segment(MemKind *mk, Uint size) {
             ASSERT(csize == size);
             return seg;
         }
-    }
+    }
+    else if (mk->cache_unpowered) {
+        void *seg;
+        cache_t *c, *pc;
+        Uint csize;
+        Uint bad_max_abs = mk->ma->abs_max_cache_bad_fit;
+        /* Uint bad_max_rel = mk->ma->rel_max_cache_bad_fit; */
+
+        c = mk->cache_unpowered;
+        pc = c;
+
+        while (c) {
+            csize = c->size;
+            if (csize >= size && (csize - size) < bad_max_abs) {
+
+                /* unlink from cache area */
+                seg = c->seg;
+
+                if (pc == c) {
+                    mk->cache_unpowered = c->next;
+                } else {
+                    pc->next = c->next;
+                }
+                c->next = NULL;
+                mk->cache_size--;
+                mk->cache_hits++;
+
+                /* link to free cache list */
+                mseg_cache_free_descriptor(mk, c);
+                *size_p = csize;
+
+                return seg;
+            }
+
+            pc = c;
+            c = c->next;
+        }
+    }
     return NULL;
 }
@@ -607,6 +667,30 @@ static ERTS_INLINE void *cache_get_segment(MemKind *mk, Uint size) {
  * Slowly remove segments cached in the allocator by
  * using callbacks from aux-work in the scheduler.
  */
+
+static ERTS_INLINE Uint mseg_drop_one_memkind_cache_size(MemKind *mk, cache_t **head) {
+    cache_t *c = NULL;
+
+    c = *head;
+
+    ASSERT( c != NULL );
+
+    *head = c->next;
+
+    if (erts_mtrace_enabled)
+        erts_mtrace_crr_free(SEGTYPE, SEGTYPE, c->seg);
+
+    mseg_destroy(mk->ma, mk, c->seg, c->size);
+    mseg_cache_free_descriptor(mk, c);
+
+    mk->segments.current.watermark--;
+    mk->cache_size--;
+
+    ASSERT( mk->cache_size >= 0 );
+
+    return mk->cache_size;
+}
+
 static ERTS_INLINE Uint mseg_drop_memkind_cache_size(MemKind *mk, int ix) {
     cache_t *c = NULL, *next = NULL;
@@ -644,12 +728,15 @@ static ERTS_INLINE Uint mseg_drop_memkind_cache_size(MemKind *mk, int ix) {
 static Uint mseg_check_memkind_cache(MemKind *mk) {
     int i;

-    /* remove biggest first (less likly to be reused) */
     ERTS_DBG_MK_CHK_THR_ACCESS(mk);
-    for (i = CACHE_AREAS - 1; i > 0; i--) {
+
+    if (mk->cache_unpowered)
+        return mseg_drop_one_memkind_cache_size(mk, &(mk->cache_unpowered));
+
+    for (i = 0; i < CACHE_AREAS; i++) {
         if (mk->cache_area[i] != NULL)
-            return mseg_drop_memkind_cache_size(mk, i);
+            return mseg_drop_one_memkind_cache_size(mk, &(mk->cache_area[i]));
     }

     return 0;
@@ -769,7 +856,7 @@ mseg_alloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, Uint *size_p,
         ma->min_seg_size = size;
 #endif

-    if (opt->cache && mk->cache_size > 0 && (seg = cache_get_segment(mk, size)) != NULL)
+    if (opt->cache && mk->cache_size > 0 && (seg = cache_get_segment(mk, &size)) != NULL)
         goto done;

     if ((seg = mseg_create(ma, mk, size)) == NULL)
@@ -1451,13 +1538,17 @@ static void mem_kind_init(ErtsMsegAllctr_t *ma, MemKind* mk, const char* name)
     mk->cache_free = NULL;

-    for (i = 0; i < MAX_CACHE_SIZE; i++) {
+    ASSERT(ma->max_cache_size <= MAX_CACHE_SIZE);
+
+    for (i = 0; i < ma->max_cache_size; i++) {
         mk->cache[i].seg = NULL;
         mk->cache[i].size = 0;
         mk->cache[i].next = mk->cache_free;
         mk->cache_free = &(mk->cache[i]);
     }

+    mk->cache_unpowered = NULL;
+
     mk->cache_size = 0;
     mk->cache_hits = 0;
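
As the comment in the cache-removal hunk notes, cached segments are released slowly via aux-work callbacks in the scheduler. The new mseg_drop_one_memkind_cache_size() destroys exactly one cached segment per call, and mseg_check_memkind_cache() now prefers an unpowered entry before walking the power-of-two areas from the smallest index up. Below is a rough sketch of that eviction order, reusing the hypothetical seg_cache type from the sketch above; free() stands in for mseg_destroy(), and the mtrace and watermark bookkeeping of the real code is omitted.

#define CACHE_AREAS 10 /* illustrative, cf. the real CACHE_AREAS */

typedef struct {
    seg_cache *cache_area[CACHE_AREAS]; /* power-of-two size buckets */
    seg_cache *cache_unpowered;         /* new: odd-sized segments */
    unsigned   cache_size;              /* total cached entries */
} mem_kind;

/* Destroy exactly one segment from the list `*head` points at,
 * mirroring mseg_drop_one_memkind_cache_size(); the real code
 * asserts that *head is non-NULL before unlinking. */
static unsigned drop_one(mem_kind *mk, seg_cache **head)
{
    seg_cache *c = *head;
    *head = c->next;
    free(c->seg); /* stands in for mseg_destroy() */
    free(c);
    return --mk->cache_size;
}

/* Pick the next victim: unpowered entries first, then the
 * power-of-two areas from the smallest up, mirroring the new
 * mseg_check_memkind_cache(). Returns entries still cached. */
static unsigned check_cache(mem_kind *mk)
{
    int i;

    if (mk->cache_unpowered)
        return drop_one(mk, &mk->cache_unpowered);

    for (i = 0; i < CACHE_AREAS; i++) {
        if (mk->cache_area[i] != NULL)
            return drop_one(mk, &mk->cache_area[i]);
    }
    return 0; /* cache empty: nothing left for aux-work to do */
}

Calling check_cache() repeatedly until it returns 0 models how the scheduler's aux-work callbacks drain the cache one segment at a time instead of unmapping everything at once.
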