From 8d4210b25a791c37deca8a37b14565fd6fad2d1f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bj=C3=B6rn-Egil=20Dahlberg?=
Date: Wed, 31 Oct 2012 16:46:38 +0100
Subject: erts: Remove faked MSEG_ALLOC

* Only used for valgrind, and in that case mseg is disabled
  completely via Meamin anyway.
---
 erts/emulator/sys/common/erl_mseg.c | 24 +++---------------------
 1 file changed, 3 insertions(+), 21 deletions(-)

(limited to 'erts/emulator/sys/common')

diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c
index db2854fa40..1235a32e8c 100644
--- a/erts/emulator/sys/common/erl_mseg.c
+++ b/erts/emulator/sys/common/erl_mseg.c
@@ -116,15 +116,6 @@ static int mmap_fd;
 #error "Not supported"
 #endif /* #if HAVE_MMAP */
 
-#if defined(ERTS_MSEG_FAKE_SEGMENTS) && HALFWORD_HEAP
-# warning "ERTS_MSEG_FAKE_SEGMENTS will only be used for high memory segments"
-#endif
-
-#if defined(ERTS_MSEG_FAKE_SEGMENTS)
-#undef CAN_PARTLY_DESTROY
-#define CAN_PARTLY_DESTROY 0
-#endif
-
 const ErtsMsegOpt_t erts_mseg_default_opt = {
     1,		/* Use cache */
     1,		/* Preserv data */
@@ -349,9 +340,7 @@ mseg_create(ErtsMsegAllctr_t *ma, MemKind* mk, Uint size)
     else
 #endif
     {
-#if defined(ERTS_MSEG_FAKE_SEGMENTS)
-	seg = erts_sys_alloc(ERTS_ALC_N_INVALID, NULL, size);
-#elif HAVE_MMAP
+#if HAVE_MMAP
     {
 	seg = (void *) mmap((void *) 0, (size_t) size,
 			    MMAP_PROT, MMAP_FLAGS, MMAP_FD, 0);
@@ -385,12 +374,7 @@ mseg_destroy(ErtsMsegAllctr_t *ma, MemKind* mk, void *seg, Uint size)
     else
 #endif
     {
-#ifdef ERTS_MSEG_FAKE_SEGMENTS
-	erts_sys_free(ERTS_ALC_N_INVALID, NULL, seg);
-#ifdef DEBUG
-	res = 0;
-#endif
-#elif HAVE_MMAP
+#ifdef HAVE_MMAP
 #ifdef DEBUG
 	res =
 #endif
@@ -426,9 +410,7 @@ mseg_recreate(ErtsMsegAllctr_t *ma, MemKind* mk, void *old_seg, Uint old_size, U
     else
 #endif
     {
-#if defined(ERTS_MSEG_FAKE_SEGMENTS)
-	new_seg = erts_sys_realloc(ERTS_ALC_N_INVALID, NULL, old_seg, new_size);
-#elif HAVE_MREMAP
+#if HAVE_MREMAP
 #if defined(__NetBSD__)
 	new_seg = (void *) mremap((void *) old_seg,
-- cgit v1.2.3


From c3762df91e6ccc322a08456d429ceb856316aae7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bj=C3=B6rn-Egil=20Dahlberg?=
Date: Thu, 1 Nov 2012 15:41:00 +0100
Subject: erts: Let mseg allocate larger alignments

---
 erts/emulator/sys/common/erl_mseg.c | 59 ++++++++++++++++++++++++++++++++-----
 1 file changed, 51 insertions(+), 8 deletions(-)

(limited to 'erts/emulator/sys/common')

diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c
index 1235a32e8c..e77287f2ef 100644
--- a/erts/emulator/sys/common/erl_mseg.c
+++ b/erts/emulator/sys/common/erl_mseg.c
@@ -57,6 +57,9 @@
 /* Implement some other way to get the real page size if needed!
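 * (An illustrative sketch of such a fallback, not part of the actual
 * source; sysconf() is POSIX and the helper name is invented here:
 *
 *     #include <unistd.h>
 *     static size_t real_page_size(void) {
 *         long ps = sysconf(_SC_PAGESIZE);     // -1 if unavailable
 *         return ps > 0 ? (size_t) ps : 4096;  // assume 4kB pages
 *     }
 * )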
*/ #endif +#define ALIGN_BITS (14) +#define ALIGNED_SIZE (1 << ALIGN_BITS) /* 16kB */ + #define MAX_CACHE_SIZE 30 #undef MIN @@ -71,6 +74,12 @@ #define PAGE_CEILING(X) PAGE_FLOOR((X) + INV_PAGE_MASK) #define PAGES(X) ((X) >> page_shift) +#define INV_ALIGNED_MASK ((Uint) ((ALIGNED_SIZE) - 1)) +#define ALIGNED_MASK (~INV_ALIGNED_MASK) +#define ALIGNED_FLOOR(X) (((Uint)(X)) & ALIGNED_MASK) +#define ALIGNED_CEILING(X) ALIGNED_FLOOR((X) + INV_ALIGNED_MASK) +#define MAP_IS_ALIGNED(X) ((((Uint)X) & (ALIGNED_SIZE - 1)) == 0) + static int atoms_initialized; typedef struct mem_kind_t MemKind; @@ -322,12 +331,45 @@ schedule_cache_check(ErtsMsegAllctr_t *ma) } } +static ERTS_INLINE void * +mmap_align(void *addr, size_t length, int prot, int flags, int fd, off_t offset) { + + void *seg, *aseg; + Uint diff; + + seg = mmap(addr, length, prot, flags, fd, offset); + + if (MAP_IS_ALIGNED(seg) || seg == MAP_FAILED) { + return seg; + } + + munmap(seg, length); + + seg = mmap(addr, length + ALIGNED_SIZE, prot, flags, fd, offset); + if (seg == MAP_FAILED) { + return seg; + } + + /* ceil to aligned pointer */ + aseg = (void *)(((Uint)(seg + ALIGNED_SIZE)) & (~(ALIGNED_SIZE - 1))); + diff = aseg - seg; + + if (diff > 0) { + munmap(seg, diff); + } + + if (ALIGNED_SIZE - diff > 0) { + munmap((void *) (aseg + length), ALIGNED_SIZE - diff); + } + + return aseg; +} + static ERTS_INLINE void * mseg_create(ErtsMsegAllctr_t *ma, MemKind* mk, Uint size) { void *seg; - - ASSERT(size % page_size == 0); + ASSERT(size % ALIGNED_SIZE == 0); #if HALFWORD_HEAP if (mk == &ma->low_mem) { @@ -342,7 +384,7 @@ mseg_create(ErtsMsegAllctr_t *ma, MemKind* mk, Uint size) { #if HAVE_MMAP { - seg = (void *) mmap((void *) 0, (size_t) size, + seg = (void *) mmap_align((void *) 0, (size_t) size, MMAP_PROT, MMAP_FLAGS, MMAP_FD, 0); if (seg == (void *) MAP_FAILED) seg = NULL; @@ -358,8 +400,7 @@ mseg_create(ErtsMsegAllctr_t *ma, MemKind* mk, Uint size) } static ERTS_INLINE void -mseg_destroy(ErtsMsegAllctr_t *ma, MemKind* mk, void *seg, Uint size) -{ +mseg_destroy(ErtsMsegAllctr_t *ma, MemKind* mk, void *seg, Uint size) { #ifdef DEBUG int res; #endif @@ -651,7 +692,7 @@ mseg_alloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, Uint *size_p, INC_CC(ma, alloc); - size = PAGE_CEILING(*size_p); + size = ALIGNED_CEILING(*size_p); #if CAN_PARTLY_DESTROY if (size < ma->min_seg_size) @@ -837,7 +878,7 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, mk = memkind(ma, opt); new_seg = seg; - new_size = PAGE_CEILING(*new_size_p); + new_size = ALIGNED_CEILING(*new_size_p); if (new_size == old_size) ; @@ -1478,7 +1519,7 @@ erts_mseg_no(const ErtsMsegOpt_t *opt) Uint erts_mseg_unit_size(void) { - return page_size; + return ALIGNED_SIZE; } static void mem_kind_init(ErtsMsegAllctr_t *ma, MemKind* mk, const char* name) @@ -1553,8 +1594,10 @@ erts_mseg_init(ErtsMsegInit_t *init) #endif page_size = GET_PAGE_SIZE; + ASSERT( (ALIGNED_SIZE % page_size) == 0); page_shift = 1; + /* page size alignment assertion */ while ((page_size >> page_shift) != 1) { if ((page_size & (1 << (page_shift - 1))) != 0) erl_exit(ERTS_ABORT_EXIT, -- cgit v1.2.3 From a758faacea2e77039113995596ad87f67a48eec3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn-Egil=20Dahlberg?= Date: Thu, 1 Nov 2012 19:30:03 +0100 Subject: erts: Teach mseg alloc the value of halfword beam --- erts/emulator/sys/common/erl_mseg.c | 222 +++++++++++++++--------------------- 1 file changed, 92 insertions(+), 130 deletions(-) (limited to 'erts/emulator/sys/common') diff --git 
a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c index e77287f2ef..612b98cb12 100644 --- a/erts/emulator/sys/common/erl_mseg.c +++ b/erts/emulator/sys/common/erl_mseg.c @@ -78,7 +78,7 @@ #define ALIGNED_MASK (~INV_ALIGNED_MASK) #define ALIGNED_FLOOR(X) (((Uint)(X)) & ALIGNED_MASK) #define ALIGNED_CEILING(X) ALIGNED_FLOOR((X) + INV_ALIGNED_MASK) -#define MAP_IS_ALIGNED(X) ((((Uint)X) & (ALIGNED_SIZE - 1)) == 0) +#define MAP_IS_ALIGNED(X) ((((unsigned long)X) & (ALIGNED_SIZE - 1)) == 0) static int atoms_initialized; @@ -335,7 +335,7 @@ static ERTS_INLINE void * mmap_align(void *addr, size_t length, int prot, int flags, int fd, off_t offset) { void *seg, *aseg; - Uint diff; + unsigned long diff; seg = mmap(addr, length, prot, flags, fd, offset); @@ -345,13 +345,12 @@ mmap_align(void *addr, size_t length, int prot, int flags, int fd, off_t offset) munmap(seg, length); - seg = mmap(addr, length + ALIGNED_SIZE, prot, flags, fd, offset); - if (seg == MAP_FAILED) { + if ((seg = mmap(addr, length + ALIGNED_SIZE, prot, flags, fd, offset)) == MAP_FAILED) { return seg; } /* ceil to aligned pointer */ - aseg = (void *)(((Uint)(seg + ALIGNED_SIZE)) & (~(ALIGNED_SIZE - 1))); + aseg = (void *)(((unsigned long)(seg + ALIGNED_SIZE)) & (~(ALIGNED_SIZE - 1))); diff = aseg - seg; if (diff > 0) { @@ -401,25 +400,17 @@ mseg_create(ErtsMsegAllctr_t *ma, MemKind* mk, Uint size) static ERTS_INLINE void mseg_destroy(ErtsMsegAllctr_t *ma, MemKind* mk, void *seg, Uint size) { -#ifdef DEBUG - int res; -#endif + ERTS_DECLARE_DUMMY(int res); #if HALFWORD_HEAP if (mk == &ma->low_mem) { -#ifdef DEBUG - res = -#endif - pmunmap((void *) seg, size); + res = pmunmap((void *) seg, size); } else #endif { #ifdef HAVE_MMAP -#ifdef DEBUG - res = -#endif - munmap((void *) seg, size); + res = munmap((void *) seg, size); #else # error "Missing mseg_destroy() implementation" #endif @@ -439,8 +430,8 @@ mseg_recreate(ErtsMsegAllctr_t *ma, MemKind* mk, void *old_seg, Uint old_size, U { void *new_seg; - ASSERT(old_size % page_size == 0); - ASSERT(new_size % page_size == 0); + ASSERT(old_size % ALIGNED_SIZE == 0); + ASSERT(new_size % ALIGNED_SIZE == 0); #if HALFWORD_HEAP if (mk == &ma->low_mem) { @@ -1594,7 +1585,7 @@ erts_mseg_init(ErtsMsegInit_t *init) #endif page_size = GET_PAGE_SIZE; - ASSERT( (ALIGNED_SIZE % page_size) == 0); + ASSERT((ALIGNED_SIZE % page_size) == 0); page_shift = 1; /* page size alignment assertion */ @@ -1732,7 +1723,40 @@ erts_mseg_test(unsigned long op, * mapping tricks. 
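 * (In short: initialize_pmmap() below reserves the whole low address
 * range up front with a single PROT_NONE mapping, and pmmap()/pmunmap()
 * then hand out and reclaim aligned pieces of that range through
 * do_map()/do_unmap(), which remap in place over the reservation.)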
*/ -/*#define HARDDEBUG 1*/ +/* #define HARDDEBUG 1 */ + +#ifdef HARDDEBUG +static void dump_freelist(void) +{ + FreeBlock *p = first; + + while (p) { + fprintf(stderr, "p = %p\r\np->num = %ld\r\np->next = %p\r\n\r\n", + (void *) p, (unsigned long) p->num, (void *) p->next); + p = p->next; + } +} + +#define HARDDEBUG_HW_INCOMPLETE_ALIGNMENT(PTR, SZ) \ + fprintf(stderr,"Mapping of address %p with size %ld " \ + "does not map complete pages (%s:%d)\r\n", \ + (void *) (PTR), (unsigned long) (SZ),__FILE__, __LINE__) + +#define HARDDEBUG_HW_UNALIGNED_ALIGNMENT(PTR, SZ) \ + fprintf(stderr,"Mapping of address %p with size %ld " \ + "is not page aligned (%s:%d)\r\n", \ + (void *) (PTR), (unsigned long) (SZ),__FILE__, __LINE__) + +#define HARDDEBUG_MAP_FAILED(PTR, SZ) \ + fprintf(stderr, "Could not actually map memory " \ + "at address %p with size %ld (%s:%d) ..\r\n", \ + (void *) (PTR), (unsigned long) (SZ),__FILE__, __LINE__) +#else +#define HARDDEBUG_HW_INCOMPLETE_ALIGNMENT(PTR, SZ) do{}while(0) +#define HARDDEBUG_HW_UNALIGNED_ALIGNMENT(PTR, SZ) do{}while(0) +#define HARDDEBUG_MAP_FAILED(PTR, SZ) do{}while(0) +#endif + #ifdef __APPLE__ #define MAP_ANONYMOUS MAP_ANON @@ -1751,49 +1775,20 @@ typedef struct _free_block { struct _free_block *next; } FreeBlock; -/* Assigned once and for all */ -static size_t pagsz; - /* Protect with lock */ static FreeBlock *first; -static size_t round_up_to_pagesize(size_t size) -{ - size_t x = size / pagsz; - - if ((size % pagsz)) { - ++x; - } - - return pagsz * x; -} - -static size_t round_down_to_pagesize(size_t size) -{ - size_t x = size / pagsz; - - return pagsz * x; -} - static void *do_map(void *ptr, size_t sz) { void *res; - if (round_up_to_pagesize(sz) != sz) { -#ifdef HARDDEBUG - fprintf(stderr,"Mapping of address %p with size %ld " - "does not map complete pages\r\n", - (void *) ptr, (unsigned long) sz); -#endif + if (ALIGNED_CEILING(sz) != sz) { + HARDDEBUG_HW_INCOMPLETE_ALIGNMENT(ptr, sz); return NULL; } - if (((unsigned long) ptr) % pagsz) { -#ifdef HARDDEBUG - fprintf(stderr,"Mapping of address %p with size %ld " - "is not page aligned\r\n", - (void *) ptr, (unsigned long) sz); -#endif + if (((unsigned long) ptr) % ALIGNED_SIZE) { + HARDDEBUG_HW_UNALIGNED_ALIGNMENT(ptr, sz); return NULL; } @@ -1807,10 +1802,7 @@ static void *do_map(void *ptr, size_t sz) #endif if (res == MAP_FAILED) { -#ifdef HARDDEBUG - fprintf(stderr,"Mapping of address %p with size %ld failed!\r\n", - (void *) ptr, (unsigned long) sz); -#endif + HARDDEBUG_MAP_FAILED(ptr, sz); return NULL; } @@ -1821,35 +1813,22 @@ static int do_unmap(void *ptr, size_t sz) { void *res; - if (round_up_to_pagesize(sz) != sz) { -#ifdef HARDDEBUG - fprintf(stderr,"Mapping of address %p with size %ld " - "does not map complete pages\r\n", - (void *) ptr, (unsigned long) sz); -#endif + if (ALIGNED_CEILING(sz) != sz) { + HARDDEBUG_HW_INCOMPLETE_ALIGNMENT(ptr, sz); return 1; } - if (((unsigned long) ptr) % pagsz) { -#ifdef HARDDEBUG - fprintf(stderr,"Mapping of address %p with size %ld " - "is not page aligned\r\n", - (void *) ptr, (unsigned long) sz); -#endif + if (((unsigned long) ptr) % ALIGNED_SIZE) { + HARDDEBUG_HW_UNALIGNED_ALIGNMENT(ptr, sz); return 1; } - res = mmap(ptr, sz, - PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE - | MAP_FIXED, + PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, -1 , 0); if (res == MAP_FAILED) { -#ifdef HARDDEBUG - fprintf(stderr,"Mapping of address %p with size %ld failed!\r\n", - (void *) ptr, (unsigned long) sz); -#endif + 
HARDDEBUG_MAP_FAILED(ptr, sz); return 1; } @@ -1887,8 +1866,6 @@ static int initialize_pmmap(void) size_t rsz; FreeBlock *initial; - - pagsz = getpagesize(); SET_RANGE_MIN(); if (sizeof(void *) != 8) { erl_exit(1,"Halfword emulator cannot be run in 32bit mode"); @@ -1897,15 +1874,15 @@ static int initialize_pmmap(void) p = (char *) RANGE_MIN; q = (char *) RANGE_MAX; - rsz = round_down_to_pagesize(q - p); + rsz = ALIGNED_FLOOR(q - p); - rptr = mmap((void *) p, rsz, + rptr = mmap_align((void *) p, rsz, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | EXTRA_MAP_FLAGS, -1 , 0); #ifdef HARDDEBUG printf("p=%p, rsz = %ld, pages = %ld, got range = %p -> %p\r\n", - p, (unsigned long) rsz, (unsigned long) (rsz / pagsz), + p, (unsigned long) rsz, (unsigned long) (rsz / ALIGNED_SIZE), (void *) rptr, (void*)(rptr + rsz)); #endif if ((UWord)(rptr + rsz) > RANGE_MAX) { @@ -1917,39 +1894,27 @@ static int initialize_pmmap(void) munmap((void*)RANGE_MAX, rsz - rsz_trunc); rsz = rsz_trunc; } - if (!do_map(rptr,pagsz)) { + if (!do_map(rptr, ALIGNED_SIZE)) { erl_exit(1,"Could not actually mmap first page for halfword emulator...\n"); } initial = (FreeBlock *) rptr; - initial->num = (rsz / pagsz); + initial->num = (rsz / ALIGNED_SIZE); initial->next = NULL; first = initial; INIT_LOCK(); return 0; } -#ifdef HARDDEBUG -static void dump_freelist(void) -{ - FreeBlock *p = first; - - while (p) { - printf("p = %p\r\np->num = %ld\r\np->next = %p\r\n\r\n", - (void *) p, (unsigned long) p->num, (void *) p->next); - p = p->next; - } -} -#endif - - static void *pmmap(size_t size) { - size_t real_size = round_up_to_pagesize(size); - size_t num_pages = real_size / pagsz; + size_t real_size = ALIGNED_CEILING(size); + size_t num_pages = real_size / ALIGNED_SIZE; FreeBlock **block; FreeBlock *tail; FreeBlock *res; + TAKE_LOCK(); + for (block = &first; *block != NULL && (*block)->num < num_pages; block = &((*block)->next)) @@ -1960,29 +1925,25 @@ static void *pmmap(size_t size) } if ((*block)->num == num_pages) { /* nice, perfect fit */ - res = *block; + res = *block; *block = (*block)->next; } else { tail = (FreeBlock *) (((char *) ((void *) (*block))) + real_size); - if (!do_map(tail,pagsz)) { -#ifdef HARDDEBUG - fprintf(stderr, "Could not actually allocate page at %p...\r\n", - (void *) tail); -#endif + if (!do_map(tail, ALIGNED_SIZE)) { + HARDDEBUG_MAP_FAILED(tail, ALIGNED_SIZE); RELEASE_LOCK(); return NULL; } - tail->num = (*block)->num - num_pages; + tail->num = (*block)->num - num_pages; tail->next = (*block)->next; res = *block; *block = tail; } + RELEASE_LOCK(); - if (!do_map(res,real_size)) { -#ifdef HARDDEBUG - fprintf(stderr, "Could not actually allocate %ld at %p...\r\n", - (unsigned long) real_size, (void *) res); -#endif + + if (!do_map(res, real_size)) { + HARDDEBUG_MAP_FAILED(res, real_size); return NULL; } @@ -1991,15 +1952,17 @@ static void *pmmap(size_t size) static int pmunmap(void *p, size_t size) { - size_t real_size = round_up_to_pagesize(size); - size_t num_pages = real_size / pagsz; + size_t real_size = ALIGNED_CEILING(size); + size_t num_pages = real_size / ALIGNED_SIZE; + FreeBlock *block; FreeBlock *last; FreeBlock *nb = (FreeBlock *) p; ASSERT(((unsigned long)p & CHECK_POINTER_MASK)==0); - if (real_size > pagsz) { - if (do_unmap(((char *) p) + pagsz,real_size - pagsz)) { + + if (real_size > ALIGNED_SIZE) { + if (do_unmap(((char *) p) + ALIGNED_SIZE, real_size - ALIGNED_SIZE)) { return 1; } } @@ -2018,7 +1981,7 @@ static int pmunmap(void *p, size_t size) /* Merge new free block with following 
*/ nb->num = block->num + num_pages; nb->next = block->next; - if (do_unmap(block,pagsz)) { + if (do_unmap(block, ALIGNED_SIZE)) { RELEASE_LOCK(); return 1; } @@ -2028,11 +1991,11 @@ static int pmunmap(void *p, size_t size) nb->next = block; } if (last != NULL) { - if (p == ((void *) (((char *) last) + (last->num * pagsz)))) { + if (p == ((void *) (((char *) last) + (last->num * ALIGNED_SIZE)))) { /* Merge with previous */ last->num += nb->num; last->next = nb->next; - if (do_unmap(nb,pagsz)) { + if (do_unmap(nb, ALIGNED_SIZE)) { RELEASE_LOCK(); return 1; } @@ -2049,10 +2012,10 @@ static int pmunmap(void *p, size_t size) static void *pmremap(void *old_address, size_t old_size, size_t new_size) { - size_t new_real_size = round_up_to_pagesize(new_size); - size_t new_num_pages = new_real_size / pagsz; - size_t old_real_size = round_up_to_pagesize(old_size); - size_t old_num_pages = old_real_size / pagsz; + size_t new_real_size = ALIGNED_CEILING(new_size); + size_t new_num_pages = new_real_size / ALIGNED_SIZE; + size_t old_real_size = ALIGNED_CEILING(old_size); + size_t old_num_pages = old_real_size / ALIGNED_SIZE; if (new_num_pages == old_num_pages) { return old_address; } else if (new_num_pages < old_num_pages) { /* Shrink */ @@ -2070,8 +2033,8 @@ static void *pmremap(void *old_address, size_t old_size, (*block) > ((FreeBlock *)(((char *) vnfb) + nfb_real_size))) { /* Normal link in */ if (nfb_pages > 1) { - if (do_unmap((void *)(((char *) vnfb) + pagsz), - (nfb_pages - 1)*pagsz)) { + if (do_unmap((void *)(((char *) vnfb) + ALIGNED_SIZE), + (nfb_pages - 1)*ALIGNED_SIZE)) { return NULL; } } @@ -2083,8 +2046,8 @@ static void *pmremap(void *old_address, size_t old_size, nfb->num = nfb_pages + (*block)->num; /* unmap also the first page of the next freeblock */ (*block) = nfb; - if (do_unmap((void *)(((char *) vnfb) + pagsz), - nfb_pages*pagsz)) { + if (do_unmap((void *)(((char *) vnfb) + ALIGNED_SIZE), + nfb_pages*ALIGNED_SIZE)) { return NULL; } } @@ -2119,9 +2082,9 @@ static void *pmremap(void *old_address, size_t old_size, size_t remaining_pages = (*block)->num - (new_num_pages - old_num_pages); if (!remaining_pages) { - void *p = (void *) (((char *) (*block)) + pagsz); + void *p = (void *) (((char *) (*block)) + ALIGNED_SIZE); void *n = (*block)->next; - size_t x = ((*block)->num - 1) * pagsz; + size_t x = ((*block)->num - 1) * ALIGNED_SIZE; if (x > 0) { if (do_map(p,x) == NULL) { RELEASE_LOCK(); @@ -2133,7 +2096,7 @@ static void *pmremap(void *old_address, size_t old_size, FreeBlock *nfb = (FreeBlock *) ((void *) (((char *) old_address) + new_real_size)); - void *p = (void *) (((char *) (*block)) + pagsz); + void *p = (void *) (((char *) (*block)) + ALIGNED_SIZE); if (do_map(p,new_real_size - old_real_size) == NULL) { RELEASE_LOCK(); return NULL; @@ -2147,5 +2110,4 @@ static void *pmremap(void *old_address, size_t old_size, } } } - #endif /* HALFWORD_HEAP */ -- cgit v1.2.3 From 770ee09dfbe5c9024432ac8696be6b91d2ec9a9f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn-Egil=20Dahlberg?= Date: Fri, 2 Nov 2012 17:38:46 +0100 Subject: erts: New mseg allocator cache * utilize the power of two --- erts/emulator/sys/common/erl_mseg.c | 707 ++++++++++++++++-------------------- erts/emulator/sys/common/erl_mseg.h | 13 +- 2 files changed, 324 insertions(+), 396 deletions(-) (limited to 'erts/emulator/sys/common') diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c index 612b98cb12..58ee6600d2 100644 --- a/erts/emulator/sys/common/erl_mseg.c +++ 
b/erts/emulator/sys/common/erl_mseg.c @@ -57,35 +57,51 @@ /* Implement some other way to get the real page size if needed! */ #endif -#define ALIGN_BITS (14) -#define ALIGNED_SIZE (1 << ALIGN_BITS) /* 16kB */ +#define IS_2POW(X) (((X) & ((X) - 1)) == 0) +static ERTS_INLINE Uint ceil_2pow(Uint x) { + int i = 1 << (4 + (sizeof(Uint) != 4 ? 1 : 0)); + x--; + do { x |= x >> i; } while(i >>= 1); + return x + 1; +} +static const int debruijn[32] = { + 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, + 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9 +}; + +#define LOG2(X) (debruijn[((Uint32)(((X) & -(X)) * 0x077CB531U)) >> 27]) + +#define ALIGN_BITS (17) +#define ALIGNED_SIZE (1 << ALIGN_BITS) /* 128kB */ -#define MAX_CACHE_SIZE 30 +#define CACHE_AREAS (32 - ALIGN_BITS) + +#define SIZE_TO_CACHE_AREA_IDX(S) (LOG2((S)) - ALIGN_BITS) +#define MAX_CACHE_SIZE (30) #undef MIN #define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) #undef MAX #define MAX(X, Y) ((X) > (Y) ? (X) : (Y)) -#undef PAGE_MASK -#define INV_PAGE_MASK ((Uint) (page_size - 1)) -#define PAGE_MASK (~INV_PAGE_MASK) -#define PAGE_FLOOR(X) ((X) & PAGE_MASK) -#define PAGE_CEILING(X) PAGE_FLOOR((X) + INV_PAGE_MASK) -#define PAGES(X) ((X) >> page_shift) - -#define INV_ALIGNED_MASK ((Uint) ((ALIGNED_SIZE) - 1)) +#define INV_ALIGNED_MASK ((UWord) ((ALIGNED_SIZE) - 1)) #define ALIGNED_MASK (~INV_ALIGNED_MASK) -#define ALIGNED_FLOOR(X) (((Uint)(X)) & ALIGNED_MASK) +#define ALIGNED_FLOOR(X) (((UWord)(X)) & ALIGNED_MASK) #define ALIGNED_CEILING(X) ALIGNED_FLOOR((X) + INV_ALIGNED_MASK) -#define MAP_IS_ALIGNED(X) ((((unsigned long)X) & (ALIGNED_SIZE - 1)) == 0) +#define MAP_IS_ALIGNED(X) (((UWord)(X) & (ALIGNED_SIZE - 1)) == 0) + +#define MSEG_FLG_IS_2POW(X) ((X) & ERTS_MSEG_FLG_2POW) + +#ifdef DEBUG +#define DBG(F,...) fprintf(stderr, (F), __VA_ARGS__ ) +#else +#define DBG(F,...) 
do{}while(0) +#endif static int atoms_initialized; typedef struct mem_kind_t MemKind; -static void mseg_clear_cache(MemKind*); - #if HALFWORD_HEAP static int initialize_pmmap(void); static void *pmmap(size_t size); @@ -137,26 +153,17 @@ const ErtsMsegOpt_t erts_mseg_default_opt = { }; -typedef struct cache_desc_t_ { - void *seg; - Uint size; - struct cache_desc_t_ *next; - struct cache_desc_t_ *prev; -} cache_desc_t; - typedef struct { Uint32 giga_no; Uint32 no; } CallCounter; -static Uint page_size; -static Uint page_shift; - typedef struct { CallCounter alloc; CallCounter dealloc; CallCounter realloc; CallCounter create; + CallCounter create_resize; CallCounter destroy; #if HAVE_MSEG_RECREATE CallCounter recreate; @@ -165,17 +172,24 @@ typedef struct { CallCounter check_cache; } ErtsMsegCalls; +typedef struct cache_t_ cache_t; + +struct cache_t_ { + Uint size; + void *seg; + cache_t *next; +}; + + typedef struct ErtsMsegAllctr_t_ ErtsMsegAllctr_t; struct mem_kind_t { - cache_desc_t cache_descs[MAX_CACHE_SIZE]; - cache_desc_t *free_cache_descs; - cache_desc_t *cache; - cache_desc_t *cache_end; - - Uint cache_size; - Uint min_cached_seg_size; - Uint max_cached_seg_size; + + cache_t cache[MAX_CACHE_SIZE]; + cache_t *cache_area[CACHE_AREAS]; + cache_t *cache_free; + + Sint cache_size; Uint cache_hits; struct { @@ -320,8 +334,7 @@ static erts_mtx_t init_atoms_mutex; /* Also needed when !USE_THREADS */ static ERTS_INLINE void -schedule_cache_check(ErtsMsegAllctr_t *ma) -{ +schedule_cache_check(ErtsMsegAllctr_t *ma) { if (!ma->is_cache_check_scheduled && ma->is_init_done) { erts_set_aux_work_timeout(ma->ix, @@ -331,8 +344,11 @@ schedule_cache_check(ErtsMsegAllctr_t *ma) } } +/* remove ErtsMsegAllctr_t from arguments? + * only used for statistics + */ static ERTS_INLINE void * -mmap_align(void *addr, size_t length, int prot, int flags, int fd, off_t offset) { +mmap_align(ErtsMsegAllctr_t *ma, void *addr, size_t length, int prot, int flags, int fd, off_t offset) { void *seg, *aseg; unsigned long diff; @@ -343,6 +359,9 @@ mmap_align(void *addr, size_t length, int prot, int flags, int fd, off_t offset) return seg; } + if (ma) + INC_CC(ma, create_resize); + munmap(seg, length); if ((seg = mmap(addr, length + ALIGNED_SIZE, prot, flags, fd, offset)) == MAP_FAILED) { @@ -383,7 +402,7 @@ mseg_create(ErtsMsegAllctr_t *ma, MemKind* mk, Uint size) { #if HAVE_MMAP { - seg = (void *) mmap_align((void *) 0, (size_t) size, + seg = (void *) mmap_align(ma, (void *) 0, (size_t) size, MMAP_PROT, MMAP_FLAGS, MMAP_FD, 0); if (seg == (void *) MAP_FAILED) seg = NULL; @@ -416,7 +435,7 @@ mseg_destroy(ErtsMsegAllctr_t *ma, MemKind* mk, void *seg, Uint size) { #endif } - ASSERT(size % page_size == 0); + ASSERT(size % ALIGNED_SIZE == 0); ASSERT(res == 0); INC_CC(ma, destroy); @@ -489,151 +508,178 @@ do { \ #define ERTS_DBG_MK_CHK_THR_ACCESS(MK) #endif -static ERTS_INLINE cache_desc_t * -alloc_cd(MemKind* mk) -{ - cache_desc_t *cd = mk->free_cache_descs; +/* NEW CACHE interface */ + +static ERTS_INLINE cache_t *mseg_cache_alloc_descriptor(MemKind *mk) { + cache_t *c = mk->cache_free; + ERTS_DBG_MK_CHK_THR_ACCESS(mk); - if (cd) - mk->free_cache_descs = cd->next; - return cd; + if (c) + mk->cache_free = c->next; + + return c; } -static ERTS_INLINE void -free_cd(MemKind* mk, cache_desc_t *cd) -{ +static ERTS_INLINE void mseg_cache_free_descriptor(MemKind *mk, cache_t *c) { ERTS_DBG_MK_CHK_THR_ACCESS(mk); - cd->next = mk->free_cache_descs; - mk->free_cache_descs = cd; + ASSERT(c); + + c->seg = NULL; + c->size = 0; + c->next = 
mk->cache_free; + mk->cache_free = c; } +static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, Uint size) { -static ERTS_INLINE void -link_cd(MemKind* mk, cache_desc_t *cd) -{ ERTS_DBG_MK_CHK_THR_ACCESS(mk); - if (mk->cache) - mk->cache->prev = cd; - cd->next = mk->cache; - cd->prev = NULL; - mk->cache = cd; + if IS_2POW(size) { + int ix = SIZE_TO_CACHE_AREA_IDX(size); + cache_t *c; - if (!mk->cache_end) { - ASSERT(!cd->next); - mk->cache_end = cd; - } + if (ix < CACHE_AREAS && mk->cache_free) { + ASSERT( (1 << (ix + ALIGN_BITS)) == size); - mk->cache_size++; -} + /* unlink from free cache list */ + c = mseg_cache_alloc_descriptor(mk); -#if CAN_PARTLY_DESTROY -static ERTS_INLINE void -end_link_cd(MemKind* mk, cache_desc_t *cd) -{ - ERTS_DBG_MK_CHK_THR_ACCESS(mk); - if (mk->cache_end) - mk->cache_end->next = cd; - cd->next = NULL; - cd->prev = mk->cache_end; - mk->cache_end = cd; + /* link to cache area */ + c->seg = seg; + c->size = size; + c->next = mk->cache_area[ix]; - if (!mk->cache) { - ASSERT(!cd->prev); - mk->cache = cd; + mk->cache_area[ix] = c; + mk->cache_size++; + + return 1; + } } - mk->cache_size++; + return 0; } -#endif -static ERTS_INLINE void -unlink_cd(MemKind* mk, cache_desc_t *cd) -{ - ERTS_DBG_MK_CHK_THR_ACCESS(mk); - if (cd->next) - cd->next->prev = cd->prev; - else - mk->cache_end = cd->prev; - - if (cd->prev) - cd->prev->next = cd->next; - else - mk->cache = cd->next; - ASSERT(mk->cache_size > 0); - mk->cache_size--; -} +static ERTS_INLINE void *cache_get_segment(MemKind *mk, Uint size) { -static ERTS_INLINE void -check_cache_limits(MemKind* mk) -{ - cache_desc_t *cd; ERTS_DBG_MK_CHK_THR_ACCESS(mk); - mk->max_cached_seg_size = 0; - mk->min_cached_seg_size = ~((Uint) 0); - for (cd = mk->cache; cd; cd = cd->next) { - if (cd->size < mk->min_cached_seg_size) - mk->min_cached_seg_size = cd->size; - if (cd->size > mk->max_cached_seg_size) - mk->max_cached_seg_size = cd->size; + if (IS_2POW(size)) { + + int i, ix = SIZE_TO_CACHE_AREA_IDX(size); + void *seg; + cache_t *c; + Uint csize; + + for( i = ix; i < CACHE_AREAS; i++) { + + if ((c = mk->cache_area[i]) == NULL) + continue; + + ASSERT(IS_2POW(c->size)); + + /* unlink from cache area */ + csize = c->size; + seg = c->seg; + c->next = NULL; + mk->cache_area[i] = c->next; + mk->cache_size--; + mk->cache_hits++; + + /* link to free cache list */ + mseg_cache_free_descriptor(mk, c); + + ASSERT(!(mk->cache_size < 0)); + + /* divvy up the cache - if needed */ + while( i > ix) { + csize = csize >> 1; + /* try to cache half of it */ + if (!cache_bless_segment(mk, (char *)seg + csize, csize)) { + /* wouldn't cache .. destroy it instead */ + mseg_destroy(mk->ma, mk, (char *)seg + csize, csize); + } + i--; + } + + ASSERT(csize == size); + return seg; + } } + return NULL; } -static ERTS_INLINE void -adjust_cache_size(MemKind* mk, int force_check_limits) -{ - cache_desc_t *cd; - int check_limits = force_check_limits; - Sint max_cached = ((Sint) mk->segments.current.watermark - - (Sint) mk->segments.current.no); - ERTS_DBG_MK_CHK_THR_ACCESS(mk); - while (((Sint) mk->cache_size) > max_cached && ((Sint) mk->cache_size) > 0) { - ASSERT(mk->cache_end); - cd = mk->cache_end; - if (!check_limits && - !(mk->min_cached_seg_size < cd->size - && cd->size < mk->max_cached_seg_size)) { - check_limits = 1; - } +/* *_mseg_check_*_cache + * Slowly remove segments cached in the allocator by + * using callbacks from aux-work in the scheduler. 
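+ * Cached segments are all powers of two in size; a segment of size S
+ * is kept in cache_area[LOG2(S) - ALIGN_BITS] (see
+ * SIZE_TO_CACHE_AREA_IDX above). E.g. with ALIGN_BITS = 17, a 256kB
+ * (2^18 byte) segment lands in cache_area[1], so the largest, least
+ * reusable sizes are easy to find and purge first.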
+ */
+static ERTS_INLINE Uint mseg_drop_memkind_cache_size(MemKind *mk, int ix) {
+    cache_t *c = NULL, *next = NULL;
+
+    c = mk->cache_area[ix];
+    ASSERT( c != NULL );
+
+    while (c) {
+
+	next = c->next;
+
 	if (erts_mtrace_enabled)
-	    erts_mtrace_crr_free(SEGTYPE, SEGTYPE, cd->seg);
-	mseg_destroy(mk->ma, mk, cd->seg, cd->size);
-	unlink_cd(mk,cd);
-	free_cd(mk,cd);
-    }
+	    erts_mtrace_crr_free(SEGTYPE, SEGTYPE, c->seg);
 
-    if (check_limits)
-	check_cache_limits(mk);
-}
+	mseg_destroy(mk->ma, mk, c->seg, c->size);
+	mseg_cache_free_descriptor(mk, c);
 
-static Uint
-check_one_cache(MemKind* mk)
-{
-    if (mk->segments.current.watermark > mk->segments.current.no)
-	mk->segments.current.watermark--;
-    adjust_cache_size(mk, 0);
+	mk->cache_size--;
+
+	c = next;
+    }
+
+    mk->cache_area[ix] = NULL;
+
+    ASSERT( mk->cache_size >= 0 );
 
-    if (mk->cache_size)
-	schedule_cache_check(mk->ma);
     return mk->cache_size;
 }
 
-static void do_cache_check(ErtsMsegAllctr_t *ma)
-{
-    int empty_cache = 1;
+/* mseg_check_memkind_cache
+ * - Check if we can empty some cached segments in this
+ *   MemKind.
+ */
+
+
+static Uint mseg_check_memkind_cache(MemKind *mk) {
+    int i;
+    /* remove biggest first (less likely to be reused) */
+
+    ERTS_DBG_MK_CHK_THR_ACCESS(mk);
+    for (i = CACHE_AREAS - 1; i > 0; i--) {
+	if (mk->cache_area[i] != NULL)
+	    return mseg_drop_memkind_cache_size(mk, i);
+    }
+
+    return 0;
+}
+
+/* mseg_cache_check
+ * - Check if we have some cache we can purge
+ *   in any of the memkinds.
+ */
+
+static void mseg_cache_check(ErtsMsegAllctr_t *ma) {
     MemKind* mk;
+    Uint empty_cache = 1;
 
     ERTS_MSEG_LOCK(ma);
 
-    for (mk=ma->mk_list; mk; mk=mk->next) {
-	if (check_one_cache(mk))
+    for (mk = ma->mk_list; mk; mk = mk->next) {
+	if (mseg_check_memkind_cache(mk))
 	    empty_cache = 0;
     }
 
+    /* If all MemKinds caches are empty,
+     * remove aux-work callback
+     */
     if (empty_cache) {
 	ma->is_cache_check_scheduled = 0;
-	erts_set_aux_work_timeout(ma->ix,
-				  ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK,
-				  0);
+	erts_set_aux_work_timeout(ma->ix, ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK, 0);
     }
 
     INC_CC(ma, check_cache);
@@ -641,27 +687,57 @@ static void do_cache_check(ErtsMsegAllctr_t *ma)
     ERTS_MSEG_UNLOCK(ma);
 }
 
-void erts_mseg_cache_check(void)
-{
-    do_cache_check(ERTS_MSEG_ALLCTR_SS());
+/* erts_mseg_cache_check
+ * - This is a callback that is scheduled as aux-work from
+ *   schedulers and is called at some interval if we have a cache
+ *   on this mseg-allocator and memkind.
+ * - Purpose: Empty cache slowly so we don't collect mapped areas
+ *   and bloat memory.
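+ * - Each invocation drops at most one cache area per MemKind, and the
+ *   callback is unscheduled again once every cache is empty (see
+ *   mseg_cache_check() above).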
+ */ + +void erts_mseg_cache_check(void) { + mseg_cache_check(ERTS_MSEG_ALLCTR_SS()); } -static void -mseg_clear_cache(MemKind* mk) -{ - mk->segments.current.watermark = 0; - adjust_cache_size(mk, 1); +/* *_mseg_clear_*_cache + * Remove cached segments from the allocator completely + */ + +static void mseg_clear_memkind_cache(MemKind *mk) { + int i; + + for (i = 0; i < CACHE_AREAS; i++) { + if (mk->cache_area[i] == NULL) + continue; + + mseg_drop_memkind_cache_size(mk, i); + } +} + +static void mseg_clear_cache(ErtsMsegAllctr_t *ma) { + MemKind* mk; - ASSERT(!mk->cache); - ASSERT(!mk->cache_end); - ASSERT(!mk->cache_size); + ERTS_MSEG_LOCK(ma); + ERTS_DBG_MA_CHK_THR_ACCESS(ma); - mk->segments.current.watermark = mk->segments.current.no; - INC_CC(mk->ma, clear_cache); + for (mk = ma->mk_list; mk; mk = mk->next) { + mseg_clear_memkind_cache(mk); + } + + INC_CC(ma, clear_cache); + + ERTS_MSEG_UNLOCK(ma); +} + +void erts_mseg_clear_cache(void) { + mseg_clear_cache(ERTS_MSEG_ALLCTR_SS()); + mseg_clear_cache(ERTS_MSEG_ALLCTR_IX(0)); } + + static ERTS_INLINE MemKind* memkind(ErtsMsegAllctr_t *ma, const ErtsMsegOpt_t *opt) { @@ -674,116 +750,40 @@ static ERTS_INLINE MemKind* memkind(ErtsMsegAllctr_t *ma, static void * mseg_alloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, Uint *size_p, - const ErtsMsegOpt_t *opt) + Uint flags, const ErtsMsegOpt_t *opt) { - Uint max, min, diff_size, size; - cache_desc_t *cd, *cand_cd; + Uint size; void *seg; MemKind* mk = memkind(ma, opt); INC_CC(ma, alloc); + /* Carrier align */ size = ALIGNED_CEILING(*size_p); + /* Cache optim (if applicable) */ + if (MSEG_FLG_IS_2POW(flags) && !IS_2POW(size)) + size = ceil_2pow(size); + #if CAN_PARTLY_DESTROY if (size < ma->min_seg_size) ma->min_seg_size = size; #endif + + if (opt->cache && mk->cache_size > 0 && (seg = cache_get_segment(mk, size)) != NULL) + goto done; - if (!opt->cache) { - create_seg: - adjust_cache_size(mk,0); - seg = mseg_create(ma, mk, size); - if (!seg) { - mseg_clear_cache(mk); - seg = mseg_create(ma, mk, size); - if (!seg) - size = 0; - } - - *size_p = size; - if (seg) { - if (erts_mtrace_enabled) - erts_mtrace_crr_alloc(seg, atype, ERTS_MTRACE_SEGMENT_ID, size); - ERTS_MSEG_ALLOC_STAT(mk,size); - } - return seg; - } - - if (size > mk->max_cached_seg_size) - goto create_seg; - - if (size < mk->min_cached_seg_size) { - - diff_size = mk->min_cached_seg_size - size; - - if (diff_size > ma->abs_max_cache_bad_fit) - goto create_seg; - - if (100*PAGES(diff_size) > ma->rel_max_cache_bad_fit*PAGES(size)) - goto create_seg; - - } - - max = 0; - min = ~((Uint) 0); - cand_cd = NULL; - - for (cd = mk->cache; cd; cd = cd->next) { - if (cd->size >= size) { - if (!cand_cd) { - cand_cd = cd; - continue; - } - else if (cd->size < cand_cd->size) { - if (max < cand_cd->size) - max = cand_cd->size; - if (min > cand_cd->size) - min = cand_cd->size; - cand_cd = cd; - continue; - } - } - if (max < cd->size) - max = cd->size; - if (min > cd->size) - min = cd->size; - } - - mk->min_cached_seg_size = min; - mk->max_cached_seg_size = max; - - if (!cand_cd) - goto create_seg; - - diff_size = cand_cd->size - size; - - if (diff_size > ma->abs_max_cache_bad_fit - || 100*PAGES(diff_size) > ma->rel_max_cache_bad_fit*PAGES(size)) { - if (mk->max_cached_seg_size < cand_cd->size) - mk->max_cached_seg_size = cand_cd->size; - if (mk->min_cached_seg_size > cand_cd->size) - mk->min_cached_seg_size = cand_cd->size; - goto create_seg; - } - - mk->cache_hits++; - - size = cand_cd->size; - seg = cand_cd->seg; - - unlink_cd(mk,cand_cd); - 
free_cd(mk,cand_cd); + if ((seg = mseg_create(ma, mk, size)) == NULL) + size = 0; +done: *size_p = size; + if (seg) { + if (erts_mtrace_enabled) + erts_mtrace_crr_alloc(seg, atype, ERTS_MTRACE_SEGMENT_ID, size); - if (erts_mtrace_enabled) { - erts_mtrace_crr_free(SEGTYPE, SEGTYPE, seg); - erts_mtrace_crr_alloc(seg, atype, SEGTYPE, size); - } - - if (seg) ERTS_MSEG_ALLOC_STAT(mk,size); + } return seg; } @@ -794,69 +794,35 @@ mseg_dealloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, Uint size, const ErtsMsegOpt_t *opt) { MemKind* mk = memkind(ma, opt); - cache_desc_t *cd; + ERTS_MSEG_DEALLOC_STAT(mk,size); - if (!opt->cache || ma->max_cache_size == 0) { - if (erts_mtrace_enabled) - erts_mtrace_crr_free(atype, SEGTYPE, seg); - mseg_destroy(ma, mk, seg, size); + if (opt->cache && cache_bless_segment(mk, seg, size)) { + schedule_cache_check(ma); + goto done; } - else { - int check_limits = 0; - - if (size < mk->min_cached_seg_size) - mk->min_cached_seg_size = size; - if (size > mk->max_cached_seg_size) - mk->max_cached_seg_size = size; - - if (!mk->free_cache_descs) { - cd = mk->cache_end; - if (!(mk->min_cached_seg_size < cd->size - && cd->size < mk->max_cached_seg_size)) { - check_limits = 1; - } - if (erts_mtrace_enabled) - erts_mtrace_crr_free(SEGTYPE, SEGTYPE, cd->seg); - mseg_destroy(ma, mk, cd->seg, cd->size); - unlink_cd(mk,cd); - free_cd(mk,cd); - } - - cd = alloc_cd(mk); - ASSERT(cd); - cd->seg = seg; - cd->size = size; - link_cd(mk,cd); - - if (erts_mtrace_enabled) { - erts_mtrace_crr_free(atype, SEGTYPE, seg); - erts_mtrace_crr_alloc(seg, SEGTYPE, SEGTYPE, size); - } - - /* ASSERT(segments.current.watermark >= segments.current.no + cache_size); */ - if (check_limits) - check_cache_limits(mk); + if (erts_mtrace_enabled) + erts_mtrace_crr_free(atype, SEGTYPE, seg); - schedule_cache_check(ma); + mseg_destroy(ma, mk, seg, size); - } +done: INC_CC(ma, dealloc); } static void * mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, - Uint old_size, Uint *new_size_p, const ErtsMsegOpt_t *opt) + Uint old_size, Uint *new_size_p, Uint flags, const ErtsMsegOpt_t *opt) { MemKind* mk; void *new_seg; Uint new_size; if (!seg || !old_size) { - new_seg = mseg_alloc(ma, atype, new_size_p, opt); + new_seg = mseg_alloc(ma, atype, new_size_p, flags, opt); DEC_CC(ma, alloc); return new_seg; } @@ -869,8 +835,14 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, mk = memkind(ma, opt); new_seg = seg; + + /* Carrier align */ new_size = ALIGNED_CEILING(*new_size_p); + /* Cache optim (if applicable) */ + if (MSEG_FLG_IS_2POW(flags) && !IS_2POW(new_size)) + new_size = ceil_2pow(new_size); + if (new_size == old_size) ; else if (new_size < old_size) { @@ -880,53 +852,25 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, if (new_size < ma->min_seg_size) ma->min_seg_size = new_size; #endif - + /* +Mrsbcst */ if (shrink_sz < opt->abs_shrink_th - && 100*PAGES(shrink_sz) < opt->rel_shrink_th*PAGES(old_size)) { + && 100*shrink_sz < opt->rel_shrink_th*old_size) { new_size = old_size; } else { #if CAN_PARTLY_DESTROY - if (shrink_sz > ma->min_seg_size - && mk->free_cache_descs - && opt->cache) { - cache_desc_t *cd; - - cd = alloc_cd(mk); - ASSERT(cd); - cd->seg = ((char *) seg) + new_size; - cd->size = shrink_sz; - end_link_cd(mk,cd); - - if (erts_mtrace_enabled) { - erts_mtrace_crr_realloc(new_seg, - atype, - SEGTYPE, - seg, - new_size); - erts_mtrace_crr_alloc(cd->seg, SEGTYPE, SEGTYPE, cd->size); - } - schedule_cache_check(ma); - } - else { - if 
(erts_mtrace_enabled) - erts_mtrace_crr_realloc(new_seg, - atype, - SEGTYPE, - seg, - new_size); - mseg_destroy(ma, mk, ((char *) seg) + new_size, shrink_sz); - } + if (erts_mtrace_enabled) + erts_mtrace_crr_realloc(new_seg, atype, SEGTYPE, seg, new_size); -#elif HAVE_MSEG_RECREATE + mseg_destroy(ma, mk, ((char *) seg) + new_size, shrink_sz); +#elif HAVE_MSEG_RECREATE goto do_recreate; - #else - new_seg = mseg_alloc(ma, atype, &new_size, opt); + new_seg = mseg_alloc(ma, atype, &new_size, flags, opt); if (!new_seg) new_size = old_size; else { @@ -944,7 +888,7 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, if (!opt->preserv) { mseg_dealloc(ma, atype, seg, old_size, opt); - new_seg = mseg_alloc(ma, atype, &new_size, opt); + new_seg = mseg_alloc(ma, atype, &new_size, flags, opt); } else { #if HAVE_MSEG_RECREATE @@ -957,13 +901,11 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, if (!new_seg) new_size = old_size; #else - new_seg = mseg_alloc(ma, atype, &new_size, opt); + new_seg = mseg_alloc(ma, atype, &new_size, flags, opt); if (!new_seg) new_size = old_size; else { - sys_memcpy(((char *) new_seg), - ((char *) seg), - MIN(new_size, old_size)); + sys_memcpy(((char *) new_seg), ((char *) seg), MIN(new_size, old_size)); mseg_dealloc(ma, atype, seg, old_size, opt); } #endif @@ -972,6 +914,7 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, INC_CC(ma, realloc); + ASSERT(!MSEG_FLG_IS_2POW(flags) || IS_2POW(new_size)); *new_size_p = new_size; ERTS_MSEG_REALLOC_STAT(mk, old_size, new_size); @@ -1004,6 +947,7 @@ static struct { Eterm mseg_dealloc; Eterm mseg_realloc; Eterm mseg_create; + Eterm mseg_create_resize; Eterm mseg_destroy; #if HAVE_MSEG_RECREATE Eterm mseg_recreate; @@ -1060,6 +1004,7 @@ init_atoms(ErtsMsegAllctr_t *ma) AM_INIT(mseg_dealloc); AM_INIT(mseg_realloc); AM_INIT(mseg_create); + AM_INIT(mseg_create_resize); AM_INIT(mseg_destroy); #if HAVE_MSEG_RECREATE AM_INIT(mseg_recreate); @@ -1079,14 +1024,12 @@ init_atoms(ErtsMsegAllctr_t *ma) erts_mtx_unlock(&init_atoms_mutex); } - #define bld_uint erts_bld_uint #define bld_cons erts_bld_cons #define bld_tuple erts_bld_tuple #define bld_string erts_bld_string #define bld_2tup_list erts_bld_2tup_list - /* * bld_unstable_uint() (instead of bld_uint()) is used when values may * change between size check and actual build. 
This because a value @@ -1130,6 +1073,7 @@ add_4tup(Uint **hpp, Uint *szp, Eterm *lp, *lp = bld_cons(hpp, szp, bld_tuple(hpp, szp, 4, el1, el2, el3, el4), *lp); } + static Eterm info_options(ErtsMsegAllctr_t *ma, char *prefix, @@ -1190,6 +1134,7 @@ info_calls(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, Uint **hpp PRINT_CC(to, arg, dealloc); PRINT_CC(to, arg, realloc); PRINT_CC(to, arg, create); + PRINT_CC(to, arg, create_resize); PRINT_CC(to, arg, destroy); #if HAVE_MSEG_RECREATE PRINT_CC(to, arg, recreate); @@ -1229,6 +1174,10 @@ info_calls(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, Uint **hpp bld_unstable_uint(hpp, szp, ma->calls.create.giga_no), bld_unstable_uint(hpp, szp, ma->calls.create.no)); + add_3tup(hpp, szp, &res, + am.mseg_create_resize, + bld_unstable_uint(hpp, szp, ma->calls.create_resize.giga_no), + bld_unstable_uint(hpp, szp, ma->calls.create_resize.no)); add_3tup(hpp, szp, &res, am.mseg_realloc, @@ -1415,21 +1364,21 @@ erts_mseg_info(int ix, } void * -erts_mseg_alloc_opt(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt) +erts_mseg_alloc_opt(ErtsAlcType_t atype, Uint *size_p, Uint flags, const ErtsMsegOpt_t *opt) { ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt); void *seg; ERTS_MSEG_LOCK(ma); ERTS_DBG_MA_CHK_THR_ACCESS(ma); - seg = mseg_alloc(ma, atype, size_p, opt); + seg = mseg_alloc(ma, atype, size_p, flags, opt); ERTS_MSEG_UNLOCK(ma); return seg; } void * -erts_mseg_alloc(ErtsAlcType_t atype, Uint *size_p) +erts_mseg_alloc(ErtsAlcType_t atype, Uint *size_p, Uint flags) { - return erts_mseg_alloc_opt(atype, size_p, &erts_mseg_default_opt); + return erts_mseg_alloc_opt(atype, size_p, flags, &erts_mseg_default_opt); } void @@ -1452,44 +1401,24 @@ erts_mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size) void * erts_mseg_realloc_opt(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p, + Uint flags, const ErtsMsegOpt_t *opt) { ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt); void *new_seg; ERTS_MSEG_LOCK(ma); ERTS_DBG_MA_CHK_THR_ACCESS(ma); - new_seg = mseg_realloc(ma, atype, seg, old_size, new_size_p, opt); + new_seg = mseg_realloc(ma, atype, seg, old_size, new_size_p, flags, opt); ERTS_MSEG_UNLOCK(ma); return new_seg; } void * erts_mseg_realloc(ErtsAlcType_t atype, void *seg, - Uint old_size, Uint *new_size_p) + Uint old_size, Uint *new_size_p, Uint flags) { return erts_mseg_realloc_opt(atype, seg, old_size, new_size_p, - &erts_mseg_default_opt); -} - -void -erts_mseg_clear_cache(void) -{ - ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_SS(); - MemKind* mk; - -start: - - ERTS_MSEG_LOCK(ma); - ERTS_DBG_MA_CHK_THR_ACCESS(ma); - for (mk=ma->mk_list; mk; mk=mk->next) { - mseg_clear_cache(mk); - } - ERTS_MSEG_UNLOCK(ma); - - if (ma->ix != 0) { - ma = ERTS_MSEG_ALLCTR_IX(0); - goto start; - } + flags, &erts_mseg_default_opt); } Uint @@ -1515,23 +1444,23 @@ erts_mseg_unit_size(void) static void mem_kind_init(ErtsMsegAllctr_t *ma, MemKind* mk, const char* name) { - unsigned i; + int i; - mk->cache = NULL; - mk->cache_end = NULL; - mk->max_cached_seg_size = 0; - mk->min_cached_seg_size = ~((Uint) 0); - mk->cache_size = 0; - mk->cache_hits = 0; + for (i = 0; i < CACHE_AREAS; i++) { + mk->cache_area[i] = NULL; + } - if (ma->max_cache_size > 0) { - for (i = 0; i < ma->max_cache_size - 1; i++) - mk->cache_descs[i].next = &mk->cache_descs[i + 1]; - mk->cache_descs[ma->max_cache_size - 1].next = NULL; - mk->free_cache_descs = &mk->cache_descs[0]; + mk->cache_free = NULL; + + for (i = 0; i < MAX_CACHE_SIZE; i++) { + mk->cache[i].seg = 
NULL; + mk->cache[i].size = 0; + mk->cache[i].next = mk->cache_free; + mk->cache_free = &(mk->cache[i]); } - else - mk->free_cache_descs = NULL; + + mk->cache_size = 0; + mk->cache_hits = 0; mk->segments.current.watermark = 0; mk->segments.current.no = 0; @@ -1584,17 +1513,10 @@ erts_mseg_init(ErtsMsegInit_t *init) initialize_pmmap(); #endif - page_size = GET_PAGE_SIZE; - ASSERT((ALIGNED_SIZE % page_size) == 0); + if (!IS_2POW(GET_PAGE_SIZE)) + erl_exit(ERTS_ABORT_EXIT, "erts_mseg: Unexpected page_size %beu\n", GET_PAGE_SIZE); - page_shift = 1; - /* page size alignment assertion */ - while ((page_size >> page_shift) != 1) { - if ((page_size & (1 << (page_shift - 1))) != 0) - erl_exit(ERTS_ABORT_EXIT, - "erts_mseg: Unexpected page_size %beu\n", page_size); - page_shift++; - } + ASSERT((ALIGNED_SIZE % GET_PAGE_SIZE) == 0); for (i = 0; i < no_mseg_allocators; i++) { ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_IX(i); @@ -1679,7 +1601,7 @@ erts_mseg_test(unsigned long op, case 0x400: /* Have erts_mseg */ return (unsigned long) 1; case 0x401: - return (unsigned long) erts_mseg_alloc(ERTS_ALC_A_INVALID, (Uint *) a1); + return (unsigned long) erts_mseg_alloc(ERTS_ALC_A_INVALID, (Uint *) a1, (Uint) 0); case 0x402: erts_mseg_dealloc(ERTS_ALC_A_INVALID, (void *) a1, (Uint) a2); return (unsigned long) 0; @@ -1687,7 +1609,8 @@ erts_mseg_test(unsigned long op, return (unsigned long) erts_mseg_realloc(ERTS_ALC_A_INVALID, (void *) a1, (Uint) a2, - (Uint *) a3); + (Uint *) a3, + (Uint) 0); case 0x404: erts_mseg_clear_cache(); return (unsigned long) 0; @@ -1876,7 +1799,7 @@ static int initialize_pmmap(void) rsz = ALIGNED_FLOOR(q - p); - rptr = mmap_align((void *) p, rsz, + rptr = mmap_align(NULL, (void *) p, rsz, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | EXTRA_MAP_FLAGS, -1 , 0); diff --git a/erts/emulator/sys/common/erl_mseg.h b/erts/emulator/sys/common/erl_mseg.h index 741080fb78..644887462a 100644 --- a/erts/emulator/sys/common/erl_mseg.h +++ b/erts/emulator/sys/common/erl_mseg.h @@ -38,6 +38,11 @@ #if HAVE_ERTS_MSEG + +#define ERTS_MSEG_FLG_NONE ((Uint)(0)) +#define ERTS_MSEG_FLG_2POW ((Uint)(1 << 0)) + + #define ERTS_MSEG_VSN_STR "0.9" typedef struct { @@ -68,13 +73,13 @@ typedef struct { extern const ErtsMsegOpt_t erts_mseg_default_opt; -void *erts_mseg_alloc(ErtsAlcType_t, Uint *); -void *erts_mseg_alloc_opt(ErtsAlcType_t, Uint *, const ErtsMsegOpt_t *); +void *erts_mseg_alloc(ErtsAlcType_t, Uint *, Uint); +void *erts_mseg_alloc_opt(ErtsAlcType_t, Uint *, Uint, const ErtsMsegOpt_t *); void erts_mseg_dealloc(ErtsAlcType_t, void *, Uint); void erts_mseg_dealloc_opt(ErtsAlcType_t, void *, Uint, const ErtsMsegOpt_t *); -void *erts_mseg_realloc(ErtsAlcType_t, void *, Uint, Uint *); +void *erts_mseg_realloc(ErtsAlcType_t, void *, Uint, Uint *, Uint); void *erts_mseg_realloc_opt(ErtsAlcType_t, void *, Uint, Uint *, - const ErtsMsegOpt_t *); + Uint, const ErtsMsegOpt_t *); void erts_mseg_clear_cache(void); void erts_mseg_cache_check(void); Uint erts_mseg_no( const ErtsMsegOpt_t *); -- cgit v1.2.3 From de03018e74bea97795895f1611abf2a50a484449 Mon Sep 17 00:00:00 2001 From: Sverker Eriksson Date: Tue, 6 Nov 2012 22:42:47 +0100 Subject: erts: Add carrier offset to internal allocation headers --- erts/emulator/sys/common/erl_mseg.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'erts/emulator/sys/common') diff --git a/erts/emulator/sys/common/erl_mseg.h b/erts/emulator/sys/common/erl_mseg.h index 644887462a..460e4a514a 100644 --- a/erts/emulator/sys/common/erl_mseg.h +++ 
b/erts/emulator/sys/common/erl_mseg.h @@ -32,8 +32,10 @@ #if HAVE_MMAP # define HAVE_ERTS_MSEG 1 +# define HAVE_SUPER_ALIGNED_MB_CARRIERS 1 #else # define HAVE_ERTS_MSEG 0 +# define HAVE_SUPER_ALIGNED_MB_CARRIERS 0 #endif #if HAVE_ERTS_MSEG -- cgit v1.2.3 From 4ca2d66fb51e1f3bb85d420c339fb73c5fb6bc62 Mon Sep 17 00:00:00 2001 From: Sverker Eriksson Date: Thu, 8 Nov 2012 11:24:40 +0100 Subject: erts: Move carrier alignment define to erl_msg.h --- erts/emulator/sys/common/erl_mseg.c | 27 +++++++++++++-------------- erts/emulator/sys/common/erl_mseg.h | 3 +++ 2 files changed, 16 insertions(+), 14 deletions(-) (limited to 'erts/emulator/sys/common') diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c index 58ee6600d2..ae6a377abf 100644 --- a/erts/emulator/sys/common/erl_mseg.c +++ b/erts/emulator/sys/common/erl_mseg.c @@ -57,6 +57,19 @@ /* Implement some other way to get the real page size if needed! */ #endif +#undef MIN +#define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) +#undef MAX +#define MAX(X, Y) ((X) > (Y) ? (X) : (Y)) + +#define ALIGN_BITS MSEG_ALIGN_BITS +#define ALIGNED_SIZE MSEG_ALIGNED_SIZE +#define INV_ALIGNED_MASK ((UWord) ((ALIGNED_SIZE) - 1)) +#define ALIGNED_MASK (~INV_ALIGNED_MASK) +#define ALIGNED_FLOOR(X) (((UWord)(X)) & ALIGNED_MASK) +#define ALIGNED_CEILING(X) ALIGNED_FLOOR((X) + INV_ALIGNED_MASK) +#define MAP_IS_ALIGNED(X) (((UWord)(X) & (ALIGNED_SIZE - 1)) == 0) + #define IS_2POW(X) (((X) & ((X) - 1)) == 0) static ERTS_INLINE Uint ceil_2pow(Uint x) { int i = 1 << (4 + (sizeof(Uint) != 4 ? 1 : 0)); @@ -71,25 +84,11 @@ static const int debruijn[32] = { #define LOG2(X) (debruijn[((Uint32)(((X) & -(X)) * 0x077CB531U)) >> 27]) -#define ALIGN_BITS (17) -#define ALIGNED_SIZE (1 << ALIGN_BITS) /* 128kB */ - #define CACHE_AREAS (32 - ALIGN_BITS) #define SIZE_TO_CACHE_AREA_IDX(S) (LOG2((S)) - ALIGN_BITS) #define MAX_CACHE_SIZE (30) -#undef MIN -#define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) -#undef MAX -#define MAX(X, Y) ((X) > (Y) ? (X) : (Y)) - -#define INV_ALIGNED_MASK ((UWord) ((ALIGNED_SIZE) - 1)) -#define ALIGNED_MASK (~INV_ALIGNED_MASK) -#define ALIGNED_FLOOR(X) (((UWord)(X)) & ALIGNED_MASK) -#define ALIGNED_CEILING(X) ALIGNED_FLOOR((X) + INV_ALIGNED_MASK) -#define MAP_IS_ALIGNED(X) (((UWord)(X) & (ALIGNED_SIZE - 1)) == 0) - #define MSEG_FLG_IS_2POW(X) ((X) & ERTS_MSEG_FLG_2POW) #ifdef DEBUG diff --git a/erts/emulator/sys/common/erl_mseg.h b/erts/emulator/sys/common/erl_mseg.h index 460e4a514a..91e335b225 100644 --- a/erts/emulator/sys/common/erl_mseg.h +++ b/erts/emulator/sys/common/erl_mseg.h @@ -33,13 +33,16 @@ #if HAVE_MMAP # define HAVE_ERTS_MSEG 1 # define HAVE_SUPER_ALIGNED_MB_CARRIERS 1 +# define MSEG_ALIGN_BITS (17) /*SVERK Configure me! */ #else # define HAVE_ERTS_MSEG 0 # define HAVE_SUPER_ALIGNED_MB_CARRIERS 0 +# define MSEG_ALIGN_BITS (12) /*SVERK Configure me! 
*/
 #endif
 
 #if HAVE_ERTS_MSEG
+#define MSEG_ALIGNED_SIZE (1 << MSEG_ALIGN_BITS)
 
 #define ERTS_MSEG_FLG_NONE ((Uint)(0))
 #define ERTS_MSEG_FLG_2POW ((Uint)(1 << 0))
-- cgit v1.2.3


From 1ff4fffe893346160e5136a3e4a1999e8927b5ec Mon Sep 17 00:00:00 2001
From: Sverker Eriksson
Date: Wed, 14 Nov 2012 19:25:09 +0100
Subject: erts: Add carrier pointer to header of free block

---
 erts/emulator/sys/common/erl_mseg.h | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

(limited to 'erts/emulator/sys/common')

diff --git a/erts/emulator/sys/common/erl_mseg.h b/erts/emulator/sys/common/erl_mseg.h
index 91e335b225..f260c6506f 100644
--- a/erts/emulator/sys/common/erl_mseg.h
+++ b/erts/emulator/sys/common/erl_mseg.h
@@ -33,10 +33,14 @@
 #if HAVE_MMAP
 # define HAVE_ERTS_MSEG 1
 # define HAVE_SUPER_ALIGNED_MB_CARRIERS 1
-# define MSEG_ALIGN_BITS (17) /*SVERK Configure me! */
 #else
 # define HAVE_ERTS_MSEG 0
 # define HAVE_SUPER_ALIGNED_MB_CARRIERS 0
+#endif
+
+#if HAVE_SUPER_ALIGNED_MB_CARRIERS
+# define MSEG_ALIGN_BITS (17) /*SVERK Configure me! */
+#else
 # define MSEG_ALIGN_BITS (12) /*SVERK Configure me! */
 #endif
-- cgit v1.2.3


From 87004cf5c70a528bb34fee9c629257c494a3f61a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bj=C3=B6rn-Egil=20Dahlberg?=
Date: Wed, 14 Nov 2012 15:44:36 +0100
Subject: erts: Use MSEG_ALIGN_BITS and MSEG_ALIGNED_SIZE

* Don't redefine ALIGN_BITS or ALIGNED_SIZE, the globally defined
  MSEG_* constants are just as good.
---
 erts/emulator/sys/common/erl_mseg.c | 78 ++++++++++++++++++-------------------
 1 file changed, 38 insertions(+), 40 deletions(-)

(limited to 'erts/emulator/sys/common')

diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c
index ae6a377abf..5fff8e5ed7 100644
--- a/erts/emulator/sys/common/erl_mseg.c
+++ b/erts/emulator/sys/common/erl_mseg.c
@@ -62,13 +62,11 @@
 #undef MAX
 #define MAX(X, Y) ((X) > (Y) ? 
(X) : (Y)) -#define ALIGN_BITS MSEG_ALIGN_BITS -#define ALIGNED_SIZE MSEG_ALIGNED_SIZE -#define INV_ALIGNED_MASK ((UWord) ((ALIGNED_SIZE) - 1)) +#define INV_ALIGNED_MASK ((UWord) ((MSEG_ALIGNED_SIZE) - 1)) #define ALIGNED_MASK (~INV_ALIGNED_MASK) #define ALIGNED_FLOOR(X) (((UWord)(X)) & ALIGNED_MASK) #define ALIGNED_CEILING(X) ALIGNED_FLOOR((X) + INV_ALIGNED_MASK) -#define MAP_IS_ALIGNED(X) (((UWord)(X) & (ALIGNED_SIZE - 1)) == 0) +#define MAP_IS_ALIGNED(X) (((UWord)(X) & (MSEG_ALIGNED_SIZE - 1)) == 0) #define IS_2POW(X) (((X) & ((X) - 1)) == 0) static ERTS_INLINE Uint ceil_2pow(Uint x) { @@ -84,9 +82,9 @@ static const int debruijn[32] = { #define LOG2(X) (debruijn[((Uint32)(((X) & -(X)) * 0x077CB531U)) >> 27]) -#define CACHE_AREAS (32 - ALIGN_BITS) +#define CACHE_AREAS (32 - MSEG_ALIGN_BITS) -#define SIZE_TO_CACHE_AREA_IDX(S) (LOG2((S)) - ALIGN_BITS) +#define SIZE_TO_CACHE_AREA_IDX(S) (LOG2((S)) - MSEG_ALIGN_BITS) #define MAX_CACHE_SIZE (30) #define MSEG_FLG_IS_2POW(X) ((X) & ERTS_MSEG_FLG_2POW) @@ -363,20 +361,20 @@ mmap_align(ErtsMsegAllctr_t *ma, void *addr, size_t length, int prot, int flags, munmap(seg, length); - if ((seg = mmap(addr, length + ALIGNED_SIZE, prot, flags, fd, offset)) == MAP_FAILED) { + if ((seg = mmap(addr, length + MSEG_ALIGNED_SIZE, prot, flags, fd, offset)) == MAP_FAILED) { return seg; } /* ceil to aligned pointer */ - aseg = (void *)(((unsigned long)(seg + ALIGNED_SIZE)) & (~(ALIGNED_SIZE - 1))); + aseg = (void *)(((unsigned long)(seg + MSEG_ALIGNED_SIZE)) & (~(MSEG_ALIGNED_SIZE - 1))); diff = aseg - seg; if (diff > 0) { munmap(seg, diff); } - if (ALIGNED_SIZE - diff > 0) { - munmap((void *) (aseg + length), ALIGNED_SIZE - diff); + if (MSEG_ALIGNED_SIZE - diff > 0) { + munmap((void *) (aseg + length), MSEG_ALIGNED_SIZE - diff); } return aseg; @@ -386,7 +384,7 @@ static ERTS_INLINE void * mseg_create(ErtsMsegAllctr_t *ma, MemKind* mk, Uint size) { void *seg; - ASSERT(size % ALIGNED_SIZE == 0); + ASSERT(size % MSEG_ALIGNED_SIZE == 0); #if HALFWORD_HEAP if (mk == &ma->low_mem) { @@ -434,7 +432,7 @@ mseg_destroy(ErtsMsegAllctr_t *ma, MemKind* mk, void *seg, Uint size) { #endif } - ASSERT(size % ALIGNED_SIZE == 0); + ASSERT(size % MSEG_ALIGNED_SIZE == 0); ASSERT(res == 0); INC_CC(ma, destroy); @@ -448,8 +446,8 @@ mseg_recreate(ErtsMsegAllctr_t *ma, MemKind* mk, void *old_seg, Uint old_size, U { void *new_seg; - ASSERT(old_size % ALIGNED_SIZE == 0); - ASSERT(new_size % ALIGNED_SIZE == 0); + ASSERT(old_size % MSEG_ALIGNED_SIZE == 0); + ASSERT(new_size % MSEG_ALIGNED_SIZE == 0); #if HALFWORD_HEAP if (mk == &ma->low_mem) { @@ -537,7 +535,7 @@ static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, Uint size) { cache_t *c; if (ix < CACHE_AREAS && mk->cache_free) { - ASSERT( (1 << (ix + ALIGN_BITS)) == size); + ASSERT( (1 << (ix + MSEG_ALIGN_BITS)) == size); /* unlink from free cache list */ c = mseg_cache_alloc_descriptor(mk); @@ -1438,7 +1436,7 @@ erts_mseg_no(const ErtsMsegOpt_t *opt) Uint erts_mseg_unit_size(void) { - return ALIGNED_SIZE; + return MSEG_ALIGNED_SIZE; } static void mem_kind_init(ErtsMsegAllctr_t *ma, MemKind* mk, const char* name) @@ -1515,7 +1513,7 @@ erts_mseg_init(ErtsMsegInit_t *init) if (!IS_2POW(GET_PAGE_SIZE)) erl_exit(ERTS_ABORT_EXIT, "erts_mseg: Unexpected page_size %beu\n", GET_PAGE_SIZE); - ASSERT((ALIGNED_SIZE % GET_PAGE_SIZE) == 0); + ASSERT((MSEG_ALIGNED_SIZE % GET_PAGE_SIZE) == 0); for (i = 0; i < no_mseg_allocators; i++) { ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_IX(i); @@ -1709,7 +1707,7 @@ static void *do_map(void *ptr, size_t 
sz) return NULL; } - if (((unsigned long) ptr) % ALIGNED_SIZE) { + if (((unsigned long) ptr) % MSEG_ALIGNED_SIZE) { HARDDEBUG_HW_UNALIGNED_ALIGNMENT(ptr, sz); return NULL; } @@ -1740,7 +1738,7 @@ static int do_unmap(void *ptr, size_t sz) return 1; } - if (((unsigned long) ptr) % ALIGNED_SIZE) { + if (((unsigned long) ptr) % MSEG_ALIGNED_SIZE) { HARDDEBUG_HW_UNALIGNED_ALIGNMENT(ptr, sz); return 1; } @@ -1804,7 +1802,7 @@ static int initialize_pmmap(void) -1 , 0); #ifdef HARDDEBUG printf("p=%p, rsz = %ld, pages = %ld, got range = %p -> %p\r\n", - p, (unsigned long) rsz, (unsigned long) (rsz / ALIGNED_SIZE), + p, (unsigned long) rsz, (unsigned long) (rsz / MSEG_ALIGNED_SIZE), (void *) rptr, (void*)(rptr + rsz)); #endif if ((UWord)(rptr + rsz) > RANGE_MAX) { @@ -1816,11 +1814,11 @@ static int initialize_pmmap(void) munmap((void*)RANGE_MAX, rsz - rsz_trunc); rsz = rsz_trunc; } - if (!do_map(rptr, ALIGNED_SIZE)) { + if (!do_map(rptr, MSEG_ALIGNED_SIZE)) { erl_exit(1,"Could not actually mmap first page for halfword emulator...\n"); } initial = (FreeBlock *) rptr; - initial->num = (rsz / ALIGNED_SIZE); + initial->num = (rsz / MSEG_ALIGNED_SIZE); initial->next = NULL; first = initial; INIT_LOCK(); @@ -1830,7 +1828,7 @@ static int initialize_pmmap(void) static void *pmmap(size_t size) { size_t real_size = ALIGNED_CEILING(size); - size_t num_pages = real_size / ALIGNED_SIZE; + size_t num_pages = real_size / MSEG_ALIGNED_SIZE; FreeBlock **block; FreeBlock *tail; FreeBlock *res; @@ -1851,8 +1849,8 @@ static void *pmmap(size_t size) *block = (*block)->next; } else { tail = (FreeBlock *) (((char *) ((void *) (*block))) + real_size); - if (!do_map(tail, ALIGNED_SIZE)) { - HARDDEBUG_MAP_FAILED(tail, ALIGNED_SIZE); + if (!do_map(tail, MSEG_ALIGNED_SIZE)) { + HARDDEBUG_MAP_FAILED(tail, MSEG_ALIGNED_SIZE); RELEASE_LOCK(); return NULL; } @@ -1875,7 +1873,7 @@ static void *pmmap(size_t size) static int pmunmap(void *p, size_t size) { size_t real_size = ALIGNED_CEILING(size); - size_t num_pages = real_size / ALIGNED_SIZE; + size_t num_pages = real_size / MSEG_ALIGNED_SIZE; FreeBlock *block; FreeBlock *last; @@ -1883,8 +1881,8 @@ static int pmunmap(void *p, size_t size) ASSERT(((unsigned long)p & CHECK_POINTER_MASK)==0); - if (real_size > ALIGNED_SIZE) { - if (do_unmap(((char *) p) + ALIGNED_SIZE, real_size - ALIGNED_SIZE)) { + if (real_size > MSEG_ALIGNED_SIZE) { + if (do_unmap(((char *) p) + MSEG_ALIGNED_SIZE, real_size - MSEG_ALIGNED_SIZE)) { return 1; } } @@ -1903,7 +1901,7 @@ static int pmunmap(void *p, size_t size) /* Merge new free block with following */ nb->num = block->num + num_pages; nb->next = block->next; - if (do_unmap(block, ALIGNED_SIZE)) { + if (do_unmap(block, MSEG_ALIGNED_SIZE)) { RELEASE_LOCK(); return 1; } @@ -1913,11 +1911,11 @@ static int pmunmap(void *p, size_t size) nb->next = block; } if (last != NULL) { - if (p == ((void *) (((char *) last) + (last->num * ALIGNED_SIZE)))) { + if (p == ((void *) (((char *) last) + (last->num * MSEG_ALIGNED_SIZE)))) { /* Merge with previous */ last->num += nb->num; last->next = nb->next; - if (do_unmap(nb, ALIGNED_SIZE)) { + if (do_unmap(nb, MSEG_ALIGNED_SIZE)) { RELEASE_LOCK(); return 1; } @@ -1935,9 +1933,9 @@ static void *pmremap(void *old_address, size_t old_size, size_t new_size) { size_t new_real_size = ALIGNED_CEILING(new_size); - size_t new_num_pages = new_real_size / ALIGNED_SIZE; + size_t new_num_pages = new_real_size / MSEG_ALIGNED_SIZE; size_t old_real_size = ALIGNED_CEILING(old_size); - size_t old_num_pages = old_real_size / ALIGNED_SIZE; + 
From c519fd567d661d7d75a678455b2e9651ba3b421d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bj=C3=B6rn-Egil=20Dahlberg?=
Date: Wed, 14 Nov 2012 15:47:37 +0100
Subject: erts: Don't let zero be considered a power of two

---
 erts/emulator/sys/common/erl_mseg.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'erts/emulator/sys/common')

diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c
index 5fff8e5ed7..84830d91ec 100644
--- a/erts/emulator/sys/common/erl_mseg.c
+++ b/erts/emulator/sys/common/erl_mseg.c
@@ -68,7 +68,7 @@
 #define ALIGNED_CEILING(X)  ALIGNED_FLOOR((X) + INV_ALIGNED_MASK)
 #define MAP_IS_ALIGNED(X)   (((UWord)(X) & (MSEG_ALIGNED_SIZE - 1)) == 0)
 
-#define IS_2POW(X)          (((X) & ((X) - 1)) == 0)
+#define IS_2POW(X)          ((X) && !((X) & ((X) - 1)))
 static ERTS_INLINE Uint ceil_2pow(Uint x) {
     int i = 1 << (4 + (sizeof(Uint) != 4 ? 1 : 0));
     x--;
-- cgit v1.2.3
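The one-liner matters because the classic x & (x - 1) trick clears the lowest set bit, so it yields zero for every power of two -- and also for zero itself. A quick self-check; the macro is copied verbatim from the patch, the rest is illustration:

#include <assert.h>

/* x & (x - 1) clears the lowest set bit, so the result is zero only
 * for powers of two -- and for 0, which the new (X) && guard rules out. */
#define IS_2POW(X) ((X) && !((X) & ((X) - 1)))

int main(void)
{
    assert(!IS_2POW(0u));     /* the old macro called this a power of two */
    assert( IS_2POW(1u));
    assert( IS_2POW(4096u));
    assert(!IS_2POW(4097u));
    return 0;
}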
From 9d487e5c1385c074f50388a777e808a40cebaf8b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bj=C3=B6rn-Egil=20Dahlberg?=
Date: Wed, 14 Nov 2012 17:08:17 +0100
Subject: erts: Use aligned bits as constant in mseg_alloc

HAVE_SUPER_ALIGNED_MB_CARRIERS is always true with mmap, so the number
of alignment bits is a compile-time constant, and so is the "page" size
used for mmap.

Conflicts:
	erts/emulator/sys/common/erl_mseg.h
---
 erts/emulator/sys/common/erl_mseg.h | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

(limited to 'erts/emulator/sys/common')

diff --git a/erts/emulator/sys/common/erl_mseg.h b/erts/emulator/sys/common/erl_mseg.h
index f260c6506f..8f8cb0e121 100644
--- a/erts/emulator/sys/common/erl_mseg.h
+++ b/erts/emulator/sys/common/erl_mseg.h
@@ -39,9 +39,17 @@
 #endif
 
 #if HAVE_SUPER_ALIGNED_MB_CARRIERS
-# define MSEG_ALIGN_BITS (17) /*SVERK Configure me! */
+# define MSEG_ALIGN_BITS (17)
 #else
-# define MSEG_ALIGN_BITS (12) /*SVERK Configure me! */
+/* If we don't use super aligned multiblock carriers
+ * we will mmap with page size alignment (and thus use corresponding
+ * align bits).
+ *
+ * Current implementation needs this to be a constant and
+ * only uses this for user dev testing so setting page size
+ * to 4096 (12 bits) is fine.
+ */
+# define MSEG_ALIGN_BITS (12)
 #endif
 
 #if HAVE_ERTS_MSEG
-- cgit v1.2.3
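The alignment in bytes follows directly from the constant as 1 << MSEG_ALIGN_BITS. A tiny illustration, using the two values from the header above:

#include <stdio.h>

/* Illustration only: byte sizes implied by the align-bits constants
 * above (17 when super-aligned, 12 for the page-size fallback). */
int main(void)
{
    printf("17 bits -> %lu kB\n", (1UL << 17) / 1024);  /* 128 kB */
    printf("12 bits -> %lu kB\n", (1UL << 12) / 1024);  /*   4 kB */
    return 0;
}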
From 38930473052af252b8f527200f4db9ba29f435ee Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bj=C3=B6rn-Egil=20Dahlberg?=
Date: Wed, 14 Nov 2012 18:15:08 +0100
Subject: erts: Remove unused mseg options amcbf and rmcbf

---
 erts/emulator/sys/common/erl_mseg.c | 20 +-------------------
 erts/emulator/sys/common/erl_mseg.h |  6 +-----
 2 files changed, 2 insertions(+), 24 deletions(-)

(limited to 'erts/emulator/sys/common')

diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c
index 84830d91ec..2ff88ee435 100644
--- a/erts/emulator/sys/common/erl_mseg.c
+++ b/erts/emulator/sys/common/erl_mseg.c
@@ -229,9 +229,6 @@ struct ErtsMsegAllctr_t_ {
 #endif
 
     Uint max_cache_size;
-    Uint abs_max_cache_bad_fit;
-    Uint rel_max_cache_bad_fit;
-
     ErtsMsegCalls calls;
 
 #if CAN_PARTLY_DESTROY
@@ -530,7 +527,7 @@ static ERTS_INLINE void mseg_cache_free_descriptor(MemKind *mk, cache_t *c) {
 static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, Uint size) {
 
     ERTS_DBG_MK_CHK_THR_ACCESS(mk);
-    if IS_2POW(size) {
+    if (IS_2POW(size)) {
 	int ix = SIZE_TO_CACHE_AREA_IDX(size);
 	cache_t *c;
 
@@ -925,8 +922,6 @@ static struct {
     Eterm version;
 
     Eterm options;
-    Eterm amcbf;
-    Eterm rmcbf;
     Eterm mcs;
 
     Eterm memkind;
@@ -985,8 +980,6 @@ init_atoms(ErtsMsegAllctr_t *ma)
 	AM_INIT(name);
 
 	AM_INIT(options);
-	AM_INIT(amcbf);
-	AM_INIT(rmcbf);
 	AM_INIT(mcs);
 
 	AM_INIT(status);
@@ -1084,8 +1077,6 @@ info_options(ErtsMsegAllctr_t *ma,
     if (print_to_p) {
 	int to = *print_to_p;
 	void *arg = print_to_arg;
-	erts_print(to, arg, "%samcbf: %beu\n", prefix, ma->abs_max_cache_bad_fit);
-	erts_print(to, arg, "%srmcbf: %beu\n", prefix, ma->rel_max_cache_bad_fit);
 	erts_print(to, arg, "%smcs: %beu\n", prefix, ma->max_cache_size);
     }
 
@@ -1098,13 +1089,6 @@ info_options(ErtsMsegAllctr_t *ma,
 	add_2tup(hpp, szp, &res,
 		 am.mcs,
 		 bld_uint(hpp, szp, ma->max_cache_size));
-	add_2tup(hpp, szp, &res,
-		 am.rmcbf,
-		 bld_uint(hpp, szp, ma->rel_max_cache_bad_fit));
-	add_2tup(hpp, szp, &res,
-		 am.amcbf,
-		 bld_uint(hpp, szp, ma->abs_max_cache_bad_fit));
-
     }
 
     return res;
@@ -1533,8 +1517,6 @@ erts_mseg_init(ErtsMsegInit_t *init)
 
     /* Options ... */
 
-    ma->abs_max_cache_bad_fit = init->amcbf;
-    ma->rel_max_cache_bad_fit = init->rmcbf;
    ma->max_cache_size = init->mcs;
 
     if (ma->max_cache_size > MAX_CACHE_SIZE)
diff --git a/erts/emulator/sys/common/erl_mseg.h b/erts/emulator/sys/common/erl_mseg.h
index 8f8cb0e121..80a6c42741 100644
--- a/erts/emulator/sys/common/erl_mseg.h
+++ b/erts/emulator/sys/common/erl_mseg.h
@@ -60,19 +60,15 @@
 
 #define ERTS_MSEG_FLG_2POW ((Uint)(1 << 0))
 
-#define ERTS_MSEG_VSN_STR "0.9"
+#define ERTS_MSEG_VSN_STR "0.10"
 
 typedef struct {
-    Uint amcbf;
-    Uint rmcbf;
     Uint mcs;
    Uint nos;
 } ErtsMsegInit_t;
 
 #define ERTS_MSEG_INIT_DEFAULT_INITIALIZER \
 { \
-    4*1024*1024,	/* amcbf: Absolute max cache bad fit */ \
-    20,			/* rmcbf: Relative max cache bad fit */ \
     5,			/* mcs: Max cache size */ \
     1000		/* cci: Cache check interval */ \
 }
-- cgit v1.2.3

From f5bf29d4db2a257879641ae4fea7d6937027295c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bj=C3=B6rn-Egil=20Dahlberg?=
Date: Fri, 16 Nov 2012 18:52:55 +0100
Subject: erts: Fix mseg cache: misplaced NULL pointer

---
 erts/emulator/sys/common/erl_mseg.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'erts/emulator/sys/common')

diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c
index 2ff88ee435..7d03116daa 100644
--- a/erts/emulator/sys/common/erl_mseg.c
+++ b/erts/emulator/sys/common/erl_mseg.c
@@ -572,8 +572,8 @@ static ERTS_INLINE void *cache_get_segment(MemKind *mk, Uint size) {
 	    /* unlink from cache area */
 	    csize = c->size;
 	    seg = c->seg;
-	    c->next = NULL;
 	    mk->cache_area[i] = c->next;
+	    c->next = NULL;
 	    mk->cache_size--;
 	    mk->cache_hits++;
 
@@ -592,11 +592,11 @@ static ERTS_INLINE void *cache_get_segment(MemKind *mk, Uint size) {
 	    }
 	    i--;
 	}
-
 	ASSERT(csize == size);
 	return seg;
 	}
     }
+
     return NULL;
 }
 
@@ -707,6 +707,8 @@ static void mseg_clear_memkind_cache(MemKind *mk) {
 
 	mseg_drop_memkind_cache_size(mk, i);
     }
+
+    ASSERT(mk->cache_size == 0);
 }
 
 static void mseg_clear_cache(ErtsMsegAllctr_t *ma) {
-- cgit v1.2.3
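The bug fixed here is one of ordering: c->next was cleared before the list head had been advanced past c, so the rest of the cached list was lost. The correct head-unlink order, reduced to its core with a hypothetical node type:

#include <stddef.h>

/* Hypothetical list node, just to show the ordering. */
typedef struct node { struct node *next; } node;

static node *pop_head(node **head)
{
    node *c = *head;
    if (c) {
        *head = c->next;   /* read the link first...          */
        c->next = NULL;    /* ...then it is safe to detach it */
    }
    return c;
}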
From e976eb64736435b4c79bb53947a77d8bc04a0481 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bj=C3=B6rn-Egil=20Dahlberg?=
Date: Mon, 3 Dec 2012 18:31:02 +0100
Subject: erts: Reintroduce mseg options amcbf and rmcbf

Used with the new sbc cache.

---
 erts/emulator/sys/common/erl_mseg.c | 18 ++++++++++++++++++
 erts/emulator/sys/common/erl_mseg.h |  6 +++++-
 2 files changed, 23 insertions(+), 1 deletion(-)

(limited to 'erts/emulator/sys/common')

diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c
index 7d03116daa..c8dd50bf7a 100644
--- a/erts/emulator/sys/common/erl_mseg.c
+++ b/erts/emulator/sys/common/erl_mseg.c
@@ -229,6 +229,9 @@ struct ErtsMsegAllctr_t_ {
 #endif
 
     Uint max_cache_size;
+    Uint abs_max_cache_bad_fit;
+    Uint rel_max_cache_bad_fit;
+
     ErtsMsegCalls calls;
 
 #if CAN_PARTLY_DESTROY
@@ -924,6 +927,8 @@ static struct {
     Eterm version;
 
     Eterm options;
+    Eterm amcbf;
+    Eterm rmcbf;
     Eterm mcs;
 
     Eterm memkind;
@@ -982,6 +987,8 @@ init_atoms(ErtsMsegAllctr_t *ma)
 	AM_INIT(name);
 
 	AM_INIT(options);
+	AM_INIT(amcbf);
+	AM_INIT(rmcbf);
 	AM_INIT(mcs);
 
 	AM_INIT(status);
@@ -1079,6 +1086,8 @@ info_options(ErtsMsegAllctr_t *ma,
     if (print_to_p) {
 	int to = *print_to_p;
 	void *arg = print_to_arg;
+	erts_print(to, arg, "%samcbf: %beu\n", prefix, ma->abs_max_cache_bad_fit);
+	erts_print(to, arg, "%srmcbf: %beu\n", prefix, ma->rel_max_cache_bad_fit);
 	erts_print(to, arg, "%smcs: %beu\n", prefix, ma->max_cache_size);
     }
 
@@ -1091,6 +1100,13 @@ info_options(ErtsMsegAllctr_t *ma,
 	add_2tup(hpp, szp, &res,
 		 am.mcs,
 		 bld_uint(hpp, szp, ma->max_cache_size));
+	add_2tup(hpp, szp, &res,
+		 am.rmcbf,
+		 bld_uint(hpp, szp, ma->rel_max_cache_bad_fit));
+	add_2tup(hpp, szp, &res,
+		 am.amcbf,
+		 bld_uint(hpp, szp, ma->abs_max_cache_bad_fit));
+
     }
 
     return res;
@@ -1519,6 +1535,8 @@ erts_mseg_init(ErtsMsegInit_t *init)
 
     /* Options ... */
 
+    ma->abs_max_cache_bad_fit = init->amcbf;
+    ma->rel_max_cache_bad_fit = init->rmcbf;
     ma->max_cache_size = init->mcs;
 
     if (ma->max_cache_size > MAX_CACHE_SIZE)
diff --git a/erts/emulator/sys/common/erl_mseg.h b/erts/emulator/sys/common/erl_mseg.h
index 80a6c42741..8f8cb0e121 100644
--- a/erts/emulator/sys/common/erl_mseg.h
+++ b/erts/emulator/sys/common/erl_mseg.h
@@ -60,15 +60,19 @@
 
 #define ERTS_MSEG_FLG_2POW ((Uint)(1 << 0))
 
-#define ERTS_MSEG_VSN_STR "0.10"
+#define ERTS_MSEG_VSN_STR "0.9"
 
 typedef struct {
+    Uint amcbf;
+    Uint rmcbf;
     Uint mcs;
     Uint nos;
 } ErtsMsegInit_t;
 
 #define ERTS_MSEG_INIT_DEFAULT_INITIALIZER \
 { \
+    4*1024*1024,	/* amcbf: Absolute max cache bad fit */ \
+    20,			/* rmcbf: Relative max cache bad fit */ \
     5,			/* mcs: Max cache size */ \
     1000		/* cci: Cache check interval */ \
 }
-- cgit v1.2.3
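amcbf and rmcbf return because the upcoming single-block-carrier cache accepts approximate fits: a cached segment may serve a smaller request as long as the waste stays under an absolute byte cap and a relative percentage. A sketch of the acceptance test; the defaults come from the initializer above, and the combined formula is the one that later lands in cache_get_segment():

#include <stdbool.h>
#include <stddef.h>

/* Defaults from ERTS_MSEG_INIT_DEFAULT_INITIALIZER above. */
static const size_t amcbf = 4 * 1024 * 1024;  /* absolute cap, bytes   */
static const size_t rmcbf = 20;               /* relative cap, percent */

/* Accept a cached segment of csize bytes for a size-byte request only
 * if the waste is bounded; integer math sidesteps division. */
static bool fit_ok(size_t csize, size_t size)
{
    return csize >= size
        && (csize - size) * 100 < rmcbf * size
        && (csize - size) < amcbf;
}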
From e3b44b75af1c1444136029687aca9b6051f3f39a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bj=C3=B6rn-Egil=20Dahlberg?=
Date: Mon, 3 Dec 2012 16:03:01 +0100
Subject: erts: Add mseg cache for large sbc segments

* Not a power of two (unpowered) segments

---
 erts/emulator/sys/common/erl_mseg.c | 115 ++++++++++++++++++++++++++++++++----
 1 file changed, 103 insertions(+), 12 deletions(-)

(limited to 'erts/emulator/sys/common')

diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c
index c8dd50bf7a..7db9da6aba 100644
--- a/erts/emulator/sys/common/erl_mseg.c
+++ b/erts/emulator/sys/common/erl_mseg.c
@@ -183,6 +183,7 @@ typedef struct ErtsMsegAllctr_t_ ErtsMsegAllctr_t;
 
 struct mem_kind_t {
     cache_t cache[MAX_CACHE_SIZE];
+    cache_t *cache_unpowered;
     cache_t *cache_area[CACHE_AREAS];
     cache_t *cache_free;
 
@@ -529,13 +530,15 @@ static ERTS_INLINE void mseg_cache_free_descriptor(MemKind *mk, cache_t *c) {
 
 static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, Uint size) {
 
+    cache_t *c;
     ERTS_DBG_MK_CHK_THR_ACCESS(mk);
-    if (IS_2POW(size)) {
-	int ix = SIZE_TO_CACHE_AREA_IDX(size);
-	cache_t *c;
 
-	if (ix < CACHE_AREAS && mk->cache_free) {
-	    ASSERT( (1 << (ix + MSEG_ALIGN_BITS)) == size);
+    if (mk->cache_free) {
+	if (IS_2POW(size)) {
+	    int ix = SIZE_TO_CACHE_AREA_IDX(size);
+
+	    ASSERT(ix < CACHE_AREAS);
+	    ASSERT((1 << (ix + MSEG_ALIGN_BITS)) == size);
 
 	    /* unlink from free cache list */
 	    c = mseg_cache_alloc_descriptor(mk);
@@ -548,6 +551,23 @@ static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, Uint size) {
 	    mk->cache_area[ix] = c;
 	    mk->cache_size++;
 
+	    ASSERT(mk->cache_size <= mk->ma->max_cache_size);
+
+	    return 1;
+	} else {
+	    /* unlink from free cache list */
+	    c = mseg_cache_alloc_descriptor(mk);
+
+	    /* link to cache area */
+	    c->seg = seg;
+	    c->size = size;
+	    c->next = mk->cache_unpowered;
+
+	    mk->cache_unpowered = c;
+	    mk->cache_size++;
+
+	    ASSERT(mk->cache_size <= mk->ma->max_cache_size);
+
 	    return 1;
 	}
     }
@@ -555,7 +575,9 @@ static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, Uint size) {
     return 0;
 }
 
-static ERTS_INLINE void *cache_get_segment(MemKind *mk, Uint size) {
+static ERTS_INLINE void *cache_get_segment(MemKind *mk, Uint *size_p) {
+
+    Uint size = *size_p;
 
     ERTS_DBG_MK_CHK_THR_ACCESS(mk);
     if (IS_2POW(size)) {
@@ -598,8 +620,46 @@ static ERTS_INLINE void *cache_get_segment(MemKind *mk, Uint size) {
 	    ASSERT(csize == size);
 	    return seg;
 	}
-    }
+    }
+    else if (mk->cache_unpowered) {
+	void *seg;
+	cache_t *c, *pc;
+	Uint csize;
+	Uint bad_max_abs = mk->ma->abs_max_cache_bad_fit;
+	/* Uint bad_max_rel = mk->ma->rel_max_cache_bad_fit; */
+
+	c  = mk->cache_unpowered;
+	pc = c;
+
+	while (c) {
+	    csize = c->size;
+	    if (csize >= size && (csize - size) < bad_max_abs ) {
+
+		/* unlink from cache area */
+		seg = c->seg;
+
+		if (pc == c) {
+		    mk->cache_unpowered = c->next;
+		} else {
+		    pc->next = c->next;
+		}
+
+		c->next = NULL;
+		mk->cache_size--;
+		mk->cache_hits++;
+
+		/* link to free cache list */
+		mseg_cache_free_descriptor(mk, c);
+
+		*size_p = csize;
+
+		return seg;
+	    }
+
+	    pc = c;
+	    c  = c->next;
+	}
+    }
     return NULL;
 }
 
@@ -607,6 +667,30 @@ static ERTS_INLINE void *cache_get_segment(MemKind *mk, Uint size) {
  * Slowly remove segments cached in the allocator by
  * using callbacks from aux-work in the scheduler.
  */
+
+static ERTS_INLINE Uint mseg_drop_one_memkind_cache_size(MemKind *mk, cache_t **head) {
+    cache_t *c = NULL;
+
+    c = *head;
+
+    ASSERT( c != NULL );
+
+    *head = c->next;
+
+    if (erts_mtrace_enabled)
+	erts_mtrace_crr_free(SEGTYPE, SEGTYPE, c->seg);
+
+    mseg_destroy(mk->ma, mk, c->seg, c->size);
+    mseg_cache_free_descriptor(mk, c);
+
+    mk->segments.current.watermark--;
+    mk->cache_size--;
+
+    ASSERT( mk->cache_size >= 0 );
+
+    return mk->cache_size;
+}
+
 static ERTS_INLINE Uint mseg_drop_memkind_cache_size(MemKind *mk, int ix) {
     cache_t *c = NULL, *next = NULL;
 
@@ -644,12 +728,15 @@ static ERTS_INLINE Uint mseg_drop_memkind_cache_size(MemKind *mk, int ix) {
 static Uint mseg_check_memkind_cache(MemKind *mk) {
     int i;
 
-    /* remove biggest first (less likly to be reused) */
     ERTS_DBG_MK_CHK_THR_ACCESS(mk);
-    for (i = CACHE_AREAS - 1; i > 0; i--) {
+
+    if (mk->cache_unpowered)
+	return mseg_drop_one_memkind_cache_size(mk, &(mk->cache_unpowered));
+
+    for (i = 0; i < CACHE_AREAS; i++) {
 	if (mk->cache_area[i] != NULL)
-	    return mseg_drop_memkind_cache_size(mk, i);
+	    return mseg_drop_one_memkind_cache_size(mk, &(mk->cache_area[i]));
     }
 
     return 0;
@@ -769,7 +856,7 @@ mseg_alloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, Uint *size_p,
 	ma->min_seg_size = size;
 #endif
 
-    if (opt->cache && mk->cache_size > 0 && (seg = cache_get_segment(mk, size)) != NULL)
+    if (opt->cache && mk->cache_size > 0 && (seg = cache_get_segment(mk, &size)) != NULL)
 	goto done;
 
     if ((seg = mseg_create(ma, mk, size)) == NULL)
@@ -1451,13 +1538,17 @@ static void mem_kind_init(ErtsMsegAllctr_t *ma, MemKind* mk, const char* name)
 
     mk->cache_free = NULL;
 
-    for (i = 0; i < MAX_CACHE_SIZE; i++) {
+    ASSERT(ma->max_cache_size <= MAX_CACHE_SIZE);
+
+    for (i = 0; i < ma->max_cache_size; i++) {
 	mk->cache[i].seg  = NULL;
 	mk->cache[i].size = 0;
 	mk->cache[i].next = mk->cache_free;
 	mk->cache_free = &(mk->cache[i]);
     }
 
+    mk->cache_unpowered = NULL;
+
     mk->cache_size = 0;
     mk->cache_hits = 0;
-- cgit v1.2.3
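Unpowered segments live on a plain singly linked list, so lookup is a linear scan that unlinks the first acceptable entry. The same scan reduced to a sketch, with the bad-fit limits omitted and a pointer-to-pointer walk standing in for the patch's pc/c pair (a common way to avoid the separate "previous" pointer):

#include <stddef.h>

/* Hypothetical cache node; field names follow the patch's cache_t. */
typedef struct cache_node {
    void *seg;
    size_t size;
    struct cache_node *next;
} cache_node;

/* First acceptable entry wins; *link always points at the link field
 * that must be rewritten, so no separate "previous" pointer is needed. */
static void *take_unpowered(cache_node **head, size_t want)
{
    cache_node **link;
    for (link = head; *link; link = &(*link)->next) {
        cache_node *c = *link;
        if (c->size >= want) {     /* bad-fit limits omitted for brevity */
            void *seg = c->seg;
            *link = c->next;       /* unlink the node */
            c->next = NULL;
            return seg;
        }
    }
    return NULL;
}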
From 6c95e5c7bc376a2b04cdd9b23d0441fa9bee9e78 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bj=C3=B6rn-Egil=20Dahlberg?=
Date: Tue, 4 Dec 2012 17:02:12 +0100
Subject: erts: Do not cache segments that are misaligned

* SBC may realloc carriers to misaligned addresses, which is perfectly
  fine. However, those segments may not be cached, because MBC
  allocations might find them and MBCs *need* correct alignment.

---
 erts/emulator/sys/common/erl_mseg.c | 102 +++++++++++++++++++++---------------
 1 file changed, 61 insertions(+), 41 deletions(-)

(limited to 'erts/emulator/sys/common')

diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c
index 7db9da6aba..a79ff5a82f 100644
--- a/erts/emulator/sys/common/erl_mseg.c
+++ b/erts/emulator/sys/common/erl_mseg.c
@@ -348,37 +348,32 @@ schedule_cache_check(ErtsMsegAllctr_t *ma) {
 static ERTS_INLINE void *
 mmap_align(ErtsMsegAllctr_t *ma, void *addr, size_t length, int prot, int flags, int fd, off_t offset) {
 
-    void *seg, *aseg;
-    unsigned long diff;
+    void *p, *q;
+    UWord d;
 
-    seg = mmap(addr, length, prot, flags, fd, offset);
+    p = mmap(addr, length, prot, flags, fd, offset);
 
-    if (MAP_IS_ALIGNED(seg) || seg == MAP_FAILED) {
-	return seg;
-    }
+    if (MAP_IS_ALIGNED(p) || p == MAP_FAILED)
+	return p;
 
     if (ma)
	INC_CC(ma, create_resize);
 
-    munmap(seg, length);
+    munmap(p, length);
 
-    if ((seg = mmap(addr, length + MSEG_ALIGNED_SIZE, prot, flags, fd, offset)) == MAP_FAILED) {
-	return seg;
-    }
+    if ((p = mmap(addr, length + MSEG_ALIGNED_SIZE, prot, flags, fd, offset)) == MAP_FAILED)
+	return MAP_FAILED;
 
-    /* ceil to aligned pointer */
-    aseg = (void *)(((unsigned long)(seg + MSEG_ALIGNED_SIZE)) & (~(MSEG_ALIGNED_SIZE - 1)));
-    diff = aseg - seg;
+    q = (void *)ALIGNED_CEILING(p);
+    d = q - p;
 
-    if (diff > 0) {
-	munmap(seg, diff);
-    }
+    if (d > 0)
+	munmap(p, d);
 
-    if (MSEG_ALIGNED_SIZE - diff > 0) {
-	munmap((void *) (aseg + length), MSEG_ALIGNED_SIZE - diff);
-    }
+    if (MSEG_ALIGNED_SIZE - d > 0)
+	munmap((void *) (q + length), MSEG_ALIGNED_SIZE - d);
 
-    return aseg;
+    return q;
 }
 
 static ERTS_INLINE void *
@@ -394,8 +389,7 @@ mseg_create(ErtsMsegAllctr_t *ma, MemKind* mk, Uint size)
 	    erts_fprintf(stderr,"Pointer mask failure (0x%08lx)\n",(unsigned long) seg);
 	    return NULL;
 	}
-    }
-    else
+    } else
 #endif
     {
 #if HAVE_MMAP
@@ -404,6 +398,8 @@ mseg_create(ErtsMsegAllctr_t *ma, MemKind* mk, Uint size)
 			   MMAP_PROT, MMAP_FLAGS, MMAP_FD, 0);
 	    if (seg == (void *) MAP_FAILED)
 		seg = NULL;
+
+	    ASSERT(MAP_IS_ALIGNED(seg) || !seg);
 	}
 #else
 # error "Missing mseg_create() implementation"
@@ -441,6 +437,28 @@ mseg_destroy(ErtsMsegAllctr_t *ma, MemKind* mk, void *seg, Uint size) {
 }
 
 #if HAVE_MSEG_RECREATE
+#if defined(__NetBsd__)
+#define MREMAP_FLAGS  (0)
+#else
+#define MREMAP_FLAGS  (MREMAP_MAYMOVE)
+#endif
+
+
+/* mseg_recreate
+ * May return *unaligned* segments as in address not aligned to MSEG_ALIGNMENT
+ * it is still page aligned
+ *
+ * This is fine for single block carriers as long as we don't cache misaligned
+ * segments (since multiblock carriers may use them)
+ *
+ * For multiblock carriers we *need* MSEG_ALIGNMENT but mbc's will never be
+ * reallocated.
+ *
+ * This should probably be fixed the following way:
+ * 1) Use an option to segment allocation - NEED_ALIGNMENT
+ * 2) Add mremap_align which takes care of aligning a new a mremaped area
+ * 3) Fix the cache to handle of aligned and unaligned segments
+ */
 
 static ERTS_INLINE void *
 mseg_recreate(ErtsMsegAllctr_t *ma, MemKind* mk, void *old_seg, Uint old_size, Uint new_size)
@@ -460,19 +478,11 @@ mseg_recreate(ErtsMsegAllctr_t *ma, MemKind* mk, void *old_seg, Uint old_size, U
 #endif
     {
 #if HAVE_MREMAP
-
-    #if defined(__NetBSD__)
-	new_seg = (void *) mremap((void *) old_seg,
-				  (size_t) old_size,
-				  NULL,
-				  (size_t) new_size,
-				  0);
-    #else
-	new_seg = (void *) mremap((void *) old_seg,
-				  (size_t) old_size,
-				  (size_t) new_size,
-				  MREMAP_MAYMOVE);
-    #endif
+#if defined(__NetBSD__)
+	new_seg = mremap(old_seg, (size_t)old_size, NULL, new_size, MREMAP_FLAGS);
+#else
+	new_seg = mremap(old_seg, (size_t)old_size, (size_t)new_size, MREMAP_FLAGS);
+#endif
 	if (new_seg == (void *) MAP_FAILED)
 	    new_seg = NULL;
 #else
@@ -533,7 +543,7 @@ static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, Uint size) {
 
     cache_t *c;
     ERTS_DBG_MK_CHK_THR_ACCESS(mk);
-    if (mk->cache_free) {
+    if (mk->cache_free && MAP_IS_ALIGNED(seg)) {
 	if (IS_2POW(size)) {
 	    int ix = SIZE_TO_CACHE_AREA_IDX(size);
 
@@ -636,8 +646,7 @@ static ERTS_INLINE void *cache_get_segment(MemKind *mk, Uint *size_p) {
 	    if (csize >= size && (csize - size) < bad_max_abs ) {
 
 		/* unlink from cache area */
-		seg = c->seg;
-
+		seg = c->seg;
 
 		if (pc == c) {
 		    mk->cache_unpowered = c->next;
@@ -907,12 +916,15 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg,
     void *new_seg;
     Uint new_size;
 
+    /* Just allocate a new segment if we didn't have one before */
     if (!seg || !old_size) {
 	new_seg = mseg_alloc(ma, atype, new_size_p, flags, opt);
 	DEC_CC(ma, alloc);
 	return new_seg;
     }
 
+
+    /* Dealloc old segment if new segment is of size 0 */
     if (!(*new_size_p)) {
 	mseg_dealloc(ma, atype, seg, old_size, opt);
 	DEC_CC(ma, dealloc);
@@ -955,8 +967,10 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg,
 #elif HAVE_MSEG_RECREATE
 	    goto do_recreate;
 #else
-
 	    new_seg = mseg_alloc(ma, atype, &new_size, flags, opt);
+
+	    ASSERT(MAP_IS_ALIGNED(new_seg) || !new_seg);
+
 	    if (!new_seg)
 		new_size = old_size;
 	    else {
@@ -965,9 +979,7 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg,
 			    MIN(new_size, old_size));
 		mseg_dealloc(ma, atype, seg, old_size, opt);
 	    }
-
 #endif
-
 	}
     }
     else {
@@ -975,6 +987,7 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg,
 	if (!opt->preserv) {
 	    mseg_dealloc(ma, atype, seg, old_size, opt);
 	    new_seg = mseg_alloc(ma, atype, &new_size, flags, opt);
+	    ASSERT(MAP_IS_ALIGNED(new_seg) || !new_seg);
 	}
 	else {
 #if HAVE_MSEG_RECREATE
@@ -982,12 +995,19 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg,
 	do_recreate:
 #endif
 	    new_seg = mseg_recreate(ma, mk, (void *) seg, old_size, new_size);
+	    /* ASSERT(MAP_IS_ALIGNED(new_seg) || !new_seg);
+	     * will not always be aligned and it ok for now
+	     */
+
 	    if (erts_mtrace_enabled)
 		erts_mtrace_crr_realloc(new_seg, atype, SEGTYPE, seg, new_size);
 	    if (!new_seg)
 		new_size = old_size;
 #else
 	    new_seg = mseg_alloc(ma, atype, &new_size, flags, opt);
+
+	    ASSERT(MAP_IS_ALIGNED(new_seg) || !new_seg);
+
 	    if (!new_seg)
 		new_size = old_size;
 	    else {
-- cgit v1.2.3
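mmap() only guarantees page alignment, so mmap_align() over-maps by one alignment unit and trims the slack when the first attempt comes back misaligned. A self-contained sketch of that technique under assumptions: POSIX mmap, an anonymous private mapping (MAP_ANONYMOUS may be spelled MAP_ANON on some systems), and an assumed alignment unit -- this is not the ERTS function itself:

#include <stdint.h>
#include <sys/mman.h>

#define ALIGN ((size_t)1 << 18)   /* assumed alignment unit (256 kB) */

static void *mmap_aligned(size_t length)
{
    uintptr_t q;
    size_t head;
    void *p = mmap(NULL, length, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return NULL;
    if (((uintptr_t)p & (ALIGN - 1)) == 0)
        return p;                        /* already aligned, done */

    /* Retry with one extra alignment unit, then trim both ends. */
    munmap(p, length);
    p = mmap(NULL, length + ALIGN, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return NULL;

    q = ((uintptr_t)p + ALIGN - 1) & ~(uintptr_t)(ALIGN - 1);  /* ceil */
    head = q - (uintptr_t)p;
    if (head)
        munmap(p, head);                             /* slack before */
    if (ALIGN - head)
        munmap((void *)(q + length), ALIGN - head);  /* slack after  */
    return (void *)q;
}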
From 0fb68ef1f8c7f44c6f7edc1cc461d2b598b96fd9 Mon Sep 17 00:00:00 2001
From: Sverker Eriksson
Date: Tue, 4 Dec 2012 17:57:51 +0100
Subject: erts: Set super alignment (256kb) and limits for sbct (8Mb) and lmbcs (128Mb)

---
 erts/emulator/sys/common/erl_mseg.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'erts/emulator/sys/common')

diff --git a/erts/emulator/sys/common/erl_mseg.h b/erts/emulator/sys/common/erl_mseg.h
index 8f8cb0e121..6f373f13f9 100644
--- a/erts/emulator/sys/common/erl_mseg.h
+++ b/erts/emulator/sys/common/erl_mseg.h
@@ -39,7 +39,8 @@
 #endif
 
 #if HAVE_SUPER_ALIGNED_MB_CARRIERS
-# define MSEG_ALIGN_BITS (17)
+# define MSEG_ALIGN_BITS (18)
+  /* Affects hard limits for sbct and lmbcs documented in erts_alloc.xml */
 #else
 /* If we don't use super aligned multiblock carriers
  * we will mmap with page size alignment (and thus use corresponding
-- cgit v1.2.3

From f7bbf8938b5dc0b4b28f5c3f932cce4b7b2def37 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bj=C3=B6rn-Egil=20Dahlberg?=
Date: Wed, 12 Dec 2012 20:06:43 +0100
Subject: erts: Clear entire mseg cache upon request

---
 erts/emulator/sys/common/erl_mseg.c | 26 +++++++++++++++++---------
 1 file changed, 17 insertions(+), 9 deletions(-)

(limited to 'erts/emulator/sys/common')

diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c
index a79ff5a82f..94f9f76a20 100644
--- a/erts/emulator/sys/common/erl_mseg.c
+++ b/erts/emulator/sys/common/erl_mseg.c
@@ -636,14 +636,16 @@ static ERTS_INLINE void *cache_get_segment(MemKind *mk, Uint *size_p) {
 	cache_t *c, *pc;
 	Uint csize;
 	Uint bad_max_abs = mk->ma->abs_max_cache_bad_fit;
-	/* Uint bad_max_rel = mk->ma->rel_max_cache_bad_fit; */
+	Uint bad_max_rel = mk->ma->rel_max_cache_bad_fit;
 
 	c  = mk->cache_unpowered;
 	pc = c;
 
 	while (c) {
 	    csize = c->size;
-	    if (csize >= size && (csize - size) < bad_max_abs ) {
+	    if (csize >= size &&
+		((csize - size)*100 < bad_max_rel*size) &&
+		 (csize - size) < bad_max_abs ) {
 
 		/* unlink from cache area */
 		seg = c->seg;
@@ -700,10 +702,10 @@ static ERTS_INLINE Uint mseg_drop_one_memkind_cache_size(MemKind *mk, cache_t **
     return mk->cache_size;
 }
 
-static ERTS_INLINE Uint mseg_drop_memkind_cache_size(MemKind *mk, int ix) {
+static ERTS_INLINE Uint mseg_drop_memkind_cache_size(MemKind *mk, cache_t **head) {
     cache_t *c = NULL, *next = NULL;
 
-    c = mk->cache_area[ix];
+    c = *head;
     ASSERT( c != NULL );
 
     while (c) {
@@ -722,7 +724,7 @@ static ERTS_INLINE Uint mseg_drop_memkind_cache_size(MemKind *mk, int ix) {
 	c = next;
     }
 
-    mk->cache_area[ix] = NULL;
+    *head = NULL;
 
     ASSERT( mk->cache_size >= 0 );
 
@@ -740,14 +742,14 @@ static Uint mseg_check_memkind_cache(MemKind *mk) {
 
     ERTS_DBG_MK_CHK_THR_ACCESS(mk);
 
-    if (mk->cache_unpowered)
-	return mseg_drop_one_memkind_cache_size(mk, &(mk->cache_unpowered));
-
     for (i = 0; i < CACHE_AREAS; i++) {
 	if (mk->cache_area[i] != NULL)
 	    return mseg_drop_one_memkind_cache_size(mk, &(mk->cache_area[i]));
     }
 
+    if (mk->cache_unpowered)
+	return mseg_drop_one_memkind_cache_size(mk, &(mk->cache_unpowered));
+
     return 0;
 }
 
@@ -800,13 +802,19 @@ void erts_mseg_cache_check(void) {
 static void mseg_clear_memkind_cache(MemKind *mk) {
     int i;
 
+    /* drop pow2 caches */
     for (i = 0; i < CACHE_AREAS; i++) {
 	if (mk->cache_area[i] == NULL)
 	    continue;
 
-	mseg_drop_memkind_cache_size(mk, i);
+	mseg_drop_memkind_cache_size(mk, &(mk->cache_area[i]));
+	ASSERT(mk->cache_area[i] == NULL);
     }
 
+    /* drop varied caches */
+    if(mk->cache_unpowered)
+	mseg_drop_memkind_cache_size(mk, &(mk->cache_unpowered));
+
+    ASSERT(mk->cache_unpowered == NULL);
     ASSERT(mk->cache_size == 0);
 }
-- cgit v1.2.3
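After this change, clearing the whole cache walks every power-of-two area and then the unpowered list, dropping each entry. A miniature of that flow with hypothetical types; free() stands in for mseg_destroy() and the area count is an assumption:

#include <stdlib.h>

enum { AREAS = 10 };              /* assumed pow2 area count */

typedef struct entry {
    void *seg;
    struct entry *next;
} entry;

static void drop_list(entry **head, size_t *cache_size)
{
    entry *c = *head;
    while (c) {
        entry *next = c->next;
        free(c->seg);             /* stands in for mseg_destroy() */
        free(c);
        (*cache_size)--;
        c = next;
    }
    *head = NULL;
}

static void clear_cache(entry *area[AREAS], entry **unpowered,
                        size_t *cache_size)
{
    int i;
    for (i = 0; i < AREAS; i++)   /* pow2 caches first */
        if (area[i])
            drop_list(&area[i], cache_size);
    if (*unpowered)               /* then the varied sizes */
        drop_list(unpowered, cache_size);
    /* cache_size should now be zero */
}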