Diffstat (limited to 'erts/emulator/sys/common')

 erts/emulator/sys/common/erl_check_io.c        |    7
 erts/emulator/sys/common/erl_mseg.c            | 1112
 erts/emulator/sys/common/erl_mseg.h            |   25
 erts/emulator/sys/common/erl_poll.c            |  310
 erts/emulator/sys/common/erl_sys_common_misc.c |  107

5 files changed, 1136 insertions, 425 deletions
diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c index 218bd79584..cd4de21d65 100644 --- a/erts/emulator/sys/common/erl_check_io.c +++ b/erts/emulator/sys/common/erl_check_io.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2006-2009. All Rights Reserved. + * Copyright Ericsson AB 2006-2011. All Rights Reserved. * * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -1137,6 +1137,11 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait) restart: +#ifdef ERTS_BREAK_REQUESTED + if (ERTS_BREAK_REQUESTED) + erts_do_break_handling(); +#endif + /* Figure out timeout value */ if (do_wait) { erts_time_remaining(&wait_time); diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c index 5dfd66bd7c..eaef6680dd 100644 --- a/erts/emulator/sys/common/erl_mseg.c +++ b/erts/emulator/sys/common/erl_mseg.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2002-2010. All Rights Reserved. + * Copyright Ericsson AB 2002-2011. All Rights Reserved. * * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -35,6 +35,7 @@ #include "global.h" #include "erl_threads.h" #include "erl_mtrace.h" +#include "erl_time.h" #include "big.h" #if HAVE_ERTS_MSEG @@ -76,17 +77,29 @@ static int atoms_initialized; static Uint cache_check_interval; +typedef struct mem_kind_t MemKind; + static void check_cache(void *unused); -static void mseg_clear_cache(void); +static void mseg_clear_cache(MemKind*); static int is_cache_check_scheduled; #ifdef ERTS_THREADS_NO_SMP static int is_cache_check_requested; #endif +#if HALFWORD_HEAP +static int initialize_pmmap(void); +static void *pmmap(size_t size); +static int pmunmap(void *p, size_t size); +static void *pmremap(void *old_address, size_t old_size, + size_t new_size); +#endif + #if HAVE_MMAP /* Mmap ... 
*/ #define MMAP_PROT (PROT_READ|PROT_WRITE) + + #ifdef MAP_ANON # define MMAP_FLAGS (MAP_ANON|MAP_PRIVATE) # define MMAP_FD (-1) @@ -102,19 +115,35 @@ static int mmap_fd; # define HAVE_MSEG_RECREATE 0 #endif +#if HALFWORD_HEAP +#define CAN_PARTLY_DESTROY 0 +#else #define CAN_PARTLY_DESTROY 1 +#endif #else /* #if HAVE_MMAP */ #define CAN_PARTLY_DESTROY 0 #error "Not supported" #endif /* #if HAVE_MMAP */ +#if defined(ERTS_MSEG_FAKE_SEGMENTS) && HALFWORD_HEAP +# warning "ERTS_MSEG_FAKE_SEGMENTS will only be used for high memory segments" +#endif #if defined(ERTS_MSEG_FAKE_SEGMENTS) #undef CAN_PARTLY_DESTROY #define CAN_PARTLY_DESTROY 0 #endif -static const ErtsMsegOpt_t default_opt = ERTS_MSEG_DEFAULT_OPT_INITIALIZER; +const ErtsMsegOpt_t erts_mseg_default_opt = { + 1, /* Use cache */ + 1, /* Preserv data */ + 0, /* Absolute shrink threshold */ + 0 /* Relative shrink threshold */ +#if HALFWORD_HEAP + ,0 /* need low memory */ +#endif +}; + typedef struct cache_desc_t_ { void *seg; @@ -145,14 +174,43 @@ static struct { CallCounter check_cache; } calls; -static cache_desc_t cache_descs[MAX_CACHE_SIZE]; -static cache_desc_t *free_cache_descs; -static cache_desc_t *cache; -static cache_desc_t *cache_end; -static Uint cache_hits; -static Uint cache_size; -static Uint min_cached_seg_size; -static Uint max_cached_seg_size; +struct mem_kind_t { + cache_desc_t cache_descs[MAX_CACHE_SIZE]; + cache_desc_t *free_cache_descs; + cache_desc_t *cache; + cache_desc_t *cache_end; + + Uint cache_size; + Uint min_cached_seg_size; + Uint max_cached_seg_size; + Uint cache_hits; + + struct { + struct { + Uint watermark; + Uint no; + Uint sz; + } current; + struct { + Uint no; + Uint sz; + } max; + struct { + Uint no; + Uint sz; + } max_ever; + } segments; + + const char* name; + MemKind* next; +};/*MemKind*/ + +#if HALFWORD_HEAP +static MemKind low_mem, hi_mem; +#else +static MemKind the_mem; +#endif +static MemKind* mk_list = NULL; static Uint max_cache_size; static Uint abs_max_cache_bad_fit; @@ -162,47 +220,32 @@ static Uint rel_max_cache_bad_fit; static Uint min_seg_size; #endif -struct { - struct { - Uint watermark; - Uint no; - Uint sz; - } current; - struct { - Uint no; - Uint sz; - } max; - struct { - Uint no; - Uint sz; - } max_ever; -} segments; -#define ERTS_MSEG_ALLOC_STAT(SZ) \ +#define ERTS_MSEG_ALLOC_STAT(C,SZ) \ do { \ - segments.current.no++; \ - if (segments.max.no < segments.current.no) \ - segments.max.no = segments.current.no; \ - if (segments.current.watermark < segments.current.no) \ - segments.current.watermark = segments.current.no; \ - segments.current.sz += (SZ); \ - if (segments.max.sz < segments.current.sz) \ - segments.max.sz = segments.current.sz; \ + C->segments.current.no++; \ + if (C->segments.max.no < C->segments.current.no) \ + C->segments.max.no = C->segments.current.no; \ + if (C->segments.current.watermark < C->segments.current.no) \ + C->segments.current.watermark = C->segments.current.no; \ + C->segments.current.sz += (SZ); \ + if (C->segments.max.sz < C->segments.current.sz) \ + C->segments.max.sz = C->segments.current.sz; \ } while (0) -#define ERTS_MSEG_DEALLOC_STAT(SZ) \ +#define ERTS_MSEG_DEALLOC_STAT(C,SZ) \ do { \ - ASSERT(segments.current.no > 0); \ - segments.current.no--; \ - ASSERT(segments.current.sz >= (SZ)); \ - segments.current.sz -= (SZ); \ + ASSERT(C->segments.current.no > 0); \ + C->segments.current.no--; \ + ASSERT(C->segments.current.sz >= (SZ)); \ + C->segments.current.sz -= (SZ); \ } while (0) -#define ERTS_MSEG_REALLOC_STAT(OSZ, NSZ) \ +#define 
ERTS_MSEG_REALLOC_STAT(C,OSZ, NSZ) \ do { \ - ASSERT(segments.current.sz >= (OSZ)); \ - segments.current.sz -= (OSZ); \ - segments.current.sz += (NSZ); \ + ASSERT(C->segments.current.sz >= (OSZ)); \ + C->segments.current.sz -= (OSZ); \ + C->segments.current.sz += (NSZ); \ } while (0) #define ONE_GIGA (1000000000) @@ -232,6 +275,7 @@ static void thread_safe_init(void) { erts_mtx_init(&init_atoms_mutex, "mseg_init_atoms"); erts_mtx_init(&mseg_mutex, "mseg"); + #ifdef ERTS_THREADS_NO_SMP main_tid = erts_thr_self(); #endif @@ -256,7 +300,7 @@ schedule_cache_check(void) #endif { cache_check_timer.active = 0; - erl_set_timer(&cache_check_timer, + erts_set_timer(&cache_check_timer, check_cache, NULL, NULL, @@ -287,28 +331,45 @@ check_schedule_cache_check(void) static void mseg_shutdown(void) { + MemKind* mk; erts_mtx_lock(&mseg_mutex); - mseg_clear_cache(); + for (mk=mk_list; mk; mk=mk->next) { + mseg_clear_cache(mk); + } erts_mtx_unlock(&mseg_mutex); } static ERTS_INLINE void * -mseg_create(Uint size) +mseg_create(MemKind* mk, Uint size) { void *seg; ASSERT(size % page_size == 0); +#if HALFWORD_HEAP + if (mk == &low_mem) { + seg = pmmap(size); + if ((unsigned long) seg & CHECK_POINTER_MASK) { + erts_fprintf(stderr,"Pointer mask failure (0x%08lx)\n",(unsigned long) seg); + return NULL; + } + } + else +#endif + { #if defined(ERTS_MSEG_FAKE_SEGMENTS) - seg = erts_sys_alloc(ERTS_ALC_N_INVALID, NULL, size); + seg = erts_sys_alloc(ERTS_ALC_N_INVALID, NULL, size); #elif HAVE_MMAP - seg = (void *) mmap((void *) 0, (size_t) size, - MMAP_PROT, MMAP_FLAGS, MMAP_FD, 0); - if (seg == (void *) MAP_FAILED) - seg = NULL; + { + seg = (void *) mmap((void *) 0, (size_t) size, + MMAP_PROT, MMAP_FLAGS, MMAP_FD, 0); + if (seg == (void *) MAP_FAILED) + seg = NULL; + } #else -#error "Missing mseg_create() implementation" +# error "Missing mseg_create() implementation" #endif + } INC_CC(create); @@ -316,23 +377,29 @@ mseg_create(Uint size) } static ERTS_INLINE void -mseg_destroy(void *seg, Uint size) +mseg_destroy(MemKind* mk, void *seg, Uint size) { -#if defined(ERTS_MSEG_FAKE_SEGMENTS) - erts_sys_free(ERTS_ALC_N_INVALID, NULL, seg); -#elif HAVE_MMAP + int res; -#ifdef DEBUG - int res = +#if HALFWORD_HEAP + if (mk == &low_mem) { + res = pmunmap((void *) seg, size); + } + else #endif - - munmap((void *) seg, size); + { +#ifdef ERTS_MSEG_FAKE_SEGMENTS + erts_sys_free(ERTS_ALC_N_INVALID, NULL, seg); + res = 0; +#elif HAVE_MMAP + res = munmap((void *) seg, size); +#else +# error "Missing mseg_destroy() implementation" +#endif + } ASSERT(size % page_size == 0); ASSERT(res == 0); -#else -#error "Missing mseg_destroy() implementation" -#endif INC_CC(destroy); @@ -341,25 +408,44 @@ mseg_destroy(void *seg, Uint size) #if HAVE_MSEG_RECREATE static ERTS_INLINE void * -mseg_recreate(void *old_seg, Uint old_size, Uint new_size) +mseg_recreate(MemKind* mk, void *old_seg, Uint old_size, Uint new_size) { void *new_seg; ASSERT(old_size % page_size == 0); ASSERT(new_size % page_size == 0); +#if HALFWORD_HEAP + if (mk == &low_mem) { + new_seg = (void *) pmremap((void *) old_seg, + (size_t) old_size, + (size_t) new_size); + } + else +#endif + { #if defined(ERTS_MSEG_FAKE_SEGMENTS) - new_seg = erts_sys_realloc(ERTS_ALC_N_INVALID, NULL, old_seg, new_size); + new_seg = erts_sys_realloc(ERTS_ALC_N_INVALID, NULL, old_seg, new_size); #elif HAVE_MREMAP - new_seg = (void *) mremap((void *) old_seg, - (size_t) old_size, - (size_t) new_size, - MREMAP_MAYMOVE); - if (new_seg == (void *) MAP_FAILED) - new_seg = NULL; + + #if defined(__NetBSD__) + 
new_seg = (void *) mremap((void *) old_seg, + (size_t) old_size, + NULL, + (size_t) new_size, + 0); + #else + new_seg = (void *) mremap((void *) old_seg, + (size_t) old_size, + (size_t) new_size, + MREMAP_MAYMOVE); + #endif + if (new_seg == (void *) MAP_FAILED) + new_seg = NULL; #else #error "Missing mseg_recreate() implementation" #endif + } INC_CC(recreate); @@ -370,134 +456,142 @@ mseg_recreate(void *old_seg, Uint old_size, Uint new_size) static ERTS_INLINE cache_desc_t * -alloc_cd(void) +alloc_cd(MemKind* mk) { - cache_desc_t *cd = free_cache_descs; + cache_desc_t *cd = mk->free_cache_descs; ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex)); if (cd) - free_cache_descs = cd->next; + mk->free_cache_descs = cd->next; return cd; } static ERTS_INLINE void -free_cd(cache_desc_t *cd) +free_cd(MemKind* mk, cache_desc_t *cd) { ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex)); - cd->next = free_cache_descs; - free_cache_descs = cd; + cd->next = mk->free_cache_descs; + mk->free_cache_descs = cd; } static ERTS_INLINE void -link_cd(cache_desc_t *cd) +link_cd(MemKind* mk, cache_desc_t *cd) { ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex)); - if (cache) - cache->prev = cd; - cd->next = cache; + if (mk->cache) + mk->cache->prev = cd; + cd->next = mk->cache; cd->prev = NULL; - cache = cd; + mk->cache = cd; - if (!cache_end) { + if (!mk->cache_end) { ASSERT(!cd->next); - cache_end = cd; + mk->cache_end = cd; } - cache_size++; + mk->cache_size++; } +#if CAN_PARTLY_DESTROY static ERTS_INLINE void -end_link_cd(cache_desc_t *cd) +end_link_cd(MemKind* mk, cache_desc_t *cd) { ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex)); - if (cache_end) - cache_end->next = cd; + if (mk->cache_end) + mk->cache_end->next = cd; cd->next = NULL; - cd->prev = cache_end; - cache_end = cd; + cd->prev = mk->cache_end; + mk->cache_end = cd; - if (!cache) { + if (!mk->cache) { ASSERT(!cd->prev); - cache = cd; + mk->cache = cd; } - cache_size++; + mk->cache_size++; } +#endif static ERTS_INLINE void -unlink_cd(cache_desc_t *cd) +unlink_cd(MemKind* mk, cache_desc_t *cd) { ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex)); if (cd->next) cd->next->prev = cd->prev; else - cache_end = cd->prev; + mk->cache_end = cd->prev; if (cd->prev) cd->prev->next = cd->next; else - cache = cd->next; - ASSERT(cache_size > 0); - cache_size--; + mk->cache = cd->next; + ASSERT(mk->cache_size > 0); + mk->cache_size--; } static ERTS_INLINE void -check_cache_limits(void) +check_cache_limits(MemKind* mk) { cache_desc_t *cd; ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex)); - max_cached_seg_size = 0; - min_cached_seg_size = ~((Uint) 0); - for (cd = cache; cd; cd = cd->next) { - if (cd->size < min_cached_seg_size) - min_cached_seg_size = cd->size; - if (cd->size > max_cached_seg_size) - max_cached_seg_size = cd->size; + mk->max_cached_seg_size = 0; + mk->min_cached_seg_size = ~((Uint) 0); + for (cd = mk->cache; cd; cd = cd->next) { + if (cd->size < mk->min_cached_seg_size) + mk->min_cached_seg_size = cd->size; + if (cd->size > mk->max_cached_seg_size) + mk->max_cached_seg_size = cd->size; } - } static ERTS_INLINE void -adjust_cache_size(int force_check_limits) +adjust_cache_size(MemKind* mk, int force_check_limits) { cache_desc_t *cd; int check_limits = force_check_limits; - Sint max_cached = ((Sint) segments.current.watermark - - (Sint) segments.current.no); + Sint max_cached = ((Sint) mk->segments.current.watermark + - (Sint) mk->segments.current.no); ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex)); - while (((Sint) cache_size) > max_cached && 
((Sint) cache_size) > 0) { - ASSERT(cache_end); - cd = cache_end; + while (((Sint) mk->cache_size) > max_cached && ((Sint) mk->cache_size) > 0) { + ASSERT(mk->cache_end); + cd = mk->cache_end; if (!check_limits && - !(min_cached_seg_size < cd->size - && cd->size < max_cached_seg_size)) { + !(mk->min_cached_seg_size < cd->size + && cd->size < mk->max_cached_seg_size)) { check_limits = 1; } if (erts_mtrace_enabled) erts_mtrace_crr_free(SEGTYPE, SEGTYPE, cd->seg); - mseg_destroy(cd->seg, cd->size); - unlink_cd(cd); - free_cd(cd); + mseg_destroy(mk, cd->seg, cd->size); + unlink_cd(mk,cd); + free_cd(mk,cd); } if (check_limits) - check_cache_limits(); - + check_cache_limits(mk); } static void -check_cache(void *unused) +check_one_cache(MemKind* mk) +{ + if (mk->segments.current.watermark > mk->segments.current.no) + mk->segments.current.watermark--; + adjust_cache_size(mk, 0); + + if (mk->cache_size) + schedule_cache_check(); +} + +static void check_cache(void* unused) { + MemKind* mk; erts_mtx_lock(&mseg_mutex); is_cache_check_scheduled = 0; - if (segments.current.watermark > segments.current.no) - segments.current.watermark--; - adjust_cache_size(0); - - if (cache_size) - schedule_cache_check(); + for (mk=mk_list; mk; mk=mk->next) { + check_one_cache(mk); + } INC_CC(check_cache); @@ -505,28 +599,37 @@ check_cache(void *unused) } static void -mseg_clear_cache(void) +mseg_clear_cache(MemKind* mk) { - segments.current.watermark = 0; + mk->segments.current.watermark = 0; - adjust_cache_size(1); + adjust_cache_size(mk, 1); - ASSERT(!cache); - ASSERT(!cache_end); - ASSERT(!cache_size); + ASSERT(!mk->cache); + ASSERT(!mk->cache_end); + ASSERT(!mk->cache_size); - segments.current.watermark = segments.current.no; + mk->segments.current.watermark = mk->segments.current.no; INC_CC(clear_cache); } +static ERTS_INLINE MemKind* memkind(const ErtsMsegOpt_t *opt) +{ +#if HALFWORD_HEAP + return opt->low_mem ? 
&low_mem : &hi_mem; +#else + return &the_mem; +#endif +} + static void * mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt) { - Uint max, min, diff_size, size; cache_desc_t *cd, *cand_cd; void *seg; + MemKind* mk = memkind(opt); INC_CC(alloc); @@ -539,11 +642,11 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt) if (!opt->cache) { create_seg: - adjust_cache_size(0); - seg = mseg_create(size); + adjust_cache_size(mk,0); + seg = mseg_create(mk, size); if (!seg) { - mseg_clear_cache(); - seg = mseg_create(size); + mseg_clear_cache(mk); + seg = mseg_create(mk, size); if (!seg) size = 0; } @@ -552,17 +655,17 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt) if (seg) { if (erts_mtrace_enabled) erts_mtrace_crr_alloc(seg, atype, ERTS_MTRACE_SEGMENT_ID, size); - ERTS_MSEG_ALLOC_STAT(size); + ERTS_MSEG_ALLOC_STAT(mk,size); } return seg; } - if (size > max_cached_seg_size) + if (size > mk->max_cached_seg_size) goto create_seg; - if (size < min_cached_seg_size) { + if (size < mk->min_cached_seg_size) { - diff_size = min_cached_seg_size - size; + diff_size = mk->min_cached_seg_size - size; if (diff_size > abs_max_cache_bad_fit) goto create_seg; @@ -576,7 +679,7 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt) min = ~((Uint) 0); cand_cd = NULL; - for (cd = cache; cd; cd = cd->next) { + for (cd = mk->cache; cd; cd = cd->next) { if (cd->size >= size) { if (!cand_cd) { cand_cd = cd; @@ -597,8 +700,8 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt) min = cd->size; } - min_cached_seg_size = min; - max_cached_seg_size = max; + mk->min_cached_seg_size = min; + mk->max_cached_seg_size = max; if (!cand_cd) goto create_seg; @@ -607,20 +710,20 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt) if (diff_size > abs_max_cache_bad_fit || 100*PAGES(diff_size) > rel_max_cache_bad_fit*PAGES(size)) { - if (max_cached_seg_size < cand_cd->size) - max_cached_seg_size = cand_cd->size; - if (min_cached_seg_size > cand_cd->size) - min_cached_seg_size = cand_cd->size; + if (mk->max_cached_seg_size < cand_cd->size) + mk->max_cached_seg_size = cand_cd->size; + if (mk->min_cached_seg_size > cand_cd->size) + mk->min_cached_seg_size = cand_cd->size; goto create_seg; } - cache_hits++; + mk->cache_hits++; size = cand_cd->size; seg = cand_cd->seg; - unlink_cd(cand_cd); - free_cd(cand_cd); + unlink_cd(mk,cand_cd); + free_cd(mk,cand_cd); *size_p = size; @@ -630,7 +733,8 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt) } if (seg) - ERTS_MSEG_ALLOC_STAT(size); + ERTS_MSEG_ALLOC_STAT(mk,size); + return seg; } @@ -639,41 +743,42 @@ static void mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size, const ErtsMsegOpt_t *opt) { + MemKind* mk = memkind(opt); cache_desc_t *cd; - ERTS_MSEG_DEALLOC_STAT(size); + ERTS_MSEG_DEALLOC_STAT(mk,size); if (!opt->cache || max_cache_size == 0) { if (erts_mtrace_enabled) erts_mtrace_crr_free(atype, SEGTYPE, seg); - mseg_destroy(seg, size); + mseg_destroy(mk, seg, size); } else { int check_limits = 0; - if (size < min_cached_seg_size) - min_cached_seg_size = size; - if (size > max_cached_seg_size) - max_cached_seg_size = size; - - if (!free_cache_descs) { - cd = cache_end; - if (!(min_cached_seg_size < cd->size - && cd->size < max_cached_seg_size)) { + if (size < mk->min_cached_seg_size) + mk->min_cached_seg_size = size; + if (size > mk->max_cached_seg_size) + mk->max_cached_seg_size = size; + + if (!mk->free_cache_descs) { + cd = 
mk->cache_end; + if (!(mk->min_cached_seg_size < cd->size + && cd->size < mk->max_cached_seg_size)) { check_limits = 1; } if (erts_mtrace_enabled) erts_mtrace_crr_free(SEGTYPE, SEGTYPE, cd->seg); - mseg_destroy(cd->seg, cd->size); - unlink_cd(cd); - free_cd(cd); + mseg_destroy(mk, cd->seg, cd->size); + unlink_cd(mk,cd); + free_cd(mk,cd); } - cd = alloc_cd(); + cd = alloc_cd(mk); ASSERT(cd); cd->seg = seg; cd->size = size; - link_cd(cd); + link_cd(mk,cd); if (erts_mtrace_enabled) { erts_mtrace_crr_free(atype, SEGTYPE, seg); @@ -683,7 +788,7 @@ mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size, /* ASSERT(segments.current.watermark >= segments.current.no + cache_size); */ if (check_limits) - check_cache_limits(); + check_cache_limits(mk); schedule_cache_check(); @@ -696,6 +801,7 @@ static void * mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p, const ErtsMsegOpt_t *opt) { + MemKind* mk = memkind(opt); void *new_seg; Uint new_size; @@ -733,15 +839,15 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p, #if CAN_PARTLY_DESTROY if (shrink_sz > min_seg_size - && free_cache_descs + && mk->free_cache_descs && opt->cache) { cache_desc_t *cd; - cd = alloc_cd(); + cd = alloc_cd(mk); ASSERT(cd); cd->seg = ((char *) seg) + new_size; cd->size = shrink_sz; - end_link_cd(cd); + end_link_cd(mk,cd); if (erts_mtrace_enabled) { erts_mtrace_crr_realloc(new_seg, @@ -760,7 +866,7 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p, SEGTYPE, seg, new_size); - mseg_destroy(((char *) seg) + new_size, shrink_sz); + mseg_destroy(mk, ((char *) seg) + new_size, shrink_sz); } #elif HAVE_MSEG_RECREATE @@ -794,7 +900,7 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p, #if !CAN_PARTLY_DESTROY do_recreate: #endif - new_seg = mseg_recreate((void *) seg, old_size, new_size); + new_seg = mseg_recreate(mk, (void *) seg, old_size, new_size); if (erts_mtrace_enabled) erts_mtrace_crr_realloc(new_seg, atype, SEGTYPE, seg, new_size); if (!new_seg) @@ -817,7 +923,7 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p, *new_size_p = new_size; - ERTS_MSEG_REALLOC_STAT(old_size, new_size); + ERTS_MSEG_REALLOC_STAT(mk, old_size, new_size); return new_seg; } @@ -833,6 +939,8 @@ static struct { Eterm mcs; Eterm cci; + Eterm memkind; + Eterm name; Eterm status; Eterm cached_segments; Eterm cache_hits; @@ -882,6 +990,8 @@ init_atoms(void) #endif AM_INIT(version); + AM_INIT(memkind); + AM_INIT(name); AM_INIT(options); AM_INIT(amcbf); @@ -983,10 +1093,10 @@ info_options(char *prefix, if (print_to_p) { int to = *print_to_p; void *arg = print_to_arg; - erts_print(to, arg, "%samcbf: %bpu\n", prefix, abs_max_cache_bad_fit); - erts_print(to, arg, "%srmcbf: %bpu\n", prefix, rel_max_cache_bad_fit); - erts_print(to, arg, "%smcs: %bpu\n", prefix, max_cache_size); - erts_print(to, arg, "%scci: %bpu\n", prefix, cache_check_interval); + erts_print(to, arg, "%samcbf: %beu\n", prefix, abs_max_cache_bad_fit); + erts_print(to, arg, "%srmcbf: %beu\n", prefix, rel_max_cache_bad_fit); + erts_print(to, arg, "%smcs: %beu\n", prefix, max_cache_size); + erts_print(to, arg, "%scci: %beu\n", prefix, cache_check_interval); } if (hpp || szp) { @@ -1022,9 +1132,9 @@ info_calls(int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp) #define PRINT_CC(TO, TOA, CC) \ if (calls.CC.giga_no == 0) \ - erts_print(TO, TOA, "mseg_%s calls: %bpu\n", #CC, calls.CC.no); \ + erts_print(TO, TOA, "mseg_%s calls: %b32u\n", #CC, calls.CC.no); 
\ else \ - erts_print(TO, TOA, "mseg_%s calls: %bpu%09bpu\n", #CC, \ + erts_print(TO, TOA, "mseg_%s calls: %b32u%09b32u\n", #CC, \ calls.CC.giga_no, calls.CC.no) int to = *print_to_p; @@ -1092,65 +1202,88 @@ info_calls(int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp) } static Eterm -info_status(int *print_to_p, - void *print_to_arg, - int begin_new_max_period, - Uint **hpp, - Uint *szp) +info_status(MemKind* mk, int *print_to_p, void *print_to_arg, + int begin_new_max_period, Uint **hpp, Uint *szp) { Eterm res = THE_NON_VALUE; - if (segments.max_ever.no < segments.max.no) - segments.max_ever.no = segments.max.no; - if (segments.max_ever.sz < segments.max.sz) - segments.max_ever.sz = segments.max.sz; + if (mk->segments.max_ever.no < mk->segments.max.no) + mk->segments.max_ever.no = mk->segments.max.no; + if (mk->segments.max_ever.sz < mk->segments.max.sz) + mk->segments.max_ever.sz = mk->segments.max.sz; if (print_to_p) { int to = *print_to_p; void *arg = print_to_arg; - erts_print(to, arg, "cached_segments: %bpu\n", cache_size); - erts_print(to, arg, "cache_hits: %bpu\n", cache_hits); - erts_print(to, arg, "segments: %bpu %bpu %bpu\n", - segments.current.no, segments.max.no, segments.max_ever.no); - erts_print(to, arg, "segments_size: %bpu %bpu %bpu\n", - segments.current.sz, segments.max.sz, segments.max_ever.sz); - erts_print(to, arg, "segments_watermark: %bpu\n", - segments.current.watermark); + erts_print(to, arg, "cached_segments: %beu\n", mk->cache_size); + erts_print(to, arg, "cache_hits: %beu\n", mk->cache_hits); + erts_print(to, arg, "segments: %beu %beu %beu\n", + mk->segments.current.no, mk->segments.max.no, mk->segments.max_ever.no); + erts_print(to, arg, "segments_size: %beu %beu %beu\n", + mk->segments.current.sz, mk->segments.max.sz, mk->segments.max_ever.sz); + erts_print(to, arg, "segments_watermark: %beu\n", + mk->segments.current.watermark); } if (hpp || szp) { res = NIL; add_2tup(hpp, szp, &res, am.segments_watermark, - bld_unstable_uint(hpp, szp, segments.current.watermark)); + bld_unstable_uint(hpp, szp, mk->segments.current.watermark)); add_4tup(hpp, szp, &res, am.segments_size, - bld_unstable_uint(hpp, szp, segments.current.sz), - bld_unstable_uint(hpp, szp, segments.max.sz), - bld_unstable_uint(hpp, szp, segments.max_ever.sz)); + bld_unstable_uint(hpp, szp, mk->segments.current.sz), + bld_unstable_uint(hpp, szp, mk->segments.max.sz), + bld_unstable_uint(hpp, szp, mk->segments.max_ever.sz)); add_4tup(hpp, szp, &res, am.segments, - bld_unstable_uint(hpp, szp, segments.current.no), - bld_unstable_uint(hpp, szp, segments.max.no), - bld_unstable_uint(hpp, szp, segments.max_ever.no)); + bld_unstable_uint(hpp, szp, mk->segments.current.no), + bld_unstable_uint(hpp, szp, mk->segments.max.no), + bld_unstable_uint(hpp, szp, mk->segments.max_ever.no)); add_2tup(hpp, szp, &res, am.cache_hits, - bld_unstable_uint(hpp, szp, cache_hits)); + bld_unstable_uint(hpp, szp, mk->cache_hits)); add_2tup(hpp, szp, &res, am.cached_segments, - bld_unstable_uint(hpp, szp, cache_size)); + bld_unstable_uint(hpp, szp, mk->cache_size)); } if (begin_new_max_period) { - segments.max.no = segments.current.no; - segments.max.sz = segments.current.sz; + mk->segments.max.no = mk->segments.current.no; + mk->segments.max.sz = mk->segments.current.sz; } return res; } +static Eterm info_memkind(MemKind* mk, int *print_to_p, void *print_to_arg, + int begin_max_per, Uint **hpp, Uint *szp) +{ + Eterm res = THE_NON_VALUE; + Eterm atoms[3]; + Eterm values[3]; + + if (print_to_p) { + 
erts_print(*print_to_p, print_to_arg, "memory kind: %s\n", mk->name); + } + if (hpp || szp) { + atoms[0] = am.name; + atoms[1] = am.status; + atoms[2] = am.calls; + values[0] = erts_bld_string(hpp, szp, mk->name); + } + values[1] = info_status(mk, print_to_p, print_to_arg, begin_max_per, hpp, szp); + values[2] = info_calls(print_to_p, print_to_arg, hpp, szp); + + if (hpp || szp) + res = bld_2tup_list(hpp, szp, 3, atoms, values); + + return res; +} + + static Eterm info_version(int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp) { @@ -1197,6 +1330,7 @@ erts_mseg_info(int *print_to_p, Eterm res = THE_NON_VALUE; Eterm atoms[4]; Eterm values[4]; + Uint n = 0; erts_mtx_lock(&mseg_mutex); @@ -1207,17 +1341,19 @@ erts_mseg_info(int *print_to_p, atoms[0] = am.version; atoms[1] = am.options; - atoms[2] = am.status; - atoms[3] = am.calls; + atoms[2] = am.memkind; + atoms[3] = am.memkind; } - - values[0] = info_version(print_to_p, print_to_arg, hpp, szp); - values[1] = info_options("option ", print_to_p, print_to_arg, hpp, szp); - values[2] = info_status(print_to_p, print_to_arg, begin_max_per, hpp, szp); - values[3] = info_calls(print_to_p, print_to_arg, hpp, szp); - + values[n++] = info_version(print_to_p, print_to_arg, hpp, szp); + values[n++] = info_options("option ", print_to_p, print_to_arg, hpp, szp); +#if HALFWORD_HEAP + values[n++] = info_memkind(&low_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp); + values[n++] = info_memkind(&hi_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp); +#else + values[n++] = info_memkind(&the_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp); +#endif if (hpp || szp) - res = bld_2tup_list(hpp, szp, 4, atoms, values); + res = bld_2tup_list(hpp, szp, n, atoms, values); erts_mtx_unlock(&mseg_mutex); @@ -1237,7 +1373,7 @@ erts_mseg_alloc_opt(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt) void * erts_mseg_alloc(ErtsAlcType_t atype, Uint *size_p) { - return erts_mseg_alloc_opt(atype, size_p, &default_opt); + return erts_mseg_alloc_opt(atype, size_p, &erts_mseg_default_opt); } void @@ -1252,7 +1388,7 @@ erts_mseg_dealloc_opt(ErtsAlcType_t atype, void *seg, Uint size, void erts_mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size) { - erts_mseg_dealloc_opt(atype, seg, size, &default_opt); + erts_mseg_dealloc_opt(atype, seg, size, &erts_mseg_default_opt); } void * @@ -1270,23 +1406,29 @@ void * erts_mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p) { - return erts_mseg_realloc_opt(atype, seg, old_size, new_size_p, &default_opt); + return erts_mseg_realloc_opt(atype, seg, old_size, new_size_p, &erts_mseg_default_opt); } void erts_mseg_clear_cache(void) { + MemKind* mk; erts_mtx_lock(&mseg_mutex); - mseg_clear_cache(); + for (mk=mk_list; mk; mk=mk->next) { + mseg_clear_cache(mk); + } erts_mtx_unlock(&mseg_mutex); } Uint erts_mseg_no(void) { - Uint n; + MemKind* mk; + Uint n = 0; erts_mtx_lock(&mseg_mutex); - n = segments.current.no; + for (mk=mk_list; mk; mk=mk->next) { + n += mk->segments.current.no; + } erts_mtx_unlock(&mseg_mutex); return n; } @@ -1297,11 +1439,43 @@ erts_mseg_unit_size(void) return page_size; } -void -erts_mseg_init(ErtsMsegInit_t *init) +static void mem_kind_init(MemKind* mk, const char* name) { unsigned i; + mk->cache = NULL; + mk->cache_end = NULL; + mk->max_cached_seg_size = 0; + mk->min_cached_seg_size = ~((Uint) 0); + mk->cache_size = 0; + mk->cache_hits = 0; + + if (max_cache_size > 0) { + for (i = 0; i < max_cache_size - 1; i++) + mk->cache_descs[i].next = &mk->cache_descs[i 
+ 1]; + mk->cache_descs[max_cache_size - 1].next = NULL; + mk->free_cache_descs = &mk->cache_descs[0]; + } + else + mk->free_cache_descs = NULL; + + mk->segments.current.watermark = 0; + mk->segments.current.no = 0; + mk->segments.current.sz = 0; + mk->segments.max.no = 0; + mk->segments.max.sz = 0; + mk->segments.max_ever.no = 0; + mk->segments.max_ever.sz = 0; + + mk->name = name; + mk->next = mk_list; + mk_list = mk; +} + + +void +erts_mseg_init(ErtsMsegInit_t *init) +{ atoms_initialized = 0; is_init_done = 0; @@ -1324,13 +1498,17 @@ erts_mseg_init(ErtsMsegInit_t *init) erl_exit(ERTS_ABORT_EXIT, "erts_mseg: unable to open /dev/zero\n"); #endif +#if HAVE_MMAP && HALFWORD_HEAP + initialize_pmmap(); +#endif + page_size = GET_PAGE_SIZE; page_shift = 1; while ((page_size >> page_shift) != 1) { if ((page_size & (1 << (page_shift - 1))) != 0) erl_exit(ERTS_ABORT_EXIT, - "erts_mseg: Unexpected page_size %bpu\n", page_size); + "erts_mseg: Unexpected page_size %beu\n", page_size); page_shift++; } @@ -1340,40 +1518,33 @@ erts_mseg_init(ErtsMsegInit_t *init) min_seg_size = ~((Uint) 0); #endif - cache = NULL; - cache_end = NULL; - cache_hits = 0; - max_cached_seg_size = 0; - min_cached_seg_size = ~((Uint) 0); - cache_size = 0; + if (max_cache_size > MAX_CACHE_SIZE) + max_cache_size = MAX_CACHE_SIZE; + +#if HALFWORD_HEAP + mem_kind_init(&low_mem, "low memory"); + mem_kind_init(&hi_mem, "high memory"); +#else + mem_kind_init(&the_mem, "all memory"); +#endif is_cache_check_scheduled = 0; #ifdef ERTS_THREADS_NO_SMP is_cache_check_requested = 0; #endif +} - if (max_cache_size > MAX_CACHE_SIZE) - max_cache_size = MAX_CACHE_SIZE; - if (max_cache_size > 0) { - for (i = 0; i < max_cache_size - 1; i++) - cache_descs[i].next = &cache_descs[i + 1]; - cache_descs[max_cache_size - 1].next = NULL; - free_cache_descs = &cache_descs[0]; +static ERTS_INLINE Uint tot_cache_size(void) +{ + MemKind* mk; + Uint sz = 0; + for (mk=mk_list; mk; mk=mk->next) { + sz += mk->cache_size; } - else - free_cache_descs = NULL; - - segments.current.watermark = 0; - segments.current.no = 0; - segments.current.sz = 0; - segments.max.no = 0; - segments.max.sz = 0; - segments.max_ever.no = 0; - segments.max_ever.sz = 0; + return sz; } - /* * erts_mseg_late_init() have to be called after all allocators, * threads and timers have been initialized. @@ -1391,7 +1562,7 @@ erts_mseg_late_init(void) #ifdef ERTS_THREADS_NO_SMP async_handle = handle; #endif - if (cache_size) + if (tot_cache_size()) schedule_cache_check(); erts_mtx_unlock(&mseg_mutex); } @@ -1432,7 +1603,7 @@ erts_mseg_test(unsigned long op, case 0x406: { unsigned long res; erts_mtx_lock(&mseg_mutex); - res = (unsigned long) cache_size; + res = (unsigned long) tot_cache_size(); erts_mtx_unlock(&mseg_mutex); return res; } @@ -1446,3 +1617,432 @@ erts_mseg_test(unsigned long op, } +#if HALFWORD_HEAP +/* + * Very simple page oriented mmap replacer. Works in the lower + * 32 bit address range of a 64bit program. + * Implements anonymous mmap mremap and munmap with address order first fit. + * The free list is expected to be very short... + * To be used for compressed pointers in Erlang halfword emulator + * implementation. The MacOS X version is more of a toy, it's not really + * for production as the halfword erlang VM relies on Linux specific memory + * mapping tricks. 
+ */ + +/*#define HARDDEBUG 1*/ + +#ifdef __APPLE__ +#define MAP_ANONYMOUS MAP_ANON +#endif + +#define INIT_LOCK() do {erts_mtx_init(&pmmap_mutex, "pmmap");} while(0) + +#define TAKE_LOCK() do {erts_mtx_lock(&pmmap_mutex);} while(0) + +#define RELEASE_LOCK() do {erts_mtx_unlock(&pmmap_mutex);} while(0) + +static erts_mtx_t pmmap_mutex; /* Also needed when !USE_THREADS */ + +typedef struct _free_block { + unsigned long num; /*pages*/ + struct _free_block *next; +} FreeBlock; + +/* Assigned once and for all */ +static size_t pagsz; + +/* Protect with lock */ +static FreeBlock *first; + +static size_t round_up_to_pagesize(size_t size) +{ + size_t x = size / pagsz; + + if ((size % pagsz)) { + ++x; + } + + return pagsz * x; +} + +static size_t round_down_to_pagesize(size_t size) +{ + size_t x = size / pagsz; + + return pagsz * x; +} + +static void *do_map(void *ptr, size_t sz) +{ + void *res; + + if (round_up_to_pagesize(sz) != sz) { +#ifdef HARDDEBUG + fprintf(stderr,"Mapping of address %p with size %ld " + "does not map complete pages\r\n", + (void *) ptr, (unsigned long) sz); +#endif + return NULL; + } + + if (((unsigned long) ptr) % pagsz) { +#ifdef HARDDEBUG + fprintf(stderr,"Mapping of address %p with size %ld " + "is not page aligned\r\n", + (void *) ptr, (unsigned long) sz); +#endif + return NULL; + } + +#if HAVE_MMAP + res = mmap(ptr, sz, + PROT_READ | PROT_WRITE, MAP_PRIVATE | + MAP_ANONYMOUS | MAP_FIXED, + -1 , 0); +#else +# error "Missing mmap support" +#endif + + if (res == MAP_FAILED) { +#ifdef HARDDEBUG + fprintf(stderr,"Mapping of address %p with size %ld failed!\r\n", + (void *) ptr, (unsigned long) sz); +#endif + return NULL; + } + + return res; +} + +static int do_unmap(void *ptr, size_t sz) +{ + void *res; + + if (round_up_to_pagesize(sz) != sz) { +#ifdef HARDDEBUG + fprintf(stderr,"Mapping of address %p with size %ld " + "does not map complete pages\r\n", + (void *) ptr, (unsigned long) sz); +#endif + return 1; + } + + if (((unsigned long) ptr) % pagsz) { +#ifdef HARDDEBUG + fprintf(stderr,"Mapping of address %p with size %ld " + "is not page aligned\r\n", + (void *) ptr, (unsigned long) sz); +#endif + return 1; + } + + + res = mmap(ptr, sz, + PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE + | MAP_FIXED, + -1 , 0); + + if (res == MAP_FAILED) { +#ifdef HARDDEBUG + fprintf(stderr,"Mapping of address %p with size %ld failed!\r\n", + (void *) ptr, (unsigned long) sz); +#endif + return 1; + } + + return 0; +} + +#ifdef __APPLE__ +/* + * The first 4 gig's are protected on Macos X for 64bit processes :( + * The range 0x1000000000 - 0x10FFFFFFFF is selected as an arbitrary + * value of a normally unused range... Real MMAP's will avoid + * it and all 32bit compressed pointers can be in that range... + * More expensive than on Linux where expansion of compressed + * poiters involves no masking (as they are in the first 4 gig's). + * It's also very uncertain if the MAP_NORESERVE flag really has + * any effect in MacOS X. Swap space may always be allocated... 
+ */ +#define SET_RANGE_MIN() /* nothing */ +#define RANGE_MIN 0x1000000000UL +#define RANGE_MAX 0x1100000000UL +#define RANGE_MASK (RANGE_MIN) +#define EXTRA_MAP_FLAGS (MAP_FIXED) +#else +static size_t range_min; +#define SET_RANGE_MIN() do { range_min = (size_t) sbrk(0); } while (0) +#define RANGE_MIN range_min +#define RANGE_MAX 0x100000000UL +#define RANGE_MASK 0UL +#define EXTRA_MAP_FLAGS (0) +#endif + +static int initialize_pmmap(void) +{ + char *p,*q,*rptr; + size_t rsz; + FreeBlock *initial; + + + pagsz = getpagesize(); + SET_RANGE_MIN(); + if (sizeof(void *) != 8) { + erl_exit(1,"Halfword emulator cannot be run in 32bit mode"); + } + + p = (char *) RANGE_MIN; + q = (char *) RANGE_MAX; + + rsz = round_down_to_pagesize(q - p); + + rptr = mmap((void *) p, rsz, + PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | + MAP_NORESERVE | EXTRA_MAP_FLAGS, + -1 , 0); +#ifdef HARDDEBUG + printf("p=%p, rsz = %ld, pages = %ld, got range = %p -> %p\r\n", + p, (unsigned long) rsz, (unsigned long) (rsz / pagsz), + (void *) rptr, (void*)(rptr + rsz)); +#endif + if ((UWord)(rptr + rsz) > RANGE_MAX) { + size_t rsz_trunc = RANGE_MAX - (UWord)rptr; +#ifdef HARDDEBUG + printf("Reducing mmap'ed memory from %lu to %lu Mb, reduced range = %p -> %p\r\n", + rsz/(1024*1024), rsz_trunc/(1024*1024), rptr, rptr+rsz_trunc); +#endif + munmap((void*)RANGE_MAX, rsz - rsz_trunc); + rsz = rsz_trunc; + } + if (!do_map(rptr,pagsz)) { + erl_exit(1,"Could not actually mmap first page for halfword emulator...\n"); + } + initial = (FreeBlock *) rptr; + initial->num = (rsz / pagsz); + initial->next = NULL; + first = initial; + INIT_LOCK(); + return 0; +} + +#ifdef HARDDEBUG +static void dump_freelist(void) +{ + FreeBlock *p = first; + + while (p) { + printf("p = %p\r\np->num = %ld\r\np->next = %p\r\n\r\n", + (void *) p, (unsigned long) p->num, (void *) p->next); + p = p->next; + } +} +#endif + + +static void *pmmap(size_t size) +{ + size_t real_size = round_up_to_pagesize(size); + size_t num_pages = real_size / pagsz; + FreeBlock **block; + FreeBlock *tail; + FreeBlock *res; + TAKE_LOCK(); + for (block = &first; + *block != NULL && (*block)->num < num_pages; + block = &((*block)->next)) + ; + if (!(*block)) { + RELEASE_LOCK(); + return NULL; + } + if ((*block)->num == num_pages) { + /* nice, perfect fit */ + res = *block; + *block = (*block)->next; + } else { + tail = (FreeBlock *) (((char *) ((void *) (*block))) + real_size); + if (!do_map(tail,pagsz)) { +#ifdef HARDDEBUG + fprintf(stderr, "Could not actually allocate page at %p...\r\n", + (void *) tail); +#endif + RELEASE_LOCK(); + return NULL; + } + tail->num = (*block)->num - num_pages; + tail->next = (*block)->next; + res = *block; + *block = tail; + } + RELEASE_LOCK(); + if (!do_map(res,real_size)) { +#ifdef HARDDEBUG + fprintf(stderr, "Could not actually allocate %ld at %p...\r\n", + (unsigned long) real_size, (void *) res); +#endif + return NULL; + } + + return (void *) res; +} + +static int pmunmap(void *p, size_t size) +{ + size_t real_size = round_up_to_pagesize(size); + size_t num_pages = real_size / pagsz; + FreeBlock *block; + FreeBlock *last; + FreeBlock *nb = (FreeBlock *) p; + + ASSERT(((unsigned long)p & CHECK_POINTER_MASK)==0); + if (real_size > pagsz) { + if (do_unmap(((char *) p) + pagsz,real_size - pagsz)) { + return 1; + } + } + + TAKE_LOCK(); + + last = NULL; + block = first; + while(block != NULL && ((void *) block) < p) { + last = block; + block = block->next; + } + + if (block != NULL && + ((void *) block) == ((void *) (((char *) p) + real_size))) { + /* Merge 
new free block with following */ + nb->num = block->num + num_pages; + nb->next = block->next; + if (do_unmap(block,pagsz)) { + RELEASE_LOCK(); + return 1; + } + } else { + /* just link in */ + nb->num = num_pages; + nb->next = block; + } + if (last != NULL) { + if (p == ((void *) (((char *) last) + (last->num * pagsz)))) { + /* Merge with previous */ + last->num += nb->num; + last->next = nb->next; + if (do_unmap(nb,pagsz)) { + RELEASE_LOCK(); + return 1; + } + } else { + last->next = nb; + } + } else { + first = nb; + } + RELEASE_LOCK(); + return 0; +} + +static void *pmremap(void *old_address, size_t old_size, + size_t new_size) +{ + size_t new_real_size = round_up_to_pagesize(new_size); + size_t new_num_pages = new_real_size / pagsz; + size_t old_real_size = round_up_to_pagesize(old_size); + size_t old_num_pages = old_real_size / pagsz; + if (new_num_pages == old_num_pages) { + return old_address; + } else if (new_num_pages < old_num_pages) { /* Shrink */ + size_t nfb_pages = old_num_pages - new_num_pages; + size_t nfb_real_size = old_real_size - new_real_size; + void *vnfb = (void *) (((char *)old_address) + new_real_size); + FreeBlock *nfb = (FreeBlock *) vnfb; + FreeBlock **block; + TAKE_LOCK(); + for (block = &first; + *block != NULL && (*block) < nfb; + block = &((*block)->next)) + ; + if (!(*block) || + (*block) > ((FreeBlock *)(((char *) vnfb) + nfb_real_size))) { + /* Normal link in */ + if (nfb_pages > 1) { + if (do_unmap((void *)(((char *) vnfb) + pagsz), + (nfb_pages - 1)*pagsz)) { + return NULL; + } + } + nfb->next = (*block); + nfb->num = nfb_pages; + (*block) = nfb; + } else { /* block merge */ + nfb->next = (*block)->next; + nfb->num = nfb_pages + (*block)->num; + /* unmap also the first page of the next freeblock */ + (*block) = nfb; + if (do_unmap((void *)(((char *) vnfb) + pagsz), + nfb_pages*pagsz)) { + return NULL; + } + } + RELEASE_LOCK(); + return old_address; + } else { /* Enlarge */ + FreeBlock **block; + void *old_end = (void *) (((char *)old_address) + old_real_size); + TAKE_LOCK(); + for (block = &first; + *block != NULL && (*block) < (FreeBlock *) old_address; + block = &((*block)->next)) + ; + if ((*block) == NULL || old_end > ((void *) RANGE_MAX) || + (*block) != old_end || + (*block)->num < (new_num_pages - old_num_pages)) { + /* cannot extend */ + void *result; + RELEASE_LOCK(); + result = pmmap(new_size); + if (result == NULL) { + return NULL; + } + memcpy(result,old_address,old_size); + if (pmunmap(old_address,old_size)) { + /* Oups... 
*/ + pmunmap(result,new_size); + return NULL; + } + return result; + } else { /* extend */ + size_t remaining_pages = (*block)->num - + (new_num_pages - old_num_pages); + if (!remaining_pages) { + void *p = (void *) (((char *) (*block)) + pagsz); + void *n = (*block)->next; + size_t x = ((*block)->num - 1) * pagsz; + if (x > 0) { + if (do_map(p,x) == NULL) { + RELEASE_LOCK(); + return NULL; + } + } + (*block) = n; + } else { + FreeBlock *nfb = (FreeBlock *) ((void *) + (((char *) old_address) + + new_real_size)); + void *p = (void *) (((char *) (*block)) + pagsz); + if (do_map(p,new_real_size - old_real_size) == NULL) { + RELEASE_LOCK(); + return NULL; + } + nfb->num = remaining_pages; + nfb->next = (*block)->next; + (*block) = nfb; + } + RELEASE_LOCK(); + return old_address; + } + } +} + +#endif /* HALFWORD_HEAP */ diff --git a/erts/emulator/sys/common/erl_mseg.h b/erts/emulator/sys/common/erl_mseg.h index 1c5aa63e90..8f116030a8 100644 --- a/erts/emulator/sys/common/erl_mseg.h +++ b/erts/emulator/sys/common/erl_mseg.h @@ -1,19 +1,19 @@ /* * %CopyrightBegin% - * - * Copyright Ericsson AB 2002-2009. All Rights Reserved. - * + * + * Copyright Ericsson AB 2002-2011. All Rights Reserved. + * * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in * compliance with the License. You should have received a copy of the * Erlang Public License along with this software. If not, it can be * retrieved online at http://www.erlang.org/. - * + * * Software distributed under the License is distributed on an "AS IS" * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See * the License for the specific language governing rights and limitations * under the License. - * + * * %CopyrightEnd% */ @@ -58,17 +58,14 @@ typedef struct { typedef struct { int cache; int preserv; - Uint abs_shrink_th; - Uint rel_shrink_th; + UWord abs_shrink_th; + UWord rel_shrink_th; +#if HALFWORD_HEAP + int low_mem; +#endif } ErtsMsegOpt_t; -#define ERTS_MSEG_DEFAULT_OPT_INITIALIZER \ -{ \ - 1, /* Use cache */ \ - 1, /* Preserv data */ \ - 0, /* Absolute shrink threshold */ \ - 0 /* Relative shrink threshold */ \ -} +extern const ErtsMsegOpt_t erts_mseg_default_opt; void *erts_mseg_alloc(ErtsAlcType_t, Uint *); void *erts_mseg_alloc_opt(ErtsAlcType_t, Uint *, const ErtsMsegOpt_t *); diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c index 5cca33d7eb..f5c785d683 100644 --- a/erts/emulator/sys/common/erl_poll.c +++ b/erts/emulator/sys/common/erl_poll.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2006-2010. All Rights Reserved. + * Copyright Ericsson AB 2006-2011. All Rights Reserved. 
* * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -124,20 +124,11 @@ erts_smp_mtx_unlock(&(PS)->mtx) #define ERTS_POLLSET_SET_POLLED_CHK(PS) \ - ((int) erts_smp_atomic_xchg(&(PS)->polled, (long) 1)) + ((int) erts_atomic32_xchg(&(PS)->polled, (erts_aint32_t) 1)) #define ERTS_POLLSET_UNSET_POLLED(PS) \ - erts_smp_atomic_set(&(PS)->polled, (long) 0) + erts_atomic32_set(&(PS)->polled, (erts_aint32_t) 0) #define ERTS_POLLSET_IS_POLLED(PS) \ - ((int) erts_smp_atomic_read(&(PS)->polled)) - -#define ERTS_POLLSET_SET_POLLER_WOKEN_CHK(PS) \ - ((int) erts_smp_atomic_xchg(&(PS)->woken, (long) 1)) -#define ERTS_POLLSET_SET_POLLER_WOKEN(PS) \ - erts_smp_atomic_set(&(PS)->woken, (long) 1) -#define ERTS_POLLSET_UNSET_POLLER_WOKEN(PS) \ - erts_smp_atomic_set(&(PS)->woken, (long) 0) -#define ERTS_POLLSET_IS_POLLER_WOKEN(PS) \ - ((int) erts_smp_atomic_read(&(PS)->woken)) + ((int) erts_atomic32_read(&(PS)->polled)) #else @@ -147,64 +138,21 @@ #define ERTS_POLLSET_UNSET_POLLED(PS) #define ERTS_POLLSET_IS_POLLED(PS) 0 -#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT - -/* - * Ideally, the ERTS_POLLSET_SET_POLLER_WOKEN_CHK(PS) operation would - * be atomic. This operation isn't, but we will do okay anyway. The - * "woken check" is only an optimization. The only requirement we have: - * If (PS)->woken is set to a value != 0 when interrupting, we have to - * write on the the wakeup pipe at least once. Multiple writes are okay. - */ -#define ERTS_POLLSET_SET_POLLER_WOKEN_CHK(PS) ((PS)->woken++) -#define ERTS_POLLSET_SET_POLLER_WOKEN(PS) ((PS)->woken = 1, (void) 0) -#define ERTS_POLLSET_UNSET_POLLER_WOKEN(PS) ((PS)->woken = 0, (void) 0) -#define ERTS_POLLSET_IS_POLLER_WOKEN(PS) ((PS)->woken) - -#else - -#define ERTS_POLLSET_SET_POLLER_WOKEN_CHK(PS) 1 -#define ERTS_POLLSET_SET_POLLER_WOKEN(PS) -#define ERTS_POLLSET_UNSET_POLLER_WOKEN(PS) -#define ERTS_POLLSET_IS_POLLER_WOKEN(PS) 1 - -#endif - #endif #if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE #define ERTS_POLLSET_SET_HAVE_UPDATE_REQUESTS(PS) \ - erts_smp_atomic_set(&(PS)->have_update_requests, (long) 1) + erts_smp_atomic32_set(&(PS)->have_update_requests, (erts_aint32_t) 1) #define ERTS_POLLSET_UNSET_HAVE_UPDATE_REQUESTS(PS) \ - erts_smp_atomic_set(&(PS)->have_update_requests, (long) 0) + erts_smp_atomic32_set(&(PS)->have_update_requests, (erts_aint32_t) 0) #define ERTS_POLLSET_HAVE_UPDATE_REQUESTS(PS) \ - ((int) erts_smp_atomic_read(&(PS)->have_update_requests)) + ((int) erts_smp_atomic32_read(&(PS)->have_update_requests)) #else #define ERTS_POLLSET_SET_HAVE_UPDATE_REQUESTS(PS) #define ERTS_POLLSET_UNSET_HAVE_UPDATE_REQUESTS(PS) #define ERTS_POLLSET_HAVE_UPDATE_REQUESTS(PS) 0 #endif -#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT && !defined(ERTS_SMP) - -#define ERTS_POLLSET_UNSET_INTERRUPTED_CHK(PS) unset_interrupted_chk((PS)) -#define ERTS_POLLSET_UNSET_INTERRUPTED(PS) ((PS)->interrupt = 0, (void) 0) -#define ERTS_POLLSET_SET_INTERRUPTED(PS) ((PS)->interrupt = 1, (void) 0) -#define ERTS_POLLSET_IS_INTERRUPTED(PS) ((PS)->interrupt) - -#else - -#define ERTS_POLLSET_UNSET_INTERRUPTED_CHK(PS) \ - ((int) erts_smp_atomic_xchg(&(PS)->interrupt, (long) 0)) -#define ERTS_POLLSET_UNSET_INTERRUPTED(PS) \ - erts_smp_atomic_set(&(PS)->interrupt, (long) 0) -#define ERTS_POLLSET_SET_INTERRUPTED(PS) \ - erts_smp_atomic_set(&(PS)->interrupt, (long) 1) -#define ERTS_POLLSET_IS_INTERRUPTED(PS) \ - ((int) erts_smp_atomic_read(&(PS)->interrupt)) - -#endif - #if ERTS_POLL_USE_FALLBACK # if ERTS_POLL_USE_POLL # define 
ERTS_POLL_NEED_FALLBACK(PS) ((PS)->no_poll_fds > 1) @@ -276,7 +224,7 @@ struct ErtsPollSet_ { ErtsPollSet next; int internal_fd_limit; ErtsFdStatus *fds_status; - int no_of_user_fds; + erts_smp_atomic_t no_of_user_fds; int fds_status_len; #if ERTS_POLL_USE_KERNEL_POLL int kp_fd; @@ -308,14 +256,12 @@ struct ErtsPollSet_ { #if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE ErtsPollSetUpdateRequestsBlock update_requests; ErtsPollSetUpdateRequestsBlock *curr_upd_req_block; - erts_smp_atomic_t have_update_requests; + erts_smp_atomic32_t have_update_requests; #endif #ifdef ERTS_SMP - erts_smp_atomic_t polled; - erts_smp_atomic_t woken; + erts_atomic32_t polled; erts_smp_mtx_t mtx; #elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT - volatile int woken; #endif #if ERTS_POLL_USE_WAKEUP_PIPE int wake_fds[2]; @@ -323,12 +269,12 @@ struct ErtsPollSet_ { #if ERTS_POLL_USE_FALLBACK int fallback_used; #endif -#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT && !defined(ERTS_SMP) - volatile int interrupt; -#else - erts_smp_atomic_t interrupt; +#ifdef ERTS_SMP + erts_atomic32_t wakeup_state; +#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT + volatile int wakeup_state; #endif - erts_smp_atomic_t timeout; + erts_smp_atomic32_t timeout; #ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS erts_smp_atomic_t no_avoided_wakeups; erts_smp_atomic_t no_avoided_interrupts; @@ -336,20 +282,6 @@ struct ErtsPollSet_ { #endif }; -#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT && !defined(ERTS_SMP) - -static ERTS_INLINE int -unset_interrupted_chk(ErtsPollSet ps) -{ - /* This operation isn't atomic, but we have no need at all for an - atomic operation here... */ - int res = ps->interrupt; - ps->interrupt = 0; - return res; -} - -#endif - void erts_silence_warn_unused_result(long unused); static void fatal_error(char *format, ...); static void fatal_error_async_signal_safe(char *error_str); @@ -406,6 +338,64 @@ static void check_poll_status(ErtsPollSet ps); static void print_misc_debug_info(void); #endif +#define ERTS_POLL_NOT_WOKEN 0 +#define ERTS_POLL_WOKEN -1 +#define ERTS_POLL_WOKEN_INTR 1 + +static ERTS_INLINE void +reset_wakeup_state(ErtsPollSet ps) +{ +#ifdef ERTS_SMP + erts_atomic32_set(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN); + ERTS_THR_MEMORY_BARRIER; +#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT + ps->wakeup_state = 0; +#endif +} + +static ERTS_INLINE int +is_woken(ErtsPollSet ps) +{ +#ifdef ERTS_SMP + return erts_atomic32_read_acqb(&ps->wakeup_state) != ERTS_POLL_NOT_WOKEN; +#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT + return ps->wakeup_state != ERTS_POLL_NOT_WOKEN; +#else + return 0; +#endif +} + +static ERTS_INLINE int +is_interrupted_reset(ErtsPollSet ps) +{ +#ifdef ERTS_SMP + return (erts_atomic32_xchg(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN) + == ERTS_POLL_WOKEN_INTR); +#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT + int res = ps->wakeup_state == ERTS_POLL_WOKEN_INTR; + ps->wakeup_state = ERTS_POLL_NOT_WOKEN; + return res; +#else + return 0; +#endif +} + +static ERTS_INLINE void +woke_up(ErtsPollSet ps) +{ +#ifdef ERTS_SMP + erts_aint32_t wakeup_state = erts_atomic32_read(&ps->wakeup_state); + if (wakeup_state == ERTS_POLL_NOT_WOKEN) + (void) erts_atomic32_cmpxchg(&ps->wakeup_state, + ERTS_POLL_WOKEN, + ERTS_POLL_NOT_WOKEN); + ASSERT(erts_atomic32_read(&ps->wakeup_state) != ERTS_POLL_NOT_WOKEN); +#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT + if (ps->wakeup_state == ERTS_POLL_NOT_WOKEN) + ps->wakeup_state = ERTS_POLL_WOKEN; +#endif +} + /* * --- Wakeup pipe ----------------------------------------------------------- */ @@ -413,14 +403,34 @@ static void print_misc_debug_info(void); #if 
ERTS_POLL_USE_WAKEUP_PIPE static ERTS_INLINE void -wake_poller(ErtsPollSet ps) +wake_poller(ErtsPollSet ps, int interrupted) { + int wake; +#ifdef ERTS_SMP + erts_aint32_t wakeup_state; + if (!interrupted) + wakeup_state = erts_atomic32_cmpxchg_relb(&ps->wakeup_state, + ERTS_POLL_WOKEN, + ERTS_POLL_NOT_WOKEN); + else { + /* + * We might unnecessarily write to the pipe, however, + * that isn't problematic. + */ + wakeup_state = erts_atomic32_read(&ps->wakeup_state); + erts_atomic32_set_relb(&ps->wakeup_state, ERTS_POLL_WOKEN_INTR); + } + wake = wakeup_state == ERTS_POLL_NOT_WOKEN; +#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT + wake = ps->wakeup_state == ERTS_POLL_NOT_WOKEN; + ps->wakeup_state = interrupted ? ERTS_POLL_WOKEN_INTR : ERTS_POLL_NOT_WOKEN; +#endif /* * NOTE: This function might be called from signal handlers in the * non-smp case; therefore, it has to be async-signal safe in * the non-smp case. */ - if (!ERTS_POLLSET_SET_POLLER_WOKEN_CHK(ps)) { + if (wake) { ssize_t res; if (ps->wake_fds[1] < 0) return; /* Not initialized yet */ @@ -756,7 +766,7 @@ write_batch_buf(ErtsPollSet ps, ErtsPollBatchBuf *bbp) short filter; int fd = (int) ebuf[i].ident; - switch ((int) ebuf[i].udata) { + switch ((int) (long) ebuf[i].udata) { /* * Since we use a lazy update approach EV_DELETE will @@ -795,7 +805,7 @@ write_batch_buf(ErtsPollSet ps, ErtsPollBatchBuf *bbp) if (fd == (int) ebuf[j].ident) { ebuf[j].udata = (void *) ERTS_POLL_KQ_OP_HANDLED; if (!(ebuf[j].flags & EV_ERROR)) { - switch ((int) ebuf[j].udata) { + switch ((int) (long) ebuf[j].udata) { case ERTS_POLL_KQ_OP_ADD2_W: filter = EVFILT_WRITE; goto rm_add_fb; @@ -813,7 +823,8 @@ write_batch_buf(ErtsPollSet ps, ErtsPollBatchBuf *bbp) } } /* The other add succeded... */ - filter = (((int) ebuf[i].udata == ERTS_POLL_KQ_OP_ADD2_W) + filter = ((((int) (long) ebuf[i].udata) + == ERTS_POLL_KQ_OP_ADD2_W) ? 
EVFILT_READ : EVFILT_WRITE); rm_add_fb: @@ -828,7 +839,7 @@ write_batch_buf(ErtsPollSet ps, ErtsPollBatchBuf *bbp) ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_USEFLBCK; ASSERT(ps->fds_status[fd].used_events); ps->fds_status[fd].used_events = 0; - ps->no_of_user_fds--; + erts_smp_atomic_dec(&ps->no_of_user_fds); update_fallback_pollset(ps, fd); ASSERT(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK); break; @@ -878,11 +889,11 @@ batch_update_pollset(ErtsPollSet ps, int fd, ErtsPollBatchBuf *bbp) events = ERTS_POLL_EV_E2N(ps->fds_status[fd].events); if (!events) { buf[buf_len].events = POLLREMOVE; - ps->no_of_user_fds--; + erts_smp_atomic_dec(&ps->no_of_user_fds); } else if (!ps->fds_status[fd].used_events) { buf[buf_len].events = events; - ps->no_of_user_fds++; + erts_smp_atomic_inc(&ps->no_of_user_fds); } else { if ((ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_RST) @@ -972,12 +983,12 @@ batch_update_pollset(ErtsPollSet ps, int fd, ErtsPollBatchBuf *bbp) } if (used_events) { if (!events) { - ps->no_of_user_fds--; + erts_smp_atomic_dec(&ps->no_of_user_fds); } } else { if (events) - ps->no_of_user_fds++; + erts_smp_atomic_inc(&ps->no_of_user_fds); } ASSERT((events & ~(ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT)) == 0); ASSERT((used_events & ~(ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT)) == 0); @@ -1051,7 +1062,7 @@ update_pollset(ErtsPollSet ps, int fd) epe.data.fd = epe_templ.data.fd; res = epoll_ctl(ps->kp_fd, EPOLL_CTL_DEL, fd, &epe); } while (res != 0 && errno == EINTR); - ps->no_of_user_fds--; + erts_smp_atomic_dec(&ps->no_of_user_fds); ps->fds_status[fd].used_events = 0; } @@ -1059,11 +1070,11 @@ update_pollset(ErtsPollSet ps, int fd) /* A note on EPOLL_CTL_DEL: linux kernel versions before 2.6.9 need a non-NULL event pointer even though it is ignored... */ op = EPOLL_CTL_DEL; - ps->no_of_user_fds--; + erts_smp_atomic_dec(&ps->no_of_user_fds); } else if (!ps->fds_status[fd].used_events) { op = EPOLL_CTL_ADD; - ps->no_of_user_fds++; + erts_smp_atomic_inc(&ps->no_of_user_fds); } else { op = EPOLL_CTL_MOD; @@ -1113,7 +1124,7 @@ update_pollset(ErtsPollSet ps, int fd) /* Fall through ... 
*/ case EPOLL_CTL_ADD: { ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_USEFLBCK; - ps->no_of_user_fds--; + erts_smp_atomic_dec(&ps->no_of_user_fds); #if ERTS_POLL_USE_CONCURRENT_UPDATE if (!*update_fallback) { *update_fallback = 1; @@ -1201,7 +1212,7 @@ static int update_pollset(ErtsPollSet ps, int fd) #if ERTS_POLL_USE_FALLBACK ASSERT(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK); #endif - ps->no_of_user_fds--; + erts_smp_atomic_dec(&ps->no_of_user_fds); last_pix = --ps->no_poll_fds; if (pix != last_pix) { /* Move last pix to this pix */ @@ -1228,7 +1239,7 @@ static int update_pollset(ErtsPollSet ps, int fd) ASSERT(!(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK) || fd == ps->kp_fd); #endif - ps->no_of_user_fds++; + erts_smp_atomic_inc(&ps->no_of_user_fds); ps->fds_status[fd].pix = pix = ps->no_poll_fds++; if (pix >= ps->poll_fds_len) grow_poll_fds(ps, pix); @@ -1279,7 +1290,7 @@ static int update_pollset(ErtsPollSet ps, int fd) if (!ps->fds_status[fd].used_events) { ASSERT(events); - ps->no_of_user_fds++; + erts_smp_atomic_inc(&ps->no_of_user_fds); #if ERTS_POLL_USE_FALLBACK ps->no_select_fds++; ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_INFLBCK; @@ -1287,7 +1298,7 @@ static int update_pollset(ErtsPollSet ps, int fd) } else if (!events) { ASSERT(ps->fds_status[fd].used_events); - ps->no_of_user_fds--; + erts_smp_atomic_dec(&ps->no_of_user_fds); ps->fds_status[fd].events = events; #if ERTS_POLL_USE_FALLBACK ps->no_select_fds--; @@ -1363,9 +1374,7 @@ handle_update_requests(ErtsPollSet ps) #endif /* ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE */ static ERTS_INLINE ErtsPollEvents -poll_control(ErtsPollSet ps, int fd, ErtsPollEvents events, int on, - int *have_set_have_update_requests, - int *do_wake) +poll_control(ErtsPollSet ps, int fd, ErtsPollEvents events, int on, int *do_wake) { ErtsPollEvents new_events; @@ -1469,7 +1478,6 @@ ERTS_POLL_EXPORT(erts_poll_controlv)(ErtsPollSet ps, int len) { int i; - int hshur = 0; int do_wake; int final_do_wake = 0; @@ -1481,17 +1489,17 @@ ERTS_POLL_EXPORT(erts_poll_controlv)(ErtsPollSet ps, pcev[i].fd, pcev[i].events, pcev[i].on, - &hshur, &do_wake); final_do_wake |= do_wake; } + ERTS_POLLSET_UNLOCK(ps); + #ifdef ERTS_SMP if (final_do_wake) - wake_poller(ps); + wake_poller(ps, 0); #endif /* ERTS_SMP */ - ERTS_POLLSET_UNLOCK(ps); } ErtsPollEvents @@ -1502,20 +1510,20 @@ ERTS_POLL_EXPORT(erts_poll_control)(ErtsPollSet ps, int* do_wake) /* In: Wake up polling thread */ /* Out: Poller is woken */ { - int hshur = 0; ErtsPollEvents res; ERTS_POLLSET_LOCK(ps); - res = poll_control(ps, fd, events, on, &hshur, do_wake); + res = poll_control(ps, fd, events, on, do_wake); + + ERTS_POLLSET_UNLOCK(ps); #ifdef ERTS_SMP if (*do_wake) { - wake_poller(ps); + wake_poller(ps, 0); } #endif /* ERTS_SMP */ - ERTS_POLLSET_UNLOCK(ps); return res; } @@ -1888,14 +1896,17 @@ static ERTS_INLINE int check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked) { ASSERT(!*ps_locked); - if (ps->no_of_user_fds == 0 && tv->tv_usec == 0 && tv->tv_sec == 0) { + if (erts_smp_atomic_read(&ps->no_of_user_fds) == 0 + && tv->tv_usec == 0 && tv->tv_sec == 0) { /* Nothing to poll and zero timeout; done... 
@@ -1888,14 +1896,17 @@ static ERTS_INLINE int
 check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked)
 {
     ASSERT(!*ps_locked);
-    if (ps->no_of_user_fds == 0 && tv->tv_usec == 0 && tv->tv_sec == 0) {
+    if (erts_smp_atomic_read(&ps->no_of_user_fds) == 0
+	&& tv->tv_usec == 0 && tv->tv_sec == 0) {
 	/* Nothing to poll and zero timeout; done... */
 	return 0;
     }
     else {
 	long timeout = tv->tv_sec*1000 + tv->tv_usec/1000;
+	if (timeout > ERTS_AINT32_T_MAX)
+	    timeout = ERTS_AINT32_T_MAX;
 	ASSERT(timeout >= 0);
-	erts_smp_atomic_set(&ps->timeout, timeout);
+	erts_smp_atomic32_set_relb(&ps->timeout, (erts_aint32_t) timeout);
 
 #if ERTS_POLL_USE_FALLBACK
 	if (!(ps->fallback_used = ERTS_POLL_NEED_FALLBACK(ps))) {
@@ -1926,7 +1937,7 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked)
 	     * the maximum number of file descriptors in the poll set.
 	     */
 	    struct dvpoll poll_res;
-	    int nfds = ps->no_of_user_fds;
+	    int nfds = (int) erts_smp_atomic_read(&ps->no_of_user_fds);
 #ifdef ERTS_SMP
 	    nfds++; /* Wakeup pipe */
 #endif
@@ -2017,15 +2028,14 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
 		    (int) tv->tv_sec*1000 + tv->tv_usec/1000);
 #endif
 
-    ERTS_POLLSET_UNSET_POLLER_WOKEN(ps);
     if (ERTS_POLLSET_SET_POLLED_CHK(ps)) {
 	res = EINVAL; /* Another thread is in erts_poll_wait()
 			 on this pollset... */
 	goto done;
     }
 
-    if (ERTS_POLLSET_IS_INTERRUPTED(ps)) {
-	/* Interrupt use zero timeout */
+    if (is_woken(ps)) {
+	/* Use zero timeout */
 	itv.tv_sec = 0;
 	itv.tv_usec = 0;
 	tvp = &itv;
@@ -2042,7 +2052,7 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
     ps_locked = 0;
     res = check_fd_events(ps, tvp, no_fds, &ps_locked);
 
-    ERTS_POLLSET_SET_POLLER_WOKEN(ps);
+    woke_up(ps);
 
     if (res == 0) {
 	res = ETIMEDOUT;
@@ -2074,9 +2084,7 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
 	check_poll_result(pr, no_fds);
 #endif
 
-	res = (no_fds == 0
-	       ? (ERTS_POLLSET_UNSET_INTERRUPTED_CHK(ps) ? EINTR : EAGAIN)
-	       : 0);
+	res = (no_fds == 0 ? (is_interrupted_reset(ps) ? EINTR : EAGAIN) : 0);
 	*len = no_fds;
     }
@@ -2087,7 +2095,7 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
 #endif
 
  done:
-    erts_smp_atomic_set(&ps->timeout, LONG_MAX);
+    erts_smp_atomic32_set_relb(&ps->timeout, ERTS_AINT32_T_MAX);
 #ifdef ERTS_POLL_DEBUG_PRINT
     erts_printf("Leaving %s = erts_poll_wait()\n",
 		res == 0 ? "0" : erl_errno_id(res));
@@ -2103,20 +2111,17 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
 void
 ERTS_POLL_EXPORT(erts_poll_interrupt)(ErtsPollSet ps, int set)
 {
+#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(ERTS_SMP)
     /*
      * NOTE: This function might be called from signal handlers in the
      *       non-smp case; therefore, it has to be async-signal safe in
     *        the non-smp case.
     */
-    if (set) {
-	ERTS_POLLSET_SET_INTERRUPTED(ps);
-#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(ERTS_SMP)
-	wake_poller(ps);
+    if (!set)
+	reset_wakeup_state(ps);
+    else
+	wake_poller(ps, 1);
 #endif
-    }
-    else {
-	ERTS_POLLSET_UNSET_INTERRUPTED(ps);
-    }
 }
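The old pair of POLLER_WOKEN/INTERRUPTED flags is collapsed into a single wakeup state, driven through is_woken()/woke_up()/is_interrupted_reset()/reset_wakeup_state(), and ps->timeout shrinks to a 32-bit atomic written with a release barrier (with the timeout value clamped to ERTS_AINT32_T_MAX accordingly). A speculative sketch of how a three-valued wakeup state can be driven with C11 atomics; the actual encoding in erl_poll.c may differ, and every name below is an assumption:

    #include <stdatomic.h>

    enum { WAKEUP_CLEAN, WAKEUP_WOKEN, WAKEUP_INTERRUPTED }; /* assumed states */

    static atomic_int wakeup_state = ATOMIC_VAR_INIT(WAKEUP_CLEAN);

    /* Rough analogue of wake_poller(ps, interrupted): only the first
     * waker needs to touch the wakeup pipe; a later interrupt merely
     * upgrades the recorded reason. */
    static void wake(int interrupted)
    {
        int expected = WAKEUP_CLEAN;
        int desired = interrupted ? WAKEUP_INTERRUPTED : WAKEUP_WOKEN;
        if (atomic_compare_exchange_strong(&wakeup_state, &expected, desired)) {
            /* first waker: write a byte to the wakeup pipe here */
        }
        else if (interrupted && expected == WAKEUP_WOKEN)
            atomic_store(&wakeup_state, WAKEUP_INTERRUPTED);
    }

    /* Rough analogue of is_interrupted_reset(ps): consume the state and
     * report whether the wakeup was an interrupt (=> EINTR). */
    static int interrupted_reset(void)
    {
        return atomic_exchange(&wakeup_state, WAKEUP_CLEAN) == WAKEUP_INTERRUPTED;
    }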
 
 /*
@@ -2125,15 +2130,16 @@ ERTS_POLL_EXPORT(erts_poll_interrupt)(ErtsPollSet ps, int set)
  * is not guaranteed that it will timeout before 'msec' milli seconds.
  */
 void
-ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet ps, int set, long msec)
+ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet ps,
+					    int set,
+					    long msec)
 {
-    if (set) {
-	if (erts_smp_atomic_read(&ps->timeout) > msec) {
-	    ERTS_POLLSET_SET_INTERRUPTED(ps);
 #if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(ERTS_SMP)
-	    wake_poller(ps);
-#endif
-	}
+    if (!set)
+	reset_wakeup_state(ps);
+    else {
+	if (erts_smp_atomic32_read_acqb(&ps->timeout) > (erts_aint32_t) msec)
+	    wake_poller(ps, 1);
 #ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
 	else {
 	    if (ERTS_POLLSET_IS_POLLED(ps))
@@ -2143,9 +2149,7 @@ ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet ps, int set, long msec)
 	    erts_smp_atomic_inc(&ps->no_interrupt_timed);
 #endif
 	}
-    else {
-	ERTS_POLLSET_UNSET_INTERRUPTED(ps);
-    }
+#endif
 }
 
 int
@@ -2204,7 +2208,7 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
     ps->internal_fd_limit = 0;
     ps->fds_status = NULL;
     ps->fds_status_len = 0;
-    ps->no_of_user_fds = 0;
+    erts_smp_atomic_init(&ps->no_of_user_fds, 0);
 #if ERTS_POLL_USE_KERNEL_POLL
     ps->kp_fd = -1;
 #if ERTS_POLL_USE_EPOLL
@@ -2256,14 +2260,16 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
     ps->update_requests.next = NULL;
     ps->update_requests.len = 0;
     ps->curr_upd_req_block = &ps->update_requests;
-    erts_smp_atomic_init(&ps->have_update_requests, 0);
+    erts_smp_atomic32_init(&ps->have_update_requests, 0);
 #endif
 #ifdef ERTS_SMP
-    erts_smp_atomic_init(&ps->polled, 0);
-    erts_smp_atomic_init(&ps->woken, 0);
+    erts_atomic32_init(&ps->polled, 0);
     erts_smp_mtx_init(&ps->mtx, "pollset");
+#endif
+#ifdef ERTS_SMP
+    erts_atomic32_init(&ps->wakeup_state, (erts_aint32_t) 0);
 #elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
-    ps->woken = 0;
+    ps->wakeup_state = 0;
 #endif
 #if ERTS_POLL_USE_WAKEUP_PIPE
     create_wakeup_pipe(ps);
@@ -2285,12 +2291,7 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
 	ps->internal_fd_limit = kp_fd + 1;
     ps->kp_fd = kp_fd;
 #endif
-#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT && !defined(ERTS_SMP)
-    ps->interrupt = 0;
-#else
-    erts_smp_atomic_init(&ps->interrupt, 0);
-#endif
-    erts_smp_atomic_init(&ps->timeout, LONG_MAX);
+    erts_smp_atomic32_init(&ps->timeout, ERTS_AINT32_T_MAX);
 #ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
     erts_smp_atomic_init(&ps->no_avoided_wakeups, 0);
     erts_smp_atomic_init(&ps->no_avoided_interrupts, 0);
@@ -2302,7 +2303,7 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
 #if ERTS_POLL_USE_FALLBACK
     ps->fallback_used = 0;
 #endif
-    ps->no_of_user_fds = 0; /* Don't count wakeup pipe and fallback fd */
+    erts_smp_atomic_set(&ps->no_of_user_fds, 0); /* Don't count wakeup pipe and fallback fd */
 
     erts_smp_spin_lock(&pollsets_lock);
     ps->next = pollsets;
@@ -2405,6 +2406,7 @@ ERTS_POLL_EXPORT(erts_poll_info)(ErtsPollSet ps, ErtsPollInfo *pip)
 	while (urqbp) {
 	    size += sizeof(ErtsPollSetUpdateRequestsBlock);
 	    pending_updates += urqbp->len;
+	    urqbp = urqbp->next;
 	}
     }
 #endif
@@ -2447,7 +2449,7 @@ ERTS_POLL_EXPORT(erts_poll_info)(ErtsPollSet ps, ErtsPollInfo *pip)
 
     pip->memory_size = size;
 
-    pip->poll_set_size = ps->no_of_user_fds;
+    pip->poll_set_size = (int) erts_smp_atomic_read(&ps->no_of_user_fds);
 #ifdef ERTS_SMP
     pip->poll_set_size++; /* Wakeup pipe */
 #endif
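The single added line in the erts_poll_info() hunk above (urqbp = urqbp->next;) is a genuine bug fix: without the advance, the while loop over the update-request blocks never terminated once a second block existed. The corrected traversal, reduced to its essentials; struct and field names here are illustrative only:

    struct block { int len; struct block *next; };

    static int count_pending(struct block *head)
    {
        int pending = 0;
        struct block *p;
        /* keeping the advance in the for-header makes it impossible
         * to forget, unlike the original while loop */
        for (p = head; p != NULL; p = p->next)
            pending += p->len;
        return pending;
    }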
diff --git a/erts/emulator/sys/common/erl_sys_common_misc.c b/erts/emulator/sys/common/erl_sys_common_misc.c
new file mode 100644
index 0000000000..461e763f03
--- /dev/null
+++ b/erts/emulator/sys/common/erl_sys_common_misc.c
@@ -0,0 +1,107 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2006-2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Darwin needs conversion!
+ * http://developer.apple.com/library/mac/#qa/qa2001/qa1235.html
+ */
+
+#ifdef HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#include "sys.h"
+#include "global.h"
+
+#if defined(__APPLE__) && defined(__MACH__) && !defined(__DARWIN__)
+#define __DARWIN__ 1
+#endif
+
+#if !defined(__WIN32__)
+#include <locale.h>
+#if !defined(HAVE_SETLOCALE) || !defined(HAVE_NL_LANGINFO) || !defined(HAVE_LANGINFO_H)
+#define PRIMITIVE_UTF8_CHECK 1
+#else
+#include <langinfo.h>
+#endif
+#endif
+
+/* Written once and only once */
+
+static int filename_encoding = ERL_FILENAME_UNKNOWN;
+#if defined(__WIN32__) || defined(__DARWIN__)
+static int user_filename_encoding = ERL_FILENAME_UTF8; /* Default unicode on windows */
+#else
+static int user_filename_encoding = ERL_FILENAME_LATIN1;
+#endif
+
+void erts_set_user_requested_filename_encoding(int encoding)
+{
+    user_filename_encoding = encoding;
+}
+
+int erts_get_user_requested_filename_encoding(void)
+{
+    return user_filename_encoding;
+}
+
+void erts_init_sys_common_misc(void)
+{
+#if defined(__WIN32__)
+    /* win_efile will totally fail if this is not set. */
+    filename_encoding = ERL_FILENAME_WIN_WCHAR;
+#else
+    if (user_filename_encoding != ERL_FILENAME_UNKNOWN) {
+	filename_encoding = user_filename_encoding;
+    } else {
+	char *l;
+	filename_encoding = ERL_FILENAME_LATIN1;
+#  ifdef PRIMITIVE_UTF8_CHECK
+	setlocale(LC_CTYPE, "");  /* Set international environment,
+				     ignore result */
+	if (((l = getenv("LC_ALL"))   && *l) ||
+	    ((l = getenv("LC_CTYPE")) && *l) ||
+	    ((l = getenv("LANG"))     && *l)) {
+	    if (strstr(l, "UTF-8")) {
+		filename_encoding = ERL_FILENAME_UTF8;
+	    }
+	}
+#  else
+	l = setlocale(LC_CTYPE, "");  /* Set international environment */
+	if (l != NULL) {
+	    if (strcmp(nl_langinfo(CODESET), "UTF-8") == 0) {
+		filename_encoding = ERL_FILENAME_UTF8;
+	    }
+	}
+#  endif
+    }
+#  if defined(__DARWIN__)
+    if (filename_encoding == ERL_FILENAME_UTF8) {
+	filename_encoding = ERL_FILENAME_UTF8_MAC;
+    }
+#  endif
+#endif
+}
+
+int erts_get_native_filename_encoding(void)
+{
+    return filename_encoding;
+}
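The non-Windows branch of erts_init_sys_common_misc() decides between Latin-1 and UTF-8 filenames by probing the locale: crudely via the LC_ALL/LC_CTYPE/LANG environment variables when setlocale()/nl_langinfo() are unavailable, or properly via nl_langinfo(CODESET). A stand-alone version of the proper probe, assuming a POSIX system with <langinfo.h>:

    #include <locale.h>
    #include <langinfo.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        int utf8 = 0;
        /* adopt the user's environment locale; NULL means it is unusable */
        if (setlocale(LC_CTYPE, "") != NULL)
            utf8 = (strcmp(nl_langinfo(CODESET), "UTF-8") == 0);
        printf("native filename encoding: %s\n", utf8 ? "UTF-8" : "Latin-1");
        return 0;
    }

On Darwin the new file then narrows ERL_FILENAME_UTF8 to ERL_FILENAME_UTF8_MAC, since HFS+ stores filenames in a decomposed Unicode form that needs conversion (see the Apple Q&A linked in the file header).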