Diffstat (limited to 'erts/emulator/sys')
-rw-r--r--  erts/emulator/sys/common/erl_mseg.c      | 626
-rw-r--r--  erts/emulator/sys/common/erl_poll.c      | 284
-rw-r--r--  erts/emulator/sys/unix/erl_unix_sys.h    |   7
-rw-r--r--  erts/emulator/sys/unix/sys.c             | 290
-rw-r--r--  erts/emulator/sys/unix/sys_float.c       |   7
-rw-r--r--  erts/emulator/sys/vxworks/sys.c          |   2
-rw-r--r--  erts/emulator/sys/win32/erl_poll.c       | 382
-rw-r--r--  erts/emulator/sys/win32/sys_interrupt.c  |   8
8 files changed, 752 insertions(+), 854 deletions(-)
diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c
index b1ee165489..8421eb415c 100644
--- a/erts/emulator/sys/common/erl_mseg.c
+++ b/erts/emulator/sys/common/erl_mseg.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -35,6 +35,7 @@
#include "global.h"
#include "erl_threads.h"
#include "erl_mtrace.h"
+#include "erl_time.h"
#include "big.h"
#if HAVE_ERTS_MSEG
@@ -76,8 +77,10 @@ static int atoms_initialized;
static Uint cache_check_interval;
+typedef struct mem_kind_t MemKind;
+
static void check_cache(void *unused);
-static void mseg_clear_cache(void);
+static void mseg_clear_cache(MemKind*);
static int is_cache_check_scheduled;
#ifdef ERTS_THREADS_NO_SMP
static int is_cache_check_requested;
@@ -159,14 +162,43 @@ static struct {
CallCounter check_cache;
} calls;
-static cache_desc_t cache_descs[MAX_CACHE_SIZE];
-static cache_desc_t *free_cache_descs;
-static cache_desc_t *cache;
-static cache_desc_t *cache_end;
-static Uint cache_hits;
-static Uint cache_size;
-static Uint min_cached_seg_size;
-static Uint max_cached_seg_size;
+struct mem_kind_t {
+ cache_desc_t cache_descs[MAX_CACHE_SIZE];
+ cache_desc_t *free_cache_descs;
+ cache_desc_t *cache;
+ cache_desc_t *cache_end;
+
+ Uint cache_size;
+ Uint min_cached_seg_size;
+ Uint max_cached_seg_size;
+ Uint cache_hits;
+
+ struct {
+ struct {
+ Uint watermark;
+ Uint no;
+ Uint sz;
+ } current;
+ struct {
+ Uint no;
+ Uint sz;
+ } max;
+ struct {
+ Uint no;
+ Uint sz;
+ } max_ever;
+ } segments;
+
+ const char* name;
+ MemKind* next;
+};/*MemKind*/
+
+#if HALFWORD_HEAP
+static MemKind low_mem, hi_mem;
+#else
+static MemKind the_mem;
+#endif
+static MemKind* mk_list = NULL;
static Uint max_cache_size;
static Uint abs_max_cache_bad_fit;
@@ -176,47 +208,32 @@ static Uint rel_max_cache_bad_fit;
static Uint min_seg_size;
#endif
-struct {
- struct {
- Uint watermark;
- Uint no;
- Uint sz;
- } current;
- struct {
- Uint no;
- Uint sz;
- } max;
- struct {
- Uint no;
- Uint sz;
- } max_ever;
-} segments;
-#define ERTS_MSEG_ALLOC_STAT(SZ) \
+#define ERTS_MSEG_ALLOC_STAT(C,SZ) \
do { \
- segments.current.no++; \
- if (segments.max.no < segments.current.no) \
- segments.max.no = segments.current.no; \
- if (segments.current.watermark < segments.current.no) \
- segments.current.watermark = segments.current.no; \
- segments.current.sz += (SZ); \
- if (segments.max.sz < segments.current.sz) \
- segments.max.sz = segments.current.sz; \
+ C->segments.current.no++; \
+ if (C->segments.max.no < C->segments.current.no) \
+ C->segments.max.no = C->segments.current.no; \
+ if (C->segments.current.watermark < C->segments.current.no) \
+ C->segments.current.watermark = C->segments.current.no; \
+ C->segments.current.sz += (SZ); \
+ if (C->segments.max.sz < C->segments.current.sz) \
+ C->segments.max.sz = C->segments.current.sz; \
} while (0)
-#define ERTS_MSEG_DEALLOC_STAT(SZ) \
+#define ERTS_MSEG_DEALLOC_STAT(C,SZ) \
do { \
- ASSERT(segments.current.no > 0); \
- segments.current.no--; \
- ASSERT(segments.current.sz >= (SZ)); \
- segments.current.sz -= (SZ); \
+ ASSERT(C->segments.current.no > 0); \
+ C->segments.current.no--; \
+ ASSERT(C->segments.current.sz >= (SZ)); \
+ C->segments.current.sz -= (SZ); \
} while (0)
-#define ERTS_MSEG_REALLOC_STAT(OSZ, NSZ) \
+#define ERTS_MSEG_REALLOC_STAT(C,OSZ, NSZ) \
do { \
- ASSERT(segments.current.sz >= (OSZ)); \
- segments.current.sz -= (OSZ); \
- segments.current.sz += (NSZ); \
+ ASSERT(C->segments.current.sz >= (OSZ)); \
+ C->segments.current.sz -= (OSZ); \
+ C->segments.current.sz += (NSZ); \
} while (0)
#define ONE_GIGA (1000000000)
@@ -271,7 +288,7 @@ schedule_cache_check(void)
#endif
{
cache_check_timer.active = 0;
- erl_set_timer(&cache_check_timer,
+ erts_set_timer(&cache_check_timer,
check_cache,
NULL,
NULL,
@@ -302,13 +319,16 @@ check_schedule_cache_check(void)
static void
mseg_shutdown(void)
{
+ MemKind* mk;
erts_mtx_lock(&mseg_mutex);
- mseg_clear_cache();
+ for (mk=mk_list; mk; mk=mk->next) {
+ mseg_clear_cache(mk);
+ }
erts_mtx_unlock(&mseg_mutex);
}
static ERTS_INLINE void *
-mseg_create(Uint size)
+mseg_create(MemKind* mk, Uint size)
{
void *seg;
@@ -318,19 +338,21 @@ mseg_create(Uint size)
seg = erts_sys_alloc(ERTS_ALC_N_INVALID, NULL, size);
#elif HAVE_MMAP
#if HALFWORD_HEAP
- seg = pmmap(size);
-#else
- seg = (void *) mmap((void *) 0, (size_t) size,
- MMAP_PROT, MMAP_FLAGS, MMAP_FD, 0);
- if (seg == (void *) MAP_FAILED)
- seg = NULL;
-#endif
-#if HALFWORD_HEAP
- if ((unsigned long) seg & CHECK_POINTER_MASK) {
- erts_fprintf(stderr,"Pointer mask failure (0x%08lx)\n",(unsigned long) seg);
- return NULL;
+ if (mk == &low_mem) {
+ seg = pmmap(size);
+ if ((unsigned long) seg & CHECK_POINTER_MASK) {
+ erts_fprintf(stderr,"Pointer mask failure (0x%08lx)\n",(unsigned long) seg);
+ return NULL;
+ }
}
+ else
#endif
+ {
+ seg = (void *) mmap((void *) 0, (size_t) size,
+ MMAP_PROT, MMAP_FLAGS, MMAP_FD, 0);
+ if (seg == (void *) MAP_FAILED)
+ seg = NULL;
+ }
#else
#error "Missing mseg_create() implementation"
#endif
@@ -341,20 +363,23 @@ mseg_create(Uint size)
}
static ERTS_INLINE void
-mseg_destroy(void *seg, Uint size)
+mseg_destroy(MemKind* mk, void *seg, Uint size)
{
#if defined(ERTS_MSEG_FAKE_SEGMENTS)
erts_sys_free(ERTS_ALC_N_INVALID, NULL, seg);
#elif HAVE_MMAP
+ int res;
-#ifdef DEBUG
- int res =
-#endif
#if HALFWORD_HEAP
- pmunmap((void *) seg, size);
-#else
- munmap((void *) seg, size);
+ if (mk == &low_mem) {
+ res = pmunmap((void *) seg, size);
+ }
+ else
#endif
+ {
+ res = munmap((void *) seg, size);
+ }
+
ASSERT(size % page_size == 0);
ASSERT(res == 0);
#else
@@ -368,7 +393,7 @@ mseg_destroy(void *seg, Uint size)
#if HAVE_MSEG_RECREATE
static ERTS_INLINE void *
-mseg_recreate(void *old_seg, Uint old_size, Uint new_size)
+mseg_recreate(MemKind* mk, void *old_seg, Uint old_size, Uint new_size)
{
void *new_seg;
@@ -379,25 +404,29 @@ mseg_recreate(void *old_seg, Uint old_size, Uint new_size)
new_seg = erts_sys_realloc(ERTS_ALC_N_INVALID, NULL, old_seg, new_size);
#elif HAVE_MREMAP
#if HALFWORD_HEAP
- new_seg = (void *) pmremap((void *) old_seg,
- (size_t) old_size,
- (size_t) new_size);
-#elif defined(__NetBSD__)
- new_seg = (void *) mremap((void *) old_seg,
- (size_t) old_size,
- NULL,
- (size_t) new_size,
- 0);
- if (new_seg == (void *) MAP_FAILED)
- new_seg = NULL;
-#else
- new_seg = (void *) mremap((void *) old_seg,
- (size_t) old_size,
- (size_t) new_size,
- MREMAP_MAYMOVE);
- if (new_seg == (void *) MAP_FAILED)
- new_seg = NULL;
+ if (mk == &low_mem) {
+ new_seg = (void *) pmremap((void *) old_seg,
+ (size_t) old_size,
+ (size_t) new_size);
+ }
+ else
#endif
+ {
+ #if defined(__NetBSD__)
+ new_seg = (void *) mremap((void *) old_seg,
+ (size_t) old_size,
+ NULL,
+ (size_t) new_size,
+ 0);
+ #else
+ new_seg = (void *) mremap((void *) old_seg,
+ (size_t) old_size,
+ (size_t) new_size,
+ MREMAP_MAYMOVE);
+ #endif
+ if (new_seg == (void *) MAP_FAILED)
+ new_seg = NULL;
+ }
#else
#error "Missing mseg_recreate() implementation"
#endif
@@ -411,134 +440,142 @@ mseg_recreate(void *old_seg, Uint old_size, Uint new_size)
static ERTS_INLINE cache_desc_t *
-alloc_cd(void)
+alloc_cd(MemKind* mk)
{
- cache_desc_t *cd = free_cache_descs;
+ cache_desc_t *cd = mk->free_cache_descs;
ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
if (cd)
- free_cache_descs = cd->next;
+ mk->free_cache_descs = cd->next;
return cd;
}
static ERTS_INLINE void
-free_cd(cache_desc_t *cd)
+free_cd(MemKind* mk, cache_desc_t *cd)
{
ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
- cd->next = free_cache_descs;
- free_cache_descs = cd;
+ cd->next = mk->free_cache_descs;
+ mk->free_cache_descs = cd;
}
static ERTS_INLINE void
-link_cd(cache_desc_t *cd)
+link_cd(MemKind* mk, cache_desc_t *cd)
{
ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
- if (cache)
- cache->prev = cd;
- cd->next = cache;
+ if (mk->cache)
+ mk->cache->prev = cd;
+ cd->next = mk->cache;
cd->prev = NULL;
- cache = cd;
+ mk->cache = cd;
- if (!cache_end) {
+ if (!mk->cache_end) {
ASSERT(!cd->next);
- cache_end = cd;
+ mk->cache_end = cd;
}
- cache_size++;
+ mk->cache_size++;
}
+#if CAN_PARTLY_DESTROY
static ERTS_INLINE void
-end_link_cd(cache_desc_t *cd)
+end_link_cd(MemKind* mk, cache_desc_t *cd)
{
ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
- if (cache_end)
- cache_end->next = cd;
+ if (mk->cache_end)
+ mk->cache_end->next = cd;
cd->next = NULL;
- cd->prev = cache_end;
- cache_end = cd;
+ cd->prev = mk->cache_end;
+ mk->cache_end = cd;
- if (!cache) {
+ if (!mk->cache) {
ASSERT(!cd->prev);
- cache = cd;
+ mk->cache = cd;
}
- cache_size++;
+ mk->cache_size++;
}
+#endif
static ERTS_INLINE void
-unlink_cd(cache_desc_t *cd)
+unlink_cd(MemKind* mk, cache_desc_t *cd)
{
ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
if (cd->next)
cd->next->prev = cd->prev;
else
- cache_end = cd->prev;
+ mk->cache_end = cd->prev;
if (cd->prev)
cd->prev->next = cd->next;
else
- cache = cd->next;
- ASSERT(cache_size > 0);
- cache_size--;
+ mk->cache = cd->next;
+ ASSERT(mk->cache_size > 0);
+ mk->cache_size--;
}
static ERTS_INLINE void
-check_cache_limits(void)
+check_cache_limits(MemKind* mk)
{
cache_desc_t *cd;
ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
- max_cached_seg_size = 0;
- min_cached_seg_size = ~((Uint) 0);
- for (cd = cache; cd; cd = cd->next) {
- if (cd->size < min_cached_seg_size)
- min_cached_seg_size = cd->size;
- if (cd->size > max_cached_seg_size)
- max_cached_seg_size = cd->size;
+ mk->max_cached_seg_size = 0;
+ mk->min_cached_seg_size = ~((Uint) 0);
+ for (cd = mk->cache; cd; cd = cd->next) {
+ if (cd->size < mk->min_cached_seg_size)
+ mk->min_cached_seg_size = cd->size;
+ if (cd->size > mk->max_cached_seg_size)
+ mk->max_cached_seg_size = cd->size;
}
-
}
static ERTS_INLINE void
-adjust_cache_size(int force_check_limits)
+adjust_cache_size(MemKind* mk, int force_check_limits)
{
cache_desc_t *cd;
int check_limits = force_check_limits;
- Sint max_cached = ((Sint) segments.current.watermark
- - (Sint) segments.current.no);
+ Sint max_cached = ((Sint) mk->segments.current.watermark
+ - (Sint) mk->segments.current.no);
ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
- while (((Sint) cache_size) > max_cached && ((Sint) cache_size) > 0) {
- ASSERT(cache_end);
- cd = cache_end;
+ while (((Sint) mk->cache_size) > max_cached && ((Sint) mk->cache_size) > 0) {
+ ASSERT(mk->cache_end);
+ cd = mk->cache_end;
if (!check_limits &&
- !(min_cached_seg_size < cd->size
- && cd->size < max_cached_seg_size)) {
+ !(mk->min_cached_seg_size < cd->size
+ && cd->size < mk->max_cached_seg_size)) {
check_limits = 1;
}
if (erts_mtrace_enabled)
erts_mtrace_crr_free(SEGTYPE, SEGTYPE, cd->seg);
- mseg_destroy(cd->seg, cd->size);
- unlink_cd(cd);
- free_cd(cd);
+ mseg_destroy(mk, cd->seg, cd->size);
+ unlink_cd(mk,cd);
+ free_cd(mk,cd);
}
if (check_limits)
- check_cache_limits();
-
+ check_cache_limits(mk);
}
static void
-check_cache(void *unused)
+check_one_cache(MemKind* mk)
{
+ if (mk->segments.current.watermark > mk->segments.current.no)
+ mk->segments.current.watermark--;
+ adjust_cache_size(mk, 0);
+
+ if (mk->cache_size)
+ schedule_cache_check();
+}
+
+static void check_cache(void* unused)
+{
+ MemKind* mk;
erts_mtx_lock(&mseg_mutex);
is_cache_check_scheduled = 0;
- if (segments.current.watermark > segments.current.no)
- segments.current.watermark--;
- adjust_cache_size(0);
-
- if (cache_size)
- schedule_cache_check();
+ for (mk=mk_list; mk; mk=mk->next) {
+ check_one_cache(mk);
+ }
INC_CC(check_cache);
@@ -546,28 +583,45 @@ check_cache(void *unused)
}
static void
-mseg_clear_cache(void)
+mseg_clear_cache(MemKind* mk)
{
- segments.current.watermark = 0;
+ mk->segments.current.watermark = 0;
- adjust_cache_size(1);
+ adjust_cache_size(mk, 1);
- ASSERT(!cache);
- ASSERT(!cache_end);
- ASSERT(!cache_size);
+ ASSERT(!mk->cache);
+ ASSERT(!mk->cache_end);
+ ASSERT(!mk->cache_size);
- segments.current.watermark = segments.current.no;
+ mk->segments.current.watermark = mk->segments.current.no;
INC_CC(clear_cache);
}
+static ERTS_INLINE MemKind* type2mk(ErtsAlcType_t atype)
+{
+#if HALFWORD_HEAP
+ switch (atype) {
+ case ERTS_ALC_A_ETS:
+ case ERTS_ALC_A_BINARY:
+ case ERTS_ALC_A_FIXED_SIZE:
+ case ERTS_ALC_A_DRIVER:
+ return &hi_mem;
+ default:
+ return &low_mem;
+ }
+#else
+ return &the_mem;
+#endif
+}
+
static void *
mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
{
-
Uint max, min, diff_size, size;
cache_desc_t *cd, *cand_cd;
void *seg;
+ MemKind* mk = type2mk(atype);
INC_CC(alloc);
@@ -580,11 +634,11 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
if (!opt->cache) {
create_seg:
- adjust_cache_size(0);
- seg = mseg_create(size);
+ adjust_cache_size(mk,0);
+ seg = mseg_create(mk, size);
if (!seg) {
- mseg_clear_cache();
- seg = mseg_create(size);
+ mseg_clear_cache(mk);
+ seg = mseg_create(mk, size);
if (!seg)
size = 0;
}
@@ -593,17 +647,17 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
if (seg) {
if (erts_mtrace_enabled)
erts_mtrace_crr_alloc(seg, atype, ERTS_MTRACE_SEGMENT_ID, size);
- ERTS_MSEG_ALLOC_STAT(size);
+ ERTS_MSEG_ALLOC_STAT(mk,size);
}
return seg;
}
- if (size > max_cached_seg_size)
+ if (size > mk->max_cached_seg_size)
goto create_seg;
- if (size < min_cached_seg_size) {
+ if (size < mk->min_cached_seg_size) {
- diff_size = min_cached_seg_size - size;
+ diff_size = mk->min_cached_seg_size - size;
if (diff_size > abs_max_cache_bad_fit)
goto create_seg;
@@ -617,7 +671,7 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
min = ~((Uint) 0);
cand_cd = NULL;
- for (cd = cache; cd; cd = cd->next) {
+ for (cd = mk->cache; cd; cd = cd->next) {
if (cd->size >= size) {
if (!cand_cd) {
cand_cd = cd;
@@ -638,8 +692,8 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
min = cd->size;
}
- min_cached_seg_size = min;
- max_cached_seg_size = max;
+ mk->min_cached_seg_size = min;
+ mk->max_cached_seg_size = max;
if (!cand_cd)
goto create_seg;
@@ -648,20 +702,20 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
if (diff_size > abs_max_cache_bad_fit
|| 100*PAGES(diff_size) > rel_max_cache_bad_fit*PAGES(size)) {
- if (max_cached_seg_size < cand_cd->size)
- max_cached_seg_size = cand_cd->size;
- if (min_cached_seg_size > cand_cd->size)
- min_cached_seg_size = cand_cd->size;
+ if (mk->max_cached_seg_size < cand_cd->size)
+ mk->max_cached_seg_size = cand_cd->size;
+ if (mk->min_cached_seg_size > cand_cd->size)
+ mk->min_cached_seg_size = cand_cd->size;
goto create_seg;
}
- cache_hits++;
+ mk->cache_hits++;
size = cand_cd->size;
seg = cand_cd->seg;
- unlink_cd(cand_cd);
- free_cd(cand_cd);
+ unlink_cd(mk,cand_cd);
+ free_cd(mk,cand_cd);
*size_p = size;
@@ -671,7 +725,7 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
}
if (seg)
- ERTS_MSEG_ALLOC_STAT(size);
+ ERTS_MSEG_ALLOC_STAT(mk,size);
return seg;
}
@@ -680,41 +734,42 @@ static void
mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size,
const ErtsMsegOpt_t *opt)
{
+ MemKind* mk = type2mk(atype);
cache_desc_t *cd;
- ERTS_MSEG_DEALLOC_STAT(size);
+ ERTS_MSEG_DEALLOC_STAT(mk,size);
if (!opt->cache || max_cache_size == 0) {
if (erts_mtrace_enabled)
erts_mtrace_crr_free(atype, SEGTYPE, seg);
- mseg_destroy(seg, size);
+ mseg_destroy(mk, seg, size);
}
else {
int check_limits = 0;
- if (size < min_cached_seg_size)
- min_cached_seg_size = size;
- if (size > max_cached_seg_size)
- max_cached_seg_size = size;
-
- if (!free_cache_descs) {
- cd = cache_end;
- if (!(min_cached_seg_size < cd->size
- && cd->size < max_cached_seg_size)) {
+ if (size < mk->min_cached_seg_size)
+ mk->min_cached_seg_size = size;
+ if (size > mk->max_cached_seg_size)
+ mk->max_cached_seg_size = size;
+
+ if (!mk->free_cache_descs) {
+ cd = mk->cache_end;
+ if (!(mk->min_cached_seg_size < cd->size
+ && cd->size < mk->max_cached_seg_size)) {
check_limits = 1;
}
if (erts_mtrace_enabled)
erts_mtrace_crr_free(SEGTYPE, SEGTYPE, cd->seg);
- mseg_destroy(cd->seg, cd->size);
- unlink_cd(cd);
- free_cd(cd);
+ mseg_destroy(mk, cd->seg, cd->size);
+ unlink_cd(mk,cd);
+ free_cd(mk,cd);
}
- cd = alloc_cd();
+ cd = alloc_cd(mk);
ASSERT(cd);
cd->seg = seg;
cd->size = size;
- link_cd(cd);
+ link_cd(mk,cd);
if (erts_mtrace_enabled) {
erts_mtrace_crr_free(atype, SEGTYPE, seg);
@@ -724,7 +779,7 @@ mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size,
/* ASSERT(segments.current.watermark >= segments.current.no + cache_size); */
if (check_limits)
- check_cache_limits();
+ check_cache_limits(mk);
schedule_cache_check();
@@ -737,6 +792,7 @@ static void *
mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
const ErtsMsegOpt_t *opt)
{
+ MemKind* mk = type2mk(atype);
void *new_seg;
Uint new_size;
@@ -774,15 +830,15 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
#if CAN_PARTLY_DESTROY
if (shrink_sz > min_seg_size
- && free_cache_descs
+ && mk->free_cache_descs
&& opt->cache) {
cache_desc_t *cd;
- cd = alloc_cd();
+ cd = alloc_cd(mk);
ASSERT(cd);
cd->seg = ((char *) seg) + new_size;
cd->size = shrink_sz;
- end_link_cd(cd);
+ end_link_cd(mk,cd);
if (erts_mtrace_enabled) {
erts_mtrace_crr_realloc(new_seg,
@@ -801,7 +857,7 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
SEGTYPE,
seg,
new_size);
- mseg_destroy(((char *) seg) + new_size, shrink_sz);
+ mseg_destroy(mk, ((char *) seg) + new_size, shrink_sz);
}
#elif HAVE_MSEG_RECREATE
@@ -835,7 +891,7 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
#if !CAN_PARTLY_DESTROY
do_recreate:
#endif
- new_seg = mseg_recreate((void *) seg, old_size, new_size);
+ new_seg = mseg_recreate(mk, (void *) seg, old_size, new_size);
if (erts_mtrace_enabled)
erts_mtrace_crr_realloc(new_seg, atype, SEGTYPE, seg, new_size);
if (!new_seg)
@@ -858,7 +914,7 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
*new_size_p = new_size;
- ERTS_MSEG_REALLOC_STAT(old_size, new_size);
+ ERTS_MSEG_REALLOC_STAT(mk, old_size, new_size);
return new_seg;
}
@@ -874,6 +930,8 @@ static struct {
Eterm mcs;
Eterm cci;
+ Eterm memkind;
+ Eterm name;
Eterm status;
Eterm cached_segments;
Eterm cache_hits;
@@ -923,6 +981,8 @@ init_atoms(void)
#endif
AM_INIT(version);
+ AM_INIT(memkind);
+ AM_INIT(name);
AM_INIT(options);
AM_INIT(amcbf);
@@ -1133,65 +1193,88 @@ info_calls(int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp)
}
static Eterm
-info_status(int *print_to_p,
- void *print_to_arg,
- int begin_new_max_period,
- Uint **hpp,
- Uint *szp)
+info_status(MemKind* mk, int *print_to_p, void *print_to_arg,
+ int begin_new_max_period, Uint **hpp, Uint *szp)
{
Eterm res = THE_NON_VALUE;
- if (segments.max_ever.no < segments.max.no)
- segments.max_ever.no = segments.max.no;
- if (segments.max_ever.sz < segments.max.sz)
- segments.max_ever.sz = segments.max.sz;
+ if (mk->segments.max_ever.no < mk->segments.max.no)
+ mk->segments.max_ever.no = mk->segments.max.no;
+ if (mk->segments.max_ever.sz < mk->segments.max.sz)
+ mk->segments.max_ever.sz = mk->segments.max.sz;
if (print_to_p) {
int to = *print_to_p;
void *arg = print_to_arg;
- erts_print(to, arg, "cached_segments: %bpu\n", cache_size);
- erts_print(to, arg, "cache_hits: %bpu\n", cache_hits);
+ erts_print(to, arg, "cached_segments: %bpu\n", mk->cache_size);
+ erts_print(to, arg, "cache_hits: %bpu\n", mk->cache_hits);
erts_print(to, arg, "segments: %bpu %bpu %bpu\n",
- segments.current.no, segments.max.no, segments.max_ever.no);
+ mk->segments.current.no, mk->segments.max.no, mk->segments.max_ever.no);
erts_print(to, arg, "segments_size: %bpu %bpu %bpu\n",
- segments.current.sz, segments.max.sz, segments.max_ever.sz);
+ mk->segments.current.sz, mk->segments.max.sz, mk->segments.max_ever.sz);
erts_print(to, arg, "segments_watermark: %bpu\n",
- segments.current.watermark);
+ mk->segments.current.watermark);
}
if (hpp || szp) {
res = NIL;
add_2tup(hpp, szp, &res,
am.segments_watermark,
- bld_unstable_uint(hpp, szp, segments.current.watermark));
+ bld_unstable_uint(hpp, szp, mk->segments.current.watermark));
add_4tup(hpp, szp, &res,
am.segments_size,
- bld_unstable_uint(hpp, szp, segments.current.sz),
- bld_unstable_uint(hpp, szp, segments.max.sz),
- bld_unstable_uint(hpp, szp, segments.max_ever.sz));
+ bld_unstable_uint(hpp, szp, mk->segments.current.sz),
+ bld_unstable_uint(hpp, szp, mk->segments.max.sz),
+ bld_unstable_uint(hpp, szp, mk->segments.max_ever.sz));
add_4tup(hpp, szp, &res,
am.segments,
- bld_unstable_uint(hpp, szp, segments.current.no),
- bld_unstable_uint(hpp, szp, segments.max.no),
- bld_unstable_uint(hpp, szp, segments.max_ever.no));
+ bld_unstable_uint(hpp, szp, mk->segments.current.no),
+ bld_unstable_uint(hpp, szp, mk->segments.max.no),
+ bld_unstable_uint(hpp, szp, mk->segments.max_ever.no));
add_2tup(hpp, szp, &res,
am.cache_hits,
- bld_unstable_uint(hpp, szp, cache_hits));
+ bld_unstable_uint(hpp, szp, mk->cache_hits));
add_2tup(hpp, szp, &res,
am.cached_segments,
- bld_unstable_uint(hpp, szp, cache_size));
+ bld_unstable_uint(hpp, szp, mk->cache_size));
}
if (begin_new_max_period) {
- segments.max.no = segments.current.no;
- segments.max.sz = segments.current.sz;
+ mk->segments.max.no = mk->segments.current.no;
+ mk->segments.max.sz = mk->segments.current.sz;
}
return res;
}
+static Eterm info_memkind(MemKind* mk, int *print_to_p, void *print_to_arg,
+ int begin_max_per, Uint **hpp, Uint *szp)
+{
+ Eterm res = THE_NON_VALUE;
+ Eterm atoms[3];
+ Eterm values[3];
+
+ if (print_to_p) {
+ erts_print(*print_to_p, print_to_arg, "memory kind: %s\n", mk->name);
+ }
+ if (hpp || szp) {
+ atoms[0] = am.name;
+ atoms[1] = am.status;
+ atoms[2] = am.calls;
+ values[0] = erts_bld_string(hpp, szp, mk->name);
+ }
+ values[1] = info_status(mk, print_to_p, print_to_arg, begin_max_per, hpp, szp);
+ values[2] = info_calls(print_to_p, print_to_arg, hpp, szp);
+
+ if (hpp || szp)
+ res = bld_2tup_list(hpp, szp, 3, atoms, values);
+
+ return res;
+}
+
+
static Eterm
info_version(int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp)
{
@@ -1238,6 +1321,7 @@ erts_mseg_info(int *print_to_p,
Eterm res = THE_NON_VALUE;
Eterm atoms[4];
Eterm values[4];
+ Uint n = 0;
erts_mtx_lock(&mseg_mutex);
@@ -1248,17 +1332,19 @@ erts_mseg_info(int *print_to_p,
atoms[0] = am.version;
atoms[1] = am.options;
- atoms[2] = am.status;
- atoms[3] = am.calls;
+ atoms[2] = am.memkind;
+ atoms[3] = am.memkind;
}
-
- values[0] = info_version(print_to_p, print_to_arg, hpp, szp);
- values[1] = info_options("option ", print_to_p, print_to_arg, hpp, szp);
- values[2] = info_status(print_to_p, print_to_arg, begin_max_per, hpp, szp);
- values[3] = info_calls(print_to_p, print_to_arg, hpp, szp);
-
+ values[n++] = info_version(print_to_p, print_to_arg, hpp, szp);
+ values[n++] = info_options("option ", print_to_p, print_to_arg, hpp, szp);
+#if HALFWORD_HEAP
+ values[n++] = info_memkind(&low_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp);
+ values[n++] = info_memkind(&hi_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp);
+#else
+ values[n++] = info_memkind(&the_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp);
+#endif
if (hpp || szp)
- res = bld_2tup_list(hpp, szp, 4, atoms, values);
+ res = bld_2tup_list(hpp, szp, n, atoms, values);
erts_mtx_unlock(&mseg_mutex);
@@ -1317,17 +1403,23 @@ erts_mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size,
void
erts_mseg_clear_cache(void)
{
+ MemKind* mk;
erts_mtx_lock(&mseg_mutex);
- mseg_clear_cache();
+ for (mk=mk_list; mk; mk=mk->next) {
+ mseg_clear_cache(mk);
+ }
erts_mtx_unlock(&mseg_mutex);
}
Uint
erts_mseg_no(void)
{
- Uint n;
+ MemKind* mk;
+ Uint n = 0;
erts_mtx_lock(&mseg_mutex);
- n = segments.current.no;
+ for (mk=mk_list; mk; mk=mk->next) {
+ n += mk->segments.current.no;
+ }
erts_mtx_unlock(&mseg_mutex);
return n;
}
@@ -1338,11 +1430,43 @@ erts_mseg_unit_size(void)
return page_size;
}
-void
-erts_mseg_init(ErtsMsegInit_t *init)
+static void mem_kind_init(MemKind* mk, const char* name)
{
unsigned i;
+ mk->cache = NULL;
+ mk->cache_end = NULL;
+ mk->max_cached_seg_size = 0;
+ mk->min_cached_seg_size = ~((Uint) 0);
+ mk->cache_size = 0;
+ mk->cache_hits = 0;
+
+ if (max_cache_size > 0) {
+ for (i = 0; i < max_cache_size - 1; i++)
+ mk->cache_descs[i].next = &mk->cache_descs[i + 1];
+ mk->cache_descs[max_cache_size - 1].next = NULL;
+ mk->free_cache_descs = &mk->cache_descs[0];
+ }
+ else
+ mk->free_cache_descs = NULL;
+
+ mk->segments.current.watermark = 0;
+ mk->segments.current.no = 0;
+ mk->segments.current.sz = 0;
+ mk->segments.max.no = 0;
+ mk->segments.max.sz = 0;
+ mk->segments.max_ever.no = 0;
+ mk->segments.max_ever.sz = 0;
+
+ mk->name = name;
+ mk->next = mk_list;
+ mk_list = mk;
+}
+
+
+void
+erts_mseg_init(ErtsMsegInit_t *init)
+{
atoms_initialized = 0;
is_init_done = 0;
@@ -1385,40 +1509,33 @@ erts_mseg_init(ErtsMsegInit_t *init)
min_seg_size = ~((Uint) 0);
#endif
- cache = NULL;
- cache_end = NULL;
- cache_hits = 0;
- max_cached_seg_size = 0;
- min_cached_seg_size = ~((Uint) 0);
- cache_size = 0;
+ if (max_cache_size > MAX_CACHE_SIZE)
+ max_cache_size = MAX_CACHE_SIZE;
+
+#if HALFWORD_HEAP
+ mem_kind_init(&low_mem, "low memory");
+ mem_kind_init(&hi_mem, "high memory");
+#else
+ mem_kind_init(&the_mem, "all memory");
+#endif
is_cache_check_scheduled = 0;
#ifdef ERTS_THREADS_NO_SMP
is_cache_check_requested = 0;
#endif
+}
- if (max_cache_size > MAX_CACHE_SIZE)
- max_cache_size = MAX_CACHE_SIZE;
- if (max_cache_size > 0) {
- for (i = 0; i < max_cache_size - 1; i++)
- cache_descs[i].next = &cache_descs[i + 1];
- cache_descs[max_cache_size - 1].next = NULL;
- free_cache_descs = &cache_descs[0];
+static ERTS_INLINE Uint tot_cache_size(void)
+{
+ MemKind* mk;
+ Uint sz = 0;
+ for (mk=mk_list; mk; mk=mk->next) {
+ sz += mk->cache_size;
}
- else
- free_cache_descs = NULL;
-
- segments.current.watermark = 0;
- segments.current.no = 0;
- segments.current.sz = 0;
- segments.max.no = 0;
- segments.max.sz = 0;
- segments.max_ever.no = 0;
- segments.max_ever.sz = 0;
+ return sz;
}
-
/*
* erts_mseg_late_init() have to be called after all allocators,
* threads and timers have been initialized.
@@ -1436,7 +1553,7 @@ erts_mseg_late_init(void)
#ifdef ERTS_THREADS_NO_SMP
async_handle = handle;
#endif
- if (cache_size)
+ if (tot_cache_size())
schedule_cache_check();
erts_mtx_unlock(&mseg_mutex);
}
@@ -1477,7 +1594,7 @@ erts_mseg_test(unsigned long op,
case 0x406: {
unsigned long res;
erts_mtx_lock(&mseg_mutex);
- res = (unsigned long) cache_size;
+ res = (unsigned long) tot_cache_size();
erts_mtx_unlock(&mseg_mutex);
return res;
}
@@ -1756,6 +1873,7 @@ static int pmunmap(void *p, size_t size)
FreeBlock *last;
FreeBlock *nb = (FreeBlock *) p;
+ ASSERT(((unsigned long)p & CHECK_POINTER_MASK)==0);
if (real_size > pagsz) {
if (do_unmap(((char *) p) + pagsz,real_size - pagsz)) {
return 1;
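
The erl_mseg.c hunks above replace the single global segment cache with one cache per memory kind: on HALFWORD_HEAP builds, segments that must lie below the pointer mask stay in low_mem while ETS, binary, fixed-size and driver allocations go to hi_mem, and every kind is pushed onto mk_list at init time so shutdown, the periodic cache check and the statistics walk all kinds uniformly. The following minimal sketch is not OTP code; the AlcType values are hypothetical stand-ins for the real ErtsAlcType_t allocator numbers, but it shows the same dispatch-and-iterate pattern:

#include <stdio.h>

/* Hypothetical stand-ins for the real ErtsAlcType_t allocator numbers. */
typedef enum { ALC_ETS, ALC_BINARY, ALC_FIXED, ALC_DRIVER, ALC_OTHER } AlcType;

typedef struct MemKind {
    unsigned cache_size;       /* cached segments held by this kind */
    const char *name;
    struct MemKind *next;      /* intrusive list, newest kind first */
} MemKind;

static MemKind low_mem, hi_mem;
static MemKind *mk_list = NULL;

static void mem_kind_init(MemKind *mk, const char *name)
{
    mk->cache_size = 0;
    mk->name = name;
    mk->next = mk_list;        /* push onto the global kind list */
    mk_list = mk;
}

/* Data that need not be addressable with 32-bit pointers goes high;
   everything else (heap terms on a halfword emulator) stays low. */
static MemKind *type2mk(AlcType t)
{
    switch (t) {
    case ALC_ETS:
    case ALC_BINARY:
    case ALC_FIXED:
    case ALC_DRIVER: return &hi_mem;
    default:         return &low_mem;
    }
}

static unsigned tot_cache_size(void)
{
    unsigned sz = 0;
    for (MemKind *mk = mk_list; mk; mk = mk->next)
        sz += mk->cache_size;  /* walk every kind, like erts_mseg_no() */
    return sz;
}

int main(void)
{
    mem_kind_init(&low_mem, "low memory");
    mem_kind_init(&hi_mem, "high memory");
    type2mk(ALC_BINARY)->cache_size = 3;            /* lands in hi_mem  */
    type2mk(ALC_OTHER)->cache_size  = 2;            /* lands in low_mem */
    printf("%s\n", type2mk(ALC_ETS)->name);         /* high memory */
    printf("total cached: %u\n", tot_cache_size()); /* 5 */
    return 0;
}

Keeping the kinds on an intrusive list built in mem_kind_init() is what lets call sites such as erts_mseg_no() and tot_cache_size() stay independent of how many kinds a particular build configures.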
diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c
index 4d0ca97889..77ac2de5f6 100644
--- a/erts/emulator/sys/common/erl_poll.c
+++ b/erts/emulator/sys/common/erl_poll.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -124,25 +124,11 @@
erts_smp_mtx_unlock(&(PS)->mtx)
#define ERTS_POLLSET_SET_POLLED_CHK(PS) \
- ((int) erts_smp_atomic_xchg(&(PS)->polled, (erts_aint_t) 1))
+ ((int) erts_atomic32_xchg(&(PS)->polled, (erts_aint32_t) 1))
#define ERTS_POLLSET_UNSET_POLLED(PS) \
- erts_smp_atomic_set(&(PS)->polled, (erts_aint_t) 0)
+ erts_atomic32_set(&(PS)->polled, (erts_aint32_t) 0)
#define ERTS_POLLSET_IS_POLLED(PS) \
- ((int) erts_smp_atomic_read(&(PS)->polled))
-
-#define ERTS_POLLSET_SET_POLLER_WOKEN_CHK(PS) set_poller_woken_chk((PS))
-#define ERTS_POLLSET_SET_POLLER_WOKEN(PS) \
-do { \
- ERTS_THR_MEMORY_BARRIER; \
- erts_smp_atomic_set(&(PS)->woken, (erts_aint_t) 1); \
-} while (0)
-#define ERTS_POLLSET_UNSET_POLLER_WOKEN(PS) \
-do { \
- erts_smp_atomic_set(&(PS)->woken, (erts_aint_t) 0); \
- ERTS_THR_MEMORY_BARRIER; \
-} while (0)
-#define ERTS_POLLSET_IS_POLLER_WOKEN(PS) \
- ((int) erts_smp_atomic_read(&(PS)->woken))
+ ((int) erts_atomic32_read(&(PS)->polled))
#else
@@ -152,69 +138,21 @@ do { \
#define ERTS_POLLSET_UNSET_POLLED(PS)
#define ERTS_POLLSET_IS_POLLED(PS) 0
-#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
-
-/*
- * Ideally, the ERTS_POLLSET_SET_POLLER_WOKEN_CHK(PS) operation would
- * be atomic. This operation isn't, but we will do okay anyway. The
- * "woken check" is only an optimization. The only requirement we have:
- * If (PS)->woken is set to a value != 0 when interrupting, we have to
- * write on the the wakeup pipe at least once. Multiple writes are okay.
- */
-#define ERTS_POLLSET_SET_POLLER_WOKEN_CHK(PS) ((PS)->woken++)
-#define ERTS_POLLSET_SET_POLLER_WOKEN(PS) ((PS)->woken = 1, (void) 0)
-#define ERTS_POLLSET_UNSET_POLLER_WOKEN(PS) ((PS)->woken = 0, (void) 0)
-#define ERTS_POLLSET_IS_POLLER_WOKEN(PS) ((PS)->woken)
-
-#else
-
-#define ERTS_POLLSET_SET_POLLER_WOKEN_CHK(PS) 1
-#define ERTS_POLLSET_SET_POLLER_WOKEN(PS)
-#define ERTS_POLLSET_UNSET_POLLER_WOKEN(PS)
-#define ERTS_POLLSET_IS_POLLER_WOKEN(PS) 1
-
-#endif
-
#endif
#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
#define ERTS_POLLSET_SET_HAVE_UPDATE_REQUESTS(PS) \
- erts_smp_atomic_set(&(PS)->have_update_requests, (erts_aint_t) 1)
+ erts_smp_atomic32_set(&(PS)->have_update_requests, (erts_aint32_t) 1)
#define ERTS_POLLSET_UNSET_HAVE_UPDATE_REQUESTS(PS) \
- erts_smp_atomic_set(&(PS)->have_update_requests, (erts_aint_t) 0)
+ erts_smp_atomic32_set(&(PS)->have_update_requests, (erts_aint32_t) 0)
#define ERTS_POLLSET_HAVE_UPDATE_REQUESTS(PS) \
- ((int) erts_smp_atomic_read(&(PS)->have_update_requests))
+ ((int) erts_smp_atomic32_read(&(PS)->have_update_requests))
#else
#define ERTS_POLLSET_SET_HAVE_UPDATE_REQUESTS(PS)
#define ERTS_POLLSET_UNSET_HAVE_UPDATE_REQUESTS(PS)
#define ERTS_POLLSET_HAVE_UPDATE_REQUESTS(PS) 0
#endif
-#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT && !defined(ERTS_SMP)
-
-#define ERTS_POLLSET_UNSET_INTERRUPTED_CHK(PS) unset_interrupted_chk((PS))
-#define ERTS_POLLSET_UNSET_INTERRUPTED(PS) ((PS)->interrupt = 0, (void) 0)
-#define ERTS_POLLSET_SET_INTERRUPTED(PS) ((PS)->interrupt = 1, (void) 0)
-#define ERTS_POLLSET_IS_INTERRUPTED(PS) ((PS)->interrupt)
-
-#else
-
-#define ERTS_POLLSET_UNSET_INTERRUPTED_CHK(PS) unset_interrupted_chk((PS))
-#define ERTS_POLLSET_UNSET_INTERRUPTED(PS) \
-do { \
- erts_smp_atomic_set(&(PS)->interrupt, (erts_aint_t) 0); \
- ERTS_THR_MEMORY_BARRIER; \
-} while (0)
-#define ERTS_POLLSET_SET_INTERRUPTED(PS) \
-do { \
- ERTS_THR_MEMORY_BARRIER; \
- erts_smp_atomic_set(&(PS)->interrupt, (erts_aint_t) 1); \
-} while (0)
-#define ERTS_POLLSET_IS_INTERRUPTED(PS) \
- ((int) erts_smp_atomic_read(&(PS)->interrupt))
-
-#endif
-
#if ERTS_POLL_USE_FALLBACK
# if ERTS_POLL_USE_POLL
# define ERTS_POLL_NEED_FALLBACK(PS) ((PS)->no_poll_fds > 1)
@@ -318,14 +256,12 @@ struct ErtsPollSet_ {
#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
ErtsPollSetUpdateRequestsBlock update_requests;
ErtsPollSetUpdateRequestsBlock *curr_upd_req_block;
- erts_smp_atomic_t have_update_requests;
+ erts_smp_atomic32_t have_update_requests;
#endif
#ifdef ERTS_SMP
- erts_smp_atomic_t polled;
- erts_smp_atomic_t woken;
+ erts_atomic32_t polled;
erts_smp_mtx_t mtx;
#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
- volatile int woken;
#endif
#if ERTS_POLL_USE_WAKEUP_PIPE
int wake_fds[2];
@@ -333,12 +269,12 @@ struct ErtsPollSet_ {
#if ERTS_POLL_USE_FALLBACK
int fallback_used;
#endif
-#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT && !defined(ERTS_SMP)
- volatile int interrupt;
-#else
- erts_smp_atomic_t interrupt;
+#ifdef ERTS_SMP
+ erts_atomic32_t wakeup_state;
+#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ volatile int wakeup_state;
#endif
- erts_smp_atomic_t timeout;
+ erts_smp_atomic32_t timeout;
#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
erts_smp_atomic_t no_avoided_wakeups;
erts_smp_atomic_t no_avoided_interrupts;
@@ -346,34 +282,6 @@ struct ErtsPollSet_ {
#endif
};
-static ERTS_INLINE int
-unset_interrupted_chk(ErtsPollSet ps)
-{
- int res;
-#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT && !defined(ERTS_SMP)
- /* This operation isn't atomic, but we have no need at all for an
- atomic operation here... */
- res = ps->interrupt;
- ps->interrupt = 0;
-#else
- res = (int) erts_smp_atomic_xchg(&ps->interrupt, (erts_aint_t) 0);
- ERTS_THR_MEMORY_BARRIER;
-#endif
- return res;
-
-}
-
-#ifdef ERTS_SMP
-
-static ERTS_INLINE int
-set_poller_woken_chk(ErtsPollSet ps)
-{
- ERTS_THR_MEMORY_BARRIER;
- return (int) erts_smp_atomic_xchg(&ps->woken, (erts_aint_t) 1);
-}
-
-#endif
-
void erts_silence_warn_unused_result(long unused);
static void fatal_error(char *format, ...);
static void fatal_error_async_signal_safe(char *error_str);
@@ -430,6 +338,63 @@ static void check_poll_status(ErtsPollSet ps);
static void print_misc_debug_info(void);
#endif
+#define ERTS_POLL_NOT_WOKEN 0
+#define ERTS_POLL_WOKEN -1
+#define ERTS_POLL_WOKEN_INTR 1
+
+static ERTS_INLINE void
+reset_wakeup_state(ErtsPollSet ps)
+{
+#ifdef ERTS_SMP
+ erts_atomic32_set(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN);
+#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ ps->wakeup_state = 0;
+#endif
+}
+
+static ERTS_INLINE int
+is_woken(ErtsPollSet ps)
+{
+#ifdef ERTS_SMP
+ return erts_atomic32_read_acqb(&ps->wakeup_state) != ERTS_POLL_NOT_WOKEN;
+#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ return ps->wakeup_state != ERTS_POLL_NOT_WOKEN;
+#else
+ return 0;
+#endif
+}
+
+static ERTS_INLINE int
+is_interrupted_reset(ErtsPollSet ps)
+{
+#ifdef ERTS_SMP
+ return (erts_atomic32_xchg(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN)
+ == ERTS_POLL_WOKEN_INTR);
+#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ int res = ps->wakeup_state == ERTS_POLL_WOKEN_INTR;
+ ps->wakeup_state = ERTS_POLL_NOT_WOKEN;
+ return res;
+#else
+ return 0;
+#endif
+}
+
+static ERTS_INLINE void
+woke_up(ErtsPollSet ps)
+{
+#ifdef ERTS_SMP
+ erts_aint32_t wakeup_state = erts_atomic32_read(&ps->wakeup_state);
+ if (wakeup_state == ERTS_POLL_NOT_WOKEN)
+ (void) erts_atomic32_cmpxchg(&ps->wakeup_state,
+ ERTS_POLL_WOKEN,
+ ERTS_POLL_NOT_WOKEN);
+ ASSERT(erts_atomic32_read(&ps->wakeup_state) != ERTS_POLL_NOT_WOKEN);
+#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ if (ps->wakeup_state == ERTS_POLL_NOT_WOKEN)
+ ps->wakeup_state = ERTS_POLL_WOKEN;
+#endif
+}
+
/*
* --- Wakeup pipe -----------------------------------------------------------
*/
@@ -437,14 +402,34 @@ static void print_misc_debug_info(void);
#if ERTS_POLL_USE_WAKEUP_PIPE
static ERTS_INLINE void
-wake_poller(ErtsPollSet ps)
+wake_poller(ErtsPollSet ps, int interrupted)
{
+ int wake;
+#ifdef ERTS_SMP
+ erts_aint32_t wakeup_state;
+ if (!interrupted)
+ wakeup_state = erts_atomic32_cmpxchg_relb(&ps->wakeup_state,
+ ERTS_POLL_WOKEN,
+ ERTS_POLL_NOT_WOKEN);
+ else {
+ /*
+ * We might unnecessarily write to the pipe, however,
+ * that isn't problematic.
+ */
+ wakeup_state = erts_atomic32_read(&ps->wakeup_state);
+ erts_atomic32_set_relb(&ps->wakeup_state, ERTS_POLL_WOKEN_INTR);
+ }
+ wake = wakeup_state == ERTS_POLL_NOT_WOKEN;
+#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ wake = ps->wakeup_state == ERTS_POLL_NOT_WOKEN;
+ ps->wakeup_state = interrupted ? ERTS_POLL_WOKEN_INTR : ERTS_POLL_WOKEN;
+#endif
/*
* NOTE: This function might be called from signal handlers in the
* non-smp case; therefore, it has to be async-signal safe in
* the non-smp case.
*/
- if (!ERTS_POLLSET_SET_POLLER_WOKEN_CHK(ps)) {
+ if (wake) {
ssize_t res;
if (ps->wake_fds[1] < 0)
return; /* Not initialized yet */
@@ -1387,9 +1372,7 @@ handle_update_requests(ErtsPollSet ps)
#endif /* ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE */
static ERTS_INLINE ErtsPollEvents
-poll_control(ErtsPollSet ps, int fd, ErtsPollEvents events, int on,
- int *have_set_have_update_requests,
- int *do_wake)
+poll_control(ErtsPollSet ps, int fd, ErtsPollEvents events, int on, int *do_wake)
{
ErtsPollEvents new_events;
@@ -1493,7 +1476,6 @@ ERTS_POLL_EXPORT(erts_poll_controlv)(ErtsPollSet ps,
int len)
{
int i;
- int hshur = 0;
int do_wake;
int final_do_wake = 0;
@@ -1505,17 +1487,17 @@ ERTS_POLL_EXPORT(erts_poll_controlv)(ErtsPollSet ps,
pcev[i].fd,
pcev[i].events,
pcev[i].on,
- &hshur,
&do_wake);
final_do_wake |= do_wake;
}
+ ERTS_POLLSET_UNLOCK(ps);
+
#ifdef ERTS_SMP
if (final_do_wake)
- wake_poller(ps);
+ wake_poller(ps, 0);
#endif /* ERTS_SMP */
- ERTS_POLLSET_UNLOCK(ps);
}
ErtsPollEvents
@@ -1526,20 +1508,20 @@ ERTS_POLL_EXPORT(erts_poll_control)(ErtsPollSet ps,
int* do_wake) /* In: Wake up polling thread */
/* Out: Poller is woken */
{
- int hshur = 0;
ErtsPollEvents res;
ERTS_POLLSET_LOCK(ps);
- res = poll_control(ps, fd, events, on, &hshur, do_wake);
+ res = poll_control(ps, fd, events, on, do_wake);
+
+ ERTS_POLLSET_UNLOCK(ps);
#ifdef ERTS_SMP
if (*do_wake) {
- wake_poller(ps);
+ wake_poller(ps, 0);
}
#endif /* ERTS_SMP */
- ERTS_POLLSET_UNLOCK(ps);
return res;
}
@@ -1918,9 +1900,11 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked)
return 0;
}
else {
- erts_aint_t timeout = tv->tv_sec*1000 + tv->tv_usec/1000;
+ long timeout = tv->tv_sec*1000 + tv->tv_usec/1000;
+ if (timeout > ERTS_AINT32_T_MAX)
+ timeout = ERTS_AINT32_T_MAX;
ASSERT(timeout >= 0);
- erts_smp_atomic_set(&ps->timeout, timeout);
+ erts_smp_atomic32_set_relb(&ps->timeout, (erts_aint32_t) timeout);
#if ERTS_POLL_USE_FALLBACK
if (!(ps->fallback_used = ERTS_POLL_NEED_FALLBACK(ps))) {
@@ -2042,15 +2026,14 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
(int) tv->tv_sec*1000 + tv->tv_usec/1000);
#endif
- ERTS_POLLSET_UNSET_POLLER_WOKEN(ps);
if (ERTS_POLLSET_SET_POLLED_CHK(ps)) {
res = EINVAL; /* Another thread is in erts_poll_wait()
on this pollset... */
goto done;
}
- if (ERTS_POLLSET_IS_INTERRUPTED(ps)) {
- /* Interrupt use zero timeout */
+ if (is_woken(ps)) {
+ /* Use zero timeout */
itv.tv_sec = 0;
itv.tv_usec = 0;
tvp = &itv;
@@ -2067,7 +2050,7 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
ps_locked = 0;
res = check_fd_events(ps, tvp, no_fds, &ps_locked);
- ERTS_POLLSET_SET_POLLER_WOKEN(ps);
+ woke_up(ps);
if (res == 0) {
res = ETIMEDOUT;
@@ -2099,9 +2082,7 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
check_poll_result(pr, no_fds);
#endif
- res = (no_fds == 0
- ? (ERTS_POLLSET_UNSET_INTERRUPTED_CHK(ps) ? EINTR : EAGAIN)
- : 0);
+ res = (no_fds == 0 ? (is_interrupted_reset(ps) ? EINTR : EAGAIN) : 0);
*len = no_fds;
}
@@ -2112,7 +2093,7 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
#endif
done:
- erts_smp_atomic_set(&ps->timeout, ERTS_AINT_T_MAX);
+ erts_smp_atomic32_set_relb(&ps->timeout, ERTS_AINT32_T_MAX);
#ifdef ERTS_POLL_DEBUG_PRINT
erts_printf("Leaving %s = erts_poll_wait()\n",
res == 0 ? "0" : erl_errno_id(res));
@@ -2128,20 +2109,17 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
void
ERTS_POLL_EXPORT(erts_poll_interrupt)(ErtsPollSet ps, int set)
{
+#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(ERTS_SMP)
/*
* NOTE: This function might be called from signal handlers in the
* non-smp case; therefore, it has to be async-signal safe in
* the non-smp case.
*/
- if (set) {
- ERTS_POLLSET_SET_INTERRUPTED(ps);
-#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(ERTS_SMP)
- wake_poller(ps);
+ if (!set)
+ reset_wakeup_state(ps);
+ else
+ wake_poller(ps, 1);
#endif
- }
- else {
- ERTS_POLLSET_UNSET_INTERRUPTED(ps);
- }
}
/*
@@ -2154,13 +2132,12 @@ ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet ps,
int set,
long msec)
{
- if (set) {
- if (erts_smp_atomic_read(&ps->timeout) > (erts_aint_t) msec) {
- ERTS_POLLSET_SET_INTERRUPTED(ps);
#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(ERTS_SMP)
- wake_poller(ps);
-#endif
- }
+ if (!set)
+ reset_wakeup_state(ps);
+ else {
+ if (erts_smp_atomic32_read_acqb(&ps->timeout) > (erts_aint32_t) msec)
+ wake_poller(ps, 1);
#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
else {
if (ERTS_POLLSET_IS_POLLED(ps))
@@ -2170,9 +2147,7 @@ ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet ps,
erts_smp_atomic_inc(&ps->no_interrupt_timed);
#endif
}
- else {
- ERTS_POLLSET_UNSET_INTERRUPTED(ps);
- }
+#endif
}
int
@@ -2283,14 +2258,16 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
ps->update_requests.next = NULL;
ps->update_requests.len = 0;
ps->curr_upd_req_block = &ps->update_requests;
- erts_smp_atomic_init(&ps->have_update_requests, 0);
+ erts_smp_atomic32_init(&ps->have_update_requests, 0);
#endif
#ifdef ERTS_SMP
- erts_smp_atomic_init(&ps->polled, 0);
- erts_smp_atomic_init(&ps->woken, 0);
+ erts_atomic32_init(&ps->polled, 0);
erts_smp_mtx_init(&ps->mtx, "pollset");
+#endif
+#ifdef ERTS_SMP
+ erts_atomic32_init(&ps->wakeup_state, (erts_aint32_t) 0);
#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
- ps->woken = 0;
+ ps->wakeup_state = 0;
#endif
#if ERTS_POLL_USE_WAKEUP_PIPE
create_wakeup_pipe(ps);
@@ -2312,12 +2289,7 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
ps->internal_fd_limit = kp_fd + 1;
ps->kp_fd = kp_fd;
#endif
-#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT && !defined(ERTS_SMP)
- ps->interrupt = 0;
-#else
- erts_smp_atomic_init(&ps->interrupt, 0);
-#endif
- erts_smp_atomic_init(&ps->timeout, ERTS_AINT_T_MAX);
+ erts_smp_atomic32_init(&ps->timeout, ERTS_AINT32_T_MAX);
#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
erts_smp_atomic_init(&ps->no_avoided_wakeups, 0);
erts_smp_atomic_init(&ps->no_avoided_interrupts, 0);
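
The erl_poll.c rewrite above folds the old polled/woken/interrupt flags into one three-valued wakeup_state word (ERTS_POLL_NOT_WOKEN, ERTS_POLL_WOKEN, ERTS_POLL_WOKEN_INTR), so at most one waker pays for the wakeup-pipe write and an interrupt is remembered long enough for erts_poll_wait() to report EINTR rather than EAGAIN. A small sketch of that protocol follows, assuming C11 atomics instead of the erts_atomic32 API (the real code also uses release/acquire variants); the pipe write is modeled as a printf:

#include <stdatomic.h>
#include <stdio.h>

#define NOT_WOKEN   0
#define WOKEN     (-1)
#define WOKEN_INTR  1

static _Atomic int wakeup_state = NOT_WOKEN;

/* Called by any thread that wants the poller out of poll()/select(). */
static void wake_poller(int interrupted)
{
    int expected = NOT_WOKEN;
    int wake;
    if (!interrupted) {
        /* Only the caller that wins the NOT_WOKEN -> WOKEN race pays
           for the wakeup-pipe write; everyone else sees it done. */
        wake = atomic_compare_exchange_strong(&wakeup_state, &expected, WOKEN);
    } else {
        /* Record the interrupt unconditionally: an extra pipe write is
           harmless, a lost interrupt flag is not. */
        wake = (atomic_exchange(&wakeup_state, WOKEN_INTR) == NOT_WOKEN);
    }
    if (wake)
        puts("write one byte to the wakeup pipe");
}

/* Called by the poller once poll()/select() has returned. */
static int is_interrupted_reset(void)
{
    return atomic_exchange(&wakeup_state, NOT_WOKEN) == WOKEN_INTR;
}

int main(void)
{
    wake_poller(0);               /* wins the race: one pipe write */
    wake_poller(1);               /* upgrades WOKEN to WOKEN_INTR  */
    printf("interrupted: %d\n", is_interrupted_reset()); /* 1: EINTR */
    return 0;
}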
diff --git a/erts/emulator/sys/unix/erl_unix_sys.h b/erts/emulator/sys/unix/erl_unix_sys.h
index 2d5ef882f6..824678a0bb 100644
--- a/erts/emulator/sys/unix/erl_unix_sys.h
+++ b/erts/emulator/sys/unix/erl_unix_sys.h
@@ -329,11 +329,4 @@ extern int exit_async(void);
#define ERTS_EXIT_AFTER_DUMP _exit
-#ifdef ERTS_TIMER_THREAD
-struct erts_iwait; /* opaque for clients */
-extern struct erts_iwait *erts_iwait_init(void);
-extern void erts_iwait_wait(struct erts_iwait *iwait, struct timeval *delay);
-extern void erts_iwait_interrupt(struct erts_iwait *iwait);
-#endif /* ERTS_TIMER_THREAD */
-
#endif /* #ifndef _ERL_UNIX_SYS_H */
diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c
index d021baa6bf..bafbbb0f6c 100644
--- a/erts/emulator/sys/unix/sys.c
+++ b/erts/emulator/sys/unix/sys.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -53,6 +53,11 @@
#define WANT_NONBLOCKING /* must define this to pull in defs from sys.h */
#include "sys.h"
+#if defined(__APPLE__) && defined(__MACH__) && !defined(__DARWIN__)
+#define __DARWIN__ 1
+#endif
+
+
#ifdef USE_THREADS
#include "erl_threads.h"
#endif
@@ -160,14 +165,14 @@ static int debug_log = 0;
#endif
#ifdef ERTS_SMP
-erts_smp_atomic_t erts_got_sigusr1;
+erts_smp_atomic32_t erts_got_sigusr1;
#define ERTS_SET_GOT_SIGUSR1 \
- erts_smp_atomic_set(&erts_got_sigusr1, 1)
+ erts_smp_atomic32_set(&erts_got_sigusr1, 1)
#define ERTS_UNSET_GOT_SIGUSR1 \
- erts_smp_atomic_set(&erts_got_sigusr1, 0)
-static erts_smp_atomic_t have_prepared_crash_dump;
+ erts_smp_atomic32_set(&erts_got_sigusr1, 0)
+static erts_smp_atomic32_t have_prepared_crash_dump;
#define ERTS_PREPARED_CRASH_DUMP \
- ((int) erts_smp_atomic_xchg(&have_prepared_crash_dump, 1))
+ ((int) erts_smp_atomic32_xchg(&have_prepared_crash_dump, 1))
#else
volatile int erts_got_sigusr1;
#define ERTS_SET_GOT_SIGUSR1 (erts_got_sigusr1 = 1)
@@ -235,11 +240,11 @@ static int max_files = -1;
* a few variables used by the break handler
*/
#ifdef ERTS_SMP
-erts_smp_atomic_t erts_break_requested;
+erts_smp_atomic32_t erts_break_requested;
#define ERTS_SET_BREAK_REQUESTED \
- erts_smp_atomic_set(&erts_break_requested, (erts_aint_t) 1)
+ erts_smp_atomic32_set(&erts_break_requested, (erts_aint32_t) 1)
#define ERTS_UNSET_BREAK_REQUESTED \
- erts_smp_atomic_set(&erts_break_requested, (erts_aint_t) 0)
+ erts_smp_atomic32_set(&erts_break_requested, (erts_aint32_t) 0)
#else
volatile int erts_break_requested = 0;
#define ERTS_SET_BREAK_REQUESTED (erts_break_requested = 1)
@@ -504,9 +509,9 @@ erts_sys_pre_init(void)
#endif
}
#ifdef ERTS_SMP
- erts_smp_atomic_init(&erts_break_requested, 0);
- erts_smp_atomic_init(&erts_got_sigusr1, 0);
- erts_smp_atomic_init(&have_prepared_crash_dump, 0);
+ erts_smp_atomic32_init(&erts_break_requested, 0);
+ erts_smp_atomic32_init(&erts_got_sigusr1, 0);
+ erts_smp_atomic32_init(&have_prepared_crash_dump, 0);
#else
erts_break_requested = 0;
erts_got_sigusr1 = 0;
@@ -2989,11 +2994,27 @@ init_smp_sig_notify(void)
NULL,
&thr_opts);
}
+#ifdef __DARWIN__
+
+int erts_darwin_main_thread_pipe[2];
+int erts_darwin_main_thread_result_pipe[2];
+static void initialize_darwin_main_thread_pipes(void)
+{
+ if (pipe(erts_darwin_main_thread_pipe) < 0 ||
+ pipe(erts_darwin_main_thread_result_pipe) < 0) {
+ erl_exit(1,"Fatal error initializing Darwin main thread stealing");
+ }
+}
+
+#endif
void
erts_sys_main_thread(void)
{
erts_thread_disable_fpe();
+#ifdef __DARWIN__
+ initialize_darwin_main_thread_pipes();
+#endif
/* Become signal receiver thread... */
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_set_thread_name("signal_receiver");
@@ -3002,6 +3023,27 @@ erts_sys_main_thread(void)
smp_sig_notify(0); /* Notify initialized */
while (1) {
/* Wait for a signal to arrive... */
+#ifdef __DARWIN__
+ /*
+ * The wx driver needs to be able to steal the main thread for Cocoa to
+ * work properly.
+ */
+ fd_set readfds;
+ int res;
+
+ FD_ZERO(&readfds);
+ FD_SET(erts_darwin_main_thread_pipe[0], &readfds);
+ res = select(erts_darwin_main_thread_pipe[0] + 1, &readfds, NULL, NULL, NULL);
+ if (res > 0 && FD_ISSET(erts_darwin_main_thread_pipe[0],&readfds)) {
+ void* (*func)(void*);
+ void* arg;
+ void *resp;
+ read(erts_darwin_main_thread_pipe[0],&func,sizeof(void* (*)(void*)));
+ read(erts_darwin_main_thread_pipe[0],&arg, sizeof(void*));
+ resp = (*func)(arg);
+ write(erts_darwin_main_thread_result_pipe[1],&resp,sizeof(void *));
+ }
+#else
#ifdef DEBUG
int res =
#else
@@ -3010,6 +3052,7 @@ erts_sys_main_thread(void)
select(0, NULL, NULL, NULL, NULL);
ASSERT(res < 0);
ASSERT(errno == EINTR);
+#endif
}
}
@@ -3109,226 +3152,3 @@ erl_sys_args(int* argc, char** argv)
}
*argc = j;
}
-
-#ifdef ERTS_TIMER_THREAD
-
-/*
- * Interruptible-wait facility: low-level synchronisation state
- * and methods that are implementation dependent.
- *
- * Constraint: Every implementation must define 'struct erts_iwait'
- * with a field 'erts_smp_atomic_t state;'.
- */
-
-/* values for struct erts_iwait's state field */
-#define IWAIT_WAITING 0
-#define IWAIT_AWAKE 1
-#define IWAIT_INTERRUPT 2
-
-#if 0 /* XXX: needs feature test in erts/configure.in */
-
-/*
- * This is an implementation of the interruptible wait facility on
- * top of Linux-specific futexes.
- */
-#include <asm/unistd.h>
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-static int sys_futex(void *futex, int op, int val, const struct timespec *timeout)
-{
- return syscall(__NR_futex, futex, op, val, timeout);
-}
-
-struct erts_iwait {
- erts_smp_atomic_t state; /* &state.counter is our futex */
-};
-
-static void iwait_lowlevel_init(struct erts_iwait *iwait) { /* empty */ }
-
-static void iwait_lowlevel_wait(struct erts_iwait *iwait, struct timeval *delay)
-{
- struct timespec timeout;
- int res;
-
- timeout.tv_sec = delay->tv_sec;
- timeout.tv_nsec = delay->tv_usec * 1000;
- res = sys_futex((void*)&iwait->state.counter, FUTEX_WAIT, IWAIT_WAITING, &timeout);
- if (res < 0 && errno != ETIMEDOUT && errno != EWOULDBLOCK && errno != EINTR)
- perror("FUTEX_WAIT");
-}
-
-static void iwait_lowlevel_interrupt(struct erts_iwait *iwait)
-{
- int res = sys_futex((void*)&iwait->state.counter, FUTEX_WAKE, 1, NULL);
- if (res < 0)
- perror("FUTEX_WAKE");
-}
-
-#else /* using poll() or select() */
-
-/*
- * This is an implementation of the interruptible wait facility on
- * top of pipe(), poll() or select(), read(), and write().
- */
-struct erts_iwait {
- erts_smp_atomic_t state;
- int read_fd; /* wait polls and reads this fd */
- int write_fd; /* interrupt writes this fd */
-};
-
-static void iwait_lowlevel_init(struct erts_iwait *iwait)
-{
- int fds[2];
-
- if (pipe(fds) < 0) {
- perror("pipe()");
- exit(1);
- }
- iwait->read_fd = fds[0];
- iwait->write_fd = fds[1];
-}
-
-#if defined(ERTS_USE_POLL)
-
-#include <sys/poll.h>
-#define PERROR_POLL "poll()"
-
-static int iwait_lowlevel_poll(int read_fd, struct timeval *delay)
-{
- struct pollfd pollfd;
- int timeout;
-
- pollfd.fd = read_fd;
- pollfd.events = POLLIN;
- pollfd.revents = 0;
- timeout = delay->tv_sec * 1000 + delay->tv_usec / 1000;
- return poll(&pollfd, 1, timeout);
-}
-
-#else /* !ERTS_USE_POLL */
-
-#include <sys/select.h>
-#define PERROR_POLL "select()"
-
-static int iwait_lowlevel_poll(int read_fd, struct timeval *delay)
-{
- fd_set readfds;
-
- FD_ZERO(&readfds);
- FD_SET(read_fd, &readfds);
- return select(read_fd + 1, &readfds, NULL, NULL, delay);
-}
-
-#endif /* !ERTS_USE_POLL */
-
-static void iwait_lowlevel_wait(struct erts_iwait *iwait, struct timeval *delay)
-{
- int res;
- char buf[64];
-
- res = iwait_lowlevel_poll(iwait->read_fd, delay);
- if (res > 0)
- (void)read(iwait->read_fd, buf, sizeof buf);
- else if (res < 0 && errno != EINTR)
- perror(PERROR_POLL);
-}
-
-static void iwait_lowlevel_interrupt(struct erts_iwait *iwait)
-{
- int res = write(iwait->write_fd, "!", 1);
- if (res < 0)
- perror("write()");
-}
-
-#endif /* using poll() or select() */
-
-#if 0 /* not using poll() or select() */
-/*
- * This is an implementation of the interruptible wait facility on
- * top of pthread_cond_timedwait(). This has two problems:
- * 1. pthread_cond_timedwait() requires an absolute time point,
- * so the relative delay must be converted to absolute time.
- * Worse, this breaks if the machine's time is adjusted while
- * we're preparing to wait.
- * 2. Each cond operation requires additional mutex lock/unlock operations.
- *
- * Problem 2 is probably not too bad on Linux (they'll just become
- * relatively cheap futex operations), but problem 1 is the real killer.
- * Only use this implementation if no better alternatives are available!
- */
-struct erts_iwait {
- erts_smp_atomic_t state;
- pthread_cond_t cond;
- pthread_mutex_t mutex;
-};
-
-static void iwait_lowlevel_init(struct erts_iwait *iwait)
-{
- iwait->cond = (pthread_cond_t) PTHREAD_COND_INITIALIZER;
- iwait->mutex = (pthread_mutex_t) PTHREAD_MUTEX_INITIALIZER;
-}
-
-static void iwait_lowlevel_wait(struct erts_iwait *iwait, struct timeval *delay)
-{
- struct timeval tmp;
- struct timespec timeout;
-
- /* Due to pthread_cond_timedwait()'s use of absolute
- time, this must be the real gettimeofday(), _not_
- the "smoothed" one beam/erl_time_sup.c implements. */
- gettimeofday(&tmp, NULL);
-
- tmp.tv_sec += delay->tv_sec;
- tmp.tv_usec += delay->tv_usec;
- if (tmp.tv_usec >= 1000*1000) {
- tmp.tv_usec -= 1000*1000;
- tmp.tv_sec += 1;
- }
- timeout.tv_sec = tmp.tv_sec;
- timeout.tv_nsec = tmp.tv_usec * 1000;
- pthread_mutex_lock(&iwait->mutex);
- pthread_cond_timedwait(&iwait->cond, &iwait->mutex, &timeout);
- pthread_mutex_unlock(&iwait->mutex);
-}
-
-static void iwait_lowlevel_interrupt(struct erts_iwait *iwait)
-{
- pthread_mutex_lock(&iwait->mutex);
- pthread_cond_signal(&iwait->cond);
- pthread_mutex_unlock(&iwait->mutex);
-}
-
-#endif /* not using POLL */
-
-/*
- * Interruptible-wait facility. This is just a wrapper around the
- * low-level synchronisation code, where we maintain our logical
- * state in order to suppress some state transitions.
- */
-
-struct erts_iwait *erts_iwait_init(void)
-{
- struct erts_iwait *iwait = malloc(sizeof *iwait);
- if (!iwait) {
- perror("malloc");
- exit(1);
- }
- iwait_lowlevel_init(iwait);
- erts_smp_atomic_init(&iwait->state, IWAIT_AWAKE);
- return iwait;
-}
-
-void erts_iwait_wait(struct erts_iwait *iwait, struct timeval *delay)
-{
- if (erts_smp_atomic_xchg(&iwait->state, IWAIT_WAITING) != IWAIT_INTERRUPT)
- iwait_lowlevel_wait(iwait, delay);
- erts_smp_atomic_set(&iwait->state, IWAIT_AWAKE);
-}
-
-void erts_iwait_interrupt(struct erts_iwait *iwait)
-{
- if (erts_smp_atomic_xchg(&iwait->state, IWAIT_INTERRUPT) == IWAIT_WAITING)
- iwait_lowlevel_interrupt(iwait);
-}
-
-#endif /* ERTS_TIMER_THREAD */
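
The Darwin additions to sys.c above let another thread "steal" the main thread, which the wx driver needs for Cocoa: a worker writes a function pointer followed by its argument into erts_darwin_main_thread_pipe, and the main thread, parked in select(), calls the function and passes the result back over erts_darwin_main_thread_result_pipe. Below is a plain-POSIX sketch of that handshake outside the emulator; all identifiers in it are hypothetical:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/select.h>

typedef void *(*job_fn)(void *);
static int job_pipe[2], result_pipe[2];

static void *say_hello(void *arg)         /* runs on the main thread */
{
    printf("hello from main thread: %s\n", (char *) arg);
    return (void *) 42;
}

static void *worker(void *unused)         /* steals the main thread */
{
    job_fn f = say_hello;
    void *arg = "cocoa", *res;
    write(job_pipe[1], &f, sizeof f);     /* ship the function pointer... */
    write(job_pipe[1], &arg, sizeof arg); /* ...then its argument */
    read(result_pipe[0], &res, sizeof res);
    printf("worker got result %p\n", res);
    return NULL;
}

int main(void)
{
    pthread_t t;
    fd_set readfds;
    pipe(job_pipe);
    pipe(result_pipe);
    pthread_create(&t, NULL, worker, NULL);

    FD_ZERO(&readfds);
    FD_SET(job_pipe[0], &readfds);
    /* The emulator loops here forever; one iteration shows the handshake. */
    if (select(job_pipe[0] + 1, &readfds, NULL, NULL, NULL) > 0) {
        job_fn f;
        void *arg, *res;
        read(job_pipe[0], &f, sizeof f);
        read(job_pipe[0], &arg, sizeof arg);
        res = f(arg);                     /* run the job on this thread */
        write(result_pipe[1], &res, sizeof res);
    }
    pthread_join(t, NULL);
    return 0;
}

Shipping a raw function pointer through a pipe is safe here because both ends live in the same address space; the pipe only serializes who gets to run the call.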
diff --git a/erts/emulator/sys/unix/sys_float.c b/erts/emulator/sys/unix/sys_float.c
index 6e9376b0f3..8ec7b31ce0 100644
--- a/erts/emulator/sys/unix/sys_float.c
+++ b/erts/emulator/sys/unix/sys_float.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2001-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2001-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -36,11 +36,6 @@ erts_sys_init_float(void)
# endif
}
-static ERTS_INLINE void set_current_fp_exception(unsigned long pc)
-{
- /* nothing to do */
-}
-
#else /* !NO_FPE_SIGNALS */
#ifdef ERTS_SMP
diff --git a/erts/emulator/sys/vxworks/sys.c b/erts/emulator/sys/vxworks/sys.c
index 411b4b37cf..c6e7b65f32 100644
--- a/erts/emulator/sys/vxworks/sys.c
+++ b/erts/emulator/sys/vxworks/sys.c
@@ -85,7 +85,7 @@ EXTERN_FUNCTION(void, erl_exit, (int n, char*, _DOTS_));
EXTERN_FUNCTION(void, erl_error, (char*, va_list));
EXTERN_FUNCTION(int, driver_interrupt, (int, int));
EXTERN_FUNCTION(void, increment_time, (int));
-EXTERN_FUNCTION(int, next_time, (_VOID_));
+EXTERN_FUNCTION(int, erts_next_time, (_VOID_));
EXTERN_FUNCTION(void, set_reclaim_free_function, (FreeFunction));
EXTERN_FUNCTION(int, erl_mem_info_get, (MEM_PART_STATS *));
EXTERN_FUNCTION(void, erl_crash_dump, (char* file, int line, char* fmt, ...));
diff --git a/erts/emulator/sys/win32/erl_poll.c b/erts/emulator/sys/win32/erl_poll.c
index d84ae2ede2..1f2877b682 100644
--- a/erts/emulator/sys/win32/erl_poll.c
+++ b/erts/emulator/sys/win32/erl_poll.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2007-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2007-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -274,7 +274,6 @@ struct ErtsPollSet_ {
Waiter** waiter;
int allocated_waiters; /* Size of waiter array */
int num_waiters; /* Number of waiter threads. */
- erts_atomic_t sys_io_ready; /* Tells us there is I/O ready (already). */
int restore_events; /* Tells us to restore waiters events
next time around */
HANDLE event_io_ready; /* To be used when waiting for io */
@@ -282,12 +281,11 @@ struct ErtsPollSet_ {
volatile int standby_wait_counter; /* Number of threads to wait for */
CRITICAL_SECTION standby_crit; /* CS to guard the counter */
HANDLE standby_wait_event; /* Event signalled when counter == 0 */
+ erts_atomic32_t wakeup_state;
#ifdef ERTS_SMP
- erts_smp_atomic_t woken;
erts_smp_mtx_t mtx;
- erts_smp_atomic_t interrupt;
#endif
- erts_smp_atomic_t timeout;
+ erts_smp_atomic32_t timeout;
};
#ifdef ERTS_SMP
@@ -296,126 +294,24 @@ struct ErtsPollSet_ {
erts_smp_mtx_lock(&(PS)->mtx)
#define ERTS_POLLSET_UNLOCK(PS) \
erts_smp_mtx_unlock(&(PS)->mtx)
-#define ERTS_POLLSET_SET_POLLED_CHK(PS) \
- ((int) erts_smp_atomic_xchg(&(PS)->polled, (erts_aint_t) 1))
-#define ERTS_POLLSET_SET_POLLED(PS) \
- erts_smp_atomic_set(&(PS)->polled, (erts_aint_t) 1)
-#define ERTS_POLLSET_UNSET_POLLED(PS) \
- erts_smp_atomic_set(&(PS)->polled, (erts_aint_t) 0)
-#define ERTS_POLLSET_IS_POLLED(PS) \
- ((int) erts_smp_atomic_read(&(PS)->polled))
-
-#define ERTS_POLLSET_SET_POLLER_WOKEN_CHK(PS) set_poller_woken_chk((PS))
-#define ERTS_POLLSET_SET_POLLER_WOKEN(PS) \
-do { \
- ERTS_THR_MEMORY_BARRIER; \
- erts_smp_atomic_set(&(PS)->woken, (erts_aint_t) 1); \
-} while (0)
-#define ERTS_POLLSET_UNSET_POLLER_WOKEN(PS) \
-do { \
- erts_smp_atomic_set(&(PS)->woken, (erts_aint_t) 0); \
- ERTS_THR_MEMORY_BARRIER; \
-} while (0)
-#define ERTS_POLLSET_IS_POLLER_WOKEN(PS) \
- ((int) erts_smp_atomic_read(&(PS)->woken))
-
-#define ERTS_POLLSET_UNSET_INTERRUPTED_CHK(PS) unset_interrupted_chk((PS))
-#define ERTS_POLLSET_UNSET_INTERRUPTED(PS) \
-do { \
- erts_smp_atomic_set(&(PS)->interrupt, (erts_aint_t) 0); \
- ERTS_THR_MEMORY_BARRIER; \
-} while (0)
-#define ERTS_POLLSET_SET_INTERRUPTED(PS) \
-do { \
- ERTS_THR_MEMORY_BARRIER; \
- erts_smp_atomic_set(&(PS)->interrupt, (erts_aint_t) 1); \
-} while (0)
-#define ERTS_POLLSET_IS_INTERRUPTED(PS) \
- ((int) erts_smp_atomic_read(&(PS)->interrupt))
-
-static ERTS_INLINE int
-unset_interrupted_chk(ErtsPollSet ps)
-{
- int res = (int) erts_smp_atomic_xchg(&ps->interrupt, (erts_aint_t) 0);
- ERTS_THR_MEMORY_BARRIER;
- return res;
-
-}
-
-static ERTS_INLINE int
-set_poller_woken_chk(ErtsPollSet ps)
-{
- ERTS_THR_MEMORY_BARRIER;
- return (int) erts_smp_atomic_xchg(&ps->woken, (erts_aint_t) 1);
-}
#else
#define ERTS_POLLSET_LOCK(PS)
#define ERTS_POLLSET_UNLOCK(PS)
-#define ERTS_POLLSET_SET_POLLED_CHK(PS) 0
-#define ERTS_POLLSET_UNSET_POLLED(PS)
-#define ERTS_POLLSET_IS_POLLED(PS) 0
-#define ERTS_POLLSET_SET_POLLER_WOKEN_CHK(PS) 1
-#define ERTS_POLLSET_SET_POLLER_WOKEN(PS)
-#define ERTS_POLLSET_UNSET_POLLER_WOKEN(PS)
-#define ERTS_POLLSET_IS_POLLER_WOKEN(PS) 1
-
#endif
/*
- * While atomics are not yet implemented for windows in the common library...
- *
- * MSDN doc states that SMP machines and old compilers require
- * InterLockedExchange to properly read and write interlocked
- * variables, otherwise the processors might reschedule
- * the access and order of atomics access is destroyed...
- * While they only mention it in white-papers, the problem
- * in VS2003 is due to the IA64 arch, so we can still count
- * on the CPU not rescheduling the access to volatile in X86 arch using
- * even the slightly older compiler...
- *
- * So here's (hopefully) a subset of the generally working atomic
- * variable access...
- */
-
-#if defined(__GNUC__)
-# if defined(__i386__) || defined(__x86_64__)
-# define VOLATILE_IN_SEQUENCE 1
-# else
-# define VOLATILE_IN_SEQUENCE 0
-# endif
-#elif defined(_MSC_VER)
-# if _MSC_VER < 1300
-# define VOLATILE_IN_SEQUENCE 0 /* Dont trust really old compilers */
-# else
-# if defined(_M_IX86)
-# define VOLATILE_IN_SEQUENCE 1
-# else /* I.e. IA64 */
-# if _MSC_VER >= 1400
-# define VOLATILE_IN_SEQUENCE 1
-# else
-# define VOLATILE_IN_SEQUENCE 0
-# endif
-# endif
-# endif
-#else
-# define VOLATILE_IN_SEQUENCE 0
-#endif
-
-
-
-/*
* Communication with sys_interrupt
*/
#ifdef ERTS_SMP
-extern erts_smp_atomic_t erts_break_requested;
+extern erts_smp_atomic32_t erts_break_requested;
#define ERTS_SET_BREAK_REQUESTED \
- erts_smp_atomic_set(&erts_break_requested, (erts_aint_t) 1)
+ erts_smp_atomic32_set(&erts_break_requested, (erts_aint32_t) 1)
#define ERTS_UNSET_BREAK_REQUESTED \
- erts_smp_atomic_set(&erts_break_requested, (erts_aint_t) 0)
+ erts_smp_atomic32_set(&erts_break_requested, (erts_aint32_t) 0)
#else
extern volatile int erts_break_requested;
#define ERTS_SET_BREAK_REQUESTED (erts_break_requested = 1)
@@ -424,7 +320,7 @@ extern volatile int erts_break_requested;
static erts_mtx_t break_waiter_lock;
static HANDLE break_happened_event;
-static erts_atomic_t break_waiter_state;
+static erts_atomic32_t break_waiter_state;
#define BREAK_WAITER_GOT_BREAK 1
#define BREAK_WAITER_GOT_HALT 2
@@ -467,29 +363,168 @@ do { \
wait_standby(PS); \
} while(0)
-#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT && !defined(ERTS_SMP)
+#define ERTS_POLL_NOT_WOKEN ((erts_aint32_t) 0)
+#define ERTS_POLL_WOKEN_IO_READY ((erts_aint32_t) 1)
+#define ERTS_POLL_WOKEN_INTR ((erts_aint32_t) 2)
+#define ERTS_POLL_WOKEN_TIMEDOUT ((erts_aint32_t) 3)
static ERTS_INLINE int
-unset_interrupted_chk(ErtsPollSet ps)
+is_io_ready(ErtsPollSet ps)
{
- /* This operation isn't atomic, but we have no need at all for an
- atomic operation here... */
- int res = ps->interrupt;
- ps->interrupt = 0;
- return res;
+ return erts_atomic32_read(&ps->wakeup_state) == ERTS_POLL_WOKEN_IO_READY;
}
+static ERTS_INLINE void
+woke_up(ErtsPollSet ps)
+{
+ if (erts_atomic32_read(&ps->wakeup_state) == ERTS_POLL_NOT_WOKEN)
+ erts_atomic32_cmpxchg(&ps->wakeup_state,
+ ERTS_POLL_WOKEN_TIMEDOUT,
+ ERTS_POLL_NOT_WOKEN);
+#ifdef DEBUG
+ {
+ erts_aint32_t wakeup_state = erts_atomic32_read(&ps->wakeup_state);
+ switch (wakeup_state) {
+ case ERTS_POLL_WOKEN_IO_READY:
+ case ERTS_POLL_WOKEN_INTR:
+ case ERTS_POLL_WOKEN_TIMEDOUT:
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
#endif
+}
+
+static ERTS_INLINE int
+wakeup_cause(ErtsPollSet ps)
+{
+ int res;
+ erts_aint32_t wakeup_state = erts_atomic32_read(&ps->wakeup_state);
+ switch (wakeup_state) {
+ case ERTS_POLL_WOKEN_IO_READY:
+ res = 0;
+ break;
+ case ERTS_POLL_WOKEN_INTR:
+ res = EINTR;
+ break;
+ case ERTS_POLL_WOKEN_TIMEDOUT:
+ res = ETIMEDOUT;
+ break;
+ default:
+ res = 0;
+ erl_exit(ERTS_ABORT_EXIT,
+ "%s:%d: Internal error: Invalid wakeup_state=%d\n",
+ __FILE__, __LINE__, (int) wakeup_state);
+ }
+ return res;
+}
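
For reference, the wakeup_state field implements a small four-state machine. A minimal standalone sketch using C11 atomics (illustrative names; the real code uses the erts_atomic32_* wrappers and aborts on an inconsistent state):

#include <stdatomic.h>
#include <errno.h>

enum { NOT_WOKEN, WOKEN_IO_READY, WOKEN_INTR, WOKEN_TIMEDOUT };

/* woke_up(): a poller that returns from the wait with no recorded
 * reason claims a timeout, but only if no other thread raced in with
 * I/O-ready or an interrupt first. */
static void woke_up_sketch(atomic_int *ws)
{
    int expected = NOT_WOKEN;
    /* A failed compare-exchange means a real reason is already set;
     * keep it. */
    atomic_compare_exchange_strong(ws, &expected, WOKEN_TIMEDOUT);
}

/* wakeup_cause(): map the final state to the errno-style result. */
static int wakeup_cause_sketch(atomic_int *ws)
{
    switch (atomic_load(ws)) {
    case WOKEN_IO_READY: return 0;
    case WOKEN_INTR:     return EINTR;
    case WOKEN_TIMEDOUT: return ETIMEDOUT;
    default:             return 0; /* unreachable in a consistent run */
    }
}
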
+
+static ERTS_INLINE DWORD
+poll_wait_timeout(ErtsPollSet ps, SysTimeval *tvp)
+{
+ time_t timeout = tvp->tv_sec * 1000 + tvp->tv_usec / 1000;
+
+ if (timeout <= 0) {
+ woke_up(ps);
+ return (DWORD) 0;
+ }
+
+ ResetEvent(ps->event_io_ready);
+ /*
+ * Since we don't know the internals of ResetEvent() we issue
+ * a memory barrier as a safety precaution ensuring that
+ * the load of wakeup_state won't be reordered with stores made
+ * by ResetEvent().
+ */
+ ERTS_THR_MEMORY_BARRIER;
+ if (erts_atomic32_read(&ps->wakeup_state) != ERTS_POLL_NOT_WOKEN)
+ return (DWORD) 0;
+
+ if (timeout > ERTS_AINT32_T_MAX) /* Also prevents DWORD overflow */
+ timeout = ERTS_AINT32_T_MAX;
+
+ erts_smp_atomic32_set_relb(&ps->timeout, (erts_aint32_t) timeout);
+ return (DWORD) timeout;
+}
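
The millisecond conversion and clamping above can be isolated into a small helper; a sketch under the assumption that INT32_MAX stands in for ERTS_AINT32_T_MAX:

#include <stdint.h>

/* Convert a timeval-style timeout to whole milliseconds, as
 * poll_wait_timeout() does before handing it to WaitForMultipleObjects().
 * Clamping to INT32_MAX also keeps the value within a 32-bit DWORD. */
static uint32_t ms_timeout_sketch(long tv_sec, long tv_usec)
{
    long long ms = (long long) tv_sec * 1000 + tv_usec / 1000;
    if (ms <= 0)
        return 0;          /* don't block; poll and return */
    if (ms > INT32_MAX)
        ms = INT32_MAX;
    return (uint32_t) ms;
}
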
-#ifdef ERTS_SMP
static ERTS_INLINE void
-wake_poller(ErtsPollSet ps)
+wake_poller(ErtsPollSet ps, int io_ready)
{
- if (!ERTS_POLLSET_SET_POLLER_WOKEN_CHK(ps)) {
+ erts_aint32_t wakeup_state = erts_atomic32_read(&ps->wakeup_state);
+ if (io_ready) {
+ /* We may set the event multiple times. This is, however, harmless. */
+ erts_atomic32_set(&ps->wakeup_state, ERTS_POLL_WOKEN_IO_READY);
+ }
+ else {
+ while (wakeup_state != ERTS_POLL_WOKEN_IO_READY
+ && wakeup_state != ERTS_POLL_WOKEN_INTR) {
+ erts_aint32_t act = erts_atomic32_cmpxchg(&ps->wakeup_state,
+ ERTS_POLL_WOKEN_INTR,
+ wakeup_state);
+ if (act == wakeup_state) {
+ wakeup_state = act;
+ break;
+ }
+ wakeup_state = act;
+ }
+ }
+ if (wakeup_state == ERTS_POLL_NOT_WOKEN) {
+ /*
+ * Since we don't know the internals of SetEvent() we issue
+ * a memory barrier as a safety precaution ensuring that
+ * the store we just made to wakeup_state won't be reordered
+ * with loads in SetEvent().
+ */
+ ERTS_THR_MEMORY_BARRIER;
SetEvent(ps->event_io_ready);
}
}
-#endif
+
+static ERTS_INLINE void
+reset_io_ready(ErtsPollSet ps)
+{
+ erts_atomic32_set(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN);
+}
+
+static ERTS_INLINE void
+restore_io_ready(ErtsPollSet ps)
+{
+ erts_atomic32_set(&ps->wakeup_state, ERTS_POLL_WOKEN_IO_READY);
+}
+
+/*
+ * notify_io_ready() is used by threads waiting for events to
+ * notify a poller thread that I/O is ready.
+ */
+static ERTS_INLINE void
+notify_io_ready(ErtsPollSet ps)
+{
+ wake_poller(ps, 1);
+}
+
+static ERTS_INLINE void
+reset_interrupt(ErtsPollSet ps)
+{
+ /* We need to keep io-ready if set */
+ erts_aint32_t wakeup_state = erts_atomic32_read(&ps->wakeup_state);
+ while (wakeup_state != ERTS_POLL_WOKEN_IO_READY
+ && wakeup_state != ERTS_POLL_NOT_WOKEN) {
+ erts_aint32_t act = erts_atomic32_cmpxchg(&ps->wakeup_state,
+ ERTS_POLL_NOT_WOKEN,
+ wakeup_state);
+ if (wakeup_state == act)
+ break;
+ wakeup_state = act;
+ }
+}
+
+static ERTS_INLINE void
+set_interrupt(ErtsPollSet ps)
+{
+ wake_poller(ps, 0);
+}
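
wake_poller() and reset_interrupt() share the same pattern: retry a compare-exchange until the state is either updated or found to be one that must be preserved. A condensed sketch with C11 atomics, using the same state constants as the sketch above:

#include <stdatomic.h>

enum { NOT_WOKEN, WOKEN_IO_READY, WOKEN_INTR, WOKEN_TIMEDOUT };

/* set_interrupt(): never downgrade an I/O-ready wakeup. */
static void set_interrupt_sketch(atomic_int *ws)
{
    int cur = atomic_load(ws);
    while (cur != WOKEN_IO_READY && cur != WOKEN_INTR) {
        /* On failure, cur is reloaded with the value that beat us. */
        if (atomic_compare_exchange_strong(ws, &cur, WOKEN_INTR))
            break;
    }
}

/* reset_interrupt(): clear the state but keep I/O-ready if set. */
static void reset_interrupt_sketch(atomic_int *ws)
{
    int cur = atomic_load(ws);
    while (cur != WOKEN_IO_READY && cur != NOT_WOKEN) {
        if (atomic_compare_exchange_strong(ws, &cur, NOT_WOKEN))
            break;
    }
}
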
static void setup_standby_wait(ErtsPollSet ps, int num_threads)
{
@@ -653,14 +688,14 @@ static void *break_waiter(void *param)
case WAIT_OBJECT_0:
ResetEvent(harr[0]);
erts_mtx_lock(&break_waiter_lock);
- erts_atomic_set(&break_waiter_state,BREAK_WAITER_GOT_BREAK);
+ erts_atomic32_set(&break_waiter_state,BREAK_WAITER_GOT_BREAK);
SetEvent(break_happened_event);
erts_mtx_unlock(&break_waiter_lock);
break;
case (WAIT_OBJECT_0+1):
ResetEvent(harr[1]);
erts_mtx_lock(&break_waiter_lock);
- erts_atomic_set(&break_waiter_state,BREAK_WAITER_GOT_HALT);
+ erts_atomic32_set(&break_waiter_state,BREAK_WAITER_GOT_HALT);
SetEvent(break_happened_event);
erts_mtx_unlock(&break_waiter_lock);
break;
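
The break_waiter thread follows a plain Windows event pattern: wait on two manual-reset events, record which one fired in a 32-bit atomic, then signal the pollers. A hypothetical condensed form (the real code additionally holds break_waiter_lock around the store and SetEvent(); InterlockedExchange stands in for erts_atomic32_set()):

#include <windows.h>

#define GOT_BREAK 1   /* BREAK_WAITER_GOT_BREAK */
#define GOT_HALT  2   /* BREAK_WAITER_GOT_HALT */

static void break_waiter_sketch(HANDLE harr[2], HANDLE break_happened,
                                volatile LONG *state)
{
    for (;;) {
        DWORD i = WaitForMultipleObjects(2, harr, FALSE, INFINITE);
        if (i == WAIT_OBJECT_0 || i == WAIT_OBJECT_0 + 1) {
            ResetEvent(harr[i - WAIT_OBJECT_0]);
            InterlockedExchange(state, i == WAIT_OBJECT_0 ? GOT_BREAK
                                                          : GOT_HALT);
            SetEvent(break_happened);  /* wake erts_poll_wait() */
        }
    }
}
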
@@ -767,12 +802,7 @@ event_happened:
consistency_check(w);
#endif
ASSERT(WAIT_OBJECT_0 < i && i < WAIT_OBJECT_0+w->active_events);
- if (!erts_atomic_xchg(&ps->sys_io_ready,1)) {
- HARDDEBUGF(("SET EventIoReady (%d)",erts_atomic_read(&ps->sys_io_ready)));
- SetEvent(ps->event_io_ready);
- } else {
- HARDDEBUGF(("DONT SET EventIoReady"));
- }
+ notify_io_ready(ps);
/*
* The main thread won't start working on our arrays until we're
@@ -967,15 +997,10 @@ static int cancel_driver_select(ErtsPollSet ps, HANDLE event)
void erts_poll_interrupt(ErtsPollSet ps, int set /* bool */)
{
HARDTRACEF(("In erts_poll_interrupt(%d)",set));
-#ifdef ERTS_SMP
- if (set) {
- ERTS_POLLSET_SET_INTERRUPTED(ps);
- wake_poller(ps);
- }
- else {
- ERTS_POLLSET_UNSET_INTERRUPTED(ps);
- }
-#endif
+ if (!set)
+ reset_interrupt(ps);
+ else
+ set_interrupt(ps);
HARDTRACEF(("Out erts_poll_interrupt(%d)",set));
}
@@ -984,17 +1009,10 @@ void erts_poll_interrupt_timed(ErtsPollSet ps,
long msec)
{
HARDTRACEF(("In erts_poll_interrupt_timed(%d,%ld)",set,msec));
-#ifdef ERTS_SMP
- if (set) {
- if (erts_smp_atomic_read(&ps->timeout) > (erts_aint_t) msec) {
- ERTS_POLLSET_SET_INTERRUPTED(ps);
- wake_poller(ps);
- }
- }
- else {
- ERTS_POLLSET_UNSET_INTERRUPTED(ps);
- }
-#endif
+ if (!set)
+ reset_interrupt(ps);
+ else if (erts_smp_atomic32_read_acqb(&ps->timeout) > (erts_aint32_t) msec)
+ set_interrupt(ps);
HARDTRACEF(("Out erts_poll_interrupt_timed"));
}
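
The timed interrupt relies on the release store of ps->timeout in poll_wait_timeout() pairing with the acquire read above: a wakeup is only paid for when the poller would otherwise sleep past the requested time. A sketch of that pairing with C11 atomics (illustrative names):

#include <stdatomic.h>

static atomic_long ps_timeout;   /* stands in for ps->timeout */

/* Poller side (poll_wait_timeout): publish the sleep length with
 * release semantics before blocking. */
static void publish_timeout(long ms)
{
    atomic_store_explicit(&ps_timeout, ms, memory_order_release);
}

/* Interrupter side (erts_poll_interrupt_timed): the acquire load pairs
 * with the release store above; skip the SetEvent() machinery when the
 * poller will wake up soon enough anyway. */
static int should_wake(long msec)
{
    return atomic_load_explicit(&ps_timeout, memory_order_acquire) > msec;
}
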
@@ -1068,10 +1086,8 @@ void erts_poll_controlv(ErtsPollSet ps,
int erts_poll_wait(ErtsPollSet ps,
ErtsPollResFd pr[],
int *len,
- SysTimeval *utvp)
+ SysTimeval *tvp)
{
- SysTimeval *tvp = utvp;
- SysTimeval itv;
int no_fds;
DWORD timeout;
EventData* ev;
@@ -1084,7 +1100,7 @@ int erts_poll_wait(ErtsPollSet ps,
HARDTRACEF(("In erts_poll_wait"));
ERTS_POLLSET_LOCK(ps);
- if (!erts_atomic_read(&ps->sys_io_ready) && ps->restore_events) {
+ if (!is_io_ready(ps) && ps->restore_events) {
HARDDEBUGF(("Restore events: %d",ps->num_waiters));
ps->restore_events = 0;
for (i = 0; i < ps->num_waiters; ++i) {
@@ -1102,7 +1118,7 @@ int erts_poll_wait(ErtsPollSet ps,
if (w->highwater != w->active_events) {
HARDDEBUGF(("Oups!"));
/* Oups, got signalled before we took the lock, can't reset */
- if(erts_atomic_read(&ps->sys_io_ready) == 0) {
+ if(!is_io_ready(ps)) {
erl_exit(1,"Internal error: "
"Inconsistent io structures in erl_poll.\n");
}
@@ -1127,39 +1143,27 @@ int erts_poll_wait(ErtsPollSet ps,
no_fds = ERTS_POLL_MAX_RES;
#endif
+ timeout = poll_wait_timeout(ps, tvp);
- ResetEvent(ps->event_io_ready);
- ERTS_POLLSET_UNSET_POLLER_WOKEN(ps);
-
-#ifdef ERTS_SMP
- if (ERTS_POLLSET_IS_INTERRUPTED(ps)) {
- /* Interrupt use zero timeout */
- itv.tv_sec = 0;
- itv.tv_usec = 0;
- tvp = &itv;
- }
-#endif
-
- timeout = tvp->tv_sec * 1000 + tvp->tv_usec / 1000;
/*HARDDEBUGF(("timeout = %ld",(long) timeout));*/
- erts_smp_atomic_set(&ps->timeout, timeout);
- if (timeout > 0 && ! erts_atomic_read(&ps->sys_io_ready) && ! erts_atomic_read(&break_waiter_state)) {
+ if (timeout > 0 && !erts_atomic32_read(&break_waiter_state)) {
HANDLE harr[2] = {ps->event_io_ready, break_happened_event};
int num_h = 2;
- HARDDEBUGF(("Start waiting %d [%d]",num_h, (long) timeout));
+ HARDDEBUGF(("Start waiting %d [%d]",num_h, (int) timeout));
ERTS_POLLSET_UNLOCK(ps);
WaitForMultipleObjects(num_h, harr, FALSE, timeout);
ERTS_POLLSET_LOCK(ps);
- HARDDEBUGF(("Stop waiting %d [%d]",num_h, (long) timeout));
+ HARDDEBUGF(("Stop waiting %d [%d]",num_h, (int) timeout));
+ woke_up(ps);
}
ERTS_UNSET_BREAK_REQUESTED;
- if(erts_atomic_read(&break_waiter_state)) {
+ if(erts_atomic32_read(&break_waiter_state)) {
erts_mtx_lock(&break_waiter_lock);
- break_state = erts_atomic_read(&break_waiter_state);
- erts_atomic_set(&break_waiter_state,0);
+ break_state = erts_atomic32_read(&break_waiter_state);
+ erts_atomic32_set(&break_waiter_state,0);
ResetEvent(break_happened_event);
erts_mtx_unlock(&break_waiter_lock);
switch (break_state) {
@@ -1174,15 +1178,13 @@ int erts_poll_wait(ErtsPollSet ps,
}
}
- ERTS_POLLSET_SET_POLLER_WOKEN(ps);
-
- if (!erts_atomic_read(&ps->sys_io_ready)) {
- res = EINTR;
- HARDDEBUGF(("EINTR!"));
- goto done;
+ res = wakeup_cause(ps);
+ if (res != 0) {
+ HARDDEBUGF(("%s!", res == EINTR ? "EINTR" : "ETIMEDOUT"));
+ goto done;
}
- erts_atomic_set(&ps->sys_io_ready,0);
+ reset_io_ready(ps);
n = ps->num_waiters;
@@ -1204,9 +1206,9 @@ int erts_poll_wait(ErtsPollSet ps,
if (num >= no_fds) {
w->highwater=j+1;
erts_mtx_unlock(&w->mtx);
- /* This might mean we still have data to report, set
- back the global flag! */
- erts_atomic_set(&ps->sys_io_ready,1);
+ /* This might mean we still have data to report,
+ restore flag indicating I/O ready! */
+ restore_io_ready(ps);
HARDDEBUGF(("To many FD's to report!"));
goto done;
}
@@ -1228,7 +1230,7 @@ int erts_poll_wait(ErtsPollSet ps,
erts_mtx_unlock(&w->mtx);
}
done:
- erts_smp_atomic_set(&ps->timeout, ERTS_AINT_T_MAX);
+ erts_smp_atomic32_set(&ps->timeout, ERTS_AINT32_T_MAX);
*len = num;
ERTS_POLLSET_UNLOCK(ps);
HARDTRACEF(("Out erts_poll_wait"));
@@ -1306,15 +1308,13 @@ ErtsPollSet erts_poll_create_pollset(void)
ps->standby_wait_counter = 0;
ps->event_io_ready = CreateManualEvent(FALSE);
ps->standby_wait_event = CreateManualEvent(FALSE);
- erts_atomic_init(&ps->sys_io_ready,0);
ps->restore_events = 0;
+ erts_atomic32_init(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN);
#ifdef ERTS_SMP
- erts_smp_atomic_init(&ps->woken, 0);
erts_smp_mtx_init(&ps->mtx, "pollset");
- erts_smp_atomic_init(&ps->interrupt, 0);
#endif
- erts_smp_atomic_init(&ps->timeout, ERTS_AINT_T_MAX);
+ erts_smp_atomic32_init(&ps->timeout, ERTS_AINT32_T_MAX);
HARDTRACEF(("Out erts_poll_create_pollset"));
return ps;
@@ -1366,7 +1366,7 @@ void erts_poll_init(void)
erts_mtx_init(&break_waiter_lock,"break_waiter_lock");
break_happened_event = CreateManualEvent(FALSE);
- erts_atomic_init(&break_waiter_state, 0);
+ erts_atomic32_init(&break_waiter_state, 0);
erts_thr_create(&thread, &break_waiter, NULL, NULL);
ERTS_UNSET_BREAK_REQUESTED;
diff --git a/erts/emulator/sys/win32/sys_interrupt.c b/erts/emulator/sys/win32/sys_interrupt.c
index 262f84babc..943c338794 100644
--- a/erts/emulator/sys/win32/sys_interrupt.c
+++ b/erts/emulator/sys/win32/sys_interrupt.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -31,11 +31,11 @@
#endif
#ifdef ERTS_SMP
-erts_smp_atomic_t erts_break_requested;
+erts_smp_atomic32_t erts_break_requested;
#define ERTS_SET_BREAK_REQUESTED \
- erts_smp_atomic_set(&erts_break_requested, (erts_aint_t) 1)
+ erts_smp_atomic32_set(&erts_break_requested, (erts_aint32_t) 1)
#define ERTS_UNSET_BREAK_REQUESTED \
- erts_smp_atomic_set(&erts_break_requested, (erts_aint_t) 0)
+ erts_smp_atomic32_set(&erts_break_requested, (erts_aint32_t) 0)
#else
volatile int erts_break_requested = 0;
#define ERTS_SET_BREAK_REQUESTED (erts_break_requested = 1)
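
The break-request flag only ever holds 0 or 1, which is why the commit can narrow it from a word-sized to a 32-bit atomic. The same flag expressed with C11 atomics, without the SMP conditional (a sketch, not the ERTS API):

#include <stdatomic.h>

static atomic_int break_requested;   /* 32 bits are plenty for a flag */

static inline void set_break_requested(void)
{
    atomic_store(&break_requested, 1);
}

static inline void unset_break_requested(void)
{
    atomic_store(&break_requested, 0);
}
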