Diffstat (limited to 'erts/emulator/sys')
-rw-r--r--  erts/emulator/sys/common/erl_check_io.c        74
-rw-r--r--  erts/emulator/sys/common/erl_check_io.h        15
-rw-r--r--  erts/emulator/sys/common/erl_mseg.c           682
-rw-r--r--  erts/emulator/sys/common/erl_mseg.h            11
-rw-r--r--  erts/emulator/sys/common/erl_poll.c           287
-rw-r--r--  erts/emulator/sys/common/erl_poll.h             7
-rw-r--r--  erts/emulator/sys/unix/erl_unix_sys.h           7
-rw-r--r--  erts/emulator/sys/unix/sys.c                  320
-rw-r--r--  erts/emulator/sys/vxworks/erl_vxworks_sys.h     3
-rw-r--r--  erts/emulator/sys/vxworks/sys.c                24
-rw-r--r--  erts/emulator/sys/win32/erl_poll.c             74
-rw-r--r--  erts/emulator/sys/win32/erl_win32_sys_ddll.c    5
-rw-r--r--  erts/emulator/sys/win32/erl_win_dyn_driver.h   44
-rw-r--r--  erts/emulator/sys/win32/erl_win_sys.h          41
-rwxr-xr-x  [-rw-r--r--]  erts/emulator/sys/win32/sys.c   219
-rw-r--r--  erts/emulator/sys/win32/sys_env.c              16
-rw-r--r--  erts/emulator/sys/win32/sys_float.c             5
-rw-r--r--  erts/emulator/sys/win32/sys_interrupt.c        12
-rw-r--r--  erts/emulator/sys/win32/sys_time.c            327
19 files changed, 1202 insertions, 971 deletions
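Note (not part of the patch): much of the mechanical churn below is the switch from the plain erts_smp_atomic_* operations to the explicitly ordered *_nob ("no barrier", i.e. relaxed) variants. As a rough standalone illustration only — using C11 atomics rather than the ERTS atomic API, with made-up names — a relaxed load/store of a monotonically grown length counter looks like this:

/* Standalone C11 sketch, not ERTS code: what a *_nob-style access
 * corresponds to conceptually (an atomic access with no ordering
 * guarantees beyond atomicity). */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int drv_ev_state_len = 0;   /* only ever grows */

static void set_len_nob(int new_len)
{
    /* relaxed store: atomic, but imposes no memory ordering */
    atomic_store_explicit(&drv_ev_state_len, new_len, memory_order_relaxed);
}

static int read_len_nob(void)
{
    /* relaxed load: enough for a bounds check that tolerates staleness */
    return atomic_load_explicit(&drv_ev_state_len, memory_order_relaxed);
}

int main(void)
{
    set_len_nob(128);
    printf("len=%d\n", read_len_nob());
    return 0;
}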
diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c
index cd4de21d65..c1336c60d9 100644
--- a/erts/emulator/sys/common/erl_check_io.c
+++ b/erts/emulator/sys/common/erl_check_io.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -35,6 +35,8 @@
#include "sys.h"
#include "global.h"
#include "erl_check_io.h"
+#include "erl_thr_progress.h"
+#include "dtrace-wrapper.h"
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
# define ERTS_DRV_EV_STATE_EXTRA_SIZE 128
@@ -66,6 +68,9 @@ typedef char EventStateFlags;
#define ERTS_CIO_POLL_CTL ERTS_POLL_EXPORT(erts_poll_control)
#define ERTS_CIO_POLL_WAIT ERTS_POLL_EXPORT(erts_poll_wait)
+#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+#define ERTS_CIO_POLL_AS_INTR ERTS_POLL_EXPORT(erts_poll_async_sig_interrupt)
+#endif
#define ERTS_CIO_POLL_INTR ERTS_POLL_EXPORT(erts_poll_interrupt)
#define ERTS_CIO_POLL_INTR_TMD ERTS_POLL_EXPORT(erts_poll_interrupt_timed)
#define ERTS_CIO_NEW_POLLSET ERTS_POLL_EXPORT(erts_poll_create_pollset)
@@ -218,7 +223,7 @@ remember_removed(ErtsDrvEventState *state, struct pollset_info* psi)
#ifdef ERTS_SMP
struct removed_fd *fdlp;
ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(fd_mtx(state->fd)));
- if (erts_smp_atomic_read(&psi->in_poll_wait)) {
+ if (erts_smp_atomic_read_nob(&psi->in_poll_wait)) {
state->remove_cnt++;
ASSERT(state->remove_cnt > 0);
fdlp = removed_fd_alloc();
@@ -310,7 +315,8 @@ forget_removed(struct pollset_info* psi)
erts_smp_mtx_unlock(mtx);
if (drv_ptr) {
int was_unmasked = erts_block_fpe();
- (*drv_ptr->stop_select) (fd, NULL);
+ DTRACE1(driver_stop_select, drv_ptr->name);
+ (*drv_ptr->stop_select) ((ErlDrvEvent) fd, NULL);
erts_unblock_fpe(was_unmasked);
if (drv_ptr->handle) {
erts_ddll_dereference_driver(drv_ptr->handle);
@@ -333,7 +339,7 @@ grow_drv_ev_state(int min_ix)
new_len = max_fds;
erts_smp_mtx_lock(&drv_ev_state_grow_lock);
- if (erts_smp_atomic_read(&drv_ev_state_len) <= min_ix) {
+ if (erts_smp_atomic_read_nob(&drv_ev_state_len) <= min_ix) {
for (i=0; i<DRV_EV_STATE_LOCK_CNT; i++) { /* lock all fd's */
erts_smp_mtx_lock(&drv_ev_state_locks[i].lck);
}
@@ -343,7 +349,7 @@ grow_drv_ev_state(int min_ix)
sizeof(ErtsDrvEventState)*new_len)
: erts_alloc(ERTS_ALC_T_DRV_EV_STATE,
sizeof(ErtsDrvEventState)*new_len));
- for (i = erts_smp_atomic_read(&drv_ev_state_len); i < new_len; i++) {
+ for (i = erts_smp_atomic_read_nob(&drv_ev_state_len); i < new_len; i++) {
drv_ev_state[i].fd = (ErtsSysFdType) i;
drv_ev_state[i].driver.select = NULL;
drv_ev_state[i].events = 0;
@@ -351,7 +357,7 @@ grow_drv_ev_state(int min_ix)
drv_ev_state[i].type = ERTS_EV_TYPE_NONE;
drv_ev_state[i].flags = 0;
}
- erts_smp_atomic_set(&drv_ev_state_len, new_len);
+ erts_smp_atomic_set_nob(&drv_ev_state_len, new_len);
for (i=0; i<DRV_EV_STATE_LOCK_CNT; i++) {
erts_smp_mtx_unlock(&drv_ev_state_locks[i].lck);
}
@@ -492,12 +498,15 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
ErtsDrvEventState *state;
int wake_poller;
int ret;
+#ifdef USE_VM_PROBES
+ DTRACE_CHARBUF(name, 64);
+#endif
ERTS_SMP_LC_ASSERT(erts_drvport2port(ix)
&& erts_lc_is_port_locked(erts_drvport2port(ix)));
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
- if ((unsigned)fd >= (unsigned)erts_smp_atomic_read(&drv_ev_state_len)) {
+ if ((unsigned)fd >= (unsigned)erts_smp_atomic_read_nob(&drv_ev_state_len)) {
if (fd < 0) {
return -1;
}
@@ -521,6 +530,10 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
if (IS_FD_UNKNOWN(state)) {
/* fast track to stop_select callback */
stop_select_fn = erts_drvport2port(ix)->drv_ptr->stop_select;
+#ifdef USE_VM_PROBES
+ strncpy(name, erts_drvport2port(ix)->drv_ptr->name, sizeof(name)-1);
+ name[sizeof(name)-1] = '\0';
+#endif
ret = 0;
goto done_unknown;
}
@@ -657,6 +670,10 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
/* Safe to close fd now as it is not in pollset
or there was no need to eject fd (kernel poll) */
stop_select_fn = drv_ptr->stop_select;
+#ifdef USE_VM_PROBES
+ strncpy(name, erts_drvport2port(ix)->drv_ptr->name, sizeof(name)-1);
+ name[sizeof(name)-1] = '\0';
+#endif
}
else {
/* Not safe to close fd, postpone stop_select callback. */
@@ -682,6 +699,7 @@ done_unknown:
erts_smp_mtx_unlock(fd_mtx(fd));
if (stop_select_fn) {
int was_unmasked = erts_block_fpe();
+ DTRACE1(driver_stop_select, name);
(*stop_select_fn)(e, NULL);
erts_unblock_fpe(was_unmasked);
}
@@ -709,7 +727,7 @@ ERTS_CIO_EXPORT(driver_event)(ErlDrvPort ix,
&& erts_lc_is_port_locked(erts_drvport2port(ix)));
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
- if ((unsigned)fd >= (unsigned)erts_smp_atomic_read(&drv_ev_state_len)) {
+ if ((unsigned)fd >= (unsigned)erts_smp_atomic_read_nob(&drv_ev_state_len)) {
if (fd < 0)
return -1;
if (fd >= max_fds) {
@@ -1115,6 +1133,14 @@ eready(Eterm id, ErtsDrvEventState *state, ErlDrvEventData event_data)
static void bad_fd_in_pollset( ErtsDrvEventState *, Eterm, Eterm, ErtsPollEvents);
+#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+void
+ERTS_CIO_EXPORT(erts_check_io_async_sig_interrupt)(void)
+{
+ ERTS_CIO_POLL_AS_INTR(pollset.ps);
+}
+#endif
+
void
ERTS_CIO_EXPORT(erts_check_io_interrupt)(int set)
{
@@ -1122,7 +1148,8 @@ ERTS_CIO_EXPORT(erts_check_io_interrupt)(int set)
}
void
-ERTS_CIO_EXPORT(erts_check_io_interrupt_timed)(int set, long msec)
+ERTS_CIO_EXPORT(erts_check_io_interrupt_timed)(int set,
+ erts_short_time_t msec)
{
ERTS_CIO_POLL_INTR_TMD(pollset.ps, set, msec);
}
@@ -1153,17 +1180,15 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_check_exact(NULL, 0); /* No locks should be locked */
#endif
- erts_smp_activity_begin(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
pollres_len = sizeof(pollres)/sizeof(ErtsPollResFd);
- erts_smp_atomic_set(&pollset.in_poll_wait, 1);
+ erts_smp_atomic_set_nob(&pollset.in_poll_wait, 1);
poll_ret = ERTS_CIO_POLL_WAIT(pollset.ps, pollres, &pollres_len, &wait_time);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_check_exact(NULL, 0); /* No locks should be locked */
#endif
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
erts_deliver_time(); /* sync the machine's idea of time */
@@ -1173,7 +1198,7 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
#endif
if (poll_ret != 0) {
- erts_smp_atomic_set(&pollset.in_poll_wait, 0);
+ erts_smp_atomic_set_nob(&pollset.in_poll_wait, 0);
forget_removed(&pollset);
if (poll_ret == EAGAIN) {
goto restart;
@@ -1304,7 +1329,7 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
#endif
}
- erts_smp_atomic_set(&pollset.in_poll_wait, 0);
+ erts_smp_atomic_set_nob(&pollset.in_poll_wait, 0);
forget_removed(&pollset);
}
@@ -1419,7 +1444,7 @@ static void drv_ev_state_free(void *des)
void
ERTS_CIO_EXPORT(erts_init_check_io)(void)
{
- erts_smp_atomic_init(&pollset.in_poll_wait, 0);
+ erts_smp_atomic_init_nob(&pollset.in_poll_wait, 0);
ERTS_CIO_POLL_INIT();
pollset.ps = ERTS_CIO_NEW_POLLSET();
@@ -1441,7 +1466,7 @@ ERTS_CIO_EXPORT(erts_init_check_io)(void)
#endif
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
max_fds = ERTS_CIO_POLL_MAX_FDS();
- erts_smp_atomic_init(&drv_ev_state_len, 0);
+ erts_smp_atomic_init_nob(&drv_ev_state_len, 0);
drv_ev_state = NULL;
erts_smp_mtx_init(&drv_ev_state_grow_lock, "drv_ev_state_grow");
#else
@@ -1479,7 +1504,7 @@ ERTS_CIO_EXPORT(erts_check_io_size)(void)
ERTS_CIO_POLL_INFO(pollset.ps, &pi);
res = pi.memory_size;
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
- res += sizeof(ErtsDrvEventState) * erts_smp_atomic_read(&drv_ev_state_len);
+ res += sizeof(ErtsDrvEventState) * erts_smp_atomic_read_nob(&drv_ev_state_len);
#else
res += safe_hash_table_sz(&drv_ev_state_tab);
{
@@ -1506,7 +1531,7 @@ ERTS_CIO_EXPORT(erts_check_io_info)(void *proc)
ERTS_CIO_POLL_INFO(pollset.ps, &pi);
memory_size = pi.memory_size;
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
- memory_size += sizeof(ErtsDrvEventState) * erts_smp_atomic_read(&drv_ev_state_len);
+ memory_size += sizeof(ErtsDrvEventState) * erts_smp_atomic_read_nob(&drv_ev_state_len);
#else
memory_size += safe_hash_table_sz(&drv_ev_state_tab);
{
@@ -1870,13 +1895,12 @@ ERTS_CIO_EXPORT(erts_check_io_debug)(void)
erts_printf("--- fds in pollset --------------------------------------\n");
-#ifdef ERTS_SMP
-# ifdef ERTS_ENABLE_LOCK_CHECK
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
erts_lc_check_exact(NULL, 0); /* No locks should be locked */
-# endif
- erts_block_system(0); /* stop the world to avoid messy locking */
#endif
+ erts_smp_thr_progress_block(); /* stop the world to avoid messy locking */
+
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
counters.epep = erts_alloc(ERTS_ALC_T_TMP, sizeof(ErtsPollEvents)*max_fds);
ERTS_POLL_EXPORT(erts_poll_get_selected_events)(pollset.ps, counters.epep, max_fds);
@@ -1886,7 +1910,7 @@ ERTS_CIO_EXPORT(erts_check_io_debug)(void)
counters.num_errors = 0;
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
- len = erts_smp_atomic_read(&drv_ev_state_len);
+ len = erts_smp_atomic_read_nob(&drv_ev_state_len);
for (fd = 0; fd < len; fd++) {
doit_erts_check_io_debug((void *) &drv_ev_state[fd], (void *) &counters);
}
@@ -1898,9 +1922,7 @@ ERTS_CIO_EXPORT(erts_check_io_debug)(void)
safe_hash_for_each(&drv_ev_state_tab, &doit_erts_check_io_debug, (void *) &counters);
#endif
-#ifdef ERTS_SMP
- erts_release_system();
-#endif
+ erts_smp_thr_progress_unblock();
erts_printf("\n");
erts_printf("used fds=%d\n", counters.used_fds);
diff --git a/erts/emulator/sys/common/erl_check_io.h b/erts/emulator/sys/common/erl_check_io.h
index 9b45a63913..edab7947ba 100644
--- a/erts/emulator/sys/common/erl_check_io.h
+++ b/erts/emulator/sys/common/erl_check_io.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -40,10 +40,14 @@ Eterm erts_check_io_info_kp(void *);
Eterm erts_check_io_info_nkp(void *);
int erts_check_io_max_files_kp(void);
int erts_check_io_max_files_nkp(void);
+#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+void erts_check_io_async_sig_interrupt_kp(void);
+void erts_check_io_async_sig_interrupt_nkp(void);
+#endif
void erts_check_io_interrupt_kp(int);
void erts_check_io_interrupt_nkp(int);
-void erts_check_io_interrupt_timed_kp(int, long);
-void erts_check_io_interrupt_timed_nkp(int, long);
+void erts_check_io_interrupt_timed_kp(int, erts_short_time_t);
+void erts_check_io_interrupt_timed_nkp(int, erts_short_time_t);
void erts_check_io_kp(int);
void erts_check_io_nkp(int);
void erts_init_check_io_kp(void);
@@ -56,8 +60,11 @@ int erts_check_io_debug_nkp(void);
Uint erts_check_io_size(void);
Eterm erts_check_io_info(void *);
int erts_check_io_max_files(void);
+#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+void erts_check_io_async_sig_interrupt(void);
+#endif
void erts_check_io_interrupt(int);
-void erts_check_io_interrupt_timed(int, long);
+void erts_check_io_interrupt_timed(int, erts_short_time_t);
void erts_check_io(int);
void erts_init_check_io(void);
diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c
index eaef6680dd..db2854fa40 100644
--- a/erts/emulator/sys/common/erl_mseg.c
+++ b/erts/emulator/sys/common/erl_mseg.c
@@ -36,14 +36,12 @@
#include "erl_threads.h"
#include "erl_mtrace.h"
#include "erl_time.h"
+#include "erl_alloc.h"
#include "big.h"
+#include "erl_thr_progress.h"
#if HAVE_ERTS_MSEG
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
-# define ERTS_THREADS_NO_SMP
-#endif
-
#define SEGTYPE ERTS_MTRACE_SEGMENT_ID
#ifndef HAVE_GETPAGESIZE
@@ -75,16 +73,9 @@
static int atoms_initialized;
-static Uint cache_check_interval;
-
typedef struct mem_kind_t MemKind;
-static void check_cache(void *unused);
static void mseg_clear_cache(MemKind*);
-static int is_cache_check_scheduled;
-#ifdef ERTS_THREADS_NO_SMP
-static int is_cache_check_requested;
-#endif
#if HALFWORD_HEAP
static int initialize_pmmap(void);
@@ -138,7 +129,8 @@ const ErtsMsegOpt_t erts_mseg_default_opt = {
1, /* Use cache */
1, /* Preserv data */
0, /* Absolute shrink threshold */
- 0 /* Relative shrink threshold */
+ 0, /* Relative shrink threshold */
+ 0 /* Scheduler specific */
#if HALFWORD_HEAP
,0 /* need low memory */
#endif
@@ -157,11 +149,10 @@ typedef struct {
Uint32 no;
} CallCounter;
-static int is_init_done;
static Uint page_size;
static Uint page_shift;
-static struct {
+typedef struct {
CallCounter alloc;
CallCounter dealloc;
CallCounter realloc;
@@ -172,7 +163,9 @@ static struct {
#endif
CallCounter clear_cache;
CallCounter check_cache;
-} calls;
+} ErtsMsegCalls;
+
+typedef struct ErtsMsegAllctr_t_ ErtsMsegAllctr_t;
struct mem_kind_t {
cache_desc_t cache_descs[MAX_CACHE_SIZE];
@@ -201,25 +194,84 @@ struct mem_kind_t {
} max_ever;
} segments;
+ ErtsMsegAllctr_t *ma;
const char* name;
MemKind* next;
};/*MemKind*/
+struct ErtsMsegAllctr_t_ {
+ int ix;
+
+ int is_init_done;
+ int is_thread_safe;
+ erts_mtx_t mtx;
+
+ int is_cache_check_scheduled;
+
+ MemKind* mk_list;
+
#if HALFWORD_HEAP
-static MemKind low_mem, hi_mem;
+ MemKind low_mem;
+ MemKind hi_mem;
#else
-static MemKind the_mem;
+ MemKind the_mem;
#endif
-static MemKind* mk_list = NULL;
-static Uint max_cache_size;
-static Uint abs_max_cache_bad_fit;
-static Uint rel_max_cache_bad_fit;
+ Uint max_cache_size;
+ Uint abs_max_cache_bad_fit;
+ Uint rel_max_cache_bad_fit;
+
+ ErtsMsegCalls calls;
#if CAN_PARTLY_DESTROY
-static Uint min_seg_size;
+ Uint min_seg_size;
+#endif
+
+};
+
+typedef union {
+ ErtsMsegAllctr_t mseg_alloc;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsMsegAllctr_t))];
+} ErtsAlgndMsegAllctr_t;
+
+static int no_mseg_allocators;
+static ErtsAlgndMsegAllctr_t *aligned_mseg_allctr;
+
+#ifdef ERTS_SMP
+
+#define ERTS_MSEG_ALLCTR_IX(IX) \
+ (&aligned_mseg_allctr[(IX)].mseg_alloc)
+
+#define ERTS_MSEG_ALLCTR_SS() \
+ ERTS_MSEG_ALLCTR_IX((int) erts_get_scheduler_id())
+
+#define ERTS_MSEG_ALLCTR_OPT(OPT) \
+ ((OPT)->sched_spec ? ERTS_MSEG_ALLCTR_SS() : ERTS_MSEG_ALLCTR_IX(0))
+
+#else
+
+#define ERTS_MSEG_ALLCTR_IX(IX) \
+ (&aligned_mseg_allctr[0].mseg_alloc)
+
+#define ERTS_MSEG_ALLCTR_SS() \
+ (&aligned_mseg_allctr[0].mseg_alloc)
+
+#define ERTS_MSEG_ALLCTR_OPT(OPT) \
+ (&aligned_mseg_allctr[0].mseg_alloc)
+
#endif
+#define ERTS_MSEG_LOCK(MA) \
+do { \
+ if ((MA)->is_thread_safe) \
+ erts_mtx_lock(&(MA)->mtx); \
+} while (0)
+
+#define ERTS_MSEG_UNLOCK(MA) \
+do { \
+ if ((MA)->is_thread_safe) \
+ erts_mtx_unlock(&(MA)->mtx); \
+} while (0)
#define ERTS_MSEG_ALLOC_STAT(C,SZ) \
do { \
@@ -250,104 +302,44 @@ do { \
#define ONE_GIGA (1000000000)
-#define ZERO_CC(CC) (calls.CC.no = 0, calls.CC.giga_no = 0)
+#define ZERO_CC(MA, CC) ((MA)->calls.CC.no = 0, \
+ (MA)->calls.CC.giga_no = 0)
-#define INC_CC(CC) (calls.CC.no == ONE_GIGA - 1 \
- ? (calls.CC.giga_no++, calls.CC.no = 0) \
- : calls.CC.no++)
+#define INC_CC(MA, CC) ((MA)->calls.CC.no == ONE_GIGA - 1 \
+ ? ((MA)->calls.CC.giga_no++, \
+ (MA)->calls.CC.no = 0) \
+ : (MA)->calls.CC.no++)
-#define DEC_CC(CC) (calls.CC.no == 0 \
- ? (calls.CC.giga_no--, \
- calls.CC.no = ONE_GIGA - 1) \
- : calls.CC.no--)
+#define DEC_CC(MA, CC) ((MA)->calls.CC.no == 0 \
+ ? ((MA)->calls.CC.giga_no--, \
+ (MA)->calls.CC.no = ONE_GIGA - 1) \
+ : (MA)->calls.CC.no--)
-static erts_mtx_t mseg_mutex; /* Also needed when !USE_THREADS */
static erts_mtx_t init_atoms_mutex; /* Also needed when !USE_THREADS */
-#ifdef USE_THREADS
-#ifdef ERTS_THREADS_NO_SMP
-static erts_tid_t main_tid;
-static int async_handle = -1;
-#endif
-
-static void thread_safe_init(void)
-{
- erts_mtx_init(&init_atoms_mutex, "mseg_init_atoms");
- erts_mtx_init(&mseg_mutex, "mseg");
-
-#ifdef ERTS_THREADS_NO_SMP
- main_tid = erts_thr_self();
-#endif
-}
-
-#endif
-
-static ErlTimer cache_check_timer;
static ERTS_INLINE void
-schedule_cache_check(void)
-{
- if (!is_cache_check_scheduled && is_init_done) {
-#ifdef ERTS_THREADS_NO_SMP
- if (!erts_equal_tids(erts_thr_self(), main_tid)) {
- if (!is_cache_check_requested) {
- is_cache_check_requested = 1;
- sys_async_ready(async_handle);
- }
- }
- else
-#endif
- {
- cache_check_timer.active = 0;
- erts_set_timer(&cache_check_timer,
- check_cache,
- NULL,
- NULL,
- cache_check_interval);
- is_cache_check_scheduled = 1;
-#ifdef ERTS_THREADS_NO_SMP
- is_cache_check_requested = 0;
-#endif
- }
- }
-}
-
-#ifdef ERTS_THREADS_NO_SMP
-
-static void
-check_schedule_cache_check(void)
+schedule_cache_check(ErtsMsegAllctr_t *ma)
{
- erts_mtx_lock(&mseg_mutex);
- if (is_cache_check_requested
- && !is_cache_check_scheduled) {
- schedule_cache_check();
- }
- erts_mtx_unlock(&mseg_mutex);
-}
-
-#endif
-static void
-mseg_shutdown(void)
-{
- MemKind* mk;
- erts_mtx_lock(&mseg_mutex);
- for (mk=mk_list; mk; mk=mk->next) {
- mseg_clear_cache(mk);
+ if (!ma->is_cache_check_scheduled && ma->is_init_done) {
+ erts_set_aux_work_timeout(ma->ix,
+ ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK,
+ 1);
+ ma->is_cache_check_scheduled = 1;
}
- erts_mtx_unlock(&mseg_mutex);
}
static ERTS_INLINE void *
-mseg_create(MemKind* mk, Uint size)
+mseg_create(ErtsMsegAllctr_t *ma, MemKind* mk, Uint size)
{
void *seg;
ASSERT(size % page_size == 0);
#if HALFWORD_HEAP
- if (mk == &low_mem) {
+ if (mk == &ma->low_mem) {
seg = pmmap(size);
if ((unsigned long) seg & CHECK_POINTER_MASK) {
erts_fprintf(stderr,"Pointer mask failure (0x%08lx)\n",(unsigned long) seg);
@@ -371,28 +363,38 @@ mseg_create(MemKind* mk, Uint size)
#endif
}
- INC_CC(create);
+ INC_CC(ma, create);
return seg;
}
static ERTS_INLINE void
-mseg_destroy(MemKind* mk, void *seg, Uint size)
+mseg_destroy(ErtsMsegAllctr_t *ma, MemKind* mk, void *seg, Uint size)
{
+#ifdef DEBUG
int res;
+#endif
#if HALFWORD_HEAP
- if (mk == &low_mem) {
- res = pmunmap((void *) seg, size);
+ if (mk == &ma->low_mem) {
+#ifdef DEBUG
+ res =
+#endif
+ pmunmap((void *) seg, size);
}
else
#endif
{
#ifdef ERTS_MSEG_FAKE_SEGMENTS
erts_sys_free(ERTS_ALC_N_INVALID, NULL, seg);
+#ifdef DEBUG
res = 0;
+#endif
#elif HAVE_MMAP
- res = munmap((void *) seg, size);
+#ifdef DEBUG
+ res =
+#endif
+ munmap((void *) seg, size);
#else
# error "Missing mseg_destroy() implementation"
#endif
@@ -401,14 +403,14 @@ mseg_destroy(MemKind* mk, void *seg, Uint size)
ASSERT(size % page_size == 0);
ASSERT(res == 0);
- INC_CC(destroy);
+ INC_CC(ma, destroy);
}
#if HAVE_MSEG_RECREATE
static ERTS_INLINE void *
-mseg_recreate(MemKind* mk, void *old_seg, Uint old_size, Uint new_size)
+mseg_recreate(ErtsMsegAllctr_t *ma, MemKind* mk, void *old_seg, Uint old_size, Uint new_size)
{
void *new_seg;
@@ -416,7 +418,7 @@ mseg_recreate(MemKind* mk, void *old_seg, Uint old_size, Uint new_size)
ASSERT(new_size % page_size == 0);
#if HALFWORD_HEAP
- if (mk == &low_mem) {
+ if (mk == &ma->low_mem) {
new_seg = (void *) pmremap((void *) old_seg,
(size_t) old_size,
(size_t) new_size);
@@ -447,19 +449,37 @@ mseg_recreate(MemKind* mk, void *old_seg, Uint old_size, Uint new_size)
#endif
}
- INC_CC(recreate);
+ INC_CC(ma, recreate);
return new_seg;
}
#endif /* #if HAVE_MSEG_RECREATE */
+#ifdef DEBUG
+#define ERTS_DBG_MA_CHK_THR_ACCESS(MA) \
+do { \
+ if ((MA)->is_thread_safe) \
+ ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&(MA)->mtx) \
+ || erts_smp_thr_progress_is_blocking() \
+ || ERTS_IS_CRASH_DUMPING); \
+ else \
+ ERTS_LC_ASSERT((MA)->ix == (int) erts_get_scheduler_id() \
+ || erts_smp_thr_progress_is_blocking() \
+ || ERTS_IS_CRASH_DUMPING); \
+} while (0)
+#define ERTS_DBG_MK_CHK_THR_ACCESS(MK) \
+ ERTS_DBG_MA_CHK_THR_ACCESS((MK)->ma)
+#else
+#define ERTS_DBG_MA_CHK_THR_ACCESS(MA)
+#define ERTS_DBG_MK_CHK_THR_ACCESS(MK)
+#endif
static ERTS_INLINE cache_desc_t *
alloc_cd(MemKind* mk)
{
cache_desc_t *cd = mk->free_cache_descs;
- ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
+ ERTS_DBG_MK_CHK_THR_ACCESS(mk);
if (cd)
mk->free_cache_descs = cd->next;
return cd;
@@ -468,7 +488,7 @@ alloc_cd(MemKind* mk)
static ERTS_INLINE void
free_cd(MemKind* mk, cache_desc_t *cd)
{
- ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
+ ERTS_DBG_MK_CHK_THR_ACCESS(mk);
cd->next = mk->free_cache_descs;
mk->free_cache_descs = cd;
}
@@ -477,7 +497,7 @@ free_cd(MemKind* mk, cache_desc_t *cd)
static ERTS_INLINE void
link_cd(MemKind* mk, cache_desc_t *cd)
{
- ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
+ ERTS_DBG_MK_CHK_THR_ACCESS(mk);
if (mk->cache)
mk->cache->prev = cd;
cd->next = mk->cache;
@@ -496,7 +516,7 @@ link_cd(MemKind* mk, cache_desc_t *cd)
static ERTS_INLINE void
end_link_cd(MemKind* mk, cache_desc_t *cd)
{
- ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
+ ERTS_DBG_MK_CHK_THR_ACCESS(mk);
if (mk->cache_end)
mk->cache_end->next = cd;
cd->next = NULL;
@@ -515,7 +535,7 @@ end_link_cd(MemKind* mk, cache_desc_t *cd)
static ERTS_INLINE void
unlink_cd(MemKind* mk, cache_desc_t *cd)
{
- ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
+ ERTS_DBG_MK_CHK_THR_ACCESS(mk);
if (cd->next)
cd->next->prev = cd->prev;
else
@@ -533,7 +553,7 @@ static ERTS_INLINE void
check_cache_limits(MemKind* mk)
{
cache_desc_t *cd;
- ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
+ ERTS_DBG_MK_CHK_THR_ACCESS(mk);
mk->max_cached_seg_size = 0;
mk->min_cached_seg_size = ~((Uint) 0);
for (cd = mk->cache; cd; cd = cd->next) {
@@ -551,7 +571,7 @@ adjust_cache_size(MemKind* mk, int force_check_limits)
int check_limits = force_check_limits;
Sint max_cached = ((Sint) mk->segments.current.watermark
- (Sint) mk->segments.current.no);
- ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
+ ERTS_DBG_MK_CHK_THR_ACCESS(mk);
while (((Sint) mk->cache_size) > max_cached && ((Sint) mk->cache_size) > 0) {
ASSERT(mk->cache_end);
cd = mk->cache_end;
@@ -562,7 +582,7 @@ adjust_cache_size(MemKind* mk, int force_check_limits)
}
if (erts_mtrace_enabled)
erts_mtrace_crr_free(SEGTYPE, SEGTYPE, cd->seg);
- mseg_destroy(mk, cd->seg, cd->size);
+ mseg_destroy(mk->ma, mk, cd->seg, cd->size);
unlink_cd(mk,cd);
free_cd(mk,cd);
}
@@ -571,7 +591,7 @@ adjust_cache_size(MemKind* mk, int force_check_limits)
check_cache_limits(mk);
}
-static void
+static Uint
check_one_cache(MemKind* mk)
{
if (mk->segments.current.watermark > mk->segments.current.no)
@@ -579,23 +599,37 @@ check_one_cache(MemKind* mk)
adjust_cache_size(mk, 0);
if (mk->cache_size)
- schedule_cache_check();
+ schedule_cache_check(mk->ma);
+ return mk->cache_size;
}
-static void check_cache(void* unused)
+static void do_cache_check(ErtsMsegAllctr_t *ma)
{
+ int empty_cache = 1;
MemKind* mk;
- erts_mtx_lock(&mseg_mutex);
- is_cache_check_scheduled = 0;
+ ERTS_MSEG_LOCK(ma);
- for (mk=mk_list; mk; mk=mk->next) {
- check_one_cache(mk);
+ for (mk=ma->mk_list; mk; mk=mk->next) {
+ if (check_one_cache(mk))
+ empty_cache = 0;
}
- INC_CC(check_cache);
+ if (empty_cache) {
+ ma->is_cache_check_scheduled = 0;
+ erts_set_aux_work_timeout(ma->ix,
+ ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK,
+ 0);
+ }
+
+ INC_CC(ma, check_cache);
+
+ ERTS_MSEG_UNLOCK(ma);
+}
- erts_mtx_unlock(&mseg_mutex);
+void erts_mseg_cache_check(void)
+{
+ do_cache_check(ERTS_MSEG_ALLCTR_SS());
}
static void
@@ -611,42 +645,44 @@ mseg_clear_cache(MemKind* mk)
mk->segments.current.watermark = mk->segments.current.no;
- INC_CC(clear_cache);
+ INC_CC(mk->ma, clear_cache);
}
-static ERTS_INLINE MemKind* memkind(const ErtsMsegOpt_t *opt)
+static ERTS_INLINE MemKind* memkind(ErtsMsegAllctr_t *ma,
+ const ErtsMsegOpt_t *opt)
{
#if HALFWORD_HEAP
- return opt->low_mem ? &low_mem : &hi_mem;
+ return opt->low_mem ? &ma->low_mem : &ma->hi_mem;
#else
- return &the_mem;
+ return &ma->the_mem;
#endif
}
static void *
-mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
+mseg_alloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, Uint *size_p,
+ const ErtsMsegOpt_t *opt)
{
Uint max, min, diff_size, size;
cache_desc_t *cd, *cand_cd;
void *seg;
- MemKind* mk = memkind(opt);
+ MemKind* mk = memkind(ma, opt);
- INC_CC(alloc);
+ INC_CC(ma, alloc);
size = PAGE_CEILING(*size_p);
#if CAN_PARTLY_DESTROY
- if (size < min_seg_size)
- min_seg_size = size;
+ if (size < ma->min_seg_size)
+ ma->min_seg_size = size;
#endif
if (!opt->cache) {
create_seg:
adjust_cache_size(mk,0);
- seg = mseg_create(mk, size);
+ seg = mseg_create(ma, mk, size);
if (!seg) {
mseg_clear_cache(mk);
- seg = mseg_create(mk, size);
+ seg = mseg_create(ma, mk, size);
if (!seg)
size = 0;
}
@@ -667,10 +703,10 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
diff_size = mk->min_cached_seg_size - size;
- if (diff_size > abs_max_cache_bad_fit)
+ if (diff_size > ma->abs_max_cache_bad_fit)
goto create_seg;
- if (100*PAGES(diff_size) > rel_max_cache_bad_fit*PAGES(size))
+ if (100*PAGES(diff_size) > ma->rel_max_cache_bad_fit*PAGES(size))
goto create_seg;
}
@@ -708,8 +744,8 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
diff_size = cand_cd->size - size;
- if (diff_size > abs_max_cache_bad_fit
- || 100*PAGES(diff_size) > rel_max_cache_bad_fit*PAGES(size)) {
+ if (diff_size > ma->abs_max_cache_bad_fit
+ || 100*PAGES(diff_size) > ma->rel_max_cache_bad_fit*PAGES(size)) {
if (mk->max_cached_seg_size < cand_cd->size)
mk->max_cached_seg_size = cand_cd->size;
if (mk->min_cached_seg_size > cand_cd->size)
@@ -740,18 +776,18 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
static void
-mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size,
+mseg_dealloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, Uint size,
const ErtsMsegOpt_t *opt)
{
- MemKind* mk = memkind(opt);
+ MemKind* mk = memkind(ma, opt);
cache_desc_t *cd;
ERTS_MSEG_DEALLOC_STAT(mk,size);
- if (!opt->cache || max_cache_size == 0) {
+ if (!opt->cache || ma->max_cache_size == 0) {
if (erts_mtrace_enabled)
erts_mtrace_crr_free(atype, SEGTYPE, seg);
- mseg_destroy(mk, seg, size);
+ mseg_destroy(ma, mk, seg, size);
}
else {
int check_limits = 0;
@@ -769,7 +805,7 @@ mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size,
}
if (erts_mtrace_enabled)
erts_mtrace_crr_free(SEGTYPE, SEGTYPE, cd->seg);
- mseg_destroy(mk, cd->seg, cd->size);
+ mseg_destroy(ma, mk, cd->seg, cd->size);
unlink_cd(mk,cd);
free_cd(mk,cd);
}
@@ -790,33 +826,34 @@ mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size,
if (check_limits)
check_cache_limits(mk);
- schedule_cache_check();
+ schedule_cache_check(ma);
}
- INC_CC(dealloc);
+ INC_CC(ma, dealloc);
}
static void *
-mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
- const ErtsMsegOpt_t *opt)
+mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg,
+ Uint old_size, Uint *new_size_p, const ErtsMsegOpt_t *opt)
{
- MemKind* mk = memkind(opt);
+ MemKind* mk;
void *new_seg;
Uint new_size;
if (!seg || !old_size) {
- new_seg = mseg_alloc(atype, new_size_p, opt);
- DEC_CC(alloc);
+ new_seg = mseg_alloc(ma, atype, new_size_p, opt);
+ DEC_CC(ma, alloc);
return new_seg;
}
if (!(*new_size_p)) {
- mseg_dealloc(atype, seg, old_size, opt);
- DEC_CC(dealloc);
+ mseg_dealloc(ma, atype, seg, old_size, opt);
+ DEC_CC(ma, dealloc);
return NULL;
}
+ mk = memkind(ma, opt);
new_seg = seg;
new_size = PAGE_CEILING(*new_size_p);
@@ -826,8 +863,8 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
Uint shrink_sz = old_size - new_size;
#if CAN_PARTLY_DESTROY
- if (new_size < min_seg_size)
- min_seg_size = new_size;
+ if (new_size < ma->min_seg_size)
+ ma->min_seg_size = new_size;
#endif
if (shrink_sz < opt->abs_shrink_th
@@ -838,7 +875,7 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
#if CAN_PARTLY_DESTROY
- if (shrink_sz > min_seg_size
+ if (shrink_sz > ma->min_seg_size
&& mk->free_cache_descs
&& opt->cache) {
cache_desc_t *cd;
@@ -857,7 +894,7 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
new_size);
erts_mtrace_crr_alloc(cd->seg, SEGTYPE, SEGTYPE, cd->size);
}
- schedule_cache_check();
+ schedule_cache_check(ma);
}
else {
if (erts_mtrace_enabled)
@@ -866,7 +903,7 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
SEGTYPE,
seg,
new_size);
- mseg_destroy(mk, ((char *) seg) + new_size, shrink_sz);
+ mseg_destroy(ma, mk, ((char *) seg) + new_size, shrink_sz);
}
#elif HAVE_MSEG_RECREATE
@@ -875,14 +912,14 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
#else
- new_seg = mseg_alloc(atype, &new_size, opt);
+ new_seg = mseg_alloc(ma, atype, &new_size, opt);
if (!new_seg)
new_size = old_size;
else {
sys_memcpy(((char *) new_seg),
((char *) seg),
MIN(new_size, old_size));
- mseg_dealloc(atype, seg, old_size, opt);
+ mseg_dealloc(ma, atype, seg, old_size, opt);
}
#endif
@@ -892,34 +929,34 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
else {
if (!opt->preserv) {
- mseg_dealloc(atype, seg, old_size, opt);
- new_seg = mseg_alloc(atype, &new_size, opt);
+ mseg_dealloc(ma, atype, seg, old_size, opt);
+ new_seg = mseg_alloc(ma, atype, &new_size, opt);
}
else {
#if HAVE_MSEG_RECREATE
#if !CAN_PARTLY_DESTROY
do_recreate:
#endif
- new_seg = mseg_recreate(mk, (void *) seg, old_size, new_size);
+ new_seg = mseg_recreate(ma, mk, (void *) seg, old_size, new_size);
if (erts_mtrace_enabled)
erts_mtrace_crr_realloc(new_seg, atype, SEGTYPE, seg, new_size);
if (!new_seg)
new_size = old_size;
#else
- new_seg = mseg_alloc(atype, &new_size, opt);
+ new_seg = mseg_alloc(ma, atype, &new_size, opt);
if (!new_seg)
new_size = old_size;
else {
sys_memcpy(((char *) new_seg),
((char *) seg),
MIN(new_size, old_size));
- mseg_dealloc(atype, seg, old_size, opt);
+ mseg_dealloc(ma, atype, seg, old_size, opt);
}
#endif
}
}
- INC_CC(realloc);
+ INC_CC(ma, realloc);
*new_size_p = new_size;
@@ -937,7 +974,6 @@ static struct {
Eterm amcbf;
Eterm rmcbf;
Eterm mcs;
- Eterm cci;
Eterm memkind;
Eterm name;
@@ -973,13 +1009,13 @@ static void ERTS_INLINE atom_init(Eterm *atom, char *name)
#define AM_INIT(AM) atom_init(&am.AM, #AM)
static void
-init_atoms(void)
+init_atoms(ErtsMsegAllctr_t *ma)
{
#ifdef DEBUG
Eterm *atom;
#endif
- erts_mtx_unlock(&mseg_mutex);
+ ERTS_MSEG_UNLOCK(ma);
erts_mtx_lock(&init_atoms_mutex);
if (!atoms_initialized) {
@@ -997,7 +1033,6 @@ init_atoms(void)
AM_INIT(amcbf);
AM_INIT(rmcbf);
AM_INIT(mcs);
- AM_INIT(cci);
AM_INIT(status);
AM_INIT(cached_segments);
@@ -1025,7 +1060,7 @@ init_atoms(void)
#endif
}
- erts_mtx_lock(&mseg_mutex);
+ ERTS_MSEG_LOCK(ma);
atoms_initialized = 1;
erts_mtx_unlock(&init_atoms_mutex);
}
@@ -1082,7 +1117,8 @@ add_4tup(Uint **hpp, Uint *szp, Eterm *lp,
}
static Eterm
-info_options(char *prefix,
+info_options(ErtsMsegAllctr_t *ma,
+ char *prefix,
int *print_to_p,
void *print_to_arg,
Uint **hpp,
@@ -1093,30 +1129,26 @@ info_options(char *prefix,
if (print_to_p) {
int to = *print_to_p;
void *arg = print_to_arg;
- erts_print(to, arg, "%samcbf: %beu\n", prefix, abs_max_cache_bad_fit);
- erts_print(to, arg, "%srmcbf: %beu\n", prefix, rel_max_cache_bad_fit);
- erts_print(to, arg, "%smcs: %beu\n", prefix, max_cache_size);
- erts_print(to, arg, "%scci: %beu\n", prefix, cache_check_interval);
+ erts_print(to, arg, "%samcbf: %beu\n", prefix, ma->abs_max_cache_bad_fit);
+ erts_print(to, arg, "%srmcbf: %beu\n", prefix, ma->rel_max_cache_bad_fit);
+ erts_print(to, arg, "%smcs: %beu\n", prefix, ma->max_cache_size);
}
if (hpp || szp) {
if (!atoms_initialized)
- init_atoms();
+ init_atoms(ma);
res = NIL;
add_2tup(hpp, szp, &res,
- am.cci,
- bld_uint(hpp, szp, cache_check_interval));
- add_2tup(hpp, szp, &res,
am.mcs,
- bld_uint(hpp, szp, max_cache_size));
+ bld_uint(hpp, szp, ma->max_cache_size));
add_2tup(hpp, szp, &res,
am.rmcbf,
- bld_uint(hpp, szp, rel_max_cache_bad_fit));
+ bld_uint(hpp, szp, ma->rel_max_cache_bad_fit));
add_2tup(hpp, szp, &res,
am.amcbf,
- bld_uint(hpp, szp, abs_max_cache_bad_fit));
+ bld_uint(hpp, szp, ma->abs_max_cache_bad_fit));
}
@@ -1124,18 +1156,18 @@ info_options(char *prefix,
}
static Eterm
-info_calls(int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp)
+info_calls(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp)
{
Eterm res = THE_NON_VALUE;
if (print_to_p) {
-#define PRINT_CC(TO, TOA, CC) \
- if (calls.CC.giga_no == 0) \
- erts_print(TO, TOA, "mseg_%s calls: %b32u\n", #CC, calls.CC.no); \
- else \
+#define PRINT_CC(TO, TOA, CC) \
+ if (ma->calls.CC.giga_no == 0) \
+ erts_print(TO, TOA, "mseg_%s calls: %b32u\n", #CC, ma->calls.CC.no); \
+ else \
erts_print(TO, TOA, "mseg_%s calls: %b32u%09b32u\n", #CC, \
- calls.CC.giga_no, calls.CC.no)
+ ma->calls.CC.giga_no, ma->calls.CC.no)
int to = *print_to_p;
void *arg = print_to_arg;
@@ -1161,48 +1193,48 @@ info_calls(int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp)
add_3tup(hpp, szp, &res,
am.mseg_check_cache,
- bld_unstable_uint(hpp, szp, calls.check_cache.giga_no),
- bld_unstable_uint(hpp, szp, calls.check_cache.no));
+ bld_unstable_uint(hpp, szp, ma->calls.check_cache.giga_no),
+ bld_unstable_uint(hpp, szp, ma->calls.check_cache.no));
add_3tup(hpp, szp, &res,
am.mseg_clear_cache,
- bld_unstable_uint(hpp, szp, calls.clear_cache.giga_no),
- bld_unstable_uint(hpp, szp, calls.clear_cache.no));
+ bld_unstable_uint(hpp, szp, ma->calls.clear_cache.giga_no),
+ bld_unstable_uint(hpp, szp, ma->calls.clear_cache.no));
#if HAVE_MSEG_RECREATE
add_3tup(hpp, szp, &res,
am.mseg_recreate,
- bld_unstable_uint(hpp, szp, calls.recreate.giga_no),
- bld_unstable_uint(hpp, szp, calls.recreate.no));
+ bld_unstable_uint(hpp, szp, ma->calls.recreate.giga_no),
+ bld_unstable_uint(hpp, szp, ma->calls.recreate.no));
#endif
add_3tup(hpp, szp, &res,
am.mseg_destroy,
- bld_unstable_uint(hpp, szp, calls.destroy.giga_no),
- bld_unstable_uint(hpp, szp, calls.destroy.no));
+ bld_unstable_uint(hpp, szp, ma->calls.destroy.giga_no),
+ bld_unstable_uint(hpp, szp, ma->calls.destroy.no));
add_3tup(hpp, szp, &res,
am.mseg_create,
- bld_unstable_uint(hpp, szp, calls.create.giga_no),
- bld_unstable_uint(hpp, szp, calls.create.no));
+ bld_unstable_uint(hpp, szp, ma->calls.create.giga_no),
+ bld_unstable_uint(hpp, szp, ma->calls.create.no));
add_3tup(hpp, szp, &res,
am.mseg_realloc,
- bld_unstable_uint(hpp, szp, calls.realloc.giga_no),
- bld_unstable_uint(hpp, szp, calls.realloc.no));
+ bld_unstable_uint(hpp, szp, ma->calls.realloc.giga_no),
+ bld_unstable_uint(hpp, szp, ma->calls.realloc.no));
add_3tup(hpp, szp, &res,
am.mseg_dealloc,
- bld_unstable_uint(hpp, szp, calls.dealloc.giga_no),
- bld_unstable_uint(hpp, szp, calls.dealloc.no));
+ bld_unstable_uint(hpp, szp, ma->calls.dealloc.giga_no),
+ bld_unstable_uint(hpp, szp, ma->calls.dealloc.no));
add_3tup(hpp, szp, &res,
am.mseg_alloc,
- bld_unstable_uint(hpp, szp, calls.alloc.giga_no),
- bld_unstable_uint(hpp, szp, calls.alloc.no));
+ bld_unstable_uint(hpp, szp, ma->calls.alloc.giga_no),
+ bld_unstable_uint(hpp, szp, ma->calls.alloc.no));
}
return res;
}
static Eterm
-info_status(MemKind* mk, int *print_to_p, void *print_to_arg,
+info_status(ErtsMsegAllctr_t *ma, MemKind* mk, int *print_to_p, void *print_to_arg,
int begin_new_max_period, Uint **hpp, Uint *szp)
{
Eterm res = THE_NON_VALUE;
@@ -1258,7 +1290,7 @@ info_status(MemKind* mk, int *print_to_p, void *print_to_arg,
return res;
}
-static Eterm info_memkind(MemKind* mk, int *print_to_p, void *print_to_arg,
+static Eterm info_memkind(ErtsMsegAllctr_t *ma, MemKind* mk, int *print_to_p, void *print_to_arg,
int begin_max_per, Uint **hpp, Uint *szp)
{
Eterm res = THE_NON_VALUE;
@@ -1274,8 +1306,8 @@ static Eterm info_memkind(MemKind* mk, int *print_to_p, void *print_to_arg,
atoms[2] = am.calls;
values[0] = erts_bld_string(hpp, szp, mk->name);
}
- values[1] = info_status(mk, print_to_p, print_to_arg, begin_max_per, hpp, szp);
- values[2] = info_calls(print_to_p, print_to_arg, hpp, szp);
+ values[1] = info_status(ma, mk, print_to_p, print_to_arg, begin_max_per, hpp, szp);
+ values[2] = info_calls(ma, print_to_p, print_to_arg, hpp, szp);
if (hpp || szp)
res = bld_2tup_list(hpp, szp, 3, atoms, values);
@@ -1285,7 +1317,7 @@ static Eterm info_memkind(MemKind* mk, int *print_to_p, void *print_to_arg,
static Eterm
-info_version(int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp)
+info_version(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp)
{
Eterm res = THE_NON_VALUE;
@@ -1306,56 +1338,64 @@ info_version(int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp)
\* */
Eterm
-erts_mseg_info_options(int *print_to_p, void *print_to_arg,
+erts_mseg_info_options(int ix,
+ int *print_to_p, void *print_to_arg,
Uint **hpp, Uint *szp)
{
+ ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_IX(ix);
Eterm res;
- erts_mtx_lock(&mseg_mutex);
+ ERTS_MSEG_LOCK(ma);
- res = info_options("option ", print_to_p, print_to_arg, hpp, szp);
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
- erts_mtx_unlock(&mseg_mutex);
+ res = info_options(ma, "option ", print_to_p, print_to_arg, hpp, szp);
+
+ ERTS_MSEG_UNLOCK(ma);
return res;
}
Eterm
-erts_mseg_info(int *print_to_p,
+erts_mseg_info(int ix,
+ int *print_to_p,
void *print_to_arg,
int begin_max_per,
Uint **hpp,
Uint *szp)
{
+ ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_IX(ix);
Eterm res = THE_NON_VALUE;
Eterm atoms[4];
Eterm values[4];
Uint n = 0;
- erts_mtx_lock(&mseg_mutex);
+ ERTS_MSEG_LOCK(ma);
+
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
if (hpp || szp) {
if (!atoms_initialized)
- init_atoms();
+ init_atoms(ma);
atoms[0] = am.version;
atoms[1] = am.options;
atoms[2] = am.memkind;
atoms[3] = am.memkind;
}
- values[n++] = info_version(print_to_p, print_to_arg, hpp, szp);
- values[n++] = info_options("option ", print_to_p, print_to_arg, hpp, szp);
+ values[n++] = info_version(ma, print_to_p, print_to_arg, hpp, szp);
+ values[n++] = info_options(ma, "option ", print_to_p, print_to_arg, hpp, szp);
#if HALFWORD_HEAP
- values[n++] = info_memkind(&low_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp);
- values[n++] = info_memkind(&hi_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp);
+ values[n++] = info_memkind(ma, &ma->low_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp);
+ values[n++] = info_memkind(ma, &ma->hi_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp);
#else
- values[n++] = info_memkind(&the_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp);
+ values[n++] = info_memkind(ma, &ma->the_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp);
#endif
if (hpp || szp)
res = bld_2tup_list(hpp, szp, n, atoms, values);
- erts_mtx_unlock(&mseg_mutex);
+ ERTS_MSEG_UNLOCK(ma);
return res;
}
@@ -1363,10 +1403,12 @@ erts_mseg_info(int *print_to_p,
void *
erts_mseg_alloc_opt(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
{
+ ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt);
void *seg;
- erts_mtx_lock(&mseg_mutex);
- seg = mseg_alloc(atype, size_p, opt);
- erts_mtx_unlock(&mseg_mutex);
+ ERTS_MSEG_LOCK(ma);
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
+ seg = mseg_alloc(ma, atype, size_p, opt);
+ ERTS_MSEG_UNLOCK(ma);
return seg;
}
@@ -1377,12 +1419,14 @@ erts_mseg_alloc(ErtsAlcType_t atype, Uint *size_p)
}
void
-erts_mseg_dealloc_opt(ErtsAlcType_t atype, void *seg, Uint size,
- const ErtsMsegOpt_t *opt)
+erts_mseg_dealloc_opt(ErtsAlcType_t atype, void *seg,
+ Uint size, const ErtsMsegOpt_t *opt)
{
- erts_mtx_lock(&mseg_mutex);
- mseg_dealloc(atype, seg, size, opt);
- erts_mtx_unlock(&mseg_mutex);
+ ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt);
+ ERTS_MSEG_LOCK(ma);
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
+ mseg_dealloc(ma, atype, seg, size, opt);
+ ERTS_MSEG_UNLOCK(ma);
}
void
@@ -1392,44 +1436,60 @@ erts_mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size)
}
void *
-erts_mseg_realloc_opt(ErtsAlcType_t atype, void *seg, Uint old_size,
- Uint *new_size_p, const ErtsMsegOpt_t *opt)
+erts_mseg_realloc_opt(ErtsAlcType_t atype, void *seg,
+ Uint old_size, Uint *new_size_p,
+ const ErtsMsegOpt_t *opt)
{
+ ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt);
void *new_seg;
- erts_mtx_lock(&mseg_mutex);
- new_seg = mseg_realloc(atype, seg, old_size, new_size_p, opt);
- erts_mtx_unlock(&mseg_mutex);
+ ERTS_MSEG_LOCK(ma);
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
+ new_seg = mseg_realloc(ma, atype, seg, old_size, new_size_p, opt);
+ ERTS_MSEG_UNLOCK(ma);
return new_seg;
}
void *
-erts_mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size,
- Uint *new_size_p)
+erts_mseg_realloc(ErtsAlcType_t atype, void *seg,
+ Uint old_size, Uint *new_size_p)
{
- return erts_mseg_realloc_opt(atype, seg, old_size, new_size_p, &erts_mseg_default_opt);
+ return erts_mseg_realloc_opt(atype, seg, old_size, new_size_p,
+ &erts_mseg_default_opt);
}
void
erts_mseg_clear_cache(void)
{
+ ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_SS();
MemKind* mk;
- erts_mtx_lock(&mseg_mutex);
- for (mk=mk_list; mk; mk=mk->next) {
+
+start:
+
+ ERTS_MSEG_LOCK(ma);
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
+ for (mk=ma->mk_list; mk; mk=mk->next) {
mseg_clear_cache(mk);
}
- erts_mtx_unlock(&mseg_mutex);
+ ERTS_MSEG_UNLOCK(ma);
+
+ if (ma->ix != 0) {
+ ma = ERTS_MSEG_ALLCTR_IX(0);
+ goto start;
+ }
}
Uint
-erts_mseg_no(void)
+erts_mseg_no(const ErtsMsegOpt_t *opt)
{
+ ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt);
MemKind* mk;
Uint n = 0;
- erts_mtx_lock(&mseg_mutex);
- for (mk=mk_list; mk; mk=mk->next) {
+ ERTS_MSEG_LOCK(ma);
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
+ for (mk=ma->mk_list; mk; mk=mk->next) {
n += mk->segments.current.no;
}
- erts_mtx_unlock(&mseg_mutex);
+ ERTS_MSEG_UNLOCK(ma);
return n;
}
@@ -1439,7 +1499,7 @@ erts_mseg_unit_size(void)
return page_size;
}
-static void mem_kind_init(MemKind* mk, const char* name)
+static void mem_kind_init(ErtsMsegAllctr_t *ma, MemKind* mk, const char* name)
{
unsigned i;
@@ -1450,10 +1510,10 @@ static void mem_kind_init(MemKind* mk, const char* name)
mk->cache_size = 0;
mk->cache_hits = 0;
- if (max_cache_size > 0) {
- for (i = 0; i < max_cache_size - 1; i++)
+ if (ma->max_cache_size > 0) {
+ for (i = 0; i < ma->max_cache_size - 1; i++)
mk->cache_descs[i].next = &mk->cache_descs[i + 1];
- mk->cache_descs[max_cache_size - 1].next = NULL;
+ mk->cache_descs[ma->max_cache_size - 1].next = NULL;
mk->free_cache_descs = &mk->cache_descs[0];
}
else
@@ -1467,30 +1527,38 @@ static void mem_kind_init(MemKind* mk, const char* name)
mk->segments.max_ever.no = 0;
mk->segments.max_ever.sz = 0;
+ mk->ma = ma;
mk->name = name;
- mk->next = mk_list;
- mk_list = mk;
+ mk->next = ma->mk_list;
+ ma->mk_list = mk;
}
+
+
void
erts_mseg_init(ErtsMsegInit_t *init)
{
- atoms_initialized = 0;
- is_init_done = 0;
+ int i;
+ UWord x;
- /* Options ... */
+#ifdef ERTS_SMP
+ no_mseg_allocators = init->nos + 1;
+#else
+ no_mseg_allocators = 1;
+#endif
- abs_max_cache_bad_fit = init->amcbf;
- rel_max_cache_bad_fit = init->rmcbf;
- max_cache_size = init->mcs;
- cache_check_interval = init->cci;
+ x = (UWord) malloc(sizeof(ErtsAlgndMsegAllctr_t)
+ *no_mseg_allocators
+ + (ERTS_CACHE_LINE_SIZE-1));
+ if (x & ERTS_CACHE_LINE_MASK)
+ x = (x & ~ERTS_CACHE_LINE_MASK) + ERTS_CACHE_LINE_SIZE;
+ ASSERT((x & ERTS_CACHE_LINE_MASK) == 0);
+ aligned_mseg_allctr = (ErtsAlgndMsegAllctr_t *) x;
- /* */
+ atoms_initialized = 0;
-#ifdef USE_THREADS
- thread_safe_init();
-#endif
+ erts_mtx_init(&init_atoms_mutex, "mseg_init_atoms");
#if HAVE_MMAP && !defined(MAP_ANON)
mmap_fd = open("/dev/zero", O_RDWR);
@@ -1512,34 +1580,55 @@ erts_mseg_init(ErtsMsegInit_t *init)
page_shift++;
}
- sys_memzero((void *) &calls, sizeof(calls));
+ for (i = 0; i < no_mseg_allocators; i++) {
+ ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_IX(i);
-#if CAN_PARTLY_DESTROY
- min_seg_size = ~((Uint) 0);
-#endif
+ ma->ix = i;
+
+ ma->is_init_done = 0;
+
+ if (i != 0)
+ ma->is_thread_safe = 0;
+ else {
+ ma->is_thread_safe = 1;
+ erts_mtx_init(&ma->mtx, "mseg");
+ }
+
+ ma->is_cache_check_scheduled = 0;
+
+ /* Options ... */
- if (max_cache_size > MAX_CACHE_SIZE)
- max_cache_size = MAX_CACHE_SIZE;
+ ma->abs_max_cache_bad_fit = init->amcbf;
+ ma->rel_max_cache_bad_fit = init->rmcbf;
+ ma->max_cache_size = init->mcs;
+
+ if (ma->max_cache_size > MAX_CACHE_SIZE)
+ ma->max_cache_size = MAX_CACHE_SIZE;
+
+ ma->mk_list = NULL;
#if HALFWORD_HEAP
- mem_kind_init(&low_mem, "low memory");
- mem_kind_init(&hi_mem, "high memory");
+ mem_kind_init(ma, &ma->low_mem, "low memory");
+ mem_kind_init(ma, &ma->hi_mem, "high memory");
#else
- mem_kind_init(&the_mem, "all memory");
+ mem_kind_init(ma, &ma->the_mem, "all memory");
#endif
- is_cache_check_scheduled = 0;
-#ifdef ERTS_THREADS_NO_SMP
- is_cache_check_requested = 0;
+ sys_memzero((void *) &ma->calls, sizeof(ErtsMsegCalls));
+
+#if CAN_PARTLY_DESTROY
+ ma->min_seg_size = ~((Uint) 0);
#endif
+ }
}
-static ERTS_INLINE Uint tot_cache_size(void)
+static ERTS_INLINE Uint tot_cache_size(ErtsMsegAllctr_t *ma)
{
MemKind* mk;
Uint sz = 0;
- for (mk=mk_list; mk; mk=mk->next) {
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
+ for (mk=ma->mk_list; mk; mk=mk->next) {
sz += mk->cache_size;
}
return sz;
@@ -1552,25 +1641,13 @@ static ERTS_INLINE Uint tot_cache_size(void)
void
erts_mseg_late_init(void)
{
-#ifdef ERTS_THREADS_NO_SMP
- int handle =
- erts_register_async_ready_callback(
- check_schedule_cache_check);
-#endif
- erts_mtx_lock(&mseg_mutex);
- is_init_done = 1;
-#ifdef ERTS_THREADS_NO_SMP
- async_handle = handle;
-#endif
- if (tot_cache_size())
- schedule_cache_check();
- erts_mtx_unlock(&mseg_mutex);
-}
-
-void
-erts_mseg_exit(void)
-{
- mseg_shutdown();
+ ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_SS();
+ ERTS_MSEG_LOCK(ma);
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
+ ma->is_init_done = 1;
+ if (tot_cache_size(ma))
+ schedule_cache_check(ma);
+ ERTS_MSEG_UNLOCK(ma);
}
#endif /* #if HAVE_ERTS_MSEG */
@@ -1599,12 +1676,13 @@ erts_mseg_test(unsigned long op,
erts_mseg_clear_cache();
return (unsigned long) 0;
case 0x405:
- return (unsigned long) erts_mseg_no();
+ return (unsigned long) erts_mseg_no(&erts_mseg_default_opt);
case 0x406: {
+ ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_IX(0);
unsigned long res;
- erts_mtx_lock(&mseg_mutex);
- res = (unsigned long) tot_cache_size();
- erts_mtx_unlock(&mseg_mutex);
+ ERTS_MSEG_LOCK(ma);
+ res = (unsigned long) tot_cache_size(ma);
+ ERTS_MSEG_UNLOCK(ma);
return res;
}
#else /* #if HAVE_ERTS_MSEG */
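Note (not part of the patch): the erl_mseg.c rewrite above replaces the single global allocator state with an array of instances — a thread-safe instance 0 plus one per scheduler — each padded to a cache line and selected by scheduler id via ERTS_MSEG_ALLCTR_IX/SS. The standalone sketch below shows only that layout idea; the field names, cache-line size, and my_scheduler_id() placeholder are assumptions, not the real ERTS definitions:

/* Sketch: cache-line-padded per-scheduler allocator instances. */
#include <stdio.h>
#include <stdlib.h>

#define CACHE_LINE_SIZE 64

typedef struct {
    int  ix;                /* instance index; 0 is the thread-safe one */
    int  is_thread_safe;
    long max_cache_size;
} MsegAllctr;

typedef union {
    MsegAllctr ma;
    char align__[((sizeof(MsegAllctr) + CACHE_LINE_SIZE - 1)
                  / CACHE_LINE_SIZE) * CACHE_LINE_SIZE];
} AlignedMsegAllctr;

static AlignedMsegAllctr *allctrs;
static int no_allctrs;

/* Placeholder for erts_get_scheduler_id(); the VM asks the scheduler. */
static int my_scheduler_id(void) { return 1; }

#define ALLCTR_IX(ix)  (&allctrs[(ix)].ma)
#define ALLCTR_SS()    ALLCTR_IX(my_scheduler_id())

int main(void)
{
    int i;
    no_allctrs = 1 + 4;       /* instance 0 + one per (assumed) scheduler */
    allctrs = calloc(no_allctrs, sizeof(AlignedMsegAllctr));
    for (i = 0; i < no_allctrs; i++) {
        allctrs[i].ma.ix = i;
        allctrs[i].ma.is_thread_safe = (i == 0);
    }
    printf("scheduler-specific instance: %d (thread safe: %d)\n",
           ALLCTR_SS()->ix, ALLCTR_SS()->is_thread_safe);
    free(allctrs);
    return 0;
}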
diff --git a/erts/emulator/sys/common/erl_mseg.h b/erts/emulator/sys/common/erl_mseg.h
index 8f116030a8..741080fb78 100644
--- a/erts/emulator/sys/common/erl_mseg.h
+++ b/erts/emulator/sys/common/erl_mseg.h
@@ -44,7 +44,7 @@ typedef struct {
Uint amcbf;
Uint rmcbf;
Uint mcs;
- Uint cci;
+ Uint nos;
} ErtsMsegInit_t;
#define ERTS_MSEG_INIT_DEFAULT_INITIALIZER \
@@ -60,6 +60,7 @@ typedef struct {
int preserv;
UWord abs_shrink_th;
UWord rel_shrink_th;
+ int sched_spec;
#if HALFWORD_HEAP
int low_mem;
#endif
@@ -75,14 +76,14 @@ void *erts_mseg_realloc(ErtsAlcType_t, void *, Uint, Uint *);
void *erts_mseg_realloc_opt(ErtsAlcType_t, void *, Uint, Uint *,
const ErtsMsegOpt_t *);
void erts_mseg_clear_cache(void);
-Uint erts_mseg_no(void);
+void erts_mseg_cache_check(void);
+Uint erts_mseg_no( const ErtsMsegOpt_t *);
Uint erts_mseg_unit_size(void);
void erts_mseg_init(ErtsMsegInit_t *init);
void erts_mseg_late_init(void); /* Have to be called after all allocators,
threads and timers have been initialized. */
-void erts_mseg_exit(void);
-Eterm erts_mseg_info_options(int *, void*, Uint **, Uint *);
-Eterm erts_mseg_info(int *, void*, int, Uint **, Uint *);
+Eterm erts_mseg_info_options(int, int *, void*, Uint **, Uint *);
+Eterm erts_mseg_info(int, int *, void*, int, Uint **, Uint *);
#endif /* #if HAVE_ERTS_MSEG */
diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c
index f5c785d683..26858052c4 100644
--- a/erts/emulator/sys/common/erl_poll.c
+++ b/erts/emulator/sys/common/erl_poll.c
@@ -68,6 +68,7 @@
# endif
# endif
#endif
+#include "erl_thr_progress.h"
#include "erl_driver.h"
#include "erl_alloc.h"
@@ -114,7 +115,7 @@
#endif
#define ERTS_POLL_USE_WAKEUP_PIPE \
- (ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(ERTS_SMP))
+ (ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(USE_THREADS))
#ifdef ERTS_SMP
@@ -124,11 +125,11 @@
erts_smp_mtx_unlock(&(PS)->mtx)
#define ERTS_POLLSET_SET_POLLED_CHK(PS) \
- ((int) erts_atomic32_xchg(&(PS)->polled, (erts_aint32_t) 1))
+ ((int) erts_atomic32_xchg_nob(&(PS)->polled, (erts_aint32_t) 1))
#define ERTS_POLLSET_UNSET_POLLED(PS) \
- erts_atomic32_set(&(PS)->polled, (erts_aint32_t) 0)
+ erts_atomic32_set_nob(&(PS)->polled, (erts_aint32_t) 0)
#define ERTS_POLLSET_IS_POLLED(PS) \
- ((int) erts_atomic32_read(&(PS)->polled))
+ ((int) erts_atomic32_read_nob(&(PS)->polled))
#else
@@ -142,11 +143,11 @@
#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
#define ERTS_POLLSET_SET_HAVE_UPDATE_REQUESTS(PS) \
- erts_smp_atomic32_set(&(PS)->have_update_requests, (erts_aint32_t) 1)
+ erts_smp_atomic32_set_nob(&(PS)->have_update_requests, (erts_aint32_t) 1)
#define ERTS_POLLSET_UNSET_HAVE_UPDATE_REQUESTS(PS) \
- erts_smp_atomic32_set(&(PS)->have_update_requests, (erts_aint32_t) 0)
+ erts_smp_atomic32_set_nob(&(PS)->have_update_requests, (erts_aint32_t) 0)
#define ERTS_POLLSET_HAVE_UPDATE_REQUESTS(PS) \
- ((int) erts_smp_atomic32_read(&(PS)->have_update_requests))
+ ((int) erts_smp_atomic32_read_nob(&(PS)->have_update_requests))
#else
#define ERTS_POLLSET_SET_HAVE_UPDATE_REQUESTS(PS)
#define ERTS_POLLSET_UNSET_HAVE_UPDATE_REQUESTS(PS)
@@ -261,7 +262,6 @@ struct ErtsPollSet_ {
#ifdef ERTS_SMP
erts_atomic32_t polled;
erts_smp_mtx_t mtx;
-#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
#endif
#if ERTS_POLL_USE_WAKEUP_PIPE
int wake_fds[2];
@@ -269,10 +269,8 @@ struct ErtsPollSet_ {
#if ERTS_POLL_USE_FALLBACK
int fallback_used;
#endif
-#ifdef ERTS_SMP
+#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
erts_atomic32_t wakeup_state;
-#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
- volatile int wakeup_state;
#endif
erts_smp_atomic32_t timeout;
#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
@@ -345,21 +343,16 @@ static void print_misc_debug_info(void);
static ERTS_INLINE void
reset_wakeup_state(ErtsPollSet ps)
{
-#ifdef ERTS_SMP
- erts_atomic32_set(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN);
- ERTS_THR_MEMORY_BARRIER;
-#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
- ps->wakeup_state = 0;
+#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ erts_atomic32_set_mb(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN);
#endif
}
static ERTS_INLINE int
is_woken(ErtsPollSet ps)
{
-#ifdef ERTS_SMP
+#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
return erts_atomic32_read_acqb(&ps->wakeup_state) != ERTS_POLL_NOT_WOKEN;
-#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
- return ps->wakeup_state != ERTS_POLL_NOT_WOKEN;
#else
return 0;
#endif
@@ -368,13 +361,9 @@ is_woken(ErtsPollSet ps)
static ERTS_INLINE int
is_interrupted_reset(ErtsPollSet ps)
{
-#ifdef ERTS_SMP
- return (erts_atomic32_xchg(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN)
+#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ return (erts_atomic32_xchg_nob(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN)
== ERTS_POLL_WOKEN_INTR);
-#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
- int res = ps->wakeup_state == ERTS_POLL_WOKEN_INTR;
- ps->wakeup_state = ERTS_POLL_NOT_WOKEN;
- return res;
#else
return 0;
#endif
@@ -383,16 +372,13 @@ is_interrupted_reset(ErtsPollSet ps)
static ERTS_INLINE void
woke_up(ErtsPollSet ps)
{
-#ifdef ERTS_SMP
- erts_aint32_t wakeup_state = erts_atomic32_read(&ps->wakeup_state);
+#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ erts_aint32_t wakeup_state = erts_atomic32_read_nob(&ps->wakeup_state);
if (wakeup_state == ERTS_POLL_NOT_WOKEN)
- (void) erts_atomic32_cmpxchg(&ps->wakeup_state,
- ERTS_POLL_WOKEN,
- ERTS_POLL_NOT_WOKEN);
- ASSERT(erts_atomic32_read(&ps->wakeup_state) != ERTS_POLL_NOT_WOKEN);
-#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
- if (ps->wakeup_state == ERTS_POLL_NOT_WOKEN)
- ps->wakeup_state = ERTS_POLL_WOKEN;
+ (void) erts_atomic32_cmpxchg_nob(&ps->wakeup_state,
+ ERTS_POLL_WOKEN,
+ ERTS_POLL_NOT_WOKEN);
+ ASSERT(erts_atomic32_read_nob(&ps->wakeup_state) != ERTS_POLL_NOT_WOKEN);
#endif
}
@@ -403,28 +389,27 @@ woke_up(ErtsPollSet ps)
#if ERTS_POLL_USE_WAKEUP_PIPE
static ERTS_INLINE void
-wake_poller(ErtsPollSet ps, int interrupted)
+wake_poller(ErtsPollSet ps, int interrupted, int async_signal_safe)
{
int wake;
-#ifdef ERTS_SMP
- erts_aint32_t wakeup_state;
- if (!interrupted)
- wakeup_state = erts_atomic32_cmpxchg_relb(&ps->wakeup_state,
- ERTS_POLL_WOKEN,
- ERTS_POLL_NOT_WOKEN);
+ if (async_signal_safe)
+ wake = 1;
else {
- /*
- * We might unnecessarily write to the pipe, however,
- * that isn't problematic.
- */
- wakeup_state = erts_atomic32_read(&ps->wakeup_state);
- erts_atomic32_set_relb(&ps->wakeup_state, ERTS_POLL_WOKEN_INTR);
+ erts_aint32_t wakeup_state;
+ if (!interrupted)
+ wakeup_state = erts_atomic32_cmpxchg_relb(&ps->wakeup_state,
+ ERTS_POLL_WOKEN,
+ ERTS_POLL_NOT_WOKEN);
+ else {
+ /*
+ * We might unnecessarily write to the pipe, however,
+ * that isn't problematic.
+ */
+ wakeup_state = erts_atomic32_read_nob(&ps->wakeup_state);
+ erts_atomic32_set_relb(&ps->wakeup_state, ERTS_POLL_WOKEN_INTR);
+ }
+ wake = wakeup_state == ERTS_POLL_NOT_WOKEN;
}
- wake = wakeup_state == ERTS_POLL_NOT_WOKEN;
-#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
- wake = ps->wakeup_state == ERTS_POLL_NOT_WOKEN;
- ps->wakeup_state = interrupted ? ERTS_POLL_WOKEN_INTR : ERTS_POLL_NOT_WOKEN;
-#endif
/*
* NOTE: This function might be called from signal handlers in the
* non-smp case; therefore, it has to be async-signal safe in
@@ -439,9 +424,17 @@ wake_poller(ErtsPollSet ps, int interrupted)
res = write(ps->wake_fds[1], "!", 1);
} while (res < 0 && errno == EINTR);
if (res <= 0 && errno != ERRNO_BLOCK) {
- fatal_error_async_signal_safe(__FILE__
- ":XXX:wake_poller(): "
- "Failed to write on wakeup pipe\n");
+ if (async_signal_safe)
+ fatal_error_async_signal_safe(__FILE__
+ ":XXX:wake_poller(): "
+ "Failed to write on wakeup pipe\n");
+ else
+ fatal_error("%s:%d:wake_poller(): "
+ "Failed to write to wakeup pipe fd=%d: "
+ "%s (%d)\n",
+ __FILE__, __LINE__,
+ ps->wake_fds[1],
+ erl_errno_id(errno), errno);
}
}
}
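Note (not part of the patch): wake_poller() above now separates an async-signal-safe path (used from signal handlers in the non-SMP case) from the normal path that updates wakeup_state first; both ultimately wake the poll loop by writing one byte to a pipe, which the loop later drains. A minimal POSIX self-pipe sketch, independent of the ERTS types and error handling, assuming a non-blocking pipe:

/* Self-pipe wakeup sketch; the writer uses only write(), which is
 * async-signal safe. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int wake_fds[2];               /* [0] read end, [1] write end */

static void wake_poller_sketch(void)
{
    ssize_t res;
    do {
        res = write(wake_fds[1], "!", 1);
    } while (res < 0 && errno == EINTR);
    /* EAGAIN means the pipe is already full: a wakeup byte is pending,
     * so the poller will wake anyway and the error can be ignored. */
}

static void drain_wakeup_pipe_sketch(void)
{
    char buf[32];
    ssize_t res;
    do {
        res = read(wake_fds[0], buf, sizeof(buf));
    } while (res > 0 || (res < 0 && errno == EINTR));
}

int main(void)
{
    if (pipe(wake_fds) != 0)
        return 1;
    fcntl(wake_fds[0], F_SETFL, O_NONBLOCK);
    fcntl(wake_fds[1], F_SETFL, O_NONBLOCK);
    wake_poller_sketch();             /* e.g. from a signal handler */
    drain_wakeup_pipe_sketch();       /* in the poll loop, after poll returns */
    printf("woke and drained\n");
    return 0;
}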
@@ -449,11 +442,18 @@ wake_poller(ErtsPollSet ps, int interrupted)
static ERTS_INLINE void
cleanup_wakeup_pipe(ErtsPollSet ps)
{
+#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ int intr = 0;
+#endif
int fd = ps->wake_fds[0];
int res;
do {
char buf[32];
res = read(fd, buf, sizeof(buf));
+#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ if (res > 0)
+ intr = 1;
+#endif
} while (res > 0 || (res < 0 && errno == EINTR));
if (res < 0 && errno != ERRNO_BLOCK) {
fatal_error("%s:%d:cleanup_wakeup_pipe(): "
@@ -463,6 +463,10 @@ cleanup_wakeup_pipe(ErtsPollSet ps)
fd,
erl_errno_id(errno), errno);
}
+#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ if (intr)
+ erts_atomic32_set_nob(&ps->wakeup_state, ERTS_POLL_WOKEN_INTR);
+#endif
}
static void
@@ -839,7 +843,7 @@ write_batch_buf(ErtsPollSet ps, ErtsPollBatchBuf *bbp)
ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_USEFLBCK;
ASSERT(ps->fds_status[fd].used_events);
ps->fds_status[fd].used_events = 0;
- erts_smp_atomic_dec(&ps->no_of_user_fds);
+ erts_smp_atomic_dec_nob(&ps->no_of_user_fds);
update_fallback_pollset(ps, fd);
ASSERT(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK);
break;
@@ -889,11 +893,11 @@ batch_update_pollset(ErtsPollSet ps, int fd, ErtsPollBatchBuf *bbp)
events = ERTS_POLL_EV_E2N(ps->fds_status[fd].events);
if (!events) {
buf[buf_len].events = POLLREMOVE;
- erts_smp_atomic_dec(&ps->no_of_user_fds);
+ erts_smp_atomic_dec_nob(&ps->no_of_user_fds);
}
else if (!ps->fds_status[fd].used_events) {
buf[buf_len].events = events;
- erts_smp_atomic_inc(&ps->no_of_user_fds);
+ erts_smp_atomic_inc_nob(&ps->no_of_user_fds);
}
else {
if ((ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_RST)
@@ -983,12 +987,12 @@ batch_update_pollset(ErtsPollSet ps, int fd, ErtsPollBatchBuf *bbp)
}
if (used_events) {
if (!events) {
- erts_smp_atomic_dec(&ps->no_of_user_fds);
+ erts_smp_atomic_dec_nob(&ps->no_of_user_fds);
}
}
else {
if (events)
- erts_smp_atomic_inc(&ps->no_of_user_fds);
+ erts_smp_atomic_inc_nob(&ps->no_of_user_fds);
}
ASSERT((events & ~(ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT)) == 0);
ASSERT((used_events & ~(ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT)) == 0);
@@ -1062,7 +1066,7 @@ update_pollset(ErtsPollSet ps, int fd)
epe.data.fd = epe_templ.data.fd;
res = epoll_ctl(ps->kp_fd, EPOLL_CTL_DEL, fd, &epe);
} while (res != 0 && errno == EINTR);
- erts_smp_atomic_dec(&ps->no_of_user_fds);
+ erts_smp_atomic_dec_nob(&ps->no_of_user_fds);
ps->fds_status[fd].used_events = 0;
}
@@ -1070,11 +1074,11 @@ update_pollset(ErtsPollSet ps, int fd)
/* A note on EPOLL_CTL_DEL: linux kernel versions before 2.6.9
need a non-NULL event pointer even though it is ignored... */
op = EPOLL_CTL_DEL;
- erts_smp_atomic_dec(&ps->no_of_user_fds);
+ erts_smp_atomic_dec_nob(&ps->no_of_user_fds);
}
else if (!ps->fds_status[fd].used_events) {
op = EPOLL_CTL_ADD;
- erts_smp_atomic_inc(&ps->no_of_user_fds);
+ erts_smp_atomic_inc_nob(&ps->no_of_user_fds);
}
else {
op = EPOLL_CTL_MOD;
@@ -1124,7 +1128,7 @@ update_pollset(ErtsPollSet ps, int fd)
/* Fall through ... */
case EPOLL_CTL_ADD: {
ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_USEFLBCK;
- erts_smp_atomic_dec(&ps->no_of_user_fds);
+ erts_smp_atomic_dec_nob(&ps->no_of_user_fds);
#if ERTS_POLL_USE_CONCURRENT_UPDATE
if (!*update_fallback) {
*update_fallback = 1;
@@ -1212,7 +1216,7 @@ static int update_pollset(ErtsPollSet ps, int fd)
#if ERTS_POLL_USE_FALLBACK
ASSERT(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK);
#endif
- erts_smp_atomic_dec(&ps->no_of_user_fds);
+ erts_smp_atomic_dec_nob(&ps->no_of_user_fds);
last_pix = --ps->no_poll_fds;
if (pix != last_pix) {
/* Move last pix to this pix */
@@ -1239,7 +1243,7 @@ static int update_pollset(ErtsPollSet ps, int fd)
ASSERT(!(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK)
|| fd == ps->kp_fd);
#endif
- erts_smp_atomic_inc(&ps->no_of_user_fds);
+ erts_smp_atomic_inc_nob(&ps->no_of_user_fds);
ps->fds_status[fd].pix = pix = ps->no_poll_fds++;
if (pix >= ps->poll_fds_len)
grow_poll_fds(ps, pix);
@@ -1290,7 +1294,7 @@ static int update_pollset(ErtsPollSet ps, int fd)
if (!ps->fds_status[fd].used_events) {
ASSERT(events);
- erts_smp_atomic_inc(&ps->no_of_user_fds);
+ erts_smp_atomic_inc_nob(&ps->no_of_user_fds);
#if ERTS_POLL_USE_FALLBACK
ps->no_select_fds++;
ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_INFLBCK;
@@ -1298,7 +1302,7 @@ static int update_pollset(ErtsPollSet ps, int fd)
}
else if (!events) {
ASSERT(ps->fds_status[fd].used_events);
- erts_smp_atomic_dec(&ps->no_of_user_fds);
+ erts_smp_atomic_dec_nob(&ps->no_of_user_fds);
ps->fds_status[fd].events = events;
#if ERTS_POLL_USE_FALLBACK
ps->no_select_fds--;
@@ -1497,7 +1501,7 @@ ERTS_POLL_EXPORT(erts_poll_controlv)(ErtsPollSet ps,
#ifdef ERTS_SMP
if (final_do_wake)
- wake_poller(ps, 0);
+ wake_poller(ps, 0, 0);
#endif /* ERTS_SMP */
}
@@ -1520,7 +1524,7 @@ ERTS_POLL_EXPORT(erts_poll_control)(ErtsPollSet ps,
#ifdef ERTS_SMP
if (*do_wake) {
- wake_poller(ps, 0);
+ wake_poller(ps, 0, 0);
}
#endif /* ERTS_SMP */
@@ -1893,10 +1897,10 @@ save_poll_result(ErtsPollSet ps, ErtsPollResFd pr[], int max_res,
}
static ERTS_INLINE int
-check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked)
+check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res)
{
- ASSERT(!*ps_locked);
- if (erts_smp_atomic_read(&ps->no_of_user_fds) == 0
+ int res;
+ if (erts_smp_atomic_read_nob(&ps->no_of_user_fds) == 0
&& tv->tv_usec == 0 && tv->tv_sec == 0) {
/* Nothing to poll and zero timeout; done... */
return 0;
@@ -1915,16 +1919,23 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked)
timeout = INT_MAX;
if (max_res > ps->res_events_len)
grow_res_events(ps, max_res);
- return epoll_wait(ps->kp_fd, ps->res_events, max_res, (int)timeout);
+#ifdef ERTS_SMP
+ if (timeout)
+ erts_thr_progress_prepare_wait(NULL);
+#endif
+ res = epoll_wait(ps->kp_fd, ps->res_events, max_res, (int)timeout);
#elif ERTS_POLL_USE_KQUEUE /* --- kqueue ------------------------------ */
struct timespec ts;
- ts.tv_sec = tv->tv_sec;
- ts.tv_nsec = tv->tv_usec*1000;
if (max_res > ps->res_events_len)
grow_res_events(ps, max_res);
- return kevent(ps->kp_fd, NULL, 0, ps->res_events, max_res, &ts);
+#ifdef ERTS_SMP
+ if (timeout)
+ erts_thr_progress_prepare_wait(NULL);
+#endif
+ ts.tv_sec = tv->tv_sec;
+ ts.tv_nsec = tv->tv_usec*1000;
+ res = kevent(ps->kp_fd, NULL, 0, ps->res_events, max_res, &ts);
#endif /* ----------------------------------------- */
-
}
else /* use fallback (i.e. poll() or select()) */
#endif /* ERTS_POLL_USE_FALLBACK */
@@ -1937,8 +1948,8 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked)
* the maximum number of file descriptors in the poll set.
*/
struct dvpoll poll_res;
- int nfds = (int) erts_smp_atomic_read(&ps->no_of_user_fds);
-#ifdef ERTS_SMP
+ int nfds = (int) erts_smp_atomic_read_nob(&ps->no_of_user_fds);
+#if ERTS_POLL_USE_WAKEUP_PIPE
nfds++; /* Wakeup pipe */
#endif
if (timeout > INT_MAX)
@@ -1947,22 +1958,38 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked)
if (poll_res.dp_nfds > ps->res_events_len)
grow_res_events(ps, poll_res.dp_nfds);
poll_res.dp_fds = ps->res_events;
+#ifdef ERTS_SMP
+ if (timeout)
+ erts_thr_progress_prepare_wait(NULL);
+#endif
poll_res.dp_timeout = (int) timeout;
- return ioctl(ps->kp_fd, DP_POLL, &poll_res);
+ res = ioctl(ps->kp_fd, DP_POLL, &poll_res);
#elif ERTS_POLL_USE_POLL /* --- poll -------------------------------- */
if (timeout > INT_MAX)
timeout = INT_MAX;
- return poll(ps->poll_fds, ps->no_poll_fds, (int) timeout);
+#ifdef ERTS_SMP
+ if (timeout)
+ erts_thr_progress_prepare_wait(NULL);
+#endif
+ res = poll(ps->poll_fds, ps->no_poll_fds, (int) timeout);
#elif ERTS_POLL_USE_SELECT /* --- select ------------------------------ */
- int res;
+ SysTimeval to = *tv;
+
ps->res_input_fds = ps->input_fds;
ps->res_output_fds = ps->output_fds;
+
+#ifdef ERTS_SMP
+ if (to.tv_sec || to.tv_usec)
+ erts_thr_progress_prepare_wait(NULL);
+#endif
res = select(ps->max_fd + 1,
&ps->res_input_fds,
&ps->res_output_fds,
NULL,
- tv);
+ &to);
#ifdef ERTS_SMP
+ if (to.tv_sec || to.tv_usec)
+ erts_thr_progress_finalize_wait(NULL);
if (res < 0
&& errno == EBADF
&& ERTS_POLLSET_HAVE_UPDATE_REQUESTS(ps)) {
@@ -1978,15 +2005,16 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked)
* have triggered, we fake an EAGAIN error and let the caller
* restart us.
*/
- SysTimeval zero_tv = {0, 0};
- *ps_locked = 1;
+ to.tv_sec = 0;
+ to.tv_usec = 0;
ERTS_POLLSET_LOCK(ps);
handle_update_requests(ps);
+ ERTS_POLLSET_UNLOCK(ps);
res = select(ps->max_fd + 1,
&ps->res_input_fds,
&ps->res_output_fds,
NULL,
- &zero_tv);
+ &to);
if (res == 0) {
errno = EAGAIN;
res = -1;
@@ -1996,6 +2024,11 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked)
return res;
#endif /* ----------------------------------------- */
}
+#ifdef ERTS_SMP
+ if (timeout)
+ erts_thr_progress_finalize_wait(NULL);
+#endif
+ return res;
}
}
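
check_fd_events() now brackets every blocking call (epoll_wait, kevent, the /dev/poll ioctl, poll, select) with erts_thr_progress_prepare_wait()/erts_thr_progress_finalize_wait() whenever a non-zero timeout is used, so the thread-progress machinery knows the poll thread may stop making progress. The shape of that bracketing, reduced to a hedged sketch where the progress_* functions are placeholders standing in for the ERTS calls:

    #include <poll.h>

    /* Stand-ins for erts_thr_progress_prepare_wait()/finalize_wait(NULL). */
    static void progress_prepare_wait(void)  { /* announce: may block      */ }
    static void progress_finalize_wait(void) { /* back to making progress  */ }

    static int wait_for_events(struct pollfd *fds, nfds_t nfds, int timeout_ms)
    {
        int res;

        if (timeout_ms != 0)
            progress_prepare_wait();   /* only when we might actually sleep */

        res = poll(fds, nfds, timeout_ms);

        if (timeout_ms != 0)
            progress_finalize_wait();

        return res;
    }
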
@@ -2007,7 +2040,9 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
{
int res, no_fds;
int ebadf = 0;
- int ps_locked;
+#ifdef ERTS_SMP
+ int ps_locked = 0;
+#endif
SysTimeval *tvp;
SysTimeval itv;
@@ -2049,8 +2084,7 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
}
#endif
- ps_locked = 0;
- res = check_fd_events(ps, tvp, no_fds, &ps_locked);
+ res = check_fd_events(ps, tvp, no_fds);
woke_up(ps);
@@ -2072,10 +2106,8 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
#endif
#ifdef ERTS_SMP
- if (!ps_locked) {
- ps_locked = 1;
- ERTS_POLLSET_LOCK(ps);
- }
+ ps_locked = 1;
+ ERTS_POLLSET_LOCK(ps);
#endif
no_fds = save_poll_result(ps, pr, no_fds, res, ebadf);
@@ -2111,19 +2143,26 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
void
ERTS_POLL_EXPORT(erts_poll_interrupt)(ErtsPollSet ps, int set)
{
-#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(ERTS_SMP)
- /*
- * NOTE: This function might be called from signal handlers in the
- * non-smp case; therefore, it has to be async-signal safe in
- * the non-smp case.
- */
+#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
if (!set)
reset_wakeup_state(ps);
else
- wake_poller(ps, 1);
+ wake_poller(ps, 1, 0);
#endif
}
+#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+void
+ERTS_POLL_EXPORT(erts_poll_async_sig_interrupt)(ErtsPollSet ps)
+{
+ /*
+     * NOTE: This function is called from signal handlers; it
+     *       therefore has to be async-signal safe.
+ */
+ wake_poller(ps, 1, 1);
+}
+#endif
+
/*
* erts_poll_interrupt_timed():
* If 'set' != 0, interrupt thread blocked in erts_poll_wait() if it
@@ -2132,21 +2171,21 @@ ERTS_POLL_EXPORT(erts_poll_interrupt)(ErtsPollSet ps, int set)
void
ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet ps,
int set,
- long msec)
+ erts_short_time_t msec)
{
#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(ERTS_SMP)
if (!set)
reset_wakeup_state(ps);
else {
if (erts_smp_atomic32_read_acqb(&ps->timeout) > (erts_aint32_t) msec)
- wake_poller(ps, 1);
+ wake_poller(ps, 1, 0);
#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
else {
if (ERTS_POLLSET_IS_POLLED(ps))
- erts_smp_atomic_inc(&ps->no_avoided_wakeups);
- erts_smp_atomic_inc(&ps->no_avoided_interrupts);
+ erts_smp_atomic_inc_nob(&ps->no_avoided_wakeups);
+ erts_smp_atomic_inc_nob(&ps->no_avoided_interrupts);
}
- erts_smp_atomic_inc(&ps->no_interrupt_timed);
+ erts_smp_atomic_inc_nob(&ps->no_interrupt_timed);
#endif
}
#endif
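
erts_poll_interrupt_timed() keeps its wakeup-avoidance logic: the poller is only woken when the timeout it armed itself with is longer than the new deadline, and under ERTS_POLL_COUNT_AVOIDED_WAKEUPS the skipped cases are now counted with relaxed _nob increments. A small sketch of just that decision, using C11 atomics and made-up field names rather than the ERTS pollset structure:

    #include <stdatomic.h>

    /* Hypothetical pollset fields; not the ERTS structure. */
    struct pollset {
        atomic_int  timeout_ms;       /* timeout the poll thread armed itself with */
        atomic_long avoided_wakeups;  /* statistics only */
    };

    static void wake_poller(struct pollset *ps) { (void)ps; /* write to wakeup pipe */ }

    /* Wake the poller only if it would otherwise sleep past 'msec';
       otherwise just count the avoided wakeup. */
    static void request_timeout(struct pollset *ps, int msec)
    {
        if (atomic_load_explicit(&ps->timeout_ms, memory_order_acquire) > msec)
            wake_poller(ps);
        else
            atomic_fetch_add_explicit(&ps->avoided_wakeups, 1,
                                      memory_order_relaxed);
    }
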
@@ -2208,7 +2247,7 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
ps->internal_fd_limit = 0;
ps->fds_status = NULL;
ps->fds_status_len = 0;
- erts_smp_atomic_init(&ps->no_of_user_fds, 0);
+ erts_smp_atomic_init_nob(&ps->no_of_user_fds, 0);
#if ERTS_POLL_USE_KERNEL_POLL
ps->kp_fd = -1;
#if ERTS_POLL_USE_EPOLL
@@ -2260,16 +2299,14 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
ps->update_requests.next = NULL;
ps->update_requests.len = 0;
ps->curr_upd_req_block = &ps->update_requests;
- erts_smp_atomic32_init(&ps->have_update_requests, 0);
+ erts_smp_atomic32_init_nob(&ps->have_update_requests, 0);
#endif
#ifdef ERTS_SMP
- erts_atomic32_init(&ps->polled, 0);
+ erts_atomic32_init_nob(&ps->polled, 0);
erts_smp_mtx_init(&ps->mtx, "pollset");
#endif
-#ifdef ERTS_SMP
- erts_atomic32_init(&ps->wakeup_state, (erts_aint32_t) 0);
-#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
- ps->wakeup_state = 0;
+#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ erts_atomic32_init_nob(&ps->wakeup_state, (erts_aint32_t) 0);
#endif
#if ERTS_POLL_USE_WAKEUP_PIPE
create_wakeup_pipe(ps);
@@ -2291,11 +2328,11 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
ps->internal_fd_limit = kp_fd + 1;
ps->kp_fd = kp_fd;
#endif
- erts_smp_atomic32_init(&ps->timeout, ERTS_AINT32_T_MAX);
+ erts_smp_atomic32_init_nob(&ps->timeout, ERTS_AINT32_T_MAX);
#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
- erts_smp_atomic_init(&ps->no_avoided_wakeups, 0);
- erts_smp_atomic_init(&ps->no_avoided_interrupts, 0);
- erts_smp_atomic_init(&ps->no_interrupt_timed, 0);
+ erts_smp_atomic_init_nob(&ps->no_avoided_wakeups, 0);
+ erts_smp_atomic_init_nob(&ps->no_avoided_interrupts, 0);
+ erts_smp_atomic_init_nob(&ps->no_interrupt_timed, 0);
#endif
#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
handle_update_requests(ps);
@@ -2303,7 +2340,7 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
#if ERTS_POLL_USE_FALLBACK
ps->fallback_used = 0;
#endif
- erts_smp_atomic_set(&ps->no_of_user_fds, 0); /* Don't count wakeup pipe and fallback fd */
+ erts_smp_atomic_set_nob(&ps->no_of_user_fds, 0); /* Don't count wakeup pipe and fallback fd */
erts_smp_spin_lock(&pollsets_lock);
ps->next = pollsets;
@@ -2449,8 +2486,8 @@ ERTS_POLL_EXPORT(erts_poll_info)(ErtsPollSet ps, ErtsPollInfo *pip)
pip->memory_size = size;
- pip->poll_set_size = (int) erts_smp_atomic_read(&ps->no_of_user_fds);
-#ifdef ERTS_SMP
+ pip->poll_set_size = (int) erts_smp_atomic_read_nob(&ps->no_of_user_fds);
+#if ERTS_POLL_USE_WAKEUP_PIPE
pip->poll_set_size++; /* Wakeup pipe */
#endif
@@ -2507,9 +2544,9 @@ ERTS_POLL_EXPORT(erts_poll_info)(ErtsPollSet ps, ErtsPollInfo *pip)
pip->max_fds = max_fds;
#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
- pip->no_avoided_wakeups = erts_smp_atomic_read(&ps->no_avoided_wakeups);
- pip->no_avoided_interrupts = erts_smp_atomic_read(&ps->no_avoided_interrupts);
- pip->no_interrupt_timed = erts_smp_atomic_read(&ps->no_interrupt_timed);
+ pip->no_avoided_wakeups = erts_smp_atomic_read_nob(&ps->no_avoided_wakeups);
+ pip->no_avoided_interrupts = erts_smp_atomic_read_nob(&ps->no_avoided_interrupts);
+ pip->no_interrupt_timed = erts_smp_atomic_read_nob(&ps->no_interrupt_timed);
#endif
ERTS_POLLSET_UNLOCK(ps);
@@ -2529,7 +2566,7 @@ fatal_error(char *format, ...)
{
va_list ap;
- if (ERTS_IS_CRASH_DUMPING || ERTS_GOT_SIGUSR1) {
+ if (ERTS_SOMEONE_IS_CRASH_DUMPING || ERTS_GOT_SIGUSR1) {
/*
* Crash dump writing and reception of sigusr1 (which will
* result in a crash dump) closes all file descriptors. This
@@ -2549,7 +2586,7 @@ fatal_error(char *format, ...)
static void
fatal_error_async_signal_safe(char *error_str)
{
- if (ERTS_IS_CRASH_DUMPING || ERTS_GOT_SIGUSR1) {
+ if (ERTS_SOMEONE_IS_CRASH_DUMPING || ERTS_GOT_SIGUSR1) {
/* See comment above in fatal_error() */
return;
}
diff --git a/erts/emulator/sys/common/erl_poll.h b/erts/emulator/sys/common/erl_poll.h
index 725a77a152..8dde619105 100644
--- a/erts/emulator/sys/common/erl_poll.h
+++ b/erts/emulator/sys/common/erl_poll.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -216,11 +216,14 @@ typedef struct {
#endif
} ErtsPollInfo;
+#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+void ERTS_POLL_EXPORT(erts_poll_async_sig_interrupt)(ErtsPollSet);
+#endif
void ERTS_POLL_EXPORT(erts_poll_interrupt)(ErtsPollSet,
int);
void ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet,
int,
- long);
+ erts_short_time_t);
ErtsPollEvents ERTS_POLL_EXPORT(erts_poll_control)(ErtsPollSet,
ErtsSysFdType,
ErtsPollEvents,
diff --git a/erts/emulator/sys/unix/erl_unix_sys.h b/erts/emulator/sys/unix/erl_unix_sys.h
index d8d51b192c..c8fcec8547 100644
--- a/erts/emulator/sys/unix/erl_unix_sys.h
+++ b/erts/emulator/sys/unix/erl_unix_sys.h
@@ -129,10 +129,12 @@
#define HAVE_ERTS_CHECK_IO_DEBUG
int erts_check_io_debug(void);
-
-#ifndef ENABLE_CHILD_WAITER_THREAD
+#ifndef ERTS_SMP
# undef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
# define ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+#endif
+
+#ifndef ENABLE_CHILD_WAITER_THREAD
# ifdef ERTS_SMP
# define ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
void erts_check_children(void);
@@ -144,6 +146,7 @@ typedef void *GETENV_STATE;
/*
** For the erl_timer_sup module.
*/
+typedef time_t erts_time_t;
typedef struct timeval SysTimeval;
diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c
index bafbbb0f6c..f94e0f2296 100644
--- a/erts/emulator/sys/unix/sys.c
+++ b/erts/emulator/sys/unix/sys.c
@@ -52,6 +52,7 @@
#define ERTS_WANT_GOT_SIGUSR1
#define WANT_NONBLOCKING /* must define this to pull in defs from sys.h */
#include "sys.h"
+#include "erl_thr_progress.h"
#if defined(__APPLE__) && defined(__MACH__) && !defined(__DARWIN__)
#define __DARWIN__ 1
@@ -127,7 +128,6 @@ static ErtsSysReportExit *report_exit_list;
static ErtsSysReportExit *report_exit_transit_list;
#endif
-extern int check_async_ready(void);
extern int driver_interrupt(int, int);
extern void do_break(void);
@@ -167,12 +167,12 @@ static int debug_log = 0;
#ifdef ERTS_SMP
erts_smp_atomic32_t erts_got_sigusr1;
#define ERTS_SET_GOT_SIGUSR1 \
- erts_smp_atomic32_set(&erts_got_sigusr1, 1)
+ erts_smp_atomic32_set_mb(&erts_got_sigusr1, 1)
#define ERTS_UNSET_GOT_SIGUSR1 \
- erts_smp_atomic32_set(&erts_got_sigusr1, 0)
+ erts_smp_atomic32_set_mb(&erts_got_sigusr1, 0)
static erts_smp_atomic32_t have_prepared_crash_dump;
#define ERTS_PREPARED_CRASH_DUMP \
- ((int) erts_smp_atomic32_xchg(&have_prepared_crash_dump, 1))
+ ((int) erts_smp_atomic32_xchg_nob(&have_prepared_crash_dump, 1))
#else
volatile int erts_got_sigusr1;
#define ERTS_SET_GOT_SIGUSR1 (erts_got_sigusr1 = 1)
@@ -242,9 +242,9 @@ static int max_files = -1;
#ifdef ERTS_SMP
erts_smp_atomic32_t erts_break_requested;
#define ERTS_SET_BREAK_REQUESTED \
- erts_smp_atomic32_set(&erts_break_requested, (erts_aint32_t) 1)
+ erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 1)
#define ERTS_UNSET_BREAK_REQUESTED \
- erts_smp_atomic32_set(&erts_break_requested, (erts_aint32_t) 0)
+ erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 0)
#else
volatile int erts_break_requested = 0;
#define ERTS_SET_BREAK_REQUESTED (erts_break_requested = 1)
@@ -263,8 +263,9 @@ int erts_use_kernel_poll = 0;
struct {
int (*select)(ErlDrvPort, ErlDrvEvent, int, int);
int (*event)(ErlDrvPort, ErlDrvEvent, ErlDrvEventData);
+ void (*check_io_as_interrupt)(void);
void (*check_io_interrupt)(int);
- void (*check_io_interrupt_tmd)(int, long);
+ void (*check_io_interrupt_tmd)(int, erts_short_time_t);
void (*check_io)(int);
Uint (*size)(void);
Eterm (*info)(void *);
@@ -302,6 +303,9 @@ init_check_io(void)
if (erts_use_kernel_poll) {
io_func.select = driver_select_kp;
io_func.event = driver_event_kp;
+#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+ io_func.check_io_as_interrupt = erts_check_io_async_sig_interrupt_kp;
+#endif
io_func.check_io_interrupt = erts_check_io_interrupt_kp;
io_func.check_io_interrupt_tmd = erts_check_io_interrupt_timed_kp;
io_func.check_io = erts_check_io_kp;
@@ -314,6 +318,9 @@ init_check_io(void)
else {
io_func.select = driver_select_nkp;
io_func.event = driver_event_nkp;
+#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+ io_func.check_io_as_interrupt = erts_check_io_async_sig_interrupt_nkp;
+#endif
io_func.check_io_interrupt = erts_check_io_interrupt_nkp;
io_func.check_io_interrupt_tmd = erts_check_io_interrupt_timed_nkp;
io_func.check_io = erts_check_io_nkp;
@@ -325,6 +332,11 @@ init_check_io(void)
}
}
+#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+#define ERTS_CHK_IO_AS_INTR() (*io_func.check_io_as_interrupt)()
+#else
+#define ERTS_CHK_IO_AS_INTR() (*io_func.check_io_interrupt)(1)
+#endif
#define ERTS_CHK_IO_INTR (*io_func.check_io_interrupt)
#define ERTS_CHK_IO_INTR_TMD (*io_func.check_io_interrupt_tmd)
#define ERTS_CHK_IO (*io_func.check_io)
@@ -339,6 +351,11 @@ init_check_io(void)
max_files = erts_check_io_max_files();
}
+#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+#define ERTS_CHK_IO_AS_INTR() erts_check_io_async_sig_interrupt()
+#else
+#define ERTS_CHK_IO_AS_INTR() erts_check_io_interrupt(1)
+#endif
#define ERTS_CHK_IO_INTR erts_check_io_interrupt
#define ERTS_CHK_IO_INTR_TMD erts_check_io_interrupt_timed
#define ERTS_CHK_IO erts_check_io
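
The new ERTS_CHK_IO_AS_INTR() macro picks an async-signal-safe interrupt entry when ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT is defined and otherwise falls back to the ordinary check_io_interrupt(1), both through the kernel-poll dispatch table and in the direct (non-kp) build. The same select-at-startup idea, compressed into a hedged, stand-alone sketch with invented function names:

    /* Hypothetical backends sharing one signature (not the ERTS ones). */
    static void kp_interrupt(int set)   { (void)set; /* wake kernel-poll set  */ }
    static void kp_sig_interrupt(void)  { /* async-signal-safe wake            */ }
    static void nkp_interrupt(int set)  { (void)set; /* wake poll/select set  */ }
    static void nkp_sig_interrupt(void) { /* async-signal-safe wake            */ }

    struct io_funcs {
        void (*check_io_interrupt)(int set);
        void (*check_io_as_interrupt)(void);  /* safe from a signal handler */
    };

    static struct io_funcs io_func;

    static void init_check_io(int use_kernel_poll)
    {
        if (use_kernel_poll) {
            io_func.check_io_interrupt    = kp_interrupt;
            io_func.check_io_as_interrupt = kp_sig_interrupt;
        } else {
            io_func.check_io_interrupt    = nkp_interrupt;
            io_func.check_io_as_interrupt = nkp_sig_interrupt;
        }
    }

    /* Break/SIGCHLD handlers go through the async-signal-safe entry so the
       poll thread never keeps sleeping on a stale timeout. */
    #define CHK_IO_AS_INTR() (*io_func.check_io_as_interrupt)()
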
@@ -346,15 +363,15 @@ init_check_io(void)
#endif
-#ifdef ERTS_SMP
void
erts_sys_schedule_interrupt(int set)
{
ERTS_CHK_IO_INTR(set);
}
+#ifdef ERTS_SMP
void
-erts_sys_schedule_interrupt_timed(int set, long msec)
+erts_sys_schedule_interrupt_timed(int set, erts_short_time_t msec)
{
ERTS_CHK_IO_INTR_TMD(set, msec);
}
@@ -364,7 +381,7 @@ Uint
erts_sys_misc_mem_sz(void)
{
Uint res = ERTS_CHK_IO_SZ();
- res += erts_smp_atomic_read(&sys_misc_mem_sz);
+ res += erts_smp_atomic_read_mb(&sys_misc_mem_sz);
return res;
}
@@ -509,9 +526,9 @@ erts_sys_pre_init(void)
#endif
}
#ifdef ERTS_SMP
- erts_smp_atomic32_init(&erts_break_requested, 0);
- erts_smp_atomic32_init(&erts_got_sigusr1, 0);
- erts_smp_atomic32_init(&have_prepared_crash_dump, 0);
+ erts_smp_atomic32_init_nob(&erts_break_requested, 0);
+ erts_smp_atomic32_init_nob(&erts_got_sigusr1, 0);
+ erts_smp_atomic32_init_nob(&have_prepared_crash_dump, 0);
#else
erts_break_requested = 0;
erts_got_sigusr1 = 0;
@@ -521,13 +538,12 @@ erts_sys_pre_init(void)
children_died = 0;
#endif
#endif /* USE_THREADS */
- erts_smp_atomic_init(&sys_misc_mem_sz, 0);
+ erts_smp_atomic_init_nob(&sys_misc_mem_sz, 0);
}
void
erl_sys_init(void)
{
- erts_smp_rwmtx_init(&environ_rwmtx, "environ");
#if !DISABLE_VFORK
{
int res;
@@ -553,7 +569,7 @@ erl_sys_init(void)
+ sizeof(CHILD_SETUP_PROG_NAME)
+ 1);
child_setup_prog = erts_alloc(ERTS_ALC_T_CS_PROG_PATH, csp_path_sz);
- erts_smp_atomic_add(&sys_misc_mem_sz, csp_path_sz);
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz, csp_path_sz);
sprintf(child_setup_prog,
"%s%c%s",
bindir,
@@ -732,7 +748,7 @@ break_requested(void)
erl_exit(ERTS_INTR_EXIT, "");
ERTS_SET_BREAK_REQUESTED;
- ERTS_CHK_IO_INTR(1); /* Make sure we don't sleep in poll */
+ ERTS_CHK_IO_AS_INTR(); /* Make sure we don't sleep in poll */
}
/* set up signal handlers for break and quit */
@@ -932,18 +948,13 @@ void
os_flavor(char* namebuf, /* Where to return the name. */
unsigned size) /* Size of name buffer. */
{
- static int called = 0;
- static struct utsname uts; /* Information about the system. */
-
- if (!called) {
- char* s;
+ struct utsname uts; /* Information about the system. */
+ char* s;
- (void) uname(&uts);
- called = 1;
- for (s = uts.sysname; *s; s++) {
- if (isupper((int) *s)) {
- *s = tolower((int) *s);
- }
+ (void) uname(&uts);
+ for (s = uts.sysname; *s; s++) {
+ if (isupper((int) *s)) {
+ *s = tolower((int) *s);
}
}
strcpy(namebuf, uts.sysname);
@@ -1022,14 +1033,15 @@ static struct driver_data {
/* Driver interfaces */
static ErlDrvData spawn_start(ErlDrvPort, char*, SysDriverOpts*);
static ErlDrvData fd_start(ErlDrvPort, char*, SysDriverOpts*);
-static int fd_control(ErlDrvData, unsigned int, char *, int, char **, int);
+static ErlDrvSSizeT fd_control(ErlDrvData, unsigned int, char *, ErlDrvSizeT,
+ char **, ErlDrvSizeT);
static ErlDrvData vanilla_start(ErlDrvPort, char*, SysDriverOpts*);
static int spawn_init(void);
static void fd_stop(ErlDrvData);
static void stop(ErlDrvData);
static void ready_input(ErlDrvData, ErlDrvEvent);
static void ready_output(ErlDrvData, ErlDrvEvent);
-static void output(ErlDrvData, char*, int);
+static void output(ErlDrvData, char*, ErlDrvSizeT);
static void outputv(ErlDrvData, ErlIOVec*);
static void stop_select(ErlDrvEvent, void*);
@@ -1108,31 +1120,6 @@ struct erl_drv_entry vanilla_driver_entry = {
stop_select
};
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
-static int async_drv_init(void);
-static ErlDrvData async_drv_start(ErlDrvPort, char*, SysDriverOpts*);
-static void async_drv_stop(ErlDrvData);
-static void async_drv_input(ErlDrvData, ErlDrvEvent);
-
-/* INTERNAL use only */
-
-struct erl_drv_entry async_driver_entry = {
- async_drv_init,
- async_drv_start,
- async_drv_stop,
- NULL,
- async_drv_input,
- NULL,
- "async",
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL
-};
-#endif
-
/* Handle SIGCHLD signals. */
#if (defined(SIG_SIGSET) || defined(SIG_SIGNAL))
static RETSIGTYPE onchld(void)
@@ -1146,7 +1133,7 @@ static RETSIGTYPE onchld(int signum)
smp_sig_notify('C');
#else
children_died = 1;
- ERTS_CHK_IO_INTR(1); /* Make sure we don't sleep in poll */
+ ERTS_CHK_IO_AS_INTR(); /* Make sure we don't sleep in poll */
#endif
}
@@ -1216,8 +1203,8 @@ static int spawn_init()
sys_sigset(SIGPIPE, SIG_IGN); /* Ignore - we'll handle the write failure */
driver_data = (struct driver_data *)
erts_alloc(ERTS_ALC_T_DRV_TAB, max_files * sizeof(struct driver_data));
- erts_smp_atomic_add(&sys_misc_mem_sz,
- max_files * sizeof(struct driver_data));
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz,
+ max_files * sizeof(struct driver_data));
for (i = 0; i < max_files; i++)
driver_data[i].pid = -1;
@@ -1740,10 +1727,10 @@ static int fd_get_window_size(int fd, Uint32 *width, Uint32 *height)
return -1;
}
-static int fd_control(ErlDrvData drv_data,
- unsigned int command,
- char *buf, int len,
- char **rbuf, int rlen)
+static ErlDrvSSizeT fd_control(ErlDrvData drv_data,
+ unsigned int command,
+ char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen)
{
int fd = (int)(long)drv_data;
char resbuff[2*sizeof(Uint32)];
@@ -1925,8 +1912,8 @@ static void clear_fd_data(int fd)
{
if (fd_data[fd].sz > 0) {
erts_free(ERTS_ALC_T_FD_ENTRY_BUF, (void *) fd_data[fd].buf);
- ASSERT(erts_smp_atomic_read(&sys_misc_mem_sz) >= fd_data[fd].sz);
- erts_smp_atomic_add(&sys_misc_mem_sz, -1*fd_data[fd].sz);
+ ASSERT(erts_smp_atomic_read_nob(&sys_misc_mem_sz) >= fd_data[fd].sz);
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz, -1*fd_data[fd].sz);
}
fd_data[fd].buf = NULL;
fd_data[fd].sz = 0;
@@ -2015,18 +2002,20 @@ static void outputv(ErlDrvData e, ErlIOVec* ev)
int ix = driver_data[fd].port_num;
int pb = driver_data[fd].packet_bytes;
int ofd = driver_data[fd].ofd;
- int n;
- int sz;
+ ssize_t n;
+ ErlDrvSizeT sz;
char lb[4];
char* lbp;
- int len = ev->size;
+ ErlDrvSizeT len = ev->size;
/* (len > ((unsigned long)-1 >> (4-pb)*8)) */
+ /* if (pb >= 0 && (len & (((ErlDrvSizeT)1 << (pb*8))) - 1) != len) {*/
if (((pb == 2) && (len > 0xffff)) || (pb == 1 && len > 0xff)) {
driver_failure_posix(ix, EINVAL);
return; /* -1; */
}
- put_int32(len, lb);
+ /* Handles 0 <= pb <= 4 only */
+ put_int32((Uint32) len, lb);
lbp = lb + (4-pb);
ev->iov[0].iov_base = lbp;
@@ -2057,14 +2046,14 @@ static void outputv(ErlDrvData e, ErlIOVec* ev)
}
-static void output(ErlDrvData e, char* buf, int len)
+static void output(ErlDrvData e, char* buf, ErlDrvSizeT len)
{
int fd = (int)(long)e;
int ix = driver_data[fd].port_num;
int pb = driver_data[fd].packet_bytes;
int ofd = driver_data[fd].ofd;
- int n;
- int sz;
+ ssize_t n;
+ ErlDrvSizeT sz;
char lb[4];
char* lbp;
struct iovec iv[2];
@@ -2261,7 +2250,7 @@ static void ready_input(ErlDrvData e, ErlDrvEvent ready_fd)
port_inp_failure(port_num, ready_fd, -1);
}
else {
- erts_smp_atomic_add(&sys_misc_mem_sz, h);
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz, h);
sys_memcpy(buf, cpos, bytes_left);
fd_data[ready_fd].buf = buf;
fd_data[ready_fd].sz = h;
@@ -2317,87 +2306,6 @@ static void stop_select(ErlDrvEvent fd, void* _)
close((int)fd);
}
-/*
-** Async opertation support
-*/
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
-static void
-sys_async_ready_failed(int fd, int r, int err)
-{
- char buf[120];
- sprintf(buf, "sys_async_ready(): Fatal error: fd=%d, r=%d, errno=%d\n",
- fd, r, err);
- erts_silence_warn_unused_result(write(2, buf, strlen(buf)));
- abort();
-}
-
-/* called from threads !! */
-void sys_async_ready(int fd)
-{
- int r;
- while (1) {
- r = write(fd, "0", 1); /* signal main thread fd MUST be async_fd[1] */
- if (r == 1) {
- DEBUGF(("sys_async_ready(): r = 1\r\n"));
- break;
- }
- if (r < 0 && errno == EINTR) {
- DEBUGF(("sys_async_ready(): r = %d\r\n", r));
- continue;
- }
- sys_async_ready_failed(fd, r, errno);
- }
-}
-
-static int async_drv_init(void)
-{
- async_fd[0] = -1;
- async_fd[1] = -1;
- return 0;
-}
-
-static ErlDrvData async_drv_start(ErlDrvPort port_num,
- char* name, SysDriverOpts* opts)
-{
- if (async_fd[0] != -1)
- return ERL_DRV_ERROR_GENERAL;
- if (pipe(async_fd) < 0)
- return ERL_DRV_ERROR_GENERAL;
-
- DEBUGF(("async_drv_start: %d\r\n", port_num));
-
- SET_NONBLOCKING(async_fd[0]);
- driver_select(port_num, async_fd[0], ERL_DRV_READ, 1);
-
- if (init_async(async_fd[1]) < 0)
- return ERL_DRV_ERROR_GENERAL;
- return (ErlDrvData)port_num;
-}
-
-static void async_drv_stop(ErlDrvData e)
-{
- int port_num = (int)(long)e;
-
- DEBUGF(("async_drv_stop: %d\r\n", port_num));
-
- exit_async();
-
- driver_select(port_num, async_fd[0], ERL_DRV_READ, 0);
-
- close(async_fd[0]);
- close(async_fd[1]);
- async_fd[0] = async_fd[1] = -1;
-}
-
-
-static void async_drv_input(ErlDrvData e, ErlDrvEvent fd)
-{
- char *buf[32];
- DEBUGF(("async_drv_input\r\n"));
- while (read((int) fd, (void *) buf, 32) > 0); /* fd MUST be async_fd[0] */
- check_async_ready(); /* invoke all async_ready */
-}
-#endif
void erts_do_break_handling(void)
{
@@ -2409,11 +2317,7 @@ void erts_do_break_handling(void)
* therefore, make sure that all threads but this one are blocked before
* proceeding!
*/
- erts_smp_block_system(0);
- /*
- * NOTE: since we allow gc we are not allowed to lock
- * (any) process main locks while blocking system...
- */
+ erts_smp_thr_progress_block();
/* during break we revert to initial settings */
/* this is done differently for oldshell */
@@ -2441,7 +2345,7 @@ void erts_do_break_handling(void)
tcsetattr(0,TCSANOW,&temp_mode);
}
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
}
/* Fills in the systems representation of the jam/beam process identifier.
@@ -2465,7 +2369,7 @@ erts_sys_putenv(char *buffer, int sep_ix)
#else
Uint sz = strlen(buffer)+1;
env = erts_alloc(ERTS_ALC_T_PUTENV_STR, sz);
- erts_smp_atomic_add(&sys_misc_mem_sz, sz);
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz, sz);
strcpy(env,buffer);
#endif
erts_smp_rwmtx_rwlock(&environ_rwmtx);
@@ -2475,12 +2379,10 @@ erts_sys_putenv(char *buffer, int sep_ix)
}
int
-erts_sys_getenv(char *key, char *value, size_t *size)
+erts_sys_getenv__(char *key, char *value, size_t *size)
{
- char *orig_value;
int res;
- erts_smp_rwmtx_rlock(&environ_rwmtx);
- orig_value = getenv(key);
+ char *orig_value = getenv(key);
if (!orig_value)
res = -1;
else {
@@ -2495,6 +2397,15 @@ erts_sys_getenv(char *key, char *value, size_t *size)
res = 0;
}
}
+ return res;
+}
+
+int
+erts_sys_getenv(char *key, char *value, size_t *size)
+{
+ int res;
+ erts_smp_rwmtx_rlock(&environ_rwmtx);
+ res = erts_sys_getenv__(key, value, size);
erts_smp_rwmtx_runlock(&environ_rwmtx);
return res;
}
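
erts_sys_getenv() is split into an unlocked core, erts_sys_getenv__(), plus a thin wrapper that takes environ_rwmtx, so callers that run before the lock exists (or already hold it) can use the double-underscore version directly. The same locked/unlocked split as a generic, hedged sketch using pthreads instead of the ERTS rwlock, with invented names:

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    static pthread_rwlock_t env_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Core: no locking; usable before the lock is set up or while holding it.
       Returns -1 if unset, 1 if the buffer is too small (*size updated), 0 on success. */
    static int sys_getenv__(const char *key, char *value, size_t *size)
    {
        const char *orig = getenv(key);
        size_t len;
        if (!orig)
            return -1;
        len = strlen(orig) + 1;
        if (len > *size) {
            *size = len;              /* tell the caller how much room is needed */
            return 1;
        }
        *size = len;
        memcpy(value, orig, len);
        return 0;
    }

    /* Public wrapper: take the read lock, delegate, release. */
    int sys_getenv(const char *key, char *value, size_t *size)
    {
        int res;
        pthread_rwlock_rdlock(&env_lock);
        res = sys_getenv__(key, value, size);
        pthread_rwlock_unlock(&env_lock);
        return res;
    }
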
@@ -2504,33 +2415,8 @@ sys_init_io(void)
{
fd_data = (struct fd_data *)
erts_alloc(ERTS_ALC_T_FD_TAB, max_files * sizeof(struct fd_data));
- erts_smp_atomic_add(&sys_misc_mem_sz,
- max_files * sizeof(struct fd_data));
-
-#ifdef USE_THREADS
-#ifdef ERTS_SMP
- if (init_async(-1) < 0)
- erl_exit(1, "Failed to initialize async-threads\n");
-#else
- {
- /* This is speical stuff, starting a driver from the
- * system routines, but is a nice way of handling stuff
- * the erlang way
- */
- SysDriverOpts dopts;
- int ret;
-
- sys_memset((void*)&dopts, 0, sizeof(SysDriverOpts));
- add_driver_entry(&async_driver_entry);
- ret = erts_open_driver(NULL, NIL, "async", &dopts, NULL);
- DEBUGF(("open_driver = %d\n", ret));
- if (ret < 0)
- erl_exit(1, "Failed to open async driver\n");
- erts_port[ret].status |= ERTS_PORT_SFLG_IMMORTAL;
- }
-#endif
-#endif
-
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz,
+ max_files * sizeof(struct fd_data));
}
#if (0) /* unused? */
@@ -2757,15 +2643,7 @@ initiate_report_exit_status(ErtsSysReportExit *rep, int status)
rep->next = report_exit_transit_list;
rep->status = status;
report_exit_transit_list = rep;
- /*
- * We need the scheduler thread to call check_children().
- * If the scheduler thread is sleeping in a poll with a
- * timeout, we need to wake the scheduler thread. We use the
- * functionality of the async driver to do this, instead of
- * implementing yet another driver doing the same thing. A
- * little bit ugly, but it works...
- */
- sys_async_ready(async_fd[1]);
+ erts_sys_schedule_interrupt(1);
}
static int check_children(void)
@@ -2852,20 +2730,11 @@ erl_sys_schedule(int runnable)
{
#ifdef ERTS_SMP
ERTS_CHK_IO(!runnable);
- ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING);
#else
- ERTS_CHK_IO_INTR(0);
- if (runnable) {
- ERTS_CHK_IO(0); /* Poll for I/O */
- check_async_ready(); /* Check async completions */
- } else {
- int wait_for_io = !check_async_ready();
- if (wait_for_io)
- wait_for_io = !check_children();
- ERTS_CHK_IO(wait_for_io);
- }
- (void) check_children();
+ ERTS_CHK_IO(runnable ? 0 : !check_children());
#endif
+ ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
+ (void) check_children();
}
@@ -2893,8 +2762,8 @@ smp_sig_notify(char c)
static void *
signal_dispatcher_thread_func(void *unused)
{
- int initialized = 0;
#if !CHLDWTHR
+ int initialized = 0;
int notify_check_children = 0;
#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -2922,20 +2791,20 @@ signal_dispatcher_thread_func(void *unused)
* to other threads.
*
* NOTE 2: The signal dispatcher thread is not a blockable
- * thread (i.e., it hasn't called
- * erts_register_blockable_thread()). This is
- * intentional. We want to be able to interrupt
- * writing of a crash dump by hitting C-c twice.
- * Since it isn't a blockable thread it is important
- * that it doesn't change the state of any data that
- * a blocking thread expects to have exclusive access
- * to (unless the signal dispatcher itself explicitly
- * is blocking all blockable threads).
+ * thread (i.e., not a thread managed by the
+ * erl_thr_progress module). This is intentional.
+ * We want to be able to interrupt writing of a crash
+ * dump by hitting C-c twice. Since it isn't a
+ * blockable thread it is important that it doesn't
+ * change the state of any data that a blocking thread
+ * expects to have exclusive access to (unless the
+ * signal dispatcher itself explicitly is blocking all
+ * blockable threads).
*/
switch (buf[i]) {
case 0: /* Emulator initialized */
- initialized = 1;
#if !CHLDWTHR
+ initialized = 1;
if (!notify_check_children)
#endif
break;
@@ -2970,7 +2839,7 @@ signal_dispatcher_thread_func(void *unused)
buf[i]);
}
}
- ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING);
+ ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
}
return NULL;
}
@@ -3090,6 +2959,8 @@ erl_sys_args(int* argc, char** argv)
{
int i, j;
+ erts_smp_rwmtx_init(&environ_rwmtx, "environ");
+
i = 1;
ASSERT(argc && argv);
@@ -3151,4 +3022,5 @@ erl_sys_args(int* argc, char** argv)
argv[j++] = argv[i];
}
*argc = j;
+
}
diff --git a/erts/emulator/sys/vxworks/erl_vxworks_sys.h b/erts/emulator/sys/vxworks/erl_vxworks_sys.h
index ae46403600..3d53238ea6 100644
--- a/erts/emulator/sys/vxworks/erl_vxworks_sys.h
+++ b/erts/emulator/sys/vxworks/erl_vxworks_sys.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -158,6 +158,7 @@ typedef struct _vxworks_tms {
typedef long long SysHrTime;
+typedef time_t erts_time_t;
typedef struct timeval SysTimeval;
extern int sys_init_hrtime(void);
diff --git a/erts/emulator/sys/vxworks/sys.c b/erts/emulator/sys/vxworks/sys.c
index c6e7b65f32..739b026fb1 100644
--- a/erts/emulator/sys/vxworks/sys.c
+++ b/erts/emulator/sys/vxworks/sys.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -238,6 +238,12 @@ erl_sys_args(int* argc, char** argv)
ASSERT(max_files <= erts_vxworks_max_files);
}
+void
+erts_sys_schedule_interrupt(int set)
+{
+ erts_check_io_interrupt(set);
+}
+
/*
* Called from schedule() when it runs out of runnable processes,
* or when Erlang code has performed INPUT_REDUCTIONS reduction
@@ -246,7 +252,6 @@ erl_sys_args(int* argc, char** argv)
void
erl_sys_schedule(int runnable)
{
- erts_check_io_interrupt(0);
erts_check_io(!runnable);
}
@@ -309,7 +314,7 @@ static void request_break(void)
fprintf(stderr,"break!\n");
#endif
erts_break_requested = 1;
- erts_check_io_interrupt(1); /* Make sure we don't sleep in erts_poll_wait */
+ erts_check_io_async_sig_interrupt(1); /* Make sure we don't sleep in erts_poll_wait */
}
static void do_quit(void)
@@ -560,7 +565,7 @@ static void fd_stop(ErlDrvData);
static void stop(ErlDrvData);
static void ready_input(ErlDrvData fd, ErlDrvEvent ready_fd);
static void ready_output(ErlDrvData fd, ErlDrvEvent ready_fd);
-static void output(ErlDrvData fd, char *buf, int len);
+static void output(ErlDrvData fd, char *buf, ErlDrvSizeT len);
static void stop_select(ErlDrvEvent, void*);
struct erl_drv_entry spawn_driver_entry = {
@@ -1182,7 +1187,7 @@ static int sched_write(int port_num,int fd, char *buf, int len, int pb)
}
/* Fd is the value returned as drv_data by the start func */
-static void output(ErlDrvData drv_data, char *buf, int len)
+static void output(ErlDrvData drv_data, char *buf, ErlDrvSizeT len)
{
int buf_done, port_num, wval, pb, ofd;
byte lb[4];
@@ -1515,6 +1520,12 @@ erts_sys_getenv(char *key, char *value, size_t *size)
return res;
}
+int
+erts_sys_getenv__(char *key, char *value, size_t *size)
+{
+ return erts_sys_getenv(key, value, size);
+}
+
void
sys_init_io(void)
{
@@ -2025,9 +2036,6 @@ int erl_memory_show(int p0, int p1, int p2, int p3, int p4, int p5,
erts_printf("The memory block used by elib is save_malloc'ed "
"at 0x%08x.\n", (unsigned int) alloc_pool_ptr);
}
-#ifdef NO_FIX_ALLOC
- erts_printf("Fix_alloc is disabled in this build\n");
-#endif
erts_printf("Statistics from elib_malloc:\n");
ELIB_LOCK;
diff --git a/erts/emulator/sys/win32/erl_poll.c b/erts/emulator/sys/win32/erl_poll.c
index 074e2e247f..7a1d129cd5 100644
--- a/erts/emulator/sys/win32/erl_poll.c
+++ b/erts/emulator/sys/win32/erl_poll.c
@@ -309,9 +309,9 @@ struct ErtsPollSet_ {
#ifdef ERTS_SMP
extern erts_smp_atomic32_t erts_break_requested;
#define ERTS_SET_BREAK_REQUESTED \
- erts_smp_atomic32_set(&erts_break_requested, (erts_aint32_t) 1)
+ erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 1)
#define ERTS_UNSET_BREAK_REQUESTED \
- erts_smp_atomic32_set(&erts_break_requested, (erts_aint32_t) 0)
+ erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 0)
#else
extern volatile int erts_break_requested;
#define ERTS_SET_BREAK_REQUESTED (erts_break_requested = 1)
@@ -371,19 +371,19 @@ do { \
static ERTS_INLINE int
is_io_ready(ErtsPollSet ps)
{
- return erts_atomic32_read(&ps->wakeup_state) == ERTS_POLL_WOKEN_IO_READY;
+ return erts_atomic32_read_nob(&ps->wakeup_state) == ERTS_POLL_WOKEN_IO_READY;
}
static ERTS_INLINE void
woke_up(ErtsPollSet ps)
{
- if (erts_atomic32_read(&ps->wakeup_state) == ERTS_POLL_NOT_WOKEN)
- erts_atomic32_cmpxchg(&ps->wakeup_state,
- ERTS_POLL_WOKEN_TIMEDOUT,
- ERTS_POLL_NOT_WOKEN);
+ if (erts_atomic32_read_nob(&ps->wakeup_state) == ERTS_POLL_NOT_WOKEN)
+ erts_atomic32_cmpxchg_nob(&ps->wakeup_state,
+ ERTS_POLL_WOKEN_TIMEDOUT,
+ ERTS_POLL_NOT_WOKEN);
#ifdef DEBUG
{
- erts_aint32_t wakeup_state = erts_atomic32_read(&ps->wakeup_state);
+ erts_aint32_t wakeup_state = erts_atomic32_read_nob(&ps->wakeup_state);
switch (wakeup_state) {
case ERTS_POLL_WOKEN_IO_READY:
case ERTS_POLL_WOKEN_INTR:
@@ -401,7 +401,7 @@ static ERTS_INLINE int
wakeup_cause(ErtsPollSet ps)
{
int res;
- erts_aint32_t wakeup_state = erts_atomic32_read(&ps->wakeup_state);
+ erts_aint32_t wakeup_state = erts_atomic32_read_nob(&ps->wakeup_state);
switch (wakeup_state) {
case ERTS_POLL_WOKEN_IO_READY:
res = 0;
@@ -439,11 +439,11 @@ poll_wait_timeout(ErtsPollSet ps, SysTimeval *tvp)
* by ResetEvent().
*/
ERTS_THR_MEMORY_BARRIER;
- if (erts_atomic32_read(&ps->wakeup_state) != ERTS_POLL_NOT_WOKEN)
+ if (erts_atomic32_read_nob(&ps->wakeup_state) != ERTS_POLL_NOT_WOKEN)
return (DWORD) 0;
- if (timeout > ERTS_AINT32_T_MAX) /* Also prevents DWORD overflow */
- timeout = ERTS_AINT32_T_MAX;
+ if (timeout > ((time_t) ERTS_AINT32_T_MAX))
+ timeout = ERTS_AINT32_T_MAX; /* Also prevents DWORD overflow */
erts_smp_atomic32_set_relb(&ps->timeout, (erts_aint32_t) timeout);
return (DWORD) timeout;
@@ -455,17 +455,17 @@ wake_poller(ErtsPollSet ps, int io_ready)
erts_aint32_t wakeup_state;
if (io_ready) {
/* We may set the event multiple times. This is, however, harmless. */
- wakeup_state = erts_atomic32_read(&ps->wakeup_state);
+ wakeup_state = erts_atomic32_read_nob(&ps->wakeup_state);
erts_atomic32_set_relb(&ps->wakeup_state, ERTS_POLL_WOKEN_IO_READY);
}
else {
ERTS_THR_MEMORY_BARRIER;
- wakeup_state = erts_atomic32_read(&ps->wakeup_state);
+ wakeup_state = erts_atomic32_read_nob(&ps->wakeup_state);
while (wakeup_state != ERTS_POLL_WOKEN_IO_READY
&& wakeup_state != ERTS_POLL_WOKEN_INTR) {
- erts_aint32_t act = erts_atomic32_cmpxchg(&ps->wakeup_state,
- ERTS_POLL_WOKEN_INTR,
- wakeup_state);
+ erts_aint32_t act = erts_atomic32_cmpxchg_nob(&ps->wakeup_state,
+ ERTS_POLL_WOKEN_INTR,
+ wakeup_state);
if (act == wakeup_state) {
wakeup_state = act;
break;
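
wake_poller() in the win32 pollset walks wakeup_state with a compare-and-swap loop: it only moves NOT_WOKEN (or TIMEDOUT) to WOKEN_INTR, and backs off as soon as another thread has already recorded IO_READY or INTR. The same loop shape in C11 atomics, offered as a hedged illustration rather than the ERTS code:

    #include <stdatomic.h>

    enum { NOT_WOKEN, WOKEN_TIMEDOUT, WOKEN_INTR, WOKEN_IO_READY };

    static _Atomic int wakeup_state = NOT_WOKEN;

    /* Try to record an interrupt wakeup without ever downgrading an
       already-recorded IO_READY or INTR state. */
    static int record_interrupt(void)
    {
        int cur = atomic_load(&wakeup_state);
        while (cur != WOKEN_IO_READY && cur != WOKEN_INTR) {
            /* On failure, 'cur' is reloaded with the value the CAS saw. */
            if (atomic_compare_exchange_weak(&wakeup_state, &cur, WOKEN_INTR))
                return 1;   /* we are the ones who must signal the poller */
        }
        return 0;           /* someone else already woke it */
    }
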
@@ -488,13 +488,13 @@ wake_poller(ErtsPollSet ps, int io_ready)
static ERTS_INLINE void
reset_io_ready(ErtsPollSet ps)
{
- erts_atomic32_set(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN);
+ erts_atomic32_set_nob(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN);
}
static ERTS_INLINE void
restore_io_ready(ErtsPollSet ps)
{
- erts_atomic32_set(&ps->wakeup_state, ERTS_POLL_WOKEN_IO_READY);
+ erts_atomic32_set_nob(&ps->wakeup_state, ERTS_POLL_WOKEN_IO_READY);
}
/*
@@ -511,12 +511,12 @@ static ERTS_INLINE void
reset_interrupt(ErtsPollSet ps)
{
/* We need to keep io-ready if set */
- erts_aint32_t wakeup_state = erts_atomic32_read(&ps->wakeup_state);
+ erts_aint32_t wakeup_state = erts_atomic32_read_nob(&ps->wakeup_state);
while (wakeup_state != ERTS_POLL_WOKEN_IO_READY
&& wakeup_state != ERTS_POLL_NOT_WOKEN) {
- erts_aint32_t act = erts_atomic32_cmpxchg(&ps->wakeup_state,
- ERTS_POLL_NOT_WOKEN,
- wakeup_state);
+ erts_aint32_t act = erts_atomic32_cmpxchg_nob(&ps->wakeup_state,
+ ERTS_POLL_NOT_WOKEN,
+ wakeup_state);
if (wakeup_state == act)
break;
wakeup_state = act;
@@ -692,7 +692,7 @@ static void *break_waiter(void *param)
case WAIT_OBJECT_0:
ResetEvent(harr[0]);
erts_mtx_lock(&break_waiter_lock);
- erts_atomic32_set(&break_waiter_state,BREAK_WAITER_GOT_BREAK);
+ erts_atomic32_set_nob(&break_waiter_state,BREAK_WAITER_GOT_BREAK);
ERTS_THR_MEMORY_BARRIER;
SetEvent(break_happened_event);
erts_mtx_unlock(&break_waiter_lock);
@@ -700,7 +700,7 @@ static void *break_waiter(void *param)
case (WAIT_OBJECT_0+1):
ResetEvent(harr[1]);
erts_mtx_lock(&break_waiter_lock);
- erts_atomic32_set(&break_waiter_state,BREAK_WAITER_GOT_HALT);
+ erts_atomic32_set_nob(&break_waiter_state,BREAK_WAITER_GOT_HALT);
ERTS_THR_MEMORY_BARRIER;
SetEvent(break_happened_event);
erts_mtx_unlock(&break_waiter_lock);
@@ -1012,7 +1012,7 @@ void erts_poll_interrupt(ErtsPollSet ps, int set /* bool */)
void erts_poll_interrupt_timed(ErtsPollSet ps,
int set /* bool */,
- long msec)
+ erts_short_time_t msec)
{
HARDTRACEF(("In erts_poll_interrupt_timed(%d,%ld)",set,msec));
if (!set)
@@ -1153,23 +1153,29 @@ int erts_poll_wait(ErtsPollSet ps,
/*HARDDEBUGF(("timeout = %ld",(long) timeout));*/
- if (timeout > 0 && !erts_atomic32_read(&break_waiter_state)) {
+ if (timeout > 0 && !erts_atomic32_read_nob(&break_waiter_state)) {
HANDLE harr[2] = {ps->event_io_ready, break_happened_event};
int num_h = 2;
HARDDEBUGF(("Start waiting %d [%d]",num_h, (int) timeout));
ERTS_POLLSET_UNLOCK(ps);
+#ifdef ERTS_SMP
+ erts_thr_progress_prepare_wait(NULL);
+#endif
WaitForMultipleObjects(num_h, harr, FALSE, timeout);
+#ifdef ERTS_SMP
+ erts_thr_progress_finalize_wait(NULL);
+#endif
ERTS_POLLSET_LOCK(ps);
HARDDEBUGF(("Stop waiting %d [%d]",num_h, (int) timeout));
woke_up(ps);
}
ERTS_UNSET_BREAK_REQUESTED;
- if(erts_atomic32_read(&break_waiter_state)) {
+ if(erts_atomic32_read_nob(&break_waiter_state)) {
erts_mtx_lock(&break_waiter_lock);
- break_state = erts_atomic32_read(&break_waiter_state);
- erts_atomic32_set(&break_waiter_state,0);
+ break_state = erts_atomic32_read_nob(&break_waiter_state);
+ erts_atomic32_set_nob(&break_waiter_state,0);
ResetEvent(break_happened_event);
erts_mtx_unlock(&break_waiter_lock);
switch (break_state) {
@@ -1236,7 +1242,7 @@ int erts_poll_wait(ErtsPollSet ps,
erts_mtx_unlock(&w->mtx);
}
done:
- erts_smp_atomic32_set(&ps->timeout, ERTS_AINT32_T_MAX);
+ erts_smp_atomic32_set_nob(&ps->timeout, ERTS_AINT32_T_MAX);
*len = num;
ERTS_POLLSET_UNLOCK(ps);
HARDTRACEF(("Out erts_poll_wait"));
@@ -1316,11 +1322,11 @@ ErtsPollSet erts_poll_create_pollset(void)
ps->standby_wait_event = CreateManualEvent(FALSE);
ps->restore_events = 0;
- erts_atomic32_init(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN);
+ erts_atomic32_init_nob(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN);
#ifdef ERTS_SMP
erts_smp_mtx_init(&ps->mtx, "pollset");
#endif
- erts_smp_atomic32_init(&ps->timeout, ERTS_AINT32_T_MAX);
+ erts_smp_atomic32_init_nob(&ps->timeout, ERTS_AINT32_T_MAX);
HARDTRACEF(("Out erts_poll_create_pollset"));
return ps;
@@ -1372,7 +1378,7 @@ void erts_poll_init(void)
erts_mtx_init(&break_waiter_lock,"break_waiter_lock");
break_happened_event = CreateManualEvent(FALSE);
- erts_atomic32_init(&break_waiter_state, 0);
+ erts_atomic32_init_nob(&break_waiter_state, 0);
erts_thr_create(&thread, &break_waiter, NULL, NULL);
ERTS_UNSET_BREAK_REQUESTED;
diff --git a/erts/emulator/sys/win32/erl_win32_sys_ddll.c b/erts/emulator/sys/win32/erl_win32_sys_ddll.c
index a19f49af10..d00eed932b 100644
--- a/erts/emulator/sys/win32/erl_win32_sys_ddll.c
+++ b/erts/emulator/sys/win32/erl_win32_sys_ddll.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -25,6 +25,9 @@
#include <windows.h>
#define GET_ERTS_ALC_TEST
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
#include "sys.h"
#include "global.h"
#include "erl_alloc.h"
diff --git a/erts/emulator/sys/win32/erl_win_dyn_driver.h b/erts/emulator/sys/win32/erl_win_dyn_driver.h
index ecb06868d5..ec5141838a 100644
--- a/erts/emulator/sys/win32/erl_win_dyn_driver.h
+++ b/erts/emulator/sys/win32/erl_win_dyn_driver.h
@@ -39,11 +39,11 @@ WDD_TYPEDEF(int, driver_exit, (ErlDrvPort, int));
WDD_TYPEDEF(int, driver_failure_eof, (ErlDrvPort));
WDD_TYPEDEF(int, driver_select, (ErlDrvPort, ErlDrvEvent, int, int));
WDD_TYPEDEF(int, driver_event, (ErlDrvPort, ErlDrvEvent,ErlDrvEventData));
-WDD_TYPEDEF(int, driver_output, (ErlDrvPort, char *, int));
-WDD_TYPEDEF(int, driver_output2, (ErlDrvPort, char *, int,char *, int));
-WDD_TYPEDEF(int, driver_output_binary, (ErlDrvPort, char *, int,ErlDrvBinary*, int, int));
-WDD_TYPEDEF(int, driver_outputv, (ErlDrvPort, char*, int, ErlIOVec *,int));
-WDD_TYPEDEF(int, driver_vec_to_buf, (ErlIOVec *, char *, int));
+WDD_TYPEDEF(int, driver_output, (ErlDrvPort, char *, ErlDrvSizeT));
+WDD_TYPEDEF(int, driver_output2, (ErlDrvPort, char *, ErlDrvSizeT ,char *, ErlDrvSizeT));
+WDD_TYPEDEF(int, driver_output_binary, (ErlDrvPort, char *, ErlDrvSizeT, ErlDrvBinary*, ErlDrvSizeT, ErlDrvSizeT));
+WDD_TYPEDEF(int, driver_outputv, (ErlDrvPort, char*, ErlDrvSizeT, ErlIOVec *, ErlDrvSizeT));
+WDD_TYPEDEF(ErlDrvSizeT, driver_vec_to_buf, (ErlIOVec *, char *, ErlDrvSizeT));
WDD_TYPEDEF(int, driver_set_timer, (ErlDrvPort, unsigned long));
WDD_TYPEDEF(int, driver_cancel_timer, (ErlDrvPort));
WDD_TYPEDEF(int, driver_read_timer, (ErlDrvPort, unsigned long *));
@@ -51,22 +51,22 @@ WDD_TYPEDEF(char *, erl_errno_id, (int));
WDD_TYPEDEF(void, set_busy_port, (ErlDrvPort, int));
WDD_TYPEDEF(void, set_port_control_flags, (ErlDrvPort, int));
WDD_TYPEDEF(int, get_port_flags, (ErlDrvPort));
-WDD_TYPEDEF(ErlDrvBinary *, driver_alloc_binary, (int));
-WDD_TYPEDEF(ErlDrvBinary *, driver_realloc_binary, (ErlDrvBinary *, int));
+WDD_TYPEDEF(ErlDrvBinary *, driver_alloc_binary, (ErlDrvSizeT));
+WDD_TYPEDEF(ErlDrvBinary *, driver_realloc_binary, (ErlDrvBinary *, ErlDrvSizeT));
WDD_TYPEDEF(void, driver_free_binary, (ErlDrvBinary *));
-WDD_TYPEDEF(void *, driver_alloc, (size_t));
-WDD_TYPEDEF(void *, driver_realloc, (void *, size_t));
+WDD_TYPEDEF(void *, driver_alloc, (ErlDrvSizeT));
+WDD_TYPEDEF(void *, driver_realloc, (void *, ErlDrvSizeT));
WDD_TYPEDEF(void, driver_free, (void *));
-WDD_TYPEDEF(int, driver_enq, (ErlDrvPort, char*, int));
-WDD_TYPEDEF(int, driver_pushq, (ErlDrvPort, char*, int));
-WDD_TYPEDEF(int, driver_deq, (ErlDrvPort, int));
-WDD_TYPEDEF(int, driver_sizeq, (ErlDrvPort));
-WDD_TYPEDEF(int, driver_enq_bin, (ErlDrvPort, ErlDrvBinary *, int,int));
-WDD_TYPEDEF(int, driver_pushq_bin, (ErlDrvPort, ErlDrvBinary *, int,int));
-WDD_TYPEDEF(int, driver_peekqv, (ErlDrvPort, ErlIOVec *));
+WDD_TYPEDEF(int, driver_enq, (ErlDrvPort, char*, ErlDrvSizeT));
+WDD_TYPEDEF(int, driver_pushq, (ErlDrvPort, char*, ErlDrvSizeT));
+WDD_TYPEDEF(ErlDrvSizeT, driver_deq, (ErlDrvPort, ErlDrvSizeT));
+WDD_TYPEDEF(ErlDrvSizeT, driver_sizeq, (ErlDrvPort));
+WDD_TYPEDEF(int, driver_enq_bin, (ErlDrvPort, ErlDrvBinary *, ErlDrvSizeT, ErlDrvSizeT));
+WDD_TYPEDEF(int, driver_pushq_bin, (ErlDrvPort, ErlDrvBinary *, ErlDrvSizeT, ErlDrvSizeT));
+WDD_TYPEDEF(ErlDrvSizeT, driver_peekqv, (ErlDrvPort, ErlIOVec *));
WDD_TYPEDEF(SysIOVec *, driver_peekq, (ErlDrvPort, int *));
-WDD_TYPEDEF(int, driver_enqv, (ErlDrvPort, ErlIOVec *, int));
-WDD_TYPEDEF(int, driver_pushqv, (ErlDrvPort, ErlIOVec *, int));
+WDD_TYPEDEF(int, driver_enqv, (ErlDrvPort, ErlIOVec *, ErlDrvSizeT));
+WDD_TYPEDEF(int, driver_pushqv, (ErlDrvPort, ErlIOVec *, ErlDrvSizeT));
WDD_TYPEDEF(void, add_driver_entry, (ErlDrvEntry *));
WDD_TYPEDEF(int, remove_driver_entry, (ErlDrvEntry *));
WDD_TYPEDEF(ErlDrvTermData, driver_mk_atom, (char*));
@@ -83,10 +83,10 @@ WDD_TYPEDEF(void *, driver_dl_open, (char *));
WDD_TYPEDEF(void *, driver_dl_sym, (void *, char *));
WDD_TYPEDEF(int, driver_dl_close, (void *));
WDD_TYPEDEF(char *, driver_dl_error, (void));
-WDD_TYPEDEF(unsigned long, erts_alc_test, (unsigned long,
- unsigned long,
- unsigned long,
- unsigned long));
+WDD_TYPEDEF(ErlDrvUInt, erts_alc_test, (ErlDrvUInt,
+ ErlDrvUInt,
+ ErlDrvUInt,
+ ErlDrvUInt));
WDD_TYPEDEF(ErlDrvSInt, driver_binary_get_refc, (ErlDrvBinary *dbp));
WDD_TYPEDEF(ErlDrvSInt, driver_binary_inc_refc, (ErlDrvBinary *dbp));
WDD_TYPEDEF(ErlDrvSInt, driver_binary_dec_refc, (ErlDrvBinary *dbp));
diff --git a/erts/emulator/sys/win32/erl_win_sys.h b/erts/emulator/sys/win32/erl_win_sys.h
index 92d8577537..03298a6c54 100644
--- a/erts/emulator/sys/win32/erl_win_sys.h
+++ b/erts/emulator/sys/win32/erl_win_sys.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -117,9 +117,30 @@ int erts_check_io_debug(void);
#define SYS_CLK_TCK 1000
#define SYS_CLOCK_RESOLUTION 1
+#if SIZEOF_TIME_T != 8
+# error "Unexpected sizeof(time_t)"
+#endif
+
+/*
+ * gcc uses a 4 byte time_t and vc++ uses an 8 byte time_t.
+ * Types seen in beam_emu.c *need* to have the same size
+ * as in the rest of the system...
+ */
+typedef __int64 erts_time_t;
+
+struct tm *sys_localtime_r(time_t *epochs, struct tm *ptm);
+struct tm *sys_gmtime_r(time_t *epochs, struct tm *ptm);
+time_t sys_mktime( struct tm *ptm);
+
+#define localtime_r sys_localtime_r
+#define HAVE_LOCALTIME_R 1
+#define gmtime_r sys_gmtime_r
+#define HAVE_GMTIME_R
+#define mktime sys_mktime
+
typedef struct {
- long tv_sec;
- long tv_usec;
+ erts_time_t tv_sec;
+ erts_time_t tv_usec;
} SysTimeval;
typedef struct {
@@ -169,10 +190,12 @@ void erts_sys_env_init(void);
extern volatile int erl_fp_exception;
#include <float.h>
-#if defined (__GNUC__)
+/* I suspect this test isn't right; it might depend on the version of GCC
+   rather than on whether it's a MINGW gcc, but I haven't been able to pinpoint the
+ exact point where _finite was added to the headers in cygwin... */
+#if defined (__GNUC__) && !defined(__MINGW32__)
int _finite(double x);
#endif
-#endif
/*#define NO_FPE_SIGNALS*/
#define erts_get_current_fp_exception() NULL
@@ -191,13 +214,6 @@ int _finite(double x);
#define erts_sys_block_fpe() 0
#define erts_sys_unblock_fpe(x) do{}while(0)
-#define SIZEOF_SHORT 2
-#define SIZEOF_INT 4
-#define SIZEOF_LONG 4
-#define SIZEOF_VOID_P 4
-#define SIZEOF_SIZE_T 4
-#define SIZEOF_OFF_T 4
-
/*
* Seems to be missing.
*/
@@ -210,3 +226,4 @@ typedef long ssize_t;
int init_async(int);
int exit_async(void);
#endif
+#endif
diff --git a/erts/emulator/sys/win32/sys.c b/erts/emulator/sys/win32/sys.c
index a2159d063c..b106f0932d 100644..100755
--- a/erts/emulator/sys/win32/sys.c
+++ b/erts/emulator/sys/win32/sys.c
@@ -198,7 +198,7 @@ Uint
erts_sys_misc_mem_sz(void)
{
Uint res = (Uint) erts_check_io_size();
- res += (Uint) erts_smp_atomic_read(&sys_misc_mem_sz);
+ res += (Uint) erts_smp_atomic_read_mb(&sys_misc_mem_sz);
return res;
}
@@ -216,6 +216,9 @@ void sys_tty_reset(int exit_code)
void erl_sys_args(int* argc, char** argv)
{
char *event_name;
+
+ erts_sys_env_init();
+
nohup = get_and_remove_option(argc, argv, "-nohup");
#ifdef DEBUG
@@ -471,7 +474,7 @@ static int spawn_init(void);
static int fd_init(void);
static void fd_stop(ErlDrvData);
static void stop(ErlDrvData);
-static void output(ErlDrvData, char*, int);
+static void output(ErlDrvData, char*, ErlDrvSizeT);
static void ready_input(ErlDrvData, ErlDrvEvent);
static void ready_output(ErlDrvData, ErlDrvEvent);
static void stop_select(ErlDrvEvent, void*);
@@ -566,51 +569,6 @@ struct erl_drv_entry vanilla_driver_entry = {
stop_select
};
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
-
-static int async_drv_init(void);
-static ErlDrvData async_drv_start(ErlDrvPort, char*, SysDriverOpts*);
-static void async_drv_stop(ErlDrvData);
-static void async_drv_input(ErlDrvData, ErlDrvEvent);
-
-/* INTERNAL use only */
-
-void null_output(ErlDrvData drv_data, char* buf, int len)
-{
-}
-
-void null_ready_output(ErlDrvData drv_data, ErlDrvEvent event)
-{
-}
-
-struct erl_drv_entry async_driver_entry = {
- async_drv_init,
- async_drv_start,
- async_drv_stop,
- null_output,
- async_drv_input,
- null_ready_output,
- "async",
- NULL, /* finish */
- NULL, /* handle */
- NULL, /* control */
- NULL, /* timeout */
- NULL, /* outputv */
- NULL, /* ready_async */
- NULL, /* flush */
- NULL, /* call */
- NULL, /* event */
- ERL_DRV_EXTENDED_MARKER,
- ERL_DRV_EXTENDED_MAJOR_VERSION,
- ERL_DRV_EXTENDED_MINOR_VERSION,
- 0, /* ERL_DRV_FLAGs */
- NULL,
- NULL, /* process_exit */
- stop_select
-};
-
-#endif
-
/*
* Initialises a DriverData structure.
*
@@ -648,7 +606,7 @@ new_driver_data(int port_num, int packet_bytes, int wait_objs_required, int use_
erts_smp_mtx_unlock(&sys_driver_data_lock);
return NULL;
}
- erts_smp_atomic_add(&sys_misc_mem_sz, dp->inBufSize);
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz, dp->inBufSize);
dp->outBufSize = 0;
dp->outbuf = NULL;
dp->port_num = port_num;
@@ -733,8 +691,8 @@ release_driver_data(DriverData* dp)
#endif
if (dp->inbuf != NULL) {
- ASSERT(erts_smp_atomic_read(&sys_misc_mem_sz) >= dp->inBufSize);
- erts_smp_atomic_add(&sys_misc_mem_sz, -1*dp->inBufSize);
+ ASSERT(erts_smp_atomic_read_nob(&sys_misc_mem_sz) >= dp->inBufSize);
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz, -1*dp->inBufSize);
DRV_BUF_FREE(dp->inbuf);
dp->inBufSize = 0;
dp->inbuf = NULL;
@@ -742,8 +700,8 @@ release_driver_data(DriverData* dp)
ASSERT(dp->inBufSize == 0);
if (dp->outbuf != NULL) {
- ASSERT(erts_smp_atomic_read(&sys_misc_mem_sz) >= dp->outBufSize);
- erts_smp_atomic_add(&sys_misc_mem_sz, -1*dp->outBufSize);
+ ASSERT(erts_smp_atomic_read_nob(&sys_misc_mem_sz) >= dp->outBufSize);
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz, -1*dp->outBufSize);
DRV_BUF_FREE(dp->outbuf);
dp->outBufSize = 0;
dp->outbuf = NULL;
@@ -1162,7 +1120,8 @@ spawn_init(void)
#endif
driver_data = (struct driver_data *)
erts_alloc(ERTS_ALC_T_DRV_TAB, max_files * sizeof(struct driver_data));
- erts_smp_atomic_add(&sys_misc_mem_sz, max_files*sizeof(struct driver_data));
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz,
+ max_files*sizeof(struct driver_data));
for (i = 0; i < max_files; i++)
driver_data[i].port_num = PORT_FREE;
@@ -1675,17 +1634,6 @@ create_child_process
WaitForSingleObject(hProcess, 50);
}
- /*
- * When an application spawns a process repeatedly, a new thread
- * instance will be created for each process but the previous
- * instances may not be cleaned up. This results in a significant
- * virtual memory loss each time the process is spawned. If there
- * is a WaitForInputIdle() call between CreateProcess() and
- * CloseHandle(), the problem does not occur. PSS ID Number: Q124121
- */
-
- WaitForInputIdle(piProcInfo.hProcess, 5000);
-
return ok;
}
@@ -1698,7 +1646,7 @@ create_child_process
static int create_pipe(HANDLE *phRead, HANDLE *phWrite, BOOL inheritRead, BOOL overlapped_io)
{
SECURITY_ATTRIBUTES sa = {sizeof(SECURITY_ATTRIBUTES), NULL, TRUE};
- char pipe_name[128]; /* Name of pipe. */
+ char pipe_name[256]; /* Name of pipe. */
Uint calls;
/*
@@ -1735,9 +1683,9 @@ static int create_pipe(HANDLE *phRead, HANDLE *phWrite, BOOL inheritRead, BOOL o
* Otherwise, create named pipes.
*/
- calls = (Uint) erts_smp_atomic_inctest(&pipe_creation_counter);
- sprintf(pipe_name, "\\\\.\\pipe\\erlang44_%d_%d",
- getpid(), calls);
+ calls = (UWord) erts_smp_atomic_inc_read_nob(&pipe_creation_counter);
+ erts_snprintf(pipe_name, sizeof(pipe_name),
+ "\\\\.\\pipe\\erlang44_%d_%bpu", getpid(), calls);
DEBUGF(("Creating pipe %s\n", pipe_name));
sa.bInheritHandle = inheritRead;
@@ -2483,13 +2431,13 @@ threaded_exiter(LPVOID param)
*/
static void
-output(ErlDrvData drv_data, char* buf, int len)
+output(ErlDrvData drv_data, char* buf, ErlDrvSizeT len)
/* long drv_data; /* The slot to use in the driver data table.
* For Windows NT, this is *NOT* a file handle.
* The handle is found in the driver data.
*/
-/* char *buf; /* Pointer to data to write to the port program. */
-/* int len; /* Number of bytes to write. */
+/* char *buf; /* Pointer to data to write to the port program. */
+/* ErlDrvSizeT len; /* Number of bytes to write. */
{
DriverData* dp;
int pb; /* The header size for this port. */
@@ -2529,7 +2477,7 @@ output(ErlDrvData drv_data, char* buf, int len)
}
dp->outBufSize = pb+len;
- erts_smp_atomic_add(&sys_misc_mem_sz, dp->outBufSize);
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz, dp->outBufSize);
/*
* Store header bytes (if any).
@@ -2558,8 +2506,8 @@ output(ErlDrvData drv_data, char* buf, int len)
} else {
dp->out.ov.Offset += pb+len; /* For vanilla driver. */
/* XXX OffsetHigh should be changed too. */
- ASSERT(erts_smp_atomic_read(&sys_misc_mem_sz) >= dp->outBufSize);
- erts_smp_atomic_add(&sys_misc_mem_sz, -1*dp->outBufSize);
+ ASSERT(erts_smp_atomic_read_nob(&sys_misc_mem_sz) >= dp->outBufSize);
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz, -1*dp->outBufSize);
DRV_BUF_FREE(dp->outbuf);
dp->outBufSize = 0;
dp->outbuf = NULL;
@@ -2673,9 +2621,9 @@ ready_input(ErlDrvData drv_data, ErlDrvEvent ready_event)
error = ERROR_NOT_ENOUGH_MEMORY;
break; /* Break out of loop into error handler. */
}
- ASSERT(erts_smp_atomic_read(&sys_misc_mem_sz) >= dp->inBufSize);
- erts_smp_atomic_add(&sys_misc_mem_sz,
- dp->totalNeeded - dp->inBufSize);
+ ASSERT(erts_smp_atomic_read_nob(&sys_misc_mem_sz) >= dp->inBufSize);
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz,
+ dp->totalNeeded - dp->inBufSize);
dp->inBufSize = dp->totalNeeded;
dp->inbuf = new_buf;
}
@@ -2771,12 +2719,12 @@ ready_output(ErlDrvData drv_data, ErlDrvEvent ready_event)
DEBUGF(("ready_output(%d, 0x%x)\n", drv_data, ready_event));
set_busy_port(dp->port_num, 0);
if (!(dp->outbuf)) {
- /* Happens because event sometimes get signalled during a succesful
+ /* Happens because event sometimes get signalled during a successful
write... */
return;
}
- ASSERT(erts_smp_atomic_read(&sys_misc_mem_sz) >= dp->outBufSize);
- erts_smp_atomic_add(&sys_misc_mem_sz, -1*dp->outBufSize);
+ ASSERT(erts_smp_atomic_read_nob(&sys_misc_mem_sz) >= dp->outBufSize);
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz, -1*dp->outBufSize);
DRV_BUF_FREE(dp->outbuf);
dp->outBufSize = 0;
dp->outbuf = NULL;
@@ -2824,30 +2772,6 @@ sys_init_io(void)
We estimate the number to twice the amount of ports.
We really dont know on windows, do we? */
max_files = 2*erts_max_ports;
-
-#ifdef USE_THREADS
-#ifdef ERTS_SMP
- if (init_async(-1) < 0)
- erl_exit(1, "Failed to initialize async-threads\n");
-#else
- {
- /* This is special stuff, starting a driver from the
- * system routines, but is a nice way of handling stuff
- * the erlang way
- */
- SysDriverOpts dopts;
- int ret;
-
- sys_memset((void*)&dopts, 0, sizeof(SysDriverOpts));
- add_driver_entry(&async_driver_entry);
- ret = erts_open_driver(NULL, NIL, "async", &dopts, NULL);
- DEBUGF(("open_driver = %d\n", ret));
- if (ret < 0)
- erl_exit(1, "Failed to open async driver\n");
- erts_port[ret].status |= ERTS_PORT_SFLG_IMMORTAL;
- }
-#endif
-#endif
}
#ifdef ERTS_SMP
@@ -2926,8 +2850,8 @@ Preload* sys_preloaded(void)
(num_preloaded+1)*sizeof(Preload));
res_name = erts_alloc(ERTS_ALC_T_PRELOADED,
(num_preloaded+1)*sizeof(unsigned));
- erts_smp_atomic_add(&sys_misc_mem_sz,
- (num_preloaded+1)*sizeof(Preload)
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz,
+ (num_preloaded+1)*sizeof(Preload)
+ (num_preloaded+1)*sizeof(unsigned));
for (i = 0; i < num_preloaded; i++) {
int n;
@@ -2939,7 +2863,7 @@ Preload* sys_preloaded(void)
n = GETWORD(data);
data += 2;
preloaded[i].name = erts_alloc(ERTS_ALC_T_PRELOADED, n+1);
- erts_smp_atomic_add(&sys_misc_mem_sz, n+1);
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz, n+1);
sys_memcpy(preloaded[i].name, data, n);
preloaded[i].name[n] = '\0';
data += n;
@@ -3281,7 +3205,7 @@ erts_sys_pre_init(void)
#endif
}
#endif
- erts_smp_atomic_init(&sys_misc_mem_sz, 0);
+ erts_smp_atomic_init_nob(&sys_misc_mem_sz, 0);
}
void noinherit_std_handle(DWORD type)
@@ -3297,8 +3221,6 @@ void erl_sys_init(void)
{
HANDLE handle;
- erts_sys_env_init();
-
noinherit_std_handle(STD_OUTPUT_HANDLE);
noinherit_std_handle(STD_INPUT_HANDLE);
noinherit_std_handle(STD_ERROR_HANDLE);
@@ -3310,7 +3232,7 @@ void erl_sys_init(void)
erts_smp_tsd_key_create(&win32_errstr_key);
InitializeCriticalSection(&htbc_lock);
#endif
- erts_smp_atomic_init(&pipe_creation_counter,0);
+ erts_smp_atomic_init_nob(&pipe_creation_counter,0);
/*
* Test if we have named pipes or not.
*/
@@ -3360,15 +3282,15 @@ void erl_sys_init(void)
SEM_FAILCRITICALERRORS | SEM_NOOPENFILEERRORBOX);
}
-#ifdef ERTS_SMP
void
erts_sys_schedule_interrupt(int set)
{
erts_check_io_interrupt(set);
}
+#ifdef ERTS_SMP
void
-erts_sys_schedule_interrupt_timed(int set, long msec)
+erts_sys_schedule_interrupt_timed(int set, erts_short_time_t msec)
{
erts_check_io_interrupt_timed(set, msec);
}
@@ -3382,76 +3304,7 @@ erts_sys_schedule_interrupt_timed(int set, long msec)
void
erl_sys_schedule(int runnable)
{
-#ifdef ERTS_SMP
erts_check_io(!runnable);
- ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING);
-#else
- erts_check_io_interrupt(0);
- if (runnable) {
- erts_check_io(0); /* Poll for I/O */
- check_async_ready(); /* Check async completions */
- } else {
- erts_check_io(check_async_ready() ? 0 : 1);
- }
-#endif
-}
-
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
-/*
- * Async operation support.
- */
-
-static ErlDrvEvent async_drv_event;
-
-void
-sys_async_ready(int fd)
-{
- SetEvent((HANDLE)async_drv_event);
-}
-
-static int
-async_drv_init(void)
-{
- async_drv_event = (ErlDrvEvent) NULL;
- return 0;
-}
-
-static ErlDrvData
-async_drv_start(ErlDrvPort port_num, char* name, SysDriverOpts* opts)
-{
- if (async_drv_event != (ErlDrvEvent) NULL) {
- return ERL_DRV_ERROR_GENERAL;
- }
- if ((async_drv_event = (ErlDrvEvent)CreateAutoEvent(FALSE)) == (ErlDrvEvent) NULL) {
- return ERL_DRV_ERROR_GENERAL;
- }
-
- driver_select(port_num, async_drv_event, ERL_DRV_READ|ERL_DRV_USE, 1);
- if (init_async(async_drv_event) < 0) {
- return ERL_DRV_ERROR_GENERAL;
- }
- return (ErlDrvData)port_num;
+ ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
}
-static void
-async_drv_stop(ErlDrvData port_num)
-{
- exit_async();
- driver_select((ErlDrvPort)port_num, async_drv_event, ERL_DRV_READ|ERL_DRV_USE, 0);
- /*CloseHandle((HANDLE)async_drv_event);*/
- async_drv_event = (ErlDrvEvent) NULL;
-}
-
-
-static void
-async_drv_input(ErlDrvData port_num, ErlDrvEvent e)
-{
- check_async_ready();
-
- /*
- * Our event is auto-resetting.
- */
-}
-
-#endif
-
diff --git a/erts/emulator/sys/win32/sys_env.c b/erts/emulator/sys/win32/sys_env.c
index 02c8433a10..064745d418 100644
--- a/erts/emulator/sys/win32/sys_env.c
+++ b/erts/emulator/sys/win32/sys_env.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -55,19 +55,17 @@ erts_sys_putenv(char *key_value, int sep_ix)
}
int
-erts_sys_getenv(char *key, char *value, size_t *size)
+erts_sys_getenv__(char *key, char *value, size_t *size)
{
size_t req_size = 0;
int res = 0;
DWORD new_size;
- erts_smp_rwmtx_rlock(&environ_rwmtx);
SetLastError(0);
new_size = GetEnvironmentVariable((LPCTSTR) key,
(LPTSTR) value,
(DWORD) *size);
res = !new_size && GetLastError() == ERROR_ENVVAR_NOT_FOUND ? -1 : 0;
- erts_smp_rwmtx_runlock(&environ_rwmtx);
if (res < 0)
return res;
res = new_size > *size ? 1 : 0;
@@ -75,6 +73,16 @@ erts_sys_getenv(char *key, char *value, size_t *size)
return res;
}
+int
+erts_sys_getenv(char *key, char *value, size_t *size)
+{
+ int res;
+ erts_smp_rwmtx_rlock(&environ_rwmtx);
+ res = erts_sys_getenv__(key, value, size);
+ erts_smp_rwmtx_runlock(&environ_rwmtx);
+ return res;
+}
+
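
The split above separates the unlocked core (erts_sys_getenv__) from the public entry point (erts_sys_getenv), which takes environ_rwmtx around it, so callers that already hold the lock can use the core directly. The same wrapper pattern, sketched with a POSIX rwlock standing in for the ERTS lock (all names here are illustrative, and the lookup itself is just the libc getenv):

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    static pthread_rwlock_t env_rwlock = PTHREAD_RWLOCK_INITIALIZER;

    /* Unlocked core: the caller must already hold env_rwlock. */
    static int my_getenv__(const char *key, char *value, size_t *size)
    {
        const char *v = getenv(key);
        size_t len;
        if (!v)
            return -1;                /* not found */
        len = strlen(v);
        if (len + 1 > *size) {
            *size = len + 1;
            return 1;                 /* caller's buffer is too small */
        }
        memcpy(value, v, len + 1);
        *size = len;
        return 0;
    }

    /* Public entry point: read-lock around the unlocked core. */
    static int my_getenv(const char *key, char *value, size_t *size)
    {
        int res;
        pthread_rwlock_rdlock(&env_rwlock);
        res = my_getenv__(key, value, size);
        pthread_rwlock_unlock(&env_rwlock);
        return res;
    }
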
struct win32_getenv_state {
char *env;
char *next;
diff --git a/erts/emulator/sys/win32/sys_float.c b/erts/emulator/sys/win32/sys_float.c
index 9e67ca7f48..6558ad2d99 100644
--- a/erts/emulator/sys/win32/sys_float.c
+++ b/erts/emulator/sys/win32/sys_float.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -18,6 +18,9 @@
*/
/* Float conversions */
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
#include "sys.h"
#include "signal.h"
diff --git a/erts/emulator/sys/win32/sys_interrupt.c b/erts/emulator/sys/win32/sys_interrupt.c
index 943c338794..347c31053b 100644
--- a/erts/emulator/sys/win32/sys_interrupt.c
+++ b/erts/emulator/sys/win32/sys_interrupt.c
@@ -19,8 +19,12 @@
/*
* Purpose: Interrupt handling in windows.
*/
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
#include "sys.h"
#include "erl_alloc.h"
+#include "erl_thr_progress.h"
#include "erl_driver.h"
#include "../../drivers/win32/win_con.h"
@@ -33,9 +37,9 @@
#ifdef ERTS_SMP
erts_smp_atomic32_t erts_break_requested;
#define ERTS_SET_BREAK_REQUESTED \
- erts_smp_atomic32_set(&erts_break_requested, (erts_aint32_t) 1)
+ erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 1)
#define ERTS_UNSET_BREAK_REQUESTED \
- erts_smp_atomic32_set(&erts_break_requested, (erts_aint32_t) 0)
+ erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 0)
#else
volatile int erts_break_requested = 0;
#define ERTS_SET_BREAK_REQUESTED (erts_break_requested = 1)
@@ -52,14 +56,14 @@ void erts_do_break_handling(void)
* therefore, make sure that all threads but this one are blocked before
* proceeding!
*/
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
/* call the break handling function, reset the flag */
do_break();
ResetEvent(erts_sys_break_event);
ERTS_UNSET_BREAK_REQUESTED;
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
}
diff --git a/erts/emulator/sys/win32/sys_time.c b/erts/emulator/sys/win32/sys_time.c
index 50e43065b5..b5123dc45d 100644
--- a/erts/emulator/sys/win32/sys_time.c
+++ b/erts/emulator/sys/win32/sys_time.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -20,6 +20,9 @@
* Purpose: System-dependent time functions.
*/
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
#include "sys.h"
#include "assert.h"
@@ -32,28 +35,336 @@
/******************* Routines for time measurement *********************/
#define EPOCH_JULIAN_DIFF LL_LITERAL(11644473600)
+#define TICKS_PER_SECOND LL_LITERAL(10000000)
+#define SECONDS_PER_DAY LL_LITERAL(86400)
+
+#define ULI_TO_FILETIME(ft,ull) \
+ do { \
+ (ft).dwLowDateTime = (ull).LowPart; \
+ (ft).dwHighDateTime = (ull).HighPart; \
+ } while (0)
+
+#define FILETIME_TO_ULI(ull,ft) \
+ do { \
+ (ull).LowPart = (ft).dwLowDateTime; \
+ (ull).HighPart = (ft).dwHighDateTime; \
+ } while (0)
+
+#define EPOCH_TO_FILETIME(ft, epoch) \
+ do { \
+ ULARGE_INTEGER ull; \
+ ull.QuadPart = (((epoch) + EPOCH_JULIAN_DIFF) * TICKS_PER_SECOND); \
+ ULI_TO_FILETIME(ft,ull); \
+ } while(0)
+
+#define FILETIME_TO_EPOCH(epoch, ft) \
+ do { \
+ ULARGE_INTEGER ull; \
+ FILETIME_TO_ULI(ull,ft); \
+ (epoch) = ((ull.QuadPart / TICKS_PER_SECOND) - EPOCH_JULIAN_DIFF); \
+ } while(0)
+
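
The EPOCH_TO_FILETIME/FILETIME_TO_EPOCH macros above are plain fixed-point arithmetic between Unix epoch seconds and FILETIME ticks (100 ns units counted from 1601-01-01). The same conversions as standalone helpers on uint64_t, with no Windows types involved (function names are illustrative):

    #include <stdint.h>

    #define EPOCH_JULIAN_DIFF 11644473600ULL  /* seconds from 1601 to 1970 */
    #define TICKS_PER_SECOND  10000000ULL     /* one FILETIME tick = 100 ns */

    /* Unix epoch seconds -> FILETIME ticks since 1601-01-01. */
    static uint64_t epoch_to_filetime_ticks(uint64_t epoch)
    {
        return (epoch + EPOCH_JULIAN_DIFF) * TICKS_PER_SECOND;
    }

    /* FILETIME ticks since 1601-01-01 -> Unix epoch seconds. */
    static uint64_t filetime_ticks_to_epoch(uint64_t ticks)
    {
        return ticks / TICKS_PER_SECOND - EPOCH_JULIAN_DIFF;
    }
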
static SysHrTime wrap = 0;
static DWORD last_tick_count = 0;
+/* Getting timezone information is a heavy operation, so we want to do this
+ only once */
+
+static TIME_ZONE_INFORMATION static_tzi;
+static int have_static_tzi = 0;
+
+static int days_in_month[2][13] = {
+ {0,31,28,31,30,31,30,31,31,30,31,30,31},
+ {0,31,29,31,30,31,30,31,31,30,31,30,31}};
+
int
sys_init_time(void)
{
+ if(GetTimeZoneInformation(&static_tzi) &&
+ static_tzi.StandardDate.wMonth != 0 &&
+ static_tzi.DaylightDate.wMonth != 0) {
+ have_static_tzi = 1;
+ }
return 1;
}
+/* Returns a DST switch time as a UTC filetime, given data from a
+   TIME_ZONE_INFORMATION; see sys_localtime_r for usage. */
+static void
+get_dst_switchtime(DWORD year,
+ SYSTEMTIME dstinfo, LONG bias,
+ FILETIME *utc_switchtime)
+{
+ DWORD occu;
+ DWORD weekday,wday_1st;
+ DWORD day, days_in;
+ FILETIME tmp,tmp2;
+ ULARGE_INTEGER ull;
+ int leap_year = 0;
+ if (dstinfo.wYear != 0) {
+	/* A year-specific transition: the structure is already filled in
+	   for that particular year. If the year does not match the
+	   parameter, return a zeroed filetime; otherwise fall through and
+	   generate the filetime directly. */
+ if (year != dstinfo.wYear) {
+ utc_switchtime->dwLowDateTime = utc_switchtime->dwHighDateTime = 0;
+ return;
+ }
+ } else {
+ occu = dstinfo.wDay;
+ weekday = dstinfo.wDayOfWeek;
+
+ dstinfo.wDayOfWeek = 0;
+ dstinfo.wDay = 1;
+ dstinfo.wYear = year;
+
+ SystemTimeToFileTime(&dstinfo,&tmp);
+ ull.LowPart = tmp.dwLowDateTime;
+ ull.HighPart = tmp.dwHighDateTime;
+
+ ull.QuadPart /= (TICKS_PER_SECOND*SECONDS_PER_DAY); /* Julian Day */
+ wday_1st = (DWORD) ((ull.QuadPart + LL_LITERAL(1)) % LL_LITERAL(7));
+ day = (weekday >= wday_1st) ?
+ weekday - wday_1st + 1 :
+ weekday - wday_1st + 8;
+ --occu;
+ if (((dstinfo.wYear % 4) == 0 && (dstinfo.wYear % 100) > 0) ||
+ ((dstinfo.wYear % 400) == 0)) {
+ leap_year = 1;
+ }
+ days_in = days_in_month[leap_year][dstinfo.wMonth];
+ while (occu > 0 && (day + 7 <= days_in)) {
+ --occu;
+ day += 7;
+ }
+ dstinfo.wDay = day;
+ }
+ SystemTimeToFileTime(&dstinfo,&tmp);
+ /* correct for bias */
+ ull.LowPart = tmp.dwLowDateTime;
+ ull.HighPart = tmp.dwHighDateTime;
+ ull.QuadPart += (((LONGLONG) bias) * LL_LITERAL(60) * TICKS_PER_SECOND);
+ utc_switchtime->dwLowDateTime = ull.LowPart;
+ utc_switchtime->dwHighDateTime = ull.HighPart;
+ return;
+}
+
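
get_dst_switchtime above resolves a recurring TIME_ZONE_INFORMATION rule of the form "the Nth <weekday> of <month>" (with wDay == 5 meaning "the last one"). The core day-of-month calculation, extracted into a standalone helper for clarity (the helper and its parameter names are illustrative; the driver keeps this logic inline):

    /* occurrence: 1..5, where 5 means "last"; weekday and wday_1st use
       0 = Sunday .. 6 = Saturday; days_in is the length of the month.
       Returns the day of the month of the requested occurrence. */
    static int nth_weekday_of_month(int occurrence, int weekday,
                                    int wday_1st, int days_in)
    {
        /* First occurrence of 'weekday' in the month. */
        int day = (weekday >= wday_1st) ? weekday - wday_1st + 1
                                        : weekday - wday_1st + 8;
        /* Step forward whole weeks while the requested occurrence allows it
           and we stay inside the month ("last" simply runs out of room). */
        --occurrence;
        while (occurrence > 0 && day + 7 <= days_in) {
            --occurrence;
            day += 7;
        }
        return day;
    }
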
+/* This function gives approximately the correct year from a FILETIME.
+   Right around new year it may return the wrong value, but that is OK
+   since DST never switches at new year. */
+static DWORD
+approx_year(FILETIME ft)
+{
+ ULARGE_INTEGER ull;
+ FILETIME_TO_ULI(ull,ft);
+ ull.QuadPart /= LL_LITERAL(1000);
+ ull.QuadPart /= SECONDS_PER_DAY;
+ ull.QuadPart /= LL_LITERAL(3652425);
+ ull.QuadPart += 1601;
+ return (DWORD) ull.QuadPart;
+}
+
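
approx_year divides the tick count by 1000 * 86400 * 3652425, which equals the number of 100-ns ticks in an average Gregorian year (365.2425 days), and then adds 1601. Because it ignores month boundaries it can be off by one right at new year, which is harmless when only choosing DST rules. A quick standalone sanity check (the helper name is illustrative):

    #include <assert.h>
    #include <stdint.h>

    /* Same computation as approx_year, on a plain 64-bit tick count. */
    static unsigned approx_year_from_ticks(uint64_t ticks)
    {
        return (unsigned) (ticks / 1000u / 86400u / 3652425u + 1601);
    }

    int main(void)
    {
        /* Roughly 1970-07-01: the Unix epoch plus about 181 days, in ticks. */
        uint64_t mid_1970 = (11644473600ULL + 181ULL * 86400ULL) * 10000000ULL;
        assert(approx_year_from_ticks(mid_1970) == 1970);
        return 0;
    }
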
+struct tm *
+sys_localtime_r(time_t *epochs, struct tm *ptm)
+{
+ FILETIME ft,lft;
+ SYSTEMTIME st;
+
+ if ((((*epochs) + EPOCH_JULIAN_DIFF) * TICKS_PER_SECOND) < 0LL) {
+ return NULL;
+ }
+
+ EPOCH_TO_FILETIME(ft,*epochs);
+ ptm->tm_isdst = 0;
+ if (have_static_tzi) {
+ FILETIME dst_start, dst_stop;
+ ULARGE_INTEGER ull;
+ DWORD year = approx_year(ft);
+ get_dst_switchtime(year,static_tzi.DaylightDate,
+ static_tzi.Bias+static_tzi.StandardBias,&dst_start);
+ get_dst_switchtime(year,static_tzi.StandardDate,
+ static_tzi.Bias+static_tzi.StandardBias+
+ static_tzi.DaylightBias,
+ &dst_stop);
+ FILETIME_TO_ULI(ull,ft);
+
+ if (CompareFileTime(&ft,&dst_start) >= 0 &&
+ CompareFileTime(&ft,&dst_stop) < 0) {
+ ull.QuadPart -=
+ ((LONGLONG) static_tzi.Bias+static_tzi.StandardBias+
+ static_tzi.DaylightBias) *
+ LL_LITERAL(60) * TICKS_PER_SECOND;
+ ptm->tm_isdst = 1;
+ } else {
+ ull.QuadPart -=
+ ((LONGLONG) static_tzi.Bias+static_tzi.StandardBias)
+ * LL_LITERAL(60) * TICKS_PER_SECOND;
+ }
+ ULI_TO_FILETIME(ft,ull);
+ } else {
+ if (!FileTimeToLocalFileTime(&ft,&lft)) {
+ return NULL;
+ }
+ ft = lft;
+ }
+
+ if (!FileTimeToSystemTime(&ft,&st)) {
+ return NULL;
+ }
+
+ ptm->tm_year = (int) st.wYear - 1900;
+ ptm->tm_mon = (int) st.wMonth - 1;
+ ptm->tm_mday = (int) st.wDay;
+ ptm->tm_hour = (int) st.wHour;
+ ptm->tm_min = (int) st.wMinute;
+ ptm->tm_sec = (int) st.wSecond;
+ ptm->tm_wday = (int) st.wDayOfWeek;
+ {
+ int yday = ptm->tm_mday - 1;
+ int m = ptm->tm_mon;
+ int leap_year = 0;
+ if (((st.wYear % 4) == 0 && (st.wYear % 100) > 0) ||
+ ((st.wYear % 400) == 0)) {
+ leap_year = 1;
+ }
+ while (m > 0) {
+	    yday += days_in_month[leap_year][m];
+ --m;
+ }
+ ptm->tm_yday = yday;
+ }
+ return ptm;
+}
+
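
Both sys_localtime_r above and sys_gmtime_r below derive tm_yday by walking the same days_in_month table. The loop, pulled out into a standalone helper for reference (illustrative only; the driver keeps it inline in both functions):

    /* 0-based day of the year from a 0-based month and 1-based day of month. */
    static int day_of_year(int mon, int mday, int leap_year)
    {
        static const int days_in_month[2][13] = {
            {0,31,28,31,30,31,30,31,31,30,31,30,31},
            {0,31,29,31,30,31,30,31,31,30,31,30,31}};
        int yday = mday - 1;
        int m = mon;                       /* 0 = January */
        while (m > 0) {
            yday += days_in_month[leap_year][m];
            --m;
        }
        return yday;
    }
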
+struct tm *
+sys_gmtime_r(time_t *epochs, struct tm *ptm)
+{
+ FILETIME ft;
+ SYSTEMTIME st;
+
+ if ((((*epochs) + EPOCH_JULIAN_DIFF) * TICKS_PER_SECOND) < 0LL) {
+ return NULL;
+ }
+
+ EPOCH_TO_FILETIME(ft,*epochs);
+
+ if (!FileTimeToSystemTime(&ft,&st)) {
+ return NULL;
+ }
+
+ ptm->tm_year = (int) st.wYear - 1900;
+ ptm->tm_mon = (int) st.wMonth - 1;
+ ptm->tm_mday = (int) st.wDay;
+ ptm->tm_hour = (int) st.wHour;
+ ptm->tm_min = (int) st.wMinute;
+ ptm->tm_sec = (int) st.wSecond;
+ ptm->tm_wday = (int) st.wDayOfWeek;
+ ptm->tm_isdst = 0;
+ {
+ int yday = ptm->tm_mday - 1;
+ int m = ptm->tm_mon;
+ int leap_year = 0;
+ if (((st.wYear % 4) == 0 && (st.wYear % 100) > 0) ||
+ ((st.wYear % 400) == 0)) {
+ leap_year = 1;
+ }
+ while (m > 0) {
+	    yday += days_in_month[leap_year][m];
+ --m;
+ }
+ ptm->tm_yday = yday;
+ }
+
+ return ptm;
+}
+
+time_t
+sys_mktime(struct tm *ptm)
+{
+ FILETIME ft;
+ SYSTEMTIME st;
+ int dst = 0;
+ time_t epochs;
+
+ memset(&st,0,sizeof(st));
+    /* Convert the relevant parts of struct tm to SYSTEMTIME */
+ st.wYear = (USHORT) (ptm->tm_year + 1900);
+ st.wMonth = (USHORT) (ptm->tm_mon + 1);
+ st.wDay = (USHORT) ptm->tm_mday;
+ st.wHour = (USHORT) ptm->tm_hour;
+ st.wMinute = (USHORT) ptm->tm_min;
+ st.wSecond = (USHORT) ptm->tm_sec;
+
+ SystemTimeToFileTime(&st,&ft);
+
+    /* ft is now a local filetime, but it may still be off by the DST bias
+       depending on what is in the tm_isdst field. We need to convert it
+       to UTC manually before turning it into epoch seconds. */
+
+ if (have_static_tzi) {
+ FILETIME dst_start, dst_stop;
+ ULARGE_INTEGER ull_start,ull_stop,ull_ft;
+
+ FILETIME_TO_ULI(ull_ft,ft);
+
+ /* Correct everything except DST */
+ ull_ft.QuadPart += (static_tzi.Bias+static_tzi.StandardBias)
+ * LL_LITERAL(60) * TICKS_PER_SECOND;
+
+ /* Determine if DST is active */
+ if (ptm->tm_isdst >= 0) {
+ dst = ptm->tm_isdst;
+ } else if (static_tzi.DaylightDate.wMonth != 0){
+	    /* This is how the Windows mktime does it, meaning it does not
+	       take nonexistent local times into account. */
+ get_dst_switchtime(st.wYear,static_tzi.DaylightDate,
+ static_tzi.Bias+static_tzi.StandardBias,
+ &dst_start);
+ get_dst_switchtime(st.wYear,static_tzi.StandardDate,
+ static_tzi.Bias+static_tzi.StandardBias+
+ static_tzi.DaylightBias,
+ &dst_stop);
+ FILETIME_TO_ULI(ull_start,dst_start);
+ FILETIME_TO_ULI(ull_stop,dst_stop);
+ if ((ull_ft.QuadPart >= ull_start.QuadPart) &&
+ (ull_ft.QuadPart < ull_stop.QuadPart)) {
+ /* We are in DST */
+ dst = 1;
+ }
+ }
+ /* Correct for DST */
+ if (dst) {
+ ull_ft.QuadPart += static_tzi.DaylightBias *
+ LL_LITERAL(60) * TICKS_PER_SECOND;
+ }
+ epochs = ((ull_ft.QuadPart / TICKS_PER_SECOND) - EPOCH_JULIAN_DIFF);
+ } else {
+ /* No DST, life is easy... */
+ FILETIME lft;
+ LocalFileTimeToFileTime(&ft,&lft);
+ FILETIME_TO_EPOCH(epochs,lft);
+ }
+ /* Normalize the struct tm */
+ sys_localtime_r(&epochs,ptm);
+ return epochs;
+}
+
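
Taken together, sys_localtime_r and sys_mktime are meant to satisfy the usual localtime/mktime contract: converting epoch seconds to broken-down local time and back returns the original value, normalizing tm_isdst along the way. A small portable illustration of that contract using the standard C library (the ERTS replacements above follow the same shape but go through FILETIME):

    #include <assert.h>
    #include <time.h>

    int main(void)
    {
        time_t now = time(NULL);
        /* localtime fills in tm_isdst; mktime inverts it for valid times. */
        struct tm tmbuf = *localtime(&now);
        assert(mktime(&tmbuf) == now);
        return 0;
    }
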
void
sys_gettimeofday(SysTimeval *tv)
{
SYSTEMTIME t;
FILETIME ft;
- LONGLONG lft;
+ ULARGE_INTEGER ull;
GetSystemTime(&t);
SystemTimeToFileTime(&t, &ft);
- memcpy(&lft, &ft, sizeof(lft));
- tv->tv_usec = (long) ((lft / LL_LITERAL(10)) % LL_LITERAL(1000000));
- tv->tv_sec = (long) ((lft / LL_LITERAL(10000000)) - EPOCH_JULIAN_DIFF);
+ FILETIME_TO_ULI(ull,ft);
+ tv->tv_usec = (long) ((ull.QuadPart / LL_LITERAL(10)) %
+ LL_LITERAL(1000000));
+ tv->tv_sec = (long) ((ull.QuadPart / LL_LITERAL(10000000)) -
+ EPOCH_JULIAN_DIFF);
}
SysHrTime
@@ -88,9 +399,3 @@ sys_times(SysTimes *buffer) {
buffer->tms_stime = (clock_t) (system & LL_LITERAL(0x7FFFFFFF));
return kernel_ticks;
}
-
-
-
-
-
-